code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Setup" data-toc-modified-id="Setup-1"><span class="toc-item-num">1 </span>Setup</a></span></li><li><span><a href="#Pretrained-Embedding" data-toc-modified-id="Pretrained-Embedding-2"><span class="toc-item-num">2 </span>Pretrained Embedding</a></span></li></ul></div>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# # FastText
#
#
# http://scikit-learn.org/stable/datasets/twenty_newsgroups.html
# https://fasttext.cc/docs/en/english-vectors.html
#
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ## Setup
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
###############################################################
import os
# Save the current working directory so it can be restored after the
# project-local helper import below.
dirPath = os.getcwd()
# cd to the Python_Helpers directory, import, then restore the cwd.
# try/finally guarantees the original directory is restored even if the
# import fails (the helper module may be missing on other machines).
os.chdir(os.path.expanduser('~/Python_Helpers/'))
try:
    # import done function (project-local notification helper)
    from jason import done
finally:
    # return to original path
    os.chdir(dirPath)
###############################################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
import zipfile
import datetime
# Report the interpreter version.
# BUG FIX: sys.version[:3] breaks for Python >= 3.10 (it yields '3.1'),
# so build the string from sys.version_info instead.
import sys
print('python version:', '%d.%d' % sys.version_info[:2])
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
from gensim.models import FastText
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
from keras.datasets import imdb
# Load the Keras IMDB sentiment dataset (downloaded and cached on first use).
# num_words=None keeps the full vocabulary. Ids 0-2 are reserved for padding,
# sequence start (start_char=1) and out-of-vocabulary words (oov_char=2);
# real word indices are offset by index_from=3.
(x_train, y_train), (x_test, y_test) = imdb.load_data(path="imdb.npz",
num_words=None,
skip_top=0,
maxlen=None,
seed=113,
start_char=1,
oov_char=2,
index_from=3)
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# Train a skip-gram (sg=1) FastText model with 100-dimensional vectors.
# NOTE(review): `sentences_ted` is not defined anywhere in this notebook —
# this cell raises NameError unless it was created elsewhere. TODO confirm.
# NOTE(review): the `size` argument was renamed `vector_size` in gensim 4.x;
# this call assumes gensim < 4 — verify the installed version.
model_ted = FastText(sentences_ted, size=100, window=5, min_count=5, workers=4,sg=1)
model_ted.wv.most_similar("Gastroenteritis")
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# Read the tab-separated training file (kept out of version control).
train = pd.read_csv("do_not_upload/train.tsv", header=0, \
delimiter="\t")#, quoting=3)
train.head()
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
train
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
from sklearn.datasets import fetch_20newsgroups
# Download (and cache) the 20-newsgroups training split, metadata included.
newsgroups_train = fetch_20newsgroups(subset='train')
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
newsgroups_train.data[0]
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
newsgroups_train.target[0]
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# Re-fetch with headers, footers and quoted replies stripped so models
# cannot overfit on newsgroup metadata.
newsgroups_train = fetch_20newsgroups(subset='train',
remove=('headers', 'footers', 'quotes'))
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
newsgroups_train.data[0]
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
import re
# Remove newline characters from the first document for compact display.
re.sub('\n', '',newsgroups_train.data[0])
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ## Pretrained Embedding
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
import io
def load_vectors(fname):
    """Load a fastText .vec embedding file into a dict.

    The first line of the file holds the vocabulary size and the vector
    dimensionality; each following line is a word followed by its vector
    components, space-separated.

    Parameters
    ----------
    fname : str
        Path to the .vec file (UTF-8, one embedding per line).

    Returns
    -------
    dict
        Maps each word to a list of floats of length d.
    """
    # `with` guarantees the handle is closed even if parsing fails.
    with io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
        n, d = map(int, fin.readline().split())
        data = {}
        for line in fin:
            tokens = line.rstrip().split(' ')
            # BUG FIX: the original stored the lazy `map` object, which in
            # Python 3 is a one-shot iterator — unusable as embedding data.
            # Materialize it into a list of floats.
            data[tokens[0]] = [float(t) for t in tokens[1:]]
    return data
emb = load_vectors('do_not_upload/wiki-news-300d-1M.vec')
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
emb
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
from gensim.models import FastText
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
from gensim.models import KeyedVectors
# Creating the model
# %time en_model = KeyedVectors.load_word2vec_format('do_not_upload/wiki-news-300d-1M.vec')
# NOTE(review): the line above is a jupytext-escaped %time cell magic; when
# this file runs as a plain script it is just a comment, so en_model is never
# defined and the following cells raise NameError. Confirm intended usage.
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
en_model
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
love = en_model.get_vector('I') + en_model.get_vector('love') + en_model.get_vector('you')
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
en_model.most_similar('hi')
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
en_model.most_similar('saurus')
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
en_model.most_similar('thai')
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
help(en_model.similar_by_vector)
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
| *Deep_Learning/Word_Embedding/Facebook_FastText/FastText.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Import the needed packages
import psycopg2
import pandas as pd
import time, datetime
import json
# ## Define a function that load the comtrade json files into a dictionary
def load_comtrade_info():
    """Load the UN Comtrade reference JSON files into DataFrames.

    Reads the commodity (HS), partner-area and reporter-area code lists
    from ./lib and exposes each file's 'results' array as a DataFrame.

    Returns
    -------
    dict
        Keys 'comcodes', 'partners' and 'reporters', each a DataFrame
        with (at least) 'id' and 'text' columns.
    """
    # key in the returned dictionary -> JSON file in ./lib
    sources = {
        'comcodes': 'lib/classificationHS.json',
        'partners': 'lib/partnerAreas.json',
        'reporters': 'lib/reporterAreas.json',
    }
    comtrade_dictionary = {}
    for key, path in sources.items():
        # `with` guarantees the file handle is closed even if parsing fails.
        with open(path) as json_data:
            comtrade_dictionary[key] = pd.DataFrame(json.load(json_data)['results'])
    return comtrade_dictionary
# ## Define three function which lookup comtrade codes in the comtrade dictionary
# TODO: change the country search to use ISO country names — this needs an SQL query to fetch those codes first.
def get_reporter_code(comtrade_dictionary, country):
    '''
    Return the Comtrade id of the first reporter whose name
    contains *country*.
    '''
    # Boolean mask over the reporter names (regex substring match),
    # then take the id of the first matching row.
    reporters = comtrade_dictionary['reporters']
    matches = reporters['text'].str.contains(country)
    return reporters['id'][matches].iloc[0]
def get_partner_code(comtrade_dictionary, country):
    '''
    Return the Comtrade id of the first partner area whose name
    contains *country*.
    '''
    # Same lookup strategy as get_reporter_code, but on the partner table.
    partners = comtrade_dictionary['partners']
    matches = partners['text'].str.contains(country)
    return partners['id'][matches].iloc[0]
def get_commodity_code(comtrade_dictionary, commodity):
    '''
    Look up *commodity* in the HS commodity table.

    Prints the description and returns the first matching row (a Series
    with 'id' and 'text'), or prints a not-found message and returns -1
    when no row's text contains *commodity*.
    '''
    comcodes = comtrade_dictionary['comcodes']
    hits = comcodes[comcodes['text'].str.contains(commodity)]
    # Guard clause: nothing matched.
    if hits.empty:
        print(commodity + ' not found.')
        return -1
    # Take the first match when several rows qualify.
    first_hit = hits.iloc[0]
    print(first_hit['text'])
    return first_hit
# ## Connect to the database
# +
# Open a read-only connection to the Comtrade PostgreSQL database.
# NOTE(review): credentials are hard-coded; the password placeholder must be
# supplied through a secrets mechanism, not committed to the notebook.
conn = psycopg2.connect(
    dbname = "comtrade", # could also be "hmrc"
    host = "data-science-pgsql-dev-01.c8kuuajkqmsb.eu-west-2.rds.amazonaws.com",
    user = "trade_read",
    password = "<PASSWORD>")
cur = conn.cursor()
# -
# ## Get the column names and print them out
cur.execute("select COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION from INFORMATION_SCHEMA.COLUMNS where TABLE_NAME='comtrade'")
column_names = pd.DataFrame(cur.fetchall())
print(column_names)
# ## Lookup the needed comtrade codes for the SQL request
comtrade_dict = load_comtrade_info()
uk_code = get_reporter_code(comtrade_dict, 'United Kingdom')
brazil_code = get_partner_code(comtrade_dict, 'Brazil')
beef = get_commodity_code(comtrade_dict, 'Meat')['id']
# ## Download the comtrade data and put it into a pandas DataFrame
# +
t0 = time.perf_counter()
# BUG FIX: the original string fragments lacked separating spaces, producing
# "...201612AND commodity_code..." — invalid SQL. Each fragment now ends with
# a space before the next keyword. Values stay parameterized (%s) so psycopg2
# escapes them safely.
cur.execute("SELECT partner, netweight_kg, trade_value_usd, period, commodity_code "
            "FROM comtrade "
            "WHERE partner_code = %s "
            "AND period BETWEEN 201401 AND 201612 "
            "AND commodity_code = %s "
            "AND reporter_code = %s", (brazil_code, beef, uk_code))
exports_imports = pd.DataFrame(cur.fetchall(), columns=['partner', 'netweight_kg', 'trade_value_usd', 'period', 'commodity_code'])
t1 = time.perf_counter()
print('Request took: ' + str(datetime.timedelta(seconds=t1 - t0)))
# -
print(exports_imports)
| notebooks/COMTRADE_SQL_request_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Python: Introduction to OOP
# **Goal**: understand the basics concepts of object-oriented programming in Python!
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Class-definition" data-toc-modified-id="Class-definition-1"><span class="toc-item-num">1 </span>Class definition</a></span></li><li><span><a href="#Encapsulation-principle" data-toc-modified-id="Encapsulation-principle-2"><span class="toc-item-num">2 </span>Encapsulation principle</a></span></li><li><span><a href="#Inheritance-principle" data-toc-modified-id="Inheritance-principle-3"><span class="toc-item-num">3 </span>Inheritance principle</a></span></li><li><span><a href="#Polymorphism-principle" data-toc-modified-id="Polymorphism-principle-4"><span class="toc-item-num">4 </span>Polymorphism principle</a></span></li><li><span><a href="#Abstraction-principle" data-toc-modified-id="Abstraction-principle-5"><span class="toc-item-num">5 </span>Abstraction principle</a></span></li><li><span><a href="#Super-classes" data-toc-modified-id="Super-classes-6"><span class="toc-item-num">6 </span>Super classes</a></span></li></ul></div>
# -
# ## Class definition
# ``Object-oriented programming`` is the next step after ``procedural programming``. It does not represent a total revolution of the programming mode. It does, however, represent a better structuring of your software products. It allows you to gain in ``abstraction``, which implies a better ``modularity`` of your source codes, a better ``maintainability`` of your source codes and a better ``reuse`` of your codes (``inheritance`` concepts). In this course, we will focus on ``classes``.
# In simple words, a ``class`` represents a data type. An ``object`` (also called an instance) represents a piece of data whose type is a class. An ``attribute`` is a part of the state of the class (for example the numerator or the denominator of a rational number). A ``method`` is almost a function that is invoked on an ``object`` (for example a simplification method on a rational number: 2/8 => ¼). A ``property`` is often a pair (although there may be only one) of access methods (read and write) to an ``attribute``.
# In the following example, we will define a ``Rational`` class to simplify or make rational a fraction.
class Rational(object):
    """A rational number num/den, reduced to lowest terms on creation."""
    def __init__(self, num, den):
        """Store the fraction num/den and simplify it.

        Raises ZeroDivisionError if den is 0.
        """
        if den == 0:
            raise ZeroDivisionError("The denominator cannot be null")
        self.numerator = num
        self.denominator = den
        self.simplify()
    def simplify(self):
        """Reduce the fraction in place by dividing out common factors."""
        divisor = 2
        while divisor <= min(self.numerator, self.denominator):
            while self.numerator % divisor == 0 and self.denominator % divisor == 0:
                # BUG FIX: use floor division — true division (/=) turned the
                # attributes into floats, so Rational(4, 8).numerator was 1.0.
                self.numerator //= divisor
                self.denominator //= divisor
            divisor += 1
    def __repr__(self):
        """Display the fraction as 'num/den'."""
        return "%d/%d" % (self.numerator, self.denominator)
# The special method **\_\_init\_\_** represents the ``constructor`` of the class. A ``constructor`` is a ``special method`` that the program calls upon an object’s creation. The ``constructor`` is used in the class to initialize data members to the object. With our ``Rational`` class example, you can use a ``constructor`` to assign characteristics to each rational number object. The special method **\_\_init\_\_** is the Python ``constructor``. The **\_\_init\_\_** method is the Python equivalent of the **C++ constructor** in an object-oriented approach. The **\_\_init\_\_** function is called every time an object is created from a class. The **\_\_init\_\_** method lets the class initialize the object’s attributes and serves no other purpose. It is only used within classes.
# The **simplify()** function represents the transformation method to make a denominator rational. This is to make the fraction irreducible.And finally, the special method **\_\_repr__** is used to **display** the object of a class directly without using the **print()** function. Indeed, for the display with the **print()** function, the special method **\_\_str__** is used.
r = Rational(2, 0)
r
r = Rational(4, 8)
r
r.numerator, r.denominator
r = Rational(2, 4)
r.denominator = 0
r
# ## Encapsulation principle
# ``Encapsulation`` is a **mechanism** to secure your classes. It consists in forbidding direct access to your attributes via the notion of visibility (public/private). It also consists in providing a property per attribute, to allow a secure access to your data. Traditionally, a **property** consists of two methods (but this can vary):
#
# - the **getter** allowing the retrieval of the value of the associated attribute;
# - the **setter** allowing the secure modification of the attribute value.
#
# The ``encapsulation`` allows among other things to provide a constructor to define the initial state of an object. This ``constructor`` will have to pass through the properties to set the value of each attribute.
# To replace the notion of **visibility (public or private non-existent)**, Python proposes to ``scramble`` some class members. These members must have a name that starts with **two underscore characters**. Methods ending with two of these characters will not be impacted. They will be accessible from inside the class. However, from the outside, the names of these attributes will be ``scrambled``. From the outside, the real name of a member will be **_ClassName__attrName**.
# The following code shows an example of method and attribute scrambling done on our previous class. The goal is, on the one hand, to hide the attributes of the constructor **\_\_numerator__** and **\_\_denominator__** and, on the other hand, to hide all the methods for displaying and modifying attributes.
# first encapsulation method
class Rational(object):
    """Rational number with name-scrambled attributes and property access."""
    def __init__(self, num, den):
        # Go through the private setters so validation runs on construction.
        self.__setNumerator(num)
        self.__setDenominator(den)
        self.simplify()
    def __getNumerator(self):
        """Getter for the scrambled numerator attribute."""
        return self.__numerator
    def __setNumerator(self, newNum):
        """Setter: the numerator must be an int."""
        if not isinstance(newNum, int):
            raise BaseException("Numerator must be an integer")
        self.__numerator = newNum
    def __getDenominator(self):
        """Getter for the scrambled denominator attribute."""
        # BUG FIX: the original was missing `return`, so reading
        # `r.denominator` always yielded None.
        return self.__denominator
    def __setDenominator(self, newDen):
        """Setter: the denominator must be a non-zero int."""
        if not isinstance(newDen, int):
            raise BaseException("Denominator must be an integer")
        if newDen == 0:
            raise ZeroDivisionError("The denominator cannot be null")
        self.__denominator = newDen
    # Properties expose the scrambled attributes under their public names.
    numerator = property(__getNumerator, __setNumerator)
    denominator = property(__getDenominator, __setDenominator)
    def simplify(self):
        """Reduce the fraction in place by dividing out common factors."""
        divisor = 2
        while divisor <= min(self.__numerator, self.__denominator):
            while self.__numerator % divisor == 0 and self.__denominator % divisor == 0:
                # BUG FIX: floor division keeps the attributes as ints
                # (true division produced floats).
                self.__numerator //= divisor
                self.__denominator //= divisor
            divisor += 1
    def __repr__(self):
        return "%d/%d" % (self.__numerator, self.__denominator)
r = Rational(3, 2)
r
r.denominator = 15
r
r.numerator
r.denominator = 0
r
# The following example proposes another encapsulation method similar to the first one done above but using this time **decorators**. Python allows to define a **property** which is in fact a couple of secure access methods to an attribute. It is done by using the ``@property decorator`` above a method.
# second encapsulation method
class Rational(object):
    """Rational number using @property decorators for validated access."""
    def __init__(self, num, den):
        # Assigning to the properties runs the setters' validation.
        self.numerator = num
        self.denominator = den
        self.simplify()
    @property
    def numerator(self):
        """The numerator (int)."""
        return self.__numerator
    @numerator.setter
    def numerator(self, newNum):
        if not isinstance(newNum, int):
            raise BaseException("Numerator must be an integer")
        self.__numerator = newNum
    @property
    def denominator(self):
        """The denominator (non-zero int)."""
        # BUG FIX: the original was missing `return`; reading the property
        # always yielded None.
        return self.__denominator
    @denominator.setter
    def denominator(self, newDen):
        if not isinstance(newDen, int):
            raise BaseException("Denominator must be an integer")
        if newDen == 0:
            raise ZeroDivisionError("The denominator cannot be null")
        self.__denominator = newDen
    def simplify(self):
        """Reduce the fraction in place by dividing out common factors."""
        divisor = 2
        while divisor <= min(self.__numerator, self.__denominator):
            while self.__numerator % divisor == 0 and self.__denominator % divisor == 0:
                # BUG FIX: floor division keeps the attributes as ints.
                self.__numerator //= divisor
                self.__denominator //= divisor
            divisor += 1
    def __repr__(self):
        return "%d/%d" % (self.__numerator, self.__denominator)
r = Rational(3, 0)
r
r.denominator = 4
r
# ## Inheritance principle
# ``Inheritance`` is a concept that allows better **factoring** and **reuse** of code. To put it simply, you can define a new data type by enriching another data type. For example an administrator can be considered as a user of the application, but with additional possibilities. Even better, Python allows you to do **simple inheritance**, but also **multiple inheritance**. Let's learn more about this concept with the following example.
class User(object):
    """A user with a capitalized first name and an upper-cased last name."""
    def __init__(self, firstName, lastName):
        # Assigning through the properties validates and normalizes the names.
        self.firstName = firstName
        self.lastName = lastName
    @property
    def firstName(self):
        """First name, stored capitalized."""
        return self.__firstName
    @firstName.setter
    def firstName(self, firstName):
        if not isinstance(firstName, str):
            raise BaseException("firstName must be an string")
        firstName = firstName.strip()
        if firstName == '':
            raise BaseException("The firstName cannot be empty")
        self.__firstName = firstName.capitalize()
    @property
    def lastName(self):
        """Last name, stored upper-cased."""
        # BUG FIX: the original was missing `return`; reading the property
        # always yielded None.
        return self.__lastName
    @lastName.setter
    def lastName(self, lastName):
        if not isinstance(lastName, str):
            raise BaseException("lastName must be an string")
        lastName = lastName.strip()
        if lastName == '':
            raise BaseException("The lastName cannot be empty")
        self.__lastName = lastName.upper()
    def identity(self):
        """Role description used by the polymorphism examples."""
        return "the user!"
    def __repr__(self):
        return "%s %s" % (self.__firstName, self.__lastName)
me = User("mohamed", "niang")
me
# ``User`` is our base class **(Parent class)**. We will start from this class to create a new class **(derived class)** with other attributes.
class Admin(User):
    """An administrator: a User with an extra `rights` attribute."""
    def __init__(self, firstName, lastName, rights):
        # Delegate name validation/normalization to the User constructor.
        User.__init__(self, firstName, lastName)
        # Plain attribute here; later examples wrap it in a property.
        self.rights = rights
other = Admin("seyni", "diop", "r")
other
other.__str__()
print(other.__str__())
# Our **child** class **Admin** uses the data of our **parent** class **User** which itself **derives** from the class **object**. It is the latter that allows the display of several other **methods (\_\_str__() for example)** within the **Admin** and **User** classes. Let's go further by expanding our **Admin** class.
class Admin(User):
    """A User that additionally carries a lower-cased rights string."""
    def __init__(self, firstName, lastName, rights):
        # Let the parent class validate and store the name parts.
        User.__init__(self, firstName, lastName)
        self.rights = rights
    @property
    def rights(self):
        """Access rights, stored lower-cased."""
        return self.__rights
    @rights.setter
    def rights(self, value):
        # Reject non-strings and empty/whitespace-only values.
        if not isinstance(value, str):
            raise BaseException("rights must be an string")
        value = value.strip()
        if not value:
            raise BaseException("rights cannot be empty")
        self.__rights = value.lower()
    def __repr__(self):
        # Parent repr first, then the rights suffix.
        return "%s %s" % (User.__repr__(self), self.rights)
other = Admin("seyni", "diop", "r")
other
# ## Polymorphism principle
# ``Polymorphism`` means that an ``object`` can be seen in several ``forms``. When you do ``inheritance`` it induces ``polymorphism``. In short, it is when two **classes** linked by **inheritance** each have a **method** with the same name but which **performs the same or a different task**. Let's illustrate this concept with the following example.
# first example with methods performing the same tasks
class Admin(User):
    """A User with an access-rights string; overrides identity()."""
    def __init__(self, firstName, lastName, rights):
        # Name handling is delegated to the parent class.
        User.__init__(self, firstName, lastName)
        self.rights = rights
    @property
    def rights(self):
        """Access rights, stored lower-cased."""
        return self.__rights
    @rights.setter
    def rights(self, value):
        # Reject non-strings and empty/whitespace-only values.
        if not isinstance(value, str):
            raise BaseException("rights must be an string")
        value = value.strip()
        if not value:
            raise BaseException("rights cannot be empty")
        self.__rights = value.lower()
    def identity(self):
        # Overrides User.identity with an admin-specific message.
        return "I am the administrator"
    def __repr__(self):
        return "%s %s" % (User.__repr__(self), self.rights)
me = Admin("mohamed", "niang", "r")
me
me.identity()
# second example with methods performing a different tasks
class Admin(User):
    """A User whose identity() extends (rather than replaces) the parent's."""
    def __init__(self, firstName, lastName, rights):
        # Name handling is delegated to the parent class.
        User.__init__(self, firstName, lastName)
        self.rights = rights
    @property
    def rights(self):
        """Access rights, stored lower-cased."""
        return self.__rights
    @rights.setter
    def rights(self, value):
        # Reject non-strings and empty/whitespace-only values.
        if not isinstance(value, str):
            raise BaseException("rights must be an string")
        value = value.strip()
        if not value:
            raise BaseException("rights cannot be empty")
        self.__rights = value.lower()
    def identity(self):
        # Builds on the parent description instead of replacing it.
        supervised = User.identity(self)
        return "I am the administrator" + " who supervises " + supervised
    def __repr__(self):
        return "%s %s" % (User.__repr__(self), self.rights)
me = Admin("mohamed", "niang", "r")
me
me.identity()
# ## Abstraction principle
# ``Abstract`` classes are classes that are meant to be ``inherited`` but avoid implementing specific ``methods``, leaving behind only method signatures that subclasses must implement. Abstract classes are useful for defining and enforcing class ``abstractions`` at a high level, similar to the concept of interfaces in typed languages, without the need for method implementation. One conceptual approach to defining an abstract class is to stub out the class methods, and then raise a ``NotImplementedError`` if accessed. This prevents ``children classes`` from accessing parent methods without overriding them first. Let's take the example with our User class where the identity method is not implemented and call the Admin class defined above.
class User(object):
    """Abstract-style base user: identity() must be overridden by subclasses."""
    def __init__(self, firstName, lastName):
        # Assigning through the properties validates and normalizes the names.
        self.firstName = firstName
        self.lastName = lastName
    @property
    def firstName(self):
        """First name, stored capitalized."""
        return self.__firstName
    @firstName.setter
    def firstName(self, firstName):
        if not isinstance(firstName, str):
            raise BaseException("firstName must be an string")
        firstName = firstName.strip()
        if firstName == '':
            raise BaseException("The firstName cannot be empty")
        self.__firstName = firstName.capitalize()
    @property
    def lastName(self):
        """Last name, stored upper-cased."""
        # BUG FIX: the original was missing `return`; reading the property
        # always yielded None.
        return self.__lastName
    @lastName.setter
    def lastName(self, lastName):
        if not isinstance(lastName, str):
            raise BaseException("lastName must be an string")
        lastName = lastName.strip()
        if lastName == '':
            raise BaseException("The lastName cannot be empty")
        self.__lastName = lastName.upper()
    def identity(self):
        """Abstract hook: subclasses must override this method."""
        raise NotImplementedError("identity_user method not implemented!")
    def __repr__(self):
        return "%s %s" % (self.__firstName, self.__lastName)
class Admin(User):
    """Concrete subclass: overrides the abstract identity() of User."""
    def __init__(self, firstName, lastName, rights):
        # Name handling is delegated to the parent class.
        User.__init__(self, firstName, lastName)
        self.rights = rights
    @property
    def rights(self):
        """Access rights, stored lower-cased."""
        return self.__rights
    @rights.setter
    def rights(self, value):
        # Reject non-strings and empty/whitespace-only values.
        if not isinstance(value, str):
            raise BaseException("rights must be an string")
        value = value.strip()
        if not value:
            raise BaseException("rights cannot be empty")
        self.__rights = value.lower()
    def identity(self):
        # Calls the parent method explicitly to compose the message.
        supervised = User.identity(self)
        return "I am the administrator" + " who supervises " + supervised
    def __repr__(self):
        return "%s %s" % (User.__repr__(self), self.rights)
me = Admin("mohamed", "niang", "r")
me
me.identity()
# Creating an ``abstract`` class in this way prevents improper usage of methods that are not ``overridden``, and certainly encourages methods to be defined in ``child classes``, but it does not enforce their definition. With the ``abc module`` we can prevent child classes from being instantiated when they fail to ``override abstract class methods`` of their parents and ancestors.
# ## Super classes
# ``Super classes`` are used in ``inheritance`` concepts by the built-in python ``super()`` function. It allows to access direct ``parents`` during ``inheritance`` and to solve different problems due to ``multiple inheritance``. Let's go back to the examples we dealt with earlier about ``inheritance`` and try to apply the ``super()`` function on the ``class parent constructor``.
class User(object):
    """Base user class for the super() examples."""
    def __init__(self, firstName, lastName):
        # Assigning through the properties validates and normalizes the names.
        self.firstName = firstName
        self.lastName = lastName
    @property
    def firstName(self):
        """First name, stored capitalized."""
        return self.__firstName
    @firstName.setter
    def firstName(self, firstName):
        if not isinstance(firstName, str):
            raise BaseException("firstName must be an string")
        firstName = firstName.strip()
        if firstName == '':
            raise BaseException("The firstName cannot be empty")
        self.__firstName = firstName.capitalize()
    @property
    def lastName(self):
        """Last name, stored upper-cased."""
        # BUG FIX: the original was missing `return`; reading the property
        # always yielded None.
        return self.__lastName
    @lastName.setter
    def lastName(self, lastName):
        if not isinstance(lastName, str):
            raise BaseException("lastName must be an string")
        lastName = lastName.strip()
        if lastName == '':
            raise BaseException("The lastName cannot be empty")
        self.__lastName = lastName.upper()
    def identity(self):
        """Role description used by the inheritance examples."""
        return "the user!"
    def __repr__(self):
        return "%s %s" % (self.__firstName, self.__lastName)
# In the following ``Admin`` class, we will use the ``super()`` function to ``automatically instantiate`` all attributes of the parent ``User`` class. The ``power`` of the super function lies in the fact that it can ``instantiate`` all the attributes of the parent classes, for example when there are ``multiple inheritances``.
class Admin(User):
    """A User subclass whose constructor chains to the parent via super()."""
    def __init__(self, firstName, lastName, rights):
        # super() resolves the parent through the MRO — no explicit class name.
        super().__init__(firstName, lastName)
        self.rights = rights
    @property
    def rights(self):
        """Access rights, stored lower-cased."""
        return self.__rights
    @rights.setter
    def rights(self, value):
        # Reject non-strings and empty/whitespace-only values.
        if not isinstance(value, str):
            raise BaseException("rights must be an string")
        value = value.strip()
        if not value:
            raise BaseException("rights cannot be empty")
        self.__rights = value.lower()
    def __repr__(self):
        return "%s %s" % (User.__repr__(self), self.rights)
me = Admin("mohamed", "niang", "r")
me
| courses/18. OOP in Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm
import matplotlib.patches
import squarify
import os
from matplotlib.collections import PatchCollection
from common import *
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
def st_to_str(st):
    """Format a similarity threshold as a one-decimal string (0.1 -> '0.1')."""
    return format(st, '.1f')
all_st = np.arange(0.1, 1.0, 0.1)
all_st_str = map(st_to_str, all_st)
# all_st = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
def read_groups_size(path):
    """Parse a *_DATA_GROUPS_SIZE_* file into a DataFrame.

    Expected layout (presumed from the parsing below — confirm against the
    file writer): one dataset-info line, a "<from_length> <to_length>" line,
    one distance-info line, then for each length a count line followed by a
    line of space-separated group sizes.

    Returns a DataFrame with columns 'length' and 'size', empty when the
    file does not exist.
    """
    groups = []
    if os.path.exists(path):
        with open(path, 'r') as f:
            f.readline() # Ignore dataset info
            from_length, to_length = map(int, f.readline().strip().split(' '))
            f.readline() # Ignore distance info
            # NOTE: range() is half-open, so to_length itself is excluded —
            # presumably intentional; verify against the writer.
            for l in range(from_length, to_length):
                # count is read only to advance the file pointer; value unused.
                count = int(f.readline())
                group_sizes = map(int, f.readline().strip().split(' '))
                for s in group_sizes:
                    groups.append({'length': l, 'size': s})
    return pd.DataFrame(groups)
def load_groups_of_all_datasets(st):
    """Read the group-size file of every UCR dataset for one threshold.

    *st* is the similarity threshold already formatted as a string
    (e.g. '0.1'), used as the file-name suffix. Relies on
    `all_dataset_metadata` imported from `common` — each entry appears to
    carry 'file_name', 'name' and 'size' keys (confirm in common.py).

    Returns {file_name: {'group_size': DataFrame, 'name': str, 'size': int}}.
    """
    prefix = '../../datasets/UCR'
    groups = {}
    for d in all_dataset_metadata:
        file_name = d['file_name']
        name = d['name']
        size = d['size']
        # Files are named <dataset>_DATA_GROUPS_SIZE_<st> inside each
        # dataset's folder; missing files yield an empty DataFrame.
        g = read_groups_size('%s/%s/%s_DATA_GROUPS_SIZE_%s'%(prefix, file_name, file_name, st))
        groups[file_name] = {
            'group_size': g,
            'name': name,
            'size': size
        }
    return groups
# -
all_dataset_groups = {}
for st in all_st_str:
all_dataset_groups[st] = load_groups_of_all_datasets(st)
# # Group Heat Maps
def plot_groups(ax, groups_df, count_limit, padded=False):
    """Draw a squarify treemap of group sizes on *ax*.

    Groups are sorted by descending size; only the largest groups up to the
    point where the cumulative size reaches *count_limit* (capped at 500
    rectangles) are drawn. Rectangle area encodes group size; color encodes
    the group's length. Returns the PatchCollection (for a colorbar), or
    None when *groups_df* is empty.
    """
    if groups_df.empty:
        return None
    # these values define the coordinate system for the returned rectangles
    # the values will range from x to x + width and y to y + height
    x = 0.
    y = 0.
    width = 250
    height = width
    groups_df = groups_df.sort_values('size', ascending=False)
    values = groups_df['size'].values
    cumsum_val = np.cumsum(values)
    # Index of the first group where the cumulative size reaches the limit,
    # clamped to [1, 500] so at least one rectangle is always drawn.
    cutoff = min(max(np.argmax(cumsum_val >= count_limit), 1), 500)
    values = values[:cutoff]
    colors = groups_df['length'].values
    colors = colors[:cutoff]
    # the sum of the values must equal the total area to be laid out
    # i.e., sum(values) == width * height
    values = squarify.normalize_sizes(values, width, height)
    if padded:
        rects = squarify.padded_squarify(values, x, y, width, height)
    else:
        rects = squarify.squarify(values, x, y, width, height)
    ax.set_xlim(0, width)
    ax.set_ylim(0, height)
    def to_patch(rect):
        # Convert a squarify rect dict into a matplotlib Rectangle patch.
        return matplotlib.patches.Rectangle((rect['x'], rect['y']), rect['dx'], rect['dy'])
    # NOTE: map() yields a list under Python 2, this notebook's kernel.
    patches = map(to_patch, rects)
    collection = PatchCollection(patches, cmap=matplotlib.cm.plasma, alpha=0.9)
    collection.set_array(colors)
    ax.add_collection(collection)
    # Hide tick labels — the treemap coordinates carry no meaning for readers.
    ax.set_yticklabels([])
    ax.set_xticklabels([])
    return collection
# +
fig, axes = plt.subplots(8, 6, figsize=(16, 16))
selected_st = map(st_to_str, [0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
for i, file_name in enumerate(sorted_name_by_size):
for j, st in enumerate(selected_st):
group_size_df = all_dataset_groups[st][file_name]['group_size']
size = all_dataset_groups[st][file_name]['size']
name = all_dataset_groups[st][file_name]['name']
if i == 0:
axes[i][j].set_title('ST = ' + st, fontsize=16)
if j == 0:
axes[i][j].set_ylabel(name, fontsize=14)
ax = axes[i][j]
col = plot_groups(ax, group_size_df, 0.25*size, padded=True)
if col is not None:
fig.colorbar(col, ax=ax)
# fig.suptitle('ST = %.1f' % st, fontsize=15)
plt.tight_layout()
# fig.subplots_adjust(top=0.85)
plt.savefig('group_heat_map_all.eps')
# -
# # Construction time, size, and compression ratio
raw_data = np.array([2.91,4.7,0.51,2.2,0.13,1.9,0.06,1.8,0.03,1.8,0.02,1.8,0.02,1.8,0.02,1.8,0.01,1.8,
8.82,46.4,1.91,10,0.52,3.9,0.16,2.9,0.08,2.7,0.06,2.7,0.04,2.7,0.03,2.6,0.04,2.6,
64.06,106,35.95,69.5,12.63,25.5,3.6,9.9,0.56,4,0.13,3.5,0.06,3.4,0.04,3.4,0.03,3.4,
18.02,69.8,3.11,18.2,1.46,13.1,0.72,11.5,0.45,11,0.34,10.8,0.32,10.8,0.28,10.7,0.22,10.6,
158.4,111,15.29,31.4,2.72,25.4,1,24.7,0.49,24.6,0.4,24.6,0.36,24.6,0.31,24.6,0.29,24.6,
3328,838.2,85.76,113.4,5.82,104.7,1.91,104.4,1.48,104.4,1.31,104.4,1.33,104.4,1.3,104.4,1.28,104.4,
2745,1945.6,355.2,352,48.08,140.2,12.67,120.6,4.66,118,2.77,117.4,2.57,117.3,2.53,117.3,2.42,117.3,
3952,783.1,256.6,559.5,24.31,552.6,10.65,552.5,8.62,552.4,8.19,552.4,8.17,552.4,8.14,552.4,7.41,552.4])
raw_data = raw_data.reshape((8,-1))
# +
# Split raw_data's interleaved columns: even columns are grouping times,
# odd columns are grouping sizes.
group_time = raw_data[:, range(0, 18, 2)]
group_size = raw_data[:, range(1, 19, 2)]
# Compression ratio per threshold and dataset:
# (number of groups + average group size) / dataset size.
group_compression = {}
for st in all_st:
    key = '%.1f' % st
    group_compression[st] = []
    for d in sorted_name_by_size:
        ds = all_dataset_groups[key][d]
        number_of_groups = ds['group_size'].shape[0]
        avg_group_size = ds['group_size']['size'].mean()
        # FIX: np.float was removed in NumPy 1.24; the builtin float is
        # equivalent here.
        compression = float(number_of_groups + avg_group_size) / ds['size']
        group_compression[st].append(compression)
ds_index = sorted_name_by_size
# FIX: the original passed `index=index`, an undefined name — `ds_index`
# (assigned just above and otherwise unused) was clearly intended.
group_time_df = pd.DataFrame(group_time, index=ds_index, columns=all_st)
group_size_df = pd.DataFrame(group_size, index=ds_index, columns=all_st)
group_compression_df = (1 - pd.DataFrame(group_compression, index=ds_index)) * 100
# +
fig, ax_time = plt.subplots(1)
ax_compress = ax_time.twinx()
ax_time.set_yscale('log')
ax_time.set_ylabel('grouping time (seconds)')
ax_time.set_xlabel('ST')
group_time_df.boxplot(ax=ax_time, positions=all_st, grid='off', sym='', widths=0.05)
ax_compress.set_ylabel('average compression rate (%)')
group_compression_df.mean(axis=0).plot(ax=ax_compress, marker='x', linestyle='--', color='red')
ax_time.yaxis.grid('on')
ax_time.set_xlim(0.05, 0.95)
fig.tight_layout()
fig.savefig('group_time_and_compression.eps')
fig, ax_size = plt.subplots(1)
ax_size.set_yscale('log')
ax_size.set_ylabel('grouping size (MBs)')
ax_size.set_xlabel('ST')
group_size_df.boxplot(ax=ax_size, positions=all_st, grid='off', sym='', widths=0.05)
ax_size.set_xlim(0.05, 0.95)
ax_size.yaxis.grid('on')
fig.tight_layout()
fig.savefig('group_size.eps')
# -
print group_compression_df
| scripts/postprocessing/groups_visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# FCN-8s fine-tuning on augmented PASCAL VOC, warm-started from an FCN-16s
# checkpoint.  TensorFlow 1.x queue-based input pipeline; Python 2 kernel.
# #%matplotlib inline
import tensorflow as tf
import numpy as np
import skimage.io as io
import os, sys
from PIL import Image
import set_paths
FLAGS = set_paths.FLAGS
checkpoints_dir = FLAGS.checkpoints_dir
log_dir = FLAGS.log_dir + "fcn-8s/"
slim = tf.contrib.slim
from tf_image_segmentation.utils.tf_records import read_tfrecord_and_decode_into_image_annotation_pair_tensors
from tf_image_segmentation.models.fcn_8s import FCN_8s
from tf_image_segmentation.utils.pascal_voc import pascal_segmentation_lut
from tf_image_segmentation.utils.training import get_valid_logits_and_labels
from tf_image_segmentation.utils.augmentation import (distort_randomly_image_color,
                                                     flip_randomly_left_right_image_with_annotation,
                                                     scale_randomly_image_with_annotation_with_fixed_size_output)
image_train_size = [384, 384]
number_of_classes = 21
tfrecord_filename = 'pascal_augmented_train.tfrecords'
pascal_voc_lut = pascal_segmentation_lut()
class_labels = pascal_voc_lut.keys()
# Weights come from the previously trained FCN-16s model.
fcn_16s_checkpoint_path = FLAGS.save_dir + 'model_fcn16s_final.ckpt'
filename_queue = tf.train.string_input_producer(
    [tfrecord_filename], num_epochs=10)
image, annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(filename_queue)
# Various data augmentation stages
image, annotation = flip_randomly_left_right_image_with_annotation(image, annotation)
# image = distort_randomly_image_color(image)
resized_image, resized_annotation = scale_randomly_image_with_annotation_with_fixed_size_output(image, annotation, image_train_size)
resized_annotation = tf.squeeze(resized_annotation)
image_batch, annotation_batch = tf.train.shuffle_batch( [resized_image, resized_annotation],
                                             batch_size=1,
                                             capacity=3000,
                                             num_threads=2,
                                             min_after_dequeue=1000)
upsampled_logits_batch, fcn_16s_variables_mapping = FCN_8s(image_batch_tensor=image_batch,
                                           number_of_classes=number_of_classes,
                                           is_training=True)
# Mask out ambiguous/void pixels before computing the loss.
valid_labels_batch_tensor, valid_logits_batch_tensor = get_valid_logits_and_labels(annotation_batch_tensor=annotation_batch,
                                                                     logits_batch_tensor=upsampled_logits_batch,
                                                                    class_labels=class_labels)
cross_entropies = tf.nn.softmax_cross_entropy_with_logits(logits=valid_logits_batch_tensor,
                                                          labels=valid_labels_batch_tensor)
#cross_entropy_sum = tf.reduce_sum(cross_entropies)
cross_entropy_sum = tf.reduce_mean(cross_entropies)
pred = tf.argmax(upsampled_logits_batch, dimension=3)
probabilities = tf.nn.softmax(upsampled_logits_batch)
with tf.variable_scope("adam_vars"):
    train_step = tf.train.AdamOptimizer(learning_rate=0.000000001).minimize(cross_entropy_sum)
#adam_optimizer_variables = slim.get_variables_to_restore(include=['adam_vars'])
# Variable's initialization functions
init_fn = slim.assign_from_checkpoint_fn(model_path=fcn_16s_checkpoint_path,
                                         var_list=fcn_16s_variables_mapping)
global_vars_init_op = tf.global_variables_initializer()
tf.summary.scalar('cross_entropy_loss', cross_entropy_sum)
merged_summary_op = tf.summary.merge_all()
# BUGFIX: the original referenced an undefined name `log_folder`; the summary
# directory is `log_dir` (defined above), and it must exist before the
# FileWriter is constructed.
if not os.path.exists(log_dir):
    os.makedirs(log_dir)
summary_string_writer = tf.summary.FileWriter(log_dir)
#optimization_variables_initializer = tf.variables_initializer(adam_optimizer_variables)
#The op for initializing the variables.
local_vars_init_op = tf.local_variables_initializer()
combined_op = tf.group(local_vars_init_op, global_vars_init_op)
# We need this to save only model variables and omit
# optimization-related and other variables.
model_variables = slim.get_model_variables()
saver = tf.train.Saver(model_variables)
with tf.Session() as sess:
    sess.run(combined_op)
    init_fn(sess)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # 10 epochs over 11127 images, batch size 1.
    for i in xrange(11127 * 10):
        cross_entropy, summary_string, _ = sess.run([ cross_entropy_sum,
                                                      merged_summary_op,
                                                      train_step ])
        # Step offset: this run resumes after 20 previously logged epochs.
        summary_string_writer.add_summary(summary_string, 11127 * 20 + i)
        print("step :" + str(i) + " Loss: " + str(cross_entropy))
        if i > 0 and i % 11127 == 0:
            # Checkpoint at every epoch boundary.
            save_path = saver.save(sess, FLAGS.save_dir + "model_fcn8s_epoch_" + str(i) + ".ckpt")
            print("Model saved in file: %s" % save_path)
    coord.request_stop()
    coord.join(threads)
    save_path = saver.save(sess, FLAGS.save_dir + "model_fcn8s_final.ckpt")
    print("Model saved in file: %s" % save_path)
summary_string_writer.close()
| tf_image_segmentation/recipes/pascal_voc/FCNs/fcn_8s_train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
import numpy as np
import numpy
from numpy.linalg import inv
from fractions import Fraction
# Notation setup
n = 5
print("\nG = SL(5)\n")
''' Define basis beta and varpi '''
# beta[i] = e_i - e_{i+1} (simple roots); varpi[i] = fundamental weights,
# each a length-n vector whose coordinates sum to zero.
beta = numpy.zeros((n-1,n))
varpi = numpy.zeros((n-1,n))
for i in range(n-1):
    for j in range(n):
        if j == i:
            beta[i,j] = 1
        elif j == i + 1:
            beta[i,j] = -1
        else:
            beta[i,j] = 0
    for j in range(n):
        if j <= i:
            varpi[i,j] = (n-i-1)/float(n)
        else:
            varpi[i,j] = -(i+1)/float(n)
# Positive roots of SL(n): e_i - e_j for i < j.
PositiveRoots = []
for i in range(n-1):
    for j in range(i+1, n):
        vector = numpy.zeros(n)
        vector[i] = 1
        vector[j] = -1
        PositiveRoots.append(vector)
def theta(basis):
    """Return a copy of the ordered basis with its vectors in reverse order."""
    return numpy.copy(basis[::-1])
def V_to_a(basis, vector):
    """Coordinates of *vector* with respect to *basis*.

    inputs:
        basis -- (n-1) x n array, hopefully beta or varpi
        vector -- length-n element of V (coordinates summing to zero)
    outputs:
        length-(n-1) array of coordinates of vector wrt basis
    """
    # Drop the last column/entry: the sum-zero constraint makes the
    # remaining (n-1) x (n-1) system invertible.
    reduced_basis = basis[:, :-1]
    reduced_vector = vector[:-1]
    return numpy.dot(inv(reduced_basis).T, reduced_vector)
def last_coordinate(vector):
    """Append the unique final coordinate that makes all coordinates sum to zero."""
    closing_entry = -numpy.sum(vector)
    return numpy.append(vector, closing_entry)
def change_of_basis(basis_1, basis_2):
    """Change-of-basis matrix from basis_1 (original) to basis_2 (target).

    Row i holds the coordinates of basis_2[i] with respect to basis_1.
    """
    rows = [V_to_a(basis_1, basis_2[i]) for i in range(len(basis_1))]
    stacked = numpy.concatenate(rows, axis=0)
    return numpy.reshape(stacked, (n-1, n-1))  # maybe return transpose instead?
# def w_vector(vector, sigma):
# ''' Permutes the coordinates of vector according to sigma '''
# sigma = numpy.append(sigma, sigma[0])
# new_vector = numpy.copy(vector)
# for i, item in enumerate(sigma[:-1]):
# new_vector[item] = vector[sigma[i+1]]
# return new_vector
def w_vector(mu, sigma):
    """Permute the coordinates of mu (an element of V) according to sigma:
    entry i of mu is sent to position sigma[i] of the result."""
    permuted = numpy.copy(mu)
    for source_index, target_index in enumerate(sigma):
        permuted[target_index] = mu[source_index]
    return permuted
def w(matrix, sigma):
    """Apply the permutation sigma (via w_vector) to every row of matrix."""
    permuted_rows = numpy.copy(matrix)
    for row_index in range(len(matrix)):
        permuted_rows[row_index] = w_vector(matrix[row_index], sigma)
    return permuted_rows
# vector = (Id - theta.w)(varpi)
# vector = varpi - theta (w(varpi, [0,1]))
# change_of_basis(beta, vector)
def a_to_V(basis, coefficients):
    """Return the vector in the sum-zero hyperplane V with the given
    coefficients in *basis* (the inverse direction of V_to_a)."""
    dimension = len(basis)
    combination = numpy.zeros(dimension + 1)
    for idx in range(dimension):
        combination = combination + coefficients[idx] * numpy.array(basis[idx])
    return combination
def sanitize(vector):
    """Round every entry of *vector* in place and return the same object."""
    for position, entry in enumerate(vector):
        vector[position] = round(entry)
    return vector
def mymatrix(coefficients, sigma):  # coefficients = rows of identity matrix
    """Coordinates (wrt beta) of lambda - theta.w(lambda), where lambda has
    the given coefficients wrt varpi and sigma encodes w."""
    weight = a_to_V(varpi, coefficients)
    permuted_weight = w_vector(weight, sigma)
    residual = coefficients - V_to_a(theta(varpi), permuted_weight)
    return V_to_a(beta, a_to_V(varpi, residual))
def FL_matrix(coefficients, sigma):  # coefficients = rows of identity matrix
    """Coordinates (wrt beta) of lambda - w(lambda), where lambda has the
    given coefficients wrt varpi and sigma encodes w."""
    weight = a_to_V(varpi, coefficients)
    permuted_weight = w_vector(weight, sigma)
    residual = coefficients - V_to_a(varpi, permuted_weight)
    return V_to_a(beta, a_to_V(varpi, residual))
def error_matrix(coefficients, sigma):  # coefficients = rows of identity matrix
    """Coordinates (wrt beta) of (1 - theta)(w(lambda)) for lambda given by
    the coefficients wrt varpi."""
    weight = a_to_V(varpi, coefficients)
    permuted_weight = w_vector(weight, sigma)
    return V_to_a(beta, permuted_weight) - V_to_a(theta(beta), permuted_weight)
# NOTE(review): this rebinding shadows the function `w` defined above, and the
# tuple has length 4 although n = 5 (the last coordinate is never moved by
# w_vector) -- confirm both are intentional.
w = (4,3,2,1) # w = (sigma(0), sigma(1), ...) really
print "\n---------------------------------"
print "w is ", w
print "---------------------------------"
print "the FL matrix for lambda - w(lambda) is:"
# Feed each standard basis row through FL_matrix to read off the matrix.
finalmatrix = []
for row in numpy.identity(n-1):
    print sanitize(FL_matrix(row, w))
print "\n-----------------------------------"
print "the error matrix for (1 - theta)(w(lambda)) is:"
finalmatrix = []
for row in numpy.identity(n-1):
    print error_matrix(row, w)
print "\n-----------------------------------"
print "the matrix for lambda - theta.w(lambda) is:"
finalmatrix = []
for row in numpy.identity(n-1):
    print mymatrix(row, w)
print "\n-----------------------------------"
''' Do not forget this: If the output matrix is (a_{i,j}) then coefficient of beta_1 is a_11 c_1 + a_12 c_2 etc. '''
# +
# Second cell: brute-force search over Weyl group elements.
from itertools import permutations #for winv in permutations(range(n)):
import math # for factorial
import numpy
from numpy.linalg import inv
n = 5
def perm_to_matrix(winv):
    """Matrix (wrt the basis beta) of the length-n permutation winv."""
    rows = []
    for i in range(n-1):
        # Image of the i-th simple root under the permutation of coordinates.
        permuted_root = np.zeros(n)
        for j in range(n):
            permuted_root[j] = beta[i][winv[j]]
        rows.append(V_to_a(beta, permuted_root))
    return np.array(rows).T
# Rebuild beta (simple roots) and varpi (fundamental weights); duplicated
# from the first cell so this cell can run independently.
beta = numpy.zeros((n-1,n))
varpi = numpy.zeros((n-1,n))
for i in range(n-1):
    for j in range(n):
        if j == i:
            beta[i,j] = 1
        elif j == i + 1:
            beta[i,j] = -1
        else:
            beta[i,j] = 0
    for j in range(n):
        if j <= i:
            varpi[i,j] = (n-i-1)/float(n)
        else:
            varpi[i,j] = -(i+1)/float(n)
# mylambda in original coordinates (varpi), row vector
mylambda = np.array([1]*(n-1))
# newlambda wrt new coordinates (beta), column vector
newlambda = np.dot(mylambda, change_of_basis(beta, varpi)).reshape(n-1,1)
# mytheta: anti-diagonal (n-1)x(n-1) matrix, i.e. the index flip i -> n-2-i.
mytheta = []
for i in range(n-1):
    row = np.zeros(n-1)
    for j in range(n-1):
        if i+j == n-2:
            row[j] = 1
    mytheta.append(row)
def myfunc(winv, mylambda):
    """Coordinates (wrt beta) of lambda - theta.w(lambda), as a row vector.

    `mylambda` gives the coefficients of lambda wrt varpi and `winv` is the
    permutation encoding w.
    """
    newlambda = np.dot(mylambda, change_of_basis(beta, varpi)).reshape(n-1,1)
    out = newlambda - np.dot(mytheta, np.dot(perm_to_matrix(winv), newlambda))
    # The companion quantities lambda - w(lambda) and (1 - theta).w(lambda)
    # were computed on every call but never used; re-enable if needed:
    # fl = newlambda - np.dot(perm_to_matrix(winv), newlambda)
    # error = np.dot((np.eye(n-1) - mytheta), np.dot(perm_to_matrix(winv), newlambda))
    return out.T #, fl.T, error.T
# Enumerate every (n-1)-tuple with entries in {1, 2, 3}: base-3 digits of
# num, each shifted up by one.
mybiglist = []
MAX = 3**(n-1)
for num in range(MAX):
    lst = []
    ntemp = num
    for j in range(n-1):
        digit = ntemp % 3
        # Floor division keeps ntemp an int on Python 3 as well (identical
        # result on the Python 2 kernel, where / on ints already floors).
        ntemp = (ntemp - digit) // 3
        lst.append(digit+1)
    mybiglist.append(lst)
def main():
    """For each permutation winv, scan mybiglist for a lambda whose
    non-fixed coordinates of lambda - theta.w(lambda) are all positive;
    record the first success found for that permutation."""
    successful = []
    for winv in permutations(range(n)):
        for candidate in mybiglist:
            coords = myfunc(winv, candidate)[0]
            passed = True
            for i in range(n-1):
                # Build winv(varpi_i); coordinates where varpi_i is fixed by
                # winv are exempt from the positivity check.
                image = np.zeros(n)
                for j in range(n):
                    image[j] = varpi[i][winv[j]]
                if not np.array_equal(image, varpi[i]) and coords[i] <= 0.0001:
                    passed = False
                    break
            if passed:
                successful.append((winv, candidate))
                break
    return successful
success = main()
# BUGFIX: the original concatenated str with int ("..." + len(success)),
# which raises TypeError; the counts must be converted with str().
print("Total successes: " + str(len(success)) + " out of " + str(math.factorial(n)))
# +
'''This code finds the set Delta(winv) = \Delta_0^{Q(w)} given w. '''
def deltaofw(winv):
    """Return Delta(winv): the indices i with winv(varpi_i) != varpi_i."""
    moved = []
    for i in range(n-1):
        image = np.zeros(n)
        for j in range(n):
            image[j] = varpi[i][winv[j]]
        if not np.array_equal(image, varpi[i]):
            moved.append(i)
    return moved
'''This function finds length of winv'''
def length(winv):
    """Length of winv: the number of positive roots it sends to negative roots."""
    return sum(1 for root in PositiveRoots
               if numpy.sum(V_to_a(beta, w_vector(root, winv))) < 0)
# w0 is the longest Weyl element (coordinate reversal); w0winv buffers w0.winv.
w0 = [n-i-1 for i in range(n)]
w0winv = [i for i in range(n)]
# --- Uncomment below section to print ---
for winv in permutations(range(n)):
    for i in range(n):
        w0winv[i] = w0[winv[i]];
    print "For w = ", winv, "length = ", length(winv), " Delta(w) = ", deltaofw(winv)
    print "w0.winv = ", tuple(w0winv), "length = ", length(w0winv), "Delta(w0 winv)", deltaofw(w0winv), "\n"
# +
# In this part of code, I am trying to find the value of \varpi - w \varpi in terms of beta's.
# If this coefficient is large enough, then I might try to use Shahidi's trick to bound
# the negative term by the leading one in:
# \lambda - \theta w \lambda = (\lambda - w \lambda) + w.(\lambda - \theta \lambda)
# Let's see how goes.
def flfunc(winv, mylambda):
    """Coordinates (wrt beta) of varpi - w(varpi), i.e. lambda - w(lambda)
    for lambda given by the coefficients `mylambda` wrt varpi."""
    weight = np.dot(mylambda, change_of_basis(beta, varpi)).reshape(n-1,1)
    difference = weight - np.dot(perm_to_matrix(winv), weight)
    return difference.T[0]
def sanitize(vector):
    """Round each coordinate of *vector* in place; return the same object."""
    for idx in range(len(vector)):
        vector[idx] = round(vector[idx])
    return vector
# NOTE(review): this `w` is assigned but unused -- the loop below iterates
# over every permutation (and the name shadows the function `w` from the
# first cell).
w = (4,3,2,1,0)
for winv in permutations(range(n)):
    for i in range(n-1):
        # lambda = varpi_i (one-hot coefficient vector).
        mylambda = np.array([0]*(n-1))
        mylambda[i] = 1
        print "For winv = ", winv, "and varpi",i, ", (varpi-winv varpi) equals ", sanitize(flfunc(winv, mylambda))
# +
# Testing the specific case n = 5, w = (23)(45), c_1 = c_3 = c_4 = 1, c_2 = 2.
# Should pass and passes.
winv = (0,2,1,4,3)
mylambda = [1,2,1,2]
output = myfunc(winv, mylambda)[0]
mybool = True
for i in range(n-1):
# construct winv(varpi_i) and check if it is the same as varpi_i
# if yes, don't check the output[i], otherwise check.
temp = np.zeros(n)
for j in range(n):
temp[j] = varpi[i][winv[j]]
print "coordinate ", i, " is ", output[i]
if not np.array_equal(temp, varpi[i]):
if output[i] <= 0.0001:
mybool = False
if mybool:
print "Success for winv = ", winv, "with lambda = ", mylambda
# -
| Improvement with Partha.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Assignment: Normalizing Wavefunctions with Jupyter and Colab
#
# ## 🎯 Objective
# To practice Jupyter and Colab.
#
# ## 📜 Instructions
# 1. Presuming you already have a GitHub account, [connect to GitHub Classroom](https://classroom.github.com/a/1DZ_qS9k). You'll see 
# 1. Click `Accept the assignment` and then refresh the page . You will see a new repository with the task 
# 1. To complete the assignment you need to complete the Jupyter notebook **`Normalizing_eigenfunctions.ipynb`**.
# ## Editing a Jupyter Notebook from GitHub
#
# As described in the [notes](https://qchem.qc-edu.org/ipynb/jupyter.html#what-is-jupyter), there are several ways to edit `.ipynb` files.
#
# ### Using Google Colab (direct download and upload)
# In this tutorial we will show how to work with Jupyter notebooks through Google Colab.
# 1. Download the `.zip` archive of the GitHub repo. In order to download it click `code` and `Download Zip` 
# 1. When the repo is downloaded, go to [Google Colab](https://colab.research.google.com/) and add the `.ipynb` file from the repo: `File` $\rightarrow$ `Upload notebook` 
# You will see this window 
# 1. Upload the `.ipynb` file
#
# Voila! 
#
# ### Extension for Chromium Browsers.
# If you use a browser like Google Chrome or Microsoft Edge, then you can use the [Open in Colab](https://chrome.google.com/webstore/detail/open-in-colab/iogfkhleblhcpcekbiedikdehleodpjo) browser extension. Then,
# 1. View the target Jupyter Notebook on Github
# 2. Click the extension to open the notebook in Colab.
# 3. When you do this, you may need to grant Google/Microsoft (Colab/GitHub) permission to interchange information.
# ### Understanding the assignment
# There is an instruction written in the notebook. All you need to do is to fill the code between
# ```
# ### START YOUR CODE HERE
# ...
# ### END YOUR CODE HERE
# ```
# 
#
# The purpose of this assignment is to compute the normalization constant for the particle-in-a-box. The particle-in-a-box is perhaps the simplest (bound) quantum system. You do not need to know much about the particle-in-a-box to complete this assignment however: referring to the relevant section of the [notes](https://qchem.qc-edu.org/ipynb/ParticleIn1DBox.html#normalization-of-wavefunctions), one sees that one can choose $A_n = \sqrt{\frac{2}{a}}$.
#
# Once the code is completed, you can click the play button to evaluate the cell.
# When you are satisfied with a code just click `File` $\rightarrow$ `Save` and then `Download` $\rightarrow$ `Download .ipynb`
# 
# ## Submitting an assignment
#
# Submitting the assignment is a breeze!
# When you downloaded the final version of the file just go to your repo and click `Add file` $\rightarrow$ `Upload files`; then upload your newly created file 
# Now commit your changes and click `Commit changes` 
# ## Check the result
#
# In order to check whether your code passed tests all you need to do is go to `Actions`$\rightarrow$ `Your commit` $\rightarrow$ `Test with pytest`.   In most cases the tests will run automatically, and you do not need to explicitly invoke Pytest.
#
# If you see a green check mark: ***CONGRATULATIONS!*** you passed the assignment
#
# If there is a bug, then the following may occur:
#  
#
# Try to find your mistake but if you get stuck, contact me `@RichRick1`
#  
# ## Grading Scheme
# Completing the assignment earns you an **S**. To earn an **S+**, explore whether there are other choices for the normalization constant that also pass the tests. Can you find a (correct) choice for the normalization constant that nonetheless fails the tests? How is this possible!
#
| book/tutorial_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pylab as plt
import matplotlib
from luescher_nd.database.utilities import DATA_FOLDER
# -
# Use the pgf backend so figures export as LaTeX-compatible .pgf files.
# NOTE(review): matplotlib.use() is called after pyplot was imported above;
# on older matplotlib versions this may warn or be ignored -- confirm.
matplotlib.use("pgf")
sns.set(
    context="paper",
    style="ticks",
    font_scale=1/1.7,
    rc={
        # "mathtext.fontset": "cm",
        "pgf.rcfonts": False,
        "axes.unicode_minus": False,
        "font.serif": [],
        "font.sans-serif": [],
    },
)
# Figure width in inches used when sizing the final grid.
A1_WIDTH = 6.0
# %load_ext blackcellmagic
file_names = [f for f in os.listdir(DATA_FOLDER) if f.endswith(".h5")]
# +
# Load every overlap table and concatenate them into one frame.
dfs = []
for file_name in file_names:
    dfs.append(
        pd.read_hdf(os.path.join(DATA_FOLDER, file_name), key="overlap").fillna(0)
    )
df = pd.concat(dfs, ignore_index=True)
# Parse "(nx, ny, nz)" momentum labels out of the a1g column.
# BUGFIX: the pattern must be a raw string -- "\(" is an invalid string
# escape (DeprecationWarning today, a SyntaxError in future Pythons).
n2 = (
    df.a1g.str.extractall(r"\((?P<nx>[0-9]+), (?P<ny>[0-9]+), (?P<nz>[0-9]+)\)")
    .reset_index(1, drop=True)
    .astype(int)
)
# n2 = nx^2 + ny^2 + nz^2, used to sort states by momentum shell.
df["n2"] = (n2 ** 2).sum(axis=1)
# +
CUTOFF = 0.02
def heatmap(**kwargs):
    """Draw a percentage heat map of state overlaps for one facet.

    Pivots the facet's data to (a1g state) x (n1d), appends a '(...)' row
    carrying whatever overlap mass is not shown (blanked when negligible),
    and renders everything with seaborn on the current axes.
    """
    facet_data = kwargs["data"]
    table = facet_data.pivot(values="overlap", index="a1g", columns="n1d")
    # Overlap not accounted for by the displayed states.
    remainder = 1 - table.sum(axis=0)
    remainder[remainder.abs() <= 0.005] = np.nan
    remainder_row = pd.DataFrame(
        data=[remainder.values], columns=remainder.index, index=["(...)"]
    )
    table = pd.concat([table, remainder_row])
    ax = plt.gca()
    sns.heatmap(
        table*100,
        vmin=0,
        vmax=100,
        cmap="BuGn",
        cbar=False,
        linewidths=1,
        annot=True,
        fmt="2.0f",
        ax=ax,
    )
    # Widen the y-limits by half a cell so the outer rows are not clipped.
    lower, upper = ax.get_ylim()
    ax.set_ylim(lower + 0.5, upper - 0.5)
    ax.tick_params(axis=u"both", which=u"both", length=0)
# +
# Facet the overlap heat maps by nstep (columns) and nlevel (rows), keeping
# only the levels/lattice sizes of interest and overlaps above CUTOFF.
grid = sns.FacetGrid(
    data=df[df.nlevel.isin([0, 1, 2, 8, 9]) & df.n1d.isin([4, 10, 20, 30, 40, 50])]
    .query("overlap > @CUTOFF")
    .sort_values(["n1d", "n2"]),
    col="nstep",
    row="nlevel",
    sharey=False,
    sharex=False,
    margin_titles=True,
    aspect=1.2,
    col_order=[1, 4, -1],
)
grid.map_dataframe(heatmap)
# Blank the auto-generated margin row labels; custom templates are set below.
# NOTE(review): "row_lables" is a typo for "row_labels" (local only).
row_lables = [
    text for ax in grid.axes.flat for text in ax.texts if "nlevel" in text.get_text()
]
plt.setp(row_lables, text="")
grid.set_titles(
    col_template="$n_s = {col_name}$",
    row_template="$n_\mathrm{{level}} = {row_name}$",
)
# Replace the sentinel nstep = -1 column title with the infinity symbol.
text = [ax.title for ax in grid.axes.flat if "-1" in ax.title.get_text()][0]
plt.setp([text], text=text.get_text().replace("-1", "\infty"))
plt.subplots_adjust(wspace=0.4, hspace=0.6)
# Add a secondary top x-axis per facet showing the physical x for each n1d.
for nlevel, axes in zip(grid.row_names, grid.axes):
    for nstep, ax in zip(grid.col_names, axes):
        tf = df.query("nlevel == @nlevel and nstep == @nstep")
        x_map = tf.groupby("n1d")["x"].mean().to_dict()
        ax.set_xlabel("$n_{1d}$")
        topax = ax.twiny()
        topax.xaxis.set_ticks_position("top")
        topax.xaxis.set_label_position("top")
        topax.set_xlabel("$x$")
        topax.set_xticklabels(
            [
                "${0:2.2f}$".format(x_map[int(n1d.get_text())])
                for n1d in ax.get_xticklabels()
            ]
        )
        topax.set_xticks(ax.get_xticks())
        topax.set_xlim(ax.get_xlim())
        topax.tick_params(axis=u"both", which=u"both", length=0)
sns.despine(grid.fig, left=True, bottom=True)
# Wrap tick labels in $...$ so the pgf backend typesets them with LaTeX.
for ax in grid.axes.flatten():
    ax.set_yticklabels(
        [f"${label.get_text()}$" for label in ax.get_yticklabels()], rotation=0
    )
    ax.set_xticklabels([f"${label.get_text()}$" for label in ax.get_xticklabels()])
grid.set_ylabels(r"$\left\vert [p] \right\rangle \in A_{1g}$")
# Scale the figure to the paper's column width, preserving aspect ratio.
ratio = grid.fig.get_figheight() / grid.fig.get_figwidth()
grid.fig.set_figheight(ratio * A1_WIDTH)
grid.fig.set_figwidth(A1_WIDTH)
# Math-wrap any remaining annotation/title/tick text that lacks $...$.
for ax in grid.axes.flat:
    all_texts = [text for text in ax.texts]
    all_texts += [ax.title for ax in grid.axes.flat]
    all_texts += [t for t in ax.xaxis.get_ticklabels()]
    all_texts += [t for t in ax.yaxis.get_ticklabels()]
    for text in all_texts:
        if not "$" in text.get_text():
            plt.setp([text], text=f"${text.get_text()}$")
print("Done")
# -
grid.savefig("a1g-state-overlap.pgf", bbox_inches="tight")
| paper/luescher-nd/figure/a1g-state-overlap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="QfpdSQv6TMei"
# # Porting a backtrader strategy
# + [markdown] id="BpF7fRlVoSD_"
# # Abstract
#
# In this notebook we'll see how to port a strategy from Backtrader to Vectorbt.
# We'll also see how to compare and troubleshoot the two versions
# + [markdown] id="test13BTJMRg"
# # Environment Setup
# First we'll need to install dependencies. Orca is needed for plotting, instead ta-lib because we want to make sure we rely on an indicator implementation which is as much as possible similar between the two strategies.
# Both Orca and ta-lib needs to be compiled from scratch
# + id="XHyACcWzS4Og"
# !pip install python-binance vectorbt plotly==4.12.0 dateparser backtrader ccxt > log.txt
# + colab={"base_uri": "https://localhost:8080/"} id="nrO-vGsIxcUf" outputId="5790d6be-6436-4833-a002-6867513ba9cf"
# !wget http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-src.tar.gz > log.txt
# !tar -xzf ta-lib-0.4.0-src.tar.gz > log.txt
# %cd ta-lib/
# !./configure --prefix=$HOME > log.txt
# !make > log.txt
# !make install > log.txt
# !TA_LIBRARY_PATH=~/lib TA_INCLUDE_PATH=~/include pip install ta-lib > log.txt
# + colab={"base_uri": "https://localhost:8080/"} id="yqUBztCtezKA" outputId="6a7e69bb-c51c-42d0-86a5-6d7bbd90baa8"
# !wget https://github.com/plotly/orca/releases/download/v1.2.1/orca-1.2.1-x86_64.AppImage -O /usr/local/bin/orca > log.txt
# !chmod +x /usr/local/bin/orca > log.txt
# !apt-get install xvfb libgtk2.0-0 libgconf-2-4 > log.txt
# + id="pBY5EcdJeAAh"
import vectorbt as vbt
import backtrader as bt
from backtrader.sizers import PercentSizer
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime, timedelta, timezone
from binance.client import Client as BinanceClient
# %matplotlib inline
# + [markdown] id="pZaCJp4qM5L9"
# # Variables Setup
# We'll setup some parameters here. Remember that you have to use a symbol available from the broker you're going to download data from (Binance in this case).
# As instance Binance provides a BTC/USDT symbol but doesn't provide an BTC/USD
# + id="1Mg6U3apfC23"
# Enter your parameters here
COIN_TARGET = "BTC"
COIN_REFER = "USDT"
symbol = '%s-%s' % (COIN_TARGET, COIN_REFER)
timefrom = "2021-03-04T00:00:00.000Z"
timeto = "2021-03-11T00:00:00.000Z"
init_cash = 100
fees = 0.075 # in %
start_date = datetime.strptime(timefrom, '%Y-%m-%dT%H:%M:%S.%fZ').replace(tzinfo=timezone.utc)
end_date = datetime.strptime(timeto, '%Y-%m-%dT%H:%M:%S.%fZ').replace(tzinfo=timezone.utc)
freq = '1m'
rsi_bottom = 35
rsi_top = 70
fast_window = 10
slow_window = 100
vbt.settings.portfolio['freq'] = freq
vbt.settings.portfolio['init_cash'] = init_cash
vbt.settings.portfolio['fees'] = fees / 100
vbt.settings.portfolio['slippage'] = 0
# + [markdown] id="6G3sgb58NBFP"
# # Download Data
# + [markdown] id="WvNadMd4p14Q"
# You can either download data using the binance client...
# + id="Cm3KHi-M-JNS" colab={"base_uri": "https://localhost:8080/"} outputId="db6b1932-c13c-4637-95e7-3f9bd4852c23"
binance_client = BinanceClient()
binance_data = vbt.BinanceData.download(
'%s%s' % (COIN_TARGET, COIN_REFER),
client=binance_client,
start=start_date,
end=end_date,
interval=BinanceClient.KLINE_INTERVAL_1MINUTE
)
data = binance_data.get()
# + [markdown] id="hKoMU5g3p-1W"
# ...or using CCXT (here we're using both for demonstration purposes but you can just pick the one you prefer)
# + colab={"base_uri": "https://localhost:8080/"} id="LJju8RIgMnFh" outputId="1446160b-2e1a-4410-d816-e8d7afd755c6"
ccxt_data = vbt.CCXTData.download(
'%s/%s' % (COIN_TARGET, COIN_REFER),
start=start_date,
end=end_date,
timeframe='1m'
)
data = ccxt_data.get()
# + [markdown] id="llMHZIKxqV0F"
# Let's keep only the columns we're interested into
# + colab={"base_uri": "https://localhost:8080/"} id="i7iVmioVfeSD" outputId="97a5f992-edce-4b7c-f81e-2c44b25c9f8d"
cols = ['Open', 'High', 'Low', 'Close', 'Volume']
ohlcv_wbuf = data[cols]
ohlcv_wbuf = ohlcv_wbuf.astype(np.float64)
print(ohlcv_wbuf.shape)
print(ohlcv_wbuf.columns)
# + [markdown] id="RqpQzC1zqaoX"
# And make sure the timeframe is the one we specified
# + colab={"base_uri": "https://localhost:8080/"} id="YXJpGBXQfiH0" outputId="ac619eee-b21c-47a1-fe23-b0b1e949ac57"
wobuf_mask = (ohlcv_wbuf.index >= start_date) & (ohlcv_wbuf.index <= end_date) # mask without buffer
ohlcv = ohlcv_wbuf.loc[wobuf_mask, :]
print(ohlcv.shape)
# + [markdown] id="yBGYx_uWqhUP"
# Let's plot the price graph!
# + colab={"base_uri": "https://localhost:8080/", "height": 367} id="W2NQuIapflbm" outputId="e93830bb-d244-451d-ab19-350b36a67cc5"
# Plot the OHLC data
ohlcv_wbuf.vbt.ohlcv.plot().show_png()
# remove show_png() to display interactive chart!
# + [markdown] id="o0gYsTAPMuns"
# # Backtrader Strategy
# We start by backtasting a simple RSI strategy which buys when < 35 and sell when > 70.
# We'll use backtrader as starting point.
# + id="xg0KO1cx1JjZ"
class FullMoney(PercentSizer):
    """All-in sizer: stakes the full account value less the fee percentage,
    so the order plus commission fits in the available cash."""
    params = (
        ('percents', 100 - fees),
    )
# Feed the buffered OHLCV frame to backtrader as 1-minute bars.
data_bt = bt.feeds.PandasData(
    dataname=ohlcv_wbuf,
    openinterest=-1,
    datetime=None,
    timeframe=bt.TimeFrame.Minutes,
    compression=1
)
cerebro = bt.Cerebro(quicknotify=True)
cerebro.adddata(data_bt)
broker = cerebro.getbroker()
# Cheat-on-close matches vectorbt's fill-at-close semantics.
broker.set_coc(True) # cheat-on-close
broker.setcommission(commission=fees/100)#, name=COIN_TARGET)
broker.setcash(init_cash)
cerebro.addsizer(FullMoney)
# Analyzers used later for the trade report and transaction export.
cerebro.addanalyzer(bt.analyzers.TradeAnalyzer, _name="ta")
cerebro.addanalyzer(bt.analyzers.SQN, _name="sqn")
cerebro.addanalyzer(bt.analyzers.Transactions, _name="transactions")
# + [markdown] id="aUanJ36Wq4lj"
# The StrategyBase will deal with most of the routine most common logic. The real trading decisions will be taken in the BasicRSI class.
# + id="h1Kmx7Nh3NpQ"
class StrategyBase(bt.Strategy):
    """Common plumbing for the strategies: order tracking, feed status
    bookkeeping, and long/short helpers."""

    def __init__(self):
        # Start flat: no live order, last action treated as a sell.
        self.order = None
        self.status = "DISCONNECTED"
        self.last_operation = "SELL"
        self.pending_order = False
        self.buy_price_close = None
        self.commissions = []

    def notify_data(self, data, status, *args, **kwargs):
        # Record the data feed's connection status by name.
        self.status = data._getstatusname(status)

    def short(self):
        """Exit the position with a market sell."""
        self.sell()

    def long(self):
        """Enter a position, remembering the close price we bought at."""
        self.buy_price_close = self.data0.close[0]
        self.buy()

    def notify_order(self, order):
        self.pending_order = False
        if order.status in [order.Submitted, order.Accepted]:
            # Order still in flight: keep the reference and wait.
            self.order = order
            return
        if order.status in [order.Completed]:
            self.commissions.append(order.executed.comm)
            if order.isbuy():
                self.last_operation = "BUY"
            else:
                self.last_operation = "SELL"
                self.buy_price_close = None
        self.order = None
class BasicRSI(StrategyBase):
    """RSI mean-reversion: buy when RSI drops below the bottom threshold,
    sell when it climbs above the top threshold."""

    params = dict(
        period_ema_fast=fast_window,
        period_ema_slow=slow_window,
        rsi_bottom_threshold=rsi_bottom,
        rsi_top_threshold=rsi_top
    )

    def __init__(self):
        StrategyBase.__init__(self)
        # TA-Lib RSI is used (instead of backtrader's built-in) so the
        # indicator matches the vectorbt port as closely as possible.
        # The two EMAs are instantiated but not consulted by next().
        self.ema_fast = bt.indicators.EMA(period=self.p.period_ema_fast)
        self.ema_slow = bt.indicators.EMA(period=self.p.period_ema_slow)
        self.rsi = bt.talib.RSI(self.data, timeperiod=14)
        self.profit = 0
        self.stop_loss_flag = True

    def update_indicators(self):
        """Refresh the unrealized profit fraction of the open position."""
        self.profit = 0
        if self.buy_price_close and self.buy_price_close > 0:
            self.profit = float(
                self.data0.close[0] - self.buy_price_close) / self.buy_price_close

    def next(self):
        self.update_indicators()
        if self.order:
            # An order is pending -- wait for it to resolve.
            return
        # Stop-loss / take-profit hooks (currently disabled):
        #   if self.profit < -0.03: self.short()
        #   if self.profit > 0.03: self.short()
        if self.rsi > self.p.rsi_bottom_threshold:
            # RSI has recovered above the bottom threshold: clear the flag.
            self.stop_loss_flag = False
        if self.last_operation != "BUY" and self.rsi < self.p.rsi_bottom_threshold:
            self.long()
        if self.last_operation != "SELL" and self.rsi > self.p.rsi_top_threshold:
            self.short()
# + [markdown] id="bfqq18uarHyM"
# Remember to add the strategy to cerebro
# + colab={"base_uri": "https://localhost:8080/"} id="Ur5xisBD4HoL" outputId="aafa03f2-c261-4560-83f6-e306ce376c51"
# Register the strategy with the engine before running.
cerebro.addstrategy(BasicRSI)
# + [markdown] id="zMGll4WKrL4h"
# Run the backtesting
# + colab={"base_uri": "https://localhost:8080/"} id="7YkLFMPc4KvM" outputId="7f7b4a4d-2884-4b89-faa0-457f03a9fb9c"
initial_value = cerebro.broker.getvalue()
print('Starting Portfolio Value: %.2f' % initial_value)
result = cerebro.run()
# + [markdown] id="0h_3o-lGrO2H"
# Define some convenience functions for printing a report.
# Let's also print the actual report and plot a graph
# + colab={"base_uri": "https://localhost:8080/"} id="VGdJfUxwFM9w" outputId="30f09072-517a-4e92-c4c2-4695035ee6d7"
def print_trade_analysis(analyzer):
    """Pretty-print key statistics from a backtrader TradeAnalyzer result.

    Expects the nested attribute/dict structure produced by
    bt.analyzers.TradeAnalyzer.get_analysis(); prints nothing when no
    trades (or no closed trades) were recorded.
    """
    # Get the results we are interested in
    if not analyzer.get("total"):
        return
    total_open = analyzer.total.open
    total_closed = analyzer.total.closed
    if not total_closed:
        # No closed trades: the strike rate below would divide by zero.
        return
    total_won = analyzer.won.total
    total_lost = analyzer.lost.total
    win_streak = analyzer.streak.won.longest
    lose_streak = analyzer.streak.lost.longest
    pnl_net = round(analyzer.pnl.net.total, 2)
    # BUGFIX: strike rate is the PERCENTAGE of closed trades that won;
    # the original multiplied by 2 instead of 100.
    strike_rate = round((total_won / total_closed) * 100)
    # Designate the rows
    h1 = ['Total Open', 'Total Closed', 'Total Won', 'Total Lost']
    h2 = ['Strike Rate', 'Win Streak', 'Losing Streak', 'PnL Net']
    r1 = [total_open, total_closed, total_won, total_lost]
    r2 = [strike_rate, win_streak, lose_streak, pnl_net]
    # Check which set of headers is the longest.
    if len(h1) > len(h2):
        header_length = len(h1)
    else:
        header_length = len(h2)
    # Print the rows
    print_list = [h1, r1, h2, r2]
    row_format = "{:<15}" * (header_length + 1)
    print("Trade Analysis Results:")
    for row in print_list:
        print(row_format.format('', *row))
def print_sqn(analyzer):
    """Print the System Quality Number rounded to two decimals."""
    print('SQN: {}'.format(round(analyzer.sqn, 2)))
# Print analyzers - results
final_value = cerebro.broker.getvalue()
print('Final Portfolio Value: %.2f' % final_value)
print('Profit %.3f%%' % ((final_value - initial_value) / initial_value * 100))
print_trade_analysis(result[0].analyzers.ta.get_analysis())
print_sqn(result[0].analyzers.sqn.get_analysis())
# NOTE(review): `data` and `df` here shadow the download/overlap names used
# earlier in the notebook.
data = result[0].analyzers.transactions.get_analysis()
df = pd.DataFrame.from_dict(data, orient='index', columns=['data'])
# Expand the per-timestamp transaction tuples into named columns (UTC index).
bt_transactions = pd.DataFrame(df.data.values.tolist(), df.index.tz_localize(tz='UTC'), columns=[
    'amount', 'price', 'sid', 'symbol', 'value'])
# + id="tX61JGl5GNGL" colab={"base_uri": "https://localhost:8080/", "height": 654} outputId="9770b812-234d-4591-ec17-74abd5773c05"
# Static matplotlib chart of the backtrader run.
plt.rcParams["figure.figsize"] = (15, 9) # (w, h)
cerebro.plot(style='bar', iplot= False)
# + id="Ph3fbB1k_26k"
#bt_transactions
# + [markdown] id="aEHK4JMQrizq"
# Let's create a vectorbt portfolio using the entries and exits from backtrader...
# + id="5POm9gXUXAkS"
# Derive entry/exit timestamps from the backtrader transaction log:
# positive amounts are buys (entries), negative amounts are sells (exits).
# FIX: removed two no-op self-assignments (`x.index = x.index`).
bt_entries_mask = bt_transactions[bt_transactions.amount > 0]
bt_exits_mask = bt_transactions[bt_transactions.amount < 0]
# Boolean signal series aligned with the close-price index.
bt_entries = pd.Series.vbt.signals.empty_like(ohlcv['Close'])
bt_entries.loc[bt_entries_mask.index] = True
bt_exits = pd.Series.vbt.signals.empty_like(ohlcv['Close'])
bt_exits.loc[bt_exits_mask.index] = True
vbt.settings.portfolio['fees'] = 0.075 / 100 #0.0025 # in %
#bt_portfolio = vbt.Portfolio.from_signals(ohlcv['Close'], bt_entries, bt_exits)
# Fill at the next bar's price (fshift(1)) to mirror backtrader's fill timing.
bt_portfolio = vbt.Portfolio.from_signals(ohlcv['Close'], bt_entries, bt_exits, price=ohlcv['Close'].vbt.fshift(1))
#bt_portfolio = vbt.Portfolio.from_signals(ohlcv['Close'], rsi_entries, rsi_exits, price=ohlcv['Close'].vbt.fshift(1), size=(100 - fees)/100, size_type='percent')
# + [markdown] id="Tk-Ey1rnr03T"
# ...and compare the commissions.
# + colab={"base_uri": "https://localhost:8080/", "height": 367} id="KV2jcXIsdbfY" outputId="dbabf887-7f03-481d-ade2-8fa1e406b3cc"
# bt_portfolio.orders.records_readable
# vbt_commissions = pd.DataFrame({
# 'commissions': bt_portfolio.orders.records_readable.Fees,
# 'date': bt_portfolio.orders.records_readable.Date
# })
# vbt_commissions = vbt_commissions.set_index('date')
# vbt_commissions
# Compare per-order commissions: backtrader records them on the strategy
# (result[0].commissions), vectorbt exposes them as order Fees; align both
# on their transaction timestamps before differencing.
bt_commissions = pd.Series(result[0].commissions, index=bt_transactions.index)
vbt_commissions = bt_portfolio.orders.records_readable.Fees
vbt_commissions.index = bt_portfolio.orders.records_readable.Date
#print(bt_commissions)
#print(vbt_commissions)
commissions_delta = bt_commissions - vbt_commissions
commissions_delta.rename('Commissions (Delta)').vbt.plot().show_png()
# + [markdown] id="8U-2AI1Wr68Y"
# We can see they are extremely close (a zero, followed by 40 more zeros after the decimal point, and finally some meaningful digits).
# This is mostly due to rounding errors — for example from the order in which the calculations are performed — and is negligible for our purposes. In fact, if we print the portfolio report from both vectorbt and backtrader, we can see the results are almost identical.
# + colab={"base_uri": "https://localhost:8080/"} id="E5i8uBA6g5xq" outputId="c6183374-7a15-4b6e-b2f2-a304156b6934"
# Side-by-side summary: backtrader's final value / trade stats vs vectorbt stats().
print('Final Portfolio Value: %.5f' % final_value)
print('Profit %.3f%%' % ((final_value - initial_value) / initial_value * 100))
print_trade_analysis(result[0].analyzers.ta.get_analysis())
print(bt_portfolio.stats())
# + [markdown] id="1z1BoYF4tB8l"
# Let's plot a graph with the entries and exits
# + colab={"base_uri": "https://localhost:8080/", "height": 367} id="epkpPsFVc9nW" outputId="d0b63db3-b76a-47ae-fee6-34b67e511699"
# Plot price with entry/exit markers derived from the backtrader transactions.
fig = vbt.make_subplots(specs=[[{"secondary_y": True}]])
fig = ohlcv['Close'].vbt.plot(trace_kwargs=dict(name='Price'), fig=fig)
fig = bt_entries.vbt.signals.plot_as_entry_markers(ohlcv['Close'], fig=fig)
fig = bt_exits.vbt.signals.plot_as_exit_markers(ohlcv['Close'], fig=fig)
fig.show_png()
# + [markdown] id="5df0w5ss45pY"
# # Pure vectorbt strategy
# We'll try now to execute the same strategy but using only vectorbt.
# Let's start by creating an RSI indicator. It'll be of course on a *rolling window*, meaning it will represent the value which the RSI would have been at that point in time (as opposed of being calculated considering the current timestamp or the last one as you would normally do in a realtime scenario)
# + colab={"base_uri": "https://localhost:8080/"} id="sRX5ROgafoAW" outputId="8e9a59d1-584c-4f05-c92a-b36fe5e85c77"
# Build the RSI via TA-Lib on ohlcv_wbuf (presumably the with-buffer data, so
# the first `timeperiod` bars have history -- see the trimming note below).
# Note it runs on Open prices here.
RSI = vbt.IndicatorFactory.from_talib('RSI')
rsi = RSI.run(ohlcv_wbuf['Open'], timeperiod=[14])
print(rsi.real.shape)
# + [markdown] id="Jrg5xgK2uJZp"
# We plot entries and exists as before...
# + id="ETl2Ih86yfXx" colab={"base_uri": "https://localhost:8080/", "height": 367} outputId="88adf137-b3f5-41a9-d777-fcc6dc3c0b29"
# Signals from RSI crossovers of the bottom/top thresholds; clean() removes
# redundant consecutive signals (NOTE(review): per vectorbt docs -- confirm).
vbt_entries = rsi.real_below(rsi_bottom, crossover=True)
vbt_exits = rsi.real_above(rsi_top, crossover=True)
vbt_entries, vbt_exits = pd.DataFrame.vbt.signals.clean(vbt_entries, vbt_exits)
fig = vbt.make_subplots(specs=[[{"secondary_y": True}]])
fig = ohlcv['Open'].vbt.plot(trace_kwargs=dict(name='Price'), fig=fig)
fig = vbt_entries.vbt.signals.plot_as_entry_markers(ohlcv['Open'], fig=fig)
fig = vbt_exits.vbt.signals.plot_as_exit_markers(ohlcv['Open'], fig=fig)
fig.show_png()
# + [markdown] id="7qIa4WYuvXmf"
# ...and print the Final Portfolio Value
# + colab={"base_uri": "https://localhost:8080/"} id="QkYRhQ1c9Dg4" outputId="501ac41b-e183-4ba5-8947-ecb790bf56d2"
# Build the pure-vectorbt portfolio with the same next-bar fill convention.
vbt_portfolio = vbt.Portfolio.from_signals(ohlcv['Close'], vbt_entries, vbt_exits, price=ohlcv['Close'].vbt.fshift(1))
print('Final Portfolio Value (Vectorbt): %.5f' % vbt_portfolio.final_value())
print('Final Portfolio Value (Backtrader): %.5f' % final_value)
# + [markdown] id="_hfsQPLzvbfo"
# Something's wrong! Why do we get a different result when using backtrader and vectorbt?
# Let's start by comparing the entries and exits
# ('^' means XOR logical operator: It returns true when the two inputs are different)
# + colab={"base_uri": "https://localhost:8080/", "height": 717} id="LHLWJ05391UZ" outputId="e20e391f-3fc3-423b-8e3c-8c9905e1c102"
# XOR highlights timestamps where the two signal sets disagree.
(vbt_entries ^ bt_entries).rename('Entries (Delta)').vbt.signals.plot().show_png()
(vbt_exits ^ bt_exits).rename('Exits (Delta)').vbt.signals.plot().show_png()
# + colab={"base_uri": "https://localhost:8080/"} id="BEM-2SMsLdlb" outputId="0bc9f37d-70b9-4d02-c8c0-dfc6fff09d18"
# create a selection mask for showing values which are different
mask = vbt_exits ^ bt_exits
print(vbt_exits[mask]) # show the different ones in vbt_exits
print(bt_exits[mask]) # show the different ones in bt_exits
print(rsi.real[mask]) # show the RSI value at the disagreeing timestamps
# + [markdown] id="9cMQv5uPwEth"
# Definitely some values are different and why backtrader did not exit with an RSI greater than 70?
# + [markdown] id="79FqWYRLmmNN"
# # vectorbt strategy with backtrader indicators
# Let's dig deeper: We can compare the vectorbt and backtrader RSI indicator and see if there's any difference.
# We start by importing them as a dataframe
# + colab={"base_uri": "https://localhost:8080/"} id="U7EmkfUBoMeF" outputId="33b391a0-2276-442c-aaf8-4ba65486c655"
# Export backtrader's RSI line to a DataFrame; num2date converts its float
# datetimes back to datetime objects, then localize to UTC to match vectorbt.
rsi_bt_df = pd.DataFrame({
'rsi': result[0].rsi.get(size=len(result[0]))
}, index=[result[0].datas[0].num2date(x) for x in result[0].data.datetime.get(size=len(result[0]))])
rsi_bt_df.index = rsi_bt_df.index.tz_localize(tz='UTC')
# NOTE(review): shift(1) appears to align backtrader's value with vectorbt's
# bar convention (value computed from data up to the previous bar) -- confirm.
rsi_bt_df.rsi = rsi_bt_df.rsi.shift(1)
rsi_vbt_df = pd.DataFrame({
'rsi': rsi.real.values
}, index=rsi.real.index)
# Trim the vectorbt RSI to the un-buffered date range for a fair comparison.
rsi_vbt_df_mask = (rsi_vbt_df.index >= start_date) & (rsi_vbt_df.index <= end_date) # mask without buffer
rsi_vbt_df = rsi_vbt_df.loc[rsi_vbt_df_mask, :]
print(rsi_bt_df.shape)
print(rsi_vbt_df.shape)
#rsi_bt_df.head(20)
#rsi_vbt_df.head(20)
# + [markdown] id="O1ad-R0Nw776"
# And plot the difference results
# + colab={"base_uri": "https://localhost:8080/", "height": 367} id="cqvi3bEqq0iM" outputId="ba78b942-f9cc-4948-b6bf-bac78061f070"
# Element-wise difference between the two RSI series, plotted over time.
rsi_delta = rsi_bt_df - rsi_vbt_df
#rsi_delta.head(20)
rsi_delta.rsi.rename('RSI (Delta)').vbt.plot().show_png()
# + [markdown] id="-sBC3INTxBlW"
# We can clearly see there's a ± ~2 difference in value overall. The initial spike is due to backtrader not having enough price info at the beginning, which is instead available to vectorbt as we trimmed the data _after_ generating the series of points.
# We can print the signals both separately and overlapped
# + id="gke_KQ0dpkaU" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="66730561-7753-4ace-f24c-af58e9ad4e18"
# Overlapped
pd.DataFrame({'RSI (VBT)': rsi_vbt_df['rsi'], 'RSI (BT)': rsi_bt_df['rsi']}).vbt.plot().show_png()
# RSI signal from Backtrader
rsi_bt_df.rsi.rename('RSI (BT)').vbt.plot().show_png()
# RSI signal from Vectorbt
rsi_vbt_df.rsi.rename('RSI (VBT)').vbt.plot().show_png()
# + [markdown] id="ZPA5BN8vx60C"
# No appreciable difference though.
# So, how can we achieve the same results? We could try to feed backtrader's RSI signal to vectorbt's strategy
# + colab={"base_uri": "https://localhost:8080/", "height": 367} id="2yqGF9N5f3Ck" outputId="b22b9519-683b-4e2c-a720-ad586497eae2"
# Feed backtrader's RSI series into the vectorbt signal logic. Plain </>
# thresholds are used here (no crossover flag); clean() reduces the runs
# to alternating entry/exit events.
vbt_bt_entries = rsi_bt_df.rsi < rsi_bottom
vbt_bt_exits = rsi_bt_df.rsi > rsi_top
vbt_bt_entries, vbt_bt_exits = pd.DataFrame.vbt.signals.clean(vbt_bt_entries, vbt_bt_exits)
fig = vbt.make_subplots(specs=[[{"secondary_y": True}]])
fig = ohlcv['Open'].vbt.plot(trace_kwargs=dict(name='Price'), fig=fig)
fig = vbt_bt_entries.vbt.signals.plot_as_entry_markers(ohlcv['Open'], fig=fig)
fig = vbt_bt_exits.vbt.signals.plot_as_exit_markers(ohlcv['Open'], fig=fig)
fig.show_png()
# + [markdown] id="VNnTVCQiyOj8"
# and print the difference between entries and exits
# + colab={"base_uri": "https://localhost:8080/", "height": 717} id="DKampAZ72o9P" outputId="6b3aae3f-d007-48a9-9aa6-a7935a5b44d5"
# XOR again: any timestamp where the hybrid signals differ from backtrader's.
(vbt_bt_entries ^ bt_entries).rename('Entries (Delta)').vbt.signals.plot().show_png()
(vbt_bt_exits ^ bt_exits).rename('Exits (Delta)').vbt.signals.plot().show_png()
# + [markdown] id="etVYPhwKyTQy"
# Nice! no difference in entries and exits events.
# + colab={"base_uri": "https://localhost:8080/"} id="vctuaVd6BWYQ" outputId="adde2c03-7262-4cb4-9c47-eeedffbf692e"
# create a selection mask for showing values which are different
mask = vbt_bt_exits ^ bt_exits
print(vbt_bt_exits[mask]) # show the different ones in vbt_bt_exits
print(bt_exits[mask]) # show the different ones in bt_exits
print(rsi_bt_df.rsi[mask]) # show the RSI value at the disagreeing timestamps
# + [markdown] id="hC7voQcrya3U"
# We can print them on a binary Y axis as well
# + colab={"base_uri": "https://localhost:8080/", "height": 367} id="_oJ9kbBD4xTy" outputId="000932e4-d7b9-4a81-99fa-61f6c9aa1e6d"
# Entries and exits on a shared binary (0/1) axis.
fig = vbt_bt_entries.vbt.signals.plot(trace_kwargs=dict(name='Entries'))
vbt_bt_exits.vbt.signals.plot(trace_kwargs=dict(name='Exits'), fig=fig).show_png()
# + [markdown] id="DX5IOiWlyprp"
# So now when we go and print the Final Portfolio Value...
# + colab={"base_uri": "https://localhost:8080/"} id="bpsAFVT744CS" outputId="fc97c0df-75eb-4ce0-e92e-af935e51875f"
#vbt_bt_portfolio = vbt.Portfolio.from_signals(ohlcv['Close'], vbt_bt_entries, vbt_bt_exits)
#vbt_bt_portfolio = vbt.Portfolio.from_signals(ohlcv['Close'], vbt_bt_entries, vbt_bt_exits, price=ohlcv['Close'].vbt.fshift(1), size=(100 - fees)/100, size_type='percent')
# Hybrid portfolio: vectorbt execution driven by backtrader's RSI signals.
vbt_bt_portfolio = vbt.Portfolio.from_signals(ohlcv['Close'], vbt_bt_entries, vbt_bt_exits, price=ohlcv['Close'].vbt.fshift(1))
print('Final Portfolio Value (Vectorbt): %.5f' % vbt_bt_portfolio.final_value())
print('Final Portfolio Value (Backtrader): %.5f' % final_value)
# + [markdown] id="npGklLQUywZY"
# ...we can indeed see the results are matching!
# Let's also plot the position trading windows
# + colab={"base_uri": "https://localhost:8080/", "height": 367} id="AiYNQrMN5FzT" outputId="9b30b709-86ba-420c-d41c-bc6787691597"
# Visualize the position trading windows of the hybrid portfolio.
#print(vbt_bt_portfolio.trades.records)
vbt_bt_portfolio.trades.plot().show_png()
# + [markdown] id="b9vq-MFV4l8R"
# # Conclusions
# If we now create a portfolio from a simple holding strategy
# + id="fBl9SjctgLCN"
# Buy-and-hold baseline portfolio.
hold_portfolio = vbt.Portfolio.from_holding(ohlcv['Close'])
# + [markdown] id="gs24qy8vzOoB"
# and plot the portfolio value on the same graph
# + colab={"base_uri": "https://localhost:8080/", "height": 367} id="XfQ93jwL5lag" outputId="f16ce175-b744-40da-f710-cb483a446c04"
# Overlay all four equity curves for the final comparison.
fig = vbt_portfolio.value().vbt.plot(trace_kwargs=dict(name='Value (pure vectorbt)'))
fig = vbt_bt_portfolio.value().vbt.plot(trace_kwargs=dict(name='Value (vectorbt w/ BT Ind.)'), fig=fig)
fig = bt_portfolio.value().vbt.plot(trace_kwargs=dict(name='Value (Backtrader)'), fig=fig)
hold_portfolio.value().vbt.plot(trace_kwargs=dict(name='Value (Hold)'), fig=fig).show_png()
# + [markdown] id="DfoWXgL3zYA8"
# We can see the portfolio generated with the _vectorbt + backtrader RSI signal_ exactly overlaps with the portfolio we generated from _pure backtrader strategy_. The _pure vectorbt_ portfolio is slightly off though as we find out.
# This should remind you that tiny differences in the ways signal algorithms are implemented, can even generate different entries and exits events in your strategy!
# + [markdown] id="xplDJxJE0Dbi"
# # Bonus debugging snippets
# here are some snippets which might come in handy when debugging or troubleshooting strategies
# + id="W4tP5ePw705a" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="dcad0278-843a-4538-dcd1-0897e170e1d6"
# Raw order tables for manual inspection.
vbt_portfolio.orders.records_readable
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="a4rY2x8aCII5" outputId="2b81449e-104c-42a0-d9cb-7042abff3df4"
bt_portfolio.orders.records_readable
# + colab={"base_uri": "https://localhost:8080/"} id="2FpiL6ikA44c" outputId="79ada75f-069b-4156-87c5-6d61a7502717"
# Look up the RSI at one specific UTC timestamp.
# NOTE(review): assumes `datetime` is the class (`from datetime import
# datetime`) and `timezone` is imported; with `import datetime` this would
# need datetime.datetime.strptime / datetime.timezone.utc -- confirm imports.
my_date = datetime.strptime('2021-03-04T00:53:00.000Z', '%Y-%m-%dT%H:%M:%S.%fZ').replace(tzinfo=timezone.utc)
rsi.real.loc[my_date]
# + id="j0Qh07P1BlSj" colab={"base_uri": "https://localhost:8080/"} outputId="c8bc5d26-a9be-4351-8620-f8447f31b759"
# Spot-check a window of the two equity curves.
bt_portfolio.value().iloc[150:].head(20)
# + colab={"base_uri": "https://localhost:8080/"} id="A4ZHlgl3f9V1" outputId="e08a177b-7d36-4854-a1a1-fd4f3712161d"
vbt_portfolio.value().iloc[150:].head(20)
# + id="9PgVrVY1Kxyg"
| examples/PortingBTStrategy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Detection with SSD
#
# In this example, we will load a SSD model and use it to detect objects.
# ### 1. Setup
#
# * First, Load necessary libs and set up caffe and caffe_root
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Default display settings for the detection figures.
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Make sure that caffe is on the python path:
caffe_root = '../' # this file is expected to be in {caffe_root}/examples
import os
os.chdir(caffe_root)
import sys
sys.path.insert(0, 'python')
import caffe
# caffe.set_device(0)
# Run inference on CPU (uncomment set_device above and use set_mode_gpu for GPU).
caffe.set_mode_cpu()
# -
# * Load LabelMap.
# +
from google.protobuf import text_format
from caffe.proto import caffe_pb2
import os
# load PASCAL VOC labels
os.chdir("/home/night/Deep_Learning/caffe")
aa = os.getcwd()
# Python 2 print statements: show the working dir and its contents for sanity.
print aa
print os.listdir(aa)
labelmap_file = './data/VOC0712/labelmap_voc.prototxt'
# labelmap_file = './night/abc.sh'
# Parse the prototxt labelmap into a LabelMap protobuf message.
# NOTE(review): `file` shadows the Python 2 builtin and is never closed.
file = open(labelmap_file, 'r')
labelmap = caffe_pb2.LabelMap()
text_format.Merge(str(file.read()), labelmap)
def get_labelname(labelmap, labels):
    """Map numeric label ids to their display names via the labelmap.

    Accepts a single label or a list of labels; raises AssertionError when a
    label is not present in the labelmap.
    """
    if type(labels) is not list:
        labels = [labels]
    items = labelmap.item
    labelnames = []
    for wanted in labels:
        # First item whose numeric label matches, scanning in labelmap order.
        name = next((it.display_name for it in items if it.label == wanted), None)
        assert name is not None
        labelnames.append(name)
    return labelnames
# -
# * Load the net in the test phase for inference, and configure input preprocessing.
# +
# Python 2 prints used as crude progress markers around the slow model load.
print 0
# NOTE(review): model_def uses '../' while model_weights uses './'; after the
# earlier os.chdir these resolve to different roots -- confirm both paths exist.
model_def = '../models/VGGNet/VOC0712/SSD_300x300/deploy.prototxt'
model_weights = './models/VGGNet/VOC0712/SSD_300x300/VGG_VOC0712_SSD_300x300_iter_120000.caffemodel'
print 1
net = caffe.Net(model_def, # defines the structure of the model
                model_weights, # contains the trained weights
                caffe.TEST) # use test mode (e.g., don't perform dropout)
print 2
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))
transformer.set_mean('data', np.array([104,117,123])) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
# -
# ### 2. SSD detection
# * Load an image.
# +
# set net to batch size of 1
image_resize = 300
# Reshape the input blob to (batch=1, channels=3, 300, 300) and show the image.
net.blobs['data'].reshape(1,3,image_resize,image_resize)
image = caffe.io.load_image('examples/images/fish-bike.jpg')
plt.imshow(image)
# -
# * Run the net and examine the top_k results
# +
transformed_image = transformer.preprocess('data', image)
net.blobs['data'].data[...] = transformed_image
# Forward pass.
detections = net.forward()['detection_out']
# Parse the outputs. Per the indexing below, the last axis of each detection
# holds [image_id, label, confidence, xmin, ymin, xmax, ymax] (coords relative).
det_label = detections[0,0,:,1]
det_conf = detections[0,0,:,2]
det_xmin = detections[0,0,:,3]
det_ymin = detections[0,0,:,4]
det_xmax = detections[0,0,:,5]
det_ymax = detections[0,0,:,6]
# Get detections with confidence higher than 0.6.
top_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.6]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_labels = get_labelname(labelmap, top_label_indices)
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
# -
# * Plot the boxes
# +
# One distinct hue per PASCAL VOC class (20 classes + background).
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
plt.imshow(image)
currentAxis = plt.gca()
for i in xrange(top_conf.shape[0]):
    # Scale the relative box coordinates back to pixel positions.
    xmin = int(round(top_xmin[i] * image.shape[1]))
    ymin = int(round(top_ymin[i] * image.shape[0]))
    xmax = int(round(top_xmax[i] * image.shape[1]))
    ymax = int(round(top_ymax[i] * image.shape[0]))
    score = top_conf[i]
    label = int(top_label_indices[i])
    label_name = top_labels[i]
    display_txt = '%s: %.2f'%(label_name, score)
    coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1
    color = colors[label]
    # Draw the box and a label tag with semi-transparent background.
    currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
    currentAxis.text(xmin, ymin, display_txt, bbox={'facecolor':color, 'alpha':0.5})
| Tiny-DSOD/examples/ssd_detect.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from phimal_utilities.analysis import load_tensorboard
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context='paper', style='white')
# -
# Load the tensorboard logs of the least-squares run and the DeepMoD run.
df = load_tensorboard('runs/lstsq_correct/')
df_deepmod = load_tensorboard('runs/deepmod_correct/')
df.keys()
# Group the logged columns by their name prefixes.
coeff_keys = [key for key in df.keys() if key[:5]=='coeff']
scaled_coeff_keys = [key for key in df.keys() if key[:6]=='scaled']
lst_keys = [key for key in df.keys() if key[:5]=='lstsq']
mae_keys = [key for key in df.keys() if key[:3]=='mae']
# Ground truth: of the 12 library coefficients only index 2 is non-zero (2.0).
true_coeffs = np.zeros((1, 12))
true_coeffs[0, 2] = 2.0
# Compare MSE and regression losses of the two runs on a log scale.
plt.semilogy(df.index, df['MSE_0'])
plt.semilogy(df_deepmod.index, df_deepmod['MSE_0'])
plt.semilogy(df.index, df['Regression_0'])
plt.semilogy(df_deepmod.index, df_deepmod['Regression_0'])
# %config InlineBackend.figure_format = 'svg'
# Mean absolute library error over all terms (log scale).
plt.semilogy(df.index, np.mean(df[mae_keys], axis=1))
#plt.semilogy(df_deepmod.index, np.mean(df_deepmod[mae_keys], axis=1))
# +
# L1 norms of the raw and scaled coefficient vectors over training.
plt.plot(df.index, np.sum(np.abs(df[coeff_keys]), axis=1))
plt.plot(df.index, np.sum(np.abs(df[scaled_coeff_keys]), axis=1))
plt.ylim([0, 20])
# -
plt.plot(df.index, np.sum(np.abs(df[scaled_coeff_keys]), axis=1))
plt.plot(df_deepmod.index, np.sum(np.abs(df_deepmod[scaled_coeff_keys]), axis=1))
plt.ylim([0.0, 20])
# +
# Absolute coefficient error vs ground truth for both runs.
plt.semilogy(df.index, np.sum(np.abs(df[coeff_keys] - true_coeffs), axis=1), label='new')
plt.semilogy(df_deepmod.index, np.sum(np.abs(df_deepmod[coeff_keys] - true_coeffs), axis=1), label='old')
plt.legend()
# -
# Scratch inspections of the non-zero coefficient and key lists.
np.mean(np.abs(df[coeff_keys[2]] - true_coeffs[0, 2][None]))
scaled_coeff_keys
true_coeffs[:, 2:3].shape
# Normalise each diagnostic by its own minimum so the curves are comparable.
# BUG FIX: normed_l1 was plotted before it was defined (NameError when the
# cells are run top to bottom); the definitions now precede the plots.
normed_l1 = np.sum(np.abs(df[coeff_keys]), axis=1) / np.min(np.sum(np.abs(df[coeff_keys]), axis=1))
normed_l1_scaled = np.sum(np.abs(df[scaled_coeff_keys]), axis=1) / np.min(np.sum(np.abs(df[scaled_coeff_keys]), axis=1))
normed_coeff_error = np.mean(np.abs(df[coeff_keys] - true_coeffs), axis=1) / np.min(np.mean(np.abs(df[coeff_keys] - true_coeffs), axis=1))
normed_lib_error = np.mean(df[mae_keys], axis=1) / np.min(np.mean(df[mae_keys], axis=1).loc[1:])
plt.plot(df.index, np.mean(np.abs(df[coeff_keys[2]] - true_coeffs[0, 2][None])[:, None], axis=1))
plt.plot(df.index, normed_l1, label='L1 norm')
# +
# Relative (min-normalised) losses of the least-squares run on one axis.
fig, ax = plt.subplots(nrows=1, ncols=1, constrained_layout=True) #5.5 is official width
ax.plot(df.index, df['Regression_0']/df['Regression_0'].loc[1:].min(), label='Reg.')
ax.plot(df.index, normed_l1, label='L1 norm')
#ax.plot(df.index, normed_l1_scaled, label='L1 norm scaled')
ax.plot(df.index, normed_coeff_error, label='Coeff. error')
ax.plot(df.index, normed_lib_error, label='Library error')
ax.legend(loc='upper right')
ax.set_ylim([0.95, 2.0])
#ax.set_yscale('log')
#ax.set_title('Relative losses lst. sq')
ax.set_xlabel('Epoch')
ax.set_ylabel('Relative loss')
#ax.set_xlim([500, 10000])
#ax.text(-0.26, 1.01, 'C', transform=ax.transAxes, weight='bold')
ax.ticklabel_format(axis="x", style="sci", scilimits=(3, 3))
#ax.set_ylim([3e-4, 4e-4])
# -
coeff_keys[2]
# Coefficient trajectories (raw and scaled), with the true term highlighted.
plt.plot(df.index, df[coeff_keys])
plt.plot(df.index, df[coeff_keys[2]], linewidth=2)
plt.ylim([-4, 10])
plt.plot(df.index, df[scaled_coeff_keys])
plt.plot(df.index, df[scaled_coeff_keys[2]], linewidth=2)
plt.ylim([-5, 5])
# Same coefficient trajectories for the DeepMoD run.
plt.plot(df_deepmod.index, df_deepmod[coeff_keys])
plt.plot(df_deepmod.index, df_deepmod[coeff_keys[2]], linewidth=2)
plt.ylim([-2, 2])
plt.plot(df_deepmod.index, df_deepmod[scaled_coeff_keys])
plt.plot(df_deepmod.index, df_deepmod[scaled_coeff_keys[2]], linewidth=2)
plt.ylim([-5, 5])
# NOTE(review): these overwrite the normed_* variables computed earlier for
# the least-squares run with the DeepMoD equivalents.
normed_l1 = np.sum(np.abs(df_deepmod[scaled_coeff_keys]), axis=1) / np.min(np.sum(np.abs(df_deepmod[scaled_coeff_keys]), axis=1))
normed_coeff_error = np.mean(np.abs(df_deepmod[coeff_keys] - true_coeffs), axis=1) / np.min(np.mean(np.abs(df_deepmod[coeff_keys] - true_coeffs), axis=1))
normed_lib_error = np.mean(df_deepmod[mae_keys], axis=1) / np.min(np.mean(df_deepmod[mae_keys], axis=1).loc[1:])
# +
# Relative losses for the DeepMoD run.
# NOTE(review): the x-axis uses df.index while the y-values come from
# df_deepmod -- this assumes both runs share the same index; confirm.
fig, ax = plt.subplots(nrows=1, ncols=1, constrained_layout=True) #5.5 is official width
ax.plot(df.index, df_deepmod['Regression_0']/df_deepmod['Regression_0'].loc[1:].min(), label='Reg.')
ax.plot(df.index, normed_l1, label='L1 norm')
ax.plot(df.index, normed_coeff_error, label='Coeff. error')
ax.plot(df.index, normed_lib_error, label='Library error')
ax.legend(loc='upper right')
ax.set_ylim([0.95, 3.0])
#ax.set_yscale('log')
#ax.set_title('Relative losses lst. sq')
ax.set_xlabel('Epoch')
ax.set_ylabel('Relative loss')
#ax.set_xlim([500, 10000])
#ax.text(-0.26, 1.01, 'C', transform=ax.transAxes, weight='bold')
ax.ticklabel_format(axis="x", style="sci", scilimits=(3, 3))
#ax.set_ylim([3e-4, 4e-4])
# -
| experiments/diffusion/analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Part 1: Reading Datasets
#
# This is the part 1 of our 3-part series on Retrieving, Analyzing and Visualizing georeferenced data of earthquakes. We are using a list of available datasets from [Rdatasets](https://vincentarelbundock.github.io/Rdatasets/). This list in CSV format will be imported, opened and read. Then, we will look for the words `latitude` or `longitude` inside each dataset HTML code [(web crawling)](https://en.wikipedia.org/wiki/Web_crawler). Finally, from the resulting list, we will select a dataset that will be used to create the database and the map.
#
# First, we will import all the required Python libraries
import requests
import csv
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
import re
import itertools
# ## Getting the the collection of datasets in CSV format
#
# In this step, we will use the URL provided by [Rdatasets](https://vincentarelbundock.github.io/Rdatasets/) to download a CSV file containing, among other features, CSV data URL and documentation (HTML) URL of over 1300 datasets.
# !wget 'http://vincentarelbundock.github.com/Rdatasets/datasets.csv'
# ### Exploring the collection of datasets
#
# Let's explore the first three rows of the collection dataset. We can see many column names, but we are interested in the `CSV` and `Doc` columns. We can also see that there are 1340 datasets in the collection.
# print the first three rows of the collection CSV
# FIX: both files were opened without ever being closed (and `file` shadowed
# the builtin); use context managers so the handles are released.
with open('datasets.csv', 'r', newline='') as csvfile:
    csvReader1 = csv.reader(csvfile)
    for row in itertools.islice(csvReader1, 3):
        print(row)
# get the number of datasets in the file
with open("datasets.csv") as f:
    numline = len(f.readlines())
print (numline-1) # minus the header
# ## Processing the collection of datasets
#
# Next, we will look for the word latitude or longitude inside each dataset HTML code. To do that we will create various empty lists to store the intermediate and final results. Then, we will open again the dataset collection dataset and append in a list all the content of the `Doc` column which consists of HTML documents links. Finally, we will perform the web scrawling process itself.
# create list to store results
url_list=[] # Doc-page URLs harvested from the CSV
sel_list = [] # URLs whose HTML mentions latitude/longitude (may contain duplicates)
sel_list2 = [] # deduplicated final selection
# Be patient...the crawling process takes time...
# +
print('Working...\n')
print()
# open the collection dataset
with open('datasets.csv', newline='') as csvfile:
    reader = csv.DictReader(csvfile)
    # append in a list all the content of the Doc column
    for row in reader:
        url_list.append(row['Doc'])
# Crawl each documentation page; SSL verification is disabled for urlopen.
for url in url_list:
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    r = requests.get(url)
    if r.status_code == 200:
        html = urlopen(url, context=ctx).read()
        soup = BeautifulSoup(html, "html.parser")
        # NOTE(review): this inner loop re-runs find_all over the WHOLE
        # document once per top-level node, so matching URLs are appended
        # repeatedly; sel_list2 deduplicates afterwards. A single find_all
        # check per page would suffice.
        for item in soup:
            # find latitude or longitude word
            if soup.find_all(text=re.compile(r'\blatitude\b | \blongitude\b', flags=re.I | re.X)):
                sel_list.append(url)
            else:
                continue
    else:
        continue
# Deduplicate while preserving first-seen order.
for item2 in sel_list:
    if item2 not in sel_list2:
        sel_list2.append(item2)
print('List of datasets containing the terms latitude or longitude: \n', sel_list2)
# -
# ## Selecting a dataset
#
# From 1340 datasets we obtained 20 HTML links containing the word latitude and/or longitude, next we will select a dataset for the next steps.
# NOTE(review): the hard-coded index 8 assumes the crawl result order is stable.
print('The selected dataset is: ', sel_list2[8])
# ***Execute `db_earthquakes.ipynb` to create a database from the selected dataset and perform some spatial analysis.***
| reading_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# plt.style.use('dark_background')
# Load the influencer dataset and eyeball followers vs engagement rate.
df = pd.read_csv('influencers.csv')
df.describe()
plt.plot(df['followerCount'],df['engagementRate'],'o')
followers,engagement = df['followerCount'].values,df['engagementRate'].values
# +
# Linguistic labels for the three fuzzy variables.
followers_label = ['FEW','AVERAGE','MANY','HUGE']
target_label = ['REJECTED','CONSIDERED','ACCEPTED']
engagement_label = ['LOW','MED','HIGH']
# 85%
# followers_points = [[0, 15000, 25000], [15000, 25000, 45000, 55000], [45000, 55000, 75000, 85000], [75000, 85000, 999999]]
# engagement_points = [[0, 0, 2], [0, 2, 4, 6], [4, 6, 9]]
# target_points_sugeno = [35, 55, 100]
# target_points = [[0, 35, 40], [35, 40, 55, 60], [55, 60, 100]]
# 90%
# followers_points = [[0, 10000, 15000], [10000, 15000, 35000, 40000], [35000, 40000, 55000, 60000], [55000, 60000, 9999999]]
# engagement_points =[[0, 1, 3], [1, 3, 4, 6], [4, 6, 9]]
# target_points_sugeno = [35, 55, 100]
# target_points = [[0, 25, 30], [25, 30, 55, 60], [55, 60, 100]]
# 95%
# f_diff = 3000
# f_cut = [10000, 30000, 70000, 9999999]
# e_diff = 0.5
# e_cut = [2, 5, 10]
# t_diff = 5
# t_cut = [30, 60, 100]
# followers_points = [[0, f_cut[0]-f_diff, f_cut[0]],
# [f_cut[0]-f_diff, f_cut[0], f_cut[1]-f_diff, f_cut[1]],
# [f_cut[1]-f_diff, f_cut[1], f_cut[2]-f_diff, f_cut[2]],
# [f_cut[2]-f_diff, f_cut[2], f_cut[3]]
# ]
# engagement_points = [[0, e_cut[0] - e_diff, e_cut[0]],
# [e_cut[0]-e_diff, e_cut[0], e_cut[1] - e_diff, e_cut[1]],
# [e_cut[1]-e_diff, e_cut[1], e_cut[2]]
# ]
# target_points_sugeno = t_cut
# target_points = [[0, t_cut[0] - t_diff, t_cut[0]],
# [t_cut[0] - t_diff, t_cut[0], t_cut[1] - t_diff, t_cut[1]],
# [t_cut[1] - t_diff, t_cut[1], t_cut[2]]
# ]
#100%💯
# Final tuned cut points (the commented blocks above are earlier attempts).
f_diff = 3500
f_cut = [10000, 30000, 70000, 9999999]
e_diff = 0.7
e_cut = [2.5, 5.5, 10]
t_diff = 5
t_cut = [30, 60, 100]
# Trapezoid corner points per label: [rise-start, plateau-start(, plateau-end, fall-end)].
followers_points = [[0, f_cut[0]-f_diff, f_cut[0]],
                    [f_cut[0]-f_diff, f_cut[0], f_cut[1]-f_diff, f_cut[1]],
                    [f_cut[1]-f_diff, f_cut[1], f_cut[2]-f_diff, f_cut[2]],
                    [f_cut[2]-f_diff, f_cut[2], f_cut[3]]
                   ]
engagement_points = [[0, e_cut[0] - e_diff, e_cut[0]],
                     [e_cut[0]-e_diff, e_cut[0], e_cut[1] - e_diff, e_cut[1]],
                     [e_cut[1]-e_diff, e_cut[1], e_cut[2]]
                    ]
target_points_sugeno = t_cut
target_points = [[0, t_cut[0] - t_diff, t_cut[0]],
                 [t_cut[0] - t_diff, t_cut[0], t_cut[1] - t_diff, t_cut[1]],
                 [t_cut[1] - t_diff, t_cut[1], t_cut[2]]
                ]
print(followers_points,engagement_points,target_points,sep='\n')
# -
def decrease(x, a, b):
    """Linearly falling edge: 1 at a, 0 at b."""
    return (b - x) / (b - a)
def increase(x, a, b):
    """Linearly rising edge: 0 at a, 1 at b."""
    span = b - a
    return (x - a) / span
def fx_few_followers(x, points=None):
    """Membership degree (0..1) of x in the FEW-followers set.

    points: optional [plateau_start, shoulder, zero] triple; defaults to the
    module-level followers_points[0] (generalization, keeps old calls working).
    """
    p = followers_points[0] if points is None else points
    if x <= p[1]:
        return 1
    elif p[1] < x < p[2]:
        return decrease(x, p[1], p[2])
    else:
        return 0
def fx_average_followers(x, points=None):
    """Membership degree of x in the AVERAGE-followers trapezoid.

    points: optional [a, b, c, d] corners; defaults to followers_points[1].
    """
    p = followers_points[1] if points is None else points
    if p[0] < x < p[1]:
        return increase(x, p[0], p[1])
    elif p[1] <= x <= p[2]:
        return 1
    elif p[2] < x <= p[3]:
        return decrease(x, p[2], p[3])
    else:
        return 0
def fx_many_followers(x, points=None):
    """Membership degree of x in the MANY-followers trapezoid.

    points: optional [a, b, c, d] corners; defaults to followers_points[2].
    """
    p = followers_points[2] if points is None else points
    if p[0] < x < p[1]:
        return increase(x, p[0], p[1])
    elif p[1] <= x <= p[2]:
        return 1
    elif p[2] < x <= p[3]:
        return decrease(x, p[2], p[3])
    else:
        return 0
def fx_huge_followers(x, points=None):
    """Membership degree of x in the HUGE-followers shoulder (rises to 1).

    points: optional corner list; defaults to followers_points[3].
    """
    p = followers_points[3] if points is None else points
    if x >= p[1]:
        return 1
    elif p[0] < x < p[1]:
        return increase(x, p[0], p[1])
    else:
        return 0
def fx_low_engagement(x, points=None):
    """Membership degree of x in the LOW-engagement set.

    points: optional [plateau_start, shoulder, zero]; defaults to engagement_points[0].
    """
    p = engagement_points[0] if points is None else points
    if x <= p[1]:
        return 1
    elif p[1] < x < p[2]:
        return decrease(x, p[1], p[2])
    else:
        return 0
def fx_med_engagement(x, points=None):
    """Membership degree of x in the MED-engagement trapezoid.

    points: optional [a, b, c, d] corners; defaults to engagement_points[1].
    """
    p = engagement_points[1] if points is None else points
    if p[0] < x < p[1]:
        return increase(x, p[0], p[1])
    elif p[1] <= x <= p[2]:
        return 1
    elif p[2] < x <= p[3]:
        return decrease(x, p[2], p[3])
    else:
        return 0
def fx_high_engagement(x, points=None):
    """Membership degree of x in the HIGH-engagement shoulder (rises to 1).

    points: optional corner list; defaults to engagement_points[2].
    """
    p = engagement_points[2] if points is None else points
    if x >= p[1]:
        return 1
    elif p[0] < x < p[1]:
        return increase(x, p[0], p[1])
    else:
        return 0
# Plot all membership functions for visual inspection, then save to PNG.
x = np.arange(0,90000,10000)
x2 = np.arange(0,6,0.1)
fig, ax = plt.subplots(1,2,figsize=(18, 4))
ax[0].plot(x,[fx_few_followers(y) for y in x], label='few')
ax[0].plot(x,[fx_average_followers(y) for y in x], label='average')
ax[0].plot(x,[fx_many_followers(y) for y in x], label='many')
ax[0].plot(x,[fx_huge_followers(y) for y in x], label='huge')
ax[0].set_title('Followers')
ax[0].legend(loc='lower right')
ax[1].plot(x2,[fx_low_engagement(y) for y in x2], label='low')
ax[1].plot(x2,[fx_med_engagement(y) for y in x2], label='medium',color='r')
ax[1].plot(x2,[fx_high_engagement(y) for y in x2], label='high')
ax[1].set_title('Engagement')
ax[1].legend(loc='lower right')
fig.suptitle('Membership function')
plt.plot()
plt.savefig('membership_function.png')
# ### Defuzzyfication rules
def fx_rejected(x, points=None):
    """Output membership of score x in the REJECTED set.

    points: optional [plateau_start, shoulder, zero]; defaults to target_points[0].
    """
    p = target_points[0] if points is None else points
    if x <= p[1]:
        return 1
    elif p[1] < x < p[2]:
        return decrease(x, p[1], p[2])
    else:
        return 0
def fx_considered(x, points=None):
    """Output membership of score x in the CONSIDERED trapezoid.

    points: optional [a, b, c, d] corners; defaults to target_points[1].
    """
    p = target_points[1] if points is None else points
    if p[0] < x < p[1]:
        return increase(x, p[0], p[1])
    elif p[1] <= x <= p[2]:
        return 1
    elif p[2] < x <= p[3]:
        return decrease(x, p[2], p[3])
    else:
        return 0
def fx_accepted(x, points=None):
    """Output membership of score x in the ACCEPTED shoulder (rises to 1).

    points: optional corner list; defaults to target_points[2].
    """
    p = target_points[2] if points is None else points
    if x >= p[1]:
        return 1
    elif p[0] < x < p[1]:
        return increase(x, p[0], p[1])
    else:
        return 0
# Plot the Mamdani output sets next to the Sugeno singletons, then save.
x3 = np.arange(0,100)
fig, ax = plt.subplots(1,2,figsize=(18, 4))
ax[0].plot(x3, [fx_rejected(y) for y in x3], label='rejected')
ax[0].plot(x3, [fx_considered(y) for y in x3], label='considered',color='r')
ax[0].plot(x3, [fx_accepted(y) for y in x3], label='accepted')
ax[0].set_title('Mamdani rules')
ax[0].legend(loc='lower right')
ax[1].vlines(x = target_points_sugeno[0], ymin = 0, ymax = 1, label='rejected', color='b')
ax[1].vlines(x = target_points_sugeno[1], ymin = 0, ymax = 1, label='considered',color='r')
ax[1].vlines(x = target_points_sugeno[2], ymin = 0, ymax = 1, label='accepted',color='g')
ax[1].vlines(0,0,1,visible=False)
ax[1].set_title('Sugeno rules')
ax[1].legend(loc='lower right')
fig.suptitle('Defuzzyfication rules')
plt.plot()
plt.savefig('defuzzyfication_rules.png')
def fuzzyfication():
    """Fuzzify the crisp `followers` and `engagement` series.

    Returns two parallel lists; each element holds ``[label, membership]``
    pairs for one user, one pair per fuzzy set.
    """
    fuzzyset_followers = [
        [[followers_label[0], fx_few_followers(value)],
         [followers_label[1], fx_average_followers(value)],
         [followers_label[2], fx_many_followers(value)],
         [followers_label[3], fx_huge_followers(value)]]
        for value in followers
    ]
    fuzzyset_engagement = [
        [[engagement_label[0], fx_low_engagement(value)],
         [engagement_label[1], fx_med_engagement(value)],
         [engagement_label[2], fx_high_engagement(value)]]
        for value in engagement
    ]
    return fuzzyset_followers, fuzzyset_engagement
# ### Inference rules
# Inference rule matrix: rows are follower labels, columns engagement
# labels; the integers 0/1/2 index into target_label and are replaced
# by the corresponding class names.
rule = [[0, 0,2],[0, 0, 2],[0, 1, 2] , [1, 1, 2]]
df_rule = pd.DataFrame(rule, index = followers_label, columns = engagement_label).replace({0:target_label[0],1:target_label[1],2:target_label[2]})
df_rule
def inference(x, y):
    """Mamdani min-max inference.

    x, y : parallel lists of fuzzified followers / engagement sets, each
    element a list of ``[label, membership]`` pairs. For every pair of
    input labels the rule table picks a target class; the rule strength
    is the min of the two memberships, and strengths for the same class
    are combined with max. Returns one ``{class: strength}`` dict per
    observation.
    """
    results = []
    for fol_set, eng_set in zip(x, y):
        strengths = {}
        for fol_label, fol_mu in fol_set:
            for eng_label, eng_mu in eng_set:
                target = df_rule[eng_label][fol_label]
                mu = min(eng_mu, fol_mu)
                strengths[target] = max(mu, strengths.get(target, mu))
        results.append(strengths)
    return results
def defuzzyfication(inferences, style='mamdani'):
    """Turn inference strengths into crisp scores.

    inferences : list of ``{class: strength}`` dicts from inference().
    style      : 'mamdani' (centroid over a sampled score grid) or
                 'sugeno' (weighted average of singleton outputs).
    Returns a list (or zero array for unknown styles) of crisp scores.

    Robustness fixes vs. the original:
    * class strengths are read with ``.get(..., 0)`` so an edited rule
      table in which some class never fires cannot raise KeyError;
    * the Mamdani branch guards against an all-zero aggregated
      membership, which previously raised ZeroDivisionError (the Sugeno
      branch already had an equivalent guard).
    """
    score = []
    for strengths in inferences:
        if style == 'mamdani':
            # Sample the aggregated output membership on a coarse grid
            # and take its centroid (center of gravity).
            points = np.arange(5, 100, 5)
            mu = []
            for p in points:
                reject = strengths.get('REJECTED', 0) if fx_rejected(p) > 0 else 0
                consider = strengths.get('CONSIDERED', 0) if fx_considered(p) > 0 else 0
                accept = strengths.get('ACCEPTED', 0) if fx_accepted(p) > 0 else 0
                mu.append(max(reject, consider, accept))
            total = sum(mu)
            # Guard: if nothing fired anywhere, fall back to 0 instead
            # of dividing by zero.
            z = sum(m * p for m, p in zip(mu, points)) / total if total > 0 else 0
            score.append(z)
        elif style == 'sugeno':
            p = target_points_sugeno
            reject = strengths.get('REJECTED', 0)
            consider = strengths.get('CONSIDERED', 0)
            accept = strengths.get('ACCEPTED', 0)
            weighted = reject * p[0] + consider * p[1] + accept * p[2]
            z = weighted / (reject + consider + accept) if weighted > 0 else 0
            score.append(z)
        else:
            # Unknown style: keep the original behavior (all-zero scores).
            score = np.zeros(len(inferences))
            print('not supported yet')
    return score
# Run the full fuzzy pipeline (fuzzify -> infer -> defuzzify) and attach
# the crisp score to the users dataframe.
style = 'mamdani'
fuzzyset_followers, fuzzyset_engagement = fuzzyfication()
x = inference(fuzzyset_followers,fuzzyset_engagement)
score = defuzzyfication(x,style)
df['Score'] = score
# df.sort_values(by=['Score'],ascending=False).to_csv('result.csv')
# Export the 20 best-scoring users for the chosen defuzzification style.
output = df.sort_values(by=['Score'],ascending=False).head(20)
filename = 'output_'+style+'.csv'
output.to_csv(filename,header=True,index=False)
print(' >>> Top 20 users <<<')
print(' method :',style)
output
# ### Test the accuracy
# Sanity check: compare the fuzzy top-20 against a naive ranking by
# followers * engagement and report the overlap as "accuracy".
df_test = df.copy()
df_test['FolxEng'] = followers*engagement
df_test = df_test.sort_values(by=['FolxEng'],ascending=False)
test = df_test['id'].head(20).values
data = df.sort_values(by=['Score'],ascending=False).head(20)['id'].values
count = 0
for i in data :
    if i in test:
        count+=1
    else:
        # Users in the fuzzy top-20 but not in the baseline top-20.
        print(i)
print('Accuracy : ',count/20*100,'%')
# *© Copyright 2019 <NAME>.*
| algorithm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gradient Boosting on Mushroom Dataset
# ```
# I have used XGBoost for implementing Gradient boosting. There are other options as
# well, like LightGBM.
# ```
# [Dataset](https://github.com/heenalsapovadia/ml_practices_2018/blob/master/Labs/Lab1/Heenal/Data/mushroom_new.csv)
#
# [EDA on dataset](https://github.com/heenalsapovadia/ml_practices_2018/blob/master/Labs/Lab1/Heenal/PythonNotebooks/Mushroom.ipynb)
# +
# Importing the necessary libraries
import numpy as np
import pandas as pd
import xgboost as xgb
#import lightgbm as lgb
# -
# Loading the data
mush = pd.read_csv('../../Lab1/Heenal/Data/mushroom_new.csv')
mush.shape
mush.head()
# Separating the data and the target variable
X = mush.drop('class', axis=1)
y = mush['class']
# +
# Separating the data into train and test sets (70/30 split).
# NOTE(review): no random_state is fixed, so the split is not reproducible.
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(X,y,test_size=.3)
# -
# # Applying XGBoost
# +
# The data is stored in a DMatrix, xgboost's optimized container;
# `label` defines the outcome variable.
dtrain=xgb.DMatrix(x_train,label=y_train)
dtest=xgb.DMatrix(x_test)
# +
# Booster parameters.
# NOTE(review): 'eta' and 'learning_rate' are aliases for the same
# parameter, so specifying both (1 and .05) means one silently wins --
# confirm which value is intended. 'silent' is deprecated in recent xgboost.
parameters={'max_depth':7, 'eta':1, 'silent':1,'objective':'binary:logistic','eval_metric':'auc','learning_rate':.05}
# -
# Train for a fixed number of boosting rounds and time the run.
num_round=50
from datetime import datetime
start = datetime.now()
xg=xgb.train(parameters,dtrain,num_round)
stop = datetime.now()
# Wall-clock execution time of the training step.
execution_time_xgb = stop-start
execution_time_xgb
# datetime.timedelta(days, seconds, microseconds) representation
# Predict on the test set; with objective binary:logistic these are
# probabilities of the positive class.
ypred=xg.predict(dtest)
ypred
ypred.shape
# Converting probabilities into hard 0/1 class labels.
# Iterate over the actual prediction length instead of the hard-coded
# row count 2438, so this keeps working for any test-set size.
for i in range(len(ypred)):
    if ypred[i] >= .5:  # decision threshold of .5
        ypred[i] = 1
    else:
        ypred[i] = 0
# Calculating the accuracy of the model on the held-out test set.
from sklearn.metrics import accuracy_score
accuracy_xgb = accuracy_score(y_test,ypred)
accuracy_xgb
| Labs/Lab2/Heenal/XGBoost_Mushroom.ipynb |
# $$
# \def\CC{\bf C}
# \def\QQ{\bf Q}
# \def\RR{\bf R}
# \def\ZZ{\bf Z}
# \def\NN{\bf N}
# $$
# # Sage code blocks
#
# ## Complexité de l'algorithme de tri de Python
# NOTE(review): this file is a fixture for rst2ipynb conversion; the
# cells below use Sage / Python 2 syntax ("print x"), which is a syntax
# error under Python 3 -- presumably intentional fixture content.
1+1
for x in range(3):
    print x
# Two sage commands without intermediate output get joined in a single input cell; no output is produced if none is specified:
1+1
2+2
3+3
# 1. Nested code block A
1+1
# 2. Nested code block B
1+1
# 3. Nested code block C (failing: the inner indent should match the itemized text indent)
#
# > ```{.python .input}
# > 1+1
# > ```
#
# **A doubly nested sage code block**
#
# 1. Calculer le plus grand élément d'une liste
1+1
def plus_grand_element(liste):
    """
    Return the largest element of the list.

    EXAMPLES::

        sage: plus_grand_element([7,3,1,10,4,10,2,9])
        10
        sage: plus_grand_element([7])
        7
    """
    resultat = liste[0]
    # Fix: scan the *whole* list. The original used
    # range(1, len(liste)-1), which skipped the last element, so e.g.
    # plus_grand_element([1, 2, 3]) wrongly returned 2.
    for i in range(1, len(liste)):
        # Invariant: resultat is the largest element of liste[:i]
        assert resultat in liste[:i]
        assert all(resultat >= x for x in liste[:i])
        if liste[i] > resultat:
            resultat = liste[i]
    return resultat
plus_grand_element([7,3,1,10,4,10,2,9])
#
# Foo (Failing).
#
# Bla:
# ```{.python .input}
# 1+1
# ```
#
# Don't forget the mandatory new line after \` ::\` ::
# sage: 1+1
#
# Placeholder Sage cells exercising paragraph/heading handling in the
# rst2ipynb conversion fixture; the "(Failing)" labels refer to known
# conversion failures, not to these expressions.
# Blah (Failing):
1+1
# Foo (Failing):
1+1
# **Note**
#
# Blah:
1+1
# Foo:
1+1
# ## Complexité de l'algorithme de tri de Python
#
# **Exercice**
#
# 1. Estimer la complexité de la fonction suivante
#
# :
#
# sage: def fusion(l1, l2): ....: sort(l1+l2)
#
#
| tests/ipynb/rst2ipynb/sage_code_blocks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## _*Quantum SVM (quantum kernel method)*_
#
# ### Introduction
#
# Classification algorithms and methods for machine learning are essential for pattern recognition and data mining applications. Well known techniques such as support vector machines and neural networks have blossomed over the last two decades as a result of the spectacular advances in classical hardware computational capabilities and speed. This progress in computer power made it possible to apply techniques, that were theoretically developed towards the middle of the 20th century, on classification problems that were becoming increasingly challenging.
#
# A key concept in classification methods is that of a kernel. Data cannot typically be separated by a hyperplane in its original space. A common technique used to find such a hyperplane consists on applying a non-linear transformation function to the data. This function is called a feature map, as it transforms the raw features, or measurable properties, of the phenomenon or subject under study. Classifying in this new feature space -and, as a matter of fact, also in any other space, including the raw original one- is nothing more than seeing how close data points are to each other. This is the same as computing the inner product for each pair of data in the set. So, in fact we do not need to compute the non-linear feature map for each datum, but only the inner product of each pair of data points in the new feature space. This collection of inner products is called the kernel and it is perfectly possible to have feature maps that are hard to compute but whose kernels are not.
#
# In this notebook we provide an example of a classification problem that requires a feature map for which computing the kernel is not efficient classically -this means that the required computational resources are expected to scale exponentially with the size of the problem. We show how this can be solved in a quantum processor by a direct estimation of the kernel in the feature space. The method we used falls in the category of what is called supervised learning, consisting of a training phase (where the kernel is calculated and the support vectors obtained) and a test or classification phase (where new unlabelled data is classified according to the solution found in the training phase).
#
# For further information please see: [https://arxiv.org/pdf/1804.11326.pdf](https://arxiv.org/pdf/1804.11326.pdf)
#
# **This notebook shows the SVM implementation based on the quantum kernel.**
from datasets import *
from qiskit_aqua.utils import split_dataset_to_data_and_labels, map_label_to_class_name
from qiskit_aqua.input import get_input_instance
from qiskit_aqua import run_algorithm
# First we prepare the dataset, which is used for training, testing and the finally prediction.
#
# *Note: You can easily switch to a different dataset, such as the Breast Cancer dataset, by replacing 'ad_hoc_data' to 'Breast_cancer' below.*
# +
n = 2  # dimension of each data point
training_dataset_size = 20
testing_dataset_size = 10
# Generate the tutorial's ad-hoc dataset; `gap` controls the separation
# between the two classes in feature space.
sample_Total, training_input, test_input, class_labels = ad_hoc_data(training_size=training_dataset_size,
                                                                     test_size=testing_dataset_size,
                                                                     n=n, gap=0.3, PLOT_DATA=True)
datapoints, class_to_label = split_dataset_to_data_and_labels(test_input)
print(class_to_label)
# -
# With the dataset ready we initialize the necessary inputs for the algorithm:
# - the input dictionary (params)
# - the input object containing the dataset info (algo_input).
# +
# Algorithm configuration: QSVM with a second-order-expansion feature
# map, run on the QASM simulator with 1024 measurement shots.
params = {
    'problem': {'name': 'svm_classification', 'random_seed': 10598},
    'algorithm': {
        'name': 'QSVM.Kernel'
    },
    'backend': {'name': 'qasm_simulator', 'shots': 1024},
    'feature_map': {'name': 'SecondOrderExpansion', 'depth': 2, 'entanglement': 'linear'}
}
algo_input = get_input_instance('SVMInput')
algo_input.training_dataset = training_input
algo_input.test_dataset = test_input
algo_input.datapoints = datapoints[0]  # 0 is data, 1 is labels
# -
# With everything setup, we can now run the algorithm.
#
# For the testing, the result includes the details and the success ratio.
#
# For the prediction, the result includes the predicted labels.
# +
# Train + test + predict in one call; `result` carries the kernel
# matrix, test accuracy and predicted labels.
result = run_algorithm(params, algo_input)
print("kernel matrix during the training:")
kernel_matrix = result['kernel_matrix_training']
# NOTE(review): plt and np are not imported in this notebook --
# presumably they come in via `from datasets import *`; confirm.
img = plt.imshow(np.asmatrix(kernel_matrix),interpolation='nearest',origin='upper',cmap='bone_r')
plt.show()
print("testing success ratio: ", result['testing_accuracy'])
print("predicted classes:", result['predicted_classes'])
# -
# ### The breast cancer dataset
# Now we run our algorithm with the real-world dataset: the breast cancer dataset
# +
# Same pipeline on the breast cancer dataset, reduced to n=2 features
# per data point.
sample_Total, training_input, test_input, class_labels = Breast_cancer(training_size=20, test_size=10, n=2, PLOT_DATA=True)
datapoints, class_to_label = split_dataset_to_data_and_labels(test_input)
# Invert the mapping so numeric labels can be turned back into names.
label_to_class = {label:class_name for class_name, label in class_to_label.items()}
print(class_to_label, label_to_class)
# +
algo_input = get_input_instance('SVMInput')
algo_input.training_dataset = training_input
algo_input.test_dataset = test_input
algo_input.datapoints = datapoints[0]
result = run_algorithm(params, algo_input)
# print(result)
print("kernel matrix during the training:")
kernel_matrix = result['kernel_matrix_training']
img = plt.imshow(np.asmatrix(kernel_matrix),interpolation='nearest',origin='upper',cmap='bone_r')
plt.show()
print("testing success ratio: ", result['testing_accuracy'])
print("ground truth: {}".format(map_label_to_class_name(datapoints[1], label_to_class)))
print("predicted: {}".format(result['predicted_classes']))
# -
| artificial_intelligence/qsvm_kernel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import nltk
from nltk import word_tokenize
from nltk.corpus import stopwords
# +
# English stopwords; synonym replacement below skips these words.
stoplist = stopwords.words('english')
def get_synonyms_lexicon(path):
    """Load a PPDB-style synonyms lexicon.

    Each line of the file is ``<word> <syn1> <syn2> ...`` separated by
    single spaces. Returns ``{word: [synonyms]}``.
    """
    synonyms_lexicon = {}
    # Use a context manager so the file handle is closed (the original
    # called open() without ever closing it) and stream line by line
    # instead of materializing readlines().
    with open(path, encoding="utf8") as lexicon_file:
        for line in lexicon_file:
            entry = line.strip().split(' ')
            synonyms_lexicon[entry[0]] = entry[1:]
    return synonyms_lexicon
def synonym_replacement(sentence, synonyms_lexicon):
    """Replace each non-stopword token found in the lexicon with its
    first listed synonym; returns the rewritten sentence."""
    known_words = synonyms_lexicon.keys()
    result = sentence
    for token in word_tokenize(sentence):
        if token not in stoplist and token in known_words:
            # Substitute the first synonym everywhere the token occurs.
            result = result.replace(token, synonyms_lexicon[token][0])
    return result
if __name__ == '__main__':
    # NOTE(review): the implicit string concatenation joins 'shopping'
    # directly onto 'It was conditioned...' with no '.' between them, so
    # split('.') will not separate those two sentences as the loop below
    # seems to expect -- confirm whether a trailing '.' is missing.
    text = 'Things I need to do this weekend: Groceries, electricity bill, rent and shopping' \
           'It was conditioned in very thin box which caused scratches on the main screen.' \
           'The involved businesses positively answered their clients who were fully refunded.'
    sentences = text.split('.')
    sentences.remove('')
    print(sentences)
    # PPDB paraphrase file: one head word plus its synonyms per line.
    synonyms_lexicon = get_synonyms_lexicon('./ppdb-xl.txt')
    for sentence in sentences:
        new_sentence = synonym_replacement(sentence, synonyms_lexicon)
        print(sentence)
        print(new_sentence)
# -
# word_tokenize needs the 'punkt' tokenizer models.
nltk.download('punkt')
| data_augmentation/Untitled.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Predicting Remaining Useful Life
# <p style="margin:30px">
# <img style="display:inline; margin-right:50px" width=50% src="https://www.featuretools.com/wp-content/uploads/2017/12/FeatureLabs-Logo-Tangerine-800.png" alt="Featuretools" />
# <img style="display:inline" width=15% src="https://upload.wikimedia.org/wikipedia/commons/e/e5/NASA_logo.svg" alt="NASA" />
# </p>
#
# The general setup for the problem is a common one: we have a single table of sensor observations over time. Now that collecting information is easier than ever, most industries have already generated *time-series* type problems by the way that they store data. As such, it is crucial to be able to handle data in this form. Thankfully, built-in functionality from [Featuretools](https://featuretools.alteryx.com/en/stable/) handles time varying data well.
#
# We'll demonstrate an end-to-end workflow using a [Turbofan Engine Degradation Simulation Data Set](https://ti.arc.nasa.gov/tech/dash/groups/pcoe/prognostic-data-repository/#turbofan) from NASA. This notebook demonstrates a rapid way to predict the Remaining Useful Life (RUL) of an engine using an initial dataframe of time-series data. There are three sections of the notebook:
# 1. [Understand the Data](#Step-1:-Understanding-the-Data)
# 2. [Generate features](#Step-2:-DFS-and-Creating-a-Model)
# 3. [Make predictions with Machine Learning](#Step-3:-Using-the-Model)
#
# *To run the notebooks, you need to download the data yourself. Download and unzip the file from [https://ti.arc.nasa.gov/c/13/](https://ti.arc.nasa.gov/c/6/). Then create a 'data' directory and place the files in the 'data' directory.*
#
#
# ## Highlights
# * Quickly make end-to-end workflow using time-series data
# * Find interesting automatically generated features
#
# # Step 1: Understanding the Data
# Here we load in the train data and give the columns names according to the `description.txt` file.
import composeml as cp
import numpy as np
import pandas as pd
import featuretools as ft
import utils
import os
# +
# Load the FD004 training set; utils.load_data assigns column names per
# the dataset's description.txt.
data_path = 'data/train_FD004.txt'
data = utils.load_data(data_path)
data.head()
# -
# ## NASA Run To Failure Dataset
# In this dataset we have 249 engines (`engine_no`) which are monitored over time (`time_in_cycles`). Each engine had `operational_settings` and `sensor_measurements` recorded for each cycle. The **Remaining Useful Life** (RUL) is the amount of cycles an engine has left before it needs maintenance.
# What makes this dataset special is that the engines run all the way until failure, giving us precise RUL information for every engine at every point in time.
#
# To train a model that will predict RUL, we can simulate real predictions by choosing a random point in the life of the engine and only using the data from before that point. We can create features with that restriction easily by using [cutoff_times](https://featuretools.alteryx.com/en/stable/getting_started/handling_time.html) in Featuretools. To structure the labeling process, we will use [Compose](https://compose.alteryx.com) which is an open source project for automatically generating labels with cutoff times.
#
# ### Define Labeling Function
# To get started, we define the labeling function that will return the RUL given the remaining observations of an engine.
def remaining_useful_life(df):
    """Labeling function for Compose: the RUL at an observation is the
    number of observations that come after it."""
    n_observations = len(df)
    return n_observations - 1
# ### Create Label Maker
# With the labeling function, we create the label maker for our prediction problem. To process the RUL for each engine, we set the `target_dataframe_name` to the engine number. By default, the `window_size` is set to the total observation size to contain the remaining observations for each engine.
# Label maker: one RUL label per engine, computed over the remaining
# observations ordered by 'time'.
lm = cp.LabelMaker(
    target_dataframe_name='engine_no',
    time_index='time',
    labeling_function=remaining_useful_life,
)
# ### Search Labels
# Let’s imagine we want to make predictions on turbines that are up and running. Turbines in general don’t fail before 120 cycles, so we will only make labels for engines that reach at least 100 cycles. To do this, the `minimum_data` parameter is set to 100. Using Compose, we can easily tweak this parameter as the requirements of our model changes. By setting `num_examples_per_instance` to one, we limit the search to one example per engine.
# +
# Search for labels: one example per engine, requiring at least 100
# cycles of history before a cutoff is placed.
label_times = lm.search(
    data.sort_values('time'),
    num_examples_per_instance=1,
    minimum_data=100,
    verbose=True,
)
label_times.head()
# -
# Let's walk through a row of the `labels_times` dataframe. In the third row, we have engine number 3. At 00:00 on January 6, the remaining useful life of engine number 3 is 206. Having a dataframe in this format tells Featuretools that the feature vector for engine number 3 should only be calculated with data from before that point in time.
#
# To apply Deep Feature Synthesis we need to establish an `EntitySet` structure for our data. The key insight in this step is that we're really interested in our data as collected by `engine`. We can create an `engines` dataframe by normalizing by the `engine_no` column in the raw data. In the next section, we'll create a feature matrix for the `engines` dataframe directly rather than the base dataframe of `recordings`.
def make_entityset(data):
    """Build the Featuretools EntitySet: the raw recordings dataframe
    plus normalized 'engines' and 'cycles' dataframes derived from it."""
    entityset = ft.EntitySet('Dataset')
    entityset.add_dataframe(
        dataframe=data,
        dataframe_name='recordings',
        index='index',
        time_index='time',
    )
    # One derived dataframe per grouping key of interest.
    for new_name, key in (('engines', 'engine_no'),
                          ('cycles', 'time_in_cycles')):
        entityset.normalize_dataframe(
            base_dataframe_name='recordings',
            new_dataframe_name=new_name,
            index=key,
        )
    return entityset
# Build and visualize the entity set.
es = make_entityset(data)
es
# ## Visualize EntitySet
es.plot()
# # Step 2: DFS and Creating a Model
# With the work from the last section in hand, we can quickly build features using Deep Feature Synthesis (DFS). The function `ft.dfs` takes an `EntitySet` and stacks primitives like `Max`, `Min` and `Last` exhaustively across dataframes. Feel free to try the next step with a different primitive set to see how the results differ!
#
# We build features only using data up to and including the cutoff time of each label. This is done by setting the `cutoff_time` parameter to the label times we generated previously. Notice that the output of Compose integrates easily with Featuretools.
# +
# Deep Feature Synthesis: per-engine aggregate features, using only the
# data before each engine's cutoff time (label_times).
fm, features = ft.dfs(
    entityset=es,
    target_dataframe_name='engines',
    agg_primitives=['last', 'max', 'min'],
    trans_primitives=[],
    cutoff_time=label_times,
    max_depth=3,
    verbose=True,
)
# Persist the feature matrix for reuse below.
fm.to_csv('simple_fm.csv')
# -
# ## Machine Learning Baselines
# Before we use that feature matrix to make predictions, we should check how well guessing does on this dataset. We can use a `train_test_split` from scikit-learn to split our training data once and for all. Then, we'll check the following baselines:
# 1. Always predict the median value of `y_train`
# 2. Always predict the RUL as if every engine has the median lifespan in `X_train`
#
# We'll check those predictions by finding the mean of the absolute value of the errors.
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
# +
fm = pd.read_csv('simple_fm.csv', index_col='engine_no')
X = fm.copy().fillna(0)
y = X.pop('remaining_useful_life')
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=17)
# Baseline 1: always predict the median training label.
medianpredict1 = [np.median(y_train) for _ in y_test]
mae = mean_absolute_error(medianpredict1, y_test)
print('Baseline by median label: Mean Abs Error = {:.2f}'.format(mae))
# +
# Baseline 2: assume every engine lives the median training lifespan;
# RUL estimate = median_life - cycles already elapsed, floored at 0.
from_train = es['recordings']['engine_no'].isin(y_train.index)
recordings_from_train = es['recordings'][from_train]
engines = recordings_from_train.groupby(['engine_no'])
median_life = np.median(engines.apply(lambda df: df.shape[0]))
from_test = es['recordings']['engine_no'].isin(y_test.index)
recordings_from_test = es['recordings'][from_test]
engines = recordings_from_test.groupby(['engine_no'])
life_in_test = engines.apply(lambda df: df.shape[0]) - y_test
medianpredict2 = median_life - life_in_test
medianpredict2 = medianpredict2.apply(lambda row: max(row, 0))
mae = mean_absolute_error(medianpredict2, y_test)
print('Baseline by median life: Mean Abs Error = {:.2f}'.format(mae))
# -
# # Step 3: Using the Model
# Now, we can use our created features to fit a `RandomForestRegressor` to our data and see if we can improve on the previous scores.
# +
# Fit a random forest on the DFS features and compare to the baselines.
reg = RandomForestRegressor(n_estimators=100)
reg.fit(X_train, y_train)
preds = reg.predict(X_test)
scores = mean_absolute_error(preds, y_test)
print('Mean Abs Error: {:.2f}'.format(scores))
# Show the 10 most important features.
high_imp_feats = utils.feature_importances(X, reg, feats=10)
# -
# ## Step 4: Build the model automatically
# Up to now, we have been training a single Random Forest model. However, there are many different model types that could be useful. Common ones include Catboost, LightGBM, etc. Using [EvalML](https://evalml.alteryx.com/en/stable/), an open source autoML library created by Alteryx, we can automatically build and tune multiple models, as well as compare the results
#
# <p align="center">
# <img width=50% src="https://evalml-web-images.s3.amazonaws.com/evalml_horizontal.svg" alt="Featuretools" />
# </p>
#
# +
import evalml
from evalml import AutoMLSearch
# AutoML: search multiple model families, optimizing mean absolute error.
automl = AutoMLSearch(X_train=X_train,
                      y_train=y_train,
                      problem_type="regression",
                      objective="mae",
                      max_batches=3,
                      max_iterations=20)
automl.search()
# -
# Refit the best pipeline found by the search and inspect its top
# ten features by importance.
pipeline = automl.best_pipeline
pipeline.fit(X_train, y_train)
pipeline.feature_importance[0:10]
# ## Step 5: Predictions on test data
# Next, we can apply the exact same transformations (including DFS) to our test data. For this particular case, the real answer isn't in the data so we don't need to worry about cutoff times. Moving forward, we will just use evalML in this demo
# +
# Apply the same feature definitions to the held-out test file; the RUL
# answers live in a separate file, so no cutoff times are needed here.
data2 = utils.load_data('data/test_FD004.txt')
es2 = make_entityset(data2)
fm2 = ft.calculate_feature_matrix(
    entityset=es2,
    features=features,
    verbose=True,
)
fm2.head()
# +
X = fm2.copy().fillna(0)
# Ground-truth RUL values supplied with the dataset for the test set.
y = pd.read_csv(
    'data/RUL_FD004.txt',
    sep=' ',
    header=None,
    names=['remaining_useful_life'],
    index_col=False,
)
preds2 = pipeline.predict(X)
mae = mean_absolute_error(preds2, y)
print('Mean Abs Error: {:.2f}'.format(mae))
# Recompute both baselines on the test set for comparison.
medianpredict1 = [np.median(y_train) for _ in preds2]
mae = mean_absolute_error(medianpredict1, y)
print('Baseline by median label: Mean Abs Error = {:.2f}'.format(mae))
engines = es2['recordings'].groupby(['engine_no'])
medianpredict2 = median_life - engines.apply(lambda df: df.shape[0])
medianpredict2 = medianpredict2.apply(lambda row: max(row, 0))
mae = mean_absolute_error(medianpredict2, y)
print('Baseline by median life: Mean Abs Error = {:.2f}'.format(mae))
# -
# # This is the simple version of a more advanced notebook that can be found in the [second](Advanced%20Featuretools%20RUL.ipynb) notebook. That notebook will show how to use a novel entityset structure, custom primitives, and automated hyperparameter tuning to improve the score.
# <p align="center">
# <img width=50% src="https://alteryx-open-source-images.s3.amazonaws.com/OpenSource_Logo-01.jpg" alt="ayx_os" />
# </p>
#
# Featuretools was created by the developers at [Alteryx](https://www.alteryx.com). If building impactful data science pipelines is important to you or your business, please [get in touch](https://www.alteryx.com/contact-us/).
| predict-remaining-useful-life/Simple Featuretools RUL Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
#
# .. _tut_stats_cluster_sensor_2samp_tfr:
#
# # Non-parametric between conditions cluster statistic on single trial power
#
#
# This script shows how to compare clusters in time-frequency
# power estimates between conditions. It uses a non-parametric
# statistical procedure based on permutations and cluster
# level statistics.
#
# The procedure consists in:
#
# - extracting epochs for 2 conditions
# - compute single trial power estimates
# - baseline line correct the power estimates (power ratios)
# - compute stats to see if the power estimates are significantly different
# between conditions.
#
#
# +
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.time_frequency import single_trial_power
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
# -
# Set parameters
#
# +
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
event_id = 1
tmin = -0.2  # epoch window start (s) relative to the event
tmax = 0.5   # epoch window end (s)
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
# Pick MEG gradiometers; EOG is kept for artifact rejection below.
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
                       stim=False, include=include, exclude='bads')
ch_name = raw.info['ch_names'][picks[0]]
# Load condition 1 (event_id 1): epoch, reject artifacts, convert units.
reject = dict(grad=4000e-13, eog=150e-6)
event_id = 1
epochs_condition_1 = mne.Epochs(raw, events, event_id, tmin, tmax,
                                picks=picks, baseline=(None, 0),
                                reject=reject)
data_condition_1 = epochs_condition_1.get_data()  # as 3D matrix
data_condition_1 *= 1e13  # change unit to fT / cm
# Load condition 2 (event_id 2) the same way.
event_id = 2
epochs_condition_2 = mne.Epochs(raw, events, event_id, tmin, tmax,
                                picks=picks, baseline=(None, 0),
                                reject=reject)
data_condition_2 = epochs_condition_2.get_data()  # as 3D matrix
data_condition_2 *= 1e13  # change unit to fT / cm
# Keep only one channel (index 97) for the single-channel analysis.
data_condition_1 = data_condition_1[:, 97:98, :]
data_condition_2 = data_condition_2[:, 97:98, :]
# Time vector
times = 1e3 * epochs_condition_1.times  # change unit to ms
# Factor to downsample the temporal dimension of the PSD computed by
# single_trial_power. Decimation occurs after frequency decomposition and
# can be used to reduce memory usage (and possibly computational time of
# downstream operations such as nonparametric statistics) if you don't
# need high spectrotemporal resolution.
decim = 2
frequencies = np.arange(7, 30, 3)  # define frequencies of interest
sfreq = raw.info['sfreq']  # sampling in Hz
n_cycles = 1.5
# Single-trial time-frequency power for each condition.
epochs_power_1 = single_trial_power(data_condition_1, sfreq=sfreq,
                                    frequencies=frequencies,
                                    n_cycles=n_cycles, decim=decim)
epochs_power_2 = single_trial_power(data_condition_2, sfreq=sfreq,
                                    frequencies=frequencies,
                                    n_cycles=n_cycles, decim=decim)
epochs_power_1 = epochs_power_1[:, 0, :, :]  # only 1 channel to get 3D matrix
epochs_power_2 = epochs_power_2[:, 0, :, :]  # only 1 channel to get 3D matrix
# Compute ratio with baseline power (be sure to correct time vector with
# decimation factor)
baseline_mask = times[::decim] < 0
epochs_baseline_1 = np.mean(epochs_power_1[:, :, baseline_mask], axis=2)
epochs_power_1 /= epochs_baseline_1[..., np.newaxis]
epochs_baseline_2 = np.mean(epochs_power_2[:, :, baseline_mask], axis=2)
epochs_power_2 /= epochs_baseline_2[..., np.newaxis]
# -
# Compute statistic
#
# Cluster-level permutation test comparing power between conditions.
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_test([epochs_power_1, epochs_power_2],
                             n_permutations=100, threshold=threshold, tail=0)
# View time-frequency plots
#
# +
plt.clf()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
# Top panel: evoked-response contrast between the two conditions.
plt.subplot(2, 1, 1)
evoked_contrast = np.mean(data_condition_1, 0) - np.mean(data_condition_2, 0)
plt.plot(times, evoked_contrast.T)
plt.title('Contrast of evoked response (%s)' % ch_name)
plt.xlabel('time (ms)')
plt.ylabel('Magnetic Field (fT/cm)')
plt.xlim(times[0], times[-1])
plt.ylim(-100, 200)
# Bottom panel: T statistics with only significant clusters overlaid.
plt.subplot(2, 1, 2)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
    if p_val <= 0.05:
        T_obs_plot[c] = T_obs[c]
plt.imshow(T_obs,
           extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
           aspect='auto', origin='lower', cmap='RdBu_r')
plt.imshow(T_obs_plot,
           extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
           aspect='auto', origin='lower', cmap='RdBu_r')
plt.xlabel('time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
plt.show()
| 0.14/_downloads/plot_cluster_stats_time_frequency.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# +
## Module 1
# +
## Feature engineering
# Inspect the first few lines of your data using head()
credit.head(3)
# Label-encode every non-numeric column in place.
for column in non_numeric_columns:
    le = LabelEncoder()
    credit[column] = le.fit_transform(credit[column])
# Inspect the data types of the columns of the data frame
print(credit.dtypes)
# -
# +
## Your first pipeline
# Split the data into train and test, with 20% as test
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=1)
# Create a random forest classifier, fixing the seed to 2
rf_model = RandomForestClassifier(random_state=2).fit(
    X_train, y_train)
# Use it to predict the labels of the test data
rf_predictions = rf_model.predict(X_test)
# Record the random forest's accuracy in the shared accuracies dict.
accuracies['rf'] = accuracy_score(y_test, rf_predictions)
# -
# +
## Grid search CV for model complexity
# Tune n_estimators for a RandomForestClassifier over 10..40 (step 10).
param_grid = {'n_estimators': list(range(10, 50, 10))}
grid = GridSearchCV(RandomForestClassifier(), param_grid, cv=3)
grid.fit(X, y)
grid.best_params_
# Tune n_estimators for an AdaBoostClassifier over 1..10.
param_grid = {'n_estimators': list(range(1, 11, 1))}
grid = GridSearchCV(AdaBoostClassifier(), param_grid, cv=3)
grid.fit(X, y)
grid.best_params_
# Tune n_neighbors for a KNeighborsClassifier over {10, 50, 100}.
param_grid = {'n_neighbors': [10,50,100]}
grid = GridSearchCV(KNeighborsClassifier(), param_grid, cv=3)
grid.fit(X, y)
grid.best_params_
# -
# +
# Categorical encodings
# Create numeric encoding for credit_history
credit_history_num = LabelEncoder().fit_transform(
    credit['credit_history'])
# Append the numeric encoding as an extra column.
# NOTE(review): the second positional argument to pd.concat is axis=1;
# recent pandas requires it as a keyword argument.
X_num = pd.concat([X, pd.Series(credit_history_num)], 1)
# Create new feature matrix with one-hot dummies for credit_history
X_hot = pd.concat(
    [X, pd.get_dummies(credit['credit_history'])], 1)
# Compare the number of features of the resulting DataFrames
X_hot.shape[1] > X_num.shape[1]
# -
# +
## Feature transformations
# Function computing absolute difference from column mean
def abs_diff(values):
    """Return the absolute deviation of each element from the mean of *values*."""
    center = np.mean(values)
    return np.abs(values - center)
# Apply it to the credit amount and store to new column
credit['credit_amount_diff'] = abs_diff(credit['credit_amount'])
# Score old and new versions of this feature with f_classif()
scores = f_classif(credit[[ 'credit_amount', 'credit_amount_diff']], credit['class'])[0]
# Inspect the scores and drop the lowest-scoring feature
# FIX: axis keyword — positional axis args to DataFrame.drop were removed in pandas 2.0.
credit_new = credit.drop(['credit_amount'], axis=1)
# -
# +
## Bringing it all together
# NOTE(review): `X_train`/`y_train` and sklearn names come from earlier cells.
# Find the best value for max_depth among values 2, 5 and 10
grid_search = GridSearchCV(RandomForestClassifier(random_state=1),
    param_grid={'max_depth':[2,5,10]})
best_value = grid_search.fit(X_train,
    y_train).best_params_['max_depth']
# Using the best value from above, fit a random forest
clf = RandomForestClassifier(random_state=1,
    max_depth=best_value).fit(X_train, y_train)
# Apply SelectKBest with f_classif and pick top 100 features
vt = SelectKBest(f_classif, k=100).fit(X_train, y_train)
# Refit the classifier using best_depth on the reduced data
clf_vt = RandomForestClassifier(random_state=1,
    max_depth=best_value).fit(vt.transform(X_train),
    y_train)
# +
### MODULE 2
# +
## Is the source or the destination bad?
def featurize(df):
    """Summarise one group of network-flow rows into three scalar features."""
    features = {}
    features['unique_ports'] = len(set(df['destination_port']))
    features['average_packet'] = np.mean(df['packet_count'])
    features['average_duration'] = np.mean(df['duration'])
    return features
# Group by source computer, and apply the feature extractor
# NOTE(review): `flows` and `bads` are supplied by the course environment.
out = flows.groupby('source_computer').apply(featurize)
# Convert the iterator to a dataframe by calling list on it
X = pd.DataFrame(list(out), index=out.index)
# Check which sources in X.index are bad to create labels
y = [x in bads for x in X.index]
# Report the average accuracy of Adaboost over 3-fold CV
print(np.mean(cross_val_score(AdaBoostClassifier(), X, y)))
# -
# +
## Feature engineering on grouped data
# Create a feature counting unique protocols per source
protocols = flows.groupby('source_computer').apply(
    lambda df: len(set(df['protocol'])))
# Convert this feature into a dataframe, naming the column
protocols_DF = pd.DataFrame(
    protocols, index=protocols.index, columns=['protocol'])
# Now concatenate this feature with the previous dataset, X
X_more = pd.concat([X, protocols_DF], axis=1)
# Refit the classifier and report its accuracy
print(np.mean(cross_val_score(
    AdaBoostClassifier(), X_more, y)))
# -
# +
## Turning a heuristic into a classifier
# Create a new dataset X_train_bad by subselecting bad hosts
X_train_bad = X_train[y_train]
# Calculate the average of unique_ports in bad examples
avg_bad_ports = np.mean(X_train_bad['unique_ports'])
# Label as positive sources that use more ports than that
pred_port = X_test['unique_ports'] > avg_bad_ports
# Print the accuracy of the heuristic
print(accuracy_score(y_test, pred_port))
# -
# +
### Combining heuristics
# Compute the mean of average_packet for bad sources
avg_bad_packet = np.mean(X_train[y_train]['average_packet'])
# Label as positive if average_packet is lower than that
pred_packet = X_test['average_packet'] < avg_bad_packet
# Find indices where pred_port and pred_packet both True
pred_port = X_test['unique_ports'] > avg_bad_ports
# elementwise product of two boolean series acts as a logical AND
pred_both = pred_packet * pred_port
# Ports only produced an accuracy of 0.919. Is this better?
print(accuracy_score(y_test, pred_both))
# -
# +
## Dealing with label noise
# NOTE(review): the first 100 training examples are the noisy ones here.
# Fit a Gaussian Naive Bayes classifier to the training data
clf = GaussianNB().fit(X_train, y_train_noisy)
# Report its accuracy on the test data
print(accuracy_score(y_test, clf.predict(X_test)))
# Assign half the weight to the first 100 noisy examples
weights = [0.5]*100 + [1.0]*(len(y_train_noisy)-100)
# Refit using weights and report accuracy. Has it improved?
clf_weights = GaussianNB().fit(X_train, y_train_noisy, sample_weight=weights)
print(accuracy_score(y_test, clf_weights.predict(X_test)))
# -
# +
# Real-world cost analysis
# Fit a random forest classifier to the training data
clf = RandomForestClassifier(random_state=2).fit(X_train, y_train)
# Label the test data
preds = clf.predict(X_test)
# Get false positives/negatives from the confusion matrix
# BUG FIX: sklearn's confusion_matrix(...).ravel() yields (tn, fp, fn, tp),
# not (tp, fp, fn, tn) — the old unpacking swapped tp and tn.
tn, fp, fn, tp = confusion_matrix(y_test, preds).ravel()
# Now compute the cost using the manager's advice
cost = fp*10 + fn*150
# -
# +
## Default thresholding
# Score the test data using the given classifier
scores = clf.predict_proba(X_test)
# Get labels from the scores using the default threshold
preds = [s[1] > 0.5 for s in scores]
# Use the predict method to label the test data again
preds_default = clf.predict(X_test)
# Compare the two sets of predictions
# (list == ndarray broadcasts elementwise, so all() checks every entry)
all(preds == preds_default)
# -
# +
## Optimizing the threshold
# Create a range of equally spaced threshold values
t_range = [0.0, 0.25, 0.5, 0.75, 1.0]
# Store the predicted labels for each value of the threshold
preds = [[s[1] > thr for s in scores] for thr in t_range]
# Compute the accuracy for each threshold
accuracies = [accuracy_score(y_test, p) for p in preds]
# Compute the F1 score for each threshold
f1_scores = [f1_score(y_test, p) for p in preds]
# Report the optimal threshold for accuracy, and for F1
# BUG FIX: bare `argmax` is undefined in this notebook — qualify with np.
print(t_range[np.argmax(accuracies)], t_range[np.argmax(f1_scores)])
# +
## Bringing it all together
# Create a scorer assigning more cost to false positives
def my_scorer(y_test, y_est, cost_fp=10.0, cost_fn=1.0):
    """Cost-sensitive loss: weighted sum of false positives and false negatives."""
    tn, fp, fn, tp = confusion_matrix(y_test, y_est).ravel()
    total_cost = cost_fp * fp + cost_fn * fn
    return total_cost
# fit a Decision Tree to the data and compute the loss
clf = DecisionTreeClassifier(random_state=2).fit(X_train, y_train)
print(my_scorer(y_test, clf.predict(X_test)))
# Refit, downweighting subjects whose weight is above 80
# (heavier subjects are assumed less reliable in this exercise)
weights = [0.5 if w > 80 else 1.0 for w in X_train.weight]
clf_weighted = DecisionTreeClassifier().fit(
    X_train, y_train, sample_weight=weights)
print(my_scorer(y_test, clf_weighted.predict(X_test)))
# -
# +
## Module 3
# +
## Your first pipeline - again!
# Create pipeline with feature selector and classifier
pipe = Pipeline([
    ('feature_selection', SelectKBest(f_classif)),
    ('clf', RandomForestClassifier(random_state=2))])
# Create a parameter grid
params = dict(
    feature_selection__k=[10, 20],
    clf__n_estimators=[2, 5])
# Initialize the grid search object
grid_search = GridSearchCV(pipe, param_grid=params)
# Fit it to the data and print the best value combination
print(grid_search.fit(X_train, y_train).best_params_)
# -
# +
# Custom scorers in pipelines
## 1
# Create a custom scorer
scorer = make_scorer(roc_auc_score)
# Initialize the CV object
gs = GridSearchCV(pipe, param_grid=params, scoring=scorer)
# Fit it to the data and print the winning combination
print(gs.fit(X_train, y_train).best_params_)
## 2
# Create a custom scorer
scorer = make_scorer(f1_score)
# Initialise the CV object
gs = GridSearchCV(pipe, param_grid=params, scoring=scorer)
# Fit it to the data and print the winning combination
print(gs.fit(X_train, y_train).best_params_)
## 3
# Create a custom scorer
# NOTE(review): `my_metric` is supplied by the course environment.
scorer = make_scorer(my_metric)
# Initialise the CV object
gs = GridSearchCV(pipe, param_grid=params, scoring=scorer)
# Fit it to the data and print the winning combination
print(gs.fit(X_train, y_train).best_params_)
# +
## Pickles
# SECURITY: pickle.load must only ever be used on trusted files —
# unpickling can execute arbitrary code.
# Fit a random forest to the training set
clf = RandomForestClassifier(random_state=42).fit(
    X_train, y_train)
# Save it to a file, to be pushed to production
with open('model.pkl', 'wb') as file:
    pickle.dump(clf, file=file)
# Now load the model from file in the production environment
with open('model.pkl', 'rb') as file:
    clf_from_file = pickle.load(file)
# Predict the labels of the test dataset
preds = clf_from_file.predict(X_test)
# -
# +
## Custom function transformers in pipelines
# Define a feature extractor to flag very large values
def more_than_average(X, multiplier=1.0):
    """Binarise column 1 of X: flag entries above multiplier * the column mean."""
    out = X.copy()
    threshold = multiplier * np.mean(out[:, 1])
    out[:, 1] = out[:, 1] > threshold
    return out
# Convert your function so that it can be used in a pipeline
pipe = Pipeline([
    ('ft', FunctionTransformer(more_than_average)),
    ('clf', RandomForestClassifier(random_state=2))])
# Optimize the parameter multiplier using GridSearchCV
params = dict(ft__multiplier = [1,2,3])
grid_search = GridSearchCV(pipe, param_grid=params)
# -
# +
## Challenge the champion
# Load the current model from disk
# NOTE(review): the file handle from open() is never closed here —
# prefer a `with` block.
champion = pickle.load(open('model.pkl', 'rb'))
# Fit a Gaussian Naive Bayes to the training data
challenger = GaussianNB().fit(X_train, y_train)
# Print the F1 test scores of both champion and challenger
print(f1_score(y_test, challenger.predict(X_test)))
print(f1_score(y_test, champion.predict(X_test)))
# Write back to disk the best-performing model
# NOTE(review): the champion is written back unconditionally; the exercise
# assumes the champion won — confirm before reusing this pattern.
with open('model.pkl', 'wb') as file:
    pickle.dump(champion, file=file)
# -
# +
## Cross-validation statistics
# Fit your pipeline using GridSearchCV with three folds
grid_search = GridSearchCV(
    pipe, params, cv=3, return_train_score=True)
# Fit the grid search
gs = grid_search.fit(X_train, y_train)
# Store the results of CV into a pandas dataframe
results = pd.DataFrame(gs.cv_results_)
# Print the difference between mean test and training scores
print(
    results['mean_test_score']-results['mean_train_score'])
# +
grid_search = GridSearchCV(pipe, params, cv=3, return_train_score=True)
gs = grid_search.fit(X_train, y_train)
results = pd.DataFrame(gs.cv_results_)
# BUG FIX: the last column name had a stray leading space (' std_test_score'),
# which raises a KeyError when selecting from cv_results_.
results[['mean_train_score', 'std_train_score',
         'mean_test_score', 'std_test_score']]
# +
## Tuning the window size
# Loop over window sizes
for w_size in wrange:
# Define sliding window
sliding = arrh.loc[(t_now - w_size + 1):t_now]
# Extract X and y from the sliding window
X, y = sliding.drop('class', axis=1), sliding['class']
# Fit the classifier and store the F1 score
preds = GaussianNB().fit(X, y).predict(X_test)
accuracies.append(f1_score(y_test, preds))
# Estimate the best performing window size
optimal_window = wrange[np.argmax(accuracies)]
# -
# +
## Bringing it all together
# Create a pipeline
pipe = Pipeline([
    ('ft', SelectKBest()), ('clf', RandomForestClassifier(random_state=2))])
# Create a parameter grid
grid = {'ft__k':[5, 10], 'clf__max_depth':[10, 20]}
# Execute grid search CV on a dataset containing under 50s
grid_search = GridSearchCV(pipe, param_grid=grid)
arrh = arrh.iloc[np.where(arrh['age'] < 50)]
# FIX: axis keyword — positional axis args to DataFrame.drop were removed in pandas 2.0.
grid_search.fit(arrh.drop('class', axis=1), arrh['class'])
# Push the fitted pipeline to production
with open('pipe.pkl', 'wb') as file:
    pickle.dump(grid_search, file)
# -
# +
### Module 4
# +
## A simple outlier
# Import the LocalOutlierFactor module
from sklearn.neighbors import LocalOutlierFactor as lof
# Create the list [1.0, 1.0, ..., 1.0, 10.0] as explained
x = [1.0]*30
x.append(10.0)
# Cast to a data frame
X = pd.DataFrame(x)
# Fit the local outlier factor and print the outlier scores
# (fit_predict returns -1 for outliers, 1 for inliers)
print(lof().fit_predict(X))
# -
# +
## LoF contamination
# Fit the local outlier factor and output predictions
preds = lof().fit_predict(X)
# Print the confusion matrix
print(confusion_matrix(ground_truth, preds))
# Set the contamination parameter to 0.2
preds = lof(contamination=0.2).fit_predict(X)
# Print the confusion matrix
print(confusion_matrix(ground_truth, preds))
# +
# Contamination to match outlier frequency in ground_truth
preds = lof(
    contamination=np.mean(ground_truth==-1.0)).fit_predict(X)
# Print the confusion matrix
print(confusion_matrix(ground_truth, preds))
# +
## Novelty algorithms
clf_lof = LocalOutlierFactor(novelty=True).fit(X_train)
clf_isf = IsolationForest().fit(X_train)
clf_svm = OneClassSVM().fit(X_train)
# +
## A simple novelty
# Create a list of thirty 1s and cast to a dataframe
X = pd.DataFrame([1.0]*30)
# Create an instance of a lof novelty detector
detector = lof(novelty=True)
# Fit the detector to the data
detector.fit(X)
# Use it to predict the label of an example with value 10.0
print(detector.predict(pd.DataFrame([10.0])))
# -
# +
## Three novelty detectors
# Import the novelty detector
from sklearn.svm import OneClassSVM as onesvm
# Fit it to the training data and score the test data
svm_detector = onesvm().fit(X_train)
scores = svm_detector.score_samples(X_test)
########################################################
# Import the novelty detector
from sklearn.svm import OneClassSVM as onesvm
from sklearn.ensemble import IsolationForest as isof
# Fit it to the training data and score the test data
isof_detector = isof().fit(X_train)
scores = isof_detector.score_samples(X_test)
########################################################
# Import the novelty detector
from sklearn.neighbors import LocalOutlierFactor as lof
# Fit it to the training data and score the test data
lof_detector = lof(novelty=True).fit(X_train)
scores = lof_detector.score_samples(X_test)
# +
# NOTE(review): this cell duplicates the LoF block directly above.
# Import the novelty detector
from sklearn.neighbors import LocalOutlierFactor as lof
# Fit it to the training data and score the test data
lof_detector = lof(novelty=True).fit(X_train)
scores = lof_detector.score_samples(X_test)
# -
# +
## Contamination revisited
# Fit a one-class SVM detector and score the test data
nov_det = onesvm().fit(X_train)
scores = nov_det.score_samples(X_test)
# Find the observed proportion of outliers in the test data
prop = np.mean(y_test==1)
# Compute the appropriate threshold
# (score_samples: higher means more normal, so take the prop-quantile)
threshold = np.quantile(scores, prop)
# Print the confusion matrix for the thresholded scores
print(confusion_matrix(y_test, scores > threshold))
# -
# +
## Find the neighbor
# Import DistanceMetric as dm
from sklearn.neighbors import DistanceMetric as dm
# Find the Euclidean distance between all pairs
dist_eucl = dm.get_metric('euclidean').pairwise(features)
# Find the Hamming distance between all pairs
dist_hamm = dm.get_metric('hamming').pairwise(features)
# Find the Chebyshev distance between all pairs
dist_cheb = dm.get_metric('chebyshev').pairwise(features)
# -
# +
# Compute outliers according to the euclidean metric
out_eucl = lof(metric='euclidean').fit_predict(features)
# Compute outliers according to the hamming metric
out_hamm = lof(metric='hamming').fit_predict(features)
# Compute outliers according to the jaccard metric
out_jacc = lof(metric='jaccard').fit_predict(features)
# Find if the metrics agree on any one datapoint
# (a sum of -3 means all three flagged the same point as an outlier)
print(any(out_eucl + out_hamm + out_jacc == -3))
# -
# +
## Restricted Levenshtein
# Wrap the RD-Levenshtein metric in a custom function
def my_rdlevenshtein(u, v):
    """Adapter for pdist: unwrap the 1-element arrays and compare the strings."""
    left, right = u[0], v[0]
    return stringdist.rdlevenshtein(left, right)
# Reshape the array into a numpy matrix
sequences = np.array(proteins['seq']).reshape(-1, 1)
# Compute the pairwise distance matrix in square form
M = squareform(pdist(sequences, my_rdlevenshtein))
# Run a LoF algorithm on the precomputed distance matrix
preds = lof(metric='precomputed').fit_predict(M)
# Compute the accuracy of the outlier predictions
# NOTE(review): `accuracy` is presumably accuracy_score pre-bound by the
# course environment — it is not defined anywhere in this notebook; confirm.
print(accuracy(proteins['label'] == 'VIRUS', preds==-1))
# -
# +
## Bringing it all together
# Create a feature that contains the length of the string
proteins['len'] = proteins['seq'].apply(lambda s: len(s))
# Create a feature encoding the first letter of the string
proteins['first'] = LabelEncoder().fit_transform(
    proteins['seq'].apply(lambda s: list(s)[0]))
# Extract scores from the fitted LoF object, compute its AUC
scores_lof = lof_detector.negative_outlier_factor_
# NOTE(review): `auc` is presumably roc_auc_score pre-bound by the course
# environment (sklearn.metrics.auc has a different signature) — confirm.
print(auc(proteins['label']=='IMMUNE SYSTEM', scores_lof))
# Fit a 1-class SVM, extract its scores, and compute its AUC
svm = OneClassSVM().fit(proteins[['len', 'first']])
scores_svm = svm.score_samples(proteins[['len', 'first']])
print(auc(proteins['label']=='IMMUNE SYSTEM', scores_svm))
# -
# source notebook: Designing Machine Learning Workflows in Python/Designing Machine Learning Workflows in Python.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from bs4 import BeautifulSoup as bs
import requests
import re
url = 'https://sofifa.com/players?offset=0'  # first listing page; offset paginates by 60
def soup_maker(url):
    """Fetch *url* and return its body parsed with BeautifulSoup (lxml parser)."""
    response = requests.get(url)
    return bs(response.content, 'lxml')
# # Scratch Work
soup = soup_maker(url)
scraped_data = []
tbl = soup.find('table',{'class':'table table-hover persist-area'})
tbdy = tbl.find('tbody')
all_a = tbdy.find_all('a', {'rel':None})
# players on every other index
for i, lnk in enumerate(all_a):
    if i % 2 == 0:
        # BUG FIX: create the dict per player INSIDE the loop — a single
        # shared dict made every appended entry alias the last player.
        player_details = {}
        player_details['short_name'] = lnk.text
        #player_details.update(get_player_details('http://sofifa.com' + lnk['href']))
        player_details['link'] = 'http://sofifa.com' + lnk['href']
        print(player_details)
        scraped_data.append(player_details)
# +
# NOTE(review): scratch cells — exploratory one-off parsing that is later
# formalised in the gather_* functions below.
all_deets = {}
soup = soup_maker('http://sofifa.com/player/20801/c-ronaldo-dos-santos-aveiro/')
plyr_info = soup.find('div', {'class':'player'})
# find_plyr_info
plyr_dat = {} # prepare dictionary for player data
ply_inf = plyr_info.find('div', {'class':'meta'}) # grab section with player data
infos = ply_inf.text
data_str = infos[infos.index('Age') + 4:] # grab all data in str to right of Age like height and weight
# set variables
plyr_dat['pref_pos'] = ply_inf.find('span').text
plyr_dat['full_name'] = ply_inf.text[0:ply_inf.text.find(plyr_dat['pref_pos'])-2].strip()
plyr_dat['age'] = int(data_str[:2])
plyr_dat['height'] = data_str[data_str.index(')') + 2:].split(' ')[0].replace('\"','')
plyr_dat['weight'] = data_str[data_str.index(')') + 2:].split(' ')[1]
print(plyr_dat)
# +
plyr_data = {}
soup = soup_maker('http://sofifa.com/player/20801/c-ronaldo-dos-santos-aveiro/')
plyr_stats = soup.find('div', {'class':'stats'})
plyr_val = plyr_stats.text[plyr_stats.text.find('€'):].split('\n')[0]
info = re.findall('\d+', plyr_stats.text)
plyr_data['rating'] = int(info[0])
plyr_data['potential'] = int(info[1])
if 'M' in plyr_val:
    plyr_data['value'] = int(plyr_val[1:plyr_val.index('M')])*1000000
elif 'K' in plyr_val:
    plyr_data['value'] = int(plyr_val[1:plyr_val.index('K')])*1000
plyr_data['wage'] = int(info[3])*1000
print(plyr_data)
# -
soup = soup_maker('http://sofifa.com/player/20801/c-ronaldo-dos-santos-aveiro/')
sp = soup.find('div', {'class':'teams'})
lnks = sp.find_all('li')
club = lnks[8].find('a').text
joined = lnks[12].text
print(club)
print(lnks[12].find('label', text='Joined').parent.contents[2])
# # Ok, now let's turn this work into functions
def gather_basic_info(soup):
    """Parse name, preferred position, age, height and weight from a player page.

    Returns an empty dict when the bio line has no 'Age' marker.
    """
    details = {}
    meta = soup.find('div', {'class':'meta'})  # section holding the bio line
    bio_text = meta.text
    if 'Age' not in bio_text:
        return(details)
    # everything to the right of "Age " — age, birth date, height, weight
    tail = bio_text[bio_text.index('Age') + 4:]
    details['pref_pos'] = meta.find('span').text
    details['full_name'] = meta.text[0:meta.text.find(details['pref_pos'])-2].strip()
    details['age'] = int(tail[:2])
    after_birth = tail[tail.index(')') + 2:]  # skip the "(birth date) " part
    details['height'] = after_birth.split(' ')[0].replace('\"','')
    details['weight'] = after_birth.split(' ')[1]
    return(details)
def gather_club_info(soup):
    """Extract the player's club name and join date from the 'teams' section."""
    details = {}
    items = soup.find_all('li')
    # the list-item layout is not identical for every player, so probe both slots
    if items[8].find('a'):
        details['Club'] = items[8].find('a').text
    else:
        details['Club'] = items[7].find('a').text
    # short lists have no 'Joined' entry at all
    if len(items) < 13:
        details['Joined'] = None
        return(details)
    joined_label = items[12].find('label', text='Joined')
    if joined_label:
        details['Joined'] = joined_label.parent.contents[2]
    else:
        details['Joined'] = None
    return(details)
def gather_player_stats(soup):
    """Parse rating, potential, market value and wage from the stats section.

    Market value strings look like '€95.5M' or '€405K'; wages are always
    quoted in thousands.
    """
    plyr_data = {}
    stats_text = soup.text
    # the first euro amount in the section is the market value
    plyr_val = stats_text[stats_text.find('€'):].split('\n')[0]
    # FIX: raw string — '\d' is an invalid escape sequence (SyntaxWarning
    # on modern Python).
    info = re.findall(r'\d+', stats_text)
    plyr_data['rating'] = int(info[0])
    plyr_data['potential'] = int(info[1])
    # check units of their value
    if 'M' in plyr_val:
        plyr_data['value'] = int(float(plyr_val[1:plyr_val.index('M')])*1000000)
    elif 'K' in plyr_val:
        plyr_data['value'] = int(float(plyr_val[1:plyr_val.index('K')])*1000)
    # wage is always given in thousands; decimal values add an extra digit group
    if len(info) > 4:
        plyr_data['wage'] = int(info[4])*1000
    else:
        plyr_data['wage'] = int(info[3])*1000
    return(plyr_data)
def get_player_details(soup):
    """Fetch a player page and assemble one dict of all scraped attributes.

    NOTE(review): despite the parameter name, *soup* is actually a URL
    string — it is fetched and parsed here.
    """
    all_deets = {}
    page = soup_maker(soup)
    # basic name / height / weight stats
    all_deets.update(gather_basic_info(page.find('div', {'class': 'player'})))
    # rating, value, wage, etc.
    all_deets.update(gather_player_stats(page.find('div', {'class': 'stats'})))
    # club and join date
    all_deets.update(gather_club_info(page.find('div', {'class': 'teams'})))
    return(all_deets)
# # Now let's create our dataframe
def scrape_sofifa():
    """Scrape 100 listing pages (60 players each) from sofifa.com.

    Network-bound — can take minutes. Returns a list of per-player dicts.
    """
    all_players = []
    for offset in (page * 60 for page in range(100)):
        print('Scraping page {}...'.format(offset))
        listing = soup_maker('https://sofifa.com/players?offset=' + str(offset))
        table = listing.find('table',{'class':'table table-hover persist-area'})
        anchors = table.find('tbody').find_all('a', {'rel':None})
        # player rows occupy every other anchor
        for idx, anchor in enumerate(anchors):
            if idx % 2 != 0:
                continue
            link = 'http://sofifa.com' + anchor['href']
            record = {'short_name': anchor.text, 'link': link}
            record.update(get_player_details(link))
            all_players.append(record)
    return(all_players)
import pandas as pd
# Run the full scrape (network-bound, takes minutes) and persist the result.
scraped_data = scrape_sofifa()
df = pd.DataFrame(scraped_data)
df.head()
df.to_csv('scraped_sofifa.csv', index=False)
df
# source notebook: sofifa_scrape.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simulação Estocástica: Distribuição Hipergeométrica
# <NAME>, <EMAIL>.<br>
# Universidade de São Paulo, São Carlos, Brasil.<br>
# https://sites.icmc.usp.br/francisco <br>
# Copyright: Creative Commons
# <hr>
# Considere um conjunto de N objetos, dos quais $N_1$ são do tipo I e $N_2 = N - N_1$ são do tipo II. Para um sorteio de r objetos (r<N), feito ao acaso e sem reposição, defina: X: número de objetos selecionados do tipo I. <br>
# Então:
# $$
# P(X=k)=\frac{{{N_1}\choose{k}}{{N_2}\choose{r-k}}}{{{N}\choose{r}}}, \quad N = N_1 + N_2
# $$
# Para gerarmos dados com uma distribuição geométrica, podemos usar a biblioteca Numpy.
# +
import numpy as np
import matplotlib.pyplot as plt
N1 = 4     # number of type-I objects
N2 = 21    # number of type-II objects
r = 5      # number of objects drawn without replacement
n = 1000   # number of points drawn from the hypergeometric distribution
# numpy convention: hypergeometric(ngood, nbad, nsample, size) — X counts
# the type-II ("good") objects in the sample. FIX: use n (was hard-coded 1000).
X = np.random.hypergeometric(N2, N1, r, n)
k = np.arange(0, np.max(X))
# BUG FIX: E[X] = r * N2 / (N1 + N2); the old expression N2*N1/(N1+N2)
# printed 3.36 while the simulation correctly converges to 4.2.
print("Valor esperado Teórico:", r*N2/(N1+N2))
print("Valor esperado pela simulação:", np.sum(X)/len(X))
count, bins, ignored = plt.hist(X, bins=k, density=True, color='#0504aa',alpha=0.7,
                                rwidth=0.9)
plt.xlabel('k', fontsize = 15)
plt.ylabel('P(k)',fontsize = 15)
plt.show(True)
# -
# source notebook: distribuicao-hipergeometrica.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="JndnmDMp66FL"
# ##### Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# + cellView="both" colab={} colab_type="code" id="hMqWDc_m6rUC"
#@title Default title text
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="Dbh3a1h0zmJ4"
# # Train your own Keyword Spotting Model.
# [Open in Google Colab](https://colab.research.google.com/github/google-research/google-research/blob/master/speech_embedding/speech_commands.ipynb)
#
# Before running any cells please enable GPUs for this notebook to speed it up.
#
# * *Edit* → *Notebook Settings*
# * select *GPU* from the *Hardware Accelerator* drop-down
#
#
# + cellView="form" colab={} colab_type="code" id="7ZVCzvqWuqFV"
#@title Imports
# %tensorflow_version 1.x
from __future__ import division
import collections
import IPython
import functools
import math
import matplotlib.pyplot as plt
import numpy as np
import io
import os
import tensorflow as tf
import tensorflow_hub as hub
import random
import scipy.io.wavfile
import tarfile
import time
import sys
from google.colab import output
from google.colab import widgets
from base64 import b64decode
# !pip install ffmpeg-python
import ffmpeg
# + cellView="form" colab={} colab_type="code" id="3TTWa8GN0KHo"
#@title Helper functions and classes
def normalized_read(filename):
  """Reads a wavfile and normalizes samples to [-1, 1].

  Samples are scaled by the 99.9th percentile of their absolute value and
  then clipped, so rare spikes do not dominate the scale.
  """
  # FIX: close the file handle (it was previously opened and leaked).
  with open(filename, mode='rb') as wav_file:
    _, data = scipy.io.wavfile.read(wav_file)
  samples_99_percentile = np.percentile(np.abs(data), 99.9)
  normalized_samples = data / samples_99_percentile
  normalized_samples = np.clip(normalized_samples, -1, 1)
  return normalized_samples
class EmbeddingDataFileList(object):
  """Container that loads audio, stores it as embeddings and can
  rebalance it."""
  def __init__(self, filelist,
               data_dest_dir,
               targets=None,
               label_max=10000,
               negative_label="negative",
               silence_label="silence",
               negative_multiplier=25,
               target_samples=32000,
               progress_bar=None,
               embedding_model=None):
    """Creates an instance of `EmbeddingDataFileList`.

    Args:
      filelist: dict mapping label -> list of wav paths under data_dest_dir.
      data_dest_dir: directory the wav paths are relative to.
      targets: list of allowed labels; any other label raises ValueError.
      label_max: cap on examples loaded per ordinary label.
      negative_label: label used for background negatives.
      silence_label: label used for silence.
      negative_multiplier: negatives/silence get a label_max * this cap.
      target_samples: clips shorter than this are symmetrically zero-padded.
      progress_bar: optional IPython display handle updated while loading.
      embedding_model: when set, each clip is stored as that model's
        embedding instead of raw samples.
    """
    self._negative_label = negative_label
    self._silence_label = silence_label
    self._data_per_label = collections.defaultdict(list)
    self._labelcounts = {}
    self._label_list = targets
    # Expected total (negatives/silence counted with their multiplier);
    # used only to drive the progress bar.
    total_examples = sum([min(len(x), label_max) for x in filelist.values()])
    total_examples -= min(len(filelist[negative_label]), label_max)
    total_examples -= min(len(filelist[silence_label]), label_max)
    total_examples += min(len(filelist[negative_label]), negative_multiplier * label_max)
    total_examples += min(len(filelist[silence_label]), negative_multiplier * label_max)
    print("loading %d examples" % total_examples)
    example_count = 0
    for label in filelist:
      if label not in self._label_list:
        raise ValueError("Unknown label:", label)
      label_files = filelist[label]
      random.shuffle(label_files)
      if label == negative_label or label == silence_label:
        multplier = negative_multiplier
      else:
        multplier = 1
      for wav_file in label_files[:label_max * multplier]:
        data = normalized_read(os.path.join(data_dest_dir, wav_file))
        # Pad short clips on both sides up to target_samples.
        required_padding = target_samples - data.shape[0]
        if required_padding > 0:
          data = np.pad(data, (required_padding, required_padding), 'constant')
        self._labelcounts[label] = self._labelcounts.get(label, 0) + 1
        if embedding_model:
          data = embedding_model.create_embedding(data)[0][0,:,:,:]
        self._data_per_label[label].append(data)
        if progress_bar is not None:
          example_count += 1
          progress_bar.update(progress(100 * example_count/total_examples))
  @property
  def labels(self):
    return self._label_list
  def get_label(self, idx):
    # Maps a label name to its integer index in the target list.
    return self.labels.index(idx)
  def _get_filtered_data(self, label, filter_fn):
    # Applies filter_fn to each stored example of `label`, pairing the
    # result with the label's integer index.
    idx = self.labels.index(label)
    return [(filter_fn(x), idx) for x in self._data_per_label[label]]
  def _multply_data(self, data, factor):
    # Repeats `data` a fractional number of times: whole copies plus a
    # random sample covering the fractional remainder.
    samples = int((factor - math.floor(factor)) * len(data))
    return int(factor) * data + random.sample(data, samples)
  def full_rebalance(self, negatives, labeled):
    """Rebalances for a given ratio of labeled to negatives."""
    negative_count = self._labelcounts[self._negative_label]
    labeled_count = sum(self._labelcounts[key]
                        for key in self._labelcounts.keys()
                        if key not in [self._negative_label, self._silence_label])
    labeled_multiply = labeled * negative_count / (negatives * labeled_count)
    for label in self._data_per_label:
      if label in [self._negative_label, self._silence_label]:
        continue
      self._data_per_label[label] = self._multply_data(
          self._data_per_label[label], labeled_multiply)
      self._labelcounts[label] = len(self._data_per_label[label])
  def get_all_data_shuffled(self, filter_fn):
    """Returns a shuffled list containing all the data."""
    return self.get_all_data(filter_fn, shuffled=True)
  def get_all_data(self, filter_fn, shuffled=False):
    """Returns a list containing all the data."""
    data = []
    for label in self._data_per_label:
      data += self._get_filtered_data(label, filter_fn)
    if shuffled:
      random.shuffle(data)
    return data
def cut_middle_frame(embedding, num_frames, flatten):
  """Extracts the `num_frames` centre frames of an embedding, optionally flattened."""
  start = (embedding.shape[0] - num_frames) // 2
  window = embedding[start:start + num_frames]
  return window.flatten() if flatten else window
def progress(value, maximum=100):
  """Returns an IPython HTML progress bar showing value out of maximum."""
  return IPython.display.HTML("""
      <progress value='{value}' max='{max}' style='width: 80%'>{value}</progress>
  """.format(value=value, max=maximum))
# + cellView="form" colab={} colab_type="code" id="uGjxofKb07bk"
#@title HeadTrainerClass and head model functions
def _fully_connected_model_fn(embeddings, num_labels):
  """Head model: flatten the embedding and project it to `num_labels` logits."""
  flattened = tf.layers.flatten(embeddings)
  return tf.compat.v1.layers.dense(flattened, num_labels, activation=None)
# TF1-only aliases: tf.contrib namespaces used by the conv head below.
framework = tf.contrib.framework
layers = tf.contrib.layers
def _conv_head_model_fn(embeddings, num_labels, context):
  """Builds the head model and adds a fully connected output layer.

  Two 3x1 convolutions (batch norm + ELU), each followed by max pooling
  over the time/context axis, then a dense layer producing `num_labels`
  logits.
  """
  activation_fn = tf.nn.elu
  normalizer_fn = functools.partial(
      layers.batch_norm, scale=True, is_training=True)
  with framework.arg_scope([layers.conv2d], biases_initializer=None,
                           activation_fn=None, stride=1, padding="SAME"):
    net = embeddings
    net = layers.conv2d(net, 96, [3, 1])
    net = normalizer_fn(net)
    net = activation_fn(net)
    net = layers.max_pool2d(net, [2, 1], stride=[2, 1], padding="VALID")
    context //= 2  # pooling above halved the time axis
    net = layers.conv2d(net, 96, [3, 1])
    net = normalizer_fn(net)
    net = activation_fn(net)
    # Global pool over the remaining context and feature columns.
    net = layers.max_pool2d(net, [context, net.shape[2]], padding="VALID")
    net = tf.layers.flatten(net)
    logits = layers.fully_connected(
        net, num_labels, activation_fn=None)
  return logits
class HeadTrainer(object):
  """A tensorflow classifier to quickly train and test on embeddings.

  Only use this if you are training a very small model on a very limited amount
  of data. If you expect the training to take any more than 15 - 20 min then use
  something else.
  """

  def __init__(self, model_fn, input_shape, num_targets,
               head_learning_rate=0.001, batch_size=64):
    """Creates a `HeadTrainer`.

    Args:
      model_fn: function that builds the tensorflow model, defines its loss
        and returns the tuple (predictions, loss, accuracy).
      input_shape: describes the shape of the models input feature.
        Does not include the batch dimension.
      num_targets: Target number of keywords.
      head_learning_rate: learning rate passed to the Adam optimizer.
      batch_size: default number of (feature, label) pairs per batch; may be
        overridden per call in `epoch_train` / `test`.
    """
    self._input_shape = input_shape
    self._output_dim = num_targets
    self._batch_size = batch_size
    # The head gets its own graph and session so it cannot collide with any
    # other model (e.g. the embedding module) living in the same process.
    self._graph = tf.Graph()
    with self._graph.as_default():
      self._feature = tf.placeholder(tf.float32, shape=([None] + input_shape))
      self._labels = tf.placeholder(tf.int64, shape=(None))
      # Wrapping the head in a hub module makes it exportable via
      # `save_head_model`.
      module_spec = hub.create_module_spec(
          module_fn=self._get_headmodule_fn(model_fn, num_targets))
      self._module = hub.Module(module_spec, trainable=True)
      logits = self._module(self._feature)
      self._predictions = tf.nn.softmax(logits)
      self._loss, self._accuracy = self._get_loss(
          logits, self._labels, self._predictions)
      self._update_weights = tf.train.AdamOptimizer(
          learning_rate=head_learning_rate).minimize(self._loss)
    self._sess = tf.Session(graph=self._graph)
    with self._sess.as_default():
      with self._graph.as_default():
        self._sess.run(tf.local_variables_initializer())
        self._sess.run(tf.global_variables_initializer())

  def _get_headmodule_fn(self, model_fn, num_targets):
    """Wraps the model_fn in a tf hub module."""
    def module_fn():
      embeddings = tf.placeholder(
          tf.float32, shape=([None] + self._input_shape))
      logit = model_fn(embeddings, num_targets)
      hub.add_signature(name='default', inputs=embeddings, outputs=logit)
    return module_fn

  def _get_loss(self, logits, labels, predictions):
    """Defines the model's loss and accuracy.

    Returns:
      (mean sparse cross-entropy loss, batch accuracy) tensors.
    """
    xentropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)
    loss = tf.reduce_mean(xentropy_loss)
    accuracy = tf.contrib.metrics.accuracy(tf.argmax(predictions, 1), labels)
    return loss, accuracy

  def save_head_model(self, save_directory):
    """Saves the model as a tf hub module under `save_directory`."""
    with self._graph.as_default():
      self._module.export(save_directory, self._sess)

  def _feature_transform(self, batch_features, batch_labels):
    """Transforms lists of features and labels into model inputs."""
    return np.stack(batch_features), np.stack(batch_labels)

  def _batch_data(self, data, batch_size=None):
    """Splits the input data into batches.

    Args:
      data: iterable of (feature, label) tuples.
      batch_size: examples per batch; defaults to one batch with everything.

    Yields:
      (features, labels) stacked numpy arrays of at most `batch_size` rows.

    Raises:
      ValueError: if a feature's shape or a label's value doesn't match the
        model configuration.
    """
    batch_features = []
    batch_labels = []
    batch_size = batch_size or len(data)
    for feature, label in data:
      if feature.shape != tuple(self._input_shape):
        raise ValueError(
            "Feature shape ({}) doesn't match model shape ({})".format(
                feature.shape, self._input_shape))
      if not 0 <= label < self._output_dim:
        raise ValueError('Label value ({}) outside of target range'.format(
            label))
      batch_features.append(feature)
      batch_labels.append(label)
      if len(batch_features) == batch_size:
        yield self._feature_transform(batch_features, batch_labels)
        del batch_features[:]
        del batch_labels[:]
    # Emit the final, possibly smaller, batch.
    if batch_features:
      yield self._feature_transform(batch_features, batch_labels)

  def epoch_train(self, data, epochs=1, batch_size=None):
    """Trains the model on the provided data.

    Args:
      data: List of tuples (feature, label) where feature is a np array of
        shape `self._input_shape` and label an int less than self._output_dim.
      epochs: Number of times this data should be trained on.
      batch_size: Number of feature, label pairs per batch. Overwrites
        `self._batch_size` when set.

    Returns:
      tuple of accuracy, loss;
      accuracy: Average training accuracy.
      loss: Loss of the final batch.

    Raises:
      ValueError: if `data` is empty.
    """
    if not data:
      # Without this guard an empty input crashed below with an unbound
      # `loss` / ZeroDivisionError, which is much harder to diagnose.
      raise ValueError('data must contain at least one (feature, label) pair')
    batch_size = batch_size or self._batch_size
    accuracy_list = []
    for _ in range(epochs):
      for features, labels in self._batch_data(data, batch_size):
        loss, accuracy, _ = self._sess.run(
            [self._loss, self._accuracy, self._update_weights],
            feed_dict={self._feature: features, self._labels: labels})
        accuracy_list.append(accuracy)
    return sum(accuracy_list) / len(accuracy_list), loss

  def test(self, data, batch_size=None):
    """Evaluates the model on the provided data.

    Args:
      data: List of tuples (feature, label) where feature is a np array of
        shape `self._input_shape` and label an int less than self._output_dim.
      batch_size: Number of feature, label pairs per batch. Overwrites
        `self._batch_size` when set.

    Returns:
      tuple of accuracy, loss;
      accuracy: Average evaluation accuracy.
      loss: Loss of the final batch.

    Raises:
      ValueError: if `data` is empty.
    """
    if not data:
      raise ValueError('data must contain at least one (feature, label) pair')
    batch_size = batch_size or self._batch_size
    accuracy_list = []
    for features, labels in self._batch_data(data, batch_size):
      loss, accuracy = self._sess.run(
          [self._loss, self._accuracy],
          feed_dict={self._feature: features, self._labels: labels})
      accuracy_list.append(accuracy)
    return sum(accuracy_list) / len(accuracy_list), loss

  def infer(self, example_feature):
    """Runs inference on a single example_feature (no batch dimension).

    Returns:
      The softmax predictions for the example.

    Raises:
      ValueError: if the feature shape doesn't match the model input shape.
    """
    if example_feature.shape != tuple(self._input_shape):
      raise ValueError(
          "Feature shape ({}) doesn't match model shape ({})".format(
              example_feature.shape, self._input_shape))
    return self._sess.run(
        self._predictions,
        feed_dict={self._feature: np.expand_dims(example_feature, axis=0)})
# + cellView="form" colab={} colab_type="code" id="QLccWY0bIJs1"
#@title TfHubWrapper Class
class TfHubWrapper(object):
  """Loads a TF-Hub embedding module and exposes it via `create_embedding`.

  The module is given its own graph and session so it can coexist with
  other TensorFlow models in the same process.
  """

  def __init__(self, embedding_model_dir):
    """Builds the graph for the module at `embedding_model_dir`.

    `embedding_model_dir` may be a local module directory or a tfhub.dev
    URL (as used elsewhere in this notebook).
    """
    self._graph = tf.Graph()
    self._sess = tf.Session(graph=self._graph)
    with self._graph.as_default(), self._sess.as_default():
      spec = hub.load_module_spec(embedding_model_dir)
      module = hub.Module(spec)
      self._samples = tf.placeholder(
          tf.float32, shape=[1, None], name='audio_samples')
      self._embedding = module(self._samples)
      self._sess.run(tf.global_variables_initializer())
    print("Embedding model loaded, embedding shape:", self._embedding.shape)

  def create_embedding(self, samples):
    """Returns [embedding array] for a numpy array of audio samples."""
    flat = samples.reshape((1, -1))
    return self._sess.run([self._embedding],
                          feed_dict={self._samples: flat})
# + [markdown] colab_type="text" id="dnttvMi9z8ed"
# ## Load the embedding model
#
# The following info messages can be ignored
#
# > *INFO:tensorflow:Saver not created because there are no variables in the graph to restore*
#
# Don't worry tf hub is restoring all the variables.
#
# You can test the model by having it produce an embedding on zeros:
#
#
# ```
# speech_embedding_model.create_embedding(np.zeros((1,66000)))
# ```
#
#
# + colab={} colab_type="code" id="CVBtPzmLz8ef"
# hub.load_module_spec accepts a tfhub.dev URL as well as a local directory.
embedding_model_url = "https://tfhub.dev/google/speech_embedding/1"
speech_embedding_model = TfHubWrapper(embedding_model_url)
# + [markdown] colab_type="text" id="R-x8ReAxH-GT"
# ## Get and load the test data
#
# The following cells are responsible for getting the data into the colab and creating the embeddings on top of which the model is trained.
#
# To train a model on a different source of data, replace the next cell with one that copies in your data and change the file scanning cell to scan it correctly.
#
# Finally, ensure that the global variable MODEL_LABELS is appropriately set.
#
# File scanning is performed to create 2 lists of wav files:
# * A training file list containing all possible training files. (All files not in testing_list.txt or validation_list.txt)
# * An evaluation file list that we will use for testing (validation_list.txt)
#
#
# File lists are actually dictionaries with the following structure:
#
# ```
# {'keyword1': ['path/to/word1/example1.wav', 'path/to/word1/example2.wav'],
#  'keyword2': ['path/to/word2/example1.wav', 'path/to/word2/example2.wav'],
#  ...
#  'negative': ['path/to/negative_example1.wav', 'path/to/negative_example2.wav']}
# ```
#
# The subsequent cells assume that the file lists are stored in the variables: *all_eval_example_files* and *all_train_example_files*.
#
#
#
# + cellView="form" colab={} colab_type="code" id="aZqyHmyKxFPN"
#@title Download and extract the speech commands data set
# Archive location and on-disk layout of the Speech Commands v0.02 data set.
data_source = "http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz"
data_dest_dir = "speech_commands_v0.02"
test_list = data_dest_dir + "/testing_list.txt"
valid_list = data_dest_dir + "/validation_list.txt"
# The ten keywords the head model is trained to detect; everything else in
# ALL_WORDS is mapped to the "negative" class when scanning files below.
TARGET_WORDS = 'yes,no,up,down,left,right,on,off,stop,go'
ALL_WORDS = 'backward,bed,bird,cat,dog,down,eight,five,follow,forward,four,go,' + 'happy,house,learn,left,marvin,nine,no,off,on,one,right,seven,sheila,six,stop,' + 'three,tree,two,up,visual,wow,yes,zero'
# Note: This example colab doesn't train the silence output.
MODEL_LABELS = ['negative', 'silence'] + TARGET_WORDS.split(',')
# !wget http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz
print("extracting tar archive.. this may take a few minutes.")
if not os.path.exists(data_dest_dir):
  os.makedirs(data_dest_dir)
#tarfile.open("speech_commands_v0.02.tar.gz", 'r:gz').extractall(data_dest_dir)
file_count = 0
progress_bar = IPython.display.display(progress(0, 100), display_id=True)
# Extract member-by-member (instead of extractall) so the progress bar can be
# updated; 105800 is the approximate file count of the v0.02 archive.
with tarfile.open("speech_commands_v0.02.tar.gz", 'r:gz') as speech_commands_tar:
  for member_info in speech_commands_tar.getmembers():
    if file_count % 100 == 0:
      progress_bar.update(progress(100 * file_count/105800))
    speech_commands_tar.extract(member_info, data_dest_dir)
    file_count+=1
# + cellView="form" colab={} colab_type="code" id="C4eL0zlLYqDL"
#@title Optional: add background silence data.
#@markdown Run this tab if you want to reduce the number of false detections
#@markdown when no speech is present.
#@markdown It shouldn't affect the eval accuracy but may increase
#@markdown the data loading and training time.
background_dir = os.path.join(data_dest_dir, "_background_noise_")
silence_dir = os.path.join(data_dest_dir, "silence")
if not os.path.exists(silence_dir):
  os.makedirs(silence_dir)
progress_bar = IPython.display.display(progress(0, 100), display_id=True)
noises = ["doing_the_dishes", "exercise_bike", "white_noise", "dude_miaowing",
          "pink_noise", "running_tap"]
# Slice each background recording into 32000-sample (2 s at 16 kHz) windows,
# stepping by `overlap` samples, at several loudness scales.
overlap = 8000
window_size = 32000
scales = [0.001, 0.0031, 0.01, 0.031, 0.05, 0.1, 0.31, 0.5, 0.8, 1]
# Rough estimate of the clip count, used only to drive the progress bar
# (assumes 6 noise files of ~65 s each — TODO confirm against the data set).
total_silence = len(scales) * 6 * 65 * 16000 / 8000
silence_count = 0
for scale in scales:
  for noise in noises:
    noise_file = os.path.join(background_dir, noise + ".wav")
    noise_samples = normalized_read(noise_file)
    position = 0
    while position + window_size <= noise_samples.shape[0]:
      windowed_samples = noise_samples[position:position+window_size] * scale
      file_name = "%s_%s_%s.wav" % (noise, position, scale)
      output_path = os.path.join(silence_dir, file_name)
      scipy.io.wavfile.write(output_path, 16000, windowed_samples)
      position += overlap
      silence_count += 1
      progress_bar.update(progress(100 * silence_count/total_silence))
# + cellView="form" colab={} colab_type="code" id="6t9I1VXFDyfG"
#@title Scan files
progress_bar = IPython.display.display(progress(0, 100), display_id=True)
print("loading filelists from: %s " % data_dest_dir)
def get_train_test_valid_split(word):
  """Splits one keyword's wav files into train/test/validation lists.

  Reads the module-level `test_files` / `valid_files` lists (the official
  dataset splits) and `data_dest_dir`. Paths are relative to `data_dest_dir`.

  Returns:
    (train, test, valid) — three independently shuffled lists of paths.
  """
  word_dir = os.path.join(data_dest_dir, word)
  all_word_files = [os.path.join(word, f) for f in os.listdir(word_dir)
                    if os.path.isfile(os.path.join(word_dir, f))]
  # Membership tests against the split lists are O(len(list)) per file;
  # building sets once makes the scan linear instead of quadratic.
  test_set = set(test_files)
  valid_set = set(valid_files)
  word_train_files = [f for f in all_word_files
                      if f not in test_set and f not in valid_set]
  word_test_files = [f for f in all_word_files if f in test_set]
  word_valid_files = [f for f in all_word_files if f in valid_set]
  random.shuffle(word_train_files)
  random.shuffle(word_test_files)
  random.shuffle(word_valid_files)
  return word_train_files, word_test_files, word_valid_files
# Relative paths named in the dataset's official test/validation split files.
test_files = [line.rstrip() for line in open(test_list, encoding="ISO-8859-1")]
valid_files = [line.rstrip() for line in open(valid_list, encoding="ISO-8859-1")]
all_train_example_files = collections.defaultdict(list)
all_eval_example_files = collections.defaultdict(list)
silence_dir = os.path.join(data_dest_dir, "silence")
# Treat "silence" as an extra pseudo-word only if the optional silence cell
# above was run and created the directory.
if os.path.exists(silence_dir):
  all_word_list = ALL_WORDS.split(',') + ["silence"]
else:
  all_word_list = ALL_WORDS.split(',')
word_count = 0
for word in all_word_list:
  # Words outside MODEL_LABELS are pooled into the "negative" class.
  if word in MODEL_LABELS:
    label = word
  else:
    label = "negative"
  train_files, eval_files, _ = get_train_test_valid_split(word)
  all_train_example_files[label].extend(train_files)
  all_eval_example_files[label].extend(eval_files)
  if progress is not None:
    word_count += 1
    progress_bar.update(progress(100 * word_count/len(all_word_list)))
# + [markdown] colab_type="text" id="c9Ry4fjoBfsD"
# In the following cells the wav files from both evaluation and training sets are:
# * Opened and decoded.
# * Loudness normalized.
# * Passed through the embedding model to create embeddings.
# * Added to a data structure that lets us change the balance between negative, silence (if present) and labeled outputs.
#
#
# resulting in two objects: *eval_data* and *train_data*.
#
#
# The two parameters to consider here are:
# * **examples_per_word**: The number of examples for each target word that should be loaded. A higher number for the training data will lead to a better model, but it will also take longer to load/train. A good starting point is 40. Small numbers for the eval data may result in easy / hard eval subsets that could give an incorrect impression of the model quality.
# * **negatives_multiplier**: How many more non target examples should be loaded. This is set to 25 by default as the speech commands dataset maps 25 words to negative. Also applies to silence examples.
# + cellView="form" colab={} colab_type="code" id="A2y7Bmj3Wt7j"
#@title Load evaluation set wav.
#@markdown Set examples_per_word to > 500 and negatives_multiplier to 25
#@markdown to ensure you load the whole eval set.
examples_per_word = 50#@param {type:"integer"}
negatives_multiplier = 25#@param {type:"integer"}
progress_bar = IPython.display.display(progress(0, 100), display_id=True)
print("loading eval data")
# Decodes, loudness-normalizes and embeds the selected eval wav files
# (see the markdown description above this cell).
eval_data = EmbeddingDataFileList(
    all_eval_example_files, data_dest_dir, label_max=examples_per_word,
    negative_multiplier=negatives_multiplier,
    targets=MODEL_LABELS, embedding_model=speech_embedding_model,
    progress_bar=progress_bar)
# + cellView="form" colab={} colab_type="code" id="QO0IJqsh07i4"
#@title Load random speech commands wav files for training.
#@markdown Set examples_per_word to > 4000 and negatives_multiplier to 25
#@markdown to ensure you load the whole training set.
examples_per_word = 50#@param {type:"integer"}
negatives_multiplier = 25#@param {type:"integer"}
progress_bar = IPython.display.display(progress(0, 100), display_id=True)
print("loading train data")
# Same pipeline as above, applied to the training file lists.
train_data = EmbeddingDataFileList(
    all_train_example_files, data_dest_dir, label_max=examples_per_word,
    negative_multiplier=negatives_multiplier,
    targets=MODEL_LABELS, embedding_model=speech_embedding_model,
    progress_bar=progress_bar)
# + [markdown] colab_type="text" id="ipFhC0H14Q9v"
# ## Train and Evaluate a Head Model
#
#
# + cellView="form" colab={} colab_type="code" id="iUhwy_1bCg3P"
#@title Rebalance and filter data.
#@markdown **Labeled_weight** and **negatives_weight** are used to control the ratio of labeled data
#@markdown and negative data shown to the model during training and evaluation.
#@markdown For every *labeled_weight* keyword examples the model is trained on,
#@markdown it is also trained on *negatives_weight* non keyword examples.
#@markdown During rebalancing examples are duplicated to ensure that this ratio holds.
labeled_weight = 8 #@param {type:"slider", min:1, max:25, step:1}
negatives_weight = 1 #@param {type:"slider", min:1, max:25, step:1}
#@markdown We assume that the keyphrase is spoken in roughly the middle
#@markdown of the loaded audio clips. With **context_size** we can choose the
#@markdown number of embeddings around the middle to use as a model input.
context_size = 16 #@param {type:"slider", min:1, max:28, step:1}
# Keep only `context_size` embedding frames from the middle of each clip;
# flatten=False preserves the (frames, 1, channels) layout the heads expect.
filter_fn = functools.partial(cut_middle_frame, num_frames=context_size, flatten=False)
eval_data.full_rebalance(negatives=negatives_weight, labeled=labeled_weight)
all_eval_data = eval_data.get_all_data_shuffled(filter_fn=filter_fn)
train_data.full_rebalance(negatives=negatives_weight, labeled=labeled_weight)
all_train_data = train_data.get_all_data_shuffled(filter_fn=filter_fn)
# + cellView="form" colab={} colab_type="code" id="bqtJk_XN9Tcb"
#@title Run training and evaluation
head_model = "Convolutional" #@param ["Convolutional", "Fully_Connected"] {type:"string"}
#@markdown Suggested **learning_rate** range 0.00001 - 0.01.
learning_rate = 0.001 #@param {type:"number"}
batch_size = 32
#@markdown **epochs_per_eval** and **train_eval_loops** control how long
#@markdown the model is trained. An epoch is defined as the model having seen
#@markdown each example at least once, with some examples twice to ensure the
#@markdown correct labeled / negatives balance.
epochs_per_eval = 1 #@param {type:"slider", min:1, max:15, step:1}
train_eval_loops = 15 #@param {type:"slider", min:5, max:80, step:5}
if head_model == "Convolutional":
  # The conv head needs to know how many embedding frames it receives.
  model_fn = functools.partial(_conv_head_model_fn, context=context_size)
else:
  model_fn = _fully_connected_model_fn
trainer = HeadTrainer(model_fn=model_fn,
                      input_shape=[context_size,1,96],
                      num_targets=len(MODEL_LABELS),
                      head_learning_rate=learning_rate,
                      batch_size=batch_size)
data_trained_on = 0
data = []
train_results = []
eval_results = []
# Upper bound for the plot's x-axis (+10 leaves a small right margin).
max_data = len(all_train_data) * epochs_per_eval * train_eval_loops + 10
def plot_step(plot, max_data, data, train_results, eval_results):
  """Redraws the accuracy-vs-examples chart on `plot` (a pyplot-like object).

  Training accuracy is drawn in blue; eval accuracy, when present, in red.
  Passing a falsy `eval_results` skips the eval curve entirely.
  """
  plot.clf()
  plot.xlim(0, max_data)
  plot.ylim(0.85, 1.05)
  # Each curve is drawn twice: dots first, then the connecting labeled line.
  curves = [(train_results, "b", "train_results")]
  if eval_results:
    curves.append((eval_results, "r", "eval_results"))
  for series, color, label in curves:
    plot.plot(data, series, color + "o")
    plot.plot(data, series, color, label=label)
  plot.legend(loc='lower right', fontsize=24)
  plot.xlabel('number of examples trained on', fontsize=22)
  plot.ylabel('Accuracy', fontsize=22)
  plot.xticks(fontsize=20)
  plot.yticks(fontsize=20)
plt.figure(figsize=(25, 7))
# Alternate between `epochs_per_eval` epochs of training and one evaluation
# pass, redrawing the live accuracy plot after each loop.
for loop in range(train_eval_loops):
  train_accuracy, loss = trainer.epoch_train(all_train_data,
                                             epochs=epochs_per_eval)
  train_results.append(train_accuracy)
  if all_eval_data:
    eval_accuracy, loss = trainer.test(all_eval_data)
    eval_results.append(eval_accuracy)
  else:
    eval_results = None
  data_trained_on += len(all_train_data) * epochs_per_eval
  data.append(data_trained_on)
  plot_step(plt, max_data, data, train_results, eval_results)
  IPython.display.display(plt.gcf())
  if all_eval_data:
    print("Highest eval accuracy: %.2f percent." % (100 * max(eval_results)))
  # Clear the cell output so the next loop's plot replaces this one.
  IPython.display.clear_output(wait=True)
# Re-print the summary once training ends (clear_output removed the last one).
if all_eval_data:
  print("Highest eval accuracy: %.2f percent." % (100 * max(eval_results)))
# + [markdown] colab_type="text" id="fxa9wsJKeiv9"
# # Export and reuse the head model
# The following cells show how the head model can be exported and reused in a graph
# + cellView="form" colab={} colab_type="code" id="s2GxL706F-BD"
#@title Save the head model
# Export the trained head as a TF-Hub module for reuse below.
head_model_module_dir = "head_model_module_fc_all_data"
trainer.save_head_model(head_model_module_dir)
# + cellView="form" colab={} colab_type="code" id="r6PfTOh7HIt1"
#@title FullModelWrapper - Example Class
class FullModelWrapper(object):
  """Chains the embedding module and a trained head into one classifier.

  The combined graph maps raw audio samples directly to softmax predictions
  and can itself be exported as a TF-Hub module.
  """

  def __init__(self, embedding_model_dir, head_model_dir):
    self._graph = tf.Graph()
    self._sess = tf.Session(graph=self._graph)
    with self._graph.as_default():
      self._samples = tf.placeholder(
          tf.float32, shape=[1, None], name='audio_samples')
      spec = hub.create_module_spec(
          module_fn=self._get_module_fn(embedding_model_dir, head_model_dir))
      self._module = hub.Module(spec, trainable=True)
      self._predictions = self._module(self._samples)
      with self._sess.as_default():
        self._sess.run(tf.global_variables_initializer())

  def _get_module_fn(self, embedding_model_dir, head_model_module_dir):
    """Wraps the model_fn in a tf hub module."""
    def module_fn():
      # samples -> embedding -> head logits -> softmax.
      samples = tf.placeholder(
          tf.float32, shape=[1, None], name='audio_samples')
      embedding_module = hub.Module(hub.load_module_spec(embedding_model_dir))
      head_spec = hub.load_module_spec(head_model_module_dir)
      embeddings = embedding_module(samples)
      head_module = hub.Module(head_spec)
      predictions = tf.nn.softmax(head_module(embeddings))
      hub.add_signature(name='default', inputs=samples, outputs=predictions)
    return module_fn

  def save_head_model(self, save_directory):
    """Saves the combined model as a TF-Hub module."""
    with self._graph.as_default():
      self._module.export(save_directory, self._sess)

  def infer(self, samples):
    """Returns [softmax predictions] for a numpy array of audio samples."""
    return self._sess.run(
        [self._predictions],
        feed_dict={self._samples: samples.reshape((1, -1))})
# + cellView="form" colab={} colab_type="code" id="gS9gCV8SKIfe"
#@title Test the full model on zeros
# Smoke test: run the combined embedding+head model on 2 s of zeros.
full_model = FullModelWrapper(embedding_model_url, head_model_module_dir)
full_model.infer(np.zeros((1,32000)))
| speech_embedding/speech_commands.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import unittest
import pandas as pd
from colorama import Fore, Back, Style
from datetime import datetime
# -
# <h1>Import Code to be Tested</h1>
# +
import devanalyst.simulation.statics as S_
import devanalyst.simulation.businessObjects as bo
from devanalyst.simulation.businessObjects import UserStory, UserStoriesRepo, Ticket, TicketsRepo, WorkItem, \
UserStoryStatus, Backlog, ScrumTeam, ScrumTeamsRepo, ReleaseCycleContext, GlobalRepo
from devanalyst.simulation.simulationModels import ModelsConfig, DefaultCostModel, GreedyAllocationModel, \
DistributedLagQualityModel, MeritocraticCostModel, NoLaggardsAllocationModel
import devanalyst.simulation.generateTimecards as timecard
from devanalyst.simulation.generateTimecards import IdCounter, WorkAssignments, ReleaseLog
# -
import devanalyst.test_utils.test_utils as tu_
from devanalyst.test_utils.test_utils import ExpectedOutputCleaner
# <h1>test_uss</h1>
# <p>Test User Story Status</p>
# + code_folding=[0, 3, 12, 18, 26, 36]
# Implement test logic, and run
# Helper methods to the test
def format_ticket(ticket):
    """Renders one Ticket as a tab-indented 'name=value' transcript line."""
    fields = [
        ('storyId', ticket.userStoryId),
        ('costToFix', ticket.estimatedCost),
        ('sprintReported', ticket.sprintReported),
        ('sprintFixed', ticket.sprintFixed),
        ('effortToDate', ticket.effortToDate),
        ('percentAchieved', ticket.percentAchieved),
    ]
    parts = ['Ticket:' + ticket.ticketId]
    parts.extend(',\n\t\t %s=%s' % (name, value) for name, value in fields)
    return ''.join(parts)
def format_tickets(tickets):
    """Concatenates brace-wrapped `format_ticket` renderings, one per ticket."""
    return ''.join('\n\t\t{' + format_ticket(t) + '}' for t in tickets)
def format_uss(uss, globalRepo):
    """Renders a UserStoryStatus plus its currently open tickets."""
    open_tickets = globalRepo.ticketsRepo.getOpenTickets(uss.userStoryId)
    template = ('\n *** USS:{}'
                '\n\t achieved={}'
                ',\n\t planned={}'
                ',\n\t sprintPlanned={}'
                ',\n\t tickets={}')
    return template.format(uss.userStoryId, uss.percentAchieved, uss.planned,
                           uss.sprintPlanned, format_tickets(open_tickets))
def format_item(item, item_label, sprint, timeInSprint):
    """Renders a WorkItem snapshot, labeled with its sprint and phase."""
    pieces = [
        '\n *** {} at {} of sprint{}: '.format(item_label, timeInSprint, sprint),
        '\n\t userStoryId={}'.format(item.userStoryId),
        ',\n\t taskType={}'.format(item.taskType),
        ',\n\t ticketId={}'.format(item.ticketId),
        ',\n\t estimate={:.2f}'.format(item.estimate),
        ',\n\t percentAchieved={}'.format(item.percentAchieved),
        ',\n\t sprintPlanned={}'.format(item.sprintPlanned),
    ]
    return ''.join(pieces)
# Test logic
def test_uss():
    """Drives a UserStoryStatus through two sprints and returns a transcript
    of its state and generated work items, for comparison with EXPECTED."""
    output = ''
    repo = UserStoriesRepo([UserStory('Story A', 25, '<NAME>', '<NAME>'), \
                            UserStory('Story B', 17, '<NAME>', '<NAME>')])
    # SPRINT 1
    ctx = ReleaseCycleContext(teamId='', sprint=1, sprintDuration=None)
    globalRepo = GlobalRepo(developersRepo=None, teamsRepo=None, storiesRepo=repo, ticketsRepo=TicketsRepo())
    uss = UserStoryStatus('Story B', 0.0)
    uss.planned = True
    uss.sprintPlanned = 1
    output = output + (format_uss(uss, globalRepo))
    item = uss._generateWorkItems(ctx, globalRepo)[0]
    output = output + (format_item(item, 'Item#1', 1, 'start'))
    # Simulate partial progress plus two bugs reported during sprint 1.
    item.percentAchieved = 0.7
    globalRepo.ticketsRepo.addTicket(Ticket('Bug 100','Story B', 4, 1))
    globalRepo.ticketsRepo.addTicket(Ticket('Bug 101','Story B', 1.5, 1))
    output = output + (format_item(item, 'Item#1', 1, 'end'))
    uss.updateStatus([item], ctx, globalRepo)
    # SPRINT 2
    ctx.sprint = 2
    uss.sprintPlanned = 2
    output = output + (format_uss(uss, globalRepo))
    # Sprint 2 yields three work items: the unfinished story and the two bugs.
    items = uss._generateWorkItems(ctx, globalRepo)
    item=items[0]
    output = output + (format_item(item, 'Item#1', 2, 'start'))
    item=items[1]
    output = output + (format_item(item, 'Item#2', 2, 'start'))
    item=items[2]
    output = output + (format_item(item, 'Item#3', 2, 'start'))
    # Simulate sprint-2 progress and actual effort on each item.
    items[0].percentAchieved = 0.9
    items[0].actual = 18.3
    items[1].percentAchieved = 1.0
    items[1].actual = 4.32
    items[2].percentAchieved = 0.5
    items[2].actual = 0.86
    item=items[0]
    output = output + (format_item(item, 'Item#1', 2, 'end'))
    item=items[1]
    output = output + (format_item(item, 'Item#2', 2, 'end'))
    item=items[2]
    output = output + (format_item(item, 'Item#3', 2, 'end'))
    uss.updateStatus(items, ctx, globalRepo)
    output = output + (format_uss(uss, globalRepo))
    return output
# Run the test
test_uss_ACTUAL = test_uss()
# + code_folding=[0]
# Uncomment to print a string output one can copy and paste into test_uss_EXPECTED
#test_uss_ACTUAL
# + code_folding=[0]
# Set expected output, update the EXPECTED and ACTUAL dictionaries, and check test is OK
test_uss_EXPECTED = '\n *** USS:Story B\n\t achieved=0.0,\n\t planned=True,\n\t sprintPlanned=1,\n\t tickets=\n *** Item#1 at start of sprint1: \n\t userStoryId=Story B,\n\t taskType=UNFINISHED_STORIES,\n\t ticketId=None,\n\t estimate=17.00,\n\t percentAchieved=0.0,\n\t sprintPlanned=1\n *** Item#1 at end of sprint1: \n\t userStoryId=Story B,\n\t taskType=UNFINISHED_STORIES,\n\t ticketId=None,\n\t estimate=17.00,\n\t percentAchieved=0.7,\n\t sprintPlanned=1\n *** USS:Story B\n\t achieved=0.7,\n\t planned=True,\n\t sprintPlanned=2,\n\t tickets=\n\t\t{Ticket:Bug 100,\n\t\t storyId=Story B,\n\t\t costToFix=4,\n\t\t sprintReported=1,\n\t\t sprintFixed=NOT_SET,\n\t\t effortToDate=0.0,\n\t\t percentAchieved=0.0}\n\t\t{Ticket:Bug 101,\n\t\t storyId=Story B,\n\t\t costToFix=1.5,\n\t\t sprintReported=1,\n\t\t sprintFixed=NOT_SET,\n\t\t effortToDate=0.0,\n\t\t percentAchieved=0.0}\n *** Item#1 at start of sprint2: \n\t userStoryId=Story B,\n\t taskType=UNFINISHED_STORIES,\n\t ticketId=None,\n\t estimate=5.10,\n\t percentAchieved=0.0,\n\t sprintPlanned=2\n *** Item#2 at start of sprint2: \n\t userStoryId=Story B,\n\t taskType=DEV_TIME_BUGS,\n\t ticketId=Bug 100,\n\t estimate=4.00,\n\t percentAchieved=0.0,\n\t sprintPlanned=2\n *** Item#3 at start of sprint2: \n\t userStoryId=Story B,\n\t taskType=DEV_TIME_BUGS,\n\t ticketId=Bug 101,\n\t estimate=1.50,\n\t percentAchieved=0.0,\n\t sprintPlanned=2\n *** Item#1 at end of sprint2: \n\t userStoryId=Story B,\n\t taskType=UNFINISHED_STORIES,\n\t ticketId=None,\n\t estimate=5.10,\n\t percentAchieved=0.9,\n\t sprintPlanned=2\n *** Item#2 at end of sprint2: \n\t userStoryId=Story B,\n\t taskType=DEV_TIME_BUGS,\n\t ticketId=Bug 100,\n\t estimate=4.00,\n\t percentAchieved=1.0,\n\t sprintPlanned=2\n *** Item#3 at end of sprint2: \n\t userStoryId=Story B,\n\t taskType=DEV_TIME_BUGS,\n\t ticketId=Bug 101,\n\t estimate=1.50,\n\t percentAchieved=0.5,\n\t sprintPlanned=2\n *** USS:Story B\n\t achieved=0.97,\n\t planned=True,\n\t 
sprintPlanned=2,\n\t tickets=\n\t\t{Ticket:Bug 101,\n\t\t storyId=Story B,\n\t\t costToFix=1.5,\n\t\t sprintReported=1,\n\t\t sprintFixed=NOT_SET,\n\t\t effortToDate=0.86,\n\t\t percentAchieved=0.5}'
# Register this test's expected/actual output and report pass/fail.
tu_.EXPECTED['uss'] = test_uss_EXPECTED
tu_.ACTUAL['uss'] = test_uss_ACTUAL
tu_.testOK('uss')
# + cell_style="split" code_folding=[0]
# Print ACTUAL output
print(Back.BLUE + Fore.WHITE + '--------------------- ACTUAL -------------------------', \
      Back.RESET + Fore.BLUE + '\n' + test_uss_ACTUAL)
# + cell_style="split" code_folding=[0]
# Print EXPECTED output
print(Back.GREEN + Fore.WHITE + '--------------------- EXPECTED -----------------------', \
      Back.RESET + Fore.GREEN + '\n' + test_uss_EXPECTED)
# <h1>test_createTeams</h1>
# + code_folding=[0]
# Implement test logic, and run
teams_df = bo.createTeamsDF(tu_.DEV_DF, tu_.PM_DF)
object_cols = ['Scrum Team'] # Need to drop these since they print always-chaning memory address + can't save it in EXPECTED
test_createTeams_ACTUAL = teams_df.drop(object_cols, axis='columns')
# + code_folding=[0]
# Uncomment to update expected output to match the actual one
#tu_.createExpectedOutput(test_createTeams_ACTUAL, 'test_createTeams')
# + code_folding=[0]
# Load expected output, update the EXPECTED and ACTUAL dictionaries, and check test is OK
list_cols = ['Developers', 'Product Managers', 'Areas of Responsibility']
test_createTeams_EXPECTED = tu_.loadExpectedOutput('test_createTeams', list_cols)
tu_.EXPECTED['createTeams'] = test_createTeams_EXPECTED
tu_.ACTUAL['createTeams'] = test_createTeams_ACTUAL
tu_.testOK('createTeams')
# -
test_createTeams_ACTUAL
test_createTeams_EXPECTED
# <h1>test_userStoryCreate</h1>
# <p>This test has multiple views, each of which is checked separately</p>
# <li>test_userStoryCreate_stories
# <li>test_userStoryCreate_estimates
# <li>test_userStoryCreate_crossCheck
# <li>test_userStoryCreate_workload
# + code_folding=[0]
# Implement test logic, and run it
#Test logic
def test_userStoryCreate():
    """Initializes a simulated release and returns a dict of DataFrame views
    (stories, estimates, workload, crosscheck, average estimates) by name."""
    output = {}
    RELEASE_DURATION = 125
    SPRINT_DURATION = 10
    NUMBER_OF_SPRINTS = 25
    # Configure models
    modelsConfig = ModelsConfig([DefaultCostModel()], [], GreedyAllocationModel())
    # Fixed seed keeps the simulated data deterministic across runs.
    modelsConfig.random.reset(271)
    teams_df, stories_df, globalRepo = tu_.initTestData(tu_.DEV_DF, tu_.PM_DF, \
                                                       RELEASE_DURATION, SPRINT_DURATION, modelsConfig)
    modelsConfig.globalRepo = globalRepo
    grouped_estimates_df = stories_df.groupby([ 'Scrum Team', 'Developer'])['Estimate'].sum()
    workload_df = stories_df.groupby([ 'Scrum Team'])['User Story Id'].count()
    avg_estimates_df = grouped_estimates_df.unstack().apply(lambda x: x.mean(), axis='columns')
    # Reset index to match the way how EXPECTED will be saved as a CSV file
    estimates_df = grouped_estimates_df.reset_index()
    workload_df = workload_df.reset_index()
    avg_estimates_df = avg_estimates_df.reset_index()
    # The unstacking above created a column with a number as the column name, 0. That will not match once expected output is saved
    # and reloaded, as it will come back as the string '0'. So rename that column to avoid spurious test failures
    avg_estimates_df = avg_estimates_df.rename(index=str, columns={0: 'Avg'})
    # Because of the manipulations, the index has changed and that will cause mismatches with the EXPECTED loaded from
    # CSV. So re-index
    avg_estimates_df.index = pd.RangeIndex(start=0, stop=avg_estimates_df.index.size, step=1)
    # Cross-check: per-team backlog sizes as seen by the ScrumTeam objects.
    crosscheck = [len(teams_df['Scrum Team'][0].backlog.pendingUserStories),
                  len(teams_df['Scrum Team'][1].backlog.pendingUserStories),
                  len(teams_df['Scrum Team'][2].backlog.pendingUserStories),
                  len(teams_df['Scrum Team'][3].backlog.pendingUserStories)]
    crosscheck_df = pd.DataFrame({'Team idx': [0,1,2,3], 'Backlog size': crosscheck})
    output['stories_df'] = stories_df
    output['estimates_df'] = estimates_df
    output['workload_df'] = workload_df
    output['crosscheck_df'] = crosscheck_df
    output['avg_estimates_df'] = avg_estimates_df
    return output
# Run the test
test_userStoryCreate_ACTUAL = test_userStoryCreate()
# + code_folding=[0]
# Uncomment to update expected output to match the actual one
# Helper method
def create_userStoryCreate_EXPECTED():
    """Regenerates every saved EXPECTED CSV for this test from the current
    ACTUAL outputs. Run manually only after an intended behavior change."""
    tu_.createExpectedOutput(test_userStoryCreate_ACTUAL['stories_df'], 'test_userStoryCreate.stories_df')
    tu_.createExpectedOutput(test_userStoryCreate_ACTUAL['estimates_df'], 'test_userStoryCreate.estimates_df')
    tu_.createExpectedOutput(test_userStoryCreate_ACTUAL['workload_df'], 'test_userStoryCreate.workload_df')
    tu_.createExpectedOutput(test_userStoryCreate_ACTUAL['crosscheck_df'], 'test_userStoryCreate.crosscheck_df')
    tu_.createExpectedOutput(test_userStoryCreate_ACTUAL['avg_estimates_df'], 'test_userStoryCreate.avg_estimates_df')
# Uncomment to update expected output to match the actual one, and then put the comment back
#create_userStoryCreate_EXPECTED()
# + code_folding=[0]
# Load expected output, update the EXPECTED and ACTUAL dictionaries, and check test is OK
test_userStoryCreate_EXPECTED = {}
test_userStoryCreate_EXPECTED['stories_df'] = tu_.loadExpectedOutput('test_userStoryCreate.stories_df')
test_userStoryCreate_EXPECTED['estimates_df'] = tu_.loadExpectedOutput('test_userStoryCreate.estimates_df')
test_userStoryCreate_EXPECTED['workload_df'] = tu_.loadExpectedOutput('test_userStoryCreate.workload_df')
test_userStoryCreate_EXPECTED['crosscheck_df'] = tu_.loadExpectedOutput('test_userStoryCreate.crosscheck_df')
test_userStoryCreate_EXPECTED['avg_estimates_df'] = tu_.loadExpectedOutput('test_userStoryCreate.avg_estimates_df')
# Rounding inaccuracies in saving and loading CSV will create an artificial mismatch between ACTUAL and EXPECTED
# So round EXPECTED and ACTUAL to 6 decimal places for the sensitive fields
ExpectedOutputCleaner.cleanRoundingNoise(['Avg'],
['avg_estimates_df'],
test_userStoryCreate_EXPECTED,
test_userStoryCreate_ACTUAL)
tu_.EXPECTED['test_userStoryCreate.stories_df'] = test_userStoryCreate_EXPECTED['stories_df']
tu_.EXPECTED['test_userStoryCreate.estimates_df'] = test_userStoryCreate_EXPECTED['estimates_df']
tu_.EXPECTED['test_userStoryCreate.workload_df'] = test_userStoryCreate_EXPECTED['workload_df']
tu_.EXPECTED['test_userStoryCreate.crosscheck_df'] = test_userStoryCreate_EXPECTED['crosscheck_df']
tu_.EXPECTED['test_userStoryCreate.avg_estimates_df'] = test_userStoryCreate_EXPECTED['avg_estimates_df']
tu_.ACTUAL['test_userStoryCreate.stories_df'] = test_userStoryCreate_ACTUAL['stories_df']
tu_.ACTUAL['test_userStoryCreate.estimates_df'] = test_userStoryCreate_ACTUAL['estimates_df']
tu_.ACTUAL['test_userStoryCreate.workload_df'] = test_userStoryCreate_ACTUAL['workload_df']
tu_.ACTUAL['test_userStoryCreate.crosscheck_df'] = test_userStoryCreate_ACTUAL['crosscheck_df']
tu_.ACTUAL['test_userStoryCreate.avg_estimates_df'] = test_userStoryCreate_ACTUAL['avg_estimates_df']
tu_.testOK('test_userStoryCreate.stories_df'), \
tu_.testOK('test_userStoryCreate.estimates_df'), \
tu_.testOK('test_userStoryCreate.workload_df'), \
tu_.testOK('test_userStoryCreate.crosscheck_df'), \
tu_.testOK('test_userStoryCreate.avg_estimates_df')
# + cell_style="split"
test_userStoryCreate_ACTUAL['stories_df']
# + cell_style="split"
test_userStoryCreate_EXPECTED['stories_df']
# + cell_style="split"
test_userStoryCreate_ACTUAL['estimates_df']
# + cell_style="split"
test_userStoryCreate_EXPECTED['estimates_df']
# + cell_style="split"
test_userStoryCreate_ACTUAL['workload_df']
# + cell_style="split"
test_userStoryCreate_EXPECTED['workload_df']
# + cell_style="split"
test_userStoryCreate_ACTUAL['crosscheck_df']
# + cell_style="split"
test_userStoryCreate_EXPECTED['crosscheck_df']
# + cell_style="split"
test_userStoryCreate_ACTUAL['avg_estimates_df']
# + cell_style="split"
test_userStoryCreate_EXPECTED['avg_estimates_df']
# -
# <h1>test_WorkAssignments</h1>
# + code_folding=[0]
# Implement test logic, and run it
#Test logic
def test_WorkAssignments():
    """Exercise WorkAssignments: snapshot committed time, re-assign one item, snapshot again.

    Returns a dict with 'Initial' and 'Final' committed-time dataframes.
    Statement order matters: the RNG is seeded (271) and reAssign mutates 'work'.
    """
    output = {}
    RELEASE_DURATION = 125
    SPRINT_DURATION = 10
    SPRINT = 1
    # Configure models
    modelsConfig = ModelsConfig([DefaultCostModel()], [], GreedyAllocationModel())
    modelsConfig.random.reset(271)  # fixed seed so the run is reproducible against EXPECTED
    teams_df, stories_df, globalRepo = tu_.initTestData(tu_.DEV_DF, tu_.PM_DF, \
                                                        RELEASE_DURATION, SPRINT_DURATION, modelsConfig)
    modelsConfig.globalRepo = globalRepo
    teamId0 = teams_df['Scrum Team'][0].teamId
    modelsConfig.context = ReleaseCycleContext(teamId0, SPRINT, SPRINT_DURATION)
    work = WorkAssignments(modelsConfig.context, modelsConfig.globalRepo)
    initial_df = work.committedTime(SPRINT_DURATION)
    # Test re-assigning of work
    item = work.allocations[S_.UNPLANNED][S_.OWNER_TBD][S_.UNFINISHED_STORIES][25]
    # NOTE(review): '<NAME>' looks like an anonymization placeholder for a developer name — confirm
    work.reAssign(item, '<NAME>', S_.CURRENT_SPRINT)
    final_df = work.committedTime(SPRINT_DURATION)
    output['Initial'] = initial_df
    output['Final'] = final_df
    return output
# Run the test
# ACTUAL is a dict with 'Initial' and 'Final' committed-time dataframes
test_WorkAssignments_ACTUAL = test_WorkAssignments()
# + code_folding=[0]
# Uncomment to update expected output to match the actual one
# Helper method
def create_WorkAssignments_EXPECTED():
    """Overwrite the EXPECTED baseline files with the current ACTUAL output of test_WorkAssignments."""
    for testlet in ['Initial', 'Final']:
        tu_.createExpectedOutput(test_WorkAssignments_ACTUAL[testlet],
                                 'test_WorkAssignments.' + testlet)
# Uncomment to update expected output to match the actual one, and then put the comment back
#create_WorkAssignments_EXPECTED()
# + code_folding=[0]
# Load expected output, update the EXPECTED and ACTUAL dictionaries, and check test is OK
test_WorkAssignments_EXPECTED = {}
test_WorkAssignments_EXPECTED['Initial'] = tu_.loadExpectedOutput('test_WorkAssignments.Initial')
test_WorkAssignments_EXPECTED['Final'] = tu_.loadExpectedOutput('test_WorkAssignments.Final')
tu_.EXPECTED['test_WorkAssignments.Initial'] = test_WorkAssignments_EXPECTED['Initial']
tu_.EXPECTED['test_WorkAssignments.Final'] = test_WorkAssignments_EXPECTED['Final']
tu_.ACTUAL['test_WorkAssignments.Initial'] = test_WorkAssignments_ACTUAL['Initial']
tu_.ACTUAL['test_WorkAssignments.Final'] = test_WorkAssignments_ACTUAL['Final']
# Displayed tuple of pass/fail flags; the trailing backslash joins with the '# -' marker line below
tu_.testOK('test_WorkAssignments.Initial'), \
tu_.testOK('test_WorkAssignments.Final'), \
# -
test_WorkAssignments_ACTUAL['Initial']
test_WorkAssignments_EXPECTED['Initial']
test_WorkAssignments_ACTUAL['Final']
test_WorkAssignments_EXPECTED['Final']
# <h1>test_oneSprint</h1>
# + code_folding=[0]
# Implement test logic, and run it
#Test logic
def test_oneSprint():
    """Run a single full sprint for one team: plan, deliver, ingest tickets, update backlog.

    Returns a dict with committed-time and committed-task dataframes taken at
    sprint start and sprint end. Call order is significant: deliverSprint mutates 'work'.
    """
    output = {}
    # Choose what to work on at the start of a sprint.
    RELEASE_DURATION = 125
    SPRINT_DURATION = 10
    SPRINT = 1
    # Configure models
    modelsConfig = ModelsConfig([DefaultCostModel()], [], GreedyAllocationModel())
    modelsConfig.random.reset(271)  # fixed seed for reproducibility against EXPECTED
    teams_df, stories_df, globalRepo = tu_.initTestData(tu_.DEV_DF, tu_.PM_DF, \
                                                        RELEASE_DURATION, SPRINT_DURATION, modelsConfig)
    modelsConfig.globalRepo = globalRepo
    # Select a team
    teamId = teams_df['Scrum Team'][0].teamId
    modelsConfig.context = ReleaseCycleContext(teamId, SPRINT, SPRINT_DURATION)
    work = timecard.chooseWhatToDoInSprint(modelsConfig)
    start_committed_df = work.committedTime(SPRINT_DURATION)
    start_tasks_df = work.committedTasks()
    # Deliver what the sprint actually accomplished, including ingest of defects arriving during sprint
    timecard.deliverSprint(work, modelsConfig) # mutates work
    inflow = timecard.inflowOfTickets(modelsConfig)
    end_committed_df = work.committedTime(0) # Sprint is over, so sprint capacity parameter is 0
    # Test continued
    timecard.updateBacklogAfterSprint(work, modelsConfig)
    end_tasks_df = work.committedTasks()
    output['Start_Committed'] = start_committed_df
    output['Start_Tasks'] = start_tasks_df
    output['End_Committed'] = end_committed_df
    output['End_Tasks'] = end_tasks_df
    return output
# Run the test
# ACTUAL is a dict keyed by 'Start_Committed', 'Start_Tasks', 'End_Committed', 'End_Tasks'
test_oneSprint_ACTUAL = test_oneSprint()
# + code_folding=[0]
# Uncomment to update expected output to match the actual one
# Helper method
def create_oneSprint_EXPECTED():
    """Overwrite the EXPECTED baseline files with the current ACTUAL output of test_oneSprint."""
    for testlet in ['Start_Committed', 'Start_Tasks', 'End_Committed', 'End_Tasks']:
        tu_.createExpectedOutput(test_oneSprint_ACTUAL[testlet],
                                 'test_oneSprint.' + testlet)
# Uncomment to update expected output to match the actual one, and then put the comment back
#create_oneSprint_EXPECTED()
# + code_folding=[0]
# Load expected output, update the EXPECTED and ACTUAL dictionaries, and check test is OK
test_oneSprint_EXPECTED = {}
test_oneSprint_EXPECTED['Start_Committed'] = tu_.loadExpectedOutput('test_oneSprint.Start_Committed')
test_oneSprint_EXPECTED['Start_Tasks'] = tu_.loadExpectedOutput('test_oneSprint.Start_Tasks')
test_oneSprint_EXPECTED['End_Committed'] = tu_.loadExpectedOutput('test_oneSprint.End_Committed')
test_oneSprint_EXPECTED['End_Tasks'] = tu_.loadExpectedOutput('test_oneSprint.End_Tasks')
# Rounding inaccuracies in saving and loading CSV will create an artificial mismatch between ACTUAL and EXPECTED
# So round EXPECTED and ACTUAL to 6 decimal places for sensitive fields (any float)
ExpectedOutputCleaner.cleanRoundingNoise(['Rejects (days)', 'Debugging (days)', 'Implementation (days)', 'Bandwidth',\
                                          'NEXT SPRINT (days)', 'NEXT SPRINT Bandwidth'],
                                         ['Start_Committed', 'End_Committed'],
                                         test_oneSprint_EXPECTED,
                                         test_oneSprint_ACTUAL)
ExpectedOutputCleaner.cleanRoundingNoise(['Original Estimate', 'Effort Spent', 'Effort Remaining', \
                                          'Percent Achieved', '% Global Done'],
                                         ['Start_Tasks', 'End_Tasks'],
                                         test_oneSprint_EXPECTED,
                                         test_oneSprint_ACTUAL)
# CSV round-trip turns list-valued cells into strings; destringify restores them on the EXPECTED side
ExpectedOutputCleaner.destringify(['Delivered in Sprint'],
                                  ['End_Tasks'],
                                  test_oneSprint_EXPECTED)
tu_.EXPECTED['test_oneSprint.Start_Committed'] = test_oneSprint_EXPECTED['Start_Committed']
tu_.EXPECTED['test_oneSprint.Start_Tasks'] = test_oneSprint_EXPECTED['Start_Tasks']
tu_.EXPECTED['test_oneSprint.End_Committed'] = test_oneSprint_EXPECTED['End_Committed']
tu_.EXPECTED['test_oneSprint.End_Tasks'] = test_oneSprint_EXPECTED['End_Tasks']
tu_.ACTUAL['test_oneSprint.Start_Committed'] = test_oneSprint_ACTUAL['Start_Committed']
tu_.ACTUAL['test_oneSprint.Start_Tasks'] = test_oneSprint_ACTUAL['Start_Tasks']
tu_.ACTUAL['test_oneSprint.End_Committed'] = test_oneSprint_ACTUAL['End_Committed']
tu_.ACTUAL['test_oneSprint.End_Tasks'] = test_oneSprint_ACTUAL['End_Tasks']
# Displayed tuple of pass/fail flags; trailing backslash joins with the '# -' marker line below
tu_.testOK('test_oneSprint.Start_Committed'), \
tu_.testOK('test_oneSprint.Start_Tasks'), \
tu_.testOK('test_oneSprint.End_Committed'), \
tu_.testOK('test_oneSprint.End_Tasks'), \
# -
test_oneSprint_ACTUAL['Start_Committed'][:5]
test_oneSprint_EXPECTED['Start_Committed'][:5]
test_oneSprint_ACTUAL['Start_Tasks'][:5]
test_oneSprint_EXPECTED['Start_Tasks'][:5]
test_oneSprint_ACTUAL['End_Committed'][:5]
test_oneSprint_EXPECTED['End_Committed'][:5]
test_oneSprint_ACTUAL['End_Tasks'][:5]
test_oneSprint_EXPECTED['End_Tasks'][:5]
# <h1>test_multipleSprints</h1>
# + code_folding=[0]
# Implement test logic, and run it
#Test logic
def test_multipleSprints():
    """Run 15 consecutive sprints and snapshot the last one, start and end.

    Verifies the "looking ahead" behavior: once developers have spare capacity,
    they start next-sprint tasks inside the current sprint. Returns a dict with
    committed-time/task dataframes for the start and end of the last sprint.
    """
    output = {}
    # Test many sprints into the future, to see if eventually people have extra time and start using that extra time
    # in the current sprint to get a head start on tasks for the next sprint
    SPRINT_DURATION = 10
    NUMBER_OF_SPRINTS = 15
    # Configure models
    modelsConfig = ModelsConfig([DefaultCostModel()], [], GreedyAllocationModel())
    modelsConfig.random.reset(271)  # fixed seed for reproducibility against EXPECTED
    teams_df, stories_df, globalRepo = tu_.initTestData(tu_.DEV_DF, tu_.PM_DF, \
                                                        125, SPRINT_DURATION, modelsConfig)
    modelsConfig.globalRepo = globalRepo
    # Select a team
    teamId = teams_df['Scrum Team'][0].teamId
    work = None
    for i in range(NUMBER_OF_SPRINTS):
        sprint = i+1
        modelsConfig.context = ReleaseCycleContext(teamId, sprint, SPRINT_DURATION)
        work = timecard.chooseWhatToDoInSprint(modelsConfig)
        # Stop before delivering the last sprint, so 'work' holds its start-of-sprint plan
        if (i== NUMBER_OF_SPRINTS -1):
            break
        timecard.deliverSprint(work, modelsConfig) # mutates 'work'
        inflow = timecard.inflowOfTickets(modelsConfig)  # NOTE(review): return value unused; called for its side effects — confirm
        timecard.updateBacklogAfterSprint(work, modelsConfig) # Does not mutate 'work'
    # Work Assignments at the start of the last sprint. Should see some "looking ahead" tasks, i.e., tasks that would
    # normally be done in the next sprint but are started in this sprint since we have time leftover from this sprint's
    # deliverables
    last = work
    start_committed_df = last.committedTime(10)
    start_tasks_df = last.committedTasks()
    # Finish this last sprint and confirm we spent time in some of the deliveries for the next sprint (i.e., that we
    # were "looking ahead")
    timecard.deliverSprint(last, modelsConfig)
    inflow = timecard.inflowOfTickets(modelsConfig)
    timecard.updateBacklogAfterSprint(work, modelsConfig) # Does not mutate 'work'
    end_committed_df = last.committedTime(0)
    end_tasks_df =last.committedTasks()
    output['start_committed'] = start_committed_df
    output['start_tasks'] = start_tasks_df
    output['end_committed'] = end_committed_df
    output['end_tasks'] = end_tasks_df
    return output
# Run the test
# ACTUAL is a dict keyed by 'start_committed', 'start_tasks', 'end_committed', 'end_tasks'
test_multipleSprints_ACTUAL = test_multipleSprints()
# + code_folding=[0]
# Uncomment to update expected output to match the actual one
# Helper method
def create_multipleSprints_EXPECTED():
    """Overwrite the EXPECTED baseline files with the current ACTUAL output of test_multipleSprints."""
    for testlet in ['start_committed', 'start_tasks', 'end_committed', 'end_tasks']:
        tu_.createExpectedOutput(test_multipleSprints_ACTUAL[testlet],
                                 'test_multipleSprints.' + testlet)
# Uncomment to update expected output to match the actual one, and then put the comment back
#create_multipleSprints_EXPECTED()
# + code_folding=[0]
# Load expected output, update the EXPECTED and ACTUAL dictionaries, and check test is OK
test_multipleSprints_EXPECTED = {}
test_multipleSprints_EXPECTED['start_committed'] = tu_.loadExpectedOutput('test_multipleSprints.start_committed')
test_multipleSprints_EXPECTED['start_tasks'] = tu_.loadExpectedOutput('test_multipleSprints.start_tasks')
test_multipleSprints_EXPECTED['end_committed'] = tu_.loadExpectedOutput('test_multipleSprints.end_committed')
test_multipleSprints_EXPECTED['end_tasks'] = tu_.loadExpectedOutput('test_multipleSprints.end_tasks')
# Rounding inaccuracies in saving and loading CSV will create an artificial mismatch between ACTUAL and EXPECTED
# So round EXPECTED and ACTUAL to 6 decimal places for sensitive fields (any float)
ExpectedOutputCleaner.cleanRoundingNoise(['Rejects (days)', 'Debugging (days)', 'Implementation (days)', 'Bandwidth',\
                                          'NEXT SPRINT (days)', 'NEXT SPRINT Bandwidth'],
                                         ['start_committed', 'end_committed'],
                                         test_multipleSprints_EXPECTED,
                                         test_multipleSprints_ACTUAL)
ExpectedOutputCleaner.cleanRoundingNoise(['Original Estimate', 'Effort Spent', 'Effort Remaining', \
                                          'Percent Achieved', '% Global Done'],
                                         ['start_tasks', 'end_tasks'],
                                         test_multipleSprints_EXPECTED,
                                         test_multipleSprints_ACTUAL)
# CSV round-trip turns list-valued cells into strings; destringify restores them on the EXPECTED side
ExpectedOutputCleaner.destringify(['Delivered in Sprint'],
                                  ['start_tasks', 'end_tasks'],
                                  test_multipleSprints_EXPECTED)
tu_.EXPECTED['test_multipleSprints.start_committed'] = test_multipleSprints_EXPECTED['start_committed']
tu_.EXPECTED['test_multipleSprints.start_tasks'] = test_multipleSprints_EXPECTED['start_tasks']
tu_.EXPECTED['test_multipleSprints.end_committed'] = test_multipleSprints_EXPECTED['end_committed']
tu_.EXPECTED['test_multipleSprints.end_tasks'] = test_multipleSprints_EXPECTED['end_tasks']
tu_.ACTUAL['test_multipleSprints.start_committed'] = test_multipleSprints_ACTUAL['start_committed']
tu_.ACTUAL['test_multipleSprints.start_tasks'] = test_multipleSprints_ACTUAL['start_tasks']
tu_.ACTUAL['test_multipleSprints.end_committed'] = test_multipleSprints_ACTUAL['end_committed']
tu_.ACTUAL['test_multipleSprints.end_tasks'] = test_multipleSprints_ACTUAL['end_tasks']
# Displayed tuple of pass/fail flags; trailing backslash joins with the '# -' marker line below
tu_.testOK('test_multipleSprints.start_committed'), \
tu_.testOK('test_multipleSprints.start_tasks'), \
tu_.testOK('test_multipleSprints.end_committed'), \
tu_.testOK('test_multipleSprints.end_tasks'), \
# -
test_multipleSprints_ACTUAL['start_committed'][:5]
test_multipleSprints_EXPECTED['start_committed'][:5]
test_multipleSprints_ACTUAL['start_tasks'][:5]
test_multipleSprints_EXPECTED['start_tasks'][:5]
test_multipleSprints_ACTUAL['end_committed'][:5]
test_multipleSprints_EXPECTED['end_committed'][:5]
test_multipleSprints_ACTUAL['end_tasks'][:5]
test_multipleSprints_EXPECTED['end_tasks'][:5]
# <h1>test_releaseCycle</h1>
# + code_folding=[0]
# Implement test logic, and run it
# Test logic
def test_releaseCycle():
    """Run a complete 25-sprint release cycle and collect entries, burnout, and log snapshots.

    Returns (output, modelsConfig): output maps 'Entries', 'Burnout', each
    ReleaseLog.SNAPSHOTS name, and 'log' (kept for the visualization cells below).
    """
    output = {}
    RELEASE_DURATION = 125
    SPRINT_DURATION = 10
    NUMBER_OF_SPRINTS = 25
    # Configure models
    modelsConfig = ModelsConfig([DefaultCostModel()], [], GreedyAllocationModel())
    modelsConfig.random.reset(271)  # fixed seed for reproducibility against EXPECTED
    teams_df, stories_df, globalRepo = tu_.initTestData(tu_.DEV_DF, tu_.PM_DF, \
                                                        RELEASE_DURATION, SPRINT_DURATION, modelsConfig)
    modelsConfig.globalRepo = globalRepo
    # NOTE(review): this call assumes 'datetime' is the class (from datetime import datetime);
    # the file header shows 'import datetime' (the module) — confirm a later import rebinds it
    entries_df, log = timecard.runReleaseCycle(datetime(2018, 1, 15), SPRINT_DURATION, NUMBER_OF_SPRINTS, modelsConfig)
    burnout_df = timecard.releaseBurnout(entries_df)
    output['Entries'] = entries_df
    output['Burnout'] = burnout_df
    for name in ReleaseLog.SNAPSHOTS:
        log_df = log.mergeLogs(name)
        output[name] = log_df
    output['log'] = log #Needed for visualizations
    return output, modelsConfig
# Run the test
# Unpacks both the output dict and the (mutated) modelsConfig used by later cells
test_releaseCycle_ACTUAL, modelsConfig = test_releaseCycle()
# + code_folding=[0]
# Uncomment to update expected output to match the actual one
# Helper method
def create_releaseCycle_EXPECTED():
    """Overwrite the EXPECTED baselines with the current ACTUAL output of test_releaseCycle.

    Covers the two fixed testlets plus every ReleaseLog snapshot, in the same order
    as the original explicit calls.
    """
    for name in ['Entries', 'Burnout'] + ReleaseLog.SNAPSHOTS:
        tu_.createExpectedOutput(test_releaseCycle_ACTUAL[name], 'test_releaseCycle.' + name)
# Uncomment to update expected output to match the actual one, and then put the comment back
#create_releaseCycle_EXPECTED()
# + code_folding=[0]
# Load expected output, update the EXPECTED and ACTUAL dictionaries, and check test is OK
test_releaseCycle_EXPECTED = {}
test_releaseCycle_EXPECTED['Entries'] = tu_.loadExpectedOutput('test_releaseCycle.Entries')
test_releaseCycle_EXPECTED['Burnout'] = tu_.loadExpectedOutput('test_releaseCycle.Burnout')
for name in ReleaseLog.SNAPSHOTS:
    test_releaseCycle_EXPECTED[name] = tu_.loadExpectedOutput('test_releaseCycle.' + name)
# ReleaseLog snapshots have integer-valued columns (1, 2,3, ...), and loading the EXPECTED CSV file will convert
# them to strings ('1', '2', '3', ...), so to avoid spurious test failures rename the columns of the
# EXPECTED data we just loaded
cols_to_align = ReleaseLog.SNAPSHOTS.copy()
# These columns don't have integer-valued columns
cols_to_align.remove('Resourcing')
cols_to_align.remove('Outcome')
cols_to_align  # bare expression: displays the remaining snapshot names in the notebook
ExpectedOutputCleaner.alignColumns(cols_to_align,
                                   test_releaseCycle_EXPECTED,
                                   test_releaseCycle_ACTUAL)
# Round float-sensitive fields on both sides to suppress CSV round-trip noise
ExpectedOutputCleaner.cleanRoundingNoise(['Time Spent'],
                                         ['Entries'],
                                         test_releaseCycle_EXPECTED,
                                         test_releaseCycle_ACTUAL)
ExpectedOutputCleaner.cleanRoundingNoise(['Effort', 'Implementation Effort', 'Debugging Effort', 'Cum % Completion'],
                                         ['Burnout'],
                                         test_releaseCycle_EXPECTED,
                                         test_releaseCycle_ACTUAL)
testlets = ReleaseLog.SNAPSHOTS.copy()
# All log snapshots except 'Resourcing' and 'Outcome' have the same sensitive fields
testlets.remove('Resourcing')
testlets.remove('Outcome')
ExpectedOutputCleaner.cleanRoundingNoise([0,1,2,3,4,5,6,7,8,9,10],
                                         testlets,
                                         test_releaseCycle_EXPECTED,
                                         test_releaseCycle_ACTUAL)
ExpectedOutputCleaner.cleanRoundingNoise(['Rejects (days)', 'Debugging (days)', 'Implementation (days)', \
                                          'Bandwidth', 'NEXT SPRINT (days)', 'NEXT SPRINT Bandwidth'],
                                         ['Resourcing'],
                                         test_releaseCycle_EXPECTED,
                                         test_releaseCycle_ACTUAL)
ExpectedOutputCleaner.cleanRoundingNoise(['Original Estimate', 'Effort Spent','Effort Remaining', \
                                          'Percent Achieved', '% Global Done'],
                                         ['Outcome'],
                                         test_releaseCycle_EXPECTED,
                                         test_releaseCycle_ACTUAL)
ExpectedOutputCleaner.standardizeDates(['Date'],
                                       ['Entries'],
                                       test_releaseCycle_EXPECTED)
ExpectedOutputCleaner.destringify(['Delivered in Sprint'],
                                  ['Outcome'],
                                  test_releaseCycle_EXPECTED)
tu_.EXPECTED['test_releaseCycle.Entries'] = test_releaseCycle_EXPECTED['Entries']
tu_.EXPECTED['test_releaseCycle.Burnout'] = test_releaseCycle_EXPECTED['Burnout']
for name in ReleaseLog.SNAPSHOTS:
    tu_.EXPECTED['test_releaseCycle.' + name] = test_releaseCycle_EXPECTED[name]
tu_.ACTUAL['test_releaseCycle.Entries'] = test_releaseCycle_ACTUAL['Entries']
tu_.ACTUAL['test_releaseCycle.Burnout'] = test_releaseCycle_ACTUAL['Burnout']
for name in ReleaseLog.SNAPSHOTS:
    tu_.ACTUAL['test_releaseCycle.' + name] = test_releaseCycle_ACTUAL[name]
results = []
results.append(tu_.testOK('test_releaseCycle.Entries'))
results.append(tu_.testOK('test_releaseCycle.Burnout'))
for name in ReleaseLog.SNAPSHOTS:
    results.append(tu_.testOK('test_releaseCycle.' + name))
results  # bare expression: displays the list of pass/fail flags in the notebook
# + code_folding=[0]
# Uncomment to interactively visualize the release logs, and then comment again once interactive analysis is done. Commenting these
# lines after interactive analysis is completed is required as test harness can't load these visualiations
# libraries so leaving this uncommented will crash the entire test harness.
# NB: MAY NEED TO RUN TWICE (there is a bug in Jupyter notebook, I think, so first time you call this it shows no visuals)
#import devanalyst.simulation.visualizations.timecard_visuals as tc_visuals
#tc_visuals.renderReleaseCycleLog(teamId = 'Team A', release_log=test_releaseCycle_ACTUAL['log'], first=1, last=17, spurious_columns=['Team Id', 'Sprint'])
# + code_folding=[0]
# Script to debug test failures when entries change. This was very useful when the test failed because of simply the
# removal of spurious entries (tails of 0's), for which the tu_.taintFailuresToStop was very useful. This allows
# detecting the spurious rows and discard them, and at that point we have the same number of rows. Any mismatches
# after that should be just innocent rotations of some entries, for which tu_.find_mismatches(e2, a) helps find the
# few rows that account for the mismatches, and visual inspection usually is enough to verify it is simply an innocent
# rotation
def spurious_errors_debug(): # return or extract pertinent lines. This function is just a template
    """Template for manually diagnosing Entries mismatches in test_releaseCycle.

    Not called by the harness; copy lines out of it into a cell when debugging.
    The trailing .loc expressions are intentionally bare — their values are only
    useful when the lines are run interactively.
    """
    a = test_releaseCycle_ACTUAL['Entries']
    e = test_releaseCycle_EXPECTED['Entries']
    # Mark trailing all-zero ("spurious") rows on each side
    at = tu_.taintFailuresToStop(a, 'User Story', 'Time Spent')
    et = tu_.taintFailuresToStop(e, 'User Story', 'Time Spent')
    # Drop the tainted EXPECTED rows so both frames have the same row count
    discard = list(et[et['TAINTED'] == True].index)
    e2 = e.drop(discard)
    e2.index = a.index
    tu_.find_mismatches(e2, a)
    # Example row indices from a past debugging session — adjust when reusing
    e2.loc[[1178, 1182, 1184, 1216, 1218, 1220, 1232]]
    a.loc[[1178, 1182, 1184, 1216, 1218, 1220, 1232]]
# -
# Display cells: ACTUAL vs EXPECTED head rows for each testlet, for visual inspection
test_releaseCycle_ACTUAL['Entries'][0:8]
test_releaseCycle_EXPECTED['Entries'][0:8]
test_releaseCycle_ACTUAL['Burnout'][:5]
test_releaseCycle_EXPECTED['Burnout'][:5]
test_releaseCycle_ACTUAL['planned_Start_CURRENT_SPRINT'][0:8]
test_releaseCycle_EXPECTED['planned_Start_CURRENT_SPRINT'][0:8]
test_releaseCycle_ACTUAL['planned_End_CURRENT_SPRINT'][0:8]
test_releaseCycle_EXPECTED['planned_End_CURRENT_SPRINT'][0:8]
test_releaseCycle_ACTUAL['planned_Start_NEXT_SPRINT'][0:8]
test_releaseCycle_EXPECTED['planned_Start_NEXT_SPRINT'][0:8]
test_releaseCycle_ACTUAL['planned_End_NEXT_SPRINT'][0:8]
test_releaseCycle_EXPECTED['planned_End_NEXT_SPRINT'][0:8]
test_releaseCycle_ACTUAL['backlog'][0:8]
test_releaseCycle_EXPECTED['backlog'][0:8]
test_releaseCycle_ACTUAL['Resourcing'][0:8]
test_releaseCycle_EXPECTED['Resourcing'][0:8]
test_releaseCycle_ACTUAL['Outcome'][0:8]
test_releaseCycle_EXPECTED['Outcome'][0:8]
# <h1>test_buggyReleaseCycle</h1>
# + code_folding=[0]
# Implement test logic, and run it
# Test logic
def test_buggyReleaseCycle():
    """Run a 25-sprint release cycle with a quality model that injects defects.

    Uses DefaultCostModel(0.0) plus DistributedLagQualityModel, so bugs flow in
    during the release. Returns (output, modelsConfig); output additionally carries
    'User_Stories' and 'Tickets' dataframes compared to test_releaseCycle.
    """
    output = {}
    RELEASE_DURATION = 125
    SPRINT_DURATION = 10
    NUMBER_OF_SPRINTS = 25
    # Configure models
    modelsConfig = ModelsConfig([DefaultCostModel(0.0)], [DistributedLagQualityModel()], GreedyAllocationModel())
    modelsConfig.random.reset(271)  # fixed seed for reproducibility against EXPECTED
    teams_df, stories_df, globalRepo = tu_.initTestData(tu_.DEV_DF, tu_.PM_DF, \
                                                        RELEASE_DURATION, SPRINT_DURATION, modelsConfig)
    modelsConfig.globalRepo = globalRepo
    # NOTE(review): assumes 'datetime' is the class (from datetime import datetime) — confirm
    entries_df, log = timecard.runReleaseCycle(datetime(2018, 1, 15), SPRINT_DURATION, NUMBER_OF_SPRINTS, modelsConfig)
    # Rebuild stories_df after the run: ownerships may have changed during the release
    stories_df = UserStory.build_stories_df(globalRepo)
    bugs_df = Ticket.build_bugs_df(globalRepo.ticketsRepo.tickets)
    burnout_df = timecard.releaseBurnout(entries_df)
    output['Entries'] = entries_df
    output['User_Stories'] = stories_df
    output['Tickets'] = bugs_df
    output['Burnout'] = burnout_df
    for name in ReleaseLog.SNAPSHOTS:
        log_df = log.mergeLogs(name)
        output[name] = log_df
    output['log'] = log #Needed for visualizations
    return output, modelsConfig
# Run the test
# Unpacks both the output dict and the (mutated) modelsConfig used by later cells
test_buggyReleaseCycle_ACTUAL, modelsConfig = test_buggyReleaseCycle()
# + code_folding=[0]
# Uncomment to update expected output to match the actual one
# Helper method
def create_buggyReleaseCycle_EXPECTED():
    """Overwrite the EXPECTED baselines with the current ACTUAL output of test_buggyReleaseCycle.

    Covers the four fixed testlets plus every ReleaseLog snapshot, in the same order
    as the original explicit calls.
    """
    for name in ['Entries', 'User_Stories', 'Tickets', 'Burnout'] + ReleaseLog.SNAPSHOTS:
        tu_.createExpectedOutput(test_buggyReleaseCycle_ACTUAL[name], 'test_buggyReleaseCycle.' + name)
# Uncomment to update expected output to match the actual one, and then put the comment back
#create_buggyReleaseCycle_EXPECTED()
# + code_folding=[0]
# Load expected output, update the EXPECTED and ACTUAL dictionaries, and check test is OK
test_buggyReleaseCycle_EXPECTED = {}
list_cols_bugs = [] # Lists are loaded as strings, so require special processing on load
list_cols_stories = ['Open Bugs', 'Closed Bugs']
test_buggyReleaseCycle_EXPECTED['Entries'] = tu_.loadExpectedOutput('test_buggyReleaseCycle.Entries')
test_buggyReleaseCycle_EXPECTED['User_Stories'] = tu_.loadExpectedOutput('test_buggyReleaseCycle.User_Stories',
                                                                         list_cols_stories)
test_buggyReleaseCycle_EXPECTED['Tickets'] = tu_.loadExpectedOutput('test_buggyReleaseCycle.Tickets',
                                                                    list_cols_bugs)
test_buggyReleaseCycle_EXPECTED['Burnout'] = tu_.loadExpectedOutput('test_buggyReleaseCycle.Burnout')
for name in ReleaseLog.SNAPSHOTS:
    test_buggyReleaseCycle_EXPECTED[name] = tu_.loadExpectedOutput('test_buggyReleaseCycle.' + name)
# ReleaseLog snapshots have integer-valued columns (1, 2,3, ...), and loading the EXPECTED CSV file will convert
# them to strings ('1', '2', '3', ...), so to avoid spurious test failures rename the columns of the
# EXPECTED data we just loaded
cols_to_align = ReleaseLog.SNAPSHOTS.copy()
# These columns don't have integer-valued columns
cols_to_align.remove('Resourcing')
cols_to_align.remove('Outcome')
ExpectedOutputCleaner.alignColumns(cols_to_align,
                                   test_buggyReleaseCycle_EXPECTED,
                                   test_buggyReleaseCycle_ACTUAL)
# Round float-sensitive fields on both sides to suppress CSV round-trip noise
ExpectedOutputCleaner.cleanRoundingNoise(['Time Spent'],
                                         ['Entries'],
                                         test_buggyReleaseCycle_EXPECTED,
                                         test_buggyReleaseCycle_ACTUAL)
ExpectedOutputCleaner.cleanRoundingNoise(['Estimated Cost', 'Effort to Date', 'Percent Achieved'],
                                         ['Tickets'],
                                         test_buggyReleaseCycle_EXPECTED,
                                         test_buggyReleaseCycle_ACTUAL)
ExpectedOutputCleaner.cleanRoundingNoise(['Effort', 'Implementation Effort', 'Debugging Effort', 'Cum % Completion'],
                                         ['Burnout'],
                                         test_buggyReleaseCycle_EXPECTED,
                                         test_buggyReleaseCycle_ACTUAL)
testlets = ReleaseLog.SNAPSHOTS.copy()
# All log snapshots except 'Resourcing' and Outcome' have the same sensitive fields
testlets.remove('Resourcing')
testlets.remove('Outcome')
ExpectedOutputCleaner.cleanRoundingNoise([0,1,2,3,4,5,6,7,8,9,10],
                                         testlets,
                                         test_buggyReleaseCycle_EXPECTED,
                                         test_buggyReleaseCycle_ACTUAL)
ExpectedOutputCleaner.cleanRoundingNoise(['Rejects (days)', 'Debugging (days)', 'Implementation (days)', \
                                          'Bandwidth', 'NEXT SPRINT (days)', 'NEXT SPRINT Bandwidth'],
                                         ['Resourcing'],
                                         test_buggyReleaseCycle_EXPECTED,
                                         test_buggyReleaseCycle_ACTUAL)
ExpectedOutputCleaner.cleanRoundingNoise(['Original Estimate', 'Effort Spent','Effort Remaining', \
                                          'Percent Achieved', 'Global Estimate', '% Global Done'],
                                         ['Outcome'],
                                         test_buggyReleaseCycle_EXPECTED,
                                         test_buggyReleaseCycle_ACTUAL)
ExpectedOutputCleaner.standardizeDates(['Date'],
                                       ['Entries'],
                                       test_buggyReleaseCycle_EXPECTED)
ExpectedOutputCleaner.destringify(['Delivered in Sprint'],
                                  ['Outcome'],
                                  test_buggyReleaseCycle_EXPECTED)
tu_.EXPECTED['test_buggyReleaseCycle.Entries'] = test_buggyReleaseCycle_EXPECTED['Entries']
tu_.EXPECTED['test_buggyReleaseCycle.User_Stories'] = test_buggyReleaseCycle_EXPECTED['User_Stories']
tu_.EXPECTED['test_buggyReleaseCycle.Tickets'] = test_buggyReleaseCycle_EXPECTED['Tickets']
tu_.EXPECTED['test_buggyReleaseCycle.Burnout'] = test_buggyReleaseCycle_EXPECTED['Burnout']
for name in ReleaseLog.SNAPSHOTS:
    tu_.EXPECTED['test_buggyReleaseCycle.' + name] = test_buggyReleaseCycle_EXPECTED[name]
tu_.ACTUAL['test_buggyReleaseCycle.Entries'] = test_buggyReleaseCycle_ACTUAL['Entries']
tu_.ACTUAL['test_buggyReleaseCycle.User_Stories'] = test_buggyReleaseCycle_ACTUAL['User_Stories']
tu_.ACTUAL['test_buggyReleaseCycle.Tickets'] = test_buggyReleaseCycle_ACTUAL['Tickets']
tu_.ACTUAL['test_buggyReleaseCycle.Burnout'] = test_buggyReleaseCycle_ACTUAL['Burnout']
for name in ReleaseLog.SNAPSHOTS:
    tu_.ACTUAL['test_buggyReleaseCycle.' + name] = test_buggyReleaseCycle_ACTUAL[name]
results = []
results.append(tu_.testOK('test_buggyReleaseCycle.Entries'))
results.append(tu_.testOK('test_buggyReleaseCycle.User_Stories'))
results.append(tu_.testOK('test_buggyReleaseCycle.Tickets'))
results.append(tu_.testOK('test_buggyReleaseCycle.Burnout'))
for name in ReleaseLog.SNAPSHOTS:
    results.append(tu_.testOK('test_buggyReleaseCycle.' + name))
results  # bare expression: displays the list of pass/fail flags in the notebook
# + code_folding=[0]
# Uncomment to interactively visualize the release logs, and then comment again once interactive analysis is done. Commenting these
# lines after interactive analysis is completed is required as test harness can't load these visualiations
# libraries so leaving this uncommented will crash the entire test harness.
# NB: MAY NEED TO RUN TWICE (there is a bug in Jupyter notebook, I think, so first time you call this it shows no visuals)
#import devanalyst.simulation.visualizations.timecard_visuals as tc_visuals
#tc_visuals.renderReleaseCycleLog(teamId = 'Team A', release_log=test_buggyReleaseCycle_ACTUAL['log'], first=1, last=19, spurious_columns=['Team Id', 'Sprint'])
# + code_folding=[0]
# Script to debug test failures when entries change. This was very useful when the test failed because of simply the
# removal of spurious entries (tails of 0's), for which the tu_.taintFailuresToStop was very useful. This allows
# detecting the spurious rows and discard them, and at that point we have the same number of rows. Any mismatches
# after that should be just innocent rotations of some entries, for which tu_.find_mismatches(e2, a) helps find the
# few rows that account for the mismatches, and visual inspection usually is enough to verify it is simply an innocent
# rotation
def spurious_errors_debug(): # return or extract pertinent lines. This function is just a template
    """Template for manually diagnosing Entries mismatches in test_buggyReleaseCycle.

    NOTE(review): intentionally redefines (shadows) the earlier template of the same
    name for this test section. Not called by the harness; copy lines into a cell
    when debugging. Trailing .loc expressions are deliberately bare.
    """
    a = test_buggyReleaseCycle_ACTUAL['Entries']
    e = test_buggyReleaseCycle_EXPECTED['Entries']
    # Mark trailing all-zero ("spurious") rows on each side
    at = tu_.taintFailuresToStop(a, 'User Story', 'Time Spent')
    et = tu_.taintFailuresToStop(e, 'User Story', 'Time Spent')
    # Drop the tainted EXPECTED rows so both frames have the same row count
    discard = list(et[et['TAINTED'] == True].index)
    e2 = e.drop(discard)
    e2.index = a.index
    tu_.find_mismatches(e2, a)
    # Example row indices from a past debugging session — adjust when reusing
    e2.loc[[1178, 1182, 1184, 1216, 1218, 1220, 1232]]
    a.loc[[1178, 1182, 1184, 1216, 1218, 1220, 1232]]
# -
# Display cells: ACTUAL vs EXPECTED head rows for each testlet, for visual inspection
test_buggyReleaseCycle_ACTUAL['Entries'][0:8]
test_buggyReleaseCycle_EXPECTED['Entries'][0:8]
test_buggyReleaseCycle_ACTUAL['User_Stories'][0:8]
test_buggyReleaseCycle_EXPECTED['User_Stories'][0:8]
test_buggyReleaseCycle_ACTUAL['Tickets'][0:8]
test_buggyReleaseCycle_EXPECTED['Tickets'][0:8]
test_buggyReleaseCycle_ACTUAL['Burnout'][0:8]
test_buggyReleaseCycle_EXPECTED['Burnout'][0:8]
test_buggyReleaseCycle_ACTUAL['planned_Start_CURRENT_SPRINT'][0:8]
test_buggyReleaseCycle_EXPECTED['planned_Start_CURRENT_SPRINT'][0:8]
test_buggyReleaseCycle_ACTUAL['planned_End_CURRENT_SPRINT'][0:8]
test_buggyReleaseCycle_EXPECTED['planned_End_CURRENT_SPRINT'][0:8]
test_buggyReleaseCycle_ACTUAL['planned_Start_NEXT_SPRINT'][0:8]
test_buggyReleaseCycle_EXPECTED['planned_Start_NEXT_SPRINT'][0:8]
test_buggyReleaseCycle_ACTUAL['planned_End_NEXT_SPRINT'][0:8]
test_buggyReleaseCycle_EXPECTED['planned_End_NEXT_SPRINT'][0:8]
test_buggyReleaseCycle_ACTUAL['backlog'][0:8]
test_buggyReleaseCycle_EXPECTED['backlog'][0:8]
test_buggyReleaseCycle_ACTUAL['Resourcing'][0:8]
test_buggyReleaseCycle_EXPECTED['Resourcing'][0:8]
test_buggyReleaseCycle_ACTUAL['Outcome'][0:8]
test_buggyReleaseCycle_EXPECTED['Outcome'][0:8]
# <h1>test_workStealing</h1>
# + code_folding=[0]
# Implement test logic, and run it
#Test logic
def test_workStealing():
    """Exercise a full release cycle with work stealing enabled and gather
    every dataframe needed to validate the work-stealing behaviour.

    Returns a dict of output tables keyed by the names used by the
    EXPECTED/ACTUAL comparison cells below.
    """
    release_duration = 60
    sprint_duration = 10
    number_of_sprints = 20
    team_a_devs = tu_.DEV_DF[tu_.DEV_DF['Scrum Team'] == 'A']
    modelsConfig = ModelsConfig([MeritocraticCostModel()], [DistributedLagQualityModel()], NoLaggardsAllocationModel())
    modelsConfig.random.reset(271)  # fixed seed so the run is reproducible
    teams_df, stories_dfDUMMY, globalRepo = tu_.initTestData(
        team_a_devs, tu_.PM_DF, release_duration, sprint_duration, modelsConfig)
    modelsConfig.globalRepo = globalRepo
    entries_df, log = timecard.runReleaseCycle(
        datetime(2018, 1, 15), sprint_duration, number_of_sprints, modelsConfig)
    ## Need to set stories3_df **after** release cycle since developer ownerships might be changed during the release
    stories_df = UserStory.build_stories_df(globalRepo)
    # Derived output dataframes
    burnout_df = timecard.releaseBurnout(entries_df)
    utilization_df = timecard.utilization(entries_df, [11, 12, 13, 14, 15, 16, 17, 18, 19])
    stolen = timecard.tabulateWorkStolen(modelsConfig)
    stolen_df = timecard.buildWorkStolen_df(stolen)
    explanation_us1 = timecard.explainWorkStolen(stolen, 'UserStory #1', log)
    explanation_us70 = timecard.explainWorkStolen(stolen, 'UserStory #70', log)
    laggards, balance, actual_steals = timecard.workStealingRetrospective(13, log, modelsConfig)
    return {
        'burnout': burnout_df,
        'utilization': utilization_df,
        'works_stolen': stolen_df,
        'userStory1_work_stolen_explanation': explanation_us1,
        'userStory70_work_stolen_explanation': explanation_us70,
        'laggard_candidates_to_steal_sprint13': laggards,
        'balance_candidates_to_steal_sprint13': balance,
        'actual_steal_sprint13': actual_steals,
    }
# Run the test
test_workStealing_ACTUAL = test_workStealing()
# + code_folding=[0]
# Uncomment to update expected output to match the actual one
# Helper method
def create_workStealing_EXPECTED():
    """Persist every ACTUAL output of test_workStealing as the new EXPECTED
    baseline. Run manually (see comment below) only when a behaviour change
    is intended; otherwise the old baselines must keep passing.
    """
    # One createExpectedOutput call per output table; looping removes the
    # 8x copy-pasted call blocks of the original.
    keys = ['burnout',
            'utilization',
            'works_stolen',
            'userStory1_work_stolen_explanation',
            'userStory70_work_stolen_explanation',
            'laggard_candidates_to_steal_sprint13',
            'balance_candidates_to_steal_sprint13',
            'actual_steal_sprint13']
    for key in keys:
        tu_.createExpectedOutput(test_workStealing_ACTUAL[key],
                                 'timecard.test_workStealing.' + key)
# Uncomment to update expected output to match the actual one, and then put the comment back
#create_workStealing_EXPECTED()
# + code_folding=[0]
# Load expected output, update the EXPECTED and ACTUAL dictionaries, and check test is OK
list_cols = [] # Lists are loaded as strings, so require special processing on load
PFX = 'timecard.test_workStealing.'
# All output tables produced by test_workStealing. Driving the load /
# register / check steps from this list removes the original's triple
# repetition of every key.
WORKSTEALING_KEYS = ['burnout',
                     'utilization',
                     'works_stolen',
                     'userStory1_work_stolen_explanation',
                     'userStory70_work_stolen_explanation',
                     'laggard_candidates_to_steal_sprint13',
                     'balance_candidates_to_steal_sprint13',
                     'actual_steal_sprint13']
test_workStealing_EXPECTED = {key: tu_.loadExpectedOutput(PFX + key, list_cols)
                              for key in WORKSTEALING_KEYS}
# Rounding inaccuracies in saving and loading CSV will create an artificial mismatch between ACTUAL and EXPECTED
# So round EXPECTED and ACTUAL to 6 decimal places for sensitive fields (any float)
ExpectedOutputCleaner.cleanRoundingNoise(['Effort', 'Implementation Effort', 'Debugging Effort', 'Cum % Completion'],
                                         ['burnout'],
                                         test_workStealing_EXPECTED,
                                         test_workStealing_ACTUAL)
ExpectedOutputCleaner.cleanRoundingNoise(['Sprint 11', 'Sprint 12', 'Sprint 13', 'Sprint 14', 'Sprint 15',
                                          'Sprint 16', 'Sprint 17', 'Sprint 18', 'Sprint 19'],
                                         ['utilization'],
                                         test_workStealing_EXPECTED,
                                         test_workStealing_ACTUAL)
ExpectedOutputCleaner.cleanRoundingNoise(['Original Estimate', 'Effort Spent', 'Effort Remaining',
                                          'Percent Achieved', 'Global Estimate', '% Global Done'],
                                         ['userStory1_work_stolen_explanation', 'userStory70_work_stolen_explanation',
                                          'laggard_candidates_to_steal_sprint13', 'balance_candidates_to_steal_sprint13'],
                                         test_workStealing_EXPECTED,
                                         test_workStealing_ACTUAL)
# CSV round-trips also lose the index, so realign it before comparing
ExpectedOutputCleaner.alignIndex(['userStory1_work_stolen_explanation', 'userStory70_work_stolen_explanation',
                                  'laggard_candidates_to_steal_sprint13', 'balance_candidates_to_steal_sprint13',
                                  'actual_steal_sprint13'],
                                 test_workStealing_EXPECTED,
                                 test_workStealing_ACTUAL)
# Lists come back from CSV as strings; convert them back to real lists
ExpectedOutputCleaner.destringify(['Delivered in Sprint'],
                                  ['userStory1_work_stolen_explanation', 'userStory70_work_stolen_explanation',
                                   'laggard_candidates_to_steal_sprint13', 'balance_candidates_to_steal_sprint13'],
                                  test_workStealing_EXPECTED)
# Register every table with the test utilities, then evaluate all checks.
for key in WORKSTEALING_KEYS:
    tu_.EXPECTED[PFX + key] = test_workStealing_EXPECTED[key]
    tu_.ACTUAL[PFX + key] = test_workStealing_ACTUAL[key]
# The trailing tuple of booleans is what the notebook cell displays,
# exactly like the original's backslash-continued tuple expression.
tuple(tu_.testOK(PFX + key) for key in WORKSTEALING_KEYS)
# -
# Side-by-side visual comparison of each ACTUAL vs EXPECTED table
# (bare expressions: only the last one in a notebook cell renders).
test_workStealing_ACTUAL['burnout']
test_workStealing_EXPECTED['burnout']
test_workStealing_ACTUAL['utilization']
test_workStealing_EXPECTED['utilization']
test_workStealing_ACTUAL['works_stolen']
test_workStealing_EXPECTED['works_stolen']
# Quick index-alignment check for the first explanation table
a = test_workStealing_ACTUAL['userStory1_work_stolen_explanation']
e = test_workStealing_EXPECTED['userStory1_work_stolen_explanation']
a.index, e.index
test_workStealing_ACTUAL['userStory1_work_stolen_explanation']
test_workStealing_EXPECTED['userStory1_work_stolen_explanation']
test_workStealing_ACTUAL['userStory70_work_stolen_explanation']
test_workStealing_EXPECTED['userStory70_work_stolen_explanation']
test_workStealing_ACTUAL['laggard_candidates_to_steal_sprint13']
test_workStealing_EXPECTED['laggard_candidates_to_steal_sprint13']
test_workStealing_ACTUAL['balance_candidates_to_steal_sprint13']
test_workStealing_EXPECTED['balance_candidates_to_steal_sprint13']
test_workStealing_ACTUAL['actual_steal_sprint13']
test_workStealing_EXPECTED['actual_steal_sprint13']
| devanalyst/simulation/tests/test_timecard.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# * effect: rasterize (on photo), edge blur - shadow
# alpha gradient mask (brightness control)
#
# ### 主軸
#
# * 想要有一張好看的Poster,需要的是美照+美字/插圖+文字,搭配icon、裝飾、filters
# * 若可複製文字,則美感大幅提升
# * 照片:去背/不去背,需要大量素材
# *
#
# ### 問題
#
# * crop mask: 分為shape, fill -> 跟blend有點接近
# * blend: 一系列物件具有相同的bbox -> detection困難?
# * 位移:基於標的物件做位移的情況?
#
# * shape只能有一個?
#
# ### 測試checklist
#
# * 對於物件完全重疊時的有效性? eg photo + alpha gradient
# *
#
# ### 流程
#
# * detection: 找出物件bbox(x, y, w, h) & cat (以shape為標的),物件可以是復合物件
# * parameter regression(基於sequence方式): 給予(image, bbox(as mask), cat, prev_cat) -> (params, next_cat),好處-針對復合物件導出params
# * 單一物件 b0:(im, mask, "b0_cat", "null") -> (params, "null")
# * Blend: \[b0 (as crop), b1, b2, ..., b_n\], 任何情況b0都是crop
# * 沒有mask -> 單純"blend": (im, mask, "blend", "null") -> (blend_params, "b0_cat")
# * (im, mask0, "Blend", "Null") -> (blend_params, "b0_cat")
# * (im, mask0, "b0_cat", "Blend") -> (b0_params, "b1_cat")
# * (im, mask0, "b1_cat", "b0_cat") -> (b1_params, "b1_cat")
# * 有mask:
# * (im, mask0, "Blend", "Null") -> (blend_params, "b0_cat")
# * (im, mask0, "b0_cat", "Crop") -> (b0_params, "b1_cat")
# * (im, mask0, "b1_cat", "b0_cat") -> (b1_params, "b1_cat")
# * ...
#
# #### Todo
#
# * blend\[pattern, gradient\] -> 目前pattern因為自帶color,和其他color結合會有問題
# * blend\[rect, gradient\] -> 顏色無法正確顯示,需要改用alpha_composite
# * text box
# +
# # !apt update
# # !apt install -y libsm6 libxext6 libxrender-dev
# # !pip install opencv-python
# # !conda install -c anaconda sqlalchemy
# # !conda install -y -c conda-forge pycairo cairosvg imgaug faker
# +
# # %cd /workspace/CRAFT-pytorch/my-src/generator
# # !rm -R /workspace/CRAFT-pytorch/data/icons
# # !python svg2png.py --db=sqlite:////workspace/CRAFT-pytorch/crawl_noun.db --n_samples=100
# +
# %cd /workspace/CRAFT-pytorch/mysrc/generator
# !rm -R /workspace/CRAFT-pytorch/mydataset
# !python main.py --n_samples=5 --imsize=511 --folder=train
# # !python main.py --n_samples=1000 --imsize=511 --folder=train
# # !python tococo.py --root=/workspace/CRAFT-pytorch/mydataset/train
# # !python main.py --n_samples=2 --imsize=511 --folder=test
# # !python tococo.py --folder=test
# -
# %cd /workspace/CRAFT-pytorch/mysrc
# # !python train_mask.py --imsize=128 --batch_size=4
# !python train_param.py --imsize=511 --batch_size=4 --sample_interval=2
# +
# # !apt update -y && apt install -y tesseract-ocr
#libtesseract-dev libleptonica-dev tesseract-ocr-jpn
# # !pip install tesserocr
# -
| train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
# -
def cnn_model_fn(features, labels, mode):
    """Model function for an MNIST CNN tf.estimator.Estimator (TF 1.x).

    Args:
        features: dict with key "x" holding flattened 28x28 grayscale images.
        labels: int32 class indices of shape [batch_size] (NOT one-hot).
        mode: a tf.estimator.ModeKeys value (TRAIN / EVAL / PREDICT).

    Returns:
        tf.estimator.EstimatorSpec configured for the requested mode.
    """
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
    # Convolutional Layer #1
    # Computes 32 features using a 5x5 filter with ReLU activation.
    # Padding is added to preserve width and height.
    # Input Tensor Shape: [batch_size, 28, 28, 1]
    # Output Tensor Shape: [batch_size, 28, 28, 32]
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling Layer #1
    # First max pooling layer with a 2x2 filter and stride of 2
    # Input Tensor Shape: [batch_size, 28, 28, 32]
    # Output Tensor Shape: [batch_size, 14, 14, 32]
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Convolutional Layer #2
    # Computes 64 features using a 5x5 filter.
    # Padding is added to preserve width and height.
    # Input Tensor Shape: [batch_size, 14, 14, 32]
    # Output Tensor Shape: [batch_size, 14, 14, 64]
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling Layer #2
    # Second max pooling layer with a 2x2 filter and stride of 2
    # Input Tensor Shape: [batch_size, 14, 14, 64]
    # Output Tensor Shape: [batch_size, 7, 7, 64]
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # Flatten tensor into a batch of vectors
    # Input Tensor Shape: [batch_size, 7, 7, 64]
    # Output Tensor Shape: [batch_size, 7 * 7 * 64]
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    # Dense Layer
    # Densely connected layer with 1024 neurons
    # Input Tensor Shape: [batch_size, 7 * 7 * 64]
    # Output Tensor Shape: [batch_size, 1024]
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    # Add dropout operation; 0.6 probability that element will be kept
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits layer
    # Input Tensor Shape: [batch_size, 1024]
    # Output Tensor Shape: [batch_size, 10]
    logits = tf.layers.dense(inputs=dropout, units=10)
    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate Loss (for both TRAIN and EVAL modes).
    # Fixed: the original called tf.nn.softmax_cross_entropy_with_logits with
    # integer class labels, but that op expects one-hot labels of shape
    # [batch_size, 10] and returns a per-example loss vector rather than a
    # scalar. sparse_softmax_cross_entropy accepts integer labels directly
    # and reduces to the scalar loss EstimatorSpec requires.
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
# +
def main(unused_argv):
    """Train the MNIST CNN Estimator for 1000 steps, then evaluate it."""
    # Load training and eval data as numpy arrays; labels must be int32
    # class indices for the sparse loss/metrics in cnn_model_fn.
    dataset = tf.contrib.learn.datasets.load_dataset("mnist")
    train_images = dataset.train.images
    train_labels = np.asarray(dataset.train.labels, dtype=np.int32)
    eval_images = dataset.test.images
    eval_labels = np.asarray(dataset.test.labels, dtype=np.int32)
    # Create the Estimator; checkpoints and summaries go to model_dir.
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model_16")
    # Log the "softmax_tensor" values under the label "probabilities"
    # every 50 training steps.
    logging_hook = tf.train.LoggingTensorHook(
        tensors={"probabilities": "softmax_tensor"}, every_n_iter=50)
    # Train the model (num_epochs=None repeats the data indefinitely;
    # training length is bounded by steps=1000).
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_images},
        y=train_labels,
        batch_size=50,
        num_epochs=None,
        shuffle=True)
    mnist_classifier.train(
        input_fn=train_input_fn, steps=1000, hooks=[logging_hook])
    # Evaluate the model and print results.
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_images},
        y=eval_labels,
        num_epochs=5,
        shuffle=False)
    print(mnist_classifier.evaluate(input_fn=eval_input_fn))
if __name__ == "__main__":
    tf.app.run()
# -
| cnn_mnist_partC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Methods
# The distinction between methods and functions is somewhat subtle. They are functions that are built into objects, and are a subclass of functions. Functions are "general purpose" and do not belong to objects. We've talked about list, tuple, string, and dictionary methods. Even numbers themselves have methods. Any call to a function that belongs to an object is called a method. The format is the following:
#
# object.method(arg1, arg2..)
#
# We can observe that there is an object here.
# Let's create a list object and try out some built-in methods (we can even create our own methods, coming in the next chapter).
# ## Number Methods
# Numbers have methods, too
my_num = 2
# Arithmetic operators are syntactic sugar over these dunder methods;
# name each result first, then print, to show both steps separately.
addition = my_num.__add__(3)        # same as my_num + 3
subtraction = my_num.__sub__(2)     # same as my_num - 2
multiplication = my_num.__mul__(2)  # same as my_num * 2
division = my_num.__truediv__(2)    # same as my_num / 2
print(addition)
print(subtraction)
print(multiplication)
print(division)
# ## List Methods (Recap)
# +
# These are examples of list methods.
my_list = ["Bonet", "Triggly", "Ross"]
# append() adds one element to the end, in place
my_list.append("Triggly")
print("After appending {}".format(my_list))
# copy() returns a new, independent (shallow) copy
another_list = my_list.copy()
print("After copying {}".format(another_list))
# clear() empties the list in place; the copy is unaffected
my_list.clear()
print("After clearing {}".format(my_list))
# insert(i, x) places x at index i, shifting later elements right
another_list.insert(2, "Triggly")
print("After insertion: {}".format(another_list))
# count() returns how many elements equal the argument
the_count = another_list.count("Triggly")
print("Count of specific word: {}".format(the_count))
# -
# ## Tuple Methods (Recap)
# +
# These are some tuple methods
my_tuple = ("hate", 19.0, "redemption", "hate")
# count() returns the number of occurrences; index() the first position
print(my_tuple.count("hate"))
print(my_tuple.index("redemption"))
# -
# ## Dictionary Methods (Recap)
# +
# These are Dictionary Methods
# keys()/values() return live views; wrap them in list() to snapshot.
my_dict = {"Sally":19, "Robert":[1, 48, 29], "Wallace":0}
keys = list(my_dict.keys())
values = list(my_dict.values())
# pop() removes the key from the dict and returns its value
popped_value = my_dict.pop("Robert")
print(popped_value)  # fixed: original printed `popped_key`, an undefined name (NameError)
| Functions and Methods/Methods.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Q1h6F1G92pLR"
# # Code Samples
# + id="UzBVvvgDa7qU"
import sympy as sp
import numpy
import pandas
# + id="4qLyQ6EzbKvm"
# Enable pretty (LaTeX/unicode) rendering of sympy expressions
sp.init_printing()
# + id="JnEeQzpLbiGY"
# Symbolic variables: state (x, y, z) and rate constants (k1, k2, k3)
x, y, z, k1, k2, k3 = sp.symbols("x, y, z, k1, k2, k3")
# + colab={"base_uri": "https://localhost:8080/", "height": 48} id="DFbW2eJXb45U" outputId="b9863fe5-f6ff-4bf0-c257-1f5549e1db61"
# Solve sin(x) = 1 over the complex domain (returns an image set)
sp.solveset(sp.sin(x) - 1, x)
# + colab={"base_uri": "https://localhost:8080/", "height": 57} id="z-5ptjHucn2Z" outputId="3bb32701-8621-46fc-918d-f1e232945a17"
# A system of two equations stacked as a column matrix
matrix = sp.Matrix([sp.sin(x) -1, sp.cos(y) -1 ])
matrix
# + colab={"base_uri": "https://localhost:8080/", "height": 48} id="qsjHjz0rc3v3" outputId="2995b58c-406d-498a-f530-19bed0ab795c"
sp.solve(matrix)
# + id="qxMaghzEdjY2"
# Example nonlinear kinetics system in x and y
kinetics = sp.Matrix([k1*x*y - 3, k2*x/(1 -x) - 4])
# + colab={"base_uri": "https://localhost:8080/", "height": 57} id="yPd-nPvGekvK" outputId="2c4538c5-b560-4518-8113-c10fcc655230"
sp.nonlinsolve(kinetics, [x,y])
# + colab={"base_uri": "https://localhost:8080/", "height": 314} id="AYWiRQNGfPac" outputId="465f7809-aa87-4310-8c61-806d73bc11a7"
sp.plot(2*x**2 + 3*x)
# + colab={"base_uri": "https://localhost:8080/", "height": 315} id="8BzLvGJ3f6NB" outputId="cc6ccd5a-c5c6-4bbe-8d05-e3cbb18cc703"
# 3D parametric helix: (cos t, sin t, t)
from sympy.plotting import plot3d_parametric_line
t = sp.symbols('t')
alpha = [sp.cos(t), sp.sin(t), t]
plot3d_parametric_line(*alpha)
# + colab={"base_uri": "https://localhost:8080/", "height": 315} id="t2CSzszhgR82" outputId="5a549425-e6ae-4bc6-b082-758ab62bbcb7"
# Plots for the reaction flux
# x + y -> z; k1*x*y
flux = sp.Matrix([x, y, k1*x*y])
flux_plot = flux.subs({k1: 3})
plot3d_parametric_line(x, x**2, 3*x**3)
# + colab={"base_uri": "https://localhost:8080/", "height": 54} id="dZiL_hjviZ7I" outputId="47a5c97e-30e3-4a61-b462-3040262e63e5"
# Second-order linear ODE: f'' - 2f' + f = sin(x)
f, g = sp.symbols('f g', cls=sp.Function)
diffeq = sp.Eq(f(x).diff(x, x) - 2*f(x).diff(x) + f(x), sp.sin(x))
diffeq
# + id="PquQvKWLum0M"
result = sp.dsolve(diffeq, f(x))
# + colab={"base_uri": "https://localhost:8080/", "height": 314} id="jP0SSeMGxYPW" outputId="50f25ad5-e61f-42c1-f424-a2dc87197c02"
# Pin the two integration constants to 1 and plot the particular solution.
# NOTE(review): free_symbols is an unordered set, so syms[0]/syms[1] may
# bind to x or the constants nondeterministically — confirm intent.
syms = list(result.free_symbols)
syms[0]
result1 = result.subs({syms[0]: 1, syms[1]: 1})
sp.plot(result1.rhs)
# + colab={"base_uri": "https://localhost:8080/", "height": 37} id="i20JyWTWu2BI" outputId="656e57d7-cd6c-4593-aab0-753455b259e1"
# Double root at x = 1
sp.solve(x**2 - 2*x + 1, x)
# + colab={"base_uri": "https://localhost:8080/", "height": 53} id="X7gHDs5UyPey" outputId="5facc3fd-e81f-4384-d31a-9bd7d55e20bf"
result1.rhs
# + [markdown] id="pRGPxny41xSa"
# # Workflow for Solving LTI Systems
# 1. Given $A, B, C$, find
# 1. $e^{At}$
# 1. $\int_0^t e^{A(t - \tau)} u(\tau) d \tau$ for
# $u(\tau) \in \{ \delta(t), 1(t), t \} $
# 1. $x(t)$
# 1. $y(t)$
#
# 1. Plot $x$, $y$
#
# 1. Solve for observability, controllability
# + [markdown] id="9nzT5sRa16m6"
# # Workflow for Reaction Networks
# 1. Simulate the original model
# 1. Convert model to sympy
# 1. Get symbolic Jacobian
# 1. Construct LTI models for different points in state space
| save/MatrixExponential.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow as tf
import numpy as np
from nltk.corpus import stopwords #provides list of english stopwords
stop = stopwords.words('english')
# # Process Data
# Load the tab-separated English/Italian sentence pairs and split 90/10.
# NOTE(review): read_csv with header=None yields integer column labels
# (0, 1, ...), yet the code below indexes train['english'] and
# train['italian'] — presumably a names=[...] argument or a rename step is
# missing; TODO confirm against the original notebook / ita.txt layout.
train, test = train_test_split(pd.read_csv('ita.txt', sep='\t',header = None, nrows=100000), test_size=.10)
print(train.shape)
print(test.shape)
print(train.head())
# Lowercase, then strip punctuation (regex keeps word chars and whitespace).
train['english_lower'] = train['english'].str.lower()
train['english_no_punctuation'] = train['english_lower'].str.replace('[^\w\s]','')
#train['english_no_stopwords'] = train['english_no_punctuation'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))
#train["english_no_stopwords"] = train["english_no_stopwords"].fillna("fillna")
#train["english_no_stopwords"] = train["english_no_stopwords"]
# +
train['italian_lower'] = train["italian"].str.lower()
train['italian_no_punctuation'] = '_start_' + ' ' +train['italian_lower'].str.replace('[^\w\s]','')+ ' ' +'_end_'
#VERY IMPORTANT TRICK!!
#NOTICE THAT WE ADD "_start_" and "_end_" EXACTLY AT THE BEGINNING AND THE END OF EACH SENTENCE TO HAVE SOME KIND OF 'DELIMITERS'
#THAT WILL TELL OUR DECODER TO START AND FINISH. BECAUSE WE DON'T HAVE GENERAL SIGNALS OF START AND FINISH IN NATURAL LANGUAGE.
#BASICALLY '_end_' REFLECTS THE POINT IN WHICH OUR OUTPUT SENTENCE IS MORE LIKELY TO END.
# +
# Vocabulary caps and padded sequence lengths for each language
max_features1 = 1000
maxlen1 = 10
max_features2 = 1000
maxlen2 = 10
# -
# English (encoder-side) tokenizer: fit, convert to int sequences, pad.
tok1 = tf.keras.preprocessing.text.Tokenizer(num_words=max_features1)
tok1.fit_on_texts(list(train['english_no_punctuation'])) #fit to cleaned text
tf_train_english =tok1.texts_to_sequences(list(train['english_no_punctuation']))
tf_train_english =tf.keras.preprocessing.sequence.pad_sequences(tf_train_english, maxlen=maxlen1) #let's execute pad step
# +
#the processing has to be done for both
#two different tokenizers
# -
# Italian (decoder-side) tokenizer. filters='*' keeps the underscores of
# the '_start_'/'_end_' delimiter tokens intact; padding='post' so the
# sequence begins with '_start_'.
tok2 = tf.keras.preprocessing.text.Tokenizer(num_words=max_features2, filters = '*')
tok2.fit_on_texts(list(train['italian_no_punctuation'])) #fit to cleaned text
tf_train_italian = tok2.texts_to_sequences(list(train['italian_no_punctuation']))
tf_train_italian = tf.keras.preprocessing.sequence.pad_sequences(tf_train_italian, maxlen=maxlen2, padding ='post')
# # Define Model Architecture
# +
# Build the teacher-forcing input/target pairs for the decoder.
vectorized_italian = tf_train_italian
# For Decoder Input, you don't need the last word as that is only for prediction
# when we are training using Teacher Forcing.
decoder_input_data = vectorized_italian[:, :-1]
# Decoder Target Data Is Ahead By 1 Time Step From Decoder Input Data (Teacher Forcing)
decoder_target_data = vectorized_italian[:, 1:]
print(f'Shape of decoder input: {decoder_input_data.shape}')
print(f'Shape of decoder target: {decoder_target_data.shape}')
vectorized_english = tf_train_english
# Encoder input is simply the body of the issue text
encoder_input_data = vectorized_english
# Encoder sequence length (= maxlen1 after padding)
doc_length = encoder_input_data.shape[1]
print(f'Shape of encoder input: {encoder_input_data.shape}')
# -
vocab_size_encoder = len(tok1.word_index) + 1 #+1 because Keras reserves index 0 for padding
# Fixed: the decoder vocabulary must come from the Italian tokenizer (tok2),
# not the English one (tok1) — the original copy-pasted tok1, which sizes the
# decoder embedding/output layers from the wrong language's vocabulary.
vocab_size_decoder = len(tok2.word_index) + 1
# ### Define Model Architecture
# +
#arbitrarly set latent dimension for embedding and hidden units
latent_dim = 40
# -
# Encoder Model
# +
encoder_inputs = tf.keras.Input(shape=(doc_length,), name='Encoder-Input')
# Word embeding for encoder (English text)
x = tf.keras.layers.Embedding(vocab_size_encoder, latent_dim, name='Body-Word-Embedding', mask_zero=False)(encoder_inputs)
#Batch normalization is used so that the distribution of the inputs
#to a specific layer doesn't change over time
x = tf.keras.layers.BatchNormalization(name='Encoder-Batchnorm-1')(x)
# We do not need the `encoder_output` just the hidden state.
_, state_h = tf.keras.layers.GRU(latent_dim, return_state=True, name='Encoder-Last-GRU')(x)
# Encapsulate the encoder as a separate entity so we can just
# encode without decoding if we want to.
encoder_model = tf.keras.Model(inputs=encoder_inputs, outputs=state_h, name='Encoder-Model')
seq2seq_encoder_out = encoder_model(encoder_inputs)
########################
#### Decoder Model ####
decoder_inputs = tf.keras.Input(shape=(None,), name='Decoder-Input')  # for teacher forcing
# Word Embedding For Decoder (Italian text)
dec_emb = tf.keras.layers.Embedding(vocab_size_decoder, latent_dim, name='Decoder-Word-Embedding', mask_zero=False)(decoder_inputs)
#again batch normalization
dec_bn = tf.keras.layers.BatchNormalization(name='Decoder-Batchnorm-1')(dec_emb)
# Set up the decoder, using `decoder_state_input` as initial state.
# The encoder's final hidden state seeds the decoder GRU.
decoder_gru = tf.keras.layers.GRU(latent_dim, return_state=True, return_sequences=True, name='Decoder-GRU')
decoder_gru_output, _ = decoder_gru(dec_bn, initial_state=seq2seq_encoder_out)
x = tf.keras.layers.BatchNormalization(name='Decoder-Batchnorm-2')(decoder_gru_output)
# Dense layer for prediction: one softmax over the decoder vocabulary per time step
decoder_dense = tf.keras.layers.Dense(vocab_size_decoder, activation='softmax', name='Final-Output-Dense')
decoder_outputs = decoder_dense(x)
########################
#### Seq2Seq Model ####
# sparse_categorical_crossentropy lets us feed integer targets directly
# (no one-hot encoding of the Italian vocabulary needed).
seq2seq_Model = tf.keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)
seq2seq_Model.compile(optimizer=tf.keras.optimizers.Nadam(lr=0.001), loss='sparse_categorical_crossentropy')
# -
# ** Examine Model Architecture Summary **
#from seq2seq_utils import viz_model_architecture
seq2seq_Model.summary()
#viz_model_architecture(seq2seq_Model)
# # Train Model
batch_size = 1200
epochs = 1  # NOTE(review): the file name below says "3_epochs" — confirm intended epoch count
# Teacher-forcing fit: decoder input is the shifted target sequence;
# expand_dims adds the trailing axis sparse_categorical_crossentropy expects.
history = seq2seq_Model.fit([encoder_input_data, decoder_input_data], np.expand_dims(decoder_target_data, -1),
          batch_size=batch_size, epochs=epochs, validation_split=0.12)
# Round-trip the model through disk to verify serialization works.
seq2seq_Model.save('seq2seq_full_data_3_epochs.h5')
del seq2seq_Model
seq2seq_Model = tf.keras.models.load_model('seq2seq_full_data_3_epochs.h5')
# NOW WE HAVE ANOTHER IMPORTANT TIP!
# NOTE(review): '<NAME>' looks like an anonymization placeholder, not a
# real test sentence — confirm the intended input text.
test_text = ['<NAME> Tom']
# # See Results On Holdout Set
#max_len_title = 30
# get the encoder's features for the decoder
# NOTE(review): refitting tok1 on the test text mutates the tokenizer's
# word_index after training — usually unintended (train/test leakage and
# index drift); verify whether this fit_on_texts call should be removed.
tok1.fit_on_texts(test_text)
raw_tokenized = tok1.texts_to_sequences(test_text)
raw_tokenized = tf.keras.preprocessing.sequence.pad_sequences(raw_tokenized, maxlen=maxlen1)
# Encode the test sentence into the decoder's initial hidden state
body_encoding = encoder_model.predict(raw_tokenized)
latent_dim = seq2seq_Model.get_layer('Decoder-Word-Embedding').output_shape[-1]
# Reconstruct the input into the decoder
decoder_inputs = seq2seq_Model.get_layer('Decoder-Input').input
dec_emb = seq2seq_Model.get_layer('Decoder-Word-Embedding')(decoder_inputs)
dec_bn = seq2seq_Model.get_layer('Decoder-Batchnorm-1')(dec_emb)
# Instead of setting the intial state from the encoder and forgetting about it, during inference
# we are not doing teacher forcing, so we will have to have a feedback loop from predictions back into
# the GRU, thus we define this input layer for the state so we can add this capability
# +
# Extra input so the decoder's hidden state can be fed back in at each step.
gru_inference_state_input = tf.keras.Input(shape=(latent_dim,), name='hidden_state_input')
# we need to reuse the weights that is why we are getting this
# If you inspect the decoder GRU that we created for training, it will take as input
# 2 tensors -> (1) is the embedding layer output for the teacher forcing
# (which will now be the last step's prediction, and will be _start_ on the first time step)
# (2) is the state, which we will initialize with the encoder on the first time step, but then
# grab the state after the first prediction and feed that back in again.
# -
# Reuse the trained GRU layer, now driven by the feedback state input.
gru_out, gru_state_out = seq2seq_Model.get_layer('Decoder-GRU')([dec_bn, gru_inference_state_input])
# Reconstruct dense layers
dec_bn2 = seq2seq_Model.get_layer('Decoder-Batchnorm-2')(gru_out)
dense_out = seq2seq_Model.get_layer('Final-Output-Dense')(dec_bn2)
# Inference decoder: (token, state) -> (vocab distribution, next state)
decoder_model = tf.keras.Model([decoder_inputs, gru_inference_state_input],
                               [dense_out, gru_state_out])
# we want to save the encoder's embedding before its updated by decoder
# because we can use that as an embedding for other tasks.
original_body_encoding = body_encoding
body_encoding.shape
# +
#tok2.word_index.update({'_start_': 0})
#tok2.word_index.update({'_end_':len(tok2.word_index)+1})
# -
# Greedy decoding: start from the '_start_' token and feed each predicted
# token (and the GRU state) back in until '_end_' or maxlen2 is reached.
state_value = np.array(tok2.word_index['_start_']).reshape(1, 1)
state_value
decoded_sentence = []
stop_condition = False
# Inverse vocabulary: integer index -> word
vocabulary_inv = dict((v, k) for k, v in tok2.word_index.items())
#vocabulary_inv[0] = "<PAD/>"
#vocabulary_inv[1] = "unknown"
while not stop_condition:
    #print(1)
    # One step: current token + current hidden state -> distribution + next state
    preds, st = decoder_model.predict([state_value, body_encoding])
    #preds = preds[preds>0]
    # We are going to ignore indices 0 (padding) and indices 1 (unknown)
    # Argmax will return the integer index corresponding to the
    # prediction + 2 b/c we chopped off first two
    pred_idx = np.argmax(preds[:, :, 2:]) + 2
    #print(np.argmax(preds[:, :, 2:]))
    # retrieve word from index prediction
    #pred_word_str = tok.id2token[pred_idx]
    pred_word_str = vocabulary_inv[pred_idx]
    #print(pred_idx)
    print(pred_word_str)
    # Stop on the end-delimiter token or when the sentence hits max length
    if pred_word_str == '_end_' or len(decoded_sentence) >= maxlen2:
        stop_condition = True
        break
    decoded_sentence.append(pred_word_str)
    # update the decoder for the next word
    body_encoding = st
    state_value = np.array(pred_idx).reshape(1, 1)
    #print(state_value)
| section_3_notebooks/.ipynb_checkpoints/machine_translate_1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Codice
# +
#import e parsing file di log
def lettura_log(path):
    """Parse a solver log file and return its (iteration, RMSE) pairs.

    Scans for lines containing 'COUPLE(N_ITER,DISTANCE RMSE)'; the 4th
    ':'-separated field of such a line holds a '[(n, rmse)]' tuple, which
    is returned as (int, float).

    Args:
        path: path to the log file.

    Returns:
        list of (n_iter, rmse) tuples, in file order.
    """
    coppie = []
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(path, 'r') as file:
        for line in file:
            if 'COUPLE(N_ITER,DISTANCE RMSE)' not in line:
                continue
            campo = line.split(':')[3]
            # Strip the list/tuple punctuation: '[(a, b)]' -> 'a, b'
            for ch in '[()]':
                campo = campo.replace(ch, '')
            parti = campo.split(',')
            coppie.append((int(parti[0]), float(parti[1])))
    return coppie
# +
#funzione per graficare
import matplotlib.pyplot as plt
def grafico(path):
    """Plot RMSE distance (Gurobi vs TensorFlow) against iteration number
    for the log file at `path`."""
    pairs = lettura_log(path)
    iterations = [n for n, _ in pairs]
    distances = [rmse for _, rmse in pairs]
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.plot(iterations, distances)        # connecting line
    ax.plot(iterations, distances, 'or')  # red marker on every point
    ax.set_ylabel('RMSE Distance (Gurobi -TensorFlow)')
    ax.set_xlabel('Number of Iteration')
    return plt.show()
# +
def tabella(path):
    """Print an aligned text table of (iteration, RMSE) pairs from a log file."""
    pairs = lettura_log(path)
    print("N_ITER RMSE_DISTANCE")
    # Left-align iteration (14 cols) and RMSE (11 cols) per row.
    for n_iter, rmse in pairs:
        print("{:<14}{:<11}".format(n_iter, rmse))
# -
# # Versicolor
# Plot and tabulate every (c, sigma, penalization) run of the Versicolor
# experiment. The original listed each call by hand and contained a
# copy-paste slip: c200_sigma025_penalization01 was called twice while
# c200_sigma05_penalization01 was missing entirely. Iterating the full
# parameter grid fixes that and preserves the original execution order.
_LOG_TEMPLATE = "../../log/Prove-Penalization-01/Versicolor/c{}_sigma{}_penalization{}.log"
for _c in ("1", "75", "200"):
    for _sigma in ("01", "025", "05"):
        for _pen in ("01", "05", "1"):
            _path = _LOG_TEMPLATE.format(_c, _sigma, _pen)
            grafico(_path)
            tabella(_path)
| notebook/Grafici/Prove-2/Penalization-01/Versicolor-Chi-Penalization-01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="LCgVnQopb6TI"
# # Doc2Vec demonstration
#
# In this notebook, let us take a look at how to "learn" document embeddings and use them for text classification. We will be using the dataset of "Sentiment and Emotion in Text" from [Kaggle](https://www.kaggle.com/c/sa-emotions/data).
#
# "In a variation on the popular task of sentiment analysis, this dataset contains labels for the emotional content (such as happiness, sadness, and anger) of texts. Hundreds to thousands of examples across 13 labels. A subset of this data is used in an experiment we uploaded to Microsoft’s Cortana Intelligence Gallery."
#
# + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" id="hSB6W1seb6TJ" outputId="fa6730c6-06df-46ce-91c8-cd59211d24de"
import pandas as pd
import nltk
nltk.download('stopwords')
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
# -
#Load the dataset and explore.
try:
    # Running inside Google Colab: download the data next to the notebook.
    from google.colab import files
    # !wget -P DATAPATH https://raw.githubusercontent.com/practical-nlp/practical-nlp/master/Ch4/Data/Sentiment%20and%20Emotion%20in%20Text/train_data.csv
    # !wget -P DATAPATH https://raw.githubusercontent.com/practical-nlp/practical-nlp/master/Ch4/Data/Sentiment%20and%20Emotion%20in%20Text/test_data.csv
    # !ls -lah DATAPATH
    filepath = "DATAPATH/train_data.csv"
except ModuleNotFoundError:
    # Not on Colab: fall back to the local copy shipped with the repo.
    filepath = "Data/Sentiment and Emotion in Text/train_data.csv"
# + colab={"base_uri": "https://localhost:8080/", "height": 212} colab_type="code" id="lSvnHBYPb6TQ" outputId="e2aac8d5-ef66-4e02-9949-32434f8cb537"
df = pd.read_csv(filepath)
print(df.shape)
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 252} colab_type="code" id="5JEI6SH7b6TU" outputId="22cc98a5-90d0-49c9-fb58-f40d743963e9"
df['sentiment'].value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="CHajyKpmb6TY" outputId="8749211d-1a7c-43c9-bf40-d4d22e74407a"
#Let us take the top 3 categories and leave out the rest.
# Keep only tweets whose sentiment label is one of the three chosen classes.
shortlist = ['neutral', "happiness", "worry"]
df_subset = df[df['sentiment'].isin(shortlist)]
df_subset.shape
# + [markdown] colab_type="text" id="m2oiZzU5b6Tf"
# # Text pre-processing:
# Tweets are different. Somethings to consider:
# - Removing @mentions, and urls perhaps?
# - using NLTK Tweet tokenizer instead of a regular one
# - stopwords, numbers as usual.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="Rl-FfMdLb6Th" outputId="6273576c-0495-4606-da02-a7d06358ab2d"
#strip_handles removes personal information such as twitter handles, which don't
#contribute to emotion in the tweet. preserve_case=False converts everything to lowercase.
tweeter = TweetTokenizer(strip_handles=True,preserve_case=False)
# English stopword list used by preprocess_corpus below to filter tokens.
mystopwords = set(stopwords.words("english"))
#Function to tokenize tweets, remove stopwords and numbers.
#Keeping punctuations and emoticon symbols could be relevant for this task!
def preprocess_corpus(texts):
    """Tokenize each text with the tweet tokenizer, dropping stopwords and digits.

    Punctuation and emoticon symbols are deliberately kept: they can carry
    emotional signal relevant to this task.
    """
    def keep_token(token):
        # A token survives if it is neither a stopword nor a pure digit string.
        return token not in mystopwords and not token.isdigit()
    return [[token for token in tweeter.tokenize(content) if keep_token(token)]
            for content in texts]
#df_subset contains only the three categories we chose.
mydata = preprocess_corpus(df_subset['content'])
mycats = df_subset['sentiment']
# Sanity check: tokenized texts and labels must stay aligned one-to-one.
print(len(mydata), len(mycats))
# + colab={"base_uri": "https://localhost:8080/", "height": 87} colab_type="code" id="rsGwfVebb6Tl" outputId="ce668f41-578a-467e-d3e7-20ea66b53c6d"
#Split data into train and test, following the usual process
train_data, test_data, train_cats, test_cats = train_test_split(mydata,mycats,random_state=1234)
#prepare training data in doc2vec format:
# Each document gets a unique string tag (its index) as required by TaggedDocument.
train_doc2vec = [TaggedDocument((d), tags=[str(i)]) for i, d in enumerate(train_data)]
#Train a doc2vec model to learn tweet representations. Use only training data!!
# dm=1 selects the distributed-memory (PV-DM) training algorithm.
model = Doc2Vec(vector_size=50, alpha=0.025, min_count=5, dm =1, epochs=100)
model.build_vocab(train_doc2vec)
model.train(train_doc2vec, total_examples=model.corpus_count, epochs=model.epochs)
# Persist the trained model so inference can reload it later.
model.save("d2v.model")
print("Model Saved")
# + colab={"base_uri": "https://localhost:8080/", "height": 238} colab_type="code" id="hTqo26Vsb6Ts" outputId="13f5218a-a22d-400d-bd9e-d53a51c767d7"
#Infer the feature representation for training and test data using the trained model
model= Doc2Vec.load("d2v.model")
#infer in multiple steps to get a stable representation.
# NOTE(review): `steps` was renamed to `epochs` in gensim 4.x — confirm the
# installed gensim version still accepts the `steps` keyword.
train_vectors = [model.infer_vector(list_of_tokens, steps=50) for list_of_tokens in train_data]
test_vectors = [model.infer_vector(list_of_tokens, steps=50) for list_of_tokens in test_data]
#Use any regular classifier like logistic regression
from sklearn.linear_model import LogisticRegression
myclass = LogisticRegression(class_weight="balanced") #because classes are not balanced.
myclass.fit(train_vectors, train_cats)
preds = myclass.predict(test_vectors)
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(test_cats, preds))
#print(confusion_matrix(test_cats,preds))
| Ch4/02_Doc2Vec_Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Today:
# * Multiclass classification
# * Softmax regression
# * Model
# * Cost function
# * Optimization
#
# ### Resources:
# * Softmax regression: https://mcerovic.github.io/notes/SoftmaxRegression/index.html
# * Softmax regression: http://ufldl.stanford.edu/wiki/index.php/Softmax_Regression
# * Softmax function: https://medium.com/@uniqtech/understand-the-softmax-function-in-minutes-f3a59641e86d
# * Multiclass classification: Pattern Recognition and Machine Learning - Bishop
# # Softmax regression
# Import necessary libraries
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# %matplotlib inline
# Clear any graph left over from a previous run of this notebook (TF1 graph API).
tf.reset_default_graph()
# Load dataset as numpy array
data = np.loadtxt('../../data/04_SoftmaxRegression/sr_data.csv', delimiter=',')
# +
# Find index of samples with class 0
# Boolean masks selecting the rows of each class (labels live in column 2).
idx_0 = data[:, 2] == 0.0
idx_1 = data[:, 2] == 1.0
idx_2 = data[:, 2] == 2.0
# Plot dataset
fig, ax = plt.subplots()
ax.set_xlabel('Feature 1')
ax.set_ylabel('Feature 2')
# One scatter call per class so each gets its own colour and legend entry.
ax.scatter(data[idx_0][:, 0], data[idx_0][:,1], c='b', edgecolors='k', label='Class 0')
ax.scatter(data[idx_1][:, 0], data[idx_1][:,1], c='g', edgecolors='k', label='Class 1')
ax.scatter(data[idx_2][:, 0], data[idx_2][:,1], c='y', edgecolors='k', label='Class 2')
ax.grid(True, color='gray', linestyle='dashed')
ax.legend()
# +
# Data parameters
n_features = 2
n_labels = 3
train_size = len(data)
train_split = 0.85 # 85% data in train set, 15% in test set
split = int(train_split * train_size) # Split boundary index into the dataset
# Training parameters
learning_rate = 0.01
training_epochs = 5000
batch_size = 100
# +
# Split dataset on features and labels
x_data = data[:, :2]
print(data[:4, 2]) # y_data
# One-hot encode the integer class labels for the softmax loss below.
y_data = tf.one_hot(data[:, 2], n_labels, dtype=tf.uint8)
# Softmax accepts labels as one hot tensor
# https://www.tensorflow.org/api_docs/python/tf/one_hot
# Temporary session just to materialise and inspect the first few labels.
sess = tf.Session()
print(sess.run(y_data[:4]))
sess.close()
# Split data on training and test
x_train, x_test = x_data[:split], x_data[split:]
y_train, y_test = y_data[:split], y_data[split:]
# -
# ## Model
# +
# Placeholders for a mini-batch of inputs and one-hot labels (TF1 graph API).
X = tf.placeholder(tf.float32, [None, n_features], "X")
y = tf.placeholder(tf.float32, [None, n_labels], "y")
# Trainable weights and biases, randomly initialised.
W = tf.Variable(tf.random_normal([n_features, n_labels]), name="W")
b = tf.Variable(tf.random_normal([n_labels]), name="b")
# -
# Softmax over the affine transform gives per-class probabilities.
hypothesis = tf.nn.softmax(tf.add(tf.matmul(X, W), b))
# ## Cost
# Cross-entropy loss averaged over the batch.
# NOTE(review): tf.log(hypothesis) is numerically unsafe if a softmax output
# underflows to 0 — tf.nn.softmax_cross_entropy_with_logits would be stabler.
cost = -tf.reduce_mean(tf.reduce_sum(tf.multiply(y, tf.log(hypothesis)), axis=1))
# ## Optimization
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# ## Train
# Accuracy: fraction of rows whose argmax prediction matches the label.
corr_pred = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(y, 1))
acc = tf.reduce_mean(tf.cast(corr_pred, "float"))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # 5000 * 300 / 100 = 15000
    # Total mini-batch steps across all epochs.
    for step in range(training_epochs * train_size // batch_size):
        # Cycle through the training set batch by batch, wrapping around.
        offset = (step * batch_size) % train_size
        batch_data = x_train[offset:(offset + batch_size)]
        # feed_dict cannot use Tensor so we have to
        # evaluate our y_train and y_test tensors
        # NOTE(review): y_train.eval() re-runs the one-hot op every step —
        # hoisting it out of the loop would avoid redundant work.
        batch_labels = y_train.eval()[offset:(offset + batch_size)]
        err, _ = sess.run([cost, train_op], feed_dict={X: batch_data, y: batch_labels})
        if step % 100 == 0:
            print(step, err)
    # Final accuracy on the held-out test split.
    print('acc', sess.run(acc, feed_dict={X: x_test, y: y_test.eval()}))
| source/04_GLM_Softmax_Regression/Class.ipynb |
# ## Arize AI Quick Start: Training and Serving Models with Microsoft Azure ML
#
# ##### NOTE: We do not recommend using *Run All* because it takes several minutes to deploy and update models; models cannot be queried until they are active.
#
# This part of the guide consists of the following sections:
#
# #### Setup
# * Launch an Azure Databricks cluster
# * Install Arize SDK
# * Install MLflow
# * Install the Azure ML SDK
# * Create or load an Azure ML Workspace
#
#
# #### Training a Model
# * Use MLflow Tracking to track experiment
#
# #### Building an Azure Container Image for model deployment
# * Use MLflow to build a Container Image for the trained model
#
# #### Deploy the model to expose a consumable API using Azure Container Instances (ACI)
# * Create an ACI webservice deployment using the model's Container Image
#
# #### Querying the deployed model in ACI
# * Load a sample input vector from the diabetes dataset
# * Evaluate the sample input vector by sending an HTTP request
#
# #### Publishing prediction results to Arize
# * Log resulting prediction output along with input vector using Arize's SDK
#
# #### Alternatively, if using Kubernetes: Deploy the model using Azure Kubernetes Service (AKS)
# * Option 1: Create a new AKS cluster
# * Option 2: Connect to an existing AKS cluster
# * Deploy to the model's image to the specified AKS cluster
#
# #### Querying the deployed model in AKS
# * Load a sample input vector from the wine dataset
# * Evaluate the sample input vector by sending an HTTP request
#
# #### Updating the AKS deployment
# * Build an Azure Container Image for another model
# * Deploy the new model's image to the AKS cluster
# * Query the updated model
#
# #### Cleaning up the deployments
# * Terminate the ACI webservice
# * Terminate the AKS webservice
# * Remove the AKS cluster from the Azure ML Workspace
#
# This notebook uses the `diabetes` dataset in scikit-learn and predicts the progression metric (a quantitative measure of disease progression after one year after) based on BMI, blood pressure, etc. It uses the scikit-learn ElasticNet linear regression model, where we vary the `alpha` and `l1_ratio` parameters for tuning. For more information on ElasticNet, refer to:
# * [Elastic net regularization](https://en.wikipedia.org/wiki/Elastic_net_regularization)
# * [Regularization and Variable Selection via the Elastic Net](https://web.stanford.edu/~hastie/TALKS/enet_talk.pdf)
# **Note:** This notebook expects that you use a Databricks hosted MLflow tracking server. If you would like to preview the Databricks MLflow tracking server, contact your Databricks sales representative to request access. To set up your own tracking server, see the instructions in [MLflow Tracking Servers](https://www.mlflow.org/docs/latest/tracking.html#mlflow-tracking-servers) and configure your connection to your tracking server by running [mlflow.set_tracking_uri](https://www.mlflow.org/docs/latest/python_api/mlflow.html#mlflow.set_tracking_uri).
# ## Setup
# 1. Ensure you are using or create a cluster specifying
# * **Databricks Runtime Version:** Databricks Runtime 5.0 or above
# * **Python Version:** Python > 3.5.3
# 1. Install required libraries or if using Databricks Runtime 5.1 or above, run Cmd 5.
# 1. Create required libraries.
# * Source **PyPI** and enter `arize`.
# * Source **PyPI** and enter `mlflow[extras]`. This installs mlflow and all its dependencies.
# * Source **PyPI** and enter `azureml-sdk[databricks]`.
# 1. Install the libraries into the cluster.
# 1. Attach this notebook to the cluster.
# !pip install arize mlflow[extras] azureml-sdk[databricks]
# #### Write Your ML Code Based on the `train_diabetes.py` Code
# This tutorial is based on the MLflow's [train_diabetes.py](https://github.com/mlflow/mlflow/blob/master/examples/sklearn_elasticnet_diabetes/train_diabetes.py) example, which uses the `sklearn.diabetes` built-in dataset to predict disease progression based on various factors.
# +
# Import various libraries including matplotlib, sklearn, mlflow
import os
import warnings
import sys
import pandas as pd
import numpy as np
from itertools import cycle
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
# Import mlflow
import mlflow
import mlflow.sklearn
# Load Diabetes datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
# Create pandas DataFrame for sklearn ElasticNet linear_model
# Stack the feature matrix and target column side by side into one table.
Y = np.array([y]).transpose()
d = np.concatenate((X, Y), axis=1)
cols = ['age', 'sex', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6', 'progression']
data = pd.DataFrame(d, columns=cols)
# -
# #### Train the Diabetes Model
# The next function trains ElasticNet linear regression based on the input parameters of `alpha (in_alpha)` and `l1_ratio (in_l1_ratio)`.
#
# In addition, this function uses MLflow Tracking to record its
# * parameters
# * metrics
# * model
#
# **Tip:** Use `with mlflow.start_run:` in the Python code to create a new MLflow run. This is the recommended way to use MLflow in notebook cells. Whether your code completes or exits with an error, the `with` context will make sure to close the MLflow run, so you don't have to call `mlflow.end_run`.
# train_diabetes
# Uses the sklearn Diabetes dataset to predict diabetes progression using ElasticNet
# The predicted "progression" column is a quantitative measure of disease progression one year after baseline
# http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_diabetes.html
def train_diabetes(data, in_alpha, in_l1_ratio):
    """Train an ElasticNet model on *data* and log params/metrics/model to MLflow.

    Parameters
    ----------
    data : pandas.DataFrame
        Feature columns plus a 'progression' target column.
    in_alpha, in_l1_ratio : float or None
        ElasticNet hyper-parameters; None falls back to the 0.05 default.
    """
    # Evaluate metrics
    def eval_metrics(actual, pred):
        rmse = np.sqrt(mean_squared_error(actual, pred))
        mae = mean_absolute_error(actual, pred)
        r2 = r2_score(actual, pred)
        return rmse, mae, r2

    warnings.filterwarnings('ignore')
    np.random.seed(40)

    # Split the data into training and test sets. (0.75, 0.25) split.
    train, test = train_test_split(data)

    # The predicted column is "progression" which is a quantitative measure of disease progression one year after baseline
    train_x = train.drop(['progression'], axis=1)
    test_x = test.drop(['progression'], axis=1)
    train_y = train[['progression']]
    test_y = test[['progression']]

    # BUG FIX: the original tested `float(in_alpha) is None`, which can never
    # be true and raises TypeError when the argument actually is None.
    alpha = 0.05 if in_alpha is None else float(in_alpha)
    l1_ratio = 0.05 if in_l1_ratio is None else float(in_l1_ratio)

    # Start an MLflow run; the "with" keyword ensures we'll close the run even if this cell crashes
    with mlflow.start_run():
        lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)
        lr.fit(train_x, train_y)

        predicted_qualities = lr.predict(test_x)
        (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)

        # Print out ElasticNet model metrics
        print('Elasticnet model (alpha=%f, l1_ratio=%f):' % (alpha, l1_ratio))
        print(' RMSE: %s' % rmse)
        print(' MAE: %s' % mae)
        print(' R2: %s' % r2)

        # Set tracking_URI first and then reset it back to not specifying port
        # Note, we had specified this in an earlier cell
        #mlflow.set_tracking_uri(mlflow_tracking_URI)

        # Log mlflow attributes for mlflow UI
        mlflow.log_param('alpha', alpha)
        mlflow.log_param('l1_ratio', l1_ratio)
        mlflow.log_metric('rmse', rmse)
        mlflow.log_metric('r2', r2)
        mlflow.log_metric('mae', mae)
        mlflow.sklearn.log_model(lr, "model")
        modelpath = "/dbfs/mlflow/test_diabetes/model-%f-%f" % (alpha, l1_ratio)
        mlflow.sklearn.save_model(lr, modelpath)
# #### Experiment with Different Parameters
#
# Call `train_diabetes` with different parameters. Later, you'll be able to visualize all these runs in the MLflow experiment.
# %fs rm -r dbfs:/mlflow/test_diabetes
# +
# alpha and l1_ratio values of 0.01, 0.01
train_diabetes(data, 0.01, 0.01)
# alpha and l1_ratio values of 0.01, 0.75
train_diabetes(data, 0.01, 0.75)
# alpha and l1_ratio values of 0.01, .5
train_diabetes(data, 0.01, .5)
# alpha and l1_ratio values of 0.01, 1
train_diabetes(data, 0.01, 1)
# -
# ## View the run, experiment, run details, and notebook revision
#
# 1. Click the **Runs** icon in the notebook context bar to display the Runs sidebar. In the sidebar, you can view the run parameters and metrics. For example: <img src="https://docs.databricks.com/_static/images/mlflow/mlflow-notebook-experiments.gif"/>
#
# 1. Click the External Link icon <img src="https://docs.databricks.com/_static/images/external-link.png"/> in the Runs context bar to view the notebook experiment. For example: <img src="https://docs.databricks.com/_static/images/mlflow/quick-start-nb-experiment.png"/>
# ### Create or load an Azure ML Workspace
# Before models can be deployed to Azure ML, you must create or obtain an Azure ML Workspace. The `azureml.core.Workspace.create()` function will load a workspace of a specified name or create one if it does not already exist. For more information about creating an Azure ML Workspace, see the [Azure ML Workspace management documentation](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-workspace).
# +
import azureml
from azureml.core import Workspace
workspace_name = 'YOUR WORKSPACE NAME'
workspace_location = 'YOUR WORKSPACE LOCATION'
resource_group = 'YOUR RESOURCE GROUP'
subscription_id = 'YOUR SUBSCRIPTION ID'
workspace = Workspace.create(name = workspace_name,
location = workspace_location,
resource_group = resource_group,
subscription_id = subscription_id,
exist_ok=True)
# -
# ## Build an Azure Container Image for model deployment
# ### Use MLflow to build a Container Image for the trained model
#
# Use the `mlflow.azuereml.build_image` function to build an Azure Container Image for the trained MLflow model. This function also registers the MLflow model with a specified Azure ML workspace. The resulting image can be deployed to Azure Container Instances (ACI) or Azure Kubernetes Service (AKS) for real-time serving.
# Specify the run ID associated with an ElasticNet training run. You can find a run ID and model path from the experiment run, which can be found on the run details page:
#
# 
# Identifier of the MLflow run whose model we want to containerise.
run_id = 'YOUR RUN ID OF CHOICE'  # placeholder: replace with a real run ID (typo "YOU" fixed)
model_uri = 'runs:/' + run_id + '/model'
# +
import mlflow.azureml
model_image, azure_model = mlflow.azureml.build_image(model_uri=model_uri,
workspace=workspace,
model_name='model',
image_name='model',
description='Sklearn ElasticNet image for predicting diabetes progression',
synchronous=False)
# -
model_image.wait_for_creation(show_output=True)
# ## Deploy the model to expose a consumable API using [Azure Container Instances (ACI)](https://docs.microsoft.com/en-us/azure/container-instances/)
#
# Using the Azure ML SDK, deploy the Container Image for the trained MLflow model to ACI.
# +
from azureml.core.webservice import AciWebservice, Webservice
dev_webservice_name = 'diabetes-model'
dev_webservice_deployment_config = AciWebservice.deploy_configuration()
dev_webservice = Webservice.deploy_from_image(name=dev_webservice_name, image=model_image, deployment_config=dev_webservice_deployment_config, workspace=workspace)
dev_webservice.wait_for_deployment()
# -
# ## Query the deployed model in AzureML
# +
import pandas as pd
import numpy as np
from sklearn import datasets
#### Load diabetes dataset
diabetes = datasets.load_diabetes()
#### Create sample input vector
X = diabetes.data
y = diabetes.target
Y = np.array([y]).transpose()
d = np.concatenate((X, Y), axis=1)
cols = ['age', 'sex', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6', 'progression']
data = pd.DataFrame(d, columns=cols)
sample = data.drop(["progression"], axis=1).iloc[[0]]
query_input = sample.to_json(orient='split')
query_input = eval(query_input)
query_input.pop('index', None)
#print(query_input)
# -
# #### Evaluate the sample input vector by sending an HTTP request
# Query the ACI webservice's scoring endpoint by sending an HTTP POST request that contains the input vector.
# +
import requests
import json
import uuid
def query_endpoint_example(scoring_uri, inputs, service_key=None):
    """POST *inputs* as JSON to a model scoring endpoint and return the parsed reply.

    When *service_key* is given it is sent as a Bearer token, as required by
    authenticated (e.g. AKS) endpoints.
    """
    request_headers = {'Content-Type': 'application/json'}
    if service_key is not None:
        request_headers['Authorization'] = 'Bearer {service_key}'.format(service_key=service_key)

    print('Sending batch prediction request with inputs: {}'.format(inputs))
    response = requests.post(scoring_uri, data=json.dumps(inputs), headers=request_headers)
    preds = json.loads(response.text)
    print('Received response: {}'.format(preds))
    return preds
# -
dev_webservice.scoring_uri
model_id='diabetes-model'
model_version='2cdc865cd53a420cb12036ea08c62083'
### Arize Helper Utility
def construct_feature_map(inputs):
    """Convert a split-orient payload ({'columns': [...], 'data': [[...]]})
    into a {column_name: stringified_value} dict for Arize feature logging."""
    keys = inputs['columns']
    values = inputs['data'][0]
    print(f'keys: {keys}')
    print(f'values: {values}')
    feature_map = {}
    # Pair each column name with the stringified value at the same position.
    for position, key in enumerate(keys):
        feature_map[key] = str(values[position])
    return feature_map
# +
from arize.api import Client
from arize.types import ModelTypes

## Instantiate arize client
ORG_KEY = 'YOUR ARIZE ORGANIZATION KEY'
API_KEY = 'YOUR ARIZE API KEY'
# BUG FIX: the original imported the class as lowercase `client` but then
# called `Client(...)`, raising NameError; import it under its real name.
arize = Client(organization_key=ORG_KEY, api_key=API_KEY)

# Score one sample against the dev ACI endpoint.
dev_prediction = query_endpoint_example(scoring_uri=dev_webservice.scoring_uri, inputs=query_input)

prediction_id=str(uuid.uuid4())
print('Logging prediction to arize: {}'.format(prediction_id))
# Record the input features and the model's prediction with Arize.
arize.log_prediction(
    model_id=model_id,
    model_version=model_version,
    model_type=ModelTypes.NUMERIC,
    prediction_id=prediction_id,
    prediction_label=dev_prediction[0],
    features=construct_feature_map(query_input))
# -
# ## Alternatively, if using Kubernetes: Deploy the model using [Azure Kubernetes Service (AKS)](https://azure.microsoft.com/en-us/services/kubernetes-service/). (Do Option 1 or Option 2)
# ### Option 1: Create a new AKS cluster
#
# If you do not have an active AKS cluster for model deployment, create one using the Azure ML SDK.
# +
from azureml.core.compute import AksCompute, ComputeTarget
# Use the default configuration (you can also provide parameters to customize this)
prov_config = AksCompute.provisioning_configuration(vm_size='Standard_D16_v3')
aks_cluster_name = 'diabetes-cluster'
# Create the cluster
aks_target = ComputeTarget.create(workspace = workspace,
name = aks_cluster_name,
provisioning_configuration = prov_config)
# Wait for the create process to complete
aks_target.wait_for_completion(show_output = True)
print(aks_target.provisioning_state)
print(aks_target.provisioning_errors)
# -
# ### Option 2: Connect to an existing AKS cluster
#
# If you already have an active AKS cluster running, you can add it to your Workspace using the Azure ML SDK.
# +
from azureml.core.compute import AksCompute, ComputeTarget

# Get the resource group from https://portal.azure.com -> Find your resource group
resource_group = '<resource-group>'

# Give the cluster a local name
aks_cluster_name = 'diabetes-cluster'

# Attach the cluster to your workgroup
attach_config = AksCompute.attach_configuration(resource_group=resource_group, cluster_name=aks_cluster_name)
# BUG FIX: the original passed `attach_config` positionally after the keyword
# argument `name=`, which is a SyntaxError; pass it by keyword instead.
aks_target = ComputeTarget.attach(workspace, name='diabetes-compute', attach_configuration=attach_config)

# Wait for the operation to complete
aks_target.wait_for_completion(True)
print(aks_target.provisioning_state)
print(aks_target.provisioning_errors)
# -
# ### Deploy to the model's image to the specified AKS cluster
# +
from azureml.core.webservice import Webservice, AksWebservice
# Set configuration and service name
prod_webservice_name = 'diabetes-model-prod'
prod_webservice_deployment_config = AksWebservice.deploy_configuration()
# Deploy from image
prod_webservice = Webservice.deploy_from_image(workspace = workspace,
name = prod_webservice_name,
image = model_image,
deployment_config = prod_webservice_deployment_config,
deployment_target = aks_target)
# -
# Wait for the deployment to complete
prod_webservice.wait_for_deployment(show_output = True)
# ## Query the deployed model in production
# #### Evaluate the sample input vector by sending an HTTP request
# Query the AKS webservice's scoring endpoint by sending an HTTP POST request that includes the input vector. The production AKS deployment may require an authorization token (service key) for queries. Include this key in the HTTP request header.
# +
import requests
import json
def query_endpoint_example(scoring_uri, inputs, service_key=None):
headers = {
'Content-Type': 'application/json',
}
if service_key is not None:
headers['Authorization'] = 'Bearer {service_key}'.format(service_key=service_key)
print('Sending batch prediction request with inputs: {}'.format(inputs))
response = requests.post(scoring_uri, data=json.dumps(inputs), headers=headers)
preds = json.loads(response.text)
print('Received response: {}'.format(preds))
return preds
# -
prod_scoring_uri = prod_webservice.scoring_uri
prod_service_key = prod_webservice.get_keys()[0] if len(prod_webservice.get_keys()) > 0 else None
prod_prediction1 = query_endpoint_example(scoring_uri=prod_scoring_uri, service_key=prod_service_key, inputs=query_input)
# ## Update the production deployment
# ### Build an Azure Container Image for the new model
run_id2 = '<run-id2>'
model_uri = 'runs:/' + run_id2 + '/model'
# +
import mlflow.azureml
model_image_updated, azure_model_updated = mlflow.azureml.build_image(
model_uri=model_uri,
workspace=workspace,
model_name='model-updated',
image_name='model-updated',
description='Sklearn ElasticNet image for predicting diabetes progression',
synchronous=False)
# -
model_image_updated.wait_for_creation(show_output=True)
# ### Deploy the new model's image to the AKS cluster
#
# Using the [`azureml.core.webservice.AksWebservice.update()`](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice.akswebservice?view=azure-ml-py#update) function, replace the deployment's existing model image with the new model image.
prod_webservice.update(image=model_image_updated)
prod_webservice.wait_for_deployment(show_output = True)
# ### Query the updated model
prod_prediction2 = query_endpoint_example(scoring_uri=prod_scoring_uri, service_key=prod_service_key, inputs=query_input)
# ## Compare the predictions
print('Run ID: {} Prediction: {}'.format(run_id1, prod_prediction1))
print('Run ID: {} Prediction: {}'.format(run_id2, prod_prediction2))
# ## Clean up the deployments
# ### Terminate the ACI webservice
#
# Because ACI manages compute resources on your behalf, deleting the "dev" ACI webservice will remove all resources associated with the "dev" model deployment
dev_webservice.delete()
# ### Terminate the AKS webservice
#
# This terminates the real-time serving webservice running on the specified AKS cluster. It **does not** terminate the AKS cluster.
prod_webservice.delete()
# ### Remove the AKS cluster from the Azure ML Workspace
#
# If the cluster was created using the Azure ML SDK (see **Option 1: Create a new AKS cluster**), removing it from the Azure ML Workspace will terminate the cluster, including all of its compute resources and deployments.
#
# If the cluster was created independently (see **Option 2: Connect to an existing AKS cluster**), it will remain active after removal from the Azure ML Workspace.
aks_target.delete()
# ### Overview
# Arize is an end-to-end ML observability and model monitoring platform. The platform is designed to help ML engineers and data science practitioners surface and fix issues with ML models in production faster with:
# - Automated ML monitoring and model monitoring
# - Workflows to troubleshoot model performance
# - Real-time visualizations for model performance monitoring, data quality monitoring, and drift monitoring
# - Model prediction cohort analysis
# - Pre-deployment model validation
# - Integrated model explainability
#
# ### Website
# Visit Us At: https://arize.com/model-monitoring/
#
# ### Additional Resources
# - [What is ML observability?](https://arize.com/what-is-ml-observability/)
# - [Playbook to model monitoring in production](https://arize.com/the-playbook-to-monitor-your-models-performance-in-production/)
# - [Using statistical distance metrics for ML monitoring and observability](https://arize.com/using-statistical-distance-metrics-for-machine-learning-observability/)
# - [ML infrastructure tools for data preparation](https://arize.com/ml-infrastructure-tools-for-data-preparation/)
# - [ML infrastructure tools for model building](https://arize.com/ml-infrastructure-tools-for-model-building/)
# - [ML infrastructure tools for production](https://arize.com/ml-infrastructure-tools-for-production-part-1/)
# - [ML infrastructure tools for model deployment and model serving](https://arize.com/ml-infrastructure-tools-for-production-part-2-model-deployment-and-serving/)
# - [ML infrastructure tools for ML monitoring and observability](https://arize.com/ml-infrastructure-tools-ml-observability/)
#
# Visit the [Arize Blog](https://arize.com/blog) and [Resource Center](https://arize.com/resource-hub/) for more resources on ML observability and model monitoring.
#
# + pycharm={"name": "#%%\n"}
| arize/examples/tutorials/Partnerships/Arize_Tutorial_Databricks_MLFlow_AzureML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: encn423_env
# language: python
# name: python3
# ---
# # Wiki rima (5) tutorial Python notebook
#
# Use this notebook to help you code and calculate answers to the Week 5 tutorial question. We'll assume you're familiar with Python operations now, so get straight to it.
#
# Remember, useful equations for the tutorial should be in your class notes or the lecture summary notes on Learn.
#
# Load packages - add any others you might need
import pandas as pd
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
# +
# Begin code!
| Wiki_rima.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 7.3. Getting started with Bayesian methods
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
# %matplotlib inline
def posterior(n, h, q):
    """Posterior density of the coin bias ``q`` after seeing ``h`` heads in ``n`` flips.

    Assumes a uniform prior over q; the ``(n + 1)`` factor normalizes the
    binomial likelihood into a proper probability density over q.
    """
    likelihood = st.binom(n, q).pmf(h)
    return likelihood * (n + 1)
n = 100
h = 61
q = np.linspace(0., 1., 1000)
d = posterior(n, h, q)
# + podoc={"output_text": "Posterior distribution"}
fig, ax = plt.subplots(1, 1)
ax.plot(q, d, '-k')
ax.set_xlabel('q parameter')
ax.set_ylabel('Posterior distribution')
ax.set_ylim(0, d.max() + 1)
# -
| chapter07_stats/03_bayesian.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.5
# language: python
# name: python3
# ---
# # Some visualizations
# +
import numpy as np
import matplotlib.pyplot as plt
# Fixing random state for reproducibility
np.random.seed(19680801)
N = 100
x = np.random.rand(N)
y = np.random.rand(N)
colors = np.random.rand(N)
area = (40 * np.random.rand(N))**2  # marker areas for radii from 0 to 40 points (comment fixed: was "0 to 15")
plt.scatter(x, y, s=area, c=colors, alpha=0.5)
plt.show()
#done
| visu.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:gti770_env]
# language: python
# name: conda-env-gti770_env-py
# ---
# # Laboratoire 0 : Initiation au langage de programmation Python3
# #### Département du génie logiciel et des technologies de l’information
#
# | Étudiants | NOMS - CODE PERMANENT |
# |-----------------------|---------------------------------------------------------|
# | Cours | GTI770 - Systèmes intelligents et apprentissage machine |
# | Session | SAISON ANNÉE |
# | Groupe | X |
# | Numéro du laboratoire | X |
# | Professeur | Prof. NOM |
# | Chargé de laboratoire | NOM |
# | Date | DATE |
# ## Introduction
#
# L'objectif de ce laboratoire est de vous familiariser avec Python et son environment. Il est important de suivre les étapes de ce cours et de pratiquer. Il est également important de lire la documentation offciel de python.
# ## Les basiques de Python
# Dans ce laboratoire, nous allons voir les basiques de Python, mais ce laboratoire n'est pas exhausive, il est fortement conseillé d'aller sur la documentation de python pour avoir plus information : https://docs.python.org/3.6/index.html
#
# ### Les variables
#
# Une variable en python peut être assignée nimporte quelle type de valeur
a = 2 # a est de type int
b = "astring" # b est de type string
a = "Two" # a est maintenant de type string
# ### La fonction print
# La fonction print de Python peut afficher le contenu de nimporte quelle valeur et de nimporte quelle variable
a = 1
c = 1
print(a)
b = "astring"
print(b)
a = 'One' #Les quotes simples sont la même chose que les doubles quotes
print(a)
print("{} is {}".format(c, a)) #Print utilisant les format
# ### Les structures de Python
#
# Python supporte plusieurs types type de structure : Tuple, List, Dictionnary....
# Tres important, en Python, les indexes commencent toujours à 0.
#
# Visiter https://docs.python.org/2/tutorial/datastructures.html
#
# #### Les lists
# Les lists en Python est une structure de donnee qui permet de stocker plusieurs valeurs de different types dans une seule variable
l = []
print(l)
l = [1,2,3,4,5,6]
print(l)
l.append(7) # Concatener 7 à la fin de la liste
print(l)
l1 = [1, "1", 2, "2"]
# pour avoir la taille du list, on peut utiliser len
print(len(l))
# #### Les tuples
# Les tuples est une structure de donnee similaire aux lists. Contrairement aux lists, les tuples sont immutables.
t = (1,2,3)
print(t)
t1 = (1, "One", 2, "Two")
print(t1)
# pour avoir la taille d'un tuple, on peut utiliser aussi len
print(len(t1))
t[2] = 4 # renvoie une erreur parce que tuple est immutable
# #### Les dictionnary
# Les dictionnary/dictionnaires est une structure de donnee associative. Ils associent une clé et une valeur. Les clés doivent être obligatoirement uniques
d = {}
d['one'] = 1
d[1] = 1
d[2] = 'two'
print(d)
# pour avoir la taille d'un dictionnaire, on peut utiliser aussi len
print(len(d))
# ### Les conditions if/else en Python
#
# Comme tous les languages de programmation, Python possede des conditions sous forme de if/elif/else
a = 1
b = a + 1
c = 1
d = 2
if b % 2 == 0 and a == 1:
print("IF")
elif c == 1 or d == 2:
print("ELIF")
else:
print("ELSE")
# ### Les loops/boucle en Python
#
# Python possede 2 types de boucle While et For.
# #### While loop
# La boucle while est une boucle permettant de continuer une operation tant que la condition de la boucle est toujour remplie
n = 10
while n > 0:
print (n)
n -= 1
# #### For loop
# La boucle for est très similaire a la boucle while. En python, la boucle for est beaucoup plus souvent utilisée parce qu'on peut iterer directement sur les contenus des structures
# +
l = [7,6,5,4,3,2,1]
#For each loop sur une liste
for element in l:
print(element)
# -
#For loop avec index
for index in range(0,len(l)):
print(l[index])
#For loop avec index et element
for index,element in enumerate(l):
print(index,element)
# ### Fonctions
# Une fonction peut etre creer de maniere tres simple en Python
# +
def addAndPrint(x, y):
    """Compute x + y, print the sum, and return it."""
    total = x + y
    print(total)
    return total
a = addAndPrint(1,2)
# -
# ### Lecture et écriture de fichier
#
# La lecture et écriture en Python est très simple.
#
# Example:
#Ecriture de fichier
fid = open('path/to/outputfile.xyz', 'w') # option w veut dire write
l1 = "This is a normal line"
fid.write(l1)
l2 = "Si tu veux une nouvelle ligne, il faut ajouter\n"
fid.write(l2)
fid.close() # Fermeture de fichier
#lecture de ficher entier
fid = open('path/to/outputfile.xyz', 'r') # option r veut dire read
txt = fid.read() # Cette ligne permet de lire tout le fichier
print(txt)
fid.close() # Fermeture de fichier
# Read a file line by line ('r' means read-only mode).
fid = open('path/to/file.xyz', 'r')
for line in fid:
    # Fixed: `print line` is Python 2 syntax and raises a SyntaxError
    # under the Python 3 kernel this notebook declares.
    print(line)
fid.close()  # close the file
# ### Les objets
#
# Python est un language d'objet, cela veut dire qu'on peut créer des object et leur donner des fonctionnalités. Étant donné que ce cours n'est pas un cours de programmation, on va vous donner une description rapide pour que vous puissiez savoir les utiliser.
#
class MyClass:
    """Minimal example class: a constructor, an instance attribute, and a method."""

    def __init__(self, something):
        # __init__(self, ...) is the constructor: it specifies how an
        # instance of the object is built. `self` is a reference to the
        # instance under construction.
        self.somethingElse = something  # attribute stored on the instance

    def myMethod(self):
        # A method is declared like a function inside the class scope;
        # its first parameter must be `self`.
        # Fixed: `print self.somethingElse` is Python 2 syntax and raises
        # a SyntaxError under the Python 3 kernel this notebook declares.
        print(self.somethingElse)
# ### Import des packages en Python
#
# Par défault, Python ne possède pas tous les fonctionnalités, en important des packages, on donne a Python plus de fonctionnalité qu'il possède.
#
# Example:
#
# +
import numpy #import numpy the mathematical package
# Pour utiliser numpy il faut ajouter le prefix numpy devant les fonctions
a = numpy.ones(4)
# Cette écriture est longue, on peut donner un alias a un package pour gagner en temps
import numpy as np
# Meme chose qu'en haut except on utilise notre alias np
a = np.ones(4)
# -
# On peut également importe qu'une partie d'un package
# Example:
# +
from numpy import ones
a = ones(4) # Remarque: On n'a plus besoin d'utiliser le prefix ici
# Bien sur, on peut importer plusieurs fonction en meme temps
from numpy import zeros, array
# -
# À eviter:
from numpy import * # Ici, on importe tous les fonctions de numpy et on pourra les utiliser sans prefix.
# !!!!!, Cette écriture est à éviter absolutment parce qu'il réduit la visibilité du code
# et peut entrainer des erreurs.
# IMPORTANT: Ne pas utiliser cette écriture dans vos rendus, vous serez PENALISES.
# ## Numpy
#
# Numpy est un package mathématique permettant à Python de faire des manipulation matricielle, des operations mathématiques, etc...
#
# ### Basiques matrices creation
# +
import numpy as np #import numpy
print(np.ones(4)) # Vector de 4 elements de 1
print(np.zeros((2,2))) #Matrix 2x2 avec que des zeros
a = np.array([[1.,4.,4],
[5.,6.,7]])
print(a)
# pour avoir le nombre de lignes et de colonnes de la matrice a
# on utilise shape
print(a.shape) #2 lines et 3 colonnes
# -
# ### Les operations
# Les operations de bases de numpy sont élément par élément
A = np.ones(4)
print(A)
B = A * 8
print(B)
C = np.log2(B)
print(C)
# Il existe également des opérations entre matrices.
# +
A = np.array([[1,1],[2,2]])
B = np.array([[-1,1],[1,1]])
print("A:\n", A)
print("A transpose:\n", A.T)
print("B:\n", B)
print("AB:\n", A.dot(B))
# -
# ### Indexing
# The numpy arrays can be indexed in various ways, using the [] operator. Note that in numpy, the indexes start at 0.
# Example:
#
#
#
#
#
A = np.array([[1,2,3],
[4,5,6]])
print(A)
# A has 2 rows and 3 columns.
print(A[0,1]) # retourne "2": l'élément de la ligne 0, colonne 1
print(A[1,2]) # retourne "6": l'élément de la ligne 1, colonne 2
A [2,0] # Error: Cette matrice a que 2 lines
# En Python, on peut utiliser le symbol ":" pour indexer. Utilisé seul, le symbol veut dire soit "tous les lignes ou tous les colonnes"
# +
print(A[:,0]) # returns [1, 4] - la colonne 0
print(A[1,:]) # returns [4,5,6] - la ligne 1
# -
# On peut également utiliser le symbol ":" pour indexer partiellement une matrice
# +
print(A[0,1:]) # retourne [2,3]: l'élément de la ligne 0, colonne 1 à la fin
print(A[0,: 2]) # retourne [1,2]: l'élément de la ligne 0, colonne 0 jusqu'à 2 (= retourn colonne 0 et 1)
# -
# #### Python list à Numpy array
#
# Il est très facile de convertir de Python list a numpy array
l = [[1,2,3], [4,5,6]]
print(l)
na = np.array(l)
print(na)
# ## Matplotlib
# Matplotlib est un package permettant d'afficher les figures.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
x = np.arange(0,2*np.pi,0.1) #On crée une liste x de 0 à 2 * PI avec un pas de 0.1
#print x
y = np.sin(x) #On applique la fonction sinus sur la liste x
#print y
plt.plot(x,y,'o')
plt.xlabel('x axis')
plt.ylabel('y axis')
plt.title('Easy')
plt.grid(True)
plt.show()
# -
# ### Image Handling
#
# Il y a plusieurs façon de charger une image en Python. On peut utiliser cv2(opencv), matplotlib out scikit. Tous ces packages ont une chose en commun, les images chargés par n'importe quel de ces packages se trouveront sous le format matricielle
# +
# En utilisant scipy
from scipy.misc import imread, imshow
imgMatrix1 = imread("path/to/image.jpg") #imgMatrix1 est en RGB
plt.imshow(imgMatrix1)
# +
#En utilisant opencv
import cv2
imgMatrix2 = cv2.imread("path/to/image.jpg") #imgMatrix2 est en BGR
# -
# Conversion de opencv à scipy
from skimage import img_as_float
image = img_as_float(imgMatrix2) #imgMatrix2 une image chargée avec opencv
from skimage import img_as_ubyte
cv_image = img_as_ubyte(imgMatrix1) #imgMatrix1 une image chargée avec skimage
# ### Aide
#
# Pour connaitre les informations vous pouvez utiliser la commande "?" pour l'aide en ligne.
# Vous pouvez également utiliser la fonciton "help" pour avoir des information sur un object de Python
help(list)
# ### Quelques bibliothèques utiles
#
# Il y a beaucoup de bibliothèque en Python, pour ce cours, on vous conseille d'aller lire les informations/tutoriels sur des packages suivant :
# 1. scipy - https://www.scipy.org/
# 2. scikit - http://scikit-learn.org/
# 3. et d'autres packages sur le machine learning comme theano etc...
# ### Format de rendu
# Pour le rendu des labortoires, vous allez rendre les fichiers les rapports sous format de ipynb (ipython notebook).
# Pour chaque exercice, il est demandé de répondre aux question posé et ensuite le code(commenté !!) s'il y a lieu.
# ## Exercices
#
# **Pour ce premier laboratoire, vous pouvez utiliser les packages random, numpy, math, csv et matplotlib, tous les autres packages sont interdits. Vous êtes encouragés à faire des recherches sur Google pour l'utilisation de ces packages. Par contre, le copie-coller du code sans compréhension est interdit.**
#
#
# ### Exercice 1
#
#
# 1. Implémenter la fonction f(x) = x
# 2. Afficher la fonction f entre -6 et + 6
# 3. Implémenter la fonction sigmoid (https://en.wikipedia.org/wiki/Sigmoid_function)
# 4. Afficher la fonction f entre -6 et + 6
#
# ### Exercice 2
# 1. Télécharger la base de données galaxies depuis Moodle
#
# 2. En utilisant le fichier GTI770_label_data_set.csv, charger les 50 premières données de chaque classes. Toutes les images seront chargées dans une matrice (X) et les labels dans une matrice(Y). X doit avoir la forme (nombre d'image, nombre de canaux(RGB), hauteur, largeur) et Y (nombre d'image, la classe)
#
# 3. Afficher de manière aléatoire, 10 images de chaque classes.
#
# 4. Séparer les données de la matrice X en 2 matrices avec un ratio de 70% (X_train) et 30% (X_test), ces 2 matrices doivent avoir le même nombre de classes. Faire la même chose pour (Y). Vérifiez que les données ont toujours les mêmes classes.
#
# 5. Cropper toutes les images en utilisant le centre de l'image, la nouvelle dimension doit faire 50x50, vous pouvez utiliser que numpy pour cet exercice. Mettez tous ces images croppées dans une matrice(X_train_crop)
#
# 6. Afficher une image de chaque catégorie avec le crop.
#
# 7. En utilisant seulement numpy, convertir les images en binaires et mettez les dans une matrice (X_train_binary).
#
# 8. Pour chaque image, compter le nombre de pixel noir et pixel blanc, mettre tous les résultats dans une matrice (X_train_plot) avec la forme (nombre d'images, 2). Commentez la fonction de la matrice (X_train_plot)
#
# 9. Afficher tous les points contenu dans (X_train_plot) sur un graphique en utlisant matplotlib
| GTI770-TP00.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Computação vetorizada
# ## Objetivos
#
# - Compreender as capacidades da computação vetorizada;
# - Associar conceitos abstratos de Matemática a estruturas computacionais;
# - Saber estruturar dados em arrays multidimensionais;
# ## Introdução
#
# A computação científica é uma ciência interdisciplinar que procura resolver problemas de alta complexidade utilizando recursos computacionais. Ao longo das décadas, a capacidade de processamento computacional aumentou dramaticamente. Em junho de 2020, o site [[Top500]](https://www.top500.org/news/japan-captures-top500-crown-arm-powered-supercomputer/), que atualiza semestralmente a lista dos computadores mais potentes do mundo, divulgou que o primeiro da lista agora é o Fugaku, um computador de alto desempenho japonês capaz de realizar 415,5 petaflops no teste de referência Linpack para medição de desempenho. Se um petaflop já equivale à impressionante marca de um quadrilhão ($10^{15}$) de operações numéricas executadas por segundo, o que dizer de 415,5 vezes isto?! É realmente um número inimaginável. Para saber mais sobre medidas utilizadas na computação de alto desempenho, veja esta página da [[Universidade de Indiana]](https://kb.iu.edu/d/apeq).
#
# Todo este potencial serve para resolver problemas de engenharia, das ciências da saúde, das ciências atmosféricas, de energia, entre tantas outras áreas. Por outro lado, a computação científica depende de algoritmos e projetos de software otimizados e bem construídos para atingir cada vez mais eficência, portabilidade e facilidade de execução. Em ciência de dados, embora o foco não seja resolver problemas utilizando os mesmos moldes da computação científica, devemos ter em mente que grande parte das ferramentas computacionais que permitem que um problema da ciência de dados seja resolvido deve-se a um trabalho muito minucioso de cientistas da computação e engenheiros que trabalham em nível de hardware. E é aí que entra a *computação vetorizada*.
#
# O conceito de *computação vetorizada*, ou *computação baseada em arrays* está relacionado à execução de operações que podem ser feitas de uma só vez a um conjunto amplo de valores. Até agora, vimos no curso, por exemplo, que podemos percorrer uma lista de números inteiros e calcular o quadrado desses números um de cada vez utilizando um laço, ou mesmo realizando um mapeamento. A computação vetorizada, por outro lado, evita que tais operações dependam de laços e iterações. Com ela, podemos simplesmente aplicar uma função ao *array* (uma coleção de dados) inteiro de uma só vez e produzir um *array* com o resultado desejado diretamente. Ou seja, a ideia principal da computação vetorizada é evitar laços e cálculos com repetições a fim de acelerar operações matemáticas. O nome *vetorizada* está relacionado a *vetor*. Como veremos aqui, estruturas multidimensionais e, mais geralmente, *arrays*, identificam-se com a nossa compreensão de vetores, matrizes e tensores.
#
# Vetores são arrays unidimensionais. Matrizes são arrays bidimensionais. Tensores são arrays de três ou mais dimensões. Todavia, é importante salientar que o conceito de "dimensão" em *computação vetorizada* deve ser distinguido da ideia mais abstrata de dimensão como você estudadará em Cálculo Vetorial, Geometria Analítica ou Álgebra Linear. *Arrays* possuem alguns atributos, tais como "comprimento", "formato" e "dimensão, os quais dizem respeito, de certa forma, à quantidade de seus elementos e ao modo como ocupam a memória. Esses nomes variam de linguagem para linguagem. Em Python, existem funções e métodos específicos para verificar comprimento, formato e dimensão, tais como `len`, `shape` e `ndim`. As duas últimas serão apresentadas a seguir. Em outras linguagens, usa-se também `size` para desempenhar o mesmo papel que `shape`. A palavra "dimension", em Python, é encontrada de maneira explicativa em documentações. Para não confundir "comprimento", "formato" e "dimensão", redobre seu entendimento.
#
# ## Comprimento, tamanho e dimensão
#
# Para exemplificar o que queremos dizer com "comprimento", "tamanho" e "dimensão", vejamos uma ilustração. Se $x_1$ e $x_2$ são dois números inteiros, a lista `[x1,x2]` seria um array unidimensional, mas de comprimento dois (você verifica isto com `len`). Agora, imagine que $(x_1,x_2)$ seja a notação matemática para representar as coordenadas de um ponto do plano cartesiano. Sabemos da geometria que o plano cartesiano tem duas dimensões. Porém, poderíamos, computacionalmente, usar a mesma lista anterior para armazenar no computador essas duas coordenadas. A lista continuaria sendo unidimensional, porém de tamanho dois. Logo, embora a entidade matemática seja bidimensional, não necessariamente a sua representação computacional deve ser bidimensional.
#
# Vejamos outra ilustração. Uma esfera é um sólido geométrico. Cada ponto da esfera está no espaço tridimensional. Isto significa que precisamos de 3 coordenadas para localizar este ponto. Embora você talvez tenha estudado pouco ou nada sobre a geometria analítica em três dimensões, o exemplo que daremos aqui será simples. Do mesmo modo que o caso anterior, suponha você tenha não apenas $x_1$ e $x_2$ como dois números inteiros, mas também um terceiro, $x_3$, para montar as coordenadas do seu ponto espacial. Você poderia representá-lo, matematicamente, por uma tripleta $(x_1,x_2,x_3)$ sem problema algum. Por outro lado, no computador, a lista `[x1,x2,x3]` seria um *array* adequado para armazenar os valores das suas coordendas. Entretanto, esta lista continuaria sendo um array unidimensional, mas com tamanho 3. Portanto, *arrays* unidimensionais podem representar dados em dimensões maiores do que um.
#
# Vejamos outra ilustração. Uma matriz 2 x 2 pode ser escrita, em matemática, utilizando 4 números da seguinte forma:
#
# $$\begin{bmatrix}
# a_{11} & a_{12}\\
# a_{21} & a_{22}
# \end{bmatrix}$$
#
# Uma matriz é um *array* formado por outros *arrays*. Usando listas, a construção anterior poderia ser representada, no computador, por exemplo, com a lista de listas `[[a11,a12],[a21,a22]]`. Porém, teríamos que ter total controle sobre isto para poder dizer que `[a11,a12]` é a primeira linha da matriz e que `[a21,a22]` é a segunda linha. Neste exemplo, a matriz é uma entidade matemática caracterizada como bidimensional por ser um "retângulo (ou quadrado) cheio de números" , mas para a Python, ela deve ser um *array bidimensional* basicamente porque existem "duas direções" nos dados: linhas e colunas. Agora, você consegue imaginar o que seria uma matriz de matrizes?
#
# Imagine duas folhas de papel A4 postas uma sobre a outra em uma mesa. Agora, pense como se cada folha de papel fosse uma matriz. No final das contas, você tem uma "caixa com matrizes dentro". Isto é o que seria um tensor, uma "caixa de números", embora, física e matematicemente, não seja esta a definição. Uma matriz de matrizes é um *array tridimensional*. Se as suas duas matrizes fossem idênticas e iguais à do exemplo acima, você teria duas listas de listas `[[a11,a12],[a21,a22]]`, quatro listas (2 iguais a `[a11,a12]` e 2 iguais a `[a21,a22]`) e uma superlista com duas listas de listas dentro!
#
# Como veremos adiante, o *numpy* é a ferramenta ideal para lidar com tudo isso.
#
#
# ## O pacote *numpy*
#
# O *numpy* é a biblioteca padrão em Python para trabalhar com *arrays* multidimensionais e computação vetorizada. Ela praticamente dá "superpoderes" às listas e permite que trabalhemos com cálculos numéricos de maneira ágil, simples e eficiente. Com *numpy*, também podemos ler e escrever arquivos, trabalhar com sistemas lineares e realizar muito mais. Para importar o numpy use a instrução abaixo, onde `np` é um alias padrão:
#
# ```python
# import numpy as np
# ```
#
# Nesta aula, daremos uma introdução aos aspectos elementos do *numpy* para criar e manipular *arrays* multdimensionais. A grande regra é: vetorize seus cálculos numéricos o máximo possível!
import numpy as np
# ## Motivação
#
# Este exemplo compara a eficiência de operações feitas com listas comuns e com *numpy*.
# +
# 1 µs = 1/1e6 segundo
# 1 ns = 1/1e9 segundo
L = range(500)
# %timeit -n 10 [i**2 for i in L] # executa o laço 10 vezes
a = np.arange(500)
# %timeit -n 10 a**2 # eleva ao quadrado diretamente 10 vezes
# -
# ## Criação de arrays unidimensionais (1D)
a = [1,2,3]
np.array(a) # a partir de lista
np.array([1,2,3]) # diretamente
np.array([2]*5)
# ## Criação de arrays bidimensionais (2D)
A = [ [1,2], [0,2] ] # lista de listas
np.array(A) # matrix 2 x 2
np.array([ [1,2], [0,2] ]) # diretamente
A2 = [[1,2,3],[4,3,2]] # cada lista é uma linha da matriz
np.array(A2) # matriz 2 x 3
np.array([1,1],[0,1]) # parênteses externos são obrigatórios!
# ### Dimensão, formato e comprimento
x = np.array(a)
np.ndim(x) # aplica a função ndim
x.ndim # como método
np.shape(x) # formato
x.shape # sem parênteses
len(x) # comprimento
X = np.array(A)
np.ndim(X) # array bidimensional
np.shape(X)
len(X) # apenas um comprimento. Qual?
X2 = np.array(A2)
len(X2) # apenas da primeira dimensão. LINHAS
X2.shape
# ## Funções para criação de arrays
# ### `arange`
# **Exemplo:** crie um array de números ímpares positivos menores do que 36.
np.arange(1,36,2) # start,stop,step
# **Exemplo:** crie um array de números pares positivos menores ou iguais a 62.
np.arange(0,63,2)
# **Exemplo:** calcule o valor de $f(x) = x^3 + 2x$ para $x$ elementos dos arrays anteriores.
# Evaluate f on both arrays at once (vectorized — no explicit loop needed).
imp, par = np.arange(1, 36, 2), np.arange(0, 63, 2)
# Fixed: the exercise asks for f(x) = x^3 + 2x, but the original lambda
# computed x**3 + 2 (the linear term was missing its factor of x).
f = lambda x: x**3 + 2*x
fi, fp = f(imp), f(par)
print(fi)
print(fp)
# ### `linspace`
#
# **Exemplo:** crie um array igualmente espaçado de elementos em [0,1] com 11 elementos.
np.linspace(0,1,num=11)
np.linspace(0,1,11) # 'num' pode ser omitido
x = np.linspace(0,1,10,endpoint=False) # exclui último
x
y = np.arange(0,1,0.1) # equivalente
y
x == y # comparação é elemento a elemento
x == -y # apenas 0 é True
x[1:] == y[1:]
# ### `all` e `any`
np.all( x == y ) # verifica se todos são 'True'
np.any( x == -y ) # verifica se pelo menos um é 'True'
# ### `random`
# **Exemplo**: crie um *array* 1D com 5 números aleatórios entre [0,1].
r = np.random.rand(5)
r
# **Exemplo**: crie um *array* 1D com 50 números inteiros aleatórios entre [0,7].
r2 = np.random.randint(0,7+1,50) # menor, maior é 8 (exclusive), tamanho
r2
# **Exemplo**: crie uma matriz m x n com números inteiros aleatórios entre inteiros [l,h].
def gera_matriz(m,n,l,h):
    """Return an m x n matrix of random integers drawn from the closed interval [l, h].

    ``np.random.randint`` treats its upper bound as *exclusive*, so ``h + 1``
    is passed to make ``h`` itself a possible draw, matching the stated
    contract "inteiros entre [l, h]".
    """
    return np.random.randint(l,h+1,(m,n)) # tupla (m,n); h+1 makes h inclusive
gera_matriz(2,2,0,1)
gera_matriz(3,2,0,4)
gera_matriz(4,4,-2,7)
# ### `ones`
#
# Criando arrays unitários.
np.ones(4)
np.ones((3,2)) # tupla necessária para linhas e colunas
# ### `eye`
#
# Criando arrays 2D identidade. 1 na diagonal e 0 nas demais.
np.eye(3) # matriz identidade 3 x 3
# ### `zeros`
#
# Arrays nulos.
np.zeros(3)
np.zeros((2,4)) # 2 x 4
# ### `full`
#
# Arrays de valor constante.
np.full(3,0) # 1 x 3 com constante 0
np.full(shape=(3,),fill_value=0)
F1 = np.full(shape=(2,2),fill_value=1) # 2 x 2 com 1
F1
F1 == np.ones(2) # mesmo resultado que ones
# Outras maneiras:
F2 = 3*np.ones((4,4))
F2
# ## Especificando tipos de dados
#
# Observe o seguinte exemplo:
F2
F3 = np.full((4,4),3)
F3
F2 == F3 # valores iguais
F2.dtype == F3.dtype # tipos diferentes
F2.dtype
F3.dtype
# Especificamos o tipo de dados com `dtype`.
np.ones((4,2),dtype=bool) # matriz de booleanos
np.ones((4,2),dtype=str) # matriz de strings; 'U1' diz que há no máximo 1 caracter
S = np.array(['dias','mes','ano'])
S.dtype # 4 é o no. máximo de caracteres nas strings
# ## Indexação e fatiamento
#
# Funcionam de maneira similar ao uso com listas.
I = np.linspace(0,20,11)
I
I[3],I[2:4],I[5:8],I[-4:-1]
I[::-1] # invertendo o array
I2 = np.array([I,2*I,3*I,4*I])
I2
# Em arrays bidimensionais, a indexação é feita por meio de uma tupla. Porém, a explicitação dos parênteses é desnecessária.
I2[(2,3)] # 3a. linha; 4a. coluna
I2[2,3]
I2[0,:] # 1a. linha
I2[1,:] # 2a. linha
I2[-1,:] # última linha
I2[:,0] # 1a. coluna
I2[:,1] # 2a. coluna
I2[:,8] # 9a. coluna
I2[:,2:4] # 3a. - 4a. coluna
I2[1:3,6:10] # submatriz: linhas 2 - 3; 7-10
# ### Alteração de valores
#
# Os arrays são mutáveis por indexação.
A3 = np.random.rand(4,4)
A3
A3[0:4,0] = -1
A3
A3[:,-1] = -1
A3
A3[1:3,1:3] = 0
A3
# Podemos alterar valores com arrays.
A3[1,:] = -2*np.ones(4)
A3
# A indexação pode usar um comprimento de passo (*step*).
A3[0:4:3,1:3] = np.full((1,2),8) # na indexação esquerda, 1a. linha : 4a. linha : step de 3
A3
# ### `newaxis`
#
# `newaxis` é uma instância do `numpy` que permite aumentar de 1 a dimensão de um array existente.
# **Exemplo:** como inserir a diagonal de uma matriz em uma segunda matriz como uma coluna adicional?
# Criamos duas matrizes aleatórias.
# matriz 4 x 4 de inteiros aleatórios entre 0 e 9
B1 = np.random.randint(0,10,(4,4))
B1
# matriz 4 x 4 de inteiros aleatórios entre -10 e 9
B2 = np.random.randint(-10,10,(4,4))
B2
# Extraímos a diagonal da primeira.
# diagonal de B1
db1 = np.diag(B1)
db1
# Notemos agora que as dimensões são diferentes.
print(B2.ndim)
print(db1.ndim)
# Para podermos aglutinar a diagonal como uma nova coluna na primeira matriz, primeiro temos que transformar o array unidimensional para uma matriz.
db1 = db1[:,np.newaxis]
print(db1.ndim) # agora o array é bidimensional
db1
# `newaxis` é um "eixo imaginário" incluído *inplace*, mas que altera dinamicamente o array. No caso acima, o array tornou-se em uma coluna.
# Agora, podemos "colar" um array 2D com outro por uma concatenação.
# ### `concatenate`
#
# `concatenate` é usado para concatenar *arrays*. A concatenação requer uma tupla contendo os *arrays* a concatenar e o eixo de referência.
B3 = np.concatenate((B2,db1), axis=1)
B3
# No caso acima, `axis=1` indica que a concatenação é ao longo da coluna. Dessa forma, inserimos a segunda diagonal como uma coluna adicional na segunda matriz. Claramente, isto só é possível porque ambas as matrizes eram de mesmo formato.
# #### `axis`
#
# Nos arrays multidimensionais do Python, `axis` é usado para indicar a "direção" dos dados. Em arrays bidimensionais, `axis=0` refere-se à direção de cima para baixo (ao longo das linhas), ao passo que `axis=1` refere-se à direção da esquerda para a direita (ao longo das colunas).
#
# **Obs.:** note que a palavra `axis` ("eixo") deve ser usada, e não "axes" ("eixos").
# Para aglutinar uma linha na matriz anterior, fazemos uma concatenação em linha.
# array de zeros com mesmo número de colunas de B3
db2 = np.zeros(np.shape(B3)[1])
db2
db2 = db2[np.newaxis,:] # cria o "eixo imaginário" na direção 0
B4 = np.concatenate((B3,db2),axis=0) # concatena ao longo das linhas
B4
# ## Indexação avançada
#
# Podemos usar máscaras como artifícios para indexação avançada.
IA1 = np.arange(-10,11)
IA1
# Vamos criar um *array* aleatório de True e False no mesmo formato que o *array* anterior.
mask1 = np.random.randint(0,2,np.shape(IA1),dtype=bool)
mask1
# Esta *máscara booleana* pode ser aplicada no array para extrair apenas os elementos cujos índices são marcados como `True` pela máscara.
IA1[mask1]
# Há maneiras mais diretas aplicáveis a filtragens. Para extrair os valores negativos do array:
IA1 < 0 # máscara booleana
IA1[IA1 < 0]
# Para extrair os valores positivos do array:
IA1[IA1 > 0] # máscara booleana para positivos
# Para extrair os valores no intervalo $]-2,5[$, fazemos:
IA1[(IA1 > -2) & (IA1 < 5)] # & é o operador booleano 'elemento a elemento'
# Para extrair pares e ímpares, poderíamos fazer:
pares, impares = IA1[IA1 % 2 == 0] , IA1[IA1 % 2 == 1]
pares,impares
# Podemos usar listas como máscaras:
alguns = pares[[0,2,3,5]] # acessa 1o., 3o. 4o. e 6o. elemento de 'pares'
impares[alguns] # estude este caso
# No caso acima, os valores de 'alguns' (-10, -6, -4 e 0) são usados como índices de 'impares', que tem comprimento 10. O índice -10 é uma indexação reversa válida (não excede o comprimento do array) e acessa o primeiro elemento, que é -9. O índice -6 acessa o sexto elemento a partir da direita, que é -1. O índice -4 acessa o quarto elemento a partir da direita, que é 3. Por fim, o índice 0 acessa o primeiro elemento, que é -9.
# ## Operações elemento a elemento
#
# As operações aritméticas e de cálculo são feitas elemento a elemento nos *arrays*. Já mostramos alguns exemplos acima, mas vamos tornar isto mais claro aqui.
a = np.array([1,2,3])
b = np.array([4,5,6])
# operações elemento a elemento
print(a + b)
print(a - b)
print(a * b)
print(a / b)
print(a ** b)
2*a + 4*b - 6*b**2 + 1.1/2*a
# ## Funções matemáticas
#
# O `numpy` possui a maioria das funções disponíveis no módulo `math` e outras mais. As funções são diretamente aplicáveis aos *arrays*. Lembre-se que para fazer o mesmo usando listas, tínhamos de construir meios de iterar sobre elas e aplicar a função a cada elemento por vez. Isto não é mais necessário com o `numpy`. Eis a beleza da computação vetorizada!
#
# Vejamos uma série de exemplos.
x = np.arange(10)
x
np.sqrt(x)
np.cos(x) + 2*np.sqrt(x)
y = np.sin(2*x)
z = np.exp(x + y)
y - z
# ### Problema resolvido (Laboratório Computacional 1C)
# Observe a tabela a seguir, onde **DS (UA)** é a distância do referido planeta até o Sol em unidades astronômicas (UA), **Tm (F)** sua temperatura superficial mínima em graus Farenheit e **TM (F)** sua temperatura superficial máxima em graus Farenheit.
#
# | | DS (UA) | Tm (F) | TM (F) | DS (km) | TmM (C) |
# |--|--|--|--|--|--|
# Mercúrio | 0.39 | -275 | 840 | ? | ? |
# Vênus | 0.723 | 870 | 870 | ? | ? |
# Terra | 1.0 | -129 | 136 | ? | ? |
# Marte | 1.524 | -195 | 70 | ? | ? |
#
#
#
# - Escreva um código para converter a temperatura dos planetas de graus Farenheit (**F**) para Celsius (**C**).
#
# - Escreva um código para converter unidades astronômicas em quilômetros.
#
# - Imprima os valores que deveriam ser inseridos na coluna **DS (km)** horizontalmente usando `print`.
#
# - Repita o item anterior para a coluna **TmM (C)**, que é a média aritmética entre **Tm** e **TM**.
#
#
# *Observação:* use notação científica (exemplo: $4.2 \times 10^8$ pode ser escrito como `4.2e8` em Python).
# #### Resolução
#
# Há várias maneiras de resolver. Aqui apresentamos uma estratégia com `lambdas`.
#
# - Montar os arrays dos dados numéricos.
DS = np.array([0.39,0.723,1.0,1.524])
Tm = np.array([-275,870,-129,-195])
TM = np.array([840,870,136,70])
# - Fórmula e cálculo da conversão Farenheit para Celsius:
C = lambda F: 5/9*(F-32)
CTm = C(Tm)
CTM = C(TM)
print(CTm) # minimas em C
print(CTM) # maximas em C
# - Fórmula e cálculo da conversão UA para km:
UA = lambda km: 1.496e+8*km
UADS = UA(DS)
print(UADS) # valores a inserir
# - Cálculo da média
TmM = 0.5*(CTm + CTM)
print(TmM)
# ### `reshape` e `hstack`
#
# A montagem do array bidimensional com os cálculos resultantes não foi requisitada no problema. Porém, vamos mostrar uma maneira de fazer isto usando `reshape`, que é uma função utilizada para reformatar os dados e `hstack`, que é usada para "empilhar" arrays horizontalmente.
#
# Note que todos os nossos *arrays* são unidimensionais. Vamos torná-los bidimensionais com formato 4 x 1 e empilhá-los horizontalmente, isto é, na direção do eixo 1 (esquerda para direita).
#
# **Obs:** consulte também `vstack`.
# +
todos = [DS,CTm,CTM,UADS,TmM] # lista com todos os arrays
for i,ar in enumerate(todos):
todos[i] = np.reshape(ar, (4,1)) # reformata
final = np.hstack(todos) # empilha
# -
# Explicando o que fizemos:
#
# - Colocamos todos os arrays em uma lista: neste ponto, nada novo.
# - Iteramos sobre a lista, reformatamos um por um e reatribuímos na mesma lista como arrays bidimensionais
#
# Para o segundo ponto, observe:
DS.shape # formato é 1 x 4 (unidimensional)
np.reshape(DS,(4,1)) # reformata
np.reshape(DS,(4,1)).shape # novo formato é 4 x 1
np.reshape(DS,(4,1)).ndim # o array agora é bidimensional
# Procedendo assim para todos, conseguimos reformatá-los e adicioná-los em uma lista. Se desejarmos, podemos sobrescrever essa lista ou não. Na resolução anterior, escolhemos sobrescrever. Assim, suponha que a lista dos arrays reformatados seja:
L = [np.reshape(DS,(4,1)),np.reshape(TmM,(4,1))] # apenas DS e TmM
L
# - Criamos o array final por empilhamento.
# Note que a lista `L` possui 2 arrays de formato 4 x 1. Para criar o array 4 x 2, faremos um empilhamento horizontal similar à uma concatenação na direção 1.
Lh = np.hstack(L)
Lh
# Agora podemos verificar que, de fato, o array está na forma como queremos.
Lh[:,0] # 1a. coluna idêntica à DS
Lh[:,0] == DS # teste
np.all( Lh[:,0] == DS ) # teste completo
Lh[:,1] # 2a. coluna idêntica a TmM
Lh[:,1] == TmM # teste
np.all( Lh[:,1] == TmM ) # teste completo
# ## *Broadcasting*
#
# *Broadcasting* é a capacidade que o *numpy* oferece para realizarmos operações em arrays com diferentes dimensões.
# ### Regras do *broadcasting*
#
# 1. Se dois *arrays* tiverem dimensões diferentes, o formato do array com menor dimensão é preenchido por 1 do lado esquerdo.
# 2. Se o formato dos *arrays* não for igual em dimensão alguma, o array com tamanho igual a 1 é esticado nesta direção para ficar no mesmo tamanho correspondente do outro array.
# 3. Se em qualquer direção os tamanhos dos *arrays* forem diferentes e nenhum deles for igual a 1, então um erro é retornado.
# #### Exemplo da Regra 1
A = np.array([[1, 2, 3],[4, 5, 6]]) # array 2D
b = np.array([10, 20, 30]) # array 1D
print(A.shape)
print(b.shape)
A + b
# A soma pode ser realizada mesmo assim. O que ocorreu? Cada linha de `A` foi somada à única linha de `b`. O *broadcasting* amplia o array de menor dimensão automaticamente da seguinte forma:
#
# Pela regra 1, o *array* `b` tem dimensão menor. Então, ele é preenchido de modo que:
#
# ```python
# A.shape -> (2, 3)
# b.shape -> (1, 3)
# ```
#
# Pela regra 2, a primeira dimensão de `A` é 2 e a de `b` é 1. Então, a dimensão de `b` é "esticada", de modo que:
#
# ```python
# A.shape -> (2, 3)
# b.shape -> (2, 3)
# ```
# A mesma operação poderia ter sido feita com:
A + np.array([b,b])
# #### Exemplo da Regra 2
A = np.arange(3).reshape((3, 1))
b = np.arange(3)
print(A.shape)
print(b.shape)
A + b
# Neste caso, ambos os arrays sofrem *broadcasting*. Ele ocorre da seguinte forma.
#
# Como
#
# ```python
# A.shape = (3, 1)
# b.shape = (3,)
# ```
# a regra 1 diz que `b` deve ser preenchido de modo que
#
# ```python
# A.shape -> (3, 1)
# b.shape -> (1, 3)
# ```
# e, pela regra 2, cada uma das dimensões 1 deve ser alterada de modo que:
#
# ```python
# A.shape -> (3, 3)
# b.shape -> (3, 3)
# ```
#
# Assim, o *broadcasting* é permitido.
# #### Exemplo da Regra 3
A = np.ones((3, 2))
b = np.arange(3)
print(A.shape)
print(b.shape)
A + b
# Neste exemplo, o *broadcasting* não é permitido. O caso é levemente diferente do primeiro exemplo em que `A` é transposta.
#
# Temos que
#
# ```python
# A.shape = (3, 2)
# b.shape = (3,)
# ```
#
# Pela regra 1, devemos ter
#
# ```python
# A.shape -> (3, 2)
# b.shape -> (1, 3)
# ```
#
# e, pela regra 2, a primeira dimensão deve ser esticada para combinar-se com a de `A` enquanto a segunda não é alterada por não ser 1.
#
# ```python
# A.shape -> (3, 2)
# b.shape -> (3, 3)
# ```
# Porém, o formato final de ambos não se combina. Sendo incompatíveis, o *broadcasting* falha.
| _build/html/_sources/ipynb/08a-computacao-vetorizada.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
#############################
# #
# Homework 4 - Pandas #
# Student - <NAME> #
# #
#############################
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
purchase_data = pd.read_csv(file_to_load)
purchase_data.head(5)
# -
# Cast the df types, just in case :)
purchase_data.dtypes
# ## Player Count
# * Display the total number of players
#
# + tags=[]
# Set the empty list
total_players = []
# Calculate the total number of players
total_players.append(len(purchase_data['SN'].unique()))
# Set the Data Frame
total_players_df = pd.DataFrame(total_players, columns = ['Total Players'])
# Display the Data Frame
total_players_df
# -
# ## Purchasing Analysis (Total)
# * Run basic calculations to obtain number of unique items, average price, etc.
#
#
# * Create a summary data frame to hold the results
#
#
# * Optional: give the displayed data cleaner formatting
#
#
# * Display the summary data frame
#
# +
# Set the empty dictionary
purchasing_analysis_dict = {}
# Number of Unique Items
num_unique_items = len(purchase_data['Item ID'].unique())
purchasing_analysis_dict['Number of Unique Items'] = num_unique_items
# Average Purchase Price
mean_price = purchase_data['Price'].mean(axis=0)
purchasing_analysis_dict['Average Price'] = mean_price
# Total Number of Purchases
total_num_purchases = purchase_data['Item ID'].count()
purchasing_analysis_dict['Total Number of Purchases'] = total_num_purchases
# Total Revenue
total_revenue = purchase_data['Price'].sum(axis=0)
purchasing_analysis_dict['Total Revenue'] = total_revenue
# Set the summary data frame
purchasing_analysis_df = pd.DataFrame(list(purchasing_analysis_dict.values()))
purchasing_analysis_df = purchasing_analysis_df.transpose()
purchasing_analysis_df.columns = purchasing_analysis_dict.keys()
# Format fields
purchasing_analysis_df['Number of Unique Items'] = purchasing_analysis_df['Number of Unique Items'].map("{:.0f}".format)
purchasing_analysis_df['Total Number of Purchases'] = purchasing_analysis_df['Total Number of Purchases'].map("{:.0f}".format)
purchasing_analysis_df['Average Price'] = purchasing_analysis_df['Average Price'].map("${:.2f}".format)
purchasing_analysis_df['Total Revenue'] = purchasing_analysis_df['Total Revenue'].map("${:,.2f}".format)
# Display the summary data frame
purchasing_analysis_df
# -
# ## Gender Demographics
#
#
# * Percentage and Count of Male Players
#
#
# * Percentage and Count of Female Players
#
#
# * Percentage and Count of Other / Non-Disclosed
#
#
#
# +
# Set a data frame with unique Player Names
unique_players_df = purchase_data.drop_duplicates(subset=['SN', 'Gender'])
# Create a count column for each gender
count_gender = unique_players_df["Gender"].value_counts()
# Set the total
gender_demographics_df = pd.DataFrame(count_gender)
gender_demographics_df.columns = ["Total Count"]
# Calculate the sum
sum_players = gender_demographics_df['Total Count'].sum()
# Generate the final output
gender_demographics_df['Percentage of Players'] = gender_demographics_df['Total Count'] / sum_players * 100
# Format fields
gender_demographics_df['Percentage of Players'] = gender_demographics_df['Percentage of Players'].map("{:.2f}%".format)
# Display the summary data frame
gender_demographics_df
# -
#
# ## Purchasing Analysis (Gender)
# * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender
#
#
#
#
# * Create a summary data frame to hold the results
#
#
# * Optional: give the displayed data cleaner formatting
#
#
# * Display the summary data frame
# +
#Generate the calculations
purchase_analysis_gender_df = purchase_data.groupby('Gender').agg(
total_users = ('SN', 'nunique'),
total_orders = ('Purchase ID', 'count'),
avg_price = ('Price', 'mean'),
total_revenue = ('Price', 'sum')
)
# Calculate the average per person
purchase_analysis_gender_df['Average Purchase Total per Person'] = purchase_analysis_gender_df['total_revenue'] / purchase_analysis_gender_df['total_users']
# Rename Columns
purchase_analysis_gender_df = purchase_analysis_gender_df.rename(columns={
'total_users' : 'Total Users',
'total_orders' : 'Purchase Count',
'avg_price' : 'Average Purchase Price',
'total_revenue' : 'Total Purchase Value',
})
# Format fields
purchase_analysis_gender_df['Average Purchase Price'] = purchase_analysis_gender_df['Average Purchase Price'].map("${:,.2f}".format)
purchase_analysis_gender_df['Total Purchase Value'] = purchase_analysis_gender_df['Total Purchase Value'].map("${:,.2f}".format)
purchase_analysis_gender_df['Average Purchase Total per Person'] = purchase_analysis_gender_df['Average Purchase Total per Person'].map("${:,.2f}".format)
# Display the summary data frame
purchase_analysis_gender_df
# -
# ## Age Demographics
# * Establish bins for ages
#
#
# * Categorize the existing players using the age bins. Hint: use pd.cut()
#
#
# * Calculate the numbers and percentages by age group
#
#
# * Create a summary data frame to hold the results
#
#
# * Optional: round the percentage column to two decimal points
#
#
# * Display Age Demographics Table
#
# +
# Generate min and max ages, establishing min and max bins.
min_age = purchase_data['Age'].min()
max_age = purchase_data['Age'].max()
# Generate bins by list comprehension
bins = [x for x in range(0, int(max_age)+1, int(max_age/9))]
# Create bin labels
labels = [f"from {round(bins[x])} to {round(bins[x+1])}" for x in range(len(bins)-1)]
# Cut the dataframe in bins
purchase_data_groups = purchase_data
purchase_data_groups['Age Group'] = pd.cut(purchase_data_groups['Age'], bins, labels = labels)
# Calculate fields
age_demographics_df = purchase_data.groupby('Age Group').agg(total_users = ('SN', 'nunique'))
# Create sum measure
sum_ages = age_demographics_df['total_users'].sum()
# Calculate percentages
age_demographics_df['Percentage of Players'] = age_demographics_df['total_users'] / sum_ages * 100
# Format fields
age_demographics_df['Percentage of Players'] = age_demographics_df['Percentage of Players'].map("{:.2f}%".format)
age_demographics_df = age_demographics_df.rename(columns={'total_users' : 'Total Users'})
# Display the summary data frame
age_demographics_df
# -
# ## Purchasing Analysis (Age)
# * Bin the purchase_data data frame by age
#
#
# * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below
#
#
# * Create a summary data frame to hold the results
#
#
# * Optional: give the displayed data cleaner formatting
#
#
# * Display the summary data frame
# +
# Calculate fields
purchase_data_groups = purchase_data.groupby('Age Group').agg(
total_users = ('SN', 'nunique'),
total_orders = ('Purchase ID', 'count'),
avg_price = ('Price', 'mean'),
total_revenue = ('Price', 'sum')
)
purchase_data_groups['Average Purchase Total per Person'] = purchase_data_groups['total_revenue'] / purchase_data_groups['total_users']
# Rename Columns
purchase_data_groups = purchase_data_groups.rename(columns={
'total_users' : 'Total Users',
'total_orders' : 'Purchase Count',
'avg_price' : 'Average Purchase Price',
'total_revenue' : 'Total Purchase Value',
})
# Format fields
purchase_data_groups['Average Purchase Price'] = purchase_data_groups['Average Purchase Price'].map("${:,.2f}".format)
purchase_data_groups['Total Purchase Value'] = purchase_data_groups['Total Purchase Value'].map("${:,.2f}".format)
purchase_data_groups['Average Purchase Total per Person'] = purchase_data_groups['Average Purchase Total per Person'].map("${:,.2f}".format)
# Display the summary data frame
purchase_data_groups
# -
# ## Top Spenders
# * Run basic calculations to obtain the results in the table below
#
#
# * Create a summary data frame to hold the results
#
#
# * Sort the total purchase value column in descending order
#
#
# * Optional: give the displayed data cleaner formatting
#
#
# * Display a preview of the summary data frame
#
#
# +
# Calculate per-player purchase count, average price and total spend.
spenders_df = purchase_data.groupby('SN').agg(
    total_orders = ('Purchase ID', 'count'),
    avg_price = ('Price', 'mean'),
    total_revenue = ('Price', 'sum')
)
# Sort by total purchase value, descending, as the instructions require
# (the original sorted by purchase count instead).
spenders_df = spenders_df.sort_values('total_revenue', ascending=False)
# Rename Columns
spenders_df = spenders_df.rename(columns={
    'total_orders' : 'Purchase Count',
    'avg_price' : 'Average Purchase Price',
    'total_revenue' : 'Total Purchase Value',
})
# Format fields
spenders_df['Average Purchase Price'] = spenders_df['Average Purchase Price'].map("${:,.2f}".format)
spenders_df['Total Purchase Value'] = spenders_df['Total Purchase Value'].map("${:,.2f}".format)
# Display the summary data frame
spenders_df.head(5)
# -
# ## Most Popular Items
# * Retrieve the Item ID, Item Name, and Item Price columns
#
#
# * Group by Item ID and Item Name. Perform calculations to obtain purchase count, item price, and total purchase value
#
#
# * Create a summary data frame to hold the results
#
#
# * Sort the purchase count column in descending order
#
#
# * Optional: give the displayed data cleaner formatting
#
#
# * Display a preview of the summary data frame
#
#
# +
most_popular_items_df = purchase_data[['Item ID', 'Item Name', 'Price']]
most_popular_items_gp = most_popular_items_df.groupby(['Item ID', 'Item Name']).agg(
total_orders = ('Item ID', 'count'),
avg_price = ('Price', 'mean'),
total_revenue = ('Price', 'sum'),
)
most_popular_items_gp = most_popular_items_gp.rename(columns={
'total_orders' : 'Purchase Count',
'avg_price' : 'Item Price',
'total_revenue' : 'Total Purchase Value',
})
most_popular_items_gp.sort_values('Purchase Count', ascending=False, inplace=True)
# Format fields
most_popular_items_gp['Item Price'] = most_popular_items_gp['Item Price'].map("${:,.2f}".format)
most_popular_items_gp['Total Purchase Value'] = most_popular_items_gp['Total Purchase Value'].map("${:,.2f}".format)
# Display the summary data frame
most_popular_items_gp.head(5)
# -
# ## Most Profitable Items
# * Sort the above table by total purchase value in descending order
#
#
# * Optional: give the displayed data cleaner formatting
#
#
# * Display a preview of the data frame
#
#
# +
# Unformat fields, replacing the currency symbol and converting to a float
most_popular_items_gp['Total Purchase Value'] = most_popular_items_gp['Total Purchase Value'].replace('[\$,]','',regex=True).astype(float)
# Sort the data
most_popular_items_gp.sort_values('Total Purchase Value', ascending=False, inplace=True)
# Format fields
most_popular_items_gp['Total Purchase Value'] = most_popular_items_gp['Total Purchase Value'].map("${:,.2f}".format)
# Display the summary data frame
most_popular_items_gp.head(5)
| HeroesOfPymoli/Solved - HeroesOfPymoli.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Accelerometer Data Prediction #
#
# The prediction is done by Binary classification model using logistic regression which turns out to be efficient enough to predict
# if the reading is of phone falling in the ground or when a car experiences a crash.
#
# The two dataset provided were "PHONE FALL DATA FILE" and "ACCIDENTAL DATA FILE"
# An additional column named 'label' is introduced in dataset where 1 signifies " a car accident" and 0 signifies " phone fall ".
#
# The two dataset are then combined together and randomly arranged to get an uniform dataset which enhances the training and testing of data.
#
# Then out of the entire set 2400 dataset are taken as training set and others are taken as testing set, which are data_train.csv and data_test.csv respectively.
#
# The model achieves a train accuracy: 98.25 % and test accuracy: 97.45454545454545 % which is a perfect fit to the model as it behaves efficiently both on training and test data set.
# ## Packages ##
#
# - numpy is the fundamental package for scientific computing with Python.
# - pandas is a common package to interact with a dataset that is stored on a csv file.
# - matplotlib is a famous library to plot graphs in Python.
#
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# %matplotlib inline
pf=pd.read_csv('data_train.csv')
print(pf)
input_parameter=["Accel X","Accel Y"]
X_train=np.array(pf[input_parameter])
X_train=X_train.T
output_parameter=['label']
Y_train=np.array(pf[output_parameter])
Y_train=Y_train.T
X_train,Y_train
plt.plot(X_train.T,Y_train.T,'ro')
plt.xlabel("input parameters")
plt.ylabel("output parameters")
plt.show()
# ## General Architecture of the learning algorithm ##
#
# **Mathematical expression of the algorithm**:
#
# For one example $x^{(i)}$:
# $$z^{(i)} = w^T x^{(i)} + b $$
# $$\hat{y}^{(i)} = h^{(i)} = sigmoid(z^{(i)})$$
# $$ \mathcal{L}(h^{(i)}, y^{(i)}) = - y^{(i)} \log(h^{(i)}) - (1-y^{(i)} ) \log(1-h^{(i)})$$
#
# The cost is then computed by summing over all training examples:
# $$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(h^{(i)}, y^{(i)})$$
# ### Initializing parameters
#
# A random initialisation of w and b parameter enhances the model to train quickly and efficiently
# +
#Initialization of parameters
def initialize_parameter(X):
    """Randomly initialize logistic-regression parameters for data X (n_features, m).

    Returns w with shape (n_features, 1), not (n_features,): the gradient dw
    computed in ``propagate`` has shape (n_features, 1), and subtracting it
    from a 1-D w broadcasts the result into an (n, n) matrix, silently
    duplicating the model on every descent step.
    """
    w = np.random.randn(X.shape[0], 1)*0.01  # small random weights, column vector
    b = np.random.randn()*0.01               # small random scalar bias
    return w, b
# -
# ### Sigmoid function
#
# As shown earlier,there is a need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions, which restricts the value to be between 0 and 1
def sigmoid(z):
    """Logistic function: map any real z (scalar or array) into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-z))
# ### Forward and Backward propagation
#
# Forward Propagation:
# - get X
# - compute $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$
# - calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$
#
# Here are the two formulas you will be using:
#
# $$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$
# $$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$
# +
# Cost function
def propagate(w, b, X, Y):
    """One forward/backward pass of logistic regression.

    Computes the cross-entropy cost averaged over the m examples in X
    (shape (n_features, m)) and the analytic gradients of that cost
    with respect to w and b.
    """
    m = X.shape[1]  # number of training examples
    activation = sigmoid(np.dot(w.T, X) + b)
    # Average cross-entropy loss over the batch.
    cost = (-Y * np.log(activation) - (1 - Y) * np.log(1 - activation)).sum() / m
    # Analytic gradients: dJ/dw = X (A - Y)^T / m, dJ/db = mean(A - Y).
    residual = activation - Y
    grads = {"dw": np.dot(X, residual.T) / m,
             "db": residual.sum() / m}
    return grads, cost
# -
def optimize(w, b, X, Y, num_iterations, learning_rate):
    """Run plain gradient descent for ``num_iterations`` steps.

    Returns the final parameters, the gradients from the last step, and
    the per-iteration cost history.
    """
    cost_history = []
    for _ in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        # Gradient-descent update.
        w = w - learning_rate * grads["dw"]
        b = b - learning_rate * grads["db"]
        cost_history.append(cost)
    params = {"w": w,
              "b": b}
    return params, grads, cost_history
def predict(w, b, X):
    """Return 0/1 labels for each column of X by thresholding sigmoid(w.T X + b) at 0.5."""
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    probabilities = sigmoid(np.dot(w.T, X) + b)
    for j in range(probabilities.shape[1]):
        # Label 1 whenever the estimated probability reaches 0.5.
        Y_prediction[0, j] = 1 if probabilities[0, j] >= 0.5 else 0
    return Y_prediction
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5):
    """Train logistic regression on (X_train, Y_train), report train/test accuracy,
    and return a dict with the learned parameters, predictions, cost history
    and hyperparameters."""
    w, b = initialize_parameter(X_train)
    params, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate)
    w, b = params["w"], params["b"]
    # Predictions on both splits with the trained parameters.
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    # Accuracy = 100 - mean absolute error (labels are 0/1), in percent.
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
    return {"costs": costs,
            "Y_prediction_test": Y_prediction_test,
            "Y_prediction_train" : Y_prediction_train,
            "w" : w,
            "b" : b,
            "learning_rate" : learning_rate,
            "num_iterations": num_iterations}
# ### Testing of model
#
# The model trained against the data_train.csv is tested against data_test.csv
pf=pd.read_csv('data_test.csv')
print(pf)
X_test=np.array(pf[input_parameter])
X_test=X_test.T
Y_test=np.array(pf[output_parameter])
Y_test=Y_test.T
d = model(X_train,Y_train,X_test,Y_test, num_iterations = 2000, learning_rate = 0.005)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
X_train=np.concatenate((X_train.T,X_test.T)).T
Y_train=np.concatenate((Y_train.T,Y_test.T)).T
parameters, grads, costs = optimize(d["w"],d["b"],X_train,Y_train,num_iterations=2000, learning_rate=0.005)
d["w"] = parameters["w"]
d["b"] = parameters["b"]
# ### Testing of new values and training the ones not present
#
# The code segment tests newly introduced values and also trains any data newly encountered.
# Interactive loop: classify user-supplied accelerometer readings with the
# trained parameters in d, and fold (corrected) novel samples back into the
# training set before re-optimizing.
ch=input("Do you want to test against a set of data?(Y/N):")
while(ch=='Y'):
    # Read one (x, y) acceleration sample from the user.
    x_acc=float(input("Enter the x-acceleration:"))
    y_acc=float(input("Enter the y-acceleration:"))
    x_label=np.zeros((1,2))
    x_label[0][0]=x_acc
    x_label[0][1]=y_acc
    y_label=np.zeros((1,1))
    # Forward pass with the current parameters d["w"], d["b"].
    y_label=sigmoid(np.dot(d["w"].T,x_label.T)+d["b"])
    # Threshold at 0.5: 1 = "Accident", 0 = "Phone Fall".
    if y_label[0]>0.5:
        print("Accident")
        y_label[0]=1
    else:
        print("Phone Fall")
        y_label[0]=0
    pred=input("Prediction Correct?(Y/N)")
    if pred=='N':
        # User says the prediction was wrong: flip the label before storing.
        y_label=1-y_label
    # Only append the sample if this exact (x, y) pair is not already in X_train.
    flag=True
    for x,y in X_train.T:
        if x_acc==x and y_acc==y:
            flag=False
            break
    if flag==True:
        X_train=np.concatenate((X_train.T,x_label)).T
        # NOTE(review): y_label[0] is 1-D while Y_train is 2-D; np.concatenate
        # with axis=1 requires matching ndim -- verify this line at runtime.
        Y_train=np.concatenate((Y_train,y_label[0]),axis=1)
        # Re-train starting from the current parameters on the enlarged set.
        parameters, grads, costs = optimize(d["w"],d["b"],X_train,Y_train,num_iterations=2000, learning_rate=0.005)
        d["w"] = parameters["w"]
        d["b"] = parameters["b"]
    ch=input("Do you want to test against a set of data?(Y/N):")
| Accelerometer_reading_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PythonData
# language: python
# name: pythondata
# ---
# ## Create a new Jupyter Notebook file and name it Vacation_Search.ipynb.
# Import the dependencies.
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
# ## Import the WeatherPy_vacation.csv file from Part 1 as a new DataFrame.
# Store the CSV you saved created in part one into a DataFrame.
city_data_df = pd.read_csv("weather_data/WeatherPy_challenge.csv")
city_data_df.head()
# ## Filter the DataFrame for minimum and maximum temperature preferences, and if the rain or snow accumulation is 0 inches or not using conditional statements. Do the following:
# Inspect the available column names before filtering.
cols = city_data_df.columns.tolist()
cols
# Prompt the customer for the minimum temperature preference.
min_temp = float(input("What is the minimum temperature you would like for your trip? "))
# Prompt the customer for the maximum temperature preference.
max_temp = float(input("What is the maximum temperature you would like for your trip? "))
# Prompt the customer to answer if he or she would like it to be raining or not.
rain = str(input('Do you want it to be raining? (yes/no)\n'))
# Prompt the customer to answer if he or she would like it to be snowing or not
snow = str(input('Do you want it to be snowing? (yes/no)\n'))
# Filter the dataset to find the cities that fit the criteria.
# Build the rain/snow boolean masks once -- pandas comparisons are vectorized,
# so no row iteration is needed.  BUGFIX: the original iterated `iterrows()`
# uselessly and then compared the rain/snow *columns* against the boolean
# masks with `!=`, which effectively disabled the rain/snow filters.
if rain == "no":
    rain_inches = city_data_df["Rain inches (last 3 hours)"] == 0
else:
    rain_inches = city_data_df["Rain inches (last 3 hours)"] != 0
if snow == "no":
    snow_inches = city_data_df["Snow inches (last 3 hours)"] == 0
else:
    snow_inches = city_data_df["Snow inches (last 3 hours)"] != 0
# +
# Apply the rain/snow masks together with the temperature-range criteria.
preferred_cities_df = city_data_df.loc[rain_inches &
                                       snow_inches &
                                       (city_data_df["Max Temp"] <= max_temp) &
                                       (city_data_df["Max Temp"] >= min_temp)]
preferred_cities_df.head(10)
# -
# ## Add the cities to a marker layer map with a pop-up marker for each city that includes:
# Create DataFrame called hotel_df to store hotel names along with city, country, max temp, and coordinates.
hotel_df = preferred_cities_df[["City", "Country", "Max Temp", "Current Description", "Lat", "Lng"]].copy()
hotel_df["Hotel Name"] = ""
hotel_df.head(10)
# +
# Dependencies and Setup
import requests
import gmaps
# Import API key
from config import g_key
# -
# Set parameters to search for a hotel.
# Shared query parameters for the Google Places "nearby search" endpoint;
# the per-city "location" value is filled in inside the loop below.
params = {
    "radius": 5000,
    "type": "lodging",
    "key": g_key
}
# Iterate through the DataFrame, looking up the nearest lodging for each city.
for index, row in hotel_df.iterrows():
    # Get the latitude and longitude.
    lat = row["Lat"]
    lng = row["Lng"]
    # Add the latitude and longitude to the params dictionary as values to the location key.
    params["location"] = f"{lat},{lng}"
    # Use the search term: "lodging" and our latitude and longitude.
    base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
    # Make request and get the JSON data from the search.
    hotels = requests.get(base_url, params=params).json()
    # Grab the first hotel from the results and store the name.
    # An empty "results" list raises IndexError -> leave the name blank.
    try:
        hotel_df.loc[index, "Hotel Name"] = hotels["results"][0]["name"]
    except (IndexError):
        print("Hotel not found... skipping.")
# HTML template for the map pop-up; placeholders are filled from each row below.
info_box_template = """
<dl>
<dt>Hotel Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
<dt>Current Description</dt><dd>{Current Description}</dd>
<dt>Max Temp</dt><dd>{Max Temp} °F</dd>
</dl>
"""
# Store the DataFrame Row.
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
# +
# Add a heatmap of temperature for the vacation spots and a pop-up marker for each city.
locations = hotel_df[["Lat", "Lng"]]
max_temp = hotel_df["Max Temp"]
fig = gmaps.figure(center=(30.0, 31.0), zoom_level=1.5)
heat_layer = gmaps.heatmap_layer(locations, weights=max_temp,dissipating=False,
             max_intensity=300, point_radius=4)
marker_layer = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(heat_layer)
fig.add_layer(marker_layer)
# Call the figure to plot the data.
fig
# -
# ## Save and upload the new DataFrame as WeatherPy_vacation.csv.
# Create the output file (CSV).
output_data_file = "weather_data/WeatherPy_vacation.csv"
# Export the City_Data into a CSV.
hotel_df.to_csv(output_data_file, index_label="City_ID")
# ## Save and upload the new marker layer map as WeatherPy_vacation_map.png.
| Vacation_Search.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <img style="float: left; padding-right: 10px; width: 150px" src="https://sc.acamica.com/icons/1j7w9h/social-300x300.png"> Acámica DS-COR3 - Ejercicio de Data Cleaning
#
# ### Marzo 2019
#
# <hr style="height:2pt">
# ## Descripción
#
# El objetivo de este notebook es practicar algunas técnicas de Data Cleaning sobre un conjunto de datos referido a precios de casas. Aquí se debe analizar la información presentada y la calidad de la misma para entender las acciones correctivas necesarias para lograr un formato adecuado para análisis y preparación de datos.
#
# **Link a dataset:** https://www.kaggle.com/code1110/houseprice-data-cleaning-visualization/data
#
# **Ejemplo de ejercicio:** https://www.kaggle.com/code1110/houseprice-data-cleaning-visualization/notebook
#
# Como guía, se pretende que el análisis cubra los siguientes aspectos:
#
# - Remover observaciones no deseadas
# - Duplicados
# - Irrelevantes o no importantes para el negocio
# - Arreglar errores de estructura
# - En strings, normalizar descripciones (e.g. case, typos)
# - Normalizar categorizaciones
# - Detectar y filtrar outliers
# - Manejar datos faltantes
# - Decidir si imputar o filtrar (drop) en base al negocio
# - Imputar o filtrar datos
# - Chequear resultados de limpieza
# - Entender tamaño del dataset luego de cada paso de filtrado
# - Analizar dataset resultante y corroborar estado de las columnas procesadas
#
| notebooks/s5_cleaning/Data_Cleaning_HousePrice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Obtaining the Data
# The data was obtained via the [TLC Trip Record Data](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page) website provided. Looking at the website, there is an API, however I chose to scrape the data available through the links on this page because of the time restraint and ease of access.
#import packages
from bs4 import BeautifulSoup
import requests
import pandas as pd
# Get list of links from website provided
url = 'https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page'
r = requests.get(url)
# NOTE(review): no explicit parser is passed; bs4 picks one and may warn.
soup = BeautifulSoup(r.content) # create soup of content
# Before choosing what files to download, I wanted to get an idea of the files available. To do so, I:
# 1. Created a list of all the links found on this page
# 2. Identifed the links that contained keywords indicating it was a data file
# 3. Create table of the URLs and corresponding data
# +
# create list of links for data to scrape
sources = ['yellow', 'green', 'fhv', 'fhvhv']
keyword = 'trip+data'
url_list = []
for link in soup.find_all('a'):
    url = link.get('href')
    # BUGFIX: anchors without an href attribute make link.get() return None,
    # and `keyword in None` raises TypeError -- skip them.
    if not url:
        continue
    for source in sources:
        if (keyword in url) and (source+'_' in url):
            # File names end in "...YYYY-MM.csv"; slice year and month out.
            year = url[-11:][:4]
            month = url[-6:][:2]
            filename = source + '-' + year + '-' + month + '.csv'
            url_list.append([source, month, year, url, filename])
# -
# Saving a list of files for use in my analysis notebook
# Persist the scraped file inventory so the analysis notebook can reuse it.
file_info = pd.DataFrame(url_list, columns = ['source', 'month', 'year', 'url', 'filename'])
file_info.to_csv('file_info.csv', index=False)
file_info.head()
# The above table now helps me easily organize the files I want. Now I can easily choose which files I want to download. I chose to use data from 2017 to current. My reasoning is that according to the [TLC Trip Records User Guide](https://www1.nyc.gov/assets/tlc/downloads/pdf/trip_record_user_guide.pdf), this is the year where they started to receive drop-off location for the FHV data. We need the data with the zone information, and also this should also give enough data to establish a pre-covid snapshot for years 2017 - 2019.
# download files for years 2017 to now
target_year = 2017
for row in url_list:
    # row = [source, month, year, url, filename]
    if int(row[2]) >= target_year:
        path = 'data/' + row[4]
        csv = requests.get(row[3])
        with open(path, 'wb') as file:
            file.write(csv.content)
# Since the download code takes a while to run, once it downloaded, I used a new notebook to analyze the data. This notebook can be found [HERE](Analyze.ipynb).
| Download.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:pytorch]
# language: python
# name: conda-env-pytorch-py
# ---
# +
import time
import os
import torch
import pytorch_mask_rcnn as pmr
# ------------------ adjustable parameters ---------------------
use_cuda = True # choose to use GPU or not
epochs = 1 # total epochs during this train
train_num_samples = 100 # number of samples per epoch
lr = 0.001 # learning rate
dataset = 'coco' # coco or voc
data_dir = 'E:/PyTorch/data/coco2017' # dataset directory
num_classes = 91 # 91 for coco, 21 for voc
ckpt_path = '../checkpoint_coco.pth' # path where to save the checkpoint.pth
# ------------------ adjustable parameters ---------------------
# Fall back to CPU automatically when CUDA is unavailable or disabled.
device = torch.device('cuda' if torch.cuda.is_available() and use_cuda else 'cpu')
print('cuda: {}\nuse_cuda: {}\n{} GPU(s) available'.format(torch.cuda.is_available(), use_cuda, torch.cuda.device_count()))
print('\ndevice: {}'.format(device))
# Build the training set and keep a random subset of `train_num_samples` images.
trainset = pmr.datasets(dataset, data_dir, 'train', train=True, device=device)
indices = torch.randperm(len(trainset)).tolist()
trainset = torch.utils.data.Subset(trainset, indices[:train_num_samples])
# Fixed seed so the model's weight initialization is reproducible.
torch.manual_seed(3)
model = pmr.maskrcnn_resnet50(True, num_classes).to(device)
# Only parameters that require gradients are handed to the optimizer.
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=lr, momentum=0.9, weight_decay=0.0005)
# +
# Resume from an existing checkpoint if one is present; otherwise start fresh.
if os.path.exists(ckpt_path):
    checkpoint = torch.load(ckpt_path)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    # Drop the (large) state dicts from the bookkeeping dict to free memory.
    del checkpoint['model_state_dict']
    del checkpoint['optimizer_state_dict']
    torch.cuda.empty_cache()
else:
    checkpoint = dict(epochs=0, num_batches=0)
epoch = checkpoint['epochs']
print('already trained: {} epochs, {} batches'.format(epoch, checkpoint['num_batches']))
since = time.time()
# ------------------train---------------------
model.train()
for _ in range(epochs):
    print()
    for i, data in enumerate(trainset):
        optimizer.zero_grad()
        # In training mode the Mask R-CNN wrapper returns a dict of losses.
        losses = model(*data)
        loss = sum(losses.values())
        loss.backward()
        optimizer.step()
        if i % 100 == 0:
            print(i, ' '.join(str(round(l.item(), 3)) for l in losses.values()))
    epoch += 1
    # Step-decay the learning rate every 7 epochs.
    if epoch % 7 == 0:
        for pg in optimizer.param_groups:
            pg['lr'] = lr * 0.9 ** (epoch // 7)
# ------------------train---------------------
print('total time of this train: {:.2f} s'.format(time.time() - since))
# Save updated weights and bookkeeping, then release the large dicts.
checkpoint['model_state_dict'] = model.state_dict()
checkpoint['optimizer_state_dict'] = optimizer.state_dict()
checkpoint['epochs'] = epoch
checkpoint['num_batches'] += epochs * len(trainset)
torch.save(checkpoint, ckpt_path)
num_batches = checkpoint['num_batches']
del checkpoint
torch.cuda.empty_cache()
print('already trained: {} epochs, {} batches'.format(epoch, num_batches))
# -
| train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="8dbc4462"
# Copyright 2022 d-Analytics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="gRnQBeg1POOT"
# To execute the code scroll down and run the "main" function; comment/uncomment lines to test the various filters (fft and convolution implementations).
# + id="dd792057"
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import norm
from scipy.fft import fft, ifft
import bottleneck as bn
# %matplotlib inline
plt.rcParams.update({'font.size': 8})
# + tags=[] id="77d489a1"
def cdf_diff(winlen:int, loc1:float, scale1:float, loc2:float, scale2:float):
    """Band-pass FFT window built as the difference of two Gaussian CDFs,
    clipped at zero and mirrored to cover the full two-sided spectrum."""
    n_half = winlen // 2 + 1
    freqs = np.linspace(0, 0.5, n_half)
    half = norm.cdf(freqs, loc=loc1, scale=scale1) - norm.cdf(freqs, loc=loc2, scale=scale2)
    half = np.where(half < 0, 0., half)
    # Mirror the one-sided window; drop the shared endpoint(s) so the
    # result has exactly `winlen` samples for odd or even lengths.
    tail = np.flip(half[1:]) if winlen % 2 == 1 else np.flip(half[1:-1])
    return np.concatenate((half, tail))
# + id="BCm_iXm8spVO"
def trapezoid(winlen:int, a0:float, loc0:float, loc1:float, loc2:float, loc3:float, a1:float, smoothlen:int=3):
    """Piecewise-linear (trapezoidal) band-pass FFT window, lightly smoothed
    with a moving average and mirrored to the full two-sided spectrum."""
    assert winlen > 3, 'Filter length too short'
    assert a0 >= 0 and a0 <=1 and a1 >= 0 and a1 <=1, 'a0 and a1 must satisfy: 0 <= a0, a1, <= 1'
    assert loc0 > 0 and loc3 < 0.5 and loc1 > loc0 and loc2 > loc1 and loc3 > loc2,\
        'Corner frequencies must satisfy 0 < loc0 < loc1 < loc2 < loc3 < 0.5'
    n_half = winlen//2+1
    # Map the normalized corner frequencies to sample indices of the half-window.
    i0, i1, i2, i3 = (int(2*loc*(n_half-1)) for loc in (loc0, loc1, loc2, loc3))
    half = np.zeros(n_half)
    half[0:i0] = np.linspace(0, a0, i0)          # leading ramp up to a0
    half[i0:i1] = np.linspace(a0, 1., i1-i0)     # rising edge
    half[i1:i2] = np.linspace(1., 1., i2-i1)     # flat pass band
    half[i2:i3] = np.linspace(1., a1, i3-i2)     # falling edge
    half[i3:] = np.linspace(a1, 0., n_half-i3)   # trailing ramp down to zero
    # Moving-average smoothing of the trapezoid corners.
    half = np.convolve(half, np.ones(smoothlen)/smoothlen, 'same')
    tail = np.flip(half[1:]) if winlen % 2 == 1 else np.flip(half[1:-1])
    return np.concatenate((half, tail))
# + id="GWtb9tp8Vklc"
def gauss(winlen:int, loc:float, scale:float):
    """Gaussian band-pass FFT window centered at normalized frequency `loc`."""
    assert winlen > 3, 'Filter length too short'
    n_half = winlen//2+1
    freqs = np.linspace(0, 0.5, n_half)
    half = norm.pdf(freqs, loc=loc, scale=scale)
    # Mirror to the negative-frequency side, skipping the shared endpoint(s).
    tail = np.flip(half[1:]) if winlen % 2 == 1 else np.flip(half[1:-1])
    return np.concatenate((half, tail))
# + id="ZxxW024GXpRj"
def butterworth_lc(winlen:int, fc:float, order:int=5):
    """Butterworth-style low-cut FFT window with corner frequency `fc`."""
    assert winlen > 3, 'Filter length too short'
    assert fc > 0 and fc < 0.5, 'Corner frequency must satisfy: 0 < fc < 0.5'
    assert order > 0, '"order" must be a positive integer'
    n_half = winlen//2+1
    # Frequency axis runs downward from Nyquist so attenuation grows toward 0.
    freqs = np.linspace(0.5, 0., n_half)
    half = 1/(1+(freqs/(0.5-fc))**(2*order))
    tail = np.flip(half[1:]) if winlen % 2 == 1 else np.flip(half[1:-1])
    return np.concatenate((half, tail))
# + id="UxPhQPWSZ5wS"
def butterworth_hc(winlen:int, fc:float, order:int=5):
    """Butterworth-style high-cut FFT window with corner frequency `fc`."""
    assert winlen > 3, 'Filter length too short'
    assert fc > 0 and fc < 0.5, 'Corner frequency must satisfy: 0 < fc < 0.5'
    assert order > 0, '"order" must be a positive integer'
    n_half = winlen//2+1
    freqs = np.linspace(0., 0.5, n_half)
    half = 1/(1+(freqs/fc)**(2*order))
    tail = np.flip(half[1:]) if winlen % 2 == 1 else np.flip(half[1:-1])
    return np.concatenate((half, tail))
# + id="qCxRQlaw15q1"
def sin_nthroot(winlen:int, nroot:float=5):
    """Low-cut FFT window: nth root of a quarter-period sine ramp.

    Arguments:
    winlen: total window length (> 3)
    nroot: root exponent; callers in this notebook also pass fractions (e.g. 1/2)

    BUGFIX: the original asserted `nroot >= 0`, admitting nroot == 0 which
    then raised ZeroDivisionError in 1/nroot; the root must be positive.
    (Annotation widened to float: main() calls this with nroot=1/2.)
    """
    assert winlen > 3, 'Filter length too short'
    assert nroot > 0, 'nth-root must be positive'
    Nrfft=winlen//2+1
    win=np.sin(np.linspace(0,np.pi/2.,Nrfft))**(1/nroot)
    # Mirror the half-window, dropping the shared endpoint(s).
    if np.mod(winlen,2) == 1:
        win=np.concatenate((win,np.flip(win[1:])))
    else:
        win=np.concatenate((win,np.flip(win[1:-1])))
    return win
# + id="-PSEedcx6fbG"
def cos_nthroot(winlen:int, nroot:float=5):
    """High-cut FFT window: nth root of a quarter-period cosine ramp.

    Arguments:
    winlen: total window length (> 3)
    nroot: root exponent; callers in this notebook also pass fractions (e.g. 1/3)

    BUGFIX: the original asserted `nroot >= 0`, admitting nroot == 0 which
    then raised ZeroDivisionError in 1/nroot; the root must be positive.
    (Annotation widened to float: main() calls this with nroot=1/3.)
    """
    assert winlen > 3, 'Filter length too short'
    assert nroot > 0, 'nth-root must be positive'
    Nrfft=winlen//2+1
    win=np.cos(np.linspace(0,np.pi/2.,Nrfft))**(1/nroot)
    # Mirror the half-window, dropping the shared endpoint(s).
    if np.mod(winlen,2) == 1:
        win=np.concatenate((win,np.flip(win[1:])))
    else:
        win=np.concatenate((win,np.flip(win[1:-1])))
    return win
# + id="TkMy9ojoYY7s"
def sinc(siglen:int=1001):
    """Symmetric sinc kernel; even length requests are rounded up to odd so
    the peak lands exactly on the center sample."""
    assert siglen > 3, 'Filter length too short!'
    odd_len = 2 * (siglen // 2) + 1
    span = odd_len // 2.25
    grid = np.linspace(-span, span, odd_len)
    return np.sinc(grid)
# + id="FioFocwWCNrY"
def genSweep(f0:float, f1:float, siglen:int):
    """Linear chirp from normalized frequency f0 to f1, padded with zeros at
    the front (siglen//10 samples) and back (siglen//20 samples)."""
    assert siglen > 3, 'Not enough samples to get things going'
    assert f0 > 0 and f1 > f0 and f1 < 1, 'The following condition was not met: 0 < f0 < f1 < 1'
    t = np.linspace(0, 1, siglen)
    # Instantaneous phase of a linear frequency ramp, plus a small offset.
    phase = np.pi/8 + 2.*np.pi*siglen*(f0*t + ((f1-f0)/2.)*t**2)
    pad_front = np.zeros(siglen//10)
    pad_back = np.zeros(siglen//20)
    return np.concatenate((pad_front, np.sin(phase), pad_back))
# + id="c4jI8ZF2ORAu"
def genSpike(k:int, siglen:int):
    """Unit impulse of length `siglen` located at index `k`."""
    assert siglen > 3, 'Not enough samples to get things going'
    assert k >= 0 and k < siglen, 'The following condition was not met: 0 <= k < siglen'
    # A single row of a shifted identity matrix: zeros with a 1.0 at column k.
    return np.eye(1, siglen, k).ravel()
# + id="dMACluqQJT3x"
def applyFffFilt(sig:np.ndarray,fftwin:np.ndarray):
    """Apply `fftwin` as a frequency-domain filter to `sig`, returning the
    real part of the inverse FFT scaled by 1/sqrt(2*pi)."""
    assert len(sig) == len(fftwin), 'Signal and window arrays must be equal size'
    spectrum = fft(sig) * fftwin
    filtered = np.real(ifft(spectrum))
    return filtered / np.sqrt(2*np.pi)
# + id="HiFNHBrN4Grv"
def applyConvFilt(sig:np.ndarray,fftwin:np.ndarray,threshold:float=0.95,minlen:int=15):
    """Sample-domain filtering: turn `fftwin` into a truncated convolution
    kernel (via a sinc prototype), normalize it, and convolve with `sig`.
    Returns (filtered signal, kernel)."""
    kernel_full = np.real(ifft(fftwin * fft(sinc(len(sig)))))
    center = len(kernel_full) // 2
    # Two-sided cumulative L1 energy away from the kernel peak; symmetry is
    # exploited and the peak sample is counted only once.
    energy = 2. * np.cumsum(np.abs(kernel_full[center:])) - kernel_full[center]
    energy = energy / energy[-1]
    # Keep enough samples to capture `threshold` of the energy, but never
    # fewer than roughly `minlen` taps.
    half_width = np.maximum(np.argmax(energy >= threshold),
                            np.minimum(minlen//2+1, len(energy)))
    kernel = kernel_full[center - half_width + 1:center + half_width]
    kernel = kernel / np.sum(np.abs(kernel))
    return np.convolve(sig, kernel, mode='same'), kernel
# + id="gGeIGYu64FGM"
def plotResults(sig:np.ndarray, sigfiltconv:np.ndarray, sigfiltfft:np.ndarray, fftwin:np.ndarray, filtconv:np.ndarray):
    """3x3 diagnostic figure: row 0 shows the input signal, its FFT amplitude
    and the FFT window; row 1 the convolution-filtered signal with its FFT
    amplitude and phase; row 2 the fft-filtered signal likewise."""
    fig, axs=plt.subplots(3, 3)
    fig.set_size_inches(30, 15)
    # Shared x-tick positions/labels for all frequency-axis panels.
    Nfreqxlabel=8; freqxlabelint=np.linspace(0,(Nfreqxlabel-1)/Nfreqxlabel,Nfreqxlabel)
    #freqxlabels=['$0$', r'$\frac{1}{8}$', r'$\frac{1}{4}$', r'$\frac{3}{8}$', r'$-\frac{1}{2}$', r'$-\frac{3}{8}$', r'$-\frac{1}{4}$', r'$-\frac{1}{8}$']
    freqxlabels=[str(np.round(x,2)) for x in np.fft.fftfreq(Nfreqxlabel,1)]
    #--- signal function
    idxSigmin=0; idxSigmax=len(sig)-1
    xsig=np.arange(len(sig))
    axs[0,0].plot(xsig[idxSigmin:idxSigmax],sig[idxSigmin:idxSigmax])
    axs[0,0].set_xlabel('Sample')
    axs[0,0].set_xlim(xsig[idxSigmin],xsig[idxSigmax])
    axs[0,0].set_title(label=f'Input signal (length = {len(sig)} samples)',fontdict=None, loc='center', pad=None)
    #--- FFT amplitude of the input signal
    SIG=np.abs(fft(sig))
    freqs=np.arange(len(SIG))
    axs[0,1].fill_between(freqs,SIG,0.,color='maroon')
    axs[0,1].set(xlabel='Freq (normalized)', ylabel='FFT Amplitude')
    axs[0,1].set_xlim(freqs.min(),freqs.max())
    axs[0,1].set_xticks(len(SIG)*freqxlabelint)
    axs[0,1].set_xticklabels(freqxlabels)
    axs[0,1].set_title(label='FFT amplitude of input signal',fontdict=None, loc='center', pad=None)
    #--- FFT amplitude of the window function
    freqs=np.arange(len(fftwin))
    axs[0,2].fill_between(freqs,fftwin,0.,color='maroon')
    axs[0,2].set(xlabel='Freq (normalized)', ylabel='FFT Amplitude')
    axs[0,2].set_ylim(fftwin.min(),fftwin.max())
    axs[0,2].set_xlim(freqs.min(),freqs.max())
    axs[0,2].set_xticks(len(fftwin)*freqxlabelint)
    axs[0,2].set_xticklabels(freqxlabels)
    axs[0,2].set_title(label='FFT window function',fontdict=None, loc='center', pad=None)
    #--- convolution-filtered signal
    idxmin=0; idxmax=len(sigfiltconv)-1
    # idxmin=50; idxmax=200
    xfilt=np.arange(len(sigfiltconv))
    axs[1,0].plot(xfilt[idxmin:idxmax],sigfiltconv[idxmin:idxmax])
    axs[1,0].set_xlabel('Sample')
    axs[1,0].set_xlim(xfilt[idxmin],xfilt[idxmax])
    axs[1,0].set_title(label=f'Convolution-filtered (filter length = {len(filtconv)} samples)',fontdict=None, loc='center', pad=None)
    #--- FFT amplitude of the convolution-filtered signal
    SIG_FILT=np.abs(fft(sigfiltconv))
    freqs=np.arange(len(SIG_FILT))
    axs[1,1].fill_between(freqs,SIG_FILT,0.,color='maroon')
    axs[1,1].set(xlabel='Freq (normalized)', ylabel='FFT Amplitude')
    axs[1,1].set_xlim(freqs.min(),freqs.max())
    axs[1,1].set_xticks(len(SIG_FILT)*freqxlabelint)
    axs[1,1].set_xticklabels(freqxlabels)
    axs[1,1].set_title(label='FFT amplitude of convolution-filtered signal',fontdict=None, loc='center', pad=None)
    #--- FFT phase of the convolution-filtered signal
    SIG_FILT=np.unwrap(np.angle((fft(sigfiltconv))))
    freqs=np.arange(len(SIG_FILT))
    axs[1,2].plot(freqs,SIG_FILT,color='maroon')
    axs[1,2].set(xlabel='Freq (normalized)', ylabel='Phase (rad)')
    axs[1,2].set_xlim(freqs.min(),freqs.max())
    axs[1,2].set_xticks(len(SIG_FILT)*freqxlabelint)
    axs[1,2].set_xticklabels(freqxlabels)
    axs[1,2].set_title(label='FFT phase of convolution-filtered signal',fontdict=None, loc='center', pad=None)
    #--- fft-filtered signal
    idxmin=0; idxmax=len(sigfiltfft)-1
    # idxmin=50; idxmax=200
    xfilt=np.arange(len(sigfiltfft))
    axs[2,0].plot(xfilt[idxmin:idxmax],sigfiltfft[idxmin:idxmax])
    axs[2,0].set_xlabel('Sample')
    axs[2,0].set_xlim(xfilt[idxmin],xfilt[idxmax])
    axs[2,0].set_title(label='FFT-filtered',fontdict=None, loc='center', pad=None)
    #--- FFT amplitude of the fft-filtered signal
    SIG_FILT=np.abs(fft(sigfiltfft))
    freqs=np.arange(len(SIG_FILT))
    axs[2,1].fill_between(freqs,SIG_FILT,0.,color='maroon')
    axs[2,1].set(xlabel='Freq (normalized)', ylabel='FFT Amplitude')
    axs[2,1].set_xlim(freqs.min(),freqs.max())
    axs[2,1].set_xticks(len(SIG_FILT)*freqxlabelint)
    axs[2,1].set_xticklabels(freqxlabels)
    axs[2,1].set_title(label='FFT amplitude of fft-filtered signal',fontdict=None, loc='center', pad=None)
    #--- FFT phase of the fft-filtered signal
    SIG_FILT=np.unwrap(np.angle((fft(sigfiltfft))))
    freqs=np.arange(len(SIG_FILT))
    axs[2,2].plot(freqs,SIG_FILT,color='maroon')
    axs[2,2].set(xlabel='Freq (normalized)', ylabel='Phase (rad)')
    axs[2,2].set_xlim(freqs.min(),freqs.max())
    axs[2,2].set_xticks(len(SIG_FILT)*freqxlabelint)
    axs[2,2].set_xticklabels(freqxlabels)
    axs[2,2].set_title(label='FFT phase of fft-filtered signal',fontdict=None, loc='center', pad=None)
    #--- final adjsutments
    #fig.tight_layout()
    plt.show()
# + [markdown] id="lxGVkjpVhfAu"
# # MAIN FUNCTION:
# + tags=[] colab={"base_uri": "https://localhost:8080/", "height": 831} id="f77914ee" outputId="4f419253-96d8-4ace-ee9d-9841bf3a2e49"
def main():
    'Filter testing utility; all frequencies are normalized between 0 and 0.5 = Nyquist.'
    #==============================
    # STEP 1: generate signal
    #==============================
    # Pick one test signal (sweep / spike / sinc) by (un)commenting.
    signal=genSweep(f0=0.015,f1=0.3,siglen=1001) # sweep
    # signal=genSpike(k=100,siglen=1001) # spike
    # signal=sinc(siglen=1021) # sinc
    #==============================
    # STEP 2: select fft window
    #==============================
    # Windows can be multiplied together to combine low-cut and high-cut edges.
    # fftwin = cdf_diff(winlen=len(signal), loc1=0.05, scale1=0.1, loc2=0.5, scale2=1)
    # fftwin = gauss(winlen=len(signal), loc=0.25, scale=0.2)
    # fftwin = sin_nthroot(winlen=len(signal),nroot=5)
    # fftwin = cos_nthroot(winlen=len(signal),nroot=1/3)
    # fftwin = sin_nthroot(winlen=len(signal),nroot=5)*cos_nthroot(winlen=len(signal),nroot=1/2)
    # fftwin = trapezoid(winlen=len(signal), a0=0.005, loc0=0.015, loc1=0.1, loc2=0.25, loc3=0.35, a1=0.05, smoothlen=15)
    # fftwin = butterworth_hc(winlen=len(signal), fc=0.25, order=6)
    fftwin = butterworth_lc(winlen=len(signal), fc=0.01, order=51)*butterworth_hc(winlen=len(signal), fc=0.25, order=6)
    #==============================
    # STEP 3: apply fft and convolution filters
    #==============================
    sigfiltfft = applyFffFilt(sig=signal, fftwin=fftwin)
    sigfiltconv, filtconv = applyConvFilt(sig=signal, fftwin=fftwin, threshold=0.975, minlen=5)
    #==============================
    # STEP 4: plot results
    #==============================
    # sigfiltconv=filtconv # uncomment this line to examine the filter; the results are placed in the covolution-based filter axes
    plotResults(sig=signal, sigfiltconv=sigfiltconv, sigfiltfft=sigfiltfft, fftwin=fftwin, filtconv=filtconv)
if __name__=="__main__":
    main()
# + id="ipU-cECC_Lcy"
| bandpass_filters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Simple OOP example with Linear Regression
# ### Dr. <NAME>, Fremont, CA 94536
#
# In this notebook, we will show how to leverage the power and flexibility of the Object-oriented programming (OOP) paradigm for machine learning.
import numpy as np
# ### A very simple class `MyLinearRegression`
class MyLinearRegression:
    """Bare-bones linear-regression container: only holds the (not yet
    fitted) coefficient attributes."""
    def __init__(self, fit_intercept=True):
        # Populated later by a fit step (added in subsequent versions).
        self.intercept_ = None
        self.coef_ = None
        self._fit_intercept = fit_intercept
# ---
# ### Create an instance and check attributes
# Fresh, unfitted instance: the coefficient attributes start out as None.
mlr = MyLinearRegression()
mlr._fit_intercept
mlr.coef_==None
mlr.intercept_ == None
# Default object printout (no __repr__ defined on this version yet).
print(mlr)
# ---
# ### Built-in description method
# We can add a special built-in method `__repr__` to create a short description string
class MyLinearRegression:
    """Linear-regression skeleton with a human-readable description."""
    def __init__(self, fit_intercept=True):
        # Populated later by a fit step.
        self.intercept_ = None
        self.coef_ = None
        self._fit_intercept = fit_intercept

    def __repr__(self):
        # Short description shown by print()/repr().
        return "I am a Linear Regression model!"
# print() now shows the friendly __repr__ description.
mlr = MyLinearRegression()
print(mlr)
# ---
# ### Adding the `fit` method
# Now, we can add the core fitting method called `fit`. This uses linear algebra routines from NumPy to solve a linear regression (single or multi-variate) problem.
class MyLinearRegression:
    """Linear regression fitted in closed form via the normal equations."""
    def __init__(self, fit_intercept=True):
        self.coef_ = None
        self.intercept_ = None
        self._fit_intercept = fit_intercept

    def __repr__(self):
        return "I am a Linear Regression model!"

    def fit(self, X, y):
        """
        Fit model coefficients.

        Arguments:
        X: 1D or 2D numpy array
        y: 1D numpy array
        """
        # Promote a single feature vector to a column matrix.
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        # Prepend a bias column when an intercept is requested.
        design = np.c_[np.ones(X.shape[0]), X] if self._fit_intercept else X
        # Normal equations: beta = (X^T X)^-1 X^T y
        gram_inv = np.linalg.inv(np.dot(design.T, design))
        beta = np.dot(gram_inv, np.dot(design.T, y))
        if self._fit_intercept:
            self.intercept_ = beta[0]
            self.coef_ = beta[1:]
        else:
            self.intercept_ = 0
            self.coef_ = beta
# ---
# ### Generate some random data for test
# Two random features in [0, 10); target is a noisy linear combination
# (true coefficients 3.5 and -1.2, Gaussian noise with sigma = 2).
X = 10*np.random.random(size=(20,2))
y = 3.5*X.T[0]-1.2*X.T[1]+2*np.random.randn(20)
import matplotlib.pyplot as plt
# +
# Scatter the target against each feature separately.
fig, ax = plt.subplots(1,2,figsize=(10,3))
ax[0].scatter(X.T[0],y)
ax[0].set_title("Output vs. first feature")
ax[0].grid(True)
ax[1].scatter(X.T[1],y)
ax[1].set_title("Output vs. second feature")
ax[1].grid(True)
fig.tight_layout()
plt.show()
# -
# ---
# ### Instantiate a new `MyLinearRegression` object and fit the data
mlr = MyLinearRegression()
print("We have not fitted the data yet. There is no regression coefficients")
print("Regression coefficients:", mlr.coef_)
# Fit and inspect the learned parameters.
mlr.fit(X,y)
print("We have fitted the data. We can print the regression coefficients now")
print("Regression coefficients:", mlr.coef_)
print("The intercept term is given by: ", mlr.intercept_)
# ---
# ### Comparison of ground truth and fitted values
# Wouldn't it be nice to compare the ground truth with the predictions and see how closely they fit
coef_ = mlr.coef_
# Manual prediction: X @ coef + intercept.
y_pred = np.dot(X,coef_)+mlr.intercept_
plt.scatter(y,y_pred,s=100,alpha=0.75,color='red',edgecolor='k')
plt.plot(y,y,c='k',linestyle='dotted')
plt.grid(True)
plt.show()
# ---
# ### Encapsulation
# But we don't want to write stand-alone code. Can we _encapsulate_ the code inside the class?
class MyLinearRegression:
    """Linear regression (normal equations) that also stores the fitted
    values and can plot them against the true outputs."""

    def __init__(self, fit_intercept=True):
        self.coef_ = None
        self.intercept_ = None
        self._fit_intercept = fit_intercept

    def __repr__(self):
        return "I am a Linear Regression model!"

    def fit(self, X, y):
        """
        Fit model coefficients.

        Arguments:
        X: 1D or 2D numpy array
        y: 1D numpy array
        """
        # check if X is 1D or 2D array
        if len(X.shape) == 1:
            X = X.reshape(-1,1)
        # add bias if fit_intercept is True
        if self._fit_intercept:
            X_biased = np.c_[np.ones(X.shape[0]), X]
        else:
            X_biased = X
        # closed form solution: coef = (X^T X)^-1 X^T y
        xTx = np.dot(X_biased.T, X_biased)
        inverse_xTx = np.linalg.inv(xTx)
        xTy = np.dot(X_biased.T, y)
        coef = np.dot(inverse_xTx, xTy)
        # set attributes
        if self._fit_intercept:
            self.intercept_ = coef[0]
            self.coef_ = coef[1:]
        else:
            self.intercept_ = 0
            self.coef_ = coef
        # Predicted/fitted y.
        # BUGFIX: the original read the *global* instance `mlr`
        # (np.dot(X, mlr.coef_) + mlr.intercept_) instead of `self`.
        self.fitted_ = np.dot(X, self.coef_) + self.intercept_

    def plot_fitted(self,reference_line=False):
        """
        Plots fitted values against the true output values from the data

        Arguments:
        reference_line: A Boolean switch to draw a 45-degree reference line on the plot
        """
        plt.title("True vs. fitted values",fontsize=14)
        # NOTE(review): `y` here is the notebook-global target vector, not an
        # instance attribute -- the plot is only valid right after fit(X, y).
        plt.scatter(y,self.fitted_,s=100,alpha=0.75,color='red',edgecolor='k')
        if reference_line:
            plt.plot(y,y,c='k',linestyle='dotted')
        plt.xlabel("True values")
        plt.ylabel("Fitted values")
        plt.grid(True)
        plt.show()
# ---
# ### Demo the new `plot_fitted` method
# Now the `MyLinearRegression` class has the ability (aka methods) to both fit the data and visualize the fitted/true output values in a plot
# A fresh instance
mlr = MyLinearRegression()
# Fitting with the data
mlr.fit(X,y)
# Call the 'plot_fitted' method (scatter of true vs. fitted values)
mlr.plot_fitted()
# We can pass on a switch 'reference_line' to draw a 45-degree reference line on the plot
mlr.plot_fitted(reference_line=True)
# ---
# ### Adding a `predict` method
# Now, we want to extend the functionality and add a `predict` method to enable the class to predict for any arbitrary new dataset
class MyLinearRegression:
    """Linear regression (normal equations) with fitted-value storage,
    plotting, and prediction for new data."""

    def __init__(self, fit_intercept=True):
        self.coef_ = None
        self.intercept_ = None
        self._fit_intercept = fit_intercept

    def __repr__(self):
        return "I am a Linear Regression model!"

    def fit(self, X, y):
        """
        Fit model coefficients.

        Arguments:
        X: 1D or 2D numpy array
        y: 1D numpy array
        """
        # check if X is 1D or 2D array
        if len(X.shape) == 1:
            X = X.reshape(-1,1)
        # add bias if fit_intercept is True
        if self._fit_intercept:
            X_biased = np.c_[np.ones(X.shape[0]), X]
        else:
            X_biased = X
        # closed form solution: coef = (X^T X)^-1 X^T y
        xTx = np.dot(X_biased.T, X_biased)
        inverse_xTx = np.linalg.inv(xTx)
        xTy = np.dot(X_biased.T, y)
        coef = np.dot(inverse_xTx, xTy)
        # set attributes
        if self._fit_intercept:
            self.intercept_ = coef[0]
            self.coef_ = coef[1:]
        else:
            self.intercept_ = 0
            self.coef_ = coef
        # Predicted/fitted y.
        # BUGFIX: the original read the *global* instance `mlr`
        # (np.dot(X, mlr.coef_) + mlr.intercept_) instead of `self`.
        self.fitted_ = np.dot(X, self.coef_) + self.intercept_

    def plot_fitted(self,reference_line=False):
        """
        Plots fitted values against the true output values from the data

        Arguments:
        reference_line: A Boolean switch to draw a 45-degree reference line on the plot
        """
        plt.title("True vs. fitted values",fontsize=14)
        # NOTE(review): `y` here is the notebook-global target vector, not an
        # instance attribute -- the plot is only valid right after fit(X, y).
        plt.scatter(y,self.fitted_,s=100,alpha=0.75,color='red',edgecolor='k')
        if reference_line:
            plt.plot(y,y,c='k',linestyle='dotted')
        plt.xlabel("True values")
        plt.ylabel("Fitted values")
        plt.grid(True)
        plt.show()

    def predict(self, X):
        """Output model prediction.

        Arguments:
        X: 1D or 2D numpy array
        """
        # check if X is 1D or 2D array
        if len(X.shape) == 1:
            X = X.reshape(-1,1)
        self.predicted_ = self.intercept_ + np.dot(X, self.coef_)
        return self.predicted_
# ---
# ### Testing `predict` method with new data
# Note the number of samples is different from the training set.
# New data drawn from the same generative process as the training set.
num_new_samples = 10
X_new = 10*np.random.random(size=(num_new_samples,2))
y_new = 3.5*X_new.T[0]-1.2*X_new.T[1]+2*np.random.randn(num_new_samples)
# Fit on the original data, then predict the unseen samples.
mlr = MyLinearRegression()
mlr.fit(X,y)
y_pred=mlr.predict(X_new)
plt.scatter(y_new,y_pred,s=100,alpha=0.75,color='red',edgecolor='k')
plt.plot(y_new,y_new,c='k',linestyle='dotted')
plt.xlabel("True values")
plt.ylabel("Predicted values")
plt.grid(True)
plt.show()
# ---
# ### Moving towards regression metrics - degrees of freedom
# We will now move towards regression metrics (and statistical inference). For that, we first need to introduce few more attributes associated with the dataset - degrees of freedom. They will be computed when we try to fit a dataset. They will be used later to compute metric like $\textbf{adjusted } R^2$.
#
# `dft_` : degrees of freedom of the estimate of the population variance of the dependent variable<br>
# `dfe_` : degrees of freedom of the estimate of the underlying population error variance
class MyLinearRegression:
    """Ordinary least-squares linear regression via the normal equations.

    Attributes set by ``fit``: ``coef_``, ``intercept_``, ``features_``,
    ``target_``, ``dft_``, ``dfe_``, ``fitted_``.
    """

    def __init__(self, fit_intercept=True):
        self.coef_ = None
        self.intercept_ = None
        self._fit_intercept = fit_intercept

    def __repr__(self):
        return "I am a Linear Regression model!"

    def fit(self, X, y):
        """
        Fit model coefficients.

        Arguments:
        X: 1D or 2D numpy array
        y: 1D numpy array
        """
        # promote a 1D feature vector to a single-column matrix
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        # features and data
        self.features_ = X
        self.target_ = y  # BUG FIX: was misspelled ``target__``
        # degrees of freedom of population dependent variable variance
        self.dft_ = X.shape[0] - 1
        # degrees of freedom of population error variance
        self.dfe_ = X.shape[0] - X.shape[1] - 1
        # add bias if fit_intercept is True
        if self._fit_intercept:
            X_biased = np.c_[np.ones(X.shape[0]), X]
        else:
            X_biased = X
        # closed form solution: coef = (X^T X)^{-1} X^T y
        xTx = np.dot(X_biased.T, X_biased)
        inverse_xTx = np.linalg.inv(xTx)
        xTy = np.dot(X_biased.T, y)
        coef = np.dot(inverse_xTx, xTy)
        # set attributes
        if self._fit_intercept:
            self.intercept_ = coef[0]
            self.coef_ = coef[1:]
        else:
            self.intercept_ = 0
            self.coef_ = coef
        # Predicted/fitted y.  BUG FIX: the original referenced the global
        # ``mlr`` instance instead of ``self``.
        self.fitted_ = np.dot(X, self.coef_) + self.intercept_

    def plot_fitted(self, reference_line=False):
        """
        Plots fitted values against the true output values from the data.

        Arguments:
        reference_line: A Boolean switch to draw a 45-degree reference line on the plot
        """
        # BUG FIX: use the stored target rather than the notebook global ``y``
        plt.scatter(self.target_, self.fitted_, s=100, alpha=0.75, color='red', edgecolor='k')
        if reference_line:
            plt.plot(self.target_, self.target_, c='k', linestyle='dotted')
        plt.xlabel("True values")
        plt.ylabel("Fitted values")
        plt.grid(True)
        plt.show()

    def predict(self, X):
        """Output model prediction.

        Arguments:
        X: 1D or 2D numpy array
        """
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        self.predicted_ = self.intercept_ + np.dot(X, self.coef_)
        return self.predicted_
# Fit on the notebook-level X, y and inspect the new degrees-of-freedom attributes
mlr = MyLinearRegression()
mlr.fit(X,y)
mlr.dfe_
mlr.dft_
# ### `Metrics` class
# We could have added a whole bunch of methods directly into the `MyLinearRegression` class. But, instead, we will show the power of inheritance and define a separate class `Metrics` for computing common metrics of a regression model.
#
# Note, this class has no `__init__` method because we will never instantiate an object using this class. Rather, we will sort of absorb this class into the `MyLinearRegression` class.
class Metrics:
    """Mixin of goodness-of-fit metrics for a fitted linear regression.

    Expects the host class to provide ``resid_``, ``target_``, ``features_``,
    ``dft_``, ``dfe_`` and a ``predict`` method.
    """

    def sse(self):
        '''returns sum of squared errors (model vs actual)'''
        self.sq_error_ = np.sum(self.resid_ ** 2)
        return self.sq_error_

    def sst(self):
        '''returns total sum of squared errors (actual vs avg(actual))'''
        deviations = self.target_ - np.mean(self.target_)
        self.sst_ = np.sum(deviations ** 2)
        return self.sst_

    def r_squared(self):
        '''returns calculated value of r^2'''
        self.r_sq_ = 1 - self.sse() / self.sst()
        return self.r_sq_

    def adj_r_squared(self):
        '''returns calculated value of adjusted r^2'''
        self.adj_r_sq_ = 1 - (self.sse() / self.dfe_) / (self.sst() / self.dft_)
        return self.adj_r_sq_

    def mse(self):
        '''returns calculated value of mse'''
        errors = self.predict(self.features_) - self.target_
        self.mse_ = np.mean(errors ** 2)
        return self.mse_

    def pretty_print_stats(self):
        '''returns report of statistics for a given model object'''
        stats = (('sse:', self.sse()), ('sst:', self.sst()),
                 ('mse:', self.mse()), ('r^2:', self.r_squared()),
                 ('adj_r^2:', self.adj_r_squared()))
        for label, value in stats:
            print('{0:8} {1:.4f}'.format(label, value))
# ### Class with inheritance
# Now we inherit the `Metrics` class in the `MyLinearRegression` class by passing on `Metrics` in the very definition of the `MyLinearRegression` class.
#
# We also need to add a new attribute - `resid_`. These are the residuals (the difference between the fitted values and the true target/output values), which are used by the methods in the `Metrics` class to perform the necessary computations.
class MyLinearRegression(Metrics):
    """OLS linear regression with inherited goodness-of-fit metrics.

    Attributes set by ``fit``: ``coef_``, ``intercept_``, ``features_``,
    ``target_``, ``dft_``, ``dfe_``, ``fitted_``, ``resid_``.
    """

    def __init__(self, fit_intercept=True):
        self.coef_ = None
        self.intercept_ = None
        self._fit_intercept = fit_intercept

    def __repr__(self):
        return "I am a Linear Regression model!"

    def fit(self, X, y):
        """
        Fit model coefficients.

        Arguments:
        X: 1D or 2D numpy array
        y: 1D numpy array
        """
        # promote a 1D feature vector to a single-column matrix
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        # features and data
        self.features_ = X
        self.target_ = y
        # degrees of freedom of population dependent variable variance
        self.dft_ = X.shape[0] - 1
        # degrees of freedom of population error variance
        self.dfe_ = X.shape[0] - X.shape[1] - 1
        # add bias if fit_intercept is True
        if self._fit_intercept:
            X_biased = np.c_[np.ones(X.shape[0]), X]
        else:
            X_biased = X
        # closed form solution: coef = (X^T X)^{-1} X^T y
        xTx = np.dot(X_biased.T, X_biased)
        inverse_xTx = np.linalg.inv(xTx)
        xTy = np.dot(X_biased.T, y)
        coef = np.dot(inverse_xTx, xTy)
        # set attributes
        if self._fit_intercept:
            self.intercept_ = coef[0]
            self.coef_ = coef[1:]
        else:
            self.intercept_ = 0
            self.coef_ = coef
        # Predicted/fitted y.  BUG FIX: the original referenced the global
        # ``mlr`` instance instead of ``self``.
        self.fitted_ = np.dot(X, self.coef_) + self.intercept_
        # Residuals used by the inherited metrics
        self.resid_ = self.target_ - self.fitted_

    def plot_fitted(self, reference_line=False):
        """
        Plots fitted values against the true output values from the data

        Arguments:
        reference_line: A Boolean switch to draw a 45-degree reference line on the plot
        """
        plt.title("True vs. fitted values", fontsize=14)
        # BUG FIX: plot the stored target rather than the notebook global ``y``
        plt.scatter(self.target_, self.fitted_, s=100, alpha=0.75, color='red', edgecolor='k')
        if reference_line:
            plt.plot(self.target_, self.target_, c='k', linestyle='dotted')
        plt.xlabel("True values")
        plt.ylabel("Fitted values")
        plt.grid(True)
        plt.show()

    def predict(self, X):
        """Output model prediction.

        Arguments:
        X: 1D or 2D numpy array
        """
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        self.predicted_ = self.intercept_ + np.dot(X, self.coef_)
        return self.predicted_
# ### Demo the newly acquired power of `MyLinearRegression` - the metrics
mlr = MyLinearRegression()
# NOTE(review): `fit` returns None, so this assignment is unused
fit=mlr.fit(X,y)
mlr.sse()
mlr.sst()
mlr.r_squared()
mlr.pretty_print_stats()
mlr.resid_
# ---
# ### Visual diagnostics
# The success of a linear regression model depends on some fundamental assumptions about the nature of the underlying data that it tries to model. [See this article](https://www.jmp.com/en_us/statistics-knowledge-portal/what-is-regression/simple-linear-regression-assumptions.html) for a simple and intuitive understanding of these assumptions.
#
# It is, therefore, extremely important to check the quality of your linear regression model, by verifying whether these assumptions were “reasonably” satisfied (generally visual analytics methods, which are subject to interpretation, are used to check the assumptions).
#
# Visual diagnostics play a crucial part in this quality check. The following plots can be constructed from any fitted linear regression model. They can be termed diagnostics.
#
# * Residuals vs. predicting variables plots
# * Fitted vs. residuals plot
# * Histogram of the normalized residuals
# * Q-Q plot of the normalized residuals
#
# [See this article](https://towardsdatascience.com/how-do-you-check-the-quality-of-your-regression-model-in-python-fa61759ff685) for a more detailed discussion and the general approach. Here, we will add these visual diagnostics to the `MyLinearRegression` class.
#
# As an instance, let's plot the fitted vs. residuals plot. Ideally, this plot should show no pattern, residuals distributed completely randomly around the zero line.
# Fitted vs. residuals: ideally a patternless cloud around the zero line
plt.scatter(mlr.fitted_,mlr.resid_)
plt.hlines(y=0,xmin=np.amin(mlr.fitted_),xmax=np.amax(mlr.fitted_),color='k',linestyle='dashed')
plt.grid(True)
plt.show()
# ---
# ### Creating a separate `Diagnostics_plots` class
class Diagnostics_plots:
    """Mixin of visual/statistical regression diagnostics.

    Expects the host class to provide ``fitted_``, ``resid_`` and ``features_``.
    """

    def __init__(self):
        # BUG FIX: the original ``def __init__():`` omitted ``self``, so
        # direct instantiation raised a TypeError.
        pass

    def fitted_vs_residual(self):
        '''Plots fitted values vs. residuals'''
        plt.title("Fitted vs. residuals plot", fontsize=14)
        plt.scatter(self.fitted_, self.resid_, edgecolor='k')
        plt.hlines(y=0, xmin=np.amin(self.fitted_), xmax=np.amax(self.fitted_), color='k', linestyle='dashed')
        plt.xlabel("Fitted values")
        plt.ylabel("Residuals")
        plt.show()

    def fitted_vs_features(self):
        '''Plots residuals vs all feature variables in a grid'''
        num_plots = self.features_.shape[1]
        # grid of 3 columns with enough rows for every feature
        if num_plots % 3 == 0:
            nrows = int(num_plots / 3)
        else:
            nrows = int(num_plots / 3) + 1
        ncols = 3
        fig, ax = plt.subplots(nrows, ncols, figsize=(15, nrows * 3.5))
        axes = ax.ravel()
        # hide the unused trailing axes in the grid
        for i in range(num_plots, nrows * ncols):
            axes[i].set_visible(False)
        for i in range(num_plots):
            axes[i].scatter(self.features_.T[i], self.resid_, color='orange', edgecolor='k', alpha=0.8)
            axes[i].grid(True)
            axes[i].set_xlabel("Feature X[{}]".format(i))
            axes[i].set_ylabel("Residuals")
            axes[i].hlines(y=0, xmin=np.amin(self.features_.T[i]), xmax=np.amax(self.features_.T[i]),
                           color='k', linestyle='dashed')
        plt.show()

    def histogram_resid(self, normalized=True):
        '''Plots a histogram of the residuals (can be normalized)'''
        if normalized:
            norm_r = self.resid_ / np.linalg.norm(self.resid_)
        else:
            norm_r = self.resid_
        # cap the bin count for small samples
        num_bins = min(20, int(np.sqrt(self.features_.shape[0])))
        plt.title("Histogram of the normalized residuals")
        plt.hist(norm_r, bins=num_bins, edgecolor='k')
        plt.xlabel("Normalized residuals")
        plt.ylabel("Count")
        plt.show()

    def shapiro_test(self, normalized=True):
        '''Performs Shapiro-Wilk normality test on the residuals'''
        from scipy.stats import shapiro
        if normalized:
            norm_r = self.resid_ / np.linalg.norm(self.resid_)
        else:
            norm_r = self.resid_
        _, p = shapiro(norm_r)
        # p > 0.01: cannot reject normality at the 1% significance level
        if p > 0.01:
            print("The residuals seem to have come from a Gaussian process")
        else:
            print("The residuals does not seem to have come from a Gaussian process. \
            \nNormality assumptions of the linear regression may have been violated.")

    def qqplot_resid(self, normalized=True):
        '''Creates a quantile-quantile plot for residuals comparing with a normal distribution'''
        from scipy.stats import probplot
        if normalized:
            norm_r = self.resid_ / np.linalg.norm(self.resid_)
        else:
            norm_r = self.resid_
        plt.title("Q-Q plot of the normalized residuals")
        probplot(norm_r, dist='norm', plot=plt)
        plt.xlabel("Theoretical quantiles")
        plt.ylabel("Residual quantiles")
        plt.show()
# ---
# ### Inheritance from more than one classes
# We can inherit from more than one classes. Already, we have defined `MyLinearRegression` so as to inherit from `Metrics` class. We can add `Diagnostic_plots` to the list too.
class MyLinearRegression(Metrics, Diagnostics_plots):
    """OLS linear regression with inherited metrics and diagnostic plots.

    Attributes set by ``fit``: ``coef_``, ``intercept_``, ``features_``,
    ``target_``, ``dft_``, ``dfe_``, ``fitted_``, ``resid_``.
    """

    def __init__(self, fit_intercept=True):
        self.coef_ = None
        self.intercept_ = None
        self._fit_intercept = fit_intercept

    def __repr__(self):
        return "I am a Linear Regression model!"

    def fit(self, X, y):
        """
        Fit model coefficients.

        Arguments:
        X: 1D or 2D numpy array
        y: 1D numpy array
        """
        # promote a 1D feature vector to a single-column matrix
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        # features and data
        self.features_ = X
        self.target_ = y
        # degrees of freedom of population dependent variable variance
        self.dft_ = X.shape[0] - 1
        # degrees of freedom of population error variance
        self.dfe_ = X.shape[0] - X.shape[1] - 1
        # add bias if fit_intercept is True
        if self._fit_intercept:
            X_biased = np.c_[np.ones(X.shape[0]), X]
        else:
            X_biased = X
        # closed form solution: coef = (X^T X)^{-1} X^T y
        xTx = np.dot(X_biased.T, X_biased)
        inverse_xTx = np.linalg.inv(xTx)
        xTy = np.dot(X_biased.T, y)
        coef = np.dot(inverse_xTx, xTy)
        # set attributes
        if self._fit_intercept:
            self.intercept_ = coef[0]
            self.coef_ = coef[1:]
        else:
            self.intercept_ = 0
            self.coef_ = coef
        # Predicted/fitted y.  BUG FIX: the original referenced the global
        # ``mlr`` instance instead of ``self``.
        self.fitted_ = np.dot(X, self.coef_) + self.intercept_
        # Residuals used by the inherited metrics and diagnostics
        self.resid_ = self.target_ - self.fitted_

    def plot_fitted(self, reference_line=False):
        """
        Plots fitted values against the true output values from the data

        Arguments:
        reference_line: A Boolean switch to draw a 45-degree reference line on the plot
        """
        plt.title("True vs. fitted values", fontsize=14)
        # BUG FIX: plot the stored target rather than the notebook global ``y``
        plt.scatter(self.target_, self.fitted_, s=100, alpha=0.75, color='red', edgecolor='k')
        if reference_line:
            plt.plot(self.target_, self.target_, c='k', linestyle='dotted')
        plt.xlabel("True values")
        plt.ylabel("Fitted values")
        plt.grid(True)
        plt.show()

    def predict(self, X):
        """Output model prediction.

        Arguments:
        X: 1D or 2D numpy array
        """
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        self.predicted_ = self.intercept_ + np.dot(X, self.coef_)
        return self.predicted_
# ---
# ### Testing diagnostics plots
# Fit a 2-feature synthetic dataset and run the inherited diagnostics
num_samples=100
num_dim = 2
X = 10*np.random.random(size=(num_samples,num_dim))
y = 3.5*X.T[0]-1.2*X.T[1]+2*np.random.randn(num_samples)
mlr = MyLinearRegression()
mlr.fit(X,y)
mlr.fitted_vs_residual()
mlr.histogram_resid()
mlr.qqplot_resid()
# +
# Refit on a 5-feature dataset and plot residuals against each feature
num_samples=100
num_dim = 5
X = 10*np.random.random(size=(num_samples,num_dim))
coeff = np.array([2,-3.5,1.2,4.1,-2.5])
y = np.dot(coeff,X.T)+2*np.random.randn(num_samples)
mlr.fit(X,y)
mlr.fitted_vs_features()
# -
mlr.shapiro_test()
mlr.histogram_resid()
mlr.fitted_vs_residual()
mlr.plot_fitted()
# ---
# ### Moving normal plot methods to a separate class
# We saw the power of inheritance. Therefore, to de-clutter the main class definition, we should remove the plot methods to a separate plotting class. This is also a time-tested principle of OOP that methods, which can be grouped under a common category, should have their own class, which can be inherited by one main class.
#
# We define a `Data_plots` class which now contains the `plot_fitted` method. We also add a general pairwise plot functionality to this class using the `pairplot` from `Seaborn` library.
class Data_plots:
    """Mixin of general data plots for a fitted regression.

    Expects the host class to provide ``features_``, ``target_`` and ``fitted_``.
    """

    def __init__(self):
        # BUG FIX: the original ``def __init__():`` omitted ``self``, so
        # direct instantiation raised a TypeError.
        pass

    def pairplot(self):
        '''Creates pairplot of all variables and the target using the Seaborn library'''
        print("This may take a little time. Have patience...")
        from seaborn import pairplot
        from pandas import DataFrame
        # last DataFrame column is the target appended to the features
        df = DataFrame(np.hstack((self.features_, self.target_.reshape(-1, 1))))
        pairplot(df)
        plt.show()

    def plot_fitted(self, reference_line=False):
        """
        Plots fitted values against the true output values from the data

        Arguments:
        reference_line: A Boolean switch to draw a 45-degree reference line on the plot
        """
        plt.title("True vs. fitted values", fontsize=14)
        # BUG FIX: plot the stored target rather than the notebook global ``y``
        plt.scatter(self.target_, self.fitted_, s=100, alpha=0.75, color='red', edgecolor='k')
        if reference_line:
            plt.plot(self.target_, self.target_, c='k', linestyle='dotted')
        plt.xlabel("True values")
        plt.ylabel("Fitted values")
        plt.grid(True)
        plt.show()
class MyLinearRegression(Metrics, Diagnostics_plots, Data_plots):
    """OLS linear regression with metrics, diagnostics and data plots mixed in.

    Attributes set by ``fit``: ``coef_``, ``intercept_``, ``features_``,
    ``target_``, ``dft_``, ``dfe_``, ``fitted_``, ``resid_``.
    """

    def __init__(self, fit_intercept=True):
        self.coef_ = None
        self.intercept_ = None
        self._fit_intercept = fit_intercept

    def __repr__(self):
        return "I am a Linear Regression model!"

    def fit(self, X, y):
        """
        Fit model coefficients.

        Arguments:
        X: 1D or 2D numpy array
        y: 1D numpy array
        """
        # promote a 1D feature vector to a single-column matrix
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        # features and data
        self.features_ = X
        self.target_ = y
        # degrees of freedom of population dependent variable variance
        self.dft_ = X.shape[0] - 1
        # degrees of freedom of population error variance
        self.dfe_ = X.shape[0] - X.shape[1] - 1
        # add bias if fit_intercept is True
        if self._fit_intercept:
            X_biased = np.c_[np.ones(X.shape[0]), X]
        else:
            X_biased = X
        # closed form solution: coef = (X^T X)^{-1} X^T y
        xTx = np.dot(X_biased.T, X_biased)
        inverse_xTx = np.linalg.inv(xTx)
        xTy = np.dot(X_biased.T, y)
        coef = np.dot(inverse_xTx, xTy)
        # set attributes
        if self._fit_intercept:
            self.intercept_ = coef[0]
            self.coef_ = coef[1:]
        else:
            self.intercept_ = 0
            self.coef_ = coef
        # Predicted/fitted y.  BUG FIX: the original referenced the global
        # ``mlr`` instance instead of ``self``.
        self.fitted_ = np.dot(X, self.coef_) + self.intercept_
        # Residuals used by the inherited mixins
        self.resid_ = self.target_ - self.fitted_

    def predict(self, X):
        """Output model prediction.

        Arguments:
        X: 1D or 2D numpy array
        """
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        self.predicted_ = self.intercept_ + np.dot(X, self.coef_)
        return self.predicted_
# Fit a 5-feature synthetic dataset and demo the Data_plots methods
num_samples=100
num_dim = 5
X = 10*np.random.random(size=(num_samples,num_dim))
coeff = np.array([2,-3.5,1.2,4.1,-2.5])
y = np.dot(coeff,X.T)+2*np.random.randn(num_samples)
mlr = MyLinearRegression()
mlr.fit(X,y)
mlr.plot_fitted()
mlr.pairplot()
# ---
# ### Outliers detection
# Outliers can also be an issue impacting the model quality by having a disproportionate influence on the estimated model parameters. We can use a measure called **Cook’s distance** to check for outliers. It essentially measures the effect of deleting a given observation. Points with a large Cook’s distance need to be closely examined for being potential outliers.
#
# We can create a special `Outliers` class for storing functions related to outliers.
class Outliers:
    """Mixin of outlier/influence diagnostics built on statsmodels.

    Expects the host class to provide ``features_`` and ``target_``.
    """

    def __init__(self):
        # BUG FIX: the original ``def __init__():`` omitted ``self``, so
        # direct instantiation raised a TypeError.
        pass

    def cook_distance(self):
        '''Computes and plots Cook\'s distance'''
        import statsmodels.api as sm
        from statsmodels.stats.outliers_influence import OLSInfluence as influence
        # refit with statsmodels to get the influence measures
        lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()
        inf = influence(lm)
        (c, p) = inf.cooks_distance
        plt.figure(figsize=(8, 5))
        plt.title("Cook's distance plot for the residuals", fontsize=14)
        plt.stem(np.arange(len(c)), c, markerfmt=",", use_line_collection=True)
        plt.grid(True)
        plt.show()

    def influence_plot(self):
        '''Creates the influence plot'''
        import statsmodels.api as sm
        lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()
        fig, ax = plt.subplots(figsize=(10, 8))
        fig = sm.graphics.influence_plot(lm, ax=ax, criterion="cooks")
        plt.show()

    def leverage_resid_plot(self):
        '''Plots leverage vs normalized residuals' square'''
        import statsmodels.api as sm
        lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()
        fig, ax = plt.subplots(figsize=(10, 8))
        fig = sm.graphics.plot_leverage_resid2(lm, ax=ax)
        plt.show()
class MyLinearRegression(Metrics, Diagnostics_plots, Data_plots, Outliers):
    """OLS linear regression with metrics, plots and outlier diagnostics.

    Attributes set by ``fit``: ``coef_``, ``intercept_``, ``features_``,
    ``target_``, ``dft_``, ``dfe_``, ``fitted_``, ``resid_``.
    """

    def __init__(self, fit_intercept=True):
        self.coef_ = None
        self.intercept_ = None
        self._fit_intercept = fit_intercept

    def __repr__(self):
        return "I am a Linear Regression model!"

    def fit(self, X, y):
        """
        Fit model coefficients.

        Arguments:
        X: 1D or 2D numpy array
        y: 1D numpy array
        """
        # promote a 1D feature vector to a single-column matrix
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        # features and data
        self.features_ = X
        self.target_ = y
        # degrees of freedom of population dependent variable variance
        self.dft_ = X.shape[0] - 1
        # degrees of freedom of population error variance
        self.dfe_ = X.shape[0] - X.shape[1] - 1
        # add bias if fit_intercept is True
        if self._fit_intercept:
            X_biased = np.c_[np.ones(X.shape[0]), X]
        else:
            X_biased = X
        # closed form solution: coef = (X^T X)^{-1} X^T y
        xTx = np.dot(X_biased.T, X_biased)
        inverse_xTx = np.linalg.inv(xTx)
        xTy = np.dot(X_biased.T, y)
        coef = np.dot(inverse_xTx, xTy)
        # set attributes
        if self._fit_intercept:
            self.intercept_ = coef[0]
            self.coef_ = coef[1:]
        else:
            self.intercept_ = 0
            self.coef_ = coef
        # Predicted/fitted y.  BUG FIX: the original referenced the global
        # ``mlr`` instance instead of ``self``.
        self.fitted_ = np.dot(X, self.coef_) + self.intercept_
        # Residuals used by the inherited mixins
        self.resid_ = self.target_ - self.fitted_

    def predict(self, X):
        """Output model prediction.

        Arguments:
        X: 1D or 2D numpy array
        """
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        self.predicted_ = self.intercept_ + np.dot(X, self.coef_)
        return self.predicted_
# Fit a larger 5-feature synthetic dataset and run the outlier diagnostics
num_samples=200
num_dim = 5
X = 10*np.random.random(size=(num_samples,num_dim))
coeff = np.array([2,-3.5,1.2,4.1,-2.5])
y = np.dot(coeff,X.T)+2*np.random.randn(num_samples)
mlr = MyLinearRegression()
mlr.fit(X,y)
mlr.cook_distance()
mlr.influence_plot()
mlr.leverage_resid_plot()
# ---
# ### Multicollinearity check
# For multiple linear regression, judging multicollinearity is also critical from the statistical inference point of view. This assumption assumes minimal or no linear dependence between the predicting variables.
#
# We can compute the **variance inflation factors (VIF)** for each predicting variable. It is the ratio of variance in a model with multiple terms, divided by the variance of a model with one term alone.
#
# We create another special class `Multicollinearity` for this purpose.
class Multicollinearity:
    """Mixin for multicollinearity diagnostics.

    Expects the host class to provide ``features_``.
    """

    def __init__(self):
        # BUG FIX: the original ``def __init__():`` omitted ``self``, so
        # direct instantiation raised a TypeError.
        pass

    def vif(self):
        '''Computes the variance inflation factor (VIF) for each feature variable'''
        from statsmodels.stats.outliers_influence import variance_inflation_factor as vif
        # (the original also fitted an OLS model here but never used it)
        exog = np.asarray(self.features_)  # np.matrix is deprecated
        for i in range(self.features_.shape[1]):
            v = vif(exog, i)
            print("Variance inflation factor for feature {}: {}".format(i, round(v, 2)))
class MyLinearRegression(Metrics, Diagnostics_plots, Data_plots, Outliers, Multicollinearity):
    """OLS linear regression with all diagnostic mixins.

    Attributes set by ``fit``: ``coef_``, ``intercept_``, ``features_``,
    ``target_``, ``dft_``, ``dfe_``, ``fitted_``, ``resid_``.
    """

    def __init__(self, fit_intercept=True):
        self.coef_ = None
        self.intercept_ = None
        self._fit_intercept = fit_intercept

    def __repr__(self):
        return "I am a Linear Regression model!"

    def fit(self, X, y):
        """
        Fit model coefficients.

        Arguments:
        X: 1D or 2D numpy array
        y: 1D numpy array
        """
        # promote a 1D feature vector to a single-column matrix
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        # features and data
        self.features_ = X
        self.target_ = y
        # degrees of freedom of population dependent variable variance
        self.dft_ = X.shape[0] - 1
        # degrees of freedom of population error variance
        self.dfe_ = X.shape[0] - X.shape[1] - 1
        # add bias if fit_intercept is True
        if self._fit_intercept:
            X_biased = np.c_[np.ones(X.shape[0]), X]
        else:
            X_biased = X
        # closed form solution: coef = (X^T X)^{-1} X^T y
        xTx = np.dot(X_biased.T, X_biased)
        inverse_xTx = np.linalg.inv(xTx)
        xTy = np.dot(X_biased.T, y)
        coef = np.dot(inverse_xTx, xTy)
        # set attributes
        if self._fit_intercept:
            self.intercept_ = coef[0]
            self.coef_ = coef[1:]
        else:
            self.intercept_ = 0
            self.coef_ = coef
        # Predicted/fitted y.  BUG FIX: the original referenced the global
        # ``mlr`` instance instead of ``self``.
        self.fitted_ = np.dot(X, self.coef_) + self.intercept_
        # Residuals used by the inherited mixins
        self.resid_ = self.target_ - self.fitted_

    def predict(self, X):
        """Output model prediction.

        Arguments:
        X: 1D or 2D numpy array
        """
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        self.predicted_ = self.intercept_ + np.dot(X, self.coef_)
        return self.predicted_
# Fit a synthetic dataset and compute per-feature VIF values
num_samples=200
num_dim = 5
X = 10*np.random.random(size=(num_samples,num_dim))
coeff = np.array([2,-3.5,1.2,4.1,-2.5])
y = np.dot(coeff,X.T)+2*np.random.randn(num_samples)
mlr = MyLinearRegression()
mlr.fit(X,y)
mlr.vif()
# ### Syntactic sugar - `run_diagnostics` and `outlier_plots` methods added to the main class
class MyLinearRegression(Metrics, Diagnostics_plots, Data_plots, Outliers, Multicollinearity):
    """OLS linear regression with all mixins plus convenience runner methods.

    Attributes set by ``fit``: ``coef_``, ``intercept_``, ``features_``,
    ``target_``, ``dft_``, ``dfe_``, ``fitted_``, ``resid_``.
    """

    def __init__(self, fit_intercept=True):
        self.coef_ = None
        self.intercept_ = None
        self._fit_intercept = fit_intercept

    def __repr__(self):
        return "I am a Linear Regression model!"

    def fit(self, X, y):
        """
        Fit model coefficients.

        Arguments:
        X: 1D or 2D numpy array
        y: 1D numpy array
        """
        # promote a 1D feature vector to a single-column matrix
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        # features and data
        self.features_ = X
        self.target_ = y
        # degrees of freedom of population dependent variable variance
        self.dft_ = X.shape[0] - 1
        # degrees of freedom of population error variance
        self.dfe_ = X.shape[0] - X.shape[1] - 1
        # add bias if fit_intercept is True
        if self._fit_intercept:
            X_biased = np.c_[np.ones(X.shape[0]), X]
        else:
            X_biased = X
        # closed form solution: coef = (X^T X)^{-1} X^T y
        xTx = np.dot(X_biased.T, X_biased)
        inverse_xTx = np.linalg.inv(xTx)
        xTy = np.dot(X_biased.T, y)
        coef = np.dot(inverse_xTx, xTy)
        # set attributes
        if self._fit_intercept:
            self.intercept_ = coef[0]
            self.coef_ = coef[1:]
        else:
            self.intercept_ = 0
            self.coef_ = coef
        # Predicted/fitted y.  BUG FIX: the original referenced the global
        # ``mlr`` instance instead of ``self``.
        self.fitted_ = np.dot(X, self.coef_) + self.intercept_
        # Residuals used by the inherited mixins
        self.resid_ = self.target_ - self.fitted_

    def predict(self, X):
        """Output model prediction.

        Arguments:
        X: 1D or 2D numpy array
        """
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        self.predicted_ = self.intercept_ + np.dot(X, self.coef_)
        return self.predicted_

    def run_diagnostics(self):
        '''Runs diagnostics tests and plots'''
        Diagnostics_plots.fitted_vs_residual(self)
        Diagnostics_plots.histogram_resid(self)
        Diagnostics_plots.qqplot_resid(self)
        print()
        Diagnostics_plots.shapiro_test(self)

    def outlier_plots(self):
        '''Creates various outlier plots'''
        Outliers.cook_distance(self)
        Outliers.influence_plot(self)
        Outliers.leverage_resid_plot(self)
# Now we can generate data and test these new methods
num_samples=200
num_dim = 5
X = 10*np.random.random(size=(num_samples,num_dim))
coeff = np.array([2,-3.5,1.2,4.1,-2.5])
y = np.dot(coeff,X.T)+2*np.random.randn(num_samples)
mlr = MyLinearRegression()
mlr.fit(X,y)
# #### Running all diagnostics
mlr.run_diagnostics()
# #### All outlier plots
mlr.outlier_plots()
# OOP_in_ML/Class_MyLinearRegression.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/singr7/MIRAutoencoder/blob/master/CNNLSTMAutoEncoder_with48bins.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="_9JqXiXhqdkN"
import torch
import torch.nn as nn
# + [markdown] id="ORCKRmYPHk-z"
# #Mount the google drive
# #Create list of numpy files for western and indian dataset
# + colab={"base_uri": "https://localhost:8080/"} id="cPnD6kkyHfx8" outputId="b6b9a3f6-6117-45a9-cae8-10a96d14a99e"
import torch
import numpy as np
import os
from google.colab import drive
# Mount Google Drive (force_remount picks up newly uploaded files)
drive.mount('/content/drive', force_remount=True)
western_files = []
western_file_dir = "/content/drive/My Drive/MusicResearchColabNB/MelFeatures/Western_mel_numpy"
# Collect every western mel-spectrogram .npy path (recursive walk)
for r,d, fileList in os.walk(western_file_dir):
    for file in fileList:
        western_files.append(os.path.join(r,file))
indian_files = []
indian_file_dir = "/content/drive/My Drive/MusicResearchColabNB/MelFeatures/Indian_mel_numpy"
# Same for the Indian dataset
for r,d, fileList in os.walk(indian_file_dir):
    for file in fileList:
        indian_files.append(os.path.join(r,file))
print(len(western_files))
print(len(indian_files))
# + [markdown] id="oMQbF-ylLmdK"
# # Balance the western dataset by taking files equal to Indian dataset files = 2008
# + colab={"base_uri": "https://localhost:8080/"} id="nXHKJAOLL-iX" outputId="70f03494-fc79-43a6-8ca9-4e0638fdcd91"
import random
#randomize the selection. To avoid getting a different random sample with every run, use seed
random.seed(234)
# Downsample the western set to the size of the Indian set (2008 files)
bal_western_files = random.sample(western_files,2008)
len(bal_western_files)
# + [markdown] id="xeBwpwaTX3Jo"
# #Define configuration class
# + id="jP4323imT53f"
class Configuration:
    """Hyperparameters and file locations for the mel autoencoder experiment."""
    seq_len = 200 # taking half of the original timesteps extracted
    input_dim = 48 #num of mels
    embedding_dim = 64
    batch_size = 2
    base_dir = "/content/drive/My Drive/MusicResearchColabNB/vajra/westernAE_48bins" # need to be edited..
    # summed (not averaged) MSE reconstruction loss
    loss_function = torch.nn.MSELoss(reduction='sum')
    lr=1e-3 # I edited it from 1e-3 to 1e-5
    n_epochs = 4
    model_file = "/content/drive/My Drive/MusicResearchColabNB/vajra/westernAE_48bins/models/mel.pkl" #need need edits
    results_dir = os.path.join(base_dir, "./results") # may need edits
    checkpoint_model_file = "/content/drive/My Drive/MusicResearchColabNB/vajra/westernAE_48bins/models/mel_checkpoint.pkl" #may need edits
    kernel_size = 3 #why?
    k_folds = 10
# + id="2RALRBXgZZBA"
class Encoder(torch.nn.Module):
    """Depthwise-convolution + stacked-LSTM encoder.

    Maps ``(batch, seq_len, n_features)`` to ``(batch, seq_len, embedding_dim)``.
    """

    def __init__(self, seq_len, n_features, embedding_dim=64, kernel_size=3, stride=1):
        super().__init__()
        self.seq_len = seq_len
        self.n_features = n_features
        self.embedding_dim = embedding_dim
        self.hidden_dim = 2 * embedding_dim
        # one independent 1-D filter per time step (groups == channels)
        self.conv = nn.Conv1d(
            in_channels=seq_len,
            out_channels=seq_len,
            kernel_size=kernel_size,
            stride=stride,
            groups=seq_len,
        )
        # width of the feature axis after the (un-padded) convolution
        reduced_dim = int(((n_features - kernel_size) / stride) + 1)
        self.rnn1 = nn.LSTM(input_size=reduced_dim, hidden_size=self.hidden_dim,
                            num_layers=1, batch_first=True)
        self.rnn2 = nn.LSTM(input_size=self.hidden_dim, hidden_size=embedding_dim,
                            num_layers=1, batch_first=True)

    def forward(self, x):
        """Encode a batch of sequences into embedded sequences."""
        features = self.conv(x)
        hidden_seq, _ = self.rnn1(features)
        embedded, _ = self.rnn2(hidden_seq)
        return embedded
# + id="lkW-A8TzZdGT"
class Decoder(torch.nn.Module):
    """Stacked-LSTM + linear decoder.

    Maps ``(batch, seq_len, embedding_dim)`` back to
    ``(batch, seq_len, n_features)``.
    """

    def __init__(self, seq_len, embedding_dim=64, n_features=48):
        super().__init__()
        self.seq_len = seq_len
        self.embedding_dim = embedding_dim
        self.hidden_dim = 2 * embedding_dim
        self.n_features = n_features
        self.rnn1 = nn.LSTM(input_size=embedding_dim, hidden_size=embedding_dim,
                            num_layers=1, batch_first=True)
        self.rnn2 = nn.LSTM(input_size=embedding_dim, hidden_size=self.hidden_dim,
                            num_layers=1, batch_first=True)
        # a single linear layer maps the flattened hidden sequence to the
        # flattened reconstruction
        self.output_layer = nn.Linear(self.hidden_dim * seq_len, n_features * seq_len)

    def forward(self, x):
        """Decode embedded sequences into reconstructed inputs."""
        out, _ = self.rnn1(x)
        out, _ = self.rnn2(out)
        flat = out.contiguous().view(out.shape[0], -1)
        recon = self.output_layer(flat)
        return recon.reshape(recon.shape[0], self.seq_len, self.n_features)
# + id="0mqrvU5MZfEA"
class RecurrentAutoencoder(torch.nn.Module):
    """Encoder/decoder pair trained end-to-end to reconstruct its input."""

    def __init__(self, seq_len, n_features, embedding_dim=64, device='cpu'):
        super().__init__()
        self.encoder = Encoder(seq_len, n_features, embedding_dim).to(device)
        self.decoder = Decoder(seq_len, embedding_dim, n_features).to(device)

    def forward(self, x):
        """Reconstruct the input via the bottleneck embedding."""
        return self.decoder(self.encoder(x))
# + id="aRQ-t9aNZiUD" colab={"base_uri": "https://localhost:8080/"} outputId="3207e979-bf54-4bd3-c5dd-1bd3cf9997b7"
x = torch.randn(10, 48, 400)
print(x.shape)
x = x.permute(0, 2, 1)
print(x.shape)
encoder = Encoder(400, 48, embedding_dim=64, kernel_size=3, stride=1)
encoded = encoder(x)
print(encoded.shape)
decoder = Decoder(400, 64, 48)
decoded = decoder(encoded)
print(decoded.shape)
rae = RecurrentAutoencoder(400, 48, 64)
output = rae(x)
print(output.shape)
# + id="lDpec6ICZnKN"
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import SequentialSampler
class CustomDatasetMel(Dataset):
    """Dataset yielding (time, mel) float tensors, loaded lazily from .npy files."""

    def __init__(self, dataList):
        # list of paths to per-clip mel-spectrogram numpy files
        self.data = dataList

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        import numpy as np
        spec = np.load(self.data[index])
        # keep the first 200 frames only, then transpose to (time, mel)
        sample = torch.tensor(spec[:, :200], dtype=torch.float)
        return sample.permute(1, 0)
# + colab={"base_uri": "https://localhost:8080/"} id="uAv1gLc6Zwwr" outputId="fc174177-1e20-4dad-ba2a-4c4cdf8b39b8"
import copy
import math
import numpy as np
import torch
class TrainingWrapper:
    """Bundles model, optimizer and data loaders, and runs the training loop.

    NOTE(review): several methods read the module-level globals ``config``,
    ``device`` and ``fold`` instead of instance state - confirm these exist
    in the notebook namespace before calling ``train``.
    """
    def __init__(self, config, training_loader, test_loader, device, val_loader=None, cross=10):
        self.config = config
        self.training_loader = training_loader
        self.test_loader = test_loader
        self.val_loader = val_loader
        self.device = device
        self.model = RecurrentAutoencoder(self.config.seq_len, self.config.input_dim, self.config.embedding_dim, device=self.device)
        self.model = self.model.to(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.config.lr)
        self.criterion = self.config.loss_function.to(self.device)
        # per-epoch loss history for the three data splits
        self.history = dict(train=[], val=[], cross_val=[])
        self.best_model_wts = copy.deepcopy(self.model.state_dict())
        self.best_loss = 10000.0  # sentinel; any real validation loss should beat this
        #print(self.config.base_dir + self.config.model_file)
        # persist the (untrained) weights so the model file always exists
        torch.save(self.model.state_dict(), self.config.model_file)
        self.cross = cross
    def visualizeTraining(self, epoch, trn_losses, tst_losses, val_losses, save_dir,cross):
        # visualize the loss as the network trained
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(10, 8))
        plt.plot(range(0, len(trn_losses)), trn_losses, label='Training Loss')
        if tst_losses:
            plt.plot(range(0, len(tst_losses)), tst_losses, label='Validation Loss')
        if val_losses:
            plt.plot(range(0, len(val_losses)), val_losses, label='Cross Validation Loss')
        # mark the epoch with the lowest validation loss so far
        minposs = tst_losses.index(min(tst_losses))
        plt.axvline(minposs, linestyle='--', color='r', label='Early Stopping Checkpoint')
        plt.xlabel('epochs')
        plt.ylabel('loss')
        # plt.ylim(0, 0.5) # consistent scale
        # plt.xlim(0, len(trn_losses)) # consistent scale
        plt.grid(True)
        plt.legend()
        plt.tight_layout()
        plt.show()
        fig.savefig(os.path.join(save_dir , 'loss_plot_{}.png'.format(cross)), bbox_inches='tight')
    def train(self):
        # NOTE(review): reads the global ``config`` (not ``self.config``) and
        # assumes a checkpoint file already exists - confirm before first run.
        self.model.load_state_dict(torch.load(config.checkpoint_model_file))
        for epoch in range(1, self.config.n_epochs + 1):
            self.model = self.model.train()
            train_losses = []
            for i, data in enumerate(self.training_loader,0):
                x = data
                self.optimizer.zero_grad()
                x = x.to(self.device)
                output = self.model(x)
                loss = self.criterion(output, x)
                loss.backward()
                self.optimizer.step()
                train_losses.append(loss.item())
                print("in training loop, epoch {}, step {}, the loss is {}".format(epoch, i, loss.item()))
            val_losses = []
            self.model = self.model.eval()
            with torch.no_grad():
                for i, data in enumerate(self.test_loader):
                    x = data
                    # NOTE(review): uses the global ``device``, not self.device
                    x = x.to(device)
                    output = self.model(x)
                    loss = self.criterion(output, x)
                    val_losses.append(loss.item())
            cross_val_losses = []
            self.model = self.model.eval()
            with torch.no_grad():
                for i, data in enumerate(self.val_loader):
                    x = data
                    # NOTE(review): global ``device`` again
                    x = x.to(device)
                    output = self.model(x)
                    loss = self.criterion(output, x)
                    cross_val_losses.append(loss.item())
            train_loss = np.mean(train_losses)
            val_loss = np.mean(val_losses)
            cross_val_loss = np.mean(cross_val_losses)
            self.history['train'].append(train_loss)
            self.history['val'].append(val_loss)
            self.history['cross_val'].append(cross_val_loss)
            # track the best validation loss seen so far
            if val_loss < self.best_loss:
                self.best_loss = val_loss
                self.best_model_wts = copy.deepcopy(self.model.state_dict())
                #torch.save(self.model.state_dict(), self.config.checkpoint_model_file)
            # checkpoint every other epoch
            if epoch % 2 == 0:
                torch.save(self.model.state_dict(), self.config.checkpoint_model_file)
            # NOTE(review): ``fold`` is a global expected from an external k-fold loop
            self.visualizeTraining(epoch, trn_losses= self.history['train'], tst_losses=self.history['val'], val_losses =self.history['cross_val'], save_dir=self.config.base_dir + "/results",cross=fold)
            print(f'k-fold {fold}:: Epoch {epoch}: train loss {train_loss} val loss {val_loss}')
        self.model.load_state_dict(self.best_model_wts)
        torch.save(self.model.state_dict(), self.config.model_file)
        return self.model.eval(), self.history
# Script configuration: 'train' runs the K-fold loop below, 'test' runs reconstruction.
mode = 'train'
# Feature type tag consulted by the branches below ("mel" spectrograms).
data = "mel"
config = Configuration()  # project-defined hyperparameter container (defined elsewhere in the notebook)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
import random
import os
def seed_everything(seed=1234):
    """Seed every RNG source used in this notebook for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Force deterministic cuDNN kernels (may be slower).
    torch.backends.cudnn.deterministic = True
def visualizeTraining(epoch, trn_losses, tst_losses, val_losses, save_dir):
    """Plot the fold-averaged train/validation/cross-validation loss curves
    and save the figure as loss_plot_MEAN.png under save_dir.

    `epoch` is accepted but unused.  NOTE(review): an empty tst_losses makes
    min() below raise ValueError — callers must pass a non-empty list.
    """
    # visualize the loss as the network trained
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(10, 8))
    plt.plot(range(0, len(trn_losses)), trn_losses, label='Training Loss')
    plt.plot(range(0, len(tst_losses)), tst_losses, label='Validation Loss')
    #if val_losses:
    plt.plot(range(0, len(val_losses)), val_losses, label='Cross Validation Loss')
    # Mark the epoch with the lowest validation loss (early-stopping point).
    minposs = tst_losses.index(min(tst_losses))
    plt.axvline(minposs, linestyle='--', color='r', label='Early Stopping Checkpoint')
    plt.xlabel('epochs')
    plt.ylabel('loss')
    # plt.ylim(0, 0.5) # consistent scale
    # plt.xlim(0, len(trn_losses)) # consistent scale
    plt.grid(True)
    plt.legend()
    plt.tight_layout()
    plt.show()
    fig.savefig(os.path.join(save_dir , 'loss_plot_{}.png'.format("MEAN")), bbox_inches='tight')
# Seed all RNG sources so the cross-validation runs are reproducible.
seed_everything()
# Training pool: balanced Western-music files; cross-validation set: Indian-music
# files (both lists are defined earlier in the notebook).
train_data = bal_western_files
val_data = indian_files
# Cross validation runs
# use sklearn KFolds
from sklearn.model_selection import KFold
kfold = KFold(n_splits=config.k_folds , shuffle=True)
# CustomDatasetMel (defined earlier) yields fixed-size mel-spectrogram tensors.
train_dataset = CustomDatasetMel(train_data)
val_dataset = CustomDatasetMel(val_data)
#Load the cross val dataset which is Full Indian dataset
#It is identical for all K-folds
crossval_loader = torch.utils.data.DataLoader(
    val_dataset,
    batch_size=config.batch_size,
    # Deterministic order; SequentialSampler is imported elsewhere in the notebook.
    sampler=SequentialSampler(val_dataset),
    drop_last=False)
# Per-fold loss histories, averaged element-wise across folds after the loop.
train_loss_mean_list = []
test_loss_mean_list = []
val_loss_mean_list = []
# Run one training pass per fold, collecting each fold's loss curves.
for fold, (train_ids, test_ids) in enumerate(kfold.split(train_dataset)):
    print(f'FOLD {fold}')
    print('--------------------------------')
    # Sample elements randomly from a given list of ids, no replacement.
    train_subsampler = torch.utils.data.SubsetRandomSampler(train_ids)
    test_subsampler = torch.utils.data.SubsetRandomSampler(test_ids)
    # Define data loaders for training and testing data in this fold
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        sampler=train_subsampler,
        drop_last=False)
    test_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        sampler=test_subsampler,
        drop_last=False)
    print("length of of train_loader is {} & length of traindataset is {}".format(len(train_loader),len(train_dataset)))
    print("length of of test_loader is {}".format(len(test_loader)))
    print("length of of val_loader is {}".format(len(crossval_loader)))
    if mode=="train":
        # Train a fresh model on this fold; the wrapper handles checkpoints/plots.
        trainingWrapper = TrainingWrapper(config=config, training_loader=train_loader, test_loader=test_loader, device=device, val_loader=crossval_loader, cross=fold)
        model, history = trainingWrapper.train()
        try:
            # Collect this fold's per-epoch curves for cross-fold averaging.
            train_loss_mean_list.append(history['train'])
            test_loss_mean_list.append(history['val'])
            val_loss_mean_list.append(history['cross_val'])
        except:
            # NOTE(review): bare except silently swallows every error here.
            print('Appendoing to train_loss_mean_list')
            print(f'At fold {fold} with train_loss:')
        if data=="mnist":
            #trainingWrapper.show_reconstruction(test_loader=test_loader, n_images=50)
            pass
    elif mode=="test":
        # TestingWrapper is defined elsewhere in the notebook.
        testWrapper = TestingWrapper(config=config, device=device)
        testWrapper.save_reconstruction(test_loader)
# Average the collected curves element-wise across folds
# (assumes every fold trained for the same number of epochs).
try:
    print('Before doing mean loss list calc')
    train_loss_mean_list_np = np.mean(train_loss_mean_list, axis=0)
    test_loss_mean_list_np = np.mean(test_loss_mean_list, axis=0)
    val_loss_mean_list_np = np.mean(val_loss_mean_list, axis=0)
except:
    # NOTE(review): bare except; ragged per-fold lists are the likely failure.
    print('# 1 Error happened while doing mean loss list#####')
    print("train_loss_mean_list",train_loss_mean_list)
    print("test_loss_mean_list",test_loss_mean_list)
    print("val_loss_mean_list",val_loss_mean_list)
try:
    tn_loss =train_loss_mean_list_np.tolist()
    tt_loss= test_loss_mean_list_np.tolist()
    v_loss= val_loss_mean_list_np.tolist()
    # Plot the averaged curves; the epoch argument is unused by the helper.
    visualizeTraining(0, tn_loss, tt_loss, v_loss, save_dir = config.base_dir + "/results")
except:
    print('Out of here..')
# + id="M3I63RgNmk73"
# Scratch cell: sanity-check the cross-fold averaging of per-epoch histories.
train_loss= []
train_losses1=[1,2,3,4]
train_losses2=[3,4,9,8]
train_loss_list=[]
history= dict(train=[])
train_loss=np.mean(train_losses1)
print(train_loss)
history['train'].append(train_loss)
print(history)
train_loss_list.append(history['train'])
print(train_loss_list)
# Element-wise mean over the collected curves (here a single curve).
train_loss_list = np.mean(train_loss_list,axis=0)
print(train_loss_list)
# + id="jZBuytiBCyYc"
# Scratch cell continuing the one above: `history` and `train_loss_list` carry
# over, and train_loss_list is a NumPy array (not a list) after the prior cell.
train_loss= []
train_losses1=[3,4,9,8]
#train_loss_list=[]
#history= dict(train=[])
train_loss=np.mean(train_losses1)
print(train_loss)
history['train'].append(train_loss)
print(history)
try:
    train_loss_list.append(history['train'])
except:
    # ndarray has no .append, so this path runs; np.append returns a NEW
    # array and its result is discarded, leaving train_loss_list unchanged.
    np.append(train_loss_list,history['train'])
    print(f'train_loss_list exception: {train_loss_list}')
print(train_loss_list)
train_loss_list = np.mean(train_loss_list,axis=0)
print(train_loss_list)
# + id="uIk79J7SUoYS"
| CNNLSTMAutoEncoder_with48bins.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Visualization of Permutation Groups
import numpy as np
import sympy as sp
import sympy.combinatorics as cmb
import matplotlib.pyplot as plt
import matplotlib.cm as cmap
import matplotlib_inline
import networkx as nx
from IPython.display import display,Math
matplotlib_inline.backend_inline.set_matplotlib_formats('svg')
# +
def get_color(idx):
    """Map an integer index to a Tableau palette colour name, cycling as needed."""
    palette = list(cmap.colors.TABLEAU_COLORS)
    return palette[idx % len(palette)]
def draw_perm(P, ax, color='k', font_color='w'):
    """Draw permutation P on axis ax as a directed graph: node i points to P(i)."""
    graph = nx.MultiDiGraph()
    graph.add_nodes_from(range(P.size))
    graph.add_edges_from((i, P(i)) for i in range(P.size))
    nx.draw_circular(
        graph,
        ax=ax,
        with_labels=True,
        node_color=color,
        font_color=font_color,
        font_size=8,
        node_size=150,
        node_shape='o',
        connectionstyle='arc3,rad=0.0',
    )
def draw_perm_group(PG):
    """Plot every permutation in group PG on a 5x5 grid of circular-layout axes."""
    fig = plt.figure(figsize=(10,10),dpi=100)
    fig.set_tight_layout(True)
    axes = fig.subplots(5,5).flatten()
    for idx, axis in enumerate(axes):
        if idx < len(PG):
            draw_perm(PG[idx], axis, get_color(idx), 'w')
        else:
            # Hide grid cells beyond the group's order.
            axis.set_visible(False)
    plt.draw()
# -
# ---
# ### $S_4$ - Symmetric group order 4
# All permutations of four elements.
S4 = list(cmb.generators.symmetric(4))
draw_perm_group(S4)
# ---
# ### $A_4$ - Alternating group order 4
# The even permutations of four elements.
A4 = list(cmb.generators.alternating(4))
draw_perm_group(A4)
# ---
# ### $D_6$ - Dihedral group order 6
# Symmetries of the regular hexagon (sympy's dihedral(6)).
D6 = list(cmb.generators.dihedral(6))
draw_perm_group(D6)
# ---
# ### $C_8$ - Cyclic group order 8
# Rotations generated by a single 8-cycle.
C8 = list(cmb.generators.cyclic(8))
draw_perm_group(C8)
| permutation-groups.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# ## [Grading Students](https://www.hackerrank.com/challenges/grading/problem)
# +
import sys
def round_to_next5(n):
    """Round n up to the nearest multiple of 5 (n itself if already a multiple)."""
    return ((n + 4) // 5) * 5

def solve(grades):
    """Apply HackerRank's grade-rounding policy.

    Failing grades (< 38) and grades more than 2 below the next multiple of 5
    stay unchanged; every other grade rounds up to that multiple.
    """
    rounded = []
    for grade in grades:
        nearest = round_to_next5(grade)
        if grade >= 38 and nearest - grade < 3:
            rounded.append(nearest)
        else:
            rounded.append(grade)
    return rounded
# Read the grade count, then one grade per line; print the rounded results.
n = int(input().strip())
grades = []
grades_i = 0
for grades_i in range(n):
    grades_t = int(input().strip())
    grades.append(grades_t)
result = solve(grades)
print("Results:")
print ("\n".join(map(str, result)))
# -
# ---
# ## [Apple and Orange](https://www.hackerrank.com/challenges/apple-and-orange/problem)
# +
import sys

# s..t: the house's span on the x-axis.
s,t = input().strip().split(' ')
s,t = [int(s),int(t)]
# a, b: positions of the apple and orange trees.
a,b = input().strip().split(' ')
a,b = [int(a),int(b)]
# m, n: number of fallen apples and oranges.
m,n = input().strip().split(' ')
m,n = [int(m),int(n)]
apple = [int(apple_temp) for apple_temp in input().strip().split(' ')]
orange = [int(orange_temp) for orange_temp in input().strip().split(' ')]
# Count fruit whose landing point (tree position + offset) hits the house.
apple_count, orange_count = 0, 0
for item in apple:
    if s <= a + item <= t:
        apple_count += 1
for item in orange:
    if s <= b + item <= t:
        orange_count += 1
print(apple_count)
print(orange_count)
# -
# ---
# ## [Kangaroo](https://www.hackerrank.com/challenges/kangaroo/problem)
# +
import sys
def kangaroo(x1, v1, x2, v2):
    """Return "YES" if two kangaroos starting at x1/x2 with jump lengths v1/v2
    ever land on the same spot after the same number of jumps, else "NO".

    They meet after j jumps iff x1 + j*v1 == x2 + j*v2 for some integer j >= 0,
    i.e. (x2 - x1) is a non-negative integer multiple of (v1 - v2).

    Fix over the original: equal start positions with different speeds now
    return "YES" (the kangaroos meet at jump 0); the original returned "NO".
    """
    if v1 == v2:
        # Equal speeds: the gap never changes, so they meet only if it is zero.
        return "YES" if x1 == x2 else "NO"
    jumps, remainder = divmod(x2 - x1, v1 - v2)
    # The gap must close exactly (remainder 0) after a non-negative number of
    # jumps (the faster kangaroo must start behind, not ahead).
    return "YES" if remainder == 0 and jumps >= 0 else "NO"
# Read start positions and jump distances; report whether the kangaroos meet.
x1, v1, x2, v2 = input().strip().split(' ')
x1, v1, x2, v2 = [int(x1), int(v1), int(x2), int(v2)]
result = kangaroo(x1, v1, x2, v2)
print(result)
# -
# ---
# ## [Between Two Sets](https://www.hackerrank.com/challenges/between-two-sets/problem)
# +
import sys
from functools import reduce
from math import gcd
def getTotalX(a, b):
    """Count integers that are multiples of every element of `a` and divisors
    of every element of `b`: the multiples of lcm(a) dividing gcd(b)."""
    lcm_a = reduce(lambda acc, v: acc * v // gcd(acc, v), a)
    gcd_b = reduce(gcd, b)
    total = 0
    for candidate in range(lcm_a, gcd_b + 1, lcm_a):
        if gcd_b % candidate == 0:
            total += 1
    return total
if __name__ == "__main__":
    # n, m are the lengths of a and b (not otherwise used by getTotalX).
    n, m = input().strip().split(' ')
    n, m = [int(n), int(m)]
    a = list(map(int, input().strip().split(' ')))
    b = list(map(int, input().strip().split(' ')))
    total = getTotalX(a, b)
    print(total)
# -
# ---
# ## [Breaking the Records](https://www.hackerrank.com/challenges/breaking-best-and-worst-records/problem)
# +
import sys
def getRecord(s):
    """Return (times the best score was broken, times the worst was broken)."""
    best = worst = s[0]
    best_breaks = worst_breaks = 0
    for score in s[1:]:
        if score > best:
            best = score
            best_breaks += 1
        elif score < worst:
            # A score cannot beat both records at once, so elif is safe.
            worst = score
            worst_breaks += 1
    return best_breaks, worst_breaks
# Read the game count and scores; print "best worst" record-break counts.
n = int(input().strip())
s = list(map(int, input().strip().split(' ')))
result = getRecord(s)
print (" ".join(map(str, result)))
# -
# ---
# ## [Birthday Chocolate](https://www.hackerrank.com/challenges/the-birthday-bar/problem)
# +
import sys
def solve(n, s, d, m):
    """Count length-m contiguous segments of s that sum to d (n == len(s))."""
    matches = 0
    for start in range(n - m + 1):
        if sum(s[start:start + m]) == d:
            matches += 1
    return matches
# d = target sum (birth day), m = segment length (birth month).
n = int(input().strip())
s = list(map(int, input().strip().split(' ')))
d, m = input().strip().split(' ')
d, m = [int(d), int(m)]
result = solve(n, s, d, m)
print(result)
# -
# ---
# ## [Divisible Sum Pairs](https://www.hackerrank.com/challenges/divisible-sum-pairs/problem)
# +
import sys
def divisibleSumPairs(n, k, ar):
    """Count index pairs i < j with (ar[i] + ar[j]) divisible by k."""
    pairs = 0
    for i, first in enumerate(ar):
        for second in ar[i + 1:]:
            if (first + second) % k == 0:
                pairs += 1
    return pairs
# Read n (array length) and k (divisor), then the array itself.
n, k = input().strip().split(' ')
n, k = [int(n), int(k)]
ar = list(map(int, input().strip().split(' ')))
result = divisibleSumPairs(n, k, ar)
print(result)
# -
# ---
# ## [Migratory Birds](https://www.hackerrank.com/challenges/migratory-birds/problem)
# +
import sys
def migratoryBirds(n, ar):
    """Return the most frequently sighted bird type; ties go to the smallest id.

    `n` (the length of `ar`) is kept for interface compatibility but unused.

    Fix over the original one-liner: it broke ties via the iteration order of
    `reversed(list(set(ar)))`, which is implementation-defined for sets, and
    called ar.count once per distinct value (O(n^2)).  This version counts in
    one pass and breaks ties explicitly by (highest count, lowest id).
    """
    counts = {}
    for bird in ar:
        counts[bird] = counts.get(bird, 0) + 1
    return min(counts, key=lambda bird: (-counts[bird], bird))
# Read the sighting count and sightings; print the most common bird type.
n = int(input().strip())
ar = list(map(int, input().strip().split(' ')))
result = migratoryBirds(n, ar)
print(result)
# -
# ---
# ## [Day of the Programmer](https://www.hackerrank.com/challenges/day-of-the-programmer/problem)
# +
import sys
def solve(year):
    """Return the Day of the Programmer (the 256th day) of `year` as dd.mm.yyyy.

    Russia used the Julian calendar through 1917 (leap iff divisible by 4),
    skipped Feb 1-13 in the 1918 transition, and follows the Gregorian rule
    from 1919 onward.  Years outside 1700-2700 fall through (returning None),
    matching the problem's stated constraints.
    """
    if year == 1918:
        # Transition year: 13 skipped days push day 256 to September 26.
        return "26.09.1918"
    if 1700 <= year <= 1917:
        # Julian rule: every fourth year is a leap year.
        return ("12.09." if year % 4 == 0 else "13.09.") + str(year)
    if 1919 <= year <= 2700:
        gregorian_leap = (year % 4 == 0 and year % 100 != 0) or year % 400 == 0
        return ("12.09." if gregorian_leap else "13.09.") + str(year)
# Read the year; print the Day of the Programmer in dd.mm.yyyy form.
year = int(input().strip())
result = solve(year)
print(result)
# -
# ---
# ## [Bon Appétit](https://www.hackerrank.com/challenges/bon-appetit/problem)
# +
import sys
def bonAppetit(n, k, b, ar):
    """Check Anna's bill: she skipped item k and was charged b.

    Return 'Bon Appetit' when the charge equals half of the shared items'
    total, otherwise the refund she is owed, as a string.
    """
    shared_total = 0
    for i in range(n):
        if i != k:
            shared_total += ar[i]
    fair_share = shared_total // 2
    if fair_share == b:
        return 'Bon Appetit'
    return str(b - fair_share)
# k = index of the item Anna did not eat, b = the amount she was charged.
n, k = input().strip().split(' ')
n, k = [int(n), int(k)]
ar = list(map(int, input().strip().split(' ')))
b = int(input().strip())
result = bonAppetit(n, k, b, ar)
print(result)
# -
# ---
# ## [Sock Merchant](https://www.hackerrank.com/challenges/sock-merchant/problem)
# +
import sys
from collections import Counter
def sockMerchant(n, ar):
    """Return how many matching pairs of socks the colour list `ar` contains."""
    tally = Counter(map(int, ar))
    return sum(count // 2 for count in tally.values())
# Read the sock count and colour list; print the number of pairs.
n = int(input().strip())
ar = list(map(int, input().strip().split(' ')))
result = sockMerchant(n, ar)
print(result)
# -
# ---
# ## [Drawing Book](https://www.hackerrank.com/challenges/drawing-book/problem)
# +
import sys
def solve(n, p):
    """Minimum page turns to reach page p in an n-page book, opening from
    whichever end (front or back) is closer; each turn reveals two pages."""
    from_front = p // 2
    from_back = n // 2 - from_front
    return min(from_front, from_back)
# n = number of pages in the book, p = the target page.
n = int(input().strip())
p = int(input().strip())
result = solve(n, p)
print(result)
# -
# ---
# ## [Counting Valleys](https://www.hackerrank.com/challenges/counting-valleys/problem)
# Counting Valleys: track altitude through a hike of U(p)/D(own) steps and
# count how many times the hiker climbs back up to sea level (ends a valley).
n = int(input())
steps = input()
level = 0
valley = 0
for ch in steps:
    if ch == 'U':
        level += 1
        # Reaching sea level on an upward step closes one valley.
        if level == 0:
            valley += 1
    else:
        level -= 1
print(valley)
# ---
# ## [Electronics Shop](https://www.hackerrank.com/challenges/electronics-shop/problem)
# +
import sys
def getMoneySpent(keyboards, drives, s):
    """Most expensive keyboard + USB-drive combo affordable within budget s,
    or -1 when no combination fits."""
    affordable = (kb + dr for kb in keyboards for dr in drives if kb + dr <= s)
    return max(affordable, default=-1)
# s = budget, n = number of keyboards, m = number of drives.
s,n,m = input().strip().split(' ')
s,n,m = [int(s),int(n),int(m)]
keyboards = list(map(int, input().strip().split(' ')))
drives = list(map(int, input().strip().split(' ')))
# The maximum amount of money she can spend on a keyboard and USB drive, or -1 if she can't purchase both items
moneySpent = getMoneySpent(keyboards, drives, s)
print(moneySpent)
# -
# ---
# ## [Cats and a Mouse](https://www.hackerrank.com/challenges/cats-and-a-mouse/problem)
# +
import sys

# Per query: cat A at x, cat B at y, mouse at z.  The strictly closer cat
# catches the mouse; equal distances let the mouse escape.
q = int(input().strip())
for a0 in range(q):
    x,y,z = input().strip().split(' ')
    x,y,z = [int(x),int(y),int(z)]
    print(['Cat A','Cat B', 'Mouse C'][0 if abs(x-z) < abs(y-z) else 1 if abs(x-z) > abs(y-z) else 2])
# -
# ---
# ## [Ema's Supercomputer](https://www.hackerrank.com/challenges/two-pluses/problem)
# +
from itertools import repeat, chain, combinations
# Grid dimensions, then the grid itself; True marks a 'G' (good) cell.
N, M = [int(x) for x in input().strip().split(' ')]
grid = [[c == 'G' for c in input().strip()] for _ in range(N)]
def all_plusses(i0, j0):
    """Yield the outer-edge cells of successively larger plusses centred at (i0, j0)."""
    # edge coordinates of all plusses radiating from this location
    # including the 1-square "plus"
    return chain([[(i0, j0)]],
                 zip(zip(range(i0+1, N), repeat(j0)), \
                     zip(reversed(range(0, i0)), repeat(j0)), \
                     zip(repeat(i0), range(j0+1, M)), \
                     zip(repeat(i0), reversed(range(0, j0)))))
def valid_plusses(i0, j0):
    """Yield frozensets of cells, one per plus size fully on good cells."""
    # yields sets of coordinates each describing a valid
    # plus originating at (i0, j0)
    coords = set()
    for edge in all_plusses(i0, j0):
        if all(grid[i][j] for i, j in edge):
            coords.update(edge)
            yield frozenset(coords)
        else:
            # A blocked arm rules out all larger plusses too.
            return
poss_plusses = chain.from_iterable(valid_plusses(i, j) for i in range(N) for j in range(M))
# Best product of areas over all pairs of non-overlapping plusses.
max_size = -1
for p1, p2 in combinations(poss_plusses, 2):
    if not (p1 & p2):
        max_size = max(max_size, len(p1) * len(p2))
print(max_size)
# -
# ---
# ## [Larry's Array](https://www.hackerrank.com/challenges/larrys-array/problem)
# Larry's Array: rotating three consecutive elements is an even permutation,
# so the array is sortable iff its inversion count is even.
t = int(input())
for i in range(t):
    a = 0
    n = int(input())
    li = [int(x) for x in input().split()]
    # Count inversions by brute force (O(n^2)).
    for j in range(n):
        for k in range(j+1,n):
            if li[j]>li[k]:
                a += 1
    if a%2 == 0:
        print("YES")
    else:
        print("NO")
# ---
# ## [Almost Sorted](https://www.hackerrank.com/challenges/almost-sorted/problem)
# +
# Almost Sorted: decide whether one swap or one sub-array reversal sorts arr.
n = int(input().strip())
arr = list(map(int, input().strip().split(' ')))
arr_sorted = sorted(arr)
# Positions where arr disagrees with its sorted version.
unsorted_indices = []
for ind in range(n):
    if arr[ind] != arr_sorted[ind]:
        unsorted_indices.append(ind)
# NOTE(review): an already-sorted input leaves unsorted_indices empty, and the
# else-branch below raises IndexError; presumably the inputs are never sorted.
if len(unsorted_indices) == 2:
    # Exactly two mismatches: swapping those two positions yields the sorted array.
    print("yes")
    print("swap", unsorted_indices[0] + 1, unsorted_indices[1] + 1)
else:
    # Otherwise the mismatched span must be a strictly descending run that a
    # single reversal fixes.
    arr_subset = arr[unsorted_indices[0]:unsorted_indices[-1] + 1]
    arr_sub_set_reverse_sorted = sorted(arr_subset, reverse=True)
    if arr_subset == arr_sub_set_reverse_sorted:
        print("yes")
        print("reverse", unsorted_indices[0] + 1, unsorted_indices[-1] + 1)
    else:
        print("no")
# -
# ---
# ## [Matrix Layer Rotation](https://www.hackerrank.com/challenges/matrix-rotation-algo/problem)
# +
# m rows, n columns, r rotations; then the matrix itself.
m, n, r = [int(i) for i in input().strip().split(' ')]
mat = [[int(i) for i in input().strip().split(' ')] for _ in range(m)]
def printMat(mat):
    """Print the matrix one row per line, elements space-separated.

    Fix: the original body was a Python 2 idiom — in Python 3, `print(elem,)`
    emits one element per line and the bare `print` expression is a no-op, so
    the row layout was lost entirely.
    """
    for row in mat:
        print(' '.join(str(elem) for elem in row))
def rotateMatrix(matr, r):
    """Rotate each rectangular ring of matr by r positions, in place.

    Relies on the module-level m (rows) and n (columns) read above.
    """
    offset = 0
    # Walk inward one ring at a time until the rings are exhausted.
    while n - offset > n / 2 and m - offset > m / 2:
        # Cell coordinates of the current ring, listed in rotation order.
        top = [(offset, i) for i in range(offset, n - offset, 1)]
        right = [(i, n - 1 - offset) for i in range(offset + 1, m - 1 - offset, 1)]
        bot = [(m - 1 - offset, i) for i in range(n - 1 - offset, offset - 1, -1)]
        left = [(i, offset) for i in range(m - offset - 2, offset, -1)]
        idx = top + right + bot + left
        circle = [matr[p[0]][p[1]] for p in idx]
        # Shift the ring's values by r modulo the ring length, then write back.
        rMod = r % len(circle)
        circle = circle[rMod:] + circle[0:rMod]
        for q in range(len(idx)):
            index = idx[q]
            matr[index[0]][index[1]] = circle[q]
        offset += 1
    return matr
printMat(rotateMatrix(mat, r))
| Algorithms/Implementation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Qiskit Aqua Tutorials
#
# ***
#
# Welcome Qiskitters to Qiskit Aqua!
#
# ***
#
# ## Contents
# Qiskit Aqua has the following tutorials and samples for the cross-domain library and domain-specific application and stacks built upon it. Aqua currently provides Machine Learning, Chemistry, Finance and Optimization domain applications.
#
# ### 1. [Qiskit Aqua](./)<a id='aqua'></a>
#
# This folder contains some Jupyter Notebook examples showing how to run algorithms in Aqua
# along with some Python code files too.
#
# The following notebooks are noted:
#
# * [Using Aqua algorithms, a how to guide](algorithm_introduction_with_vqe.ipynb)
# * [Using Aqua's quantum evolution functionality](evolution.ipynb)
# * [The EOH (Evolution of Hamiltonian) Algorithm](eoh.ipynb)
# * [Variational Eigensolver + Quantum Phase Estimation](vqe2iqpe.ipynb)
#
# The repository here may be viewed for the
# [full listing](./).
#
# ### 2. [Qiskit Chemistry](../chemistry/)<a id='chemistry'></a>
#
# This folder contains some Jupyter Notebook examples showing how to run algorithms in Qiskit Chemistry along with some Python code files too. There are also some .hdf5 files containing saved molecular data that can be used in experiments, see the main Qiskit Chemistry documentation for more information on the HDF5 driver and .hdf5 files.
#
# The following notebooks are noted:
#
# * [LiH plot using ExactEigensolver](../chemistry/energyplot.ipynb) One step up from getting started
# * [H2 dissociation curve using VQE with UCCSD](../chemistry/h2_uccsd.ipynb)
# * [LiH dissociation curve using VQE with UCCSD](../chemistry/lih_uccsd.ipynb)
# * [NaH dissociation curve using VQE with UCCSD](../chemistry/nah_uccsd.ipynb)
# * [Qiskit Chemistry, H2O ground state computation](../chemistry/h2o.ipynb) Water using VQE and UCCSD
# * [H2 ground state energy computation using Iterative QPE](../chemistry/h2_iqpe.ipynb)
# * [H2 ground state energy with VQE and SPSA](../chemistry/h2_vqe_spsa.ipynb) Near-term device experiment
#
# There are many more notebooks. The repository here may be viewed for the
# [full listing](../chemistry).
#
# ### 3. [Qiskit Machine Learning](../machine_learning/)<a id='machine_learning'></a>
#
# Qiskit Machine Learning is a set of tools, algorithms and software for use with quantum computers to carry out research and investigate how to take advantage of quantum computing power to solve machine learning problems.
#
# * [Quantum SVM algorithm: multiclass classifier extension](../machine_learning/qsvm_multiclass.ipynb)
# * [Variational Quantum Classifier (vqc)](../machine_learning/vqc.ipynb)
#
# The repository here may be viewed for the
# [full listing](../machine_learning).
#
# ### 4. [Qiskit Optimization](../optimization/)<a id='optimization'></a>
#
# Qiskit Optimization is a set of tools, algorithms and software for use with quantum computers to carry out research and investigate how to take advantage of quantum computing power to solve optimization problems.
#
# * [Using Grover Search for 3SAT problems](../optimization/grover.ipynb)
# * [Using Aqua for partition problems](../optimization/partition.ipynb)
# * [Using Aqua for stable-set problems](../optimization/stable_set.ipynb)
#
# The repository here may be viewed for the
# [full listing](../optimization).
#
# ### 5. [Qiskit Finance](../finance/)<a id='finance'></a>
#
# Qiskit Finance is a set of tools, algorithms and software for use with quantum computers to carry out research and investigate how to take advantage of quantum computing power to solve problems in the financial domain.
# Please also see the [Qiskit Finance Tutorials](https://github.com/Qiskit/qiskit-iqx-tutorials/tree/master/qiskit/advanced/aqua/finance/index.ipynb) for more examples.
#
# Quantum computing for option pricing:
# * <a href="../finance/simulation/long_butterfly.ipynb">Long Butterfly</a> (univariate, payoff with 4 segments)
# * <a href="../finance/simulation/short_butterfly.ipynb">Short Butterfly</a> (univariate, payoff with 4 segments)
# * <a href="../finance/simulation/iron_condor.ipynb">Iron Condor</a> (univariate, payoff with 5 segments)
#
# The repository here may be viewed for the
# [full listing](../finance).
#
# ***
| aqua/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
plt.style.use('seaborn')  # NOTE(review): removed in matplotlib 3.8; newer versions need 'seaborn-v0_8'
import scipy as sp
import numpy as np
signal = np.array([255, 253, 247, 236, 221, 203, 183, 162, 141, 122, 106, 94, 86, 82, 84, 91, 103, 118, 136, 156, 176, 196, 214, 229, 240, 247, 248, 244, 235, 222, 205, 186, 164, 143, 122, 103, 88, 77, 70, 68, 71, 80, 92, 108, 127, 147, 167, 185, 202, 215, 224, 228, 226, 220, 209, 194, 176, 155, 133, 111, 91, 73, 59, 49, 44, 44, 49, 59, 73, 90, 109, 129, 148, 166, 181, 193, 199, 201, 198, 190, 177, 161, 141, 120, 98, 77, 57, 41, 28, 21, 18, 20, 27, 39, 55, 73, 93, 113, 132, 149, 163, 173, 178, 178, 174, 164, 150, 132, 112, 91, 70, 50, 32, 17, 7, 2, 2, 6, 16, 30, 47, 67, 87, 108, 127, 144, 157, 165, 169, 168, 162, 151, 136, 118, 98, 77, 57, 38, 22, 10, 2, 0, 2, 9, 22, 37, 56, 77, 99, 119, 138, 154, 166, 174, 176, 173, 166, 154, 138, 120, 100, 80, 60, 43, 29, 20, 14, 14, 19, 29, 43, 61, 82, 103, 125, 146, 164, 179, 189, 195, 196, 192, 183, 170, 153, 135, 115, 95, 77, 61, 49, 41, 38, 40, 48, 60, 76, 95, 116, 138, 159, 179, 196, 210, 219, 223, 221, 215, 205, 190, 172, 153, 133, 114, 96, 82, 71, 65, 64, 68, 77, 91, 108, 128, 149, 171, 191, 210, 225, 237, 243, 245, 241, 233, 220, 204, 185, 165, 145, 125, 108, 95, 86, 81, 82, 87, 98, 112, 130, 150, 171, 192, 211, 228, 242, 250, 255, 253, 247, 236, 221, 203, 183, 162, 141, 122, 106, 94, 86, 82, 84, 91, 103, 118, 136, 156, 176, 196, 214, 229, 240, 247, 248, 244, 235, 222, 205, 186, 164, 143, 122, 103, 88, 77, 70, 68, 71, 80, 92, 108, 127, 147, 167, 185, 202, 215, 224, 228, 226, 220, 209, 194, 176, 155, 133, 111, 91, 73, 59, 49, 44, 44, 49, 59, 73, 90, 109, 129, 148, 166, 181, 193, 199, 201, 198, 190, 177, 161, 141, 120, 98, 77, 57, 41, 28, 21, 18, 20, 27, 39, 55, 73, 93, 113, 132, 149, 163, 173, 178, 178, 174, 164, 150, 132, 112, 91, 70, 50, 32, 17, 7, 2, 2, 6, 16, 30, 47, 67, 87, 108, 127, 144, 157, 165, 169, 168, 162, 151, 136, 118, 98, 77, 57, 38, 22, 10, 2, 0, 2, 9, 22, 37, 56, 77, 99, 119, 138, 154, 166, 174, 176, 173, 166, 154, 138, 120, 100, 80, 60, 43, 29, 20, 14, 14, 19, 29, 43, 61, 82, 103, 125, 146, 164, 179, 
189, 195, 196, 192, 183, 170, 153, 135, 115, 95, 77, 61, 49, 41, 38, 40, 48, 60, 76, 95, 116, 138, 159, 179, 196, 210, 219, 223, 221, 215, 205, 190, 172, 153, 133, 114, 96, 82, 71, 65, 64, 68, 77, 91, 108, 128, 149, 171, 191, 210, 225, 237, 243, 245, 241, 233, 220, 204, 185, 165, 145, 125, 108, 95, 86, 81, 82, 87, 98, 112, 130, 150, 171, 192, 211, 228, 242, 250])
# +
plt.subplots(1, figsize=(20, 10))
SAMPLE_RATE = 44100  # Hz
DURATION = 5  # seconds
def generate_sine_wave(freq, sample_rate, duration):
    """Return (t, y): `duration` seconds of a `freq`-Hz sine sampled at `sample_rate`."""
    t = np.linspace(0, duration, sample_rate * duration, endpoint=False)
    # Completed cycles at each sample instant; times 2*pi gives radians.
    cycles = t * freq
    return t, np.sin((2 * np.pi) * cycles)
# Генерируем волну с частотой 2 Гц, которая длится 5 секунд
_, nice_tone = generate_sine_wave(400, SAMPLE_RATE, DURATION)
_, noise_tone = generate_sine_wave(4000, SAMPLE_RATE, DURATION)
noise_tone = noise_tone * 0.3
mixed_tone = nice_tone + noise_tone
normalized_tone = np.int16((mixed_tone / mixed_tone.max()) * 32767)
plt.plot(normalized_tone[:2000])
# +
from scipy.io.wavfile import write
write("mysinewave.wav", SAMPLE_RATE, mixed_tone)
# +
N = SAMPLE_RATE * DURATION  # total number of samples
# Real-input FFT: yf is the spectrum, xf the matching frequency bins (Hz).
yf = sp.fft.rfft(normalized_tone)
xf = sp.fft.rfftfreq(N, 1 / SAMPLE_RATE)
plt.plot(xf, np.abs(yf))
# +
# rfft bins per hertz; locate the bin holding the 4000 Hz noise tone.
points_per_freq = len(xf) / (SAMPLE_RATE / 2)
noise_idx = int(points_per_freq * 4000)
# +
# Zero out the noise bin and re-plot the cleaned spectrum.
yf[noise_idx] = 0
plt.plot(xf, np.abs(yf))
# +
# Back to the time domain; save the filtered audio.
new_sig = sp.fft.irfft(yf)
plt.plot(new_sig[:1000])
write("sin_clean.wav", SAMPLE_RATE, new_sig)
# +
# Task: find the dominant frequency of the 8-bit `signal` array defined above.
N = signal.size
signal = np.uint8(signal)
yf = sp.fft.rfft(signal)
xf = sp.fft.rfftfreq(N, 1 / 256)  # assumes a 256 Hz sample rate -- TODO confirm
workYF = np.abs(yf)
# Frequencies of the three strongest peaks, excluding the 0 and 1 Hz bins.
result = np.where(np.isin(workYF, sorted(workYF)[-3:]))
result = xf[result]
result = result[(result != 0) & (result != 1)]
int(result)
| Module 1/Task 1/1 Task.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # KwikSort Re-Ranking with Estimating the ORACLE Axiom
#
# The notebook below exemplifies how `ir_axioms` can be used to re-rank a result set in PyTerrier using the KwikSort algorithm and an estimation of the ORACLE axiom.
# We use run files and qrels from the passage retrieval task of the TREC Deep Learning track in 2019 and 2020 as example (using BM25 as a baseline).
# In this notebook, we first train the ORACLE axiom estimation using preferences inferred from 2019 qrels and topics.
# Then, we re-rank using that trained `EstimatorAxiom` and evaluate nDCG@10, reciprocal rank, and average precision for the baseline and the re-ranked pipeline using PyTerrier's standard `Experiment` functionality.
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Preparation
#
# Install the `ir_axioms` framework and [PyTerrier](https://github.com/terrier-org/pyterrier). In Google Colab, we do this automatically.
# + pycharm={"name": "#%%\n"}
from sys import modules

# Install `ir_axioms` and PyTerrier only when running inside Google Colab.
# NOTE(review): in this plain-.py form the `if` body is only the shell-magic
# comment, leaving it empty — a SyntaxError outside Jupyter/jupytext.
if 'google.colab' in modules:
    # !pip install -q ir_axioms[examples] python-terrier
# + [markdown] pycharm={"name": "#%% md\n"}
# We initialize PyTerrier and import all required libraries and load the data from [ir_datasets](https://ir-datasets.com/).
# + pycharm={"name": "#%%\n"}
from pyterrier import started, init
# Start the PyTerrier/JVM bridge once per session.
if not started():
    init(tqdm="auto", no_download=True)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Datasets and Index
# Using PyTerrier's `get_dataset()`, we load the MS MARCO passage ranking dataset.
# + pycharm={"name": "#%%\n"}
from pyterrier.datasets import get_dataset, Dataset

# Load dataset.
dataset_name = "msmarco-passage"
dataset: Dataset = get_dataset(f"irds:{dataset_name}")
# TREC DL 2019 topics/qrels train the estimator; 2020 evaluates it.
dataset_train: Dataset = get_dataset(f"irds:{dataset_name}/trec-dl-2019/judged")
dataset_test: Dataset = get_dataset(f"irds:{dataset_name}/trec-dl-2020/judged")
# + [markdown] pycharm={"name": "#%% md\n"}
# Now define paths where we will store temporary files, datasets, and the search index.
# + pycharm={"name": "#%%\n"}
from pathlib import Path

# Axiom caches and the Terrier index live under ./cache.
cache_dir = Path("cache/")
index_dir = cache_dir / "indices" / dataset_name.split("/")[0]
# + [markdown] pycharm={"name": "#%% md\n"}
# If the index is not ready yet, now is a good time to create it and index the MS MARCO passages.
# (Lean back and relax as this may take a while...)
# + pycharm={"name": "#%%\n"}
from pyterrier.index import IterDictIndexer

# Build the Terrier index on first run; later runs reuse it from disk.
if not index_dir.exists():
    indexer = IterDictIndexer(str(index_dir.absolute()))
    indexer.index(
        dataset.get_corpus_iter(),
        fields=["text"]
    )
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Baseline Run
#
# We use PyTerrier's `BatchRetrieve` to create a baseline search pipeline for retrieving with BM25 from the index we just created.
# + pycharm={"name": "#%%\n"}
from pyterrier.batchretrieve import BatchRetrieve

# BM25 retrieval over the index: the baseline to be re-ranked below.
bm25 = BatchRetrieve(str(index_dir.absolute()), wmodel="BM25")
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Import Axioms
# Here we're listing which axioms we want to use in our experiments.
# Because some axioms require API calls or are computationally expensive, we cache all axioms using `ir_axiom`'s tilde operator (`~`).
# + pycharm={"name": "#%%\n"}
from ir_axioms.axiom import (
    ArgUC, QTArg, QTPArg, aSL, PROX1, PROX2, PROX3, PROX4, PROX5, TFC1, TFC3, RS_TF, RS_TF_IDF, RS_BM25, RS_PL2, RS_QL,
    AND, LEN_AND, M_AND, LEN_M_AND, DIV, LEN_DIV, M_TDC, LEN_M_TDC, STMC1, STMC1_f, STMC2, STMC2_f, LNC1, TF_LNC, LB1,
    REG, ANTI_REG, REG_f, ANTI_REG_f, ASPECT_REG, ASPECT_REG_f, ORIG
)

# Feature axioms for the ORACLE estimator.  The tilde operator wraps each
# axiom in a cache so expensive preference computations are not repeated.
axioms = [
    ~ArgUC(), ~QTArg(), ~QTPArg(), ~aSL(),
    ~LNC1(), ~TF_LNC(), ~LB1(),
    ~PROX1(), ~PROX2(), ~PROX3(), ~PROX4(), ~PROX5(),
    ~REG(), ~REG_f(), ~ANTI_REG(), ~ANTI_REG_f(), ~ASPECT_REG(), ~ASPECT_REG_f(),
    ~AND(), ~LEN_AND(), ~M_AND(), ~LEN_M_AND(), ~DIV(), ~LEN_DIV(),
    ~RS_TF(), ~RS_TF_IDF(), ~RS_BM25(), ~RS_PL2(), ~RS_QL(),
    ~TFC1(), ~TFC3(), ~M_TDC(), ~LEN_M_TDC(),
    ~STMC1(), ~STMC1_f(), ~STMC2(), ~STMC2_f(),
    ORIG()  # the original ranking score itself, uncached
]
# + [markdown] pycharm={"name": "#%% md\n"}
# ## KwikSort Re-ranking with Estimating the ORACLE Axiom
# We have now defined the axioms with which we want to estimate the ORACLE axiom.
# To remind, the ORACLE axiom replicates the perfect ordering induced by human relevance judgments (i.e. from qrels).
# We combine the preferences from all axioms in a random forest classifier.
# The resulting output preferences can be used with KwikSort to re-rank the top-20 baseline results.
# + pycharm={"name": "#%%\n"}
from sklearn.ensemble import RandomForestClassifier
from ir_axioms.modules.pivot import MiddlePivotSelection
from ir_axioms.backend.pyterrier.estimator import EstimatorKwikSortReranker

# Shallow forest that combines the axioms' pairwise preferences.
random_forest = RandomForestClassifier(
    max_depth=3,
)
# Pipeline: take BM25's top 20 results, then re-rank them with KwikSort
# driven by the estimator's predicted pairwise preferences.
kwiksort_random_forest = bm25 % 20 >> EstimatorKwikSortReranker(
    axioms=axioms,
    estimator=random_forest,
    index=index_dir,
    dataset=dataset_name,
    pivot_selection=MiddlePivotSelection(),
    cache_dir=cache_dir,
    verbose=True,
)
# + [markdown] pycharm={"name": "#%% md\n"}
# After setting up the trainable PyTerrier module, we pass in training topics and relevance judgments for training.
# + pycharm={"name": "#%%\n"}
# Fit the estimator on ORACLE preferences derived from the 2019 judgments.
kwiksort_random_forest.fit(dataset_train.get_topics(), dataset_train.get_qrels())
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Experimental Evaluation
# Because our axiomatic re-rankers are PyTerrier modules, we can now use PyTerrier's `Experiment` interface to evaluate various metrics and to compare our new approach to the BM25 baseline ranking.
# Refer to the PyTerrier [documentation](https://pyterrier.readthedocs.io/en/latest/experiments.html) to learn more about running experiments.
# (We concatenate results from the Baseline ranking for the ranking positions after the top-20 using the `^` operator.)
# + pycharm={"name": "#%%\n"}
from pyterrier.pipelines import Experiment
from ir_measures import nDCG, MAP, RR
# Evaluate the BM25 baseline against the axiomatic re-ranker on the test
# topics/qrels. `kwiksort_random_forest ^ bm25` concatenates BM25 results
# after the re-ranked top-20 so the runs are the same length.
experiment = Experiment(
    [bm25, kwiksort_random_forest ^ bm25],
    dataset_test.get_topics(),
    dataset_test.get_qrels(),
    [nDCG @ 10, RR, MAP],
    ["BM25", "KwikSort Random Forest"],
    verbose=True,
)
# Show the best-performing run first.
experiment.sort_values(by="nDCG@10", ascending=False, inplace=True)
# + pycharm={"name": "#%%\n"}
experiment
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Extra: Feature Importances
# Inspecting the feature importances from the random forest classifier can help to identify axioms that are not used for re-ranking.
# If an axiom's feature importance is zero for most of your applications, you may consider omitting it from the ranking pipeline.
# + pycharm={"name": "#%%\n"}
# One importance per axiom, in the order the axioms were passed to the re-ranker.
random_forest.feature_importances_
| examples/pyterrier_kwiksort_learned.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Clash of Clans: How many builders do you *really* need?
#
# ### (or, how should I spend those green gems?) <img src="builder.png" style="float:center;width:100px" />
# Hello everyone! I am a mid-level town hall 8 avid clasher with 4 builders. Recently I discovered (like so many other [people](https://www.reddit.com/r/ClashOfClans/comments/2psnf3/strategy_lab_time_longer_than_builder_time_what/))that at my level research, not build time, is the limiting factor for progress. This made me wonder, is it really worth it to save up for the fifth builder? Or should I just spend gems on barracks/collector boosts, finishing research/hero upgrades in a timely fashion, etc. To solve this conundrum I decided to do a bit of simple data analysis using the upgrade time data available on the [Clash of Clans wiki](http://clashofclans.wikia.com/wiki/Clash_of_Clans_Wiki).
#
# This next section contains a bit of Python used to prepare the dataset for visualization and analysis. If you aren't interested, just skip down to the [results section](#Results)
# %matplotlib inline
import numpy as np
import pandas as pd
# Load the wiki-scraped upgrade tables; town hall 11 rows are excluded
# (presumably out of scope for this analysis — TODO confirm).
building_df = pd.read_csv("building_upgrade_data.csv")
building_df = building_df[building_df["town_hall"] != 11]
research_df = pd.read_csv("research_data.csv")
research_df = research_df[research_df["town_hall"] != 11]
# CONSTANTS
HOURS_PER_DAY = 24.0
MIN_PER_DAY = HOURS_PER_DAY * 60
SEC_PER_DAY = MIN_PER_DAY * 60
# How many of each unit fit in one day — used to normalise everything to days.
UNIT_MAP = {"seconds": SEC_PER_DAY, "minutes": MIN_PER_DAY,
            "hours": HOURS_PER_DAY, "days": 1.0}
# +
# These functions parse the possible time strings
from functools import reduce
def parse_time(t):
    """Convert a (value, unit) pair such as ('3', 'hours') into days."""
    value, unit = t
    return int(value) / UNIT_MAP[unit]
def chunks(l, n):
    """Yield successive n-sized slices of the list l."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
def parse_time_string(s):
    """Parse a string like '1 days 12 hours' into a total number of days."""
    return sum(parse_time(pair) for pair in chunks(s.split(' '), 2))
# -
# Normalise every upgrade duration to fractional days.
building_df["build_days"] = building_df["build_time"].map(parse_time_string)
research_df["research_days"] = research_df["research_time"].map(parse_time_string)
def get_build_time(df):
    """Return a Series of total build days indexed by town hall level.

    For each building type, sums (quantity * per-level build days) at every
    town hall level. When a town hall level grants additional copies of a
    structure, the new copies must also be upgraded through all earlier
    levels, so that "catch up" time is charged to the level that unlocked
    them.
    """
    build_time = {}
    grouped = df.groupby(["type"])
    for name, group in grouped:
        regrouped = group.groupby("town_hall")
        # Quantity available at the building's first town hall level.
        prev_quant = group.iloc[0]["quantity"]
        for rname, rgroup in regrouped:
            quant = rgroup["quantity"].iloc[0]
            build_days = quant * rgroup["build_days"].sum()
            build_time.setdefault(rname, 0)
            build_time[rname] += build_days
            # This adds time to each town hall level based on new structure acquisition
            if quant > prev_quant:
                diff = quant - prev_quant
                # New copies must repeat every upgrade from earlier levels.
                catch_up_days = diff * group[group["town_hall"] < rname]["build_days"].sum()
                build_time[rname] += catch_up_days
                prev_quant = quant
    return pd.Series(build_time)
build_times = get_build_time(building_df)
# Get research times by town hall, don't forget to add lab upgrade time
lab_build_days = building_df.groupby("type").get_group("laboratory")[["town_hall","build_days"]]
research_times = research_df.groupby("town_hall")["research_days"].sum()
# NOTE(review): `.values` aligns lab rows and research sums by position, not by
# town hall level — confirm both are ordered identically.
lab_build_days["total_time"] = lab_build_days["build_days"] + research_times.values
research_times = lab_build_days.set_index("town_hall")["total_time"]
# Side-by-side totals per town hall level, plus each level's share of the
# overall research/build time.
times = pd.concat([research_times, build_times], axis=1)
times.columns = ["research_time", "build_time"]
times["percent_research_time"] = times["research_time"].map(
    lambda x: x / times["research_time"].sum())
times["percent_build_time"] = times["build_time"].map(
    lambda x: x / times["build_time"].sum())
times = times.fillna(0)
times
# # Results
| greengems.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/PadmarajBhat/Real-Time-Analytics-on-Hadoop-Notes/blob/master/pyspark_tensorflow.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="Gz4CQmBXBHml" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 235} outputId="a5ed8cc0-ba6c-4eab-c73f-2bead2d9dd3b"
# !pip install pyspark
# + id="n1Z2TmZFBc3C" colab_type="code" colab={}
import tensorflow as tf
import pandas as pd
import numpy as np
from tensorflow.keras.datasets import mnist
# + id="l7a1lSl7BrQn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="19ebdf8c-17b6-400b-9dd2-7b099daa36fc"
# MNIST: 60k training and 10k test images of 28x28 uint8 pixels.
(train_x,train_y), (test_x, test_y) = mnist.load_data()
# + id="Ile5SMUSCA-B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="15f92b9b-da32-44c6-98ca-b1bb0495b3cd"
train_x.shape,train_y.shape, test_x.shape, test_y.shape
# + id="olGC6S-bCQl4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2293c214-57ce-4570-bc0b-2f2bba9f3460"
# Flatten each 28x28 image into a 784-wide row so it fits a DataFrame.
pd_train= pd.DataFrame(train_x.reshape(60000,784))
pd_test = pd.DataFrame(test_x.reshape(10000,784))
pd_train.shape,pd_test.shape
# + id="hn-m1DHPHNAm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} outputId="1fb32953-f9ff-47be-c98f-1242b18b4577"
# Multiplying by .1 casts the uint8 pixels to float (see the markdown note below).
pd_train.values *.1
# + [markdown] id="TJ5eyAGLKyPM" colab_type="text"
# Note that we had to multiply .1 to convert the data to float else not accepted in the tensorflow.
# + id="xb08ox-CCI1I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 339} outputId="5c501593-d244-49a4-8dd0-18e66b745a71"
# Simple fully-connected classifier for MNIST digits.
model = tf.keras.Sequential([
    #tf.keras.layers.Flatten(input_shape=(28,28))
    tf.keras.layers.Flatten()            # accepts (28, 28) images or flat 784 vectors
    ,tf.keras.layers.Dense(256,activation="relu")
    ,tf.keras.layers.Dense(128,activation="relu")
    ,tf.keras.layers.Dense(10,activation="softmax")  # one probability per digit
])
model.compile(optimizer="Adam"
              , loss="sparse_categorical_crossentropy"  # labels are integer class ids
              , metrics=['accuracy']
              )
#model.fit(train,train_y, epochs=6)
# Inputs are scaled by .1 to obtain floats (see note above); the *same*
# scaling must be applied at evaluation time.
model.fit(pd_train.values *.1,train_y,epochs=6)
# BUG FIX: evaluate() previously received the raw uint8 `test_x` while the
# model was trained on inputs scaled by .1 — a train/test preprocessing
# mismatch that distorts the reported accuracy.
model.evaluate(pd_test.values *.1, test_y)
# + [markdown] id="FPuyi6f0LKnO" colab_type="text"
# Tensorflow is being fed with panda which indicates that entire data is loaded. This is not a good fit for Big Data case. We need the tensorflow to work on the RDD or Df rather than panda df. Hence spark read --> convert to panda --> tensorflow pipeline fails at the second stage. For this we have https://github.com/maxpumperla/elephas
#
#
# Elephas: Take spark dataframe and trains it for the keras model. But the example does not talk about tensorflow keras but keras independent version. Not sure if tensorflow keras supports it.
#
# Why we have to worry about tf.keras or keras as independent one ?
# - tensorflow official supports keras and hence product would have enhancements
# - tensorflow has a distributed strategy to support distributed training. elephas has one too, but tensorflow's is likely better tested because of its larger community of developers and testers.
# - tensorflow has official distribution on hadoop and hence better than smaller community like elephas
#
#
# There was a misunderstanding, I guess: they don't provide keras itself but instead transform the keras model. It still has to be tested whether a tensorflow.keras model is compatible with elephas.
#
# + id="3AbTbc-XEflr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="49e2ca8a-60da-4b4d-9ad4-a41fe6495d28"
# !pip install elephas
# + id="KfPEsJT_xwpe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="27b92d4e-178c-45eb-8644-e7b782450ef6"
nb_classes = 10
from elephas.ml_model import ElephasEstimator
# NOTE(review): this imports standalone keras while the model above was built
# with tf.keras — confirm `model.to_yaml()` output is accepted by elephas.
from keras import optimizers
adam = optimizers.Adam(lr=0.01)
opt_conf = optimizers.serialize(adam)
# Initialize SparkML Estimator and set all relevant properties
estimator = ElephasEstimator()
estimator.setFeaturesCol("scaled_features") # These two come directly from pyspark,
estimator.setLabelCol("index_category") # hence the camel case. Sorry :)
estimator.set_keras_model_config(model.to_yaml()) # Provide serialized Keras model
estimator.set_categorical_labels(True)
estimator.set_nb_classes(nb_classes)
estimator.set_num_workers(1) # We just use one worker here. Feel free to adapt it.
estimator.set_epochs(20)
estimator.set_batch_size(128)
estimator.set_verbosity(1)
estimator.set_validation_split(0.15)
estimator.set_optimizer_config(opt_conf)
estimator.set_mode("synchronous") # workers synchronise gradients each step
estimator.set_loss("categorical_crossentropy")
estimator.set_metrics(['acc'])
# + id="wqx9VRyKyN08" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="78cc7119-09a2-48fb-8e3e-3ad67a23e737"
# NOTE(review): ElephasEstimator follows the SparkML Estimator API, so fit()
# expects a Spark DataFrame — this call with numpy arrays presumably raises;
# confirm against the recorded cell output.
estimator.fit(train_x,train_y)
# + id="NhPEg-5ZzQ93" colab_type="code" colab={}
from pyspark.ml import Pipeline
pipeline = Pipeline(stages=[estimator])
# + id="h06Su6n6zeDq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 303} outputId="5fb6149c-1632-485c-e17c-61bb8d3194ab"
# NOTE(review): likewise, pipeline.fit() expects a Spark DataFrame, not the
# pandas `pd_train` — see the "Future works" list below for the intended fix.
fitted_pipeline = pipeline.fit(pd_train)
# + id="sC5ZPnxHzfK_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4d695991-ea64-4fd0-f90a-26d7a582db3b"
pd_train.shape
# + [markdown] id="NtmJEvKX0hn4" colab_type="text"
# Future works:
# - new_df = pd_train + train_y
# - convert multiple columns to a single feature column
# - create a piple of conversion and estimator
# - call pipe.fit
#
# + id="UO2Z8EvK1GAz" colab_type="code" colab={}
| pyspark_tensorflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: twit_compare
# language: python
# name: twit_compare
# ---
import pandas as pd
# Load the two result sets and drop the index column that pandas wrote out
# when the CSVs were originally saved.
lg = pd.read_csv('large.csv')
lg = lg.drop(['Unnamed: 0'], axis=1)
sm = pd.read_csv('small.csv')
# BUG FIX: was `sm = pd.drop([['Unnamed: 0'], axis=1])` — a syntax error that
# also called drop on the pandas module instead of the frame.
sm = sm.drop(['Unnamed: 0'], axis=1)
# (Removed the stray second `lg.drop(['Unnamed: 0'], axis=1)`: the column is
# already gone, so it would raise KeyError, and its result was discarded.)
print(lg.shape)
lg.head()
print(sm.shape)
sm.head()
| notebooks/validation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
ages = np.array([20, 22,25,27,21,23,37,31,61,45,41,32]) # some sample age data
# pandas' cut() bins continuous data into discrete intervals
# factory = pd.cut(ages,4) # raw data; 4 = number of equal-width bins
factory = pd.cut(ages,4,labels=['Youth', 'YoungAdult', 'MiddleAged', 'Senior']) # labels: a custom name for each bin
# factory = pd.cut(arr,bins=[18,25,35,60,100],labels=['a','b','c','d']) # bins: explicitly chosen bin edges
# factory.dtype # CategoricalDtype — cut() returns a Categorical object
test = np.array(factory) # materialise the bin labels as a plain array
test
# -
# Next: equal-frequency (quantile) binning
# pandas' qcut() splits the data so every bin holds the same number of samples
factory = pd.qcut(ages,4)
# factory
factory.value_counts() # with quantile binning, every bin has the same count
# ### One-hot encoding
#
# The most common transformation for categorical data is one-hot encoding; a concrete example follows.
testdata = pd.DataFrame({'age':[4,6,3,3],'pet':['cat','dog','dog','fish']})
testdata
# Method 1: pandas' built-in get_dummies
pd.get_dummies(testdata,columns=['pet']) # first arg: raw data; columns: the feature(s) to encode (may be several); returns a new frame
testdata.pet.values.reshape(-1,1)
# +
# Method 2: sklearn's OneHotEncoder
from sklearn.preprocessing import OneHotEncoder
OneHotEncoder().fit_transform(testdata.age.values.reshape(-1,1)).toarray()
# -
# OneHotEncoder cannot process string values directly; strings must be converted first.
# NOTE(review): recent sklearn versions do accept strings — confirm the version in use.
# OneHotEncoder().fit_transform(testdata.pet.values.reshape(-1,1)).toarray() # raises an error
from sklearn.preprocessing import LabelEncoder
petvalue = LabelEncoder().fit_transform(testdata.pet)
print(petvalue) # [0 1 1 2] — string categories mapped to integer codes
OneHotEncoder().fit_transform(petvalue.reshape(-1,1)).toarray() # same result as get_dummies above
# ### Feature dimensionality reduction
#
# In practice, more dimensions is not always better. The main reasons to reduce dimensionality:
#
# 1. The higher the feature dimensionality, the easier the model overfits, so more complex models become unreliable.
# 2. The more mutually independent features there are, the more training samples are needed to reach the same test-set performance with the same model.
# 3. More features increase the cost of training, testing, and storage.
# 4. In distance-based models such as KMeans and KNN, high dimensionality hurts both accuracy and performance.
# 5. Visualization: in two or three dimensions the data can be plotted and inspected; at higher dimensions it cannot.
# 6. This is the classic "curse of dimensionality" in machine learning.
| 特征工程(一).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Task 4 - Detection of face mask
# ## Problem Statement : Implement real time face mask detector
# - Maintained By: **<NAME>**
# ### Reference :
# - Big thanks to **<NAME>** explaining how to implement computer vision model practically. 🙏🙏
# - I have learned to implement this CV model from his tutorial.
# ### Import required libraries
# load the required library
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
import random
import warnings
warnings.filterwarnings('ignore')
import imutils
from imutils import paths
# load the required library
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import time
import cv2
# ### Loading the images and Preprocessing it
# Training images live under ./dataset/<class_name>/... — the class label is
# taken from the parent folder name when the images are loaded below.
img_path = os.path.sep.join([os.getcwd(),'dataset'])
# Initialize the initial learning rate, number of epochs to train for and the batch size
init_lr = 1e-4   # small LR suits fine-tuning a pretrained backbone
epochs = 20
batch_size = 32
# +
# load the list of the images
print("Loading the image...")
# join base_path and training img_path
image_paths = list(paths.list_images(img_path))
data = []
labels = []
# loop over the image paths
# NOTE: the whole dataset is held in memory as float32 arrays; fine for a few
# thousand 224x224 images, but this does not scale to large datasets.
for imagePath in image_paths:
    # extract the class label from the path (parent folder name)
    label = imagePath.split(os.path.sep)[-2]
    # load the input image and preprocess it
    image = load_img(imagePath, target_size = (224, 224))
    image = img_to_array(image)
    image = preprocess_input(image)  # MobileNetV2-specific pixel scaling
    # store in the data and labels list
    data.append(image)
    labels.append(label)
# convert the data and labels to NumPy arrays
data = np.array(data, dtype="float32")
labels = np.array(labels)
# +
# Perform one-hot encoding on the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
# split the data into training and testing (stratified, reproducible seed)
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.20,
    stratify=labels, random_state=42)
# Construct the training image generator for data image augmentation
aug = ImageDataGenerator(rotation_range=20, zoom_range=0.15,
    width_shift_range=0.2, height_shift_range=0.2,
    shear_range=0.15, horizontal_flip=True,
    fill_mode="nearest")
# -
# ### Model Building
# +
# Load the MobileNetV2 network, removing the fully connected layers
baseModel = MobileNetV2(weights="imagenet", include_top=False,
    input_tensor=Input(shape=(224, 224, 3)))
# Construct the head of the model that will be placed on top of the base model
top_layer = baseModel.output
top_layer = AveragePooling2D(pool_size=(7, 7))(top_layer)
top_layer = Flatten(name="flatten")(top_layer)
top_layer = Dense(128, activation="relu")(top_layer)
top_layer = Dropout(0.5)(top_layer)
top_layer = Dense(2, activation="softmax")(top_layer)  # 2 classes: mask / no mask
# Place the top_layer on top of the base model
model = Model(inputs=baseModel.input, outputs=top_layer)
# Loop over all layers in the base model and
# freeze them so they will *not* be updated during the first training process
for layer in baseModel.layers:
    layer.trainable = False
# -
# Compile the data model
print("Compiling the data model...")
# NOTE(review): `lr`/`decay` keyword args are deprecated in newer TF/Keras in
# favour of `learning_rate` and LR schedules — confirm the target TF version.
opt = Adam(lr=init_lr, decay=init_lr/ epochs)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
# summary
model.summary()
# Train the head of the network (only the new top layers are trainable)
print(" Training the last layer of the network...")
# NOTE(review): the test split doubles as validation data here, so the
# reported validation metrics are not an unbiased estimate.
history = model.fit(aug.flow(trainX, trainY, batch_size= batch_size),
    steps_per_epoch=len(trainX) // batch_size,
    validation_data=(testX, testY),
    validation_steps=len(testX) // batch_size,
    epochs=epochs)
# +
# Make predictions on the testing set
print("Evaluating the network...")
predIdxs = model.predict(testX, batch_size=batch_size)
# For each image in the testing set, find the index of the label
# with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# Show a nicely formatted classification report
print(classification_report(testY.argmax(axis=1), predIdxs, target_names=lb.classes_))
# Save the model to disk
print("Saving mask detector model into disk...")
model.save('facemask_detector.model', save_format="h5")
# -
# Plot the training loss and accuracy (one point per epoch)
plt.figure(figsize=(15,8))
N = epochs
plt.style.use("ggplot")
plt.plot(np.arange(0, N), history.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), history.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), history.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), history.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.tight_layout()
plt.savefig('model_training_history.png')
# ### Evaluating model on the image
base_path = os.getcwd()
config_path = os.path.sep.join([base_path, 'model_config_'])
model_path = os.path.sep.join([base_path, 'facemask_detector.model'])
# +
# Load the face detector model (OpenCV DNN, Caffe SSD face detector)
print("Loading the face detector model...")
prototxtPath = os.path.sep.join([config_path, "deploy.prototxt"])
weightsPath = os.path.sep.join([config_path, "res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNet(prototxtPath, weightsPath)
# Load the face mask detector model trained above
print("Loading the face mask detector model...")
model = load_model(model_path)
# +
image_path = os.path.sep.join([base_path, 'examples', 'images (1).jpg'])
# Load the input image from disk, make a copy of it and
# grab the image spatial dimensions
image = cv2.imread(image_path)
orig = image.copy()
(h, w) = image.shape[:2]
# Construct a blob from the image
# (the three constants are the detector's per-channel means — presumably the
# standard BGR means for this SSD face model; confirm against its config)
blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))
# Pass the blob through the network and obtain the face detections
print(" Computing the face detections...")
net.setInput(blob)
detections = net.forward()
# -
print("Image without mask:")
print(detections[0, 0, 0 ,0:3])
# 7 [img_id, is_face, prob, x, y, w, h]
# +
# Loop over the detections
for i in range(0, detections.shape[2]):
    # Extract the confidence (i.e, probability) associated with the detection
    confidence = detections[0, 0, i, 2]
    # Filter out weak detections by ensuring the confidence is greater than the minimum confidence
    if confidence > 0.5:
        # Compute the (x,y) coordinates of the bounding box for the object
        # (box coords are normalised 0..1, so scale back to pixels)
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        # Ensure the bounding boxes fall within the dimensions of the frame
        (startX, startY) = (max(0, startX), max(0, startY))
        (endX, endY) = (min(w - 1, endX), min(h - 1, endY))
        # Extract the face ROI
        face = image[startY:endY, startX:endX]
        face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB) #convert it from BGR to RGB channel ordering
        face = cv2.resize(face, (224, 224)) #resize it to 224x224
        face = img_to_array(face)
        face = preprocess_input(face) # preprocess it
        face = np.expand_dims(face, axis=0) # insert a new axis at 0th position (batch of 1)
        # Pass the face through the model to determine if the face has a mask or not
        (mask, withoutMask) = model.predict(face)[0]
        # Determine the class label and color that will be used to draw the bounding box and text
        label = "Mask" if mask > withoutMask else "No Mask"
        color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
        # Include the probability in the label
        label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
        # Display the label and bounding box rectangle on the output frame
        cv2.putText(image, label, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
# Show the output image (blocks until a key is pressed)
cv2.imshow("Output Image", image)
cv2.waitKey(0)
# -
# ### Evaluating a model on video stream
def detect_and_predict_mask(frame, faceNet, maskNet):
    """Detect faces in a BGR frame and classify each one as mask / no-mask.

    Parameters: `frame` — BGR image array; `faceNet` — OpenCV DNN face
    detector; `maskNet` — Keras mask classifier.
    Returns a 2-tuple `(locs, preds)` where `locs[i]` is the pixel bounding
    box (startX, startY, endX, endY) of face i and `preds[i]` is its
    (mask, withoutMask) probability pair — the two lists are index-aligned.
    Both lists are empty when no face clears the 0.5 confidence threshold.
    """
    # grab the dimensions of the frame and then construct a blob
    # from it
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
        (104.0, 177.0, 123.0))
    # pass the blob through the network and obtain the face detections
    faceNet.setInput(blob)
    detections = faceNet.forward()
    # initialize our list of faces, their corresponding locations,
    # and the list of predictions from our face mask network
    faces = []
    locs = []
    preds = []
    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the detection
        confidence = detections[0, 0, i, 2]
        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > 0.5:
            # compute the (x, y)-coordinates of the bounding box for
            # the object (detector outputs are normalised 0..1)
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            # ensure the bounding boxes fall within the dimensions of
            # the frame
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))
            # extract the face ROI, convert it from BGR to RGB channel
            # ordering, resize it to 224x224, and preprocess it
            face = frame[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)
            # add the face and bounding boxes to their respective
            # lists
            faces.append(face)
            locs.append((startX, startY, endX, endY))
    # only make a predictions if at least one face was detected
    if len(faces) > 0:
        faces=np.array(faces, dtype="float32")
        preds = maskNet.predict(faces, batch_size=32)
        # for faster inference we'll make batch predictions on *all*
        # faces at the same time rather than one-by-one predictions
        # in the above `for` loop
    # return a 2-tuple of the face locations and their corresponding
    # predictions
    return (locs, preds)
# load our serialized face detector model from disk
print("Loading face detector model...")
prototxtPath = os.path.sep.join([config_path, "deploy.prototxt"])
weightsPath = os.path.sep.join([config_path,"res10_300x300_ssd_iter_140000.caffemodel"])
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
print("Loading face mask detector model...")
maskNet = load_model(model_path)
# +
# initialize the video stream and allow the camera sensor to warm up
print("Starting video stream...")
#vs = VideoStream(src=0).start()
vs = cv2.VideoCapture(0)
time.sleep(2.0)
# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    #frame = vs.read()
    # NOTE(review): `ret` is never checked — if the grab fails, `frame` is
    # None and imutils.resize will raise; confirm the camera is available.
    ret,frame = vs.read()
    # Our operations on the frame come here
    #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame = imutils.resize(frame, width=400)
    # detect faces in the frame and determine if they are wearing a
    # face mask or not
    (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)
    # loop over the detected face locations and their corresponding
    # predictions (locs and preds are index-aligned)
    for (box, pred) in zip(locs, preds):
        # unpack the bounding box and predictions
        (startX, startY, endX, endY) = box
        (mask, withoutMask) = pred
        # determine the class label and color we'll use to draw
        # the bounding box and text
        label = "Mask" if mask > withoutMask else "No Mask"
        color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
        # include the probability in the label
        label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
        # display the label and bounding box rectangle on the output
        # frame
        cv2.putText(frame, label, (startX, startY - 10),cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
# do a bit of cleanup
# When everything done, release the capture
vs.release()
cv2.destroyAllWindows()
#vs.stop()
# -
| Face_Mask_Detector/face_mask_detector.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Read numbers until 0 is entered, then print the average of the entries
# before the terminating 0 (the 0 itself adds nothing to the sum).
count = -1      # starts at -1 so the terminating 0 is not counted
i = 1           # sentinel so the loop body runs at least once
aver = 0
total = 0       # renamed from `sum` to avoid shadowing the builtin
while i != 0:
    count += 1
    # float() instead of eval(): eval executes arbitrary user-supplied code
    i = float(input('enter a number: '))
    total += i
if count == 0:
    print('输入错误')
else:
    aver = total / count
    print('平均值为:%.2f'%aver)
| py-lesson/HW2/Q4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Number Theory 04 - Exponentiation
# Let $f_c(x)$ be a function which outputs the remainder of $x$ when divided by $c$. Python has a function 'pow(a,b,c)' which find $f_c(a^b)$. This tutorial demonstrates how to perform the same operation without the built-in function.
#
# For convenience of tests let us import randint.
from random import randint
# ## Way 1 - Direct Calculation
# It makes sense, but evaluating $a^b$ is not ideal when $a$ and $b$ are enormous.
def mypow1(a,b,c):
    """Compute a**b mod c by evaluating the full power first (slow for huge b)."""
    power = a ** b
    return power % c
# ## Way 2 - Loop
# Why not use a loop as followed: <br>
# Set $d_0 = 1$. For $k \geq 0$, we have $d_{k+1} = f_c(ad_k)$. Repeat until $k$ reaches $b$. Then $f_c(a^b) = d_b$
def mypow2(a,b,c):
    """Compute a**b mod c by b repeated modular multiplications."""
    result = 1
    step = 0
    # multiply by a and reduce modulo c, b times in total
    while step < b:
        result = (result * a) % c
        step += 1
    return result
# ## Way 3 - Binary Exponentiation
# You may think that the algorithm above is fast enough. However, there is a much faster way to execute 'pow(a,b,c)'. This is called the binary exponentiation.
#
# First write $b$ in binary form, i.e.
# $$b = \sum_{i=1}^{n} b_i 2^i, b_i \in \{0,1\}$$
#
# For each of the binary digits of b in reverse order:
# - If the digit is 1, multiply d by a and reduce modulo c, making this the new value of d.
# - Whether or not the digit is 1, square a and reduce modulo c, making this the new value of a.
#
# The final value of d is $f_c(a^b)$.
def mypow3(a,b,c):
    """Compute a**b mod c via binary (square-and-multiply) exponentiation.

    Scans the bits of b from least to most significant: `a` is squared
    (mod c) at every step and multiplied into the accumulator `d` whenever
    the current bit is set. Uses O(log b) multiplications.
    """
    # Initialization
    d = 1
    # int() instead of eval(): eval executes arbitrary code and is far slower;
    # each character of the binary string is just '0' or '1'.
    binlist = [int(n) for n in format(b,'b')] # bits of b, most significant first
    n = len(binlist) # number of bits
    # loop over bit positions, least significant first
    for i in range(1, n+1):
        # Update d when the i-th least significant bit is set
        if binlist[n - i] == 1:
            d = (d*a) % c
        # Update a: square it for the next bit position
        a = (a**2) % c
    # Final reduction so the c == 1 edge case returns 0, matching pow(a, b, 1)
    return d % c
# +
# Sanity check: all three implementations should agree with the builtin pow()
# on a random base/exponent/modulus triple.
test = [randint(2,10) for n in range(3)]
result0 = pow(test[0], test[1], test[2])
result1 = mypow1(test[0], test[1], test[2])
result2 = mypow2(test[0], test[1], test[2])
result3 = mypow3(test[0], test[1], test[2])
print(result0, result1, result2, result3)
| M1C (Python)/M1C-Number-Theory/Python Number Theory 04 - Exponentiation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import re
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sns
# -
def process_log(log):
    """Yield one [seq, latency_ms, timed_out] row per valid line of a ping log.

    Lines without an icmp sequence number are skipped; timeouts are reported
    with a latency of 0 and the timeout flag set.
    """
    seq_pattern = re.compile('icmp_seq.(\d+)')
    latency_pattern = re.compile('time=(\d+\.\d+)')
    for line in log:
        seq_match = seq_pattern.findall(line)
        if not seq_match:
            continue
        seq = int(seq_match[0])
        if 'timeout' in line:
            yield [seq, 0, True]
            continue
        latency_match = latency_pattern.findall(line)
        if latency_match:
            yield [seq, float(latency_match[0]), False]
pings = None
with open("ping.log") as log:
    pings = pd.DataFrame(list(process_log(log)), columns=['seq', 'latency', 'timeout'])
# Drop the extreme top 0.1% of latencies so outliers don't dominate the plot.
latency_p999 = pings.latency.quantile(.999)
pings_p999 = pings[pings.latency < latency_p999]
# Timeouts carry latency 0, which must be excluded before taking logs.
non_timeout = pings_p999[pings_p999.timeout != True]
# NOTE(review): assigning a new column on a filtered slice triggers pandas'
# SettingWithCopyWarning — consider .copy() before this line.
non_timeout['log_latency'] = non_timeout.latency.apply(lambda l: math.log(l))
pd.DataFrame.hist(non_timeout, column='log_latency', bins=25)
| flaky-router/Analyzing ping times.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py39
# language: python
# name: py39
# ---
import requests
# One shared session so TCP connections are reused across the downloads below.
s = requests.Session()
# +
# Download subgroup I (patients 1-100), streaming each archive to disk in
# 16 KiB chunks so large files never sit fully in memory.
# BUG FIX: removed the stray `f.value = patient_id` line — `f` was undefined
# at that point (a leftover from an ipywidgets progress bar), so the loop
# raised NameError before any download started; the name also collided with
# the file handle opened below.
for patient_id in range(1,101):
    src = f"http://dataset.isr.uc.pt/ISRUC_Sleep/subgroupI/{patient_id}.rar"
    with s.get(src, stream=True) as r:
        with open(f"../data/isruc-sleep/subgroupI/{patient_id}.rar","wb") as f:
            for chunk in r.iter_content(chunk_size=16*1024):
                f.write(chunk)
    print(f"successfully downloaded file {src}")
# -
# Download subgroup II (patients 1-8) with the same streaming pattern.
for patient_id in range(1,9):
    src = f"http://dataset.isr.uc.pt/ISRUC_Sleep/subgroupII/{patient_id}.rar"
    with s.get(src, stream=True) as r:
        # stream in 16 KiB chunks so large archives never sit fully in memory
        with open(f"../data/isruc-sleep/subgroupII/{patient_id}.rar","wb") as f:
            for chunk in r.iter_content(chunk_size=16*1024):
                f.write(chunk)
    print(f"successfully downloaded file {src}")
# +
# Download subgroup III (patients 1-10) with the same streaming pattern.
for patient_id in range(1,11):
    src = f"http://dataset.isr.uc.pt/ISRUC_Sleep/subgroupIII/{patient_id}.rar"
    with s.get(src, stream=True) as r:
        with open(f"../data/isruc-sleep/subgroupIII/{patient_id}.rar","wb") as f:
            for chunk in r.iter_content(chunk_size=16*1024):
                f.write(chunk)
    print(f"successfully downloaded file {src}")
# -
import subprocess
# Mirror the Sleep-EDF expanded dataset from PhysioNet; wget's -N/-c flags
# make the call resumable and idempotent (only changed files are re-fetched).
test = subprocess.run(["wget","-r","-N","-c", "-np", "https://physionet.org/files/sleep-edfx/1.0.0/"], cwd="../data/Sleep-EDF")
print(f"successfully got sleep-edf files")
from pyunpack import Archive
# Smoke-test extraction of the first ISRUC archive.
# NOTE(review): pyunpack needs an external backend (patool / 7z) — confirm one is installed.
Archive('../data/isruc-sleep/subgroupI/1.rar').extractall('../data/isruc-sleep/subgroupI/')
| notebooks/download_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train ``model`` and return it loaded with the best validation weights.

    Relies on module-level globals ``dataloaders``, ``device`` and
    ``dataset_sizes`` (standard PyTorch transfer-learning notebook setup).

    Parameters
    ----------
    model : torch.nn.Module
    criterion : loss function (e.g. nn.CrossEntropyLoss)
    optimizer : torch.optim.Optimizer
    scheduler : learning-rate scheduler, stepped once per training epoch
    num_epochs : int, default 25
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training phase and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # set model to training mode
            else:
                model.eval()   # set model to evaluation mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over the data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # Zero the parameter gradients.
                optimizer.zero_grad()
                # Forward pass.
                # Track gradient history only during training.
                with torch.set_grad_enabled(phase == 'train'):  # Context-manager that sets gradient calculation to on or off.
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # Backward pass + optimizer step only in the training phase.
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # Statistics (loss.item() is the batch mean, so re-scale by batch size).
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # Deep-copy the weights whenever validation accuracy improves.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # Load the best model weights before returning.
    model.load_state_dict(best_model_wts)
    return model
def imshow(img):
    """Un-normalize a CHW image tensor (normalized with mean=std=0.5) and display it."""
    unnormalized = img / 2 + 0.5
    arr = unnormalized.numpy()
    plt.figure(figsize=(10, 5))
    # matplotlib expects HWC ordering, so move channels last.
    plt.imshow(np.transpose(arr, (1, 2, 0)))
    plt.show()
| cs224w/transfer-learning-pytorch2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from transformers import AutoTokenizer, AutoModelWithLMHead
# T5 fine-tuned for emotion classification; weights are downloaded on first use.
tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-emotion")
model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-emotion")
def get_emotion(text):
    """Return the emotion label the fine-tuned T5 model generates for *text*.

    Uses the module-level ``tokenizer`` and ``model``.
    """
    encoded = tokenizer.encode(text + '</s>', return_tensors='pt')
    generated = model.generate(input_ids=encoded,
                               max_length=2)
    decoded = [tokenizer.decode(ids) for ids in generated]
    return decoded[0]
# Quick smoke tests on two example sentences.
get_emotion("i feel as if i havent blogged in ages are at least truly blogged i am doing an update cute") # Output: 'joy'
get_emotion("i have a feeling i kinda lost my best friend") # Output: 'sadness'
# -
# Display the model architecture (last expression in the cell is rendered).
model
| ml/notebooks/T5-Testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''prolif'': conda)'
# name: python385jvsc74a57bd0ed16e0ce086f53f6a3b96f2d7e8fdc3cba2fa42f4f858ca7715a8f0f47550c6a
# ---
# # Visualisation
#
# This notebook showcases different ways of visualizing lig-prot and prot-prot interactions, either with atomistic details or simply at the residue level.
import MDAnalysis as mda
import prolif as plf
import numpy as np
# load topology (ProLIF bundles an example GPCR topology + trajectory)
u = mda.Universe(plf.datafiles.TOP, plf.datafiles.TRAJ)
lig = u.select_atoms("resname LIG")
prot = u.select_atoms("protein")
# create RDKit-like molecules for visualisation
lmol = plf.Molecule.from_mda(lig)
pmol = plf.Molecule.from_mda(prot)
# get lig-prot interactions with atom info
fp = plf.Fingerprint(["HBDonor", "HBAcceptor", "Cationic", "PiStacking"])
# only the first frame is analysed; return_atoms=True keeps per-interaction atom indices
fp.run(u.trajectory[0:1], lig, prot)
df = fp.to_dataframe(return_atoms=True)
df.T
# ## py3Dmol (3Dmol.js)
#
# With py3Dmol we can easily display the interactions.
#
# For interactions involving a ring (pi-cation, pi-stacking...etc.) ProLIF returns the index of one of the ring atoms, but for visualisation having the centroid of the ring looks nicer. We'll start by writing a function to find the centroid, given the index of one of the ring atoms.
# +
from rdkit import Chem
from rdkit import Geometry
def get_ring_centroid(mol, index):
    """Return the centroid (RDKit ``Point3D``) of the ring containing atom *index*."""
    # Aromaticity perception must be up to date before querying ring info.
    Chem.SanitizeMol(mol, Chem.SanitizeFlags.SANITIZE_SETAROMATICITY)
    ring_atoms = None
    for ring in mol.GetRingInfo().AtomRings():
        if index in ring:
            ring_atoms = ring
            break
    if ring_atoms is None:
        raise ValueError("No ring containing this atom index was found in the given molecule")
    # Average the coordinates of every atom in the matched ring.
    centroid = plf.utils.get_centroid(mol.xyz[list(ring_atoms)])
    return Geometry.Point3D(*centroid)
# -
# Finally, the actual visualisation code. The API of py3Dmol is exactly the same as the GLViewer class of 3Dmol.js, for which the documentation can be found [here](https://3dmol.csb.pitt.edu/doc/$3Dmol.GLViewer.html).
# +
import py3Dmol
colors = {
"HBAcceptor": "blue",
"HBDonor": "red",
"Cationic": "green",
"PiStacking": "purple",
}
# JavaScript functions
resid_hover = """function(atom,viewer) {{
if(!atom.label) {{
atom.label = viewer.addLabel('{0}:'+atom.atom+atom.serial,
{{position: atom, backgroundColor: 'mintcream', fontColor:'black'}});
}}
}}"""
hover_func = """
function(atom,viewer) {
if(!atom.label) {
atom.label = viewer.addLabel(atom.interaction,
{position: atom, backgroundColor: 'black', fontColor:'white'});
}
}"""
unhover_func = """
function(atom,viewer) {
if(atom.label) {
viewer.removeLabel(atom.label);
delete atom.label;
}
}"""
v = py3Dmol.view(650, 600)
v.removeAllModels()
# Map residue id -> py3Dmol model id so each residue is only added once.
models = {}
mid = -1
for i, row in df.T.iterrows():
    lresid, presid, interaction = i
    # row[0] holds the (ligand_atom_index, protein_atom_index) pair.
    lindex, pindex = row[0]
    lres = lmol[lresid]
    pres = pmol[presid]
    # set model ids for reusing later
    for resid, res, style in [(lresid, lres, {"colorscheme": "cyanCarbon"}),
                              (presid, pres, {})]:
        if resid not in models.keys():
            mid += 1
            v.addModel(Chem.MolToMolBlock(res), "sdf")
            model = v.getModel()
            model.setStyle({}, {"stick": style})
            # add residue label
            model.setHoverable({}, True, resid_hover.format(resid), unhover_func)
            models[resid] = mid
    # get coordinates for both points of the interaction
    # (ring interactions are anchored at the ring centroid, others at the atom itself)
    if interaction in ["PiStacking", "EdgeToFace", "FaceToFace", "PiCation"]:
        p1 = get_ring_centroid(lres, lindex)
    else:
        p1 = lres.GetConformer().GetAtomPosition(lindex)
    if interaction in ["PiStacking", "EdgeToFace", "FaceToFace", "CationPi"]:
        p2 = get_ring_centroid(pres, pindex)
    else:
        p2 = pres.GetConformer().GetAtomPosition(pindex)
    # add interaction line
    v.addCylinder({"start": dict(x=p1.x, y=p1.y, z=p1.z),
                   "end": dict(x=p2.x, y=p2.y, z=p2.z),
                   "color": colors[interaction],
                   "radius": .15,
                   "dashed": True,
                   "fromCap": 1,
                   "toCap": 1,
                   })
    # add label when hovering the middle of the dashed line by adding a dummy atom
    c = Geometry.Point3D(*plf.utils.get_centroid([p1, p2]))
    modelID = models[lresid]
    model = v.getModel(modelID)
    model.addAtoms([{"elem": 'Z',
                     "x": c.x, "y": c.y, "z": c.z,
                     "interaction": interaction}])
    model.setStyle({"interaction": interaction}, {"clicksphere": {"radius": .5}})
    model.setHoverable(
        {"interaction": interaction}, True,
        hover_func, unhover_func)
# show protein:
# first we need to reorder atoms as in the original MDAnalysis file.
# needed because the RDKitConverter reorders them when inferring bond order
# and 3Dmol.js doesn't like when atoms from the same residue are spread across the whole file
order = np.argsort([atom.GetIntProp("_MDAnalysis_index") for atom in pmol.GetAtoms()])
mol = Chem.RenumberAtoms(pmol, order.astype(int).tolist())
mol = Chem.RemoveAllHs(mol)
# flavor=0x20|0x10 selects which optional PDB records RDKit writes (see RDKit PDB writer flags)
pdb = Chem.MolToPDBBlock(mol, flavor=0x20 | 0x10)
v.addModel(pdb, "pdb")
model = v.getModel()
model.setStyle({}, {"cartoon": {"style":"edged"}})
# zoom on the residues involved in interactions, not the whole protein
v.zoomTo({"model": list(models.values())})
# -
# ## Ligand Interaction Network (LigPlot)
#
# Protein-ligand interactions are typically represented with the ligand in atomic details, residues as nodes, and interactions as edges. Such diagram can be easily displayed by calling ProLIF's builtin class `prolif.plotting.network.LigNetwork`.
# This diagram is interactive and allows moving around the residues, as well as clicking the legend to toggle the display of specific residues types or interactions.
# LigNetwork can generate two kinds of depictions:
#
# - Based on a single specific frame
# - By aggregating results from several frames
#
# In the latter case, the frequency with which an interaction is seen will control the width of the corresponding edge. You can hide the least frequent interactions by using a threshold, *i.e.* `threshold=0.3` will hide interactions that occur in less than 30% of frames.
# +
from prolif.plotting.network import LigNetwork
# default fingerprint, analysing every 10th frame to keep runtime down
fp = plf.Fingerprint()
fp.run(u.trajectory[::10], lig, prot)
df = fp.to_dataframe(return_atoms=True)
net = LigNetwork.from_ifp(df, lmol,
                          # replace with `kind="frame", frame=0` for the other depiction
                          kind="aggregate", threshold=.3,
                          rotation=270)
net.display()
# -
# You can further customize the diagram by changing the colors in `LigNetwork.COLORS` or the residues types in `LigNetwork.RESIDUE_TYPES`. Type `help(LigNetwork)` for more details.
# The diagram can be saved as an HTML file by calling `net.save("output.html")`. It is not currently possible to export it as an image, so please make a screenshot instead.
# You can combine both saving and displaying the diagram with `net.show("output.html")`.
# ## NetworkX and pyvis
#
# NetworkX is a great library for working with graphs, but the drawing options are quickly limited so we will use networkx to create a graph, and pyvis to create interactive plots. The following code snippet will calculate the IFP, each residue (ligand or protein) is converted to a node, each interaction to an edge, and the occurrence of each interaction between residues will be used to control the weight and thickness of each edge.
import networkx as nx
from pyvis.network import Network
from tqdm.auto import tqdm
from matplotlib import cm, colors
from IPython.display import IFrame
# +
# get lig-prot interactions and distance between residues
# (every 10th frame to keep the analysis fast)
fp = plf.Fingerprint()
fp.run(u.trajectory[::10], lig, prot)
df = fp.to_dataframe()
df.head()
# -
def make_graph(values, df=None,
               node_color=["#FFB2AC", "#ACD0FF"], node_shape="dot",
               edge_color="#a9a9a9", width_multiplier=1):
    """Build a NetworkX graph of ligand/protein residues from a pandas Series.

    Parameters
    ----------
    values : pandas.Series
        Series with 'ligand' and 'protein' index levels and one value per
        lig-prot residue pair; the value sets the width and weight of the
        corresponding edge, e.g.::

            ligand  protein
            LIG1.G  ALA216.A    0.66
                    ALA343.B    0.10

    df : pandas.DataFrame
        Output of ``fp.to_dataframe()``, used to label each edge with the
        interaction types it aggregates.
    node_color : list
        Two colors: ligand residues first, protein residues second.
    node_shape : str
        Any pyvis node shape (ellipse, circle, database, box, text, image,
        circularImage, diamond, dot, star, triangle, triangleDown, square, icon).
    edge_color : str
        Color applied to every edge.
    width_multiplier : int or float
        Each edge's width is ``width_multiplier * value``.
    """
    graph = nx.Graph()
    # One node per residue; 'dtype' records which side of the interface it is on.
    # https://pyvis.readthedocs.io/en/latest/documentation.html#pyvis.network.Network.add_node
    for residue in values.index.get_level_values("ligand").unique().tolist():
        graph.add_node(residue, title=residue, shape=node_shape,
                       color=node_color[0], dtype="ligand")
    for residue in values.index.get_level_values("protein").unique().tolist():
        graph.add_node(residue, title=residue, shape=node_shape,
                       color=node_color[1], dtype="protein")
    # One weighted edge per residue pair, labelled with the per-interaction counts.
    # https://pyvis.readthedocs.io/en/latest/documentation.html#pyvis.network.Network.add_edge
    for resids, value in values.items():
        counts = (df.xs(resids, level=["ligand", "protein"], axis=1)
                    .sum()
                    .to_dict())
        details = "<br>".join(f"{k}: {v}" for k, v in counts.items())
        label = "{} - {}<br>{}".format(*resids, details)
        graph.add_edge(*resids, title=label, color=edge_color,
                       weight=value, width=value * width_multiplier)
    return graph
# ### Regrouping all interactions
#
# We will regroup all interactions as if they were equivalent.
# +
# Fraction of analysed frames in which each residue pair interacts
# (any interaction type counts once per frame).
data = (df.groupby(level=["ligand", "protein"], axis=1)
        .sum()
        .astype(bool)
        .mean())
G = make_graph(data, df, width_multiplier=3)
# display graph
net = Network(width=600, height=500, notebook=True, heading="")
net.from_nx(G)
net.write_html("lig-prot_graph.html")
IFrame("lig-prot_graph.html", width=610, height=510)
# -
# ### Only plotting a specific interaction
#
# We can also plot a specific type of interaction.
# +
# Fraction of analysed frames with a hydrophobic contact, per residue pair.
data = (df.xs("Hydrophobic", level="interaction", axis=1)
        .mean())
G = make_graph(data, df, width_multiplier=3)
# display graph
net = Network(width=600, height=500, notebook=True, heading="")
net.from_nx(G)
net.write_html("lig-prot_hydrophobic_graph.html")
IFrame("lig-prot_hydrophobic_graph.html", width=610, height=510)
# -
# ### Protein-protein interaction
#
# This kind of "residue-level" visualisation is especially suitable for protein-protein interactions. Here we'll show the interactions between one helix of our G-Protein coupled receptor (transmembrane helix 3, or TM3) in red and the rest of the protein in blue.
# TM3 = transmembrane helix 3 of the receptor (residues 119-152); compare it
# against the rest of the protein.
tm3 = u.select_atoms("resid 119:152")
prot = u.select_atoms("protein and not group tm3", tm3=tm3)
fp = plf.Fingerprint()
fp.run(u.trajectory[::10], tm3, prot)
df = fp.to_dataframe()
df.head()
# +
# Fraction of analysed frames in which each TM3/protein residue pair interacts.
data = (df.groupby(level=["ligand", "protein"], axis=1, sort=False)
        .sum()
        .astype(bool)
        .mean())
G = make_graph(data, df, width_multiplier=8)
# color each node based on its degree
max_nbr = len(max(G.adj.values(), key=lambda x: len(x)))
blues = cm.get_cmap('Blues', max_nbr)
reds = cm.get_cmap('Reds', max_nbr)
for n, d in G.nodes(data=True):
    n_neighbors = len(G.adj[n])
    # show TM3 in red and the rest of the protein in blue
    # (TM3 was passed as the "ligand" selection to fp.run)
    palette = reds if d["dtype"] == "ligand" else blues
    d["color"] = colors.to_hex( palette(n_neighbors / max_nbr) )
# convert to pyvis network
net = Network(width=640, height=500, notebook=True, heading="")
net.from_nx(G)
net.write_html("prot-prot_graph.html")
IFrame("prot-prot_graph.html", width=650, height=510)
# -
# ### Residue interaction network
#
# Another possible application is the visualisation of the residue interaction network of the whole protein. Since this protein is a GPCR, the graph will mostly display the HBond interactions responsible for the secondary structure of the protein (7 alpha-helices). It would also show hydrophobic interactions between neighbor residues, so I'm simply going to disable it in the Fingerprint.
# Whole-protein fingerprint; Hydrophobic is deliberately excluded to avoid
# trivial contacts between sequence neighbors.
prot = u.select_atoms("protein")
fp = plf.Fingerprint(['HBDonor', 'HBAcceptor', 'PiStacking', 'Anionic', 'Cationic', 'CationPi', 'PiCation'])
fp.run(u.trajectory[::10], prot, prot)
df = fp.to_dataframe()
df.head()
# To hide most of the HBond interactions responsible for the alpha-helix structuration, I will show how to do it on the pandas DataFrame for simplicity, but ideally you should copy-paste the source code inside the `fp.run` method and add the condition shown below before calculating the bitvector for a residue pair, then use the custom function instead of `fp.run`. This would make the analysis faster and more memory efficient.
# remove interactions between residues i and i±4 or less
mask = []
for lig, prot_res, interaction in df.columns:
    lig_id = plf.ResidueId.from_string(lig)
    prot_id = plf.ResidueId.from_string(prot_res)
    # Drop self-pairs and backbone-driven contacts between close sequence neighbors.
    is_trivial_neighbor = (prot_id == lig_id) or (
        abs(prot_id.number - lig_id.number) <= 4
        and interaction in ["HBDonor", "HBAcceptor", "Hydrophobic"])
    mask.append(not is_trivial_neighbor)
df = df[df.columns[mask]]
df.head()
# +
# Fraction of analysed frames in which each residue pair interacts.
data = (df.groupby(level=["ligand", "protein"], axis=1, sort=False)
        .sum()
        .astype(bool)
        .mean())
G = make_graph(data, df, width_multiplier=5)
# color each node based on its degree
max_nbr = len(max(G.adj.values(), key=lambda x: len(x)))
palette = cm.get_cmap('YlGnBu', max_nbr)
for n, d in G.nodes(data=True):
    n_neighbors = len(G.adj[n])
    d["color"] = colors.to_hex( palette(n_neighbors / max_nbr) )
# convert to pyvis network
net = Network(width=640, height=500, notebook=True, heading="")
net.from_nx(G)
# use specific layout
# (fixed circular coordinates; physics disabled so nodes stay in place)
layout = nx.circular_layout(G)
for node in net.nodes:
    node["x"] = layout[node["id"]][0] * 1000
    node["y"] = layout[node["id"]][1] * 1000
net.toggle_physics(False)
net.write_html("residue-network_graph.html")
IFrame("residue-network_graph.html", width=650, height=510)
# -
# End of this notebook. If you have other suggestions for displaying interaction fingerprints, please create a new [Discussion](https://github.com/chemosim-lab/ProLIF/discussions) on GitHub 👍
# *List of files to be automatically copied to the docs:*
#
# * [lig-prot_graph.html](lig-prot_graph.html)
# * [lig-prot_hydrophobic_graph.html](lig-prot_hydrophobic_graph.html)
# * [prot-prot_graph.html](prot-prot_graph.html)
# * [residue-network_graph.html](residue-network_graph.html)
| docs/notebooks/visualisation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
# published: false
# ---
# ### Author: <NAME>
# In this blog, I will go over the basic steps of Linear Regression with using Scikit-Learn package.
#Import Libraries
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
import numpy as np
# Load the house-sales dataset (kc_house_data.csv — presumably King County sales; confirm).
df = pd.read_csv('data/kc_house_data.csv')
df.info()
# Correlation heatmap to spot features correlated with each other / the target.
plt.figure(figsize=(12,10))
sns.heatmap(df.corr(), center=0);
| _posts/.ipynb_checkpoints/2021-12-12-Basics-of-Linear-Regression-with-ScikitLearn-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import gpsro_tools
# # Import DJF Colocations and ERA-5 all data maps
# +
# Years with January colocated-occultation data (2007-2020 inclusive).
# DRY: the original cell repeated one np.load line per year; build the same
# concatenated arrays with a loop instead.
jan_years = range(2007, 2021)

jan_combined = np.concatenate([
    np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/'
            f'january_{year}_ERA_5_colocated_occultations.npy', allow_pickle=True)
    for year in jan_years
])

colocations_jan_df = pd.DataFrame(jan_combined, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp'])
# Shift longitude from [0, 360) to [-180, 180).
colocations_jan_df['Lon'] = colocations_jan_df['Lon'] - 180
colocations_jan_df['Year'] = colocations_jan_df['Year'].astype(int)

#######################################################################################################

# Matching ERA-5 daily zonal-mean TLS maps on the 5x10 degree grid.
era_box_jan_combined = np.concatenate([
    np.load(f'../../ERA_5_monthly_TLS_maps/january_{year}_ERA_5_daily_zonal_mean_TLS_map_5_10.npy',
            allow_pickle=True)
    for year in jan_years
])

box_jan_df = pd.DataFrame(era_box_jan_combined, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp'])
# +
# Years with February colocated-occultation data (2007-2020 inclusive).
# DRY: the original cell repeated one np.load line per year; build the same
# concatenated arrays with a loop instead.
feb_years = range(2007, 2021)

feb_combined = np.concatenate([
    np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/'
            f'february_{year}_ERA_5_colocated_occultations.npy', allow_pickle=True)
    for year in feb_years
])

colocations_feb_df = pd.DataFrame(feb_combined, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp'])
# Shift longitude from [0, 360) to [-180, 180).
colocations_feb_df['Lon'] = colocations_feb_df['Lon'] - 180
colocations_feb_df['Year'] = colocations_feb_df['Year'].astype(int)

#######################################################################################################

# Matching ERA-5 daily zonal-mean TLS maps on the 5x10 degree grid.
era_box_feb_combined = np.concatenate([
    np.load(f'../../ERA_5_monthly_TLS_maps/february_{year}_ERA_5_daily_zonal_mean_TLS_map_5_10.npy',
            allow_pickle=True)
    for year in feb_years
])

box_feb_df = pd.DataFrame(era_box_feb_combined, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp'])
# +
# Years with December colocated-occultation data (2006-2019 inclusive; December
# belongs to the following winter's DJF season).
# DRY: the original cell repeated one np.load line per year; build the same
# concatenated arrays with a loop instead.
dec_years = range(2006, 2020)

dec_combined = np.concatenate([
    np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/'
            f'december_{year}_ERA_5_colocated_occultations.npy', allow_pickle=True)
    for year in dec_years
])

colocations_dec_df = pd.DataFrame(dec_combined, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp'])
# Shift longitude from [0, 360) to [-180, 180).
colocations_dec_df['Lon'] = colocations_dec_df['Lon'] - 180
colocations_dec_df['Year'] = colocations_dec_df['Year'].astype(int)

#######################################################################################################

# Matching ERA-5 daily zonal-mean TLS maps on the 5x10 degree grid.
era_box_dec_combined = np.concatenate([
    np.load(f'../../ERA_5_monthly_TLS_maps/december_{year}_ERA_5_daily_zonal_mean_TLS_map_5_10.npy',
            allow_pickle=True)
    for year in dec_years
])

box_dec_df = pd.DataFrame(era_box_dec_combined, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp'])
# -
# # Begin Processing of DJF Colocations
# +
# Remove the ERA-5 background / sampling bias from each month's colocations.
daily_era5_box_removal_jan = gpsro_tools.background_and_bias_remover(colocations_jan_df, box_jan_df)
daily_era5_box_removal_feb = gpsro_tools.background_and_bias_remover(colocations_feb_df, box_feb_df)
daily_era5_box_removal_dec = gpsro_tools.background_and_bias_remover(colocations_dec_df, box_dec_df)
# Stack the three DJF months, with and without background removal.
daily_era5_no_removal = pd.concat([colocations_jan_df, colocations_feb_df, colocations_dec_df])
daily_era5_box_removal = pd.concat([daily_era5_box_removal_jan, daily_era5_box_removal_feb, daily_era5_box_removal_dec])
# +
# Bin the raw observations into lat/lon boxes before removing box means.
daily_era5_no_removal['latbin'] = daily_era5_no_removal.Lat.map(gpsro_tools.to_bin_lat)
daily_era5_no_removal['lonbin'] = daily_era5_no_removal.Lon.map(gpsro_tools.to_bin_lon)
final_hour_box = gpsro_tools.box_mean_remover(daily_era5_box_removal)
final_hour_map = gpsro_tools.box_mean_remover(daily_era5_no_removal)
# Bin by local hour to extract diurnal cycles (per-latitude and per-box variants).
diurnal_cycles_5_10_mean_removed_by_lats, diurnal_cycles_5_10_mean_removed_by_boxes = gpsro_tools.diurnal_binner(final_hour_box)
diurnal_cycles_no_mean_removed_by_lats, diurnal_cycles_no_mean_removed_by_boxes = gpsro_tools.diurnal_binner(final_hour_map)
# -
np.save('DJF_colocations_5x10_boxes', diurnal_cycles_5_10_mean_removed_by_boxes)
| DJFnotebooks/.ipynb_checkpoints/DJFcolocations-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Analisis de Redes: Repaso Estadistico
# Ejercicio 1: Hacer este gráfico en Python.
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stas
# %matplotlib inline
# Exercise 1: plot the Beta(a, b) pdf on (0, 1) for several parameter pairs.
x = np.arange(0.01, 1, 0.01)
values = [(0.5, 0.5),(5, 1),(1, 3),(2, 2),(2, 5)]
for i, j in values:
    y = stas.beta.pdf(x,i,j)
    plt.plot(x,y)
plt.show()
# Exercise 2: with random data from beta distributions, obtain and plot their descriptive properties.
md = []
mn = []
mo = []
kur = []
ske = []
# Draw 1e6 samples per (a, b) pair and record the descriptive statistics.
for i, j in values:
    r = stas.beta.rvs(i, j, size=1000000)
    md.append(np.median(r))
    mn.append(np.mean(r))
    # NOTE(review): the mode of a continuous sample is nearly meaningless
    # (all draws are almost surely unique, so this returns the smallest
    # value); also scipy >= 1.9 changed mode()'s return shape, so the
    # [0][0] indexing may break there — verify the scipy version in use.
    mo.append(stas.mode(r)[0][0])
    kur.append(stas.kurtosis(r))
    ske.append(stas.skew(r))
# +
# Five side-by-side panels, one per statistic; the x-axis indexes the
# (a, b) pairs in `values`.
fig = plt.figure()
ax1 = fig.add_subplot(151)
ax1.set_title('Median')
ax1.plot(md)
ax2 = fig.add_subplot(152)
ax2.set_title('Mean')
ax2.plot(mn)
ax3 = fig.add_subplot(153)
ax3.set_title('Mode')
ax3.plot(mo)
ax4 = fig.add_subplot(154)
ax4.set_title('Kurtosis')
ax4.plot(kur)
ax5 = fig.add_subplot(155)
ax5.set_title('Skewness')
ax5.plot(ske)
# Hide tick labels on every panel; only the titles matter here.
axes = [ax1, ax2, ax3, ax4, ax5]
for i in axes:
    plt.setp(i.get_xticklabels(), visible=False)
    plt.setp(i.get_yticklabels(), visible=False)
# -
# Ejercicio 3: escogiendo un dataset realizar una regresión lineal y evaluar el modelo.
| alejogm0520/Repaso_Estadistico.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import bs4 as bs
import datetime as dt
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
import pandas_datareader.data as web
import pickle
import requests
# +
def save_sp500_tickers():
    """Scrape the S&P 500 constituent tickers from Wikipedia.

    Parses the first sortable wikitable on the constituents page, collects
    the symbol from the first column of each data row, pickles the list to
    sp500tickers.pickle, and returns it.
    """
    resp = requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
    soup = bs.BeautifulSoup(resp.text, 'lxml')
    table = soup.find('table', {'class': 'wikitable sortable'})
    tickers = []
    # Skip the header row; each remaining row's first cell is the symbol.
    for row in table.findAll('tr')[1:]:
        ticker = row.findAll('td')[0].text
        # .strip() instead of ticker[:-1]: the slice assumed a trailing
        # newline and would silently corrupt a symbol without one.
        tickers.append(ticker.strip())
    with open("sp500tickers.pickle", "wb") as f:
        pickle.dump(tickers, f)
    return tickers


save_sp500_tickers()
# +
def get_data_from_yahoo(reload_sp500=False):
    """Download daily price history for every S&P 500 ticker into stock_dfs/.

    reload_sp500: when True, re-scrape the ticker list from Wikipedia;
    otherwise reuse the cached sp500tickers.pickle. Tickers whose CSV
    already exists are skipped (printed instead).
    """
    if reload_sp500:
        tickers = save_sp500_tickers()
    else:
        with open("sp500tickers.pickle", "rb") as f:
            tickers = pickle.load(f)
    if not os.path.exists('stock_dfs'):
        os.makedirs('stock_dfs')
    start = dt.datetime(2000, 1, 1)
    end = dt.datetime.now()
    for ticker in tickers:
        if not os.path.exists('stock_dfs/{}.csv'.format(ticker)):
            # Yahoo symbols use '-' where Wikipedia uses '.' (BRK.B -> BRK-B).
            # NOTE(review): pandas_datareader's 'yahoo' source has broken
            # repeatedly after Yahoo API changes — confirm it still works.
            df = web.DataReader(ticker.replace('.','-'),'yahoo', start, end)
            df.to_csv('stock_dfs/{}.csv'.format(ticker))
        else:
            print('Already have {}'.format(ticker))


get_data_from_yahoo()
# +
def compile_data():
    """Join per-ticker 'Adj Close' series from stock_dfs/ into one wide CSV.

    Reads the pickled ticker list, then for each ticker loads
    stock_dfs/<ticker>.csv, keeps only the adjusted close (renamed to the
    ticker symbol), and outer-joins everything on Date. Writes the result
    to sp500_joined_closes.csv and prints progress every 10 tickers.
    """
    with open("sp500tickers.pickle", "rb") as f:
        tickers = pickle.load(f)
    main_df = pd.DataFrame()
    for count, ticker in enumerate(tickers):
        df = pd.read_csv('stock_dfs/{}.csv'.format(ticker))
        df.set_index('Date', inplace=True)
        df.rename(columns={'Adj Close': ticker}, inplace=True)
        # Keyword form: the positional axis argument (df.drop(cols, 1)) was
        # deprecated in pandas 1.0 and removed in pandas 2.0.
        df.drop(columns=['Open', 'High', 'Low', 'Close', 'Volume'], inplace=True)
        if main_df.empty:
            main_df = df
        else:
            # Outer join keeps dates missing from some tickers (NaN-filled).
            main_df = main_df.join(df, how='outer')
        # Progress indicator.
        if count % 10 == 0:
            print(count)
    print(main_df.head())
    main_df.to_csv('sp500_joined_closes.csv')


compile_data()
# +
def visualize_data():
    """Plot a correlation heatmap of the joined S&P 500 close prices.

    Reads sp500_joined_closes.csv, computes the pairwise correlation of the
    price columns, saves it to sp500corr.csv, and renders it as a
    red/yellow/green heatmap with ticker labels on both axes.
    """
    df = pd.read_csv('sp500_joined_closes.csv')
    # Keep Date out of the correlation: modern pandas raises on non-numeric
    # columns in DataFrame.corr() instead of silently dropping them (old
    # pandas excluded them automatically, so the result is unchanged).
    df.set_index('Date', inplace=True)
    df_corr = df.corr()
    print(df_corr.head())
    df_corr.to_csv('sp500corr.csv')
    data1 = df_corr.values
    fig1 = plt.figure()
    ax1 = fig1.add_subplot(111)
    heatmap1 = ax1.pcolor(data1, cmap=plt.cm.RdYlGn)
    fig1.colorbar(heatmap1)
    # Center tick marks on each cell; tickers read along the top edge.
    ax1.set_xticks(np.arange(data1.shape[1]) + 0.5, minor=False)
    ax1.set_yticks(np.arange(data1.shape[0]) + 0.5, minor=False)
    ax1.invert_yaxis()
    ax1.xaxis.tick_top()
    column_labels = df_corr.columns
    row_labels = df_corr.index
    ax1.set_xticklabels(column_labels)
    ax1.set_yticklabels(row_labels)
    plt.xticks(rotation=90)
    # Correlations live in [-1, 1]; pin the colour scale to that range.
    heatmap1.set_clim(-1, 1)
    plt.tight_layout()
    plt.show()


visualize_data()
# -
| web scrapping.ipynb |
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#all_slow
# -
# # Tutorial - Migrating from Lightning
#
# > Incrementally adding fastai goodness to your Lightning training
# We're going to use the MNIST training code from Lightning's 'Quick Start' (as at August 2020), converted to a module. See `migrating_lightning.py` for the Lightning code we are importing here.
# +
from migrating_lightning import *
from fastai2.vision.all import *
# -
# ## Using fastai's training loop
# We can use the Lightning module directly:
model = LitModel()
# To use it in fastai, we first pull the DataLoaders from the module into a `DataLoaders` object:
data = DataLoaders(model.train_dataloader(), model.val_dataloader()).cuda()
# We can now create a `Learner` and fit:
learn = Learner(data, model, loss_func=F.cross_entropy, opt_func=Adam, metrics=accuracy)
learn.fit_one_cycle(1, 0.001)
# As you can see, migrating from Lightning allowed us to reduce the amount of code, and doesn't require you to change any of your existing data pipelines, optimizers, loss functions, models, etc. Once you've made this change, you can then benefit from fastai's rich set of callbacks, transforms, visualizations, and so forth.
#
# For instance, in the Lightning example, Tensorboard support was defined a special-case "logger". In fastai, Tensorboard is just another `Callback` that you can add, with the parameter `cbs=Tensorboard`, when you create your `Learner`. The callbacks all work together, so you can add and remove any schedulers, loggers, visualizers, and so forth. You don't have to learn about special types of functionality for each - they are all just plain callbacks.
#
# Note that fastai is very different from Lightning, in that it is much more than just a training loop (although we're only using the training loop in this example) - it is a complete framework including GPU-accelerated transformations, end-to-end inference, integrated applications for vision, text, tabular, and collaborative filtering, and so forth. You can use any part of the framework on its own, or combine them together, as described in the [fastai paper](https://arxiv.org/abs/2002.04688).
# ### Taking advantage of fastai Data Blocks
# One problem in the Lightning example is that it doesn't actually use a validation set - it's just using the training set a second time as a validation set.
#
# You might prefer to use fastai's Data Block API, which makes it really easy to create, visualize, and test your input data processing. Here's how you can create input data for MNIST, for instance:
# Blocks: B&W image input, category output; items are image files labeled
# by parent folder, split train/valid by grandparent folder name.
mnist = DataBlock(blocks=(ImageBlock(cls=PILImageBW), CategoryBlock),
                  get_items=get_image_files,
                  splitter=GrandparentSplitter(),
                  get_y=parent_label)
# Here, we're telling `DataBlock` that we have a B&W image input, and a category output, our input items are file names of images, the images are labeled based on the name of the parent folder, and they are split by training vs validation based on the grandparent folder name. It's important to actually look at your data, so fastai also makes it easy to visualize your inputs and outputs, for instance:
dls = mnist.dataloaders(untar_data(URLs.MNIST_TINY))
dls.show_batch(max_n=9, figsize=(4,4))
| nbs/examples/migrating_lightning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Programming Assignment 1: Collinear Points
#
# For this programming assignment, we'll be using a Jupyter notebook.
# + [markdown] deletable=false editable=false heading_collapsed=true nbgrader={"checksum": "f4e222b48ed46c0ab537391897eed067", "grade": false, "grade_id": "cell-ba30def68618f26e", "locked": true, "schema_version": 1, "solution": false}
# ## Background
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "17d6b33bbc9c1b51f0be943c2e8a2ab1", "grade": false, "grade_id": "cell-1f0aaa1b0eeb62ff", "locked": true, "schema_version": 1, "solution": false}
# ### Collinear points
#
# Definition of collinearity[1]: In geometry, collinearity of a set of points is the property of their lying on a single line. A set of points with this property is said to be collinear.
#
# 
#
# Here, points P,Q,R and A,R,B are collinear. However, points A,B,C are non-collinear. For more, refer [2].
#
# 1. https://en.wikipedia.org/wiki/Collinearity
# 2. http://www.mathcaptain.com/geometry/collinear-points.html
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "8b74e3ea5a2dfa8d91885978ff56d8fe", "grade": false, "grade_id": "cell-888539157720ea6e", "locked": true, "schema_version": 1, "solution": false}
# ### Parameterizing lines
# In order to determine whether a set of points all lie on the same line we need a standard way to define (or parametrize) a line.
#
# * One way of defining a line is as the set of points $(x,y)$ such that $y=ax+b$ for some fixed real values $a,b$.
# * We call $a$ the **slope** of the line and $b$ is the $y$-intercept which is defined as the value of $y$ when $x=0$.
# * This parameterization works for *almost* all lines. It does not work for vertical lines. For those lines we define $a$ to be **infinity** and $b$ to be the $x$ intercept of the line (the line is parallel to the $y$ axis so it does not intercept the $y$ axis (other than if it is the vertical line going through the origin).
#
# To summarize, given two different points $(x_1,y_1) \neq (x_2,y_2)$, we define the parameterization $(a,b)$ as:
# * **if $x_1=x_2$: ** $(\mbox{Inf},x_1)$
# * **Else:** $(a,b)$ such that $y_1=a x_1 +b$ and $y_2=a x_2 +b$.
#
# + [markdown] deletable=false editable=false nbgrader={"checksum": "419f1ac5599d4ec61cb37edf7f0e59a1", "grade": false, "grade_id": "cell-a2b044c997c9d772", "locked": true, "schema_version": 1, "solution": false}
# ## Task
#
# Given an input file with an arbitrary set of co-ordinates, your task is to use pyspark library functions and write a program in python3 to find if three or more points are collinear.
#
# For instance, if given these points: {(1,1), (0,1), (2,2), (3,3), (0,5), (3,4), (5,6), (0,-3), (-2,-2)}
#
# Sets of collinear points are: {((-2,-2), (1,1), (2,2), (3,3)), ((0,1), (3,4), (5,6)), ((0,-3), (0,1), (0,5))}. Note that the ordering of the points in a set or the order of the sets does not matter.
#
# Note:
# <ul>
# <li>Every set of collinear points has to have <b>at least three points</b> (any pair of points lie on a line).</li>
# <li>There are two types of test cases:
# <ul>
# <li><b>Visible Test cases</b>: Test cases given to you as a part of the notebook. These tests will help you validate your program and figure out bugs in it if any.</li>
# <li><b>Hidden Test cases</b>: Test cases that are not given as a part of the notebook, but will be used for grading. <br>Cells in this notebook that have "<i>##Hidden test cases here</i>" are read-only cells containing hidden tests.</li>
# </ul>
# </li>
# <li>Any cell that does not require you to submit code cannot be modified. For example: Assert statement unit test cells. Cells that have "**# YOUR CODE HERE**" are the ONLY ones you will need to alter. </li>
# <li>DO NOT change the names of functions. </li>
# <li>Remove the "Raise NotImplementedError()" line when you write the definition of your function.</li>
#
# </ul>
# + [markdown] deletable=false editable=false nbgrader={"checksum": "2d27b8b8eebf443e6fff930102ddebe3", "grade": false, "grade_id": "cell-311d3abd841773a7", "locked": true, "schema_version": 1, "solution": false}
# ### Description of the Approach
#
# The goal of this assignment is to make you familiar with programming using pyspark. There are many ways to find sets of collinear points from a list of points. For the purposes of this assignment, we shall stick with the below approach:
#
# 1. List all pairs of points. You can do that efficiently in spark by computing cartesian product of the list of points with itself. For example, given three points $[(1,0), (2,0), (3,0)]$, we construct a list of nine pairs
# $[((1,0),(1,0)),((1,0), (2,0)),((1,0),(3,0))$
# $((2,0),(1,0)),((2,0), (2,0)),((2,0),(3,0))$
# $((3,0),(1,0)),((3,0), (2,0)),((3,0),(3,0))]$
#
# 2. Remove the pairs in which the same point appears twice such as $((2,0),(2,0))$. After these elimination you end up (for this example) with a list of just six pairs:
# $[((1,0),(2,0)),((1,0),(3,0)),((2,0),(1,0)),((2,0),(3,0)),((3,0),(1,0)),((3,0),(2,0))]$
#
# 2. For each pair of points, find the parameterization $(a,b)$ of the line connecting them as described above.
#
# 3. Group the pairs according to their parameters. Clearly, if two pairs have the same $(a,b)$ values, all points in the two pairs lie on the same line.
#
# 3. Eliminate the groups that contain only one pair (any pair of points defines a line).
# 4. In each of the remaining groups, unpack the point-pairs to identify the individual points.
# Note that if a set of points $(x_1,y_1),\ldots,(x_k,y_k)$ lie on the same line then each point will appear $k-1$ times in the list of point-pairs. You therefore need to transform the list of points into sets to remove duplicates.
#
# 5. Output the sets of 3 or more colinear points.
#
# Your task is to implement the described algorithm in Spark. You should use RDD's all the way through and collect the results into the driver only at the end.
# + [markdown] deletable=false editable=false heading_collapsed=true nbgrader={"checksum": "9da8b45157f4d30b98493dcd3b99765f", "grade": false, "grade_id": "cell-8160704dcdb034d0", "locked": true, "schema_version": 1, "solution": false}
# ### Notebook Setup
# + cell_style="center" deletable=false editable=false hidden=true hide_input=false nbgrader={"checksum": "0e8e74c8c4689b498605dd2056483371", "grade": false, "grade_id": "cell-5f266efcf9bf6846", "locked": true, "schema_version": 1, "solution": false}
# Bootstrap a local Spark context for the assignment.
from pyspark import SparkContext, SparkConf
#We can create a SparkConf() object and use it to initialize the spark context
conf = SparkConf().setAppName("Collinear Points").setMaster("local[4]") #Initialize spark context using 4 local cores as workers
sc = SparkContext(conf=conf)
# RDD is imported so the unit tests can isinstance-check returned values.
from pyspark.rdd import RDD
# + [markdown] deletable=false editable=false heading_collapsed=true nbgrader={"checksum": "fb5175902908ba3afb6508c020caaaee", "grade": false, "grade_id": "cell-773a47aa41687e4f", "locked": true, "schema_version": 1, "solution": false}
# ### Helper Functions
# Here are some helper functions that you are encouraged to use in your implementations. Do not change these functions.
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "0310d10b18daee816bff3d9d26fdc09c", "grade": false, "grade_id": "cell-49ad662071f3985a", "locked": true, "schema_version": 1, "solution": false}
# The function <font color="blue">format_result</font> takes an element of the form shown below in the example. It outputs a tuple of all points that are collinear (shown below).
#
# Input: ((A,slope), [C1,..., Ck]) where each of A, C1, ..., Ck is a point of form (Ax, Ay) and slope is of type float.
#
# **<font color="magenta" size=2>Example Code</font>**
# ``` python
# my_input = (((2, 1), 0.5), [(4, 2), (6, 3)])
# format_result(my_input)
# ```
# Output: (C1,..., Ck, A) each of A,C1,...,Ck is a point of form (Ax, Ay)
#
# **<font color="blue" size=2>Example Output</font>**
# ``` python
# ((4, 2), (6, 3), (2, 1))
# ```
#
# <font color="red">**Hint : **</font> The above example is given just to provide the input and output format. This function is called a different way in the spark exercise.
#
# + code_folding=[] deletable=false editable=false hidden=true nbgrader={"checksum": "58e0e93285bd51486150e0f3854ce4c7", "grade": false, "grade_id": "format_result", "locked": true, "schema_version": 1, "solution": false}
def format_result(x):
    """Flatten ((A, slope), [C1, ..., Ck]) into the tuple (C1, ..., Ck, A).

    Builds a new tuple instead of appending to x[1] in place, so the
    caller's list is left unmodified (the original mutated its input).
    The returned value is identical to the original implementation's.
    """
    (anchor, _slope), partners = x
    return tuple(partners) + (anchor,)
# + deletable=false editable=false hidden=true nbgrader={"checksum": "c598541d9412f3c9d652d93021a91f0d", "grade": false, "grade_id": "cell-75c0eeb264968b58", "locked": true, "schema_version": 1, "solution": false}
def to_sorted_points(x):
    """Return the points of *x* as a tuple in ascending order.

    Canonicalizes a group of points so equal sets compare equal
    regardless of their original ordering.
    """
    ordered = sorted(x)
    return tuple(ordered)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "7dce4a68819659eb31af8276be26184f", "grade": false, "grade_id": "cell-3c6a770a06a0457a", "locked": true, "schema_version": 1, "solution": false}
# ## Exercises
#
# Here are some functions that you will implement. You should follow the function definitions, and use them appropriately elsewhere in the notebook.
# + [markdown] deletable=false editable=false heading_collapsed=true nbgrader={"checksum": "9096be2d76024a224dc178ba27557f96", "grade": false, "grade_id": "cell-d7c137c5428c4ccb", "locked": true, "schema_version": 1, "solution": false}
# ### Exercise 1: to_tuple
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "3041273f3018754e731d969583627caa", "grade": false, "grade_id": "cell-505e6159d7dfd5e1", "locked": true, "schema_version": 1, "solution": false}
# #### Example
# The function <font color="blue">to_tuple</font> converts each point of form 'Ax Ay' into a point of form (Ax, Ay) for further processing.
#
# **<font color="magenta" size=2>Example Code</font>**
# ``` python
# my_input = '2 3'
# to_tuple(my_input)
# ```
#
# **<font color="blue" size=2>Example Output</font>**
# ``` python
# (2, 3)
# ```
#
# <font color="red">**Hint : **</font> The above example is given just to provide the input and output format. This function is called a different way in the spark exercise.
#
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "0109e612cb2855c7e1bae792cbc84a8e", "grade": false, "grade_id": "cell-856edb9202d856c2", "locked": true, "schema_version": 1, "solution": false}
# #### Definition
# + deletable=false hidden=true nbgrader={"checksum": "32a49a9cdbb616a3db5063cc97dd062e", "grade": false, "grade_id": "to_tuple", "locked": false, "schema_version": 1, "solution": true}
## Insert your answer in this cell. DO NOT CHANGE THE NAME OF THE FUNCTION.
def to_tuple(x):
    """Convert a point given as the string 'Ax Ay' into the int tuple (Ax, Ay)."""
    ax, ay = x.split()
    return (int(ax), int(ay))
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "77e1bb18ee4f063e7c76521285d8ae7b", "grade": false, "grade_id": "cell-ba456fcfa242bc50", "locked": true, "schema_version": 1, "solution": false}
# #### Unit Tests
# + deletable=false editable=false hidden=true nbgrader={"checksum": "654133c5fab414eaac5a0ca292e593d6", "grade": true, "grade_id": "to_tuple_test1", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# Graded unit tests: to_tuple must parse 'Ax Ay' into an int tuple (Ax, Ay).
assert type(to_tuple('1 1')) == tuple, "Incorrect type: Element returned is not a tuple"
# + deletable=false editable=false hidden=true nbgrader={"checksum": "83bbffcaecf406431a490417447aed54", "grade": true, "grade_id": "to_tuple_test2", "locked": true, "points": 1, "schema_version": 1, "solution": false}
assert type(to_tuple('1 1')[0])==int and type(to_tuple('1 1')[1])==int, "Incorrect element type: Element returned is not an integer"
# + deletable=false editable=false hidden=true nbgrader={"checksum": "af6f5bac43b64bf483b698c251fd3b52", "grade": true, "grade_id": "to_tuple_test3", "locked": true, "points": 1, "schema_version": 1, "solution": false}
assert to_tuple('1 1') == (1,1), "Incorrect Return Value: Value obtained does not match"
# + [markdown] deletable=false editable=false heading_collapsed=true nbgrader={"checksum": "deced821768fe61adb05a8b7c148043b", "grade": false, "grade_id": "cell-9b9993388745a787", "locked": true, "schema_version": 1, "solution": false}
# ### Exercise 2: non_duplicates
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "77f6f0e7c20bbeda98eb4853d362138a", "grade": false, "grade_id": "cell-91b1ac852bcb7b5e", "locked": true, "schema_version": 1, "solution": false}
# #### Example
#
# The function <font color="blue">non_duplicates</font> checks if a set of points contains duplicates or not.
#
# Input: Pair (A,B) where A and B are of form (Ax, Ay) and (Bx, By) respectively.
#
# **<font color="magenta" size=2>Example Code</font>**
# ``` python
# my_input = ((0,0),(1,2))
# non_duplicates(my_input)
# ```
#
# Output: Returns True if A != B, False otherwise.
#
# **<font color="blue" size=2>Example Output</font>**
# ``` python
# True
# ```
#
# <font color="red">**Hint : **</font> The above example is given just to provide the input and output format. This function is called a different way in the spark exercise.
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "84aec1e99e6612491d74e3ea4e577ff4", "grade": false, "grade_id": "cell-3bf1e0e15843bb79", "locked": true, "schema_version": 1, "solution": false}
# #### Definition
# + deletable=false hidden=true nbgrader={"checksum": "a42b6daf701373070506d926396c5ec4", "grade": false, "grade_id": "non_duplicates", "locked": false, "schema_version": 1, "solution": true}
## Insert your answer in this cell. DO NOT CHANGE THE NAME OF THE FUNCTION.
def non_duplicates(x):
    """
    Use this function inside the get_cartesian() function to 'filter' out pairs with duplicate points

    Returns True when the pair x = (A, B) holds two distinct points,
    False when A == B.
    """
    a, b = x
    return a != b
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "8543e184c1dbf549116ad3c59344fa00", "grade": false, "grade_id": "cell-9b4079a5fac4a570", "locked": true, "schema_version": 1, "solution": false}
# #### Unit Tests
# + deletable=false editable=false hidden=true nbgrader={"checksum": "9b939ddbe8b4c0e89e88380b2a53063d", "grade": true, "grade_id": "non_duplicate_test1", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# Graded unit tests: non_duplicates must return a bool that is False only
# when the two points of the pair are identical.
assert type(non_duplicates(((0,0),(1,2)))) == bool, "Incorrect Return type: Function should return a boolean value"
# + deletable=false editable=false hidden=true nbgrader={"checksum": "4b1bf301260bd3d3e61b5a3816d6a3ab", "grade": true, "grade_id": "non_duplicate_test2", "locked": true, "points": 1, "schema_version": 1, "solution": false}
assert non_duplicates(((0,0),(1,2))) == True, "No duplicates are present"
# + deletable=false editable=false hidden=true nbgrader={"checksum": "0ece2d7fd70a3a3a18797a99bbbc8d20", "grade": true, "grade_id": "non_duplicate_test3", "locked": true, "points": 1, "schema_version": 1, "solution": false}
assert non_duplicates(((0,0),(0,0))) == False, "Duplicates exist: (0,0)"
# + [markdown] deletable=false editable=false heading_collapsed=true nbgrader={"checksum": "132e6ee1e338cad56c07959611b45153", "grade": false, "grade_id": "cell-f28c29e111d2c46b", "locked": true, "schema_version": 1, "solution": false}
# ### Exercise 3: get_cartesian
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "c86aea0e55feae3340e68d4c603d124d", "grade": false, "grade_id": "cell-e7d02f2639ac31d4", "locked": true, "schema_version": 1, "solution": false}
# #### Example
#
# The function <font color="blue">get_cartesian</font> does a cartesian product of an RDD with itself and returns an RDD with <b>DISTINCT</b> pairs of points.
#
# Input: An RDD containing the given list of points
#
# Output: An RDD containing The cartesian product of the RDD with itself
#
# **<font color="magenta" size=2>Example Code</font>**
# ``` python
# test_rdd = sc.parallelize([(1,0), (2,0), (3,0)])
# get_cartesian(test_rdd).collect()
# ```
#
# **<font color="blue" size=2>Example Output</font>**
# ``` python
# [((1, 0), (2, 0)), ((1, 0), (3, 0)), ((2, 0), (1, 0)), ((2, 0), (3, 0)), ((3, 0), (1, 0)), ((3, 0), (2, 0))]
# ```
#
# Refer: http://spark.apache.org/docs/latest/api/python/pyspark.html?highlight=cartesian#pyspark.RDD.cartesian
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "643ea2bdd5be5cd60076c2141f546c3e", "grade": false, "grade_id": "cell-039b50aeadf71899", "locked": true, "schema_version": 1, "solution": false}
# #### Definition
# + deletable=false hidden=true nbgrader={"checksum": "9a24d0a4d1ad28310b8d185bff55daa6", "grade": false, "grade_id": "get_cartesian", "locked": false, "schema_version": 1, "solution": true}
## Insert your answer in this cell. DO NOT CHANGE THE NAME OF THE FUNCTION.
def get_cartesian(rdd):
    """Return an RDD of all ordered pairs of DISTINCT points in *rdd*.

    rdd.cartesian(rdd) yields every (A, B) combination including the
    self-pairs (A, A); non_duplicates() filters those out.
    """
    return rdd.cartesian(rdd).filter(non_duplicates)
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "82769159cfe1e095bbb9729e74ca2337", "grade": false, "grade_id": "cell-055907b27f0b6fe1", "locked": true, "schema_version": 1, "solution": false}
# #### Unit Tests
# + deletable=false editable=false hidden=true nbgrader={"checksum": "ceb67419904d5fe338ca909ac7c89b68", "grade": true, "grade_id": "get_cartesian_test1", "locked": true, "points": 2, "schema_version": 1, "solution": false}
# Graded unit tests: get_cartesian must return an RDD of the 6 ordered
# pairs of distinct points (order of pairs does not matter).
test_rdd = sc.parallelize([(1,0), (2,0), (3,0)])
l = [((1, 0), (2, 0)), ((1, 0), (3, 0)), ((2, 0), (1, 0)), ((2, 0), (3, 0)), ((3, 0), (1, 0)), ((3, 0), (2, 0))]
assert isinstance(get_cartesian(test_rdd), RDD) == True, "Incorrect Return type: Function should return an RDD"
assert set(get_cartesian(test_rdd).collect()) == set(l), "Incorrect Return Value: Value obtained does not match"
# + deletable=false editable=false hidden=true nbgrader={"checksum": "dc64d03fb7bf34b44d9cf6129da1359e", "grade": true, "grade_id": "get_cartesian_test2", "locked": true, "points": 3, "schema_version": 1, "solution": false}
##Hidden test cases here
# + deletable=false editable=false hidden=true nbgrader={"checksum": "d4939bafc51be2165fb3e85141ec0684", "grade": true, "grade_id": "get_cartesian_test4", "locked": true, "points": 4, "schema_version": 1, "solution": false}
##Hidden test cases here
# + [markdown] deletable=false editable=false heading_collapsed=true nbgrader={"checksum": "c524f6aee4df0a40911dfd40bd852f37", "grade": false, "grade_id": "cell-eaec56ed0650e4d5", "locked": true, "schema_version": 1, "solution": false}
# ### Exercise 4: find_slope
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "e36f336c26e1401f436d7f3bdad3234f", "grade": false, "grade_id": "cell-0b4545887393d7f2", "locked": true, "schema_version": 1, "solution": false}
# #### Example
#
# The function <font color="blue">find_slope</font> computes slope between points A and B and returns it in the format specified below.
#
# Input: Pair (A,B) where A and B are of form (Ax, Ay) and (Bx, By) respectively.
#
# **<font color="magenta" size=2>Example Code</font>**
# ``` python
# my_input = ((1,2),(3,4))
# find_slope(my_input)
# ```
#
# Output: Pair ((A,slope), B) where A and B have the same definition as input and slope refers to the slope of the line segment connecting point A and B.
#
# **<font color="blue" size=2>Example Output</font>**
# ``` python
# (((1, 2), 1.0), (3, 4))
# ```
# <font color="brown">**Note: **</font> If Ax == Bx, use slope as "inf".
#
# <font color="red">**Hint : **</font> The above example is given just to provide the input and output format. This function is called a different way in the spark exercise.
#
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "1ca4fa51e87b7b511d25cc66c82abeb6", "grade": false, "grade_id": "cell-5f2de1e073e2055b", "locked": true, "schema_version": 1, "solution": false}
# #### Definition
# + deletable=false hidden=true nbgrader={"checksum": "18e38e5d2a856fee42f2eae55ce35ae8", "grade": false, "grade_id": "find_slope", "locked": false, "schema_version": 1, "solution": true}
## Insert your answer in this cell
def find_slope(x):
    """Return ((A, slope), B) for the pair of points x = (A, B).

    The slope of segment A-B is (By - Ay) / (Bx - Ax); vertical segments
    (Ax == Bx) get the string "inf" as the spec requires, so they can
    still be grouped by key.
    """
    (ax, ay), (bx, by) = x
    if ax == bx:
        slope = "inf"
    else:
        slope = (by - ay) / (bx - ax)
    return (((ax, ay), slope), (bx, by))
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "3e167e4eb92f588cdf2938e3f1ae12bf", "grade": false, "grade_id": "cell-3d904383c1b13721", "locked": true, "schema_version": 1, "solution": false}
# #### Unit Tests
# + deletable=false editable=false hidden=true nbgrader={"checksum": "d8ae72f49d0b3166eae4a75d3bc8f10a", "grade": true, "grade_id": "find_slope1", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# Graded unit tests: find_slope must key the pair by (first point, slope),
# using the string "inf" for vertical segments.
assert type(find_slope(((1,2),(3,4)))) == tuple, "Function must return a tuple"
# + deletable=false editable=false hidden=true nbgrader={"checksum": "4dc9713a8dd63d0ba7cb260fcdb3d08e", "grade": true, "grade_id": "find_slope2", "locked": true, "points": 1, "schema_version": 1, "solution": false}
assert find_slope(((1,2),(-7,-2)))[0][1] == 0.5, "Slope value should be 0.5"
# + deletable=false editable=false hidden=true nbgrader={"checksum": "abf318ccd01d194400dfc7349d187a7d", "grade": true, "grade_id": "find_slope3", "locked": true, "points": 1, "schema_version": 1, "solution": false}
assert find_slope(((1,2),(3,4))) == (((1,2),1),(3,4)), "Incorrect return value: Value obtained does not match"
# + deletable=false editable=false hidden=true nbgrader={"checksum": "9fa363f8b13eb3e1fe7e0f90682afa0c", "grade": true, "grade_id": "find_slope4", "locked": true, "points": 1, "schema_version": 1, "solution": false}
assert find_slope(((1,2),(1,5))) == (((1,2),"inf"),(1,5)), "Incorrect return value: Value obtained must have slope 'inf'"
# + deletable=false editable=false hidden=true nbgrader={"checksum": "c22f9cf3a457ba84fcdce36f46bd7b79", "grade": true, "grade_id": "find_slope5", "locked": true, "points": 1, "schema_version": 1, "solution": false}
assert find_slope(((1,2),(2,5))) == (((1,2),3),(2,5)), "Incorrect return value: Value obtained does not match"
# + deletable=false editable=false hidden=true nbgrader={"checksum": "c405cf6f50f24185151e5c877382ddf4", "grade": true, "grade_id": "find_slope_hidden1", "locked": true, "points": 3, "schema_version": 1, "solution": false}
##Hidden test cases here
# + deletable=false editable=false hidden=true nbgrader={"checksum": "9b955a80455a5e4463a260632dcc718a", "grade": true, "grade_id": "find_slope_hidden2", "locked": true, "points": 3, "schema_version": 1, "solution": false}
##Hidden test cases here
# + deletable=false editable=false hidden=true nbgrader={"checksum": "c5fb30b6705a032b6424b39bf81deee2", "grade": true, "grade_id": "find_slope_hidden3", "locked": true, "points": 3, "schema_version": 1, "solution": false}
##Hidden test cases here
# + [markdown] deletable=false editable=false heading_collapsed=true nbgrader={"checksum": "473a4e95df9d605903e103bafafa0ff5", "grade": false, "grade_id": "cell-c1b7f405e6f29b5b", "locked": true, "schema_version": 1, "solution": false}
# ### Exercise 5: find_collinear
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "7077f5c7b5bb6696bc805898e52844db", "grade": false, "grade_id": "cell-914ae1e774319691", "locked": true, "schema_version": 1, "solution": false}
# #### Example
#
# The function <font color="blue">find_collinear</font> finds the set of collinear points.
#
# Input: An RDD (which is the output of the get_cartesian() function.
#
# Output: An RDD containing the list of collinear points formatted according to the <font color="blue">format_result</font> function.
#
# Approach:
# 1. Find the slope of the line between all pairs of points A = (Ax, Ay) and B = (Bx, By).
# 2. For each (A, B), find all points C = ((C1x, C1y), (C2x, C2y), ... (Cnx, Cny))
# where slope of (A,B) = slope of (A, Ci).
# 3. Return (A, B, Ck) where Ck = all points of C which satisfy the condition 1.
#
# The assert statement unit tests for this function will help you with this.
# <font color="red">**Hint : **</font> You should use the above helper functions in conjunction with Spark RDD API (refer http://spark.apache.org/docs/latest/api/python/pyspark.html?highlight=rdd#pyspark.RDD)
# Finally, use helper function format_result() appropriately from inside this function after you have implemented the above operations.
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "46068519e452e8268834897ef4f48c8d", "grade": false, "grade_id": "cell-9a31ef2dcb4560fa", "locked": true, "schema_version": 1, "solution": false}
# #### Definition
# + deletable=false hidden=true nbgrader={"checksum": "8c8bb8b22b0b52572235b7f57d655bcc", "grade": false, "grade_id": "find_collinear", "locked": false, "schema_version": 1, "solution": true}
def find_collinear(rdd):
    """Find all sets of three or more collinear points.

    Parameters
    ----------
    rdd : RDD of ((Ax, Ay), (Bx, By)) point pairs, i.e. the output of
        get_cartesian().

    Returns
    -------
    RDD of collinear point sets, formatted via the format_result() helper.

    Implementation sketch (from the exercise text): key each pair by
    (anchor point A, slope(A, B)), group by that key, keep groups with at
    least two other points, then apply format_result().
    """
    # YOUR CODE HERE
    raise NotImplementedError()
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "2073dc0d16402b142ce97179a867a9dc", "grade": false, "grade_id": "cell-ffb01083af217fe4", "locked": true, "schema_version": 1, "solution": false}
# #### Unit Tests
# + deletable=false editable=false hidden=true nbgrader={"checksum": "9433874063a97a05e1fe92a71f54cf00", "grade": false, "grade_id": "cell-adee31e44b3bf643", "locked": true, "schema_version": 1, "solution": false}
def verify_collinear_sets(collinearpointsRDD, testlist):
    """Return True iff the RDD and the expected list contain exactly the
    same collinear point sets, ignoring ordering both within each set and
    between the sets."""
    def canonical(groups):
        # Deduplicate the groups, then sort the points inside each group so
        # that ordering differences cannot affect the comparison.
        return {tuple(sorted(group)) for group in set(groups)}

    return canonical(collinearpointsRDD.collect()) == canonical(testlist)
# + deletable=false editable=false hidden=true nbgrader={"checksum": "a4756ea58a19fe1c2cf2d3423c5d9d1b", "grade": true, "grade_id": "find_collinear1", "locked": true, "points": 3, "schema_version": 1, "solution": false}
test_rdd = sc.parallelize([((4, 2), (2, 1)), ((4, 2), (-3, 4)), ((4, 2), (6, 3)), ((2, 1), (4, 2)), ((2, 1), (-3, 4)), ((2, 1), (6, 3)), ((-3, 4), (4, 2)), ((-3, 4), (2, 1)), ((-3, 4), (6, 3)), ((6, 3), (4, 2)), ((6, 3), (2, 1)), ((6, 3), (-3, 4))])
assert isinstance(find_collinear(test_rdd), RDD) == True, "Incorrect return type: Function must return RDD"
# + deletable=false editable=false hidden=true nbgrader={"checksum": "a729abbaa8d514cfed9ed19460a48fd1", "grade": true, "grade_id": "find_collinear2", "locked": true, "points": 3, "schema_version": 1, "solution": false}
assert verify_collinear_sets(find_collinear(test_rdd), [((2, 1), (4, 2), (6, 3))]), "Incorrect return value: Value obtained does not match"
# + deletable=false editable=false hidden=true nbgrader={"checksum": "2d40c0f2435a8b65112a933975fad509", "grade": true, "grade_id": "find_collinear_hidden", "locked": true, "points": 4, "schema_version": 1, "solution": false}
##Hidden test cases here
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "944edb1429e93e79c2e9d6a189961d65", "grade": false, "grade_id": "cell-fbc9a77136a57c34", "locked": true, "schema_version": 1, "solution": false}
# #### Unit Tests II : Using the output of get_cartesian(rdd)
# + deletable=false editable=false hidden=true nbgrader={"checksum": "9c6d430f0029f564f9bd81a274744f76", "grade": true, "grade_id": "find_collinear3", "locked": true, "points": 3, "schema_version": 1, "solution": false}
# Eight points containing one vertical line (x == 6) and one diagonal line;
# get_cartesian expands them into all ordered pairs before the check.
test_rdd = sc.parallelize([(4, -2), (2, -1), (-3,4), (6,3), (-9,4), (6, -3), (8,-4), (6,9)])
test_rdd = get_cartesian(test_rdd)
assert verify_collinear_sets(find_collinear(test_rdd), [((6, -3), (6, 3), (6, 9)), ((2, -1), (4, -2), (6, -3), (8, -4))]), "Incorrect return value: You have not implemented the find_collinear function in Python"
# + deletable=false editable=false hidden=true nbgrader={"checksum": "a5d3adf7c8e32725c8628e7a889bdcb9", "grade": true, "grade_id": "find_collinear_hidden2", "locked": true, "points": 6, "schema_version": 1, "solution": false}
##Hidden test cases here
# + [markdown] deletable=false editable=false heading_collapsed=true nbgrader={"checksum": "20ed2f68d19f63b82e4f9e274576ddf8", "grade": false, "grade_id": "cell-a15b922949251b16", "locked": true, "schema_version": 1, "solution": false}
# ### Exercise 6: The build_collinear_set function
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "91bc5cf5330805c942498425a89f1db3", "grade": false, "grade_id": "cell-a15b922949251b15", "locked": true, "schema_version": 1, "solution": false}
# #### Example
# Using the above functions that you have written along with pyspark functions, write the **build_collinear_set** function, which returns an RDD containing the set of collinear points.
#
# Input: RDD containing the given set of points
#
# Output: RDD containing the set of collinear points
#
# <font color="red">**Hint : **</font> Remember that the input RDD consists of a set of strings. Remember to pre-process them using the to_tuple function before performing other operations.
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "a2fd78cb3058dc06883309213b7439db", "grade": false, "grade_id": "cell-e65a588686d085c9", "locked": true, "schema_version": 1, "solution": false}
# #### Definition
# + deletable=false hidden=true nbgrader={"checksum": "18683d1dfea2b97144f55abf93b6aeba", "grade": false, "grade_id": "Collinear-main", "locked": false, "schema_version": 1, "solution": true}
def build_collinear_set(rdd):
    """Find all maximal sets of collinear points.

    Input: RDD of strings, each encoding one point as "x y".
    Output: RDD of sorted tuples of collinear points.
    """
    # Parse each "x y" string into an (x, y) tuple of numbers (per the hint
    # in the exercise text, using the to_tuple helper defined earlier).
    rdd = rdd.map(to_tuple)
    # Expand the points into all ordered pairs of distinct points.
    rdd = get_cartesian(rdd)
    # Group pairs that share an anchor point and a slope into collinear sets.
    rdd = find_collinear(rdd)
    # Sorting each of your returned sets of collinear points. This is for grading purposes.
    # YOU MUST NOT CHANGE THIS.
    rdd = rdd.map(to_sorted_points)
    return rdd
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "9afb511f54206dd0f33a55d971ca029c", "grade": false, "grade_id": "cell-2d08e0ec753a680b", "locked": true, "schema_version": 1, "solution": false}
# #### Unit Tests
# + deletable=false editable=false hidden=true nbgrader={"checksum": "dcd7209e7f4ddd7c36583bd6d7accff2", "grade": true, "grade_id": "Collinear-type-test", "locked": true, "points": 4, "schema_version": 1, "solution": false}
# Raw "x y" strings, exactly as sc.textFile would deliver them from disk.
test_rdd = sc.parallelize(['4 -2', '2 -1', '-3 4', '6 3', '-9 4', '6 -3', '8 -4', '6 9'])
assert isinstance(build_collinear_set(test_rdd), RDD) == True, "build_collinear_set should return an RDD."
# + [markdown] deletable=false editable=false heading_collapsed=true nbgrader={"checksum": "96c137188b16b00df31f17b697fcefcb", "grade": false, "grade_id": "cell-cc72c2cc0fb5d36b", "locked": true, "schema_version": 1, "solution": false}
# ### The process function
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "49d80eb474bf23793b244268fe995934", "grade": false, "grade_id": "cell-4a1c492646e86a45", "locked": true, "schema_version": 1, "solution": false}
# #### Definition
# + deletable=false editable=false hidden=true nbgrader={"checksum": "5033787304edc4e1e6fa665e5781d441", "grade": false, "grade_id": "Main-function", "locked": true, "schema_version": 1, "solution": false}
def process(filename):
    """
    This is the process function used for finding collinear points using inputs from different files
    Input: Name of the test file
    Output: Set of collinear points
    """
    # Load the data file into an RDD (one "x y" string per line)
    rdd = sc.textFile(filename)
    # Delegate parsing, pairing and grouping to the student solution above.
    rdd = build_collinear_set(rdd)
    # Collecting the collinear points RDD in a set to remove duplicate sets of collinear points. This is for grading purposes. You may ignore this.
    res = set(rdd.collect())
    return res
# + [markdown] deletable=false editable=false hidden=true nbgrader={"checksum": "67c8dafd09663e5ac2ac30eb4a593995", "grade": false, "grade_id": "cell-614b27038af0cd2a", "locked": true, "schema_version": 1, "solution": false}
# #### Unit Tests: Testing the build_collinear_set function using the process function
# NOTE: You may assume that input files do not have duplicate points.
# + deletable=false editable=false hidden=true nbgrader={"checksum": "fa4bef35ce670329b42b06b4768397d5", "grade": true, "grade_id": "Collinear_soln_visible1", "locked": false, "points": 8, "schema_version": 1, "solution": false}
assert process("data.txt") == {((-2, -2), (1, 1), (2, 2), (3, 3)), ((0, 1), (3, 4), (5, 6)), ((0, -3), (0, 1), (0, 5))}, "Your implementation of build_collinear_set is not correct."
# + deletable=false editable=false hidden=true nbgrader={"checksum": "73857f5e7d394bf3184059193fb993ce", "grade": true, "grade_id": "Collinear_soln_visible2", "locked": false, "points": 8, "schema_version": 1, "solution": false}
assert process("data50.txt") == {((3, 6), (7, 4), (9, 3)), ((1, 6), (3, 6), (4, 6), (7, 6)),
((0, 2), (3, 1), (6, 0)), ((1, 0), (2, 0), (5, 0), (6, 0)),
((1, 3), (3, 6), (5, 9)), ((0, 8), (4, 6), (6, 5)),
((6, 0), (6, 1), (6, 5), (6, 9)),
((7, 2), (7, 3), (7, 4), (7, 6), (7, 8)), ((3, 1), (3, 3), (3, 6)),
((0, 2), (1, 2), (5, 2), (7, 2)), ((0, 3), (2, 5), (3, 6), (6, 9)),
((0, 2), (1, 3), (2, 4), (4, 6), (5, 7)), ((1, 2), (4, 3), (7, 4)),
((0, 3), (4, 6), (8, 9)), ((9, 3), (9, 4), (9, 5)), ((2, 5), (5, 7), (8, 9)),
((0, 5), (2, 4), (4, 3), (8, 1)), ((0, 8), (1, 6), (2, 4)),
((3, 6), (5, 2), (6, 0)), ((5, 9), (6, 9), (8, 9)),
((0, 8), (1, 8), (7, 8)), ((0, 4), (1, 3), (3, 1)), ((5, 9), (7, 6), (9, 3)),
((1, 2), (2, 4), (3, 6)), ((0, 7), (1, 5), (3, 1)),
((1, 5), (2, 4), (3, 3), (6, 0)), ((0, 2), (3, 3), (9, 5)),
((0, 7), (1, 6), (2, 5), (4, 3), (5, 2), (6, 1)),
((0, 4), (1, 5), (5, 9)), ((1, 5), (3, 6), (5, 7), (7, 8)),
((1, 6), (3, 3), (5, 0)), ((3, 6), (4, 3), (5, 0)),
((1, 2), (4, 5), (7, 8), (8, 9)), ((0, 2), (1, 1), (2, 0)),
((3, 3), (4, 5), (5, 7), (6, 9)), ((0, 2), (0, 3), (0, 4), (0, 5), (0, 7), (0, 8)),
((2, 0), (4, 3), (8, 9)), ((5, 7), (6, 5), (7, 3), (8, 1)), ((5, 0), (7, 6), (8, 9)),
((5, 0), (6, 1), (7, 2), (9, 4)), ((0, 4), (1, 2), (2, 0)),
((1, 1), (3, 1), (6, 1), (8, 1)), ((5, 7), (7, 6), (9, 5)), ((1, 1), (7, 4), (9, 5)),
((0, 4), (2, 4), (7, 4), (9, 4)), ((1, 0), (3, 1), (5, 2), (7, 3), (9, 4)),
((2, 0), (3, 3), (4, 6), (5, 9)), ((4, 3), (4, 5), (4, 6)),
((1, 0), (4, 3), (6, 5), (7, 6)), ((0, 3), (2, 4), (4, 5)),
((1, 6), (4, 5), (7, 4)), ((1, 0), (1, 1), (1, 2), (1, 3), (1, 5), (1, 6), (1, 8)),
((0, 3), (1, 3), (3, 3), (4, 3), (7, 3), (9, 3)), ((0, 4), (2, 5), (4, 6)),
((0, 7), (3, 6), (6, 5), (9, 4)), ((1, 8), (4, 6), (7, 4)),
((0, 5), (3, 3), (6, 1)), ((1, 8), (3, 6), (4, 5), (7, 2), (8, 1)),
((1, 2), (3, 1), (5, 0)), ((1, 1), (5, 2), (9, 3)),
((5, 0), (5, 2), (5, 7), (5, 9)), ((0, 5), (1, 5), (2, 5), (4, 5), (6, 5), (9, 5)),
((3, 1), (4, 5), (5, 9)), ((2, 0), (2, 4), (2, 5)), ((5, 2), (6, 5), (7, 8))}, "Your implementation of build_collinear_set is not correct."
# + deletable=false editable=false hidden=true nbgrader={"checksum": "f349a62471f5026fec0784299ef65cc5", "grade": true, "grade_id": "Collinear_soln_hidden1", "locked": false, "points": 8, "schema_version": 1, "solution": false}
##Hidden test cases here
# + deletable=false editable=false hidden=true nbgrader={"checksum": "0b07d77f4d88c302f9568ad92986a4d6", "grade": true, "grade_id": "Collinear_soln_hidden2", "locked": false, "points": 8, "schema_version": 1, "solution": false}
##Hidden test cases here
# + deletable=false editable=false hidden=true nbgrader={"checksum": "56dada29e6f9b7ebfb9c900fa26922d5", "grade": true, "grade_id": "Collinear_soln_hidden3", "locked": false, "points": 8, "schema_version": 1, "solution": false}
##Hidden test cases here
# + deletable=false editable=false hidden=true nbgrader={"checksum": "ab85358924c29e438a037d8c8291358a", "grade": true, "grade_id": "Collinear_soln_hidden4", "locked": false, "points": 8, "schema_version": 1, "solution": false}
##Hidden test cases here
| collinearPoints.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.display import Image
# Contents
# --------
# 1. What is machine learning
# 2. Different ways of learning from data
# 3. Why probabilistic graphical models
# 4. Major types of PGMs
# ### 1. What is machine learning
# Machine learning is a scientific discipline that explores the construction and study of algorithms that can learn from data. Such algorithms operate by building a model from example inputs and using that to make predictions or decisions, rather than following strictly static program instructions.
#
# We can take an example of predicting the type of flower based on the sepal length and width of the flower. Let's say we have some data (discretized iris data set on sepal length and width). The dataset looks something like this:
# %run ../scripts/1/discretize.py
data
# ### 2. Different ways of learning from data
#
# Now let's say we want to predict the type of flower for a new given data point. There are multiple ways to solve this problem. We will consider these two ways in some detail:
#
# 1. We could find a function which can directly map an input value to it's class label.
# 2. We can find the probability distributions over the variables and then use this distribution to answer queries about the new data point.
#
# There are a lot of algorithms for finding a mapping function. For example linear regression tries to find a linear equation which explains the data. Support vector machine tries to find a plane which separates the data points. Decision Tree tries to find a set of simple greater than and less than equations to classify the data. Let's try to apply Decision Tree on this data set.
#
# We can plot the data and it looks something like this:
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# Adding a little bit of noise so that it's easier to visualize
data_with_noise = data.iloc[:, :2] + np.random.normal(loc=0, scale=0.1, size=(150, 2))
plt.scatter(data_with_noise.length, data_with_noise.width, c=['b', 'g', 'r'], s=200, alpha=0.3)
# -
# In the plot we can easily see that the blue points are concentrated on the top-left corner, green ones in bottom left and red ones in top right.
#
# Now let's try to train a Decision Tree on this data.
# +
from sklearn.tree import DecisionTreeClassifier
# FIX: sklearn.cross_validation was deprecated in scikit-learn 0.18 and
# removed in 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split

# FIX: DataFrame.ix is deprecated/removed in pandas; .loc performs the same
# label-based selection of the two feature columns.
X_train, X_test, y_train, y_test = train_test_split(
    data.loc[:, ['length', 'width']].values, data.type.values, test_size=0.2)

classifier = DecisionTreeClassifier(max_depth=4)
classifier.fit(X_train, y_train)
classifier.predict(X_test)
# -
classifier.score(X_test, y_test)
# So, in this case we got a classification accuracy of 56.67 %.
#
# Now moving on to our second approach using a probabilistic model.
# The most obvious way to do this classification task would be to compute a Joint Probability Distribution over all these variables and then marginalize and reduce over these according to our new data point to get the probabilities of classes.
X_train, X_test = data[:120], data[120:]

X_train

# Computing the joint probability distribution over the training data.
# FIX: this previously grouped the *full* dataset (`data`), leaking the 30
# held-out test rows into the model; only the 120 training rows belong here.
joint_prob = X_train.groupby(['length', 'width', 'type']).size() / 120

joint_prob

# +
# Predicting values

# Selecting just the feature variables.
X_test_features = X_test.iloc[:, :2].values
X_test_actual_results = X_test.iloc[:, 2].values

predicted_values = []
for i in X_test_features:
    # joint_prob[length, width] is a Series indexed by flower type.
    # FIX: use idxmax() to get the most probable type *label*; np.argmax on
    # a Series returns a positional index in modern pandas, which would not
    # compare correctly against the actual type labels below.
    predicted_values.append(joint_prob[i[0], i[1]].idxmax())

predicted_values = np.array(predicted_values)
predicted_values
# -
# Comparing results with the actual data.
predicted_values == X_test_actual_results
score = (predicted_values == X_test_actual_results).sum() / 30
print(score)
# ### Why Probabilistic Graphical Models
#
# In the previous example we saw how Bayesian Inference works. We construct a Joint Distribution over the data and then condition on the observed variable to compute the posterior distribution. And then we query on this posterior distribution to predict the values of new data points.
# But the problem with this method is that the Joint Probability Distribution is exponential to the number of states (cardinality) of each variable. So, for problems having a lot of features or having high cardinality of features, inference becomes a difficult task because of computational limitations. For example, for 10 random variables each having 10 states, the size of the Joint Distribution would be 10^10.
# __Probabilistic Graphical Models (PGM)__: PGM is a technique of compactly representing Joint Probability Distribution over random variables by exploiting the (conditional) independencies between the variables. PGM also provides us methods for efficiently doing inference over these joint distributions.
#
# Each graphical model is characterized by a graph structure (can be directed, undirected or both) and a set of parameters associated with each graph.
#
# The problem in the above example can be represented using a Bayesian Model (a type of graphical model) as:
Image(filename='../images/1/Iris_BN.png')
# In this case the parameters of the network would be $ P(L) $, $ P(W) $ and $ P(T | L, W) $. So, we will need to store 5 values for $ L $, 3 values for $ W $ and 45 values for $ P(T | L, W) $. So, a total of 45 + 5 + 3 = 53 values to completely parameterize the network which is actually more than 45 values which we need for $ P (T, L, W) $. But in the cases of bigger networks graphical models help in saving space. We can take the example of the student network shown below:
Image(filename='../images/1/student.png')
# Considering that $ D $ has cardinality of 2, $ I $ has cardinality of 2, $ S $ has cardinality of 2, $ G $ has cardinality of 3 and $ L $ has cardinality of 2. Also the parameters in this network would be $ P(D) $, $ P(I) $, $ P(S | I) $, $ P(G | D, I) $, $ P(L | G) $. So, the number of values needed would be $ 2 $ for $ P(D) $, $ 2 $ for $ P(I) $, $ 12 $ for $ P(G | D, I) $, $ 6 $ for $ P(L | G) $, $ 4 $ for $ P(S | I) $, total of $ 4 + 6 + 12 + 2 + 2 = 26 $ compared to $ 2 * 2 * 3 * 2 * 2 = 48 $ required for the Joint Distribution over all the variables.
# #### Types of Graphical Models
#
# There are mainly 2 types of graphical models:
# 1. Bayesian Models: A Bayesian Model consists of a directed graph and Conditional Probability Distributions(CPDs) associated with each of the node. Each CPD is of the form $ P(node | parents(node)) $ where $ parents(node) $ are the parents of the node in the graph structure.
# 2. Markov Models: A Markov model consists of an undirected graph and is parameterized by factors. Factors
# represent how much two or more variables agree with each other.
| notebooks/1. Introduction to Probabilistic Graphical Models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + active=""
#
# #notes:
#
# # look at the distribution of pixels to determine which method of keypoints
# # should be used?
#
# # determine neighbors according to the meta-data, match features only among
# # neighbors
#
# # start with smaller scale image, then work up if needed, how to tell which
# # size is necessary??? - texture changes maybe
#
# # should we stack onto each image as we go... or when a new line is started
#
# # only select keypoints from the same elevation level
#
# # store keypoints, descriptors
#
# # what does keypoint, descriptor look like, how do we combine different methods
#
# #which is faster, match two, build up
# #should i be matching subsequent kps, or grabbing keypoints each time?
#
# #DO:
# multi-scale Harris interest point detector
# download feature scripts
# invariant to 1-D rotation and scaling
# Matching is then based on a distance measure between descriptor vectors.
# Orthogonal moments based on orthogonal polynomials such as Zernike moments have been shown to be invariant to some linear opera- tions, have superior reconstruction capabilities in the presence of noise, and low redundancy compared to other moment representations [57] [27] [29].
#
# Our implementation uses the first 25 (n ; 8, m > 0) coefficients in the Zernike expansion of a disk (of radius proportional to the characteristic scale) around all interest points.
#
# The set of potential matches is further reduced by examining the ratio of distances between the second best match and the best match. If this is below a given threshold (we use 1.1 since higher values tend to eliminate good matches), the match is considered of low confidence and rejected.
#
# Incremental links. This approach initially solves for the global mosaic using the over- laps of the temporal sequence. Notice this is better than simply concatenating pairwise homographies since the transform for each image is determined considering the over- lap to the next and the previous image. Given the initial layout, all new possible overlaps (links in the topology) are verified, this information is incorporated and the transforms for all images are recalculated. This process is repeated until the topology stabilizes and no new links are added. In essence, the global mosaic is created and then refined by adding constraints as new overlaps become apparent.
#
#
# affine transform (linear, 6 parameters) rather than a projective transform is solved for each image, making the matrix to be inverted at each refinement of the topology of size 6N x 6N, with N the number of images.
#
# (Top left) Image layout after initial topology estimate (using only temporal overlap information) and proposed links based on possible overlap given the topology esti- mate (top right). (Blue for verified links and green for proposed ones.). Axes correspond to mosaic coordinates (in pixels). Image layout after second (bottom left) and final (bottom right) topology refinements. (Only verified links are shown.)
#
# make synthetic survey
# define goodness parameter
# decide what method to use for making the mosaic
# make zernike/multi=scale harris
#
# # different resolutions
# # 204x153
# # 408x306
# # 816x612
#
# # quality estimates
# # correlation error
#
# # comparison -
# # autopano, pix4d (3D), ptoptimizer, ptmender
# # look at spatial relation
#
# +
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
from skimage.io import ImageCollection, imsave
from skimage.color import rgb2hsv, rgb2gray, gray2rgb
from skimage.transform import warp, SimilarityTransform, AffineTransform, ProjectiveTransform
from skimage.feature import (ORB, match_descriptors, corner_harris, corner_fast, corner_peaks, BRIEF,
plot_matches)
from skimage.measure import ransac
from numpy.random import randint
from skimage.data import imread
from skimage.measure import label
from skimage.graph import route_through_array
from sklearn.feature_extraction import image
from subprocess import Popen, PIPE
from multiprocessing import Pool, freeze_support, cpu_count
import itertools
import math
# %matplotlib inline
# External blending binary and the scratch files it reads/writes.
path_to_enblend = '/Users/jhansen/Applications/enblend-enfuse-4.0-mac/enblend'
tmp_out = '/tmp/out.png'
tmp_base = '/tmp/base.png'
tmp_img = '/tmp/img.png'
data_dir = '../data/jpg/'
# Work on a single channel only: index 2 is the blue channel of the RGB photos.
img1 = imread('IMG_1765.jpg')[:,:,2]
img2 = imread('IMG_1767.jpg')[:,:,2]
# +
def hsv_imread(img_path):
    """Load the image at *img_path* and return it in HSV colour space."""
    rgb = imread(img_path)
    return rgb2hsv(rgb)
def gray_imread(img_path):
    """Load the image at *img_path* and return it as a grayscale array."""
    rgb = imread(img_path)
    return rgb2gray(rgb)
def load_images(search_dir, ftype):
    """Return one {'name': filename} record for every *ftype* file found
    directly inside *search_dir* (in glob order)."""
    pattern = os.path.join(search_dir, '*' + ftype)
    return [{'name': os.path.split(path)[1]} for path in glob(pattern)]
# -
def compare(*images, **kwargs):
    """
    Function to display images side by side (from skimage example)

    Parameters
    ----------
    image0, image1, ....: ndarray
        Images to display
    labels: list
        Labels for the different images
    """
    # FIX: pop 'labels' *before* forwarding kwargs to plt.subplots — it
    # previously reached subplots(), which raises on unknown keywords.
    # FIX: caller-supplied labels were also discarded by an unconditional
    # overwrite; only fall back to blanks when none were given.
    labels = kwargs.pop('labels', None)
    if labels is None:
        labels = [''] * len(images)

    f, ax = plt.subplots(1, len(images), **kwargs)
    ax = np.array(ax, ndmin=1)

    for n, (image, label) in enumerate(zip(images, labels)):
        ax[n].imshow(image, interpolation='nearest', cmap=plt.gray())
        ax[n].set_title(label)
        ax[n].axis('off')

    plt.tight_layout()
# +
def get_best_matches(k1, k2, matches):
    """RANSAC-fit an AffineTransform between two matched keypoint sets.

    Keypoints are stored (row, col) while the transform works in (x, y),
    hence the column flip.  Raises when there are too few matches for
    min_samples (the original noted this failure mode as well).
    """
    src_xy = k1[matches[:, 0], ::-1]
    dst_xy = k2[matches[:, 1], ::-1]
    model_robust, inliers = ransac((src_xy, dst_xy), AffineTransform,
                                   min_samples=20, residual_threshold=1,
                                   max_trials=40)
    return model_robust, inliers
def develop_metadata_mosaic():
    """Placeholder: lay out the mosaic purely from image metadata.

    Idea from the notes above: use maximum pitch/roll angles as a quality
    gate for which frames to include.  Not implemented yet.
    """
    # set max pitch and roll angles as quality
    pass
# +
def plot_two_matches(img1, img2, k1, k2, matches):
    """Show two images side by side with their keypoints circled in red.

    The *matches* argument is currently unused because the plot_matches
    call is commented out; it is kept for interface compatibility.
    """
    fig, ax = plt.subplots(nrows=1, ncols=2)
    plt.gray()
    #plot_matches(ax, img1, img2, k1, k2, matches)
    ax[0].imshow(img1)
    ax[0].axis('off')
    # Keypoints are (row, col); scatter wants (x, y), hence the swap.
    ax[0].scatter(k1[:, 1], k1[:, 0], facecolors='none', edgecolors='r')
    ax[1].imshow(img2)
    ax[1].axis('off')
    ax[1].scatter(k2[:, 1], k2[:, 0], facecolors='none', edgecolors='r')
    # FIX: plt.show() was called twice in a row; the duplicate was a no-op.
    plt.show()
def plot_two_keypoints(img1, img2, k1, k2, s1=1, s2=1):
    """Show two images side by side with their keypoints circled in red.

    s1/s2 are accepted for interface compatibility but are currently
    unused by the implementation.
    """
    fig, axes = plt.subplots(nrows=1, ncols=2)
    plt.gray()
    for axis, image, keypoints in zip(axes, (img1, img2), (k1, k2)):
        axis.imshow(image)
        axis.axis('off')
        # Keypoints are (row, col); scatter wants (x, y), hence the swap.
        axis.scatter(keypoints[:, 1], keypoints[:, 0],
                     facecolors='none', edgecolors='r')
    plt.show()
# +
def generate_costs(diff_image, mask, vertical=True, gradient_cutoff=2.):
    """
    Ensures equal-cost paths from edges to region of interest.

    Parameters
    ----------
    diff_image : ndarray of floats
        Difference of two overlapping images.
    mask : ndarray of bools
        Mask representing the region of interest in ``diff_image``.
    vertical : bool
        Control operation orientation.
    gradient_cutoff : float
        Controls how far out of parallel lines can be to edges before
        correction is terminated. The default (2.) is good for most cases.

    Returns
    -------
    costs_arr : ndarray of floats
        Adjusted costs array, ready for use.
    """
    if vertical is not True:
        # FIX: this branch called `tweak_costs`, a name that does not exist
        # (the function was evidently renamed to generate_costs), and it also
        # forwarded vertical=vertical (still False), which would have
        # recursed forever even with the right name.  Transpose, run the
        # vertical code path, and transpose the result back.
        return generate_costs(diff_image.T, mask.T, vertical=True,
                              gradient_cutoff=gradient_cutoff).T

    # Start with a high-cost array of 1's
    costs_arr = np.ones_like(diff_image)

    # Obtain extent of overlap
    row, col = mask.nonzero()
    cmin = col.min()
    cmax = col.max()

    # Label discrete regions
    cslice = slice(cmin, cmax + 1)
    labels = label(mask[:, cslice])

    # Find distance from edge to region
    upper = (labels == 0).sum(axis=0)
    lower = (labels == 2).sum(axis=0)

    # Reject areas of high change
    ugood = np.abs(np.gradient(upper)) < gradient_cutoff
    lgood = np.abs(np.gradient(lower)) < gradient_cutoff

    # Give areas slightly farther from edge a cost break
    costs_upper = np.ones_like(upper, dtype=np.float64)
    costs_lower = np.ones_like(lower, dtype=np.float64)
    costs_upper[ugood] = upper.min() / np.maximum(upper[ugood], 1)
    costs_lower[lgood] = lower.min() / np.maximum(lower[lgood], 1)

    # Expand from 1d back to 2d
    vdist = mask.shape[0]
    costs_upper = costs_upper[np.newaxis, :].repeat(vdist, axis=0)
    costs_lower = costs_lower[np.newaxis, :].repeat(vdist, axis=0)

    # Place these in output array
    costs_arr[:, cslice] = costs_upper * (labels == 0)
    costs_arr[:, cslice] += costs_lower * (labels == 2)

    # Finally, place the difference image
    costs_arr[mask] = diff_image[mask]

    return costs_arr
# +
def calc_enblend(timg, warped):
    """Placeholder: blend *timg* and *warped* using the external enblend
    tool (see path_to_enblend above).  Not implemented yet."""
    pass
def add_alpha_channel(img, background=-1):
    """Add an alpha layer to the image.

    The alpha layer is set to 1 for foreground and 0 for background
    (i.e. wherever the pixel value equals *background*).
    """
    # Promote grayscale input to RGB so the stacking below is uniform.
    if img.ndim == 2:
        img = gray2rgb(img)
    foreground = img != background
    return np.dstack((img, foreground))
# -
def add_alpha(img, mask=None):
    """
    Adds a masked alpha channel to an image.

    Parameters
    ----------
    img : (M, N[, 3]) ndarray
        Image data, should be rank-2 or rank-3 with RGB channels
    mask : (M, N[, 3]) ndarray, optional
        Mask to be applied. If None, the alpha channel is added
        with full opacity assumed (1) at all locations.
    """
    # Note: the default mask is shaped like the *input* image, i.e. it is
    # built before any grayscale->RGB promotion (matches original behavior).
    alpha = np.ones_like(img) if mask is None else mask
    if img.ndim == 2:
        img = gray2rgb(img)
    return np.dstack((img, alpha))
def simple_merge(base_warped, img_warped, base_mask, img_mask):
    """Average the two warped images wherever they overlap.

    Both inputs are floating-point images after warping, so summing them
    cannot overflow the dtype.
    """
    stacked_sum = base_warped + img_warped
    # Per-pixel coverage count: 0, 1 or 2 (bool -> float via arithmetic).
    coverage = base_mask * 1.0 + img_mask
    # Divide by coverage, clamped to at least 1 to avoid division by zero.
    return stacked_sum / np.maximum(coverage, 1)
# +
# -
def detect_and_extract(detector, img):
    """Run *detector* over *img* and return its (keypoints, descriptors)."""
    detector.detect_and_extract(img)
    return detector.keypoints, detector.descriptors
def find_output_shape(base_img, model_robust):
    """Compute the canvas needed to hold base_img plus its warped copy.

    Returns (output_shape, corner_min): output_shape is an integer
    (rows, cols) array; corner_min is the (x, y) offset of the canvas
    origin in transform coordinates.
    """
    rows, cols = base_img.shape[:2]
    # Image corners in (x, y) order: x runs along columns, y along rows.
    corners = np.array([[0, 0], [0, rows], [cols, 0], [cols, rows]])
    stacked = np.vstack((model_robust(corners), corners))
    corner_min = stacked.min(axis=0)
    extent = stacked.max(axis=0) - corner_min
    # Flip the (x, y) extent into (rows, cols) and round up to whole pixels.
    output_shape = np.ceil(extent[::-1]).astype(int)
    return output_shape, corner_min
def find_two_matches(base_img, img, base_k, img_k, base_d, img_d, min_matches=10):
    """Match descriptors between two images and robustly fit a homography.

    Returns (model, inlier_matches).  When no more than *min_matches* raw
    matches are found, returns a pair of empty (0, 2) arrays instead
    (preserving the original fallback contract).
    """
    matches = match_descriptors(base_d, img_d, cross_check=True)
    # Guard clause: too few candidates for a trustworthy RANSAC fit.
    if matches.shape[0] <= min_matches:
        return np.zeros((0, 2)), np.zeros((0, 2))
    # * src (image to be registered): the new image
    # * dst (reference image): the mosaic base / registration target
    # Keypoints are (row, col); transforms expect (x, y), hence the flip.
    src = img_k[matches[:, 1], ::-1]
    dst = base_k[matches[:, 0], ::-1]
    model_robust, inliers = ransac((src, dst), ProjectiveTransform,
                                   min_samples=8, residual_threshold=1,
                                   max_trials=600)
    return model_robust, matches[inliers]
# +
# Feature/matching configuration for the mosaic pipeline.
num_keypoints = 800
# Lazily-loaded collection of every jpg in the working directory.
pano_imgs = ImageCollection('*.jpg')
img_col = load_images('../data/jpg/', 'jpg')
img_feat = {}  # cache: image name -> detected features
num_imgs = len(img_col)
# Minimum raw descriptor matches required before attempting a RANSAC fit.
min_matches = 40
# -
def remove_empty_edges(img):
    """Crop away all-zero rows and columns at the borders of *img*.

    All-zero rows/columns lying *between* the first and last non-empty
    ones are kept; only the empty margins are trimmed.  A fully-zero image
    collapses to shape (0, 0).
    """
    def span_mask(flags):
        # 1 for every position between the first and last truthy flag
        # (inclusive), 0 elsewhere; an all-false input is returned as-is.
        if not any(flags):
            return flags
        lo = flags.index(True)
        hi = len(flags) - 1 - flags[::-1].index(True)
        return [1 if lo <= i <= hi else 0 for i in range(len(flags))]

    # Summing over axis 0 flags non-empty *columns* (so we crop axis 1),
    # and vice versa — hence the swapped compress axis.
    for sum_axis, crop_axis in ((0, 1), (1, 0)):
        flags = [bool(v) for v in np.sum(img, axis=sum_axis)]
        img = np.compress(span_mask(flags), img, axis=crop_axis)
    return img
# +
"""Read SIFT and SURF feature files.
See Also
--------
http://people.cs.ubc.ca/~lowe/keypoints/
http://www.vision.ee.ethz.ch/~surf/
"""
__all__ = ['load_sift', 'load_surf']
import numpy as np
def _sift_read(f, mode='SIFT'):
    """Read SIFT or SURF features from a file.
    Parameters
    ----------
    f : string or open file
        Input file generated by the feature detectors from
        http://people.cs.ubc.ca/~lowe/keypoints/ or
        http://www.vision.ee.ethz.ch/~surf/
    Returns
    -------
    data : record array with fields
      - row: int
          row position of feature
      - column: int
          column position of feature
      - scale: float
          feature scale
      - orientation: float
          feature orientation
      - data: array
          feature values
    """
    # Accept either a path or an already-open file object.
    # NOTE(review): `file` is the Python 2 builtin (this notebook declares a
    # Python 2 kernel); under Python 3 this must become open(f, 'r').
    if not hasattr(f, 'readline'):
        f = file(f, 'r')
    if mode == 'SIFT':
        # SIFT header: one line with "<n_features> <descriptor_length>".
        nr_features, feature_len = map(int, f.readline().split())
        datatype = np.dtype([('row', float), ('column', float),
                             ('scale', float), ('orientation', float),
                             ('data', (float, feature_len))])
    else:
        mode = 'SURF'
        # SURF header: descriptor length (incl. the sign entry, hence -1),
        # then the feature count on the next line.
        feature_len = int(f.readline()) - 1
        nr_features = int(f.readline())
        datatype = np.dtype([('column', float), ('row', float),
                             ('second_moment', (float, 3)),
                             ('sign', float), ('data', (float, feature_len))])
    # The rest of the file is whitespace-separated floats for all features.
    data = np.fromfile(f, sep=' ')
    if data.size != nr_features * datatype.itemsize / np.dtype(float).itemsize:
        raise IOError("Invalid %s feature file." % mode)
    # Reinterpret the flat float buffer as one structured record per feature.
    return data.view(datatype)

def load_sift(f):
    return _sift_read(f, mode='SIFT')

def load_surf(f):
    return _sift_read(f, mode='SURF')

# Give the thin wrappers the full docstring of the underlying reader.
load_sift.__doc__ = _sift_read.__doc__
load_surf.__doc__ = _sift_read.__doc__
# -
import surf
def surf_detect_and_extract(img):
    """Detect mahotas SURF interest points on *img*.

    Returns (keypoints, descriptors, raw_points) where raw_points is the
    full per-point array produced by surf.surf().

    NOTE(review): the cell below flags this as "not working - descriptor is
    not correct".  The column split at index 6 assumes the first columns of
    the mahotas output hold interest-point geometry and the remainder the
    descriptor -- TODO confirm against the mahotas surf documentation.
    """
    ip = surf.surf(img)
    #points = surf.interest_points(img, 6, 24, 1, max_points=1024)
    #descs = surf.descriptors(img, points, descriptor_only=True)
    print(ip.shape)  # debug output: number of points x per-point values
    k = ip[:, :2]  # assumed (row, col) positions -- TODO confirm
    d = ip[:,6:]   # assumed descriptor columns -- TODO confirm
    def rotate(y,x, a):
        # Rotate the point (x, y) by angle a (radians).  Currently unused.
        sa = np.sin(a)
        ca = np.cos(a)
        return (ca*x-sa*y, sa*x+ca*y)
    return k, d, ip
# not working - descriptor is not correct
k1, d1, ip1 = surf_detect_and_extract(img1)
k2, d2, ip2 = surf_detect_and_extract(img2)
plot_two_keypoints(img1, img2, k1, k2)
f1 = surf.show_surf(img1, ip1)
f2 = surf.show_surf(img2, ip2)
plt.figure()
plt.imshow(f1)
plt.figure()
plt.imshow(f2)
# +
from skimage.feature import CENSURE
from mahotas.features import zernike_moments
from mahotas.features import surf
import timeit
import time
# -
import skimage.data as data
def z(img1, img2):
    """Match Zernike-moment descriptors at FAST corners between two images
    and return the RANSAC inlier precision (inliers / raw matches)."""
    br = zernike()
    #k1 = corner_peaks(corner_harris(img1, method='eps', eps=.001, sigma=3), min_distance=5)
    k1 = corner_peaks(corner_fast(img1, n=4, threshold=.001), min_distance=5)
    br.extract(img1, k1)
    d1 = br.descriptors
    k1 = k1[br.mask]

    #k2 = corner_peaks(corner_harris(img2, method='eps', eps=.001, sigma=3), min_distance=5)
    # FIX: corner_fast returns a per-pixel response image, not keypoints;
    # it must go through corner_peaks (as for k1 above) to yield (row, col)
    # coordinates — the original passed the raw response image onward.
    k2 = corner_peaks(corner_fast(img2, n=4, threshold=.001), min_distance=5)
    br = zernike()
    br.extract(img2, k2)
    d2 = br.descriptors
    k2 = k2[br.mask]

    matches = match_descriptors(d1, d2, cross_check=True)
    model_robust, ransac_matches = find_two_matches(img1, img2,
                                                    k1, k2,
                                                    d1, d2)
    #fig, ax = plt.subplots(nrows=1, ncols=1)
    #plt.gray()
    #plot_matches(ax, img1, img2, k1, k2, ransac_matches)
    # Precision: fraction of raw matches that survived the RANSAC fit.
    prec = round(ransac_matches.shape[0]/float(matches.shape[0]), 2)
    #print('zern keypoints', k1.shape[0], k2.shape[0], 'matches', matches.shape[0],ransac_matches.shape[0], prec )
    return prec
def o(img1, img2):
    """Matching precision between two images using ORB keypoints/descriptors.

    Returns the fraction of putative descriptor matches that survive RANSAC,
    rounded to two decimals.

    BUG FIX: the body of this function had been commented out, so it
    returned None -- but later cells accumulate its result with
    `poh += o(img1, img2)`, which raises TypeError on None.  The pipeline is
    restored so the function returns the precision again.
    """
    # fast_threshold - decide whether pixels are brighter or darker,
    # decrease for more corners
    # harris_k smaller for detection of sharp corners
    orb = ORB(n_keypoints=600, fast_n=5,
              fast_threshold=0.02,
              harris_k=.01, n_scales=10)
    k1, d1 = detect_and_extract(orb, img1)
    k2, d2 = detect_and_extract(orb, img2)
    matches = match_descriptors(d1, d2, cross_check=True)
    model_robust, ransac_matches = find_two_matches(img1, img2,
                                                    k1, k2,
                                                    d1, d2)
    # fraction of putative matches kept by RANSAC
    prec = round(ransac_matches.shape[0]/float(matches.shape[0]), 2)
    return prec
# Micro-benchmark: average wall-clock time of one z() call over several runs.
n_runs = 10
start = time.time()
for _ in range(n_runs):
    z(img1, img2)
print('took', (time.time() - start) / float(n_runs))
# Debug probe: re-defines z() with a single argument, shadowing the two-image
# matcher above.  NOTE(review): later cells still call z(img1, img2); once
# this cell runs those calls break -- consider renaming this probe.
def z(img1):
    br = zernike()
    #k1 = corner_peaks(corner_harris(img1, method='eps', eps=.001, sigma=3), min_distance=5)
    k1 = corner_peaks(corner_fast(img1, n=4, threshold=.001), min_distance=5)
    br.extract(img1, k1)
    d1 = br.descriptors
    # keep only keypoints far enough from the border (see zernike.extract)
    kk = k1[br.mask]
    print(k1.shape, kk.shape, d1.shape)
z(img1)
# +
# Benchmark cell: compare ORB (o) vs Zernike/BRIEF (z) matching precision on
# the original pair plus scaled / rotated / scaled+rotated variants of img2.
tform = AffineTransform(scale=(1.2,1.2),translation=(0,-100))
img3 = warp(img2, tform)
img4 = rotate(img2, 25)
img5 = rotate(img3, 25)
# accumulators: o=original pair, w=warped(scaled), r=rotated, wr=both
poh, pohw, pohr, pohwr = 0,0,0,0
zoh, zohw, zohr, zohwr = 0,0,0,0
a = 1
for x in range(a):
    poh += o(img1, img2)
    pohw += o(img1, img3)
    pohr += o(img1, img4)
    pohwr += o(img1, img5)
for x in range(a):
    zoh += z(img1, img2)
    zohw += z(img1, img3)
    zohr += z(img1, img4)
    zohwr += z(img1, img5)
# average precision per transformation ("easy" image pair)
oa = [poh/float(a), pohw/float(a), pohr/float(a), pohwr/float(a)]
za = [zoh/float(a), zohw/float(a), zohr/float(a), zohwr/float(a)]
# Repeat the whole experiment on a harder image pair.
#img1 = imread('IMG_1765.jpg')[:,:,2]
#img2 = imread('IMG_1767.jpg')[:,:,2]
img1 = imread('IMG_1771.jpg')[:,:,2]
img2 = imread('IMG_1772.jpg')[:,:,2]
tform = AffineTransform(scale=(1.2,1.2),translation=(0,-100))
img3 = warp(img2, tform)
img4 = rotate(img2, 25)
img5 = rotate(img3, 25)
poh, pohw, pohr, pohwr = 0,0,0,0
zoh, zohw, zohr, zohwr = 0,0,0,0
for x in range(a):
    poh += o(img1, img2)
    pohw += o(img1, img3)
    pohr += o(img1, img4)
    pohwr += o(img1, img5)
for x in range(a):
    zoh += z(img1, img2)
    zohw += z(img1, img3)
    zohr += z(img1, img4)
    zohwr += z(img1, img5)
# average precision per transformation ("hard" image pair)
eoa = [poh/float(a), pohw/float(a), pohr/float(a), pohwr/float(a)]
eza = [zoh/float(a), zohw/float(a), zohr/float(a), zohwr/float(a)]
# -
#ORB Easy Images 63.2 51.3 0.567 37.3
#Zernike Easy Images 69.6 59.5 10.6 15.6
#ORB Hard Images 42.5 30.8 47.2 31.4
#Zernike Hard Images 57.3 37.5 7.5 14.3
import pandas as pd
# Tabulate the precision runs: one row per detector/difficulty combination,
# one column per image transformation, then plot the transposed table.
row_labels = ['ORB Easy Images', 'Zernike Easy Images', 'ORB Hard Images', 'Zernike Hard Images', ]
col_labels = ["Sample Image", "Scaled", "Rotated", "Scaled and Rotated"]
scores = np.array([oa, za, eoa, eza])
p = pd.DataFrame(scores, index=row_labels, columns=col_labels)
p
p.T.plot()
def _zern_loop(image, descriptors, keypoints, pos0, pos1):
for p in range(pos0.shape[0]):
pr0 = pos0[p, 0]
pc0 = pos0[p, 1]
pr1 = pos1[p, 0]
pc1 = pos1[p, 1]
for k in range(keypoints.shape[0]):
kr = keypoints[k, 0]
kc = keypoints[k, 1]
if image[kr + pr0, kc + pc0] < image[kr + pr1, kc + pc1]:
descriptors[k, p] = True
from skimage.feature.util import _mask_border_keypoints, DescriptorExtractor
class zernike(DescriptorExtractor):
    """Binary intensity-comparison descriptor extractor.

    Despite the name, this is a BRIEF-style descriptor (adapted from
    skimage's BRIEF): each descriptor bit compares image intensities at a
    pair of pseudo-random offsets inside a square patch around a keypoint.
    """
    def __init__(self, descriptor_size=256, patch_size=49,
                 sigma=1, sample_seed=1):
        # number of comparison bits per keypoint
        self.descriptor_size = descriptor_size
        # side length of the square sampling patch
        self.patch_size = patch_size
        # NOTE(review): sigma is stored but never used below (no smoothing)
        self.sigma = sigma
        # RNG seed so the same offset pattern is used for every image
        self.sample_seed = sample_seed
        self.descriptors = None
        self.mask = None
    def extract(self, image, keypoints):
        """Compute descriptors for `keypoints` in `image`.

        Sets self.descriptors to a (n_kept, descriptor_size) boolean array
        and self.mask to the boolean filter of keypoints far enough from
        the image border to be described.
        """
        patch_size = self.patch_size
        desc_size = self.descriptor_size
        # seeded RNG: identical sample offsets on every call
        random = np.random.RandomState()
        random.seed(self.sample_seed)
        # Gaussian integer offsets, over-drawn 8x then clipped to the patch
        samples = (patch_size / 5.0) * random.randn(desc_size * 8)
        samples = np.array(samples, dtype=np.int32)
        samples = samples[(samples < (patch_size // 2))
                          & (samples > - (patch_size - 2) // 2)]
        pos1 = samples[:desc_size * 2].reshape(desc_size, 2)
        pos2 = samples[desc_size * 2:desc_size * 4].reshape(desc_size, 2)
        pos1 = np.ascontiguousarray(pos1)
        pos2 = np.ascontiguousarray(pos2)
        # drop keypoints whose patch would extend past the image border
        self.mask = _mask_border_keypoints(image.shape, keypoints,
                                           patch_size // 2)
        keypoints = np.array(keypoints[self.mask, :], dtype=np.intp,
                             order='C', copy=False)
        self.descriptors = np.zeros((keypoints.shape[0], desc_size),
                                    dtype=bool, order='C')
        _zern_loop(image, self.descriptors.view(np.uint8), keypoints,
                   pos1, pos2)
# + active=""
# One caveat to look out for when utilizing Zernike moments for shape description
# is the scaling and translation of the object in the image. Depending on where
# the object is located in the image, your Zernike moments will be drastically different.
# -
def find_all_matches(unmatched,
                     matched=None,
                     fail_limit=3,
                     num_keypoints=800,
                     min_to_match=20,
                     do_plot=False):
    """Recursively group images that match each other via ORB + RANSAC.

    Takes a list of image dicts (each with at least a 'name' key, optionally
    cached 'img'/'keypoints'/'descriptors'), matches the first image against
    the rest, then recurses on whatever could not be matched.

    Parameters
    ----------
    unmatched : list of dict
        Images not yet assigned to a group.
    matched : list of dict, optional
        Accumulator of finished groups.  BUG FIX: this used to default to a
        mutable `[]`, so results silently accumulated across separate
        top-level calls; a None sentinel restores a fresh list per call.
    fail_limit : int
        Give up on the current base image after this many failed matches.
    num_keypoints : int
        ORB keypoint budget per image.
    min_to_match : int
        Minimum RANSAC inlier count required to declare a match.
    do_plot : bool
        If True, show keypoint plots for every comparison.

    Returns
    -------
    list of dict
        The accumulated match groups.
    """
    if matched is None:
        matched = []
    num_unmatched = len(unmatched)
    if num_unmatched == 0:
        print("NONE left unmatched")
        return matched
    if num_unmatched == 1:
        print("ONE left unmatched")
        matched.append(unmatched[0])
        return matched
    print("=====================================")
    base = unmatched[0]
    if 'img' in base.keys():
        base_img = base['img']
    else:
        base_img = imread(os.path.join(data_dir, base['name']))
    orb = ORB(n_keypoints=num_keypoints, fast_threshold=0.05)
    base_unmatched = []
    base_name = base['name'].split('.')[0]
    base_matched = 0
    # go through each image that is not yet matched
    for xx, timg in enumerate(unmatched[1:]):
        # if the image has not yet been loaded, load it now
        if 'img' not in timg.keys():
            timg['img'] = imread(os.path.join(data_dir, timg['name']))
        # for now, only use the 3rd channel
        img = timg['img'][:,:,2]
        # base_img can change when a match is merged, so re-extract each pass
        base_k, base_d = detect_and_extract(orb, base_img[:,:,2])
        # if we haven't recorded the keypoints for this image, get them now
        if 'keypoints' in timg.keys():
            img_k = timg['keypoints']
            img_d = timg['descriptors']
        else:
            img_k, img_d = detect_and_extract(orb, img)
            timg['keypoints'] = img_k
            timg['descriptors'] = img_d
        matches = match_descriptors(base_d, img_d, cross_check=True)
        model_robust, ransac_matches = find_two_matches(base_img[:,:,2], img,
                                                        base_k, img_k,
                                                        base_d, img_d)
        if do_plot:
            print('matches', matches.shape[0], ransac_matches.shape[0])
            #plot_two_keypoints(ax, base_img, img, base_k, img_k)
            fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15,12))
            plt.title('%s #### %s' %(base_name, timg['name']))
            plt.gray()
            ax[0].imshow(base_img)
            ax[0].axis('off')
            ax[0].scatter(base_k[:, 1], base_k[:, 0], facecolors='none', edgecolors='r')
            ax[1].imshow(img)
            ax[1].axis('off')
            ax[1].scatter(img_k[:, 1], img_k[:, 0], facecolors='none', edgecolors='r')
            plt.show()
        if ransac_matches.shape[0] < min_to_match:
            print("------------", matches.shape[0], "ransac", ransac_matches.shape[0])
            base_unmatched.append(timg)
            if len(base_unmatched) >= fail_limit:
                # add two since we've already added this timg
                base_unmatched.extend(unmatched[xx+2:])
                break
        else:
            #print('ransac matches', ransac_matches.shape)
            base_img = find_mask(base_img, timg['img'], model_robust)
            # NOTE(review): the next line immediately discards the blended
            # mosaic returned by find_mask and keeps the raw channel instead;
            # looks like leftover debugging -- confirm which base is intended.
            base_img = img
            base_matched += 1
            print("***********", base_matched)
            base_name+= '_' + timg['name'].split('.')[0]
    # AFTER an image has been matched, remove from memory
    # if we were able to match some images to this base_img that
    # were not matched in the last run, call again until
    # the number of unmatched images stops decreasing
    #print('num previous unmatched', num_unmatched)
    #print("could not match %s out of %s imgs" %(len(base_unmatched),
    #                                            len(unmatched)-1))
    # not_matched must be > 0
    # the new number of matches must be less than last time's not matched
    # NOTE(review): all_base_matched is computed but never used; rr records
    # only this run's base_matched.
    if 'base_matched' in base.keys():
        all_base_matched = base['base_matched'] + base_matched
    else:
        all_base_matched = 1 + base_matched
    rr = {'name':base_name, 'img':base_img, 'base_matched':base_matched}
    if (len(base_unmatched)) > 0:
        #if base_matched > 0 :
        #base_unmatched.insert(0, rr)
        #print("!!!!!!!!!!! 1 match %s, unmatch %s" %(len(matched), len(base_unmatched)))
        #return find_all_matches(base_unmatched, matched, num_keypoints, min_to_match)
        #else:
        print("DECLARING %s as matched, ending with %s matches" %(base_name, base_matched))
        matched.append(rr)
        #print("!!!!!!!!!!! 2 match %s, unmatch %s" %(len(matched), len(base_unmatched)))
        return find_all_matches(base_unmatched, matched, fail_limit, num_keypoints, min_to_match, do_plot)
    else:
        matched.append(rr)
        #print("!!!!!!!!!!! 3 match %s, unmatch %s" %(len(matched), len(base_unmatched)))
        return matched
# +
def minimum_cost_merge(base_warped, img_warped, base_mask, img_mask):
    """Blend two aligned images along a minimum-cost seam.

    Finds the cheapest top-to-bottom path through the absolute difference of
    the two warped images (route_through_array), uses it to split the canvas
    into a base-side and an img-side mask, and composites the two halves.
    """
    # Start with the absolute value of the difference image.
    # np.abs is necessary because we don't want negative costs!
    costs = generate_costs(np.abs(img_warped - base_warped),
                           img_mask & base_mask)
    # zero-cost first/last rows let the seam enter and exit anywhere
    costs[0, :] = 0
    costs[-1, :] = 0
    output_shape = base_warped.shape
    # Arguments are:
    #   cost array
    #   start pt
    #   end pt
    #   can it traverse diagonally
    ymax = output_shape[1] - 1
    xmax = output_shape[0] - 1
    # Start anywhere along the top and bottom, left of center.
    mask_pts01 = [[0, ymax // 3],
                  [xmax, ymax // 3]]
    # Start anywhere along the top and bottom, right of center.
    # NOTE(review): mask_pts12 is computed but never used below.
    mask_pts12 = [[0, 2*ymax // 3],
                  [xmax, 2*ymax // 3]]
    pts, _ = route_through_array(costs, mask_pts01[0], mask_pts01[1], fully_connected=True)
    # Convert list of lists to 2d coordinate array for easier indexing
    pts = np.array(pts)
    # Start with an array of zeros and place the path
    _img_mask = np.zeros_like(img_warped, dtype=np.uint8)
    _img_mask[pts[:, 0], pts[:, 1]] = 1
    # Labeling starts with zero at point (0, 0)
    _img_mask[label(_img_mask, connectivity=1) == 0] = 1
    _base_mask = ~(_img_mask).astype(bool)
    # NOTE(review): the four values below are computed but never used; the
    # final composite is built directly from the gray warped images.
    base_color = gray2rgb(base_warped)
    img_color = gray2rgb(img_warped)
    base_final = add_alpha(base_warped, _base_mask)
    img_final = add_alpha(img_warped, _img_mask)
    # Start with empty image
    base_combined = np.zeros_like(base_warped)
    base_combined += base_warped * _base_mask
    base_combined += img_warped * _img_mask
    return base_combined
# -
range(0, 100, 8)
def patchmaker(img, imsize=(255,255), percent_overlap=10):
    """
    Split an image into overlapping patches

    Parameters
    ----------
    img : ndarray
        Image from which to extract patches
    imsize : tuple of ints
        size of patches as (rows, cols)
    percent_overlap : int
        Percent as int of overlap desired between overlapping images

    Returns
    -------
    patches : list of imsize overlapping segments of the image

    Notes
    -----
    Patches that would extend past (or end exactly at) the image edge are
    skipped, matching the original implementation's `<` loop bounds.
    """
    # store the patches here
    patches = []
    patch_rows = imsize[0]
    patch_cols = imsize[1]
    if 0 < percent_overlap < 100:
        # step size: the fraction of a patch that does NOT overlap
        non_overlap_rows = int(patch_rows*.01*(100-percent_overlap))
        non_overlap_cols = int(patch_cols*.01*(100-percent_overlap))
    else:
        # no (or degenerate) overlap requested: step a full patch at a time
        non_overlap_rows = patch_rows
        non_overlap_cols = patch_cols
    # (r1, r2) and (c1, c2) delimit the current patch in the original image
    r1, c1 = 0,0
    r2, c2 = imsize
    # while the last index of the patch image is less than the size of the original, keep going
    while r2 < img.shape[0]:
        c1 = 0
        c2 = c1 + patch_cols
        while c2 < img.shape[1]:
            patch = img[r1:r2, c1:c2]
            patches.append(patch)
            # BUG FIX: columns must advance by the column step; the original
            # used non_overlap_rows here, which is wrong for non-square patches
            c1 += non_overlap_cols
            c2 = c1 + patch_cols
        r1 += non_overlap_rows
        r2 = r1 + patch_rows
    return patches
# +
def find_mask(base_img, img, model_robust):
    """Warp `img` onto `base_img` via `model_robust` and blend with enblend.

    Writes both warped images (with alpha channels) to temp files, shells out
    to the external `enblend` binary, and returns the blended RGB image.
    Falls back to the warped base image if enblend produced no output.
    NOTE(review): relies on module-level tmp_base/tmp_img/tmp_out paths and
    path_to_enblend.
    """
    # what type of interpolation
    # 0: nearest-neighbor
    # 1: bi-linear
    warp_order = 1
    output_shape, corner_min = find_output_shape(base_img, model_robust)
    #print("output_shape", output_shape, corner_min)
    #print(model_robust.scale, model_robust.translation, model_robust.rotation)
    # This in-plane offset is the only necessary transformation for the base image
    offset = SimilarityTransform(translation= -corner_min)
    # cval=-1 marks pixels outside the source so masks can be derived below
    base_warped = warp(base_img[:,:,2], offset.inverse, order=warp_order,
                       output_shape = output_shape, cval=-1)
    base_color = warp(base_img, offset.inverse, order=warp_order,
                      output_shape = output_shape, cval=-1)
    # warp image corners to new position in mosaic
    transform = (model_robust + offset).inverse
    img_warped = warp(img[:,:,2], transform, order=warp_order,
                      output_shape=output_shape, cval=-1)
    img_color = warp(img, transform, order=warp_order,
                     output_shape=output_shape, cval=-1)
    # valid-pixel masks: anything still -1 never came from a source image
    base_mask = (base_warped != -1)
    base_warped[~base_mask] = 0
    img_mask = (img_warped != -1)
    img_warped[~img_mask] = 0
    #convert to rgb
    #base_alpha = add_alpha(base_color, base_mask)
    img_alpha = np.dstack((img_color, img_mask))
    base_alpha = np.dstack((base_color, base_mask))
    plt.imsave(tmp_base, base_alpha )
    plt.imsave(tmp_img, img_alpha )
    # hand both temp images to the external enblend tool
    cmd = [path_to_enblend, tmp_base, tmp_img, '-o', tmp_out]
    p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, err = p.communicate(b"input data that is passed to subprocess' stdin")
    rc = p.returncode
    # remove alpha channel
    if os.path.exists(tmp_out):
        out = imread(tmp_out)[:,:,:3]
    else:
        # enblend failed: show both inputs for debugging, fall back to base
        print("couldnt find out image")
        print(rc, output, err)
        plt.figure()
        plt.imshow(base_alpha)
        plt.figure()
        plt.imshow(img_alpha)
        plt.show()
        out = base_alpha[:,:,:3]
    #if you don't have enblend, you can use one of these
    #merged_img = simple_merge(base_warped, img_warped, base_mask, img_mask)
    #merged_img = minimum_cost_merge(base_warped, img_warped, base_mask, img_mask)
    #merged_edges = remove_empty_edges(merged_img)
    return out
# -
def find_alpha(base_img, img, model_robust):
    """Warp `img` into the mosaic frame of `base_img` and attach an alpha mask.

    Returns the warped color image stacked with a validity mask as its alpha
    channel.

    BUG FIX: the original referenced `img_warped` whose assignment had been
    commented out (NameError) and returned the undefined name `tmp_alpha`.
    The warp is restored and the function now returns the computed
    `img_alpha`.  NOTE(review): judging by find_mask, the author may instead
    have intended to imsave to a `tmp_alpha` path and return that path --
    confirm against callers.
    """
    # what type of interpolation
    # 0: nearest-neighbor
    # 1: bi-linear
    warp_order = 1
    output_shape, corner_min = find_output_shape(base_img, model_robust)
    # This in-plane offset is the only necessary transformation for the base image
    offset = SimilarityTransform(translation= -corner_min)
    # warp image corners to new position in mosaic
    transform = (model_robust + offset).inverse
    # single-channel warp only drives the validity mask below
    img_warped = warp(img[:,:,2], transform, order=warp_order,
                      output_shape=output_shape, cval=-1)
    img_color = warp(img, transform, order=warp_order,
                     output_shape=output_shape, cval=-1)
    # pixels still at cval=-1 never came from the source image
    img_mask = (img_warped != -1)
    # stack the mask as an alpha channel onto the warped color image
    img_alpha = np.dstack((img_color, img_mask))
    return img_alpha
# +
# imsave?
# -
#patches = patchmaker(img_col[0])
# Disabled driver: sweep (fail_limit, n_keypoints, min_to_match) settings and
# report how many groups remain after each pass.  Flip `if 0` to run.
if 0:
    unmatched = img_col
    params = [[1, 500, 50], [2, 800, 20], [1000, 1000, 10], [1000, 10000, 7]]
    for param in params:
        matched = find_all_matches(unmatched, [], param[0], param[1], param[2], False)
        print('found %s matches with' %len(matched), param)
    print("HAVE %s MATCHES" %len(matched))
# Disabled viewer: display each matched group's image.
if 0:
    for xx, timg in enumerate(matched):
        plt.figure(figsize=(14,14))
        plt.title("NUM %s NAME %s" %(xx, timg['name']))
        plt.imshow(timg['img'])
        plt.show()
# +
def split_find_all_matches(i):
    """Adapter for multiprocessing: unpack the single argument sequence `i`
    into positional arguments for find_all_matches (Pool.map passes exactly
    one object per task)."""
    return find_all_matches(*i)
#all_matched = find_all_matches(unmatched, [], param[0], param[1], param[2], False)
#pool = Pool(processes=cpu_count())
#split_unmatched = [img_col[:6]]
#params = [[], 1, 500, 50]
#all_matched = pool.map(split_find_all_matches,
#                       itertools.izip(split_unmatched,
#                                      itertools.repeat(params)))
# -
print("HE")
# +
# itertools.izip?
# -
x = [4, 5, 6]
a = {'stuff':2}
b = {'333':2}
z = [a, b]
x.extend(z)
x
| iceview/mosaic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2.7 (conda)
# language: python
# name: python27conda
# ---
# # Plotting Gallery
import pandas as pd
# Feature matrix of flight trip logs, indexed by trip_log_id.
fm = pd.read_csv('../../../fm.csv', index_col='trip_log_id')
# **Pie Charts**
#
# A *pie chart* does a good job of showing the distribution of different categorical variables. The `piechart` function makes it easy to see what a variety of parameters would look like:
#
# 
#
# We can play with the sliders to find the image that looks just right. The *Merge Slider* makes an "Other" category by combining infrequent values. The *Drop Slider* in a sense does the opposite: it removes high frequency values from the total. In this example, that's a reasonable thing to do since we don't want to include `JFK` or `LGA` in flights to NYC.
#
# Once we've settled on a parameter set, we can make a static plot by setting `static=True` with the parameters we like.
# +
from henchman.plotting import show
import henchman.plotting as hplot
from bokeh.palettes import Category20b_20
# Static pie chart of destinations: the 2 most frequent values dropped,
# everything past the 10th merged into "Other".
show(hplot.piechart(fm['flights.dest'],
                    drop_n=2, mergepast=10),
     title='Destination Airport for flights from NYC', static=True, width=620, colors=Category20b_20)
# +
from henchman.plotting import show
from henchman.plotting import gridplot
import henchman.plotting as hplot
# Two static pies side by side; fig=True returns the figure objects so
# gridplot can arrange them instead of rendering immediately.
p1 = show(hplot.piechart(fm['flights.dest'],
                         drop_n=0, mergepast=10),
          title='Destination Airport for flights from NYC', static=True, width=310, height=325, fig=True)
p2 = show(hplot.piechart(fm['flights.carrier'],
                         sort=False, mergepast=None),
          title='Airline percentages for flights to NYC', static=True, width=310, height=325, fig=True)
gridplot([p1, p2], n_cols=2)
# -
# **Histograms**
#
# A histogram is a way of showing the distribution of a numeric variable. Histograms can be tricky: small changes to input parameters can dramatically change what the final graph will look like. In particular, the *number of bins* (`n_bins`) and the excluded values (`col_max` and `col_min`) change the height of particular bars! To sidestep this, we have an interactive method for histograms as well
#
# 
#
# Once we've settled on parameters, we can once again set `static=True` to get a static plot that looks just how we would like.
# +
from henchman.plotting import gridplot
import henchman.plotting as hplot
# Four histogram variants of average flight delay: plain, clipped at
# col_max, and overlaid with the label column (raw and normalized counts).
p1 = show(hplot.histogram(fm['flights.MEAN(trip_logs.arr_delay)'],
                          n_bins=50),
          title='Histogram of average flight delay', height=320, width=310, static=True, fig=True)
p2 = show(hplot.histogram(fm['flights.MEAN(trip_logs.arr_delay)'],
                          n_bins=50, col_max=120),
          title='Histogram of average flight delay with col_max', height=320, width=310, static=True, fig=True)
p3 = show(hplot.histogram(
    fm['flights.MEAN(trip_logs.arr_delay)'],
    fm['label'],
    n_bins=50,
    col_max=150,
    normalized=False),
          title='Actual delays overlayed over historical delays (real)', height=160, width=620, static=True, fig=True)
p4 = show(hplot.histogram(
    fm['flights.MEAN(trip_logs.arr_delay)'],
    fm['label'],
    n_bins=50,
    col_max=150,
    normalized=True),
          title='Actual delays overlayed over historical delays (normalized)', height=160, width=620, static=True, fig=True)
gridplot([[p1, p2], [p3], [p4]])
# -
# **Bivariate Plots**
# The scatter plots provide an opportunity to explore data in more dimensions. You provide a minimum of two columns to use as the `x` and `y` axes. You can also give a `agg` column to aggregate by and a `label` column which you would like listed in the hover tooltip.
#
# In this example, we want to look at how distance and delay are related to carrier. We can look at the maximum delay and distance per carrier and the average delay and distance to get a good idea of how that carrier is operating out of NYC.
#
# 
#
from henchman.plotting import show
import henchman.plotting as hplot
# Mean distance vs mean delay, aggregated per carrier; `label` shows in hover.
show(hplot.scatter(fm['distance'], fm['flights.MEAN(trip_logs.arr_delay)'], cat=fm['flights.carrier'],
                   label=fm['label'], aggregate='mean'),
     y_axis='Mean Delay by Carrier', x_axis='Mean Distance by Carrier',
     title='Average Distance and Delay by Carrier', static=True)
# When we have a time column, we can see how different numeric columns change over time. In this example, we look at how the `label` changes over time. Here, we can pin down the date of a particular winter storm where 89% of flights on January 6 2017 were delayed
#
# 
#
# Daily mean of the delay label through Feb 2017, exported as a PNG.
show(hplot.timeseries(fm['time'], fm['label'], n_bins=60, col_max='2017-02-01'),
     title='Mean labels per day', height=300, width=630, png=True, colors=['#990000', 'white'])
| docs/_source/plotting_gallery.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Datafaucet
#
# Datafaucet is a productivity framework for ETL, ML application. Simplifying some of the common activities which are typical in Data pipeline such as project scaffolding, data ingesting, start schema generation, forecasting etc.
# ## Data Engine
# ### Starting the engine
#
# Super simple, yet flexible :)
import datafaucet as dfc
#start the engine
engine = dfc.engine('spark')
#you can also use directly the specific engine class
engine = dfc.SparkEngine()
# Loading and saving data resources is an operation performed by the engine. The engine configuration can be passed straight as parameters in the engine call, or configured in metadata yaml files.
# ### Engine Context
#
# You can access the underlying engine by referring to the engine.context. In particular for the spark engine the context can be accessed with the next example code:
# Grab the underlying Spark session from the engine context.
spark = dfc.context()
spark
# +
# create a dataframe with two columns, named resp. 'a' and 'b'
df = spark.createDataFrame([('yes',1),('no',0)], ('a', 'b'))
df.show()
# -
# ### Engine configuration
# Inspect the engine's configuration, environment, and full info.
engine = dfc.engine()
engine.conf
engine.env
# For the full configuration, please uncomment and execute the following statement
engine.info
# ### Submitting engine parameters during engine initialization
#
# Submit master, configuration parameters and services as engine params
import datafaucet as dfc
dfc.engine('spark', master='spark://spark-master:7077', services='postgres')
dfc.engine().conf
| demos/tutorial/demo/engine.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#imports
from datasets import load_dataset
from thai2transformers.metrics import classification_metrics
from pythainlp.ulmfit import process_thai
import pandas as pd
# +
#parameters
class Args:
    """Run configuration namespace (stands in for argparse arguments)."""
    # HF datasets identifier of the corpus to train on
    dataset_name_or_path = 'wisesight_sentiment'
    # column holding the raw input text
    feature_col = 'texts'
    # column holding the class label
    label_col = 'category'
    metric_for_best_model = 'f1_micro'
    seed = 1412
args = Args()
# -
dataset = load_dataset(args.dataset_name_or_path)
dataset
# wongnai_reviews ships without a validation split: carve 10% off train.
if args.dataset_name_or_path == 'wongnai_reviews':
    train_val_split = dataset['train'].train_test_split(test_size=0.1, shuffle=True, seed=2020)
    dataset['train'] = train_val_split['train']
    dataset['validation'] = train_val_split['test']
dataset
# +
#nbsvm class
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_is_fitted
from sklearn.linear_model import LogisticRegression
from scipy import sparse
class NbSvmClassifier(BaseEstimator, ClassifierMixin):
    """NB-SVM: logistic regression over Naive-Bayes log-count-ratio features.

    Implements the NBSVM idea of Wang & Manning (2012): each (sparse) feature
    vector is scaled elementwise by the per-feature log ratio of smoothed
    positive vs negative class counts, then fed to a liblinear
    LogisticRegression.  Scikit-learn estimator compatible.
    """
    def __init__(self, penalty='l2', C=1.0, dual=False, seed=1412):
        # parameters are stored verbatim, as the sklearn API requires
        self.penalty = penalty
        self.C = C
        self.dual = dual
        self.seed = seed
    def predict(self, x):
        """Predict class labels for sparse matrix x."""
        # Verify that model has been fit
        check_is_fitted(self, ['_r', '_clf'])
        return self._clf.predict(x.multiply(self._r))
    def predict_proba(self, x):
        """Predict class probabilities for sparse matrix x."""
        # Verify that model has been fit
        check_is_fitted(self, ['_r', '_clf'])
        return self._clf.predict_proba(x.multiply(self._r))
    def fit(self, x, y):
        """Fit on sparse features x and binary labels y; returns self."""
        # Check that X and y have correct shape
        y = y.toarray().ravel() if type(y)!=np.ndarray else y.ravel()
        x, y = check_X_y(x, y, accept_sparse=True)
        def pr(x, y_i, y):
            # smoothed per-feature count sum for samples labelled y_i
            p = x[y==y_i].sum(0)
            return (p+1) / ((y==y_i).sum()+1)
        # log-count ratio r = log(p(f|1) / p(f|0))
        self._r = sparse.csr_matrix(np.log(pr(x,1,y) / pr(x,0,y)))
        x_nb = x.multiply(self._r)
        self._clf = LogisticRegression(penalty = self.penalty,
                                       C=self.C,
                                       dual=self.dual,
                                       solver='liblinear',
                                       random_state=self.seed,).fit(x_nb, y)
        return self
# -
# generated_reviews_enth stores each text as {'th': ..., 'en': ...}; keep Thai.
if args.dataset_name_or_path == 'generated_reviews_enth':
    texts_train = [i['th'] for i in dataset['train'][args.feature_col]]
    texts_valid = [i['th'] for i in dataset['validation'][args.feature_col]]
    texts_test = [i['th'] for i in dataset['test'][args.feature_col]]
else:
    texts_train = dataset['train'][args.feature_col]
    texts_valid = dataset['validation'][args.feature_col]
    texts_test = dataset['test'][args.feature_col]
# +
#x
from sklearn.feature_extraction.text import TfidfVectorizer
# Unigram+bigram TF-IDF over Thai tokens (process_thai tokenizer); terms in
# fewer than 3 docs or more than 90% of docs are dropped.
tfidf = TfidfVectorizer(ngram_range=(1,2), tokenizer=process_thai,
               min_df=3, max_df=0.9, strip_accents='unicode', use_idf=1,
               smooth_idf=1, sublinear_tf=1 )
x_train = tfidf.fit_transform(texts_train)
x_valid = tfidf.transform(texts_valid)
x_test = tfidf.transform(texts_test)
x_train,x_valid,x_test
# +
#y
# review_star labels are 1..5; shift to 0..4 for the classifier.
if args.dataset_name_or_path == 'generated_reviews_enth' and args.label_col=='review_star':
    labels_train = [i-1 for i in dataset['train'][args.label_col]]
    labels_valid = [i-1 for i in dataset['validation'][args.label_col]]
    labels_test = [i-1 for i in dataset['test'][args.label_col]]
else:
    labels_train = dataset['train'][args.label_col]
    labels_valid = dataset['validation'][args.label_col]
    labels_test = dataset['test'][args.label_col]
from sklearn.preprocessing import OneHotEncoder
import numpy as np
# One-hot targets: one binary column per class for one-vs-rest training.
enc = OneHotEncoder(handle_unknown='ignore')
y_train = enc.fit_transform(np.array(labels_train)[:,None])
y_valid = enc.transform(np.array(labels_valid)[:,None])
y_test = enc.transform(np.array(labels_test)[:,None])
y_train,y_valid,y_test
# +
#validation
from sklearn.metrics import f1_score, accuracy_score
def validation_f1(penalty, C, seed):
    """Micro-F1 on the validation set for one-vs-rest NB-SVM classifiers.

    Fits one binary NbSvmClassifier per one-hot class column (liblinear only
    supports the dual formulation for l2, hence the branch), stacks the
    positive-class probabilities, and scores the argmax predictions.
    """
    probs = np.zeros((x_valid.shape[0], y_valid.shape[1]))
    n_classes = len(enc.categories_[0])
    for class_idx in range(n_classes):
        if penalty == 'l1':
            clf = NbSvmClassifier(penalty='l1', C=C, dual=False, seed=seed)
        else:
            clf = NbSvmClassifier(penalty='l2', C=C, dual=True, seed=seed)
        clf = clf.fit(x_train, y_train[:, class_idx])
        probs[:, class_idx] = clf.predict_proba(x_valid)[:, 1]
    preds = probs.argmax(1)
    return f1_score(labels_valid, preds, average='micro')
# -
# Grid-search penalty and C on the validation set, then keep the best row's
# settings as constructor kwargs for the final model.
hyperparams = []
for p in ['l1','l2']:
    for c in range(1,5):
        hyp = {'dataset':args.dataset_name_or_path,
               'penalty':p,
               'C':c,
               'f1_micro':validation_f1(p,c,seed=args.seed)}
        # liblinear supports the dual formulation only for l2
        hyp['dual'] = True if p=='l2' else False
        hyperparams.append(hyp)
hyperparams_df = pd.DataFrame(hyperparams).sort_values('f1_micro',ascending=False).reset_index(drop=True)
# BUG FIX: positional `axis` for DataFrame.drop was deprecated in pandas 1.1
# and removed in pandas 2.0; use the explicit `columns=` keyword instead.
best_hyperparams = hyperparams_df.drop(columns=['f1_micro','dataset']).iloc[0,:].to_dict()
hyperparams_df
# +
#test
# Refit one binary classifier per class with the best hyperparameters and
# score the held-out test set.
probs = np.zeros((x_test.shape[0], y_test.shape[1]))
for i in range(len(enc.categories_[0])):
    model = NbSvmClassifier(**best_hyperparams).fit(x_train, y_train[:,i])
    probs[:,i] = model.predict_proba(x_test)[:,1]
# Minimal stand-in for a transformers EvalPrediction object.
class Preds:
    # true labels of the test split
    label_ids = labels_test
    # per-class probabilities produced above
    predictions = probs
pd.DataFrame.from_dict(classification_metrics(Preds),orient='index').transpose()
# -
from collections import Counter
Counter(labels_test)
| notebooks/train_sequence_multiclass_nbsvm.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.5.0-dev
# language: julia
# name: julia-0.5
# ---
# # Mathematics Problem Solving using Computation
#
# We will talk about a simple problem in number theory, related to digit sums, and how we can use the computer to help solve this problem. This notebook is known to be compatible with Julia 0.5. There are no prerequisites to understanding the analysis used, but some prior reading on [digit sums](https://en.wikipedia.org/wiki/Digit_sum) may help. For the proof, some properties of digit sums proven in the last notebook are also used.
#
# The problem is as follows: For any positive integer $n$, show that either $n=k+\operatorname{s}(k)$ for some positive integer $k$, or $n+1=k+\operatorname{s}(k)$ for some positive integer $k$. Here $\operatorname{s}(k)$ denotes the sum of the digits of $k$, when expressed in base $10$.
#
# This problem is not too hard to prove, but we will show a method of obtaining a proof quickly and easily through computer simulation. We want to show that all natural numbers belong to the set $S=\{k+\operatorname{s}(k)\mid k\in\mathbb{Z}^+\}
# \cup \{k+\operatorname{s}(k)-1\mid k\in\mathbb{Z}^+\}$; that is, if $n$ cannot itself be expressed as $k+\operatorname{s}(k)$, then $n+1$ can. It is easy to verify that this is indeed the problem we are trying to solve.
#
# To solve this problem, we will first look at the behaviour of elements in the set $Q = \{k+\operatorname{s}(k)\mid k\in\mathbb{Z}^+\}$. Let's compute some elements first, say all elements of this set less than $200$. We can easily see that it suffices to consider $k$ between $1$ and $200$.
ksk(k) = k + sum(digits(k))
Q = filter(n -> n ≤ 200, sort(unique([ksk(k) for k in 1:200])))
# Interesting. What if we plot the results?
using Gadfly
# Plot the sorted elements of Q against their index; the alternating slopes
# (1 and 2) are the pattern discussed below.
plot(x=1:length(Q), y=Q, Geom.line)
# What we've seen so far is a straight line alternating between slopes $1$ and $2$. This is strong evidence towards our conclusion. But to understand more, perhaps it will help to plot the function $\operatorname{ksk}(k) = k + \operatorname{s}(k)$ directly.
plot(x=1:150, y=map(ksk, 1:150))
# What we have discovered is no revolution. Indeed, it's a fairly simple result. If $k$ does not end in $9$, then $\operatorname{s}(k+1)=\operatorname{s}(k)+1$, so we have $\operatorname{ksk}(k+1) = \operatorname{ksk}(k) + 2$ for all $k$ not ending in $9$. And if $k$ does end in $9$, then we know $\operatorname{s}(k+1)<\operatorname{s}(k)$ (in fact, it's not hard to show that the digit sum must decrease by a number of the form $9\ell+8$).
#
# At this point we (with only $5$ lines of code) have seen an approach to solving this problem. All possible values for $n$ will intersect at least one of the ascending lines with slope $2$, and therefore at least one of $n$ and $n+1$ will be in the set $Q$, and therefore $n\in S$, and that's what we wanted to prove. To close off, I'll present a more rigorous wording for the proof. As an exercise, generalize the proof to other bases.
# ## Theorem (KSK for Base 10)
#
# Let $\operatorname{s}(k)$ denotes the sum of the digits of $k$, when expressed in base $10$. Then
#
# \begin{equation}
# \left\{k + \operatorname{s}(k) \mid k\in\mathbb{Z}^+\right\} \cup
# \left\{k + \operatorname{s}(k) - 1 \mid k\in\mathbb{Z}^+\right\} = \mathbb{Z}^+
# \end{equation}
#
# ### Proof
#
# Let $n$ be an arbitrary natural number, with $n>1$ (when $n=1$, the statement is very easy to prove). Consider the sequence of numbers ${(T_n)}_{n=1}^\infty$ defined as follows: \begin{equation}
# T_n = \begin{cases}
# \frac{n+1}{2} + \operatorname{s}\left(\frac{n+1}{2}\right) - 1 & \text{if }n\text{ odd} \\
# \frac{n}{2} + \operatorname{s}\left(\frac{n}{2}\right) & \text{if }n\text{ even}
# \end{cases}
# \end{equation}
#
# In other words, the sequence ${(T_n)}_{n=1}^\infty$ is as follows: \begin{equation}
# 1 + \operatorname{s}(1) - 1, 1 + \operatorname{s}(1), 2 + \operatorname{s}(2) - 1, 2 + \operatorname{s}(2), \dots, n + \operatorname{s}(n) - 1, n + \operatorname{s}(n), \dots
# \end{equation}
#
# We will show by induction that $T_m<n$ for each $m$ under consideration. In the base case, $T_1 = 1 + \operatorname{s}(1) - 1 = 1 < n$ (recall $n>1$). Assume now that $T_m < n$ for some $m$. Now we will consider three cases.
#
# #### Case 1
#
# Let us suppose that $m$ is odd and hence \begin{equation}
# T_m = \frac{m+1}{2} + \operatorname{s}\left(\frac{m+1}{2}\right) - 1
# \end{equation}
#
# Then by definition of the sequence $T$, \begin{equation}
# T_{m+1}=\frac{m+1}{2} + \operatorname{s}\left(\frac{m+1}{2}\right) = T_m + 1
# \end{equation}
#
# Hence $T_{m+1}=T_m + 1$. But $T_m<n$, so therefore $T_{m+1}<n+1$ and so $T_{m+1}\le n$. But by the supposition, $T_{m+1}\ne n$, so $T_{m+1}<n$.
#
# #### Case 2
#
# Let us suppose that $m$ is even. Assume $\frac{m}{2}$ does not have final digit $9$. Then $\operatorname{s}\left(\frac{m}{2}\right)$ **rest to come**
| digit-sums/kplussk.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="rv5dXf9AcZ8V"
from sklearn.neural_network import MLPClassifier

# + id="VfOutl8rcjWt"
# Truth-table inputs and OR-gate labels (output is 1 unless both inputs are 0).
X = [[0, 0], [0, 1], [1, 0], [1, 1]]
y = [0, 1, 1, 1]

# + id="3cMcOlErcsBI"
# Tiny MLP with two hidden layers (5 and 2 units). The lbfgs solver suits
# very small datasets; a fixed random_state keeps the run reproducible.
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)

# + id="jx-Rxxn6dVH1"
# Fit on all four truth-table rows.
clf.fit(X, y)

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="kFb84-qIeK7A" outputId="210c90b0-114b-48b6-b975-5e39a434dcff"
# Predict for a single sample.
clf.predict([[1,0]])

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="655wp1UifdGa" outputId="5931780b-c87e-45dc-ab1d-a33540696d86"
clf.predict([[0,0]])
| ANN_using_sklearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PyPRECIS Notebook Style Guide
#
# Thanks for showing the enthusiasm to help develop the PyPRECIS notebooks. Please use this style guide as a reference when creating or modifying content...
#
# ## Worksheet Title
#
# All worksheets should start with a title formatted as a level 1 heading:
#
# ```md
# # Worksheet ?: All Worksheets Should Have a Clear Title
# ```
#
# Worksheet titles should be followed with a short description of the worksheet.
#
# ## Learning Aims
#
# This followed by a list of 3 to 4 learning aims for the worksheet. We use the HTML `div class="alert alert-block alert-warning"` to colour this is a nice way:
#
# ```md
# <div class="alert alert-block alert-warning">
# <b>By the end of this worksheet you should be able to:</b><br>
# - Identify and list the names of PRECIS output data in PP format using standard Linux commands.<br>
# - Use basic Iris commands to load data files, and view Iris cubes. <br>
# - Use Iris commands to remove the model rim, select data variables and save the output as NetCDF files.
# </div>
# ```
#
# When rendered, it looks like this:
#
# <div class="alert alert-block alert-warning">
# <b>By the end of this worksheet you should be able to:</b><br>
# - Identify and list the names of PRECIS output data in PP format using standard Linux commands.<br>
# - Use basic Iris commands to load data files, and view Iris cubes. <br>
# - Use Iris commands to remove the model rim, select data variables and save the output as NetCDF files.
# </div>
#
# Remember to start each learning aim with a verb. Keep them short and to the point. If you have more than 3 to 4 learning aims, consider whether there is too much content in the workbook.
#
# ## Notes
#
# You may wish to use a Note box to draw the learner's attention to particular actions or points to note. Note boxes are created using `div class="alert alert-block alert-info"`
#
# ```md
# <div class="alert alert-block alert-info">
# <b>Note:</b> In the boxes where there is code or where you are asked to type code, click in the box, then press <kbd>Ctrl</kbd> + <kbd>Enter</kbd> to run the code. <br>
# <b>Note:</b> A percentage sign <code>%</code> is needed to run some commands on the shell. It is noted where this is needed.<br>
# <b>Note:</b> A hash <code>#</code> denotes a comment; anything written after this character does not affect the command being run. <br>
# </div>
# ```
#
# Which looks like:
#
# <div class="alert alert-block alert-info">
# <b>Note:</b> In the boxes where there is code or where you are asked to type code, click in the box, then press <kbd>Ctrl</kbd> + <kbd>Enter</kbd> to run the code. <br>
# <b>Note:</b> A percentage sign <code>%</code> is needed to run some commands on the shell. It is noted where this is needed.<br>
# <b>Note:</b> A hash <code>#</code> denotes a comment; anything written after this character does not affect the command being run. <br>
# </div>
#
# ## Contents
#
# Immediately following the Learning Aims (or Note box if used) add a list of contents.
#
# ```md
# ## Contents
# ### [1.1: Data locations and file names](#1.1)
# ### ...additional headings
# ```
#
# Items in the contents list are formatted as level 3 headings. Note the `[Link Name](Link location)` syntax. Each subsequent heading in the notebook needs to have a `id` tag associated with it for the links to work. These are formatted like this:
#
# ```md
# <a id='1.1'></a>
# ## 1.1 Data locations and file names
# ```
#
# Remember that the `id` string must match the link location otherwise the link won't work. Remember to update both the link title numbering and the link id numbering if you are reordering content.
#
# ## Section Headings
#
# To help users navigate round the document use section headings to break the content into sections. As detailed above, each section heading needs to have an `id` tag associated with it to build the Contents links.
#
# If you want to further subdivide each section, use bold letters with a parentheses:
#
# ```md
# **a)** Ordinary section text continues...
# ```
#
# ## General Formatting
#
# Use links to point learners to additional learning resources. These follow the standard markdown style: `[Link text](Link location)`, eg.
#
# ```md
# [Iris](http://scitools.org.uk/iris/docs/latest/index.html)
# ```
#
# gives
#
# [Iris](http://scitools.org.uk/iris/docs/latest/index.html)
#
# Format key commands using bold back-ticks:
#
# ```md
# **`cd`**
# ```
#
# Where certain keyboard combinations are necessary to execute commands, use the `<kbd>` html formatting.
#
#
# ```md
# <kbd>Ctrl</kbd> + <kbd>Enter</kbd>
# ```
#
# which gives:
#
# <kbd>Ctrl</kbd> + <kbd>Enter</kbd>
#
# Code blocks are entered in new notebook cells, with the `Code` style. Remember, all python should be **Python 3**.
# +
# This is a code block
# Make sure you include comments with your code to help explain what you are doing
# Leave space if you want learners to complete portions of code
# -
# <div class="alert alert-block alert-info">
# <b>Note:</b> Remember you can use additional Note blocks at any time to highlight important points!
# </div>
#
# If you want to add pictures, place image files in the `/notebooks/img` folder. Use html image formatting tags to control the position of the image in the rendered notebook cell:
#
# ```md
# <p><img src="notebooks/img/python_and_iris.png" alt="python + iris logo" style="float: center; height: 100px;"/></p>
# ```
#
# gives
#
# <p><img src="notebooks/img/python_and_iris.png" alt="python + iris logo" style="float: center; height: 100px;"/></p>
#
# Images can also be places in Note and Question blocks in the same manner. See Worksheet 1 for an example.
#
# ## Questions
#
# Asking questions is a key part of the learning process. Questions blocks use the `div class="alert alert-block alert-success"` style, and should be visually separated from the main text using horizontal rules above and below the question section:
#
# ```md
# ---
# <div class="alert alert-block alert-success">
# <b>Question:</b> How many pp files are in this directory, in total?
# <br>How many of these pp files contain the string 'sep'; relating to September? What command do you need to use to find this out?
# </div>
#
# <b>Answer</b>:
# <br>*Total number of pp files:
# <br>Number of September pp files:
# <br>Command used to find number of september pp files:*
#
# ---
# ```
#
# This renders as:
#
# ---
# <div class="alert alert-block alert-success">
# <b>Question:</b> How many pp files are in this directory, in total?
# <br>How many of these pp files contain the string 'sep'; relating to September? What command do you need to use to find this out?
# </div>
#
# <b>Answer</b>:
# <br>*Total number of pp files:
# <br>Number of September pp files:
# <br>Command used to find number of september pp files:*
#
# ---
#
# Make sure to put your _answer_ section in a different notebook cell from the _question_ section to avoid learners accidentally editing the question blocks. Questions may also include code blocks. Remember to use a horizontal rule `---` to show where the question section starts and stops.
#
# ## Worksheet footer
#
# At the end of the worksheet, summarise the content using a `div class="alert alert-block alert-warning"`
#
# ```md
# <center>
# <div class="alert alert-block alert-warning">
# <b>This completes worksheet 1.</b> <br>You have created pre-processed files (rim removed, individual variables, concatenated over time, in NetCDF format). <br>
# In worksheet 2, you will begin to analyse these files.
# </div>
# </center>
# ```
#
# <center>
# <div class="alert alert-block alert-warning">
# <b>This completes worksheet 1.</b> <br>You have created pre-processed files (rim removed, individual variables, concatenated over time, in NetCDF format). <br>
# In worksheet 2, you will begin to analyse these files.
# </div>
# </center>
#
# Finally, a copyright statement and the Met Office logo should be added to all notebooks:
#
# ```md
# <p><img src="img/MO_MASTER_black_mono_for_light_backg_RBG.png" alt="python + iris logo" style="float: center; height: 100px;"/></p>
# <center>© Crown Copyright 2019, Met Office</center>
# ```
# <p><img src="notebooks/img/MO_MASTER_black_mono_for_light_backg_RBG.png" alt="Met Office logo" style="float: center; height: 100px;"/></p>
# <center>© Crown Copyright 2019, Met Office</center>
| CONTRIBUTING.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Scanners
# +
from ib_insync import *

util.startLoop()  # hook ib_insync's event loop into the Jupyter kernel

ib = IB()
# Connect to a locally running TWS / IB Gateway. 7497 is TWS's default
# paper-trading API port; clientId must be unique per connected client.
ib.connect('127.0.0.1', 7497, clientId=9)
# -

# ## Basic Scanner
#
# To create a scanner create a `ScannerSubscription` object to submit to the `reqScannerData` method. For any scanner to work it must at least have the `instrument`, `locationCode`, and `scanCode` parameters filled.
#
# Additionally, the `ScannerSubscription` has other filters directly submitted like `abovePrice` and `aboveVolume`. Check [here](https://github.com/erdewit/ib_insync/blob/master/ib_insync/objects.py) for a full list of the default filters.

# +
# Scan: top percentage gainers among US-major-exchange stocks priced above
# $500 with more than 1,000,000 shares traded.
sub = ScannerSubscription(instrument='STK',
                          locationCode='STK.US.MAJOR',
                          scanCode='TOP_PERC_GAIN',
                          abovePrice=500,
                          aboveVolume=1000000,
                          )

scanData = ib.reqScannerData(sub, [])
ib.sleep(0.1)  # give the subscription a moment to deliver results
ib.cancelScannerSubscription(scanData)
# -
# The scanner will return a list of contracts but no other data. In this case we can only be sure that these stocks had a price over 500 and volume over 1,000,000 when the scan was performed.
# We can loop through these contracts to find more information. For example, lets get the number of public floating shares for each contract in our scanner.
# +
from bs4 import BeautifulSoup


def getFloat(contract):
    """Return the total public float (share count) for `contract`.

    Requests the 'ReportSnapshot' fundamental data through the global `ib`
    connection and parses the TotalFloat attribute of the first <SharesOut>
    element. Raises (IndexError/KeyError/ValueError) when the snapshot is
    missing or malformed — callers are expected to handle that.
    """
    fundamentals = ib.reqFundamentalData(contract, 'ReportSnapshot')
    fundamentals = BeautifulSoup(fundamentals, 'xml')
    qfloat = float(fundamentals.find_all('SharesOut')[0]['TotalFloat'])
    return qfloat


for scan in scanData:
    # Bind the contract outside the try so the except clause can always
    # name the symbol it failed on.
    contract = scan.contractDetails.contract
    try:
        print('The public float for', contract.symbol, 'is', getFloat(contract))
    except Exception:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt and
        # SystemExit. Not every scanner hit has fundamental data available,
        # so just report it and continue.
        print('Public float not available for', contract.symbol)
# -
# As you can see, not all contracts returned by the scanner will be valid for your purposes. Make sure you include checks when processing the scanner results.
#
# ## Scanner Parameters
#
# The scanner parameters map directly to the options available through the TWS "Advanced Market Scanner." You can check all the scanner parameters values available
# Fetch the full scanner-parameter XML document from TWS.
allParams = ib.reqScannerParameters()
print(allParams[:3000])  # only print 3,000 characters to keep it short

# <br>For every parameter you can see the <name> which is how it appears in TWS and the <type> which is how you would submit it to the `ScannerSubscription` object.
#
# In the above example you can see that each instrument also shows you which filters are applicable to it. If we want to use any of these filter options we need to use the `TagValue` class and the `scannerSubscriptionFilterOptions` option. In this case lets use the "CHANGEPERC" filter to filter out stocks that have not moved more than 20% since yesterdays close.

# +
sub = ScannerSubscription(instrument='STK',          # Submit a scanner subscription for stocks
                          locationCode='STK.US.MAJOR',  # Use only US-Major stock exchanges
                          scanCode='TOP_PERC_GAIN',     # Use the Top % Gainers scanner code
                          abovePrice=5,                 # Any stock above $5
                          aboveVolume=100000,           # and above 100,000 volume traded so far today
                          )

# Create a list of tag values with the option and optionvalue
tagValues = [TagValue("changePercAbove", "20")]

# Submit the tags when requesting the scanner data, don't forget to include the [] in the second argument
scanData = ib.reqScannerData(sub, [], scannerSubscriptionFilterOptions=tagValues)
ib.sleep(0.1)
ib.cancelScannerSubscription(scanData)

# Print out the sorted symbols for the returned contracts
symbols = [scan.contractDetails.contract.symbol for scan in scanData]
sorted(symbols)
# -
# Note that in order to submit the "CHANGEPERC" filter I had to change it.
# - First I had to put it in [camelCase](https://en.wikipedia.org/wiki/Camel_case) format.
# - Second, I had to add an "Above" suffix to tell the filter that I wanted everything above that value. I could have changed this to "Below".
# Any scanner you can create in TWS can be submitted through the API. The `scanCode` parameter maps directly to the "Parameter" window in the TWS "Advanced Market Scanner." We can verify this by printing out the `scanCode` values available.
# Parse the scanner-parameter XML so it can be queried with BeautifulSoup.
paramSoup = BeautifulSoup(allParams, 'xml')
[sc.text for sc in paramSoup.find_all('scanCode')[:20]]  # just print out 20 items for brevity

# ## Other Products
#
# You are not limited to stocks.

# List the first 10 instrument types the scanner supports.
[i.find_all('name')[0].text for i in paramSoup.find_all('Instrument')][:10]

# +
# Example: most active US futures on GLOBEX.
sub = ScannerSubscription(instrument='FUT.US',
                          locationCode='FUT.GLOBEX',
                          scanCode='MOST_ACTIVE')

scanData = ib.reqScannerData(sub, [])
contracts = [scan.contractDetails.contract for scan in scanData]
contracts[:5]
# -

# Close the API connection.
ib.disconnect()
| notebooks/scanners.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # slicer
#
# > Slice images/labels with overlap and at different scales. Predict (with fastai) and merge the sliced images back to full size.
# ## Feature
# - Preprocess of high resolution imagery and label data for use in Semantic Segmentation tasks (Deep Learning)
# - Increases amount of trainingdata by generating different scales and overlappings for images and labels
# - Multi stage interpolation (Nearest Neighbor + Bicubic combined) for image data
# - Nearest Neighbor interpolation for label data
# - More than half empty slices will be ignored / It is possilbe to slice a dismembered Mosaik!
# - Add padding (to the right and bottom) to your high resolution images
# - Do fastai predictions and merge the images to full size
# Docs under https://abalone1.github.io/slicer/
| 99_index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# UK road-accident records; 'Date' is parsed to datetime up front.
data = pd.read_csv('Accidents0515.csv', parse_dates=['Date'])
data2015 = data[data.Date.dt.year == 2015]

# .copy() so the column assignments below write to this frame instead of a
# view of `data2015` (the original triggered SettingWithCopyWarning and the
# writes were not guaranteed to stick).
pune = data2015[['Accident_Index', 'Longitude', 'Latitude', 'Date', 'Day_of_Week', 'Time']].copy()
pune['time'] = pd.to_datetime(pune['Time'])
# Snap coordinates onto a 0.0001-degree grid (truncate past the 4th decimal).
pune['longitude'] = np.floor_divide(pune.Longitude, 0.0001) / 10000
pune['latitude'] = np.floor_divide(pune.Latitude, 0.0001) / 10000
pune['day_of_week'] = pune['Day_of_Week']

# Count accidents per (lat, lon, time) cell.
timespacedata = pune.groupby(['latitude', 'longitude', 'time']).count()

# ### If there are more counts, we can group by day
# Keep only cells with more than one accident; 'Date' is just one of the
# counted columns, reused here as the count.
timespacedata['count'] = timespacedata['Date']
timespacedata = timespacedata[timespacedata.Date > 1]
timespacedata.drop(['Date', 'Accident_Index', 'Longitude', 'Latitude', 'Time', 'Day_of_Week'], axis=1, inplace=True)
timespacedata.reset_index(inplace=True)
# Transposed so each column is one (lat, lon, time, count) record.
timespacedata = timespacedata.T
import pyrebase

# SECURITY(review): the Firebase web API key and project identifiers are
# hard-coded in the notebook — move them to environment variables or an
# uncommitted config file before publishing.
config = {
    "apiKey": "AIzaSyASZZ_9OlEXA7rQgOyN05OYhjUl9lL5oLM",
    "authDomain": "smartpune-b614c.firebaseapp.com",
    "databaseURL": "https://smartpune-b614c.firebaseio.com",
    "projectId": "smartpune-b614c",
    "storageBucket": "smartpune-b614c.appspot.com",
    "messagingSenderId": "556169280085"
}
firebase = pyrebase.initialize_app(config)
auth = firebase.auth()
# Email/password were redacted to <EMAIL>/<PASSWORD> placeholders when the
# notebook was sanitized; this line will not run as-is.
auth.sign_in_with_email_and_password('r<PASSWORD> <EMAIL>','<PASSWORD>')
db = firebase.database()
| backend/timespacedata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 10.3 海报制作
#
# 参加过各类学术会议或研讨会的读者可能会知道:在各类学术交流中,除了制作幻灯片用于汇报,还有一种非常重要的展现成果方式就是海报 (poster)。制作海报的方式有很多种,这里将介绍如何使用LaTeX制作海报。
#
# 在2010年的时候,LaTeX技术问答社区TeX StackExchange中有一个非常有趣的问题 (网址为[https://tex.stackexchange.com/questions/341](https://tex.stackexchange.com/questions/341)),问题的内容是“如何使用LaTeX制作在学术会议中展示研究成果的海报?有没有一些现成的文档类型可供使用?”,关于这个问题的讨论和回答非常精彩,问题的回答里面提到了各种工具,其中,最引人注意的是`tikzposter`和`beamerposter`这两种工具。时至今日,`tikzposter`和`beamerposter`这两种工具已经成为制作海报最为常用的文档类型。从名字上看,它们分别与用于绘制图形的tikz宏包和用于制作幻灯片的beamer文档类型密不可分,在这两种工具中,我们可以使用到一些简单好用的命令和样式。
#
# ### 10.3.1 `tikzposter`工具
#
# `tikzposter`是一个文档类,用于生成 PDF 格式的科学海报。与其他文档类类似,`tikzposter`也包含前导代码和主体代码两个部分,前导代码主要用于海报基本信息的设定及宏包的调用,主题代码用来设计海报内容。
#
# 【**例1**】创建一个`tikzposter`类的简单文档,并在前导代码中填写一些基本信息。
#
# ```tex
# \documentclass[25pt, a0paper, portrait]{tikzposter}
#
# \title{TikzPoster Example}
# \author{author}
# \date{\today}
# \institute{LaTeX Cookbook Institute}
# \usetheme{Board}
#
# \begin{document}
#
# \maketitle
#
# \end{document}
#
# ```
#
# 编译效果如图10.3.1所示。
#
# <p align="center">
# <img align="middle" src="graphics/example10_3_1.png" width="750" />
# </p>
#
# <center><b>图10.3.1</b> 编译后效果</center>
#
# 与其他文档类型类似,例1中第一个命令`\documentclass[...]{tikzposter}` 声明这个文档是一个`tikzposter`,大括号内的附加参数分别设置字体大小、纸张大小和方向;`title` , `author`, `date` 及 `institute`用来填写海报题目、作者、日期及单位等信息; ` \usetheme{Board}`设置海报主题。
#
# 【**例2**】在例1代码中加入主体代码,制作一个简单简历。
#
# ```tex
#
# \documentclass[25pt, a0paper, portrait]{tikzposter}
# \title{TikzPoster Example}
# \author{author}
# \date{\today}
# \institute{LaTeX Cookbook Institute}
#
# \usepackage{blindtext}
# \usepackage{comment}
#
# \usetheme{Board}
#
# \begin{document}
#
# \maketitle
#
# \block{~}
# {
# \blindtext
# }
#
# \begin{columns}
# \column{0.4}
# \block{More text}{Text and more text}
#
# \column{0.6}
# \block{Something else}{Here, \blindtext \vspace{4cm}}
# \note[
# targetoffsetx=-9cm,
# targetoffsety=-6.5cm,
# width=0.5\linewidth
# ]
# {e-mail \texttt{<EMAIL>}}
# \end{columns}
#
# \begin{columns}
# \column{0.5}
# \block{A figure}
# {
# \begin{tikzfigure}
# \includegraphics[width=0.4\textwidth]{images/R-C.jpg}
# \end{tikzfigure}
# }
# \column{0.5}
# \block{Description of the figure}{\blindtext}
# \end{columns}
#
# \end{document}
#
# ```
#
# 编译效果如图10.3.2所示。
#
# <p align="center">
# <img align="middle" src="graphics/example10_3_2.png" width="750" />
# </p>
#
# <center><b>图10.3.2</b> 编译后效果</center>
# ### 10.3.2 `beamerposter`工具
#
# `beamerposter`是建立在`beamer`类上的宏包,可以用于生成和设计科学海报。`beamerposter`同样包含前导代码和主题代码两个部分,前导代码主要用于海报基本信息的设定及宏包的调用基本与`beamer`类相同,主题代码用来设计海报内容。
#
# 【**例3**】创建一个调用`beamerposter`宏包的`beamer`类简单文档,并在前导代码中填写一些基本信息。
#
# ```tex
# \documentclass{beamer}
# \usepackage{times}
# \usepackage{amsmath,amsthm, amssymb}
# \boldmath
# \usetheme{RedLion}
# \usepackage[orientation=portrait,size=a0,scale=1.4]{beamerposter}
#
# \title[Beamer Poster]{Beamer Poster example}
# \author[<EMAIL>]{author}
# \institute[Overleaf University]{LaTeX Cookbook Institute}
# \date{\today}
#
# \logo{\includegraphics[height=7.5cm]{overleaf-logo}}
#
#
# \begin{document}
#
# This is a Beamer poster example
#
# \end{document}
# ```
# 编译效果如图10.3.3所示。
#
# <p align="center">
# <img align="middle" src="graphics/example10_3_3.png" width="750" />
# </p>
#
# <center><b>图10.3.3</b> 编译后效果</center>
#
# 【**例4**】在例3代码中加入主体代码,制作一个简单简历。
#
# ```tex
#
# \documentclass{beamer}
# \usepackage{times}
# \usepackage{amsmath,amsthm, amssymb}
# \boldmath
# \usetheme{RedLion}
# \usepackage[orientation=portrait,size=a0,scale=1.4]{beamerposter}
#
# \title[Beamer Poster]{Beamer Poster example}
# \author[<EMAIL>]{author}
# \institute[Overleaf University]{LaTeX Cookbook Institute}
# \date{\today}
#
# \logo{\includegraphics[height=7.5cm]{overleaf-logo}}
#
#
# \begin{document}
# \begin{frame}{}
# \vfill
# \begin{block}{\large Fontsizes}
# \centering
# {\tiny tiny}\par
# {\scriptsize scriptsize}\par
# {\footnotesize footnotesize}\par
# {\normalsize normalsize}\par
# {\large large}\par
# {\Large Large}\par
# {\LARGE LARGE}\par
# {\veryHuge VeryHuge}\par
# {\VeryHuge VeryHuge}\par
# {\VERYHuge VERYHuge}\par
# \end{block}
# \vfill
# \vfill
# \begin{block}{\large Fontsizes}
# \centering
# {\tiny tiny}\par
# {\scriptsize scriptsize}\par
# {\footnotesize footnotesize}\par
# {\normalsize normalsize}\par
# {\large large}\par
# {\Large Large}\par
# {\LARGE LARGE}\par
# {\veryHuge VeryHuge}\par
# {\VeryHuge VeryHuge}\par
# {\VERYHuge VERYHuge}\par
# \end{block}
# \vfill
# \begin{columns}[t]
# \begin{column}{.30\linewidth}
# \begin{block}{Introduction}
# \begin{itemize}
# \item some items
# \item some items
# \item some items
# \item some items
# \end{itemize}
# \end{block}
# \end{column}
# \begin{column}{.48\linewidth}
# \begin{block}{Introduction}
# \begin{itemize}
# \item some items and $\alpha=\gamma, \sum_{i}$
# \item some items
# \item some items
# \item some items
# \end{itemize}
# $$\alpha=\gamma, \sum_{i}$$
# \end{block}
#
# \begin{block}{Introduction}
# \begin{itemize}
# \item some items
# \item some items
# \item some items
# \item some items
# \end{itemize}
# \end{block}
#
# \begin{block}{Introduction}
# \begin{itemize}
# \item some items and $\alpha=\gamma, \sum_{i}$
# \item some items
# \item some items
# \item some items
# \end{itemize}
# $$\alpha=\gamma, \sum_{i}$$
# \end{block}
# \end{column}
# \end{columns}
# \end{frame}
#
# \end{document}
#
# ```
#
# <p align="center">
# <img align="middle" src="graphics/example10_3_4.png" width="750" />
# </p>
#
# <center><b>图10.3.4</b> 编译后效果</center>
# ### 参考资料
#
# - [How to create posters using LaTeX](https://tex.stackexchange.com/questions/341)
# 【回放】[**10.2 算法伪代码**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-10/section2.ipynb)
#
# 【继续】[**10.4 简历制作**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-10/section4.ipynb)
# ### License
#
# <div class="alert alert-block alert-danger">
# <b>This work is released under the MIT license.</b>
# </div>
| chapter-10/section3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import os

# Root folder: one subdirectory per class, each containing .npy feature files.
base = "../freesound_audio_tagging/mfcc_no_pick_folders/"
classes = os.listdir(base)
no_classes = len(classes)
print(no_classes)
print(classes)
# +
# Map "class_dir/file_name" -> integer class index, one index per class in
# the order `classes` was listed by os.listdir.
labels = {}
for class_index, class_name in enumerate(classes):
    for file_name in os.listdir(base + class_name):
        labels[class_name + '/' + file_name] = class_index
# + active=""
# #{'dogs/dog.201.jpg': 0, 'dogs/dog.7783.jpg': 0, needs abosulute path ( base )
# -
def image_generator(input_ids, batch_size=32):
    """Yield endless random (features, labels) batches loaded from .npy files.

    input_ids: keys of the global `labels` dict; each is a path relative to
    the global `base` directory. Ids are drawn with np.random.choice (with
    replacement across batches), so the generator never terminates — suitable
    for Keras `fit_generator`-style training loops.
    """
    while True:
        batch_paths = np.random.choice(a=input_ids, size=batch_size)
        batch_input = []
        batch_output = []
        for input_id in batch_paths:
            # renamed from `input`, which shadowed the builtin
            sample = np.load(base + input_id)
            batch_input.append(sample)
            batch_output.append(labels[input_id])
        yield (np.array(batch_input), np.array(batch_output))
# +
from random import shuffle

batch_size = 32

# Every key of `labels` is a sample id; shuffle before splitting so the
# 85/15 train/validation split is random.
img_ids = list(labels.keys())
shuffle(img_ids)

split = int(0.85 * len(img_ids))
train_ids, valid_ids = img_ids[:split], img_ids[split:]

train_generator = image_generator(train_ids, batch_size=batch_size)
valid_generator = image_generator(valid_ids, batch_size=batch_size)
# +
# Must know in order to break the loop
print(no_classes)
print(len(img_ids))
print(len(train_ids))
print(len(valid_ids))
# -

# Pull one batch from the (infinite) generator to sanity-check array shapes;
# break immediately since the generator never terminates on its own.
for x, y in train_generator:
    print(x.shape)
    print(y.shape)
    break
print(x[0])
print(y[0])
| Working_Generator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# ## Ch `07`: Concept `03`
# + [markdown] deletable=true editable=true
# ## Denoising autoencoder
# + [markdown] deletable=true editable=true
# A denoising autoencoder is pretty much the same architecture as a normal autoencoder. The input is noised up, and cost function tries to denoise it by minimizing the construction error from denoised input to clean output.
# + deletable=true editable=true
import tensorflow as tf
import numpy as np
import time
def get_batch(X, Xn, size):
    """Sample `size` distinct row indices (without replacement) and return
    the corresponding rows of X and of its noised counterpart Xn, aligned."""
    idx = np.random.choice(len(X), size, replace=False)
    return X[idx], Xn[idx]
class Denoiser:
    """Single-hidden-layer denoising autoencoder (TensorFlow 1.x graph API).

    The encoder reads a corrupted copy of the input (`x_noised`) and the
    network is trained to reconstruct the clean input `x`; the loss is the
    RMSE between `x` and the linear decoder output.

    NOTE(review): uses tf.placeholder / tf.Session / tf.train.Saver, so this
    requires TensorFlow 1.x (or tf.compat.v1 with v2 behavior disabled).
    """

    def __init__(self, input_dim, hidden_dim, epoch=10000, batch_size=50, learning_rate=0.001):
        # Training hyperparameters.
        self.epoch = epoch
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        # Clean and corrupted input placeholders, shape (batch, input_dim).
        self.x = tf.placeholder(dtype=tf.float32, shape=[None, input_dim], name='x')
        self.x_noised = tf.placeholder(dtype=tf.float32, shape=[None, input_dim], name='x_noised')
        with tf.name_scope('encode'):
            # Encoder parameters are kept as attributes so get_params() can read them.
            self.weights1 = tf.Variable(tf.random_normal([input_dim, hidden_dim], dtype=tf.float32), name='weights')
            self.biases1 = tf.Variable(tf.zeros([hidden_dim]), name='biases')
            # The encoder reads the *noised* input, not the clean one.
            self.encoded = tf.nn.sigmoid(tf.matmul(self.x_noised, self.weights1) + self.biases1, name='encoded')
        with tf.name_scope('decode'):
            weights = tf.Variable(tf.random_normal([hidden_dim, input_dim], dtype=tf.float32), name='weights')
            biases = tf.Variable(tf.zeros([input_dim]), name='biases')
            # Linear decoder (no output activation).
            self.decoded = tf.matmul(self.encoded, weights) + biases
        # RMSE between the clean input and the reconstruction.
        self.loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.x, self.decoded))))
        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
        self.saver = tf.train.Saver()

    def add_noise(self, data):
        """Return a corrupted copy of `data`; the masking branch works on a
        copy, the gaussian branch returns data + noise."""
        noise_type = 'mask-0.2'  # hard-coded: zero out 20% of each row
        if noise_type == 'gaussian':
            n = np.random.normal(0, 0.1, np.shape(data))
            return data + n
        if 'mask' in noise_type:
            frac = float(noise_type.split('-')[1])
            temp = np.copy(data)
            for i in temp:
                # Zero out a random `frac` fraction of this row's features in place.
                n = np.random.choice(len(i), round(frac * len(i)), replace=False)
                i[n] = 0
            return temp
        # NOTE(review): falls through and returns None for an unrecognized
        # noise_type — unreachable while noise_type is hard-coded above.

    def train(self, data):
        """Train on `data`, appending (timestamp, epoch, loss) rows to log.csv
        and checkpointing to ./model.ckpt every 10 epochs and at the end."""
        data_noised = self.add_noise(data)
        with open('log.csv', 'w') as writer:
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                for i in range(self.epoch):
                    # 50 mini-batches per epoch, sampled via the module-level get_batch.
                    for j in range(50):
                        batch_data, batch_data_noised = get_batch(data, data_noised, self.batch_size)
                        l, _ = sess.run([self.loss, self.train_op], feed_dict={self.x: batch_data, self.x_noised: batch_data_noised})
                    if i % 10 == 0:
                        print('epoch {0}: loss = {1}'.format(i, l))
                        self.saver.save(sess, './model.ckpt')
                        epoch_time = int(time.time())
                        row_str = str(epoch_time) + ',' + str(i) + ',' + str(l) + '\n'
                        writer.write(row_str)
                        writer.flush()
                self.saver.save(sess, './model.ckpt')

    def test(self, data):
        """Restore the checkpoint and reconstruct `data`; returns the reconstruction.

        NOTE(review): only `self.x` is fed here, but `self.encoded` /
        `self.decoded` depend on the `self.x_noised` placeholder — this looks
        like it should feed `self.x_noised: data` (or both); verify before use.
        """
        with tf.Session() as sess:
            self.saver.restore(sess, './model.ckpt')
            hidden, reconstructed = sess.run([self.encoded, self.decoded], feed_dict={self.x: data})
            print('input', data)
            print('compressed', hidden)
            print('reconstructed', reconstructed)
        return reconstructed

    def get_params(self):
        """Return the trained encoder (weights, biases) from the checkpoint."""
        with tf.Session() as sess:
            self.saver.restore(sess, './model.ckpt')
            weights, biases = sess.run([self.weights1, self.biases1])
        return weights, biases
# + deletable=true editable=true
| ch07_autoencoder/Concept03_denoising.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/anshupandey/Deep-Learning-for-structured-Data/blob/main/code_multi_variate_timeseries_forecasting.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="CTyAXAMocJ2G"
# # Multivariate times series forecasting
# + [markdown] id="Bzs8hZmMce5b"
#
#
# ```
# No: row number
# year: year of data in this row
# month: month of data in this row
# day: day of data in this row
# hour: hour of data in this row
# pm2.5: PM2.5 concentration
# DEWP: Dew Point
# TEMP: Temperature
# PRES: Pressure
# cbwd: Combined wind direction
# Iws: Cumulated wind speed
# Is: Cumulated hours of snow
# Ir: Cumulated hours of rain
#
# ```
#
#
# + id="jX4YjywUcJhw"
# !wget -q https://raw.githubusercontent.com/aspdiscovery123/Deep-Learning-for-structure-data/master/pollution.csv
# + id="60atUG7ubwwe"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime as dt
# + id="W0N-bFcedmg9"
def parse(x):
    """Parse a 'YYYY M D H' timestamp string into a datetime object."""
    fmt = '%Y %m %d %H'
    return dt.strptime(x, fmt)
# + id="7bt3ZxOccWKF"
# Load the Beijing PM2.5 dataset, combining year/month/day/hour into one
# datetime index column via parse(); 'No' becomes the row index.
df = pd.read_csv("pollution.csv",parse_dates=[['year','month','day','hour']],index_col='No',date_parser=parse)
# + colab={"base_uri": "https://localhost:8080/"} id="1s2HGTOEcY8y" outputId="695c34eb-4724-44da-b8e4-22a68b66b507"
df.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="7-_3UwsPcZYw" outputId="65e84b6d-5229-43d0-d303-7003b0df1e51"
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="O3QhVuPkcbow" outputId="6b9be8e5-09f1-4b79-c27e-006c74cf7376"
df['pm2.5'].isnull().sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="ewORvJWXeQox" outputId="c540736e-f1a9-46bf-ac2d-1e2f63300fbe"
df.head(30)
# + colab={"base_uri": "https://localhost:8080/"} id="t1sSq0HVebjM" outputId="06ae5606-6c72-4c5f-9bd6-ebadc7b5e405"
# drop the first 24 hourly rows (first day), where pm2.5 is missing
df = df[24:]
df.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="JR_kGbMGerfh" outputId="bad2142f-9e7e-47c2-c90f-42860317b128"
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="W64UX_SGeso4" outputId="c0babef4-8bed-4586-c5a5-1a928dd0f022"
# NOTE(review): filling remaining pm2.5 gaps with 0 biases the target series
# downward — interpolation or forward-fill may be preferable; confirm intent.
df.fillna(0,inplace=True)
df.isnull().sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="4GqYqCz9e092" outputId="21c1016a-96c1-4595-8d9f-7e10f7cb60ab"
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="Lefe623ue6mD" outputId="f5a49000-f117-4364-b318-5616b9edbcc1"
df.cbwd.value_counts()
# + id="iIceJHpTgdoh"
# work on a copy indexed by timestamp
df2 = df.copy()
df2.index = df2.year_month_day_hour
df2.drop(columns=['year_month_day_hour'],inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="AEM2806ygqM4" outputId="01c8a044-1059-4160-9c05-0f953051a636"
df2.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="EJRV32xIhVGc" outputId="76b69ea2-5f1b-4de8-f078-f0129366c092"
# y is the raw pm2.5 series; shifting df2 by one row makes each feature row
# describe the *previous* hour relative to its timestamp.
y = df2['pm2.5']
df2 = df2.shift()
df2.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="ue4AofVSiwRX" outputId="bf9bc9d1-545b-43d9-aa1a-66848448a47b"
# the shift leaves NaNs in the first row; drop it
df2.dropna(inplace=True)
df2.head()
# + colab={"base_uri": "https://localhost:8080/"} id="VsTWDZCPfeXS" outputId="52063222-1721-47e8-eee4-dc322854b473"
from sklearn.preprocessing import OneHotEncoder,MinMaxScaler
from sklearn.compose import ColumnTransformer
# column 4 (cbwd) is one-hot encoded; the numeric columns are min-max scaled
transformer = ColumnTransformer([('ohe',OneHotEncoder(),[4]),
                                 ('mm',MinMaxScaler(),[0,1,2,3,5,6,7])])
transformer.fit(df2)
df3 = transformer.transform(df2)
df3.shape
# + colab={"base_uri": "https://localhost:8080/"} id="ln8rgjOcg84F" outputId="04087d83-c733-4b66-d54b-660b5b46c152"
y.shape
# + colab={"base_uri": "https://localhost:8080/"} id="_NXYvvuihp6c" outputId="1e158dea-de13-405d-f67b-fe8b4a995736"
# drop y's first element so it lines up with df2 after shift()+dropna()
y = y[1:]
y.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="QiqHfR69jGHM" outputId="8dd0d673-fa34-4202-8ee1-cb3fb67ca914"
pd.DataFrame(df3).head()
# + colab={"base_uri": "https://localhost:8080/"} id="YeygyricjK-g" outputId="43257788-69d3-484b-df91-5f18602ceeaf"
y.head()
# + colab={"base_uri": "https://localhost:8080/"} id="ncO6k0x2jL77" outputId="7c259976-b95e-41d2-98fa-8e768cfd9deb"
df3.shape
# + colab={"base_uri": "https://localhost:8080/"} id="BXcF2nNCj9MS" outputId="79ac93af-8eac-40ea-826c-27bc39093be6"
# samples, timestamps, features
# 11 features = 4 one-hot wind-direction columns + 7 scaled numeric columns
df3 = df3.reshape(-1,1,11)
df3.shape
# + [markdown] id="t4mR8DGmkSzi"
# ## Modelling of LSTM network
# + id="pdsjWRVIkH5a"
from tensorflow.keras import models,layers
# + colab={"base_uri": "https://localhost:8080/"} id="3Smvt0pPkZkE" outputId="e2c4d59a-6dd8-43b4-92dc-5610617dd9b3"
# Single-step LSTM: one timestep of 11 features in, next-hour pm2.5 out.
ip_layer = layers.Input(shape=(1,11))
lstm_layer = layers.LSTM(25,activation='relu')(ip_layer)
op = layers.Dense(1)(lstm_layer)
model = models.Model(inputs=ip_layer,outputs=op)
model.compile(loss='mae',optimizer='adam')
# shuffle=False preserves temporal order during training
model.fit(df3,y,batch_size=360,epochs=20,shuffle=False)
# + colab={"base_uri": "https://localhost:8080/"} id="7VXKJbtEmULX" outputId="38458463-df1a-4103-f90e-dae273257b34"
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="i6bgiTuBlJ_d" outputId="6b90e11b-d772-4ef5-e5ce-b075323e88ec"
# continue training the same model with larger batch sizes
model.fit(df3,y,batch_size=720,epochs=10,shuffle=False)
# + colab={"base_uri": "https://localhost:8080/"} id="eG1dqnYMlRKY" outputId="a5ae175c-b72c-417d-cbe9-e36bcd4d57b6"
model.fit(df3,y,batch_size=1440,epochs=20,shuffle=False)
# + id="wSHlf5THlc5V"
# fresh timestamp-indexed copy for the 24-step sequence experiment below
df4 = df.copy()
df4.index = df4.year_month_day_hour
df4.drop(columns=['year_month_day_hour'],inplace=True)
# + colab={"base_uri": "https://localhost:8080/"} id="UnxRfqQendIo" outputId="95f9d680-4faa-48fb-e5fc-10fbe7b8e7c8"
df4.columns
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="JWdZPGzNnfFP" outputId="b2c2fe0d-dba3-45c2-84b4-6843fb9e0c07"
df4.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="sitOyWy6ngcX" outputId="48b21d04-6e76-4e22-de77-f95ac0fc696f"
# one stacked subplot per feature, plotted over time
plt.figure(figsize=(15,20))
for i in range(len(df4.columns)):
    plt.subplot(len(df4.columns),1,i+1)
    plt.plot(df4[df4.columns[i]])
    plt.title(df4.columns[i])
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="yr9LD_ZkvUq4" outputId="c6569282-f7af-4855-f30e-a4ff46f9686a"
df4.head()
# + colab={"base_uri": "https://localhost:8080/"} id="oj8OGDUlwgHb" outputId="eed26e4f-98ed-49b7-caaa-4c08a516f4ac"
df4.columns
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="gGWmfJW4vjNY" outputId="8f073f97-c2f4-4438-8bac-0e841a2b150c"
from sklearn.preprocessing import LabelEncoder,MinMaxScaler
le = LabelEncoder()
# NOTE(review): encodes df['cbwd'] rather than df4['cbwd'] — identical values
# since df4 is a copy of df, but df4['cbwd'] would be clearer; confirm.
df4['cbwd'] = le.fit_transform(df['cbwd'])
mm = MinMaxScaler()
df4[['pm2.5', 'DEWP', 'TEMP', 'PRES', 'Iws', 'Is', 'Ir']] = mm.fit_transform(df4[['pm2.5', 'DEWP', 'TEMP', 'PRES', 'Iws', 'Is', 'Ir']])
df4.head()
# + [markdown] id="RBN3Qpzkp5EO"
# # Sequence size = 24 (24 hours)
# + colab={"base_uri": "https://localhost:8080/"} id="TSBzDwLbw0o_" outputId="8ee6d83e-e636-4e6b-8000-4acc91e7e1eb"
df4.shape
# + id="MAmGBC60n_gy"
def split_sequence(sequence,n_steps):
    """Slice a time-ordered DataFrame into supervised-learning samples.

    Each sample x[j] is a window of `n_steps` consecutive rows and the
    target y[j] is the first column's value of the row immediately after
    that window (one-step-ahead forecast).

    Parameters
    ----------
    sequence : pd.DataFrame
        Rows in time order; column 0 holds the target series (pm2.5 here).
    n_steps : int
        Window length in rows.

    Returns
    -------
    (x, y) : np.ndarray (samples, n_steps, n_features), np.ndarray (samples,)
    """
    x = []
    y = []
    for i in range(len(sequence)):
        # get the end index of the pattern
        end_ix = i + n_steps
        # check if the iteration is beyond the size of sequence, break the loop
        if end_ix > len(sequence)-1:
            break
        # collect the input and output parts of the pattern.
        # BUG FIX: the target must be the value *after* the window
        # (sequence.iloc[end_ix, 0]); the original used sequence.iloc[i, 0],
        # the window's first value, which is already part of the input
        # (label leakage — the model could learn a trivial identity).
        seq_x, seq_y = sequence.iloc[i:end_ix],sequence.iloc[end_ix,0]
        x.append(seq_x)
        y.append(seq_y)
    return np.array(x),np.array(y)
# + colab={"base_uri": "https://localhost:8080/"} id="jzr46jMixHNt" outputId="e499450c-13d5-47a7-e628-4e37e253744e"
# Build 24-hour windows over the 8 feature columns of df4.
x,y = split_sequence(df4,n_steps=24)
x.shape,y.shape
# + colab={"base_uri": "https://localhost:8080/"} id="M3ztypLaxOiu" outputId="daf6889d-5ac9-4bcb-cc38-ee4de0f9c415"
# chronological (not random) train/test split at row 35020
xtr,xts = x[:35020,:,:],x[35020:,:,:]
ytr,yts = y[:35020],y[35020:]
print(xtr.shape,xts.shape)
print(ytr.shape,yts.shape)
# + [markdown] id="7VvNEMVQyjQW"
# ## Modelling the LSTM network
# + id="S1w6jsy9ygc9"
from tensorflow.keras import models,layers
# + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="RKGpi9EwynjM" outputId="e2b2e3cc-5c79-4483-9da9-c49201388d9d"
# Stacked LSTM: 24 timesteps x 8 features in, next-hour pm2.5 out.
# return_sequences=True on the first layer feeds a sequence to the second.
ip_layer = layers.Input(shape=(24,8))
lstm_layer = layers.LSTM(400,activation='relu',return_sequences=True)(ip_layer)
lstm_layer = layers.LSTM(600,activation='relu',return_sequences=False)(lstm_layer)
op = layers.Dense(1)(lstm_layer)
model = models.Model(inputs=ip_layer,outputs=op)
model.compile(loss='mae',optimizer='adam')
model.fit(xtr,ytr,batch_size=240,epochs=20,shuffle=False,validation_data=(xts,yts))
# + id="yVF7_Yyl1roc"
# + id="JEt5Nrc_zDfk"
| code_multi_variate_timeseries_forecasting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbsphinx="hidden"
# # The Discrete-Time Fourier Transform
#
# *This Jupyter notebook is part of a [collection of notebooks](../index.ipynb) in the bachelor's module Signals and Systems, Communications Engineering, Universität Rostock. Please direct questions and suggestions to [<EMAIL>](mailto:<EMAIL>).*
# -
# ## Summary of Properties, Theorems and Transforms
#
# The [properties](properties.ipynb), [theorems](theorems.ipynb) and transforms of the discrete-time Fourier transform (DTFT) as derived in the previous sections are summarized in the following. The corresponding tables serve as a reference for the application of the DTFT in the theory of signals and systems. Please refer to the respective sections for details.
# ### Definition
#
# The DTFT and its inverse are defined as
#
# \begin{align}
# X(e^{j \Omega}) &= \sum_{k = -\infty}^{\infty} x[k] \, e^{- j \Omega k} \\
# x[k] &= \frac{1}{2 \pi} \int_{-\pi}^{\pi} X(e^{j \Omega}) \, e^{j \Omega k} \; d \Omega
# \end{align}
#
# for $k \in \mathbb{Z}$ and $\Omega \in \mathbb{R}$.
# ### Properties and Theorems
#
# The properties and theorems of the DTFT are given as
#
# |  | $x[k]$ | $X(e^{j \Omega}) = \mathcal{F}_* \{ x[k] \}$ |
# |:---|:---:|:---:|
# | [Linearity](properties.ipynb#Linearity) | $A \, x_1[k] + B \, x_2[k]$ | $A \, X_1(e^{j \Omega}) + B \, X_2(e^{j \Omega})$ |
# | [Real-valued signal](properties.ipynb#Real-valued-signals) | $x^*[k]$ | $X^*(e^{-j \Omega})$ |
# | [Convolution](theorems.ipynb#Convolution-Theorem) | $x[k] * h[k]$ | $X(e^{j \Omega}) \cdot H(e^{j \Omega})$ |
# | [Shift](theorems.ipynb#Shift-Theorem) | $x[k - \kappa]$ | $e^{-j \Omega \kappa} \cdot X(e^{j \Omega})$ |
# | [Multiplication](theorems.ipynb#Multiplication-Theorem) | $x[k] \cdot h[k]$ | $\frac{1}{2 \pi} X(e^{j \Omega}) \circledast_{2 \pi} H(e^{j \Omega})$ |
# | [Modulation](theorems.ipynb#Modulation-Theorem) | $e^{j \Omega_0 k} \cdot x[k]$ | $X(e^{j (\Omega- \Omega_0)})$ |
# | [Parseval's Theorem](theorems.ipynb#Parseval's-Theorem) | $\sum_{k = -\infty}^{\infty} \lvert x[k] \rvert^2$ | $\frac{1}{2 \pi} \int_{-\pi}^{\pi} \lvert X(e^{j \Omega}) \rvert^2 \; d\Omega$ |
#
#
# where $A, B \in \mathbb{C}$, $\Omega_0 \in \mathbb{R}$ and $\kappa \in \mathbb{Z}$.
# ### Selected Transforms
#
# DTFTs which are frequently used are given as
#
# | $x[k]$ | $X(e^{j \Omega}) = \mathcal{F}_* \{ x[k] \}$ |
# |:---:|:---:|
# | $\delta[k]$ | $1$ |
# | $1$ | ${\bot \!\! \bot \!\! \bot}\left( \frac{\Omega}{2 \pi} \right)$ |
# | $\epsilon[k]$ | $\frac{1}{1 - e^{-j \Omega}} + \frac{1}{2} {\bot \!\! \bot \!\! \bot}\left( \frac{\Omega}{2 \pi} \right)$ |
# | $\text{rect}_N[k]$ | $e^{-j \Omega \frac{N-1}{2}} \cdot \frac{\sin \left(\frac{N \Omega}{2} \right)}{\sin \left( \frac{\Omega}{2} \right)}$ |
# | $e^{j \Omega_0 k}$ | ${\bot \!\! \bot \!\! \bot}\left( \frac{\Omega-\Omega_0}{2 \pi} \right)$ |
# | $\sin(\Omega_0 k)$ | $\frac{j}{2} \left[ {\bot \!\! \bot \!\! \bot} \left( \frac{\Omega + \Omega_0}{2 \pi} \right) - {\bot \!\! \bot \!\! \bot} \left( \frac{\Omega - \Omega_0}{2 \pi} \right) \right]$ |
# | $\cos(\Omega_0 k)$ | $\frac{1}{2} \left[ {\bot \!\! \bot \!\! \bot} \left( \frac{\Omega + \Omega_0}{2 \pi} \right) + {\bot \!\! \bot \!\! \bot} \left( \frac{\Omega - \Omega_0}{2 \pi} \right) \right]$ |
# | $a^k \epsilon[k]$ | $\frac{1}{1-a e^{-j \Omega}}$ |
#
#
# where $\Omega_0 \in \mathbb{R}$ and $|a| < 1$. More transforms may be found in the literature or [online](https://en.wikipedia.org/wiki/Discrete-time_Fourier_transform#Table_of_discrete-time_Fourier_transforms).
# + [markdown] nbsphinx="hidden"
# **Copyright**
#
# This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Continuous- and Discrete-Time Signals and Systems - Theory and Computational Examples*.
| discrete_time_fourier_transform/table_theorems_transforms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
import lightgbm as lgb
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
from lightgbm import LGBMClassifier
import warnings
import operator
# %matplotlib inline
# Load the Home Credit application data and explore missingness, class
# balance, and column dtypes.
train_df = pd.read_csv("application_train.csv")
train_df.shape
train_df.head()
test_df = pd.read_csv("application_test.csv")
test_df.shape
# ## Missing values
# To get a better understanding of our dataset, we plot a heatmap which shows where are the missing values in our data. This way we can see the holes in our dataset better.
plt.figure(figsize=(30, 10))
sns.heatmap(train_df.isnull(), yticklabels= False , cbar = False)
# per-column fraction of missing values, largest first
percentage = pd.DataFrame((train_df.isnull().sum()/train_df.shape[0]).sort_values(ascending = False))
percentage.columns = ['percentage']
percentage.head(10)
# As we can see, in some columns there is a high percentage of missing values. We will deal with missing values later before training models.
# ## Label class distribution
train_df['TARGET'].value_counts().plot(kind = 'pie' ,autopct='%1.0f%%')
# As we can see the target class is unbalanced and it would be hard to train data. We will consider this fact when we want to train models with our data.
# ## Types of columns
train_df.dtypes.value_counts()
# So we have 16 categorical columns and we need to encode them. Let's see in detail how many categories each column has.
for col in train_df.select_dtypes(include=['object']) :
    print('column %s has %s unique categories' % (col,len(train_df[col].unique())))
# We have 16 categorical columns, in each of which from 2 to 58 different options of values. We use one-hot-encoding to transform them to numerical values.
# ### One-hot-encoding
# Write a function for one hot encoding to handle categorical features
def one_hot_encoding(df) :
    """One-hot encode every object-dtype column of `df`.

    Each categorical column is expanded into `<col>_<value>` indicator
    columns (appended at the end) and the original column is dropped.
    Returns the transformed DataFrame; non-categorical columns pass
    through untouched.
    """
    categorical = [c for c in df.columns if df[c].dtype == 'object']
    for c in categorical :
        indicators = pd.get_dummies(df[c], prefix=c)
        # append the indicator columns, then drop the source column
        df = pd.concat([df, indicators], axis=1).drop(columns = c)
    return(df)
# Encode both frames, align their columns, and inspect correlations / age.
train_df = one_hot_encoding(train_df)
test_df = one_hot_encoding(test_df)
print('size of train_df after one-hot-encoding is: ', train_df.shape)
print('size of test_df after one-hot-encoding is: ', test_df.shape)
# As we can see we have 3 more columns(rather than target) in train dataframe compared to test dataframe. So we should make an alignment between them.
#
#
# ### Alignment
target_label = train_df['TARGET'] #saving target column to add it afterwards since it will disappear after alignment
# keep only columns present in both frames
train_df, test_df = train_df.align(test_df, join = 'inner', axis = 1)
train_df['TARGET'] = target_label #add target column to train_df
print('size of train_df after alignment is: ', train_df.shape)
print('size of test_df after alignment is: ', test_df.shape)
# Now the train and test data frames are aligned
# ## Data correlation
# In this step we calculate Pearson correlation coefficient between each column and the target column, so we will have a basic understanding of which columns are more related to the target.
corr = train_df.corr()['TARGET'].sort_values()
print(corr.tail(15)) #to get most positively correlated features
print(corr.head(15)) #to get most negatively correlated features
# As we can see Age, External sources, gender, education, income type and region are more related to the target (although none of them has very high correlation). Let's do some more analysis on these factors to get a better knowledge of our data
# ### Age
# DAYS_BIRTH is negative days-since-birth; negate and divide by 365 for years
plt.hist(-train_df['DAYS_BIRTH'] / 365, edgecolor = 'k', bins = 10)
plt.title('Age of Client')
plt.xlabel('Age (years)')
plt.ylabel('Count')
# As we can see most of clients are between 30 and 45. let's see how does age change customers behavior for paying loans.
# +
Age = train_df[['DAYS_BIRTH','TARGET']].copy(deep=True)
warnings.filterwarnings('ignore')
imputer = SimpleImputer(strategy = "median")
imputer.fit(Age)
Age.loc[:] = imputer.transform(Age)
#change Age from days to years
# TARGET==0 means the loan was repaid; TARGET==1 means payment difficulty
Age.loc[Age['TARGET']==0 ,'paid'] = -Age.loc[Age['TARGET']==0,'DAYS_BIRTH']/365
Age.loc[Age['TARGET']==1 ,'not_paid'] = -Age.loc[Age['TARGET']==1,'DAYS_BIRTH']/365
fig = plt.figure(figsize=(10, 6))
plt.subplot(1, 2, 1)
plt.hist(Age['paid'],edgecolor = 'k', bins = 10)
plt.title('paid_loans')
plt.xlabel('Age (years)')
plt.ylabel('Count')
plt.subplot(1, 2, 2)
plt.hist(Age['not_paid'],edgecolor = 'k', bins = 10)
plt.title('not_paid loans')
plt.xlabel('Age (years)')
plt.ylabel('Count')
plt.subplots_adjust(wspace = .5)
plt.show()
# -
# As we can see in not_paid loans subplot, as the age of customers increases, the possibility that they will pay the loan back increases.
# ### Education level
# re-read the raw file to get the original (un-encoded) categorical columns
train_df2 = pd.read_csv("application_train.csv")
edu = train_df2[['NAME_EDUCATION_TYPE','TARGET']].copy(deep=True)
edu = edu.dropna(how='any',axis=0)
fig = plt.figure(figsize=(10, 10))
edu.groupby(['NAME_EDUCATION_TYPE','TARGET']).size().unstack().plot(kind='bar',stacked=True)
plt.xlabel('Education level')
plt.ylabel('count')
plt.show()
# ### Gender
gender = train_df2[['CODE_GENDER','TARGET']].copy(deep=True)
# 'XNA' is a placeholder for unknown gender; treat it as missing
gender = gender.replace('XNA', np.nan)
gender = gender.dropna(how='any',axis=0)
fig = plt.figure(figsize=(10, 10))
gender.groupby(['CODE_GENDER','TARGET']).size().unstack().plot(kind='bar',stacked=True)
plt.xlabel('Gender')
plt.ylabel('count')
plt.show()
# Women clients are almost twice as many men, while men show higher risk.
# ### Family Status
FStatus = train_df2[['NAME_FAMILY_STATUS','TARGET']].copy(deep=True)
FStatus = FStatus.dropna(how='any',axis=0)
fig = plt.figure(figsize=(10, 10))
FStatus.groupby(['NAME_FAMILY_STATUS','TARGET']).size().unstack().plot(kind='bar',stacked=True)
plt.xlabel('family status')
plt.ylabel('count')
plt.show()
# While the majority of clients are married, customers in unmarried and single relationships are less risky.
# ## Feature engineering
# ### Adding some useful features
# +
# 365243 is the sentinel value Home Credit uses for 'no employment record'.
# NOTE(review): the sentinel is replaced only in train_df, not test_df —
# BIRTH_DIV_EMPLOYED below is therefore inconsistent between the two; verify.
train_df['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True) # Deleting outliers
train_df['CREDIT_DIV_ANNUITY'] = train_df['AMT_CREDIT']/train_df['AMT_ANNUITY']
train_df['ANNUITY_INCOME_PERCENT'] = train_df['AMT_ANNUITY'] / train_df['AMT_INCOME_TOTAL']
train_df['BIRTH_DIV_EMPLOYED'] = train_df['DAYS_BIRTH']/train_df['DAYS_EMPLOYED']
train_df['DAYREG_DIV_DAYPUB'] = train_df['DAYS_REGISTRATION']/train_df['DAYS_ID_PUBLISH']
train_df['CREDIT_MINUS_GOOD'] = train_df['AMT_CREDIT']/train_df['AMT_GOODS_PRICE']
# NOTE(review): CNT_CHILDREN can be 0, producing inf here; inf is cleaned for
# train_df later (replace +-inf with NaN) but not for test_df — confirm.
train_df['INCOME_CHILD'] = train_df['AMT_INCOME_TOTAL']/train_df['CNT_CHILDREN']
train_df['INCOME_DIV_FAM'] = train_df['AMT_INCOME_TOTAL']/train_df['CNT_FAM_MEMBERS']
# -
# same ratio features for the test set
test_df['CREDIT_DIV_ANNUITY'] = test_df['AMT_CREDIT']/test_df['AMT_ANNUITY']
test_df['ANNUITY_INCOME_PERCENT'] = test_df['AMT_ANNUITY'] / test_df['AMT_INCOME_TOTAL']
test_df['BIRTH_DIV_EMPLOYED'] = test_df['DAYS_BIRTH']/test_df['DAYS_EMPLOYED']
test_df['DAYREG_DIV_DAYPUB'] = test_df['DAYS_REGISTRATION']/test_df['DAYS_ID_PUBLISH']
test_df['CREDIT_MINUS_GOOD'] = test_df['AMT_CREDIT']/test_df['AMT_GOODS_PRICE']
test_df['INCOME_CHILD'] = test_df['AMT_INCOME_TOTAL']/test_df['CNT_CHILDREN']
test_df['INCOME_DIV_FAM'] = test_df['AMT_INCOME_TOTAL']/test_df['CNT_FAM_MEMBERS']
# ### Adding polynomial features
# In this step we try to create new features from available important ones. One way to do that is to use the polynomial method and create features that are powers of the features available. Since creating polynomial features may not always improve our model, we create another data frame with these features and try to learn data with and without them.
#
# We choose 5 important features(based on their correlation with target) : EXT_SOURCE_1, EXT_SOURCE_2, EXT_SOURCE_3, DAYS_BIRTH, CODE_GENDER_F and use PolynomialFeatures class from Scikit-Learn with degree 3.
important_features = train_df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH','CODE_GENDER_F' ]]
important_features_test = test_df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH','CODE_GENDER_F' ]]
imputer = SimpleImputer(strategy = 'median') #replacing null values with the median of that column
important_features = imputer.fit_transform(important_features)
# NOTE(review): fit_transform on test imputes with *test* medians; transform
# with the train-fitted imputer would avoid train/test inconsistency.
important_features_test = imputer.fit_transform(important_features_test)
polynom = PolynomialFeatures(degree = 3)
poly_features = polynom.fit_transform(important_features) #applying PolynomialFeatures to train set
poly_features_test = polynom.fit_transform(important_features_test ) #applying PolynomialFeatures to test set
print(poly_features.shape)
# Now we have 56 polynomial features from 5 original important features. Now we calculate correlation between these polynomial features and the target to see how these features are related to the label.
# +
# We create a data frame from all polynomial features that we created in previous step and then calculate correlations
poly_features = pd.DataFrame(poly_features , columns = polynom.get_feature_names(['EXT_SOURCE_1', 'EXT_SOURCE_2',
                                                                                  'EXT_SOURCE_3', 'DAYS_BIRTH', 'CODE_GENDER_F']))
poly_features_test = pd.DataFrame(poly_features_test , columns = polynom.get_feature_names(['EXT_SOURCE_1', 'EXT_SOURCE_2',
                                                                                            'EXT_SOURCE_3', 'DAYS_BIRTH', 'CODE_GENDER_F']))
poly_features.head()
# -
poly_features = poly_features.drop('1' , axis = 1) # The first feature with degree 0 (the bias column) is useless so we drop it
poly_features_test = poly_features_test.drop('1' , axis = 1)
poly_features ['TARGET'] = train_df['TARGET']
corr = poly_features.corr()['TARGET'].sort_values()
print(corr.tail(15)) #to get most positively correlated features
print(corr.head(15)) #to get most negatively correlated features
poly_features.shape
# As we can see now, some of these polynomial features have more correlation with the target class.
#
# Now we create new train and test data sets and add these polynomial features. Later for training the model we also use these datasets with polynomial features to see if it would improve our model.
poly_features['SK_ID_CURR'] = train_df['SK_ID_CURR'] #adding Id column so we can merge these datasets later
poly_features = poly_features.drop('TARGET', axis = 1)
poly_features_test['SK_ID_CURR'] = test_df['SK_ID_CURR']
poly_train = train_df.merge(poly_features, on = 'SK_ID_CURR', how = 'left')
poly_test = test_df.merge(poly_features_test, on = 'SK_ID_CURR', how = 'left')
poly_train.head()
# ## Adding data from other tables
# ### <font color='blue'>bureau_balance table</font>
bureau_balance = pd.read_csv("bureau_balance.csv")
bureau_balance.head()
bureau_balance["STATUS"].unique()
# Meaning of different values of STATUS column is as following :
#
# C - closed, that is, repaid credit. X - unknown status. 0 - current loan, no delinquency. 1 - 1-30 days overdue, 2 - 31-60 days overdue, and so on up to status 5 - the loan is sold to a third party or written off
#
# We can use this STATUS column and define a risk factor by allocating a value to each status and then calculate sum of them for each SK_ID_BUREAU
# +
# map each status to a numeric risk weight, then sum per bureau credit
bureau_balance['STATUS'] = bureau_balance['STATUS'].map({'C' : 0 , '0' : 0 , 'X' : .1 , '1' : 1 ,
                                                         '2' : 2 , '3' : 3 , '4' :4 , '5' : 5})
# Allocate .1 for X because in this case the status is unknown and it's not reasonable to map a high risk to it
bureau_balance_final = bureau_balance.groupby('SK_ID_BUREAU', as_index=False)['STATUS'].sum()
bureau_balance_final = bureau_balance_final.rename(columns = {'STATUS' : 'BB_RISK'})
bureau_balance_final.head()
# -
# ### <font color='blue'>bureau table</font>
# In this step, we first add the risk value that we calculated in previous step to the bureau table and fill null values with 0
bureau = pd.read_csv("bureau.csv")
bureau= bureau.merge(bureau_balance_final, on = 'SK_ID_BUREAU', how = 'left')
bureau['BB_RISK'] = bureau['BB_RISK'].fillna(0)
bureau = one_hot_encoding(bureau)
bureau.head()
# Now we get the mean of bureau table features for each SK_ID_CURR and also number of previous loans that each customer got before
bureau_mean = bureau.groupby('SK_ID_CURR').mean()
previous_loans = bureau.groupby('SK_ID_CURR', as_index=False)['SK_ID_BUREAU'].count() #number of previous loans for each customer
previous_loans = previous_loans.rename(columns = {"SK_ID_CURR" : "SK_ID_CURR", "SK_ID_BUREAU" : "PLoan_num"})
bureau_mean= bureau_mean.merge(previous_loans, on = 'SK_ID_CURR', how = 'left')
bureau_mean = bureau_mean.drop(columns = "SK_ID_BUREAU" )
bureau_mean.head()
# Now we define a new variable from existing variables that may be useful and it is how often the customer took loans in the past — was it on a regular basis or for a short period? Each can have a different interpretation.
frequency = bureau[['SK_ID_CURR', 'SK_ID_BUREAU', 'DAYS_CREDIT']].groupby(by=['SK_ID_CURR'])
# sort each customer's credits by DAYS_CREDIT, then diff gives days between
# consecutive loan applications
frequency1 = frequency.apply(lambda x: x.sort_values(['DAYS_CREDIT'], ascending=False)).reset_index(drop=True)
frequency1['Loan_FRQ'] = frequency1.groupby(by=['SK_ID_CURR'])['DAYS_CREDIT'].diff()
# Now need to find mean of Loan_FRQ for each SK_ID_CURR. First, I drop null values (because when we calculate diff, the diff value for the first bureau of each SK_ID_CURR is NaN) and then calculate mean values for each SK_ID_CURR
# +
frequency1 = frequency1.dropna(subset = ['Loan_FRQ'])
frequency1 = frequency1.groupby('SK_ID_CURR', as_index=False)['Loan_FRQ'].mean()
# Now we should merge frequency1 and bureau_mean database
bureau_mean= bureau_mean.merge(frequency1, on = 'SK_ID_CURR', how = 'left')
#we have null values in Loan_FRQ column if there was just 1 previous loan
#fill null values of this column with the value of DAYS_CREDIT column
bureau_mean["Loan_FRQ"] = np.where(bureau_mean["Loan_FRQ"].isnull(), bureau_mean['DAYS_CREDIT'], bureau_mean["Loan_FRQ"])
bureau_mean["Loan_FRQ"] = bureau_mean["Loan_FRQ"].abs()
bureau_mean.head(10)
# +
# Now we fill null values with the value of median
imputer = SimpleImputer(strategy = "median")
imputer.fit(bureau_mean)
bureau_mean.loc[:] = imputer.transform(bureau_mean)
# prefix columns with BUR_ so later merges can't collide with other tables
bureau_mean.columns = ['BUR_' + col for col in bureau_mean.columns]
bureau_mean = bureau_mean.rename(columns = {'BUR_SK_ID_CURR' : 'SK_ID_CURR'})
# -
# Aggregate the POS, installments, credit-card, and previous-application
# tables to one row per SK_ID_CURR (mean of features + a count column).
# ### <font color='blue'>POS_CASH_balance table</font>
pos_cash = pd.read_csv("POS_CASH_balance.csv")
pos_cash.head()
# +
pos_cash = one_hot_encoding(pos_cash)
pos_count = pos_cash[[ 'SK_ID_PREV', 'SK_ID_CURR']].groupby(by = 'SK_ID_CURR').count()
pos_count = pos_count.rename(columns= {'SK_ID_CURR' : 'SK_ID_CURR', 'SK_ID_PREV' : 'prev_pos_count'})
pos_avg = pos_cash.groupby('SK_ID_CURR').mean()
pos_avg = pos_avg.merge(pos_count, how='left', on='SK_ID_CURR')
pos_avg = pos_avg.drop('SK_ID_PREV', axis = 1)
pos_avg.head()
# -
# changing column names to avoid any problem when we want to merge these tables with train and test
pos_avg.columns = ['POS_' + col for col in pos_avg.columns]
pos_avg = pos_avg.rename(columns = {'POS_SK_ID_CURR' : 'SK_ID_CURR'})
# ### <font color='blue'>installments_payments table</font>
ins_pay = pd.read_csv("installments_payments.csv")
ins_pay = one_hot_encoding(ins_pay)
ins_pay.head()
ins_count = ins_pay[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
ins_count = ins_count.rename(columns = {'SK_ID_CURR' : 'SK_ID_CURR' , 'SK_ID_PREV' : 'ins_count'})
ins_avg = ins_pay.groupby('SK_ID_CURR').mean()
ins_avg = ins_avg.merge(ins_count, how='left', on='SK_ID_CURR')
ins_avg = ins_avg.drop('SK_ID_PREV', axis = 1)
ins_avg.head()
# #### Adding new features
# +
# Percentage and difference paid in each installment (amount paid and installment value)
ins_avg['PAYMENT_PERC'] = ins_avg['AMT_PAYMENT'] / ins_avg['AMT_INSTALMENT']
ins_avg['PAYMENT_DIFF'] = ins_avg['AMT_INSTALMENT'] - ins_avg['AMT_PAYMENT']
# Days past due and days before due (no negative values)
ins_avg['DPD'] = ins_avg['DAYS_ENTRY_PAYMENT'] - ins_avg['DAYS_INSTALMENT']
ins_avg['DBD'] = ins_avg['DAYS_INSTALMENT'] - ins_avg['DAYS_ENTRY_PAYMENT']
# clip negatives to 0 so DPD/DBD only count the overdue/early direction
ins_avg['DPD'] = ins_avg['DPD'].apply(lambda x: x if x > 0 else 0)
ins_avg['DBD'] = ins_avg['DBD'].apply(lambda x: x if x > 0 else 0)
ins_avg.head()
# -
#analyze null values
ins_avg.isnull().sum()
# There are just 9 rows with null values in DAYS_ENTRY_PAYMENT and AMT_PAYMENT columns (also in new columns that we created from them). We fill these null values with the mean of the column
ins_avg = ins_avg.fillna(ins_avg.mean())
ins_avg.head()
#changing columns name
ins_avg.columns = ['ins_' + col for col in ins_avg.columns]
ins_avg = ins_avg.rename(columns = {'ins_SK_ID_CURR' : 'SK_ID_CURR'})
# ### <font color='blue'>credit_card_balance table</font>
cc = pd.read_csv('credit_card_balance.csv')
cc.head()
# +
# handling categorical features
cc = one_hot_encoding(cc)
# filling null values with median
imputer = SimpleImputer(strategy = "median")
imputer.fit(cc)
cc.loc[:] = imputer.transform(cc)
# Adding number of credit cards
cc_count = cc[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
cc_count = cc_count.rename(columns = {'SK_ID_CURR' : 'SK_ID_CURR' , 'SK_ID_PREV' : 'cc_count'})
# calculating the mean of each feature for each customer
cc_avg = cc.groupby('SK_ID_CURR').mean()
cc_avg = cc_avg.merge(cc_count, how='left', on='SK_ID_CURR')
cc_avg = cc_avg.drop('SK_ID_PREV', axis = 1)
cc_avg.head()
# -
#changing columns name
# NOTE(review): prefix 'ins_' duplicates the installments-table prefix —
# probably intended 'cc_'; pandas will suffix any colliding names with
# _x/_y on merge, which makes downstream column names unpredictable. Verify.
cc_avg.columns = ['ins_' + col for col in cc_avg.columns]
cc_avg = cc_avg.rename(columns = {'ins_SK_ID_CURR' : 'SK_ID_CURR'})
# ### <font color='blue'>previous_application table</font>
#
pre_app = pd.read_csv('previous_application.csv')
pre_app.head()
# +
# handling categorical features
pre_app = one_hot_encoding(pre_app)
# Adding number of previous applications
pre_count = pre_app[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
pre_count = pre_count.rename(columns = {'SK_ID_CURR' : 'SK_ID_CURR' , 'SK_ID_PREV' : 'pre_app_count'})
# calculating the mean of each feature for each customer
app_avg = pre_app.groupby('SK_ID_CURR').mean()
app_avg = app_avg.merge(pre_count, how='left', on='SK_ID_CURR')
app_avg = app_avg.drop('SK_ID_PREV', axis = 1)
app_avg.head()
# -
# filling null values with median
imputer = SimpleImputer(strategy = "median")
imputer.fit(app_avg)
app_avg.loc[:] = imputer.transform(app_avg)
#changing columns name
app_avg.columns = ['app_' + col for col in app_avg.columns]
app_avg = app_avg.rename(columns = {'app_SK_ID_CURR' : 'SK_ID_CURR'})
# ### Merging tables with train and test set
# In this step we merge the datasets that we created in the last step with the train and test dataset. Since not all applicants have previous applications or loans, we fill null values of columns of these new datasets with 0 .
def merge_dataset(df1,df2,key):
    """Left-join `df2` onto `df1` on `key`, zero-filling unmatched rows.

    Columns originating from `df2` get NaN for df1 rows with no match in
    df2; those NaNs are replaced with 0. Returns the merged DataFrame.
    """
    joined = df1.merge(df2, how='left', on= key)
    added_cols = list(df2.columns)
    joined[added_cols] = joined[added_cols].fillna(0)
    return joined
# Adding Bureau table
train_df = merge_dataset(train_df, bureau_mean, 'SK_ID_CURR')
test_df = merge_dataset(test_df, bureau_mean, 'SK_ID_CURR' )
# Adding POS_CASH_balance table
train_df = merge_dataset(train_df, pos_avg , 'SK_ID_CURR')
test_df = merge_dataset(test_df, pos_avg , 'SK_ID_CURR' )
# Adding installments_payments table
train_df = merge_dataset(train_df, ins_avg , 'SK_ID_CURR')
test_df = merge_dataset(test_df, ins_avg , 'SK_ID_CURR' )
# Adding credit_card_balance table
train_df = merge_dataset(train_df, cc_avg , 'SK_ID_CURR')
test_df = merge_dataset(test_df, cc_avg , 'SK_ID_CURR' )
# Adding previous_application table
train_df = merge_dataset(train_df, app_avg , 'SK_ID_CURR')
test_df = merge_dataset(test_df, app_avg , 'SK_ID_CURR' )
# Adding some new useful features
train_df['INTEREST'] = train_df['app_CNT_PAYMENT']*train_df['AMT_ANNUITY'] - train_df['AMT_CREDIT']
train_df['INTEREST_RATE'] = 2*12*train_df['INTEREST']/(train_df['AMT_CREDIT']*(train_df['app_CNT_PAYMENT']+1))
train_df['INTEREST_SHARE'] = train_df['INTEREST']/train_df['AMT_CREDIT']
test_df['INTEREST'] = test_df['app_CNT_PAYMENT']*test_df['AMT_ANNUITY'] - test_df['AMT_CREDIT']
test_df['INTEREST_RATE'] = 2*12*test_df['INTEREST']/(test_df['AMT_CREDIT']*(test_df['app_CNT_PAYMENT']+1))
test_df['INTEREST_SHARE'] = test_df['INTEREST']/test_df['AMT_CREDIT']
train_df.head()
#train_df.to_csv('processed.csv', encoding='utf-8', index=False)
# +
#test_df.to_csv('processed_test.csv' , encoding='utf-8', index=False)
# -
# # Modeling
# Reload the fully processed feature tables from disk; this lets the
# modeling section restart without re-running the feature engineering.
train_df = pd.read_csv('processed.csv')
test_df = pd.read_csv('processed_test.csv')
test_df.head()
# #### Checking for NaN, inf or -inf values and substituting them with the mean of each numeric column
#
# Substituting inf and -inf values with nan
# Substituting inf and -inf values with nan
train_df = train_df.replace([np.inf, -np.inf], np.nan)
# Fill NaN in every float64 column with that column's mean.
# Vectorized over all float columns at once instead of a per-column
# Python loop (same columns, same values, one pass).
float_cols = train_df.select_dtypes(include='float64').columns
train_df[float_cols] = train_df[float_cols].fillna(train_df[float_cols].mean())
# Checking if there are still any problems into the dataframe
train_df.isnull().any().any()
# ---
# Selecting the 'SK_ID_CURR' column for future use
# Keep the applicant IDs around; they are dropped from the feature matrix
# below but are needed later to label predictions.
client_names = train_df[['SK_ID_CURR']]
client_names
# Splitting dataframe in features and target variable
feature_cols = list(train_df.columns)
y = train_df.TARGET.values # Target variable
# Neither TARGET nor the row identifier is a predictive feature.
train_df = train_df[feature_cols].drop(['TARGET'], axis = 1)
train_df = train_df.drop(['SK_ID_CURR'], axis = 1) # Features
# #### Dividing the data into train, val and test datasets
# Now that we have defined the initial dataframe of features and the Target variable array, we can divide our dataset into training, validation and testing sets, and then select suitable methods for binary classification in order to develop our statistical model.
# +
# Splitting the dataset
X_train, X_temp, y_train, y_temp = train_test_split(train_df, y, stratify = y, test_size=0.3, random_state=42)
X_val, X_test, y_val, y_test = train_test_split(X_temp, y_temp, stratify = y_temp, test_size=0.5, random_state=42)
print('Shape of X_train:',X_train.shape)
print('Shape of X_val:',X_val.shape)
print('Shape of X_test:',X_test.shape)
# -
# As we can see, we can count over 500 columns in our dataframes, and so many different features can consistently slow our models and generate too much noise, so that becomes even more difficult finding the correct probability that each client will or won't pay back the loan.
# For these reasons, we already decided to select the best feature with light gbm
# Fit a LightGBM model purely to rank features; its importances drive the
# selection below. class_weight='balanced' compensates for the skewed target.
model_sel = lgb.LGBMClassifier(boosting_type='gbdt', max_depth=7, learning_rate=0.01, n_estimators= 2000,
                               class_weight='balanced', subsample=0.9, colsample_bytree= 0.8, n_jobs=-1)
train_features, valid_features, train_y, valid_y = train_test_split(X_train, y_train, test_size = 0.15, random_state = 42)
model_sel.fit(train_features, train_y, early_stopping_rounds=100, eval_set = [(valid_features, valid_y)], eval_metric = 'auc', verbose = 200)
get_feat = pd.DataFrame(sorted(zip(model_sel.feature_importances_, train_df.columns)), columns=['Value','Feature'])
features_sorted = get_feat.sort_values(by="Value", ascending=False)
# Keep every feature whose split importance reaches 50
features_sel = list(features_sorted[features_sorted['Value']>=50]['Feature'])
print(features_sel, len(features_sel))
# Selecting the best 150 features out of 202
best_features = features_sel[0:150]
# Defining new dataframes with only the selected features
X_train_sel = X_train[features_sel]
X_val_sel = X_val[features_sel]
X_test_sel = X_test[features_sel]
X_train_best = X_train_sel[best_features]
X_test_best = X_test_sel[best_features]
X_val_best = X_val_sel[best_features]
# Feature Scaling
# Fit the scaler on the TRAINING data only, then apply that same
# transformation to the validation and test splits.  The original code had
# two bugs: (a) it called transform() a second time on data already scaled
# by fit_transform(), standardizing twice with statistics computed from the
# raw data, and (b) it re-fit the scaler on the val/test splits, leaking
# their statistics into the preprocessing.
sc = StandardScaler()
X_train_sel = sc.fit_transform(X_train_sel)
X_val_sel = sc.transform(X_val_sel)
X_test_sel = sc.transform(X_test_sel)
# Separate scaler for the reduced feature set (different column count).
sc_best = StandardScaler()
X_train_best = sc_best.fit_transform(X_train_best)
X_val_best = sc_best.transform(X_val_best)
X_test_best = sc_best.transform(X_test_best)
# ### Logistic Regression
aucs = {}  # model name -> test-set AUC, collected for the final comparison
# +
# Initialize the model (using the default parameters)
logistic = LogisticRegression(max_iter = 4000) # It doesn't converge for lower values
# fit the model with data
logistic.fit(X_train_best,y_train)
# Predicting the target values for X_test
y_pred = logistic.predict(X_test_best)
# -
# Defining and plotting with heatmap the confusion matrix relative to logistic regression
def heat_conf(conf_matrix):
    """Plot *conf_matrix* as an annotated heatmap.

    Parameters
    ----------
    conf_matrix : array-like of shape (2, 2)
        Confusion matrix as returned by ``metrics.confusion_matrix``.
    """
    # Create figure and axes together so the requested size is actually
    # used: the original created a throwaway 8x8 figure with plt.figure()
    # and then drew on a second, default-sized one from plt.subplots().
    class_names = [0, 1]  # class labels shown on both axes
    fig, ax = plt.subplots(figsize=(8, 8))
    tick_marks = np.arange(len(class_names))
    plt.xticks(tick_marks, class_names)
    plt.yticks(tick_marks, class_names)
    sns.heatmap(pd.DataFrame(conf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
    ax.xaxis.set_label_position("top")
    # Expand y-limits; works around matplotlib/seaborn versions that clip
    # the top and bottom heatmap rows.
    ax.set_ylim(len(conf_matrix)+1, -1)
    plt.tight_layout()
    plt.title('Confusion matrix', y=1.1)
    plt.ylabel('Actual label')
    plt.xlabel('Predicted label')
# Confusion matrix and headline metrics for the logistic model; TARGET==1
# (default) is the positive class throughout.
conf_matrix = metrics.confusion_matrix(y_test, y_pred)
heat_conf(conf_matrix)
# Let's see the Accuracy, Precision and Recall values:
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
print("Precision:",metrics.precision_score(y_test, y_pred, pos_label = 1))
print("Recall:",metrics.recall_score(y_test, y_pred, pos_label = 1))
# Now we can plot the ROC, and calculate the AUC relative to logistic regression.
# ROC curve and AUC for the logistic model on the test split.
fig = plt.figure(figsize=(7,6))
# Predicted probability of the positive class (default) per applicant
y_pred_proba = logistic.predict_proba(X_test_best)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba, pos_label = 1)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')  # chance diagonal
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()  # single call; the duplicate plt.show() was a no-op and is removed
aucs['Logistic Regression'] = auc
auc
# ### Random Forest
# +
# Create a random forest classifier
# max_depth=10 limits tree growth to curb overfitting on 150+ features.
clf = RandomForestClassifier(n_estimators=200, random_state=0, n_jobs=-1, max_depth = 10)
# Train the classifier
clf.fit(X_train_sel, y_train)
# -
# Print the name and gini importance of each feature
for feature in zip(features_sel, clf.feature_importances_):
    print(feature)
# +
# Create a selector object that will use the random forest classifier to identify
# features that have an importance of more than 0.005
sfm = SelectFromModel(clf, threshold=0.005)
# Train the selector
sfm.fit(X_train_sel, y_train)
# -
# Print the names of the most important features
for feature_list_index in sfm.get_support(indices=True):
    print(features_sel[feature_list_index])
# Transform the data to create a new dataset containing only the most important features
X_important_train = sfm.transform(X_train_sel)
X_important_test = sfm.transform(X_test_sel)
# +
# Create a new random forest classifier for the most important features
clf_important = RandomForestClassifier(n_estimators=200, random_state=0, n_jobs=-1)
# Train the new classifier on the new dataset containing the most important features
clf_important.fit(X_important_train, y_train)
# +
# Apply The Full Featured Classifier To The Test Data
y_pred = clf.predict(X_test_sel)
# View The Accuracy, Precision and Recall Of our model with all features
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
print("Precision:",metrics.precision_score(y_test, y_pred, pos_label = 1))
print("Recall:",metrics.recall_score(y_test, y_pred, pos_label = 1))
# -
conf_matrix = metrics.confusion_matrix(y_test, y_pred)
heat_conf(conf_matrix)
# ROC curve and AUC for the full-feature random forest.
fig = plt.figure(figsize=(7,6))
y_pred_proba = clf.predict_proba(X_test_sel)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba, pos_label = 1)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')  # chance diagonal
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()  # single call; the duplicate plt.show() was a no-op and is removed
aucs['Random Forest'] = auc
auc
# +
# Apply the classifier trained on only the SelectFromModel features
y_important_pred = clf_important.predict(X_important_test)
# View The Accuracy,Precision and Recall off our model with selected features
print("Accuracy:",metrics.accuracy_score(y_test, y_important_pred))
print("Precision:",metrics.precision_score(y_test, y_important_pred, pos_label = 1))
print("Recall:",metrics.recall_score(y_test, y_important_pred, pos_label = 1))
# -
conf_matrix = metrics.confusion_matrix(y_test, y_important_pred)
heat_conf(conf_matrix)
# ROC curve and AUC for the reduced-feature random forest.
fig = plt.figure(figsize=(7,6))
y_pred_proba = clf_important.predict_proba(X_important_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba, pos_label = 1)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')  # chance diagonal
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()  # single call; the duplicate plt.show() was a no-op and is removed
aucs['Random Forest selected'] = auc
auc
# ### Light gbm
# Defining again the dataframes, without scaling
# LightGBM is tree-based and does not need standardized inputs, so the
# selected-feature frames are rebuilt from the raw (unscaled) splits.
X_train_sel = X_train[features_sel]
X_val_sel = X_val[features_sel]
X_test_sel = X_test[features_sel]
# Since the dataset is unbalanced, with over 90% of target values equal to 0, we need to add weight to give more importance to the target value 1 when is found. (We tried downsampling, but it didn't give better results).
# Per-row training weights: give minority-class rows (TARGET == 1) a 10x
# weight to counter the heavy class imbalance.  Vectorized with np.where
# instead of the original per-row Python loop (same integer weights).
tar_weight = np.where(y_train == 0, 1, 10)
# lgbm format
train = lgb.Dataset(X_train_sel, label = y_train, weight= tar_weight )
valid = lgb.Dataset(X_val_sel, label = y_val)
# Cross Validation to find the best max depth:
# +
# Manual grid search over max_depth: train one early-stopped model per
# candidate depth and keep the depth with the best validation AUC.
cross = []
max_D = [2,3,5,10] # Possible values of max_depth parameter
for i in max_D:
    params = {'boosting_type': 'gbdt',
              'max_depth' : i,
              'objective': 'binary',
              'nthread': 5,
              'num_leaves': 32,
              'learning_rate': 0.05,
              'max_bin': 512,
              'subsample_for_bin': 200,
              'subsample': 0.7,
              'subsample_freq': 1,
              'colsample_bytree': 0.8,
              'reg_alpha': 20,
              'reg_lambda': 20,
              'min_split_gain': 0.5,
              'min_child_weight': 1,
              'min_child_samples': 10,
              'scale_pos_weight': 1,
              'num_class' : 1,
              'metric' : 'auc'
              }
    # Up to 2500 boosting rounds, stopping if the validation AUC does not
    # improve for 100 rounds.
    lgbm = lgb.train(params,
                     train,
                     2500,
                     valid_sets=valid,
                     early_stopping_rounds= 100,
                     verbose_eval= 10
                     )
    y_prob = lgbm.predict(X_val_sel)
    cross.append(roc_auc_score(y_val,y_prob))
best = max_D[np.argmax(cross)]
print('The best max depth is ', best )
# -
# Final LightGBM model with the chosen depth (5) on the same weighted
# training Dataset, early-stopped on the validation split.
params = {'boosting_type': 'gbdt',
          'max_depth' : 5,
          'objective': 'binary',
          'nthread': 5,
          'num_leaves': 32,
          'learning_rate': 0.05,
          'max_bin': 512,
          'subsample_for_bin': 200,
          'subsample': 0.7,
          'subsample_freq': 1,
          'colsample_bytree': 0.8,
          'reg_alpha': 20,
          'reg_lambda': 20,
          'min_split_gain': 0.5,
          'min_child_weight': 1,
          'min_child_samples': 10,
          'scale_pos_weight': 1,
          'num_class' : 1,
          'metric' : 'auc'
          }
lgbm = lgb.train(params,
                 train,
                 2500,
                 valid_sets=valid,
                 early_stopping_rounds= 100,
                 verbose_eval= 10
                 )
y_pred_prob = lgbm.predict(X_test_sel)
y_pred_prob # Probabilities relative to clients
# Convert predicted default probabilities into hard 0/1 labels at the
# conventional 0.5 threshold.  Vectorized comparison replaces the original
# element-by-element loop (p <= 0.5 -> 0, p > 0.5 -> 1, exactly as before).
y_pred = (y_pred_prob > 0.5).astype(int)
# Confusion matrix and headline metrics for LightGBM at the 0.5 threshold.
conf_matrix = metrics.confusion_matrix(y_test, y_pred)
heat_conf(conf_matrix)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
print("Precision:",metrics.precision_score(y_test, y_pred, pos_label = 1))
print("Recall:",metrics.recall_score(y_test, y_pred, pos_label = 1))
# ROC curve and AUC for LightGBM on the test split.
fig = plt.figure(figsize=(7,6))
# Reuse the probabilities already computed above instead of re-running
# lgbm.predict on the same data (identical deterministic output).
y_pred_proba = y_pred_prob
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba, pos_label = 1)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')  # chance diagonal
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()  # single call; the duplicate plt.show() was a no-op and is removed
aucs['Light GBM'] = auc
auc
# Model with the highest test AUC.  max(..., key=aucs.get) is equivalent to
# the original operator.itemgetter version but does not depend on the
# `operator` module (which is not imported anywhere visible in this file).
best_method = max(aucs, key=aucs.get)
best_method
# ### Creating the prediction CSV file
# Score the held-out competition test set with the trained LightGBM model
# and write the submission file (SK_ID_CURR, predicted default probability).
test_pred = test_df.drop(['SK_ID_CURR'], axis = 1)
test_sel = test_pred[features_sel]
y_pred_prob_test = lgbm.predict(test_sel)
submission = pd.DataFrame({'SK_ID_CURR':test_df['SK_ID_CURR'],'TARGET':y_pred_prob_test})
submission.to_csv('Home_credit_default_risk.csv', index = False)
submission
# --- end of first notebook (final notebook_1.ipynb); the cells below come from a separate staffing-optimization notebook ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 30100, "status": "ok", "timestamp": 1634217401801, "user": {"displayName": "<NAME>", "photoUrl": "https://<KEY>", "userId": "02695860433788978097"}, "user_tz": -330} id="fyJOV0wg8YWs" outputId="ba382f8d-1e72-48ef-ead6-474c1177459c"
# #### These below steps are supposed to be executed only in google colab:
# They will install libraries on the fly. However in jupyter notebook you have to preinstall required libraries using 'cmd' prompt
#
# - !pip install -q pyomo
# - !apt-get install -y -qq coinor-cbc
# - !apt-get install -y -qq glpk-utils
# + executionInfo={"elapsed": 389, "status": "ok", "timestamp": 1634217479266, "user": {"displayName": "<NAME>", "photoUrl": "https://<KEY>", "userId": "02695860433788978097"}, "user_tz": -330} id="sr4UyEaA8YWy"
# Importing Libraries
import pandas as pd
import numpy as np
import math
from pyomo.environ import *
import matplotlib.pyplot as plt
import seaborn as sns
# + executionInfo={"elapsed": 385, "status": "ok", "timestamp": 1634217483779, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh7qEJpIZvpZ7EO8wJyR6c1PPuTlzWXFE1RcI0XPg=s64", "userId": "02695860433788978097"}, "user_tz": -330} id="BRbzMafS8YWz"
from __future__ import division
from pyomo.opt import SolverFactory
# -
# #### Link to use relative path to input data set based on your directory structure:
# https://stackoverflow.com/questions/50119792/python-import-excel-file-using-relative-path
# + colab={"base_uri": "https://localhost:8080/", "height": 385} executionInfo={"elapsed": 476, "status": "error", "timestamp": 1634217758544, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh7qEJpIZvpZ7EO8wJyR6c1PPuTlzWXFE1RcI0XPg=s64", "userId": "02695860433788978097"}, "user_tz": -330} id="FdE6kOOE8YWz" outputId="210a232f-fe89-48f5-d10a-e013d427dd58"
# Reading the data from Excel workbook
# inputdata='Staffing+Data.xlsx'
inputdata=r'./../data/raw/Staffing+Data.xlsx'
AppDemand = pd.read_excel(inputdata, sheet_name='DemandData')
StaffAv = pd.read_excel(inputdata, sheet_name='StaffAvailability')
Cost = pd.read_excel(inputdata, sheet_name='Cost')
ServiceRate = pd.read_excel(inputdata, sheet_name='ServiceRate')
# -
print(AppDemand.shape)
AppDemand.head()
# + id="c1Ksbrvi94oi"
print(StaffAv.shape)
StaffAv.head()
# -
print(Cost.shape)
Cost.head()
print(ServiceRate.shape)
ServiceRate.head()
# + [markdown] id="BmhVYw0c8YW0"
# ## Data pre-processing
# + [markdown] id="7s138JWf8YW2"
# #### Create the required Python data structures for indexes and parameters
#
# -
# Demand data
Demand = AppDemand.set_index(['State', 'Month'])['Demand'].to_dict()
Demand
# FTE Salary from the data
FTE_Salary = Cost.set_index(['State', 'Month'])['MonthlySalary'].to_dict()
FTE_Salary
# Unit outsourcing Cost
UnitOutSourceCost = Cost.set_index(['State', 'Month'])['UnitOutSourceCost'].to_dict()
UnitOutSourceCost
# Number of Applications when Employee working Full Time
FTE_AppServiceRate = ServiceRate.iloc[0,0]
FTE_AppServiceRate
StaffAv.head()
# +
# Staff Availability for serving the Insurance Applications
# Worst and Best => staffavail_LB and staffavail_UB respectively
StaffAv_LB = StaffAv.set_index(['State', 'Month'])['LB'].to_dict()
StaffAv_UB = StaffAv.set_index(['State', 'Month'])['UB'].to_dict()
StaffAv_Per = StaffAv.set_index(['State', 'Month'])['StaffAvPer'].to_dict()
# -
StaffAv_LB
StaffAv_UB
StaffAv_Per
Cost.head(1)
Cost.State.unique()
Cost.State.value_counts()
Cost.State.value_counts().plot.bar()
plt.show()
Cost.Month.unique()
Cost.Month.value_counts()
Cost.Month.value_counts().plot.bar()
plt.show()
# Index
BankLoc = Cost['State'].unique().tolist() #Directly convert array object to a list here
Month = Cost['Month'].unique().tolist() #Directly convert array object to a list here
#BankLoc
#array(['A', 'B', 'C'], dtype=object)
BankLoc
# Month = array(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
# 'Oct', 'Nov', 'Dec'], dtype=object)
Month
# --------------------------
# # In the Above Steps(Cells) we completed reading necessary Data into Python.
# # We next formulate pyomo model using "glpk solver" for our optimization process.
# + [markdown] id="75RfXu3L8YW2"
# # Question 1
#
# The company wants to know the optimised staffing recommendations for the business case described.
# Write the mathematical model for the deterministic optimisation problem. Define and explain your decision variables, objective function and the constraint. (Hint: Use months of the year as the model timeline).
#
#
# + [markdown] id="iSiYctAL8YW4"
# **Note:** You can write the mathematical formulation directly in your report.
# + [markdown] id="X3l_yB5w8YW5"
# # Question 2
#
# Code the problem is Python and use any optimization package to solve it. Add comments to your code to explain each step.
# + [markdown] id="USbNQwZI8YW6"
# #### Expected output:
#
# Create a data frame containing the number of outsourced applications and the number of FTEs for each state-month combination. You can choose to have extra columns like staff availability, demand etc. in your dataframe apart from the ones mentioned earlier.
# -
# #### Creating a model instance
# + id="PI_yoNVZ8YW7"
# model object
# ConcreteModel: all data is supplied up-front (vs. an AbstractModel).
model = ConcreteModel()
# Next task is to define the Sets
model.i = Set(initialize = BankLoc, doc = 'State') # i is Insurance Company location
model.j = Set(initialize = Month, doc = 'Month') # j is Months
# -
# #### Define Pyomo sets and Parameters
# + id="T0kwlXku8YW7"
# Parameters -> demand, FTE Salaries, Outsourcing Cost(oc), Average Staff Availability(sa)
# All are indexed by (state, month) dictionaries built from the Excel sheets.
model.d = Param(model.i, model.j, initialize = Demand, doc = 'Demand')
model.s = Param(model.i, model.j, initialize = FTE_Salary, doc = 'FTE_Salary')
model.oc = Param(model.i, model.j, initialize = UnitOutSourceCost, doc = 'UnitOutSourceCost')
model.saP = Param(model.i, model.j, initialize = StaffAv_Per, doc = 'StaffAvPer') #model.saP= Staff Availibility Percentage
model.saLB = Param(model.i, model.j, initialize = StaffAv_LB, doc = 'StaffAvLB') #model.saLB=Staff Availibility Lower Bound Percentage
model.saUB = Param(model.i, model.j, initialize = StaffAv_UB, doc = 'StaffAvUB') #modelsaUB=Staff Availibility Upper Bound Percentage
# -
# Scalar parameter: applications served per FTE when working at 100% availability
model.r = Param(initialize = FTE_AppServiceRate, doc = 'FTE App Serve Rate')
model.r
# +
# Parameter for no of Applications that can be processed for the given Staff Availability
def c_FTEAPP(model, i, j):
    """Effective applications processed per FTE in state *i*, month *j*.

    Scales the full-availability service rate ``model.r`` by the expected
    staff-availability fraction for that state/month.
    """
    availability = model.saP[i, j]
    return model.r * availability
model.FTEAPPNO = Param(model.i, model.j, initialize=c_FTEAPP, doc='No of FTE App')
# -
model.FTEAPPNO
# + [markdown] id="CF3BVPzh8YW8"
# #### Decision variables
# -
# Define Decision Variables
#X=App Quantity processed by internal full time staff. This is continuous variable
#y=App Quantity processed by outsourced vendors. This is Integer variable
model.x = Var(model.i, model.j, domain = NonNegativeReals, doc='No of FTE') #Continuous Variable
model.y = Var(model.i, model.j, domain = NonNegativeIntegers, doc='No of Outsourced Applications') #Integer Variable
# + [markdown] id="_OL1k-cP8YW8"
# ### Constraints
# -
# #### Mathematical Expressions for Regulatory Constraints:
# - Regulatory Constraint:y(i,j)<=Demand(i,j)*30 | For all i belongs to State 'A' and j belongs to all months(Jan to Dec)
# - Regulatory Constraint: y(i,j)<=Demand(i,j)*40 | For all i belongs to State 'B' and j belongs to all months(Jan to Dec)
# +
# Regulatory Constraint:y(i,j)<=Demand(i,j)*30 | For all i belongs to State 'A' and j belongs to all months(Jan to Dec)
# A -> 30%
def Reg_A(model, i, j):
    """Regulatory outsourcing cap for state A: 30% of demand in (i, j)."""
    monthly_demand = model.d[i, j]
    return 0.30 * monthly_demand
model.Reg_A = Param(model.i, model.j, initialize=Reg_A, doc='RegRest_A')
# +
# Regulatory Constraint: y(i,j)<=Demand(i,j)*40 | For all i belongs to State 'B' and j belongs to all months(Jan to Dec)
# B -> 40%
def Reg_B(model, i, j):
    """Regulatory outsourcing cap for state B: 40% of demand in (i, j)."""
    monthly_demand = model.d[i, j]
    return 0.40 * monthly_demand
model.Reg_B = Param(model.i, model.j, initialize=Reg_B, doc='RegRest_B')
# -
model.i
# #### Mathematical expression of Demand Constraint:
# #### Demand Constraint= Staff availability*(40Apps/FTE)+Outsourced Insurance Apps Quantity=(Insurance Apps Demand/Month)
# +
# Demand Constraint
# For every state/month: applications processed in-house (FTE count times
# the effective per-FTE service rate) plus outsourced applications must
# exactly meet that month's demand.
model.demand_constraint = ConstraintList()
for i in model.i:
    for j in model.j:
        model.demand_constraint.add(expr= model.x[i,j]*model.FTEAPPNO[i,j] + model.y[i,j] == model.d[i,j])
# +
# Regulatory Constraint: cap the outsourced volume per state/month.
# State A may outsource at most 30% of demand, state B at most 40%;
# state C is unconstrained.
model.regulatoryCons = ConstraintList()
for state in model.i:
    for month in model.j:
        if state == 'A':
            model.regulatoryCons.add(expr=model.y[state, month] <= model.Reg_A[state, month])
        elif state == 'B':
            model.regulatoryCons.add(expr=model.y[state, month] <= model.Reg_B[state, month])
# + [markdown] id="jZk_DqLx8YW8"
# #### Mathematical Expression For Objective function:
#
# - Objective Function:Minimize(Annual Total Cost of FTE+Annual Total Cost of Outsourced Insurance Application)
# - Minimize($\sum_{i}$ $\sum_{j}$ X(i,j) * FTESalary(i,j) + $\sum_{i}$ $\sum_{j}$ Y(i,j) * UnitCost(i,j))
# +
def objective_rule(model):
    """Total annual cost: in-house FTE salaries plus outsourcing fees."""
    staff_cost = sum(model.s[i, j] * model.x[i, j]
                     for i in model.i for j in model.j)
    outsource_cost = sum(model.oc[i, j] * model.y[i, j]
                         for i in model.i for j in model.j)
    return staff_cost + outsource_cost
model.objective = Objective(rule = objective_rule, sense = minimize, doc = 'Define Objective Function')
# + [markdown] id="4x65t8Fe8YW9"
# #### Invoking the "glpk" solver:
#
# -
result_cost = SolverFactory('glpk').solve(model)
result_cost.write()
# result_cost = SolverFactory('glpk').solve(model)
#
# result_cost.write()
# #### Above code throws following error when you do not take care of writing output in a different excel file and than saving the excelwriter instance:
#
# ERROR: Solver (glpk) returned non-zero return code (1)
# ERROR: Solver log: GLPSOL--GLPK LP/MIP Solver 5.0 Parameter(s) specified in
# the command line:
# --write C:\Users\THEHOM~1\AppData\Local\Temp\tmpannjzqj7.glpk.raw --wglp
# C:\Users\THEHOM~1\AppData\Local\Temp\tmp4ofd1y97.glpk.glp --cpxlp
# C:\Users\THEHOM~1\AppData\Local\Temp\tmpbh90rifx.pyomo.lp
# Reading problem data from
# 'C:\Users\THEHOM~1\AppData\Local\Temp\tmpbh90rifx.pyomo.lp'...
# C:\Users\THEHOM~1\AppData\Local\Temp\tmpbh90rifx.pyomo.lp:5: constraints
# section missing CPLEX LP file processing error
# ---------------------------------------------------------------------------
# ApplicationError Traceback (most recent call last)
# <ipython-input-41-eafa0353ff6a> in <module>
# ----> 1 result_cost = SolverFactory('glpk').solve(model)
# 2 result_cost.write()
#
# D:\dev\anaconda_installation_folder\lib\site-packages\pyomo\opt\base\solvers.py in solve(self, *args, **kwds)
# 593 elif hasattr(_status, 'log') and _status.log:
# 594 logger.error("Solver log:\n" + str(_status.log))
# --> 595 raise ApplicationError(
# 596 "Solver (%s) did not exit normally" % self.name)
# 597 solve_completion_time = time.time()
#
# ApplicationError: Solver (glpk) did not exit normally
model.pprint()
# +
#model.value
# -
#No of FTE for State 'A' 'Dec' month
print(model.x['A','Jan'].value)
print(model.x['A','Dec'].value) #Answer should be closer to 139
# The number of Applications processed by outsourced vendor
print(model.y['A', 'Jan'].value)
print(model.y['A', 'Dec'].value) #Answer should be closer to 1554
# #### Print the value of the objective function
TotalCost = model.objective.expr() #Answer should be closer to 18million
TotalCost
# + [markdown] id="py3pYC1s8YW9"
# **`Checkpoint 1:`** Seems like the company has to spend around 17.9 m$ in total for the application approval process.
# + [markdown] id="MjAeKddK8YW-"
# #### Creating dataframe for the results
#
# +
# Create dataframe FTE_Staff containing our FTE and the number of applications outsourced and other columns of interest:
# One row per (state, month): demand, availability bounds, the optimized
# FTE head-count and outsourcing volume, plus derived cost columns.
FTE_Staff = []
for i in BankLoc:
    for j in Month:
        demand=model.d[i,j]
        lb_per=model.saLB[i,j]
        ub_per=model.saUB[i,j]
        staffav_per=model.saP[i,j]
        fte=model.x[i,j].value #This is output given by the pyomo after solving the formulated model
        app_outsourced=model.y[i,j].value #This is output given by the pyomo after solving the formulated model
        # 40 applications per FTE at 100% availability, scaled by the
        # average availability percentage for this state/month.
        app_staff_processed = model.x[i,j].value*40*model.saP[i,j]
        monthly_cost = model.x[i,j].value*FTE_Salary[i,j] + model.y[i,j].value*UnitOutSourceCost[i,j]
        percent_app_outsourced = round((app_outsourced/demand)*100,2)
        percent_app_staff_processed = round((app_staff_processed/demand)*100,2)
        avg_cost_per_app = monthly_cost/demand
        FTE_Staff.append([i,j,demand,lb_per,ub_per,staffav_per,fte,app_outsourced,app_staff_processed,percent_app_outsourced,percent_app_staff_processed,monthly_cost,avg_cost_per_app]) #list of list
FTE_Staff = pd.DataFrame(FTE_Staff, columns=['State','Month','Apps_Demand','LB_Per','UB_Per','StaffAvPer','FTE','App_Outsourced',"App_Staff_Processed" , 'Per_App_Outsourced', 'Per_App_Staff_Processed','Monthly_Cost','Avg_Cost_Per_App'])
# -
FTE_Staff.head(10)
# +
# What is the average FTE per Month
TotalFTE = FTE_Staff['FTE'].sum()
AverageFTEperMonth =(TotalFTE/12).round(2)
print(AverageFTEperMonth) # Answer should come to 257
# -
# Total Application Demand
TotalAppDemand = AppDemand['Demand'].sum() # Answer should be 113294
print(TotalAppDemand)
#Total Application Demand From 'FTE_Staff' dataframe
tad=FTE_Staff.Apps_Demand.sum()
print(tad)
# Percentage of Outsourced Applications
PercentageOutsourced = ((FTE_Staff['App_Outsourced'].sum()/TotalAppDemand)*100).round(2) # Answer should be around 18%
print(PercentageOutsourced)
# Avg Cost Per Application
AvgCostPerAppl = (TotalCost/ TotalAppDemand).round(2) # Answer should be around 160
print(AvgCostPerAppl)
#Another way to get Total Cost of processing all applications: by using statistical function(sum()) on the dataframe itself
tc=FTE_Staff.Monthly_Cost.sum()
print(tc) #Answer should be closer to 18million
# + [markdown] id="3BXry40n8YW-"
# #### Writing the results(dataframe) in to an Excel sheet:
#
# +
# Writing the results in to an "Output Excel sheet"
from openpyxl import load_workbook
book = load_workbook(inputdata) #Loading the previous excel workbook and keeping it to append it to our output file.
# create excel writer object
outputdata=r'./../data/processed/staffing_data_output.xlsx'
# writer = pd.ExcelWriter('staffing_data_output.xlsx', engine = 'openpyxl')
writer = pd.ExcelWriter(outputdata, engine = 'openpyxl')
#Assigning the workbook to the writer object
# NOTE(review): assigning writer.book was deprecated and later removed in
# pandas >= 1.5 -- confirm the pinned pandas version, or switch to
# pd.ExcelWriter(outputdata, engine='openpyxl', mode='a').
writer.book = book
# write dataframe to excel sheet named 'output_Actual'
FTE_Staff.to_excel(writer, sheet_name='Output_Actual')
#we will save and close the excel file finally at the end after writing down all necessary outputs
print('DataFrame=(Output_Actual) is written successfully to Excel Sheet.')
# + [markdown] id="NykRFgMt8YW-"
# # Question 3
#
# #### Worst-case and best-case analysis based on the staffs' availability.
#
# Assuming that the distribution is the same across all the states,
#
# #### 3.1 Worst case analysis
#
# - 3.1.1 What is the optimal number of staff members for the worst case?
#
# - 3.1.2 What is the percentage of outsourcing for the worst case?
#
# - 3.1.3 What is the average cost per application for the worst case?
#
#
# #### 3.2 Best case analysis
#
# - 3.2.1 What is the optimal number of staff members for the best case?
#
# - 3.2.2 What is the percentage of outsourcing for the best case?
#
# - 3.2.3 What is the average cost per application for the best case?
#
# + [markdown] id="WO1ZD3Zr8YW_"
# #### Expected output:
#
# For each of the subtasks (3.1 and 3.2) create a data frame containing the number of outsourced applications and the number of FTEs for each state-month combination. You can choose to have extra columns like staff availability, demand etc. in your dataframe apart from the ones mentioned earlier. Also, print the overall average percentage of outsourced applications and the overall average cost per application.
# + [markdown] id="Re7w8KQD8YXA"
# ## 3.1 Worst Case Analysis (Encoded as WC:Worst Case)
# - model in worst case analysis is named as model_WC
# + [markdown] id="ErR9HGCi8YXA"
# #### 3.1.1 Optimal number of staff members
# + [markdown] id="OblGU0T-8YXA"
# #### Creating a model instance
#
# +
# model object
model_WC = ConcreteModel()
# Next task is to define the Sets
model_WC.i = Set(initialize = BankLoc, doc = 'State') # i is Insurance Company location
model_WC.j = Set(initialize = Month, doc = 'Month') # j is Months
# + [markdown] id="6e4tmkg18YXB"
# #### Define Pyomo sets and Parameters
#
# +
# Parameters -> demand, FTE Salaries, Outsourcing Cost(oc), Average Staff Availability(sa)
model_WC.d = Param(model_WC.i, model_WC.j, initialize = Demand, doc = 'Demand')
model_WC.s = Param(model_WC.i, model_WC.j, initialize = FTE_Salary, doc = 'FTE_Salary')
model_WC.oc = Param(model_WC.i, model_WC.j, initialize = UnitOutSourceCost, doc = 'UnitOutSourceCost')
model_WC.saP = Param(model_WC.i, model_WC.j, initialize = StaffAv_Per, doc = 'StaffAvPer')
model_WC.saLB = Param(model_WC.i, model_WC.j, initialize = StaffAv_LB, doc = 'StaffAvLB')
model_WC.saUB = Param(model_WC.i, model_WC.j, initialize = StaffAv_UB, doc = 'StaffAvUB')
# -
# Scaler Application Serve Rate when working 100%
model_WC.r = Param(initialize = FTE_AppServiceRate, doc = 'FTE App Serve Rate')
# +
# Parameter for no of Applications that can be processed for the given Staff Availability
def c_FTEAPP_WC(model_WC, i, j):
    """Worst-case per-FTE service rate for state *i*, month *j*.

    Uses the LOWER bound of staff availability (saLB) instead of the
    average, per the worst-case scenario analysis.
    """
    worst_availability = model_WC.saLB[i, j]
    return model_WC.r * worst_availability
model_WC.FTEAPPNO = Param(model_WC.i, model_WC.j, initialize=c_FTEAPP_WC, doc='No of FTE App')
# + [markdown] id="KRMmRbKI8YXB"
# #### Decision variables
#
# -
# Define Decision Variables
#X=App Quantity processed by internal full time staff. This is continuous variable
#y=App Quantity processed by outsourced vendors. This is Integer variable
model_WC.x = Var(model_WC.i, model_WC.j, domain = NonNegativeReals, doc='No of FTE') #Continuous Variable
model_WC.y = Var(model_WC.i, model_WC.j, domain = NonNegativeIntegers, doc='No of Outsourced Applications') #Integer Variable
# #### Constraints
# +
# Worst-case regulatory caps (same percentages as the base model).
def Reg_A_WC(model_WC, i, j):
    """State A outsourcing cap: 30% of demand in (i, j)."""
    return model_WC.d[i, j] * 0.30

def Reg_B_WC(model_WC, i, j):
    """State B outsourcing cap: 40% of demand in (i, j)."""
    return model_WC.d[i, j] * 0.40
# + id="Phye8QO08YXB"
model_WC.Reg_A = Param(model_WC.i, model_WC.j, initialize=Reg_A_WC, doc='RegRest_A_Wc')
model_WC.Reg_B = Param(model_WC.i, model_WC.j, initialize=Reg_B_WC, doc='RegRest_B_WC')
# +
# Demand Constraint= Staff availability*(40Apps/FTE)+Outsourced Insurance Apps Quantity=(Insurance Apps Demand/Month)
model_WC.demand_constraint = ConstraintList()
for i in model_WC.i:
for j in model_WC.j:
model_WC.demand_constraint.add(expr= model_WC.x[i,j]*model_WC.FTEAPPNO[i,j] + model_WC.y[i,j] == model_WC.d[i,j])
# +
# Regulatory Constraint
model_WC.regulatoryCons = ConstraintList()
for i in model_WC.i:
for j in model_WC.j:
if i=='A':
model_WC.regulatoryCons.add(expr = model_WC.y['A', j] <= model_WC.Reg_A['A', j])
elif i=='B':
model_WC.regulatoryCons.add(expr = model_WC.y['B', j] <= model_WC.Reg_B['B', j])
# + [markdown] id="ltK7JckX8YXB"
# #### Objective function
#
# +
# Objective Function:Minimize(Annual Total Cost of FTE+Annual Total Cost of Outsourced Insurance Application)
def objective_rule(model_WC):
    """Total annual cost: FTE salaries plus outsourcing fees over all (i, j)."""
    staffing_cost = sum(model_WC.s[i, j] * model_WC.x[i, j]
                        for i in model_WC.i for j in model_WC.j)
    outsourcing_cost = sum(model_WC.oc[i, j] * model_WC.y[i, j]
                           for i in model_WC.i for j in model_WC.j)
    return staffing_cost + outsourcing_cost
model_WC.objective = Objective(rule = objective_rule, sense = minimize, doc = 'Define Objective Function')
# + [markdown] id="kH5ndTn48YXC"
# #### Invoking the solver
#
# -
result_cost = SolverFactory('glpk').solve(model_WC)
result_cost.write()
model_WC.pprint()
# + [markdown] id="znHIxyY68YXC"
# #### Print the value of the objective function
#
# -
TotalCost_WC = (model_WC.objective.expr()) #Answer should be closer to 19.6million
TotalCost_WC
# + [markdown] id="0FpKJvYc8YXC"
# **`Checkpoint 2:`** The company has to spend around 19.6 m$ in total for the application approval process if the staffs are working with the minimum availability.
# + [markdown] id="qulkEhWT8YXC"
# #### Creating dataframe for the results
#
# +
# Collect the worst-case solver results into one row per (state, month):
# demand, availability bounds, optimal FTEs, outsourced volume, costs.
rows_wc = []
for state in BankLoc:
    for month in Month:
        demand = model_WC.d[state, month]
        lb_per = model_WC.saLB[state, month]
        ub_per = model_WC.saUB[state, month]
        staffav_per = model_WC.saP[state, month]
        fte = model_WC.x[state, month].value            # solver output: optimal FTE count
        app_outsourced = model_WC.y[state, month].value  # solver output: outsourced volume
        # In-house throughput uses the *lower-bound* availability (worst case).
        app_staff_processed = fte * 40 * lb_per
        monthly_cost = fte * FTE_Salary[state, month] + app_outsourced * UnitOutSourceCost[state, month]
        percent_app_outsourced = round((app_outsourced / demand) * 100, 2)
        percent_app_staff_processed = round((app_staff_processed / demand) * 100, 2)
        avg_cost_per_app = monthly_cost / demand
        rows_wc.append([state, month, demand, lb_per, ub_per, staffav_per, fte,
                        app_outsourced, app_staff_processed, percent_app_outsourced,
                        percent_app_staff_processed, monthly_cost, avg_cost_per_app])
FTE_Staff_WC = pd.DataFrame(
    rows_wc,
    columns=['State', 'Month', 'Apps_Demand', 'LB_Per', 'UB_Per', 'StaffAvPer',
             'FTE', 'App_Outsourced', 'App_Staff_Processed', 'Per_App_Outsourced',
             'Per_App_Staff_Processed', 'Monthly_Cost', 'Avg_Cost_Per_App'])
# -
FTE_Staff_WC.head(10)
# +
# What is the average FTE per Month
TotalFTE_WC = FTE_Staff_WC['FTE'].sum()
AverageFTEperMonth_WC =(TotalFTE_WC/12).round(2)
print(AverageFTEperMonth_WC) # Answer should come to 230
# + [markdown] id="X1LLtByw8YXC"
# #### 3.1.2 Percentage of outsourced applications
# + id="FZgAHLTP8YXD"
# write your code here
PercentageOutsourced_WC = ((FTE_Staff_WC['App_Outsourced'].sum()/TotalAppDemand)*100).round(2) # Answer should be around 35%
print(PercentageOutsourced_WC)
# + [markdown] id="YHIuE1048YXD"
# #### 3.1.3 Average cost per application
# + id="4lvyDt6u8YXD"
# write your code here
AvgCostPerAppl_WC = (TotalCost_WC/ TotalAppDemand).round(2) # Answer should be around 173
print(AvgCostPerAppl_WC)
# +
# avgcostperapp_wc=FTE_Staff_WC.Avg_Cost_Per_App.mean()
# avgcostperapp_wc
# -
#Another way to get Total Cost of processing all applications: by using statistical function(sum()) on the dataframe itself
tc_WC=FTE_Staff_WC.Monthly_Cost.sum()
print(tc_WC) #Answer should be closer to 19.6million
# #### Writing the results(dataframe) in to an Excel sheet:
# +
# write dataframe to excel sheet named 'Output_WorstCase'
FTE_Staff_WC.to_excel(writer, sheet_name='Output_WorstCase')
# writer.save() #we will save and close the excel file finally at the end after writing down all necessary outputs
print('DataFrame=(OutPut_WorstCase) is written successfully to Excel Sheet.')
# + [markdown] id="YoBZaMHA8YXD"
# ## 3.2 Best Case Analysis (Encoded as BC:Best Case)
# - model in Best case analysis is named as model_BC.
# + [markdown] id="IocGl3il8YXE"
# #### 3.2.1 Optimal number of staff members
#
# + [markdown] id="gB_baAwQ8YXE"
# #### Creating a model instance
#
# +
# model object
model_BC = ConcreteModel()
# Next task is to define the Sets
model_BC.i = Set(initialize = BankLoc, doc = 'State') # i is Insurance Company location
model_BC.j = Set(initialize = Month, doc = 'Month') # j is Months
# + [markdown] id="sVj2rIUH8YXE"
# #### Define Pyomo sets and Parameters
#
# +
# Parameters -> demand, FTE Salaries, Outsourcing Cost(oc), Average Staff Availability(sa)
model_BC.d = Param(model_BC.i, model_BC.j, initialize = Demand, doc = 'Demand')
model_BC.s = Param(model_BC.i, model_BC.j, initialize = FTE_Salary, doc = 'FTE_Salary')
model_BC.oc = Param(model_BC.i, model_BC.j, initialize = UnitOutSourceCost, doc = 'UnitOutSourceCost')
model_BC.saP = Param(model_BC.i, model_BC.j, initialize = StaffAv_Per, doc = 'StaffAvPer')
model_BC.saLB = Param(model_BC.i, model_BC.j, initialize = StaffAv_LB, doc = 'StaffAvLB')
model_BC.saUB = Param(model_BC.i, model_BC.j, initialize = StaffAv_UB, doc = 'StaffAvUB')
# -
# Scaler Application Serve Rate when working 100%
model_BC.r = Param(initialize = FTE_AppServiceRate, doc = 'FTE App Serve Rate')
# +
# Parameter for no of Applications that can be processed for the given Staff Availability
def c_FTEAPP_BC(model_BC, i, j):
    """Pyomo Param rule: applications one FTE can process at location i in month j.

    Scales the full-availability service rate by the *upper-bound* staff
    availability (saUB) — this is the best-case model, so capacity is
    deliberately optimistic.
    """
    best_case_availability = model_BC.saUB[i, j]
    return model_BC.r * best_case_availability
model_BC.FTEAPPNO = Param(model_BC.i, model_BC.j, initialize=c_FTEAPP_BC, doc='No of FTE App')
# + [markdown] id="aGiuOPe88YXE"
# #### Decision variables
#
# -
# Define Decision Variables
#X=App Quantity processed by internal full time staff. This is continuous variable
#y=App Quantity processed by outsourced vendors. This is Integer variable
model_BC.x = Var(model_BC.i, model_BC.j, domain = NonNegativeReals, doc='No of FTE') #Continuous Variable
model_BC.y = Var(model_BC.i, model_BC.j, domain = NonNegativeIntegers, doc='No of Outsourced Applications') #Integer Variable
# + [markdown] id="0UgQS6J28YXE"
# #### Constraints
#
# +
def Reg_A_BC(model_BC, i, j):
    """Pyomo Param rule: outsourcing cap for state A — 30% of monthly demand."""
    monthly_demand = model_BC.d[i, j]
    return 0.30 * monthly_demand
def Reg_B_BC(model_BC, i, j):
    """Pyomo Param rule: outsourcing cap for state B — 40% of monthly demand."""
    monthly_demand = model_BC.d[i, j]
    return 0.40 * monthly_demand
# -
model_BC.Reg_A = Param(model_BC.i, model_BC.j, initialize=Reg_A_BC, doc='RegRest_A_BC')
model_BC.Reg_B = Param(model_BC.i, model_BC.j, initialize=Reg_B_BC, doc='RegRest_B_BC')
# +
# Demand balance: in-house processing capacity (FTEs x apps-per-FTE) plus
# the outsourced volume must exactly meet each location's monthly demand.
model_BC.demand_constraint = ConstraintList()
for loc in model_BC.i:
    for month in model_BC.j:
        staff_capacity = model_BC.x[loc, month] * model_BC.FTEAPPNO[loc, month]
        model_BC.demand_constraint.add(
            expr=staff_capacity + model_BC.y[loc, month] == model_BC.d[loc, month])
# +
# Regulatory caps on outsourcing: state A at most 30% of demand (Reg_A),
# state B at most 40% (Reg_B); state C carries no outsourcing restriction.
model_BC.regulatoryCons = ConstraintList()
for loc in model_BC.i:
    for month in model_BC.j:
        if loc == 'A':
            model_BC.regulatoryCons.add(
                expr=model_BC.y['A', month] <= model_BC.Reg_A['A', month])
        elif loc == 'B':
            model_BC.regulatoryCons.add(
                expr=model_BC.y['B', month] <= model_BC.Reg_B['B', month])
# + [markdown] id="OGyJlNYN8YXF"
# #### Objective function
#
# +
# Objective Function:Minimize(Annual Total Cost of FTE+Annual Total Cost of Outsourced Insurance Application)
def objective_rule(model_BC):
    """Total annual cost: FTE salaries plus outsourcing fees over all (i, j)."""
    staffing_cost = sum(model_BC.s[i, j] * model_BC.x[i, j]
                        for i in model_BC.i for j in model_BC.j)
    outsourcing_cost = sum(model_BC.oc[i, j] * model_BC.y[i, j]
                           for i in model_BC.i for j in model_BC.j)
    return staffing_cost + outsourcing_cost
model_BC.objective = Objective(rule = objective_rule, sense = minimize, doc = 'Define Objective Function')
# + [markdown] id="CxLzAxbU8YXF"
# #### Invoking the solver
#
# -
result_cost = SolverFactory('glpk').solve(model_BC)
result_cost.write()
model_BC.pprint()
# + [markdown] id="IV_3gbVe8YXF"
# #### Print the value of the objective function
#
# -
TotalCost_BC = (model_BC.objective.expr()) #Answer should be closer to 16.5million
TotalCost_BC
# + [markdown] id="TQwABCpf8YXF"
# **`Checkpoint 3:`** The company has to spend around 16.5 m$ in total for the application approval process if the staffs are working with the maximum availability.
# + [markdown] id="EoYkQdUH8YXF"
# #### Creating dataframe for the results
#
# +
# Collect the best-case solver results into one row per (state, month):
# demand, availability bounds, optimal FTEs, outsourced volume, costs.
rows_bc = []
for state in BankLoc:
    for month in Month:
        demand = model_BC.d[state, month]
        lb_per = model_BC.saLB[state, month]
        ub_per = model_BC.saUB[state, month]
        staffav_per = model_BC.saP[state, month]
        fte = model_BC.x[state, month].value            # solver output: optimal FTE count
        app_outsourced = model_BC.y[state, month].value  # solver output: outsourced volume
        # In-house throughput uses the *upper-bound* availability (best case).
        app_staff_processed = fte * 40 * ub_per
        monthly_cost = fte * FTE_Salary[state, month] + app_outsourced * UnitOutSourceCost[state, month]
        percent_app_outsourced = round((app_outsourced / demand) * 100, 2)
        percent_app_staff_processed = round((app_staff_processed / demand) * 100, 2)
        avg_cost_per_app = monthly_cost / demand
        rows_bc.append([state, month, demand, lb_per, ub_per, staffav_per, fte,
                        app_outsourced, app_staff_processed, percent_app_outsourced,
                        percent_app_staff_processed, monthly_cost, avg_cost_per_app])
FTE_Staff_BC = pd.DataFrame(
    rows_bc,
    columns=['State', 'Month', 'Apps_Demand', 'LB_Per', 'UB_Per', 'StaffAvPer',
             'FTE', 'App_Outsourced', 'App_Staff_Processed', 'Per_App_Outsourced',
             'Per_App_Staff_Processed', 'Monthly_Cost', 'Avg_Cost_Per_App'])
# -
FTE_Staff_BC.head(10)
# +
# What is the average FTE per Month
TotalFTE_BC = FTE_Staff_BC['FTE'].sum()
AverageFTEperMonth_BC =(TotalFTE_BC/12).round(2)
print(AverageFTEperMonth_BC) # Answer should come to 278
# + [markdown] id="tTL3vx4N8YXG"
# #### 3.2.2 Percentage of outsourced applications
# + id="i1ZJRn598YXG"
# write your code here
PercentageOutsourced_BC = ((FTE_Staff_BC['App_Outsourced'].sum()/TotalAppDemand)*100).round(2) # Answer should be around 4%
print(PercentageOutsourced_BC)
# + [markdown] id="BLjer_nz8YXG"
# #### 3.2.3 Average cost per application
# + id="iUaM-Nk48YXG"
# write your code here
AvgCostPerAppl_BC = (TotalCost_BC/ TotalAppDemand).round(2) # Answer should be around 145
print(AvgCostPerAppl_BC)
# -
#Another way to get Total Cost of processing all applications: by using statistical function(sum()) on the dataframe itself
tc_BC=FTE_Staff_BC.Monthly_Cost.sum()
print(tc_BC) #Answer should be closer to 16.5million
# #### Writing the results(dataframe) in to an Excel sheet:
# +
# write dataframe to excel sheet named 'Output_BestCase'
FTE_Staff_BC.to_excel(writer, sheet_name='Output_BestCase')
# writer.save() #we will save and close the excel file finally at the end after writing down all necessary outputs
print('DataFrame=(OutPut_BestCase) is written successfully to Excel Sheet.')
# -
list_total_cost=[TotalCost/1000000,TotalCost_WC/1000000,TotalCost_BC/1000000] #convert in millions for visualization
list_avgftepermonth=[AverageFTEperMonth,AverageFTEperMonth_WC,AverageFTEperMonth_BC]
list_peroutsourced=[PercentageOutsourced,PercentageOutsourced_WC,PercentageOutsourced_BC]
list_avgcostperapp=[AvgCostPerAppl,AvgCostPerAppl_WC,AvgCostPerAppl_BC]
# dictionary of lists
dic = {'TotalCost(m)': list_total_cost, 'AvgFTEperMonth': list_avgftepermonth, 'PerOutSourced': list_peroutsourced,'AvgCostperApp':list_avgcostperapp}
df_summary = pd.DataFrame(dic,index =['Actual', 'Worst_Case', 'Best_Case'])
df_summary.head()
# ### Inference from above dataframe:
# - Notice the above dataframe displays all the required metrics(parameters) asked in the assignment across 'Actual,'Worst Case' and 'Best Case'
# #### Writing the results(dataframe) in to an Excel sheet:
# +
# write dataframe to excel sheet named 'Summary'
df_summary.to_excel(writer, sheet_name='Summary')
# save the excel file
writer.save() #Finally when you want to save and close excelwriter object, call save()
print('DataFrame=(Summary) is written successfully to Excel Sheet.')
# -
# ---
# + [markdown] id="K-oaAHtu8YXG"
# # Question 4
#
# #### Creating Visualisations
# + [markdown] id="Qsq1r2UP8YXG"
# Create the following visualisations using your preferred method (i.e. Python, PowerPoint, Power BI, etc.) and add it to your report.
#
# Use the solution of Q2 to create a stacked column chart that shows the percentage of applications processed by the staff and by the vendor for each month (%staff processed applications+ %vendor processed applications should add up to 100%).
# Create a graph to show how the cost per application increases with respect to any change in the parameters in your analysis.
# Hint: Use the cost per application that you calculate in Questions 2 and 3 (i.e., the best case, and the worst case).
# + [markdown] id="862qkRCN8YXH"
# **Note:** You can create the charts in Python or some other visualisation tools and make it a part of your final report directly.
# + id="Kfczsej_8YXH"
FTE_Staff.head(10)
# -
# Select the stacked-bar columns. `.copy()` makes df_staff an independent
# frame instead of a view of FTE_Staff, so the later Month_num column
# assignment works reliably (avoids SettingWithCopyWarning).
df_staff = FTE_Staff[['Month', 'Per_App_Staff_Processed', 'Per_App_Outsourced']].copy()
print(df_staff.shape)
df_staff.head()
import calendar
# Map month abbreviations ('', 'Jan', ..., 'Dec') to their month numbers
# (0-12); used below to sort dataframe rows chronologically.
d = {abbr: num for num, abbr in enumerate(calendar.month_abbr)}
d
df_staff['Month_num'] = df_staff.Month.map(d)
print(df_staff.shape)
df_staff.head()
df_staff=df_staff.groupby(['Month']).sum().sort_values("Month_num",ascending=True)
print(df_staff.shape)
# Remove column name 'Month_num'
df_staff.drop(['Month_num'], axis = 1, inplace = True) #drop 'month_num' after you have used it for sorting months.
print(df_staff.shape)
df_staff.head(20)
# plot data in stack manner of bar type
df_staff.plot( kind='bar', stacked=True, figsize=(10,5),
title='Percentage of applications processed by the staff and the vendors for all "Three" States(Locations)')
plt.show()
# #### Inference from above plot:
# - Notice this plot reaches a maximum of 300% rather than 100%: each of the three states (locations) contributes up to 100%, so together they add up to 300%
# ### State Wise(Location Wise) stacked bar charts:
# ### State 'A' Charts:
FTE_StaffA=FTE_Staff[FTE_Staff.State=='A']
print(FTE_StaffA.shape)
FTE_StaffA.head(12)
# One Method(Manual) to plot bars in stacked manner
plt.bar(FTE_StaffA.Month, FTE_StaffA.Per_App_Staff_Processed, color='g') #Green color for staff processed applications
plt.bar(FTE_StaffA.Month, FTE_StaffA.Per_App_Outsourced, bottom=FTE_StaffA.Per_App_Staff_Processed, color='r') #Red color for outsourced applications(red indicating danger and cost to company)
# changing the rc parameters(figure size)
plt.rcParams['figure.figsize'] = [10, 5]
plt.xlabel("Months in State A")
plt.ylabel("Percentage")
plt.legend(["Apps Staff Processed", "Apps Outsourced"], bbox_to_anchor = (1.05, 0.6)) #Place legend outside graph
plt.title("Percentage of applications processed by the staff and the vendors For State 'A'")
plt.show()
df_staffA=FTE_StaffA[['Month','Per_App_Staff_Processed','Per_App_Outsourced']] #The order you mention, the data gets stacked in that respective order
df_staffA.head(12)
# plot data in stack manner of bar type
df_staffA.plot(x='Month', kind='bar', stacked=True, figsize=(10,5),
title='Percentage of applications processed by the staff and the vendors For State "A"')
plt.show()
# ### State 'B' Charts:
FTE_StaffB=FTE_Staff[FTE_Staff.State=='B']
print(FTE_StaffB.shape)
FTE_StaffB.head(12)
df_staffB=FTE_StaffB[['Month','Per_App_Staff_Processed','Per_App_Outsourced']] #The order you mention, the data gets stacked in that respective order
df_staffB.head(12)
# plot data in stack manner of bar type
df_staffB.plot(x='Month', kind='bar', stacked=True, figsize=(10,5),
title='Percentage of applications processed by the staff and the vendors For State "B"')
plt.show()
# ### State 'C' Charts:
FTE_StaffC=FTE_Staff[FTE_Staff.State=='C']
print(FTE_StaffC.shape)
FTE_StaffC.head(12)
df_staffC=FTE_StaffC[['Month','Per_App_Staff_Processed','Per_App_Outsourced']] #The order you mention, the data gets stacked in that respective order
df_staffC.head(12)
# plot data in stack manner of bar type
df_staffC.plot(x='Month', kind='bar', stacked=True, figsize=(10,5),
title='Percentage of applications processed by the staff and the vendors For State "C"')
plt.show()
# ---
# #### Create a graph to show how the cost per application increases with respect to any change in the parameters in your analysis. (Hint: Use the cost per application that you calculate in Questions 2 and 3, i.e., the actual scenario, best case, and worst case.)
FTE_Staff.head(1)
# +
# Build one wide frame with per-scenario cost columns. Each selection takes
# `.copy()` so that rename(inplace=True) mutates an independent frame rather
# than a view of the source dataframe (avoids SettingWithCopyWarning and
# silently-ignored renames).
df_staff_actual = FTE_Staff[['Month', 'Monthly_Cost', 'Avg_Cost_Per_App']].copy()
df_staff_actual.rename(columns={'Monthly_Cost': 'Monthly_Cost_Actual', 'Avg_Cost_Per_App': 'Avg_Cost_Per_App_Actual'}, inplace=True)
df_staff_WC = FTE_Staff_WC[['Monthly_Cost', 'Avg_Cost_Per_App']].copy()
df_staff_WC.rename(columns={'Monthly_Cost': 'Monthly_Cost_WC', 'Avg_Cost_Per_App': 'Avg_Cost_Per_App_WC'}, inplace=True)
df_staff_BC = FTE_Staff_BC[['Monthly_Cost', 'Avg_Cost_Per_App']].copy()
df_staff_BC.rename(columns={'Monthly_Cost': 'Monthly_Cost_BC', 'Avg_Cost_Per_App': 'Avg_Cost_Per_App_BC'}, inplace=True)
# concatenating dataframes along columns
df_staff_all = pd.concat([df_staff_actual, df_staff_WC, df_staff_BC], axis=1)
df_staff_all['Month_num'] = df_staff_all.Month.map(d)  # numeric month for chronological sorting
df_staff_all.head(2)
# -
df_staff_all_costperapp= df_staff_all[['Month','Month_num','Avg_Cost_Per_App_WC','Avg_Cost_Per_App_Actual','Avg_Cost_Per_App_BC']]
df_staff_all_costperapp.head(1)
df_staff_all_costperapp = df_staff_all_costperapp.groupby(['Month']).mean().sort_values("Month_num",ascending=True)
df_staff_all_costperapp.drop(['Month_num'], axis = 1, inplace = True) #drop 'month_num' after you have used it for sorting months.
df_staff_all_costperapp.plot.line(figsize=[10,5])
plt.xlabel("Month")
plt.ylabel("Avg Cost Per Application")
plt.title("Monthly Average Cost Per Application")
plt.show()
# #### Inference from above plot:
# - You can see that the WC (Worst Case) line is the highest in cost, then comes the Actual line, and then the BC (Best Case) line, which is the lowest in cost
# - We can infer from the above graph that the cost/app is high during the months of March and July, and also increases towards the end of the year in November and December.
# #### Only the above graph is asked in the assignment, however if needed we can plot the graph for "Monthly_Cost" for all applications processed in a particular month as shown below:
df_staff_all_monthlycost= df_staff_all[['Month','Month_num','Monthly_Cost_WC','Monthly_Cost_Actual','Monthly_Cost_BC']]
df_staff_all_monthlycost.head(1)
df_staff_all_monthlycost = df_staff_all_monthlycost.groupby(['Month']).mean().sort_values("Month_num",ascending=True)
df_staff_all_monthlycost.drop(['Month_num'], axis = 1, inplace = True) #drop 'month_num' after you have used it for sorting months.
df_staff_all_monthlycost.plot.line(figsize=[10,5])
plt.xlabel("Month")
plt.ylabel("Monthly Total Cost ")
plt.title("Monthly Total Cost of Applications Processed")
plt.show()
# #### Inference from above plot/graph:
# - We can infer from the above graph that the “Total Monthly Cost” is high during the months of March and July, and also increases towards the end of the year in November and December
# # This Staff Planning assignment is submitted by <NAME> and <NAME> of DSC26 Batch on 25th October 2021
# # Thank you Upgrad and IIITB For the Learning Experience!
# #### Project Complete
# - This Project is done by `<NAME>`
# - Contact no: +91-9108302174
# - Email Id: <EMAIL>
# - Linked in: https://www.linkedin.com/in/achalkagwad/
# ---
| notebooks/Main_staff_planning_OR_Pyomo_achal_himanka_final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Figure 3
#
# Run the steps below to generate the data and plot of Figure 3.
#
# **<NAME>** // 2019 Jan 8 // Leiden Observatory // <EMAIL>
# +
# #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import numpy as np
from astropy.io import fits
from sts_class import SpectralTimeSeries
from aux import *
from matplotlib import pyplot as plt
from rmmodel_func import *
import matplotlib.gridspec as gridspec
from matplotlib.colorbar import Colorbar
# -
# # Load Spectral Time Series
# Load the Spectral Time Series from the Beta Pictoris UVES data.
# Load the spectral time series three times so that the 'before' and 'after'
# pulsation-removal copies can be compared side by side against a reference.
sts = SpectralTimeSeries()
sts.load('../data', 'sts')
# # Perform stellar pulsation removal
sts_before = SpectralTimeSeries()  # copy kept as loaded: before pulsation removal
sts_before.load('../data', 'sts')
sts_after = SpectralTimeSeries()   # copy that gets corrected: after pulsation removal
sts_after.load('../data', 'sts')
sts_after.correct_for_pulsations()
# # Plot Spectral Time Series for two candidates
# +
# load RM-models for two candidates
models = fits.getdata('../data/rm_models/diff_lineprof.fits')
# two candidates: obsid 58004 and 58098
obsids = ['58004', '58098']
fignames = ['figure3a', 'figure3b']
modelinds = [62, 34]
dw = 10
for n in range(2):
# get the data from the epoch with the specified observation ID
sts_obs_before = sts_before.datacube[:, sts.obsid_dict[obsids[n]]]
sts_obs_after = sts_after.datacube[:, sts.obsid_dict[obsids[n]]]
# get the best-fitting RM-model
modelprof = models[modelinds[n], :]
ind = np.argmax(modelprof)
xx, sts_model = np.meshgrid(np.arange(sts_obs_after.shape[1]), modelprof)
# get the residual
sts_res = sts_obs_after - sts_model
# get time stamps and convert to minutes
time = sts.time[sts.obsid_dict[obsids[n]]]
time = (time - time[0]) * 24 * 60
# setup grid for plot
gs = gridspec.GridSpec(2, 8, width_ratios=[1.,1.,1.,1.,
1.,1.,0.25,0.25])
ax1 = plt.subplot(gs[0:1, 0:3])
ax2 = plt.subplot(gs[0:1, 3:6])
ax3 = plt.subplot(gs[1:2, 0:2])
ax4 = plt.subplot(gs[1:2, 2:4])
ax5 = plt.subplot(gs[1:2, 4:6])
cbax1 = plt.subplot(gs[0:2, 6:7])
ax2.get_yaxis().set_visible(False)
ax4.get_yaxis().set_visible(False)
ax5.get_yaxis().set_visible(False)
# setup global colorbar range
vmin = min(np.min(sts_obs_before), np.min(sts_obs_after))
vmax = max(np.max(sts_obs_before), np.max(sts_obs_after))
im1 = ax2.imshow(np.rot90(sts_obs_before, 1),
extent = [sts.radvel[0], sts.radvel[-1],
time[0], time[-1]],
aspect = 4.5 * abs(sts.radvel[ind-dw] \
- sts.radvel[ind+dw])/ \
(time[-1] - time[0]),
vmin = vmin, vmax = vmax, cmap = 'Greys_r')
ax1.imshow(np.rot90(sts_obs_after, 1),
extent = [sts.radvel[0], sts.radvel[-1],
time[0], time[-1]],
aspect = 4.5 * abs(sts.radvel[ind-dw] - \
sts.radvel[ind+dw]) \
/(time[-1] - time[0]),
vmin = vmin, vmax = vmax, cmap = 'Greys_r')
ax3.imshow(np.rot90(sts_obs_after[ind-dw:ind+dw], 1),
extent = [sts.radvel[ind-dw], sts.radvel[ind+dw],
time[0], time[-1]], aspect = 1.25 * \
abs(sts.radvel[ind-dw] - sts.radvel[ind+dw])/(time[-1] - time[0]),
vmin = vmin, vmax = vmax, cmap = 'Greys_r')
ax4.imshow(np.rot90(sts_model[ind-dw:ind+dw], 1),
extent = [sts.radvel[ind-dw], sts.radvel[ind+dw],
0, 1],
aspect = 1.25 * abs(sts.radvel[ind-dw] - \
sts.radvel[ind+dw]),
vmin = vmin, vmax = vmax, cmap = 'Greys_r')
ax5.imshow(np.rot90(sts_res[ind-dw:ind+dw], 1),
extent = [sts.radvel[ind-dw], sts.radvel[ind+dw],
0, 1],
aspect = 1.25 * \
abs(sts.radvel[ind-dw] - sts.radvel[ind+dw]),
vmin = vmin, vmax = vmax, cmap = 'Greys_r')
# add titles and labels
ax1.set_title('After pulsation removal', size = 12)
ax2.set_title('Before pulsation removal', size = 12)
ax4.set_title('Model', size = 12)
ax5.set_title('Residual', size = 12)
ax4.set_xlabel('Radial velocity [km/s]', size = 15)
ax1.set_ylabel('Time [min]', size = 15)
ax3.set_ylabel('Time [min]', size = 15)
# add colorbar
cb1 = Colorbar(ax = cbax1, mappable = im1)
cb1.set_label('Flux [arbitrary units]', size = 15)
# save figure 3
if not os.path.isdir('../output/figure3/'):
os.mkdir('../output/figure3/')
plt.savefig('../output/figure3/' + str(fignames[n]) + '.png')
plt.savefig('../output/figure3/' + str(fignames[n]) + '.pdf', dpi = 300)
plt.show()
# -
| scripts/plot_fig3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 ('data_science_py395')
# language: python
# name: python3
# ---
# Programación para *Data Science*
# ============================
#
# Intro101 - 07a Conceptos avanzados de Python
# --------------------------------------
#
# En este Notebook encontraréis dos conjuntos de ejercicios de Python.
# ### Ejercicio 1
#
# Un número primo es aquél que solo es divisible por él mismo y por 1.
#
# a) Escribe un código que compruebe si un número `x = 15` es solo divisible por 1 o por el mismo. Escribe este código usando un iterador (un `for` o un `while`) que barra todos los valores desde `2` a `x-1`. Crea una variable `divisible` que tenga por defecto valor `False` y asigne el valor `True` si a lo largo de la iteración encuentra un número natural divisible. Puedes usar el operador modulo `a % b` para saber si un numero `b` es divisible por `a`.
# Trial-divide x by every integer in [2, x-1]; flag the first hit and stop.
x = 15
divisible = False
for candidato in range(2, x):
    if x % candidato == 0:
        divisible = True
        break
print("Es primo:", not divisible)
# b) Convierte tu código anterior en una función que compruebe si el número del argumento es primo o no, devolviendo True is es primo y False si no es primo. Comprueba tu función con los valores 492366587, 492366585, 48947 y 2,
#
# +
def es_primo(x):
    """Return True if x is a prime number, False otherwise.

    Fixes the original, which reported every number below 2 (0, 1 and
    negatives) as prime because the trial-division range was empty.
    Divisors are only checked up to sqrt(x): any factor above that pairs
    with a smaller one already tested, so the loop is O(sqrt(n)) instead
    of O(n).
    """
    if x < 2:
        return False  # primes are defined only for integers >= 2
    for divisor in range(2, int(x ** 0.5) + 1):
        if x % divisor == 0:
            return False
    return True
pruebas=[492366587, 492366585, 48947 , 2]
[print(f"El nº {prueba} es primo:{es_primo(prueba)}") for prueba in pruebas]
# -
# c) En el cálculo de la función anterior, una vez se ha encontrado un número que es divisible dentro del rango ya no tiene sentido comprobar el resto de números del rango. Por ejemplo si 10 ya es divisble entre 2, ya no hace falta probar de 3 en adelante pues ya sabemos que el número no es primo.
#
# Modifica la función anterior de la siguiente forma:
# - Una vez se encuentra el divisor, la iteración se interrumpe para no probar el resto de enteros.
# - La función devuelve
# - **Si es primo**: True
# - **Si no es primo**, el primer divisor mayor que 1.
#
# Puedes hacer uso del comando *break* dentro de un bucle para interrumpir este, puedes consultar más información sobre break en la documentación de python [aquí](https://docs.python.org/2/tutorial/controlflow.html).
#
# Comprueba tu función con los valores 492366585, 492366587, 48947 y 2,
# +
# Respuesta : (apartado anterior)
# -
# ### Ejercicio 2
#
# La Covid-19 es una enfermedad producida por la infección del virus SARS-CoV-2. La infección es transmisible de persona a persona y su contagiosidad depende de la cantidad del virus en las vías respiratorias. Si cada persona contagiada transmite la enfermedad a $\beta$ contactos en promedio por periodo de tiempo $t$, es posible estimar la evolución del contagio con un modelo matemático sencillo.
#
# Para $t=1$ día, las transmisiones en España se han estimado a partir de su histórico de las semanas de Febrero y Marzo del 2020 una $\beta = 0.35$ transmissiones por día por infectado.
#
# Durante un periodo de tiempo (por ejempo un día $d$) la tasa de nuevos contagios se puede estimar como una proporción al número de contagiados del periodo anterior $N$:
#
# $ \Delta N = N_{1} - N = \beta \cdot $ (1)
#
# Por tanto, podemos proyectar el número futuro de afectados como
#
# $ N_{1} = N + \beta \cdot N = (1+\beta) \cdot $ (2)
#
# En dos días:
#
# $ N_{2} = (1+\beta) \cdot N_{1} = (1+\beta)^2 \cdot $ (3)
#
# Y en general en D días tendremos
#
# $ N_{D} = (1+\beta)^D \cdot N$ (4)
#
# Asumiendo este sencillo modelo:
#
# a) Implementa una función de dos parámetros (N: población infectada inicial, D: número de días), que devuelva el cálculo de afectados para D días siguiendo la ecuación (4). Suponiendo una población afectada de 4250 (población afectada en españa a día 13 de Marzo de 2020), usa la función para calcular la población estimada en 1, 2, 7 y 30 días.
# +
def fcovid(N, D):
    """Project the infected population after D days of exponential growth.

    args.
    -----
    N: int; initially infected population.
    D: int; number of days to project.

    return.
    -------
    float; estimated number of infected people after D days, assuming a
    constant transmission rate beta = 0.35 per infected person per day.
    """
    beta = 0.35
    growth = (1 + beta) ** D
    return growth * N
afectados0=4250
dias_est=[1,2,7,30]
[print(f"Contagiados en {dias} días: {fcovid(afectados0,dias)}") for dias in dias_est]
# -
# b) Sabiendo que los Servicios de Medicina Intensiva (SMI) disponen de 3363 camas para enfermos graves, y suponiendo que un 10% de los afectados por el covid-19 requerirán de SMI y una supervivencia del 2,5% (Exitus), escribe un código que calcule:
# - El día en curso (Día)
# - El total de afectados por el virus para cada día d (Afectados)
# - El total de ingresados en SMI por el virus para cada día d (Críticos)
# - El total de Exitus por el virus para cada día d (Exitus)
# - Si los servicios de SMI no pueden aceptar los ingresados para cada día $d$ (Estado: indicando Saturación/No Saturación)
#
# Imprime en pantalla la información de cada día durante una simulación de tres semanas, suponiendo que no hay recuperaciones, con una población afectada inicial 4250 y una $\beta = 0.35$ constante.
# +
camas=3363
graves=0.1
exitus=0.025
resultado1={
"dia":[i for i in range(1,3*7+1)],
"afectados":[fcovid(afectados0,dias) for dias in range(1,3*7+1)],
"criticos":[fcovid(afectados0,dias)*graves for dias in range(1,3*7+1)],
"exitus":[fcovid(afectados0,dias)*graves*exitus for dias in range(1,3*7+1)],
}
resultado1["estado"]=["No saturacion" if(camas-resultado1['criticos'][i-1]>0) else "Saturacion" for i in range(1,3*7+1)]
[print(f"Dia {resultado1['dia'][i]} afectados: {resultado1['afectados'][i]} criticos: {resultado1['criticos'][i]} exitus: {resultado1['exitus'][i]} estado: {resultado1['estado'][i]} " ) for i in range(3*7)]
# -
# c) Convierte el código anterior en una función que genere un archivo de texto con nombre `output.txt`, siguiendo este formato:
# ```
# Dia, Afectados, Críticos, Exitus, Estado
# 0, 4250, 425, 106, No Saturación
# 1, 5737, 573, 143, No Saturación
# 2, 7745, 774, 193, No Saturación
# ...
# ```
# Con los parámetros de entrada $N$, $D$, $\beta$, camas SMI.
def datos_covid(N, D, camas):
    """Write the simulated daily epidemic evolution to 'output.txt'.

    One line per day: day number, total affected, ICU-critical patients
    (10% of affected), deaths (2.5% of critical) and whether the ICU is
    saturated that day.

    args.
    -----
    N: int; initially infected population.
    D: int; number of days to simulate.
    camas: int; available ICU (SMI) beds.

    return.
    -------
    The file object for 'output.txt' (already closed; reopen it to read).
    """
    graves = 0.1    # share of affected patients needing intensive care
    exitus = 0.025  # mortality rate among critical patients
    dias = range(1, D + 1)
    afectados = [fcovid(N, dia) for dia in dias]
    criticos = [a * graves for a in afectados]
    fallecidos = [c * exitus for c in criticos]
    # Bug fix: the original hard-coded range(1, 3*7+1) here, which produced a
    # wrong-length 'estado' list for any D other than 21 days.
    estado = [" No saturacion" if camas - c > 0 else " Saturacion" for c in criticos]
    arch = open("output.txt", "w")
    arch.write("Dia, Afectados, Criticos, Exitus, Estado \n")
    for idx, dia in enumerate(dias):
        arch.write(f"{dia} {afectados[idx]} {criticos[idx]} {fallecidos[idx]}{estado[idx]}\n")
    # Bug fix: close (and thereby flush) the file so a caller that reopens
    # 'output.txt' afterwards sees the complete contents.
    arch.close()
    return arch
archivo = datos_covid(4250, 3 * 7, 3363)
# Bug fix: the original printed the file object's repr; read() prints the contents.
with open("output.txt", "r") as archivo:
    print(archivo.read())
# ### Ejercicio 3
#
# Dado el siguiente diccionario:
d = {"Alex":344334443, "Eva":5533443, "Cristina":443355, "Jonas":33223324}
# Escribid una función que pregunte al usuario que introduzca el nombre de una persona y muestre por pantalla el nombre de la persona y su teléfono.
#
# Tened en cuenta que:
#
# - La función debe controlar que el valor introducido por el usuario es un nombre que existe en el diccionario. En caso contrario, mostrará un mensaje de error ("El nombre introducido no corresponde a ninguna persona") y devolverá el valor False.
# - Debéis tener en cuenta que el nombre de las personas que nos pasan por parámetro puede ser en minúsculas, mayúsculas o una combinación de ambas, y que debemos encontrar el número de teléfono aunque la capitalización de la cadena entrada por el usuario no sea exactamente la misma que hemos almacenado en el diccionario.
# - Suponed que no hay acentos en los nombres.
#
# Nota 1: Para realizar la actividad, tendréis que capturar un texto que entrará el usuario. Consultad la [documentación oficial de la función input](https://docs.python.org/3/library/functions.html#input) para ver cómo hacerlo.
#
# Nota 2: También tendréis que pensar cómo tratar el hecho de que el usuario pueda utilizar mayúsculas y minúsculas en la escritura del nombre en el diccionario. ¡Os animamos a usar un buscador para intentar encontrar alguna alternativa para resolver este subproblema! ¡Recordad citar las referencias que hayáis usado para resolverlo!
#
# +
d = {"Alex":344334443, "Eva":5533443, "Cristina":443355, "Jonas":33223324}
def telefono(nombre,d):
    """
    Look up a person's phone number in a name -> phone dictionary.

    Args:
        nombre: str; person's name.  Matching is case-insensitive.
        d: dict; mapping of names to phone numbers.

    Returns:
        The phone number (int), or False when the name is not in the
        dictionary (an error message is printed in that case).
    """
    # casefold() makes the comparison robust to any capitalization; the
    # original nombre.capitalize() == k test would miss keys that are not
    # single capitalized words (e.g. "McDonald" or multi-word names).
    objetivo = nombre.casefold()
    for clave, numero in d.items():
        if clave.casefold() == objetivo:
            return numero
    print("El nombre introducido no corresponde a ninguna persona")
    return False
# +
# Prompt the user for a name and look up its phone number in `d`.
print("Introduzca nombre:\n")
nombre=input()
print(nombre)
telefono(nombre,d)
# -
# ### Ejercicio 4
#
# Python dispone de un **idiom** muy útil conocido como `list comprehension`. Utilizando este **idiom**, proporcionad una expresión que devuelva las listas siguientes.
#
# Nota: Para realizar esta actividad necesitaréis investigar qué son las `list comprehension` y qué sintaxis utilizan. Para ello, se recomienda en primer lugar que utilicéis un buscador para encontrar información genérica sobre esta construcción. Después, os recomendamos que consultéis stackoverflow para ver algunos ejemplos de problemas que se pueden resolver con esta construcción.
#
#
# [stackoverflow](https://stackoverflow.com/) es un sitio de preguntas-y-respuestas muy popular entre programadores. Veréis que para la gran mayoría de las dudas que tengáis, habrá alguien que ya les habrá tenido (y consultado) anteriormente! Así pues, más allá de preguntar vosotros mismos las dudas allí (nosotros ya tenemos el foro del aula para ello!), consultar esta web os permitirá ver qué soluciones proponen otros programadores a estas dudas. A menudo habrá más de una solución a un mismo problema, y podréis valorar cuál es la más adecuada para vuestro problema.
#
# Para ver ejemplos de problemas que son adecuados para resolver con **list comprehensions**, os recomendamos leer las siguientes páginas:
# * https://stackoverflow.com/questions/12555443/squaring-all-elements-in-a-list
# * https://stackoverflow.com/questions/18551458/how-to-frame-two-for-loops-in-list-comprehension-python
# * https://stackoverflow.com/questions/24442091/list-comprehension-with-condition
# * https://stackoverflow.com/questions/41676212/i-want-to-return-only-the-odd-numbers-in-a-list
# * https://stackoverflow.com/questions/4260280/if-else-in-a-list-comprehension
#
# a) Una lista con los valores $4 x^2$ donde $x$ es cada uno de los números de la lista `list_1`:
# +
list_1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# a) 4*x^2 for every x in list_1, as a list comprehension.
[4*x**2 for x in list_1]
# -
# b) Una lista con los valores $x/(x+1)$ donde $x$ es cada uno de los números de la lista `list_1`:
# +
list_1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# b) x/(x+1) for every x in list_1 (true division, so results are floats).
[x/(x+1) for x in list_1]
# -
# c) Una lista con los valores $4x^2/(4x^2-1)$ donde $x$ es cada uno de los números de la lista `list_1`:
# +
list_1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# c) 4x^2 / (4x^2 - 1) for every x in list_1.
[(4*x**2)/((4*x**2)-1) for x in list_1]
# -
# ### Ejercicio 5
#
# El siguiente ejercicio consiste en pasar un número en base 16 (hexadecimal, 0-9/A-F) a base 10 (decimal). Para hacerlo, debéis crear una **función** que dado un _string_ que representa un número en hexadecimal, por ejemplo, `AE3F`, devuelva el número
# natural correspondiente. En este caso, el resultado sería `44607`.
# +
def conversor_HaD(h):
    """
    Convert a hexadecimal string (base 16) to a decimal integer (base 10).

    Args:
        h: str; hexadecimal number, e.g. "AE3F".  Unlike the original
           digit-by-digit loop, lowercase digits ("ae3f") are also accepted.

    Returns:
        d: int; the corresponding base-10 value, e.g. 44607.
    """
    # int() with an explicit base performs the positional expansion
    # sum(digit_i * 16**i) that the original implemented by hand with a
    # manual A-F lookup table (which raised ValueError on lowercase input).
    return int(h, 16)
# -
conversor_HaD("AE3F")
# ### Ejercicio 6
#
# Las excepciones son errores detectados en tiempo de ejecución. Pueden y deben ser manejadas por el programador para minimizar el riesgo de que un determinado programa falle de forma no controlada. Escribid, en lenguaje Python, cómo generar y capturar la siguiente excepción: **ZeroDivisionError**.
a=1
b=0
try:
    # 1/0 raises ZeroDivisionError at runtime; print(res) is never reached.
    res=a/b
    print(res)
except ZeroDivisionError as err:
    # The exception object carries the human-readable reason ("division by zero").
    print('Error:', err)
# ### Ejercicio 7
#
# Completad el código necesario para calcular el número de vocales y de consonantes respectivamente de un texto.
# +
import string
def contar_vocales_y_consonantes(texto):
    """
    Count the vowels and consonants contained in `texto`.

    Only alphabetic characters are counted: digits, whitespace and
    punctuation are ignored.  (The original version stripped punctuation
    but still counted digits and non-space whitespace as consonants, and
    it printed leftover debug output.)

    Args:
        texto: str; the text to analyze.  Accents are assumed absent.

    Returns:
        (num_vocales, num_consonantes): tuple of ints.
    """
    vocales = set("aeiouAEIOU")
    num_vocales = 0
    num_consonantes = 0
    for letra in texto:
        if not letra.isalpha():
            continue  # skip digits, spaces, punctuation, newlines...
        if letra in vocales:
            num_vocales += 1
        else:
            num_consonantes += 1
    return num_vocales, num_consonantes
# Sample text; the trailing backslash continues the string literal, so the
# whole quote is a single line with no embedded newline.
texto = "Orbiting Earth in the spaceship, I saw how beautiful our planet is. \
People, let us preserve and increase this beauty, not destroy it!"
num_vocales, num_consonantes = contar_vocales_y_consonantes(texto)
print("El número de vocales es %d." % num_vocales)
print("El número de consonantes es %d." % num_consonantes)
# -
# ### Ejercicio 8
#
# Escribid una función que dada una lista de planetas del sistema solar, pregunte al usuario que introduzca una posición y muestre el planeta correspondiente a dicha posición. Por ejemplo, si tenemos la siguiente lista: `['Mercurio', 'Venus', 'Tierra', 'Marte']` y el usuario nos ha introducido la posición `3`, hemos de mostrar como resultado por pantalla: `Tierra`.
#
# Consideraciones:
#
# - La posición que introduzca el usuario tiene que ser un número entero estrictamente positivo.
# - La función debe controlar el acceso a una posición fuera de la lista mediante una **excepción**. Por ejemplo, en el caso anterior debemos mostrar una mensaje de error si el usuario pide acceder a la posición 10.
# +
def planeta(planetas):
    """
    Ask the user for a 1-based position and return the planet at that
    position of `planetas`.

    Prints an error message and returns False when the input is not a
    strictly positive integer or falls outside the list.
    """
    try:
        indice = int(input())
        if indice < 1:
            # Without this guard, 0 would resolve to planetas[-1] and
            # negative positions would silently wrap via Python indexing.
            raise IndexError("la posición debe ser un entero estrictamente positivo")
        res = planetas[indice - 1]
    except (IndexError, ValueError) as err:
        # ValueError covers non-numeric input such as "tres".
        print('Error:', err)
        res = False
    return res
planetas=['Mercurio', 'Venus', 'Tierra', 'Marte']
print(planeta(planetas))
# -
# ### Ejercicio 9
#
# Dada una lista de planetas del sistema solar, determinad cuales de estos planetas tienen una masa superior a la de la Tierra. Por ejemplo, si la lista inicial es `['Venus', 'Marte', 'Saturno']`, el resultado que mostraríamos por pantalla sería `['Saturno']` ya que el planeta Saturno tiene una masa `95.2` veces superior a la Tierra.
#
# Consideraciones:
#
# - Debéis tener en cuenta que el nombre de los planetas que nos pasan por parámetro puede estar en minúsculas, mayúsculas o una combinación de ambas.
# - Podéis asumir que no habrá acentos en el nombre de los planetas.
# - Debéis determinar aquellos planetas que tienen una masa estrictamente superior a la de la Tierra.
# - No habrá planetas repetidos en la lista que nos pasan por parámetro.
# Planetary masses relative to Earth (Tierra = 1).
masas = {'Mercurio': 0.06, 'Venus': 0.82, 'Tierra': 1, 'Marte': 0.11, 'Jupiter': 317.8,
         'Saturno': 95.2, 'Urano': 14.6, 'Neptuno': 17.2, 'Pluto': 0.0022}
# +
def planetas_mas_grandes_que_Tierra(planetas):
    """
    Return the planets in `planetas` whose mass is strictly greater than
    Earth's, with capitalization normalized.

    Name matching is case-insensitive.  If no planet qualifies, returns an
    explanatory string (interface kept from the original).  Unknown planet
    names are skipped instead of crashing (the original compared
    None > masas['Tierra'], raising TypeError).
    """
    masas = {'Mercurio': 0.06, 'Venus': 0.82, 'Tierra': 1, 'Marte': 0.11, 'Jupiter': 317.8,
             'Saturno': 95.2, 'Urano': 14.6, 'Neptuno': 17.2, 'Pluto': 0.0022}
    masa_tierra = masas['Tierra']
    # get(..., 0) treats unrecognized names as "not heavier than Earth".
    planetas_masa_superior = [p.capitalize() for p in planetas
                              if masas.get(p.capitalize(), 0) > masa_tierra]
    if not planetas_masa_superior:
        return "Ninguno de los planetas dados tiene masa superior a la Tierra"
    return planetas_masa_superior
# Usage examples for the function above.
print(planetas_mas_grandes_que_Tierra(['Venus', 'Mercurio', 'Marte']))
print(planetas_mas_grandes_que_Tierra(['Jupiter', 'Saturno', 'Pluto']))
print(planetas_mas_grandes_que_Tierra(['urano', 'tierra', 'neptuno', 'marte', 'Venus']))
print(planetas_mas_grandes_que_Tierra(['Tierra', 'MeRcUrIo', 'PLUTO', 'SATURNO']))
# More examples can be added here if useful.
# -
# ### Ejercicio 10
#
# Dada una cadena de caracteres, `s`, de longitud `n` y un número entero positivo `k`, siendo `k` un divisor de `n`, podemos dividir la cadena `s` en `n / k` sub-cadenas de la misma longitud.
#
# Escribid una función que, dada una cadena `s` y un número entero `k`, devuelva las `n/k` sub-cadenas teniendo en cuenta las siguientes consideraciones:
#
# - El orden de los caracteres en las sub-cadenas debe ser el mismo que en la cadena original.
# - Todos los caracteres de las sub-cadenas deben aparecer una única vez. Es decir, si un caracter se repite dentro de una sub-cadena, sólo hemos de mostrar la primera ocurrencia.
#
# Por ejemplo, si tenemos
# <code>
# s = AABCCAADA
# k = 3
# </code>
#
# el resultado a mostrar por pantalla sería:
# <code>
# AB
# CA
# AD
# </code>
#
# Tenemos que la longitud de la cadena es 9 y por lo tanto, podemos formar 3 sub-cadenas:
#
# `AAB -> AB` (el caracter A se repite dos veces)
#
# `CCA -> CA` (el caracter C se repite dos veces)
#
# `ADA -> AD` (el caracter A se repite dos veces)
# +
def no_replicas(cad):
    """Drop repeated characters from `cad`, keeping first occurrences in order."""
    # dict.fromkeys preserves insertion order (Python 3.7+), so this is an
    # order-stable de-duplication.
    return "".join(dict.fromkeys(cad))
def subcadenas(s,n):
    """
    Split `s` into len(s)/n substrings of length `n` (n must divide len(s))
    and de-duplicate each one.

    Bug fixed: the original sliced chunks of length len(s)//n instead of n,
    which only matched the spec when len(s) == n*n (as it accidentally does
    in the 'AABCCAADA', k=3 example).
    """
    return [no_replicas(s[ini:ini + n]) for ini in range(0, len(s), n)]
# -
# Example from the exercise statement: expected output ['AB', 'CA', 'AD'].
s = 'AABCCAADA'
k = 3
subcadenas(s,k)
| 02_PYTHON/week06/pra/your-solution-here/MC_week6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from pyrfume.odorants import from_cids
# Experiment 1: pairwise similarity ratings between odorant mixtures.
df1 = pd.read_csv('experiment1_comparisons.csv',
                  header=0,index_col=0,names=['A','B','Similarity'])
# Mixture id -> string of PubChem CIDs (stored as "[a, b, ...]" in the raw file).
df1_cids = pd.read_csv('experiment1_cids.csv', index_col=0)
# Normalize "[a, b]" into "a,b".
# NOTE(review): applymap is deprecated in pandas >= 2.1 in favour of
# DataFrame.map — confirm the pinned pandas version before upgrading.
df1_cids = df1_cids.applymap(lambda x:x.replace('[','').replace(']','').strip().replace(' ',','))
df1_cids
# Replace mixture ids in columns A/B with their comma-joined CID strings.
df1.loc[:, ['A','B']] = df1.loc[:, ['A','B']].applymap(lambda x:df1_cids.loc[x]['Mixture Cids'])
df1.head()
# Experiment 2: same preprocessing as experiment 1.
df2 = pd.read_csv('experiment2_comparisons.csv',
                  header=0,index_col=0,names=['A','B','Similarity'])
df2_cids = pd.read_csv('experiment2_cids.csv', index_col=0)
df2_cids = df2_cids.applymap(lambda x:x.replace('[','').replace(']','').strip().replace(' ',','))
df2_cids
df2.loc[:, ['A','B']] = df2.loc[:, ['A','B']].applymap(lambda x:df2_cids.loc[x]['Mixture Cids'])
df2.head()
# Experiment 3 stores CIDs directly in A/B — no id-to-CID substitution needed.
df3 = pd.read_csv('experiment3_comparisons.csv',
                  header=0,index_col=0,names=['A','B','Similarity'])
df3.head()
# Concatenate all three experiments into the main behavior table.
df = pd.concat([df1, df2, df3])
df.to_csv('behavior-main.csv')
# Collect every CID used in any experiment.
# NOTE(review): args=(',') is a plain string, not a 1-tuple; it only works
# because it is unpacked element-wise — args=(',',) would be the explicit form.
cids1 = df1_cids['Mixture Cids'].apply(str.split, args=(',')).sum()
cids2 = df2_cids['Mixture Cids'].apply(str.split, args=(',')).sum()
cids3 = list(df3[['A', 'B']].values.ravel())
cids = cids1 + cids2 + cids3
# De-duplicate as ints; resulting order is unspecified (set round-trip).
cids = list(set(map(int, cids)))
# Fetch molecule metadata (via pyrfume) and persist it.
molecules_info = from_cids(cids)
pd.DataFrame(molecules_info).set_index('CID').to_csv('molecules-info.csv')
| snitz_2013/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Multivariate Linear Regression
# ## 1. Multiple Features
# Now we turn our attention to a new version of linear regression which is more powerful due to its ability to work with multiple variables/features.
# For instance, in our previous example of Portland house pricing we had:
# Import libraries
import pandas as pd
# Read the data
# Read the data; low_memory=False parses each column in a single pass so
# dtypes are inferred consistently across the whole file.
data = pd.read_csv("house_pricing.csv", low_memory=False)
# Show the single-feature view used by the univariate example.
data[['size', 'price']].head()
# ... a single feature $x$, namely the size of the houses in $ft^2$, and we wanted to use it to predict $y$, the price of the house in USD.
#
# With this, the form of our hypothesis was:
#
# $$h_{\theta}(x) = \theta_0 + \theta_1 x.$$
# Now, it is easy to imagine that we won't only have the size of the house available, but also the number of bedrooms, the number of floors, the number of car spaces, the number of bathrooms, the age of the house, among others, and our data would look like:
data.head()
# These additional features give us much more information in our target of predicting the price.
# Let's define some notation:
#
# - $m$: Number of training examples.
# - $n$: Number of input variables/features.
# - $\boldsymbol{x}=\left[x_1, \dots, x_n\right]^T\in\mathbb{R}^n$: "Input" variables/features.
# - $x_j, \quad j=1,2,\dots,n$: $j-th$ input variable/feature.
# - $y$: "Output" variable/"target" variable.
# - $\boldsymbol{x}^{(i)}$: Input (features) of $i-$th training example.
# - $\boldsymbol{x}^{(i)}_j$: Value of feature $j$ in $i-$th training example.
# For instance, in the above data we may define $x_1$ as the house size, and $x_2$ as the number of bedrooms. In this sense:
#
# - $n=2$ is the number of input features.
#
# - The fourth training example is:
# $$
# x^{(4)} = \left[
# \begin{array}{c}
# 1416 \\
# 2
# \end{array}
# \right]\in\mathbb{R}^{2}.
# $$
#
# - The value of the second feature in the fourth training example:
# $$x^{(4)}_2 = 2.$$
# Now that we have **multiple features**, the form of our hypothesis turns into:
#
# $$
# h_{\theta}(\boldsymbol{x}) = \theta_0 + \theta_1 x_1 + \dots + \theta_n x_n.
# $$
# If we conveniently define a $0$ feature: $x_0 = 1$ ($x^{(i)}_0=1$ for all $i\in\{1, \dots, m\}$), the new feature vector $\boldsymbol{x}$ takes the form:
#
# $$
# \boldsymbol{x}=\left[
# \begin{array}{c}
# x_0 \\
# x_1 \\
# \vdots \\
# x_n
# \end{array}
# \right]\in\mathbb{R}^{n+1}
# $$
#
# as well as the parameter vector $\boldsymbol{\theta}$:
#
# $$
# \boldsymbol{\theta}=\left[
# \begin{array}{c}
# \theta_0 \\
# \theta_1 \\
# \vdots \\
# \theta_n
# \end{array}
# \right]\in\mathbb{R}^{n+1}
# $$
# Taking these into account, we can rewrite the hypothesis function as:
#
# \begin{align}
# h_{\theta}(\boldsymbol{x}) & = \theta_0 x_0 + \theta_1 x_1 + \dots + \theta_n x_n \\
# & = \boldsymbol{x}^T \boldsymbol{\theta} \\
# & = \boldsymbol{\theta}^T \boldsymbol{x}
# \end{align}
# ## 2. Gradient Descent for Multiple Variables
# With the above notation for the hypothesis function, we can define the cost function for multivariate linear regression in the same way we defined it for univariate linear regression:
# $$
# J(\boldsymbol{\theta}) = \frac{1}{2m}\sum_{i=1}^{m}(h_{\theta}(\boldsymbol{x}^{(i)}) - y^{(i)})^2,
# $$
# where the parameter vector $\boldsymbol{\theta}$ is the $n+1$ dimensional vector:
#
# $$
# \boldsymbol{\theta}=\left[
# \begin{array}{c}
# \theta_0 \\
# \theta_1 \\
# \vdots \\
# \theta_n
# \end{array}
# \right]\in\mathbb{R}^{n+1}
# $$
# Equivalently, considering a matrix of all the training examples $\boldsymbol{X}$:
#
# $$
# \boldsymbol{X} = \left[
# \begin{array}{c}
# \boldsymbol{x}^{(1)} \ ^T \\
# \boldsymbol{x}^{(2)} \ ^T \\
# \vdots \\
# \boldsymbol{x}^{(m)} \ ^T
# \end{array}
# \right] = \left[
# \begin{array}{ccccc}
# x_0^{(1)} & x_1^{(1)} & x_2^{(1)} & \dots & x_n^{(1)} \\
# x_0^{(2)} & x_1^{(2)} & x_2^{(2)} & \dots & x_n^{(2)} \\
# \vdots & \vdots & \vdots & \ddots & \vdots \\
# x_0^{(m)} & x_1^{(m)} & x_2^{(m)} & \dots & x_n^{(m)}
# \end{array}
# \right] = \left[
# \begin{array}{ccccc}
# 1 & x_1^{(1)} & x_2^{(1)} & \dots & x_n^{(1)} \\
# 1 & x_1^{(2)} & x_2^{(2)} & \dots & x_n^{(2)} \\
# \vdots & \vdots & \vdots & \ddots & \vdots \\
# 1 & x_1^{(m)} & x_2^{(m)} & \dots & x_n^{(m)}
# \end{array}
# \right] \in \mathbb{R}^{m \times (n+1)}
# $$
#
# The cost function can be rewritten as:
#
# $$
# J(\boldsymbol{\theta}) = \frac{1}{2m}\sum_{i=1}^{m}(\boldsymbol{x}^{(i)} \ ^T \boldsymbol{\theta} - y^{(i)})^2 = \frac{1}{2m}\left\lvert\left\lvert\boldsymbol{X}\boldsymbol{\theta} - \boldsymbol{y}\right\rvert\right\rvert^2,
# $$
# Moreover, the gradient descent algorithm takes the same form:
# - Initialize $\theta_j$, for $j\in\{0, 1,\dots, n\}$.
#
# - repeat until convergence {
# $$\theta_j := \theta_j - \alpha \frac{\partial}{\partial \theta_j} J(\boldsymbol{\theta}); \qquad \text{ for } j\in\{0, 1,\dots, n\}$$
# }
#
# where:
#
# \begin{align}
# \frac{\partial}{\partial \theta_j} J(\boldsymbol{\theta}) & = \frac{1}{m}\sum_{i=1}^{m}\left(h_{\theta}(\boldsymbol{x}^{(i)})-y^{(i)}\right)x_j^{(i)} \qquad \text{ for } j\in\{0, 1,\dots, n\}
# \end{align}
# Or, in a vector form:
#
# - Initialize $\boldsymbol{\theta}$.
#
# - repeat until convergence {
# $$\boldsymbol{\theta} := \boldsymbol{\theta} - \alpha \frac{\partial}{\partial \boldsymbol{\theta}} J(\boldsymbol{\theta})$$
# }
#
# where $\frac{\partial}{\partial \boldsymbol{\theta}} J(\boldsymbol{\theta})$ is the gradient of the function $J$:
#
# $$
# \frac{\partial}{\partial \boldsymbol{\theta}} J(\boldsymbol{\theta}) = \frac{1}{m}\left[
# \begin{array}{c}
# \sum_{i=1}^{m}\left(h_{\theta}(\boldsymbol{x}^{(i)})-y^{(i)}\right)x_0^{(i)} \\
# \sum_{i=1}^{m}\left(h_{\theta}(\boldsymbol{x}^{(i)})-y^{(i)}\right)x_1^{(i)} \\
# \vdots \\
# \sum_{i=1}^{m}\left(h_{\theta}(\boldsymbol{x}^{(i)})-y^{(i)}\right)x_n^{(i)}
# \end{array}
# \right] = \frac{1}{m} \boldsymbol{X}^T (\boldsymbol{X}\boldsymbol{\theta} - \boldsymbol{y})\in\mathbb{R}^{n+1}.
# $$
# ### 2.1. Feature scaling
#
# One problem that appears when we have multiple features is that each one of them is probably on a different scale.
#
# For instance in our example:
data.head()
# x_1 (size) feature range
data['size'].min(), data['size'].max()
# x_2 (number of bedrooms) feature range — the original comment said x_1 by mistake
data['n_bedrooms'].min(), data['n_bedrooms'].max()
# - The $x_1$ feature (size) varies in the range $0 - 4500$ $ft^2$.
# - The $x_2$ feature (number of bedrooms) varies in the range $1 - 5$.
# A good idea is to **scale** these features so that they vary within similar ranges. Following this idea, the gradient descent algorithm can converge more quickly.
# Concretely, we may select instead of the above features:
#
# - $x_1 = \frac{\text{size }(ft^2)}{5000}$
# - $x_2 = \frac{\text{number of bedrooms}}{5}$
#
# so that $0 \leq x_1 \leq 1$ and $0 \leq x_2 \leq 1$.
# In general, a feature scaling approach consists on getting every feature into approximately a $-1 \leq x_i \leq 1$ range ($i\in\{1, \dots, n\}$; the feature $x_0=1$ is excluded).
# One way to do this is:
#
# 1. **Mean normalization:** Replace each feature $x_i$ with $x_i - \mu_i$ to make features have approximately zero mean (do not apply to $x_0=1$).
#
# 2. **Scale:** After the mean normalization, the next step is to scale the resulting feature according to some factor (the span of the variable $\max x_i-\min x_i$ or the standard deviation $\sigma_i$). This is
#
# $$
# x_i \to \frac{x_i - \mu_i}{s_i},
# $$
#
# where $s_i$ may be one of $\max x_i-\min x_i$ and $\sigma_i$.
# ### 2.2. Learning rate
#
# The gradient descent algorithm has the form
#
# $$
# \boldsymbol{\theta} := \boldsymbol{\theta} - \alpha \frac{\partial}{\partial \boldsymbol{\theta}} J(\boldsymbol{\theta}).
# $$
#
# One natural question that arises is: How to choose the **learning rate** $\alpha$?
# The first thing is to make sure that the **gradient descent algorithm** is working properly. This can be done by looking at the cost function vs. the number of iterations plot. It should look as follows:
# Import libraries
from matplotlib import pyplot as plt
import numpy as np
# %matplotlib inline
# Sketch of a healthy training run: J(theta) decreasing monotonically.
plt.figure(figsize=(6, 4))
x = np.linspace(0, 10)
plt.plot(x, 11 * np.exp(-x / 3), label='')
# Draw the axes through the origin.
plt.axvline(x=0, c='k', lw=2)
plt.axhline(y=0, c='k', lw=2)
# Hide every tick and tick label: the plot is purely qualitative.
plt.tick_params(
    axis='both',
    which='both',
    bottom=False,
    top=False,
    left=False,
    right=False,
    labelbottom=False,
    labeltop=False,
    labelleft=False,
    labelright=False)
plt.axis([-1, 10, -1, 10])
plt.xlabel('No. of iterations')
plt.ylabel(r'$J(\theta)$')
# Regarding the gradient descent algorithm:
#
# - The cost function $J(\boldsymbol{\theta})$ should decrease after every iteration.
# - The number of iterations it takes to converge varies significantly across different applications.
# If the cost function increases:
# Sketch of a diverging run: J(theta) increasing — learning rate too large.
plt.figure(figsize=(6, 4))
x = np.linspace(0, 10)
plt.plot(x, 4 * np.exp(x / 10), label='')
# Draw the axes through the origin.
plt.axvline(x=0, c='k', lw=2)
plt.axhline(y=0, c='k', lw=2)
# Hide every tick and tick label: the plot is purely qualitative.
plt.tick_params(
    axis='both',
    which='both',
    bottom=False,
    top=False,
    left=False,
    right=False,
    labelbottom=False,
    labeltop=False,
    labelleft=False,
    labelright=False)
plt.axis([-1, 10, -1, 10])
plt.xlabel('No. of iterations')
plt.ylabel(r'$J(\theta)$')
# Then it is a clear signal that the gradient descent algorithm is not working. The reason is that the selected learning rate $\alpha$ is too big.
#
# > For **sufficently small** $\alpha$, $J(\theta)$ should decrease on every iteration.
#
# > But if $\alpha$ is too small, gradient descent can be slow to converge.
# ## 3. Features and Polynomial Regression
# ### Features generation
# It is not mandatory to use the data "as is". We can perform some operations on the data to obtain new features that may make more sense to use.
# For instance, consider the case where we are given the frontage and the depth of a house in order to predict its price. As before, we can use a hypothesis function like
#
# $$
# h_{\theta}(\boldsymbol{x}) = \theta_0 + \theta_1 \times frontage + \theta_2 \times depth.
# $$
#
# On the other hand, we may think that what actually determines the price of the house is its size. Then, we can define a feature $x = frontage \times depth$ and use the hypothesis function
#
# $$
# h_{\theta}(x) = \theta_0+ \theta_1 x.
# $$
# ### Polynomial features
# We can use all the machinery we have developed for multivariate linear regression to fit polynomial hypotheses to our data.
#
# For instance, we could try a cubic hypothesis like:
#
# $$
# h_{\theta}(x) = \theta_0 + \theta_1 x + \theta_2 x^2 + \theta_3 x^3.
# $$
#
# We can think of it as a multivariate hypothesis:
#
# \begin{align}
# h_{\theta}(x) & = \theta_0 + \theta_1 x_1 + \theta_2 x_2 + \theta_3 x_3 \\
# & = \theta_0 + \theta_1 x + \theta_2 x^2 + \theta_3 x^3
# \end{align}
#
# with $x_1 = x$, $x_2 = x^2$ and $x_3 = x^3$.
#
# If we choose the features like this, then the feature scaling becomes very important. For example, if $x$ is the house size, then we have that
#
# $$
# x_1 \sim 1000
# $$
#
# $$
# x_2 \sim 1000000
# $$
#
# $$
# x_3 \sim 1000000000
# $$
# <script>
# $(document).ready(function(){
# $('div.prompt').hide();
# $('div.back-to-top').hide();
# $('nav#menubar').hide();
# $('.breadcrumb').hide();
# $('.hidden-print').hide();
# });
# </script>
#
# <footer id="attribution" style="float:right; color:#808080; background:#fff;">
# Created with Jupyter by <NAME>. Based on the content of the Machine Learning course offered through coursera by Prof. <NAME>.
# </footer>
| Week2/1MultivariateLinearRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
# %matplotlib inline
#https://www.kaggle.com/uciml/breast-cancer-wisconsin-data
# Breast Cancer Wisconsin dataset (see the Kaggle link above).
data = pd.read_csv('data.csv');
data.info()
data.head()
# Drop the patient identifier and the empty trailing column.
data=data.drop(["id","Unnamed: 32"],axis=1)
# The first ten measurement columns (the "*_mean" features).
features_mean= list(data.columns[1:11])
# Correlation heatmap of the mean features.
plt.figure(figsize=(10,10))
sns.heatmap(data[features_mean].corr(), annot=True, square=True, cmap='coolwarm')
plt.show()
# +
# Encode the target: malignant -> 1, benign -> 0.
diag_map = {'M':1, 'B':0}
data['diagnosis'] = data['diagnosis'].map(diag_map)
print(data)
# -
X = data.drop(["diagnosis"],axis=1)
y = data["diagnosis"]
# Hold out 20% for testing; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
# ExtraTreeClassifier is a single extremely-randomized tree (sklearn.tree),
# not the ExtraTreesClassifier ensemble.
from sklearn.tree import ExtraTreeClassifier
Model = ExtraTreeClassifier()
Model.fit(X_train, y_train)
y_pred = Model.predict(X_test)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
# accuracy_score is symmetric in its two arguments, so (y_pred, y_test) is fine.
print('accuracy is',accuracy_score(y_pred,y_test))
| 13-Extra Tree Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from sklearn import preprocessing
import matplotlib.pyplot as plt
plt.rc("font", size=14)
from sklearn.linear_model import LogisticRegression
# sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
import seaborn as sns
sns.set(style="white")
sns.set(style="whitegrid", color_codes=True)
# Absolute Windows path to the 2019 crash dataset (raw string keeps backslashes).
file = r'''C:\Users\aaronhxzhang\2019CrashDatasetNumberOne.xlsx'''
data = pd.ExcelFile(file)
df1 = data.parse('Sheet1')
df1 #read the dataset (for this new data, I cut out a lot of the useless categories)
df1.columns
print(df1.shape)
print(list(df1.columns))
df1.head()
# Our goal is to ultimately figure out what factors have the strongest correlation with crashes that have injuries involved in them
# +
#First we will cut down on some categories that have too many options
# -
df1['Crash Severity'].unique()
# Collapse the verbose severity labels into four buckets with a single
# mapping instead of four chained np.where passes (all keys are distinct and
# no mapped value is itself remapped, so one replace() is equivalent).
df1['Crash Severity'] = df1['Crash Severity'].replace({
    'Property damage only (none injured)': 'Minor',
    'Non-fatal injury': 'Medium',
    'Fatal injury': 'Major',
    'Not Reported': 'Unknown',
})
df1['Crash Severity'].unique()
df1 = df1.drop(columns='City/Town')
df1 = df1.drop(columns='Crash Date')
df1
df1 = df1.drop(columns='Crash Number')
df1
df1['Manner of Collision'].unique()
# Merge sideswipe variants and rear-to-rear, and mark missing values Unknown.
# The original compared against the *string* 'nan', which never matches a
# real missing value (np.nan), so genuinely missing entries were left as-is;
# fillna handles them while the 'nan' key is kept for string-typed residue.
df1['Manner of Collision'] = df1['Manner of Collision'].replace({
    'Sideswipe, opposite direction': 'Sideswipe',
    'Sideswipe, same direction': 'Sideswipe',
    'Rear-to-rear': 'Rear-end',
    'nan': 'Unknown',
}).fillna('Unknown')
df1['Manner of Collision'].unique()
df1['Hit & Run'].unique()
# Strip the redundant phrasing down to Yes/No in one mapping.
df1['Hit & Run'] = df1['Hit & Run'].replace({
    'No hit and run': 'No',
    'Yes, hit and run': 'Yes',
})
df1['Hit & Run'].unique()
df1
df1['Weather Condition'].unique()
# Collapse every raw weather label into six buckets with a single mapping
# instead of ~30 chained np.where passes.  Identity rewrites from the
# original chain ('Clear'->'Clear', 'Unknown'->'Unknown') are omitted as
# no-ops; every other key/value pair is carried over verbatim.  All keys are
# distinct and no produced value is later used as a key before it would have
# been produced, so one replace() is equivalent to the sequential chain.
weather_map = {
    'Clear/Unknown': 'Clear', 'Clear/Cloudy': 'Clear',
    'Clear/Clear': 'Clear', 'Clear/Other': 'Clear',
    'Rain': 'Rainy', 'Cloudy/Rain': 'Rainy', 'Rain/Cloudy': 'Rainy',
    'Clear/Rain': 'Rainy', 'Rain/Rain': 'Rainy',
    'Rain/Fog, smog, smoke': 'Rainy', 'Rain/Unknown': 'Rainy',
    'Rain/Other': 'Rainy', 'Rain/Clear': 'Rainy',
    'Cloudy/Clear': 'Cloudy', 'Cloudy/Unknown': 'Cloudy',
    'Cloudy/Cloudy': 'Cloudy', 'Cloudy/Other': 'Cloudy',
    'Snow/Other': 'Snowy', 'Snow/Cloudy': 'Snowy', 'Rain/Snow': 'Snowy',
    'Snow': 'Snowy', 'Snow/Rain': 'Snowy',
    'Not Reported': 'Unknown', 'Unknown/Other': 'Unknown',
    'Unknown/Unknown': 'Unknown',
    'Sleet, hail (freezing rain or drizzle)': 'Sleet/Hail',
    'Rain/Sleet, hail (freezing rain or drizzle)': 'Sleet/Hail',
}
df1['Weather Condition'] = df1['Weather Condition'].replace(weather_map)
df1['Weather Condition'].unique()
df1
df1['Age of Driver - Youngest Known'].unique()
# Bucket the youngest-driver ages into four coarse groups with one mapping.
# The original chain omitted '>84', which the matching "Oldest Known"
# cleanup does map to '65+' — added here for consistency between columns.
age_map = {
    '16-20': '16-24', '21-24': '16-24',
    '25-34': '25-44', '35-44': '25-44',
    '45-54': '45-64', '55-64': '45-64',
    '65-74': '65+', '75-84': '65+', '>84': '65+',
}
df1['Age of Driver - Youngest Known'] = df1['Age of Driver - Youngest Known'].replace(age_map)
df1['Age of Driver - Youngest Known'].unique()
df1
# Fold rare and typo speed-limit values into the nearest common bucket in a
# single pass: 0/5/10/15 -> 20, the 325 typo -> 35, 50 -> 55, 60 -> 65.
# (Replaces seven chained np.where passes; keys are distinct and no mapped
# value is itself a key, so one replace() is equivalent.)
df1['Speed Limit'] = df1['Speed Limit'].replace({
    0: 20, 5: 20, 10: 20, 15: 20,
    325: 35, 50: 55, 60: 65,
})
df1['Speed Limit'].unique()
df1
df1['Ambient Light'].unique()
# Reduce lighting conditions to Daylight / Dark / Unknown with one mapping.
df1['Ambient Light'] = df1['Ambient Light'].replace({
    'Dawn': 'Daylight',
    'Dark - lighted roadway': 'Dark',
    'Dusk': 'Dark',
    'Dark - unknown roadway lighting': 'Dark',
    'Dark - roadway not lighted': 'Dark',
    'Not reported': 'Unknown',
})
df1['Ambient Light'].unique()
df1
df1['Roadway Intersection Type'].unique()
# Normalize intersection types: treat non-junction / unreported rows as
# 'None', merge on/off ramps, and shorten the remaining labels.  One
# replace() mapping replaces nine chained np.where passes.
df1['Roadway Intersection Type'] = df1['Roadway Intersection Type'].replace({
    'Driveway': 'None',
    'Not at junction': 'None',
    'Unknown': 'None',
    'Not reported': 'None',
    'Railway grade crossing': 'railroad crossing',
    'Four-way intersection': 'four-way',
    'Five-point or more': 'Five-way or more',
    'On-ramp': 'Highway ramp',
    'Off-ramp': 'Highway ramp',
})
df1['Roadway Intersection Type'].unique()
df1
df1['Age of Driver - Oldest Known'].unique()
# Same four age buckets as the "Youngest Known" cleanup (this column's
# original chain already included the '>84' -> '65+' mapping).
age_map = {
    '16-20': '16-24', '21-24': '16-24',
    '25-34': '25-44', '35-44': '25-44',
    '45-54': '45-64', '55-64': '45-64',
    '65-74': '65+', '75-84': '65+', '>84': '65+',
}
df1['Age of Driver - Oldest Known'] = df1['Age of Driver - Oldest Known'].replace(age_map)
df1['Age of Driver - Oldest Known'].unique()
df1['Age of Driver - Youngest Known'].unique()
df1
df1['Number of NonFatal Injuries'].unique()
df1['Number of Fatal Injuries'].unique()
df1['Number of NonFatal Injuries'].value_counts()
df1['Number of Fatal Injuries'].value_counts()
# # Draw Connections solely between crashes and non-fatal injuries
# Will make df2 a new data structure which does not include any fatal crashes
#
df2 = df1.drop(columns = 'Number of Fatal Injuries')
df2
#Represent crashes that had injuries (no matter how many) with a 1, and those that had none with a 0 (oops did it for df1)
df1['Number of NonFatal Injuries']=np.where(df1['Number of NonFatal Injuries'] == 1, 1, df1['Number of NonFatal Injuries'])
df1['Number of NonFatal Injuries']=np.where(df1['Number of NonFatal Injuries'] == 2, 1, df1['Number of NonFatal Injuries'])
df1['Number of NonFatal Injuries']=np.where(df1['Number of NonFatal Injuries'] == 3, 1, df1['Number of NonFatal Injuries'])
df1['Number of NonFatal Injuries']=np.where(df1['Number of NonFatal Injuries'] == 4, 1, df1['Number of NonFatal Injuries'])
df1['Number of NonFatal Injuries']=np.where(df1['Number of NonFatal Injuries'] == 5, 1, df1['Number of NonFatal Injuries'])
df1['Number of NonFatal Injuries']=np.where(df1['Number of NonFatal Injuries'] == 7, 1, df1['Number of NonFatal Injuries'])
df1['Number of NonFatal Injuries']=np.where(df1['Number of NonFatal Injuries'] == 0, 0, df1['Number of NonFatal Injuries'])
df2['Number of NonFatal Injuries']=np.where(df2['Number of NonFatal Injuries'] == 1, 1, df2['Number of NonFatal Injuries'])
df2['Number of NonFatal Injuries']=np.where(df2['Number of NonFatal Injuries'] == 2, 1, df2['Number of NonFatal Injuries'])
df2['Number of NonFatal Injuries']=np.where(df2['Number of NonFatal Injuries'] == 3, 1, df2['Number of NonFatal Injuries'])
df2['Number of NonFatal Injuries']=np.where(df2['Number of NonFatal Injuries'] == 4, 1, df2['Number of NonFatal Injuries'])
df2['Number of NonFatal Injuries']=np.where(df2['Number of NonFatal Injuries'] == 5, 1, df2['Number of NonFatal Injuries'])
df2['Number of NonFatal Injuries']=np.where(df2['Number of NonFatal Injuries'] == 7, 1, df2['Number of NonFatal Injuries'])
df2['Number of NonFatal Injuries']=np.where(df2['Number of NonFatal Injuries'] == 0, 0, df2['Number of NonFatal Injuries'])
df2
# Data Exploration (from here on down)
df2['Number of NonFatal Injuries'].value_counts()
sns.countplot(x='Number of NonFatal Injuries', data=df2, palette='hls')
# BUG FIX: savefig must come before show() -- show() flushes and clears the
# current figure, so the original order wrote an empty image to disk.
plt.savefig('count_plot')
plt.show()
# We can see that there are 714 crashes with no injuries against 239 crashes that had injuries
# numeric_only=True keeps the historical behaviour (averaging only numeric
# columns); newer pandas versions raise on the mixed-dtype frame otherwise.
df2.groupby('Number of NonFatal Injuries').mean(numeric_only=True)
df2.columns
df2.groupby('Manner of Collision').mean(numeric_only=True)
df2.groupby('Weather Condition').mean(numeric_only=True)
df2.groupby('Age of Driver - Youngest Known').mean(numeric_only=True)
# %matplotlib inline
pd.crosstab(df2['Manner of Collision'], df2['Number of NonFatal Injuries']).plot(kind='bar')
plt.title('Injury Frequency by Collision Type')
plt.xlabel('Manner of Collision')
plt.ylabel('Frequency of Injury')
plt.savefig('injuries_vs_collisiontype')
# Manner of Collision could be a very good predictor of whether there will be an injury or not
# time to start the RFE process
df2.columns
# One-hot encode every categorical predictor and append the dummy columns.
cat_vars = ['Crash Severity', 'Manner of Collision', 'Age of Driver - Youngest Known',
            'Age of Driver - Oldest Known', 'Hit & Run', 'Road Surface',
            'Ambient Light', 'Weather Condition', 'Roadway Intersection Type']
for var in cat_vars:
    # NOTE(review): the original built dummies from df1; df2 holds identical
    # values for these columns (same index), so sourcing them from df2 keeps
    # behaviour while staying self-consistent.  A dead throwaway assignment
    # (cat_list='var'+'_'+var, immediately overwritten) was removed.
    dummies = pd.get_dummies(df2[var], prefix=var)
    df2 = df2.join(dummies)
# Keep everything except the raw (pre-encoded) categorical columns.
to_keep = [col for col in df2.columns.values.tolist() if col not in cat_vars]
data_final = df2[to_keep]
data_final.columns.values
data_final_vars = data_final.columns.values.tolist()
# Target column name (kept as a list, matching how it is consumed below).
y = ['Number of NonFatal Injuries']
X = [col for col in data_final_vars if col not in y]
# +
# Recursive feature elimination: rank every one-hot predictor against the
# binary injury target.
# +
from sklearn import datasets
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
# -
# BUG FIX: pass n_features_to_select by keyword -- recent scikit-learn
# versions removed the positional form.  Ranking/support are unchanged.
rfe = RFE(logreg, n_features_to_select=1)
# .values.ravel() hands the target over as a 1-D array, silencing the
# DataConversionWarning raised for single-column DataFrames.
rfe = rfe.fit(data_final[X], data_final[y].values.ravel())
print(rfe.support_)
print(rfe.ranking_)
# rfe selected 4,5,6,11,25,34,38,43,46,47 + 3 13 14 15 24 32 37 40 + 35 36 + 17 22
# +
# The RFE gave 51 true/false responses corresponding to the 51 features in X which were compared against the only feature in y
# which is 'Number of NonFatal Injuries' (the 51 features do not include 'Number of NonFatal Injuries')
# RFE selected X[3], X[4], X[5], X[10], X[24], X[33], X[37], X[42], X[45], X[46] + X[2], X[12], X[13], X[14], X[23], X[31], X[36], X[39]
X[3], X[4], X[5], X[10], X[24], X[33], X[37], X[42], X[45], X[46]
# +
#RFE selected 'Crash Severity_Medium','Crash Severity_Minor','Crash Severity_Unknown','Manner of Collision_Sideswipe','Hit & Run_Yes','Road Surface_Wet','Weather Condition_Clear','Weather Condition_Unknown','Roadway Intersection Type_None','Roadway Intersection Type_T-intersection'
# -
# Inspect the column names behind the RFE-selected indices.
X[2], X[12], X[13], X[14], X[23], X[31], X[36], X[39], X[16], X[21], X[34], X[35]
# +
# Final design matrix: the dummy columns kept after feature selection.
cols=['Crash Severity_Unknown',
      'Manner of Collision_Sideswipe',
      'Hit & Run_Yes',
      'Road Surface_Wet',
      'Weather Condition_Clear',
      'Weather Condition_Unknown',
      'Roadway Intersection Type_None',
      'Roadway Intersection Type_T-intersection']
X=data_final[cols]
y=data_final['Number of NonFatal Injuries']
# -
# # Implementing the Model
import statsmodels.api as sm
# NOTE(review): sm.Logit is fit without an explicit intercept column
# (no sm.add_constant) -- confirm this is intentional.
logit_model=sm.Logit(y,X)
result=logit_model.fit()
print(result.summary())
# Local import keeps this cell self-contained (harmless if an upstream cell
# already imported it; no import is visible in this section otherwise).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(logreg.score(X_test, y_test)))
from sklearn import model_selection
from sklearn.model_selection import cross_val_score
# BUG FIX: modern scikit-learn raises if random_state is set while
# shuffle=False; shuffle=True is required for the seed to have any effect.
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=7)
modelCV = LogisticRegression()
scoring = 'accuracy'
results = model_selection.cross_val_score(modelCV, X_train, y_train, cv=kfold, scoring=scoring)
print("10-fold cross validation average accuracy: %.3f" % (results.mean()))
from sklearn.metrics import confusion_matrix
# Renamed the result so it no longer shadows the confusion_matrix function.
cm = confusion_matrix(y_test, y_pred)
print(cm)
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# BUG FIX: AUC should be computed from predicted probabilities, not hard 0/1
# predictions, which systematically understates the ROC area.
logit_roc_auc = roc_auc_score(y_test, logreg.predict_proba(X_test)[:, 1])
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:, 1])
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
# Correct order here: save the figure before show() clears it.
plt.savefig('Log_ROC')
plt.show()
| 2019 Main with New Data-First Test with Logistic Regression .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
from childes_mi.utils.paths import PHONBANK_DFS
from childes_mi.utils.general import flatten
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm.autonotebook import tqdm
# Load the per-transcript index; the dataset name is the third '_' field of
# the transcript root name.
transcript_df = pd.read_pickle(PHONBANK_DFS/'transcript_df.pickle')
transcript_df['dataset'] = [i.split('_')[2] for i in transcript_df.transcript_root_name.values]
transcript_df[:3]
# Naturalistic English corpora only.
datasets_to_use = ['Providence', # 0;8-3;3 spontaneous interactions with their parents (usually their mothers) at home https://phonbank.talkbank.org/access/Eng-NA/Providence.html
                   'Davis', # 0;7-3;0 natural interactions and situations that occurred in their daily lives https://phonbank.talkbank.org/access/Eng-NA/Davis.html
                   'Goad', # 0;8-2;11 in their homes in a naturalistic setting https://phonbank.talkbank.org/access/Eng-NA/Goad.html
                   ]
# Restrict to the chosen datasets and to English-language transcripts.
dataset_mask = np.array([i in datasets_to_use for i in transcript_df.dataset.values])
transcript_df = transcript_df[dataset_mask]
transcript_df = transcript_df[transcript_df.language == 'eng']
len(transcript_df)
# Individual pickles: one per participant and one per transcript.
participants = list((PHONBANK_DFS / 'participants').glob('*.pickle'))
transcripts = list((PHONBANK_DFS / 'transcripts').glob('*.pickle'))
participants[:2], transcripts[:2]
len(transcripts), len(participants)
# Concatenate everything into two flat frames (all datasets, unfiltered).
all_transcript_df = pd.concat([pd.read_pickle(i) for i in tqdm(transcripts)])
len(all_transcript_df)
all_transcript_df[:3]
all_participant_df = pd.concat([pd.read_pickle(i) for i in tqdm(participants)])
len(all_participant_df)
all_participant_df[:3]
import re
def convert_age(age_year):
    """Convert a CHILDES ISO-8601-style age string (e.g. ``"P1Y2M20D"``) to months.

    The day component rounds the result up by one month when more than 15
    days are given; a missing day component contributes nothing.  Raises
    ``AttributeError`` if *age_year* does not start with ``P<years>Y<months>``.
    """
    # Raw string fixes the invalid-escape-sequence warning the original
    # non-raw pattern triggered; the day group may legitimately be empty.
    m = re.match(r"P(\d+)Y(\d+)M?(\d?\d?)D?", age_year)
    age_month = int(m.group(1)) * 12 + int(m.group(2))
    # Empty day group (some corpora omit days) is simply skipped -- same
    # behaviour as the original's try/except around int('').
    if m.group(3) and int(m.group(3)) > 15:
        age_month += 1
    return age_month
# Ages in months; NaN where the participant row has no parsable age string.
ages = [convert_age(age) if type(age) == str else np.nan for age in tqdm(all_participant_df.age.values) ]
# Histogram of participant ages in years, 0-10 in 24 bins.
fig, ax = plt.subplots()
ax.hist(np.array(ages)/12, bins=np.linspace(0,10,25));
ax.set_title('age of participant')
# ### Create a phone dataset of english
all_participant_df.role.unique()
def get_transcript_info(transcript_subset, participant_row):
    """Build a one-row DataFrame of phone/orthography info for one participant.

    Collects the participant's IPA and orthography arrays from their utterance
    rows in *transcript_subset*, alongside identifying metadata.

    NOTE(review): reads the module-level ``dataset`` variable set by the
    driving loop below -- confirm before reusing this function elsewhere.
    """
    # Keep only this participant's utterance rows.
    participant_ts = transcript_subset[
        transcript_subset.speaker == participant_row.participant_id
    ]
    # Age in years, 3 d.p.; non-string ages are unparsable -> NaN.
    if type(participant_row.age) is not str:
        age = np.nan
    else:
        age = round(convert_age(participant_row.age) / 12, 3)
    # Single-row frame; the phone/orthography cells hold whole arrays.
    phone_df = pd.DataFrame(
        [[
            participant_row.name,
            participant_row.transcript_id,
            age,
            dataset,
            participant_row.language,
            participant_row.sex,
            participant_ts.ipa_actual.values,
            participant_ts.orthography.values,
            participant_row.xml_loc,
        ]],
        columns=[
            "name",
            "transcript_id",
            "age",
            "dataset",
            "language",
            "sex",
            "phones",
            "orthography",
            "xml_loc",
        ],
    )
    return phone_df
# +
#def get_transcript_info(transcript_subset, participant_row):
# return
# -
from joblib.externals.loky import set_loky_pickler
from joblib import parallel_backend
from joblib import Parallel, delayed
from joblib import wrap_non_picklable_objects
# String form of the XML path, used as the join key across the three frames.
all_transcript_df['xml_str'] = [i.as_posix() for i in all_transcript_df.xml_loc]
all_participant_df['xml_str'] = [i.as_posix() for i in all_participant_df.xml_loc]
transcript_df['xml_str'] = [i.as_posix() for i in transcript_df.xml_loc]
# Only the child being studied, not parents/investigators.
target_child_df = all_participant_df[all_participant_df.role == 'Target Child']
# + code_folding=[]
# For each chosen dataset, extract one phone row per transcript in parallel,
# then stack everything into a single frame.
phone_df = []
with Parallel(n_jobs=-1) as parallel:
    for dataset in tqdm(datasets_to_use):
        # subset dataset
        ds_trdf = transcript_df[transcript_df.dataset == dataset]
        print("~~~~~~~~~~~~~~~~~~~~", dataset, "~~~~~~~~~~~~~~~~~~~~")
        # for transcripts in dataset
        phone_df.append(
            pd.concat(
                parallel(
                    delayed(get_transcript_info)(
                        # utterance rows belonging to this transcript
                        transcript_subset=all_transcript_df[
                            all_transcript_df.xml_str == transcript_row.xml_str
                        ],
                        # the target child's participant record for it
                        participant_row=target_child_df[
                            target_child_df.xml_str == transcript_row.xml_str
                        ].iloc[0],
                    )
                    for idx, transcript_row in tqdm(
                        ds_trdf.iterrows(), total=len(ds_trdf)
                    )
                )
            )
        )
phone_df = pd.concat(phone_df)
| notebooks/phonbank2/3.0-prep-phonbank-dataset.ipynb |
# ##### Copyright 2021 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # golomb8
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/examples/golomb8.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a>
# </td>
# <td>
# <a href="https://github.com/google/or-tools/blob/master/examples/python/golomb8.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a>
# </td>
# </table>
# First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab.
# !pip install ortools
# +
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the Golomb ruler problem.
This model aims at maximizing radar interferences in a minimum space.
It is known as the Golomb Ruler problem.
The idea is to put marks on a rule such that all differences
between all marks are all different. The objective is to minimize the length
of the rule.
"""
from absl import app
from absl import flags
from ortools.constraint_solver import pywrapcp
FLAGS = flags.FLAGS
# We disable the following warning because it is a false positive on constraints
# like: solver.Add(x == 0)
# pylint: disable=g-explicit-bool-comparison
# Create the solver.
solver = pywrapcp.Solver('golomb ruler')
size = 8
var_max = size * size
all_vars = list(range(0, size))
marks = [solver.IntVar(0, var_max, 'marks_%d' % i) for i in all_vars]
objective = solver.Minimize(marks[size - 1], 1)
solver.Add(marks[0] == 0)
# We expand the creation of the diff array to avoid a pylint warning.
diffs = []
for i in range(size - 1):
for j in range(i + 1, size):
diffs.append(marks[j] - marks[i])
solver.Add(solver.AllDifferent(diffs))
solver.Add(marks[size - 1] - marks[size - 2] > marks[1] - marks[0])
for i in range(size - 2):
solver.Add(marks[i + 1] > marks[i])
solution = solver.Assignment()
solution.Add(marks[size - 1])
collector = solver.AllSolutionCollector(solution)
solver.Solve(
solver.Phase(marks, solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE), [objective, collector])
for i in range(0, collector.SolutionCount()):
obj_value = collector.Value(i, marks[size - 1])
time = collector.WallTime(i)
branches = collector.Branches(i)
failures = collector.Failures(i)
print(('Solution #%i: value = %i, failures = %i, branches = %i,'
'time = %i ms') % (i, obj_value, failures, branches, time))
time = solver.WallTime()
branches = solver.Branches()
failures = solver.Failures()
print(('Total run : failures = %i, branches = %i, time = %i ms' %
(failures, branches, time)))
| examples/notebook/examples/golomb8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/main/notebooks/elegy_intro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="KLnKyx6CWEH_"
# # Introduction to Elegy
#
# This is slightly modified from
# https://poets-ai.github.io/elegy/getting-started/high-level-api/
# and
# https://poets-ai.github.io/elegy/getting-started/low-level-api/
#
#
# + [markdown] id="A0kNzIO81GY5"
# In this tutorial we will explore the basic features of **Elegy**. If you are a Keras user you should feel at home, if you are currently learning JAX things will appear much more streamlined. To get started you will first need to install the following dependencies:
# + id="4HaiTv5AVxy-"
# %%capture
# !pip install git+https://github.com/deepmind/dm-haiku
# #!pip install -q clu ml-collections git+https://github.com/google/flax
# + id="LvmBdeZq1GY6"
# %%capture
# ! pip install --upgrade pip
# ! pip install elegy datasets matplotlib
# + [markdown] id="0f_2f5hG1GZA"
#
# ## Loading the Data
# In this tutorial we will train a Neural Network on the MNIST dataset, for this we will first need to download and load the data into memory. Here we will use the `datasets` library to load the dataset.
# + colab={"base_uri": "https://localhost:8080/", "height": 427, "referenced_widgets": ["65bfe5fdb102457db4619ffd6b7463f5", "fa722095ce6e4e2dbe403c3645d17000", "8244358b593a467cbde52c83df67e35c", "28b749a8db614eb5a3f56ab534c5f32f", "<KEY>", "<KEY>", "<KEY>", "6c48d5ff4f46403abc7f7eac20e64917", "<KEY>", "<KEY>", "13fe2d18dad94a12b31a8a5e7908a06b", "<KEY>", "2ac027f6ea0948d99fdd57f96e6fdd3b", "<KEY>", "ceb03809f4ac4a40aaef2add6edfe309", "606c7b874aff4b11bb934bff8900e366", "<KEY>", "<KEY>", "<KEY>", "e37f60c497dd40c5b9e4eaf37dd01ecc", "<KEY>", "<KEY>", "<KEY>", "44762eead99a4617b4d882fddac169b4", "6069d489ce7f46169c1cdda804782128", "<KEY>", "ca300b85e15f42889cdcf656dddf69a8", "a556c7207df846d3a41391ece0527190", "<KEY>", "a7be91ef0ff34d10ad216e20fdf011cc", "4733bf403c1a4d899ce661cff39aa8d4", "<KEY>", "63447861258a4b54a89a2a931d85bfef", "2e62d672d4d2475ab1455def9b4dc6a6", "64b18b8c5330468e8ed4d9394d064a05", "efe34cbbd56e49ff8191a9c79f571f4d", "fa5d055a4f3340c19652abf238657540", "<KEY>", "7d333f18d34f477e89861900f93fc410", "<KEY>", "<KEY>", "a0017d924a6b4038a78e3ff552fe3b65", "9a13fa17fde949c79971d348de4454a3", "<KEY>", "<KEY>", "f3c5e242299547c081448e73cb77991d", "60e6069f287042158887368a3e9dd82f", "0a8fc257e3b448a5b8ed8f3939d572f0", "c8979e4e99ff46c58641c0d84a79965c", "<KEY>", "081b37dd3e9a430aad936afac00416e5", "<KEY>", "<KEY>", "b2c66d1eaffa456588c01ae302ee0821", "<KEY>", "c785e7d7eda441339013bcbe92c071f8", "<KEY>", "be8681df1cb148efa4c201736d9520c6", "a1a69bacae0149018534de6ab680eae3", "39d354b2b31b466eab68b33a858ecf5a", "5e23e2a8724240eeaa99809992671647", "a4c58c4ebea64f39947a98b37d2fb5c8", "<KEY>", "0fa253cc99504c7bb8c11ae00de85cca", "9531d15333ff4267ac0dbb1e8f1d1117", "d73c1ebbd21a4630a0a016c5e3b766a4", "2d1568c817214a1da0fd11ed6b09ebe9", "<KEY>", "3ed8f7dc1a0049888134fa770ad9153c", "ec566c7eaa15486f8ef067abe13d2f13", "<KEY>", "d1b3ef38a2cd4e6395548d7b313ec0b0", "10c42275cfa64e64939e943c1b0e30da", "bc4eba43d00d47d5b9d9efb2d113cced", 
"ed233c1e41cf49e6ad5f6339529f81b8", "16f1ad87df5c4dda9435bacfbb06dee0", "2d83f376e2b34ce7ae327a81486ef417", "<KEY>", "<KEY>", "9d3ba301be584337a6ae1bae092c5c55", "<KEY>", "91168283c5d44ce79dfa5bb0d010d3f7", "<KEY>", "576fc9c180d94ad797627950d04d1f17", "f1938cda0d9f40a7a19ac2efe5eb36c2", "<KEY>", "752588e00f874ff1a2cb2762ead5708b", "<KEY>", "85e299ea195245bb85be1eb5734355d8", "5a4bcaf30a9341b99622b17e9781a07a", "f3573e3bee0e4c6380c4e75bdfe30e51", "e902ba929b9946138ac799f8ac09dc24", "896382da324f49ea9494b00c6907a2a3", "ef97d8c5872a4ef692d42d626b24e42f", "793151466be2442b9eedf8bc360396fa", "b17afcde823449d68dac46bf150f99e2", "c0a74cce432c4871b96419c1cd2caf61", "050ce16772cb40ceb6ca3585506077be", "0b398f1ed4e442db9aea4ebe1eaadcee", "6378d9eda6244cf1a436881095ae711b", "<KEY>", "54a89a8689de47abb10561f323a1c2e4", "e199ee5934da43ac9317d01780d37af2", "<KEY>", "3271f2a5b8934483a6529b5ac43a597c", "58d95da7881840c19138133e4f38bcfa", "8bbe55fc32664457a357cd55818e7598", "ce518f6402e34da2adb1a7690eccf72b", "<KEY>", "fd6e51500909497b812770edcdc7ec86", "157651a249334c069f5384e761edfb7a", "<KEY>", "<KEY>", "ed77e3127b624ea698392e5c8e3da384", "<KEY>", "3eacca1a3d4e4d4fa56ac79ea99baa05", "c60300a85bea434083077e855578a1a0", "<KEY>", "497eff029ba141dcaf1cf05f9856809d", "0bf729399556477fbb6289708ed7fee5", "fc48a2c65caa40318fa2b4f5cbd4459f"]} id="RxbhMx0v1GZB" outputId="0ab3538b-6912-4cca-84a7-62b0995522ec"
from datasets.load import load_dataset
# Download MNIST via HuggingFace datasets and expose the splits as NumPy arrays.
dataset = load_dataset("mnist")
dataset.set_format("np")
X_train = dataset["train"]["image"]
y_train = dataset["train"]["label"]
X_test = dataset["test"]["image"]
y_test = dataset["test"]["label"]
print("X_train:", X_train.shape, X_train.dtype)
print("y_train:", y_train.shape, y_train.dtype)
print("X_test:", X_test.shape, X_test.dtype)
print("y_test:", y_test.shape, y_test.dtype)
# + [markdown] id="2qt-5RCB1GZG"
# ## Defining the Architecture
# The first thing we need to do is define our model's architecture inside a `Module`, to do this we just create a class that inherites from `Module` and implement a `__call__` method. In this example we will create a simple 2 layer MLP:
# + id="K993A1K11GZH"
import jax.numpy as jnp
import jax
import elegy as eg
class MLP(eg.Module):
    """Two-hidden-layer MLP over raw-pixel images, producing 10-class logits."""

    def __init__(self, n1: int, n2: int):
        # Widths of the two hidden layers; submodules are created lazily
        # inside the @compact __call__.
        self.n1 = n1
        self.n2 = n2

    @eg.compact
    def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
        # Scale raw pixel bytes into [0, 1] and flatten to a feature vector.
        hidden = eg.nn.Flatten()(x.astype(jnp.float32) / 255.0)
        # Two hidden layers with ReLU activations.
        hidden = jax.nn.relu(eg.nn.Linear(self.n1)(hidden))
        hidden = jax.nn.relu(eg.nn.Linear(self.n2)(hidden))
        # Output layer: one logit per digit class.
        return eg.nn.Linear(10)(hidden)
# + [markdown] id="SyD7EpFI1GZL"
# This code should feel familiar to most Keras / PyTorch users, the main difference is that we are using the `@compact` decorator to define submodules inline (e.g. Linear) inline, this tends to produce much shorter and readable code.
#
# ## Creating the Model
#
# Now that we have this module we can create an Elegy `Model` which is Elegy's central API:
# + id="dM8PdElf1GZL"
import optax
# Model = architecture + loss (crossentropy plus L2 weight penalty) +
# accuracy metric + Adam optimizer; there is no separate compile step.
model = eg.Model(
    module=MLP(n1=300, n2=100),
    loss=[
        eg.losses.Crossentropy(),
        eg.regularizers.L2(l=1e-4),
    ],
    metrics=eg.metrics.Accuracy(),
    optimizer=optax.adam(1e-3),
)
# + [markdown] id="h44wXhQk1GZQ"
# If you are a Keras user this code should look familiar, main differences are:
#
# * You need to pass a `module` with the architecture.
# * `loss` and `metrics` are a bit more flexible in that they do not need to match the label's structure.
# * There is no `compile` step, all its done in the constructor.
# * For the `optimizer` you can use any `optax` optimizer.
#
# As in Keras, you can get a rich description of the model by calling `Model.summary` with a sample input:
# + id="U_EKAZLMAbU1" colab={"base_uri": "https://localhost:8080/", "height": 578} outputId="d7effc89-5c73-4b53-fafe-0b166751b39c"
model.summary(X_train[:64])
# + [markdown] id="e62MGzVz1GZV"
# ## Training the Model
# We are now ready to pass our model some data to start training, like in Keras this is done via the `fit` method which contains more or less the same signature. Elegy support a variety of input data sources like Tensorflow Dataset, Pytorch DataLoader, Elegy DataLoader, and Python Generators, check out the guide on Data Sources for more information.
#
# The following code will train our model for `10` epochs while limiting each epoch to `200` steps and using a batch size of `64`:
# + id="dfRIJ1uG1GZW" colab={"base_uri": "https://localhost:8080/"} outputId="8eaa3b5e-44d3-4eb9-cfd5-24fcbe992d17"
# %%time
# Train for 10 epochs of 200 steps at batch size 64; the callback writes the
# best checkpoint (by validation metric) to models/high-level.
history = model.fit(
    inputs=X_train,
    labels=y_train,
    epochs=10,
    steps_per_epoch=200,
    batch_size=64,
    validation_data=(X_test, y_test),
    shuffle=True,
    verbose=3,
    callbacks=[eg.callbacks.ModelCheckpoint("models/high-level", save_best_only=True)],
)
# + [markdown] id="1rNWFoBJsGky"
# The `ModelCheckpoint` callback will periodically save the model in a folder called `"models/high-level"`, we will use it later.
#
# `fit` returns a `History` object which of the losses and metrics during training which we can visualize.
# + [markdown] id="-zB8XuKXsKC4"
# ## Plotting learning curves
# + colab={"base_uri": "https://localhost:8080/"} id="gpG_5jkzqMeT" outputId="b05317e3-d7c9-47d2-bd48-c8f78994076c"
print(history)
print(history.history)
print(history.history.keys())
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="DAfc-JNd1GZb" outputId="4d4d940f-7088-455b-f9ac-4863d603f54f"
import matplotlib.pyplot as plt
def plot_history(history):
    """Plot each logged training metric against its validation counterpart."""
    metric_names = list(history.history.keys())
    # Keys come in pairs ("x" and "val_x"), so there is one panel per pair.
    n_panels = len(metric_names) // 2
    plt.figure(figsize=(14, 24))
    for panel, name in enumerate(metric_names[:n_panels], start=1):
        plt.subplot(n_panels, 1, panel)
        plt.plot(history.history[name], "o-", label=f"Training {name}")
        plt.plot(history.history[f"val_{name}"], "x-", label=f"Validation {name}")
        plt.legend(loc="lower right")
        plt.ylabel(name)
        plt.title(f"Training and Validation {name}")
    plt.show()
plot_history(history)
# + colab={"base_uri": "https://localhost:8080/"} id="ARVwPnkx4squ" outputId="e44592ad-bfe6-4d52-b6df-95f515dc1cb2"
ev = model.evaluate(x=X_test, y=y_test)
print(ev)
# + [markdown] id="fRlK89tC1GZf"
# ## Generating Predictions
#
# Having our trained model we can now get some samples from the test set and generate some predictions. Lets select `9` random images and call `.predict`:
# + id="FCNlJ74S1GZf"
import numpy as np
# Draw 9 random test images and run them through the trained model.
idxs = np.random.randint(0, len(X_test), size=(9,))
x_sample = X_test[idxs]
y_pred = model.predict(x=x_sample)
# + colab={"base_uri": "https://localhost:8080/"} id="Edp8jhuwqxZv" outputId="57a721e2-a2a9-4b3f-e95b-5fc6c77d56ac"
print(y_pred.shape)
# + [markdown] id="JJ1Km-w41GZo"
# Easy right? Finally lets plot the results to see if they are accurate.
# + colab={"base_uri": "https://localhost:8080/", "height": 716} id="9rnFhoOl1GZp" outputId="3e694fa2-a315-45cf-ebc4-5b992ecba624"
# 3x3 grid: each panel shows a sample digit titled with its predicted class.
plt.figure(figsize=(12, 12))
for i in range(3):
    for j in range(3):
        k = 3 * i + j
        plt.subplot(3, 3, k + 1)
        plt.title(f"{np.argmax(y_pred[k])}")
        plt.imshow(x_sample[k], cmap="gray")
# + [markdown] id="WfmdgSqR1GZv"
#
#
# ## Serialization
# To serialize the `Model` you can use the `model.save(...)`, this will create a folder with some files that contain the model's code plus all parameters and states.
# + id="-ztl-pEarVHX"
model.save("mymodel")
# + colab={"base_uri": "https://localhost:8080/"} id="OYex8WNBrWx2" outputId="638e9c54-fd4d-41a1-b165-f94cf8e1a7a8"
# !ls
# + colab={"base_uri": "https://localhost:8080/"} id="vABtpQgcrX6G" outputId="4b884d9c-61a0-4505-8139-f530fc578aa7"
# !ls mymodel
# + [markdown] id="w01zUhoLr5dR"
#
#
# However since we had previously used the `ModelCheckpoint` callback we can load it using `elegy.load`. Lets get a new model reference containing the same weights and call its `evaluate` method to verify it loaded correctly:
# + colab={"base_uri": "https://localhost:8080/"} id="Ts86LE44r7X4" outputId="1a315995-3fec-4f88-83e5-75ba1332b8de"
# !ls models
# + colab={"base_uri": "https://localhost:8080/"} id="I_jyTb3hr9G4" outputId="7eb70769-eb5a-4b20-d213-30bb513c9c40"
# !ls models/high-level
# + colab={"base_uri": "https://localhost:8080/"} id="lak2-b9m1GZw" outputId="9dda9355-3a1a-4aa1-eaa7-d91883ab835f"
# current model reference
print("current model id:", id(model))
model.evaluate(x=X_test, y=y_test)
model_old = model
# load model from disk (the best checkpoint written by ModelCheckpoint above)
model = eg.load("models/high-level")
# new model reference -- a different object id confirms a fresh instance
print("new model id: ", id(model))
# check that it works!
model.evaluate(x=X_test, y=y_test)
# + [markdown] id="V0nLXQ4zAbU5"
#
# You can also serialize your Elegy Model as a TensorFlow SavedModel which is portable to many platforms many platforms and services, to do this you can use the `saved_model` method. `saved_model` will convert the function that creates the predictions for your Model (`pred_step`) in Jax to a TensorFlow version via `jax2tf` and then serialize it to disk.
#
# The function `saved_model` accepts a sample to infer the shapes, the path where the model will be saved at, and a list of batch sizes for the different signatures it accepts. Due to some current limitations in Jax it is not possible to create signatures with dynamic dimensions so you must specify a couple which might fit you needs.
# + id="VGpy1lbxAbU5" colab={"base_uri": "https://localhost:8080/"} outputId="31a1ddb2-7b65-407c-afa6-03e0d29897b8"
model.saved_model(x_sample, "saved-models/high-level")
# + colab={"base_uri": "https://localhost:8080/"} id="sBy2Z0MaslDD" outputId="144ea220-537d-4763-bd90-55f144acc0e0"
# !ls saved-models/high-level
# + [markdown] id="B6gNfkB-AbU5"
# We can test our saved model by loading it with TensorFlow and generating a couple of predictions as we did previously:
# + id="r9Iy0zFDAbU5" colab={"base_uri": "https://localhost:8080/", "height": 443} outputId="270520c9-647e-4c7a-8c6c-0eeff386eada"
import tensorflow as tf
saved_model = tf.saved_model.load("saved-models/high-level")
y_pred_tf = saved_model(x_sample.astype(np.int32))
plt.figure(figsize=(12, 12))
for i in range(3):
for j in range(3):
k = 3 * i + j
plt.subplot(3, 3, k + 1)
plt.title(f"{np.argmax(y_pred_tf[k])}")
plt.imshow(x_sample[k], cmap="gray")
# + [markdown] id="X7yuYgrcuCpc"
# ## Distributed training
#
# To parallelize training and inference using pmap on a mulit-core TPU you just need to add
# ```
# model = model.distributed()
# ```
# after creating the model.
# For an example, try running https://github.com/probml/pyprobml/blob/master/scripts/mnist_elegy_distributed.py on a TPU VM v3-8.
# In colab, there will not be any speedup, since there is only 1 GPU. (I have not tried TPU mode in colab.)
#
#
#
# + [markdown] id="NP90PeQTmrsP"
# ## Low-level API
# + [markdown] id="t0V7RLWOnQcL"
# ### Introduction
# + [markdown] id="F1-2T8ZZmtYu"
#
#
#
# The low-level API lets you redefine what happens during the various stages of training, evaluation and inference by implementing some methods in a custom class. Here is the list of methods you can define along with the high-level method that uses it:
#
# | Low-level Method | High-level Method |
# | :- | :- |
# | `pred_step` | `predict` |
# | `test_step` | `evaluate` |
# | `grad_step` | NA |
# | `train_step` | `fit` |
#
# Check out the guides on the low-level API for more information.
#
# In this tutorial we are going to implement Linear Classifier using pure Jax by overriding`pred_step` which defines the forward pass and `test_step` which defines loss and metrics of our model.
#
# `pred_step` returns a tuple with:
# * `y_pred`: predictions of the model
# * `states`: a `elegy.States` namedtuple that contains the states for thing like network trainable parameter, network states, metrics states, optimizer states, rng state.
#
# `test_step` returns a tuple with:
# * `loss`: the scalar loss use to calculate the gradient
# * `logs`: a dictionary with the logs to be reported during training
# * `states`: a `elegy.States` namedtuple that contains the states for thing like network trainable parameter, network states, metrics states, optimizer states, rng state.
#
# Since Jax is functional you will find that low-level API is very explicit with state management, that is, you always get the currrent state as input and you return the new state as output. Lets define `test_step` to make things clearer:
# + [markdown] id="N_mYnMxym6T7"
# ### Linear classifier
# + id="1-j-cmf9myXK"
import jax
import numpy as np
import jax.numpy as jnp
import typing as tp
import elegy as eg
M = tp.TypeVar("M", bound=eg.Model)
class LinearClassifier(eg.Model):
    """Softmax linear classifier built on Elegy's low-level API.

    Overrides ``init_step`` (parameter creation), ``pred_step`` (forward
    pass) and ``test_step`` (loss + logs); ``grad_step``/``train_step`` come
    from ``eg.Model``'s defaults, so ``fit`` works once an optimizer is given.
    """

    # Trainable parameters, registered as Elegy pytree nodes.
    w: jnp.ndarray = eg.Parameter.node()
    b: jnp.ndarray = eg.Parameter.node()

    def __init__(
        self,
        features_out: int,
        loss: tp.Any = None,
        metrics: tp.Any = None,
        optimizer=None,
        seed: int = 42,
        eager: bool = False,
    ):
        # Number of output classes; the input feature count is inferred
        # lazily from the first batch in init_step.
        self.features_out = features_out
        super().__init__(
            loss=loss,
            metrics=metrics,
            optimizer=optimizer,
            seed=seed,
            eager=eager,
        )

    def init_step(self: M, key: jnp.ndarray, inputs: jnp.ndarray) -> M:
        """Create w/b from the first batch's shape and initialize the optimizer."""
        features_in = np.prod(inputs.shape[1:])
        self.w = jax.random.uniform(
            key,
            shape=[
                features_in,
                self.features_out,
            ],
        )
        self.b = jnp.zeros([self.features_out])
        assert self.optimizer is not None
        self.optimizer = self.optimizer.init(self)
        return self

    def pred_step(self: M, inputs: tp.Any) -> eg.PredStepOutput[M]:
        """Forward pass: flatten, scale pixel bytes to [0, 1], apply w.x + b."""
        # flatten + scale
        inputs = jnp.reshape(inputs, (inputs.shape[0], -1)) / 255
        # linear
        logits = jnp.dot(inputs, self.w) + self.b
        return logits, self

    def test_step(
        self: M,
        inputs,
        labels,
    ) -> eg.TestStepOutput[M]:
        """Compute crossentropy loss and accuracy logs for one batch."""
        model = self
        # forward
        logits, model = model.pred_step(inputs)
        # crossentropy loss
        # NOTE(review): assumes Elegy delivers labels as a dict keyed by
        # "target" even though fit() is called with a bare label array --
        # confirm against the elegy version in use.
        target = jax.nn.one_hot(labels["target"], self.features_out)
        loss = jnp.mean(-jnp.sum(target * jax.nn.log_softmax(logits), axis=-1))
        # metrics
        logs = dict(
            acc=jnp.mean(jnp.argmax(logits, axis=-1) == labels["target"]),
            loss=loss,
        )
        return loss, logs, model
# + [markdown] id="8YhK4JETnA2C"
# Notice the following:
# * We define a bunch of arguments with specific names, Elegy uses Dependency Injection so you can just request what you need.
# * `initializing` tells us if we should initialize our parameters or not, here we are directly creating them ourselves but if you use a Module system you can conditionally call its `init` method here.
# * Our model is defined by a simple linear function.
# * We defined a simple crossentropy loss and an accuracy metric, and added both to the logs.
# * We set the updated `States.net_params` with the `w` and `b` parameters so we get them as an input on the next run after they are initialized.
# * `States.update` offers a clean way to immutably update the states without having to copy all fields to a new States structure.
#
# Remember `test_step` only defines what happens during `evaluate`; however, `Model`'s default implementation has a structure where one method is defined in terms of another:
#
# ```
# pred_step ⬅ test_step ⬅ grad_step ⬅ train_step
# ```
#
# Because of this, we get the `train_step` / `fit` for free if we just pass an optimizer to the constructor, as we are going to do next:
# + [markdown] id="Qek5Z2eznVEg"
# ### Training
# + id="uxYqZ8c2nBGR"
import optax

# Passing an optimizer is what enables the inherited `train_step` / `fit`.
model = LinearClassifier(features_out=10, optimizer=optax.adam(1e-3))
# + colab={"base_uri": "https://localhost:8080/"} id="zDfWqKI0nIf3" outputId="fab87be9-891a-4f81-dec6-5e33811d8b1c"
# Train on (X_train, y_train), defined in an earlier cell of the notebook.
# NOTE(review): `labels` is forwarded to test_step, which indexes
# labels["target"] -- presumably Elegy wraps the raw array; confirm.
history = model.fit(
    inputs=X_train,
    labels=y_train,
    epochs=10,
    steps_per_epoch=200,
    batch_size=64,
    validation_data=(X_test, y_test),
    shuffle=True,
    # Keep only the best weights seen so far on disk.
    callbacks=[eg.callbacks.ModelCheckpoint("models/low-level", save_best_only=True)],
)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="getHKQaPnOEo" outputId="821165d3-1e5c-41cf-d3d1-cef98e41cd20"
import matplotlib.pyplot as plt


def plot_history(history):
    """Draw one subplot per metric, overlaying training and validation curves.

    Assumes every metric key has a matching ``val_``-prefixed counterpart,
    so half of ``history.history``'s keys are the base metric names.
    """
    logs = history.history
    num_metrics = len(logs.keys()) // 2
    plt.figure(figsize=(14, 24))
    metric_names = list(logs.keys())[:num_metrics]
    for row, name in enumerate(metric_names, start=1):
        plt.subplot(num_metrics, 1, row)
        plt.plot(logs[name], "o-", label=f"Training {name}")
        plt.plot(logs[f"val_{name}"], "x-", label=f"Validation {name}")
        plt.legend(loc="lower right")
        plt.ylabel(name)
        plt.title(f"Training and Validation {name}")
    plt.show()


plot_history(history)
# + [markdown] id="ZaPdDJw3nbRy"
# Notice that the logs are very noisy; this is because for this example we didn't use cumulative metrics, so the reported value is just the value for the last batch of that epoch, not the value for the entire epoch. To fix this we could use some of the modules in `elegy.metrics`.
# + id="cMhxydyunbmN"
| notebooks/misc/elegy_intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# NOTE(review): when this file is executed top-to-bottom as a script, the
# first cell used `pd` before pandas was imported (the import lived in the
# second cell below); importing it here fixes that NameError.
import pandas as pd

input_file = "../../Nano_Reads_Tools/VirSorterNanoSimAssemblies.csv"
df = pd.read_csv(input_file)
# +
import pandas as pd
from sklearn.metrics import precision_score, recall_score, f1_score

# Earlier experiment kept for reference:
#input_file = "../VirFinder/VirFinder_Data/Uniform_Medium_Complexity_Simulation.csv"
input_file = "../../Nano_Reads_Tools/VirFinder/NanoSim_Raw.csv"
# NOTE: this silently replaces the dataframe loaded in the previous cell.
df = pd.read_csv(input_file)
#df2 = pd.read_csv("../../Contig_Abundance_By_Class_Simulation.csv")
#simulation = input_file.split("_")[-4].split("/")[1]
#complexity = input_file.split("_")[-3]
#df1 = df2[(df2.Simulation == simulation) & (df2.Complexity == complexity)]
#Phage_Count = int(df1['Phage'])
#Phage_Count = 34
# Hard-coded number of true phage contigs for this dataset -- TODO confirm
# it matches NanoSim_Raw.csv.
Phage_Count = 2514
#Prophage_Count = int(df1['Prophage'])
# Ground-truth class is encoded as the contig-name prefix, e.g.
# "Phage_123" -> "Phage".
df['True_Label'] = [x.split("_")[0] for x in df['name']]
# -
# +
import pandas as pd
from sklearn.metrics import precision_score, recall_score, f1_score
import numpy as np
def VirFinder_Thresholding(input_file):
    """Sweep VirFinder p-value cutoffs and return the best-F1 threshold.

    For each cutoff in 0.005..0.095 (step 0.005), contigs with
    ``pvalue <= cutoff`` are treated as predicted viral, padded with the
    false negatives implied by the ground-truth contig counts, and scored
    two ways: prophages counted as bacteria (``b_dict``) and prophages
    counted as viruses (``v_dict``).

    Parameters
    ----------
    input_file : str
        Path like ``.../{Simulation}_{Complexity}_Complexity_Simulation.csv``;
        the simulation/complexity tokens are parsed from this path.

    Returns
    -------
    tuple
        ``(best_bacteria_name, [best_bacteria_f1],
           best_viral_name, [best_viral_f1])``
    """
    df = pd.read_csv(input_file)
    df2 = pd.read_csv("../../Contig_Abundance_By_Class_Simulation.csv")
    simulation = input_file.split("_")[-4].split("/")[1]
    complexity = input_file.split("_")[-3]
    df1 = df2[(df2.Simulation == simulation) & (df2.Complexity == complexity)]
    Phage_Count = int(df1['Phage'])
    Prophage_Count = int(df1['Prophage'])
    df['True_Label'] = [x.split("_")[0] for x in df['name']]
    b_dict = {}
    v_dict = {}
    for item in np.arange(0.005, 0.10, 0.005):
        # .copy() so the label replacement below cannot leak back into df
        # across iterations (and to silence SettingWithCopyWarning).
        dfy = df[df['pvalue'] <= float(item)].copy()
        counts = dfy.groupby('True_Label').count()[['name']]
        Phage_Predicted = _count_or_zero(counts, "Phage")
        Prophage_Predicted = _count_or_zero(counts, "Prophage")
        False_Negative_Phage = Phage_Count - Phage_Predicted
        False_Negative_Prophage = Prophage_Count - Prophage_Predicted
        name = ('Prophage_Bacteria_' + simulation + "_" + complexity
                + "_" + str(round(item, 4)))
        ### Prophages counted as bacteria
        y_true = [1 if x == "Phage" else 0 for x in dfy['True_Label']]
        y_pred = [1] * len(dfy)
        b_dict[name] = [_padded_f1(y_true, y_pred, False_Negative_Phage)]
        ### Prophages counted as viruses
        dfy['True_Label'] = dfy['True_Label'].replace(regex='Prophage', value="Phage")
        y_true = [1 if x == "Phage" else 0 for x in dfy['True_Label']]
        y_pred = [1] * len(dfy)
        y_pred = y_pred + [0] * False_Negative_Phage
        y_true = y_true + [1] * False_Negative_Phage
        # NOTE(review): this key reuses the 'Prophage_Bacteria_' prefix;
        # 'Prophage_Viruses_' was probably intended.  Kept as-is so that
        # downstream consumers of the returned names are unaffected.
        v_dict[name] = [_padded_f1(y_true, y_pred, False_Negative_Prophage)]
    top_b = max(b_dict, key=b_dict.get)
    top_v = max(v_dict, key=v_dict.get)
    return (top_b, b_dict[top_b], top_v, v_dict[top_v])


def _count_or_zero(counts, label):
    """Return the contig count for `label`, or 0 when that class is absent.

    Narrowed from the original bare ``except:`` which also swallowed
    KeyboardInterrupt/SystemExit.
    """
    try:
        return int(counts.loc[label, ])
    except (KeyError, TypeError):
        return 0


def _padded_f1(y_true, y_pred, fn_count):
    """Append `fn_count` false negatives (predicted 0, truly 1), return F1."""
    return f1_score(y_true + [1] * fn_count, y_pred + [0] * fn_count)
# +
# Find the best p-value threshold for every simulation/complexity pair.
# (Replaces twelve copy-pasted call triplets with a loop; the iteration
# order matches the original call order exactly.)
bac = {}
vir = {}
for _sim in ["Exponential", "Uniform", "Lognormal", "Zero"]:
    for _cplx in ["Low", "Medium", "High"]:
        _path = ("../VirFinder/VirFinder_Data/"
                 "{}_{}_Complexity_Simulation.csv".format(_sim, _cplx))
        nameb, scoreb, namev, scorev = VirFinder_Thresholding(_path)
        bac[nameb] = scoreb
        vir[namev] = scorev
# -
bac
# +
# Score the current df with prophages relabelled as viruses.
# NOTE(review): this cell depends on False_Negative_Phage /
# False_Negative_Prophage and my_dict, which are defined in *other* cells
# -- it only works when the notebook cells are run in the right order.
df['True_Label'] = df['True_Label'].replace(regex='Prophage', value="Phage")
y_true = [1 if x == "Phage" else 0 for x in df['True_Label']]
# Everything that passed the p-value filter is predicted viral.
y_pred = [1]*len(df)
# Pad in the contigs the tool missed entirely as false negatives
# (prediction 0, truth 1), first for phages then for prophages.
listofzeros = [0]*False_Negative_Phage
listofones = [1]*False_Negative_Phage
y_pred = y_pred + listofzeros
y_true = y_true + listofones
listofzeros2 = [0]*False_Negative_Prophage
listofones2 = [1]*False_Negative_Prophage
y_pred = y_pred + listofzeros2
y_true = y_true + listofones2
p = precision_score(y_true, y_pred)
r = recall_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred)
print("Scores if prophages are considered viruses")
print("Precision: " + str(p))
print("Recall: " + str(r))
print("F1 Score: " + str(f1))
my_dict['Prophage_Viruses'] = [p,r,f1]
# -
# Assemble the precision/recall/F1 triples into a tidy summary dataframe.
# The temporary column is literally named `2` (an int) and holds the
# [p, r, f1] lists before they are split out.
w = pd.DataFrame(my_dict.items(), columns=["Prophage Category",2])
w = w.set_index("Prophage Category")
w['Precision'] = [x[0] for x in w[2]]
w['Recall'] = [x[1] for x in w[2]]
w['F1 Score'] = [x[2] for x in w[2]]
w = w.drop(2, axis=1)
# NOTE(review): `simulation` and `complexity` are only defined in
# commented-out code above -- this cell raises NameError unless they were
# set in an earlier session.
w['s'] = simulation
w['c'] = complexity
w['tool'] = "VirFinder"
w['p'] = 0.05
w
#w.to_csv()
# Re-derive the ground-truth label from the contig-name prefix.
df['True_Label'] = [x.split("_")[0] for x in df['name']]
# +
# Apply a fixed p-value cutoff and score with prophages treated as bacteria.
df = df[df['pvalue']<=float(0.01)]
### Without Prophage
#df = df[df['True_Label']!="Prophage"]
x = df.groupby('True_Label').count()
x = x[['name']]
# NOTE(review): here the viral class is labelled "V" (NanoSim naming),
# unlike the "Phage" label used elsewhere in this notebook -- confirm the
# input file's name prefixes before reusing this cell.  Also note `x` is
# reused below as the comprehension variable, shadowing the dataframe.
Phage_Predicted = int(x.loc["V",])
False_Negative_Phage = Phage_Count - Phage_Predicted
y_true = [1 if x == "V" else 0 for x in df['True_Label']]
# Everything below the cutoff is predicted viral.
y_pred = [1]*len(df)
# Pad in the missed contigs as false negatives (prediction 0, truth 1).
listofzeros = [0]*False_Negative_Phage
listofones = [1]*False_Negative_Phage
y_pred = y_pred + listofzeros
y_true = y_true + listofones
p = precision_score(y_true, y_pred)
r = recall_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred)
print("Scores if prophages are considered bacteria")
print("Precision: " + str(round(p,9)))
print("Recall: " + str(round(r,9)))
print("F1 Score: " + str(round(f1,9)))
my_dict = {}
my_dict['Prophage_Bacteria'] = [p,r,f1]
print(round(p,9),round(r,9),round(f1,9))
# -
| data/Tool_Performance/Tool_Predictions/PerformanceScripts/VirFinderParser.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # AWS SDK
# Data Processing
#
#
# ## DynamoDB Local and Remote
# This Notebook connects to local DynamoDb.
# ```
# # you should build the docker container at least once.
# docker-compose build
# ```
# local requires you to launch the Dynamodb locally
# ```
# docker-compose up
# ```
#
#
# # The gateway is handled in node
# ## Issues
#
# * need to make typifyItem go deep into JSON
# * need to create version that creates, reads, writes, updates REMOTE aws tables
# * need to convert the keys from a # separator to a . separator: d#1 goes to d.1
# * 2019-06-01 need to set up (development, test, prod) versions of tables
# * 2019-05-31 add access_key to .env
# * 2019-05-31 add secret_key to .env
#
# +
# Load environment variables from .env before anything reads them.
from dotenv import load_dotenv
load_dotenv(verbose=True)
import os
from os.path import isfile, join
from pprint import pprint
import json
# from interface import implements, Interface
import interface
from util import Util
#from buffered_writer import BufferedWriter
#from parameters import Parameters
from data_process_interface import DataProcessInterface
from stubdb import StubDB
import boto3
from boto3.dynamodb.conditions import Key
# data processing
from document_process import DocumentProcess
from movie_process import MovieProcess
# Shared helper instance used throughout the notebook.
util = Util()
# +
# process_config is written to process_config.json by 01-process-data.ipynb
# load process_config.json
# process_config = util.readProcessConfig()
# Pick the dataset to work on.  The original code assigned the 'movies'
# config and immediately overwrote it with 'documents' (a dead store), so
# the unused alternative is kept commented out instead.
# process_config = {'key': 'movies', 'region': 'us-east-1', 'suffix': 'dev', 'target': 'remote'}
process_config = {'key': 'documents', 'region': 'us-east-1', 'suffix': 'dev', 'target': 'remote'}
pprint(process_config)
# +
# Read connection settings from the environment (populated by .env above).
API_URL = os.getenv("API_URL")
API_USER = os.getenv("API_USER")
AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY_ID = os.getenv("AWS_SECRET_ACCESS_KEY_ID")
REGION_NAME = 'us-east-1'  # os.getenv("REGION_NAME")
# Fail fast if anything is missing.  `is not None` is the idiomatic None
# check (PEP 8); `!= None` goes through __eq__ and can misbehave.
# NOTE: asserts are stripped under `python -O`; fine for a notebook.
assert API_URL is not None
assert API_USER is not None
assert AWS_ACCESS_KEY_ID is not None
assert AWS_SECRET_ACCESS_KEY_ID is not None
assert REGION_NAME is not None
# -
# ## Connect and Load Table Definitions
# +
#########
# CREATE STUBDB
##
# Connection details for the *local* DynamoDB started by docker-compose.
db_name='dynamodb'
endpoint_url='http://localhost:8000'
histories = 'table.histories.json' # split create table and gsi
stubDB = None
# Build the StubDB wrapper, load the table create/update histories, and
# connect to the 'local' or 'remote' target from process_config.
stubDB = StubDB(db_name=db_name,\
    endpoint_url=endpoint_url,\
    aws_access_key_id=AWS_ACCESS_KEY_ID,\
    aws_secret_access_key=AWS_SECRET_ACCESS_KEY_ID,\
    region_name=process_config['region'])\
    .loadTableHistories(histories, env_suffix=process_config['suffix'])\
    .connect(process_config['target'])
print('--------')
# rename the key to reflect the environment dev, test, or prod
# e.g. 'documents' -> 'documents_dev' in every table definition.
for key in stubDB.table_histories:
    keyname = key  # NOTE(review): assigned but never used (dead store).
    for item in stubDB.table_histories[key]:
        tb_name = '{}_{}'.format(item['TableName'], process_config['suffix'])
        item['TableName']=tb_name
# -
pprint(stubDB.getTableList())
# +
import ipywidgets as widgets
from IPython.display import display

class SystemButtons:
    """A single 'Show Tables' button that prints the current table list."""
    def __init__(self, stubDB):
        # StubDB wrapper providing getTableList().
        self.stubDB = stubDB
        self.show_button = widgets.Button(description="Show Tables ")
    def show_on_button_clicked(self, b):
        """Click handler: print the current table names."""
        print(self.stubDB.getTableList())
    def display(self):
        """Attach the handler, render the button, and return self (fluent)."""
        self.show_button.on_click(self.show_on_button_clicked)
        display(self.show_button)
        return self
class TableButtons:
    """Jupyter buttons to create, delete, and count a single DynamoDB table.

    The table's create/update history comes from ``stubDB.table_histories``;
    entries containing a ``KeySchema`` are table creations, the rest are
    updates (e.g. GSI changes).
    """

    def __init__(self, stubDB, table_name_key, env_suffix):
        self.stubDB = stubDB
        self.env_suffix = env_suffix
        # Resolved, environment-suffixed table name (e.g. 'documents_dev').
        self.table_name = stubDB.getTableName(table_name_key)
        self.table_name_key = table_name_key
        self.table_history = self.stubDB.table_histories[self.table_name_key]
        self.create_button = widgets.Button(description="Create {}".format(self.table_name))
        self.del_button = widgets.Button(description="Delete {}".format(self.table_name))
        self.count_button = widgets.Button(description="Count Local {} Data".format(self.table_name))
        # TODO: batch loading is currently disabled (handler not wired up).
        self.batch_load_button = widgets.Button(description="Batch Load {}".format(self.table_name))

    def create_on_button_clicked(self, b):
        """Replay the table history: create the table, then apply updates."""
        for tdef in self.table_history:
            if 'KeySchema' in tdef:
                try:
                    print('A:')
                    pprint(tdef)
                    self.stubDB.client.create_table(**tdef)
                except Exception as err:
                    # NOTE(review): the original caught NameError, which boto3
                    # never raises here -- AWS failures are botocore
                    # ClientError subclasses and escaped the handler.
                    print(err)
                    print('table {} already created.'.format(self.table_name))
            else:
                try:
                    print('B:')
                    pprint(tdef)
                    self.stubDB.client.update_table(**tdef)
                except Exception as err:
                    print(err)
                    print('table {} already updated.'.format(self.table_name))

    def count_on_button_clicked(self, b):
        """Print the (eventually-consistent) item count for the table."""
        try:
            table = self.stubDB.db.Table(self.table_name)
            print('table: ', table.item_count)
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            print('Table {} doesnt exist'.format(self.table_name))

    def del_on_button_clicked(self, b):
        """Delete the table (no confirmation prompt)."""
        try:
            table = self.stubDB.db.Table(self.table_name)
            table.delete()
        except Exception as err:
            print(err)
            print('failed to delete {}'.format(self.table_name))

    def display(self):
        """Attach the click handlers, render the buttons, return self."""
        self.create_button.on_click(self.create_on_button_clicked)
        self.del_button.on_click(self.del_on_button_clicked)
        self.count_button.on_click(self.count_on_button_clicked)
        display(self.create_button)
        display(self.del_button)
        display(self.count_button)
        return self
# -
#
#
# Toggle dryrun=True to preview operations without writing to AWS.
# (The original set it to True and immediately overwrote it with False --
# a dead store; only the final value ever took effect.)
stubDB.dryrun = False
systemButton = SystemButtons(stubDB).display()
tableButtons = TableButtons(stubDB,
    process_config['key'],
    process_config['suffix']
).display()
| soke-data/00-aws-connect.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# ### Quiz
#
# 위 그림은 크기가 5인 정수 삼각형의 한 모습이다.
#
# 맨 위층 7부터 시작해서 아래에 있는 수 중 하나를 선택하여 아래층으로 내려올 때, 이제까지 선택된 수의 합이 최대가 되는 경로를 구하는 프로그램을 작성하라. 아래층에 있는 수는 현재 층에서 선택된 수의 대각선 왼쪽 또는 대각선 오른쪽에 있는 것 중에서만 선택할 수 있다.
#
# 삼각형의 크기는 1 이상 500 이하이다. 삼각형을 이루고 있는 각 수는 모두 정수이며, 범위는 0 이상 9999 이하이다.
# 시간 제한: 2초 메모리 제한 128MB
# ### 풀이
# Read the triangle: first its height H, then H rows of integers
# (row i, 0-indexed, holds i + 1 space-separated numbers).
H = int(input())
int_triangle = []
for i in range(H):
    input_ = input()
    line = list(map(int, input_.split(' ')))
    # Sanity-check the row width.  (NOTE: asserts vanish under `python -O`.)
    assert len(line) == i + 1
    int_triangle.append(line)
# +
def solve(depth, idx):
    """Bottom-up max-path DP over the module-level `sum_triangle`.

    After ``solve(0, 0)``, ``sum_triangle[d][i]`` holds the best sum
    obtainable from cell (d, i) down to the bottom row, and
    ``path_triangle[d][i]`` records which child column (i or i + 1) of row
    d + 1 was chosen.  `idx` is only threaded through the recursion and is
    kept for interface compatibility.
    """
    # Fix: a single-row triangle has no row `depth + 1` to fold in; the
    # original indexed past the end and raised IndexError for H == 1.
    if depth + 1 >= len(sum_triangle):
        return
    # Recurse to the second-to-last row first so children are final.
    if depth < len(sum_triangle) - 2:
        solve(depth + 1, idx)
    # Sweep left to right, folding the better child into each cell.
    for idx in range(depth + 1):
        if sum_triangle[depth + 1][idx] > sum_triangle[depth + 1][idx + 1]:
            sum_triangle[depth][idx] += sum_triangle[depth + 1][idx]
            path_triangle[depth][idx] = idx
        else:
            sum_triangle[depth][idx] += sum_triangle[depth + 1][idx + 1]
            path_triangle[depth][idx] = idx + 1
def show_result():
    """Print the chosen top-to-bottom path and its total.

    Reads the module-level tables (`int_triangle`, `path_triangle`,
    `sum_triangle`) produced by solve().
    """
    print('경로:', end=' ')
    col = 0
    for row in range(len(path_triangle)):
        print(int_triangle[row][col], end=' ')
        # Follow the recorded child choice down to the next row.
        col = path_triangle[row][col]
    print()
    print('수의 합:', sum_triangle[0][0])
def main():
    """Build the DP tables from int_triangle, run the DP, print the answer."""
    global sum_triangle  # running best-sum table, seeded with the input values
    global path_triangle  # chosen child column per cell (-1 = unset)
    sum_triangle = [row[:] for row in int_triangle]
    path_triangle = [[-1] * len(row) for row in int_triangle]
    solve(0, 0)
    show_result()
# +
import time

if __name__ == '__main__':
    # Time the full solve + print against the problem's 2-second limit.
    s = time.time()
    main()
    e = time.time()
    print("수행 시간: {0:3.6f}초".format(e - s))
| QUIZ/integer triangle solution.ipynb |