code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Exercise 1 (notebook 2)
#
# 1. Calculate the % GC content in the trna sequence
#
# 2. Calculate the % AT content in the trna sequence
# tRNA sequence under analysis and its per-base counts.
trna='AAGGGCTTAGCTTAATTAAAGTGGCTGATTTGCGTTCAGTTGATGCAGAGTGGGGTTTTGCAGTCCTTA'
A_count=trna.count('A')
C_count=trna.count('C')
G_count=trna.count('G')
T_count=trna.count('T')
def gc_content(G_count, C_count):
    """Return the percentage GC content of the trna sequence.

    G_count/C_count: total counts of G and C in the sequence.
    Bug fix: the original ignored both parameters and re-counted the global
    sequence once per character inside a pointless loop; the arguments are
    now used directly and the loop is gone.
    """
    return (G_count + C_count) / len(trna) * 100
def at_content(A_count, T_count):
    """Return the percentage AT content of the trna sequence.

    A_count/T_count: total counts of A and T in the sequence.
    Same fix as gc_content: use the parameters, drop the redundant loop.
    """
    return (A_count + T_count) / len(trna) * 100
gc_content(G_count, C_count)
at_content(A_count, T_count)
# ## Exercise 2 (notebook 2)
#
# 1. Given the following amino acid sequence (MNKMDLVADVAEKTDLSKAKATEVIDAVFA), find the first, last and the 5th amino acids in the sequence.
# 2. The above amino acid is a bacterial restriction enzyme that recognizes "TCCGGA". Find the first restriction site in the following sequence: AAAAATCCCGAGGCGGCTATATAGGGCTCCGGAGGCGTAATATAAAA
#
#
def pos_seq(seq):
    """Print and return the first, 5th and last residues of *seq* concatenated.

    For 'MNKMDLVADVAEKTDLSKAKATEVIDAVFA' this is 'MDA'.
    Bug fix: the original stored the result of print() — which is always
    None — in ``pos`` and returned that; the concatenated string itself is
    now returned (and still printed, preserving the console output).
    """
    pos = seq[0] + seq[4] + seq[-1]
    print(pos)
    return pos
pos_seq('MNKMDLVADVAEKTDLSKAKATEVIDAVFA')
def transcript_location(seq2):
    """Return the index of the first 'TCCGGA' restriction site in *seq2*.

    Returns -1 when the recognition sequence is absent (str.find semantics).
    """
    return seq2.find("TCCGGA")
transcript_location('AAAAATCCCGAGGCGGCTATATAGGGCTCCGGAGGCGTAATATAAAA')
# ## Exercise 3 (notebook 4)
# - Using strings, lists, tuples and dictionaries concepts, find the reverse complement of AAAAATCCCGAGGCGGCTATATAGGGCTCCGGAGGCGTAATATAAAA
seq = "AAAAATCCCGAGGCGGCTATATAGGGCTCCGGAGGCGTAATATAAAA"
def reverse_complement(complement, sequence=None):
    """Return the reverse complement of a DNA sequence.

    complement: dict mapping each base to its pair, e.g.
        {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}.
    sequence: the DNA string to process; defaults to the module-level ``seq``
        (the original hard-coded behaviour), so existing callers are
        unaffected while the function is now reusable on any sequence.
    Bases missing from the mapping are passed through unchanged.
    """
    if sequence is None:
        sequence = seq
    return "".join(complement.get(base, base) for base in reversed(sequence))
reverse_complement({'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'})
# ## Exercise 4 (notebook 5)
# - Expand the script in the previous cell to also manage ATM deposits
def atm_transact():
    """given a choice, the function returns options for transaction

    Interactive console ATM: repeatedly prompts for 'b' (balance),
    'd' (deposit), 'w' (withdraw) or 'q' (quit), starting from a
    hard-coded balance of 50000.  Loops until the user enters 'q'.
    All input/output goes through input()/print(); nothing is returned.
    """
    acountbal = 50000  # starting balance for the session
    choice = input("Please enter 'b' to check balance or 'd' to deposit or 'w' to withdraw: ")
    while choice != 'q':
        if choice.lower() in ('b', 'w', 'd'):
            if choice.lower() == 'b':
                # Balance inquiry, then re-prompt for the next transaction.
                print("Your balance is: %d" % acountbal)
                print("Anything else?")
                choice = input("Enter) b for balance, w to withdraw, or d to deposit or q to quit: ")
                print(choice.lower())
            elif choice.lower() == 'd':
                # Deposit: add the entered amount and show the new balance.
                deposit = float(input("Enter amount to be deposited:"))
                print("Amount Deposited:", deposit)
                acountbal = acountbal + deposit
                print("your new current balance is %.2f:" %(acountbal))
                print("Anything else?")
                choice = input("Enter b for balance, w to withdraw, or d to deposit or q to quit: ")
            else:
                # Withdraw: only allowed up to the current balance.
                withdraw = float(input("Enter amount to withdraw: "))
                if withdraw <= acountbal:
                    print("here is your: %.2f" % withdraw)
                    acountbal = acountbal - withdraw
                    print("Anything else?")
                    choice = input("Enter b for balance, w to withdraw or q to quit: ")
                    #choice = 'q'
                else:
                    # NOTE(review): choice stays 'w', so the loop re-prompts
                    # for another withdrawal amount — presumably intended.
                    print("You have insufficient funds: %.2f" % acountbal)
        else:
            print("Wrong choice!")
            choice = input("Please enter 'b' to check balance or 'w' to withdraw: ")
    #update to perform deposits
    #give statements
atm_transact()
# ## Exercise 5 (notebook 5)
#
# 1. Create a while loop that starts with x = 0 and increments x until x is equal to 5. Each iteration should print to the console.
# 2. Repeat the previous problem, but the loop will skip printing x = 5 to the console but will print values of x from 6 to 10.
# 3. Create a for loop that prints values from 4 to 10 to the console.
#
#
def value(number):
    """Count x up from 0 and print each value until x reaches 5.

    NOTE: *number* is accepted but intentionally unused — the exercise asks
    for a loop that always stops at 5.
    """
    counter = 0
    while counter < 5:
        counter += 1
        print('x=', counter)
value(10)
def val(num):
    """Print x for every value in 0..num-1, skipping x == 5."""
    for current in range(num):
        if current != 5:
            print('x=', current)
val(10)
def num2(val2):
    """Print the integers from 4 up to and including *val2*.

    Prints nothing when val2 < 4 (same as the original skip-loop).
    """
    for n in range(4, val2 + 1):
        print(n)
num2(10)
# ## Exercise 6 (notebook 6)
# Let's return to our earlier exercise: calculating %GC content. In this exercise:
#
# - Write a function percentageGC that calculates the GC content of a DNA sequence
# - The function should return the %GC content
# - The Function should return a message if the provided sequence is not DNA (This should be checked by a different function, called by your function)
#
#
def percentageGC(seq):
    """Return the %GC content of *seq*, rounded to the nearest integer.

    seq: DNA string; validity checking is delegated to test_dna elsewhere
    in this notebook.
    Raises ValueError on an empty sequence instead of the original
    ZeroDivisionError from len(seq) == 0.
    """
    if not seq:
        raise ValueError("cannot compute GC content of an empty sequence")
    return round((seq.count('C') + seq.count('G'))/len(seq)*100)
percentageGC('AAGGGCTTAGCTTAATTAAAGTGGCTGATTTGCGTTCAGTTGATGCAGAGTGGGGTTTTGCAGTCCTTA')
def test_dna(sequence):
"""The function tests the sequence for non_DNA bases """
bases = 'ATGC'
status = True, "This is a valid DNA sequence"
for base in sequence:
if base in bases:
pass
else:
status = False, "This is not a valid DNA sequence"
print("At position", sequence.find(base), "invalid dna base", base)
return status
test_dna('ATGC')
test_dna('MNTPK')
# ## Exercise 1 (notebook 7)
# Write a function the reads the file (humchr.txt) and writes to another file (gene_names.txt) a clean list of gene names.
#
# +
def write2file(gene_list, out_file):
    """
    Write *gene_list* to *out_file*, one gene name per line
    (no trailing newline after the last entry).
    """
    joined = '\n'.join(gene_list)
    with open(out_file, 'w') as handle:
        handle.write(joined)
def remove_empty(gene_list):
    """
    Strip every '-' placeholder entry from *gene_list*.

    The list is mutated in place (callers may rely on that) and also
    returned for convenience.
    """
    while '-' in gene_list:
        gene_list.remove('-')
    return gene_list
def clean_genes(input_file, out_file):
    """
    Given a chromosome annotation file, extract the
    genes and write them to another file

    input_file: path to a humchrx-style annotation file; gene entries are
        assumed to start at a line beginning with 'Gene' and end at the
        first blank line (TODO confirm against the actual humchrx.txt).
    out_file: destination path, written one gene name per line.
    """
    gene_list = []
    tag = False  # True while we are inside the gene table section
    with open(input_file, 'r') as humchrx:
        for line in humchrx:
            if line.startswith('Gene'):
                tag=True
            if line == '\n':
                tag = False
            if tag:
                gene_list.append(line.split()[0])
    #clean the gene list
    # NOTE(review): the pops/merge below hard-code the humchrx header layout
    # (first two tokens belong to one header line, third is junk) — verify
    # against the input file before reuse.
    gene_list.pop(2)
    gene_list[0] = gene_list[0]+"_"+gene_list[1]
    gene_list.pop(1)
    gene_list = remove_empty(gene_list)
    ## Writing to file
    write2file(gene_list, out_file)
clean_genes('./humchrx.txt', 'testing.txt')
# -
# ## Exercise 2 (notebook 7)
# 1. Convert the function you wrote in exercise 1 into a python module.
# 2. Then, import the module and use the function to read humchrx.txt file and create a gene list file.
# 3. Create a stand-alone script that does all the above.
#
#
# Local project module (Exercise.py); presumably packages the clean_genes
# pipeline from exercise 1 — verify the module exists alongside this notebook.
import Exercise
Exercise.clean_genes('humchrx.txt', 'clean.txt')
# ## Exercise 3 (notebook 7)
# 1. Using the same concept, convert your script in exercise 1 to take command line arguments (input and output files)
# 2. Using a DNA sequence read from file, answer the following questions:
# - Show that the DNA string contains only four letters.
# - In the DNA string there are regions that have a repeating letter. What is the letter and length of the longest repeating region?
# - How many ’ATG’s are in the DNA string?
#
#
def num_nuc(sequence):
    """Return the set of distinct characters (nucleotides) in *sequence*.

    A DNA string yields at most {'A', 'T', 'G', 'C'}, demonstrating the
    four-letter alphabet.  Idiom fix: ``set(list(x))`` is just ``set(x)``.
    """
    return set(sequence)
num_nuc('AAAGGGCTTAGCTTAATTAAAGTGGCTGATTTGCCCCCGTTCAGTTGATTTGCAGAGTGGGGTTTTGCAGTCCT')
# +
def longest_repeating_nucleotide(dna_seq=None):
    """Report the longest run of a single repeated letter in a DNA string.

    dna_seq: sequence to scan; when None (the original behaviour) the user
        is prompted on stdin.
    Prints the result and returns (letter, run_length); ('', 0) for an
    empty sequence.

    Bug fix: the original compared every character against every other
    character (O(n^2)) and grouped *non-adjacent* matches into one run, so
    the reported length was wrong; it also dropped a run that ended at the
    end of the string.  This version does a single linear scan tracking
    consecutive runs only.
    """
    if dna_seq is None:
        dna_seq = input('please enter the DNA sequence')
    best_base, best_len = '', 0          # best run seen so far
    current_base, current_len = '', 0    # run currently being extended
    for base in dna_seq:
        if base == current_base:
            current_len += 1
        else:
            current_base, current_len = base, 1
        if current_len > best_len:
            best_base, best_len = current_base, current_len
    print('*'*100)
    print('%s is the longest repeating letter, repeat %d times' %(best_base, best_len))
    return best_base, best_len
# Interactive demo: with no argument the function prompts on stdin.
longest_repeating_nucleotide()
# -
def count_ATGs(dna):
    """Count the occurrences of the 'ATG' motif in a DNA string.

    Returns the integer count, or the string 'Pattern not found' when the
    motif is absent.  Bug fix: the original wrote ``dna.find == -1`` —
    comparing the bound *method object* to -1, which is always False — so
    the not-found branch could never run; the method is now actually called.
    """
    if dna.find('ATG') == -1:
        return 'Pattern not found'
    return dna.count('ATG')
count_ATGs('AAAGGGCTTAGCTTAATTAAAGTGGCATGTGATTTGCCCCCGTTCAGTTGATTTGCAGAGTGGGGTTTTGCAGTCCT')
| Notebooks/Class_exercise_functions_final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] tags=["docstring"]
# # Importer
# There are two different mechanisms for loading Literate notebooks:
# * Import hooks
# * Code generation
#
# In this notebook, the mechanism by which `.ipynb` Python notebooks are loaded will be explored.
# -
# ## Loaded modules
# ### Loading the hook
# First we need to run the shim that installs the import hook and loads the useful globals:
# + tags=[]
# %load_ext literary.module
# -
# This is **only needed if we wish to execute this notebook directly**, as the import hook performs this step automatically when importing notebooks. Defining `__package__` enables us to perform relative imports. Let's import a function from [exports.ipynb]():
# ### Loading a module
# Now we can import a notebook as a module:
# + tags=[]
# Relative import resolved by the literary import-hook loaded above; this
# pulls in the sibling notebook docstring.ipynb as a module.
from . import docstring
# -
# And we can look at their docstrings:
help(docstring)
# We can also rich-display the docstring (but images will be omitted):
# + tags=[]
from IPython.display import Markdown
Markdown(docstring.__doc__)
# -
# The loader sets the `__file__`, `__loader__`, and other module attributes:
docstring.__file__
docstring.__loader__
# ### Accessing exports
# We could also import a notebook which defines some exports:
from . import exports
# As a pure-Python module, `docstring` has some useful attributes
dir(exports)
exports.how_long_is_a_piece_of_string("")
# ### Generated Python code
# inspect.getsource shows the Python code the loader generated for each notebook.
import inspect
print(inspect.getsource(docstring))
print(inspect.getsource(exports))
from . import patching
# + tags=[]
print(inspect.getsource(patching))
# -
# -
| examples/src/package_a/importer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="A9fAcSCGhwxF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="d72d3c67-85b8-4215-a1a4-d900bc52b6a4"
import pandas as pd
import numpy as np
import os
import zipfile as zf
import imutils
import shutil
from imutils import build_montages,paths
# + id="lfTwrcCbaOIu" colab_type="code" outputId="da2c9055-68fa-4770-a6e8-35262c79bf21" colab={"base_uri": "https://localhost:8080/", "height": 34}
os.getcwd()
# + id="REzLcnAWarSM" colab_type="code" colab={}
# Destination tree: /content/Data/{YOUNG,MIDDLE,OLD}, one folder per class.
data_dir = '/content/Data/'
os.mkdir(data_dir)
# + id="BwQCPrz2aS01" colab_type="code" colab={}
os.mkdir(data_dir+'YOUNG/')
os.mkdir(data_dir+'MIDDLE/')
os.mkdir(data_dir+'OLD/')
# + id="fkL1m9bwyFrB" colab_type="code" colab={}
# Unpack the raw train/test archives into /content/{train,test}.
handle_train = zf.ZipFile(r'/content/Train.zip')
handle_train.extractall('/content/train')
handle_train.close()
handle_test = zf.ZipFile(r'/content/Test.zip')
handle_test.extractall('/content/test')
handle_test.close()
# + id="lAaQFyUiypgT" colab_type="code" colab={}
train_images = os.listdir('/content/train/Train/')
test_images = os.listdir('/content/test/Test/')
fp_train_images = '/content/train/Train/'
fp_test_images = '/content/test/Test/'
# + id="RVhtpgEOQ57e" colab_type="code" colab={}
#19906 train images -> 15000 train + 400 test + 906 validate
#6636 test images
# + id="ssjBcp0ozODT" colab_type="code" outputId="edc1702c-9d60-40d4-ba73-a66a936528af" colab={"base_uri": "https://localhost:8080/", "height": 195}
# train.csv maps image filename (ID) to its age class label.
df_train = pd.read_csv('/content/train.csv')
df_train.head(5)
# + id="qMleW9CHV6Na" colab_type="code" outputId="5de19552-74d6-4300-fe01-7c752a2bec62" colab={"base_uri": "https://localhost:8080/", "height": 84}
df_train['Class'].value_counts()
# + id="ZQSsphI9zQzu" colab_type="code" outputId="713be329-fea2-4c81-d4ce-6563f723fdc3" colab={"base_uri": "https://localhost:8080/", "height": 195}
sample_submn = pd.read_csv('/content/sample_submission_sDO3m7O.csv')
sample_submn.head()
# + id="fAfxZ7l4_-fc" colab_type="code" colab={}
middle_path = '/content/Data/MIDDLE/'
old_path = '/content/Data/OLD/'
young_path = '/content/Data/YOUNG/'
# + id="JQageL-Z_-dA" colab_type="code" colab={}
# Sort each training image into its class folder (assumes the CSV 'Class'
# values match the YOUNG/MIDDLE/OLD directory names created above).
for index,row in df_train.iterrows():
    category = row['Class']
    shutil.move(fp_train_images+row["ID"],data_dir+category)
# + id="4xlHW3vj_-as" colab_type="code" colab={}
(list(paths.list_files('/content/Data/')))
# + id="A8pcYSLi_-Wd" colab_type="code" colab={}
# !zip -r /content/datafile.zip /content/Data
# + id="NDh-pnkH8VwI" colab_type="code" colab={}
# + id="NDh-pnkH8VwI" colab_type="code" colab={}
| making_age_detection_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import libraries
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
training_data_folder_path = 'dataset/training-data'
test_data_folder_path = 'dataset/test-data'
# Sanity check: display one training image (convert BGR -> RGB for matplotlib).
random_image = cv2.imread('dataset/training-data/1/Alvaro_Uribe_0020.jpg')
fig = plt.figure()
ax1 = fig.add_axes((0.1, 0.2, 0.8, 0.7))
ax1.set_title('Image from category 1')# change category name accordingly
plt.imshow(cv2.cvtColor(random_image, cv2.COLOR_BGR2RGB))
plt.show()
# # face detection
# Path to the Haar cascade file.  Bug fix: the literal contained a stray
# space ('opencv_xml_files/ haarcascade...') which made the path invalid.
# NOTE(review): this variable is unused below — detect_face hard-codes the
# same path; consider passing it through.
haarcascade_frontalface = 'opencv_xml_files/haarcascade_frontalface.xml'
def detect_face(input_img):
    """Detect the first face in a BGR image.

    Returns (gray_face_crop, rect) for the first detection, where rect is
    the (x, y, w, h) box from detectMultiScale, or (-1, -1) when no face
    is found.
    Bug fix: the crop was ``image[y:y+w, x:x+h]`` — rows span the height
    and columns the width, so it must be ``image[y:y+h, x:x+w]``.  The two
    coincided only because Haar frontal-face windows are square.
    """
    image = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
    face_cascade = cv2.CascadeClassifier('opencv_xml_files/haarcascade_frontalface.xml')
    faces = face_cascade.detectMultiScale(image, scaleFactor=1.2, minNeighbors=5)
    if len(faces) == 0:
        return -1, -1
    (x, y, w, h) = faces[0]
    return image[y:y+h, x:x+w], faces[0]
# # prepare training dataset
def prepare_training_data(training_data_folder_path):
    """Build the training set from a directory of labelled face images.

    training_data_folder_path: directory containing one sub-folder per
        integer label, each holding image files.
    Returns (detected_faces, face_labels): grayscale face crops resized to
    121x121 and their parallel integer labels.  Images with no detectable
    face are skipped.

    Bug fix: the original tested ``face is not -1`` — identity comparison
    against an int literal, a CPython small-int artefact that raises a
    SyntaxWarning on modern Python; the sentinel is now checked explicitly.
    """
    detected_faces = []
    face_labels = []
    traning_image_dirs = os.listdir(training_data_folder_path)
    for dir_name in traning_image_dirs:
        label = int(dir_name)  # folder name is the class label
        training_image_path = training_data_folder_path + "/" + dir_name
        training_images_names = os.listdir(training_image_path)
        for image_name in training_images_names:
            image_path = training_image_path + "/" + image_name
            image = cv2.imread(image_path)
            face, rect = detect_face(image)
            # detect_face returns the int sentinel -1 when no face is found.
            if not isinstance(face, int):
                resized_face = cv2.resize(face, (121,121), interpolation = cv2.INTER_AREA)
                detected_faces.append(resized_face)
                face_labels.append(label)
    return detected_faces, face_labels
detected_faces, face_labels = prepare_training_data("dataset/training-data")
print("Total faces: ", len(detected_faces))
print("Total labels: ", len(face_labels))
# # initialize a face recognizer
# NOTE(review): cv2.face requires the opencv-contrib build of OpenCV.
eigenfaces_recognizer = cv2.face.EigenFaceRecognizer_create()
# # train the face recognizer model
eigenfaces_recognizer.train(detected_faces, np.array(face_labels))
# +
def draw_rectangle(test_image, rect):
    """Draw a green bounding box for rect = (x, y, w, h) on test_image in place."""
    (x, y, w, h) = rect
    cv2.rectangle(test_image, (x, y), (x+w, y+h), (0, 255, 0), 2)
def draw_text(test_image, label_text, x, y):
    """Write label_text in green at pixel (x, y) on test_image in place."""
    cv2.putText(test_image, label_text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
# -
# # predict output on test data
def predict(test_image):
    """Classify the face in *test_image* and annotate the image.

    Uses the module-level ``eigenfaces_recognizer`` and ``tags`` label list.
    Returns (annotated_image, label_text); test_image is modified in place.

    Robustness fix: when no face is detected detect_face returns (-1, -1)
    and the original crashed inside cv2.resize with an opaque error; we now
    raise a clear ValueError instead.
    """
    detected_face, rect = detect_face(test_image)
    if isinstance(detected_face, int):
        raise ValueError("no face detected in the test image")
    resized_test_image = cv2.resize(detected_face, (121,121), interpolation = cv2.INTER_AREA)
    label= eigenfaces_recognizer.predict(resized_test_image)
    label_text = tags[label[0]]
    draw_rectangle(test_image, rect)
    draw_text(test_image, label_text, rect[0], rect[1]-5)
    return test_image, label_text
# Class-index -> display-label mapping used by predict().
tags = ['0', '1', '2', '3', '4']
test_image = cv2.imread("dataset/test-data/1/Alvaro_Uribe_0021.jpg")
predicted_image, label = predict(test_image)
fig = plt.figure()
ax1 = fig.add_axes((0.1, 0.2, 0.8, 0.7))
ax1.set_title('actual class: ' + tags[1]+ ' | ' + 'predicted class: ' + label)
plt.axis("off")
plt.imshow(cv2.cvtColor(predicted_image, cv2.COLOR_BGR2RGB))
plt.show()
| Exercise01/.ipynb_checkpoints/Exercise01-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="1WS85XQlC6Yi"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
# + [markdown] id="U4OrSMli4zmB"
# # Question 1
# + id="wxkXq6IUJvx4"
df = pd.read_csv("parler_postsData.csv")
# Posts per creator id.  NOTE(review): indentation was lost in this copy;
# count_total is assumed to increment once per post (it is the percentage
# denominator below) — confirm against the original notebook.
dict_frequency={}
count_total=0
for i in df['Creator']:
    if(i in dict_frequency.keys()):
        temp=dict_frequency.get(i)
        dict_frequency[i]=temp+1
    else:
        dict_frequency[i]=1
    count_total+=1
#sorting done according to dictionary values
sorted_tuples_frequency = sorted(dict_frequency.items(), key=lambda item: item[1],reverse=True)
sorted_dict_frequency = {k: v for k, v in sorted_tuples_frequency}#dictionary with id and frequency sorted in decreasing order according to frequecy
#Correct df for user data
df_user=pd.read_csv("parler_userData.csv")
df_user_data=df_user[['Id','Username','Human','Verified','Interactions','Score','Bio']]
# + [markdown] id="17YFx5VH-kCe"
# List making for top 10 and checking their human and verified condition
# + id="UxkMQcGxSKtm" colab={"base_uri": "https://localhost:8080/"} outputId="f81bcf0f-72eb-43a4-9b8b-ed7a0a21466c"
#making list of top 10
id_frequency_top10=np.array(list(sorted_dict_frequency.keys())[0:10]) #contains id of top 10
frequency_top10=np.array(list(sorted_dict_frequency.values())[0:10]) #contains frequency of top 10
#for finding percentage
frequency_top10=(frequency_top10/count_total)
frequency_top10=frequency_top10*100
#making a Username,Verified,Human list
Username_list_frequency=[]
Verified_list_frequency=[]
Human_list_frequency=[]
# Look up each top-10 creator id in the user table to fetch profile flags.
for i in range(10):
    t=df_user_data[df_user_data['Id']==id_frequency_top10[i]]
    Username_list_frequency.extend(t['Username'])
    Verified_list_frequency.extend(t['Verified'])
    Human_list_frequency.extend(t['Human'])
print(sum(frequency_top10)," total content generated by top 10 cumulatively")
frequency_top10=frequency_top10.astype(str).astype(float).tolist()
# + [markdown] id="kEQCHsHy-esB"
# Plotting Question 1
# + id="pc-zGVSPbTyp" colab={"base_uri": "https://localhost:8080/", "height": 411} outputId="06bf6e45-ef05-4148-944a-2ba54d7e5e0b"
graph=plt.bar(Username_list_frequency,frequency_top10)
plt.xticks(color='yellow', rotation=270, horizontalalignment='right')
#red(Verified Human) green(Verified not human) blue(not verified Human) black(not verified not human)
handles= [plt.Rectangle((0,0),1,1, color=x) for x in ['red','green','blue','black']]
labels=['Verified Human','Verified Not Human','Not Verified Human','Not Verified Not Human']
# Colour each bar by the creator's (Verified, Human) flag combination.
for i in range(10):
    if (Verified_list_frequency[i]==True and Human_list_frequency[i]==True):
        graph[i].set_color('red')
    elif (Verified_list_frequency[i]==True):
        graph[i].set_color('green')
    elif (Human_list_frequency[i]==True):
        graph[i].set_color('blue')
    else:
        graph[i].set_color('black')
plt.title('Question 1')
plt.xlabel('UserName')
plt.ylabel('Percentage of message send')
plt.legend(handles,labels)
# + [markdown] id="yBRVfoZdoRDP"
# # **Question 2 begins**
#
# # Upvotes and Interactions
# + id="AyxMyj1Ms-7V" colab={"base_uri": "https://localhost:8080/"} outputId="d23f1391-acc7-4e9b-f2aa-45d30f32c026"
#dictionary will contains id as keys and interactions and upvotes as values respectively
#dictionary will contains id as keys and interactions and upvotes as values respectively
dict_interactions={}
dict_upvotes={}
# Columns by position in df_user_data: 0=Id, 4=Interactions, 5=Score (upvotes).
for i in range(df_user_data.shape[0]):
    temp_id=df_user_data.iloc[i,0]
    temp_interactions=df_user_data.iloc[i,4]
    temp_upvotes=df_user_data.iloc[i,5]
    dict_upvotes[temp_id]=temp_upvotes
    dict_interactions[temp_id]=temp_interactions
#sorting done according to values for dictionary
sorted_tuples_upvotes = sorted(dict_upvotes.items(), key=lambda item: item[1],reverse=True)
sorted_dict_upvotes = {k: v for k, v in sorted_tuples_upvotes}#dictionary with id and upvotes sorted in decreasing order according to frequecy
sorted_tuples_interactions = sorted(dict_interactions.items(), key=lambda item: item[1],reverse=True)
sorted_dict_interactions = {k: v for k, v in sorted_tuples_interactions}#dictionary with id and interactions sorted in decreasing order according to frequecy
#making list for top 10 with id as keys and upvotes and interactions as values respectively
id_upvotes_top10=np.array(list(sorted_dict_upvotes.keys())[0:10]) #contains id of top 10
upvotes_top10=np.array(list(sorted_dict_upvotes.values())[0:10]) #contains upvotes of top 10
id_interactions_top10=np.array(list(sorted_dict_interactions.keys())[0:10]) #contains id of top 10
interactions_top10=np.array(list(sorted_dict_interactions.values())[0:10]) #contains interactions of top 10
Username_list_upvotes=[]
Verified_list_upvotes=[]
Human_list_upvotes=[]
Username_list_interactions=[]
Verified_list_interactions=[]
Human_list_interactions=[]
word_cloud_bio=[] #this will contain bio of top 10 users according to interactions
#Addition to Username done by looking at id plus the addition to verified and human list done throught users dataset
for i in range(10):
    t=df_user_data[df_user_data['Id']==id_upvotes_top10[i]]
    Username_list_upvotes.extend(t['Username'])
    Verified_list_upvotes.extend(t['Verified'])
    Human_list_upvotes.extend(t['Human'])
    t1=df_user_data[df_user_data['Id']==id_interactions_top10[i]]
    Username_list_interactions.extend(t1['Username'])
    Verified_list_interactions.extend(t1['Verified'])
    Human_list_interactions.extend(t1['Human'])
    word_cloud_bio.extend(t1['Bio'])
for i in range(10):
    print(Username_list_interactions[i]," ",interactions_top10[i])
upvotes_top10=upvotes_top10.astype(str).astype(float).tolist()
interactions_top10=interactions_top10.astype(str).astype(float).tolist()
# + [markdown] id="oiCiUeDT_DGT"
# Graph Plotting - Upvotes
# + id="buW8-jZ_1Nve" colab={"base_uri": "https://localhost:8080/", "height": 383} outputId="8b5b80e3-d9b1-4604-e070-3c4e1ee6f4c3"
graph=plt.bar(Username_list_upvotes,upvotes_top10)
plt.xticks(color='yellow', rotation=270, horizontalalignment='right')
#red(Verified Human) green(Verified not human) blue(not verified Human) black(not verified not human)
handles= [plt.Rectangle((0,0),1,1, color=x) for x in ['red','green','blue','black']]
labels=['Verified Human','Verified Not Human','Not Verified Human','Not Verified Not Human']
# Same colour scheme as Question 1: (Verified, Human) flag combination.
for i in range(10):
    if (Verified_list_upvotes[i]==True and Human_list_upvotes[i]==True):
        graph[i].set_color('red')
    elif (Verified_list_upvotes[i]==True):
        graph[i].set_color('green')
    elif (Human_list_upvotes[i]==True):
        graph[i].set_color('blue')
    else:
        graph[i].set_color('black')
plt.title('Question 2-Upvotes')
plt.xlabel('UserName')
plt.ylabel('Upvotes')
plt.legend(handles,labels)
# + [markdown] id="tClS4X2I_Lol"
# Graph Plotting - Interactions
# + id="mL6y0eDs3Ry8" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="165c2eec-5fdb-406f-f562-e9d0864f1137"
graph=plt.bar(Username_list_interactions,interactions_top10)
plt.xticks(color='yellow', rotation=270, horizontalalignment='right')
#red(Verified Human) green(Verified not human) blue(not verified Human) black(not verified not human)
handles= [plt.Rectangle((0,0),1,1, color=x) for x in ['red','green','blue','black']]
labels=['Verified Human','Verified Not Human','Not Verified Human','Not Verified Not Human']
for i in range(10):
    if (Verified_list_interactions[i]==True and Human_list_interactions[i]==True):
        graph[i].set_color('red')
    elif (Verified_list_interactions[i]==True):
        graph[i].set_color('green')
    elif (Human_list_interactions[i]==True):
        graph[i].set_color('blue')
    else:
        graph[i].set_color('black')
plt.title('Question 2-Interactions')
plt.xlabel('UserName')
plt.ylabel('Interactions')
plt.legend(handles,labels)
# + [markdown] id="5RRJYiFh4pqp"
# # Mentions
# + id="4Y6_3DCZKFzW"
dict_mentions={} #dictionary contains id as keys and no of mentions as values
# Each 'At' cell looks like "{username: 'id', ...}"; strip the braces, split
# on ", ", then on ": " to isolate the quoted user id (element [1]).
for i in df['At']:
    i=i[1:-1]
    if(i==""):
        continue
    temp_list_mentions=i.split(", ")
    for j in range(len(temp_list_mentions)):
        temp_id=temp_list_mentions[j].split(": ") #this is done since username is given as key and userId given as value
        if (temp_id[1] in dict_mentions.keys()): #I am considering only userIds and adding to dictionary then doing the count on how many times that id occured
            temp=dict_mentions.get(temp_id[1])
            dict_mentions[temp_id[1]]=temp+1
        else:
            dict_mentions[temp_id[1]]=1
#sorting done for dictionary
sorted_tuples_mentions = sorted(dict_mentions.items(), key=lambda item: item[1],reverse=True)
sorted_dict_mentions = {k: v for k, v in sorted_tuples_mentions}#dictionary with id and mentions sorted in decreasing order according to frequecy
id_mentions_top10=np.array(list(sorted_dict_mentions.keys())[0:10]) #contains id of top 10
mentions_top10=np.array(list(sorted_dict_mentions.values())[0:10]) #contains mentions of top 10
Username_list_mentions=[]
Verified_list_mentions=[]
Human_list_mentions=[]
# [1:-1] strips the quote characters still wrapping each stored id.
for i in range(10):
    t=df_user_data[df_user_data['Id']==id_mentions_top10[i][1:-1]]
    Username_list_mentions.extend(t['Username'])
    Verified_list_mentions.extend(t['Verified'])
    Human_list_mentions.extend(t['Human'])
#mentions_top10=upvotes_top10.astype(str).astype(float).tolist()
# + [markdown] id="Wo7oE2MDARz-"
# Graph Plotting - Mentions
# + id="bNR15epvARb-" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="d1ccf085-b610-4f8e-ad1d-5267173751f4"
graph=plt.bar(Username_list_mentions,mentions_top10)
plt.xticks(color='yellow', rotation=270, horizontalalignment='right')
#red(Verified Human) green(Verified not human) blue(not verified Human) black(not verified not human)
handles= [plt.Rectangle((0,0),1,1, color=x) for x in ['red','green','blue','black']]
labels=['Verified Human','Verified Not Human','Not Verified Human','Not Verified Not Human']
for i in range(10):
    if (Verified_list_mentions[i]==True and Human_list_mentions[i]==True):
        graph[i].set_color('red')
    elif (Verified_list_mentions[i]==True):
        graph[i].set_color('green')
    elif (Human_list_mentions[i]==True):
        graph[i].set_color('blue')
    else:
        graph[i].set_color('black')
plt.title('Question 2-Mentions')
plt.xlabel('UserName')
plt.ylabel('Mentions')
plt.legend(handles,labels)
# + [markdown] id="HATqZ7jSjXxE"
# # Q2 B
# + id="bwKJzDmORUXz"
#data cleaning done to remove nan=no bio, websites,mentions,urls,short word of length=2 and hashtags
#written .org,.com specifically according to given dataset accroding to which the data preprocessing is done
#data cleaning done to remove nan=no bio, websites,mentions,urls,short word of length=2 and hashtags
#written .org,.com specifically according to given dataset accroding to which the data preprocessing is done
def data_cleaning(temp_str):
    """Return 1 if *temp_str* should be kept for the word cloud, else 0.

    Dropped tokens: shorter than 3 characters, the literal 'nan' (missing
    bio), hashtags (#...), mentions (@...), and URL-like tokens longer than
    4 characters containing 'http', '.com' or '.org'.
    Idiom fix: the original compared ``temp_str[0:]`` (a full-copy slice)
    to "nan" — the slice was pointless and is removed.
    """
    if len(temp_str) < 3 or temp_str == "nan" or temp_str[0] in '#@':
        return 0
    if len(temp_str) > 4 and ("http" in temp_str or ".com" in temp_str or ".org" in temp_str):
        return 0
    return 1
# + id="2S-Wz_JpaRkG"
# Build one big space-separated string from the top-10 users' bios,
# lower-cased and filtered through data_cleaning().
comment_words = ''
stopwords = set(STOPWORDS)
# iterate through the csv file
for val in word_cloud_bio:
    # typecaste each val to string
    val = str(val)
    val=val.replace("\\n"," ")
    val=val.replace("\\n\n"," ")
    # split the value
    tokens_temp = val.split()
    tokens=[]
    # Converts each token into lowercase
    for i in range(len(tokens_temp)):
        tokens_temp[i] = tokens_temp[i].lower()
        t=data_cleaning(tokens_temp[i])
        if(t==1):
            tokens.append(tokens_temp[i])
    comment_words += " ".join(tokens)+" "
# print(comment_words)
# + [markdown] id="MVpi2pajDMOd"
# Word-Cloud (For top 10 user bios according to most no of interactions)
# + id="aM95447lDJZF" colab={"base_uri": "https://localhost:8080/", "height": 607} outputId="b5e29bc9-41ba-45bd-950f-b9bb06d8da82"
wordcloud = WordCloud(width = 800, height = 800,
                background_color ='white',
                stopwords = stopwords,
                min_font_size = 10).generate(comment_words)
# plot the WordCloud image
plt.figure(figsize = (8, 8), facecolor = None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad = 0)
plt.show()
# + [markdown] id="wBJpjZtoXziJ"
# # Q3
# + [markdown] id="ptBLkMQ3Q01p"
# So basically first I added every posts body to "word_cloud_body" as strings. Now loop over it. Then replaced "\n" and "\n\n" to " ". Then split the whole string and started looping over that string word by word.
# Then calling the data cleaning function which will tell whether to include that word for word cloud or not. If returned 1 then include it else exclude.
# If included then words are added to "tokens" which are joined to "comment_words". Also for the next part of the question a dictionary is made which stores the word and its frequency in dict_word_count
# + id="BIea5qpvX3J7"
# Collect every post body, then tokenise/filter exactly like the bio cloud,
# additionally counting word frequencies for the top-10 table below.
word_cloud_body=[]
count=0
for i in df["Body"]:
    word_cloud_body.append(i)
comment_words = ''
stopwords = set(STOPWORDS)
dict_word_count={}
# iterate through the csv file
for val in word_cloud_body:
    # typecaste each val to string
    val = str(val)
    val=val.replace("\\n"," ")
    val=val.replace("\\n\n"," ")
    # split the value
    tokens_temp = val.split()
    tokens=[]
    for i in range(len(tokens_temp)):
        tokens_temp[i] = tokens_temp[i].lower()
        # NOTE(review): replace() removes "now"/"will" as substrings of any
        # word, not just whole words — e.g. "willing" becomes " ing".
        tokens_temp[i] = tokens_temp[i].replace("now"," ")
        tokens_temp[i] = tokens_temp[i].replace("will"," ") #will could be name or future tense that why removed it because of non clearity
        t=data_cleaning(tokens_temp[i])
        if(t==1):
            if (tokens_temp[i] in dict_word_count.keys()):
                temp_count=dict_word_count.get(tokens_temp[i])
                dict_word_count[tokens_temp[i]]=temp_count+1
            else:
                dict_word_count[tokens_temp[i]]=1
            tokens.append(tokens_temp[i])
    comment_words += " ".join(tokens)+" "
# + [markdown] id="Qa0THC9XFOij"
# Word Cloud (For post Body)
# + id="qaFdhnFKFOJ0" colab={"base_uri": "https://localhost:8080/", "height": 607} outputId="a568c943-1b3f-4528-d574-8596def3db01"
wordcloud = WordCloud(width = 800, height = 800,
                background_color ='white',
                stopwords = stopwords,
                min_font_size = 10).generate(comment_words)
# plot the WordCloud image
plt.figure(figsize = (8, 8), facecolor = None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad = 0)
plt.show()
# + id="XhiV_VMc7SEq" colab={"base_uri": "https://localhost:8080/"} outputId="27898856-ca72-4a76-e223-976b93fe7068"
# Drop stopwords, then pull the 10 highest-frequency words.
final_dict = {key: dict_word_count[key] for key in dict_word_count if key not in stopwords}
from heapq import nlargest
high = nlargest(10, final_dict, key = final_dict.get)
words_q3=[]
words_occurence_q3=[]
for val in high:
    words_q3.append(val)
    words_occurence_q3.append(final_dict.get(val))
for i in range(10):
    print(words_q3[i]," ",words_occurence_q3[i])
# + [markdown] id="8lxTedLIPFRZ"
# Graph Plotting (top 10 words based on occurence)
# + id="o9uUAWHhPNAs" colab={"base_uri": "https://localhost:8080/", "height": 349} outputId="17b9c309-2276-4193-9697-33edb2a6258f"
graph=plt.bar(words_q3,words_occurence_q3)
plt.xticks(color='yellow', rotation=270, horizontalalignment='right')
plt.title('Question 3-Words based on Occurence')
plt.xlabel('Words')
plt.ylabel('Occurences')
# + [markdown] id="qVb7SrAzeKEQ"
# # Q3 b - no of occurences
# + id="t8Z30vT9Z3ne"
# Hashtag statistics: hashtags_count = occurrences per tag;
# hashtags_length = cumulative post length (in tokens) over posts using it,
# so length/count below gives the average post length per hashtag.
hashtags=[]
for i in df["Body"]:
    hashtags.append(i)
hashtags_count={}
hashtags_length={}
for val in hashtags:
    val = str(val)
    val=val.replace("\\n"," ")
    val=val.replace("\\n\n"," ")
    # split the value
    tokens_temp = val.split()
    for i in range(len(tokens_temp)):
        if(len(tokens_temp[i])>1):
            if(tokens_temp[i][0]=='#'):
                if (tokens_temp[i] in hashtags_count.keys()):
                    temp_count=hashtags_count.get(tokens_temp[i])
                    hashtags_count[tokens_temp[i]]=temp_count+1
                    temp_length=hashtags_length.get(tokens_temp[i])
                    hashtags_length[tokens_temp[i]]=temp_length+len(tokens_temp)
                else:
                    hashtags_count[tokens_temp[i]]=1
                    hashtags_length[tokens_temp[i]]=len(tokens_temp)
sorted_tuples_hashtags = sorted(hashtags_count.items(), key=lambda item: item[1],reverse=True)
sorted_dict_hashtags = {k: v for k, v in sorted_tuples_hashtags}#dictionary with id and mentions sorted in decreasing order according to frequecy
hashtags_top10=np.array(list(sorted_dict_hashtags.keys())[0:10]) #contains id of top 10
hashtags_count_top10=np.array(list(sorted_dict_hashtags.values())[0:10]) #contains mentions of top 10
# + [markdown] id="h3hEsoABT0jp"
# Graph-Plotting (Occurence of hashtags)
# + id="m3MOC6bpT1A4" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="e09419e6-7c49-411b-bdf3-10af1cc15258"
graph=plt.bar(hashtags_top10,hashtags_count_top10)
plt.xticks(color='yellow', rotation=270, horizontalalignment='right')
plt.title('Question 3-Hashtags based on Occurence')
plt.xlabel('Hashtags')
plt.ylabel('Occurences')
# + [markdown] id="H2drHE-4wjrk"
# # Q3 b - avg post length
# + id="Zuh7S9YuvwC_"
# Average post length per hashtag = total tokens / number of posts.
hashtags_length_count_ratio={}
for i in hashtags_count:
    temp_count=hashtags_count[i]
    temp_length=hashtags_length[i]
    hashtags_length_count_ratio[i]=temp_length/temp_count
sorted_tuples_hashtags_ratio = sorted(hashtags_length_count_ratio.items(), key=lambda item: item[1],reverse=True)
sorted_dict_hashtags_ratio = {k: v for k, v in sorted_tuples_hashtags_ratio}#dictionary with id and mentions sorted in decreasing order according to frequecy
hashtags_top10=np.array(list(sorted_dict_hashtags_ratio.keys())[0:10]) #contains id of top 10
hashtags_ratio_top10=np.array(list(sorted_dict_hashtags_ratio.values())[0:10]) #contains mentions of top 10
# + [markdown] id="WGhSk2Dx3K8R"
# Graph Plotting (Hashtags based on average length of post)
# + id="ADdmKPYn3Li6" colab={"base_uri": "https://localhost:8080/", "height": 426} outputId="7a49fead-9fe4-4cef-cc02-5be7f6a792d6"
graph=plt.bar(hashtags_top10,hashtags_ratio_top10)
plt.xticks(color='yellow', rotation=270, horizontalalignment='right')
plt.title('Question 3-Hashtags based on Average length of Post')
plt.xlabel('Hashtags')
plt.ylabel('Average Length')
# + [markdown] id="0TL5buIF3tlx"
# # Q-4 Begins
# + [markdown] id="HOKawdcKmruZ"
# Content generation on Parler.
# + id="WirdCJmO3w5s" colab={"base_uri": "https://localhost:8080/", "height": 276} outputId="0107754a-9c1c-4f03-b2f8-348f23a9cc56"
# !pip install chart-studio
import chart_studio.plotly as py
import plotly.graph_objs as go
# Sort posts chronologically; the reset row index then doubles as the
# cumulative number of posts created up to each timestamp.
h_dataframe=df.sort_values(by='CreatedAt', ignore_index=True)
# NOTE(review): format='%Y%m%d%H%M%S' assumes compact timestamps; rows that
# do not match are silently turned into NaT by errors='coerce' — confirm the
# 'CreatedAt' format against the raw data.
time=pd.to_datetime(h_dataframe['CreatedAt'], format='%Y%m%d%H%M%S', errors='coerce')
posts=np.asarray(h_dataframe.index)
posts_df=pd.DataFrame()
posts_df['Date']=time
posts_df['NoOfPosts']=posts
# Interactive cumulative-posts timeline written to Posts.html.
data=[go.Scatter(x=time,y=posts, name='Posts created')]
from plotly.offline import plot
plot(data, filename='Posts.html')
# + [markdown] id="2DJ48M5Rm6oq"
# User account creation on Parler.
# + id="XPYLUWmWm-AG" colab={"base_uri": "https://localhost:8080/", "height": 188} outputId="ef8a4afb-06cb-41d1-d7c0-2d0b0f44af19"
# !pip install chart-studio
import chart_studio.plotly as py
import plotly.graph_objs as go
# Same idea as the posts timeline above, but for user-account creation dates:
# sort by join date so the row index counts cumulative accounts.
h_dataframe=df_user.sort_values(by='Joined', ignore_index=True)
# NOTE(review): same compact-timestamp assumption as above; unparseable rows
# become NaT via errors='coerce' — confirm the 'Joined' format.
time=pd.to_datetime(h_dataframe['Joined'], format='%Y%m%d%H%M%S', errors='coerce')
users=np.asarray(h_dataframe.index)
users_df=pd.DataFrame()
users_df['Date']=time
users_df['NoOfPosts']=users
# Interactive cumulative-accounts timeline written to Users.html.
data=[go.Scatter(x=time,y=users, name='Users account created')]
from plotly.offline import plot
plot(data, filename='Users.html')
| code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
from datetime import datetime
from matplotlib import pyplot as plt
from matplotlib import rcParams
# %matplotlib inline
# -
import numpy as np
import pandas as pd
import datetime as dt
# # Reflect Tables into SQLAlchemy ORM
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
# create engine to hawaii.sqlite (echo=False suppresses SQL statement logging)
engine = create_engine("sqlite:///Resources/hawaii.sqlite",echo=False)
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# View all of the classes that automap found
Base.classes.keys()
# Save references to each table (automap class names mirror the table names)
Measurement=Base.classes.measurement
Station=Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
# # Exploratory Precipitation Analysis
# +
# Find the most recent date in the data set.
recentdate=session.query(func.max(Measurement.date)).first()
recentdate
# +
# Pull the last 12 months of precipitation readings.
# NOTE(review): '2016-08-22' is hard-coded from the recentdate result above;
# it will not track a refreshed database — consider deriving it from recentdate.
p=session.query(Measurement.date,Measurement.prcp).\
    filter(Measurement.date > '2016-08-22').all()
datePrcp_df=pd.DataFrame(data=p,columns=["date","prcp"])
# Average readings of all stations per day, then order chronologically.
dategroupPrcp=datePrcp_df.groupby(["date"]).mean()
dategroupPrcp=dategroupPrcp.reset_index()
dategroupPrcp=dategroupPrcp.sort_values(by="date",ascending=True)
dategroupPrcp.to_csv("outputs/average_prcp.csv",
    encoding="utf-8", index=False, header=True)
dategroupPrcp
# -
# -
# Daily averages as plain lists for matplotlib.
listDate=dategroupPrcp["date"].tolist()
listPrcp=dategroupPrcp["prcp"].tolist()
dateList=dategroupPrcp["date"].tolist()
# Keep every 15th date as an x-axis tick label so the axis stays readable.
# FIX: the original loop variable was named `dt`, which shadowed the
# `import datetime as dt` alias from the top of the file; slicing produces
# the same list (indices 0, 15, 30, ...) without the loop or the shadowing.
ex_dt=dateList[::15]
# +
#plt.figure()
#plt.plot(listDate,listPrcp)
# Global font settings for the precipitation bar chart.
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Verdana']
rcParams['font.size'] = 8
fig, ax = plt.subplots(figsize=(10,7))
ax.bar(listDate, listPrcp, label="precipitation")
plt.legend(loc="upper right")
plt.xlabel("Date")
plt.ylabel("Inches")
ax.xaxis.set_tick_params(pad=1)
ax.yaxis.set_tick_params(pad=5)
# Show only every 15th date (ex_dt) to avoid overlapping tick labels.
plt.xticks(ex_dt, rotation='vertical')
plt.yticks([0.0,0.5,1.0,1.5,2.0,2.5])
# NOTE(review): the `b=` keyword of Axes.grid was renamed `visible=` and the
# old name is removed in matplotlib >= 3.6 — confirm the pinned version.
ax.grid(b=True, color='grey', linestyle='-.', linewidth=0.5, alpha=0.2)
#plt.grid(True, linewidth= 1, linestyle="--")
plt.show()
# +
# Use Pandas to calculate the summary statistics for the precipitation data
dategroupPrcp.describe()
# -
# # Exploratory Station Analysis
# Design a query to calculate the total number of stations in the dataset
stations=session.query(func.count(Station.station)).all()
stations
# +
# Design a query to find the most active stations (i.e. what stations have the most rows?)
# List the stations and the counts in descending order.
activer=session.query(Station.name,func.count(Measurement.date))\
    .join(Measurement,Measurement.station==Station.station)\
    .group_by(Station.name)\
    .order_by(func.count(Measurement.date).desc())
activer_df=pd.DataFrame(data=activer,columns=["station","row_number"])
activer_df.to_csv("outputs/stations.csv",
    encoding="utf-8", index=False, header=True)
# NOTE(review): iterating `activer` here re-executes the query a second time
# (it was already consumed to build activer_df above); iterating activer_df
# would avoid the extra round trip.
for i,k in activer:
    print(f"Station Name: {i}, Row_Number: {k}")
# -
# +
# Using the most active station id from the previous query,
#calculate the lowest, highest, and average temperature.
# Look up the station id for the most active station by its name
# ('WAIHEE 837.5, HI US' was the first row of the previous result).
most_active_station=session.query(Station.station)\
    .filter(Station.name == 'WAIHEE 837.5, HI US')\
    .all()
for i in most_active_station:
    print(i)
# +
# Using the most active station id from the previous query, calculate the lowest, highest, and average temperature.
# -
# Lowest recorded temperature at station USC00519281 (id found above).
min_temperature_most_active_station=session.query(func.min(Measurement.tobs))\
    .filter(Measurement.station=="USC00519281")\
    .all()
min_temperature_most_active_station
# +
# Highest recorded temperature at the same station.
max_temperature_most_active_station=session.query(func.max(Measurement.tobs))\
    .filter(Measurement.station=="USC00519281")\
    .all()
max_temperature_most_active_station
# -
# Average temperature over all observations at the same station.
avg_temperature_most_active_station=session.query(func.avg(Measurement.tobs))\
    .filter(Measurement.station=="USC00519281")\
    .all()
avg_temperature_most_active_station
# Each query returns [(value,)]; unwrap to scalars for the summary list.
avg_temperature_most_active_station[0][0]
lst=[min_temperature_most_active_station[0][0],avg_temperature_most_active_station[0][0],max_temperature_most_active_station[0][0]]
lst
# +
# Using the most active station id
# Query the last 12 months of temperature observation data
#for this station and plot the results as a histogram
twelwe_month_temperature=session.query(Measurement.date,func.avg(Measurement.tobs))\
    .filter(Measurement.date>'2016-08-22')\
    .filter(Measurement.station=="USC00519281")\
    .group_by(Measurement.date)
# -
df=pd.DataFrame(data=twelwe_month_temperature,columns=["date","temperature"])
df.set_index('date',inplace=True)
df
# 12-bin histogram of the daily temperatures for the most active station.
df.plot(kind='hist',bins=12)
plt.title("Station: USC00519281")
plt.xlabel("Temperature")
plt.savefig("outputs/temperature_frequencies_active_station.png")
plt.show()
# Restore 'date' as a proper datetime column for month extraction below.
df=df.reset_index()
df['date']=pd.to_datetime(df['date'])
df
# Add a month column and export the year of observations.
df["month"]=df['date'].dt.month
df.to_csv("outputs/last_year.csv",
    encoding="utf-8", index=False, header=True)
df
# Mean temperature per calendar month, shown as a bar chart.
say=df.loc[:,["month","temperature"]].groupby(["month"]).mean()
say
say.plot(kind='bar')
plt.show()
# # Close session
# Close Session
session.close()
| climate_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import theano
import theano.tensor as T
# +
# Minimal Theano example: compile f(x, y) = x^2 + y^2 for two double scalars.
x = T.dscalar("x")
y = T.dscalar("y")
f = theano.function([x, y], x ** 2 + y ** 2)
f(0, 0)
# +
# Toy event data: seven 3-component vectors used to exercise the Theano
# graph built in the next cell.
e = np.array([[0, 0, 0], [1, 1, 1], [1, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1], [0, -1, 1]], dtype=float)
s = 0.05  # Gaussian width, mirrors the `sigma` shared variable below
# Candidate direction vector, normalised to unit length.
n_ = np.array([1, 1, 1])
# FIX: the Euclidean norm is sqrt(sum(n_i * n_i)); the original used
# (n_ ** n_).sum(), which only coincides with the dot product for this
# particular [1, 1, 1] input and is wrong for any other vector.
n_ = n_ / np.sqrt((n_ * n_).sum())
(n_ * n_).sum()  # sanity check: should be 1.0
# print() is valid in both Python 2 (parenthesised expression) and Python 3.
print(e)
# +
# Build a Theano graph that, for direction angles (theta, phi), projects each
# event row relative to the candidate directions and scores the residuals
# with a Gaussian kernel of width sigma.
event = theano.shared(e, "event")
theta = T.dvector("theta")
phi = T.dvector("phi")
# Spherical angles -> direction components, one entry per (theta, phi) pair.
nx = T.cos(theta)
ny = T.sin(theta) * T.sin(phi)
nz = T.sin(theta) * T.cos(phi)
# NOTE(review): T.stack(nx, ny, nz) uses the legacy positional signature;
# newer Theano expects T.stack([nx, ny, nz], axis=0) — confirm the version.
n = T.stack(nx, ny, nz).T
sigma = theano.shared(0.05, 'sigma')
scalar = T.dot(event, n.T)
# axes=([], []) makes tensordot an outer product of `scalar` and `n`.
project = event - T.tensordot(scalar, n, axes=([], []))
delta_sq = T.sum(project * project, axis=1)
r = T.sum(T.exp(-delta_sq / sigma))
#rr_hess = theano.gradient.hessian(r, [theta, phi])
# Compile only the projection part of the graph for inspection.
rr = theano.function([phi, theta], project)
# -
# Evaluate the projection for two direction candidates.
rr([0.0, 0.1], [0.0, 0.1])
# FIX: the original line was `s(1, 2)`, but `s` is the float 0.05 defined
# above, so the call always raised TypeError. `f` (the compiled x**2 + y**2
# function from the first cell) appears to be the intended callable — confirm.
f(1, 2)
# Shared variables can be re-pointed at new data without recompiling.
event.set_value(np.ones((3,2)))
event.get_value()
# +
# Hessian demo: y = sum((x - a)^2) for a matrix input x and shared scalar a.
x = T.dmatrix("x")
a = theano.shared(1.0, "a")
y = T.sum((x - a) ** 2)
# NOTE(review): theano.gradient.hessian documents vector (1-d) inputs;
# confirm it accepts a dmatrix argument here.
dy = theano.function([x], theano.gradient.hessian(y, x))
# -
# Pretty-print the compiled graph of the Hessian output.
from theano import pp
pp(dy.maker.fgraph.outputs[0])
| theano.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: me
# language: python
# name: me
# ---
# +
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# Notebook-wide settings: silence warnings and render floats with thousands
# separators and four decimals.
warnings.filterwarnings('ignore')
pd.options.display.float_format = '{:20,.4f}'.format
# # Country-level indicator variables
#
# Load and reshape the selected country-level indicator datasets and create aggregated metaindicators for the world economy.
# Aggregation weighting heuristics
# - All constant 2010 US$ indicators are divided by population to arrive at a per-capita measure rather than an absolute dollar value.
# - All GDP-based ratios are multiplied by GDP and then divided by population to arrive at a per-capita measure.
# - Population ratios are multiplied with the population and divided by world population after aggregation.
# - All percentages are transformed to ratios by dividing with 100.
# Population per country/year, reshaped to a (date, country)-indexed frame.
pop_df = pd.read_csv('data/model_one/people.csv', sep=';')
pop_pivot_df = pop_df.pivot_table(index='date', columns=['country', 'indicator'], values='value').drop_duplicates()
population_df = pop_pivot_df.iloc[:, pop_pivot_df.columns.get_level_values(1) == 'Population, total'].stack('country').fillna(0.00)
population_df['population'] = population_df['Population, total']
# GDP in constant 2010 US$, same reshaping.
gdp_read_df = pd.read_csv('data/model_one/policy.csv', sep=';')
gdp_pivot_df = gdp_read_df.pivot_table(index='date', columns=['country', 'indicator'], values='value').drop_duplicates()
gdp_df = gdp_pivot_df.iloc[:, gdp_pivot_df.columns.get_level_values(1) == 'GDP (constant 2010 US$)'].stack('country').fillna(0.00)
gdp_df['gdp'] = gdp_df['GDP (constant 2010 US$)']
# GNI in constant 2010 US$ (read from the same policy.csv as GDP; the
# duplicate read keeps the three load paths symmetric).
gni_read_df = pd.read_csv('data/model_one/policy.csv', sep=';')
gni_pivot_df = gni_read_df.pivot_table(index='date', columns=['country', 'indicator'], values='value').drop_duplicates()
gni_df = gni_pivot_df.iloc[:, gni_pivot_df.columns.get_level_values(1) == 'GNI (constant 2010 US$)'].stack('country').fillna(0.00)
gni_df['gni'] = gni_df['GNI (constant 2010 US$)']
# Countries highlighted in the per-country plots below.
countries_of_interest =['Singapore', 'Switzerland', 'Netherlands', 'Japan', 'France', 'United States', 'China',
                        'India', 'Brazil', 'Colombia', 'Indonesia', 'Senegal', 'Ghana']
# ## People
# Household final consumption per capita, used by the expenditure features.
consumption_df = pd.read_csv('data/model_one/trade.csv', sep=';')
consumption_pivot_df = consumption_df.pivot_table(index='date', columns=['country', 'indicator'], values='value').drop_duplicates()
household_df = consumption_pivot_df.iloc[:, consumption_pivot_df.columns.get_level_values(1) == 'Households and NPISHs Final consumption expenditure per capita (constant 2010 US$)'].stack('country')
people_df = pd.read_csv('data/model_one/people.csv', sep=';')
peoples_pivot_df = people_df.pivot_table(index='date', columns=['country', 'indicator'], values='value').drop_duplicates()
# Join the people indicators with GDP, GNI and household consumption into one
# (date, country)-indexed frame.
peoples_index_df = pd.concat([peoples_pivot_df.stack('country'), gdp_df, gni_df, household_df], axis=1).sort_index().fillna(0.00)
peoples_index_df.columns
# Derived per-country people features computed below.
peoples_features = ['newborns', 'birth deaths', 'life expectancy', 'rural population', 'urban population',
                    'workers', 'unemployed', 'overweight', 'population density', 'housing expenditure',
                    'health expenditure', 'food expenditure', 'transportation expenditure', 'education expenditure',
                    'leisure expenditure']
# #### People features
#
# - **newborns**: number of newborn infants.
# - **birth deaths**: number of deaths in live births.
# - **life expectancy**: life expectancy at birth in years.
# - **rural population**: number of people living in a rural setting.
# - **urban population**: number of people living in an urban setting.
# - **workers**: size of the nation's labor force.
# - **unemployed**: number of unemployed.
# - **overweight**: number adults in the population that are overweight.
# - **population density**: 1000 persons per sq. km of land area
# - **housing expenditure**: expenditure on housing in 2010 USD.
# - **health expenditure**: expenditure on health in 2010 USD.
# - **food expenditure**: expenditure on food in 2010 USD.
# - **transportation expenditure**: expenditure on transportation in 2010 USD.
# - **education expenditure**: expenditure on education in 2010 USD.
# - **leisure expenditure**: expenditure on leisure in 2010 USD.
# Derived people features (see the markdown above for definitions).
peoples_index_df['newborns'] = (peoples_index_df['Population, total'] / 1000) * peoples_index_df['Birth rate, crude (per 1,000 people)']
# FIX: the infant mortality indicator is expressed per 1,000 live births, so
# the rate must be divided by 1000, not 100 (the original overstated birth
# deaths by a factor of ten).
peoples_index_df['birth deaths'] = peoples_index_df['newborns'] * (peoples_index_df['Mortality rate, infant (per 1,000 live births)'] / 1000)
peoples_index_df['life expectancy'] = peoples_index_df['Life expectancy at birth, total (years)']
peoples_index_df['workers'] = peoples_index_df['Labor Force, Persons, Number of']
peoples_index_df['unemployed'] = peoples_index_df['Unemployment, Persons, Number of']
# Scaled to 1000 persons per sq. km to keep magnitudes comparable.
peoples_index_df['population density'] = peoples_index_df['Population density (people per sq. km of land area)'] / 1000
peoples_index_df['rural population'] = (peoples_index_df['Rural population (% of total population)'] / 100) * peoples_index_df['Population, total']
peoples_index_df['urban population'] = (peoples_index_df['Urban population (% of total)'] / 100) * peoples_index_df['Population, total']
# NOTE(review): uses the 15-64 population share as the 'adults' base for the
# overweight prevalence — confirm this approximation is intended.
peoples_index_df['overweight'] = (peoples_index_df['Population ages 15-64 (% of total)'] / 100) * (peoples_index_df['Prevalence of overweight (% of adults)'] / 100) * peoples_index_df['Population, total']
# Expenditure features: COICOP weight (%) x consumption per capita x population.
peoples_index_df['housing expenditure'] = (peoples_index_df['Housing, Water, Electricity, Gas and Other Fuels, Weight, Percent'] / 100) * peoples_index_df['Households and NPISHs Final consumption expenditure per capita (constant 2010 US$)'] * peoples_index_df['Population, total']
peoples_index_df['health expenditure'] = (peoples_index_df['Health, Weight, Percent'] / 100) * peoples_index_df['Households and NPISHs Final consumption expenditure per capita (constant 2010 US$)'] * peoples_index_df['Population, total']
peoples_index_df['food expenditure'] = (peoples_index_df['Food and non-alcoholic beverages, Weight, Percent'] / 100) * peoples_index_df['Households and NPISHs Final consumption expenditure per capita (constant 2010 US$)'] * peoples_index_df['Population, total']
peoples_index_df['transportation expenditure'] = (peoples_index_df['Transport, Weight, Percent'] / 100) * peoples_index_df['Households and NPISHs Final consumption expenditure per capita (constant 2010 US$)'] * peoples_index_df['Population, total']
peoples_index_df['education expenditure'] = (peoples_index_df['Education, Weight, Percent'] / 100) * peoples_index_df['Households and NPISHs Final consumption expenditure per capita (constant 2010 US$)'] * peoples_index_df['Population, total']
peoples_index_df['leisure expenditure'] = (peoples_index_df['Recreation and culture, Weight, Percent'] / 100) * peoples_index_df['Households and NPISHs Final consumption expenditure per capita (constant 2010 US$)'] * peoples_index_df['Population, total']
peoples_features_df = peoples_index_df[peoples_features]
# +
# peoples_plots_df = peoples_features_df[['rural population']].unstack()
# fig, ax = plt.subplots(figsize=(15,7))
# peoples_plots_df.iloc[:, peoples_plots_df.columns.get_level_values(1).isin(countries_of_interest)].dropna().plot(kind='line', ax=ax)
# plt.legend(loc = 'upper center', bbox_to_anchor=(0.5, -0.05), fancybox = True, shadow = True)
# ax.set_title('Life expectancy');
# -
# ## Technology
tech_df = pd.read_csv('data/model_one/tech.csv', sep=';')
tech_pivot_df = tech_df.pivot_table(index='date', columns=['country', 'indicator'], values='value').drop_duplicates()
# Join tech indicators with population, GDP and GNI per (date, country).
tech_index_df = pd.concat([tech_pivot_df.stack('country'), population_df, gdp_df, gni_df], axis=1).sort_index().fillna(0.00)
tech_index_df.columns
# Derived per-country technology features computed below.
tech_features = ['electricity access', 'electricity consumption', 'co2 emissions', 'broadband subscriptions',
                 'mobile subscriptions', 'manufacturing value added', 'high-tech value added', 'r and d spend',
                 'cereal yield']
# #### Tech features
#
# - **electricity access**: number of people with electricity access.
# - **electricity consumption**: MWh consumed per year.
# - **co2 emissions**: metric tons of CO2 emitted.
# - **broadband subscriptions**: number of broadband subscriptions.
# - **mobile subscriptions**: number of mobile subscriptions.
# - **manufacturing value added**: manufacturing value added in 2010 USD.
# - **high-tech value added**: high-tech industry value added in 2010 USD.
# - **r and d spend**: country r and d spend.
# - **cereal yield**: 1000 kg per hectare.
# Percent-of-GDP indicators are converted to 2010 USD; per-capita and
# per-100-people indicators are scaled by population.
tech_index_df['manufacturing value added'] = ((tech_index_df['Manufacturing, value added (% of GDP)'] / 100) * tech_index_df['GDP (constant 2010 US$)'])
tech_index_df['high-tech value added'] = (((tech_index_df['Manufacturing, value added (% of GDP)'] / 100) * tech_index_df['GDP (constant 2010 US$)']) * (tech_index_df['Medium and high-tech Industry (including construction) (% manufacturing value added)'] / 100))
tech_index_df['electricity access'] = (tech_index_df['Access to electricity (% of population)'] / 100) * tech_index_df['Population, total']
tech_index_df['broadband subscriptions'] = tech_index_df['Fixed broadband subscriptions (per 100 people)'] * (tech_index_df['Population, total'] / 100)
tech_index_df['mobile subscriptions'] = tech_index_df['Mobile cellular subscriptions (per 100 people)'] * (tech_index_df['Population, total'] / 100)
tech_index_df['electricity consumption'] = (tech_index_df['Electric power consumption (kWh per capita)'] / 1000) * tech_index_df['Population, total']
tech_index_df['co2 emissions'] = tech_index_df['CO2 emissions (metric tons per capita)'] * tech_index_df['Population, total']
tech_index_df['r and d spend'] = (tech_index_df['Research and development expenditure (% of GDP)'] / 100) * tech_index_df['GDP (constant 2010 US$)']
tech_index_df['cereal yield'] = tech_index_df['Cereal yield (kg per hectare)'] / 1000
tech_features_df = tech_index_df[tech_features]
# Sanity plot: broadband adoption for the highlighted countries.
tech_plot_df = tech_features_df[['broadband subscriptions']].unstack()
fig, ax = plt.subplots(figsize=(15,7))
tech_plot_df.iloc[:, tech_plot_df.columns.get_level_values(1).isin(countries_of_interest)].dropna().plot(kind='line', ax=ax)
plt.legend(loc = 'upper center', bbox_to_anchor=(0.5, -0.05), fancybox = True, shadow = True)
ax.set_title('The new internet');
# ## Trade
trade_df = pd.read_csv('data/model_one/trade.csv', sep=';')
trade_pivot_df = trade_df.pivot_table(index='date', columns=['country', 'indicator'], values='value').drop_duplicates()
# Join trade indicators with population, GDP and GNI per (date, country).
trade_index_df = pd.concat([trade_pivot_df.stack('country'), population_df, gdp_df, gni_df], axis=1).sort_index().fillna(0.00)
trade_index_df.columns
# NOTE(review): 'food imports' is computed below and documented in the
# markdown, but is missing from this selection list, so it never reaches
# trade_features_df — confirm whether it should be added.
trade_features = ['imports of goods and services', 'exports of goods and services', 'energy imports',
                  'high-tech exports', 'food exports', 'services trade', 'trade', 'household consumption expenditure']
# #### Trade features
#
# - **imports of goods and services**: the imports of goods and services in 2010 USD.
# - **exports of goods and services**: the exports of goods and services in 2010 USD.
# - **energy imports**: percentage of energy used that was imported.
# - **high-tech exports**: percentage of manufactures consisting of high tech goods.
# - **food exports**: percentage of merchandise exports consisting of foodstuff.
# - **food imports**: percentage of merchandise imports consisting of foodstuff.
# - **services trade**: trade in services in 2010 USD.
# - **trade**: trade volumes in 2010 USD.
# - **household consumption expenditure**: household consumption expenditure in 2010 USD.
# Percentages become ratios; percent-of-GDP indicators become 2010 USD values.
trade_index_df['imports of goods and services'] = trade_index_df['Imports of goods and services (constant 2010 US$)']
trade_index_df['exports of goods and services'] = trade_index_df['Exports of goods and services (constant 2010 US$)']
trade_index_df['energy imports'] = trade_index_df['Energy imports, net (% of energy use)'] / 100
trade_index_df['high-tech exports'] = trade_index_df['High-technology exports (% of manufactured exports)'] / 100
trade_index_df['food exports'] = trade_index_df['Food exports (% of merchandise exports)'] / 100
trade_index_df['food imports'] = trade_index_df['Food imports (% of merchandise imports)'] / 100
trade_index_df['household consumption expenditure'] = trade_index_df['Households and NPISHs Final consumption expenditure per capita (constant 2010 US$)'] * trade_index_df['Population, total']
trade_index_df['services trade'] = (trade_index_df['Trade in services (% of GDP)'] / 100) * trade_index_df['GDP (constant 2010 US$)']
trade_index_df['trade'] = (trade_index_df['Trade (% of GDP)'] / 100) * trade_index_df['GDP (constant 2010 US$)']
trade_features_df = trade_index_df[trade_features]
# Sanity plot: services trade for the highlighted countries.
trade_plot_df = trade_features_df[['services trade']].unstack()
fig, ax = plt.subplots(figsize=(15,7))
trade_plot_df.iloc[:, trade_plot_df.columns.get_level_values(1).isin(countries_of_interest)].dropna().plot(kind='line', ax=ax)
plt.legend(loc = 'upper center', bbox_to_anchor=(0.5, -0.05), fancybox = True, shadow = True)
ax.set_title('The services trade');
# ## Policy
policy_df = pd.read_csv('data/model_one/policy.csv', sep=';')
policy_pivot_df = policy_df.pivot_table(index='date', columns=['country', 'indicator'], values='value').drop_duplicates()
# Join policy indicators with population per (date, country).
policy_index_df = pd.concat([policy_pivot_df.stack('country'), population_df], axis=1).sort_index().fillna(0.00)
policy_index_df.columns
# Derived per-country policy features computed below.
policy_features = ['bank capital to assets ratio', 'bank nonperforming loans', 'lending interest rate',
                   'real interest rate', 'gross domestic savings', 'broad money', 'government debt',
                   'government interest payments external debt', 'government tax revenue', 'government debt service',
                   'gdp', 'gni', 'inflation', 'listed companies']
# #### Policy features
#
# - **bank capital to assets ratio**: (sic), commercial banks.
# - **bank nonperforming loans**: commercial bank nonperforming loans percentage.
# - **lending interest rate**: average commercial bank lending interest rate.
# - **real interest rate**: interest rate in percentages.
# - **gross domestic savings**: gross domestic savings in 2010 USD.
# - **listed companies**: market capitalization of listed companies in 2010 USD.
# - **broad money**: amount of broad money present in the economy in 2010 USD.
# - **government debt**: amount of outstanding government debt in 2010 USD.
# - **government interest payments external debt**: interest payments on external debt in 2010 USD.
# - **tax revenue**: government tax revenue in 2010 USD.
# - **total debt service**: government total debt service in 2010 USD.
# - **gdp**: gross domestic product in 2010 USD.
# - **gni**: gross national product in 2010 USD.
# - **inflation**: annual inflation.
# Percentages become ratios; GDP/GNI-based ratios become 2010 USD values.
policy_index_df['bank capital to assets ratio'] = policy_index_df['Bank capital to assets ratio (%)'] / 100
policy_index_df['bank nonperforming loans'] = policy_index_df['Bank nonperforming loans to total gross loans (%)'] / 100
policy_index_df['lending interest rate'] = policy_index_df['Lending interest rate (%)'] / 100
policy_index_df['real interest rate'] = policy_index_df['Real interest rate (%)'] / 100
policy_index_df['inflation'] = policy_index_df['Inflation, GDP deflator (annual %)'] / 100
policy_index_df['gdp'] = policy_index_df['GDP (constant 2010 US$)']
policy_index_df['gni'] = policy_index_df['GNI (constant 2010 US$)']
policy_index_df['gross domestic savings'] = (policy_index_df['Gross domestic savings (% of GDP)'] / 100) * policy_index_df['GDP (constant 2010 US$)']
policy_index_df['broad money'] = (policy_index_df['Broad money (% of GDP)'] / 100) * policy_index_df['GDP (constant 2010 US$)']
policy_index_df['listed companies'] = (policy_index_df['Market capitalization of listed domestic companies (% of GDP)'] / 100) * policy_index_df['GDP (constant 2010 US$)']
policy_index_df['government debt'] = (policy_index_df['Central government debt, total (% of GDP)'] / 100) * policy_index_df['GDP (constant 2010 US$)']
policy_index_df['government interest payments external debt'] = (policy_index_df['Interest payments on external debt (% of GNI)'] / 100) * policy_index_df['GNI (constant 2010 US$)']
policy_index_df['government tax revenue'] = (policy_index_df['Tax revenue (% of GDP)'] / 100) * policy_index_df['GDP (constant 2010 US$)']
policy_index_df['government debt service'] = (policy_index_df['Total debt service (% of GNI)'] / 100) * policy_index_df['GNI (constant 2010 US$)']
policy_features_df = policy_index_df[policy_features]
# Sanity plot: market capitalization for the highlighted countries.
policy_plot_df = policy_features_df[['listed companies']].unstack()
fig, ax = plt.subplots(figsize=(15,7))
policy_plot_df.iloc[:, policy_plot_df.columns.get_level_values(1).isin(countries_of_interest)].dropna().plot(kind='line', ax=ax)
plt.legend(loc = 'upper center', bbox_to_anchor=(0.5, -0.05), fancybox = True, shadow = True)
ax.set_title('Corporate world');
# ## Aggregate the variables
#
# The indicator variables are mean-aggregated to create world-level values for each of the indicator variables.
# #### Join the four groups of metaindicators to create a single feature set
# One frame with every engineered feature per (date, country), plus population.
features_df = pd.concat([peoples_features_df, tech_features_df, trade_features_df, policy_features_df, population_df], axis=1).sort_index().fillna(0.00)
# +
# sorted(list(features_df.columns.get_level_values(0)))
# -
# #### Weigh and aggregate features at world-level
#
# * Average over the ratio-based macroindicators.
# * Sum all gni, gdp, and population-based features and divide them by world gni, gdp, and population respectively.
# Pure ratio/percentage indicators: averaged across countries per date.
ratio_based_features = ['bank capital to assets ratio', 'bank nonperforming loans', 'cereal yield', 'energy imports',
                        'food exports', 'high-tech exports', 'inflation', 'lending interest rate', 'life expectancy',
                        'population density', 'real interest rate']
ratio_avg_df = features_df[ratio_based_features].groupby(level=0).mean()
# Zeros are missing data in this pipeline; interpolate over them instead of
# averaging them in.
ratio_avg_df.replace(0.00, np.nan, inplace=True)
ratio_feature_df = ratio_avg_df.interpolate().fillna(0.00)
# GDP-denominated features: world sums divided by world GDP. The divisor is
# listed first so iloc[:, 1:] keeps only the numerators.
gdp_based_features = ['gdp', 'broad money', 'exports of goods and services', 'gross domestic savings',
                      'high-tech value added', 'household consumption expenditure', 'imports of goods and services',
                      'listed companies', 'manufacturing value added', 'r and d spend', 'services trade', 'trade']
# missing: [education expenditure', 'food expenditure','health expenditure', 'housing expenditure', 'leisure expenditure', 'transportation expenditure']
gdp_sum_df = features_df[gdp_based_features].groupby(level=0).sum()
gdp_sum_df.replace(0.00, np.nan, inplace=True)
gdp_feature_df = gdp_sum_df.iloc[:,1:].div(gdp_sum_df.gdp, axis=0).interpolate().fillna(0.00)
# Tax-revenue-denominated features. FIX: the divisor ('government tax revenue')
# is now listed first, matching the gdp/population convention, so that
# iloc[:, 1:] drops the divisor column. The original listed it last, which
# silently dropped 'government debt' from the output and kept a constant
# tax-revenue/tax-revenue == 1 column instead.
tax_based_features = ['government tax revenue', 'government debt', 'government debt service',
                      'government interest payments external debt']
tax_sum_df = features_df[tax_based_features].groupby(level=0).sum()
tax_sum_df.replace(0.00, np.nan, inplace=True)
tax_feature_df = tax_sum_df.iloc[:,1:].div(tax_sum_df['government tax revenue'], axis=0).interpolate().fillna(0.00)
# Population-denominated features: world sums divided by world population.
population_based_features = ['population', 'birth deaths', 'broadband subscriptions', 'electricity access',
                             'co2 emissions', 'electricity consumption', 'mobile subscriptions', 'newborns',
                             'overweight', 'rural population', 'unemployed', 'urban population', 'workers']
pop_sum_df = features_df[population_based_features].groupby(level=0).sum()
pop_sum_df.replace(0.00, np.nan, inplace=True)
pop_feature_df = pop_sum_df.iloc[:,1:].div(pop_sum_df.population, axis=0).interpolate().fillna(0.00)
# World-level aggregate feature set, indexed by date.
agg_feature_df = pd.concat([ratio_feature_df, gdp_feature_df, tax_feature_df, pop_feature_df], axis=1).sort_index().fillna(0.00)
agg_feature_df.columns
# Sanity plot: world trade as a share of GDP over time.
world_agg_plot_df = agg_feature_df[['trade']].unstack()
fig, ax = plt.subplots(figsize=(15,7))
world_agg_plot_df.plot(kind='line', ax=ax)
plt.legend(loc = 'upper center', bbox_to_anchor=(0.5, -0.05), fancybox = True, shadow = True)
ax.set_title('World trade as percent of national GDP');
# Persist the aggregated world-level features for the downstream models.
agg_feature_df.to_csv('features/m_one/world_aggregate.csv', sep=';')
| features/world_aggregate_features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial "Sensitivity_analysis.py"
#
# Authors: [<NAME>](mailto:<EMAIL>), [<NAME>](mailto:<EMAIL>)
#
# [INSA Lyon](https://www.insa-lyon.fr), France, 07/01/2022
#
# ## Introduction
#
# In order to assess the sensitivity of the Electre Tri ranking to the variability of its parameters, this Python code [**Sensitivity_analysis.py**](Sensitivity_analysis.py), displayed below, calculates the sensitivity index of each parameter and then displays the results.
#
# The choice of sensitivity analysis method is developed in our report on InER project.
# ## Python code presentation
#
# The different steps of the sensitivity analysis code are presented here.
#
# The code is organized in six parts :
# 1. Copy of dictionary
# 2. Modification of dictionary
# 3. Definition of the sensitivity index
# 4. Calculation of sensitivity index with methods
# 5. Graphic display
# 6. Sensitivity analysis and implementation of methods
#
# Each part will be explained below.
# To begin, the modules ELECTRE_Tri_B and ELECTRE_Tri_B_function, together with Matplotlib's pyplot, are used in the code.
import ELECTRE_Tri_B
import ELECTRE_Tri_B_function
from matplotlib import pyplot as plt
# ## 1. First part : Copy of dictionary
#
# #### 1.1 Ungroup dictionary
# The first step is to ungroup the dictionaries to get unitary dictionaries.
#
# The boundaries of actions performance dictionary (Bk) is in the form of dictionaries in a principal dictionary.
# The thresholds dictionary is in the form of a dictionary where values are (q,p,v) list
#
# To modify each parameter in a single method, a single dictionary is necessary. For that, this function ungroups the two dictionaries into unitary dictionaries.
def ungroup_dict(BP, T):
    """
    Split the grouped ELECTRE Tri input dictionaries into unitary dictionaries.

    :param BP: Dictionary containing the boundaries of actions performance 'Bk':
               a principal dictionary whose six values are themselves dictionaries
    :param T: Dictionary containing the pseudo-criterias q, p and v: each value
              is a (q, p, v) list
    :return the different ungrouped dictionaries: (dict_q, dict_p, dict_v,
            dict_b0, ..., dict_b5)
    """
    # One dictionary per threshold type, keyed by criterion.
    dict_q = {}
    dict_p = {}
    dict_v = {}
    for criterion, thresholds in T.items():
        dict_q[criterion] = thresholds[0]
        dict_p[criterion] = thresholds[1]
        dict_v[criterion] = thresholds[2]
    # The six boundary-performance dictionaries, in insertion order.
    boundaries = list(BP.values())
    dict_b0 = boundaries[0]
    dict_b1 = boundaries[1]
    dict_b2 = boundaries[2]
    dict_b3 = boundaries[3]
    dict_b4 = boundaries[4]
    dict_b5 = boundaries[5]
    return dict_q, dict_p, dict_v, dict_b0, dict_b1, dict_b2, dict_b3, dict_b4, dict_b5
# #### 1.2 Gather dictionary
#
# Once the dictionary is modified, the original dictionary (Threshold or Boundaries Action Performance) should be recreated by gathering the different unitary dictionaries.
#
# Two methods are defined : for pseudo-criterias and for boundaries of action performances.
#
# The gathered dictionary of the boundaries of action performance is created by adding unitary dictionaries. The 'index' indicates where is the modified dictionary.
def gather_dictBK(Dinit, Dmodified, index, D0, D1, D2, D3, D4, D5):
    """
    Rebuild the Boundaries Actions Performances dictionary for ELECTRE Tri.

    The six unitary boundary dictionaries are reassembled under the keys of
    ``Dinit``; the one at position ``index`` is replaced by ``Dmodified``.

    :param Dinit: initial dictionary providing the key layout of the result
    :param Dmodified: modified unitary dictionary to substitute in
    :param index: position (0-5) of the boundary dictionary being replaced
    :param D0: unitary boundary dictionary number 1
    :param D1: unitary boundary dictionary number 2
    :param D2: unitary boundary dictionary number 3
    :param D3: unitary boundary dictionary number 4
    :param D4: unitary boundary dictionary number 5
    :param D5: unitary boundary dictionary number 6
    :return: the gathered BP dictionary with the modification applied
    """
    keys = list(Dinit.keys())
    Dgathered = {keys[pos]: level
                 for pos, level in enumerate((D0, D1, D2, D3, D4, D5))}
    # Substitute the modified dictionary at its position; an out-of-range
    # index leaves the gathered dictionary untouched.
    if 0 <= index < len(Dinit):
        Dgathered[list(Dgathered.keys())[index]] = Dmodified
    return Dgathered
# The gathered dictionary of the pseudo-criterias is created by making a list of different values of dictionary (q,p,v). The 'index' indicates the position of the modified value in the list.
def gather_dictQPV(Dinit, Dmodified, index, D0, D1, D2):
    """
    Rebuild the Thresholds dictionary for ELECTRE Tri.

    Each criteria key of ``Dinit`` is mapped to a (q, p, v) tuple assembled from
    the three unitary dictionaries, with the one at position ``index`` replaced
    by ``Dmodified``.

    :param Dinit: initial dictionary providing the key layout of the result
    :param Dmodified: modified unitary dictionary to substitute in
    :param index: which pseudo-criteria is replaced (0=q, 1=p, 2=v)
    :param D0: unitary dictionary of thresholds q
    :param D1: unitary dictionary of thresholds p
    :param D2: unitary dictionary of thresholds v
    :return: the gathered Thresholds dictionary with the modification applied
        (empty when ``index`` is not 0, 1 or 2, as before)
    """
    substitution = {0: (Dmodified, D1, D2),
                    1: (D0, Dmodified, D2),
                    2: (D0, D1, Dmodified)}
    Dgathered = {}
    if index in substitution:
        src_q, src_p, src_v = substitution[index]
        # Values are matched to Dinit's keys by position, as in the original.
        for pos, key in enumerate(Dinit):
            Dgathered[key] = (list(src_q.values())[pos],
                              list(src_p.values())[pos],
                              list(src_v.values())[pos])
    return Dgathered
# ## 2. Second part : Modification of dictionary
#
# In this part, a specific value of a given dictionary (identified by its 'index') is modified by a given percentage.
#
# If all values of the dictionary should be modified, a 'global_test' is True.
#
# Three different methods are created.
# Firstly, the transformation of the Weight dictionary is made. The method take into account the sign of the percentage. Then, the normalization is realized to keep the sum of weight equals to one.
def transform_dictW(Dinit, percent=0, index=0, global_test=False):
    """
    Transform a specific value (or all values) of the Weight dictionary.

    When a single weight is increased by ``percent``, every other weight is
    decreased proportionally so that the sum of weights stays (approximately)
    normalized to 1.

    :param Dinit: unitary dictionary {criteria: weight} to modify
    :param percent: percentage of modification
    :param index: index of the criteria whose weight is modified (not used in
        global mode)
    :param global_test: if True, modify the weight of every criteria at once
    :return: (modified dictionary, condition_test) -- weights carry no ordering
        constraint, so condition_test is always True here; it is returned for
        interface symmetry with transform_dictQPV/transform_dictBK
    """
    Dtransformed = {}
    # Defined once, before the loop: the previous version assigned it inside
    # the loop, leaving it unbound for an empty input dictionary.
    condition_test = True
    # Single-value mode with a negative target weight: flip the modification
    # sign so the weight's magnitude still grows by |percent|. Checking
    # global_test first also avoids indexing Dinit in global mode, where
    # 'index' is meaningless and may be out of range.
    if global_test == False and list(Dinit.values())[index] < 0:
        percent = -1 * percent
    for i in range(0, len(Dinit)):
        if i == index or global_test is True:
            if global_test is True:
                # Align the sign of 'percent' with the current weight's sign.
                # NOTE(review): divides by abs(weight) -- a zero weight raises
                # ZeroDivisionError here; presumably weights are never 0.
                percent = percent * list(Dinit.values())[i] / abs(list(Dinit.values())[i])
            Dtransformed[list(Dinit.keys())[i]] = list(Dinit.values())[i] * (100 + percent) / 100
            # Re-align 'percent' for the next iteration (sign * sign == 1).
            percent = percent * list(Dinit.values())[i] / abs(list(Dinit.values())[i])
        else:
            # Compensate the untouched weights to keep the sum normalized.
            Dtransformed[list(Dinit.keys())[i]] = list(Dinit.values())[i] * (1 - ((percent / 100) / (len(Dinit) - 1)))
    return Dtransformed, condition_test
# Secondly, the transformation of the pseudo-criterrias dictionary is made.
#
# The element X1 correspond to the modified value. The elements X0 and X2 allow to verify if the condition "q < p < v" is always true. If it's not the case, the test_condition is False and the calculation of the sensitivity index isn't made.
#
# The modified dictionary and the condition is returned to be used in a future method.
def transform_dictQPV(Dinit, index_param, percent=0, index=0, global_test=False):
    """
    Transform a specific value of Threshold dictionary. A sorting condition should be respected : q < p < v.
    :param Dinit: list of the three unitary threshold dictionaries [q, p, v]
    :param index_param: Index of parameter to modify (0=q, 1=p, 2=v)
    :param percent: percentage of modification
    :param index: Index of parameter of criteria to modify
    :param global_test: If global_test is True, the method modifies the parameter value of all criterias
    :return: (modified dictionary, condition_test) -- condition_test is False when
        the modified value would violate the ordering q < p < v.
        NOTE(review): condition_test is reset at every loop iteration, so the
        returned flag reflects only the LAST criteria browsed -- confirm intended.
    """
    Dtransformed = {}
    Dbrowsed = Dinit[index_param]
    # Single-value mode with a negative target value: flip the modification sign.
    if list(Dbrowsed.values())[index] < 0 and global_test == False:
        percent = -1 * percent
    for i in range(0, len(Dbrowsed)):
        condition_test = True
        # X1: candidate modified value; X0/X2: neighbouring thresholds (previous
        # and next parameter in the q < p < v chain), 0 at either end of the chain.
        X1 = list(Dbrowsed.values())[i] * (100 + percent) / 100
        if index_param != 2:
            X2 = list(Dinit[index_param + 1].values())[i]
        else:
            X2 = 0
        if index_param != 0:
            X0 = list(Dinit[index_param - 1].values())[i]
        else:
            X0 = 0
        if i == index or global_test is True:
            # Align the sign of 'percent' with the current value's sign
            # (skipped for zero values to avoid a division by zero).
            if global_test is True and list(Dbrowsed.values())[i] != 0:
                percent = percent * list(Dbrowsed.values())[i] / abs(list(Dbrowsed.values())[i])
            if list(Dbrowsed.values())[i] == 0:
                # A zero threshold cannot be scaled by a percentage: keep it.
                Dtransformed[list(Dbrowsed.keys())[i]] = list(Dbrowsed.values())[i]
            else:
                Dtransformed[list(Dbrowsed.keys())[i]] = list(Dbrowsed.values())[i] * (100 + percent) / 100
                percent = percent * list(Dbrowsed.values())[i] / abs(list(Dbrowsed.values())[i])
        else:
            Dtransformed[list(Dbrowsed.keys())[i]] = list(Dbrowsed.values())[i]
        # Revert to the original value when the candidate breaks q < p < v
        # (note: X1 is evaluated for every criteria, not only the modified one).
        if (X1 >= X2 and index_param != 2) or (X1 <= X0 and index_param != 0):
            Dtransformed[list(Dbrowsed.keys())[i]] = list(Dbrowsed.values())[i]
            condition_test = False
    return Dtransformed, condition_test
# The same method is written for the boundaries of action performance (Bk). The condition is True if the inequality "Bk < Bk+1" is respected.
def transform_dictBK(Dinit, index_param, percent=0, index=0, global_test=False):
    """
    Transform a specific value of Boundaries Actions Performances dictionary.
    A sorting condition should be respected : Bk < B(k+1). The extrem boundaries shouldn't be modified.
    :param Dinit: list of the six unitary boundary dictionaries [b0..b5]
    :param index_param: index of the boundary to modify -- callers only pass the
        inner boundaries, since X0/X2 below read index_param-1 and index_param+1
    :param percent: percentage of modification
    :param index: Index of parameter of criteria to modify
    :param global_test: If global_test is True, the method modifies the parameter value of all criterias
    :return: (modified dictionary, condition_test) -- condition_test is False when
        the modified value breaks B(k-1) < Bk < B(k+1).
        NOTE(review): as in transform_dictQPV, the flag reflects only the last
        criteria browsed -- confirm intended.
    """
    Dtransformed = {}
    Dbrowsed = Dinit[index_param]
    for i in range(0, len(Dbrowsed)):
        condition_test = True
        sign = 1
        # Single-value mode with a negative target boundary: invert the sign so
        # the boundary's magnitude still grows by |percent|.
        if list(Dbrowsed.values())[index] < 0 and global_test == False:
            sign = -1
        # Neighbouring boundaries used to check the strict ordering.
        X0 = list(Dinit[index_param - 1].values())[i]
        X1 = list(Dbrowsed.values())[i] * (100 + sign*percent) / 100
        X2 = list(Dinit[index_param + 1].values())[i]
        if i == index or global_test is True :
            if list(Dbrowsed.values())[i] == 0:
                # A zero boundary cannot be scaled by a percentage: keep it.
                Dtransformed[list(Dbrowsed.keys())[i]] = list(Dbrowsed.values())[i]
            else:
                Dtransformed[list(Dbrowsed.keys())[i]] = list(Dbrowsed.values())[i] * (100 + sign*percent) / 100
        else:
            Dtransformed[list(Dbrowsed.keys())[i]] = list(Dbrowsed.values())[i]
        # Revert when the candidate value leaves the (X0, X2) corridor
        # (X1 is evaluated for every criteria, not only the modified one).
        if X1 >= X2 or X1 <= X0:
            Dtransformed[list(Dbrowsed.keys())[i]] = list(Dbrowsed.values())[i]
            condition_test = False
    return Dtransformed, condition_test
# ## 3. Third part : Definition of the sensitivity index
#
# A short method calculates the sensitivity index from the ranking variation between the initial ranking (with the reference values) and the modified ranking (with an increased/decreased dictionary).
#
# If the condition test (where the inequality isn't respected for boundaries of action performance or for pseudo-criterias) is false, the sum is negative.
# That allows to identify a modification percentage too strong.
def sensitivity_index(dictY1, dictY2, condition_test=True):
    """
    Compute the sensitivity index as the total absolute ranking variation.

    :param dictY1: initial ranking {action: rank}
    :param dictY2: modified ranking {action: rank}
    :param condition_test: when False (the ordering condition q<p<v or bk<bk+1
        was violated), the index is forced to -0.5 to flag a modification
        percentage that is too strong
    :return: the calculated sensitivity index
    """
    ref_ranks = list(dictY1.values())
    new_ranks = list(dictY2.values())
    total = sum(abs(new_ranks[pos] - ref_ranks[pos]) for pos in range(len(ref_ranks)))
    if condition_test == False:
        total = -0.5
    return total
# ## 4. Fourth part : Calculation of sensitivity index with methods
#
# Three methods are written to calculate the sensitivity index from the initial ranking and a percentage of modification. Each method is organized in three parts :
# 1. Transformation of the dictionary
# 2. Calculation of the new ranking
# 3. Calculation of the sensitivity index
#
# The dictionary to be modified is browsed and each value is modified once at time. Then, the same is made for the modification of all elements : the global test is true.
#
# The sensitivity index of weight is calculated, in the form of list. Each value corresponds to the sensitivity index of the criteria modification.
#
# So the first value corresponds to the modification of the first weight (the first criteria).
def indexW_calcul(ordre_init, percent, C, A, AP, BA, BP, W, T):
    """
    Calculate the Weight sensitivity index, regarding initial ranking and a modification percentage
    :param ordre_init: Initial ranking calculated by Electre Tri method
    :param percent: percentage of modification
    :param C: Criteria
    :param A: Actions
    :param AP: Actions_performances
    :param BA: Boundary_actions
    :param BP: Boundary_actions_performances
    :param W: Weights
    :param T: Thresholds
    :return: calculated sensitivity index -- sens_index[i][0] is the index for a
        modification of weight i; the extra last row holds the global-test
        result (all weights modified at once)
    NOTE(review): relies on the module-level global ``Cat`` (categories), set in
    the bottom cell of this notebook -- it is not passed as a parameter.
    """
    sens_index = [[0 for _ in range(len(W))] for _ in range(len(W) + 1)]
    for j in range(0, 1):
        # j is the parameter to be modified (Weight 1 to Weight 5)
        global_test = False
        for i in range(0, len(W)):
            index = i
            modified_dict, condition_test = transform_dictW(W, percent, index, global_test)
            modified_ranking = ELECTRE_Tri_B_function.ELECTRE_Tri(Cat, C, W, A, AP, BA, BP, T, "W", modified_dict)
            sens_index[index][j] = sensitivity_index(ordre_init, modified_ranking, condition_test)
        # Global test: every weight is modified at once; 'index' still holds the
        # last criteria position, so the result lands in the extra last row.
        global_test = True
        modified_dict, condition_test = transform_dictW(W, percent, index, global_test)
        modified_ranking = ELECTRE_Tri_B_function.ELECTRE_Tri(Cat, C, W, A, AP, BA, BP, T, "W", modified_dict)
        sens_index[index + 1][j] = sensitivity_index(ordre_init, modified_ranking, condition_test)
    return sens_index
# The same method is made for the calculation of the sensitivity index of pseudo-criterias. The returned index is in the form of nested lists. Each list contains three values (q,p,v) for each criteria.
def indexQPV_calcul(ordre_init, percent, C, A, AP, BA, BP, W, T):
    """
    Calculate the pseudo-criteria sensitivity index, regarding initial ranking and a modification percentage
    :param ordre_init: Initial ranking calculated by Electre Tri method
    :param percent: percentage of modification
    :param C: Criteria
    :param A: Actions
    :param AP: Actions_performances
    :param BA: Boundary_actions
    :param BP: Boundary_actions_performances
    :param W: Weights
    :param T: Thresholds
    :return: calculated sensitivity index -- sens_index[i][j] is the index for a
        modification of criteria i on parameter j (0=q, 1=p, 2=v); the extra
        last row holds the global-test results
    NOTE(review): relies on the module-level global ``Cat``, set in the bottom cell.
    """
    dict_q, dict_p, dict_v, dict_b0, dict_b1, dict_b2, dict_b3, dict_b4, dict_b5= ungroup_dict(BP, T)
    dict_list = [dict_q, dict_p, dict_v]
    dict_initial = T
    sens_index = [[0 for _ in range(len(dict_list))] for _ in range(len(dict_list[0]) + 1)]
    for j in range(0, len(dict_list)):
        # j is the parameter to be modified (Q, P or V)
        global_test = False
        browsed_dict = dict_list[j]
        for i in range(0, len(browsed_dict)):
            index = i
            modified_dict, condition_test = transform_dictQPV(dict_list, j, percent, index,
                                                              global_test)
            modified_Dgathered = gather_dictQPV(dict_initial, modified_dict, j, dict_list[0], dict_list[1],
                                                dict_list[2])
            modified_ranking = ELECTRE_Tri_B_function.ELECTRE_Tri(Cat, C, W, A, AP, BA, BP, T, "QPV", modified_Dgathered)
            sens_index[index][j] = sensitivity_index(ordre_init, modified_ranking, condition_test)
        # Global test: all criterias modified at once; result goes in the last row.
        global_test = True
        modified_dict, condition_test = transform_dictQPV(dict_list, j, percent, index, global_test)
        modified_Dgathered = gather_dictQPV(dict_initial, modified_dict, j, dict_list[0], dict_list[1], dict_list[2])
        modified_ranking = ELECTRE_Tri_B_function.ELECTRE_Tri(Cat, C, W, A, AP, BA, BP, T, "QPV", modified_Dgathered)
        sens_index[index + 1][j] = sensitivity_index(ordre_init, modified_ranking, condition_test)
    return sens_index
# The same method is made for the calculation of the sensitivity index of the boundaries of action performance.
#
# The returned index is in the form of nested lists. Each list contains four values, corresponding to four boundaries, for each criteria.
# A second condition is the separability of the boundaries, that should be strict, equivalent to a credibility less than 0,5.
# If the condition isn't respected, the sensitivity index isn't calculated (equals to -0.5).
def indexBK_calcul(ordre_init, percent, C, A, AP, BA, BP, W, T):
    """
    Calculate the Boundaries Action Performance sensitivity index, regarding initial ranking and a modification percentage
    :param ordre_init: Initial ranking calculated by Electre Tri method
    :param percent: percentage of modification
    :param C: Criteria
    :param A: Actions
    :param AP: Actions_performances
    :param BA: Boundary_actions
    :param BP: Boundary_actions_performances
    :param W: Weights
    :param T: Thresholds
    :return: calculated sensitivity index -- one column per boundary; only the
        inner boundaries (not B0 / B5) are modified. The extra last row holds
        the global-test results.
    NOTE(review): relies on the module-level global ``Cat``, set in the bottom cell.
    """
    dict_q, dict_p, dict_v, dict_b0, dict_b1, dict_b2, dict_b3, dict_b4, dict_b5 = ungroup_dict(BP, T)
    dict_list = [dict_b0, dict_b1, dict_b2, dict_b3, dict_b4, dict_b5]
    dict_initial = BP
    sens_index = [[0 for _ in range(len(dict_list))] for _ in range(len(dict_list[0]) + 1)]
    for j in range(0, len(dict_list)):
        # j is the parameter to modify (B1 or B2)
        # The extreme boundaries (first and last) are never modified.
        if not (j == 0) and not (j == len(dict_list)-1):
            global_test = False
            browsed_dict = dict_list[j]
            for i in range(0, len(browsed_dict)):
                index = i
                modified_dict, condition_test = transform_dictBK(dict_list, j, percent, index,
                                                                 global_test)
                modified_Dgathered = gather_dictBK(dict_initial, modified_dict, j, dict_list[0], dict_list[1],dict_list[2]
                                                   , dict_list[3], dict_list[4], dict_list[5])
                # Separability condition: the boundaries must stay strictly
                # separated, i.e. the minimum required credibility level must
                # not exceed 0.5; otherwise the index is not computed.
                credibility = ELECTRE_Tri_B.minimum_required_level_of_credibility(C, W, BA, modified_Dgathered, T)
                if list(credibility.values())[j] > 0.5:
                    condition_test = False
                modified_ranking = ELECTRE_Tri_B_function.ELECTRE_Tri(Cat, C, W, A, AP, BA, BP, T, "BK", modified_Dgathered)
                sens_index[index][j] = sensitivity_index(ordre_init, modified_ranking,
                                                         condition_test)
            # Global test: all criterias modified at once; result in last row.
            global_test = True
            modified_dict, condition_test = transform_dictBK(dict_list, j, percent, index, global_test)
            modified_Dgathered = gather_dictBK(dict_initial, modified_dict, j, dict_list[0], dict_list[1],
                                               dict_list[2], dict_list[3], dict_list[4], dict_list[5])
            modified_ranking = ELECTRE_Tri_B_function.ELECTRE_Tri(Cat, C, W, A, AP, BA, BP, T, "BK", modified_Dgathered)
            credibility = ELECTRE_Tri_B.minimum_required_level_of_credibility(C, W, BA, modified_Dgathered, T)
            if list(credibility.values())[j] > 0.5:
                condition_test = False
            sens_index[index + 1][j] = sensitivity_index(ordre_init, modified_ranking,
                                                         condition_test)
    return sens_index
# ## 5. Fifth part : Graphic display
#
# Two types of graphic are displayed.
#
# The first represents the sensitivity index as a function of the parameter, for each modification percentage.
#
# The characteristics of the graphic are defined.
def graphic(sens_indexW, sens_indexBK, sens_indexQPV, pourcent, index):
    """
    Display the sensitivity index for each criteria regarding a percentage
    :param sens_indexW: Weight sensitivity index
    :param sens_indexBK: Boundaries Action Performance sensitivity index
    :param sens_indexQPV: Pseudo-criterias sensitivity index
    :param pourcent: percentage of modification
    :param index: control of the graphic position which will be displayed
    :return: the last position of displayed graphic
    NOTE(review): the x labels are hard-coded for 5 weights, 2 inner boundaries
    and 3 pseudo-criterias -- confirm they match the problem's dimensions.
    """
    x = ["W1", "W2", "W3", "W4", "W5", "BK1", "BK2", "Q", "P", "V"]
    y = [0 for _ in range(len(x))]
    count = 0
    # Single-weight indices: column 0, rows 0..n-1 (last row is the global test).
    for i in range(0, len(sens_indexW) - 1):
        y[count] = sens_indexW[i][0]
        count = count + 1
    # Global-test row (last) of the BK index, inner boundaries columns 1 and 2.
    for i in range(1, 3):
        y[count] = sens_indexBK[len(sens_indexBK) - 1][i]
        count = count + 1
    # Global-test row (last) of the QPV index, columns 0..2 (q, p, v).
    for i in range(7, 10):
        y[count] = sens_indexQPV[len(sens_indexQPV) - 1][i - 7]
        count = count + 1
    # Graphic characteristics
    # Start a new 3-subplot figure once the current one is full.
    if index > 3:
        plt.figure(figsize=(12, 4))
        index = 1
    plt.subplot(1, 3, index)
    axes = plt.gca()
    axes.title.set_size(15)
    axes.xaxis.label.set_size(10)
    axes.yaxis.label.set_size(10)
    plt.xticks(fontsize=8)
    plt.title('Increase of ' + str(pourcent) + ' %')
    plt.ylabel('Sensitivity index')
    plt.xlabel('Parameters')
    plt.text(0, -0.5, "The negative values indicate \n a parameter increased too much", size=4)
    plt.yticks([-0.5,0, 0.5, 1, 1.5,2,2.5,3,3.5,4,4.5,5])
    plt.bar(x, y, color='red')
    return index + 1
# The second graphic represents the sensitivity index as a function of the modification percentage, for all the parameters. That allows one to visualize quickly the sensitivity of the model.
#
# For a better visibility, the negative value are cleaned by the following code.
def clean_list(l):
    """
    Replace the sentinel negative values (-0.5 and -1) by None, in place, so
    they are skipped when plotted instead of being displayed.

    :param l: nested list (rows of sensitivity indices) to clean
    :return: the same list, modified in place
    """
    for row in l:
        # As before, the width of the FIRST row is used for every row.
        for pos in range(len(l[0])):
            if row[pos] == -0.5 or row[pos] == -1:
                row[pos] = None
    return l
# Then the global graphic is displayed. The value of each criteria is recuperated from the different dictionary of sensitivity index.
def global_graphic(C, BA, list_percent, sens_indexWglobal, sens_indexBKglobal, sens_indexQPVglobal, F):
    """
    Display a global graphic with the sensitivity index in function of modification percentage, for each criteria
    :param C: Criteria (list of names, one curve per criteria for the weights)
    :param BA: Boundary_actions (list of names, one curve per inner boundary)
    :param list_percent: list of modification percentage
    :param sens_indexWglobal: global Weight sensitivity index
    :param sens_indexBKglobal: global Boundaries Action Performance sensitivity index
    :param sens_indexQPVglobal: global Pseudo-criterias sensitivity index
    :param F : indicates the set of parameter to display.
        Weight : F=1
        Bk : F=2
        QPV : F=3
        All parameters : F=4
    """
    plt.figure().canvas.manager.set_window_title("Sensitivity index regarding the modification percentage")
    # One y-series per parameter, collected over all percentages below.
    Q, P, V = [], [], []
    BK, W = [[] for _ in range(len(BA))], [[] for _ in range(len(C))]
    for i in range(0, len(C)):
        W[i]=[]
    for i in range(0, len(BA)):
        BK[i]=[]
    # Gather, for each percentage j, the index of each parameter; clean_list
    # turns the negative sentinel values into None so they are not plotted.
    for j in range(0, len(list_percent)):
        if F ==1 or F ==4:
            for i in range(0, len(C)):
                W[i].append((sens_indexWglobal)[j][i])
        if F == 2 or F == 4:
            for i in range(1, len(BA)-1):
                BK[i].append(clean_list(sens_indexBKglobal)[j][i])
        if F == 3 or F == 4:
            Q.append(clean_list(sens_indexQPVglobal)[j][0])
            P.append(clean_list(sens_indexQPVglobal)[j][1])
            V.append(clean_list(sens_indexQPVglobal)[j][2])
    # Cycle through three line styles so neighbouring weight curves differ.
    if F == 1 or F == 4:
        count = 0
        for i in range(0, len(C)):
            if count == 0:
                plt.plot(list_percent, W[i], ':', label=C[i])
                count = 1
            elif count ==1 :
                plt.plot(list_percent, W[i], '--.', label=C[i])
                count = 2
            else :
                plt.plot(list_percent, W[i], '-', label=C[i])
                count = 0
    if F == 2 or F == 4:
        count = 0
        for i in range(1, len(BA)-1):
            if count == 0:
                plt.plot(list_percent, BK[i], '--*', label=BA[i])
                count = 1
            else:
                plt.plot(list_percent, BK[i], ':X', label=BA[i])
                count = 0
    if F == 3 or F == 4:
        plt.plot(list_percent, Q, '--c*', label='Indifference Q')
        plt.plot(list_percent, P, '--m*', label='Preference P')
        plt.plot(list_percent, V, '--y*', label='Veto V')
    plt.title("Sensitivity index regarding modification percentage")
    plt.ylabel('Sensitivity index')
    plt.xlabel('Percentage')
    plt.xticks(list_percent)
    legend = []
    if F == 1 or F == 4:
        for i in range(0, len(C)):
            legend.extend([C[i]])
    if F == 2 or F == 4:
        for i in range(1, len(BA)-1):
            legend.extend([BA[i]])
    if F == 3 or F == 4 :
        # NB: extend() on a string appends its characters; for these 1-char
        # labels this is equivalent to append().
        legend.extend('Q')
        legend.extend('P')
        legend.extend('V')
    plt.gca().legend(legend, bbox_to_anchor = (1.0, 1))
    plt.show()
# ## 6. Sixth part : Sensitivity analysis : implementation of methods
#
# The last part of the code implement the previous methods to realize the sensitivity analysis.
#
# The initial ranking is calculated from the input datas.
# For different percentages of modification, the sensitivity indexes are calculated. Then, the results are displayed.
# +
"""
Make the sensitivity analysis of Electre Tri method
Input data of Electre Tri method :
    Cat: Categories
    C: Criteria
    W: Weights
    A: Actions
    AP: Actions_performances
    BA: Boundary_actions
    BP: Boundary_actions_performances
    T: Thresholds
"""
# Load the reference data set ("Isolation" case study) from the project module.
Cat, C, W, A, AP, BA, BP, T = ELECTRE_Tri_B_function.input_data("Isolation")
# Initial Ranking
ordre_init = ELECTRE_Tri_B_function.ELECTRE_Tri(Cat, C, W, A, AP, BA, BP, T)
print("ordre initial", ordre_init)
plt.bar(list(ordre_init.keys()), list(ordre_init.values()))
plt.xlabel("Scénarios")
plt.ylabel("Rang médian")
sens_indexWglobal = []
sens_indexBKglobal = []
sens_indexQPVglobal = []
list_percent = []
#plt.figure(figsize=(12, 4))
x = 1
# Sweep modification percentages from 50 % to 450 % by steps of 50 %;
# 0 % is skipped since it is the unmodified reference case.
for percent_modification in range(0,500,50):
    if percent_modification == 0:
        continue
    list_percent.append(percent_modification)
    sens_indexW = indexW_calcul(ordre_init, percent_modification, C, A, AP, BA, BP, W, T)
    sens_indexBK = indexBK_calcul(ordre_init, percent_modification, C, A, AP, BA, BP, W, T)
    sens_indexQPV = indexQPV_calcul(ordre_init, percent_modification, C, A, AP, BA, BP, W, T)
    print(percent_modification, "Sensitivity index W : ", sens_indexW)
    print(percent_modification, "Sensitivity index BK : ", sens_indexBK)
    print(percent_modification, "Sensitivity index QPV : ", sens_indexQPV)
    #x = graphic(sens_indexW, sens_indexBK, sens_indexQPV, percent_modification, x)
    # Column 0 of the W index (single-weight modifications) for this percentage.
    W2 = []
    for i in range(0, len(sens_indexW) - 1):
        W2.append(sens_indexW[i][0])
    sens_indexWglobal.append(W2)
    # Row 5 is the global-test row of the BK / QPV index matrices.
    sens_indexBKglobal.append(sens_indexBK[5])
    sens_indexQPVglobal.append(sens_indexQPV[5])
plt.show()
print("Global sensitivity index W :", W2)
print("Global sensitivity index BK :", sens_indexBKglobal)
print("Global sensitivity index QPV :", sens_indexQPVglobal)
global_graphic(C, BA, list_percent, sens_indexWglobal, sens_indexBKglobal, sens_indexQPVglobal, 4)
# -
| Tutorial Sensibility Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reproducing Fig. 1
# This notebook exemplifies how to reproduce Figure 1 of the article.
# The annotations from all screens will be downloaded and parsed to
# build statistics on phenotypes, which will be displayed using Bokeh.
# ### Dependencies
# * [Matplotlib](https://matplotlib.org/)
# * [NumPy](https://www.numpy.org/)
# * [Pandas](https://pandas.pydata.org/)
# * [Bokeh](https://bokeh.pydata.org/)
#
# The cell below will install dependencies if you choose to run the notebook in [Google Colab](https://colab.research.google.com/notebooks/intro.ipynb#recent=true).
# %pip install idr-py
# +
from IPython import get_ipython
import omero
from idr import connection
import numpy as np
from pandas import DataFrame
from pandas import read_csv
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from bokeh.plotting import output_notebook
from bokeh.plotting import show
from bokeh.models import HoverTool
import bokeh.palettes as bpal
output_notebook()
get_ipython().run_line_magic('matplotlib', 'inline')
# -
# ### Method definitions
def getBulkAnnotationAsDf(screenID, conn):
    """
    Download a screen's 'bulk_annotations' OMERO.table and return it as a
    pandas DataFrame.

    :param screenID: id of the Screen object to load
    :param conn: open BlitzGateway connection to the IDR server
    :return: DataFrame with one row per table row, or None when the screen has
        no bulk_annotations file or the file is too large to download
    """
    ofId = None
    sc = conn.getObject('Screen', screenID)
    # Locate the file annotation holding the bulk annotations table.
    for ann in sc.listAnnotations():
        if isinstance(ann, omero.gateway.FileAnnotationWrapper):
            if (ann.getFile().getName() == 'bulk_annotations'):
                if (ann.getFile().getSize() > 1476250900):
                    # Hard size cap (~1.4 GB) to avoid huge downloads.
                    print("that's a big file...")
                    return None
                ofId = ann.getFile().getId()
                break
    if ofId is None:
        return None
    original_file = omero.model.OriginalFileI(ofId, False)
    table = conn.c.sf.sharedResources().openTable(original_file)
    try:
        rowCount = table.getNumberOfRows()
        column_names = [col.name for col in table.getHeaders()]
        black_list = []
        # enumerate() instead of column_names.index(name): .index() returns the
        # FIRST occurrence, which selects wrong columns when names repeat.
        column_indices = [idx for idx, name in enumerate(column_names)
                          if name not in black_list]
        table_data = table.slice(column_indices, None)
    finally:
        table.close()
    data = []
    for index in range(rowCount):
        row_values = [column.values[index] for column in table_data.columns]
        data.append(row_values)
    dfAnn = DataFrame(data)
    # NOTE: assumes black_list stays empty; with a non-empty black_list the
    # sliced columns would no longer match this full name list.
    dfAnn.columns = column_names
    return dfAnn
def appendPhInfo(phall, screen, df):
    """
    Extract all phenotype information from the given bulk-annotation dataframe
    and accumulate it into ``phall``.

    :param phall: dict mapping a phenotype accession to
        {'n': sample count, 'screens': [screen names], 'desc': readable name};
        updated in place
    :param screen: name of the screen the dataframe comes from
    :param df: bulk-annotation DataFrame (as returned by getBulkAnnotationAsDf)
    """
    # Columns holding phenotype accessions, e.g. 'Phenotype 1 Term Accession'.
    phcol = df.columns[[('Phenotype' in s) and ('Term Accession' in s)
                        for s in df.columns]]
    for s in phcol:
        # A column holds one accession plus '' for unannotated rows; pick the
        # non-empty unique value and its matching '... Term Name' description.
        ph = df[s].unique()
        if ph[0] != '':
            ph = ph[0]
            desc = df[s.replace('Accession', 'Name')].unique()[0]
        else:
            ph = ph[1]
            desc = df[s.replace('Accession', 'Name')].unique()[1]
        # Rows actually annotated with this phenotype.
        dfph = df[df[s] != '']
        # Explicit membership test instead of the previous broad
        # 'except Exception', which could silently mask unrelated errors;
        # entries are always created with the full shape below.
        if ph in phall:
            phall[ph]['n'] += len(dfph)
            if screen not in phall[ph]['screens']:
                phall[ph]['screens'].append(screen)
        else:
            phall[ph] = {'n': len(dfph), 'screens': [screen], 'desc': desc}
# ### Connect to the IDR server
conn = connection('idr.openmicroscopy.org')
# ## Build and display figure
# +
# All screens visible on the server; only those listed in screen_ids are parsed.
screens = list(conn.getObjects("Screen"))
screen_count = len(screens)
print(screen_count)
# phall accumulates, per phenotype accession, the sample count, the screens it
# appears in and a human-readable description (filled by appendPhInfo).
phall = {}
# List of screens used for paper
screen_ids = [3, 102, 51, 202, 597, 253, 201, 154, 751, 206,
              251, 803, 1351, 1501, 1551, 1601, 1602, 1603, 1202, 1101, 1302,
              1201, 1251, 1151, 1203, 1204, 1651, 1652, 1653, 1654]
print("Iterating through screens...")
for sc in screens:
    sc_id = sc.getId()
    print('loading ' + str(sc_id))
    if sc_id in screen_ids:
        df = getBulkAnnotationAsDf(sc_id, conn)
        if df is not None:
            appendPhInfo(phall, sc.getName(), df)
# -
# ### Disconnect when done loading data
conn.close()
# ### Load grouping of phenotypes for ordering and coloring them
# Categories were chosen manually offline
# (see [paper](https://www.nature.com/articles/nmeth.4326))
dfColor = read_csv('https://raw.githubusercontent.com/IDR/idr-notebooks/master/includes/CMPOAccessionToPhenotypeCategories.csv')
# One colour per phenotype category, cycling through the 10-colour Set3 palette.
colors = {}
for i, grp in enumerate(dfColor.CmpoPhenotypeCategory.unique()):
    colors[grp] = bpal.Set3_10[i % 10]
# +
# add the information to the data and sort it
# Iterate over a SNAPSHOT of the keys: phenotypes without a known category are
# deleted inside the loop, and deleting from a dict while iterating its live
# key view raises "RuntimeError: dictionary changed size during iteration".
for ph in list(phall.keys()):
    try:
        v = dfColor['CmpoAcc'] == ph
        phall[ph]['group'] = dfColor[v]['CmpoPhenotypeCategory'].values[0]
        phall[ph]['groupColor'] = colors[phall[ph]['group']]
        phall[ph]['FigureCmpoName'] = dfColor[v]['FigureCmpoName'].values[0]
    except Exception:
        # Accession missing from the category table: drop it from the figure.
        print('pass:'+ph)
        del phall[ph]
# Sort phenotypes by category so same-coloured circles sit together on the plot.
phalls = sorted(phall.values(), key=lambda x: x['group'])
# +
TOOLS = "pan,wheel_zoom,reset"
phenotypes = figure(title="Fig 1",
                    tools=TOOLS,
                    y_axis_type="log",
                    width=900,
                    toolbar_location="above")
# One circle per phenotype: x = arbitrary even position, y = sample count
# (log scale), radius = number of screens the phenotype was observed in.
source = ColumnDataSource(
    data=dict(
        ph=[ph['FigureCmpoName'] for ph in phalls],
        n=[ph['n'] for ph in phalls],
        names=[ph['screens'] for ph in phalls],
        desc=[ph['desc'] for ph in phalls],
        x=[2*x for x in range(len(phall.keys()))],
        r=[1*len(ph['screens']) for ph in phalls],
        color=[ph['groupColor'] for ph in phalls],
        groupName=[ph['group'] for ph in phalls]
    ))
label_data = {2*i: x for i, x in
              enumerate([ph['FigureCmpoName'] for ph in phalls])}
# NOTE(review): figure.circle(radius=...) is deprecated in newer Bokeh (3.4+);
# fine for the pinned environment, verify before upgrading.
cir = phenotypes.circle('x', 'n', radius='r', source=source, color='color')
# Hover tooltip showing the CMPO term, description and provenance of each circle.
hover = HoverTool(
    tooltips=[
        ("Term", "@ph"),
        ("Description", "@desc"),
        ("Number of samples", "@n"),
        ("Screens name", "@names"),
        ("group", "@groupName")
    ]
)
phenotypes.add_tools(hover)
phenotypes.xaxis.major_label_orientation = np.pi/4.
phenotypes.xaxis.axis_label_text_font_size = "10pt"
show(phenotypes)
# -
# ### License (BSD 2-Clause)
# Copyright (C) 2016-2021 University of Dundee. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| Figure_1_Sampling_of_Phenotypes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + language="javascript"
# $('#appmode-leave').hide();
# $('#copy-binder-link').hide();
# $('#visit-repo-link').hide();
# +
import ipywidgets as ipw
import json
import random
import time
import pandas as pd
import os
import webbrowser
import math
from IPython.display import display, Markdown
# set kinetic parameters
# 'kin7' selects this exercise's hidden rate constant from the shared JSON file.
with open("rate_parameters.json") as infile:
    jsdata = json.load(infile)
params = jsdata["kin7"]
# -
# Copyright **<NAME> and <NAME>**, January 2021
#
# ## Determination of the Rate Law \#6
# You are studying the composition reaction of a compound by monitoring its concentration as a function of time.
# Perform a series of experiments to determine the order of the reaction, the rate constant, and the half-life.
# What was the initial concentration of the reactant?
#
# ### Instructions:
#
# - Use the slide bar below to select a time at which you collect the data.
# - Click `Perform measurement` to run the virtual experiment and obtain the result of the experiment.
# - Click `Download CSV` to export the complete data set for all the experiments as a CSV file.
# - Note that every time you `Restart laboratory` the initial concentration of the reactant may change.
# +
# define path to results.csv file
respath = os.path.join(os.getcwd(), "..", "results.csv")
# delete existing result file and setup rng
if os.path.exists(respath):
    os.remove(respath)
#random.seed(params["error"].get("seed", 0))
# Seed the RNG with the byte-swapped current time in milliseconds so every
# session yields a different hidden initial concentration.
t = int( time.time() * 1000.0 )
random.seed( ((t & 0xff000000) >> 24) +
             ((t & 0x00ff0000) >>  8) +
             ((t & 0x0000ff00) <<  8) +
             ((t & 0x000000ff) << 24)   )
class system:
    """Container for the chemical system's hidden state used by the virtual lab.

    NOTE(review): the lowercase class name is shadowed by the module-level
    instance created below (``system = system()``); the class is instantiated
    exactly once.
    """
    def __init__(self, vol=0, conc=0, press=0):
        self.vol = vol  # volume
        self.conc = conc  # concentration (the hidden unknown, randomised per session)
        self.press = press  # pressure
class data:
    """Description of one experiment quantity (an adjustable input or a result).

    Attributes mirror the constructor arguments: ``label``/``units`` for
    display, ``start``/``minval``/``maxval`` for the widget range, ``value``
    for the current setting and ``error`` for the random measurement error.
    """
    def __init__(self, start=-1, error=0, label='none', units='pure', value=0,
                 minval=-1, maxval=3):
        self.start = start
        self.minval = minval
        self.maxval = maxval
        self.error = error
        self.label = label
        self.units = units
        self.value = value
# Experiment setup (+ hidden parameters); this instance shadows the class name.
system = system()
def initialiseExperiment():
    """Reset the hidden experiment state and the CSV column headers.

    Reads the global ``args``/``result`` specifications, rebuilds ``n`` (their
    sizes) and ``columns_list``, and draws a new random hidden concentration.
    """
    global n
    global system
    global columns_list
    global scatter
    scatter = 0.1  # relative width of the random measurement error
    n = []
    columns_list = []
    n.append(len(args)) # number of input adjustable parameters
    n.append(len(result)) # number of results for the experiment
    # CSV headers: one "<label> [<units>]" column per input, then per result.
    for i in range(0, n[0]):
        columns_list.append(f"{args[i].label} [{args[i].units}]")
    for i in range(0, n[1]):
        columns_list.append(f"{result[i].label} [{result[i].units}]")
    # Random initial concentration
    system.conc = random.random()
# +
# Adjustable input parameters
def initialiseVariables():
    """Define the adjustable inputs: a single time slider (1-100 min)."""
    global logScale
    global args
    logScale = False  # linear slider (a log slider is supported by the UI code)
    args = [
        data(
            label="Time",
            units="min",
            start=1.,
            value=0.,
            minval=1,
            maxval=100,
        )
    ]
# Results
def initialiseResults():
    """Define the measured quantities: the reactant concentration [C]."""
    global result
    result = [
        data(
            label="[C]",
            units="mol/L",
            start=0.,
            # per-restart random measurement error scale
            error=random.random() / 10.,
        )
    ]
def measure():
    """Return the exact reactant concentration at the currently selected time.

    Uses the integrated second-order rate law: 1/[A] = 1/[A]0 + k*t.
    """
    # Renamed from `time`: the old local shadowed the `time` module that this
    # file imports and uses for RNG seeding.
    t_min = args[0].value.value  # slider widget -> elapsed time in minutes
    Ainv = 1. / system.conc + params["k"] * t_min
    res = 1. / Ainv
    return res
# Build the input-slider definitions once at import time
initialiseVariables()
# +
# Output areas: out_P shows the results table, out_L holds the CSV download link
out_P = ipw.Output()
out_L = ipw.Output()
with out_L:
    display(Markdown("[Download CSV](../results.csv)"))
def calc(btn):
    """Run one virtual measurement and append it to the results CSV."""
    out_P.clear_output()
    # Exact model value, then a uniform random error of +/- scatter
    result[0].value = measure()
    result[0].error = result[0].value * scatter * (0.5 - random.random()) * 2
    # Show the noisy value to the student
    out_R[0].value = f"{result[0].value + result[0].error:.3e}"
    # Append the new row (inputs followed by noisy results) to the CSV on disk
    res = pd.read_csv(respath)
    row = [a.value.value for a in args]
    row += [r.value + r.error for r in result]
    res.loc[len(res)] = row
    res.to_csv(respath, index=False)
    with out_P:
        display(res.tail(50))
def reset(btn):
    """Restart the laboratory: wipe the CSV and re-randomise the experiment."""
    if os.path.exists(respath):
        os.remove(respath)
    initialiseResults()
    initialiseExperiment()
    # Fresh, empty results table with the current column headers
    res = pd.DataFrame(columns=columns_list)
    res.to_csv(respath, index=False)
    with out_P:
        out_P.clear_output()
        display(res.tail(50))
# interactive buttons ---
# btn_reset = ipw.Button(description="Restart Laboratory", layout=ipw.Layout(width="150px"))
# btn_reset.on_click(reset)
# btn_calc = ipw.Button(description="Perform measurement", layout=ipw.Layout(width="150px"))
# btn_calc.on_click(calc)
# ---
def solution(btn):
    """Print the answer key for this exercise."""
    print("The reaction is second order")
    print(f"Rate constant = {params['k']:.4f}")
# interactive buttons ---
btn_reset = ipw.Button(description="Restart laboratory", layout=ipw.Layout(width="150px"))
btn_reset.on_click(reset)
btn_calc = ipw.Button(description="Perform measurement", layout=ipw.Layout(width="150px"))
btn_calc.on_click(calc)
btn_solution = ipw.Button(description="Solution", layout=ipw.Layout(width="150px"))
btn_solution.on_click(solution)
# ---
# Initialise the lab (creates an empty results CSV and randomises the system)
reset(btn_reset)
# One slider row per adjustable input parameter
rows = []
for i in range(0, n[0]):
    if logScale:
        args[i].value = ipw.FloatLogSlider(value=args[i].start, min=args[i].minval, max=args[i].maxval)
    else:
        args[i].value = ipw.FloatSlider(value=args[i].start, min=args[i].minval, max=args[i].maxval)
    rows.append(ipw.HBox([ipw.Label(value=f"{args[i].label} [{args[i].units}]:",
                                    layout=ipw.Layout(width="250px")),
                          args[i].value]))
# One read-only label row per measured result
out_R = []
for i in range(0, n[1]):
    out_R.append(ipw.Label(value=""))
    rows.append(ipw.HBox([ipw.Label(value=f"Measured {result[i].label} [{result[i].units}]:",
                                    layout=ipw.Layout(width="250px")),
                          out_R[i]]))
# Buttons + download link, then the results table, stacked vertically
rows.append(ipw.HBox([btn_reset, btn_calc, btn_solution, out_L]))
rows.append(ipw.HBox([out_P]))
ipw.VBox(rows)
# -
| CEK_problems/kinetics_05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Natural Language Processing Project
#
# Welcome to the NLP Project for this section of the course. In this NLP project you will be attempting to classify Yelp Reviews into 1 star or 5 star categories based off the text content in the reviews. This will be a simpler procedure than the lecture, since we will utilize the pipeline methods for more complex tasks.
#
# We will use the [Yelp Review Data Set from Kaggle](https://www.kaggle.com/c/yelp-recsys-2013).
#
# Each observation in this dataset is a review of a particular business by a particular user.
#
# The "stars" column is the number of stars (1 through 5) assigned by the reviewer to the business. (Higher stars is better.) In other words, it is the rating of the business by the person who wrote the review.
#
# The "cool" column is the number of "cool" votes this review received from other Yelp users.
#
# All reviews start with 0 "cool" votes, and there is no limit to how many "cool" votes a review can receive. In other words, it is a rating of the review itself, not a rating of the business.
#
# The "useful" and "funny" columns are similar to the "cool" column.
#
# ## Imports
# **Import the usual suspects. :) **
import numpy as np
import pandas as pd
# ## The Data
#
# **Read the yelp.csv file and set it as a dataframe called yelp.**
# Load the Yelp reviews dataset (one row per review)
yelp = pd.read_csv('yelp.csv')
# ** Check the head, info , and describe methods on yelp.**
yelp.head()
yelp.info()
yelp.describe()
# **Create a new column called "text length" which is the length (number of
# characters) of the text column. Note: len() on a string counts characters,
# not words.**
yelp['text length'] = yelp['text'].apply(len)
# # EDA
#
# Let's explore the data
#
# ## Imports
#
# **Import the data visualization libraries if you haven't done so already.**
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
# %matplotlib inline
# **Use FacetGrid from the seaborn library to create a grid of 5 histograms of text length based off of the star ratings. Reference the seaborn documentation for hints on this**
# One histogram of review length per star rating (one column per rating)
g = sns.FacetGrid(yelp,col='stars')
g.map(plt.hist,'text length')
# **Create a boxplot of text length for each star category.**
sns.boxplot(x='stars',y='text length',data=yelp,palette='rainbow')
# **Create a countplot of the number of occurrences for each type of star rating.**
sns.countplot(x='stars',data=yelp,palette='rainbow')
# ** Use groupby to get the mean values of the numerical columns, you should be able to create this dataframe with the operation:**
# NOTE(review): on newer pandas this may need .mean(numeric_only=True)
# because the frame also contains non-numeric columns — verify.
stars = yelp.groupby('stars').mean()
stars
# **Use the corr() method on that groupby dataframe to produce this dataframe:**
stars.corr()
# **Then use seaborn to create a heatmap based off that .corr() dataframe:**
sns.heatmap(stars.corr(),cmap='coolwarm',annot=True)
# ## NLP Classification Task
#
# Let's move on to the actual task. To make things a little easier, go ahead and only grab reviews that were either 1 star or 5 stars.
#
# **Create a dataframe called yelp_class that contains the columns of yelp dataframe but for only the 1 or 5 star reviews.**
# Keep only the extreme ratings: 1-star and 5-star reviews (binary task)
yelp_class = yelp[(yelp.stars==1) | (yelp.stars==5)]
# ** Create two objects X and y. X will be the 'text' column of yelp_class and y will be the 'stars' column of yelp_class. (Your features and target/labels)**
X = yelp_class['text']
y = yelp_class['stars']
# **Import CountVectorizer and create a CountVectorizer object.**
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
# ** Use the fit_transform method on the CountVectorizer object and pass in X (the 'text' column). Save this result by overwriting X.**
# Learns the vocabulary and converts each review into a sparse bag-of-words vector
X = cv.fit_transform(X)
# ## Train Test Split
#
# Let's split our data into training and testing data.
#
# ** Use train_test_split to split up the data into X_train, X_test, y_train, y_test. Use test_size=0.3 and random_state=101 **
from sklearn.model_selection import train_test_split
# 70/30 split; fixed random_state for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.3,random_state=101)
# ## Training a Model
#
# Time to train a model!
#
# ** Import MultinomialNB and create an instance of the estimator and call it nb **
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()
# **Now fit nb using the training data.**
nb.fit(X_train,y_train)
# ## Predictions and Evaluations
#
# Time to see how our model did!
#
# **Use the predict method off of nb to predict labels from X_test.**
predictions = nb.predict(X_test)
# ** Create a confusion matrix and classification report using these predictions and y_test **
from sklearn.metrics import confusion_matrix,classification_report
print(confusion_matrix(y_test,predictions))
print('\n')
print(classification_report(y_test,predictions))
# **Great! Let's see what happens if we try to include TF-IDF to this process using a pipeline.**
# # Using Text Processing
#
# ** Import TfidfTransformer from sklearn. **
from sklearn.feature_extraction.text import TfidfTransformer
# ** Import Pipeline from sklearn. **
from sklearn.pipeline import Pipeline
# ** Now create a pipeline with the following steps:CountVectorizer(), TfidfTransformer(),MultinomialNB()**
# Full text-classification pipeline: raw strings in, star predictions out
pipeline = Pipeline([
    ('bow', CountVectorizer()),  # strings to token integer counts
    ('tfidf', TfidfTransformer()),  # integer counts to weighted TF-IDF scores
    ('classifier', MultinomialNB()),  # train on TF-IDF vectors w/ Naive Bayes classifier
])
# ## Using the Pipeline
#
# **Time to use the pipeline! Remember this pipeline has all your pre-process steps in it already, meaning we'll need to re-split the original data. (Remember that we overwrote X as the CountVectorized version; what we need is just the text.)**
# ### Train Test Split
#
# **Redo the train test split on the yelp_class object.**
# Re-split from the RAW text (the earlier X was already vectorized); the
# pipeline does its own vectorization internally.
X = yelp_class['text']
y = yelp_class['stars']
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.3,random_state=101)
# **Now fit the pipeline to the training data. Remember you can't use the same training data as last time because that data has already been vectorized. We need to pass in just the text and labels**
# May take some time
pipeline.fit(X_train,y_train)
# ### Predictions and Evaluation
#
# ** Now use the pipeline to predict from the X_test and create a classification report and confusion matrix. You should notice strange results.**
predictions = pipeline.predict(X_test)
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
| ML/NLP/NLP_Project .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# language: python
# name: python3
# ---
# +
import pandas as pd
import seaborn as sns
sns.set(
style="whitegrid",
font_scale=1.2,
)
sns.set_style({'font.family':'serif', 'font.serif':'Times New Roman'})
import matplotlib.pyplot as plt
# +
# Load the VerSe vertebral-fracture spreadsheet and split it into the
# "-g" (genant) and "-s" columns per vertebral level T1-L6.
df = pd.read_excel('ryai190138_appendixe1.xlsx')
dfg = df[['verse_ID', 'subject_ID', 'T1_fx-g', 'T2_fx-g', 'T3_fx-g', 'T4_fx-g',
          'T5_fx-g', 'T6_fx-g', 'T7_fx-g', 'T8_fx-g', 'T9_fx-g', 'T10_fx-g',
          'T11_fx-g', 'T12_fx-g', 'L1_fx-g', 'L2_fx-g', 'L3_fx-g', 'L4_fx-g',
          'L5_fx-g', 'L6_fx-g']]
dfs = df[['verse_ID', 'subject_ID', 'T1_fx-s', 'T2_fx-s', 'T3_fx-s', 'T4_fx-s',
          'T5_fx-s', 'T6_fx-s', 'T7_fx-s', 'T8_fx-s', 'T9_fx-s', 'T10_fx-s',
          'T11_fx-s', 'T12_fx-s', 'L1_fx-s', 'L2_fx-s', 'L3_fx-s', 'L4_fx-s',
          'L5_fx-s', 'L6_fx-s']]
# -
# Distribution of the T1 grade per verse_ID
_ = sns.swarmplot(y="verse_ID", x="T1_fx-g", data=dfg, palette="Set1", size=5)
# _ = plt.xlabel(r'T1_fx-g')
# _ = plt.ylabel(r'verse_ID')
# # _ = plt.title(r'$\tex{}$')
# plt.show()
| CTLabel/EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="sMKYrPjWXSLw"
# # MiniBatchKMeans with StandardScaler
# + [markdown] id="1Y8KEhdSY_wm"
# ### This code template is for clustering analysis using MiniBatchKMeans, with StandardScaler as the feature-scaling method.
# + [markdown] id="sqLmErs-ZKCA"
# ### Required Packages
# + id="RLrskL-gXSLz"
# !pip install plotly
# + id="mpoQ5I4DXSL0"
import warnings
import operator
import itertools
import numpy as np
import pandas as pd
import plotly.express as px
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import plotly.graph_objects as go
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import silhouette_score
warnings.filterwarnings('ignore')
# + [markdown] id="mYvI1q-MXSL1"
# ### Initialization
#
# Filepath of CSV file
# + id="Zh0fQz82XSL2"
#filepath — path to the CSV dataset; fill in before running
file_path = ""
# + [markdown] id="Jm9QBunEXSL2"
# List of features which are required for model training
# + id="Y-FX4Y9OXSL3"
# Column names to cluster on; fill in before running
features = []
# + [markdown] id="V7Ej4ZfqXSL4"
# ### Data Fetching
#
# Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
#
# We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="Qx9KjzIxXSL5" outputId="d0e41254-8e22-45ad-b0c7-334280864531"
# Load the dataset and preview the first rows
df=pd.read_csv(file_path)
df.head()
# + [markdown] id="PTqm5IruXSNe"
# ### Feature Selections
#
# It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
#
# We will assign all the required input features to X.
# + id="zyYsmX0-XSNf"
X = df[features]
# + [markdown] id="KU-DhjB5XSNf"
# ### Data Preprocessing
#
# Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.
#
# + id="e9BCp2jbXSNg"
def NullClearner(df):
    """Fill missing values in a pandas Series, in place.

    Numeric (float64/int64) series get their mean; any other series gets its
    mode. Non-Series inputs are returned untouched.
    """
    if not isinstance(df, pd.Series):
        return df
    if df.dtype in ["float64", "int64"]:
        df.fillna(df.mean(), inplace=True)
    else:
        df.fillna(df.mode()[0], inplace=True)
    return df
def EncodeX(df):
    """One-hot encode the categorical/string columns of df."""
    encoded = pd.get_dummies(df)
    return encoded
# + [markdown] id="LJjQaDlNXSNh"
# Calling preprocessing functions on the feature and target set.
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="zuD6VvCSXSNi" outputId="7ecddc16-c099-4baa-f5b5-fa6c029b2bdd"
# Fill missing values column by column, then one-hot encode categoricals
x=X.columns.to_list()
for i in x:
    X[i]=NullClearner(X[i])
X=EncodeX(X)
X.head()
# + [markdown] id="Fv7n2EO4XSNj"
# ### Data Rescaling
#
# Performing StandardScaler data rescaling operation on dataset. The StandardScaler Standardize features by removing the mean and scaling to unit variance
#
# [StandardScaler](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
# + id="BZKtAGGzXSNj" colab={"base_uri": "https://localhost:8080/", "height": 203} outputId="59c4c122-b9d2-42ea-ba0b-c447fac37974"
# Standardize to zero mean / unit variance; rebuild a DataFrame to keep column names
X_Scaled=StandardScaler().fit_transform(X)
X_Scaled=pd.DataFrame(data = X_Scaled,columns = X.columns)
X_Scaled.head()
# + [markdown] id="yMP1DdiiXSNk"
# ### Silhouette Score
# [Reference](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html)
# * Silhouette Coefficient or silhouette score is a metric used to calculate the goodness of a clustering technique. Its value ranges from -1 to 1.
# * The best value is 1 and the worst value is -1. Values near 0 indicate overlapping clusters. Negative values generally indicate that a sample has been assigned to the wrong cluster, as a different cluster is more similar.
#
#
# + id="5GSSMS5rXSNk"
def SilhoutteScore(X):
    """Fit MiniBatchKMeans for k = 2..10 and return {k: silhouette score},
    sorted with the best-scoring k first."""
    scores = {}
    for num_clusters in range(2, 11):
        model = MiniBatchKMeans(n_clusters=num_clusters, random_state=123).fit(X)
        scores[num_clusters] = silhouette_score(X, model.labels_)
    # Highest silhouette first
    return dict(sorted(scores.items(), key=operator.itemgetter(1), reverse=True))
# + id="V-hEz4mzXSNk" colab={"base_uri": "https://localhost:8080/"} outputId="ba32ce03-94f7-4022-9c9c-eb9e8e22ace8"
# Rank candidate cluster counts by silhouette score (best first)
SilhoutteScore(X_Scaled)
# + [markdown] id="QHNwOA-LXSNl"
# ### Elbow Method
# [Info](https://en.wikipedia.org/wiki/Elbow_method_(clustering))
#
# The Elbow Method is one of the most popular methods to determine this optimal value of k.
# We iterate the values of k from 1 to 11 and calculate the distortion or inertia for each value of k in the given range.
# Where Inertia is the sum of squared distances of samples to their closest cluster center.
# To determine the optimal number of clusters, we have to select the value of k at the “elbow” ie the point after which the distortion/inertia start decreasing in a linear fashion.
# + id="Oz4l-4SXXSNl"
def ElbowPlot(X):
    """Plot SSE (inertia) against k = 1..10 for the elbow method."""
    inertias = []
    for k in range(1, 11):
        model = MiniBatchKMeans(n_clusters=k, random_state=123).fit(X)
        inertias.append(model.inertia_)
    plt.plot(range(1, 11), inertias)
    plt.title('Elbow Method')
    plt.xlabel('Number of clusters')
    plt.ylabel('SSE')
    plt.show()
# + id="4g2aeI4pXSNl" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="ac18f5dd-3406-4cf5-8a51-83a9818a8191"
# Inspect the elbow to pick the number of clusters
ElbowPlot(X_Scaled)
# + [markdown] id="EDRamsehXSNm"
# ### Model
#
# The MiniBatchKMeans is a variant of the KMeans algorithm which uses mini-batches to reduce the computation time, while still attempting to optimise the same objective function. Mini-batches are subsets of the input data, randomly sampled in each training iteration. These mini-batches drastically reduce the amount of computation required to converge to a local solution. In contrast to other algorithms that reduce the convergence time of k-means, mini-batch k-means produces results that are generally only slightly worse than the standard algorithm.
#
#
# #### Tuning Parameters:
#
#
# **n_clusters: int, default=8** ->
# The number of clusters to form as well as the number of centroids to generate.
#
# **init: {‘k-means++’, ‘random’}, callable or array-like of shape (n_clusters, n_features), default=’k-means++’** ->
# Method for initialization: ‘k-means++’ : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. ‘random’: choose n_clusters observations (rows) at random from data for the initial centroids. If an array is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. If a callable is passed, it should take arguments X, n_clusters and a random state and return an initialization.
#
# **max_iter: int, default=100** ->
# Maximum number of iterations over the complete dataset before stopping independently of any early stopping criterion heuristics.
#
# **batch_size: int, default=100** ->
# Size of the mini batches.
#
# **verbose: int, default=0** ->
# Verbosity mode.
#
# **compute_labels: bool, default=True** ->
# Compute label assignment and inertia for the complete dataset once the minibatch optimization has converged in fit.
#
# **random_state: int, RandomState instance or None, default=None** ->
# Determines random number generation for centroid initialization and random reassignment. Use an int to make the randomness deterministic. See Glossary.
#
# **tol: float, default=0.0** ->
# Control early stopping based on the relative center changes as measured by a smoothed, variance-normalized of the mean center squared position changes. This early stopping heuristics is closer to the one used for the batch variant of the algorithms but induces a slight computational and memory overhead over the inertia heuristic. To disable convergence detection based on normalized center change, set tol to 0.0 (default).
#
# **max_no_improvement: int, default=10** ->
# Control early stopping based on the consecutive number of mini batches that does not yield an improvement on the smoothed inertia. To disable convergence detection based on inertia, set max_no_improvement to None.
#
# **init_size: int, default=None** ->
# Number of samples to randomly sample for speeding up the initialization (sometimes at the expense of accuracy): the only algorithm is initialized by running a batch KMeans on a random subset of the data. This needs to be larger than n_clusters. If None, init_size= 3 * batch_size.
#
# **n_init: int, default=3** ->
# Number of random initializations that are tried. In contrast to KMeans, the algorithm is only run once, using the best of the n_init initializations as measured by inertia.
#
# **reassignment_ratio: float, default=0.01** ->
# Control the fraction of the maximum number of counts for a center to be reassigned. A higher value means that low count centers are more easily reassigned, which means that the model will take longer to converge, but should converge in a better clustering.
# + id="fVIY7IVrXSNm"
# Fit the final model (k chosen from the silhouette/elbow analysis above)
miniBatchkMeans = MiniBatchKMeans(n_clusters=6, random_state=123)
pred_y = miniBatchkMeans.fit_predict(X_Scaled)
# + [markdown] id="JfvCpeE5XSNn"
# ### Cluster Analysis
#
# First, we add the cluster labels from the trained model into the copy of the data frame for cluster analysis/visualization.
# + id="uKK5KwniXSNn" colab={"base_uri": "https://localhost:8080/", "height": 203} outputId="159436c6-c3fa-40c6-9d07-409e5806bb21"
# Copy the scaled features and attach the predicted cluster label per row
ClusterDF = X_Scaled.copy(deep=True)
ClusterDF['ClusterID'] = pred_y
ClusterDF.head()
# + [markdown] id="JmurPohoXSNo"
# #### Cluster Records
# The below bar graphs show the number of data points in each available cluster.
# + id="H4vL_jxFXSNp" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="b8a245fa-3df3-46b5-ff9d-7bf830bcd94d"
ClusterDF['ClusterID'].value_counts().plot(kind='bar')
# + [markdown] id="fl6hIKvOXSNp"
# #### Cluster Plots
# Below written functions get utilized to plot 2-Dimensional and 3-Dimensional cluster plots. Plots include different available clusters along with cluster centroid.
# + id="vCp4lzyEXSNq"
def Plot2DCluster(X_Cols,df):
    """Scatter-plot every pair of feature columns, coloured by cluster, with
    the fitted centroids overlaid as black triangles.

    Relies on the module-level fitted `miniBatchkMeans` model; `df` must
    contain the feature columns plus a 'ClusterID' column.
    """
    for i in list(itertools.combinations(X_Cols, 2)):
        plt.rcParams["figure.figsize"] = (8,6)
        # Positional indices of the two features within the centroid array
        xi,yi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1])
        for j in df['ClusterID'].unique():
            DFC=df[df.ClusterID==j]
            plt.scatter(DFC[i[0]],DFC[i[1]],cmap=plt.cm.Accent,label=j)
        plt.scatter(miniBatchkMeans.cluster_centers_[:,xi],miniBatchkMeans.cluster_centers_[:,yi],marker="^",color="black",label="centroid")
        plt.xlabel(i[0])
        plt.ylabel(i[1])
        plt.legend()
        plt.show()
def Plot3DCluster(X_Cols,df):
    """Matplotlib 3-D scatter for every triple of feature columns, coloured by
    cluster, with the fitted centroids shown as black triangles."""
    for i in list(itertools.combinations(X_Cols, 3)):
        xi,yi,zi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1]),df.columns.get_loc(i[2])
        fig,ax = plt.figure(figsize = (16, 10)),plt.axes(projection ="3d")
        # NOTE(review): the `b` keyword of Axes.grid was renamed to `visible`
        # in Matplotlib 3.5 and later removed — verify against the pinned
        # matplotlib version.
        ax.grid(b = True, color ='grey',linestyle ='-.',linewidth = 0.3,alpha = 0.2)
        for j in df['ClusterID'].unique():
            DFC=df[df.ClusterID==j]
            ax.scatter3D(DFC[i[0]],DFC[i[1]],DFC[i[2]],alpha = 0.8,cmap=plt.cm.Accent,label=j)
        ax.scatter3D(miniBatchkMeans.cluster_centers_[:,xi],miniBatchkMeans.cluster_centers_[:,yi],miniBatchkMeans.cluster_centers_[:,zi],
                     marker="^",color="black",label="centroid")
        ax.set_xlabel(i[0])
        ax.set_ylabel(i[1])
        ax.set_zlabel(i[2])
        plt.legend()
        plt.show()
def Plotly3D(X_Cols,df):
    """Interactive plotly 3-D scatter for every triple of feature columns:
    one trace for the centroids, one for the cluster-coloured points."""
    for i in list(itertools.combinations(X_Cols,3)):
        xi,yi,zi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1]),df.columns.get_loc(i[2])
        # Centroid markers
        fig1 = px.scatter_3d(miniBatchkMeans.cluster_centers_,x=miniBatchkMeans.cluster_centers_[:,xi],y=miniBatchkMeans.cluster_centers_[:,yi],
                             z=miniBatchkMeans.cluster_centers_[:,zi])
        # Data points coloured by cluster label
        fig2=px.scatter_3d(df, x=i[0], y=i[1],z=i[2],color=df['ClusterID'])
        # Merge both traces into a single titled figure
        fig3 = go.Figure(data=fig1.data + fig2.data,
                         layout=go.Layout(title=go.layout.Title(text="x:{}, y:{}, z:{}".format(i[0],i[1],i[2])))
                         )
        fig3.show()
# + id="snbqzHM4XSNq" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a674db74-0e3e-4705-8191-4c1588011a69"
# Render the 2-D, matplotlib 3-D and plotly 3-D cluster views
Plot2DCluster(X_Scaled.columns,ClusterDF)
# + id="_nppZFOYXSNr" colab={"base_uri": "https://localhost:8080/", "height": 575} outputId="ee25eacd-3117-49e5-f0eb-a6df48e30793"
Plot3DCluster(X_Scaled.columns,ClusterDF)
# + id="VGL1WPasXSNs" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="d3a55ef6-1a08-4e89-d932-fc813982f7ae"
Plotly3D(X_Scaled.columns,ClusterDF)
# + [markdown] id="P_ChiHFTXSNt"
# #### Creator: <NAME> , Github: [Profile](https://github.com/guptayush179)
#
| Clustering/MiniBatchKMeans/MiniBatchKMeans_StandardScaled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="LisER1atk_SK" colab_type="code" colab={}
import pandas as pd
from google.colab import files
# Kaggle COVID-19 forecasting training data (week 4)
train_file = "https://raw.githubusercontent.com/danielcaraway/COVID19/master/WK4_0413/train.csv"
train = pd.read_csv(train_file)
# + id="-7CrCyJlmDP8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="0cd59dac-a14a-45a5-81b9-4d257fafd03e"
## Just looking at CA
# .copy() makes `ca` an independent frame, so the assignment below modifies
# `ca` itself rather than a view of `train` (avoids SettingWithCopyWarning
# and the silently-lost write it signals).
ca = train[train['Province_State'] == 'California'].copy()
ca['Date'] = pd.to_datetime(ca['Date'])
# + id="qfGjVDAKmMxX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 330} outputId="9226115a-37aa-4022-ee33-c23a2e1b9593"
import matplotlib.pyplot as plt
import seaborn as sns
# cc_df = cc_df.set_index('datetime')
# Plot cumulative confirmed cases and fatalities over time for California
ca.plot(x="Date", y=["ConfirmedCases", "Fatalities"])
plt.title('California')
plt.show()
# + id="7eVKoyoam_ZR" colab_type="code" colab={}
| assets/covid19/PROJECT_COVID19_GLOBAL_WK4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np #I always import the same stuff and set up the same directories. This is always my first code-box.
ddir='' #your data directory, like '/Users/ayep/research/data'; starting with "/Users" makes code easily portable to other computers :)
pdir='' #your plot directory, like '/Users/ayep/research/plot'
# -
# ## Useful Everyday Functions
# If there are pieces of code you use every day...make a function! For example, I open files, write files, and take error-weighted means all, the, time. I also make LaTeX deluxe tables all the time that continually have to be updated with new data. That's a pain to do by hand..........so I have functions! See Textify and LaTeX_Tableizer.
# +
#open data files line-by-line but split up by tab ('\t')
def opendat(dir,filename): #dir,'filename'. For opening a data file. Can then send through roundtable.
f=open(dir+filename,'r')
dat=f.readlines()
f.close()
labels=dat[0][0:-1].split()
dat2=[[a.strip('\n') for a in d.split('\t')] for d in dat if d[0]!='#']
dat3=[['nan' if a.strip()=='' else a for a in d] for d in dat2]
return [dat3,labels]
#open data files into well-named variables:
def opendat2(dirr,filename,params): #Use as var,var,var...=opendat2(dir,'filename',['keys']).
    """Return one array/list per requested column name.

    Columns that parse as floats come back as numpy float arrays; anything
    else comes back as a list of stripped strings. Call with params=[] first
    to print the available keys.
    """
    dat,label=opendat(dirr,filename) #Get keys by first leaving ['keys'] blank: opendat2(dirr,filename,[])
    print(label)
    varrs=[]
    for p in params:
        j=label.index(p)
        column=[d[j] for d in dat]
        try:
            # Numeric column: convert every entry to float.
            varrs.append(np.array([float(v) for v in column]))
        except ValueError:
            # Non-numeric column: keep as stripped strings.
            varrs.append([v.strip() for v in column])
    return varrs
#write new data files
def writedat(dirr,filename,pars,label): #.dat auto included. pars as [name,ra,dec] etc.
    """Write column-oriented `pars` as a tab-separated .dat file with a
    tab-joined `label` header line."""
    # Transpose the list of columns into rows of strings
    datp=[[str(a[i]) for a in pars] for i in range(len(pars[0]))]
    # `with` guarantees the file is flushed and closed even if a write fails
    # (the original open()/close() pair leaked the handle on error).
    with open(dirr+filename+'.dat','w') as f:
        print('\t'.join(label),file=f)
        print(label)
        for d in datp:
            print('\t'.join(d),file=f)
    print('It is written: '+filename+'.dat')
# -
# ## Demo
#I can start with blank to auto-print my keys.
opendat2(ddir,'SampleData_Kinematics.dat',[])
# +
# I want my proper motions in right ascension and declination, let's say,
# and I want to call them pra and pdec.
pra,pdec=opendat2(ddir,'SampleData_Kinematics.dat',['pra','pdec'])
# Now I can do all further operations on variables that are called what they are!
# For example, average magnitude of 2-D sky motion is this:
# nanmean ignores rows where either component is missing ('nan')
p2D = np.nanmean(np.sqrt(pra**2. + pdec**2.))
print(p2D)
# Isn't that sooo much easier, getting to do operations on pra and pdec instead of something
# like data['propermotionRA']**2. + data['propermotionDEC']**2. etc. etc.? :)
# -
| 2020_Workshop/Alex_Python/.ipynb_checkpoints/Functions-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data-X: Airbnb vs. Rent
# Uses models generated from this project to provide insights in relation to average rent prices
# ___
# ### Dependencies
# Depends on models generated from:
# * airbnb_price_predictor.ipynb
# ### Imports
# +
# Import Python Packages
import re
import warnings
# Import Standard ML packages
import numpy as np
import pandas as pd
# Import and Configure Plotting Libraries
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="whitegrid", palette="muted")
plt.rcParams['figure.figsize'] = (12, 9)
plt.rcParams['font.size'] = 16
# %matplotlib inline
# -
# ### Import Datasets
# RentCafe average-rent snapshots for four dates
jan17_raw_df = pd.read_csv("../raw_datasets/sf_rentcafe_jan_17.csv")
may18_raw_df = pd.read_csv("../raw_datasets/sf_rentcafe_may_18.csv")
aug18_raw_df = pd.read_csv("../raw_datasets/sf_rentcafe_aug_18.csv")
nov18_raw_df = pd.read_csv("../raw_datasets/sf_rentcafe_nov_18.csv")
# +
# Feature matrix and target exported by airbnb_price_predictor.ipynb
import_path = "../exported_models/airbnb_price_predictor.hdf"
X_df = pd.read_hdf(import_path, "X_df")
Y = pd.read_hdf(import_path, "Y")
X_df.head()
# -
# ### Data Cleaning/Transformation
# +
def select_columns(df, *columns):
    """Return `df` restricted to the given columns, in the given order."""
    # list(...) because .loc expects a list-like of labels for column
    # selection; a bare tuple is reserved for MultiIndex keys and is
    # rejected by modern pandas on a flat index.
    return df.loc[:, list(columns)]
def dollar_to_float(df, *columns):
    """Strip '$' and ',' from the given string columns and cast to float64.

    Mutates `df` in place and also returns it (for .pipe chaining).
    """
    for c in columns:
        # regex=True keeps the character-class semantics: pandas >= 2.0
        # defaults str.replace to literal matching, which would leave the
        # '$' and ',' in place and make astype fail.
        df[c] = df[c].str.replace(r'[$,]', '', regex=True).astype("float64")
    return df
def clean_df(df):
    """Index a raw RentCafe table by neighborhood and convert the dollar-string
    price columns to floats. Works on a copy; the input is untouched."""
    price_cols = ("All rentals", "Studio", "1 Bed", "2 Beds", "3 Beds")
    indexed = df.copy().set_index("Neighborhood")
    return dollar_to_float(indexed, *price_cols)
# +
# Clean each rent snapshot (neighborhood index, numeric prices)
jan17_df = clean_df(jan17_raw_df)
may18_df = clean_df(may18_raw_df)
aug18_df = clean_df(aug18_raw_df)
nov18_df = clean_df(nov18_raw_df)
nov18_df.head()
# -
# ### Analysis/Visualization
# +
# Recover plain neighbourhood names from the one-hot encoded feature columns
nbr_cols = X_df.columns
nbr_cols = nbr_cols[nbr_cols.str.startswith("neighbourhood_cleansed")].values
nbr_cols = [re.sub(r"neighbourhood_cleansed_", "", c) for c in nbr_cols]
def nbr_in_rent(c):
    """True when neighbourhood `c` appears in the rent table's index."""
    return c in nov18_df.index
# -
def visualize_rent_history(nbr, num_bed):
    """Plot the four historical average-rent data points for `num_bed`
    bedrooms in neighbourhood `nbr`, and save the figure to ../plots."""
    if not nbr_in_rent(nbr):
        warnings.warn("Rent prices unavailable for this neighbourhood.")
        return None
    # Map bedroom count to the rent-table column name
    beds_map = {
        0: "Studio",
        1: "1 Bed",
        2: "2 Beds",
        3: "3 Beds"
    }
    bed_col = beds_map[num_bed]
    # x positions = months elapsed since Jan '17 (Jan'17, May'18, Aug'18, Nov'18)
    dates = [1, 12+5, 12+8, 12+11]
    rent_prices = [
        jan17_df.loc[nbr, bed_col],
        may18_df.loc[nbr, bed_col],
        aug18_df.loc[nbr, bed_col],
        nov18_df.loc[nbr, bed_col]
    ]
    plt.title(f"""Historical Data for {bed_col} Apartment in {nbr}""")
    plt.xticks(dates, ["Jan '17", "May '18", "Aug '18", "Nov '18"])
    plt.plot(dates, rent_prices, 'o-')
    plt.savefig('../plots/Airbnb Rent Comparison Sample Historical Rent.png', bbox_inches='tight')
    plt.show()
# Example: one-bedroom rents in the Tenderloin
visualize_rent_history(nbr="Tenderloin", num_bed=1)
# +
def get_rent_price(nbr, num_bed):
    """Return the Nov-'18 average monthly rent for `num_bed` bedrooms in
    neighbourhood `nbr`, or None (with a warning) when no data exists."""
    if not nbr_in_rent(nbr):
        warnings.warn("Rent prices unavailable for this neighbourhood.")
        return None
    # Map bedroom count to the rent-table column name
    bed_col = {
        0: "Studio",
        1: "1 Bed",
        2: "2 Beds",
        3: "3 Beds"
    }[num_bed]
    return nov18_df.loc[nbr, bed_col]
def price_comparison_stats(nbr, num_beds, airbnb_price):
    """Compare a nightly Airbnb price against the average monthly rent.

    Returns a stats dict, or None (with a warning) when no rent data exists
    for the neighbourhood.
    """
    if not nbr_in_rent(nbr):
        warnings.warn("Rent prices unavailable for this neighbourhood.")
        return None
    rent_price = get_rent_price(nbr, num_beds)
    # Nights of Airbnb income needed to cover one month's rent
    breakeven = rent_price / airbnb_price
    return {
        "airbnb_price": airbnb_price,
        "nbr": nbr,
        "num_bed": num_beds,
        "rent_price": rent_price,
        "breakeven_days": breakeven,
        "breakeven_ratio": breakeven / 30,
    }
def visualize_stats(stats):
    """Render the rent-vs-Airbnb comparison: a daily-price bar chart and a
    cumulative-revenue line with the monthly breakeven point marked.

    `stats` is the dict produced by price_comparison_stats (or None).
    """
    # `is None` (identity check), not `== None`; the length check also
    # rejects malformed stats dicts.
    if stats is None or len(stats) != 6:
        warnings.warn("Stats dont have right properties")
        return None
    plt.figure(figsize=(16,4))
    plt.suptitle("Airbnb vs. Rent Prices")
    # Left panel: average daily rent vs. nightly Airbnb price
    plt.subplot(1, 2, 1)
    plt.title("Price Comparison")
    plt.ylabel("Daily Price ($)")
    plt.xticks(np.arange(2), ["Average Rent", "Airbnb"])
    prices = [stats["rent_price"]/30, stats["airbnb_price"]]
    plt.bar(np.arange(2), prices)
    # Right panel: cumulative Airbnb revenue over one month, with a vertical
    # line at the breakeven day
    plt.subplot(1, 2, 2)
    plt.title("Breakeven Point")
    days = np.arange(31)
    price_day = days * stats["airbnb_price"]
    plt.xlabel("Days Airbnb is Rented")
    plt.ylabel("Cumulative Price")
    plt.axvline(x=stats["breakeven_days"], color="r")
    plt.text(
        stats["breakeven_days"]-1.05,
        25*stats["airbnb_price"],
        f"""Breakeven = {np.round(stats["breakeven_days"], 1)} days""",
        rotation=90,
        fontsize=14
    )
    plt.plot(days, price_day)
    plt.savefig('../plots/Airbnb Rent Comparison Sample.png', bbox_inches='tight')
    plt.show()
# -
# Example comparison for a $300/night listing in Hayes Valley
test_airbnb_price = 300
stats = price_comparison_stats(
    nbr="Hayes Valley",
    num_beds=1,
    airbnb_price=test_airbnb_price
)
visualize_stats(stats)
# ### Export Data
# Persist the cleaned rent snapshots for the downstream comparison notebook
export_path = "../exported_models/airbnb_rent_comparison.hdf"
jan17_df.to_hdf(export_path, "jan17_df")
may18_df.to_hdf(export_path, "may18_df")
aug18_df.to_hdf(export_path, "aug18_df")
nov18_df.to_hdf(export_path, "nov18_df")
| ipython_notebooks/airbnb_rent_comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# <div class="contentcontainer med left" style="margin-left: -50px;">
# <dl class="dl-horizontal">
# <dt>Title</dt> <dd> Image Element</dd>
# <dt>Dependencies</dt> <dd>Plotly</dd>
# <dt>Backends</dt> <dd><a href='../bokeh/Image.ipynb'>Bokeh</a></dd> <dd><a href='../matplotlib/Image.ipynb'>Matplotlib</a></dd> <dd><a href='./Image.ipynb'>Plotly</a></dd>
# </dl>
# </div>
import numpy as np
import holoviews as hv
# Activate the Plotly backend for all HoloViews display in this notebook.
hv.extension('plotly')
# Like ``Raster``, a HoloViews ``Image`` allows you to view 2D arrays using an arbitrary color map. Unlike ``Raster``, an ``Image`` is associated with a [2D coordinate system in continuous space](Continuous_Coordinates.ipynb), which is appropriate for values sampled from some underlying continuous distribution (as in a photograph or other measurements from locations in real space).
# +
ls = np.linspace(0, 10, 200)
xx, yy = np.meshgrid(ls, ls)
bounds=(-1,-1,1,1)   # Coordinate system: (left, bottom, right, top)
img = hv.Image(np.sin(xx)*np.cos(yy), bounds=bounds)
img
# -
# Slicing, sampling, etc. on an ``Image`` all operate in this continuous space, whereas the corresponding operations on a ``Raster`` work on the raw array coordinates.
# Slice out the central quarter of the image in continuous coordinates.
img + img[-0.5:0.5, -0.5:0.5]
# Notice how, because our declared coordinate system is continuous, we can slice with any floating-point value we choose. The appropriate range of the samples in the input numpy array will always be displayed, whether or not there are samples at those specific floating-point values. This also allows us to index by a floating value, since the ``Image`` is defined as a continuous space it will snap to the closest coordinate, to inspect the closest coordinate we can use the ``closest`` method:
# %%opts Points (color='black' symbol='x' )
# `closest` snaps a continuous coordinate to the nearest sample center.
closest = img.closest((0.1,0.1))
print('The value at position %s is %s' % (closest, img[0.1, 0.1]))
img * hv.Points([img.closest((0.1,0.1))])
# We can also easily take cross-sections of the Image by using the sample method or collapse a dimension using the ``reduce`` method:
# Cross-section at x=0 alongside a mean-reduction over the x dimension.
img.sample(x=0) + img.reduce(x=np.mean)
# For full documentation and the available style and plot options, use ``hv.help(hv.Image).``
| examples/reference/elements/plotly/Image.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# The PyData ecosystem has a number of core Python data containers that allow users to work with a wide array of datatypes, including:
#
# * [Pandas](https://pandas.pydata.org): DataFrame, Series (columnar/tabular data)
# * [Rapids cuDF](https://docs.rapids.ai/api/cudf/stable/): GPU DataFrame, Series (columnar/tabular data)
# * [Dask](https://dask.pydata.org): DataFrame, Series (distributed/out of core arrays and columnar data)
# * [XArray](https://xarray.pydata.org): Dataset, DataArray (labelled multidimensional arrays)
# * [Streamz](https://streamz.readthedocs.io): DataFrame(s), Series(s) (streaming columnar data)
# * [Intake](https://github.com/ContinuumIO/intake): DataSource (data catalogues)
# * [GeoPandas](https://geopandas.org): GeoDataFrame (geometry data)
# * [NetworkX](https://networkx.github.io/documentation/stable/): Graph (network graphs)
#
# Many of these libraries have the concept of a high-level plotting API that lets a user generate common plot types very easily. The native plotting APIs are generally built on [Matplotlib](http://matplotlib.org), which provides a solid foundation, but means that users miss out the benefits of modern, interactive plotting libraries for the web like [Bokeh](http://bokeh.pydata.org) and [HoloViews](http://holoviews.org).
#
# **hvPlot** provides a high-level plotting API built on HoloViews that provides a general and consistent API for plotting data in all the formats mentioned above.
#
# As a first simple illustration of using hvPlot, let's create a small set of random data in Pandas to explore:
# +
import numpy as np
import pandas as pd
# 1000 days of cumulative random-walk data in four columns A-D.
index = pd.date_range('1/1/2000', periods=1000)
df = pd.DataFrame(np.random.randn(1000, 4), index=index, columns=list('ABCD')).cumsum()
df.head()
# -
# ## Pandas default .plot()
#
# Pandas provides Matplotlib-based plotting by default, using the `.plot()` method:
# +
# %matplotlib inline
# Default pandas plotting: a static Matplotlib PNG.
df.plot();
# -
# The result is a PNG image that displays easily, but is otherwise static.
#
# ## Switching Pandas backend
#
# To allow using hvPlot directly with Pandas we have to import `hvplot.pandas` and swap the Pandas backend with:
# +
import hvplot.pandas # noqa
# Route pandas .plot() calls through HoloViews instead of Matplotlib.
pd.options.plotting.backend = 'holoviews'
# -
# **NOTE:** This requires a recent version of pandas (later than 0.25.0), see the [Pandas API](Pandas_API.ipynb) for more details.
df.plot()
# ## .hvplot()
#
# If we instead change `%matplotlib inline` to `import hvplot.pandas` and use the ``df.hvplot`` method, it will now display an interactively explorable [Bokeh](http://bokeh.pydata.org) plot with panning, zooming, hovering, and clickable/selectable legends:
# Interactive Bokeh plot (pan/zoom/hover) via the .hvplot accessor.
df.hvplot()
# This interactive plot makes it much easier to explore the properties of the data, without having to write code to select ranges, columns, or data values manually. Note that while pandas, dask and xarray all use the `.hvplot` method, `intake` uses hvPlot as its main plotting API, which means that is available using `.plot()`.
# ## hvPlot native API
#
# For the plot above, hvPlot dynamically added the Pandas `.hvplot()` method, so that you can use the same syntax as with the Pandas default plotting. If you prefer to be more explicit, you can instead work directly with hvPlot objects:
# +
from hvplot import hvPlot
hvplot.extension('bokeh')
# Explicit object-based API, equivalent to df.hvplot(y=[...]).
plot = hvPlot(df)
plot(y=['A', 'B', 'C', 'D'])
# -
# ## Switching the plotting extension to Matplotlib or Plotly
#
# While the default plotting extension of hvPlot is [Bokeh](http://bokeh.pydata.org), it is possible to load either Matplotlib or Plotly with `.extension()` and later switch from a plotting library to another with `.output()`. More information about working with multiple plotting backends can be found in the [plotting extensions guide](Plotting_Extensions.ipynb).
# +
# Switch the active plotting extension to Matplotlib.
hvplot.extension('matplotlib')
df.hvplot(rot=30)
# -
# ## Getting help
#
# When working inside IPython or the Jupyter notebook hvplot methods will automatically complete valid keywords, e.g. pressing tab after declaring the plot type will provide all valid keywords and the docstring:
#
# ```python
# df.hvplot.line(<TAB>
# ```
#
# Outside an interactive environment ``hvplot.help`` will bring up information providing the ``kind`` of plot, e.g.:
#
# ```python
# hvplot.help('line')
# ```
#
# For more detail on the available options see the [Customization](Customization.ipynb) user guide.
#
# ## Next steps
#
# Now that you can see how hvPlot is used, let's jump straight in and discover some of the more powerful things we can do with it in the [Plotting](Plotting.ipynb) section.
| examples/user_guide/Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pinn-tf2
# language: python
# name: pinn-tf2
# ---
# # Learning a LJ potential [](https://colab.research.google.com/github/Teoroo-CMC/PiNN/blob/master/docs/notebooks/Learn_LJ_potential.ipynb)
#
#
# This notebook showcases the usage of PiNN with a toy problem of learning a Lennard-Jones
# potential with a hand-generated dataset.
# It serves as a basic test, and demonstration of the workflow with PiNN.
# Install PiNN
# !pip install git+https://github.com/Teoroo-CMC/PiNN
# %matplotlib inline
import os, warnings
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from ase import Atoms
from ase.calculators.lj import LennardJones
# Force CPU execution so the demo runs anywhere.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
# Silence TensorFlow's noisy sparse-IndexedSlices conversion warning.
index_warning = 'Converting sparse IndexedSlices'
warnings.filterwarnings('ignore', index_warning)
# ## Reference data
# Helper function: get the position given PES dimension(s)
def three_body_sample(atoms, a, r):
    """Place the three atoms for one (angle, distance) PES sample.

    Atom 0 sits at the origin, atom 1 is fixed at [0, 2, 0], and atom 2
    is placed at distance ``r`` from the origin, rotated ``a`` degrees
    within the y-z plane.  Returns the same ``atoms`` object, mutated
    in place via ``set_positions``.
    """
    angle_rad = a * np.pi / 180
    origin = [0, 0, 0]
    fixed_neighbor = [0, 2, 0]
    moving_atom = [0, r*np.cos(angle_rad), r*np.sin(angle_rad)]
    atoms.set_positions([origin, fixed_neighbor, moving_atom])
    return atoms
# +
atoms = Atoms('H3', calculator=LennardJones())
na, nr = 50, 50
# Angle (degrees) and distance grids spanning the PES to be learned.
arange = np.linspace(30,180,na)
rrange = np.linspace(1,3,nr)
# Truth: evaluate the LJ energy on the full angle/distance grid.
agrid, rgrid = np.meshgrid(arange, rrange)
egrid = np.zeros([na, nr])
for i in range(na):
    for j in range(nr):
        atoms = three_body_sample(atoms, arange[i], rrange[j])
        egrid[i,j] = atoms.get_potential_energy()
# Samples: random (angle, distance) points used as training data.
nsample = 100
asample, rsample = [], []
distsample = []
data = {'e_data':[], 'f_data':[], 'elems':[], 'coord':[]}
for i in range(nsample):
    a, r = np.random.choice(arange), np.random.choice(rrange)
    atoms = three_body_sample(atoms, a, r)
    dist = atoms.get_all_distances()
    # Keep only the nonzero pairwise distances (drop the diagonal).
    dist = dist[np.nonzero(dist)]
    data['e_data'].append(atoms.get_potential_energy())
    data['f_data'].append(atoms.get_forces())
    data['coord'].append(atoms.get_positions())
    data['elems'].append(atoms.numbers)
    asample.append(a)
    rsample.append(r)
    distsample.append(dist)
# -
# Visualize the true PES with the sampled points overlaid.
plt.pcolormesh(agrid, rgrid, egrid, shading='auto')
plt.plot(asample, rsample, 'rx')
plt.colorbar()
# ## Dataset from numpy arrays
# +
from pinn.io import sparse_batch, load_numpy
data = {k:np.array(v) for k,v in data.items()}
# 80/20 train/test split; the lambdas rebuild the pipelines on demand.
dataset = lambda: load_numpy(data, splits={'train':8, 'test':2})
train = lambda: dataset()['train'].shuffle(100).repeat().apply(sparse_batch(100))
test = lambda: dataset()['test'].repeat().apply(sparse_batch(100))
# -
# ## Training
# ### Model specification
# +
import pinn
params={
    'model_dir': '/tmp/PiNet',
    'network': {
        'name': 'PiNet',
        'params': {
            'ii_nodes':[8,8],
            'pi_nodes':[8,8],
            'pp_nodes':[8,8],
            'out_nodes':[8,8],
            'depth': 4,
            'rc': 3.0,
            'atom_types':[1]}},
    'model':{
        'name': 'potential_model',
        'params': {
            'e_dress': {1:-0.3}, # element-specific energy dress
            'e_scale': 2, # energy scale for prediction
            'e_unit': 1.0, # output unit of energy
            'log_e_per_atom': True, # log e_per_atom and its distribution
            'use_force': True}}} # include force in Loss function
model = pinn.get_model(params)
# -
# Start from scratch: drop any previous checkpoints, then train/evaluate.
# %rm -rf /tmp/PiNet
train_spec = tf.estimator.TrainSpec(input_fn=train, max_steps=5e3)
eval_spec = tf.estimator.EvalSpec(input_fn=test, steps=10)
tf.estimator.train_and_evaluate(model, train_spec, eval_spec)
# ## Validate the results
# ### PES analysis
atoms = Atoms('H3', calculator=pinn.get_calc(model))
# Re-evaluate the whole grid with the trained network.
epred = np.zeros([na, nr])
for i in range(na):
    for j in range(nr):
        a, r = arange[i], rrange[j]
        atoms = three_body_sample(atoms, a, r)
        epred[i,j] = atoms.get_potential_energy()
plt.pcolormesh(agrid, rgrid, epred, shading='auto')
plt.colorbar()
plt.title('NN predicted PES')
plt.figure()
# Absolute prediction error, with the training samples overlaid.
plt.pcolormesh(agrid, rgrid, np.abs(egrid-epred), shading='auto')
plt.plot(asample, rsample, 'rx')
plt.title('NN Prediction error and sampled points')
plt.colorbar()
# ### Pairwise potential analysis
# +
# Dimer scan: NN vs. true LJ energies along the H-H separation.
atoms1 = Atoms('H2', calculator=pinn.get_calc(model))
atoms2 = Atoms('H2', calculator=LennardJones())
nr2 = 100
rrange2 = np.linspace(1,1.9,nr2)
epred = np.zeros(nr2)
etrue = np.zeros(nr2)
for i in range(nr2):
    pos = [[0, 0, 0],
           [rrange2[i], 0, 0]]
    atoms1.set_positions(pos)
    atoms2.set_positions(pos)
    epred[i] = atoms1.get_potential_energy()
    etrue[i] = atoms2.get_potential_energy()
# -
# Overlay prediction vs. truth, plus a histogram of training distances.
f, (ax1, ax2) = plt.subplots(2,1, gridspec_kw = {'height_ratios':[3, 1]})
ax1.plot(rrange2, epred)
ax1.plot(rrange2, etrue,'--')
ax1.legend(['Prediction', 'Truth'], loc=4)
_=ax2.hist(np.concatenate(distsample,0), 20, range=(1,1.9))
# ## Molecular dynamics with ASE
from ase import units
from ase.io import Trajectory
from ase.md.nvtberendsen import NVTBerendsen
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
# 125-atom periodic H box; NVT at 300 K, trajectory written every 10 steps.
atoms = Atoms('H', cell=[2, 2, 2], pbc=True)
atoms = atoms.repeat([5,5,5])
atoms.rattle()
atoms.set_calculator(pinn.get_calc(model))
MaxwellBoltzmannDistribution(atoms, 300*units.kB)
dyn = NVTBerendsen(atoms, 0.5 * units.fs, 300, taut=0.5*100*units.fs)
dyn.attach(Trajectory('ase_nvt.traj', 'w', atoms).write, interval=10)
dyn.run(5000)
| docs/notebooks/Learn_LJ_potential.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pyspark import SparkContext
from awsglue.context import GlueContext
from awsglue.dynamicframe import DynamicFrame
sc = SparkContext.getOrCreate()
gc = GlueContext(sc)
spark = gc.spark_session
# Inline JSON sample plus a local CSV, both converted to Glue DynamicFrames.
plist = '[{"Product":"P1","Qty":"10"},{"Product":"P2","Qty":"5"}]'
spark_csv = spark.read.option("delimiter", ";").csv("teste.csv", header=True)
spark_df = spark.read.json(sc.parallelize([plist]))
glue_df_csv = DynamicFrame.fromDF(spark_csv, gc, "glue_df")
glue_df = DynamicFrame.fromDF(spark_df, gc, "glue_df")
glue_df.printSchema()
glue_df.toDF().show()
glue_df_csv.toDF().show()
# -
import boto3
import os
from pyspark.sql import SparkSession
def add_to_bucket(bucket_name: str, file_name: str):
    """Create ``bucket_name`` on the LocalStack S3 endpoint and upload
    ``file_name`` from the current working directory into it.

    Returns the boto3 S3 client on success, or None when anything fails
    (the error is printed; best-effort behavior is kept intentionally).
    """
    try:
        # LocalStack reachable from inside Docker via host.docker.internal
        client = boto3.client(
            's3',
            endpoint_url="http://host.docker.internal:4566",
            use_ssl=False,
            aws_access_key_id='mock',
            aws_secret_access_key='mock',
            region_name='us-east-1',
        )
        client.create_bucket(Bucket=bucket_name)
        local_path = f'{os.getcwd()}/{file_name}'
        with open(local_path, 'rb') as source:
            client.put_object(Body=source, Bucket=bucket_name, Key=file_name)
        print(file_name)
        return client
    except Exception as error:
        print(error)
        return None
# + tags=[]
def create_testing_pyspark_session():
    """Build a local 2-core SparkSession wired to read/write s3a:// paths
    against the LocalStack endpoint (mock credentials, SSL disabled)."""
    print('creating pyspark session')
    session = (SparkSession.builder
               .master('local[2]')
               .appName('pyspark-demo')
               .enableHiveSupport()
               .getOrCreate())
    # Hadoop S3A configuration, applied in the same order as before.
    s3a_settings = [
        ("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem"),
        ("fs.s3a.path.style.access", "true"),
        ("fs.s3a.connection.ssl.enabled", "false"),
        ("com.amazonaws.services.s3a.enableV4", "true"),
        ("fs.s3a.aws.credentials.provider", "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider"),
        ("fs.s3a.access.key", "mock"),
        ("fs.s3a.secret.key", "mock"),
        ("fs.s3a.session.token", "mock"),
        ("fs.s3a.endpoint", "http://host.docker.internal:4566"),
    ]
    hadoop_conf = session.sparkContext._jsc.hadoopConfiguration()
    for key, value in s3a_settings:
        hadoop_conf.set(key, value)
    return session
# +
test_bucket = 'teste'
# Write to S3 bucket
add_to_bucket(bucket_name=test_bucket, file_name='teste.csv')
spark_session = create_testing_pyspark_session()
# NOTE(review): the session configures the s3a:// filesystem, but this read
# path uses the s3:// scheme while the write below uses s3a:// — confirm
# both schemes resolve, otherwise switch this one to s3a:// as well.
file_path = f's3://{test_bucket}/teste.csv'
# Read from s3 bucket
data_df = spark_session.read.option('delimiter', ';').option('header', 'true').option('inferSchema',
                                                                                     'False').format('csv').load(file_path)
print(data_df.show())
# -
write_path = f's3a://{test_bucket}/testparquet/'
data_df.write.parquet(write_path, mode='overwrite')
| glue/testes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Classification template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# -
# Importing the dataset
dataset = pd.read_csv('/Users/harpreet/Documents/C_A_ProjReq/C_A_SourceDB/Cfpb_ChurnModel/Churn_Modelling.csv')
# Features: columns 3..12 (CreditScore through EstimatedSalary); target: last column (Exited).
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, -1].values
#Copy of dataset into DF
DF=dataset
dataset.head()
#Encoding categorical Data
#Encoding Independent variables
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:,1] = labelencoder_X_1.fit_transform(X[:,1])
labelencoder_X_2 = LabelEncoder()
X[:,2] = labelencoder_X_2.fit_transform(X[:,2])
# NOTE(review): OneHotEncoder's `categorical_features` argument was removed
# in scikit-learn 0.22 (use ColumnTransformer instead) — confirm the pinned
# scikit-learn version before running.
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:] # drop the first dummy column to avoid the dummy-variable trap
X[:5]
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
print(X_test)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#Making the ANN
#Importing keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
# NOTE(review): output_dim/init/p/nb_epoch below are Keras 1 argument names;
# Keras 2+ renamed them to units/kernel_initializer/rate/epochs — confirm
# the installed Keras version.
#Initializing the ANN
classifier = Sequential()
#Adding input layer and first hidden layer with dropout
classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu', input_dim = 11))
classifier.add(Dropout(p = 0.1))
#Adding second hidden layer
classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu'))
classifier.add(Dropout(p = 0.1))
#Adding the output layer (single sigmoid unit: churn probability)
classifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))
#Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
#Fitting the ANN to the training set
classifier.fit(X_train, y_train, batch_size = 10, nb_epoch = 100)
# +
#Making predictions and evaluating the models
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Threshold the sigmoid output at 0.5 to get boolean churn predictions.
y_pred = (y_pred > 0.5)
# -
#homework
new_prediction = classifier.predict(sc.transform(np.array([[0.0,0,600,1,40,3,60000,2,1,1,50000]])))
new_prediction = (new_prediction > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
#Evaluating the ANN
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Dense
def build_classifier():
    """Build and compile the 11-6-6-1 churn ANN used by cross_val_score.

    NOTE(review): output_dim/init are Keras 1 argument names; Keras 2+
    renamed them to units/kernel_initializer — confirm the installed version.
    """
    classifier = Sequential()
    classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu', input_dim = 11))
    classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu'))
    classifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))
    classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
    return classifier
# Wrap the Keras model so scikit-learn can drive 10-fold cross-validation.
classifier = KerasClassifier(build_fn = build_classifier, batch_size = 10, nb_epoch = 100)
accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10, n_jobs = 1)
#n_jobs = 1 due to using windows, for other platforms, use n_jobs = -1, to engage all the processors
mean = accuracies.mean()
# NOTE(review): .std() is the standard deviation, not the variance — the
# variable name overstates what is stored.
variance = accuracies.std()
# Tuning the ANN
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
def build_classifier(optimizer):
    """Build and compile the 11-6-6-1 churn ANN for GridSearchCV.

    Parameters
    ----------
    optimizer : str or keras optimizer
        Passed straight to compile() so the grid search can tune it.

    NOTE(review): output_dim/init are Keras 1 argument names; Keras 2+
    renamed them to units/kernel_initializer — confirm the installed version.
    """
    classifier = Sequential()
    classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu', input_dim = 11))
    classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu'))
    classifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))
    classifier.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics = ['accuracy'])
    return classifier
# Grid-search batch size, epoch count and optimizer with 10-fold CV.
classifier = KerasClassifier(build_fn = build_classifier)
parameters = {'batch_size': [25,32],
              'nb_epoch': [100,500],
              'optimizer': ['adam','rmsprop']}
grid_search = GridSearchCV(estimator = classifier,
                           param_grid = parameters,
                           scoring = 'accuracy',
                           cv = 10)
grid_search = grid_search.fit(X_train, y_train)
best_parameters = grid_search.best_params_
best_accuracy = grid_search.best_score_
# +
#####################_Trying Diffrent Approach_#####################################
# -
DF.head()
# Drop the columns that we have decided won't be used in prediction
df = DF.drop(["RowNumber", "Gender"], axis=1)
features = df.drop(["Exited"], axis=1).columns
df.head()
# +
print(np.unique(df['Age']) )
# -
#print np.unique(data['Age Band'])
#[0 999 u'0-17' u'18-34' u'35-54' u'55+']
# Treat the 999 placeholder as 0, then label-encode the age values.
df['Age'].replace(999,0,inplace=True)
agefeaturemap = {label:idx for idx,label in enumerate(np.unique(df['Age']))}
print (agefeaturemap)
df['Age'] = df['Age'].map(agefeaturemap)
print(np.unique(df['CreditScore']) )
#[0 u'300-553' u'554-602' u'603-640' u'641-685' u'686-733' u'734-850']
# Label-encode the credit scores exactly once.  Bug fix: the original code
# applied .map(creditscoremap) a second time to the already-encoded column,
# which turns every value into NaN because the encoded indices are not keys
# of creditscoremap.
creditscoremap = {label:idx for idx,label in enumerate(np.unique(df['CreditScore']))}
print(creditscoremap)
df['CreditScore'] = df['CreditScore'].map(creditscoremap)
print( np.unique(df['CreditScore']) )
print(X_test,y_pred)
| ChurnModel_CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Augmentation
#
# <img src="https://vlibras.gov.br/assets/imgs/VLibrasLogoBarraPrincipal.png" width="150px" />
#
# Neste notebook iremos realizar um técnica na nossa base dados chamada *data augmentation*. Esse processo tem como objetivo aumentar e diversificar nosso conjunto de dados.
# ## 0. Dependências
import re
import random
import pandas as pd
from tqdm import tqdm
# ## 1. Helpers
#
# Metodos auxiliares que serão utilizados ao longo deste notebook.
def row_search(current, list_gr, list_gi):
    """Return the (gr, gi) pairs from the place lists whose entries both
    occur as whole words in the current row's 'gr' and 'gi' sentences.

    Pairs are returned in the order they appear in ``list_gr``/``list_gi``.
    """
    gr_tokens = set(current['gr'].split())
    gi_tokens = set(current['gi'].split())
    return [
        (candidate_gr, candidate_gi)
        for candidate_gr, candidate_gi in zip(list_gr, list_gi)
        if candidate_gr in gr_tokens and candidate_gi in gi_tokens
    ]
# ## 2. Augmentation para Lugares
#
# Usando um arquivo de contendo todos os locais sinalizados, detectar locais em frases e substituir por outros locais. Por exemplo:
#
# (*VIAJAR RECIFE ONTEM*,*VIAJAR RECIFE&CIDADE ONTEM*)
#
# Deve adicionar frases como as seguintes, a lista de frases do pipeline:
#
# (**VIAJAR BRASÍLIA ONTEM**, **VIAJAR BRASÍLIA&CIDADE ONTEM**)
#
# (**VIAJAR RIO_DE_JANEIRO ONTEM**,**VIAJAR RIO_DE_JANEIRO&CIDADE ONTEM**)
# ### Leitura do arquivo
#
# Iremos carregar uma lista de lugares junto com seus respectivos contextos. A primeira coluna apresenta o nome literal da **Cidade**, **País** ou **Estado** a segunda columa apresenta, alem do nome, o respectivo contexto (*Cidade/País/Estado*) do lugar.
# Load the list of places: column 'gr' holds the literal name, column 'gi'
# adds the CIDADE/ESTADO/PAÍS context tag.
df_places = pd.read_csv('data/lugares.csv')
# Bug fix: the original printed `df_places.tail` (the bound method object);
# call it to actually show the last rows.
print(df_places.tail())
list_places_gr, list_places_gi = df_places['gr'].values.tolist(), df_places['gi'].values.tolist()
print(list_places_gr)
print(list_places_gi)
# ### Generating new sentences
#
# Generate new sentences from the place list loaded above and the input
# corpus. For now the corpus holds only 4 sample sentences.
corpus = pd.DataFrame({'gr': ['VIAJAR RECIFE ONTEM', 'VIAJAR AMAZONAS ONTEM', 'EM DEZEMBRO NO NATAL VIAJAR PARA NATAL', '<NAME>'], 'gi': ['VIAJAR RECIFE&CIDADE ONTEM', 'VIAJAR AMAZONAS&ESTADO ONTEM', 'EM DEZEMBRO NO NATAL VIAJAR PARA NATAL&CIDADE', 'VIAJAR MANAUS&CIDADE CHEGAR ALEMANHA&PAÍS']})
print(corpus)
# Cap the work: at most 10 corpus sentences and 20 candidate places.
MAX_EXAMPLES_CORPUS = 10 if corpus.shape[0] >= 10 else corpus.shape[0]
MAX_EXAMPLES_PLACES = 20 if df_places.shape[0] >= 20 else df_places.shape[0]
corpus_sample = corpus.sample(MAX_EXAMPLES_CORPUS)
places_sample = df_places.sample(MAX_EXAMPLES_PLACES)
print(corpus_sample)
print(places_sample)
df_augmentation_places = pd.DataFrame()
# NOTE(review): literal_gr/literal_gi are used as regex patterns below; place
# names containing metacharacters would misbehave — consider re.escape().
# Also DataFrame.append is removed in pandas 2.x (use pd.concat) — confirm
# the pinned pandas version.
for index, row in tqdm(corpus_sample.iterrows(), total=corpus_sample.shape[0]):
    literals = row_search(row, list_places_gr, list_places_gi)
    for (literal_gr, literal_gi) in literals:
        occurrences = (re.findall(literal_gr, row['gr']), re.findall(literal_gi, row['gi']))
        # Only substitute when the place occurs exactly once in both forms,
        # avoiding ambiguous replacements (e.g. NATAL as month vs. city).
        if len(occurrences[0]) == 1 and len(occurrences[1]) == 1:
            list_gr = [re.sub(literal_gr, place, row['gr']) for place in list_places_gr[:MAX_EXAMPLES_PLACES] if literal_gr != place]
            list_gi = [re.sub(literal_gi, place, row['gi']) for place in list_places_gi[:MAX_EXAMPLES_PLACES] if literal_gi != place]
            df = pd.DataFrame({'gr': list_gr, 'gi': list_gi})
            df_augmentation_places = df_augmentation_places.append(df, ignore_index=True)
df_augmentation_places
| Lugares.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Descarga de históricos de precios
#
# <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/7/7d/Copper_Price_History_USD.png" width="600px" height="400px" />
#
# > Entonces, en la clase anterior vimos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad).
#
# > Estas medidas se pueden calcular cuando tenemos escenarios probables de la economía y conocemos sus probabilidades de ocurrencia. Ahora, si no conocemos dichos escenarios, ¿qué podemos hacer?
# *Objetivos:*
# - Aprender a importar datos desde archivos separados por comas (extensión `.csv`).
# - Descargar el paquete `pandas-datareader`.
# - Aprender a descargar datos desde fuentes remotas.
#
# **Referencias:**
# - http://pandas.pydata.org/
# - https://pandas-datareader.readthedocs.io/en/latest/
# ## 1. Importar datos desde archivos locales
#
# <img style="float: left; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/8/86/Microsoft_Excel_2013_logo.svg" width="300px" height="125px" />
#
# <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/0/0a/Python.svg" width="300px" height="125px" />
# ### 1.1. ¿Porqué?
#
# - Muchas veces tenemos bases de datos proporcionadas como archivos locales.
# - Para poder analizar, procesar y tomar decisiones con estos datos, es necesario importarlos a python.
# - Ejemplos de archivos donde comúnmente se guardan bases de datos son:
# - `.xls` o `.xlsx`
# - `.cvs`
# - Excel es ampliamente usado en distintos campos de aplicación en todo el mundo.
# - Nos guste o no, esto también aplica a ciencia de datos (ingeniería financiera).
# - Muchos de ustedes en su futuro académico y profesional tendrán que trabajar con estas hojas de cálculo, pero no siempre querrán trabajar directamente con ellas si tienen que hacer un análisis un poco más avanzado de los datos.
# - Por eso en Python se han implementado herramientas para leer, escribir y manipular este tipo de archivos.
#
# En esta clase veremos cómo podemos trabajar con Excel y Python de manera básica utilizando la librería *pandas*.
# ### 1.2. Reglas básicas para antes de leer hojas de cálculo
#
# Antes de comenzar a leer una hoja de cálculo en Python (o cualquier otro programa), debemos considerar el ajustar nuestro archivo para cumplir ciertos principios, como:
#
# - La primer fila de la hoja de cálculo se reserva para los títulos, mientras que la primer columna se usa para identificar la unidad de muestreo o indización de los datos (tiempo, fecha, eventos...)
# - Evitar nombres, valores o campos con espacios en blanco. De otra manera, cada palabra se interpreta como variable separada y resultan errores relacionados con el número de elementos por línea.
# - Los nombres cortos se prefieren sobre nombre largos.
# - Evite símbolos como ?, $, %, ^, &, *, (,),-,#, ?, ,,<,>, /, |, \, [ ,] , {, y }.
# - Borre cualquier tipo de comentario que haya hecho en su archivo para evitar columnas extras.
# - Asegúrese de que cualquier valor inexistente esté indicado como NA.
#
# Si se hizo algún cambio, estar seguro de guardarlo.
#
# Si estás trabajando con Microsoft Excel, verás que hay muchas opciones para guardar archivos, a parte de las extensiones por defecto .xls or .xlsx. Para esto ir a “Save As” y seleccionar una de las extensiones listadas en “Save as Type”.
#
# La extensión más común es .csv (archivos de texto separados por comas).
# **Actividad.** Descargar precios de acciones de Apple (AAPL) de Yahoo Finance, con una ventana de tiempo desde el 01-01-2015 al 31-12-2017 y frecuencia diaria.
#
# - Ir a https://finance.yahoo.com/.
# - Buscar cada una de las compañías solicitadas.
# - Dar click en la pestaña *'Historical Data'*.
# - Cambiar las fechas en *'Time Period'*, click en *'Apply'* y, finalmente, click en *'Download Data'*.
# - **¡POR FAVOR! GUARDAR ESTOS ARCHIVOS EN UNA CARPETA LLAMADA precios EN EL MISMO DIRECTORIO DONDE TIENEN ESTE ARCHIVO**.
# ### 1.3. Carguemos archivos .csv como ventanas de datos de pandas
#
# Ahora podemos comenzar a importar nuestros archivos.
#
# Una de las formas más comunes de trabajar con análisis de datos es en pandas. Esto es debido a que pandas está construido sobre NumPy y provee estructuras de datos y herramientas de análisis fáciles de usar.
# Importamos pandas
import pandas as pd
# Para leer archivos `.csv`, utilizaremos la función `read_csv` de pandas:
# The read_csv function loads the comma-separated file
data=pd.read_csv("precios/AAPL.csv")
# Load the spreadsheet into a DataFrame
aapl=pd.DataFrame(data)
aapl
# #### Note #1
# - We would like to index by date.
# Load the spreadsheet into a DataFrame, indexed by the Date column
data=pd.read_csv("precios/AAPL.csv",index_col='Date')
aapl=pd.DataFrame(data)
aapl
# Plot closing prices and adjusted closing prices
aapl[['Close','Adj Close']].plot(figsize=(8,6));
# #### Note #2
# - For our application we only need the adjusted closing prices (the Adj Close column).
# Load only Date and Adj Close, and rename the column to the ticker
aapl=pd.read_csv("precios/AAPL.csv",index_col='Date',usecols=['Date','Adj Close'])
aapl.columns=['AAPL']
aapl
# **Activity.** Import every .csv file as we just did for Apple, and build a single DataFrame whose column headers are the ticker names (AAPL, AMZN, ...) containing the closing-price data.
# Load each spreadsheet and assemble the combined closes DataFrame
aapl=pd.read_csv("precios/AAPL.csv",index_col='Date',usecols=['Date','Adj Close'])
aapl.columns=['AAPL']
amzn=pd.read_csv("precios/AMZN.csv",index_col='Date',usecols=['Date','Adj Close'])
# NOTE(review): this column is named 'amzn' (lowercase) while closes uses
# 'AMZN' — presumably these were meant to match; verify.
amzn.columns=['amzn']
closes=pd.DataFrame(index=aapl.index,columns=['AAPL','AMZN'])
closes.index.name='Date'
closes['AAPL']=aapl
closes['AMZN']=amzn
closes
# ## 2. Descargar los datos remotamente
# Para esto utilizaremos el paquete *pandas_datareader*.
#
# **Nota**: Usualmente, las distribuciones de Python no cuentan, por defecto, con el paquete *pandas_datareader*. Por lo que será necesario instalarlo aparte:
# - buscar en inicio "Anaconda prompt" y ejecutarlo como administrador;
# - el siguiente comando instala el paquete en Anaconda: *conda install pandas-datareader*;
# - una vez finalice la instalación correr el comando: *conda list*, y buscar que sí se haya instalado pandas-datareader
# Import the data module from the pandas_datareader package. The community imports it under the name web
# Shim: older pandas_datareader releases expect is_list_like at this path.
pd.core.common.is_list_like = pd.api.types.is_list_like
import pandas_datareader.data as web
# The data module of pandas_datareader exposes the `DataReader` function:
# DataReader function
web.DataReader
# - A esta función le podemos especificar la fuente de los datos para que se use la api específica para la descarga de datos de cada fuente.
# - Fuentes:
# - Google Finance: su api ya no sirve.
# - Quandl: solo permite descargar datos de equities estadounidenses de manera gratuita, y una a la vez. Es la base de datos más completa.
# - IEX: los datos tienen antiguedad máxima de 5 años y de equities estadounidenses.
# - Yahoo! Finance: su api ha tenido cambios significativos y ya no es posible usarla desde DataReader. Sin embargo permite obtener datos de distintas bolsas (incluida la mexicana), por eso le haremos la luchita.
# Example: google finance (per the notes above, this API no longer works; kept for reference)
ticker='AAPL'
source='google'
start='2015-01-01'
end='2017-12-31'
aapl_goo=web.DataReader(ticker,source,start,end)
# Example: quandl
ticker='AAPL'
source='quandl'
start='2015-01-01'
end='2017-12-31'
aapl_qua=web.DataReader(ticker,source,start,end)
aapl_qua
# Example: iex
ticker='AAPL'
source='iex'
start='2015-01-01'
end='2017-12-31'
aapl_iex=web.DataReader(ticker,source,start,end)
aapl_iex
# Example: yahoo
ticker='AAPL'
source='yahoo'
start='2015-01-01'
end='2017-12-31'
aapl_yahoo=web.DataReader(ticker,source,start,end)
aapl_yahoo
# Try the YahooDailyReader function instead
# YahooDailyReader, daily interval
ticker='AAPL'
start='2015-01-01'
end='2017-12-31'
aapl_yahoo=web.YahooDailyReader(ticker,start,end,interval='d').read()
aapl_yahoo
# Example
# Sin embargo, no se pueden descargar varios a la vez. Intentemos hacerlo nosotros, así sea de manera rudimentaria:
# Crear función para descargar precios de cierre ajustados de varios activos a la vez:
def download_closes(tickers, start_date, end_date):
    """Download adjusted close prices for several tickers from Yahoo! Finance.

    tickers: list of ticker symbols, e.g. ['AAPL', 'AMZN']
    start_date, end_date: date strings 'YYYY-MM-DD' (or datetime-like)
    returns: DataFrame indexed by date ('Date'), one column per ticker
    """
    import pandas as pd
    # Workaround for an old pandas / pandas_datareader incompatibility.
    pd.core.common.is_list_like = pd.api.types.is_list_like
    import pandas_datareader.data as web
    # Fetch each ticker exactly once (the original downloaded tickers[0]
    # twice: once just to build the index) and keep the adjusted close.
    series = {}
    for ticker in tickers:
        df = web.YahooDailyReader(ticker, start_date, end_date).read()
        series[ticker] = df['Adj Close']
    closes = pd.DataFrame(series, columns=tickers)
    # Bug fix: the original assigned `closes.index_name`, which merely sets
    # an arbitrary attribute on the DataFrame; `index.name` labels the index.
    closes.index.name = 'Date'
    return closes.sort_index()
# Download two tickers at once with the helper defined above.
ticker=['AAPL','AMZN']
start='2015-01-01'
end='2017-12-31'
closes=download_closes(ticker,start,end)
closes
# Sometimes it does not read on the first try; the API is unstable. Retry a few times.
#
# **Note**: to download data from the Mexican stock exchange (BMV), the ticker must carry the MX extension.
# For example: *MEXCHEM.MX*, *LABB.MX*, *GFINBURO.MX* and *GFNORTEO.MX*.
# +
# Example: 'AAPL', 'MSFT', 'NVDA', '^GSPC'
ticker=['AAPL','MSFT', 'NVDA', '^GSPC']
start='2015-01-01'
end='2017-12-31'
closes=download_closes(ticker,start,end)
closes
# -
# Plot the downloaded close prices.
closes.plot(figsize=(8,6));
# **Conclusión**
# - Aprendimos a importar datos desde archivos locales.
# - Aprendimos a importar datos remotamente con el paquete pandas_datareader. Queda pendiente Infosel (después vemos esto).
#
# ¿Ahora qué? Pues con estos históricos, obtendremos los rendimientos y ellos nos servirán como base para caracterizar la distribución de rendimientos...
#
# ### ¡Oh, si!
# # Anuncios parroquiales:
# ## 1. Recordar tarea para hoy.
# <script>
# $(document).ready(function(){
# $('div.prompt').hide();
# $('div.back-to-top').hide();
# $('nav#menubar').hide();
# $('.breadcrumb').hide();
# $('.hidden-print').hide();
# });
# </script>
#
# <footer id="attribution" style="float:right; color:#808080; background:#fff;">
# Created with Jupyter by <NAME>.
# </footer>
| Clase5_DescargaHistoricos-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 数学函数、字符串和对象
# ## 本章介绍Python函数来执行常见的数学运算
# - 函数是完成一个特殊任务的一组语句,可以理解为一个函数相当于一个小功能,但是在开发中,需要注意一个函数的长度最好不要超过一屏
# - Python中的内置函数是不需要Import导入的
# <img src="../Photo/15.png"></img>
# Read three numbers and apply the operation the user picks (min/max/pow).
a,b,c = eval(input("请输入三个数字以逗号分开:"))
choose_mode = input("请输入操作类型:最大值输入max,最小值输入min,求幂输入pow")
if choose_mode == "min":
    print(min(a,b,c))
if choose_mode == "max":
    print(max(a,b,c))
if choose_mode == "pow":
    # NOTE(review): pow uses only b and c; the first number `a` is ignored
    # in this branch — confirm this matches the intended exercise.
    print(pow(b,c))
# ## 尝试练习Python内置函数
# ## Python中的math模块提供了许多数学函数
# <img src="../Photo/16.png"></img>
# <img src="../Photo/17.png"></img>
#
import random
import math
# A tiny random value in [0, 1/510).
a = random.random()/255/2
print(a)
# Logistic sigmoid of `a`; for such small inputs the result is just above 0.5.
res= 1.0 / (1.0 + math.exp(-a))
print(res)
round(res)
# ## 两个数学常量PI和e,可以通过使用math.pi 和math.e调用
# ## EP:
# - 通过math库,写一个程序,使得用户输入三个顶点(x,y)返回三个角度
# - 注意:Python计算角度为弧度制,需要将其转换为角度
# <img src="../Photo/18.png">
import math
# Read the three triangle vertices and compute its interior angles.
x1,y1,x2,y2,x3,y3 = eval(input("请输入三角形的三个顶点,用,隔开:"))
a = math.sqrt((x2-x3)**2+(y2-y3)**2)
b = math.sqrt((x1-x3)**2+(y1-y3)**2)
c = math.sqrt((x1-x2)**2+(y1-y2)**2)
# Side lengths via the two-point distance formula.
print(a,b,c)
# Law of cosines: cos A = (b^2 + c^2 - a^2) / (2bc), written with a negated divisor.
A = math.acos((a*a-b*b-c*c)/(-2*b*c))
print(A)
# math.acos returns radians; convert to degrees for display.
print("A=",math.degrees(A))
B = math.acos((b*b-a*a-c*c)/(-2*a*c))
C = math.acos((c*c-a*a-b*b)/(-2*b*a))
print("A=",math.degrees(A),"B=",math.degrees(B),"C=",math.degrees(C))
# ## 字符串和字符
# - 在Python中,字符串必须是在单引号或者双引号内,在多段换行的字符串中可以使用“”“
# - 在使用”“”时,给予其变量则变为字符串,否则当多行注释使用
# ## ASCII码与Unicode码
# - <img src="../Photo/19.png"></img>
# - <img src="../Photo/20.png"></img>
# - <img src="../Photo/21.png"></img>
# ## 函数ord、chr
# - ord 返回ASCII码值
# - chr 返回字符
# Caesar-style "encryption": shift every character 10 code points up.
email = '<EMAIL>'
for i in email:
    print(chr(ord(i)+10),end="")
# end="" suppresses the newline between the printed characters.
# Same shift, but accumulate into a string and print once.
email = '<EMAIL>'
result =''
for i in email:
    result += chr(ord(i)+10)
print(result)
# ## EP:
# - 利用ord与chr进行简单邮箱加密
# +
import hashlib
# Message to hash.
# NOTE(review): `str` shadows the builtin str() for the rest of this cell.
str = 'i like fate.'
# Create md5 objects (one for the demo, one for the signature check).
# NOTE: MD5 is cryptographically broken; fine for a demo, not for real auth.
hl = hashlib.md5()
ha = hashlib.md5()
# Tips
# update() requires bytes, hence the explicit encode;
# hl.update(str) would raise: Unicode-objects must be encoded before hashing
hl.update(str.encode(encoding='utf-8'))
print('MD5加密前为 :' + str)
print('MD5加密后为 :' + hl.hexdigest())
# Compare the user's signature hash against a stored digest.
database = 'b9a2acce30db4fa8be156d3dcebaf0cb'
qianming = input("请输入用户签名")
ha.update(qianming.encode(encoding='utf-8'))
print(ha.hexdigest())
if database == ha.hexdigest():
    print("认证成功")
else:
    print("认证失败!")
# -
# ## 转义序列 \
# - a = "He said,"Johon's program is easy to read"
# - 转掉它原来的意思
# - 一般情况下只有当语句与默认方法相撞的时候,就需要转义
# ## 高级print
# - 参数 end: 以什么方式结束打印
# - 默认换行打印
# ## 函数str
# - 将类型强制转换成字符串类型
# - 其他一些以后会学到(list,set,tuple...)
# ## 字符串连接操作
# - 直接使用 “+”
# - join() 函数
# ## EP:
# - 将 “Welcome” “to” "Python" 拼接
# - 将int型 100 与 “joker is a bad man” 拼接
# - 从控制台读取字符串
# > 输入一个名字返回夸奖此人
# String concatenation with join() vs "+".
print( ''.join(['Welcome ','to ','Python']))
name = input("请输入人名:")
print(' '.join([name,'is a goood boy !']))# join with a space as the separator
print("joker is a bad man "+"and")
# ## 实例研究:最小数量硬币
# - 开发一个程序,让用户输入总金额,这是一个用美元和美分表示的浮点值,返回一个由美元、两角五分的硬币、一角的硬币、五分硬币、以及美分个数
# <img src="../Photo/22.png"></img>
# - Python弱项,对于浮点型的处理并不是很好,但是处理数据的时候使用的是Numpy类型
# <img src="../Photo/23.png"></img>
# ## id与type
# - id 查看内存地址,在判断语句中将会使用
# - type 查看元素类型
# ## 其他格式化语句见书
# # Homework
# - 1
# <img src="../Photo/24.png"><img>
# <img src="../Photo/25.png"><img>
import math
# Area of a regular pentagon from the circumradius r:
#   side s = 2*r*sin(pi/5), area = 5*s^2 / (4*tan(pi/5))
r = eval(input("请输入五边形定点到中心的距离:"))
s = 2*r*math.sin(math.pi/5)
area = 5*s*s/(4*math.tan(math.pi/5))
# Bug fix: the original print statement was missing its closing
# parenthesis, which is a SyntaxError.
print("五边形的面积为:",round(area))
# - 2
# <img src="../Photo/26.png"><img>
import math
# Great-circle distance between two points given as (latitude, longitude) pairs.
x1,y1,x2,y2 = eval(input("请输入地球表面两点的坐标,x1,y1,x2,y2形式输入-值表示东经和南纬:"))
# Trig functions expect radians, so convert the degree inputs first.
x1 = math.radians(x1)
x2 = math.radians(x2)
y1 = math.radians(y1)
y2 = math.radians(y2)
# Spherical law of cosines with Earth radius 6371.01 km.
d = 6371.01*math.acos(math.sin(x1)*math.sin(x2)+math.cos(x1)*math.cos(x2)*math.cos(y1-y2))
print("地球上,这两点的距离为:",abs(d))
# - 3
# <img src="../Photo/27.png"><img>
import math
# Area of a regular pentagon from the side length s: 5*s^2 / (4*tan(pi/5)).
s = eval(input("请输入五角形的边长:"))
# Bug fix: the divisor must include the tangent.  The original
# `5*s*s/4*math.tan(...)` *multiplied* by tan(pi/5) due to operator
# precedence (cf. the correct form used in problem 1 above).
area = 5*s*s/(4*math.tan(math.pi/5))
print("五角形的面积为:",area)
# - 4
# <img src="../Photo/28.png"><img>
import math
n = eval(input("请输入多边形的边数:"))
s = eval(input("请输入多边形的边长:"))
area = n*s*s/4*math.tan(math.pi/n)
print("五角形的面积为:",area)
# - 5
# <img src="../Photo/29.png"><img>
# <img src="../Photo/30.png"><img>
# Read an ASCII code and show the character it maps to.
asc = int(input("请输入一个ASCII码的值(一个0-127之间的整数不包括0和127):"))
print("这个ASC码对应的字符是:",chr(asc))
# - 6
# <img src="../Photo/31.png"><img>
# +
import math
# Simple payroll report from interactive input.
Employee_Name = input("雇员姓名:")
Housr_Worked = eval(input("每小时报酬:"))
federal_tax_withholding_rate = eval(input("联邦扣税率:"))
state_tax_withholding_rate = eval(input("周预扣税率:"))
# NOTE(review): the prompt labels Housr_Worked as the hourly pay; the *10
# presumably assumes a fixed 10 hours worked — TODO confirm against the exercise.
worked = Housr_Worked*10
print("雇员姓名:",Employee_Name)
print("每小时报酬:",Housr_Worked)
print("联邦扣税率:",federal_tax_withholding_rate)
print("州与扣税率:",state_tax_withholding_rate)
print("\t本月工资:",worked)
print("\t联邦扣税:",worked*federal_tax_withholding_rate)
print("\t州扣税:",worked*state_tax_withholding_rate)
print("本月实发工资:",worked-(worked*federal_tax_withholding_rate+worked*state_tax_withholding_rate))
# -
# - 7
# <img src="../Photo/32.png"><img>
# Read a positive integer and print it with its digits reversed.
num0 = int(input("请输入一个整数"))
digits = 0
remaining = num0
# Renamed from `sum`, which shadowed the builtin; also dropped the
# unused loop variable `i` the original left behind.
reversed_num = 0
# First pass: count the digits.
while num0 > 0:
    num0 = num0 // 10
    digits += 1
# Second pass: place each digit (least significant first) at its
# mirrored position.
while remaining > 0:
    reversed_num = reversed_num + remaining % 10 * 10 ** (digits - 1)
    digits -= 1
    remaining = remaining // 10
print("这个数的颠倒数是:",reversed_num)
# - 8 进阶:
# > 加密一串文本,并将解密后的文件写入本地保存
| 9.11.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Calibration Rectangle
#
# 150mm x 50mm rectangle for calibration purposes
# Display the image.
import gcode
# Interactive exploration of the project's gcode helpers.
gcode.GCode
# +
# gcode.GCode?
# -
gcode.hline
# +
# gcode.hline?
# -
# A horizontal line from X=0 to X=10 at Y=0.
gcode.hline(X0=0, Xf=10, Y=0)
gcode.Line(gcode.hline(X0=0, Xf=10, Y=0))
# +
# gcode.Shapes.Circle?
# +
# gcode.Shapes.Square?
# -
import numpy as np
x = np.linspace(0,10, 100)
x
np.log10(.1)
np.log10(100)
# +
# Calibration Program
# -
# [Assume homing is done]
# > G92: A Programmable Temporary Work Offset
#
# > Suppose you want a programmable temporary work offset. There are a lot of ways to accomplish this, but one that is tried and true is to use G92. G92 establishes a work offset based on the offset coordinates you provide. So, if that vise jaw corner we’ve talked about is located from the current tool position at offset X10Y10Z0, you could execute the following:
#
# - https://www.cnccookbook.com/g54-g92-g52-work-offsets-cnc-g-code/
#
# > G20 / G21 G Codes are Unit Conversion for the Machine, Not Your Part Program
#
# - G20 and G21 G Code: Metric and Imperial Unit Conversion CNC Programming
#
# > G91 & G90 G-Code: CNC Absolute and Incremental Programming
# https://www.cnccookbook.com/g91-g90-g-code-cnc-absolute-incremental-programming/
#
# 
# +
# Standard program preamble: temporary work offset at the current
# position, metric units, absolute positioning.
prog = gcode.GCode()
prog.G92(X=0, Y=0, Z=0)
prog.G21() # Metric
prog.G90() # Absolute
prog
# -
# DOE Configuration
laserin_speed=200  # engraving feed rate (F value) — presumably mm/min; confirm machine setup
laserin_power=200  # laser power (S value for M4) — presumably 0-255 PWM scale; confirm
def init():
    """Build the standard program preamble.

    Zeroes the temporary work offset at the current position, then
    switches the machine to metric units and absolute positioning.
    """
    preamble = gcode.GCode()
    preamble.G92(X=0, Y=0, Z=0)  # temporary work offset here
    preamble.G21()  # units: millimetres
    preamble.G90()  # absolute coordinates
    return preamble
# Axis test pattern: burn the Y axis, an offset vertical, and a diagonal.
axis_prog=gcode.GCode()
axis_prog.G0(X=0, Y=0, Z=0, F=300)  # rapid move back to origin
axis_prog.M4(S=laserin_power)  # laser on (dynamic mode) at configured power
axis_prog.G1(X=0, Y=0, Z=0, F=laserin_speed)
axis_prog.G1(X=0, Y=160, Z=0, F=laserin_speed)  # vertical line along Y
axis_prog.G0(X=60, Y=0, Z=0)
axis_prog.G1(X=0, Y=0, Z=0, F=laserin_speed)
axis_prog.G1(X=10, Y=10, Z=0, F=laserin_speed)  # short diagonal
axis_prog
# Preview: preamble + axis pattern.
init()+axis_prog
# DOE sweep: for each power level, draw vertical lines at increasing
# X spacings.  First a dry run that only prints the X positions.
Ys = [10, 160]
X_pos = 10
for laserin_power in [75, 100, 125, 150, 175, 200, 255]:
    print(f"Power Test: {laserin_power}")
    for x_spacing in [0, 0.1, 0.25, 0.5, 1, 1.25, 2, 2.5, 5]:
        X_pos+=x_spacing
        X_pos=np.round(X_pos, 4)  # avoid float drift in the coordinates
        print(f"\tX {X_pos}")
    X_pos+=5
    # 5 mm between DOE tests.
X_pos
# Second dry run: alternate the Y direction (serpentine) between lines.
X_pos = 10
for laserin_power in [75, 100, 125, 150, 175, 200, 255]:
    print(f"Power Test: {laserin_power}")
    for x_spacing in [0, 0.1, 0.25, 0.5, 1, 1.25, 2, 2.5, 5]:
        X_pos+=x_spacing
        X_pos=np.round(X_pos, 4)
        Ys = np.flip(Ys)  # swap start/end so successive lines run opposite ways
        print(f"\tX:{X_pos}\tY:{Ys[0]}:{Ys[1]}")
    X_pos+=5
    # 5 mm between DOE tests.
X_pos
# Final pass: emit the actual G-code moves.
# NOTE(review): laserin_power is only printed here — no M4/S command is
# emitted per power level, so all lines burn at the last configured power.
# Confirm whether a doe.M4(S=laserin_power) was intended inside the loop.
doe=gcode.GCode()
X_pos = 10
for laserin_power in [75, 100, 125, 150, 175, 200, 255]:
    print(f"Power Test: {laserin_power}")
    for x_spacing in [0, 0.1, 0.25, 0.5, 1, 1.25, 2, 2.5, 5]:
        X_pos+=x_spacing
        X_pos=np.round(X_pos, 4)
        doe.G0(X=X_pos, Y=Ys[0])  # rapid to line start
        doe.G1(X=X_pos, Y=Ys[0])
        doe.G1(X=X_pos, Y=Ys[1])  # burn the vertical line
        doe.G0(X=X_pos, Y=Ys[1])
        Ys = np.flip(Ys)
        print(f"\tX:{X_pos}\tY:{Ys[0]}:{Ys[1]}")
    X_pos+=5
    # 5 mm between DOE tests.
X_pos
doe
| Development/CalibrationRectangle_150x50-Copy2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Modeling and Simulation in Python
#
# Chapter 7: Thermal systems
#
# Copyright 2017 <NAME>
#
# License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
#
# +
# If you want the figures to appear in the notebook,
# and you want to interact with them, use
# # %matplotlib notebook
# If you want the figures to appear in the notebook,
# and you don't want to interact with them, use
# # %matplotlib inline
# If you want the figures to appear in separate windows, use
# # %matplotlib qt5
# tempo switch from one to another, you have to select Kernel->Restart
# %matplotlib inline
from modsim import *
# -
# ### The coffee cooling problem.
#
# I'll use a `State` object to store the initial temperature.
#
# Initial state: coffee at 90 degC.
init = State(temp=90)
init
# System parameters: volume in mL, heat-transfer rate r in 1/min,
# environment at 22 degC, simulate 30 minutes with 1-minute steps.
coffee = System(init=init,
                volume=300,
                r=0.01,
                T_env=22,
                t0=0,
                t_end=30,
                dt=1)
coffee
# The `update` function implements Newton's law of cooling.
def update(state, system):
    """Update the thermal transfer model.

    One explicit-Euler step of Newton's law of cooling.

    state: State (temp)
    system: System object
    returns: State (temp) one dt later
    """
    unpack(system)  # modsim helper: exposes system attrs (r, T_env, dt, ...) as locals
    T = state.temp
    T += -r * (T - T_env) * dt  # dT = -r * (T - T_env) * dt
    return State(temp=T)
# Here's how it works.
update(init, coffee)
# Now we can run simulations using the same function from the previous chapter.
def run_simulation(system, update_func):
    """Runs a simulation of the system.

    Steps from t0 to t_end in increments of dt, storing each state as a
    row of a TimeFrame.

    Add a TimeFrame to the System: results
    system: System object
    update_func: function that updates state
    """
    unpack(system)  # exposes init, t0, t_end, dt as locals
    frame = TimeFrame(columns=init.index)
    frame.loc[t0] = init  # first row holds the initial state
    ts = linrange(t0, t_end-dt, dt)
    for t in ts:
        frame.loc[t+dt] = update_func(frame.loc[t], system)
    system.results = frame  # results stored on the system, not returned
# And here's how it works.
# Run the coffee simulation and plot the cooling curve.
run_simulation(coffee, update)
coffee.results
# Here's what the results look like.
plot(coffee.results.temp, label='coffee')
decorate(xlabel='Time (minutes)',
         ylabel='Temperature (C)')
# After running the simulation, we can extract the final temperature from the results.
def final_temp(system):
    """Return the temperature at the end of the simulation.

    Falls back to the initial temperature when the system has not been
    simulated yet (i.e. it has no `results` attribute).

    system: System object.
    returns: temperature (degC)
    """
    _missing = object()
    results = getattr(system, 'results', _missing)
    if results is _missing:
        return system.init.temp
    return results.temp[system.t_end]
# It will be convenient to wrap these steps in a function. `kwargs` is a collection of whatever keyword arguments are provided; they are passed along as arguments to `System`.
def make_system(T_init=90, r=0.01, volume=300, t_end=30):
    """Build a System object for the cooling model.

    T_init: initial temperature in degC
    r: heat transfer rate, in 1/min
    volume: volume of liquid in mL
    t_end: end time of simulation, in minutes
    returns: System object (environment fixed at 22 degC, dt = 1 min)
    """
    return System(init=State(temp=T_init),
                  volume=volume,
                  r=r,
                  T_env=22,
                  t0=0,
                  t_end=t_end,
                  dt=1)
# Here's how we use it:
coffee = make_system()
run_simulation(coffee, update)
final_temp(coffee)
# **Exercise:** Simulate the temperature of 50 mL of milk with a starting temperature of 5 degC, in a vessel with the same insulation, for 15 minutes, and plot the results.
# +
# Solution goes here
# Solution: simulate 50 mL of milk at 5 degC for 15 minutes.
init1 = State(temp=5)
# Bug fix: run_simulation reads `system.init`, but the original passed
# `init1=init1`; inside run_simulation the name `init` then resolved to
# the *global* coffee state (90 degC), so the milk silently started at
# the wrong temperature.
milk = System(init=init1,
              volume=50,
              r=0.01,
              T_env=22,
              t0=0,
              t_end=15,
              dt=1)
milk
def update(state, system):
    """Update the thermal transfer model (Newton's law of cooling).
    state: State (temp)
    system: System object
    returns: State (temp)
    """
    unpack(system)
    T = state.temp
    T += -r * (T - T_env) * dt
    return State(temp=T)
def run_simulation(system, update_func):
    """Runs a simulation of the system.
    Add a TimeFrame to the System: results
    system: System object
    update_func: function that updates state
    """
    unpack(system)
    frame = TimeFrame(columns=init.index)
    frame.loc[t0] = init
    ts = linrange(t0, t_end-dt, dt)
    for t in ts:
        frame.loc[t+dt] = update_func(frame.loc[t], system)
    system.results = frame
run_simulation(milk, update)
milk.results
# -
plot(milk.results.temp, label='milk')
decorate(xlabel='Time (minutes)',
ylabel='Temperature (C)')
# ### Using `fsolve`
#
# As a simple example, let's find the roots of this function; that is, the values of `x` that make the result 0.
def func(x):
    """Cubic with roots at 1, 2 and 3 (demo target for fsolve)."""
    product = 1
    for root in (1, 2, 3):
        product *= (x - root)
    return product
# `modsim.py` provides `fsolve`, which does some error-checking and then runs `scipy.optimize.fsolve`. The first argument is the function whose roots we want. The second argument is an initial guess.
fsolve(func, x0=0)
# Usually the root we get is the one that's closest to the initial guess.
fsolve(func, 1.9)
fsolve(func, 2.9)
# But not always.
fsolve(func, 1.5)
# We want to find the value of `r` that makes the final temperature 70, so we define an "error function" that takes `r` as a parameter and returns the difference between the final temperature and the goal.
def error_func1(r):
    """Error function for fsolve: distance of the simulated final
    coffee temperature from the 70 degC target.

    r: heat transfer rate, in 1/min
    returns: final temperature minus 70 C
    """
    target = 70
    sim = make_system(r=r)
    run_simulation(sim, update)
    return final_temp(sim) - target
# With `r=0.01`, we end up a little too warm.
# With r=0.01 the final temperature overshoots the 70 C target slightly.
error_func1(r=0.01)
# The return value from `fsolve` is an array with a single element, the estimated value of `r`.
solution = fsolve(error_func1, 0.01, xtol=1e-8)
r_coffee = solution[0]
r_coffee
# If we run the simulation with the estimated value of `r`, the final temperature is 70 C, as expected.
coffee = make_system(r=r_coffee)
run_simulation(coffee, update)
final_temp(coffee)
# **Exercise:** When you call `fsolve`, it calls `error_func1` several times. To see how this works, add a print statement to `error_func1` and run `fsolve` again.
# **Exercise:** Repeat this process to estimate `r_milk`, given that it starts at 5 C and reaches 20 C after 15 minutes.
#
# Before you use `fsolve`, you might want to try a few values for `r_milk` and see how close you can get by trial and error. Here's an initial guess to get you started:
r_milk = 0.1
milk = make_system(T_init=5, t_end=15, r=r_milk)
run_simulation(milk, update)
final_temp(milk)
# Solution: error function for the milk — target 20 degC after 15 minutes.
def error_func2(r):
    """Runs a milk simulation and returns the `error`.

    r: heat transfer rate, in 1/min
    returns: difference between the final temperature and the 20 C target
    """
    # Bug fix: this cell previously re-defined `error_func1` (the 70 C
    # coffee target) verbatim, but the cells below call `error_func2`,
    # which was never defined — a NameError.  Define it here with the
    # milk parameters (start at 5 C, 15 minutes) and the 20 C target.
    system = make_system(T_init=5, t_end=15, r=r)
    run_simulation(system, update)
    return final_temp(system) - 20
# Solution goes here
# NOTE(review): as the notebook stands, error_func2 is called here but
# never defined — the solution cell above defines error_func1 instead.
error_func2(r=0.1)
# Solution goes here
solution = fsolve(error_func2, 0.1, xtol=1e-8)
r_milk = solution[0]
r_milk
# Solution goes here: verify the fitted rate reproduces the target.
milk = make_system(r=r_milk, T_init=5, t_end=15)
run_simulation(milk, update)
final_temp(milk)
# ### Mixing liquids
# The following function takes `System` objects that represent two liquids, computes the temperature of the mixture, and returns a new `System` object that represents the mixture.
def mix(s1, s2):
    """Mix two liquids and return a System representing the mixture.

    The mixture temperature is the volume-weighted average of the two
    final temperatures; the heat-transfer rate is taken from `s1`.

    s1: System representing coffee
    s2: System representing milk
    returns: System representing the mixture
    """
    assert s1.t_end == s2.t_end
    v1, v2 = s1.volume, s2.volume
    total_volume = v1 + v2
    mixed_temp = (v1 * final_temp(s1) + v2 * final_temp(s2)) / total_volume
    return make_system(T_init=mixed_temp,
                       volume=total_volume,
                       r=s1.r)
# First we'll see what happens if we add the milk at the end. We'll simulate the coffee and the milk separately.
# Scenario 1: cool each liquid separately for the full 30 minutes.
coffee = make_system(T_init=90, t_end=30, r=r_coffee, volume=300)
run_simulation(coffee, update)
final_temp(coffee)
milk = make_system(T_init=5, t_end=30, r=r_milk, volume=50)
run_simulation(milk, update)
final_temp(milk)
# Here's what the results look like.
# +
plot(coffee.results.temp, label='coffee')
plot(milk.results.temp, '--', label='milk')
decorate(xlabel='Time (minutes)',
         ylabel='Temperature (C)',
         loc='center left')
savefig('chap07-fig01.pdf')
# -
# Mix at the end of the 30 minutes.
mix_last = mix(coffee, milk)
final_temp(mix_last)
# Scenario 2: mix immediately, then cool the mixture for 30 minutes.
coffee = make_system(T_init=90, r=r_coffee, volume=300)
milk = make_system(T_init=5, r=r_milk, volume=50)
mix_first = mix(coffee, milk)
mix_first.t_end = 30
run_simulation(mix_first, update)
final_temp(mix_first)
# The following function takes `t_add`, which is the time when the milk is added, and returns the final temperature.
def run_and_mix(t_add, t_total=30):
    """Simulate coffee and milk separately, mix at `t_add`, then cool
    the mixture for the remaining time.

    t_add: time at which the milk is added, in minutes
    t_total: total time to simulate, in minutes
    returns: final temperature of the mixture (degC)
    """
    # Cool each liquid on its own until the mixing time.
    coffee = make_system(T_init=90, t_end=t_add,
                         r=r_coffee, volume=300)
    milk = make_system(T_init=5, t_end=t_add,
                       r=r_milk, volume=50)
    for liquid in (coffee, milk):
        run_simulation(liquid, update)
    # Combine and simulate the rest of the interval.
    mixture = mix(coffee, milk)
    mixture.t_end = t_total - t_add
    run_simulation(mixture, update)
    return final_temp(mixture)
# We can try it out with a few values.
# Spot-check the extremes and the midpoint.
run_and_mix(0)
run_and_mix(15)
run_and_mix(30)
# Sweep the mixing time from 0 to 30 minutes in steps of 2.
sweep = SweepSeries()
for t_add in linrange(0, 30, 2):
    temp = run_and_mix(t_add)
    sweep[t_add] = temp
# Here's what the result looks like.
# +
plot(sweep, color='purple')
decorate(xlabel='Time added (min)',
         ylabel='Final temperature (C)',
         legend=False)
savefig('chap07-fig02.pdf')
# -
# -
# **Exercise:** Suppose the coffee shop won't let me take milk in a separate container, but I keep a bottle of milk in the refrigerator at my office. In that case is it better to add the milk at the coffee shop, or wait until I get to the office?
#
# Hint: Think about the simplest way to represent the behavior of a refrigerator in this model. The change you make to test this variation of the problem should be very small!
# ### Analysis
# Now we can use the analytic result to compute temperature as a function of time. The following function is similar to `run_simulation`.
def run_analysis(system):
    """Computes temperature using the analytic solution.

    Evaluates the closed form T(t) = T_env + (T_init - T_env)*exp(-r*t)
    for the whole time range at once, and stores the result the same way
    `run_simulation` does.

    Adds TimeFrame to `system` as `results`
    system: System object
    """
    unpack(system)  # exposes init, T_env, r, t0, t_end, dt as locals
    T_init = init.temp
    # NOTE(review): the coffee2 system built below is created without a
    # `dt` attribute — confirm how `dt` resolves here (modsim System
    # default?) before relying on this function elsewhere.
    ts = linrange(t0, t_end, dt)
    temp_array = T_env + (T_init - T_env) * exp(-r * ts)  # closed-form cooling curve
    temp_series = TimeSeries(temp_array, index=ts)
    system.results = TimeFrame(temp_series, columns=['temp'])
# Here's how we run it. From the analysis, we have the computed value of `r_coffee2`
# Rate constant derived analytically (see the chapter text).
r_coffee2 = 0.011610223142273859
init = State(temp=90)
coffee2 = System(init=init, T_env=22, r=r_coffee2,
                 t0=0, t_end=30)
run_analysis(coffee2)
final_temp(coffee2)
# And we can compare to the results from simulation.
init = State(temp=90)
coffee = System(init=init, T_env=22, r=r_coffee,
                t0=0, t_end=30, dt=1)
run_simulation(coffee, update)
final_temp(coffee)
# They are identical except for small roundoff errors.
coffee.results - coffee2.results
| code/chap07-mine.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cia (P3)
# language: python
# name: cia (p3)
# ---
import sys
sys.path.append('/home/immersinn/gits/cia_library/src/')
import utils
import mysql_utils
import mysql.connector
from mysql.connector.cursor import MySQLCursor
from mysql.connector.errors import IntegrityError
import requests
from bs4 import BeautifulSoup as bs
# ### Pull Doc Info from MySQL
docs = mysql_utils.docinfoFromMySQL(limit=0, fields=['doc_id'])
len(docs)
for doc in docs[:5]:
print(doc)
# ### Pull PDFs from Site
# Base URL for document PDFs in the CIA reading room.
pdf_root_url = "https://www.cia.gov/library/readingroom/docs/"
def build_pdf_url(doc_id):
    """Return the full reading-room PDF URL for a document id."""
    return "".join([pdf_root_url, "DOC_", doc_id, ".pdf"])
# Build the PDF URL for every document pulled from MySQL.
pdf_urls = [build_pdf_url(doc['doc_id']) for doc in docs]
pdf_urls[:5]
# Direct download with requests.
req = requests.get(pdf_urls[0])
pdf = req.content
req.close()
pdf[:100]
# Same download routed through a local SOCKS5 proxy on port 9050
# (presumably a Tor client — confirm before scaling this up).
from urllib import request
import socks
from sockshandler import SocksiPyHandler
opener = request.build_opener(SocksiPyHandler(socks.SOCKS5,
                                              "127.0.0.1",
                                              9050))
pdf = opener.open(pdf_urls[0]).read()
pdf[:100]
# Scratch work: splitting work across several openers with a round-robin.
n_openers = 4
1 // n_openers
2 // 5
import itertools
itertools.cycle
cy = itertools.cycle([1,2,3])
let = 'abcdefgh'
for n,l in zip(cy,let):
    print(n)
    print(l)
# Persist the downloaded PDF via the project helper.
from importlib import reload
reload(utils)
utils.writePDF(pdf, docs[0]['doc_id'])
reload(mysql_utils)
| notebooks/Scrape PDFs from Individual Pages.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py36pytorch1
# language: python
# name: py36pytorch1
# ---
# +
import sys
import PIL.Image as Image
import os
import numpy as np
import torch
from torchvision import transforms
from tqdm import tqdm
import warnings
import torch.nn.functional as F
sys.path.append('../utils/')
import outil
import torchvision.models as models
if not sys.warnoptions:
warnings.simplefilter("ignore")
sys.path.append('../model')
from resnet50 import resnet50
import torchvision.models as models
import kornia.geometry as tgm
import matplotlib.pyplot as plt
# %matplotlib inline
minSize = 480 # min dimension in the resized image
nbIter = 10000 # number of RANSAC iterations
tolerance = 0.05 # RANSAC inlier tolerance — presumably in normalized coords; confirm in outil.RANSAC
transform = 'Homography' # coarse transformation model ('Homography' or 'Affine')
strideNet = 16 # make sure image size is multiple of strideNet size (ResNet downsampling stride)
MocoFeat = True ## use the MoCo-pretrained backbone (else ImageNet weights)
### ImageNet normalization
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
preproc = transforms.Compose([transforms.ToTensor(), normalize,])
# -
# ### Loading model (Moco feature or ImageNet feature)
# +
# Load the feature backbone: either a MoCo-pretrained ResNet-50 from the
# project checkpoint, or torchvision's ImageNet-pretrained ResNet-50.
if MocoFeat :
    resnet_feature_layers = ['conv1','bn1','relu','maxpool','layer1','layer2','layer3']
    resNetfeat = resnet50()
    featPth = '../model/pretrained/resnet50_moco.pth'
    param = torch.load(featPth)
    # Strip the DataParallel "module." prefix from checkpoint keys.
    state_dict = {k.replace("module.", ""): v for k, v in param['model'].items()}
    msg = 'Loading pretrained model from {}'.format(featPth)
    print (msg)
    resNetfeat.load_state_dict( state_dict )
else :
    resnet_feature_layers = ['conv1','bn1','relu','maxpool','layer1','layer2','layer3']
    resNetfeat = models.resnet50(pretrained=True)
# Truncate the network after layer3 so it outputs mid-level feature maps.
resnet_module_list = [getattr(resNetfeat,l) for l in resnet_feature_layers]
last_layer_idx = resnet_feature_layers.index('layer3')
resNetfeat = torch.nn.Sequential(*resnet_module_list[:last_layer_idx+1])
resNetfeat.cuda()
resNetfeat.eval()
# +
# Minimal point correspondences to estimate the model:
# 3 for an affine transform, 4 for a homography.
if transform == 'Affine' :
    Transform = outil.Affine
    nbPoint = 3
else :
    Transform = outil.Homography
    nbPoint = 4
# -
# ### Loading images
# +
img1 = '../img/MegaDepth_Train_Org_2_1.jpg'
img2 = '../img/MegaDepth_Train_Org_2_2.jpg'
I1 = Image.open(img1).convert('RGB')
I2 = Image.open(img2).convert('RGB')
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.axis('off')
plt.imshow(I2)
plt.subplot(1, 2, 2)
plt.axis('off')
plt.imshow(I1)
plt.show()
# -
# ### Pre-processing images (multi-scale + imagenet normalization)
# +
## We only compute 3 scales :
# Image 1 is processed at half, base, and double resolution so matches
# can be found across scale changes; image 2 at base resolution only.
I1Down2 = outil.resizeImg(I1, strideNet, minSize // 2)
I1Up2 = outil.resizeImg(I1, strideNet, minSize * 2)
I1 = outil.resizeImg(I1, strideNet, minSize)
I1Tensor = transforms.ToTensor()(I1).unsqueeze(0).cuda()
# L2-normalize the feature maps so cosine similarity is a dot product.
feat1Down2 = F.normalize(resNetfeat(preproc(I1Down2).unsqueeze(0).cuda()))
feat1 = F.normalize(resNetfeat(preproc(I1).unsqueeze(0).cuda()))
feat1Up2 = F.normalize(resNetfeat(preproc(I1Up2).unsqueeze(0).cuda()))
I2 = outil.resizeImg(I2, strideNet, minSize)
I2Tensor = transforms.ToTensor()(I2).unsqueeze(0).cuda()
feat2 = F.normalize(resNetfeat(preproc(I2).unsqueeze(0).cuda()))
# -
# ### Extract matches
# +
# Spatial coordinate grids for each feature map.
W1Down2, H1Down2 = outil.getWHTensor(feat1Down2)
W1, H1 = outil.getWHTensor(feat1)
W1Up2, H1Up2 = outil.getWHTensor(feat1Up2)
# Flatten and concatenate the three scales of image 1 into one descriptor bank.
featpMultiScale = torch.cat((feat1Down2.contiguous().view(1024, -1), feat1.contiguous().view(1024, -1), feat1Up2.contiguous().view(1024, -1)), dim=1)
WMultiScale = torch.cat((W1Down2, W1, W1Up2))
HMultiScale = torch.cat((H1Down2, H1, H1Up2))
W2, H2 = outil.getWHTensor(feat2)
feat2T = feat2.contiguous().view(1024, -1)
## get mutual matching
# Keep only pairs that are each other's nearest neighbour.
index1, index2 = outil.mutualMatching(featpMultiScale, feat2T)
W1MutualMatch = WMultiScale[index1]
H1MutualMatch = HMultiScale[index1]
W2MutualMatch = W2[index2]
H2MutualMatch = H2[index2]
# Homogeneous coordinates (x, y, 1) for the RANSAC model fit.
ones = torch.cuda.FloatTensor(H2MutualMatch.size(0)).fill_(1)
match2 = torch.cat((H1MutualMatch.unsqueeze(1), W1MutualMatch.unsqueeze(1), ones.unsqueeze(1)), dim=1)
match1 = torch.cat((H2MutualMatch.unsqueeze(1), W2MutualMatch.unsqueeze(1), ones.unsqueeze(1)), dim=1)
# -
# ### RANSAC
## if very few matches, it is probably not a good pair
# NOTE(review): this guard only prints a warning — execution continues and
# RANSAC below will still run (and fail) with fewer than nbPoint matches.
# Consider aborting here instead.
if len(match1) < nbPoint :
    print ('not a good pair...')
bestParam, bestInlier, match1Inlier, match2Inlier = outil.RANSAC(nbIter, match1, match2, tolerance, nbPoint, Transform)
# ### return pair coarsely aligned
## We keep the pair only if we have enough RANSAC inliers.
if len(match1Inlier) > 50 :
    if transform == 'Affine':
        # Bug fix: the original referenced `IpTensor`, which is never
        # defined (NameError on this branch); the sampling grid must match
        # the target image tensor `I1Tensor`, as in the homography branch.
        grid = F.affine_grid(torch.from_numpy(bestParam[:2].astype(np.float32)).unsqueeze(0).cuda(), I1Tensor.size()) # theta should be of size N×2×3
    else :
        warper = tgm.HomographyWarper(I1Tensor.size()[2], I1Tensor.size()[3])
        grid = warper.warp_grid(torch.from_numpy(bestParam.astype(np.float32)).unsqueeze(0).cuda())
    # Resample image 2 onto image 1's frame and show the aligned pair.
    I2Sample = F.grid_sample(I2Tensor.clone(), grid)
    plt.figure(figsize=(10, 5))
    plt.subplot(1, 2, 1)
    plt.axis('off')
    plt.imshow(transforms.ToPILImage()(I2Sample.squeeze().cpu()))
    plt.subplot(1, 2, 2)
    plt.axis('off')
    plt.imshow(I1)
    plt.show()
| train/generate_coarse_aligned_pair.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.6 64-bit
# name: python37664bitea6a0a427b5a48de8969fa56a6df37f4
# ---
# # Agrupación de datos
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## Agregación de datos por categoría
gender = ('Male', 'Female')
income = ('Poor', 'Middle Class', 'Rich') # income level categories
# +
# Draw a random category per row.
# NOTE(review): the loop uses the literal 500 instead of `n` — keep them in sync.
n = 500
gender_data = []
income_data = []
for i in range(0, 500):
    gender_data.append(np.random.choice(gender))
    income_data.append(np.random.choice(income))
# -
# Z -> N(0,1)
# N(m,s) -> m + s * Z
height = 160 + 30 * np.random.randn(n)
weight = 65 + 25 * np.random.randn(n)
age = 30 + 12 * np.random.randn(n)
# NOTE(review): unlike the columns above this uses `rand` (uniform on
# [0,1)), not `randn` — confirm whether a normal distribution was intended.
income = 18000 + 3500 * np.random.rand(n)
data = pd.DataFrame(
    {
        'Gender' : gender_data,
        'Economic Status' : income_data,
        'Height' : height,
        'Weight' : weight,
        'Age' : age,
        'Income' : income
    }
)
data.head(10)
# Las edades quizás habría que truncarlas para que sean enteros y las alturas habría que tener en cuenta que nos interesa, y dependiendo deberemos modificar la desviación típica y la media. Así, con todas las columnas. Los ingresos también pueden tener poco sentido, porque puede haber una persona de clase media que cobre menos que una persona, lo cual es incongruente.
# ## Agrupación de datos
grouped_gender = data.groupby('Gender')
# ### Iterate over the groups
for names, groups in grouped_gender:
    print(names)
    print(groups)
# ### Fetch a single group
grouped_gender.get_group('Female')
# ### Group by two keys at once
double_group = data.groupby(['Gender', 'Economic Status'])
for names, groups in double_group:
    print(names)
    print(groups)
# ## Operations on grouped data
double_group.sum()
double_group.mean()
double_group.size()
double_group.describe()
double_group['Income'].describe()
# ### Aggregation: a different function per column
double_group.aggregate(
    {
        "Income" : np.sum,
        "Age" : np.mean,
        "Height" : np.std
    }
)
double_group.aggregate(
    {
        "Age" : np.mean,
        "Height" : lambda h:(np.mean(h)/np.std(h)) # standardization (mean over std)
    }
)
# Several aggregations applied to every column.
double_group.aggregate([np.sum, np.mean, np.std])
double_group.aggregate([lambda x: np.mean(x) / np.std(x)]) # standardize every column
# ## Filtrado de datos
# ### Filtrar grupos de edad cuya suma supera los 2400
double_group['Age'].filter(lambda x: x.sum() > 2400) # keeps rows of groups whose age sum exceeds 2400
# ## Variable transformation
# ### Z-score (standardized value)
zscore = lambda x : (x - x.mean()) / x.std()
z_group = double_group.transform(zscore) # data standardized within each group
plt.hist(z_group['Age'])
# ### Replace NaN with the group mean
fill_na_mean = lambda x : x.fillna(x.mean())
double_group.transform(fill_na_mean)
# No NaNs here because the DataFrame is synthetic.
# ## Other operations
# ### Fetch a particular row per group
double_group.head(1) # first row of each group
double_group.tail(1) # last row of each group
double_group.nth(32) # each group must have at least 32 rows
# ### Sort by a given column before grouping
data_sorted = data.sort_values(['Age','Income'])
data_sorted.groupby('Gender').head()
data_sorted.groupby('Gender').head(1) # the youngest woman and man
data_sorted.groupby('Gender').tail(1) # the oldest woman and man
# ## Conjunto de entrenamiento y testing
# Load the churn dataset used for the train/test split examples.
data = pd.read_csv('../python-ml-course/datasets/customer-churn-model/Customer Churn Model.txt')
# ### Dividir utilizando la distribución normal
a = np.random.randn(len(data))
check = (a < 0.8) # NOTE(review): with standard-normal draws P(z < 0.8) ~= 0.788, so this is ~79/21, not 80/20; use np.random.rand for an exact 80% per-row probability
training = data[check]
testing = data[~check]
# ### Con la librería sklearn
from sklearn.model_selection import train_test_split # pip install scikit-learn
train, test = train_test_split(data, test_size = 0.2) # igual que antes
print(len(train),len(test))
# ### Using a shuffle function (random order)
from sklearn.utils import shuffle
data = shuffle(data) # randomly reorder the rows
cut_id = int(0.90*len(data)) # 90% train / 10% test cut point
train_data = data[:cut_id]   # positional rows [0, cut_id)
# BUG FIX: slicing is end-exclusive, so train_data already stops *before*
# cut_id; starting the test set at cut_id+1 silently dropped one row.
test_data = data[cut_id:]
| MaPeCode_Notebooks/6. Agrupacion de datos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="5ffb21374c7cf4b98e7239045ef9bf312effee25"
# # Two Sigma Financial News Competition Official Getting Started Kernel
# ## Introduction
# In this competition you will predict how stocks will change based on the market state and news articles. You will loop through a long series of trading days; for each day, you'll receive an updated state of the market, and a series of news articles which were published since the last trading day, along with impacted stocks and sentiment analysis. You'll use this information to predict whether each stock will have increased or decreased ten trading days into the future. Once you make these predictions, you can move on to the next trading day.
#
# This competition is different from most Kaggle Competitions in that:
# * You can only submit from Kaggle Kernels, and you may not use other data sources, GPU, or internet access.
# * This is a **two-stage competition**. In Stage One you can edit your Kernels and improve your model, where Public Leaderboard scores are based on their predictions relative to past market data. At the beginning of Stage Two, your Kernels are locked, and we will re-run your Kernels over the next six months, scoring them based on their predictions relative to live data as those six months unfold.
# * You must use our custom **`kaggle.competitions.twosigmanews`** Python module. The purpose of this module is to control the flow of information to ensure that you are not using future data to make predictions for the current trading day.
#
# ## In this Starter Kernel, we'll show how to use the **`twosigmanews`** module to get the training data, get test features and make predictions, and write the submission file.
# ## TL;DR: End-to-End Usage Example
# ```
# from kaggle.competitions import twosigmanews
# env = twosigmanews.make_env()
#
# (market_train_df, news_train_df) = env.get_training_data()
# train_my_model(market_train_df, news_train_df)
#
# for (market_obs_df, news_obs_df, predictions_template_df) in env.get_prediction_days():
# predictions_df = make_my_predictions(market_obs_df, news_obs_df, predictions_template_df)
# env.predict(predictions_df)
#
# env.write_submission_file()
# ```
# Note that `train_my_model` and `make_my_predictions` are functions you need to write for the above example to work.
# + [markdown] _uuid="225708f447eee93041881f9d6c3a3e890cb16718"
# ## In-depth Introduction
# First let's import the module and create an environment.
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# Competition-provided module that gates access to the market/news data.
from kaggle.competitions import twosigmanews
# You can only call make_env() once, so don't lose it!
env = twosigmanews.make_env()
# + [markdown] _uuid="6034b46fce8c9d55d403de32e7cebe8cb9fef96d"
# ## **`get_training_data`** function
#
# Returns the training data DataFrames as a tuple of:
# * `market_train_df`: DataFrame with market training data
# * `news_train_df`: DataFrame with news training data
#
# These DataFrames contain all market and news data from February 2007 to December 2016. See the [competition's Data tab](https://www.kaggle.com/c/two-sigma-financial-news/data) for more information on what columns are included in each DataFrame.
# + _uuid="c20fa6deeac9d374c98774abd90bdc76b023ee63"
# Pull the full training DataFrames (Feb 2007 - Dec 2016 per the docs above).
(market_train_df, news_train_df) = env.get_training_data()
# + _uuid="6811a1a76f08b2a029543cf73bcdf4dfca7dc362"
# Quick visual sanity check of both ends of each DataFrame.
market_train_df.head()
# + _uuid="84b5a58f67ebded82e6aabc66ca36411e6db35a9"
market_train_df.tail()
# + _uuid="25115010e14ef3497932902db0cef68501ddca11"
news_train_df.head()
# + _uuid="cdc6b9842073bcb7d63cddc30e5bd7826ccfdfa1"
news_train_df.tail()
# + [markdown] _uuid="840aa03b49d675953f080e4069f79f435282bb43"
# ## `get_prediction_days` function
#
# Generator which loops through each "prediction day" (trading day) and provides all market and news observations which occurred since the last data you've received. Once you call **`predict`** to make your future predictions, you can continue on to the next prediction day.
#
# Yields:
# * While there are more prediction day(s) and `predict` was called successfully since the last yield, yields a tuple of:
# * `market_observations_df`: DataFrame with market observations for the next prediction day.
# * `news_observations_df`: DataFrame with news observations for the next prediction day.
# * `predictions_template_df`: DataFrame with `assetCode` and `confidenceValue` columns, prefilled with `confidenceValue = 0`, to be filled in and passed back to the `predict` function.
# * If `predict` has not been called since the last yield, yields `None`.
# + _uuid="724c38149860c8e9058474ac9045c2301e8a20da"
# You can only iterate through a result from `get_prediction_days()` once
# so be careful not to lose it once you start iterating.
days = env.get_prediction_days()
# + _uuid="8d5781f889893e3e34b054687fd538c1a76bfdcc"
# Fetch the first prediction day's observations and the template to fill in.
(market_obs_df, news_obs_df, predictions_template_df) = next(days)
# + _uuid="fe1cb6accc3536258e7687c64ab3e5e5caa6334a"
market_obs_df.head()
# + _uuid="11c5b072aefdd54e6fbe9ae71dc3ea41909ade19"
news_obs_df.head()
# + _uuid="f99f6364881767a4071da9ea19e18163a7ce066b"
predictions_template_df.head()
# + [markdown] _uuid="11e95f2e3d493ee6e1023c7a4191310adde5d2bf"
# Note that we'll get an error if we try to continue on to the next prediction day without making our predictions for the current day.
# + _uuid="b8ac953a3afbfca2fb200bbf8f0f7339bec7e6f1"
# Deliberately errors: predict() has not been called for the current day yet.
next(days)
# + [markdown] _uuid="ba72731adf652d6011652e906d8b340d6572904e"
# ### **`predict`** function
# Stores your predictions for the current prediction day. Expects the same format as you saw in `predictions_template_df` returned from `get_prediction_days`.
#
# Args:
# * `predictions_df`: DataFrame which must have the following columns:
# * `assetCode`: The market asset.
# * `confidenceValue`: Your confidence whether the asset will increase or decrease in 10 trading days. All values must be in the range `[-1.0, 1.0]`.
#
# The `predictions_df` you send **must** contain the exact set of rows which were given to you in the `predictions_template_df` returned from `get_prediction_days`. The `predict` function does not validate this, but if you are missing any `assetCode`s or add any extraneous `assetCode`s, then your submission will fail.
# + [markdown] _uuid="9cd8317a5e52180b592ee2abc1d2177214642a3c"
# Let's make random predictions for the first day:
# + _uuid="a3f2197ed790f1aff1356a6954575fde976a4935"
import numpy as np
def make_random_predictions(predictions_df):
    """Fill the `confidenceValue` column with uniform noise in [-1.0, 1.0), in place."""
    n_assets = len(predictions_df)
    noise = np.random.rand(n_assets)  # uniform draws in [0, 1)
    predictions_df.confidenceValue = noise * 2.0 - 1.0
# + _uuid="ca72b7003f24f4aa0c4afe25b600aae31abd64d5"
# Fill the template with random confidences and submit them for day 1.
make_random_predictions(predictions_template_df)
env.predict(predictions_template_df)
# + [markdown] _uuid="ff62c167b459c5895383fb05fd9260c14be8c1b8"
# Now we can continue on to the next prediction day and make another round of random predictions for it:
# + _uuid="4e2293d44aad86d09d25326c4ede6f566ab69721"
# After predict() succeeded, the generator yields the next trading day.
(market_obs_df, news_obs_df, predictions_template_df) = next(days)
# + _uuid="140aee54dc838549f87f041a97c7a809ee4e0f6f"
market_obs_df.head()
# + _uuid="b02d43dec4b881564cd43ff6239c7aa97d94a7af"
news_obs_df.head()
# + _uuid="5233f1b22f5ddac08adb50bbaa6444a0da4a24bc"
predictions_template_df.head()
# + _uuid="a19142739096135d237e2837d8e10c992e53a6e5"
make_random_predictions(predictions_template_df)
env.predict(predictions_template_df)
# + [markdown] _uuid="8056b881707072c379ad2e89b9c59c3c041a2ab7"
# ## Main Loop
# Let's loop through all the days and make our random predictions. The `days` generator (returned from `get_prediction_days`) will simply stop returning values once you've reached the end.
# + _uuid="ef60bc52a8a228e5a2ce18e4bd416f1f1f25aeae"
# Exhaust the remaining prediction days, submitting random confidences for each.
for (market_obs_df, news_obs_df, predictions_template_df) in days:
    make_random_predictions(predictions_template_df)
    env.predict(predictions_template_df)  # must be called before the generator yields the next day
print('Done!')
# + [markdown] _uuid="7c8fbcca87c7f6abc53e86408417bf12ce21bb7f"
# ## **`write_submission_file`** function
#
# Writes your predictions to a CSV file (`submission.csv`) in the current working directory.
# + _uuid="2c8ed34ffb2c47c6e124530ec798c0b4eb01ddd5"
# Write the predictions accumulated via env.predict() to submission.csv.
env.write_submission_file()
# + _uuid="d38aa8a67cad3f0c105db7e764ec9b805db39ceb"
# We've got a submission file!
import os
# Confirm the CSV landed in the current working directory.
print([filename for filename in os.listdir('.') if '.csv' in filename])
# + [markdown] _uuid="f464f37885ffa763a2592e2867d74685f75be506"
# As indicated by the helper message, calling `write_submission_file` on its own does **not** make a submission to the competition. It merely tells the module to write the `submission.csv` file as part of the Kernel's output. To make a submission to the competition, you'll have to **Commit** your Kernel and find the generated `submission.csv` file in that Kernel Version's Output tab (note this is _outside_ of the Kernel Editor), then click "Submit to Competition". When we re-run your Kernel during Stage Two, we will run the Kernel Version (generated when you hit "Commit") linked to your chosen Submission.
# + [markdown] _uuid="2e3a267ea3149403c49ff59515a1a669ca2d1f9f"
# ## Restart the Kernel to run your code again
# In order to combat cheating, you are only allowed to call `make_env` or iterate through `get_prediction_days` once per Kernel run. However, while you're iterating on your model it's reasonable to try something out, change the model a bit, and try it again. Unfortunately, if you try to simply re-run the code, or even refresh the browser page, you'll still be running on the same Kernel execution session you had been running before, and the `twosigmanews` module will still throw errors. To get around this, you need to explicitly restart your Kernel execution session, which you can do by pressing the Restart button in the Kernel Editor's bottom Console tab:
# 
| Getting Started Kernel/two-sigma-news-official-getting-started-kernel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [](https://colab.research.google.com/github/MisaOgura/flashtorch/blob/master/examples/activation_maximization_colab.ipynb)
# ## Activation maximization
#
# ---
#
# A quick demo of activation maximization with [FlashTorch 🔦](https://github.com/MisaOgura/flashtorch), using the pre-trained VGG16 model.
#
#
# ❗This notebook is for those who are using this notebook in **Google Colab**.
#
# If you aren't on Google Colab already, please head to the Colab version of this notebook **[here](https://colab.research.google.com/github/MisaOgura/flashtorch/blob/master/examples/activation_maximization_colab.ipynb)** to execute.
#
# ---
#
# [Activation maximization](https://pdfs.semanticscholar.org/65d9/94fb778a8d9e0f632659fb33a082949a50d3.pdf) is one form of feature visualization that allows us to visualize what CNN filters are "looking for", by applying each filter to an input image and updating the input image so as to maximize the activation of the filter of interest (i.e. treating it as a gradient ascent task with filter activation values as the loss).
#
# The optimization and visualization is available via `flashtorch.activmax.GradientAscent`. The implementation is inspired by [this demo](https://blog.keras.io/category/demo.html) by <NAME>.
# ### 0. Set up
#
# A GPU runtime is available on Colab for free, from the `Runtime` tab on the top menu bar.
#
# It is **highly recommended to use GPU** as a runtime for the enhanced speed of computation.
# +
# Install flashtorch
# !pip install flashtorch torch==1.5.0 torchvision==0.6.0 -U
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import torchvision.models as models
from flashtorch.activmax import GradientAscent
# -
# ### 1. Load a pre-trained Model
# +
# Download (on first run) and load ImageNet-pretrained VGG16.
model = models.vgg16(pretrained=True)
# Print layers and corresponding indices
list(model.features.named_children())
# -
# ### 2. Specify layers and filters
# +
# One conv layer per VGG16 block (indexed into model.features), plus a
# handful of filter indices to visualize in each.
conv1_2 = model.features[2]
conv1_2_filters = [17, 33, 34, 57]
conv2_1 = model.features[5]
conv2_1_filters = [27, 40, 68, 73]
conv3_1 = model.features[10]
conv3_1_filters = [31, 61, 147, 182]
conv4_1 = model.features[17]
conv4_1_filters = [238, 251, 338, 495]
conv5_1 = model.features[24]
conv5_1_filters = [45, 271, 363, 409]
# -
# ### 3. Optimize and visualize filters
#
# Creating an instance of `GradientAscent` class with the model _without fully-connected layers_ allows us to use flexible input image sizes.
# +
# Wrap only the convolutional part so arbitrary input sizes work (see note above).
g_ascent = GradientAscent(model.features)
g_ascent.use_gpu = True  # assumes a CUDA runtime is available -- see section 0
# -
# By calling the `visualize` method and passing in the layer and filter indices defined above, it performs optimization and visualization.
#
# This is perhaps the most common way to use the `GradientAscent` class, but there are other APIs available according to your use cases (see section 4).
g_ascent.visualize(conv1_2, conv1_2_filters, title='conv1_2');
g_ascent.visualize(conv2_1, conv2_1_filters, title='conv2_1');
g_ascent.visualize(conv3_1, conv3_1_filters, title='conv3_1');
g_ascent.visualize(conv4_1, conv4_1_filters, title='conv4_1');
g_ascent.visualize(conv5_1, conv5_1_filters, title='conv5_1');
# We can see that, in the earlier layers (conv1_2, conv2_1), filters get activated by colors and simple patterns such as vertical, horizontal and diagonal lines.
#
# In the intermediate layers (conv3_1, conv4_1), we start to see more complex patterns.
#
# Then concepts like 'eye' (filter 45) and 'entrance (?)' (filter 271) seem to appear in the last layer (conv5_1).
# ### 4. Other ways to use `GradientAscent`
# #### 4-1. `GradientAscent.visualize`: randomly select filters
#
# If you have a convolutional layer you want to visualize, but you don't know which filters to choose, you can just pass in the layer to `visualize` without `filter_idxs`. It will randomly choose filters. You can adjust the number of filters chosen by passing `num_subplots` (default=4).
# Layer only, no filter list: FlashTorch picks the filters at random.
g_ascent.visualize(conv5_1, title='Randomly selected filters from conv5_1');
# #### 4-2. `GradientAscent.visualize`: plot one filter
#
# If you just want to visualize one filter, you can do so by specifying the filter index as an integer, not a list.
g_ascent.visualize(conv5_1, 3, title='conv5_1 filter 3');
# #### 4-3. `GradientAscent.visualize`: return image tensor
#
# If you want to grab the optimized image data, set `return_output` to `True`.
# +
# `output` holds one tensor per optimization iteration; the last is the result.
output = g_ascent.visualize(conv5_1, 3, title='conv5_1 filter 3', return_output=True);
print('num_iter:', len(output))
print('optimized image:', output[-1].shape)
# -
# #### 4-4. `GradientAscent.deepdream`: create DeepDream
#
# You can create a [DeepDream](https://ai.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html) by supplying a path to your own image.
#
# This will optimize the supplied image, instead of a random input noise, with regards to the filter specified.
# +
# Download the example image
# !mkdir -p images
# !wget https://github.com/MisaOgura/flashtorch/raw/master/examples/images/jay.jpg -P /content/images
# -
g_ascent.deepdream('/content/images/jay.jpg', conv5_1, 33)
# #### 4-5. `GradientAscent.optimize`: perform optimization only (no visualization)
#
# If no visualization is needed, or if you want to further customize visualization, you can call the `optimize` method directly.
# +
output = g_ascent.optimize(conv5_1, 3)
print('num_iter:', len(output))
print('optimized image:', output[-1].shape)
| examples/activation_maximization_colab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img style="float: left; margin: 30px 15px 15px 15px;" src="https://pngimage.net/wp-content/uploads/2018/06/logo-iteso-png-5.png" width="300" height="500" />
#
#
# ### <font color='navy'> **Finanzas Cuantitativas | Verano 2021**.
#
# **Nombres:**
# - <NAME>.
# - <NAME>.
# - <NAME>.
#
# **Fecha:** 03 de junio del 2021.
#
# <img style="float: right; margin: 30px 15px 15px 15px;" src="https://www.monash.edu/__data/assets/image/0004/1017517/iStock-611890326-OK3.jpg" width="300" height="500" />
#
# **Expedientes** :
# - 721093.
# - 717710.
# - 722176.
#
# **Profesor:** <NAME>.
#
# ## Tarea N.
| Plantilla Tareas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Series
# A ``Series`` object is a distributed collection of 1D arrays, all of which share a common index. It supports several manipulations and computations on 1D data, some of which take the index into account.
#
# The most common examples of Series objects are likely time series data, in which each array represents a quantity that has been temporally sampled at regular intervals. For example, a movie can be represented as Series data, where a single record might consist of a pixel time series, as in the `fish-series` and `mouse-series` example data from the `basic usage` tutorial.
#
# The keys associated with Series records can be any data used to identify the series, for example, a single sensor channel identifier, or tuples representing pixel coordinates (if derived from a movie). For example, a Series record representing a time series from an outer corner of a movie might have key (0, 0, 0), with the value being an array representing the measurements at that location over time: [112, 110, 118, ..., 124].
#
# Here, we show examples of loading and manipulating Series data.
# Setup plotting
# --------------
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('notebook')
# Loading series
# --------------
# Series can be loaded from a ``ThunderContext`` using the `loadSeries` method. (See the ``Input formats`` tutorial for more detail on loading methods and formats.) Here, we'll again load the "fish-series" example data set:
# NOTE: `tsc` is the ThunderContext injected into the notebook by the Thunder
# runtime; it is not defined in this file.
data = tsc.loadExample('fish-series')
# Inspection
# ----------
# Let's plot a random subset of the data using the ``subset`` method, which thresholds based on the standard deviation:
examples = data.subset(nsamples=50, thresh=1)
# Plot the first 20 sampled records as time series.
plt.plot(examples.T[0:20,:]);
# Note the variation in raw intensity levels.
# Processing
# -------------
# A ``Series`` object has various methods for processing and transforming the data. For example, ``center`` subtracts the mean, ``squelch`` sets to 0 any records not exceeding a threshold, and ``zscore`` subtracts the mean and divides by the standard deviation. Note the changes in the y-axis.
# Mean-centered records, then a sample of the higher-variance ones.
examples = data.center().subset(nsamples=50, thresh=10)
plt.plot(examples.T[0:20,:]);
# Zero out low-intensity records, z-score the rest, then sample.
examples = data.squelch(150).zscore().subset(nsamples=50, thresh=0.1)
plt.plot(examples.T[0:20,:]);
# Related methods include ``standardize``, ``detrend``, and ``normalize`` (the latter two are specified to `TimeSeries`, see below)
# Selections
# ----------
# A ``Series`` has a 1D index, which can be used to subselect values.
data.index.shape
# For example, to select a range:
data.between(0,8).first()
# Note that the index changes to reflect the subselected range:
data.between(0,8).index
# We can also select based on an arbitrary criterion function:
data.select(lambda x: x < 5).index
# The default index generated for ``Series`` objects will be the range of integers starting at zero and ending one before the length of the series data, as shown in these examples. However, other data types can also be used as the index for a series object, such as a sequence of strings, providing text labels for each element in the series array, or a tuple with indices at different levels. See the tutorial on Multi-indexing tutorial for this usage.
# Statistics
# ----------
# A ``Series`` can be summarized with statistics both within and across records. To summarize across records:
plt.plot(data.toTimeSeries().normalize().max());
plt.plot(data.toTimeSeries().normalize().mean());
plt.plot(data.toTimeSeries().normalize().min());
# To summarize within records:
data.seriesMean().first()
data.seriesStdev().first()
# We can also correlate each record with a signal of interest. As expected, for a random signal, the correlation should be near 0.
from numpy import random
signal = random.randn(240)
data.correlate(signal).first()
# Keys
# ----
# We often use integer keys to index the records of a ``Series``, and sometimes these keys represent indices into some original array (for example, indices into a multi-dimensional tensor, or pixel coordinates if the records are time series from a movie).
#
# A few convenience methods are provided for working with keys. First, the attribute ``dims`` contains information about the range of the keys. Computing this requires a pass through the data, but if computed on a ``Series`` it will be inherited by all derived objects, to avoid recomputation.
# Range of the record keys (computed once, then cached on derived objects).
data.dims.max
data.dims.min
# For keys that correspond to subscripts (e.g. indices of the rows and columns of a matrix, coordinates in space), we can convert between subscript and linear indexing. The default for these conversions is currently onebased subscript indexing, so we need to set ``onebased`` to ``False`` (this will likely change in a future release).
data.keys().take(10)
data.subToInd(isOneBased=False).keys().take(10)
# Round-tripping subscript -> linear -> subscript recovers the original keys.
data.subToInd(isOneBased=False).indToSub(isOneBased=False).keys().take(10)
# The ``query`` method can be used to average subselected records based on their (linearized) keys. It returns the mean value and key for each of the provided index lists.
keys, values = data.query(inds=[[100,101],[200]], isOneBased=False)
keys
values.shape
# The ``pack`` method collects a series into a local array, reshaped based on the keys. If there are multiple values per record, all will be collected into the local array, so typically we select a subset of values before packing to avoid overwhelming the local returning a very large amount of data.
out = data.select(0).pack()
out.shape
out = data.between(0,2).pack()
out.shape
# Conversions
# -----------
# Subclasses of ``Series`` provide additional functionality for more specialized data types.
# A ``TimeSeries`` provides methods specific to time-varying data, like ``fourier``, for computing the statistics of a Fourier transform:
# TimeSeries adds time-specific operations such as Fourier statistics.
ts = data.toTimeSeries()
fr = ts.fourier(freq=5)
fr.index
fr.select('coherence').first()
# Or detrend for detrending data over time
plt.plot(ts.mean())
plt.plot(ts.detrend('nonlinear', order=5).mean());
# A ``RowMatrix`` provides a variety of methods for working with distributed matrices and matrix operations:
mat = data.toRowMatrix()
from thunder import Colorize
# Render the covariance matrix as an image.
Colorize.image(mat.cov())
| python/doc/tutorials/src/series.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Machine Learning Models for SCOPE: Decision Tree
# Models will be coded here, but the official write up will be in the RMarkdown document.
# +
# load the data files
import pandas as pd
import numpy as np
from pymodelutils import utils
logs = pd.read_csv("data/metis_logs.csv")
logs.head()
# -
# filter down to show the average opinion (0 means no alert, 1 means alert)
# Parse dates and binarize the alert flag ('f' -> 0, anything else -> 1).
logs['run_date'] = logs['run_date'].astype('datetime64[ns]')
logs['is_alert'] = (np.where(logs['is_alert'] == 'f', 0, 1))
# Majority vote per (series, kpi, run_date): mean of the 0/1 flags, rounded.
# NOTE(review): .round(0) uses banker's rounding, so an exact 0.5 tie rounds
# to 0 -- confirm that is the intended tie-break.
logs = logs.groupby(['series', 'kpi', 'run_date']).mean().round(0).reset_index()
# A positive (averaged) campaign_id marks campaign-level rows.
logs['is_campaign'] = np.where(logs['campaign_id'] > 0, 1, 0)
# The raw identifiers are no longer needed once is_campaign is derived.
logs = logs.drop(columns=['client_id', 'partner_id', 'campaign_id'])
logs['is_alert'].describe()
AS_data = pd.read_csv("data/python_AS.csv")
AS_data.head()
TS_data = pd.read_csv("data/python_TS.csv")
TS_data.head()
RexT_data = pd.read_csv("data/python_RexT.csv")
RexT_data.head()
# ## Data Prep
# R has already filtered down the data to the days we are going to use and marked what is disqualified. We still have to handle the feature selection and one-hot encoding of select columns though. We also need to normalize it since out KPIs behave quite differently.
# +
# add column for AS to tell if it is campaign level or not
AS_data['is_campaign'] = np.where(AS_data['campaign_id'] > 0, 1, 0)
# drop the data we don't need for the model or for matching back to the logs
AS_keep_columns = ['series', 'day', 'run_date', 'kpi', 'value', 'disqualified', 'is_campaign']
TS_keep_columns = ['series', 'day', 'run_date', 'site_type', 'event_name',
                   'kpi', 'value', 'disqualified']
RexT_drop_columns = ['ranking',
                     'day_of_week',
                     'day_of_month',
                     'month_of_year',
                     'day_of_year',
                     'week_of_year']
AS_data = AS_data[AS_keep_columns]
TS_data = TS_data[TS_keep_columns]
RexT_data = RexT_data.drop(columns=RexT_drop_columns)
# -
AS_data.head()
TS_data.head()
RexT_data.head()
# add a new column to determine how many days before the run_date the day column entry is
# this will enable us to pivot that data into separate columns for the features of our model
# (prep_dates mutates each frame in place)
utils.prep_dates(AS_data)
utils.prep_dates(TS_data)
utils.prep_dates(RexT_data)
# inner joins to logs
AS_data = pd.merge(AS_data, logs, on=['series', 'run_date', 'kpi', 'is_campaign'], how='inner')
TS_data = pd.merge(TS_data, logs, on=['series', 'run_date', 'kpi'], how='inner')
RexT_data = pd.merge(RexT_data, logs, on=['series', 'run_date', 'kpi'], how='inner')
# +
# filter out the disqualified data (AS and TS data only)
AS_disqualified = AS_data[AS_data.disqualified]
TS_disqualified = TS_data[TS_data.disqualified]
# valid for model (AS and TS data only)
valid_AS_raw = AS_data[~(AS_data.disqualified)]
valid_TS_raw = TS_data[~(TS_data.disqualified)]
# keep a copy of the raw RexT data
RexT_data_raw = RexT_data.copy(deep=True)
# -
# final preparations to the data shape for use in the model
# NOTE(review): the pipeline is fed AS_data/TS_data (which still include the
# disqualified rows), not valid_AS_raw/valid_TS_raw -- presumably
# data_prep_pipeline handles disqualification itself; confirm.
valid_AS = utils.data_prep_pipeline(AS_data.copy(),
                                    indices=['series', 'run_date', 'kpi', 'is_campaign', 'is_alert'],
                                    cols=['kpi'],
                                    scaling_method=['standardize', 'min_max', 'percent_of_mean'])
valid_TS = utils.data_prep_pipeline(TS_data.copy(),
                                    indices=['series', 'run_date', 'site_type', 'event_name', 'is_alert'],
                                    cols=['site_type', 'event_name'],
                                    scaling_method=['standardize', 'min_max', 'percent_of_mean'])
valid_RexT = utils.data_prep_pipeline(utils.clean_regions(RexT_data),
                                      indices=['isCountry', 'isSubregion', 'isRegion',
                                               'series', 'run_date', 'is_alert'],
                                      cols=['series'],
                                      scaling_method=['standardize', 'min_max', 'percent_of_mean'])
# for the TS data we need to drop event_name_SITE LEVEL because it will always be the same as site_type_SITE LEVEL
valid_TS = {key : value.drop(columns='event_name_SITE LEVEL') for key, value in valid_TS.items()}
valid_AS['min_max'].head()
valid_TS['percent_of_mean'].head()
valid_RexT['standardize'].head()
# ## Modelling
# Now that all the data is prepped, we can start building some logistic regression models to test on. We also need to split our data into a test and train set being careful that we have an equal proportion of anomalies in each (because they are very few, we have to make sure we don't train or test the model on all the anomalies while the other gets none).
#
# ### Split Data into Train and Test Sets
# +
from sklearn.model_selection import train_test_split
# scaling method to test
AS_scaler = 'min_max'
TS_scaler = 'min_max'
RexT_scaler = 'min_max'
# separate out data into feature matrices and target arrays
AS_features = valid_AS[AS_scaler][[col for col in valid_AS[AS_scaler].columns
                                   if col not in ['series', 'run_date', 'is_alert']]] # this needs to be the model features
AS_targets = valid_AS[AS_scaler]['is_alert'] # this needs to be the results from the logs (only)
TS_features = valid_TS[TS_scaler][[col for col in valid_TS[TS_scaler].columns
                                   if col not in ['series', 'run_date', 'is_alert']]]
TS_targets = valid_TS[TS_scaler]['is_alert']
RexT_features = valid_RexT[RexT_scaler][[col for col in valid_RexT[RexT_scaler].columns
                                         if col not in ['run_date', 'is_alert']]]
RexT_targets = valid_RexT[RexT_scaler]['is_alert']
# Drop the one-hot series/region indicator columns from the RexT features.
test_RexT_features = RexT_features.drop(columns=[col for col in RexT_features.columns
                                                 if 'series' in col
                                                 or col in ['isCountry', 'isSubregion', 'isRegion']])
# split into a train and test set
# ('diff' columns are excluded from the features for all three models)
AS_X_train, AS_X_test, AS_y_train, AS_y_test = train_test_split(AS_features[[col for col in AS_features.columns
                                                                             if 'diff' not in col]],
                                                                AS_targets,
                                                                test_size=0.2,
                                                                random_state=25)
TS_X_train, TS_X_test, TS_y_train, TS_y_test = train_test_split(TS_features[[col for col in TS_features.columns
                                                                             if 'diff' not in col]],
                                                                TS_targets,
                                                                test_size=0.2,
                                                                random_state=25)
# NOTE(review): RexT uses a 50/50 split (test_size=0.5), presumably because
# that dataset is much smaller -- confirm.
RexT_X_train, RexT_X_test, RexT_y_train, RexT_y_test = train_test_split(test_RexT_features[[col for col in
                                                                                            test_RexT_features.columns
                                                                                            if 'diff' not in col]],
                                                                        RexT_targets,
                                                                        test_size=0.5,
                                                                        random_state=25)
# -
# Let's make sure that we have similar percentage of anomalies in our test and train sets.
# AS
print('Total alerts in training set: ' + str(AS_y_train.sum()))
print('Total alerts in test set: ' + str(AS_y_test.sum()))
# Side-by-side class proportions for train vs. test.
pd.DataFrame({'train' : AS_y_train.value_counts(normalize=True),
              'test' : AS_y_test.value_counts(normalize=True)})
# TS
print('Total alerts in training set: ' + str(TS_y_train.sum()))
print('Total alerts in test set: ' + str(TS_y_test.sum()))
pd.DataFrame({'train' : TS_y_train.value_counts(normalize=True),
              'test' : TS_y_test.value_counts(normalize=True)})
# RexT
print('Total alerts in training set: ' + str(RexT_y_train.sum()))
print('Total alerts in test set: ' + str(RexT_y_test.sum()))
pd.DataFrame({'train' : RexT_y_train.value_counts(normalize=True),
              'test' : RexT_y_test.value_counts(normalize=True)})
# ## Decision Tree
# +
from sklearn.tree import DecisionTreeClassifier
# One tree per data set; fixed seed so the fitted trees are reproducible.
AS_tree = DecisionTreeClassifier(random_state=25)
TS_tree = DecisionTreeClassifier(random_state=25)
RexT_tree = DecisionTreeClassifier(random_state=25)
AS_decision_tree_model = AS_tree.fit(AS_X_train, AS_y_train)
TS_decision_tree_model = TS_tree.fit(TS_X_train, TS_y_train)
RexT_decision_tree_model = RexT_tree.fit(RexT_X_train, RexT_y_train)
# +
from sklearn.metrics import roc_curve, auc
# Predicted probabilities on the held-out sets; column 1 is the alert class.
AS_y_prob_fit = AS_decision_tree_model.predict_proba(AS_X_test)
TS_y_prob_fit = TS_decision_tree_model.predict_proba(TS_X_test)
RexT_y_prob_fit = RexT_decision_tree_model.predict_proba(RexT_X_test)
AS_decision_tree_roc_curve = roc_curve(AS_y_test, AS_y_prob_fit[:,1], pos_label=1) # returns tuple: fpr, tpr, thresholds
AS_decision_tree_roc_curve_AUC = auc(AS_decision_tree_roc_curve[0],
                                     AS_decision_tree_roc_curve[1]) # needs fpr, tpr
TS_decision_tree_roc_curve = roc_curve(TS_y_test, TS_y_prob_fit[:,1], pos_label=1)
TS_decision_tree_roc_curve_AUC = auc(TS_decision_tree_roc_curve[0],
                                     TS_decision_tree_roc_curve[1])
RexT_decision_tree_roc_curve = roc_curve(RexT_y_test, RexT_y_prob_fit[:,1], pos_label=1)
RexT_decision_tree_roc_curve_AUC = auc(RexT_decision_tree_roc_curve[0],
                                       RexT_decision_tree_roc_curve[1])
# -
# ROC Curve
utils.model_roc_curves(roc_data_dict={'AS' : AS_decision_tree_roc_curve,
                                      'TS' : TS_decision_tree_roc_curve,
                                      'RexT' : RexT_decision_tree_roc_curve},
                       auc_dict={'AS' : AS_decision_tree_roc_curve_AUC,
                                 'TS' : TS_decision_tree_roc_curve_AUC,
                                 'RexT' : RexT_decision_tree_roc_curve_AUC},
                       method_name='Decision Tree')
# Alert-probability cutoffs chosen manually -- presumably tuned by inspecting
# the ROC curves above; confirm.
AS_threshold = 0.75
utils.confusion_matrix_visual(AS_y_test,
                              AS_decision_tree_model.predict_proba(AS_X_test)[:,1] \
                              >= AS_threshold, 'AS')
TS_threshold = 0.75
utils.confusion_matrix_visual(TS_y_test,
                              TS_decision_tree_model.predict_proba(TS_X_test)[:,1] \
                              >= TS_threshold,
                              'TS')
RexT_threshold = 0.5
utils.confusion_matrix_visual(RexT_y_test,
                              RexT_decision_tree_model.predict_proba(RexT_X_test)[:,1] \
                              >= RexT_threshold,
                              'RexT')
# #### Metrics
# Precision/recall/F1 per data set at the thresholds chosen above.
utils.classification_report_all(y_test_dict={'AS' : AS_y_test,
                                             'TS' : TS_y_test,
                                             'RexT' : RexT_y_test},
                                y_pred_dict={'AS' :
                                             AS_decision_tree_model.predict_proba(AS_X_test)[:,1] >= \
                                             AS_threshold,
                                             'TS' :
                                             TS_decision_tree_model.predict_proba(TS_X_test)[:,1] >= \
                                             TS_threshold,
                                             'RexT' :
                                             RexT_decision_tree_model.predict_proba(RexT_X_test)[:,1] >= \
                                             RexT_threshold})
| Jupyter Notebooks/Decision Tree.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1.6 LaTeX制作中文文档
#
# LaTeX最初只提供英文的编译环境,随着其在文档编辑领域的优势越来越深入人心,LaTeX逐渐开始支持多种语言的编译。在各类LaTeX技术问答社区,我们经常会看到一些关于LaTeX中文文档编译的问题,十几年前,最受大家关心的问题或许还是如何使用LaTeX编译中文文档。
#
# ### 1.6.1 使用`ctex`
#
# 通常来说,最简单的方式在XeLaTeX编译环境下使用`ctex`宏包,即`\usepackage[UTF8]{ctex}`。
#
# 【**例1**】在LaTeX中选择XeLaTeX编译器,并使用`ctex`宏包制作一个简单的中文文档。
#
# ```tex
# \documentclass{article}
# \usepackage[UTF8]{ctex}
#
# \begin{document}
#
# 永和九年,岁在癸丑,暮春之初,会于会稽山阴之兰亭,修稧(禊)事也。群贤毕至,少长咸集。此地有崇山峻领(岭),茂林修竹;又有清流激湍,映带左右,引以为流觞曲水,列坐其次。虽无丝竹管弦之盛,一觞一咏,亦足以畅叙幽情。
#
# \end{document}
# ```
#
# 编译后效果如图1.6.1所示。
#
# <p align="center">
# <img align="middle" src="graphics/zh_example1.png" width="600" />
# </p>
#
# <center><b>图1.6.1</b> 编译后的文档</center>
#
# 当然,`ctex`中也有一种特定的文档类型,名为`ctexart`,使用这种文档类型即可制作中文文档。
#
# 【**例2**】在LaTeX中选择XeLaTeX编译器,并使用`ctexart`文档类型制作一个简单的中文文档。
#
# ```tex
# \documentclass{ctexart}
#
# \begin{document}
#
# 永和九年,岁在癸丑,暮春之初,会于会稽山阴之兰亭,修稧(禊)事也。群贤毕至,少长咸集。此地有崇山峻领(岭),茂林修竹;又有清流激湍,映带左右,引以为流觞曲水,列坐其次。虽无丝竹管弦之盛,一觞一咏,亦足以畅叙幽情。
#
# \end{document}
# ```
#
# 编译后效果如图1.6.2所示。
#
# <p align="center">
# <img align="middle" src="graphics/zh_example2.png" width="580" />
# </p>
#
# <center><b>图1.6.2</b> 编译后的文档</center>
#
#
# 更多国家的学者希望用到这一工具,因此出现了许多支持各国语言的工具包或编写环境。中文是全世界使用人数最多的语言,LaTeX也提供了`CJKutf8`宏包、`CTEX`宏包等多种方式来实现中文文档编辑,均能在在线LaTeX平台Overleaf中调用并完成中文文档制作。
#
# ### (1)使用`CJKutf8`宏包
#
# `CJKutf8`宏包提供了两种中文简体字体制作中文文档:使用`\begin{CJK}{UTF8}{gbsn}`、`\end{CJK}`环境将以宋体(gbsn)制作文档,而使用`\begin{CJK}{UTF8}{gkai}`、`\end{CJK}`环境则以楷体(gkai)制作文档内容。在默认的`pdfLaTeX`编译环境中即可得到文档编译结果。
#
# 【**例1**】在Overleaf中使用`CJKutf8`宏包制作中文文档。
# ```tex
# \documentclass{article}
# \usepackage{CJKutf8}
#
# \begin{document}
#
# \begin{CJK}{UTF8}{gbsn} %字体是gbsn
# 你好,LaTeX!。
# \end{CJK}
#
# \end{document}
#
# ```
#
# 编译后效果如图1.6.3所示。
#
# <p align="center">
# <img align="middle" src="graphics/1.6-1.jpg" width="200" />
# </p>
#
# <center><b>图1.6.3</b> 编译后文档</center>
#
# ### (2)使用`CTEX`宏包
#
# `CJKutf8`宏包只提供了两种字体,可选择的余地太小。如果想要使用更丰富的字体编辑 Latex 中文文档,可以调用`CTEX`宏包、并设置 UTF8 选项使其支持 utf-8 编码。在Overleaf中使用`CTEX`宏包时,需要先将编译环境从`pdfLaTeX`调整为`XeLaTeX`。
#
# 【**例2**】在Overleaf中使用`CTEX`宏包制作中文文档。
# ```tex
# \documentclass[UTF8]{ctexart}
#
# \begin{document}
#
# {\kaishu 你好,LaTeX!(楷体)}
#
# {\songti 你好,LaTeX!(宋体)}
#
# {\heiti 你好,LaTeX!(黑体)}
#
# {\fangsong 你好,LaTeX!(仿宋)}。
#
# \end{document}
#
# ```
#
# 编译后效果如图1.6.4所示。
#
# <p align="center">
# <img align="middle" src="graphics/1.6-2.jpg" width="200" />
# </p>
#
# <center><b>图1.6.4</b> 编译后文档</center>
#
#
# 目前在Overleaf上已经出现了许多中文文档LaTeX模板,除了一些学位论文模板,一些中文学术期刊如《计算机学报》也提供了科技论文的LaTeX模板。
#
# 《中国科学:信息科学》[https://www.overleaf.com/project/5e99712a0916c900018d11af](https://www.overleaf.com/project/5e99712a0916c900018d11af)
#
# 《计算机学报》[https://www.overleaf.com/project/5f4793c256c62e0001f06d95](https://www.overleaf.com/project/5f4793c256c62e0001f06d95)
#
# 中文学位论文模板:
#
# 《浙江大学学位论文模板》[https://www.overleaf.com/project/610fa05007d0073d5405a04f](https://www.overleaf.com/project/610fa05007d0073d5405a04f)
#
# 《武汉大学博士学位论文模板》[https://www.overleaf.com/project/610fa09e07d007fa5605a1e9](https://www.overleaf.com/project/610fa09e07d007fa5605a1e9)
#
# 《中山大学研究生毕业论文模板》[https://www.overleaf.com/project/610fa17307d007f2d305a388](https://www.overleaf.com/project/610fa17307d007f2d305a388)
#
# 《南京大学研究生毕业论文模板》[https://www.overleaf.com/project/610fa1d007d00704c305a3eb](https://www.overleaf.com/project/610fa1d007d00704c305a3eb)
#
# 另外,开源项目[https://github.com/MCG-NKU/NSFC-LaTex](https://github.com/MCG-NKU/NSFC-LaTex)提供了国家自然科学基金申报书的LaTeX模板。
# 【回放】[**1.5 关于LaTeX的开源项目**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-1/section5.ipynb)
# ### License
#
# <div class="alert alert-block alert-danger">
# <b>This work is released under the MIT license.</b>
# </div>
| chapter-1/section6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %aimport utils_1_1
import pandas as pd
import numpy as np
import altair as alt
from altair_saver import save
import datetime
import dateutil.parser
from os.path import join
from constants_1_1 import SITE_FILE_TYPES
from utils_1_1 import (
get_site_file_paths,
get_site_file_info,
get_site_ids,
get_visualization_subtitle,
get_country_color_map,
)
from theme import apply_theme
from web import for_website
alt.data_transformers.disable_max_rows(); # Allow using rows more than 5000
# +
data_release='2021-04-27'
# Country-level hospitalization counts. `n.all` / `n.severe` are total and
# severe admissions; only the totals are plotted here.
df = pd.read_csv(join("..", "data", "1.1.resurgence", "case rate and severity risk", "case_numbers.csv"))
df = df.rename(columns={
    "n.all": "n_all",
    "n.severe": "n_severe"
})
df = df.drop(columns=['Unnamed: 0', 'n_severe'])
COUNTRY = ['ALL', 'BRAZIL', 'FRANCE', 'GERMANY', 'ITALY', 'SPAIN', 'USA']
COUNTRY_COLOR = ['black', '#CB7AA7', '#0072B2', '#E79F00', '#029F73', '#57B4E9', '#D45E00']
# Map the raw wave labels to display names.
df.wave = df.wave.apply(lambda x: { 'early': 'First', 'late': 'Second' }[x])
df
# +
d = df.copy()
# Bar chart of total hospitalizations per wave (log y scale), faceted by country.
plot = alt.Chart(
    d
).transform_filter(
    alt.datum.n_all > 0  # a log scale cannot represent zero counts
).mark_bar(
    size=35,
    # stroke='black',
    # strokeWidth=1
).encode(
    x=alt.X("wave:N", title=None, axis=alt.Axis(labels=False)),
    y=alt.Y("n_all:Q", title=None, scale=alt.Scale(type='log')),
    color=alt.Color("wave:N", scale=alt.Scale(range=['#D45E00', '#0072B2']), title='Wave'),
).properties(
    width=100,
    height=250
)
# Count labels drawn just above each bar.
text = plot.mark_text(
    size=16, dx=0, dy=-5, color='white', baseline='bottom', fontWeight=500
).encode(
    # x=alt.X('month:N'),
    # y=alt.Y('value:Q', stack='zero'),
    x=alt.X("wave:N", title=None, axis=alt.Axis(labels=False)),
    y=alt.Y("n_all:Q", title=None),
    # detail='cat:N',
    text=alt.Text('n_all:Q', format=','),#, format='.0%'),
    # order="order:O",
    # opacity=alt.Opacity('visibility:N', scale=alt.Scale(domain=[True, False], range=[1, 0]))
)
plot = (plot + text).facet(
    column=alt.Column("country:N", header=alt.Header(title=None), sort=COUNTRY)
).resolve_scale(color='shared')
plot = plot.properties(
    title={
        "text": [
            f"Country-Level Hospitalizations By Wave"
        ],
        "dx": 45,
        # "subtitle": [
        #     get_visualization_subtitle(data_release=data_release, with_num_sites=False)
        # ],
        "subtitleColor": "gray",
    }
)
# plot = alt.vconcat(*(
#     plot_lab(df=df, lab=lab) for lab in unique_sites
# ), spacing=30)
plot = apply_theme(
    plot,
    axis_y_title_font_size=16,
    title_anchor='start',
    legend_orient='bottom',
    legend_title_orient='left',
    axis_label_font_size=14,
    header_label_font_size=16,
    point_size=100
)
plot
# +
# Weekly hospitalization intensity (percentage) per country; file is
# tab-separated.
df = pd.read_csv(join("..", "data", "1.1.resurgence", "case rate and severity risk", "percCountCountry_AndAll_Weeks.csv"), sep='\t')
df = df.rename(columns={
    "Country": "country",
    "weeks": "week"
})
# df = df.drop(columns=['Unnamed: 0', 'n_severe'])
df.country = df.country.apply(lambda x: x.upper())
COUNTRY = ['ALL', 'BRAZIL', 'FRANCE', 'GERMANY', 'ITALY', 'SPAIN', 'USA']
COUNTRY_COLOR = ['black', '#CB7AA7', '#0072B2', '#E79F00', '#029F73', '#57B4E9', '#D45E00']
# df.wave = df.wave.apply(lambda x: { 'early': 'First', 'late': 'Second' }[x])
df
# +
d = df.copy()
d = d.sort_values('week')
# Moving average using three time points (previous, current, next):
# shift().rolling(3) averages the three prior weeks, then shift(-2)
# re-centers that window on the current week, per country.
d['percentage'] = d.groupby('country').percentage.apply(lambda x : x.shift().rolling(3, min_periods=1).mean().fillna(x))
d['percentage'] = d.groupby('country').percentage.apply(lambda x : x.shift(-2))
d = d[d.week <= '2021-02-28']  # drop trailing weeks -- presumably incomplete data; confirm cutoff
plot = alt.Chart(
    d
).mark_line(
    size=2.5,
    point=alt.OverlayMarkDef(filled=True, strokeWidth=4, opacity=0.7),
    opacity=0.7
).encode(
    x=alt.X("week:T", title=None, axis=alt.Axis(format=("%B, %Y"), tickCount=7.9, labelAngle=0)), # https://github.com/d3/d3-time-format#locale_format
    y=alt.Y("percentage:Q", title=None, axis=alt.Axis(format=".0%")),
    color=alt.Color("country:N", scale=alt.Scale(domain=COUNTRY, range=COUNTRY_COLOR), title='Country'),
).properties(
    width=850,
    height=350
)
# text = plot.mark_text(
#     size=16, dx=0, dy=-5, color='white', baseline='bottom', fontWeight=500
# ).encode(
#     # x=alt.X('month:N'),
#     # y=alt.Y('value:Q', stack='zero'),
#     x=alt.X("wave:N", title=None, axis=alt.Axis(labels=False)),
#     y=alt.Y("percentage:Q", title=None, scale=alt.Scale(format=".1%")),
#     # detail='cat:N',
#     text=alt.Text('n_all:Q'),#, format='.0%'),
#     # order="order:O",
#     # opacity=alt.Opacity('visibility:N', scale=alt.Scale(domain=[True, False], range=[1, 0]))
# )
# plot = (plot).facet(
#     row=alt.Row("country:N", header=alt.Header(title=None), sort=COUNTRY)
# ).resolve_scale(color='shared')
plot = plot.properties(
    title={
        "text": [
            f"Intensity Rate Of Hospitalization Over Time"
        ],
        "dx": 35,
        # "subtitle": [
        #     get_visualization_subtitle(data_release=data_release, with_num_sites=False)
        # ],
        "subtitleColor": "gray",
    }
)
# plot = alt.vconcat(*(
#     plot_lab(df=df, lab=lab) for lab in unique_sites
# ), spacing=30)
plot = apply_theme(
    plot,
    axis_y_title_font_size=16,
    title_anchor='start',
    legend_orient='top-right',
    legend_title_orient='top',
    axis_label_font_size=14,
    header_label_font_size=16,
    point_size=30,
    axis_tick_color='black'
)
plot
# d
# +
# Random-effects estimates of the severity shift between waves; keep only the
# risk-ratio rows here.
df = pd.read_csv(join("..", "data", "1.1.resurgence", "case rate and severity risk", "severity_shift_random_effects.csv"))
df = df.rename(columns={
    "ci.lwr": "ci_l",
    "ci.upr": "ci_u",
    "n.all": "n",
    "weeks": "week"
})
df = df[df.effect_size == 'risk_ratio']
# Point estimate +/- one standard error (the ci_l/ci_u columns hold the CI).
df['e_l'] = df.pp - df.se
df['e_u'] = df.pp + df.se
# NOTE(review): non-significant rows get the literal string 'False'; the
# (commented-out) stroke encoding below only maps 'p<0.05', so that label
# never shows -- consider a clearer label such as 'n.s.' if re-enabled.
df['sig'] = df['sig'].apply(lambda x: 'p<0.05' if x == True else 'False')
COUNTRY = ['ALL', 'BRAZIL', 'FRANCE', 'GERMANY', 'ITALY', 'SPAIN', 'USA']
COUNTRY_COLOR = ['black', '#CB7AA7', '#0072B2', '#E79F00', '#029F73', '#57B4E9', '#D45E00']
df
# +
d = df.copy()
d = d[d.day == 0]  # admission-day (day 0) estimates only
# Diamond point per country with 95% CI ticks and a dashed guide at RR = 1.
plot = alt.Chart(
    d
).mark_point(
    size=180,
    filled=True,
    shape='diamond',
    # point=alt.OverlayMarkDef(filled=True, strokeWidth=4, opacity=0.7),
    opacity=1
).encode(
    y=alt.Y("country:O", title=None, axis=alt.Axis(labelAngle=0, tickCount=20, labels=True)),
    x=alt.X("pp:Q", title="Relative Risk", scale=alt.Scale(zero=False, clamp=True), axis=alt.Axis(labelAngle=0, tickCount=10)),
    color=alt.Color("country:N", scale=alt.Scale(domain=COUNTRY, range=COUNTRY_COLOR), title='Country'),
    # stroke=alt.Stroke("sig", scale=alt.Scale(domain=['p<0.05'], range=['black']), title='Significance')
).properties(
    width=750,
    height=350
)
guide = alt.Chart(
    pd.DataFrame({'baseline': [1, 1, 1, 1, 1, 1, 1], 'country': COUNTRY})
).mark_rule(color='gray', strokeDash=[3,3], opacity=0.5).encode(
    x=alt.X('baseline:Q')
)
tick = plot.mark_errorbar(
    opacity=0.7, color='black'
).encode(
    y=alt.Y("country:O", title='Days Since Admission'),
    x=alt.X("ci_l:Q", title="Relative Risk"),
    x2=alt.X2("ci_u:Q"),
    stroke=alt.value('black'),
    strokeWidth=alt.value(2)
)
plot = (tick + plot + guide)
# .facet(
#     row=alt.Row("country:N", header=alt.Header(title=None, labelAngle=0, labelAnchor='start', labelPadding=30), sort=COUNTRY)
# ).resolve_scale(color='shared')
plot = plot.properties(
    title={
        "text": [
            f"Relative Risk Of Severe Disease In Second Compared To First Wave",
            # f"Early To Late Relative Risk Of Severe COVID-19"
        ],
        "dx": 60,
        # "subtitle": [
        #     get_visualization_subtitle(data_release=data_release, with_num_sites=False)
        # ],
        "subtitleColor": "gray",
    }
)
plot = apply_theme(
    plot,
    axis_y_title_font_size=16,
    title_anchor='start',
    legend_orient='top-right',
    legend_title_orient='top',
    axis_label_font_size=14,
    header_label_font_size=16
)
plot
# +
# Same random-effects file, now keeping the absolute-risk estimates per wave
# for the day-X cohort.
df = pd.read_csv(join("..", "data", "1.1.resurgence", "case rate and severity risk", "severity_shift_random_effects.csv"))
df = df.rename(columns={
    "ci.lwr": "ci_l",
    "ci.upr": "ci_u",
    "n.all": "n",
    "weeks": "week"
})
df = df[df.effect_size == 'risk']
df = df[df.cohort == 'dayX']
df['e_l'] = df.pp - df.se
df['e_u'] = df.pp + df.se
df.wave = df.wave.apply(lambda x: x.capitalize())
df['sig'] = df['sig'].apply(lambda x: 'p<0.05' if x == True else 'False')
COUNTRY = ['ALL', 'BRAZIL', 'FRANCE', 'GERMANY', 'ITALY', 'SPAIN', 'USA']
COUNTRY_COLOR = ['black', '#CB7AA7', '#0072B2', '#E79F00', '#029F73', '#57B4E9', '#D45E00']
df
# +
d = df.copy()
d = d[d.day == 0]  # admission-day estimates only
d.wave = d.wave.apply(lambda x: 'First' if x == 'Early' else 'Second')
# Absolute-risk bars per wave with CI error bars, faceted by country.
plot = alt.Chart(
    d
).mark_bar(
    size=35,
    # filled=True,
    # shape='diamond',
    # point=alt.OverlayMarkDef(filled=True, strokeWidth=4, opacity=0.7),
    opacity=1
).encode(
    x=alt.X("wave:N", title=None, axis=alt.Axis(labelAngle=0, tickCount=20, labels=False)),
    y=alt.Y("pp:Q", title='Absolute Risk', scale=alt.Scale(zero=False, clamp=True), axis=alt.Axis(labelAngle=0, tickCount=10, format='%')),
    color=alt.Color("wave:N", scale=alt.Scale(range=['#D45E00', '#0072B2']), title=None),
    # stroke=alt.Stroke("sig", scale=alt.Scale(domain=['p<0.05'], range=['black']), title='Significance')
).properties(
    width=100,
    height=250
)
# NOTE(review): `guide` is defined but never layered into the final chart
# below (unlike the relative-risk cell) -- dead code or an omission; confirm.
guide = alt.Chart(
    pd.DataFrame({'baseline': [1, 1, 1, 1, 1, 1, 1], 'country': COUNTRY})
).mark_rule(color='gray', strokeDash=[3,3], opacity=0.5).encode(
    x=alt.X('baseline:Q')
)
tick = plot.mark_errorbar(
    opacity=0.7, color='black'
).encode(
    x=alt.X("wave:O", title='Days Since Admission'),
    y=alt.Y("ci_l:Q", title='Absolute Risk'),
    y2=alt.Y2("ci_u:Q"),
    stroke=alt.value('black'),
    strokeWidth=alt.value(2)
)
plot = (plot + tick).facet(
    # Bug aligning labels. https://github.com/altair-viz/altair/issues/1878
    column=alt.Column("country:N", header=alt.Header(title=None), sort=COUNTRY),
    # labelAngle=0, labelBaseline='middle', labelAnchor='middle', labelAlign='left', labelPadding=0, labelLineHeight=0
    spacing=10
).resolve_scale(color='shared')
plot = plot.properties(
    title={
        "text": [
            f"Absolute Risk For Severe COVID-19 In The First and Second Waves",
        ],
        "dx": 60,
        # "subtitle": [
        #     get_visualization_subtitle(data_release=data_release, with_num_sites=False)
        # ],
        "subtitleColor": "gray",
    }
)
plot = apply_theme(
    plot,
    axis_y_title_font_size=16,
    title_anchor='start',
    legend_orient='top-left',
    legend_title_orient='top',
    axis_label_font_size=14,
    header_label_font_size=16,
    header_label_orient='bottom'
)
plot
# +
# Length-of-stay table (patients who died removed); keep only the
# meta-analysis rows and strip the 'meta-' prefix from site ids.
df = pd.read_csv(join("..", "data", "1.1.resurgence", "case rate and severity risk", "table.stay.rmDead.toShare.csv"))
df = df[df.siteid.str.contains("meta")]
df.siteid = df.siteid.apply(lambda x: x.replace('meta-', '').upper())
COUNTRY = ['ALL', 'BRAZIL', 'FRANCE', 'GERMANY', 'ITALY', 'SPAIN', 'USA']
COUNTRY_COLOR = ['black', '#CB7AA7', '#0072B2', '#E79F00', '#029F73', '#57B4E9', '#D45E00']
WAVE_COLOR = ['#D45E00', '#0072B2']
df.week = df.week.apply(lambda x: x.replace('week1', '1 Week').replace('week2', '2 Weeks').replace('week3', '> 3 Weeks'))
df.head()
# +
# Side-by-side bars per wave are emulated with symmetric xOffset nudges: each
# wave is its own layer with its own CI bars, then everything is faceted by site.
barSize = 30
barGap = 10
height = 320
first = alt.Chart(
    df
).mark_bar(
    size=barSize,
    xOffset=-barSize/2.0 - 2
).encode(
    x=alt.X('week:O', title=None, axis=alt.Axis()),
    y=alt.Y('n', title='Proportion of Patients', axis=alt.Axis(format='%'), scale=alt.Scale(domain=[0, 1])),
    color=alt.Color('wave:N', scale=alt.Scale(range=WAVE_COLOR)),
).transform_filter(
    {'field': 'wave', 'oneOf': ['First']}
).properties(
    width=(barSize + barGap)*6,
    height=height
)
firstError = alt.Chart(
    df
).mark_bar(
    size=1,
    xOffset=-barSize/2.0 - 2,
    color='black'
).encode(
    x=alt.X('week:O', title=None, axis=alt.Axis()),
    y='ci_l:Q',
    y2='ci_u:Q',
    opacity=alt.value(1),
    color=alt.value('black')
).transform_filter(
    {'field': 'wave', 'oneOf': ['First']}
).properties(
    width=(barSize + barGap)*6,
    height=height
)
second = alt.Chart(
    df
).mark_bar(
    size=barSize,
    xOffset=barSize/2.0 + 2
).encode(
    x=alt.X('week:O', title=None, axis=alt.Axis()),
    y=alt.Y('n', title='Proportion of Patients', axis=alt.Axis(format='%'), scale=alt.Scale(domain=[0, 1])),
    color=alt.Color('wave:N', scale=alt.Scale(range=WAVE_COLOR), title='Wave')
).transform_filter(
    {'field': 'wave', 'oneOf': ['Second']}
).properties(
    width=(barSize + barGap)*6,
    height=height
)
secondError = alt.Chart(
    df
).mark_bar(
    size=1,
    xOffset=barSize/2.0 + 2,
    color='black'
).encode(
    x=alt.X('week:O', title=None, axis=alt.Axis()),
    y='ci_l:Q',
    y2='ci_u:Q',
    opacity=alt.value(1),
    color=alt.value('black')
).transform_filter(
    {'field': 'wave', 'oneOf': ['Second']}
).properties(
    width=(barSize + barGap)*6,
    height=height
)
plot = (first + firstError + second + secondError).resolve_scale(opacity='independent').facet(
    column=alt.Column('siteid:N', title=None, sort=['META', 'BRAZIL', 'FRANCE', 'GERMANY', 'ITALY', 'SPAIN', 'USA'])
).properties(
    title={
        'text': 'Proportion of Patients By Length Of Stay',
        'dx': 60
    }
)
plot = apply_theme(
    plot,
    axis_y_title_font_size=16,
    title_anchor='start',
    legend_orient='top-left',
    legend_title_orient='top',
    axis_label_font_size=14,
    header_label_font_size=16,
    header_label_orient='top'
)
plot
# +
# Kaplan-Meier censoring rate (proportion of patients still in hospital) by
# day since admission, per country; colored by country, faceted country x wave.
df = pd.read_csv(join("..", "data", "1.1.resurgence", "case rate and severity risk", "KM_censoring_rate.csv"), sep=',')
df = df.rename(columns={
    "censor.rate": "censor_rate",
    "std.err": "std_error",
    "95cl_lwr": "ci_l",
    "95cl_upr": "ci_u"
})
df = df.drop(columns=['Unnamed: 0'])
df.wave = df.wave.apply(lambda x: x.capitalize())
df.wave = df.wave.apply(lambda x: (x + " Waves") if x == "All" else (x + " Wave"))
df.country = df.country.apply(lambda x: x.upper())
COUNTRY = ['ALL', 'FRANCE', 'GERMANY', 'ITALY', 'USA']
COUNTRY_COLOR = ['black', '#0072B2', '#E79F00', '#029F73', '#D45E00']
df
# +
d = df.copy()
# d = d.sort_values('week')
# # Moving average using three time points (previous, current, next)
# d['percentage'] = d.groupby('country').percentage.apply(lambda x : x.shift().rolling(3, min_periods=1).mean().fillna(x))
# d['percentage'] = d.groupby('country').percentage.apply(lambda x : x.shift(-2))
# d = d[d.week <= '2021-02-28']
plot = alt.Chart(
    d
).mark_line(
    size=3.5,
    point=alt.OverlayMarkDef(filled=True, strokeWidth=4, opacity=0.7),
    opacity=0.7
).encode(
    x=alt.X("day:Q", title="Days Since Admission", axis=alt.Axis(tickCount=10, labelAngle=0), scale=alt.Scale(padding=10, nice=False)), # https://github.com/d3/d3-time-format#locale_format
    y=alt.Y("censor_rate:Q", title="Proportion of Patients", axis=alt.Axis(format=".0%")),
    color=alt.Color("country:N", scale=alt.Scale(domain=COUNTRY, range=COUNTRY_COLOR), title='Country'),
).properties(
    width=200,
    height=200
)
error = alt.Chart(
    d
).mark_errorbar(
    opacity=0.7
).encode(
    x=alt.X("day:Q", title="Days Since Admission", axis=alt.Axis(tickCount=10, labelAngle=0), scale=alt.Scale(padding=10, nice=False)), # https://github.com/d3/d3-time-format#locale_format
    y=alt.Y("ci_l:Q", title="Proportion of Patients", axis=alt.Axis(format=".0%")),
    y2=alt.Y2("ci_u:Q"),
    color=alt.Color("country:N", scale=alt.Scale(domain=COUNTRY, range=COUNTRY_COLOR), title='Country'),
).properties(
    width=200,
    height=200
)
# text = plot.mark_text(
#     size=16, dx=0, dy=-5, color='white', baseline='bottom', fontWeight=500
# ).encode(
#     # x=alt.X('month:N'),
#     # y=alt.Y('value:Q', stack='zero'),
#     x=alt.X("wave:N", title=None, axis=alt.Axis(labels=False)),
#     y=alt.Y("percentage:Q", title=None, scale=alt.Scale(format=".1%")),
#     # detail='cat:N',
#     text=alt.Text('n_all:Q'),#, format='.0%'),
#     # order="order:O",
#     # opacity=alt.Opacity('visibility:N', scale=alt.Scale(domain=[True, False], range=[1, 0]))
# )
plot = (plot + error).facet(
    column=alt.Column("country:N", header=alt.Header(title=None), sort=COUNTRY),
    row=alt.Row("wave:N", title=None, header=alt.Header(labelOrient='left'))
).resolve_scale(color='shared')
plot = plot.properties(
    title={
        "text": [
            f"Censor Rate Across Hospitalization Days"
        ],
        "dx": 35,
    }
)
# plot = alt.vconcat(*(
#     plot_lab(df=df, lab=lab) for lab in unique_sites
# ), spacing=30)
plot = apply_theme(
    plot,
    axis_y_title_font_size=16,
    title_anchor='start',
    legend_orient='right',
    legend_title_orient='top',
    axis_label_font_size=14,
    header_label_font_size=16,
    header_label_orient='top',
    point_size=50,
    axis_tick_color='black'
)
plot
# d
# +
# Same KM censoring-rate data, but this variant colors by wave instead of
# country (an extra 'category' row keeps 'All' separated from the two waves).
df = pd.read_csv(join("..", "data", "1.1.resurgence", "case rate and severity risk", "KM_censoring_rate.csv"), sep=',')
df = df.rename(columns={
    "censor.rate": "censor_rate",
    "std.err": "std_error",
    "95cl_lwr": "ci_l",
    "95cl_upr": "ci_u"
})
df = df.drop(columns=['Unnamed: 0'])
df.wave = df.wave.apply(lambda x: x.capitalize())
df["category"] = df.wave.apply(lambda x: 'all' if x == 'All' else 'wave')
df.country = df.country.apply(lambda x: x.upper())
COUNTRY = ['ALL', 'FRANCE', 'GERMANY', 'ITALY', 'USA']
COUNTRY_COLOR = ['black', '#0072B2', '#E79F00', '#029F73', '#D45E00']
df
# +
d = df.copy()
# d = d.sort_values('week')
# # Moving average using three time points (previous, current, next)
# d['percentage'] = d.groupby('country').percentage.apply(lambda x : x.shift().rolling(3, min_periods=1).mean().fillna(x))
# d['percentage'] = d.groupby('country').percentage.apply(lambda x : x.shift(-2))
# d = d[d.week <= '2021-02-28']
WAVE_COLOR = ['black', '#D45E00', '#0072B2']
plot = alt.Chart(
    d
).mark_line(
    size=3.5,
    point=alt.OverlayMarkDef(filled=True, strokeWidth=4, opacity=0.7),
    opacity=0.7
).encode(
    x=alt.X("day:Q", title="Days Since Admission", axis=alt.Axis(tickCount=10, labelAngle=0), scale=alt.Scale(padding=10, nice=False)), # https://github.com/d3/d3-time-format#locale_format
    y=alt.Y("censor_rate:Q", title=['Proportion of Patients', 'in Hospital'], axis=alt.Axis(format=".0%")),
    color=alt.Color("wave:N", scale=alt.Scale(range=WAVE_COLOR), title='Wave'),
).properties(
    width=200,
    height=200
)
error = alt.Chart(
    d
).mark_errorbar(
    opacity=0.7
).encode(
    x=alt.X("day:Q", title="Days Since Admission", axis=alt.Axis(tickCount=10, labelAngle=0), scale=alt.Scale(padding=10, nice=False)), # https://github.com/d3/d3-time-format#locale_format
    y=alt.Y("ci_l:Q", title=['Proportion of Patients', 'in Hospital'], axis=alt.Axis(format=".0%")),
    y2=alt.Y2("ci_u:Q"),
    color=alt.Color("wave:N", scale=alt.Scale(range=WAVE_COLOR), title='Wave')
).properties(
    width=200,
    height=200
)
# text = plot.mark_text(
#     size=16, dx=0, dy=-5, color='white', baseline='bottom', fontWeight=500
# ).encode(
#     # x=alt.X('month:N'),
#     # y=alt.Y('value:Q', stack='zero'),
#     x=alt.X("wave:N", title=None, axis=alt.Axis(labels=False)),
#     y=alt.Y("percentage:Q", title=None, scale=alt.Scale(format=".1%")),
#     # detail='cat:N',
#     text=alt.Text('n_all:Q'),#, format='.0%'),
#     # order="order:O",
#     # opacity=alt.Opacity('visibility:N', scale=alt.Scale(domain=[True, False], range=[1, 0]))
# )
plot = (plot + error).facet(
    column=alt.Column("country:N", header=alt.Header(title=None), sort=COUNTRY),
    row=alt.Row("category:N", title=None, header=alt.Header(labelOrient='left', labels=False, titleFontWeight='normal'))
).resolve_scale(color='shared')
plot = plot.properties(
    title={
        "text": [
            f"Censor Rate Across Hospitalization Days"
        ],
        "dx": 35,
    }
)
# plot = alt.vconcat(*(
#     plot_lab(df=df, lab=lab) for lab in unique_sites
# ), spacing=30)
plot = apply_theme(
    plot,
    axis_y_title_font_size=16,
    title_anchor='start',
    legend_orient='right',
    legend_title_orient='top',
    axis_label_font_size=14,
    header_label_font_size=16,
    header_label_orient='top',
    point_size=50,
    axis_tick_color='black'
)
plot
# d
# -
| notebooks/2.1.waves.17_temporal_case_rate_1.1_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ehcIkJrxwFfr" colab_type="text"
# # Transfer learning in Keras
#
# https://keras.io/applications/
#
# https://keras.io/callbacks/
#
# <table class="tfo-notebook-buttons" align="left"><td>
# <a target="_blank" href="https://colab.research.google.com/github/dzlab/deepprojects/blob/master/classification/CV_Transfer_Learning_with_Keras.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td><td>
# <a target="_blank" href="https://github.com/dzlab/deepprojects/blob/master/classification/CV_Transfer_Learning_with_Keras.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a></td></table>
# + id="Hrd2WxF4JX06" colab_type="code" colab={}
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# + id="ZtoZUrhe8g54" colab_type="code" colab={}
import numpy as np
import pathlib
import glob
import urllib.request
from tqdm import tqdm
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.metrics import confusion_matrix
# + id="AEecEb4KWu9i" colab_type="code" colab={}
import warnings
# Silence library warning chatter in the notebook output.
warnings.filterwarnings("ignore")
# + id="4fsG8PmUQ20O" colab_type="code" colab={}
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras import preprocessing
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Dropout, GlobalAveragePooling2D, BatchNormalization
from tensorflow.keras.utils import Sequence
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
# + id="_AqRp0FJrEnw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="2f14261b-9f9b-48c4-a391-3c3dffdf2be8"
# NOTE(review): tf.VERSION is TensorFlow 1.x API (TF 2 uses tf.version.VERSION
# / tf.__version__) -- this notebook targets a TF 1.x Colab runtime.
print(tf.VERSION)
print(tf.keras.__version__)
# + id="iX9Uk8RdEAiM" colab_type="code" outputId="318a5a51-9c61-43d8-e31d-8e82b9abdc7d" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Mount Google Drive so the scraped image data set can be read and written.
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="D5HN1tqACUyC" colab_type="text"
# ## Data
# Download pictures of Chess world champions from google and store them in a way that's compatible with imagenet.
#
# On google images, search for the names of champions, then this on the Developer Console:
#
# `urls = Array.from(document.querySelectorAll('.rg_di .rg_meta')).map(el=>JSON.parse(el.textContent).ou);
# window.open('data:text/csv;charset=utf-8,' + escape(urls.join('\n')));`
# + id="yUZH_o45Ei6v" colab_type="code" colab={}
# Root folder on the mounted Drive holding one sub-folder of images per champion.
path = pathlib.Path('/content/gdrive/My Drive/data/champions')
# + id="Py4GxMeRFKN7" colab_type="code" colab={}
# Class labels, one per champion. NOTE(review): 'casablanca' is a misspelling
# of 'capablanca', but it must match the folder and urls_*.csv names on disk,
# so renaming it requires renaming the data files too.
classes = ['alekhine', 'botvinnik', 'casablanca', 'fischer', 'karpov', 'kasparov', 'kramnik', 'spassky']
# + id="RtVwT2dSEm4k" colab_type="code" colab={}
# One destination folder and one URL-list CSV per class, in matching order.
folders = [path/c for c in classes]
files = [path/'urls_alekhine.csv', path/'urls_botvinnik.csv', path/'urls_casablanca.csv', path/'urls_fischer.csv', path/'urls_karpov.csv', path/'urls_kasparov.csv', path/'urls_kramnik.csv', path/'urls_spassky.csv']
# + id="pZnQ0misFfRb" colab_type="code" outputId="bf37e6a1-cef0-48a9-de3d-f3527158367a" colab={"base_uri": "https://localhost:8080/", "height": 187}
# download the urls
# For each class, fetch every image URL listed in its CSV into the matching
# class folder, naming files by line index (0000.jpg, 0001.jpg, ...). The
# index advances even when a download fails, so file names stay aligned with
# URL line numbers.
for index in tqdm(range(len(files))):
    with open(files[index]) as url_file:
        for number, url in enumerate(url_file):
            filename = folders[index]/('{0:04d}'.format(number)+'.jpg')
            try:
                # Lines read from the file keep their trailing newline; strip
                # it, otherwise urlretrieve is handed a malformed URL and the
                # image is silently skipped.
                urllib.request.urlretrieve(url.strip(), filename)
            except Exception:
                # Best-effort: dead links are common in scraped URL lists.
                # (The previous bare `except:` also swallowed KeyboardInterrupt.)
                pass
# + [markdown] id="qnoT4NhC8vPL" colab_type="text"
# ## Model
# Load a ResNet-50 model with weights pre-trained on the imagenet dataset, then use it as a backbone for the champions classifier model.
# + id="WuuaxOyl8sua" colab_type="code" colab={}
# Full ResNet-50 including the 1000-class ImageNet head; the head is cut off below.
model1 = ResNet50(weights='imagenet')
# + [markdown] id="O_45sJvBPNQB" colab_type="text"
# ### Transfer learning
# Using the transfer learning method to reuse the capabilities learnt by the image-net based model.
# 1. Remove the last layer of the original model as it was trained to classify images to one of 1000 imagenet classes (it's useless in our case)
# 1. Add a header on top of this base model,
# 2. Freeze the layers in this base model
# 3. Train only the head to learn how to classify the pictures of the champions.
# + id="xfBo2c3SQiiD" colab_type="code" outputId="40cebf65-dd22-4b6f-e7a7-cd12157dfe71" colab={"base_uri": "https://localhost:8080/", "height": 6426}
model1.summary()
# + id="d673jiG-RC8H" colab_type="code" outputId="014ffecc-7f11-482f-b12d-3494d2001729" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Tensor output of the third-to-last layer -- presumably the last activation
# before the avg-pool and fc1000 layers in this ResNet-50 build; confirm
# against the summary above.
model1.layers[-3].output
# + [markdown] id="QtQZvEiMvIGS" colab_type="text"
# Helper functions to set the layers of a NN to trainable or not
# + id="e3VWO-CquiRp" colab_type="code" colab={}
# freeze all layers of the model (from left to right)
def freeze(model, limit=None):
    """Mark layers of `model` as non-trainable, from the first layer up.

    Args:
        model: any object with a `.layers` sequence (e.g. a Keras Model).
        limit: index of the last layer to freeze (inclusive). Values below -1
            are interpreted relative to the end of `model.layers`. When None,
            every layer is frozen.
    """
    # handle negative indices (limit == -1 is deliberately left as-is: the
    # `index > -1` check below then stops before freezing anything, matching
    # the original behavior)
    if limit is not None and limit < -1:
        limit += len(model.layers)
    # loop for all valid indices and mark the corresponding layer
    for index, layer in enumerate(model.layers):
        if limit is not None and index > limit:
            break
        layer.trainable = False
# unfreeze all layers of the model from the given layer index onward (from right to left)
def unfreeze(model, limit=None):
    """Mark layers of `model` as trainable, from layer index `limit` onward.

    Args:
        model: any object with a `.layers` sequence (e.g. a Keras Model).
        limit: index of the first layer to unfreeze. Values below -1 are
            interpreted relative to the end of `model.layers`. When None,
            every layer is unfrozen.
    """
    # handle negative indices
    if limit is not None and limit < -1:
        limit += len(model.layers)
    # layers before `limit` are skipped; the rest become trainable
    for index, layer in enumerate(model.layers):
        if limit is not None and index < limit:
            continue
        layer.trainable = True
def print_trainable_status(model):
    """Print one line per layer: name, output shape, and trainable flag."""
    for lyr in model.layers:
        shape_text = str(lyr.output_shape)
        print(f'{lyr.name:20} {shape_text:20} {lyr.trainable}')
# + id="5E5C5x1Z86iK" colab_type="code" colab={}
# use the layer before the average pool, which is before the 1000-class Dense one
# 1. freeze the original model up to the last layer we will keep
freeze(model1, -3)
#print_trainable_status(model1)
# 2. create a new classification head chained onto the output of the base model
x = model1.layers[-3].output # shape should be (bs=None, 7, 7, 2048)
x = Dropout(rate=0.3)(x) # shape should be (bs=None, 7, 7, 2048)
x = GlobalAveragePooling2D()(x) # shape should be (bs=None, 2048)
x = Dense(1024, activation='relu')(x) # shape should be (bs=None, 1024)
x = BatchNormalization()(x)
predictions = Dense(len(classes), activation='softmax')(x) # shape should be (bs=None, num_champions)
# + id="2Qb-VpWyQhof" colab_type="code" colab={}
# create a new model with input identical to the base imagenet model and output as the predictions
model2 = Model(inputs=model1.input, outputs=predictions)
# + id="ww6tdIdWkZic" colab_type="code" outputId="f2863b12-bb4b-46b1-862d-bb38dc93459d" colab={"base_uri": "https://localhost:8080/", "height": 6494}
model2.summary()
# + [markdown] id="jR7_HLItTzvu" colab_type="text"
# ## Training
# + [markdown] id="wiBm5uZnT1it" colab_type="text"
# ### Freeze base model
# First: we need to train only the top layers (which were randomly initialized) and freeze all layers from the base model
#
# + id="EjiCnk7sR2vC" colab_type="code" colab={}
for layer in model1.layers:
layer.trainable = False
# + id="zDE0tQozUEjM" colab_type="code" colab={}
# compile the model to before training
adam = Adam(lr=0.001, epsilon=0.01, decay=0.0001)
model2.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
# + id="KHbMgELOUTo2" colab_type="code" outputId="dcb51b1e-d199-4bbd-b53e-0b3ba6125f57" colab={"base_uri": "https://localhost:8080/", "height": 34}
model1.input.shape # inputs to the Resnet-50 are tensors are images of 224x224
# + [markdown] id="6rzAIuhzXIPv" colab_type="text"
# ### Generate training data
# + [markdown] id="G6e6p-C3XYpx" colab_type="text"
# We need to generate batches of samples indefinitely. Our features are an array of data with shape (batch_size, 224, 224, 3) and our labels are an array of data with shape (batch_size, 1). We use data from features and labels to train our model.
# + id="vEzE2Q9lXL06" colab_type="code" colab={}
# use this generator: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
"""
https://github.com/keras-team/keras/issues/2702
model.fit_generator( ...,
validation_data=val_datagen.flow(val_X, val_y, batch_size=BATCH_SIZE),
nb_val_samples=val_X.shape[0])
"""
def generator(path, classes, batch_size, size=(224, 224), validation=False):
    """Yield (features, targets) batches built from the images on disk.

    Parameters:
        path: unused here; kept for interface compatibility (the class folders
            come from the module-level `folders` list).
        classes: ordered list of class names; one label slot per class.
        batch_size: number of images per yielded batch.
        size: target (height, width) each image is resized to.
        validation: when True read the 'valid' subfolders, else 'train'.

    Yields:
        features: np.ndarray of shape (batch, height, width, 3)
        targets: np.ndarray of shape (batch, len(classes)), one-hot labels
    """
    images, labels = [], []
    for index in range(len(classes)):
        pattern = folders[index].as_posix()
        if not validation:
            pattern += '/train/*.jpg'
        else:
            pattern += '/valid/*.jpg'
        # append each file in this folder to the output
        for img_path in glob.glob(pattern):
            img = image.load_img(img_path, target_size=size)
            img_data = image.img_to_array(img)
            # One-hot label for the CURRENT class. The original code did
            # `np.zeros((8)); lbl_data[1] = 1`, which hard-coded 8 classes and
            # labeled every single sample as class 1.
            lbl_data = np.zeros((len(classes)), dtype=np.float32)
            lbl_data[index] = 1
            images.append(img_data)
            labels.append(lbl_data)
            if len(images) == batch_size:
                features, targets = np.array(images), np.array(labels)
                images, labels = [], []
                yield features, targets
    # flush the final partial batch, if any
    if len(images) > 0:
        features, targets = np.array(images), np.array(labels)
        images, labels = [], []
        yield features, targets
# + id="tZbqyQsvsR2G" colab_type="code" colab={}
class ImageGenerator(Sequence):
    """Keras Sequence that streams fixed-size batches of labeled images from disk."""

    def __init__(self, path, classes, batch_size, image_size=(224, 224), validation=False, shuffle=True):
        """Collect the image paths/labels and prepare the (shuffled) index table.

        Parameters:
            path: root folder; each class lives in `path/<class>/{train,valid}`.
            classes: ordered list of class names (defines the label indices).
            batch_size: number of samples per batch.
            image_size: (height, width) every image is resized to.
            validation: read the 'valid' split instead of 'train'.
            shuffle: reshuffle the sample order at the end of every epoch.
        """
        self.validation = validation
        self.image_size, self.batch_size = image_size, batch_size
        self.urls, self.labels = self.get_urls_(path, classes)
        self.items_size = len(self.urls)
        self.classes_size = len(classes)
        self.indexes = np.arange(self.items_size)
        self.shuffle = shuffle
        self.on_epoch_end()

    def get_urls_(self, path, classes):
        """Return (urls, labels): the file path and class index of every item."""
        urls, labels = [], []
        for i, c in enumerate(classes):
            pattern = (path/c).as_posix()
            if not self.validation:
                pattern += '/train/*.jpg'
            else:
                pattern += '/valid/*.jpg'
            # add all elements in those folders
            for img_path in glob.glob(pattern):
                urls.append(img_path)
                labels.append(i)
        return urls, labels

    def load_urls_(self, indexes):
        """Load the images at the given sample indexes into an (X, y) pair of tensors."""
        # init target arrays
        images = np.zeros((self.batch_size, self.image_size[0], self.image_size[1], 3), dtype=np.float32)
        labels = np.zeros((self.batch_size, self.classes_size), dtype=np.float32)
        # Find list of urls in this batch
        urls = [self.urls[k] for k in indexes]
        lbls = [self.labels[k] for k in indexes]
        for index, img_path in enumerate(urls):
            # read image from url
            img = preprocessing.image.load_img(img_path, target_size=self.image_size)
            img_data = preprocessing.image.img_to_array(img)
            # One-hot label sized by the actual number of classes. The original
            # hard-coded np.zeros((8)), which breaks for any other class count.
            lbl_data = np.zeros((self.classes_size), dtype=np.float32)
            lbl_data[lbls[index]] = 1
            # append data
            images[index, :] = img_data
            labels[index, :] = lbl_data
        return images, labels

    def on_epoch_end(self):
        """Rearrange the sample order after each epoch (shuffled if requested)."""
        self.indexes = np.arange(self.items_size)
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __len__(self):
        """Number of full batches per epoch (the trailing remainder is dropped)."""
        return int(np.floor(self.items_size / self.batch_size))

    def __getitem__(self, index):
        """Generate one batch of data"""
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        # Generate data for the batch
        X, y = self.load_urls_(indexes)
        return X, y
# + id="C9KIYyjmGD4S" colab_type="code" colab={}
# setup generators for train and validation set
train_dl = ImageGenerator(path, classes, batch_size=48)
valid_dl = ImageGenerator(path, classes, batch_size=48, validation=True)
# + id="PGL8x6-zG3Fc" colab_type="code" outputId="2180cf8b-d078-4918-902c-270bea0d3393" colab={"base_uri": "https://localhost:8080/", "height": 34}
len(train_dl), len(valid_dl)
# + id="54CVgxyl8TT7" colab_type="code" colab={}
class DataBunch:
    """Bundle of train/validation/test data loaders with small viewing helpers."""

    def __init__(self, train_dl, valid_dl, test_dl=None):
        self.train_dl, self.valid_dl, self.test_dl = train_dl, valid_dl, test_dl

    def one_item(self):
        """Return one random training image as a PIL Image."""
        # randint's `high` bound is already exclusive; the original used
        # len(...) - 1, which skipped the last batch/item and raised
        # ValueError when only one batch (or one item) existed.
        batch_index = np.random.randint(low=0, high=len(self.train_dl), size=1)[0]
        X, y = self.train_dl[batch_index]
        item_index = np.random.randint(low=0, high=len(X), size=1)[0]
        item = X[item_index]
        # images are float32 RGB tensors; convert for PIL (the original
        # passed the float array with mode='RGBA', which PIL rejects)
        img = Image.fromarray(item.astype(np.uint8), mode='RGB')
        return img

    def show_batch(self, rows=2, **kwargs):
        """Display a rows x rows grid of random training images.

        e.g. show_batch(rows=3, figsize=(7,6))
        """
        # pick one random batch, then sample images from it
        batch_index = np.random.randint(low=0, high=len(self.train_dl), size=1)[0]
        X, _ = self.train_dl[batch_index]
        # kwargs (e.g. figsize) belong to the figure, not to subplot
        plt.figure(**kwargs)
        for cell in range(rows * rows):
            # subplot positions are 1-based
            plt.subplot(rows, rows, cell + 1)
            item_index = np.random.randint(low=0, high=len(X), size=1)[0]
            img = Image.fromarray(X[item_index].astype(np.uint8), mode='RGB')
            plt.imshow(img)
# + id="RRr1tUzqH22f" colab_type="code" colab={}
#PIL.imshow
data = DataBunch(train_dl, valid_dl)
#data.show_batch()
# + id="Dij_Jg-KHJoB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 320} outputId="d39302aa-e83d-41ab-9309-f191d672d88d"
batch_index = np.random.randint(low=0, high=len(train_dl)-1, size=1)[0]
batch, y = train_dl[batch_index]; print(batch.shape)
item_index = np.random.randint(low=0, high=len(batch)-1, size=1)[0]
item = batch[item_index]; print(item.shape)
plt.imshow(item)
# + id="8gnyblSEBE20" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 241} outputId="9d3a50c2-0a2c-44b2-a44b-f31301a90482"
img = data.one_item()
img
# + id="dPtZFq0R6KK7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="13b78b91-a948-4344-f660-815762081113"
train_dl[1][0].shape
# + id="nY9atqOEUc5Y" colab_type="code" outputId="b1c5120b-aa6b-4ce2-db84-548ab35aee7f" colab={"base_uri": "https://localhost:8080/", "height": 357}
# fit the model using the previous generators
history = model2.fit_generator(generator=train_dl, validation_data=valid_dl, epochs=10, use_multiprocessing=True)
# + id="1FGg1rEXXUdO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="f7cc775d-9e5b-4648-dff1-d8cb2cdaa64e"
# plot training vs validation loss per epoch
plt.plot(history.history['loss'], label="train")
plt.plot(history.history['val_loss'], label="valid")
# Add legend ('top left' is not a valid matplotlib location; it must be 'upper left')
plt.legend(loc='upper left')
# Add title and x, y labels
plt.title("Losses over epoch", fontsize=16, fontweight='bold')
plt.xlabel("epoch")
plt.ylabel("Loss")
plt.show()
# + id="GdCJf0QXfe26" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="370bb641-99e6-42d4-d051-9fd9bcf5e261"
# plot training vs validation accuracy per epoch
plt.plot(history.history['acc'], label="train")
plt.plot(history.history['val_acc'], label="valid")
# Add legend ('top left' is not a valid matplotlib location; it must be 'upper left')
plt.legend(loc='upper left')
# Add title and x, y labels
plt.title("Accuracy over epoch", fontsize=16, fontweight='bold')
plt.xlabel("epoch")
plt.ylabel("Accuracy")
plt.show()
# + [markdown] id="1NKRRfKJxY6Q" colab_type="text"
# ## Prediction
# + id="iZ0jBvkGtnzC" colab_type="code" colab={}
class ImageItem():
    """Helper to load a single image ready for model prediction."""

    def __init__(self, x):
        # Store the preprocessed tensor. The original assigned the undefined
        # name `image_size` here, which raised NameError on instantiation.
        self.x = x

    @staticmethod
    def from_url(img_path, image_size=(224, 224)):
        """Load `img_path` and return (PIL image, raw array, preprocessed batch).

        The third element has a leading batch dimension and the network's
        preprocessing applied, so it can be fed straight to model.predict.
        """
        img = preprocessing.image.load_img(img_path, target_size=image_size)
        img_data = preprocessing.image.img_to_array(img)
        # add the batch dimension, then apply the network's preprocessing
        x = np.expand_dims(img_data, axis=0)
        x = preprocess_input(x)
        return img, img_data, x
# + id="V_0H5bSrb8-p" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="16dab69c-881c-4f81-9b73-5730ed657cc4"
img_path = folders[0]/'train'/'0000.jpg'
glob.glob(folders[0].as_posix()+'/*.jpg')
img, img_data, x = ImageItem.from_url(img_path)
img_data.shape, x.shape
# + id="bP-CLSnOcEHA" colab_type="code" outputId="211d68e0-78f4-4590-cbd5-ce5d42871d32" colab={"base_uri": "https://localhost:8080/", "height": 241}
#plt.imshow(img)
#plt.imshow(img_data)
img
# + id="hnmwKQdWcIo5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="df4d9def-92b0-4e82-f07a-d6ed46453fa4"
output = model2.predict(x)
class_index = np.argmax(output[0])
print('Prediction: {}'.format(classes[class_index]))
# + id="twAUwzPtcTEw" colab_type="code" outputId="8bc79015-d525-49b8-9f52-a02d92691371" colab={"base_uri": "https://localhost:8080/", "height": 68}
# read image
img_path = folders[1]/'valid'/'0294.jpg'
glob.glob(folders[1].as_posix()+'/*.jpg')
img, img_data, x = ImageItem.from_url(img_path)
# run prediction
output = model2.predict(x)
class_index = np.argmax(output[0])
print('Prediction: {}'.format(classes[class_index]))
print(output)
# + id="k_9EGQincYmg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="bbd27954-f3b2-4211-f529-bba6699e7d7b"
classes
# + [markdown] id="eVWOnfiMxk9b" colab_type="text"
# ### Confusion Matrix
# + id="uusRSvSq-fEx" colab_type="code" colab={}
test_dl = ImageGenerator(path, classes, batch_size=48, validation=True)
# + id="DN2PrzunyhTp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="61060064-d8e0-4459-dfc4-c26956850367"
# collect true (y) and predicted (y_hat) class indices over the whole test set
y = []
y_hat = []
iterations = len(test_dl)
print('Number of iterations: {}'.format(iterations))
for i in range(iterations):
    # one batch of images and their one-hot labels
    X_batch, y_batch = test_dl[i]
    y_hat_batch = model2.predict(X_batch)
    for j in range(len(y_batch)):
        # argmax turns one-hot labels / probability vectors into class indices
        y.append(np.argmax(y_batch[j]))
        y_hat.append(np.argmax(y_hat_batch[j]))
# + id="hYJLsPlOhHs8" colab_type="code" colab={}
def display_confusion_matrix(cm, labels):
    """Render the confusion matrix `cm` as a heatmap with class-name tick labels.

    Parameters:
        cm: square matrix of counts (e.g. from sklearn's confusion_matrix),
            rows = true classes, columns = predicted classes.
        labels: class names, in the same order as the rows/columns of `cm`.
    """
    fig = plt.figure(figsize=(12, 12))
    ax = fig.add_subplot(111)
    # matshow draws the matrix with x tick labels on top ('Predicted' axis)
    cax = ax.matshow(cm, cmap=plt.cm.Blues)
    plt.title('Confusion matrix of the classifier')
    fig.colorbar(cax)
    # NOTE(review): the leading '' assumes the default locator places one tick
    # before the first cell — confirm the labels line up with the cells
    ax.set_xticklabels([''] + labels)
    ax.set_yticklabels([''] + labels)
    plt.xlabel('Predicted')
    plt.ylabel('True')
    plt.show()
# + id="bPi2qhLsljzG" colab_type="code" outputId="83bbc088-0705-4354-e14d-d495f210ac8b" colab={"base_uri": "https://localhost:8080/", "height": 689}
y_label = [classes[i] for i in y]
y_hat_label = [classes[i] for i in y_hat]
cm = confusion_matrix(y_label, y_hat_label, classes); #print(cm)
#display_confusion_matrix(y, y_hat, classes)
display_confusion_matrix(cm, classes)
# + [markdown] id="b22WFVStGqKW" colab_type="text"
# ### Activation
# + [markdown] id="jgwHQCmhH0oz" colab_type="text"
# #### Heatmap
# + id="nDd55E99lrZb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="11e107e1-fbb7-4558-da8b-e22ab41f9ec5"
img_path = folders[0]/'train'/'0000.jpg'
glob.glob(folders[0].as_posix()+'/*.jpg')
img, img_data, x = ImageItem.from_url(img_path)
img_data.shape, x.shape
# + id="fFXGxrufB2F7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d3a17279-4422-474f-f65b-bee0567155c1"
output = model1.layers[-3](x); output.shape
# + id="AcDB2CpOJ_Ob" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a2de4116-de25-4e61-d1b2-801793322d7b"
model1.layers[-3].output_shape
# + id="UHxWM9lUXBVi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="679b8e6b-fe63-4f13-da59-8b03bb5f7ee9"
x1 = model2.layers[0](x)
x1.shape
# + id="8uuJGMSnKKzd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="37f1dfe6-2d64-4e0f-a628-d572a9ca81aa"
target_layer_index = len(model2.layers) - 6 # activation_48
model2.layers[target_layer_index].name
# + [markdown] id="jCkeGZRCavKy" colab_type="text"
# calculate the output of the activation layer
# + id="U_KlnLHSZy-4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2f15607a-25e6-43d4-ed57-1c62b1d8a68f"
target_layer_output = K.function([model2.layers[0].input], [model2.layers[target_layer_index].output])
activations = target_layer_output([x])[0]
activations.shape
# + [markdown] id="Xg5YfvupabZo" colab_type="text"
# average on the last dimension then scale the 7x7 matrix to image_size then display it on top of the original image
# + id="3U6_7aDabDWc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c6b95b38-84a3-4feb-9dba-c4eb10a74ef8"
activations_avg = activations.mean(-1)[0]; activations_avg.shape
# + id="7A03Dy7TI89P" colab_type="code" colab={}
def show_heatmap(img, hm):
    """Overlay the activation heatmap `hm` (semi-transparent) on top of `img`.

    `extent=(0, 224, 224, 0)` stretches the coarse heatmap over the full
    224x224 image area; bilinear interpolation smooths the low-resolution grid.
    """
    _,ax = plt.subplots()
    # hide both axes' tick marks
    plt.xticks([], []); plt.yticks([], [])
    ax.imshow(img)
    ax.imshow(hm, alpha=0.6, extent=(0,224,224,0), interpolation='bilinear', cmap='magma');
# + id="JT_x0CATbemU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 248} outputId="0da4375e-85ef-4f17-bdbf-c1bbbab98bc0"
show_heatmap(img, activations_avg)
# + id="ZTO5_S5BbmSo" colab_type="code" colab={}
# + id="hYv9XDcibr59" colab_type="code" colab={}
| classification/CV_Transfer_Learning_with_Keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
def remove_currency_signs(df, col):
    """Strip the '$' prefix and ',' thousand separators from a string column, in place."""
    without_dollar = df[col].str.replace('$', '', regex=False)
    df[col] = without_dollar.str.replace(',', '')
def convert_to_bool(df, col):
    """Map 'f'/'t' flags to 0/1 and cast the column to boolean, in place."""
    flag_map = {'f': 0, 't': 1}
    df[col] = df[col].replace(flag_map).astype(bool)
# ## `calendar_cleaned`
calendar = pd.read_csv('data/Boston/calendar.csv')
calendar_cleaned = calendar.copy()
# +
calendar_cleaned['date'] = pd.to_datetime(calendar_cleaned['date'])
convert_to_bool(calendar_cleaned, 'available')
remove_currency_signs(calendar_cleaned, 'price')
calendar_cleaned['price'] = calendar_cleaned['price'].astype(float)
# -
calendar_cleaned.to_csv('data/Boston/cleaned_calendar.csv', index=False)
calendar_cleaned
# ### `listings_cleaned`
listings = pd.read_csv('data/Boston/listings.csv')
listings.shape[0]
listings_short = listings.copy()
drop_cols = ['listing_url', 'scrape_id', 'last_scraped', 'name', 'space', 'experiences_offered',
'neighborhood_overview', 'notes', 'transit', 'thumbnail_url', 'medium_url', 'picture_url',
'xl_picture_url', 'host_url', 'host_name', 'host_location', 'host_about', 'host_thumbnail_url',
'host_picture_url', 'host_neighbourhood', 'host_listings_count', 'host_total_listings_count',
'host_verifications', 'host_has_profile_pic', 'street', 'neighbourhood', 'neighbourhood_group_cleansed',
'city', 'state', 'zipcode', 'market', 'smart_location', 'country_code', 'country', 'latitude',
'longitude', 'is_location_exact', 'bathrooms', 'beds', 'bed_type', 'square_feet',
'guests_included', 'extra_people', 'maximum_nights', 'calendar_updated', 'has_availability',
'availability_30', 'availability_60', 'availability_90', 'availability_365',
'calendar_last_scraped', 'requires_license', 'license', 'jurisdiction_names',
'require_guest_profile_picture', 'require_guest_phone_verification', 'calculated_host_listings_count']
listings_short.drop(drop_cols, axis=1, inplace=True)
listings_cleaned = listings_short.copy()
listings_cleaned.drop(['summary', 'host_acceptance_rate'], axis=1, inplace=True)
# BUG FIX: the property_type / bedrooms repairs below previously modified
# `listings_short` AFTER the copy above, so they never reached
# `listings_cleaned` and were silently lost.
index = listings_cleaned[listings_cleaned.property_type.isnull()].index
listings_cleaned.loc[index, 'property_type'] = 'Other'
# replace missing bedroom counts with 1, then make the column integer
# (assignment instead of chained-inplace fillna, which can hit a copy)
listings_cleaned['bedrooms'] = listings_cleaned['bedrooms'].fillna(1)
listings_cleaned['bedrooms'] = listings_cleaned['bedrooms'].astype(int)
# listings reporting 0 bedrooms are treated as studios with 1 bedroom
index = listings_cleaned[listings_cleaned.bedrooms == 0].index
listings_cleaned.loc[index, 'bedrooms'] = 1
listings_cleaned.drop(listings_cleaned[listings_cleaned.review_scores_accuracy.isnull()].index, inplace=True)
listings_cleaned.drop(listings_cleaned[listings_cleaned.review_scores_location.isnull()].index, inplace=True)
listings_cleaned.drop(listings_cleaned[listings_cleaned.review_scores_rating.isnull()].index, inplace=True)
listings_cleaned.drop(listings_cleaned[listings_cleaned.host_response_time.isnull()].index, inplace=True)
listings_cleaned.drop(['weekly_price', 'monthly_price', 'security_deposit', 'cleaning_fee',
'access', 'interaction', 'house_rules'], axis=1, inplace=True)
listings_cleaned.isnull().sum()
listings_cleaned.shape
# **Summary:**
#
# * *neighbourhood_cleansed* remains since it is the only one of the three without missing values
# * missing values in *property_type* integrated in category *Other*
# * replace NaN and 0 bedrooms with 1
# * drop Boston-specific columns *access*, *interaction* and *house_rules*
# * drop columns *weekly_price*, *monthly_price*, *security_deposit* and *cleaning_fee* due to large amount of missing values
# * drop rows with missing values for all host- and review-related columns
listings_cleaned.to_csv('data/Boston/cleaned_listings.csv', index=False)
| Airbnb_Boston_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Now that you have finished the simplest problem using TensorFlow, then we can move forward to text classification. This task is almost the same simple with the last one except for the preprocessing step, but the decoding in preprocessing is already included in the package. The idea of this notebook comes from [TensorFlow Tutorial](https://www.tensorflow.org/tutorials/keras/basic_text_classification). Let's get started!
# The dataset used in this notebook is the [IMDB Dataset](https://www.tensorflow.org/api_docs/python/tf/keras/datasets/imdb). It includes the preprocessed reviews and labels of positive or negative.
# +
# Import libraries.
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
# +
# Download and check dimension of the dataset.
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000) # "num_words" takes the first most frequent words
print('training data: ', train_data.shape)
print('training labels: ', train_labels.shape)
print('test data: ', test_data.shape)
print('test labels: ', test_labels.shape)
# +
# Visualize the decoded dataset adapted from the tutorial.
word_index = imdb.get_word_index()
# The first four indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
    """Translate a sequence of word indices back into a space-separated string."""
    words = (word_index.get(token, '?') for token in text)
    return ' '.join(words)
print(train_data[0], ' -> ', train_labels[0])
decode_review(train_data[0])
# -
# Preprocess data to pad them to same lengths.
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
value=0,
padding='post',
maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data,
value=0,
padding='post',
maxlen=256)
print(train_data[0])
print('training data length: ', train_data.shape[1])
# +
# Model construction.
model = keras.Sequential()
model.add(keras.layers.Embedding(10000, 16)) # embedding layer with size 10000 as the number of words and will add a new dimension
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation=tf.nn.relu))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.summary()
# -
# Model compiling.
model.compile(optimizer='adam',
loss='binary_crossentropy', # "binary_crossentropy" is better for probability problems
metrics=['accuracy'])
# Create validation set.
x_train = train_data[10000:]
x_val = train_data[:10000]
y_train = train_labels[10000:]
y_val = train_labels[:10000]
# Model training.
history = model.fit(x_train,
y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val))
# Print result.
results = model.evaluate(test_data, test_labels)
print(results)
# +
# Visualize the learning curve on loss.
history_dict = history.history # history_dict has keys of ['loss', 'val_loss', 'val_acc', 'acc']
acc = history_dict['acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, loss, 'ro', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# +
# Visualize the learning curve on accuracy.
plt.clf() # clear figure
acc = history_dict['acc']
val_acc = history_dict['val_acc']
plt.plot(epochs, acc, 'ro', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
| 02_Text_Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Iterators / range function
# The call of range(1000000) does not return the list of numbers; it returns a range object that is iterable. This object generates the million values one at a time, and only as needed. This technique has a great advantage, because it allows a loop over the big range without setting aside a huge amount of memory for it. Also, if such a loop were to be interrupted in some fashion, no time will have been spent computing unused values of the range.
#
# ## Generators
# The most convenient technique for creating iterators in Python is through the use of generators. A generator is implemented with a syntax that is very similar to a function, but instead of returning values, a yield statement is executed to indicate each element of the series.
# To show the way of implementing a generator I will use an example of determining all factors of a positive integer. For example, the number 100 has factors 1, 2, 4, 5, 10, 20, 25, 50, 100. A traditional function might produce and return a list containing all factors, implemented as:
def factors1(n):
    """Return all factors of the positive integer n, in increasing order, as a list."""
    return [divisor for divisor in range(1, n + 1) if n % divisor == 0]
# Generator that produces the same numbers:
def factors2(n):
    """Lazily yield each factor of the positive integer n, in increasing order."""
    yield from (divisor for divisor in range(1, n + 1) if n % divisor == 0)
factors1(100)
for factor in factors2(100):
print(factor)
# ## Comprehension Syntax
# + active=""
# [ expression for value in iterable if condition ]
#
# List comprehension:
# [ k*k for k in range(1, n+1) ]
#
# set comprehension
# { k*k for k in range(1, n+1) }
#
# generator comprehension
# ( k*k for k in range(1, n+1) )
#
# dictionary comprehension
# { k : k*k for k in range(1, n+1) }
#
#
# -
# ## Conditional Expressions
# + active=""
# expr1 if condition else expr2
# -
# ## Decorators
# Create a decorator that adds behavior to a given function:
# Convert the return value in some way
# +
# def my_decorator(func):
# def wrapper(func):
# lst_factors = func()
# for el in lst_factors:
# print(el * 2)
# return wrapper
# @my_decorator
# def factors1(n):
# results = [ ]
# for k in range(1,n+1):
# if n % k == 0: results.append(k)
# return results
# +
# factors1(100)
# -
| .ipynb_checkpoints/Pythonisms-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import re
import speech_recognition as sr
from sys import argv
from pydub import AudioSegment, effects
from pydub.silence import split_on_silence
# +
def normalization():
    '''
    Normalize the loudness of a .wav recording.
    input: prompts for the path of the recorded .wav file
    output: saves the volume-normalized audio as normalized.wav
    '''
    source_path = input('Enter the path to recorded file')
    recording = AudioSegment.from_file(source_path, "wav")
    normalized = effects.normalize(recording)
    normalized.export("normalized.wav", format="wav")
    print("normalized recording saved as normalized.wav \n")
def speechToTextModule(lang="en-in"):
    '''
    Convert a spoken .wav recording to text using Google speech recognition.
    brief: It will first normalize the audio, then split it into chunks on
           silences, then run each chunk through the recognizer and append
           the detected text to recognized.txt.
    input: prompts (via normalization) for the path of the .wav recording;
           `lang` is the language tag passed to the recognizer.
    output: recognized.txt file with all detected text; also creates
            normalized.wav and an audio_chunks folder of chunkN.wav files.
    '''
    # calling normalization function (writes normalized.wav)
    normalization()
    # opening normalized audio file and recognized.txt for appending detected text
    song = AudioSegment.from_wav("normalized.wav")
    fh = open("recognized.txt", "w+")
    # spliting audio into chunks with parameter as silence of 1.2 seconds
    chunks = split_on_silence(song,
                              # must be silent for at least 1.2 seconds
                              min_silence_len = 1200,
                              # consider it silent if quieter than -50 dBFS
                              silence_thresh = -50
                              )
    # creating a directory to store the audio chunks.
    try:
        os.mkdir('audio_chunks')
    except(FileExistsError):
        pass
    print("folder created for storing the chunks of audio file \n")
    os.chdir('audio_chunks')
    i = 0
    # processing each chunk
    for chunk in chunks:
        # pad the chunk with 10 ms of silence on both sides
        # (the original comment claimed 0.5 seconds; duration is milliseconds)
        chunk_silent = AudioSegment.silent(duration = 10)
        audio_chunk = chunk_silent + chunk + chunk_silent
        # export audio chunk and save it in the current directory.
        print("saving chunk{0}.wav".format(i))
        # specify the bitrate to be 192 k
        audio_chunk.export("./chunk{0}.wav".format(i), bitrate ='192k', format ="wav")
        # the name of the newly created chunk
        filename = 'chunk'+str(i)+'.wav'
        print("Processing chunk "+str(i))
        # get the name of the newly created chunk
        # in the AUDIO_FILE variable for later use.
        file = filename
        # create a speech recognition object
        r = sr.Recognizer()
        # recognize the chunk
        with sr.AudioFile(file) as source:
            # BUG FIX: the attribute was misspelled 'pause_threshhold', which
            # silently created a dead attribute instead of configuring the
            # recognizer's pause threshold
            r.pause_threshold = 1
            r.energy_threshold = 7000
            audio_listened = r.listen(source)
            # below could be used in case above three lines are not giving good results
            # r.adjust_for_ambient_noise(source)
            # audio_listened = r.listen(source)
        try:
            # try converting it to text by specifying the language
            rec = r.recognize_google(audio_listened, language=lang)
            # write the output to the file.
            fh.write(rec+". ")
        # catch any errors.
        except sr.UnknownValueError:
            print("Could not understand audio")
        except sr.RequestError as e:
            print("no internet connection or access")
        i += 1
    os.chdir('..')
    # release the output file handle (was previously left open)
    fh.close()
# -
speechToTextModule()
| audio_chunk_and_text/.ipynb_checkpoints/audio_to_text_module-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import tensorflow as tf
import collections
scripts = open("data/scripts.txt", "r", encoding="utf-8")
corpus = scripts.read()
def create_tables(words):
    """Build lookup tables mapping each word to an id (by descending frequency) and back."""
    ranked = collections.Counter(words).most_common()
    # ids follow frequency order: the most common word gets id 0
    dictionary = {word: position for position, (word, _) in enumerate(ranked)}  # word to key
    reverse = {position: word for word, position in dictionary.items()}  # key to word
    return dictionary, reverse
def punctuations():
    """Return the replacement token used for each punctuation mark in the corpus."""
    symbols = ['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n']
    names = ['period', 'comma', 'quotes', 'semicolon', 'exclamation-mark',
             'question-mark', 'left-parentheses', 'right-parentheses',
             'emm-dash', 'return']
    return {symbol: '||{}||'.format(name) for symbol, name in zip(symbols, names)}
tokens = punctuations()
for token in tokens:
corpus = corpus.replace(token, " " + tokens[token] + " ")
corpus = corpus.lower()
corpus = corpus.split()
dictionary, reverse = create_tables(corpus)
def make_minibatches(text, batch_size, sequence_length):
    """Slice `text` (a list of token ids) into an array of (x, y) minibatches.

    y is x shifted one position left (targets are the next token), with the
    first token wrapped around to the end. Tokens that don't fill a complete
    batch are discarded.
    """
    tokens_per_batch = batch_size * sequence_length
    num_batches = len(text) // tokens_per_batch
    trimmed = text[:num_batches * tokens_per_batch]
    inputs = np.array(trimmed)
    targets = np.array(trimmed[1:] + [trimmed[0]])
    input_rows = inputs.reshape(batch_size, -1)
    target_rows = targets.reshape(batch_size, -1)
    pairs = zip(np.split(input_rows, num_batches, axis=1),
                np.split(target_rows, num_batches, axis=1))
    return np.array(list(pairs))
#Hyperparameters
epochs = 100
batch_size = 512
rnn_size = 512
num_layers = 3
keep_prob = 0.7 #dropout rate
embed_dim = 512
sequence_length = 30
lr = 0.001
save_dir = "./output"
training = tf.Graph()
with training.as_default():
input_text = tf.placeholder(tf.int32, [None, None], name="input")
targets = tf.placeholder(tf.int32, [None, None], name="targets")
alpha = tf.placeholder(tf.float32, name='alpha')
num_words = len(dictionary)
input_shape = tf.shape(input_text)
rnn_layers = []
for i in range(num_layers):
lstm = tf.contrib.rnn.BasicLSTMCell(num_units=rnn_size)
drop_cell = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=keep_prob)
rnn_layers.append(drop_cell)
cell = tf.contrib.rnn.MultiRNNCell(rnn_layers)
initial_state = cell.zero_state(input_shape[0], tf.float32)
initial_state = tf.identity(initial_state, name='initial_state')
embed = tf.contrib.layers.embed_sequence(input_text, num_words, embed_dim)
outputs, final_state = tf.nn.dynamic_rnn(cell, embed, dtype=tf.float32)
final_state = tf.identity(final_state, name='final_state')
logits = tf.contrib.layers.fully_connected(outputs, num_words, activation_fn=None)
probs = tf.nn.softmax(logits, name='probs')
cost = tf.contrib.seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_shape[0], input_shape[1]])
)
optimizer = tf.train.AdamOptimizer(alpha)
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
corpus_int = [dictionary[word] for word in corpus]
batches = make_minibatches(corpus_int, batch_size, sequence_length)
with tf.Session(graph=training) as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
print("Epoch " + str(epoch))
for batch_index, (x, y) in enumerate(batches):
feed_dict = {
input_text: x,
targets: y,
initial_state: state,
alpha: lr
}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed_dict)
if epoch % 25 == 0:
saver = tf.train.Saver()
saver.save(sess, save_dir)
print("model saved")
| lstm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/papagorgio23/Python101/blob/master/Validation_F%2B_Model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="cdF7iaugCiWf" colab_type="text"
# # Validate F+ Lead Scoring 2.0 Model
# + [markdown] id="TCITBhqhCo3K" colab_type="text"
# ## Load Data
# + id="Du9OeQNw632D" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="2ed46c62-3cf7-4529-b665-5c094c914ba7"
# Installing Library
# !pip install pydata_google_auth
# + id="YcbmerBR7WFr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="db170d8a-7156-4efb-d152-4d563fc55471"
# Using GBQ shout Out to Hughes
# Authenticate to Google BigQuery and pull the validation dataset.
import pandas_gbq
import pydata_google_auth
# OAuth scopes: BigQuery/cloud access plus Drive (needed for Drive-backed tables).
SCOPES = [
    'https://www.googleapis.com/auth/cloud-platform',
    'https://www.googleapis.com/auth/drive',
]
# Interactive user auth; auth_local_webserver=False prints an auth URL
# instead of opening a local browser (required inside Colab).
credentials = pydata_google_auth.get_user_credentials(
    SCOPES,
    auth_local_webserver=False)
# + id="QX6hpC2g7gTJ" colab_type="code" colab={}
# Model inputs joined to Salesforce application records for the
# May 2019 - Oct 2019 validation window.
sql = """
SELECT
a.id
,m.name
,m.loan_application_id
,m.lead_type__c
,m.createddate
,m.prequal_submitted_date__c
,m.full_app_submitted_date__c
,m.Loan_Officer_Assigned
,m.date_funded__c
,m.funded_flag
,m.loan_officer_name__c
,m.amount_of_loan_requested__c
,m.first_amount_of_loan_requested
,m.verifiable_annual_income__c
,m.first_income_p1
,m.co_app_verifiable_annual_income__c
,m.first_income_p2
,m.co_app
,m.first_coapp
,m.income_sum
,CAST(m.first_income_sum AS INT64) AS first_income_sum
,m.c_LTI
,m.First_LTI
,m.loan_use__c
,m.first_loan_use
,m.employment_status__c
,m.ndi_ratio__c
,m.first_ndi_ratio__c
,m.fico__c
,m.first_FICO
,m.utm_source__c
,m.bcc0300__c
,m.first_bcc0300__c
,a.interest_rate__c
,a.risk_group__c
,a.final_risk_group__c
,a.risk_group_p1__c
FROM
`ffn-dw-bigquery-prd.Credit_Views.Check_Sales_NPV_Model_Inputs` m
LEFT JOIN
`freedom-dw.salesforce_ffam.application__c` a ON m.name = a.name
WHERE
m.createddate BETWEEN '2019-05-01' AND '2019-10-15'
"""
# + id="hEZwN2Yi79m4" colab_type="code" colab={}
# Run the query with standard SQL and keep a working reference.
df1 = pandas_gbq.read_gbq(sql, project_id='ffn-dw-bigquery-prd', credentials=credentials, dialect='standard')
# NOTE(review): df = df1 aliases the same DataFrame (no copy) — mutating
# df also mutates df1.
df = df1
# + id="rIUpFkFi8Mxv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 305} outputId="810c8300-dfa8-47d6-c9b9-c5040c449a0c"
df1.head()
# + id="qUa0ioSTHEtU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 242} outputId="4bc9810e-25eb-496d-d35e-ccf4fe389fce"
df.columns
# + id="5uAw7If58l2Y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="dbd8d107-0b80-42df-f684-b0ec9b3dd69c"
df.columns
# + [markdown] id="rP6QffcNCs56" colab_type="text"
# ## Model Test 1 - First touch variables
# + id="m9e3HiCB8o-2" colab_type="code" colab={}
# removed duplicate columns and save the first values
# Keep only the first-touch versions of duplicated fields by dropping the
# final-touch columns (plus the id) from the pulled dataset.
firstDf = df.drop(["id", "amount_of_loan_requested__c", "verifiable_annual_income__c", \
                   "co_app_verifiable_annual_income__c", "co_app", "c_LTI", \
                   "loan_use__c", "employment_status__c", "ndi_ratio__c", \
                   "fico__c", "bcc0300__c"], axis=1)
# + id="9PS8jIcg_NfB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="d6817a7e-8e91-4c39-ff76-39b83f7f4f5b"
firstDf.columns
# + id="1YCGkPO2_q6D" colab_type="code" colab={}
# + [markdown] id="DYFC16gSC6qP" colab_type="text"
# ## Model Test 2 - Current model
# + [markdown] id="i9jIcIbl-Ybo" colab_type="text"
# # Mount Drive
# + id="t8j_zGn6-aXT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="f8043939-d790-4e58-eb21-9eff6458a2b8"
# Load the Drive helper and mount
from google.colab import drive
# This will prompt for authorization.
drive.mount('/content/drive')
# + id="1Ur3JPJg-f2E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3f0fee64-fefb-46c9-c6d4-d8512b57a4ac"
# set working directory
import os
os.chdir("/content/drive/My Drive/Data Scientist/F+ Lead Scoring Model/Validation/")
os.getcwd()
# + id="LDK0njr5TKdP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="148fc66d-f46e-43ed-9f6a-8b67bde59cd5"
# ls
# + [markdown] id="AuN7jS1-6xrR" colab_type="text"
# ## Module Setup
# + id="D5fWmAsS6IqQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="b3b34d0f-66ab-4719-a3dc-2183f498150c"
import numpy as np
import pandas as pd
import logging
import os
from sklearn.externals import joblib
#dirdata = os.path.join(os.path.dirname(__file__), 'Data')
# Module-level guard so the model/data are loaded at most once.
global initialized
initialized = False
def init():
    """Load the pickled scoring model and the unit-economics CSV once.

    Populates the module globals `fplus_ls_model` and `unitEconomicsData`.
    Best-effort by design: any load failure returns silently and leaves
    `initialized` False so a later call can retry.
    """
    global fplus_ls_model
    global unitEconomicsData
    global initialized
    if (initialized == True):
        return
    try:
        fplus_ls_model = joblib.load('Fplus_Lead_Scoring')
        unitEconomicsData = pd.read_csv('unitEconomicsData.csv')
        #fplus_ls_model = joblib.load(os.path.join(dirdata, 'Fplus_Lead_Scoring'))
        #unitEconomicsData = pd.read_csv(os.path.join(dirdata, 'unitEconomicsData.csv'))
    except Exception as e:
        #print(e)
        return
    initialized = True
# + id="XozO7zN261xv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="322a82f2-c0b1-4a1c-f191-14e82db14679"
## initialize Model
init()
## doesn't work.... So here we go manual
#fplus_ls_model = joblib.load('Data/Fplus_Lead_Scoring')
#unitEconomicsData = pd.read_csv('Data/unitEconomicsData.csv')
# + id="k7_nLUX5XjYv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="05c4b12a-cd06-46b6-d321-d6152d6129d5"
unitEconomicsData.head()
# + [markdown] id="7wJCalwK5Jt4" colab_type="text"
# ## Library Binning
# + id="6RBMJyXxC8c6" colab_type="code" colab={}
import numpy as np
import pandas as pd
def get_co_app_cat(co_app_income):
    """Flag co-applicant presence: 1 when an income value exists, else 0."""
    return 0 if pd.isnull(co_app_income) else 1
def get_loan_use_cat(loan_use):
    """Bin a loan-use label into an ordinal category; unknown/missing -> 3."""
    if pd.isnull(loan_use):
        return 3
    categories = {
        'Credit Card Refinancing': 4,
        'Major Purchase': 2,
        'Other': 2,
        'Auto Purchase': 1,
    }
    # labels are stripped before lookup; anything unrecognized falls to 3
    return categories.get(loan_use.strip(), 3)
def get_employment_cat(employment_status):
    """Bin employment status: Retired=4, Self-employed=2, Other/missing=1, else 3."""
    status = '' if pd.isnull(employment_status) else employment_status.strip()
    mapping = {'Retired': 4, 'Self-employed': 2, 'Other': 1, '': 1}
    return mapping.get(status, 3)
def get_loan_amount_cat(loan_amount):
    """Bin requested loan amount; smaller loans get higher category codes.

    <15k -> 4, [15k, 20k) -> 3, [20k, 25k) -> 2, >=25k or missing -> 1.
    """
    if pd.isnull(loan_amount):
        return 1
    amount = float(loan_amount)
    if amount < 15000:
        return 4
    elif amount < 20000:
        return 3
    elif amount < 25000:
        return 2
    return 1
def get_mkt_chan_cat(utm_source):
    """Bin a utm_source marketing channel into an ordinal category.

    Unknown or missing sources fall through to 3. Matching is exact
    (case-sensitive) after stripping surrounding whitespace.
    """
    if pd.isnull(utm_source):
        return 3
    groups = (
        (7, ('creditkarma', 'nerdwallet')),
        (6, ('credible', 'experian')),
        (5, ('website', 'google', 'msn', 'ck', 'nerd',
             '115', 'save', 'dm', 'SLH', '201')),
        (2, ('facebook', 'even', 'uplift', 'Quinstreet',
             'Personalloanpro', '113')),
        (1, ('LendEDU', 'monevo', '247', 'sfl')),
    )
    source = utm_source.strip()
    for code, members in groups:
        if source in members:
            return code
    return 3
def get_fico(fico):
    """Clean a FICO score; out-of-range values collapse to the sentinel 990.

    Missing values, placeholder codes (>= 9000), and sub-600 scores all
    map to 990; otherwise the score is returned as an int.
    """
    if pd.isnull(fico):
        return 990
    score = int(fico)
    # NOTE(review): both placeholder-high and very-low scores share the
    # same 990 sentinel — confirm that is intentional.
    if score >= 9000 or score < 600:
        return 990
    return score
def get_lti(lti):
    """Bin loan-to-income ratio into inverted integer bins.

    Missing or out-of-range (<1 or >35) -> 36; [1,2) -> 35; [2,3) -> 34;
    otherwise the floored ratio itself (a numpy float).
    """
    if pd.isnull(lti):
        return 36
    ratio = float(lti)
    if ratio > 35 or ratio < 1:
        return 36
    if ratio < 2:
        return 35
    if ratio < 3:
        return 34
    return np.floor(ratio)
def get_bcc0300(bcc0300):
    """Clean the bcc0300 credit attribute: missing -> 99, values >= 25 -> 30."""
    if pd.isnull(bcc0300):
        return 99
    value = int(bcc0300)
    return 30 if value >= 25 else value
def get_ndi_ratio(ndi_ratio):
    """Clean an NDI ratio: floor it, then clamp <10 to 5 and >75 to 80.

    Missing values map to 5 as well.
    """
    if pd.isnull(ndi_ratio):
        return 5
    floored = np.floor(float(ndi_ratio))
    if floored < 10:
        return 5
    if floored > 75:
        return 80
    return floored
# + id="29bmfzEw583t" colab_type="code" colab={}
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
import library_binning as lib
#import scoringData as sd
def score_collector_phase1(lead, debug = False):
    """Score a single validated lead with the F+ phase-1 lead-scoring model.

    Bins the raw lead fields via the library_binning helpers, one-hot
    encodes the categorical bins, re-indexes the one-row frame to the
    model's training columns, and returns a dict of fuse.* fields:
    funding probability, NPV, model name, and error flag/reason.
    With debug=True the binned one-row DataFrame is included as well.
    Any failure is caught and reported through the error fields rather
    than raised.
    """
    mod_lead = pd.DataFrame()
    bin_vars = {}
    error = False
    error_msg = ''
    prob_prediction = 0
    npv = 0
    MODEL_NAME = 'fplus_ls_201902_v1'
    try:
        # create binned/transformed variables
        # (wrapping the first value in a Series lets pd.DataFrame accept
        # the remaining scalars)
        bin_vars['co_app'] = pd.Series(lib.get_co_app_cat(lead['co_app_verifiable_annual_income']))
        bin_vars['loan_use'] = lib.get_loan_use_cat(lead['loan_use'])
        bin_vars['employment'] = lib.get_employment_cat(lead['employment_status'])
        bin_vars['loan_amount'] = lib.get_loan_amount_cat(lead['final_loan_amount'])
        bin_vars['mkt_chan'] = lib.get_mkt_chan_cat(lead['utm_source'])
        bin_vars['ficox'] = lib.get_fico(lead['fico'])
        bin_vars['lti'] = lib.get_lti(lead['lti'])
        bin_vars['bcc0300'] = lib.get_bcc0300(lead['xpn_bcc0300'])
        bin_vars['ndi'] = lib.get_ndi_ratio(lead['ndi_ratio'])
        bin_vars['ndisq'] = bin_vars['ndi'] * bin_vars['ndi']
        mod_lead = pd.DataFrame(bin_vars)
        # create dummies for the categorical bins, then drop the originals
        cat_vars=['co_app','loan_use','employment','loan_amount','mkt_chan']
        for var in cat_vars:
            cat_list = pd.get_dummies(mod_lead[var], prefix=var)
            temp=mod_lead.join(cat_list)
            mod_lead=temp
        data_vars=mod_lead.columns.values.tolist()
        to_keep=[i for i in data_vars if i not in cat_vars]
        mod_lead=mod_lead[to_keep]
        # print(mod_lead.columns.values)
        # re-index to have same columns as the model; dummy columns this
        # lead did not produce are filled with 0 (note the feature list
        # has no mkt_chan_4 column)
        mod_lead = mod_lead.reindex(columns = ['ficox', 'lti', 'bcc0300', 'ndi', 'ndisq', 'co_app_0', 'co_app_1',
                                               'loan_use_1', 'loan_use_2', 'loan_use_3', 'loan_use_4', 'employment_1',
                                               'employment_2', 'employment_3', 'employment_4', 'loan_amount_1',
                                               'loan_amount_2', 'loan_amount_3', 'loan_amount_4', 'mkt_chan_1',
                                               'mkt_chan_2', 'mkt_chan_3', 'mkt_chan_5', 'mkt_chan_6',
                                               'mkt_chan_7'], fill_value=0)
        # score: probability of the positive (funded) class
        #prob_prediction = sd.fplus_ls_model.predict_proba(mod_lead)[0][1] I'm changing this to remove sd and just loading scoring model and data already
        prob_prediction = fplus_ls_model.predict_proba(mod_lead)[0][1]
        #print(prob_prediction)
        # calc NPV
        npv = get_npv_calc(lead['final_loan_amount'], prob_prediction, lead['utm_source'])
    except Exception as e:
        # print(e)
        error = True
        error_msg = 'Error in Scoring.'
    # add an error flag
    if (debug):
        return_scores = {
            'bin_lead': mod_lead,
            'fuse.score':prob_prediction,
            'fuse.npv': npv,
            'fuse.model': MODEL_NAME,
            'fuse.error_flag': error,
            'fuse.error_reason': error_msg
        }
        return return_scores
    # return the dictionary
    return_scores = {
        'fuse.score':prob_prediction,
        'fuse.npv': npv,
        'fuse.model': MODEL_NAME,
        'fuse.error_flag': error,
        'fuse.error_reason': error_msg
    }
    return return_scores
def get_npv_calc(loan_amt, prob_prediction, utm_source):
    """Expected NPV: P(fund) * (7.85% of amount - $707.90 fixed cost - channel CPA).

    CPA is looked up in the global unitEconomicsData table by the
    normalized (stripped, lower-cased) utm_source; unknown or missing
    sources use CPA = 0, and a missing loan amount is treated as 0.
    """
    cpa = 0
    if pd.notnull(utm_source):
        # normalize the channel name before the table lookup
        source = utm_source.strip().lower()
        if source in set(unitEconomicsData['utm_source']):
            cpa = unitEconomicsData.loc[unitEconomicsData['utm_source'] == source, 'CPA'].values[0]
    amount = 0 if pd.isnull(loan_amt) else float(loan_amt)
    return prob_prediction * ((amount * 0.0785) - (707.9 + cpa))
# + id="iFHx4rDNYM51" colab_type="code" colab={}
# + [markdown] id="kwGWDkyJ5Riq" colab_type="text"
# ## Main Lead Level
# + id="Xy-8jtMd5MoJ" colab_type="code" colab={}
import numpy as np
import pandas as pd
import score_collector_phase1 as sc1
def main_leadlevel(leads, phase=1, provide="scored_lead"):
    """Validate a raw lead and, for phase 1, score it.

    Returns a dict of fuse.* score fields. Validation failure yields a
    zeroed result with the error flag set; imputation warnings from
    validation are prepended to the scorer's error reason. Phases other
    than 1 return None.
    """
    if phase == 1:
        checked = check_lead_phase1(leads)
        if checked["error"]:
            return {
                "fuse.score" : 0,
                "fuse.npv" : 0,
                "fuse.model" : '',
                "fuse.error_flag": True,
                "fuse_error_reason" : "Lead Validation Failed."
            }
        warning = checked["warning"]
        scores = sc1.score_collector_phase1(checked["return_lead"])
        if warning.strip():
            # surface the imputation warning ahead of any scoring error
            scores['fuse.error_reason'] = warning + ' ' + scores['fuse.error_reason']
        return scores
# Check that the lead has all the required fields
# if a value is missing, return warning message
# Check that the lead has all the required fields;
# if a value is missing, return a warning message.
def check_lead_phase1(lead):
    """Validate and clean a raw lead dict for phase-1 scoring.

    Lower-cases all keys, back-fills absent required fields with NaN,
    normalizes every value via returnNaNifEmptyorNaN, and lists the
    fields that will need imputation. Returns a dict with keys
    "return_lead", "warning", and "error".
    """
    required_fields = ["co_app_verifiable_annual_income","loan_use","employment_status",
                       "final_loan_amount","fico","lti","xpn_bcc0300","ndi_ratio","utm_source"]
    raw_lead = {k.lower(): v for k, v in lead.items()}
    return_lead = {}
    missing_variables = []
    error = False
    error_msg = ''
    try:
        # make sure every required field exists, even if only as NaN
        for field in required_fields:
            raw_lead.setdefault(field, np.NaN)
        for key, value in raw_lead.items():
            cleaned = returnNaNifEmptyorNaN(value)
            return_lead[key] = cleaned
            if pd.isnull(cleaned):
                missing_variables.append(key)
        if missing_variables:
            error_msg = "Imputing values for: " + ",".join(missing_variables)
    except Exception:
        # any unexpected failure marks the whole lead invalid
        error = True
    return {
        "return_lead" : return_lead,
        "warning": error_msg,
        "error": error
    }
def returnNaNifEmptyorNaN(variable):
    """Normalize one raw lead value.

    Numbers pass through unchanged; null or empty values become NaN;
    purely numeric strings are returned as-is; all other strings are
    stripped of surrounding whitespace.
    """
    if isinstance(variable, (int, float)):
        return variable
    # null or empty string -> NaN
    if pd.isnull(variable) or len(variable) < 1:
        return np.NaN
    return variable if variable.isnumeric() else variable.strip()
# + id="C_onaN12UrRP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="2b7ffdeb-a8b7-492d-f8f8-9c4890ba4cf0"
# Smoke-test the validator with a fully-populated synthetic lead.
lead = {"co_app_verifiable_annual_income":342,"loan_use":'Debt Consolidation', \
        "employment_status":'Full-time', "final_loan_amount":25000, "fico":700,\
        "lti":25.8, "xpn_bcc0300":42, "ndi_ratio":40, "utm_source":'Lending Tree'}
check_lead_phase1(lead)
# + [markdown] id="77OWhkhY5fhW" colab_type="text"
# ## Model
# + id="I2nSP8W25UUh" colab_type="code" colab={}
import scoringData as sd
import main_leadLevel as ml
import pandas as pd
import json
# Load the model and unit-economics data into scoringData's globals.
sd.init()
# Assumption : gets dictionary as a input
def run(leads):
score = ml.main_leadlevel(leads)
#score is dictionary, score['fuse_score'] is dataframe
#add scores to the lead
if ('co_app_verifiable_annual_income' in leads):
leads['co_app_verifiable_annual_income']=str(leads['co_app_verifiable_annual_income'])
if ('fico' in leads):
leads['fico']=str(leads['fico'])
if ('LTI' in leads):
leads['LTI']=str(leads['LTI'])
if ('xpn_bcc0300' in leads):
leads['xpn_bcc0300']=str(leads['xpn_bcc0300'])
if ('ndi_ratio' in leads):
leads['ndi_ratio']=str(leads['ndi_ratio'])
if ('final_loan_amount' in leads):
leads['final_loan_amount']=str(leads['final_loan_amount'])
leads.update(score)
return_list = []
return_list.append(leads)
return return_list
def format_scores_returned(score):
    """Wrap each scalar value of a score dict in a one-element list.

    Round-trips through a one-row DataFrame and back via to_dict('list').
    """
    frame = pd.DataFrame.from_dict(score, orient='index').transpose()
    return frame.to_dict('list')
# + id="qs747pSaUWeR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="9ae34fc8-89df-4384-b90c-88945b370084"
# End-to-end and stage-by-stage smoke tests on the synthetic lead.
run(lead)
# + id="6OJuyB8nW59d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="03d16099-1c39-433a-d524-11adfce0e6c5"
main_leadlevel(lead)
# + id="Dnsr5r3rXKgd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="fe5ff753-d78f-40c1-d4a5-8abc62c2d038"
score_collector_phase1(lead)
# + id="Xvlz8tvEXPGe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="efeaf86f-660f-41e0-a98b-e21ce8f35801"
# NOTE(review): run() mutated `lead` in place (stringified fields, merged scores).
lead
# + id="Wr77xZogZ6dV" colab_type="code" colab={}
# + [markdown] id="Dj2G4JW9Yj9o" colab_type="text"
# # New Score
# + [markdown] id="fxmHBLPwiCnX" colab_type="text"
# ## Load Data
# + id="j96VO_9-apAt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="ddc1fac3-79af-4724-e9c2-736457644451"
# load data exported earlier for the re-scoring exercise
df = pd.read_csv('full_data.csv')
df.head()
# + id="w7t0h9gZcVxo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="8a2f19e9-5c76-4434-b3eb-f15cfe978fd9"
df.columns
# + id="JwtIJxS-c83D" colab_type="code" colab={}
# redefine the synthetic lead (only lti differs from the earlier one)
lead = {"co_app_verifiable_annual_income":342,"loan_use":'Debt Consolidation', \
        "employment_status":'Full-time', "final_loan_amount":25000, "fico":700,\
        "lti":18.8, "xpn_bcc0300":42, "ndi_ratio":40, "utm_source":'Lending Tree'}
# + [markdown] id="rprlub1IiEfT" colab_type="text"
# ## Split Final DF and First DF
# + id="M47lAopHcfph" colab_type="code" colab={}
# Column lists for the two views of each application: final-touch values
# vs first-touch values (shared outcome/score columns in both).
final_var = ['SF_App_Id', 'Funding_Score', 'Funding_Model_Segment', 'NPV_Score',\
             'NPV_Model_Segment', 'Funding', 'NPV_Actual', 'co_app', 'loan_use__c',\
             'employment_status__c', 'amount_of_loan_requested__c', 'fico__c', \
             'c_LTI', 'bcc0300__c', 'ndi_ratio__c', 'utm_source__c']
first_var = ['SF_App_Id', 'Funding_Score', 'Funding_Model_Segment', 'NPV_Score',\
             'NPV_Model_Segment', 'Funding', 'NPV_Actual', 'first_coapp', 'first_loan_use',\
             'employment_status__c', 'first_amount_of_loan_requested', 'first_FICO', \
             'First_LTI', 'first_bcc0300__c', 'first_ndi_ratio__c', 'utm_source__c']
# + id="Ej3HaMsffm3B" colab_type="code" colab={}
# Split into final-touch and first-touch frames. .copy() gives each an
# independent DataFrame: the column assignments in later cells would
# otherwise operate on a slice of `df`, triggering pandas'
# SettingWithCopyWarning and risking writes back into `df`.
final_df = df[final_var].copy()
first_df = df[first_var].copy()
# + id="33OQoc7IfukG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 289} outputId="03283c2d-1375-446d-f520-48795d1f946d"
# Quick visual check of both frames.
print(final_df.head())
print(first_df.head())
# + [markdown] id="81CJ6FOwiAUa" colab_type="text"
# ## Transform Variables
# + [markdown] id="jh6nYBlqfMAS" colab_type="text"
# **These Metrics need to be multiplied by 100**
#
# c_LTI
#
# First_LTI
#
# first_ndi_ratio__c
#
#
# + [markdown] id="s5cCAq9XiOlC" colab_type="text"
# ### Initial Transform
# + id="q4lYKwKaf323" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="36e6fd9f-6825-4d4d-cc09-b8a5e53bf77f"
# These ratio columns arrive as fractions; scale to percentages so they
# match the units the binning helpers expect.
final_df['c_LTI'] = final_df['c_LTI']*100
first_df['First_LTI'] = first_df['First_LTI']*100
first_df['first_ndi_ratio__c'] = first_df['first_ndi_ratio__c']*100
# + id="MsNUC3SfgVvC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="897b938d-4925-457f-8535-15f06b3ab35e"
# Sanity-check the scaled distributions and column sets.
first_df.describe()
# + id="dHQvNRWNhO_l" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="465eecb1-c312-4705-8ae9-7d56186facac"
final_df.columns
# + [markdown] id="EIXeNH8kiKvC" colab_type="text"
# ### Apply Model Transformations
# + id="d_Gii9XzarUW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a173b5e4-149d-48c2-f6cc-576b2137243f"
# Transform variables: apply the model's binning helpers to the
# final-touch columns.
final_df['co_app'] = final_df['co_app'].apply(get_co_app_cat)
final_df['loan_use'] = final_df['loan_use__c'].apply(get_loan_use_cat)
final_df['employment'] = final_df['employment_status__c'].apply(get_employment_cat)
final_df['loan_amount'] = final_df['amount_of_loan_requested__c'].apply(get_loan_amount_cat)
final_df['mkt_chan'] = final_df['utm_source__c'].apply(get_mkt_chan_cat)
final_df['ficox'] = final_df['fico__c'].apply(get_fico)
final_df['lti'] = final_df['c_LTI'].apply(get_lti)
final_df['bcc0300'] = final_df['bcc0300__c'].apply(get_bcc0300)
final_df['ndi'] = final_df['ndi_ratio__c'].apply(get_ndi_ratio)
final_df['ndisq'] = final_df['ndi'] * final_df['ndi']
# Transform variables: same binning, first-touch columns.
first_df['co_app'] = first_df['first_coapp'].apply(get_co_app_cat)
first_df['loan_use'] = first_df['first_loan_use'].apply(get_loan_use_cat)
first_df['employment'] = first_df['employment_status__c'].apply(get_employment_cat)
first_df['loan_amount'] = first_df['first_amount_of_loan_requested'].apply(get_loan_amount_cat)
first_df['mkt_chan'] = first_df['utm_source__c'].apply(get_mkt_chan_cat)
first_df['ficox'] = first_df['first_FICO'].apply(get_fico)
first_df['lti'] = first_df['First_LTI'].apply(get_lti)
first_df['bcc0300'] = first_df['first_bcc0300__c'].apply(get_bcc0300)
first_df['ndi'] = first_df['first_ndi_ratio__c'].apply(get_ndi_ratio)
first_df['ndisq'] = first_df['ndi'] * first_df['ndi']
# + [markdown] id="vPmL6izMiUPz" colab_type="text"
# ### Dummy Variables
# + [markdown] id="sEI9vrgnnCtC" colab_type="text"
# #### Final DF
# + id="HcoxMfJRiuWw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="61922a64-4551-436a-c90a-4723f044672a"
final_df.columns
# + id="9_RG_hyEiXD5" colab_type="code" colab={}
# create dummies for the categorical bins, then drop the original columns
cat_vars = ['co_app','loan_use','employment','loan_amount','mkt_chan']
for var in cat_vars:
    cat_list = pd.get_dummies(final_df[var], prefix=var)
    temp = final_df.join(cat_list)
    final_df = temp
data_vars = final_df.columns.values.tolist()
to_keep = [i for i in data_vars if i not in cat_vars]
final_df = final_df[to_keep]
# + id="_iYahAuJi4TR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="a726d7a1-c029-4c04-f428-21e2026de25e"
final_df.columns
# + id="_uuEAdQTj1Ti" colab_type="code" colab={}
# re-index to have same columns as the model; dummies this data did not
# produce are filled with 0
final_df_indexed = final_df.reindex(columns = ['ficox', 'lti', 'bcc0300', 'ndi', 'ndisq', 'co_app_0', 'co_app_1',
                                               'loan_use_1', 'loan_use_2', 'loan_use_3', 'loan_use_4', 'employment_1',
                                               'employment_2', 'employment_3', 'employment_4', 'loan_amount_1',
                                               'loan_amount_2', 'loan_amount_3', 'loan_amount_4', 'mkt_chan_1',
                                               'mkt_chan_2', 'mkt_chan_3', 'mkt_chan_5', 'mkt_chan_6',
                                               'mkt_chan_7'], fill_value=0)
# + id="65CbqfZKjCRz" colab_type="code" colab={}
# single-row probe (first application's funding probability)
prob_prediction = fplus_ls_model.predict_proba(final_df_indexed)[0][1]
# + id="sLmxTiVxj_gT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="fd74abdd-1137-4593-fdae-f027c17b5b0c"
# funding probability for every row, then the matching NPV per row
final_df['New_Funding_Score'] = fplus_ls_model.predict_proba(final_df_indexed)[:,1]
# + id="K8v44QJumLAz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="8938be89-aee7-4b9d-aef7-1e0251a34544"
final_df['New_NPV_Score'] = final_df.apply(lambda row : get_npv_calc(row['amount_of_loan_requested__c'],
                                                                     row['New_Funding_Score'], row['utm_source__c']), axis = 1)
# + [markdown] id="oygCHfXWnUFC" colab_type="text"
# #### First DF
# + id="yJS4gZR1nWgZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="5c8cdf00-8bbb-4cf9-9b1b-066b5931321f"
first_df.columns
# + id="vsRWMMvSnWj9" colab_type="code" colab={}
# create dummies for the first-touch categorical bins, then drop the originals
cat_vars = ['co_app','loan_use','employment','loan_amount','mkt_chan']
for var in cat_vars:
    cat_list = pd.get_dummies(first_df[var], prefix=var)
    temp = first_df.join(cat_list)
    first_df = temp
data_vars = first_df.columns.values.tolist()
to_keep = [i for i in data_vars if i not in cat_vars]
first_df = first_df[to_keep]
# + id="7YL05WVenWmX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="c2becce9-fe2a-4ca7-ce60-3e53718167db"
first_df.columns
# + id="_esoBI_VnWo3" colab_type="code" colab={}
# re-index to have same columns as the model (missing dummies -> 0)
first_df_indexed = first_df.reindex(columns = ['ficox', 'lti', 'bcc0300', 'ndi', 'ndisq', 'co_app_0', 'co_app_1',
                                               'loan_use_1', 'loan_use_2', 'loan_use_3', 'loan_use_4', 'employment_1',
                                               'employment_2', 'employment_3', 'employment_4', 'loan_amount_1',
                                               'loan_amount_2', 'loan_amount_3', 'loan_amount_4', 'mkt_chan_1',
                                               'mkt_chan_2', 'mkt_chan_3', 'mkt_chan_5', 'mkt_chan_6',
                                               'mkt_chan_7'], fill_value=0)
# + [markdown] id="LGhQEuyBoKSt" colab_type="text"
# ### Predict
# + id="5WQFdG8_nWyN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="11ea9b98-633d-4149-8307-9f8798e92d7e"
# Score first-touch rows and compute per-row NPV, then export both frames.
first_df['First_Funding_Score'] = fplus_ls_model.predict_proba(first_df_indexed)[:,1]
# + id="nnpr4h6knW5I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="ff391794-15e2-48fe-e6f9-3bd5b9a32303"
first_df['New_NPV_Score'] = first_df.apply(lambda row : get_npv_calc(row['first_amount_of_loan_requested'],
                                                                     row['First_Funding_Score'], row['utm_source__c']), axis = 1)
# + id="Q3jl331WYltP" colab_type="code" colab={}
first_df.to_csv('first_df.csv', index=False)
# + id="4b5scM2FY49o" colab_type="code" colab={}
final_df.to_csv('final_df.csv', index=False)
# + id="r7G8-Pz3ohGX" colab_type="code" colab={}
# NOTE(review): dead code — this bare list literal is evaluated and
# discarded; it merely echoes the final_var column list for reference.
['SF_App_Id', 'Funding_Score', 'Funding_Model_Segment', 'NPV_Score',\
 'NPV_Model_Segment', 'Funding', 'NPV_Actual', 'co_app', 'loan_use__c',\
 'employment_status__c', 'amount_of_loan_requested__c', 'fico__c', \
 'c_LTI', 'bcc0300__c', 'ndi_ratio__c', 'utm_source__c']
# + [markdown] id="zm3LFBB2k3KN" colab_type="text"
# # Austen Data
# + id="3Haxjc2Vk45q" colab_type="code" colab={}
# Using GBQ shout Out to Hughes
# Re-authenticate to BigQuery for the second pull (same scopes as above).
import pandas_gbq
import pydata_google_auth
SCOPES = [
    'https://www.googleapis.com/auth/cloud-platform',
    'https://www.googleapis.com/auth/drive',
]
credentials = pydata_google_auth.get_user_credentials(
    SCOPES,
    auth_local_webserver=False)
# + id="PU39qS9Vk4-G" colab_type="code" colab={}
# Pull raw application fields (2019+) straight from Salesforce for rescoring.
sql = """
SELECT
id
, co_app_verifiable_annual_income__c
, loan_use__c
, employment_status__c
, amount_of_loan_requested__c
, fico__c
, lti__c
, bcc0300__c
, ndi_ratio__c
, utm_source__c
FROM `freedom-dw.salesforce_ffam.application__c` a
WHERE createddate >= '2019-01-01'
"""
# + id="xp8kIRvVk5Az" colab_type="code" colab={}
df1 = pandas_gbq.read_gbq(sql, project_id='ffn-dw-bigquery-prd', credentials=credentials, dialect='standard')
# NOTE(review): df and final_df below all alias df1 (no copies).
df = df1
# + id="m5jHWqdTpsFS" colab_type="code" colab={}
final_df = df
# + id="RJEegdBnPVnl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 289} outputId="f076523a-4933-4099-bb5c-6713c76bcf82"
final_df.head()
# + id="IDcgwXjqpqMB" colab_type="code" colab={}
# Transform variables: bin the raw Salesforce columns with the model helpers.
final_df['co_app'] = final_df['co_app_verifiable_annual_income__c'].apply(get_co_app_cat)
final_df['loan_use'] = final_df['loan_use__c'].apply(get_loan_use_cat)
final_df['employment'] = final_df['employment_status__c'].apply(get_employment_cat)
final_df['loan_amount'] = final_df['amount_of_loan_requested__c'].apply(get_loan_amount_cat)
final_df['mkt_chan'] = final_df['utm_source__c'].apply(get_mkt_chan_cat)
final_df['ficox'] = final_df['fico__c'].apply(get_fico)
final_df['lti'] = final_df['lti__c'].apply(get_lti)
final_df['bcc0300'] = final_df['bcc0300__c'].apply(get_bcc0300)
final_df['ndi'] = final_df['ndi_ratio__c'].apply(get_ndi_ratio)
final_df['ndisq'] = final_df['ndi'] * final_df['ndi']
# + id="1cMjy5_VqPJx" colab_type="code" colab={}
# create dummies for the categorical bins, then drop the originals
cat_vars = ['co_app','loan_use','employment','loan_amount','mkt_chan']
for var in cat_vars:
    cat_list = pd.get_dummies(final_df[var], prefix=var)
    temp = final_df.join(cat_list)
    final_df = temp
data_vars = final_df.columns.values.tolist()
to_keep = [i for i in data_vars if i not in cat_vars]
final_df = final_df[to_keep]
# + id="3H7WJjkhQ9tk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 697} outputId="b921bf49-6139-49e5-d693-046579e4903e"
final_df.info()
# + id="0h89gG90qWHT" colab_type="code" colab={}
# re-index to have same columns as the model (missing dummies -> 0)
final_df_indexed = final_df.reindex(columns = ['ficox', 'lti', 'bcc0300', 'ndi', 'ndisq', 'co_app_0', 'co_app_1',
                                               'loan_use_1', 'loan_use_2', 'loan_use_3', 'loan_use_4', 'employment_1',
                                               'employment_2', 'employment_3', 'employment_4', 'loan_amount_1',
                                               'loan_amount_2', 'loan_amount_3', 'loan_amount_4', 'mkt_chan_1',
                                               'mkt_chan_2', 'mkt_chan_3', 'mkt_chan_5', 'mkt_chan_6',
                                               'mkt_chan_7'], fill_value=0)
# + id="y9bP2JGxYe50" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 527} outputId="219d93ba-4595-4435-ee9b-dc36cf9d5090"
final_df_indexed.info()
# + id="ZhbsJOFoqc16" colab_type="code" colab={}
# single-row probe of the model
prob_prediction = fplus_ls_model.predict_proba(final_df_indexed)[0][1]
# + id="uSC0IjAIqk-I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="bd6298db-b799-41b3-bf1a-47c5e28fb9f7"
# score every application, then compute per-row NPV
final_df['New_Funding_Score'] = fplus_ls_model.predict_proba(final_df_indexed)[:,1]
# + id="T-CNjjksqtbh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="a47891fc-2133-4bd4-b80a-25191574e39e"
final_df['New_NPV_Score'] = final_df.apply(lambda row : get_npv_calc(row['amount_of_loan_requested__c'],
                                                                     row['New_Funding_Score'], row['utm_source__c']), axis = 1)
# + id="GAz0ZF6cc6Vz" colab_type="code" colab={}
# keep only the identifier, inputs of interest, and the new scores
df = final_df[['id', 'amount_of_loan_requested__c', 'utm_source__c', 'New_Funding_Score', 'New_NPV_Score']]
# + id="0xRKlUGQdMvV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="7376883b-b543-4487-aa30-6122c2775c64"
df.head()
# + id="S4WRfck7vooA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="22c3ff0c-de89-4c2b-b1a7-6ab0fc6a23e7"
df.info()
# + id="-W6lWQ3hv5Zx" colab_type="code" colab={}
# Restrict to eligible, non-declined web leads since 2019.
sql_1 = """
SELECT
application_key
FROM `ffam-data-platform.standardized_data.fplus_application`
WHERE
new_lead_datetime >= '2019-01-01'
AND flag_eligible_lead = TRUE
AND lead_type = 'Web'
AND latest_prequal_decision <> 'DECLINED'
"""
# + id="YTKuztiFwXMx" colab_type="code" colab={}
good_leads = pandas_gbq.read_gbq(sql_1, project_id='ffn-dw-bigquery-prd', credentials=credentials, dialect='standard')
# + id="6C1Il6aCwxnU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="58572750-d22f-4165-c9e5-c439cb32b044"
good_leads.info()
# + id="ee_brt5Mwxrn" colab_type="code" colab={}
# inner join: keep only scored applications that are "good" leads
good_leads1 = good_leads.merge(df, left_on='application_key', right_on='id')
# + id="5Zm-KKIIwxxU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="253c33f6-7360-4a90-f431-a241b90f80e3"
good_leads1 = good_leads1.drop(columns=['application_key'])
good_leads1.info()
# + id="LhAdLPhL1_kg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="f3a8bde1-1332-48dd-a869-453e0e287aaa"
# NOTE(review): the de-duplicated frame good_leads2 is created here, but
# the upload cell below sends good_leads1 (with duplicates) — confirm
# which was intended.
good_leads2 = good_leads1.drop_duplicates(subset=['id'], keep='first')
good_leads2.info()
# + [markdown] id="WHGA9WQJjR55" colab_type="text"
# # Send Back to GBQ
# + id="LQ2qINJejT_4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="708a5048-8bab-47b1-b446-1e2ea19e8a7c"
#set destination table to insert data into
destinationtable = 'Jason.Austen_NPV_Fplus'
project_id='ffn-dw-bigquery-prd'
#send data to GBQ (pandas_gqb.to_gbq)
#https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.to_gbq
#params are: dataframe, destination_table, project_id, if_exists (append), and table_schema (list of dicts, name and type)
# NOTE(review): appends, so re-running this cell inserts duplicate rows.
pandas_gbq.to_gbq(dataframe = good_leads1,
                  destination_table = destinationtable,
                  project_id = project_id,
                  if_exists = 'append',
                  table_schema = [{'name':'id',
                                   'type':'STRING'},
                                  {'name':'amount_of_loan_requested__c',
                                   'type':'FLOAT'},
                                  {'name':'utm_source__c',
                                   'type':'STRING'},
                                  {'name':'New_Funding_Score',
                                   'type':'FLOAT'},
                                  {'name':'New_NPV_Score',
                                   'type':'FLOAT'}])
# + [markdown] id="2uF5gMwAU-Gg" colab_type="text"
# # Shit for Aleks
# + id="AMME7wIWfd-v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 275} outputId="67a77ebe-80c0-457e-d83a-c3cea84755c5"
# load data for the retraining hand-off, dropping any incomplete rows
import pandas as pd
df = pd.read_csv('Retrain_Fplus.csv')
df.head()
# + id="N7dG-3U3jXxQ" colab_type="code" colab={}
df1 = df.dropna()
# + id="2LU5axF_jErE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 714} outputId="605dc067-aff6-4173-bbb8-7ff96b8f3f10"
df1.info()
# + id="tT0XeCXdfY-q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 446} outputId="01f82b52-823c-4d65-bafb-87d4ccbf73b0"
#set destination table to insert data into
destinationtable = 'Jason.retrain_fplus'
project_id='ffn-dw-bigquery-prd'
#send data to GBQ (pandas_gqb.to_gbq)
#https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.to_gbq
#params are: dataframe, destination_table, project_id, if_exists (append), and table_schema (list of dicts, name and type)
# NOTE(review): utm_source__c is declared INTEGER here although the
# analogous upload above types it STRING — confirm the intended type.
pandas_gbq.to_gbq(dataframe = df1,
                  destination_table = destinationtable,
                  project_id = project_id,
                  if_exists = 'append',
                  table_schema = [
                      {'name':'SF_App_Id', 'type':'STRING'},
                      {'name':'Funding', 'type':'INTEGER'},
                      {'name':'first_coapp', 'type':'INTEGER'},
                      {'name':'first_loan_use', 'type':'STRING'},
                      {'name':'employment_status__c', 'type':'STRING'},
                      {'name':'first_amount_of_loan_requested', 'type':'INTEGER'},
                      {'name':'first_FICO', 'type':'INTEGER'},
                      {'name':'First_LTI', 'type':'FLOAT'},
                      {'name':'first_bcc0300__c', 'type':'INTEGER'},
                      {'name':'first_ndi_ratio__c', 'type':'FLOAT'},
                      {'name':'utm_source__c', 'type':'INTEGER'},
                      {'name':'ficox', 'type':'INTEGER'},
                      {'name':'lti', 'type':'INTEGER'},
                      {'name':'bcc0300', 'type':'INTEGER'},
                      {'name':'ndi', 'type':'INTEGER'},
                      {'name':'ndisq', 'type':'INTEGER'},
                      {'name':'co_app_0', 'type':'INTEGER'},
                      {'name':'co_app_1', 'type':'INTEGER'},
                      {'name':'loan_use_1', 'type':'INTEGER'},
                      {'name':'loan_use_2', 'type':'INTEGER'},
                      {'name':'loan_use_3', 'type':'INTEGER'},
                      {'name':'loan_use_4', 'type':'INTEGER'},
                      {'name':'employment_1', 'type':'INTEGER'},
                      {'name':'employment_2', 'type':'INTEGER'},
                      {'name':'employment_3', 'type':'INTEGER'},
                      {'name':'employment_4', 'type':'INTEGER'},
                      {'name':'loan_amount_1', 'type':'INTEGER'},
                      {'name':'loan_amount_2', 'type':'INTEGER'},
                      {'name':'loan_amount_3', 'type':'INTEGER'},
                      {'name':'loan_amount_4', 'type':'INTEGER'},
                      {'name':'mkt_chan_1', 'type':'INTEGER'},
                      {'name':'mkt_chan_2', 'type':'INTEGER'},
                      {'name':'mkt_chan_3', 'type':'INTEGER'},
                      {'name':'mkt_chan_5', 'type':'INTEGER'},
                      {'name':'mkt_chan_6', 'type':'INTEGER'},
                      {'name':'mkt_chan_7', 'type':'INTEGER'}])
# + id="kIQhXdeqjn5c" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 446} outputId="510c363b-c967-4f48-d6d9-aa8257744a14"
#set destination table to insert data into
# NOTE(review): duplicate of the cell above with only the table name
# changed ('Jason.retrain' vs 'Jason.retrain_fplus') — the same df1 and
# schema are uploaded twice; confirm both tables are needed.
destinationtable = 'Jason.retrain'
project_id='ffn-dw-bigquery-prd'
#send data to GBQ (pandas_gqb.to_gbq)
#https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.to_gbq
#params are: dataframe, destination_table, project_id, if_exists (append), and table_schema (list of dicts, name and type)
pandas_gbq.to_gbq(dataframe = df1,
                  destination_table = destinationtable,
                  project_id = project_id,
                  if_exists = 'append',
                  table_schema = [
                      {'name':'SF_App_Id', 'type':'STRING'},
                      {'name':'Funding', 'type':'INTEGER'},
                      {'name':'first_coapp', 'type':'INTEGER'},
                      {'name':'first_loan_use', 'type':'STRING'},
                      {'name':'employment_status__c', 'type':'STRING'},
                      {'name':'first_amount_of_loan_requested', 'type':'INTEGER'},
                      {'name':'first_FICO', 'type':'INTEGER'},
                      {'name':'First_LTI', 'type':'FLOAT'},
                      {'name':'first_bcc0300__c', 'type':'INTEGER'},
                      {'name':'first_ndi_ratio__c', 'type':'FLOAT'},
                      {'name':'utm_source__c', 'type':'INTEGER'},
                      {'name':'ficox', 'type':'INTEGER'},
                      {'name':'lti', 'type':'INTEGER'},
                      {'name':'bcc0300', 'type':'INTEGER'},
                      {'name':'ndi', 'type':'INTEGER'},
                      {'name':'ndisq', 'type':'INTEGER'},
                      {'name':'co_app_0', 'type':'INTEGER'},
                      {'name':'co_app_1', 'type':'INTEGER'},
                      {'name':'loan_use_1', 'type':'INTEGER'},
                      {'name':'loan_use_2', 'type':'INTEGER'},
                      {'name':'loan_use_3', 'type':'INTEGER'},
                      {'name':'loan_use_4', 'type':'INTEGER'},
                      {'name':'employment_1', 'type':'INTEGER'},
                      {'name':'employment_2', 'type':'INTEGER'},
                      {'name':'employment_3', 'type':'INTEGER'},
                      {'name':'employment_4', 'type':'INTEGER'},
                      {'name':'loan_amount_1', 'type':'INTEGER'},
                      {'name':'loan_amount_2', 'type':'INTEGER'},
                      {'name':'loan_amount_3', 'type':'INTEGER'},
                      {'name':'loan_amount_4', 'type':'INTEGER'},
                      {'name':'mkt_chan_1', 'type':'INTEGER'},
                      {'name':'mkt_chan_2', 'type':'INTEGER'},
                      {'name':'mkt_chan_3', 'type':'INTEGER'},
                      {'name':'mkt_chan_5', 'type':'INTEGER'},
                      {'name':'mkt_chan_6', 'type':'INTEGER'},
                      {'name':'mkt_chan_7', 'type':'INTEGER'}])
| Validation_F+_Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + Collapsed="false" tags=["setup"]
import matplotlib.pyplot as plt
from matplotlib import colors, ticker
# import cartopy.crs as ccrs
import pandas as pd
import numpy as np
import scipy as sp
from astropy.table import Table
import astropy.units as u
import astropy.coordinates as coord
import arviz as az
import seaborn as sns
import kinesis as kn
import gapipes as gp
kn.set_mpl_style()
# -
df = kn.data.load_hyades_dataset()
df = df.loc[df['Member_r19']!='other'].copy()
def xyz_icrs_to_galactic(xyz):
    """Transform a Cartesian position from the ICRS frame to Galactic.

    Parameters
    ----------
    xyz : length-3 array-like of Cartesian ICRS components.

    Returns
    -------
    numpy array with the same position expressed in Galactic Cartesian
    coordinates.
    """
    icrs_pos = coord.ICRS(*xyz, representation_type="cartesian")
    galactic_pos = icrs_pos.transform_to(coord.Galactic)
    return galactic_pos.cartesian.xyz.value
# Cluster center in Cartesian ICRS coordinates, and its Galactic equivalent
# (used later to draw the membership circle on the Galactic X-Y / X-Z plots).
b_c = np.array([17.16461006, 41.27147655, 13.70518315])
b_c_gal = xyz_icrs_to_galactic(b_c)
# Distance of each star from the cluster center.
# FIX: the original wrote `b_c[None:]`, a slice equivalent to `b_c` itself;
# the intent was `b_c[None, :]` (add a leading axis so the (3,) center
# broadcasts against the (N, 3) positions). The numerical result is
# identical either way — this just restores the intended expression.
r_c = np.linalg.norm(df.g.icrs.cartesian.xyz.value.T - b_c[None, :], axis=1)
df['r_c'] = r_c
# Membership cut radius in pc.
r_cut = 10
# +
# Figure: spatial distribution of the sample (left column: Galactic X-Y and
# X-Z) and the radial profile around the cluster center (right column:
# histogram and number density of r_c).
fig = plt.figure(figsize=(7, 6))
gs = fig.add_gridspec(6, 2)
ax_xy = fig.add_subplot(gs[:4, 0], aspect=0.7)
ax_xz = fig.add_subplot(gs[-2:, 0], sharex=ax_xy, aspect="equal")
# Subset of stars that have a measured radial velocity.
df_rv = df.groupby(df["radial_velocity"].notna()).get_group(True)
ax_numhist = fig.add_subplot(gs[:3, 1])
ax_numdens = fig.add_subplot(gs[3:, 1], sharex=ax_numhist)
ax_xz.set_xlim(-115, 42)
ax_xz.set_ylim((-48, 28))
# Full sample in gray, RV subsample overplotted in blue.
ax_xy.scatter(
    df["gx"], df["gy"], s=1, c="tab:gray", label="all ($N={}$)".format(len(df))
)
ax_xz.scatter(df["gx"], df["gz"], s=1, c="tab:gray")
ax_xy.scatter(
    df_rv["gx"], df_rv["gy"], s=1, c="C0", label="has RV($N={}$)".format(len(df_rv))
)
ax_xz.scatter(df_rv["gx"], df_rv["gz"], s=1, c="C0")
ax_xy.legend(loc="lower right", fontsize=11, frameon=True,markerscale=3,handletextpad=0)
ax_xy.set_ylim(-210, 210)
ax_xy.set_xlabel("$X$ [pc]")
ax_xy.set_ylabel("$Y$ [pc]")
ax_xz.set_xlabel("$X$ [pc]")
ax_xz.set_ylabel("$Z$ [pc]")
# Histogram of distance from cluster center, with log-spaced bins.
nr, bin_edges, patches = ax_numhist.hist(
    df["r_c"], np.logspace(0, 2.5, 64), color="tab:gray"
)
ax_numhist.hist(df_rv["r_c"], np.logspace(0, 2.5, 64), color="C0")
ax_numhist.set_xscale("log")
ax_numhist.set_xlabel("$r_c$ [pc]")
ax_numhist.set_ylabel("count")
# Mark the r_c = 10 pc membership cut.
ax_numhist.axvline(
    r_cut, c="k",
)
bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
# Convert counts to a number density assuming spherical shells.
# NOTE(review): the shell-width normalization uses (bin_edges[1] - bin_edges[0]),
# i.e. the width of the *first* bin only, although the bins are log-spaced —
# confirm this is intended.
numdens = nr / (bin_centers ** 2 * 4 * np.pi) / (bin_edges[1] - bin_edges[0])
numdens_err = (
    np.sqrt(nr) / (bin_centers ** 2 * 4 * np.pi) / (bin_edges[1] - bin_edges[0])
)
ax_numdens.errorbar(bin_centers, numdens, numdens_err, c="tab:gray")
ax_numdens.set_xscale("log")
ax_numdens.set_yscale("log")
ax_numdens.set_xlabel("$r_c$ [pc]")
ax_numdens.set_ylabel("number density [$\mathrm{pc}^{-3}$]")
ax_numdens.axvline(
    r_cut, c="k",
)
from matplotlib.ticker import ScalarFormatter
# Plain (non-scientific) tick labels on the log-scaled histogram x-axis.
ax_numhist.xaxis.set_major_formatter(ScalarFormatter())
# ax_numhist.set_xticks([2,3,4,5],minor=True)
ax_numhist.tick_params(axis="x", which="minor", length=4, width=0.5)
ax_numdens.tick_params(axis="both", which="minor", length=4, width=0.5)
from matplotlib.patches import Circle
# Draw the r_c = 10 pc cut as a circle centered on the cluster center
# (Galactic frame) in both spatial panels.
for circ_radius in [10]:
    circle_xy = Circle(
        [b_c_gal[0], b_c_gal[1]],
        circ_radius,
        facecolor="None",
        edgecolor="k",
        label=r"$r_c=10~\mathrm{pc}$",
    )
    circle_xz = Circle(
        [b_c_gal[0], b_c_gal[2]],
        circ_radius,
        facecolor="None",
        edgecolor="k",
        label=r"$r_c=10~\mathrm{pc}$",
    )
    ax_xy.add_patch(circle_xy)
    ax_xz.add_patch(circle_xz)
# ax_xy.legend()
fig.tight_layout()
fig.savefig("../report/plots/1-summarize-and-plot-sample/sampledist.pdf")
# -
# Number of stars inside the membership radius.
print((df['r_c']<r_cut).sum())
| hyades/1.1-summarize-and-plot-sample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import yfinance as yf # for data
import pandas_datareader.data as web
from pandas_datareader.nasdaq_trader import get_nasdaq_symbols
from pandas_datareader._utils import RemoteDataError
from datetime import datetime, timedelta
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pickle
plt.rcParams['figure.figsize'] = [10, 6]
import warnings
warnings.filterwarnings('ignore')
# ### Getting data
#
# Daily data can be easily imported using `pandas_datareader`. For shorter intervals I will be using `yfinance`
# +
# How yfinance can be used ""
data = yf.download( # or pdr.get_data_yahoo(...
        # tickers list or string as well
        tickers = "SPY",
        # use "period" instead of start/end
        # valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
        # (optional, default is '1mo')
        period = "35d",
        # fetch data by interval (including intraday if period < 60 days)
        # valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
        # (optional, default is '1d')
        interval = "5m",
        # group by ticker (to access via data['SPY'])
        # (optional, default is 'column')
        group_by = 'ticker',
        # adjust all OHLC automatically
        # (optional, default is False)
        auto_adjust = True,
        # download pre/post regular market hours data
        # (optional, default is False)
        prepost = False,
        # use threads for mass downloading? (True/False/Integer)
        # (optional, default is True)
        threads = True,
        # proxy URL scheme to use when downloading?
        # (optional, default is None)
        proxy = None
)
data
# +
# BUG FIX: the period was "90", which is not a valid yfinance period string
# (valid values are listed above, e.g. 1d,5d,1mo,3mo,...). Intraday 5-minute
# bars are also only available for roughly the last 60 days, so "1mo" is a
# valid choice that keeps the 5m interval working.
data = yf.download(tickers = "SPY",period = "1mo",interval = "5m",group_by = 'ticker',auto_adjust = True,prepost = False,threads = True,proxy = None)
# -
# Move the Datetime index into a regular column.
data = data.reset_index()
data
# ### Strategies
#
# - Strategy 1: Volume trading — look at the past data and get stocks where the volumes of today and yesterday are both positive, and prior to that there were two days with negative volume.
#
# - Strategy 2: MACD trading — look at the past data for a certain MACD pattern; we are looking for crossovers that indicate an upward trend.
#
# - Strategy 3: Bollinger trading — look at the past data for the price dipping below the lower Bollinger band, as a buying opportunity. (The code spells this "ballinger".)
# ---
# - Our investigation space will be the list of stocks in series_tickers file
#Download file "series_tickers.p" from github or create your own list of tickers
# NOTE(review): pickle.load can execute arbitrary code from the file — only
# load a series_tickers.p that you trust. The file handle is also never
# explicitly closed; a `with open(...)` block would be safer.
series_tickers = pickle.load(open("series_tickers.p", "rb" ))
df = pd.DataFrame(series_tickers).reset_index()
df
# +
#(df.index == 'JKS').any()
# Extra tickers to append to the downloaded list, as [symbol, security name]
# pairs. NOTE(review): 'APPL' and 'J&J' look like typos for 'AAPL' and 'JNJ';
# the download loops below silently skip symbols that fail, so these would
# simply be ignored — confirm and clean up.
list_of_lists = [['APPL','APPL'],['AAPL','AAPL'],['IBM','IBM'],['AMD','AMD'],['T','T'],['DAL','DAL'],['J&J','J&J'],['INTC','INTC'],['GE','GE'],['NIO','NIO'],['JKS','JKS']]
df_new = pd.DataFrame(list_of_lists, columns = ['Symbol','Security Name'])
# new tickers
series_tickers = pd.concat([df,df_new],axis = 0).set_index('Symbol').iloc[:,0]
# -
# - here will create a class for the stock
# +
class stock:
    """One ticker's recent 5-minute price history plus technical-signal columns.

    Data is (re)downloaded from Yahoo Finance via yfinance on every
    get_df() call, so results depend on when the methods are run.
    Typical usage: call get_df() first, then macd() or moving_avg() to build
    the indicator columns on self.df2, then one of the is_this_a_winner_*
    predicates. Note that macd() and moving_avg() each overwrite self.df2.
    """
    def __init__(self,stock = 'NIO',price = 'Close'):
        # stock: ticker symbol; price: OHLC column used for all indicators.
        self.stock = stock
        self.price = price
    def get_df(self):
        '''
        Download 35 days of 5-minute bars for this ticker and tag each bar
        with its bar-to-bar price change: a "Color" column is 'green' for a
        non-negative change and 'red' for a negative one. Rows are reversed
        so that index 0 is the most recent bar (the winner predicates rely
        on this ordering).
        inputs:
        ------
        None
        returns:
        ------
        df : the prepared dataframe (also stored on self.df for the
             subsequent functions)
        '''
        price = self.price
        df = yf.download(tickers = self.stock,period = "35d",interval = "5m",group_by = 'ticker',auto_adjust = True,prepost = False,threads = True,proxy = None)
        df['close_before'] = df[price].shift(1)
        df['relative_price'] = df[price] - df['close_before']
        df["Color"] = np.where(df["relative_price"]<0, 'red', 'green')
        # Newest bar first, integer index, NaNs (e.g. the first shifted row)
        # replaced with 0.
        df = df.iloc[::-1]
        df = df.reset_index()
        df = df.fillna(0)
        self.df = df
        return self.df
    def macd(self):
        '''
        Build MACD and Bollinger-band columns on top of self.df (call
        get_df() first). Adds:
        - 'macd', 'signal', and 'macd_above' ('green' when MACD is above
          the signal line, 'red' otherwise)
        - 'Middle Band'/'Upper Band'/'Lower Band' (20-bar mean +/- 1.96 std
          of Close) with 'status_lower'/'status_upper' flags
        inputs:
        ------
        None
        returns:
        ------
        df : the annotated dataframe (also stored on self.df2, newest first)
        '''
        price = self.price
        df = self.df
        df = df.iloc[::-1]
        #Calculate the MACD and Signal Line indicators
        #Calculate the Short Term Exponential Moving Average
        ShortEMA = df[price].ewm(span=12, adjust=False).mean() #AKA Fast moving average
        #Calculate the Long Term Exponential Moving Average
        LongEMA = df[price].ewm(span=26, adjust=False).mean() #AKA Slow moving average
        #ShortEMA = df['Adj Close'].rolling(window = 12).mean() #AKA Fast moving average
        #Calculate the Long Term Exponential Moving Average
        #LongEMA = df['Adj Close'].rolling(window = 26).mean() #AKA Slow moving average
        #Calculate the Moving Average Convergence/Divergence (MACD)
        MACD = ShortEMA - LongEMA
        #Calculate the signal line
        signal = MACD.ewm(span=9, adjust=False).mean()
        #signal = MACD.rolling(window = 9).mean()
        df['macd'] = MACD
        df['signal'] = signal
        df['macd_above'] = np.where(df['macd'] > df['signal'],'green','red')
        #### Bollinger band (spelled "ballinger" throughout this codebase)
        df['Middle Band'] =df['Close'].rolling(window=20).mean()
        df['Upper Band'] = df['Middle Band'] + 1.96*df['Close'].rolling(window=20).std()
        df['Lower Band'] = df['Middle Band'] - 1.96*df['Close'].rolling(window=20).std()
        df['status_lower'] = np.where(df['Close'] < df['Lower Band'],'below_ballinger','normal')
        df['status_upper'] = np.where(df['Close'] > df['Upper Band'],'above_ballinger','normal')
        self.df2 = df[::-1]
        return self.df2
    def moving_avg(self,time_frame = 50):
        '''
        Build EMA-crossover columns on top of self.df (call get_df() first).
        Adds 'Ema_26' and 'Ema_50' plus a 'status_moving_avg' column that is
        'green' when Ema_26 is above Ema_50 and 'red' otherwise.
        NOTE(review): this overwrites self.df2, discarding any MACD/Bollinger
        columns created by macd(); the time_frame parameter is currently
        unused (see the commented-out rolling-mean line below).
        inputs:
        ------
        time_frame : unused; kept for backward compatibility
        returns:
        ------
        df : the annotated dataframe (also stored on self.df2, newest first)
        '''
        df = self.df
        price = self.price
        df = df.iloc[::-1]
        # Exponential moving averages over the chosen price column.
        emasUsed = [26,50]
        for ema in emasUsed:
            df['Ema_' + str(ema)] = round(df[price].ewm(span = ema, adjust = False).mean(),2)
        # df['moving_avg'] =df['Close'].rolling(window=time_frame).mean()
        df['status_moving_avg'] = np.where(df['Ema_26'] > df['Ema_50'],'green','red')
        self.df2 = df[::-1]
        return self.df2
    def is_this_a_winner_moving_avg(self,colors = ['green','red']):
        '''
        Moving-average strategy: check whether the status_moving_avg values
        of the two most recent bars match the given pattern (requires
        moving_avg() to have been called first).
        NOTE(review): `colors` is a mutable default argument; harmless here
        because it is only read, never mutated.
        inputs:
        ------
        colors : expected status values ('green'/'red') for the newest bar
                 and the one before it; only the first two entries are used
        returns:
        ------
        result : Boolean, True if the conditions in the colors list are met
        '''
        df = self.df2
        # np.where on a scalar condition yields a 0-d array; [()] below
        # extracts the Python scalar from it.
        result = np.where(df['status_moving_avg'][0]== colors[0] and df['status_moving_avg'][1]==colors[1],True,False)
        #result = np.where(df['Color'][0]=='green' and df['Color'][1]=='red' and df['Color'][2]=='red' and df['Color'][3]=='red' ,True,False)
        return result[()]
    def is_this_a_winner_volume(self,colors = ['green','green','red','red']):
        '''
        Volume strategy: check whether the Color values (sign of the
        bar-to-bar price change) of the four most recent bars match the
        given pattern (requires get_df() to have been called first).
        inputs:
        ------
        colors : expected colors for today and the previous 3 bars; 'green'
                 indicates a positive change and 'red' a negative one
        returns:
        ------
        result : Boolean, True if the conditions in the colors list are met
        '''
        df = self.df
        result = np.where(df['Color'][0]== colors[0] and df['Color'][1]==colors[1] and df['Color'][2]==colors[2] and df['Color'][3]==colors[3],True,False)
        #result = np.where(df['Color'][0]=='green' and df['Color'][1]=='red' and df['Color'][2]=='red' and df['Color'][3]=='red' ,True,False)
        return result[()]
    def is_this_a_winner_macd(self,colors = ['green','green','red']):
        '''
        MACD strategy: check whether the macd_above values of the three most
        recent bars match the given pattern (requires macd() to have been
        called first).
        inputs:
        ------
        colors : expected macd_above values ('green' when MACD is above the
                 signal line, 'red' otherwise) for the three newest bars
        returns:
        ------
        result : Boolean, True if the conditions in the colors list are met
        '''
        df = self.df2
        result = np.where(df['macd_above'][0]== colors[0] and df['macd_above'][1]==colors[1] and df['macd_above'][2]==colors[2],True,False)
        #result = np.where(df['Color'][0]=='green' and df['Color'][1]=='red' and df['Color'][2]=='red' and df['Color'][3]=='red' ,True,False)
        return result[()]
    def is_this_a_winner_ballinger(self,status_lower = ['normal','below_ballinger','below_ballinger']):
        '''
        Bollinger strategy: check whether the status_lower values of the
        three most recent bars match the given pattern (requires macd() to
        have been called first — moving_avg() does not create status_lower).
        inputs:
        ------
        status_lower : expected statuses for the three newest bars, each
                       'normal' or 'below_ballinger'
        returns:
        ------
        result : Boolean, True if the conditions in the status list are met
        '''
        df = self.df2
        result = np.where(df['status_lower'][0]== status_lower[0] and df['status_lower'][1]==status_lower[1] and df['status_lower'][2]==status_lower[2],True,False)
        #result = np.where(df['Color'][0]=='green' and df['Color'][1]=='red' and df['Color'][2]=='red' and df['Color'][3]=='red' ,True,False)
        return result[()]
    def plot_df_bar(self):
        '''
        Volume strategy: bar plot of volume over time — green bars for bars
        with a non-negative price change, red bars for a negative change
        (requires get_df() to have been called first).
        '''
        df = self.df
        df1 = df[df['relative_price']<0]
        df2 = df[df['relative_price']>=0]
        plt.bar(df1['Datetime'], df1['Volume'], color='r')
        plt.bar(df2['Datetime'], df2['Volume'], color='g')
        plt.show()
        return
    def plot_df_macd(self):
        '''
        MACD strategy: line plot of the signal and MACD lines for bars 1-4
        (the newest bar is skipped by the iloc[1:5] slice; requires macd()
        to have been called first).
        '''
        df = self.df2.iloc[1:5,:]
        plt.plot(df['Datetime'],df['signal'])
        plt.plot(df['Datetime'],df['macd']);
        return
    def plot_df_ballinger(self):
        '''
        Bollinger strategy: line plot of the close price (green) and the
        lower (red) / upper (blue) Bollinger bands for bars 1-4 (requires
        macd() to have been called first).
        '''
        df = self.df2.iloc[1:5,:]
        plt.plot(df['Datetime'], df['Close'], color='g')
        plt.plot(df['Datetime'], df['Lower Band'], color='r')
        plt.plot(df['Datetime'], df['Upper Band'], color='b')
        plt.show()
        return
# +
### Example: volume trading :
# how about TSLA
tsla = stock('TSLA')
tsla.get_df()  # download 35 days of 5-minute bars
# -
tsla.moving_avg()  # builds Ema_26 / Ema_50 and status_moving_avg on df2
tsla.is_this_a_winner_moving_avg(colors = ['green','green'])
# +
# let's test TSLA stock with a condition that we know it does meet
conditions= ['green','red','red','red']
tsla.is_this_a_winner_volume(colors = conditions)
# -
tsla.plot_df_bar()  # volume bars colored by the sign of the price change
# +
### Example:
# macd trading
# let's try TSLA stock again but using macd strategy
# -
# Example: MACD / Bollinger strategies on SLM.
tsla = stock('SLM')
tsla.get_df()
# BUG FIX: the calls below need columns that only stock.macd() creates
# ('status_lower', 'macd_above', 'signal', 'Upper Band'/'Lower Band').
# The original called tsla.moving_avg(), which overwrites self.df2 *without*
# those columns, so is_this_a_winner_ballinger / _macd and the plots would
# raise KeyError.
tsla.macd()
tsla.is_this_a_winner_ballinger(status_lower = ['normal','normal','normal'])
tsla.is_this_a_winner_macd(colors = ['red','red','red'])
tsla.is_this_a_winner_volume(colors = ['green','green','red','red'])
tsla.plot_df_macd()
tsla.plot_df_ballinger()
# ### Hunting for winners
# - Now let's look at out list of stocks and find ones that meet our condition
# +
winners_moving_avg_26_50 = []  # tickers whose EMA-26/EMA-50 pattern matches our condition
#i = 0
# Scan every ticker; a failure for one symbol (network error, invalid
# ticker, too little data) should not abort the whole scan, so each one is
# wrapped in its own try block.
for ticker, name in series_tickers.items():  # FIX: .iteritems() was removed in pandas 2.0
    # i = i +1
    try:
        my_stock = stock(ticker)
        my_stock.get_df()
        my_stock.moving_avg()
        if my_stock.is_this_a_winner_moving_avg(colors=['green','red']):
            print(name,'is a winner')
            winners_moving_avg_26_50.append(ticker)
        else:
            print(name,'is not a winner')
    except Exception:  # FIX: was a bare `except:`, which also swallows KeyboardInterrupt
        continue
    #if i ==4:
    #   break
# -
winners_moving_avg_26_50
# - Finally let's look at the list of our winners
# BUG FIX: was `winners`, a name never defined in this notebook (NameError);
# the list built by the loop above is winners_moving_avg_26_50.
winners_moving_avg_26_50
# +
# very short list , we need to adjust the pattern so we got more stocks
# +
winners_macd = []  # tickers matching the relaxed pattern below
#i = 0
# NOTE(review): despite the name `winners_macd`, this loop applies the
# moving-average strategy (moving_avg / is_this_a_winner_moving_avg), and
# is_this_a_winner_moving_avg only inspects the first two entries of
# `colors`, so the trailing 'red','red' values are ignored — confirm intent.
for ticker, name in series_tickers.items():  # FIX: .iteritems() was removed in pandas 2.0
    #i = i +1
    try:
        my_stock = stock(ticker)
        my_stock.get_df()
        my_stock.moving_avg()
        if my_stock.is_this_a_winner_moving_avg(colors = ['green','green','red','red']):
            print(name,'is a winner')
            #my_stock.plot_df_ballinger()
            #my_stock.plot_df_macd()
            winners_macd.append(name)
        else:
            print(name,'is not a winner')
    except Exception:  # FIX: was a bare `except:`
        continue
    #if i ==1:
    #break
# -
winners_macd
winners_macd
# +
##################### run all the analysis together #########################
# +
winners_ballinger = []  # tickers whose price dipped below the lower Bollinger band
#i = 0
for ticker, name in series_tickers.items():  # FIX: .iteritems() was removed in pandas 2.0
    #i = i +1
    try:
        my_stock = stock(ticker)
        my_stock.get_df()
        my_stock.macd()  # builds the MACD and Bollinger columns on self.df2
        if my_stock.is_this_a_winner_ballinger(status_lower = ['normal','below_ballinger','below_ballinger']):
            print(name,'is a winner')
            my_stock.plot_df_ballinger()
            winners_ballinger.append(ticker)
        else:
            print(name,'is not a winner')
    except Exception:  # FIX: was a bare `except:`
        continue
    #if i ==1:
    #break
winners_macd = []  # tickers whose MACD just crossed above its signal line
#i = 0
for ticker, name in series_tickers.items():
    #i = i +1
    try:
        my_stock = stock(ticker)
        my_stock.get_df()
        my_stock.macd()
        if my_stock.is_this_a_winner_macd(colors = ['green','red','red']):
            print(name,'is a winner')
            my_stock.plot_df_ballinger()  # NOTE(review): Bollinger plot in the MACD loop — confirm intent
            winners_macd.append(ticker)
        else:
            print(name,'is not a winner')
    except Exception:  # FIX: was a bare `except:`
        continue
    #if i ==1:
    #break
# Union of winners from both strategies (as a set).
set(winners_macd + winners_ballinger)
# -
set(winners_macd + winners_ballinger)
# Same union via set.union, as a list.
list(set().union(winners_macd,winners_ballinger))
# Tickers flagged by *both* strategies.
list(set(winners_macd) & set(winners_ballinger))
# +
# Quick demo: collections.Counter can rank dict entries by descending count.
from collections import Counter

d = dict(A=3, B=2, C=9)
c = Counter(d)
c.most_common()
# -
| short_term/short_term_moving_avg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="UmB7C4ljZ9ZF"
import cv2 as cv
from glob import glob
import numpy as np
# + colab={"base_uri": "https://localhost:8080/"} id="qm0g-M-MaBx9" outputId="2326f1e6-7642-49f4-f8a1-ce5a4743476f"
# Detect straight road lines: grayscale -> Canny edges -> probabilistic
# Hough transform, then draw each detected segment back onto the image.
image = cv.imread('road.jpg')
grayscale = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
edge_map = cv.Canny(grayscale, 255, 255)
cv.imwrite('road_lines.jpg', edge_map)
segments = cv.HoughLinesP(edge_map, 1, np.pi/180, 280, minLineLength=100, maxLineGap=255)
for segment in segments:
    x_start, y_start, x_end, y_end = segment[0]
    cv.line(image, (x_start, y_start), (x_end, y_end), (255, 0, 0), 3)
cv.imwrite('road_out.jpg', image)
| P20132/road_detection.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Lecture 2 - Key datatypes & operators in R
# + [markdown] slideshow={"slide_type": "slide"}
# ### Lecture learning objectives:
#
# By the end of this lecture and worksheet 2, students should be able to:
#
# * Explain how the assignment symbol, `<-` differs from `=` in R
# * Create in R, and define and differentiate in English, the below listed key datatypes in R:
# - logical, numeric, character and factor vectors
# - lists
# - data frames and tibbles
# * Use R to determine the type and structure of an object
# * Explain the distinction between names and values, and when R will copy an object.
# * Use the three subsetting operators, `[[`, `[`, and `$`, to subset single and multiple elements from vectors and data frames, lists and matrices
# * Compute numeric and boolean values using their respective types and operations
# + [markdown] slideshow={"slide_type": "slide"}
# ### Getting help in R
#
# No one, even experienced, professional programmers remember what every function does, nor do they remember every possible function argument/option. So both experienced and new programmers (like you!) need to look things up, A LOT!
#
# One of the most efficient places to look for help on how a function works is the R help files. Let’s say we wanted to pull up the help file for the `max()` function. We can do this by typing a question mark in front of the function we want to know more about.
# + [markdown] slideshow={"slide_type": "slide"}
# `?max`
# + [markdown] slideshow={"slide_type": "notes"}
# At the very top of the file, you will see the function itself and the package it is in (in this case, it is base). Next is a description of what the function does. You’ll find that the most helpful sections on this page are “Usage”, “Arguments” and "Examples".
#
# - **Usage** gives you an idea of how you would use the function when coding--what the syntax would be and how the function itself is structured.
# - **Arguments** tells you the different parts that can be added to the function to make it more simple or more complicated. Often the “Usage” and “Arguments” sections don’t provide you with step by step instructions, because there are so many different ways that a person can incorporate a function into their code. Instead, they provide users with a general understanding as to what the function could do and parts that could be added. At the end of the day, the user must interpret the help file and figure out how best to use the functions and which parts are most important to include for their particular task.
# - The **Examples** section is often the most useful part of the help file as it shows how a function could be used with real data. It provides a skeleton code that the users can work off of.
# + [markdown] slideshow={"slide_type": "notes"}
# Below is a useful graphical summary of the help docs that might be useful to start getting you oriented to them:
#
# <img src="https://socviz.co/assets/ch-09-read-a-help-page.png">
#
# *Source: https://socviz.co/appendix.html#a-little-more-about-r*
# + [markdown] slideshow={"slide_type": "slide"}
# ### The assignment symbol, `<-`
#
# - R came from S, S used `<-`
# - S was inspired from APL, which also used `<-`
# - APL was designed on a specific keyboard, which had a key for `<-`
# - At that time there was no `==` for testing equality, it was tested with `=`, so something else need to be used for assignment.
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/9/9f/APL-keybd2.svg/410px-APL-keybd2.svg.png">
#
# source: https://colinfay.me/r-assignment/
# + [markdown] slideshow={"slide_type": "slide"}
# - Nowadays, `=` can also be used for assignment, however there are some things to be aware of...
# + [markdown] slideshow={"slide_type": "fragment"}
# - stylistically, `<-` is preferred over `=` for readability
# + [markdown] slideshow={"slide_type": "fragment"}
# - `<-` and `->` are valid in R, the latter can be useful in pipelines (more on this in data wrangling)
# + [markdown] slideshow={"slide_type": "fragment"}
# - `<-` and `=` have different emphasis in regards to environments
# + [markdown] slideshow={"slide_type": "fragment"}
# - **we expect you to use `<-` in MDS for object assignment in R**
# + [markdown] slideshow={"slide_type": "slide"}
# #### Assignment readability
#
# Consider this code:
#
# ```
# c <- 12
# d <- 13
# ```
# + [markdown] slideshow={"slide_type": "fragment"}
# Which equality is easier to read?
#
# ```
# e = c == d
# ```
#
# or
#
# ```
# e <- c == d
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# #### Assignment environment
#
# What value does x hold at the end of each of these code chunks?
# + [markdown] slideshow={"slide_type": "fragment"}
#
#
# ```median(x = 1:10)```
#
# vs
#
# ```median(x <- 1:10)```
# + [markdown] slideshow={"slide_type": "slide"}
# Here, in the first example where `=` is used to set `x`, `x` only exists in the `median` function call, so we are returned the result from that function call, however, when we call `x` later, it does not exist and so R returns an error.
# + [markdown] slideshow={"slide_type": "fragment"}
# ```
# median(x = 1:10)
# x
# ```
#
# ```
# 5.5
# Error in eval(expr, envir, enclos): object 'x' not found
# Traceback:
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# Here, in the second example where `<-` is used to set `x`, `x` exists in the median function call, **and** in the global environment (outside the `median` function call). So when we call `x` later, it **does** exist and so R returns the value that the name `x` is bound to.
# + [markdown] slideshow={"slide_type": "fragment"}
# ```
# median(x <- 1:10)
# x
# ```
#
#
# ```
# 5.5
# 1 2 3 4 5 6 7 8 9 10
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# #### What does assignment do in R?
#
# When you type this into R: `x <- c(1, 2, 3)`
#
# This is what R does:
#
# <img src="https://d33wubrfki0l68.cloudfront.net/bd90c87ac98708b1731c92900f2f53ec6a71edaf/ce375/diagrams/name-value/binding-1.png" width=300 align="left">
#
# *Source: [Advanced R](https://adv-r.hadley.nz/) by <NAME>*
# + [markdown] slideshow={"slide_type": "fragment"}
# What does this mean? It means that even if you don't bind a name to an object in R using ` <- `, it still exists somewhere in memory during the R session it was created in. This is typically not a problem unless your data sets are very large.
# + [markdown] slideshow={"slide_type": "slide"}
# ### A note on names
#
# #### Rules for syntactic names:
# - May use: letters, digits, `.` and `_`
# - Cannot begin with `_` or a digit
# - Cannot use reserved words (e.g., `for`, `if`, `return`)
#
# #### How to manage non-syntactic names
# - Usually come across these when reading in someone else's data
# - Backticks, \`, can be used manage these cases (e.g., ``` `_abc` <- 1 ```)
# - If your data contains these, use R to rename things to make them syntactic (for your future sanity)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Key datatypes in R
#
# <img src="img/r-datatypes.png" width=1000>
#
# *note - There are no scalars in R, they are represented by vectors of length 1.*
#
# *Source: [Advanced R](https://adv-r.hadley.nz/) by <NAME>*
# + [markdown] slideshow={"slide_type": "notes"}
# - `NULL` is not a vector, but related and frequently functions in the role of a generic zero length vector.
# + [markdown] slideshow={"slide_type": "slide"}
# #### What is a data frame?
#
# From a data perspective, it is a rectangle where the rows are the observations:
#
# <img src="https://github.com/UBC-DSCI/introduction-to-datascience/blob/master/img/obs.jpeg?raw=true" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### What is a data frame?
#
# and the columns are the variables:
#
# <img src="https://github.com/UBC-DSCI/introduction-to-datascience/blob/master/img/vars.jpeg?raw=true" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### What is a data frame?
#
# From a computer programming perspective, in R, a data frame is a special subtype of a list object whose elements (columns) are vectors.
#
# <img src="https://github.com/UBC-DSCI/introduction-to-datascience/blob/master/img/vectors.jpeg?raw=true" width=800>
# + [markdown] slideshow={"slide_type": "fragment"}
# **Question:** What do you notice about the elements of each of the vectors in this data frame?
# + [markdown] slideshow={"slide_type": "slide"} tags=[]
# #### What is a vector?
#
# - objects that can contain 1 or more elements
# - elements are ordered
# - must all be of the same type (e.g., double, integer, character, logical)
#
# <img src="https://github.com/UBC-DSCI/introduction-to-datascience/blob/master/img/vector.jpeg?raw=true" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### How are vectors different from a list?
# <img src="https://github.com/UBC-DSCI/introduction-to-datascience/blob/master/img/vec_vs_list.jpeg?raw=true" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Reminder: what do lists have to do with data frames?
#
# <img src="https://github.com/UBC-DSCI/introduction-to-datascience/blob/master/img/dataframe.jpeg?raw=true" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# ### A bit more about Vectors
#
# Your closest and most important friend in R
#
# <img src="https://media.giphy.com/media/EQCgmS4lwDS8g/giphy.gif" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Creating vectors and vector types
# + slideshow={"slide_type": "fragment"}
char_vec <- c("joy", "peace", "help", "fun", "sharing")
char_vec
typeof(char_vec)
# + slideshow={"slide_type": "slide"}
log_vec <- c(TRUE, TRUE, FALSE, FALSE, TRUE)
log_vec
typeof(log_vec)
# + slideshow={"slide_type": "slide"}
double_vec <- c(1, 2, 3, 4, 5)
double_vec
typeof(double_vec)
# + slideshow={"slide_type": "slide"}
int_vec <- c(1L, 2L, 3L, 4L, 5L)
int_vec
typeof(int_vec)
# + [markdown] slideshow={"slide_type": "slide"}
# `str` is a useful command to get even more information about an object:
# + slideshow={"slide_type": "fragment"}
str(int_vec)
# + [markdown] slideshow={"slide_type": "slide"}
# #### What happens to vectors of mixed type?
#
#
# + slideshow={"slide_type": "fragment"}
mixed_vec <- c("joy", 5.6, TRUE, 1L, "sharing")
typeof(mixed_vec)
# + [markdown] slideshow={"slide_type": "fragment"}
# Hierarchy for coercion:
#
# character → double → integer → logical
# + [markdown] slideshow={"slide_type": "slide"}
# #### Useful functions for testing type and forcing coercion:
#
# - `is.logical()`, `is.integer()`, `is.double()`, and `is.character()` returns `TRUE` or `FALSE`, depending on type of object and function used.
# - `as.logical()`, `as.integer()`,` as.double()`, or `as.character()` coerce vector to type specified by function name.
# + [markdown] slideshow={"slide_type": "slide"}
# #### How to subset and modify vectors
#
# <img src="https://media.giphy.com/media/l4pTocra1lFDomV5S/giphy.gif" width=700>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Subsetting
#
# - R counts from 1!!!
# + slideshow={"slide_type": "fragment"}
name <- c("T", "i", "f", "f", "a", "n", "y")
# + [markdown] slideshow={"slide_type": "fragment"}
# What letter will I get in R? What would I get in Python?
# + slideshow={"slide_type": "fragment"}
name[2]
# + [markdown] slideshow={"slide_type": "slide"}
# What letters will I get in R? What would I get in Python?
#
# ```
# name <- c("T", "i", "f", "f", "a", "n", "y")
# ```
# + slideshow={"slide_type": "fragment"}
name[2:4]
# + [markdown] slideshow={"slide_type": "slide"}
# What letter will I get in R? What would I get in Python?
#
# ```
# name <- c("T", "i", "f", "f", "a", "n", "y")
# ```
# + slideshow={"slide_type": "fragment"}
name[-1]
# + [markdown] slideshow={"slide_type": "slide"}
# How do I get the last element in a vector in R?
#
# ```
# name <- c("T", "i", "f", "f", "a", "n", "y")
# ```
# + slideshow={"slide_type": "fragment"}
name[length(name)]
# + [markdown] slideshow={"slide_type": "slide"}
# #### Modifying vectors
#
# We can combine the assignment symbol and subsetting to modify vectors:
#
# ```
# name <- c("T", "i", "f", "f", "a", "n", "y")
# ```
# + slideshow={"slide_type": "fragment"}
name[1] <- "t"
name
# + [markdown] slideshow={"slide_type": "slide"}
# This can be done for more than one element:
# + slideshow={"slide_type": "fragment"}
name[1:3] <- c("T", "I", "F")
name
# + [markdown] slideshow={"slide_type": "slide"}
# What if you ask for elements that are not there?
# + slideshow={"slide_type": "fragment"}
name[8:12]
# + [markdown] slideshow={"slide_type": "slide"}
# This syntax also lets you add additional elements:
# + slideshow={"slide_type": "fragment"}
name[8:12] <- c("-", "A", "n", "n", "e")
name
# + [markdown] slideshow={"slide_type": "slide"}
# #### What happens when you modify a vector in R?
#
# Consider:
#
# ```
# x <- c(1, 2, 3)
# y <- x
#
# y[3] <- 4
# y
# #> [1] 1 2 4
# ```
#
# What is happening in R's memory for each line of code?
#
# + [markdown] slideshow={"slide_type": "skip"}
# |Code | R's memory representation |
# |---|---|
# | `x <- c(1, 2, 3)` | <img src="https://d33wubrfki0l68.cloudfront.net/bd90c87ac98708b1731c92900f2f53ec6a71edaf/ce375/diagrams/name-value/binding-1.png" width=200 align="left"> |
# | `y <- x` | <img src="https://d33wubrfki0l68.cloudfront.net/bdc72c04d3135f19fb3ab13731129eb84c9170af/f0ab9/diagrams/name-value/binding-2.png" width=200 align="left"> |
# | `y[[3]] <- 4` | <img src="https://d33wubrfki0l68.cloudfront.net/ef9f480effa2f1d0e401d1f94218d0cf118433c0/b56e9/diagrams/name-value/binding-3.png" width=200 align="left"> |
#
# This is called "copy-on-modify".
#
# *Source: [Advanced R](https://adv-r.hadley.nz/) by <NAME>*
# + [markdown] slideshow={"slide_type": "slide"}
# #### Why copy-on-modify
# - Since there are no scalars in R, vectors are essentially immutable
# - If you change one element of the vector, you have to copy the whole thing to update it
#
# #### Why do we care about knowing this?
# - Given that data frames are built on-top of vectors, this has implications for speed when working with large data frames
# + [markdown] slideshow={"slide_type": "slide"}
# #### Why vectors?
#
# Vectorized operations!
# + slideshow={"slide_type": "fragment"}
c(1, 2, 3, 4) + c(1, 1, 1, 1)
# + [markdown] slideshow={"slide_type": "slide"}
# But watch out for vector recycling in R!
#
# This makes sense:
# + slideshow={"slide_type": "fragment"}
c(1, 2, 3, 4) + c(1)
# + [markdown] slideshow={"slide_type": "slide"}
# but this does not!
# + slideshow={"slide_type": "fragment"}
c(1, 2, 3, 4) + c(1, 2)
# + [markdown] slideshow={"slide_type": "slide"}
# A list of vector operators here: [R Operators cheat sheet](https://cran.r-project.org/doc/contrib/Baggott-refcard-v2.pdf)
#
# One to watch out for, logical and (`&`) and or (`|`) operators come in both an elementwise and first element comparison form, for example:
# + slideshow={"slide_type": "fragment"}
# compares each elements of each vector by position
c(TRUE, TRUE, TRUE) & c(FALSE, TRUE, TRUE)
# + slideshow={"slide_type": "fragment"}
# compares only the first elements of each vector
c(TRUE, TRUE, TRUE) && c(FALSE, TRUE, TRUE)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Extending our knowledge to data frames
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src="https://github.com/UBC-DSCI/introduction-to-datascience/blob/master/img/dataframe.jpeg?raw=true" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Getting to know a data frame
# + slideshow={"slide_type": "fragment"}
head(mtcars)
# + slideshow={"slide_type": "fragment"}
str(mtcars)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Subsetting and modifying data frames
#
# There are 3 operators that can be used when subsetting data frames: `[`, `$` and `[[`
#
# | Operator | Example use | What it returns |
# |----------|-------------|-----------------|
# | `[` | `mtcars[1:10, 2:4]` | rows 1-10 for columns 2-4 of the data frame, as a data frame |
# | `[` | `mtcars[1:10, ]` | rows 1-10 for all columns of the data frame, as a data frame |
# | `[` | `mtcars[1]` | the first column of the data frame, as a data frame |
# | `[[` | `mtcars[[1]]` | the first column of the data frame, as a vector |
# | `$` | `mtcars$cyl` | the column the corresponds to the name that follows the `$`, as a vector |
#
# Note that `$` and `[[` remove a level of structure from the data frame object (this happens with lists too).
# + [markdown] slideshow={"slide_type": "slide"}
# ### Other R objects
#
# We are focusing on vectors and data frames in this lecture because these are the objects you will encounter most frequently in R for data science. These subsetting (and modification) syntax also work on other objects in R, in the same way.
#
# Examples that you will encounter in the worksheet and lab are matrices and lists.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Logical indexing of data frames
#
# We can also use logical statements to filter for rows containing certain values, or values above or below a threshold. For example, if we want to filter for rows where the cylinder value in the `cyl` column is 6 in the mtcars data frame shown below:
# + slideshow={"slide_type": "fragment"}
options(repr.matrix.max.rows = 10) # limit number of rows that are output
mtcars
# + slideshow={"slide_type": "slide"}
mtcars[mtcars$cyl == 6, ]
# + [markdown] slideshow={"slide_type": "slide"}
# Another example:
# + slideshow={"slide_type": "fragment"}
mtcars[mtcars$hp > 200, ]
# + [markdown] slideshow={"slide_type": "slide"}
# #### Modifying data frames
#
# Similar to vectors, we can combine the assignment symbol and subsetting to modify data frames.
#
# For example, here we create a new column called `kml`:
# + slideshow={"slide_type": "fragment"}
mtcars$kml <- mtcars$mpg / 2.3521458
head(mtcars)
# + [markdown] slideshow={"slide_type": "fragment"}
# The same syntax works to overwrite an existing column.
# + [markdown] slideshow={"slide_type": "slide"}
# #### What happens when we modify an entire column? or a row?
#
# To answer this we need to look at how data frames are represented in R's memory.
# + [markdown] slideshow={"slide_type": "slide"}
# #### How R represents data frames:
#
# - Remember that data frames are lists of vectors
# - As such, they don't store the values themselves, they store references to them:
#
# ```d1 <- data.frame(x = c(1, 5, 6), y = c(2, 4, 3))```
#
# <img src="https://d33wubrfki0l68.cloudfront.net/80d8995999aa240ff4bc91bb6aba2c7bf72afc24/95ee6/diagrams/name-value/dataframe.png" width="200">
#
# *Source: [Advanced R](https://adv-r.hadley.nz/) by <NAME>*
# + [markdown] slideshow={"slide_type": "slide"}
# #### How R represents data frames:
#
# If you modify a column, only that column needs to be modified; the others will still point to their original references:
#
# ``` d2 <- d1
# d2[, 2] <- d2[, 2] * 2```
#
# <img src="https://d33wubrfki0l68.cloudfront.net/c19fd7e31bf34ceff73d0fac6e3ea22b09429e4a/23d8d/diagrams/name-value/d-modify-c.png" width="250">
#
# *Source: [Advanced R](https://adv-r.hadley.nz/) by <NAME>*
# + [markdown] slideshow={"slide_type": "slide"}
# #### How R represents data frames:
#
# However, if you modify a row, every column is modified, which means every column must be copied:
#
# ```d3 <- d1
# d3[1, ] <- d3[1, ] * 3```
#
# <img src="https://d33wubrfki0l68.cloudfront.net/36df61f54d1ac62e066fb814cb7ba38ea6047a74/facf8/diagrams/name-value/d-modify-r.png" width="400">
#
# *Source: [Advanced R](https://adv-r.hadley.nz/) by <NAME>*
# + [markdown] slideshow={"slide_type": "slide"}
# #### An exception to copy-on-modify
#
# If an object has a single name bound to it, R will modify it in place:
#
# ```v <- c(1, 2, 3)```
#
# <img src="https://d33wubrfki0l68.cloudfront.net/496ac87edf04d7e235747c3cf4a4e66deca754f2/3ac04/diagrams/name-value/v-inplace-1.png" width="200">
#
# ```v[[3]] <- 4```
#
# <img src="https://d33wubrfki0l68.cloudfront.net/a6ef7ab337f156cdb2c21816923368383bc2e858/1f8bb/diagrams/name-value/v-inplace-2.png" width="200">
#
#
# - Hence, modify in place can be a useful optimization for speeding up code.
# - However, there are some complications that make predicting exactly when R applies this optimisation challenging (see [here](https://adv-r.hadley.nz/names-values.html#modify-in-place) for details)
# - There is one other time R will do this, we will cover this when we get to environments.
#
# *Source: [Advanced R](https://adv-r.hadley.nz/) by <NAME>*
# + [markdown] slideshow={"slide_type": "slide"}
# ### Writing readable R code
#
# - WriTing AND reading (code) TaKes cognitive RESOURCES, & We only hAvE so MUCh!
#
# - To help free up cognitive capacity, we will follow the [tidyverse style guide](https://style.tidyverse.org/index.html)
#
# <img src="img/tidyverse-hex.png" width=200>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Sample code **not** in tidyverse style
#
# Can we spot what's wrong?
# + [markdown] slideshow={"slide_type": "fragment"}
# ```
# library(tidyverse)
# us.2015.econ=read_csv( "data/state_property_data.csv")
# us.2016.vote=read_csv( "data/2016_presidential_election_state_vote.csv")
# stateData=left_join (us.2015.econ,us.2016.vote) %>%
# filter(party!="Not Applicable") %>%
# mutate(meanCommuteHours=mean_commute_minutes/60)
# ggplot(stateData, aes (x=mean_commute_minutes, y=med_prop_val, color=party)) +
# geom_point()+
# xlab( "Income (USD)" )+
# ylab("Median property value (USD)")+
# scale_colour_manual (values = c("blue","red"))+
# scale_x_continuous (labels = scales::dollar_format())+
# scale_y_continuous (labels = scales::dollar_format())
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# #### Sample code in tidyverse style
# + [markdown] slideshow={"slide_type": "fragment"}
# ```
# library(tidyverse, quietly = TRUE)
# us_2015_econ <- read_csv("data/state_property_data.csv")
# us_2016_vote <- read_csv("data/2016_presidential_election_state_vote.csv")
# state_data <- left_join(us_2015_econ, us_2016_vote) %>%
# filter(party != "Not Applicable") %>%
# mutate(mean_commute_hours = mean_commute_minutes / 60)
# ggplot(state_data, aes(x = med_income, y = med_prop_val, color = party)) +
# geom_point() +
# xlab("Income (USD)") +
# ylab("Median property value (USD)") +
# scale_colour_manual(values = c("blue", "red")) +
# scale_x_continuous(labels = scales::dollar_format()) +
# scale_y_continuous(labels = scales::dollar_format())
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# ### What did we learn today?
#
# - How to get help in R
#
# - How the ` <- ` differs from `=` in R
#
# - Base R syntax for subsetting and modifying R objects
#
# - Some aspects of tidyverse code style
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Additional resources:
# - [RStudio base R cheat sheet](https://www.rstudio.com/wp-content/uploads/2016/10/r-cheat-sheet-3.pdf)
# - [R Operators cheat sheet](https://cran.r-project.org/doc/contrib/Baggott-refcard-v2.pdf)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Attribution:
# - [Advanced R](https://adv-r.hadley.nz/) by <NAME>
# - [Why do we use arrow as an assignment operator?](https://colinfay.me/r-assignment/) by <NAME>
| class/R/02-data-types-and-operators.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="EtdtP8_cD8-G"
# # Custom Layers
# + [markdown] colab_type="text" id="r50jnbKrIMeZ"
# One of the reasons for the success of deep learning can be found in the wide range of layers that can
# be used in a deep network. This allows for a tremendous degree of customization and adaptation. For
# instance, scientists have invented layers for images, text, pooling, loops, dynamic programming, even for
# computer programs. Sooner or later you will encounter a layer that doesn’t exist yet in Torch, or even
# better, you will eventually invent a new layer that works well for your problem at hand. This is when it’s
# time to build a custom layer. This section shows you how.
# + [markdown] colab_type="text" id="5F8SRjlqEh2-"
# Layers without Parameters
# Since this is slightly intricate, we start with a custom layer (aka Module) that doesn’t have any inherent parameters. Our first step is very similar to when we introduced modules previously. The following
# CenteredLayer class constructs a layer that subtracts the mean from the input. We build it by inheriting from the Module class and implementing the forward method.
#
#
# + colab={} colab_type="code" id="8RnC8rUyEpvF"
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
# -
class CenteredLayer(nn.Module):
    """Parameter-free layer that centers its input by subtracting the mean."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        """Return *x* shifted so that its elements average to zero."""
        centered = x - x.mean()
        return centered
# + [markdown] colab_type="text" id="XV7HjPJ2HcTA"
# To see how it works let’s feed some data into the layer.
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="mnPzfKzxGf3T" outputId="c0e6cdfe-67d9-4bd4-b2dd-9c300f07860a"
layer = CenteredLayer()
layer(torch.FloatTensor([1, 2, 3, 4, 5]))
# + [markdown] colab_type="text" id="r7b2DC_sGiA5"
# We can also use it to construct more complex models.
#
# + colab={} colab_type="code" id="ndZS07wZGqcx"
net = nn.Sequential(nn.Linear(8,128), CenteredLayer())
# + [markdown] colab_type="text" id="Q__zetKlMFuF"
# Let’s see whether the centering layer did its job. For that we send random data through the network and
# check whether the mean vanishes. Note that since we’re dealing with floating point numbers, we’re going
# to see a very small albeit typically nonzero number.
# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="SBXVV0XKLKOm" outputId="32e5c382-1288-4944-b7b4-ec1e42279d11"
y=net(torch.rand(4,8))
y.mean()
# + [markdown] colab_type="text" id="nfq8wa3aOavn"
# # Layers with Parameters
# + [markdown] colab_type="text" id="RuBt45V8OhTu"
# Now that we know how to define layers in principle, let’s define layers with parameters. These can be adjusted through training. In order to simplify things for an avid deep learning researcher the Parameter
# class and the ParameterDict dictionary provide some basic housekeeping functionality. In particular,
# they govern access, initialization, sharing, saving and loading model parameters. For instance, this way
# we don’t need to write custom serialization routines for each new custom layer.
# For instance, we can use the member variable params of the ParameterDict type that comes with
# the Module class. It is a dictionary that maps string type parameter names to model parameters in the
# Parameter type. We can create a Parameter instance from ParameterDict via the get function.
# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="rutASqVeOg94" outputId="e83e69d0-3732-4a45-cb97-92169aa75097"
params=torch.nn.ParameterDict()
params.update({"param2":nn.Parameter(Variable(torch.ones(2,3)))})
params
# + [markdown] colab_type="text" id="2MslVi6oMLiX"
# Let’s use this to implement our own version of the dense layer. It has two parameters - bias and weight.
# To make it a bit nonstandard, we bake in the ReLU activation as default. Next, we implement a fully
# connected layer with both weight and bias parameters. It uses ReLU as an activation function, where
# in_units and units are the number of inputs and the number of outputs, respectively.
#
# + colab={} colab_type="code" id="Elm6quT-QOC6"
class MyDense(nn.Module):
    """Fully connected layer with a baked-in ReLU activation.

    Parameters
    ----------
    units : int
        Number of output features.
    in_units : int
        Number of input features.
    """

    def __init__(self, units, in_units):
        super().__init__()
        # Register weight and bias as nn.Parameter so they are returned by
        # .parameters(), moved by .to()/.cuda(), and updated by optimizers.
        # (The original used the deprecated autograd.Variable, which is never
        # registered with the module and therefore cannot be trained.)
        self.weight = nn.Parameter(torch.ones(in_units, units))
        self.bias = nn.Parameter(torch.ones(units))

    def forward(self, x):
        """Return relu(x @ weight + bias)."""
        linear = torch.matmul(x, self.weight) + self.bias
        return F.relu(linear)
# + [markdown] colab_type="text" id="EVK0jlxRQubj"
# Naming the parameters allows us to access them by name through dictionary lookup later. It’s a good idea
# to give them instructive names. Next, we instantiate the MyDense class and access its model parameters.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="QKZvUVYKRsst" outputId="bfad20b9-ddd4-455e-c84d-4a9a03d3c645"
dense = MyDense(units=3, in_units=5)
dense.parameters
# + [markdown] colab_type="text" id="9LCRANkkRxj3"
# We can directly carry out forward calculations using custom layers.
# + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" id="EcxgO1p6R8tx" outputId="835ade9b-b9df-49d9-c0c4-4731b76af500"
dense(torch.rand(2,5))
# + [markdown] colab_type="text" id="KDNSxXBXR_1u"
# We can also construct models using custom layers. Once we have that we can use it just like the built-in
# dense layer. The only exception is that in our case size inference is not automagic. Please consult the
# PyTorch documentation for details on how to do this.
# + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" id="tYO-20Y9SCNC" outputId="46899257-9141-4bea-ab18-19f05d748b92"
net = nn.Sequential(
MyDense(8, in_units=64),
MyDense(1, in_units=8))
net(torch.rand(2,64))
# + [markdown] colab_type="text" id="3zGA6m60SE27"
# # Summary
#
# • We can design custom layers via the Module class. This is more powerful than defining a module
# factory, since it can be invoked in many contexts.
#
# • Modules can have local parameters.
# + [markdown] colab_type="text" id="ZbFfKDqvSLrU"
# # Exercises
#
# 1. Design a layer that learns an affine transform of the data, i.e. it removes the mean and learns an
# additive parameter instead.
#
# 2. Design a layer that takes an input and computes a tensor reduction, i.e. it returns
# ∑
# yk =
# i,j Wijkxixj .
#
# 3. Design a layer that returns the leading half of the Fourier coefficients of the data. Hint - look up
# the fft function in PyTorch.
#
| Ch07_Deep_Learning_Computation/Custom_Layers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/danielvilela/data_science/blob/master/Untitled1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="gZsoLONcTGeE" colab_type="text"
# # Semana Data Science
#
# **Principais bibliotecas em data science**
#
# * Biblioteca pandas
# * Biblioteca matplotlib
# * Biblioteca Seaborn
#
# + id="yxH7BLIVTZse" colab_type="code" colab={}
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style()
# + id="fP60L1b6U4be" colab_type="code" colab={}
# Importar o arquivo listings.csv para um DataFrame
url = "http://data.insideairbnb.com/brazil/rj/rio-de-janeiro/2019-11-22/visualisations/listings.csv"
df = pd.read_csv(url)
# + [markdown] id="2r1p0JxnVgjs" colab_type="text"
# **Dicionário de váriaveis**
#
# `dsad`
# + id="KjSkBPd7VRPb" colab_type="code" outputId="586c5907-a093-4ccf-8bcc-98ee56ed1ff2" colab={"base_uri": "https://localhost:8080/", "height": 445}
df.head() #Mostra o cabeçalho ou as 5 primeiras entradas
# + id="AClsPm9WVST8" colab_type="code" outputId="f3545b44-499c-4fd5-c5b1-d45f5a666fa4" colab={"base_uri": "https://localhost:8080/", "height": 34}
df.shape #Identificar volume de dados do DataFrame (33mil casas em 16 colunas)
# + id="CMXOFqMYVxSe" colab_type="code" outputId="beac8bd0-dc40-4f55-d938-53b5891b2c9d" colab={"base_uri": "https://localhost:8080/", "height": 306}
df.isnull().sum()
# + id="zJHEcEHZWFxi" colab_type="code" outputId="cd576346-bc34-4e66-fbad-8e1f8b20cae6" colab={"base_uri": "https://localhost:8080/", "height": 306}
df.isnull().sum()/ df.shape[0] #ver em porcentagem
# + id="DrQKQC0pWJOt" colab_type="code" outputId="11df4b29-beef-4b1d-d401-df3dfaa8a2a7" colab={"base_uri": "https://localhost:8080/", "height": 848}
df.hist(figsize=(15,10))
# + id="a8ks4OAUWXFO" colab_type="code" outputId="953e3951-e0da-4303-adb3-f886c330ac77" colab={"base_uri": "https://localhost:8080/", "height": 317}
df.describe()
# + id="2lPcDdFQXJbY" colab_type="code" colab={}
| Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dapnn
# language: python
# name: dapnn
# ---
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#PDC-2020" data-toc-modified-id="PDC-2020-1"><span class="toc-item-num">1 </span>PDC 2020</a></span></li><li><span><a href="#PDC-2021" data-toc-modified-id="PDC-2021-2"><span class="toc-item-num">2 </span>PDC 2021</a></span></li><li><span><a href="#Datasets-utilized-in-the-BINET-Paper" data-toc-modified-id="Datasets-utilized-in-the-BINET-Paper-3"><span class="toc-item-num">3 </span>Datasets utilized in the BINET Paper</a></span></li></ul></div>
# +
#default_exp conversion
# -
# # Log Conversion
#
# > Converts the event logs into csv format to make it easier to load them
#
# We did not store the unprocessed data in the github repository, because of github file size limits and total repository size. Therefore, this notebook cannot be run without the original data. However, this is also not required, because the resulting csvs are stored. We keep it here, to show how we converted the data into csvs. If you are interested in the original data, please check out the original sources.
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
from dapnn.imports import *
# ## PDC 2020
# Convert Training Logs:
# Convert each PDC 2020 training log from XES to a gzipped CSV under data/csv/.
for src in progress_bar(glob.glob('data/orig/unzipped/PDC2020_training/*')):
    dest = src.split('.')[0] + '.csv.gz'
    dest = 'data/csv/' + "/".join(dest.split('/')[3:])
    event_log = xes_importer.apply(src)
    frame = log_converter.apply(event_log, variant=log_converter.Variants.TO_DATA_FRAME)
    frame.to_csv(dest, index=False, compression='gzip')
# Convert Ground Truth Logs:
# Convert each PDC 2020 ground-truth log from XES to a gzipped CSV under data/csv/.
for src in progress_bar(glob.glob('data/orig/unzipped/PDC2020_ground_truth/*')):
    dest = src.split('.')[0] + '.csv.gz'
    dest = 'data/csv/' + "/".join(dest.split('/')[3:])
    event_log = xes_importer.apply(src)
    frame = log_converter.apply(event_log, variant=log_converter.Variants.TO_DATA_FRAME)
    frame.to_csv(dest, index=False, compression='gzip')
# ## PDC 2021
# Convert Training Logs:
# Convert each PDC 2021 training log from XES to a gzipped CSV under data/csv/.
for src in progress_bar(glob.glob('data/orig/unzipped/PDC2021_training/*')):
    dest = src.split('.')[0] + '.csv.gz'
    dest = 'data/csv/' + "/".join(dest.split('/')[3:])
    event_log = xes_importer.apply(src)
    frame = log_converter.apply(event_log, variant=log_converter.Variants.TO_DATA_FRAME)
    frame.to_csv(dest, index=False, compression='gzip')
# Convert Ground Truth Logs:
# Convert each PDC 2021 ground-truth log from XES to a gzipped CSV under data/csv/.
for src in progress_bar(glob.glob('data/orig/unzipped/PDC2021_ground_truth/*')):
    dest = src.split('.')[0] + '.csv.gz'
    dest = 'data/csv/' + "/".join(dest.split('/')[3:])
    event_log = xes_importer.apply(src)
    frame = log_converter.apply(event_log, variant=log_converter.Variants.TO_DATA_FRAME)
    frame.to_csv(dest, index=False, compression='gzip')
# ## Datasets utilized in the BINET Paper
# +
import gzip
import json
def load_binet_data(path):
    """Load a gzipped BINET JSON event log into a flat pandas DataFrame.

    Each case in the file contributes one row per event.  The case-level
    anomaly label (either a plain string or a dict with an ``'anomaly'``
    key) is replicated onto every row as ``anomaly``, the case id becomes
    ``trace_id``, and each event's ``attributes`` dict is expanded into
    its own columns.
    """
    with gzip.open(path, "r") as f:
        j = json.loads(f.read().decode('utf-8'))
    traces = []
    for case in j['cases']:
        trace = pd.DataFrame.from_dict(case['events'])
        label = case['attributes']['label']
        # Labels are either the string "normal" or {'anomaly': <type>, ...}.
        trace['anomaly'] = label if isinstance(label, str) else label['anomaly']
        trace['trace_id'] = case['id']
        traces.append(trace)
    # DataFrame.append was removed in pandas 2.0 (and was quadratic when
    # called in a loop); collect all traces and concatenate once instead.
    # ignore_index gives a unique row index so the axis=1 concat below
    # aligns cleanly.
    res = pd.concat(traces, ignore_index=True)
    # Expand the per-event attributes dict into individual columns.
    res = pd.concat([res.drop(['attributes'], axis=1),
                     res['attributes'].apply(pd.Series)], axis=1)
    return res
# -
import warnings
warnings.filterwarnings('ignore')
# Convert Synthethic Datasets and BPI Challenge Datasets, that already include artifical anomalies from BINET Paper
for fn in progress_bar((list(glob.glob('data/orig/unzipped/binet_logs/*-0.3-*')))):
new_fn=fn[:-7]+'csv.gz'
new_fn='data/csv/'+"/".join(new_fn.split('/')[3:])
df=load_binet_data(fn)
df.to_csv(new_fn,index=False,compression='gzip')
| 01_log_conversion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Voila/papermill notebook
#
# 1. Choose a notebook
# 2. Set parameters (parameters extracted by papermill; GUI generated by ipywidgets)
# 3. Run the notebook with those parameters
import os
import urllib
import ipywidgets as widgets
from papermill import inspect_notebook
from papermill.parameterize import parameterize_notebook
from papermill.iorw import load_notebook_node, list_notebook_files
from IPython.display import display, clear_output, Markdown, HTML
# Choose notebooks from the current directory
notebooks = [os.path.basename(f) for f in list_notebook_files(os.getcwd())]
notebooks.remove('notebook-runner.ipynb')
notebooks.sort()
# Run the target notebook. Each cell is executed by the current
# kernel and captured by an Output widget. Markdown cells are
# rendering using IPython.display. Other cell types are ignored.
def run_target(target, params):
    """Execute the notebook *target* with *params* injected via papermill.

    Each cell runs in the *current* kernel (not a subprocess): code cells
    are executed with get_ipython().ex() and markdown cells are rendered
    with IPython.display.  Every cell's output is captured in its own
    Output widget and appended to the global `target_output` widget.
    Other cell types (e.g. raw) are silently ignored.
    """
    target_nb = load_notebook_node(target)
    # Inject the user-supplied parameter values as a new parameters cell.
    target_nb = parameterize_notebook(target_nb, params)
    target_output.clear_output()
    for cell in target_nb.cells:
        cell_output = widgets.Output() #layout={'border': '1px solid black'})
        if cell.cell_type == 'code':
            with cell_output:
                # NOTE(review): ex() runs the cell in this kernel's global
                # namespace, so a target notebook can clobber our own names.
                get_ipython().ex(cell.source)
        elif cell.cell_type == 'markdown':
            with cell_output:
                display(Markdown(cell.source))
        with target_output:
            display(cell_output)
# +
# Use papermill to get the target notebook's parameters
def get_params(target):
    """Map each parameter papermill finds in *target* to its default value."""
    return {name: meta['default'] for name, meta in inspect_notebook(target).items()}
# Get parameters from the URL query string in Voila
def get_query_string_params():
    """Parse the QUERY_STRING environment variable (set by Voila) into a dict."""
    query = os.environ.get('QUERY_STRING', '')
    return dict(urllib.parse.parse_qsl(query))
# Override default params (from notebook parameter cell) with
# query string parameters in Voila.
def merge_params(params, qs_params):
    """Overwrite entries of *params* that also appear in *qs_params*, in place.

    Returns the list of parameter names that were overridden, in the order
    they appear in *params*.
    """
    overridden = [name for name in params if name in qs_params]
    for name in overridden:
        params[name] = qs_params[name]
    return overridden
# The quick link lets you go back to this page with the user-provided
# parameters as the default values in Voila.
def build_quick_link(target, params):
    """Return HTML for a link that reopens this runner with *params* pre-filled."""
    # repr() keeps strings quoted so they parse back to the same value
    # when the link is followed.
    query = {'target': target, **{k: repr(v) for k, v in params.items()}}
    href = 'notebook-runner.ipynb?' + urllib.parse.urlencode(query)
    return f"Quick link: <a href='{href}'>{href}</a>"
# Given a set of parameters, this builds a function that we can give
# to ipywidgets.interactive() and let it build a widget UI for us.
# It returns a widget that we can incorporate into a larger layout.
def build_inputs(target):
    """Build an ipywidgets form for *target*'s notebook parameters.

    Generates the source of a ``run_notebook(param=default, ...)`` function,
    exec()s it into globals(), and hands it to ``widgets.interactive()``,
    which builds one input widget per keyword default.  Returns the
    resulting interactive widget (with a manual "Run Notebook" button).
    """
    params = get_params(target)
    qs_params = get_query_string_params()
    overridden = merge_params(params, qs_params)
    if overridden:
        with target_input:
            display(HTML(f"<b>Note:</b> Loaded defaults from query string: {', '.join(overridden)}"))
    # Build a function whose keyword defaults mirror the notebook's params;
    # widgets.interactive() inspects those defaults to choose widget types.
    defaults = ", ".join(f"{k}={v}" for k, v in params.items())
    code = f"def run_notebook({defaults}):\n    parameters = {repr(params)}"
    for p in params:
        code += f"\n    parameters['{p}'] = {p}"
    code += f"\n    quick_link.value = build_quick_link('{target}', parameters)"
    code += f"\n    run_target('{target}', parameters)"
    #print(code)
    # HACK: exec into globals() so run_notebook is visible to interactive().
    # Parameter names/values are interpolated unescaped into source code —
    # acceptable for a local tool, but this is code injection if ever
    # exposed to untrusted notebooks or query strings.
    exec(code, globals())
    return widgets.interactive(run_notebook, {"manual": True, "manual_name": "Run Notebook"})
# -
# Load the selected notebook. This will extract parameters from
# the notebook and display the auto-generated GUI.
def load_notebook(b):
    """Button handler: reset the parameter/output areas and show the
    auto-generated parameter widget for the currently selected notebook.
    (*b* is the clicked Button, unused.)"""
    chosen = target_name.value
    target_input.clear_output()
    target_output.clear_output()
    form = build_inputs(chosen)
    with target_input:
        display(form)
# +
# Widgets for selecting the target notebook
target_select = widgets.Output(layout={'border': '1px solid blue'})
target_name = widgets.Dropdown(options=notebooks, description='Target nb:')
load_button = widgets.Button(description='Load parameters')
load_button.on_click(load_notebook)
with target_select:
display(target_name)
display(load_button)
# Widgets for auto-generated GUI based on target notebook params
target_input = widgets.Output(layout={'border': '1px solid blue'})
quick_link = widgets.HTML()
# The output from executing the target notebook
target_output = widgets.Output(layout={'border': '1px solid blue'})
# If the target notebook was specified in the URL (Voila only), then go
# ahead and pre-select it and extract the parameters.
qs_params = get_query_string_params()
if 'target' in qs_params:
target_name.value = qs_params['target']
with target_select:
display(HTML(f"<b>Note:</b> Loaded target from query string: {qs_params['target']}"))
load_notebook(load_button)
display(target_select, target_input, quick_link, target_output)
# -
| papermill/notebook-runner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NLP Course 2 Week 1 Lesson : Building The Model - Lecture Exercise 02
# Estimated Time: 20 minutes
# <br>
# # Candidates from String Edits
# Create a list of candidate strings by applying an edit operation
# <br>
# ### Imports and Data
# data
# Restore the worked example word: every cell below edits 'dearz' (🦌) and
# the stated expected outputs ('earz', 'darz', 'derz', 'deaz', 'dear')
# are exactly its one-character deletions.  The previous placeholder
# ('de<PASSWORD>') was a redaction artifact that broke the lesson.
word = 'dearz'  # 🦌
# ### Splits
# Find all the ways you can split a word into 2 parts !
# +
# splits with a loop
splits_a = []
for i in range(len(word)+1):
splits_a.append([word[:i],word[i:]])
for i in splits_a:
print(i)
# +
# same splits, done using a list comprehension
splits_b = [(word[:i], word[i:]) for i in range(len(word) + 1)]
for i in splits_b:
print(i)
# -
# ### Delete Edit
# Delete a letter from each string in the `splits` list.
# <br>
# What this does is effectivly delete each possible letter from the original word being edited.
# +
# deletes with a loop
splits = splits_a
deletes = []
print('word : ', word)
for L,R in splits:
if R:
print(L + R[1:], ' <-- delete ', R[0])
# -
# It's worth taking a closer look at how this is excecuting a 'delete'.
# <br>
# Taking the first item from the `splits` list :
# breaking it down
print('word : ', word)
one_split = splits[0]
print('first item from the splits list : ', one_split)
L = one_split[0]
R = one_split[1]
print('L : ', L)
print('R : ', R)
print('*** now implicit delete by excluding the leading letter ***')
print('L + R[1:] : ',L + R[1:], ' <-- delete ', R[0])
# So the end result transforms **'dearz'** to **'earz'** by deleting the first character.
# <br>
# And you use a **loop** (code block above) or a **list comprehension** (code block below) to do
# <br>
# this for the entire `splits` list.
# +
# deletes with a list comprehension
splits = splits_a
deletes = [L + R[1:] for L, R in splits if R]
print(deletes)
print('*** which is the same as ***')
for i in deletes:
print(i)
# -
# ### Ungraded Exercise
# You now have a list of ***candidate strings*** created after performing a **delete** edit.
# <br>
# Next step will be to filter this list for ***candidate words*** found in a vocabulary.
# <br>
# Given the example vocab below, can you think of a way to create a list of candidate words ?
# <br>
# Remember, you already have a list of candidate strings, some of which are certainly not actual words you might find in your vocabulary !
# <br>
# <br>
# So from the above list **earz, darz, derz, deaz, dear**.
# <br>
# You're really only interested in **dear**.
# +
# Intersect the candidate edit strings with the vocabulary to keep only
# real words. Filled in the exercise body so the cell reproduces the
# documented expected outcome: candidate words : {'dear'}.
vocab = ['dean','deer','dear','fries','and','coke']
edits = list(deletes)
print('vocab : ', vocab)
print('edits : ', edits)
candidates=[]
### START CODE HERE ###
candidates = set(vocab).intersection(edits)  # hint: 'set.intersection'
### END CODE HERE ###
print('candidate words : ', candidates)
# -
# Expected Outcome:
#
# vocab : ['dean', 'deer', 'dear', 'fries', 'and', 'coke']
# <br>
# edits : ['earz', 'darz', 'derz', 'deaz', 'dear']
# <br>
# candidate words : {'dear'}
# ### Summary
# You've unpacked an integral part of the assignment by breaking down **splits** and **edits**, specifically looking at **deletes** here.
# <br>
# Implementation of the other edit types (insert, replace, switch) follows a similar methodology and should now feel somewhat familiar when you see them.
# <br>
# This bit of the code isn't as intuitive as other sections, so well done!
# <br>
# You should now feel confident facing some of the more technical parts of the assignment at the end of the week.
| 2. Natural Language Processing with Probabilistic Models/Week 1/NLP_C2_W1_lecture_nb_02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The dataset used in this notebook is Tesla stock history from 2014 to 2017. You can find the .csv file in the project folder.
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
# %matplotlib inline
# ### Step 1. Loading dataset
# Load the Tesla stock history CSV (expected next to this notebook).
tesla_stocks = pd.read_csv('tesla_stocks.csv')
tesla_stocks.head()
# Choosing only column that we are going to use in the prediction process:
# the model predicts from (and for) the daily closing price only.
data_to_use = tesla_stocks['Close'].values
# ### Step 2. Data preprocessing
#
# #### Step 2.1 Scaling data
# Standardise the closing prices to zero mean / unit variance. The scaler
# expects a 2-D array, hence the reshape into a single column.
scaler = StandardScaler()
scaled_data = scaler.fit_transform(data_to_use.reshape(-1, 1))
plt.figure(figsize=(12,7), frameon=False, facecolor='brown', edgecolor='blue')
plt.title('Scaled TESLA stocks from August 2014 to August 2017')
plt.xlabel('Days')
plt.ylabel('Scaled value of stocks')
plt.plot(scaled_data, label='Stocks data')
plt.legend()
plt.show()
def window_data(data, window_size):
    '''
    Build feature windows and next-step labels from a sequence.

    Input:  data        - dataset used in the project
            window_size - how many consecutive points are used to predict
                          the next point in the sequence
    Output: X - list of windows, each holding window_size points
            y - list of the point immediately following each window
    '''
    X, y = [], []
    # Every start index that still leaves one point after the window.
    for start in range(len(data) - window_size):
        X.append(data[start:start + window_size])
        y.append(data[start + window_size])
    assert len(X) == len(y)
    return X, y
#
# #### Step 2.2 Windowing the data with window_data function
# Window the scaled series: each sample is 7 consecutive days, the label
# is the following day.
X, y = window_data(scaled_data, 7)
# #### Step 2.3 Splitting data to training and testing parts
# +
# Chronological split: the first 700 windows train, the remainder test.
X_train = np.array(X[:700])
y_train = np.array(y[:700])
X_test = np.array(X[700:])
y_test = np.array(y[700:])
print("X_train size: {}".format(X_train.shape))
print("y_train size: {}".format(y_train.shape))
print("X_test size: {}".format(X_test.shape))
print("y_test size: {}".format(y_test.shape))
# -
# ### Step 3. Define the network
#Hyperparameters used in the network
batch_size = 7 #how many windows of data we are passing at once
window_size = 7 #how big window_size is (Or How many days do we consider to predict next point in the sequence)
hidden_layer = 256 #How many units do we use in LSTM cell
clip_margin = 4 #To prevent exploding gradient, we use clipper to clip gradients below -margin or above this margin
learning_rate = 0.001
epochs = 200
# #### Step 3.1 Define placeholders
# TF1-style placeholders: one batch of `batch_size` windows of `window_size`
# daily prices (trailing feature dimension 1), and one target per window.
inputs = tf.placeholder(tf.float32, [batch_size, window_size, 1])
targets = tf.placeholder(tf.float32, [batch_size, 1])
# In this notebook I am implementing LSTM cell from scratch using TensorFlow. In the next 2 cells you will find weights and implementation of the LSTM cell.
# +
# LSTM weights
# Each gate has an input->hidden matrix (1 x hidden_layer), a recurrent
# hidden->hidden matrix, and a bias vector.
#Weights for the input gate
weights_input_gate = tf.Variable(tf.truncated_normal([1, hidden_layer], stddev=0.05))
weights_input_hidden = tf.Variable(tf.truncated_normal([hidden_layer, hidden_layer], stddev=0.05))
bias_input = tf.Variable(tf.zeros([hidden_layer]))
#weights for the forget gate
weights_forget_gate = tf.Variable(tf.truncated_normal([1, hidden_layer], stddev=0.05))
weights_forget_hidden = tf.Variable(tf.truncated_normal([hidden_layer, hidden_layer], stddev=0.05))
bias_forget = tf.Variable(tf.zeros([hidden_layer]))
#weights for the output gate
weights_output_gate = tf.Variable(tf.truncated_normal([1, hidden_layer], stddev=0.05))
weights_output_hidden = tf.Variable(tf.truncated_normal([hidden_layer, hidden_layer], stddev=0.05))
bias_output = tf.Variable(tf.zeros([hidden_layer]))
#weights for the memory cell (candidate cell state)
weights_memory_cell = tf.Variable(tf.truncated_normal([1, hidden_layer], stddev=0.05))
weights_memory_cell_hidden = tf.Variable(tf.truncated_normal([hidden_layer, hidden_layer], stddev=0.05))
bias_memory_cell = tf.Variable(tf.zeros([hidden_layer]))
# -
## Output layer weights: map the final hidden output to a single prediction.
weights_output = tf.Variable(tf.truncated_normal([hidden_layer, 1], stddev=0.05))
bias_output_layer = tf.Variable(tf.zeros([1]))
# This is definition of LSTM cell. The best explanation of the LSTM you will find [here](http://colah.github.io/posts/2015-08-Understanding-LSTMs/)
#
#
# 
#
# *This image is from Colah's blog*
def LSTM_cell(input, output, state):
    """One LSTM time step, built from the module-level weight variables.

    Parameters: input  - current input, shape (1, 1)
                output - previous hidden output h, shape (1, hidden_layer)
                state  - previous cell state c, shape (1, hidden_layer)
    Returns: (state, output) - the NEW cell state and hidden output.
    NOTE(review): the return order (c, h) is the reverse of the argument
    order (h, c) - easy to swap at call sites; double-check callers.
    """
    input_gate = tf.sigmoid(tf.matmul(input, weights_input_gate) + tf.matmul(output, weights_input_hidden) + bias_input)
    forget_gate = tf.sigmoid(tf.matmul(input, weights_forget_gate) + tf.matmul(output, weights_forget_hidden) + bias_forget)
    output_gate = tf.sigmoid(tf.matmul(input, weights_output_gate) + tf.matmul(output, weights_output_hidden) + bias_output)
    memory_cell = tf.tanh(tf.matmul(input, weights_memory_cell) + tf.matmul(output, weights_memory_cell_hidden) + bias_memory_cell)
    state = state * forget_gate + input_gate * memory_cell
    output = output_gate * tf.tanh(state)
    return state, output
# ## Time to define loop for the network
# Unrolled forward pass: run every window in the batch through the LSTM,
# one time step at a time, and collect one prediction per window.
outputs = []
for i in range(batch_size):  # iterate over every window in the batch
    # Fresh cell state and hidden output (all zeros) for each window.
    batch_state = np.zeros([1, hidden_layer], dtype=np.float32)
    batch_output = np.zeros([1, hidden_layer], dtype=np.float32)
    # Feed each point of the window into the LSTM to advance state/output.
    for ii in range(window_size):
        # BUG FIX: LSTM_cell expects (input, output, state). The original
        # passed (input, batch_state, batch_output), swapping the hidden
        # output and the cell state on every time step.
        batch_state, batch_output = LSTM_cell(tf.reshape(inputs[i][ii], (-1, 1)), batch_output, batch_state)
    # The last hidden output of the window produces the prediction.
    outputs.append(tf.matmul(batch_output, weights_output) + bias_output_layer)
outputs
# #### Step 3.3 Define loss
# +
# One squared-error term per window in the batch; the scalar training
# loss is their mean.
losses = []
for i in range(len(outputs)):
    losses.append(tf.losses.mean_squared_error(tf.reshape(targets[i], (-1, 1)), outputs[i]))
loss = tf.reduce_mean(losses)
# -
# #### Step 3.4 Define optimizer with gradient clipping
gradients = tf.gradients(loss, tf.trainable_variables())
clipped, _ = tf.clip_by_global_norm(gradients, clip_margin)
optimizer = tf.train.AdamOptimizer(learning_rate)
# BUG FIX: apply the *clipped* gradients. The original applied the raw
# `gradients`, so the clipping computed above had no effect at all.
trained_optimizer = optimizer.apply_gradients(zip(clipped, tf.trainable_variables()))
# ### Time to train the network
session = tf.Session()
session.run(tf.global_variables_initializer())
# Mini-batch training: slide over the training windows in steps of
# batch_size, dropping the final partial batch (placeholders are fixed-size).
for i in range(epochs):
    traind_scores = []
    ii = 0
    epoch_loss = []
    while(ii + batch_size) <= len(X_train):
        X_batch = X_train[ii:ii+batch_size]
        y_batch = y_train[ii:ii+batch_size]
        o, c, _ = session.run([outputs, loss, trained_optimizer], feed_dict={inputs:X_batch, targets:y_batch})
        epoch_loss.append(c)
        traind_scores.append(o)
        ii += batch_size
    # Progress report every 30 epochs.
    if (i % 30) == 0:
        print('Epoch {}/{}'.format(i, epochs), ' Current loss: {}'.format(np.mean(epoch_loss)))
# Flatten the per-batch prediction lists of the last epoch into one
# sequence of training predictions for plotting.
sup =[]
for i in range(len(traind_scores)):
    for j in range(len(traind_scores[i])):
        sup.append(traind_scores[i][j][0])
# Run the trained network over the held-out windows, again in fixed-size
# batches (the final partial batch is dropped).
tests = []
i = 0
while i+batch_size <= len(X_test):
    o = session.run([outputs], feed_dict={inputs:X_test[i:i+batch_size]})
    i += batch_size
    tests.append(o)
# Flatten the nested per-batch structure into a flat list of predictions.
tests_new = []
for i in range(len(tests)):
    for j in range(len(tests[i][0])):
        tests_new.append(tests[i][0][j])
# Pad with None so test predictions line up with the original time axis.
# NOTE(review): 749 and 701 look like the total number of windows and the
# start index of the test segment for this particular CSV - confirm these
# against len(X) and the 700-window train split before reusing.
test_results = []
for i in range(749):
    if i >= 701:
        test_results.append(tests_new[i-701])
    else:
        test_results.append(None)
# ### Plotting predictions from the network
# Overlay the original series with training and (time-aligned) test
# predictions, then release the TF session.
plt.figure(figsize=(16, 7))
plt.plot(scaled_data, label='Original data')
plt.plot(sup, label='Training data')
plt.plot(test_results, label='Testing data')
plt.legend()
plt.show()
session.close()
| lstm_from_scratch_tensorflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Status calculation with ekostat_calculator
import os
import sys
import datetime
import core
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual
print(os.getcwd())
def return_input(value):
    """Identity helper: hand *value* back unchanged.

    Used as the callback for `interactive` so a widget's current selection
    is exposed via the `.result` attribute.
    """
    return value
# -----------------------------------
# ## Select
# ### Directories and file paths
# Dropdown widgets for the analysis period; `interactive` wires each
# dropdown to return_input so the chosen year is available as `.result`.
start_year = interactive(return_input,
                         value=widgets.Dropdown(
                             options=[2009, 2010, 2011, 2012, 2013],
                             value=2009,
                             description='Select start year:',
                             disabled=False)
                         )
end_year = interactive(return_input,
                       value=widgets.Dropdown(
                           options=[2011, 2012, 2013, 2014, 2015, 2016],
                           value=2015,
                           # BUG FIX: this dropdown selects the END year;
                           # the label said 'Select start year:' (copy-paste).
                           description='Select end year:',
                           disabled=False)
                       )
from IPython.display import display
display(start_year, end_year)
print(start_year.result, end_year.result)
# Multi-select checkbox list of quality elements (project widget helper).
test_widget = core.jupyter_eventhandlers.MultiCheckboxWidget(['Bottenfauna', 'Växtplankton','Siktdjup','Näringsämnen'])
test_widget # Display the widget
if __name__ == '__main__':
    nr_marks = 60
    print('='*nr_marks)
    print('Running module "lv_test_file.py"')
    print('-'*nr_marks)
    print('')
    # Resolve all project directories relative to the current working
    # directory (the notebook's location).
    #root_directory = os.path.dirname(os.path.abspath(__file__)) # works in
    root_directory = os.getcwd() # works in notebook
    resources_directory = root_directory + '/resources'
    filter_directory = root_directory + '/workspaces/default/filters'
    data_directory = root_directory + '/workspaces/default/data'
    # est_core.StationList(root_directory + '/test_data/Stations_inside_med_typ_attribute_table_med_delar_av_utsjö.txt')
    # NOTE(review): the return value is unused, so core.ParameterList()
    # presumably registers global state as a side effect - confirm against
    # the core package.
    core.ParameterList()
    #--------------------------------------------------------------------------
    print('{}\nSet directories and file paths'.format('*'*nr_marks))
    raw_data_file_path = data_directory + '/raw_data/data_BAS_2000-2009.txt'
    first_filter_data_directory = data_directory + '/filtered_data'
    first_data_filter_file_path = filter_directory + '/selection_filters/first_data_filter.txt'
    winter_data_filter_file_path = filter_directory + '/selection_filters/winter_data_filter.txt'
    summer_data_filter_file_path = filter_directory + '/selection_filters/summer_data_filter.txt'
    tolerance_filter_file_path = filter_directory + '/tolerance_filters/tolerance_filter_template.txt'
# ### Set up filters
# TODO: Store selection filters as attributes or something that allows us to not have to call them before calculating indicators/qualityfactors
    print('{}\nInitiating filters'.format('*'*nr_marks))
    # Selection filters restrict the data (season, area, ...); the tolerance
    # filter is used later when computing EQR.
    first_filter = core.DataFilter('First filter', file_path = first_data_filter_file_path)
    winter_filter = core.DataFilter('winter_filter', file_path = winter_data_filter_file_path)
    winter_filter.save_filter_file(filter_directory + '/selection_filters/winter_data_filter_save.txt') # method available
    summer_filter = core.DataFilter('summer_filter', file_path = summer_data_filter_file_path)
    summer_filter.save_filter_file(filter_directory + '/selection_filters/summer_data_filter_save.txt') # method available
    tolerance_filter = core.ToleranceFilter('test_tolerance_filter', file_path = tolerance_filter_file_path)
    print('done\n{}.'.format('*'*nr_marks))
# ### Load reference values
print('{}\nLoading reference values'.format('*'*nr_marks))
core.RefValues()
core.RefValues().add_ref_parameter_from_file('DIN_winter', resources_directory + '/classboundaries/nutrients/classboundaries_din_vinter.txt')
core.RefValues().add_ref_parameter_from_file('TOTN_winter', resources_directory + '/classboundaries/nutrients/classboundaries_totn_vinter.txt')
core.RefValues().add_ref_parameter_from_file('TOTN_summer', resources_directory + '/classboundaries/nutrients/classboundaries_totn_summer.txt')
print('done\n{}.'.format('*'*nr_marks))
# ------------------------------------------------------
# ## Data
# ### Select data and create DataHandler instance
# Handler (raw data)
raw_data = core.DataHandler('raw')
raw_data.add_txt_file(raw_data_file_path, data_type='column')
# ### Apply filters to selected data
# Use first filter
filtered_data = raw_data.filter_data(first_filter)
# Save filtered data (first filter) as a test
filtered_data.save_data(first_filter_data_directory)
# Load filtered data (first filter) as a test
loaded_filtered_data = core.DataHandler('first_filtered')
loaded_filtered_data.load_data(first_filter_data_directory)
# -----------------------------------------------------
# ## Calculate Quality elements
# ### Create an instance of NP Qualityfactor class
qf_NP = core.QualityFactorNP()
# use set_data_handler to load the selected data to the QualityFactor
qf_NP.set_data_handler(data_handler = loaded_filtered_data)
# ### Filter parameters in QualityFactorNP
# THIS SHOULD BE DEFAULT
print('{}\nApply season filters to parameters in QualityFactor\n'.format('*'*nr_marks))
# First general filter
qf_NP.filter_data(data_filter_object = first_filter)
# winter filter
qf_NP.filter_data(data_filter_object = winter_filter, indicator = 'TOTN_winter')
qf_NP.filter_data(data_filter_object = winter_filter, indicator = 'DIN_winter')
# summer filter
qf_NP.filter_data(data_filter_object = summer_filter, indicator = 'TOTN_summer')
print('done\n{}.'.format('*'*nr_marks))
# ### Calculate Quality Factor EQR
print('{}\nApply tolerance filters to all indicators in QualityFactor and get result\n'.format('*'*nr_marks))
qf_NP.get_EQR(tolerance_filter)
print(qf_NP.class_result)
print('-'*nr_marks)
print('done')
print('-'*nr_marks)
| .ipynb_checkpoints/lv_notebook-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Over deze opdrachten
#
# * dit is Jupyter Notebook `python-sqlite-0.ipynb` - voor het aanmaken van de database.
# * voor een inleiding over het gebruik van Jupyter Notebooks: [Inleiding Jupyter Notebook](Inleiding-Jupyter.ipynb)
# * de hele reeks Python SQlite opdrachten:
# * [Python SQLite - init database](python-sqlite-0.ipynb) (om met een schone lei te beginnnen)
# * [Python SQLite - selectie en projectie](python-sqlite-1.ipynb)
# * [Python SQLite - joins](python-sqlite-2.ipynb)
# * [Python SQLite - CRUD](python-sqlite-3.ipynb)
# * [Python SQLite - Schema](python-sqlite-4.ipynb)
#
# ### Voorbeeld
#
# Bij deze opdrachten gebruiken we een voorbeeld-database met drie tabellen: `leden`, `inschrijvingen`, en `events`.
# Deze database komt uit een webtoepassing; deze vind je op glitch.com. REF
# Daar kun je de toepassing bekijken, uitproberen, en er een eigen versie ("remix") van maken.
# ## Aanmaken van de database
# In de volgende opdrachten voer je allerlei queries uit op een database.
# Je moeten dan eerst wel een database met inhoud hebben.
# Met de onderstaande opdrachten maak je deze database.
# Deze opdrachten hoef je maar één keer uit te voeren: de database blijft bestaan, met je veranderingen.
# Je kunt deze opdrachten ook uitvoeren om opnieuw te beginnen, in een goed gedefinieerde toestand.
import sqlite3
# Open (or create) example.db and obtain a cursor for executing SQL.
db = sqlite3.connect('example.db')
cursor = db.cursor()
# We maken de tabel(len) aan.
# We verwijderen eerst een eventueel bestaande versie van de tabel(len):
# we hebben dan een goed gedefinieerde toestand.
#
# > Opmerking: er zijn kleine verschillen in de notatie van de constraints bij het aanmaken van een tabel; MySQL gebruikt bijvoorbeeld een andere notatie dan Oracle.
# ### Eerste tabel: leden
# Recreate the `leden` (members) table so reruns start from a clean state.
cursor.execute('''DROP TABLE IF EXISTS leden;''')
cursor.execute('''CREATE TABLE leden(
lidnr INTEGER PRIMARY KEY,
voornaam VARCHAR(255) NOT NULL,
achternaam VARCHAR(255) NOT NULL,
email VARCHAR(255) NOT NULL UNIQUE
);''')
# We hebben een voorbeeld-inhoud van de tabel(len) in csv-bestanden.
# Zo'n csv-bestand kun je gemakkelijk aanpassen in een teksteditor.
# Voor het importeren van een csv-bestand gebruiken we een speciale SQLite-opdracht, via de shell.
# (Een alternatief is om dit bestand te importeren via pandas.)
# + language="bash"
# sqlite3 example.db
# .mode csv
# .import leden.csv leden
# -
# Hieronder een voorbeeld van een SQL-opdracht die we rechtstreeks in SQLite uitvoeren.
# + language="bash"
# sqlite3 example.db
# SELECT * FROM leden;
# -
# Eenzelfde opdracht, nu als onderdeel van een Python-programma:
# Same query from Python: the cursor returned by execute() is iterable
# over its result rows.
for row in cursor.execute('''SELECT * FROM leden;'''):
    print(row)
# ### Tweede tabel: events
#
# De tabel `events` bevat de events waarvoor de leden kunnen inschrijven.
# Elk event heeft een datum en een beschrijving.
#
# Hiervoor volgen we hetzelfde patroon:
# Recreate the `events` table; each (datum, beschrijving) pair must be unique.
cursor.execute('''DROP TABLE IF EXISTS events;''')
cursor.execute('''CREATE TABLE events(
eventnr INTEGER,
datum VARCHAR(255) NOT NULL,
beschrijving VARCHAR(255),
PRIMARY KEY (eventnr),
CONSTRAINT name UNIQUE (datum, beschrijving)
);''')
# + language="bash"
# sqlite3 example.db
# .mode csv
# .import events.csv events
# -
# Show the imported events; iterate the cursor that execute() returns.
for row in cursor.execute('''SELECT * FROM events;'''):
    print(row)
# ### Derde tabel: inschrijvingen
#
# Deze tabel beschrijft een N-M relatie tussen leden en inschrijvingen.
# Naast de verwijzingen (via *foreign keys*) naar de andere tabellen vindt je hier de gegevens over de inschrijving (maaltijd-keuze).
# Recreate the `inschrijvingen` (registrations) join table: an N-M relation
# between members and events, with the meal choice as payload.
cursor.execute('''DROP TABLE IF EXISTS inschrijvingen;''')
cursor.execute('''CREATE TABLE inschrijvingen(
eventnr INTEGER,
lidnr INTEGER,
maaltijd VARCHAR(255),
PRIMARY KEY (lidnr, eventnr),
FOREIGN KEY (lidnr) REFERENCES leden (lidnr),
FOREIGN KEY (eventnr) REFERENCES events (eventnr)
);''')
# + language="bash"
# sqlite3 example.db
# .mode csv
# .import inschrijvingen.csv inschrijvingen
# -
# Show the imported registrations; iterate the cursor execute() returns.
for row in cursor.execute('''SELECT * FROM inschrijvingen;'''):
    print(row)
# ### Demonstratie: alle inschrijvingen
#
# Voor een overzicht van alle inschrijvingen met de gegevens van de leden en van de events gebruiken we een join.
# Dit is een voorproefje - in een volgend notebook werken we dit verder uit.
# Overview of all registrations, joined with member and event details.
cursor.execute('''SELECT evt.datum, evt.beschrijving, lid.voornaam, lid.achternaam, lid.email, ins.maaltijd
FROM inschrijvingen ins, events evt, leden lid
WHERE ins.lidnr = lid.lidnr AND ins.eventnr = evt.eventnr;''')
for row in cursor:
    print(row)
# Persist all changes and release the connection.
db.commit()
db.close()
| python-sqlite-0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Preprocessing of MAGeT results and merge into the matched data table.
# ## Input:
# matched_des_data_file = data_dir/'matched_Des-cere_qced.csv'
# matched_DKT_data_file = data_dir/'matched_DKT-cere_qced.csv'
# ## Ouput:
# matched_des_all_file = data_dir/ 'matched_Des-all_qced.csv'
# matched_DKT_all_file = data_dir/ 'matched_DKT-all_qced.csv'
# +
# reading/merging/selecting data
from pathlib import Path
import nipype.interfaces.io as nio
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# input folder
data_dir = Path("/codes/tab_data")
maget_dir = Path("/codes/preproc/maget/results")
# Existing data file
matched_des_data_file = data_dir/'matched_Des-cere_qced.csv'
matched_DKT_data_file = data_dir/'matched_DKT-cere_qced.csv'
# output file
matched_des_all_file = data_dir/ 'matched_Des-all_qced.csv'
matched_DKT_all_file = data_dir/ 'matched_DKT-all_qced.csv'
cohorts_str = ['ADNI','ET','NC','PPMI']
# preprocess of MAGeT results
# NOTE(review): x[2:10] appears to strip a 2-character cohort prefix and
# keep an 8-character subject ID - confirm against the volume CSV index.
et_maget_read = pd.read_csv(maget_dir/('ET_volumes.csv'), sep=',', header=0, index_col=0)
et_maget_read.index= [x[2:10] for x in et_maget_read.index]
nc_maget_read = pd.read_csv(maget_dir/('NC_volumes.csv'), sep=',', header=0, index_col=0)
nc_maget_read.index= [x[2:10] for x in nc_maget_read.index]
adni_maget_read = pd.read_csv(maget_dir/('ADNI_volumes.csv'), sep=',', header=0, index_col=0)
ppmi_maget_read = pd.read_csv(maget_dir/('PPMI_volumes.csv'), sep=',', header=0, index_col=0)
# remove sub-3600 from PPMI for not being selected and also conflicting with ET subject ID
ppmi_maget_read=ppmi_maget_read.drop(index=['sub-3600'])
# Stack all cohorts and normalise index punctuation to underscores.
maget_df = pd.concat([et_maget_read,nc_maget_read,ppmi_maget_read,adni_maget_read])
maget_df.index= [x.replace('-','_') for x in maget_df.index]
# Combine lobules I-II, III and IV into a single I-IV region per hemisphere.
maget_df.loc[:,'L_I_IV'] = maget_df.loc[:,'L_I_II']+maget_df.loc[:,'L_III']+maget_df.loc[:,'L_IV']
maget_df.loc[:,'R_I_IV'] = maget_df.loc[:,'R_I_II']+maget_df.loc[:,'R_III']+maget_df.loc[:,'R_IV']
# Left-join the MAGeT volumes onto the existing matched tables.
des_df = pd.read_csv(matched_des_data_file , sep=',', header=0, index_col=0);
DKT_df = pd.read_csv(matched_DKT_data_file , sep=',', header=0, index_col=0);
des_all_df = des_df.join(maget_df, how='left').copy()
DKT_all_df = DKT_df.join(maget_df, how='left').copy()
# -
# -
# # Adding Gray Matter volume from SUIT and MAGET
# adding gray matter volume from suit and maget segmentations
SUIT_l_labels = ['Left_I_IV', 'Left_V', 'Left_VI', 'Left_CrusI', 'Left_CrusII', 'Left_VIIb', 'Left_VIIIa', 'Left_VIIIb', 'Left_IX', 'Left_X'];
SUIT_r_labels = ['Right_I_IV','Right_V','Right_VI','Right_CrusI','Right_CrusII','Right_VIIb','Right_VIIIa','Right_VIIIb','Right_IX','Right_X'];
MAGeT_l_labels = ['L_I_IV', 'L_V', 'L_VI', 'L_Crus_I', 'L_Crus_II', 'L_VIIB', 'L_VIIIA', 'L_VIIIB', 'L_IX', 'L_X'];
MAGeT_r_labels = ['R_I_IV','R_V','R_VI','R_Crus_I','R_Crus_II', 'R_VIIB','R_VIIIA','R_VIIIB','R_IX','R_X'];
# Desikan table: per-hemisphere grey-matter total = sum of lobule volumes.
des_all_df.loc[:,'Left_cerebellar_GM_SUIT'] =des_all_df.loc[:, SUIT_l_labels].sum(axis=1); des_all_df.loc[:,'Right_cerebellar_GM_SUIT'] =des_all_df.loc[:,SUIT_r_labels].sum(axis=1)
des_all_df.loc[:,'Left_cerebellar_GM_MAGeT']=des_all_df.loc[:,MAGeT_l_labels].sum(axis=1); des_all_df.loc[:,'Right_cerebellar_GM_MAGeT']=des_all_df.loc[:,MAGeT_r_labels].sum(axis=1)
# DKT table: same totals on the DKT-matched data.
DKT_all_df.loc[:,'Left_cerebellar_GM_SUIT'] =DKT_all_df.loc[:, SUIT_l_labels].sum(axis=1); DKT_all_df.loc[:,'Right_cerebellar_GM_SUIT'] =DKT_all_df.loc[:,SUIT_r_labels].sum(axis=1)
DKT_all_df.loc[:,'Left_cerebellar_GM_MAGeT']=DKT_all_df.loc[:,MAGeT_l_labels].sum(axis=1); DKT_all_df.loc[:,'Right_cerebellar_GM_MAGeT']=DKT_all_df.loc[:,MAGeT_r_labels].sum(axis=1)
# # Save files
# save the augmented tables
des_all_df.to_csv(matched_des_all_file)
DKT_all_df.to_csv(matched_DKT_all_file)
| preproc/preproc4_Augmented-Cohort_MAGeT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ex. 1 Srinivasa Ramanujan calculates π
# +
import math
def estimate_pi():
    """Compute π with Ramanujan's 1/π series.

    Terms are accumulated until they fall below 1e-15, i.e. until the
    series has converged to double precision.
    return : value of π
    """
    prefactor = 2 * math.sqrt(2) / 9801
    total = 0
    k = 0
    while True:
        numerator = math.factorial(4 * k) * (1103 + 26390 * k)
        denominator = math.factorial(k) ** 4 * 396 ** (4 * k)
        term = prefactor * numerator / denominator
        total += term
        if abs(term) < 1e-15:
            break
        k += 1
    return 1 / total

print("The value of pi calculated by formula is",estimate_pi())
print("The error is",math.pi-estimate_pi())
# -
# # Ex. 2 Happy numbers
#
# # 2a. function isHappy(n) that checks whether a number is happy or un-happy.
# +
def ishappy(n):
    """Return True when *n* is a happy number.

    Repeatedly replaces n with the sum of the squares of its digits;
    happy numbers reach 1, unhappy numbers fall into a cycle, detected
    via the set of values already visited.
    parameter: positive integer
    return: True if happy number
    """
    seen = set()
    while n != 1:
        if n in seen:
            return False
        seen.add(n)
        n = sum(int(digit) ** 2 for digit in str(n))
    return True
# -
ishappy(7)
# # 2b. happy numbers from 1 to 100
print("The Happy numbers between 1 and 100 are as follows:")
[x for x in range(1,101) if ishappy(x)]
# # 2c. problem in two different ways
def happy(number, past = None):
    """Recursively decide whether *number* is a happy number.

    input: positive integer (plus, internally, the set of values already
           visited, used for cycle detection)
    return: True if happy number else False
    """
    def happy_calc(value):
        # Sum of the squares of the digits of value.
        return sum(int(digit) ** 2 for digit in str(value))

    if past is None:  # idiom fix: compare to None with `is`, not `==`
        past = set()
    number = happy_calc(number)
    if number == 1:
        return True
    if number in past:
        return False
    past.add(number)
    return happy(number, past)

happy(154)
# # 2d. taking the sum of cubes of the digits instead
# +
def determineHappiness(n):
    """ Classify *n* by iterating the sum-of-cubed-digits map.

    Runs at most six iterations (the original loop bound) and returns:
      "happy"        - the iteration reached 1
      "almost_happy" - the last computed digit-cube sum equals the
                       starting number (n maps back to itself)
      "unhappy"      - anything else
    input: positive integer
    return: happy / unhappy / almost_happy
    """
    initial = n
    for _ in range(6):  # same bound as `iteration_value <= 5` originally
        if n == 1:
            break
        last_sum = sum(int(digit) ** 3 for digit in str(n))
        n = last_sum
    if n == 1:
        return "happy"
    if n >= 1 and initial == last_sum:
        return "almost_happy"
    return "unhappy"

determineHappiness(154)
# -
# # 2d(i). determine whether a number is happy, almost happy, or unhappy
# Classify a few sample values with the cube-digit variant.
determineHappiness(154)
determineHappiness(153)
determineHappiness(100)
# # 2d(ii). all happy numbers from 1 to 1000
print("Happy Numbers between 1 and 1000 are as follows:")
[i for i in range(1,1001) if determineHappiness(i)=='happy']
# # numbers in the range 1 to 1000 are almost happy
print("Almost Happy Numbers between 1 and 1000 are as follows:")
almost_happy_list=[i for i in range(1,1001) if determineHappiness(i)=='almost_happy']
print("There are {} almost happy numbers between 1 and 1000 and they are: \n{}".
format(len(almost_happy_list),almost_happy_list))
# # Ex. 3 The Birthday Paradox
# # a.function called has_duplicates
# +
def has_duplicates(input_list):
    """Return True if *input_list* contains at least one repeated element.

    The input list is not modified. Elements must be hashable (the
    original dict-of-counts version required this too).
    """
    # Comparing the list length with the size of its set is O(n), unlike
    # calling list.count for every element (O(n^2)).
    return len(set(input_list)) != len(input_list)

l=['1','b','c','3']
print(has_duplicates(l))
print("The non-modified original list is",l)
# -
# # b.i.probability on the basis generating 10 000 trials of n= 27birthdays
# +
import random

def has_duplicates(input_list):
    """Return True if *input_list* contains at least one repeated element.
    The input list is not modified; elements must be hashable."""
    # O(n) set-size comparison instead of the original O(n^2)
    # list.count of every element.
    return len(set(input_list)) != len(input_list)

def random_bdays(n):
    """Returns a list of integers between 1 and 365, with length n.
    n: int
    returns: list of int"""
    return [random.randint(1, 365) for _ in range(n)]

num_students = 27
num_simulations = 10000
# Count the simulated classes of num_students that contain a shared
# birthday (the original filter/lambda ignored its argument; a plain
# generator sum says the same thing directly).
number_of_matches_passed = sum(
    1 for _ in range(num_simulations) if has_duplicates(random_bdays(num_students))
)
print("After {} simulations with {} students:-".format(num_simulations,num_students))
print('there were %d simulations with at least one match.' % number_of_matches_passed)
# -
# # b.ii.approximated probability
# Approximation: P(shared birthday) ~ 1 - exp(-n^2 / (2m)) for n people
# and m possible birthdays.
m= 365
n= 27
print("Tha approximated probability is",1-math.e**(-(n**2)/(2*m)))
# # b.ii.exact probability
# +
import numpy as np

# Exact: 1 - prod_{k=1..n} (m-k+1)/m. np.product was removed in NumPy 2.0;
# np.prod is the supported spelling.
a = np.prod([(m - n + 1) / m for n in range(1, n + 1)])
print("Tha exact probability is", 1 - a)
# -
# # b.iii. at least two people have a non-unique birthday in their class.
# +
import numpy as np

# Exact birthday-collision probability for 27 people:
#   1 - (365 * 364 * ... * 339) / 365**27
# Computed in float64: the original integer np.product of 27 factors of
# roughly 365 silently overflows 64-bit integers and gives a wrong answer.
num = np.prod(np.arange(365, 338, -1, dtype=np.float64))
den = np.prod(np.full(27, 365.0))
probability_value = 1 - (num / den)
probability_value
# Reference: https://medium.com/i-math/the-birthday-problem-307f31a9ac6f
# -
# # Ex. 4 Making triangles
# +
import random as rand
def istriangle(a, b, c):
    """Triangle inequality check for three lengths.
    input: 3 measurements
    return: True if every pair of sides exceeds the third"""
    return a + b > c and a + c > b and b + c > a
def break_one_stick():
    """Break a unit-length stick at two uniform random points x and y.
    return: True if the three resulting pieces can form a triangle"""
    x, y = rand.uniform(0, 1.0), rand.uniform(0, 1.0)
    # Ensure x <= y so the three pieces are x, y-x and 1-y.
    if x > y:
        x, y = y, x
    return istriangle(x, y - x, 1 - y)
#x=[break_one_stick() for i in range(10)]
trials = 1000000
"""lambda function to simulate for number of trials as provided"""
# NOTE(review): the lambda ignores its argument; it simply repeats
# break_one_stick() once per trial. The theoretical probability is 1/4.
number_of_triangles_passed=len(list(filter(lambda x: break_one_stick() , range(trials))))
print("Number of triangles formed overall is",number_of_triangles_passed)
print("Estimated probability = {0:0.4f}".format(number_of_triangles_passed/trials))
# -
# # Ex. 5 Estimating π
# +
import math
import random
import pandas as pd

def estimate_π(n):
    """Monte-Carlo estimate of π.

    Samples n points uniformly in the square [-1, 1] x [-1, 1] and uses
    the fraction that lands inside the unit circle (area ratio π/4).
    input: positive integer (number of sampled points)
    return: approximate value of π
    """
    points = [(random.uniform(-1, 1), random.uniform(-1, 1)) for _ in range(n)]
    points_inside_circle = [(x, y) for (x, y) in points if x**2 + y**2 <= 1]
    return 4 * len(points_inside_circle) / len(points)

# Map each trial count to its estimate. BUG FIX: the original list
# contained 10000 twice; duplicate dict-comprehension keys collapse
# (last one wins), so the table silently lost a row.
D = {n: estimate_π(n) for n in [10, 100, 1000, 10000, 100000]}

# Absolute error of each estimate against math.pi.
Error = [abs(math.pi - x) for x in D.values()]

# Tabulate trial counts, estimates and errors.
df = pd.DataFrame(D.items(), columns=['Various trials', 'Estimated_π_values'])
df['Error'] = Error
print(df)
# -
# Interpretation:
# 1. Higher the trials, lower the error value inturn higher the accuracy.
# 2. We can conclude that, correct values of π are obtained at larger trials.
# # Ex. 6 Anagrams
# +
def signature(s):
    """Return the canonical form of *s*: its characters in sorted order.

    Two strings are anagrams exactly when their signatures are equal.
    (The original docstring claimed "unique characters", but duplicates
    are kept - and must be, for anagram grouping to work.)
    input: a string s
    return: string of the characters of s, sorted
    """
    return "".join(sorted(s))

def all_anagrams(filename):
    """Group the words of a word-list file by their anagram signature.

    filename: string filename of the word list (one word per line)
    Returns: a map from signature to the list of words with that signature.
    """
    d = {}
    # `with` closes the file (the original leaked the handle);
    # setdefault resolves the original TODO: one lookup instead of a
    # membership test plus separate insert/append branches.
    with open(filename) as word_file:
        for line in word_file:
            word = line.strip().lower()
            d.setdefault(signature(word), []).append(word)
    return d
def print_anagram_sets(d):
    """Print every anagram group in *d* that holds more than one word.

    d: map from signature to list of anagram words
    """
    for group in d.values():
        if len(group) > 1:
            print(group)
def print_anagram_sets_in_order(d):
    """Print the anagram groups of *d*, largest group first.

    Groups are ordered by comparing (size, contents) tuples in reverse,
    exactly as the original (length, word-list) sort did.
    d: map from signature to list of anagram words
    """
    sized_groups = [(len(group), group) for group in d.values() if len(group) > 1]
    for _, group in sorted(sized_groups, reverse=True):
        print(group)
def filter_length(d, n):
    """Select only the words in d that have n letters.

    d: map from word to list of anagrams
    n: integer number of letters
    returns: new map from word to list of anagrams
    """
    return {word: anagrams for word, anagrams in d.items() if len(word) == n}
# Build the anagram map from the bundled word list.
# NOTE(review): hard-coded absolute Windows path (with a placeholder user
# name) - replace with a path relative to this notebook before running.
anagram_map = all_anagrams(r'C:\Users\<NAME>\Downloads\SoSe 2020\programming-lab-ss20\programming-lab-ss20\words.txt')
# -
# # 6a. all the sets of words that are anagrams
print_anagram_sets(anagram_map)  # every multi-word group, dictionary order
# # 6b. the largest set of anagrams first
print_anagram_sets_in_order(anagram_map)  # groups sorted by size, descending
# # 6c. set of 8 letters contains the most anagrams
# Restrict to 8-letter signatures, then print those groups largest-first.
eight_letters = filter_length(anagram_map, 8)
print_anagram_sets_in_order(eight_letters)
| solutions/Ruppa Surulinathan-plab-01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import multivariate_normal
from math import cos, sin, pi
import matplotlib.pyplot as plt
# -
# # Generate multivariate distributions
# Generate random samples from a multivariate normal distribution.
#
# The multivariate normal, multinormal or Gaussian distribution is a generalization of the one-dimensional normal distribution to higher dimensions. Such a distribution is specified by its mean and covariance matrix.
# +
mean = [0, 0]
cov = [[1, 0], [0, 100]] # diagonal covariance
x, y = np.random.multivariate_normal(mean, cov, 5000).T
plt.figure(1)
plt.plot(x, y, 'x')
plt.axis('equal')
plt.show()
# -
# --- end example ---
# # Problem #1
# Setting up the weights of each class
# +
n = 200
piA = 3/4
piB = 1/4
nA = int(n*piA)
nB = int(n*piB)
# -
# Class 0: Gaussian mixture of two components
# Component A:
# Eigenvectors and eigenvalues
# Eigendecomposition of the covariance for Component A.
# U holds the (unit) eigenvectors as ROWS.
evec1 = np.transpose(np.array([1,0]))
evec2 = np.transpose(np.array([0,1]))
U = np.vstack((evec1, evec2))
print("U:" + str(U))
eval1 = 4
eval2 = 1
evals = np.array([eval1, eval2])
Lambda = np.diag(evals)
print("L:" + str(Lambda))
# Covariance: C = U^T Lambda U (matrix products).  The original used the
# ELEMENTWISE product U*Lambda*U.T, which only gave the right answer here
# because U happens to be the identity; it is wrong for rotated eigenvectors.
C = U.T @ Lambda @ U
print(C)
# Putting it all together for Component A
# +
meanA = [0,0]
covA = C.T
xA, yA = np.random.multivariate_normal(meanA, covA, nA).T
plt.figure(1)
plt.plot(xA, yA, 'x')
plt.title('Class 0, Component A')
plt.axis('equal')
plt.show()
# -
# Component B
# +
evec1 = np.transpose(np.array([1,0]))
evec2 = np.transpose(np.array([0,1]))
U = np.vstack((evec1, evec2))
print("U:" + str(U))
eval1 = 1
eval2 = 4
evals = np.array([eval1, eval2])
Lambda = np.diag(evals)
print("L:" + str(Lambda))
C = U*Lambda*np.transpose(U)
print("Covariance matrix: " + str(C))
meanB = [6,4]
covB = C.T
xB, yB = np.random.multivariate_normal(meanB, covB, nB).T
plt.figure(1)
plt.plot(xB, yB, 'o', c='red')
plt.title('Class 0, Component B')
plt.axis('equal')
plt.show()
# -
# ## Class 0: Gaussian mixture of two components
# +
plt.figure(1)
plt.plot(xA, yA, 'x', c='blue', label='A')
plt.plot(xB, yB, 'o', c='red', label='B')
plt.suptitle('Problem 1: Generating 2D synthetic data', fontweight='bold')
plt.title('Class 0: Gaussian mixture of A and B components')
plt.axis('equal')
plt.legend()
plt.show()
# -
# Write a function to generate Gaussian mixtures with two components
#
# gaussian mixture = [number of samples, weight, [mean], [eigenvalue], [eigenvector1], [eigenvector2] ]
# +
# number of samples
n = 200
# weights of each components
weightA = 2/3
weightB = 1/3
# mean
meanA = np.array([2, 3])
meanB = np.array([2, -2])
# theta
thetaA = pi/4
thetaB = pi/6
# eigenvalues
eigvalA = np.array([1, 2])
eigvalB = np.array([4, 1])
# eigenvectors
eigvecA1 = np.array([cos(thetaA), sin(thetaA)])
eigvecA2 = np.array([-sin(thetaA), cos(thetaA)])
eigvecB1 = np.array([cos(thetaB), sin(thetaB)])
eigvecB2 = np.array([-sin(thetaB), cos(thetaB)])
eigvecA = np.vstack((eigvecA1, eigvecA2))
eigvecB = np.vstack((eigvecB1, eigvecB2))
g_mix1 = np.array([n, weightA, meanA, eigvalA, eigvecA], dtype='object')
g_mix2 = np.array([n, weightB, meanB, eigvalB, eigvecB], dtype='object')
g_mix = np.vstack((g_mix1, g_mix2))
def generate_gaussian_mixture(gauss_mix):
    """Draw 2-D samples from each component of a Gaussian mixture.

    gauss_mix: iterable of components, each indexable as
        [n_total, weight, mean(2,), eigenvalues(2,), eigenvectors(2x2, rows)]
    returns: (x, y) -- object arrays holding one 1-D sample array per component
    """
    xs = []
    ys = []
    for comp in gauss_mix:
        # step 0 - number of samples this component contributes
        num_samples = int(comp[0] * comp[1])
        # step 1 - U has the (unit) eigenvectors as ROWS
        U = np.vstack((comp[4][0].T, comp[4][1].T))
        # step 2 - diagonal matrix of eigenvalues
        Lambda = np.diag(comp[3])
        # step 3 - reconstruct the covariance with MATRIX products:
        # C = U^T Lambda U (the columns of U^T are the eigenvectors).
        # The original used the elementwise product U * Lambda * U.T, which is
        # wrong for rotated eigenvectors and produced the non-positive-
        # semidefinite matrices that the commented-out "floating point
        # negative" hack tried to patch over.
        C = (U.T @ Lambda @ U).astype('float64')
        print("Covariance matrix: " + str(C))
        # step 4 - sample the component (C is symmetric, so no .T needed)
        xg, yg = np.random.multivariate_normal(comp[2], C, num_samples).T
        xs.append(xg)
        ys.append(yg)
    return np.asarray(xs, dtype='object'), np.asarray(ys, dtype='object')
# -
x, y = generate_gaussian_mixture(g_mix)
# +
# Plot each mixture component with its own colour/marker.
colors = ['red', 'blue']
labels = ['C', 'D']
markers = ['x', 'o']
plt.figure(1)
# NOTE: the original for-line ended in a stray "a" after the colon, which
# made this whole cell a syntax error; the stray token has been removed.
for i in range(np.shape(x)[0]):
    plt.scatter(x[i], y[i], c=colors[i], marker=markers[i], label=labels[i])
plt.suptitle('Problem 1: Generating 2D synthetic data', fontweight='bold')
plt.title('Class 1: Gaussian mixture of C and D components')
plt.axis('equal')
plt.legend()
plt.show()
# -
# ---
# --- break space ---
# ---
# # Appendix of random scripts
# +
evec1 = np.transpose(np.array([1,0]))
evec2 = np.transpose(np.array([0,1]))
U = np.vstack((evec1, evec2))
eval1 = 1
eval2 = 4
evals = np.array([eval1, eval2])
Lambda = np.diag(evals)
C = U*Lambda*np.transpose(U)
meanB = [6,4]
covB = C.T
x, y = np.random.multivariate_normal(meanB, covB, nB).T
colors = ['red', 'blue']
labels = ['C', 'D']
markers = ['x', 'o']
plt.figure(1)
plt.scatter(x, y, c=colors[0], marker=markers[0], label=labels[0])
plt.suptitle('Problem 1: Generating 2D synthetic data', fontweight='bold')
plt.title('Class 1: Covariance is not positive-semidefinite')
plt.axis('equal')
plt.legend()
plt.show()
# -
# ---
# # Final Answer
# +
eps=1e-8
# define the number of samples
n = 200
# define the mean points for each of the synthetic cluster centers
mu_0a = [0,0]
mu_0b = [6,4]
mu_1c = [2,3]
mu_1d = [2,-2]
t_means = [mu_0a, mu_0b, mu_1c, mu_1d]
# t_means shape: KxN where K is the number of components and N is number of dimensions (2)
# for each cluster center, create a positive semi definite covariance matrix
cov_0a = np.array([[4,0],[0,1]])
cov_0b = np.array([[1,0],[0,4]])
cov_1c = np.array([[0.5,0],[0,1]])
cov_1d = np.array([[3,0],[0,0.75]])
t_covs = [cov_0a, cov_0b, cov_1c, cov_1d]
# t_covs shape: KxNxN where the NxN is the covariance matrix of each component K.
# weights
weights = [3/4, 1/4, .67, 1/3]
XX = []
X = []
for mean, cov, w in zip(t_means, t_covs, weights):
num_samples = int(n*w)
x = np.random.multivariate_normal(mean, cov, num_samples) # create normal gaussian distributions
X += list(x) # stack points into dataset list
XX.append(x)
X = np.array(X) # make numpy array (speed)
# -
# ## Plot the data
# +
font = {'family' : 'Tahoma',
'weight' : 'bold',
'size' : 22}
plt.rc('font', **font)
colors = ['tab:blue', 'tab:blue', 'tab:green', 'tab:green', 'yellow', 'red', 'brown', 'grey']
ccolors = ['darkblue', 'darkblue', 'lime', 'lime']
ecolors = ['blue', 'blue', 'green', 'green']
labels = ['Class 0', 'Class 0', 'Class 1', 'Class 1']
# +
fig, ax = plt.subplots(figsize=(10,8))
# Iterate over the generated components; the original used an undefined
# name `k` here (NameError) -- len(XX) is the actual component count.
for c in range(len(XX)):
    if c == 0 or c == 2:
        # label only the first component of each class so the legend shows one entry per class
        plt.scatter(XX[c][:,0], XX[c][:,1], color=ccolors[c], s=100, edgecolors=ecolors[c], marker='o', alpha=0.5, label=labels[c])
    else:
        plt.scatter(XX[c][:,0], XX[c][:,1], color=ccolors[c], s=100, edgecolors=ecolors[c], marker='o', alpha=0.5)
plt.xlabel(r'$\lambda_{1}$')
plt.ylabel(r'$\lambda_{2}$')
plt.legend()
plt.show()
# -
# ---
print(np.shape(XX))
# +
XX_class0 = np.vstack((XX[0], XX[1]))
XX_class1 = np.vstack((XX[2], XX[3]))
print(np.shape(XX_class0))
print(np.shape(XX_class1))
XXc = np.vstack((XX_class0, XX_class1))
print("Ground truth data stacked by class: ", np.shape(XXc))
# -
# ## Write datapoints to file
savePath = '/Users/mackenzie/Box/2020-2021/Classes/S21 ECE283 - Machine Learning/HW1/results/hw1prob1_datapoints.txt'
np.savetxt(savePath, XXc, fmt='%5f', delimiter=',')
| jupyter/ECE283 HWs/HW1/HW #1, Problem #1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Alternating minimization
# Reconstruction with alternating minimization (optionally using strobed-illumination data for initialization)
# +
# %matplotlib notebook
# %load_ext autoreload
# %autoreload 2
import numpy as np
import scipy as sp
import scipy.misc as misc
import matplotlib.pyplot as plt
import time
import sys
import itertools
import scipy.io as sio
import datetime
import os, glob
# MD imports
from libwallerlab.opticsalgorithms.motiondeblur import blurkernel
from libwallerlab.opticsalgorithms.motiondeblur import kernel_objectives
from libwallerlab.opticsalgorithms.motiondeblur import blurkernelplot as bkplt
# Libwallerlab imports
import libwallerlab.operators as ops
import libwallerlab.operators.base as bops
from libwallerlab.utilities import opticstools, displaytools, iotools
from libwallerlab.algorithms import iterative, objectivefunctions, regularizers
# +
ops.setDefaultBackend('numpy') # arrayfire or numpy
ops.setDefaultDatatype('complex64')
global_dtype = ops.config.default_dtype
global_backend = ops.config.default_backend
np_dtype = bops.getNativeDatatype(ops.config.default_dtype, ops.config.default_backend)
# -
# ## Load Data
# +
dataset_path_stem = '/Users/zfphil/Dropbox/Datasets/motiondeblur_datasets/'
dataset_path_stem = 'D:\\Zack\\Dropbox\\Datasets\\motiondeblur_datasets\\'
dataset_path_stem = 'D:\\Zack\\05-22-18-MotionDeblur\\'
dataset_path_stem = '/Users/zfphil/Desktop/'
dataset_path_stem = 'J:\\data\\'
dataset_path_stem = '/home/sarah/Dropbox/motiondeblur_datasets/'
# dataset_path_stem = 'J:/Dropbox/Datasets/motiondeblur_datasets/'
dataset_path = 'res_target_color_coded_raster_motiondeblur_2018_05_22_19_17_45' # Good dataset
# dataset_path = 'res_target_color_strobe_raster_motiondeblur_2018_05_22_19_17_18'
dataset = iotools.Dataset(dataset_path=dataset_path_stem + dataset_path)
# Expand metadata if necessary
dataset.show('position')
# +
# TODO
# dataset_path_strobed = dataset_stem+'MD-3-15-18/USAF_strobe_motiondeblur_2018_03_15_19_30_51'
# dataset_strobed = iotools.Dataset(dataset_path=dataset_path_strobed)
# dataset_strobed.show('position')
# -
# ## Processing
# Demosaic Frames, subtract background, select linear subsegment
# +
if dataset.metadata.camera.is_color:
color_channel = 0
frame_list_new = np.zeros((len(dataset.frame_list),
int(dataset.frame_list[0].shape[0]/2), int(dataset.frame_list[0].shape[1]/2)),
dtype=np.float)
for index, frame in enumerate(dataset.frame_list):
frame_list_new[index,:,:] = displaytools.demosaicFrameDeconv(frame)[:,:,color_channel]
dataset.frame_list = frame_list_new
dataset.subtractBackground()
linear_segment_index = 3
frame_indicies_to_process = []
total_frame_count = len(dataset.frame_list)
for index, frame_state in enumerate(dataset.frame_state_list):
if frame_state['position']['common']['linear_segment_index'] == linear_segment_index:
frame_indicies_to_process.append(index)
dataset.frame_list = dataset.frame_list[frame_indicies_to_process,:,:]
dataset.frame_state_list = [dataset.frame_state_list[i] for i in frame_indicies_to_process]
print('Segment %d covers %d of %d frames.' % (linear_segment_index, len(frame_indicies_to_process), total_frame_count))
# -
# ## Generate Blur Kernels from Metadata
# This function should return 4 things:
# - object size: the full size of the object
# - image size: the size of individual images
# - blur_kernel_list: the cropped blur kernels (to their own support)
# - crop_roi_list: roi's which indicate where the blur kernel should be placed
# +
# Get blur_kernel_list, object_size, and image_size
blurkernel_info = blurkernel.blurKernelsFromDataset(dataset, flip_illumination_sequence=False,
flip_kernel_y = True, debug=False,
use_phase_ramp=False, return_positions=True)
object_size, image_size, blur_kernel_list_color, blur_kernel_roi_list, position_list, illum_list = blurkernel_info
blur_kernel_roi_list = list(reversed(blur_kernel_roi_list))
# Sum the color channels of all blur kernels (TEMPORARY)
blur_kernel_list = []
for index, blur_kernel in enumerate(blur_kernel_list_color):
first_channel = list(blur_kernel.keys())[0]
new_kernel = np.zeros(blur_kernel_roi_list[index].size(), dtype=blur_kernel[first_channel].dtype)
illum = illum_list[index]
for channel in blur_kernel:
new_kernel += blur_kernel[channel]
blur_kernel_list.append(new_kernel)
# Plot measurement support
bkplt.plotBlurKernelSupport(blur_kernel_roi_list)
# # displaytools.show3dArray(blur_kernel_list, figsize=(10,8))
# plt.figure(figsize=(9,7))
# plt.imshow(np.abs(blur_kernel_list[0]))
# -
np.power(image_size, 2)
print(len(position_list[0]))
print(len(illum_list[0]))
# # Single Frame Recovery
# Options
frame_index = 3
omit_inner_ft = False
# +
# get illumination and positions
illuminations = illum_list[frame_index]
positions = position_list[frame_index]
illums = []; pos_y = []; pos_x = []
for illum, pos in zip(illuminations,positions):
if illum[0] > 0:
illums.append(illum[0])
pos_y.append(pos[0][0])
pos_x.append(pos[0][1])
n_illum = len(illums)
# +
# get basis functions for path
n_basis_splines = [4,4]
spl_y = blurkernel.get_basis_splines(n_illum, n_basis_splines[0])
spl_x = blurkernel.get_basis_splines(n_illum, n_basis_splines[1])
w_y = blurkernel.positions_to_splines(spl_y, pos_y)
w_x = blurkernel.positions_to_splines(spl_x, pos_x)
weights_recorded = np.hstack([w_y, w_x])
# +
# the x path is not smooth -- hard to fit spline
# TODO: fix x positions?
yhat = spl_y.dot(w_y)
xhat = spl_x.dot(w_x)
plt.figure(figsize=(9,3)); plt.subplot(1,2,1)
plt.plot(yhat, label='final');
plt.plot(pos_y, label = 'true'); plt.legend()
plt.subplot(1,2,2); plt.plot(xhat, label='final');
plt.plot(pos_x, label = 'true'); plt.legend()
# -
# Get measurement and normalize
y = dataset.frame_list[frame_index][dataset.metadata.camera.roi.y_start:dataset.metadata.camera.roi.y_end,
dataset.metadata.camera.roi.x_start:dataset.metadata.camera.roi.x_end].copy()
y_mean = np.mean(y)
y = bops.changeBackend(bops.astype(y / y_mean, global_dtype), global_backend)
image_size = y.shape
# +
weights_initial = (weights_recorded+np.random.uniform(size=weights_recorded.shape, high=1, low=-1)).astype(np_dtype)
weights_initial = np.expand_dims(weights_initial,1)
object_initial = np.ones(image_size).astype(np_dtype)
object_initial = object_initial / np.sum(object_initial)
objectives, update_fns = blurkernel.getAutocalibrationFns(y, image_size, illums, \
spl_y, spl_x, weights_initial, object_initial,
dtype=global_dtype, backend=global_backend,
verbose=True)
its_per_step = [1,1]
step_sizes = [1/np.prod(image_size),1/np.prod(image_size)]
#step_sizes = [None, None]
initializations=[object_initial, weights_initial.copy()]
# -
objectives[0].gradient(object_initial)
objectives[1].gradient(weights_initial)
# +
object_recovered, weights_recovered = iterative.AlternatingGradient(objectives, update_fns, its_per_step,
step_sizes=step_sizes, verbose=True,
initializations=initializations,
total_it=250)
# -
# +
# Get blur kernel and support
blur_kernel = bops.changeBackend(bops.astype(blur_kernel_list[frame_index], global_dtype), global_backend)
blur_kernel /= bops.scalar(bops.sumb(blur_kernel))
kernel_support_roi = blur_kernel_roi_list[frame_index]
# Store sizes
measurement_size = bops.shape(y)
kernel_size = kernel_support_roi.size()
# -
# +
# Generate pad operator for image support -> measurement
R0 = ops.Crop(kernel_size, measurement_size, dtype=global_dtype, backend=global_backend, pad_value='reflect')
# Create F.T. operator
F = ops.FourierTransform(kernel_size, dtype=global_dtype, backend=global_backend, normalize=True, center=True, pad=True)
# Diagonalize padded kernel
K = ops.Diagonalize(F * blur_kernel)
# Make forward model (sans outer Fourier Transform)
A = R0 * F.H * K * F
# Initialization: choosing a "good" initial value will help in convergence
initialization = bops.ones(A.N, global_dtype, global_backend)
# Define Objective Function
objective = objectivefunctions.L2(A, y)
# Define Regularizer
# regularizer = 10e-9 * ops.L2Norm(kernel_size)
regularizer = 1e-6 * ops.L1Norm(kernel_size) * ops.FourierTransform(kernel_size)
#regularizer = 1e-6 * regularizers.WaveletSparsity(kernel_size, wavelet_type='db4', extention_mode='symmetric', level=None, use_cycle_spinning=True, axes=None)
# Define Cost Function
cost_function = objective + regularizer
# FISTA implementation
# Note that if objective function is smooth, this is just gradient descent with nesterov acceleration
iteration_count = 50
object_recovered = iterative.Fista(cost_function).solve(initialization=initialization,
step_size=1e6,
nesterov_enabled=True,
iteration_count=iteration_count,
display_type='text',
display_iteration_delta=max((iteration_count // 10),1))
# -
object_recovered_crop = object_recovered
# y = R0.H * R0 * y
plt.figure(figsize=(10,5))
plt.subplot(131)
plt.imshow(np.abs(bops.changeBackend(y, 'numpy')), cmap='gray')
plt.colorbar()
plt.title('Raw Data')
plt.subplot(132)
plt.imshow(np.abs(bops.changeBackend(object_recovered_crop, 'numpy')), cmap='gray')
plt.title('Reconstruction')
plt.colorbar()
plt.subplot(133)
plt.imshow(np.abs(bops.changeBackend(A * object_recovered_crop, 'numpy')), cmap='gray')
plt.title('Forward propagation of Recon')
plt.colorbar()
# +
# Determine ROI
#x_start=1000, x_end=1500, y_start=500, y_end=750
dataset.metadata.camera.roi = iotools.Roi(x_start=500, x_end=1500, y_start=500, y_end=1500)
vmax=np.mean(dataset.frame_list[0]) + 4*np.std(dataset.frame_list[0])
frame_index = 3
plt.figure(figsize=(7,3))
plt.subplot(121)
plt.imshow(dataset.frame_list[frame_index], cmap='gray', vmax=vmax)
plt.subplot(122)
roi_image = dataset.frame_list[frame_index][dataset.metadata.camera.roi.y_start:dataset.metadata.camera.roi.y_end,
dataset.metadata.camera.roi.x_start:dataset.metadata.camera.roi.x_end]
plt.imshow(roi_image,cmap='gray', vmax=vmax)
# +
# same ROI for the strobed data -- comparison
#x_start=1000, x_end=1500, y_start=500, y_end=750
dataset_strobed.metadata.camera.roi = iotools.Roi(x_start=500, x_end=1500, y_start=500, y_end=1500)
vmax=np.mean(dataset.frame_list[0]) + 4*np.std(dataset_strobed.frame_list[0])
frame_index = 3
plt.figure(figsize=(7,3))
plt.subplot(121)
plt.imshow(dataset_strobed.frame_list[frame_index], cmap='gray', vmax=vmax)
plt.subplot(122)
roi_image_strobe = dataset_strobed.frame_list[frame_index][dataset.metadata.camera.roi.y_start:dataset.metadata.camera.roi.y_end,
dataset.metadata.camera.roi.x_start:dataset.metadata.camera.roi.x_end]
plt.imshow(roi_image_strobe,cmap='gray', vmax=vmax)
# -
# # Initializing the Autocalibration Problem
# ## Reading the recorded blur kernels
# +
# Generate Blur Kernels from Metadata
(object_size, image_size_blur_kernel, _, position_list, illum_list) = blurkernel.blurKernelsFromDataset(dataset,
debug=False,
use_phase_ramp=False,
return_positions = True)
# -
# ## Independent points Alternating Min
#
# todo: need to make work for multiframe as well
# TODO got rid of this method
illuminations, shifts = blurkernel.formatIllumShift(position_list, illum_list, [frame_index], roi_image.shape)
# +
y = roi_image
y = y.reshape(-1).astype(np.complex64)
y /= np.mean(y)
objectives, setting_fns, initialization, fh_ops = \
blurkernel.constructAlternatingMin(illuminations, shifts, roi_image.shape, 1, y)
# -
O = fh_ops[1]
O.latex()
# +
objective_object, objective_shift = objectives
A_object_set_shift, A_shift_set_object = setting_fns
obj, shifts = initialization
object_recon, shift_recon = iterative.AlternatingGradient([objective_object, objective_shift],
[A_object_set_shift, A_shift_set_object], [2,1],
initializations = [obj.reshape(-1), shifts], total_it=1)
# -
object_recon
# +
image_size = roi_image.shape
F, Hfull = fh_ops
plt.figure(figsize=(10,5))
plt.subplot(221)
plt.imshow(np.abs(object_recon).reshape(image_size), cmap='gray')
plt.title('Recovered Object')
h_est = F.H * Hfull * shifts
plt.subplot(224)
plt.imshow((np.abs((h_est).reshape(image_size))), cmap='gray')
plt.title('Initial PSF')
h_est = F.H * Hfull * shift_recon
plt.subplot(222)
plt.imshow((np.abs((h_est).reshape(image_size))), cmap='gray')
plt.title('Recovered PSF')
# -
# ## Smooth path optimization
#
# TODO: implement as operators
# +
pos_parameter = recorded_shifts[:,1] # to do: this could change
def get_points_from_pos_param(pos_parameter, poly):
    """Evaluate the smooth path at each parameter value.

    pos_parameter: sequence of path parameters t, used directly as the x
        coordinates (the original TODO notes this choice could change)
    poly: object with an evaluate(t) method giving the y coordinate
    returns: flat array of interleaved coordinates [y0, x0, y1, x1, ...]
    """
    ys = []
    for t in pos_parameter:
        ys.append(poly.evaluate(t))
    stacked = np.vstack([ys, pos_parameter])
    return stacked.T.flatten()
# polynomial class as a stand in smooth function
# polynomial class as a stand in smooth function
class polynomial:
    """Polynomial c0 + sum_i coeffs[i+1] * (t - offsets[i])**(i+1)."""

    def __init__(self, coeffs, offsets):
        self.dim = len(offsets)
        assert len(coeffs) == self.dim + 1, 'incorrect number of parameters'
        self.coeffs = coeffs
        self.offsets = offsets

    def evaluate(self, t):
        """Evaluate the polynomial at parameter t."""
        total = self.coeffs[0]
        for i, off in enumerate(self.offsets):
            total = total + self.coeffs[i + 1] * (t - off) ** (i + 1)
        return total

    def parameter_grad(self, t):
        """Gradient of evaluate(t) w.r.t. coeffs[1:] then offsets.

        The constant coefficient is deliberately excluded (kept fixed).
        """
        coeff_grad = []
        offset_grad = []
        for i, off in enumerate(self.offsets):
            coeff_grad.append((t - off) ** (i + 1))
            offset_grad.append(-(i + 1) * self.coeffs[i + 1] * (t - off) ** i)
        return np.hstack([coeff_grad, offset_grad])

    def update_coeffs(self, coeffs):
        """Replace the coefficient vector (length must match)."""
        assert len(coeffs) == len(self.coeffs), 'incorrect coeffs size'
        self.coeffs = coeffs

    def update_offsets(self, offsets):
        """Replace the offset vector (length must match)."""
        assert len(offsets) == len(self.offsets), 'incorrect offsets size'
        self.offsets = offsets
# -
H = ops.PhaseRamp(image_size)
def grad_H(x, i):
    """Gradient of the phase-ramp response with respect to the 2-D shift x.

    x: length-2 sequence (y-shift, x-shift)
    i: illumination/component index (unused in this implementation)
    returns: (grad_y, grad_x) stacked over the frequency grid

    NOTE(review): the exponent carries no explicit 1j, so this is a complex
    phase ramp only if H.grid already contains the imaginary unit -- confirm
    against ops.PhaseRamp before relying on this gradient.
    """
    grad_y = H.grid[0] * np.exp((H.grid[0] * x[0] + H.grid[1] * x[1]))
    grad_x = H.grid[1] * np.exp((H.grid[0] * x[0] + H.grid[1] * x[1]))
    return np.vstack([grad_y, grad_x])
# +
# initializing straight line path
deg_poly = 3
coeffs_init = np.zeros(deg_poly)
offsets_init = np.zeros(deg_poly-1)
coeffs_init[0] = recorded_shifts[0,0]
coeffs_init[1] = pos_extent[0]
initial_path = polynomial(coeffs_init, offsets_init)
# initializing operators for gradient
F = ops.FourierTransform(image_size, normalize=True)
L2 = ops.L2Norm(image_size[0] * image_size[1], dtype=np.complex64)
y = roi_image
y = y.reshape(-1).astype(np.complex64)
y /= np.mean(y)
def A_set_object(obj):
    """Build the shift-recovery forward operator with the object held fixed.

    obj: current object estimate (flattened); it is Fourier-transformed and
    diagonalized so the returned A_shift maps shift kernels to measurements.
    Returns (A_shift, D_object); D_object is reused by the gradient routines.
    Uses module-level operators F (Fourier transform) and Hfull.
    """
    D_object = ops.Diagonalize((F * obj.reshape(-1)).reshape(image_size), label='Dobject')
    A_shift = F.H * D_object * Hfull
    return A_shift, D_object
def A_set_shift(shifts):
    """Build the object-recovery forward operator with the shifts held fixed.

    shifts: current shift-kernel estimate; its response (Hfull * shifts) is
    diagonalized in the frequency domain so the returned A_object maps an
    object estimate to the simulated measurement (F.H * D_shift * F).
    """
    D_shift = ops.Diagonalize((Hfull * shifts).reshape(image_size), label='Dshift')
    A_object = F.H * D_shift * F
    return A_object
def analytic_gradient(x, A_shift, D_object):
    """Analytic gradient of the L2 data-fit term w.r.t. the shift vector x.

    x: interleaved complex shift vector [y0, x0, y1, x1, ...]
    A_shift: forward operator with the object fixed (from A_set_object)
    D_object: diagonalized object estimate, reused to avoid recomputation
    returns: real-valued gradient with one (y, x) pair per illumination
    Reads module-level y (measurement), F (Fourier op), and illums.
    """
    inner = A_shift(x) - y  # residual against the measurement
    inner_op = (D_object.H * F * inner)
    gradients = []
    for i in range(len(illums)):
        # phase-ramp gradient for illumination i, weighted by its brightness
        gradH = illums[i] * grad_H(x[2*i:2*i+1+1], i)
        E_y = np.dot(np.conj(gradH[0]), inner_op)
        E_x = np.dot(np.conj(gradH[1]), inner_op)
        gradients.append(np.array([E_y, E_x]))
    return np.real(np.hstack(gradients))
def analytic_gradient_smooth_curve(poly, A_shift, D_object):
    """Gradient of the data-fit term w.r.t. the path polynomial's parameters.

    Chain rule: d(cost)/d(params) = (d positions / d params)^T . d(cost)/d(positions).
    Evaluation points come from the module-level pos_parameter array.
    """
    x = get_points_from_pos_param(pos_parameter, poly).astype(np.complex64)
    grad_wrt_pos = analytic_gradient(x, A_shift, D_object)
    grad_p_wrt_t = []
    for t in pos_parameter:
        grad_p_wrt_t.append(poly.parameter_grad(t))
        # x coordinates are not driven by the polynomial, so their rows are zero
        grad_p_wrt_t.append(np.zeros(grad_p_wrt_t[-1].shape)) # for x positions
    return np.real(np.array(grad_p_wrt_t).T.dot(grad_wrt_pos))
def analytic_gradient_only_horz(x, A_shift, D_object):
    """Like analytic_gradient, but with the vertical component frozen.

    The y-entry of each per-illumination gradient is forced to zero so only
    the x positions receive updates (horizontal-only motion model).
    """
    inner = A_shift(x) - y
    inner_op = (D_object.H * F * inner)
    gradients = []
    for i in range(len(illums)):
        gradH = illums[i] * grad_H(x[2*i:2*i+1+1], i)
        E_y = 0  # vertical direction deliberately frozen
        E_x = np.dot(np.conj(gradH[1]), inner_op)
        gradients.append(np.array([E_y, E_x]))
    return np.real(np.hstack(gradients))
# +
obj = np.ones(roi_image.shape).reshape(-1).astype(np.complex64)
path = polynomial(coeffs_init.copy(), offsets_init.copy())
h_est = F.H * Hfull * get_points_from_pos_param(pos_parameter, path).astype(np.complex64)
# Show object and psf
plt.figure(figsize=(11,4))
plt.subplot(121)
plt.imshow(np.abs(obj).reshape(image_size), cmap='gray')
plt.title('Initial Object')
plt.subplot(122)
plt.imshow((np.abs((h_est).reshape(image_size))), cmap='gray')
plt.title('Initial PSF')
objective_obj = lambda A_object: L2 * (A_object - y)
# +
N_iter = 50
object_recovery_iter = 1
for i in range(N_iter):
shifts = get_points_from_pos_param(pos_parameter, path).astype(np.complex64)
#print(shifts)
A_object = A_set_shift(shifts)
# update object
if i > 0:
object_recovery_iter = 1
obj = iterative.GradientDescent(objective_obj(A_object)).solve(initialization=obj,
step_size=np.prod(image_size)/50,
nesterov_enabled=True,
iteration_count=object_recovery_iter,
display_type='text',
display_iteration_delta=object_recovery_iter)
A_shift, D_object = A_set_object(obj)
# update path
path_gradient = analytic_gradient_smooth_curve(path, A_shift, D_object)
#print(path_gradient)
path.coeffs -= np.hstack([0, path_gradient[0:deg_poly-1]])
path.offsets -= np.hstack(path_gradient[deg_poly-1:])
plt.figure(figsize=(10,5))
plt.subplot(221)
plt.imshow(np.abs(obj).reshape(image_size), cmap='gray')
plt.title('Recovered Object')
plt.subplot(224)
plt.imshow((np.abs((h_est).reshape(image_size))), cmap='gray')
plt.title('Initial PSF')
h_est = F.H * Hfull * shifts
plt.subplot(222)
plt.imshow((np.abs((h_est).reshape(image_size))), cmap='gray')
plt.title('Recovered PSF')
# -
np.vstack([[1,2,3],[3,4,5]])
p = polynomial([2,3,45,6],[3,4,6])
p.parameter_grad(1)
# # Comparison With Experimental Kernels from Strobed
# +
static_roi = dataset.metadata.camera.roi
full_roi = iotools.Roi(x_start=0, x_end=dataset.frame_list[frame_index].shape[1], y_start=0,
y_end=dataset.frame_list[frame_index].shape[0])
otf = opticstools.genOtf(full_roi.size(),
dataset.metadata.system.eff_pixel_size_um,
dataset.metadata.objective.na, 0.5)
static = dataset_strobed.frame_list[frame_index] #[static_roi.y_start:static_roi.y_end,
#static_roi.x_start:static_roi.x_end].copy()
kernel_recovered = blurkernel.blurKernelRecoveryFromStatic(dataset.frame_list[frame_index],
dataset_strobed.frame_list[frame_index],
solver='iterative',
reg=None,
system_otf=otf,
iteration_count=10,
threshold=0.03)
# +
# Determine kernel size
plt.figure(figsize=(10,3))
plt.imshow(np.real(kernel_recovered).reshape(static.shape)); plt.colorbar()
# -
| notebooks/test/alternating_min_real_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <NAME>, <EMAIL>, 10/06/2017
#
# # Dialect Project: Twitter
import tweepy
import pandas as pd
import matplotlib.pyplot as plt
# Turn off pretty print; show all output
# %pprint
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# +
consumerKey= '<KEY>'
consumerSecret='<KEY>'
# Creating Authentication
auth = tweepy.OAuthHandler(consumer_key=consumerKey,
consumer_secret=consumerSecret)
# Connecting to Twitter API with the authentication
api=tweepy.API(auth)
# -
# ## #Australia
# Search for '#Australia'
result = api.search(q='%23Australia') # "%23" == "#"
len(result)
# +
# First tweet (in json format)
tweet = result[0]
# Analyzing the first tweet on all keys in the directory (except for key names beginning with "_")
for param in dir(tweet):
if not param.startswith("_"):
print("%s : %s\n" % (param, eval('tweet.'+param)))
# +
# Individual keys
tweet.lang
tweet.text
tweet.retweet_count
# Why aren't the keys below outputting anything?
tweet.place
tweet.geo
# -
#tweet.user
# Looking at individual keys under user
tweet.user.location
tweet.user.time_zone
tweet.created_at
tweet.user.name
tweet.user.screen_name
# +
results = []
#Get the first 5000 items based on the search query
for tweet in tweepy.Cursor(api.search, q='%23Australia').items(2500):
results.append(tweet)
# Verify the number of items returned
print(len(results))
# -
# Checking for duplicate Tweets
tweet_ids=[x.id for x in results]
orig_tweet_ids=set(tweet_ids)
len(tweet_ids) == len(orig_tweet_ids)
len(tweet_ids)
help(results.remove)
# +
tweet_texts=[x.text for x in results]
len(tweet_texts)==len(set(tweet_texts))
len(set(tweet_texts))
len(results)
# +
# Remove tweets whose text duplicates an earlier tweet.  The original
# called results.remove(x) while iterating over results, which skips the
# element following each removal and therefore leaves duplicates behind.
seen_texts = set()
deduped = []
for x in results:
    if x.text not in seen_texts:
        seen_texts.add(x.text)
        deduped.append(x)
results = deduped
len(results)
# +
# Converts given tweet list into Pandas DataFrame, consisting of values only
def toDataFrame(tweets):
    """Convert a list of tweepy tweet objects into a pandas DataFrame.

    tweets: list of tweet objects exposing tweet fields and a .user object
    returns: DataFrame with one row per tweet and columns for the tweet
    fields plus selected fields of the posting user.
    """
    # (column name, extractor) pairs -- the order defines the column order
    columns = [
        ('tweetID', lambda t: t.id),
        ('tweetText', lambda t: t.text),
        ('tweetRetweetCt', lambda t: t.retweet_count),
        ('tweetSource', lambda t: t.source),
        ('tweetCreated', lambda t: t.created_at),
        ('userID', lambda t: t.user.id),
        ('userScreen', lambda t: t.user.screen_name),
        ('userName', lambda t: t.user.name),
        ('userCreateDt', lambda t: t.user.created_at),
        ('userDesc', lambda t: t.user.description),
        ('userFollowerCt', lambda t: t.user.followers_count),
        ('userFriendsCt', lambda t: t.user.friends_count),
        ('userLocation', lambda t: t.user.location),
        ('userTimezone', lambda t: t.user.time_zone),
    ]
    DataSet = pd.DataFrame()
    for name, extract in columns:
        DataSet[name] = [extract(tweet) for tweet in tweets]
    return DataSet
#Pass the tweets list to 'toDataFrame' to create the DataFrame
DataSet = toDataFrame(results)
# -
# Verify DataFrame:
DataSet.head(10)
DataSet.tail(5)
DataSet.loc[(DataSet['userLocation']=="Melbourne")&(DataSet['userTimezone']=="Melbourne"),:]
print("LOCATION")
DataSet["userLocation"].value_counts()
print("TIMEZONE")
DataSet["userTimezone"].value_counts()
# Timezone is better to look at than Location, because people can input their own Location so it's harder to choose tweets by location, whereas with timezone Sydney, Brisbane, and Melbourne seem to be the only ones that show up.
# How many of these tweets are from tourists?
# +
# Removing rows with "None" in "userTimezone"
DataSet = DataSet[DataSet.userTimezone.notnull()]
# Rows remaining
len(DataSet)
# Percentage of rows remaining
#len(DataSet)/2500*100
# -
# Count tweets per time zone for the top 10 time zones
DataSet['userTimezone'].value_counts()
# Bar Graph of Time Zone data
plt.rcParams['figure.figsize'] = (15, 5)
DataSet['userTimezone'].value_counts().plot(kind='bar')
# Graph Labeling
plt.xlabel('Timezones')
plt.ylabel('Tweet Count')
plt.title('Top 10 Timezones tweeting about #Australia')
# +
sydney = DataSet.loc[DataSet['userTimezone'] == 'Sydney', :]
sydney.head(2)
brisbane = DataSet.loc[DataSet['userTimezone'] == 'Brisbane',:]
brisbane.head(2)
melbourne = DataSet.loc[DataSet['userTimezone'] == 'Melbourne',:]
melbourne.head(2)
# -
Aust_result = pd.concat([sydney,melbourne,brisbane])
Aust_result
# +
# TWEETS 212,217,218 ARE THE SAME <-- Posted by the same person a couple of minutes apart.
# They have the same text EXCEPT for the url at the end
# They have different tweetIDs
# is there any other way to find duplicate tweets like this?
Aust_result['tweetText'][212]
Aust_result['tweetID'][212]
Aust_result['tweetText'][217]
Aust_result['tweetID'][217]
# -
Aust_result['tweetText']
Aust_result.to_csv("Australia_Tweets.csv")
# ## #Aussie
# Search for '#Aussie'
aussie_search = api.search(q='%23Aussie')
len(aussie_search)
# +
aussie_results = []
#Get the first 5000 items based on the search query
for tweet in tweepy.Cursor(api.search, q='%23Aussie').items(2500):
aussie_results.append(tweet)
# Verify the number of items returned
print(len(aussie_results))
# -
# Checking for duplicate Tweets
aussie_tweet_ids=[x.id for x in aussie_results]
aussie_orig_tweet_ids=set(aussie_tweet_ids)
len(aussie_tweet_ids) == len(aussie_orig_tweet_ids)
len(aussie_tweet_ids)
# +
# Deduplicate by tweet text.  The original removed items from
# aussie_results while iterating over it; list mutation during iteration
# skips the element after each removal, so duplicates were missed.
seen_texts = set()
deduped = []
for x in aussie_results:
    if x.text not in seen_texts:
        seen_texts.add(x.text)
        deduped.append(x)
aussie_results = deduped
len(aussie_results)
# -
#Pass the tweets list to 'toDataFrame' to create the DataFrame
# NOTE(review): toDataFrame is presumably a helper defined in an earlier cell.
Aussie_df = toDataFrame(aussie_results)
Aussie_df.head(5)
# +
# Removing rows with "None" in "userTimezone"
Aussie_df = Aussie_df[Aussie_df.userTimezone.notnull()]
# Rows remaining
len(Aussie_df)
# Percentage of rows remaining
# NOTE(review): /100*100 is a no-op, so this just echoes the row count;
# presumably len(Aussie_df)/2500*100 was intended (as in the earlier cell).
len(Aussie_df)/100*100
# Lost about 1/2 of the tweets
# -
Aussie_df["userTimezone"].value_counts()
# +
# One sub-frame per Australian time zone, mirroring the #Australia section.
sydney_aussie = Aussie_df.loc[Aussie_df['userTimezone'] == 'Sydney', :]
sydney_aussie.head(2)
brisbane_aussie = Aussie_df.loc[Aussie_df['userTimezone'] == 'Brisbane',:]
brisbane_aussie.head(2)
melbourne_aussie = Aussie_df.loc[Aussie_df['userTimezone'] == 'Melbourne',:]
melbourne_aussie.head(2)
# -
Aussie_result=pd.concat([sydney_aussie,brisbane_aussie,melbourne_aussie])
# result = pd.concat([Aust_result,Aussie_result])
# result.head(5)
# +
# result.to_csv("Australia_Tweets.csv")
# -
# NOTE(review): mode="a" appends to the CSV written above, including a
# second header row — verify downstream readers tolerate that.
Aussie_result.to_csv("Australia_Tweets.csv",mode="a")
| previous_code/twitter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# # Define a class
# + tags=[] language="sh"
#
# rm -f files/roundtrip_comments.yml
# + tags=[]
from datafiles import datafile
# Bind the annotated class to a YAML file: the `datafiles` decorator keeps
# the instance attributes and 'files/roundtrip_comments.yml' in sync.
@datafile('files/roundtrip_comments.yml')
class Sample:
    # Each annotated field maps to a YAML key of the same name.
    foo: int
    bar: str
# + [markdown] tags=[]
# # Initialize an instance
# + tags=[]
# Instantiating presumably writes foo/bar out to the YAML file — the
# following `cat` cell shows the result.
sample = Sample(42, "Hello, world")
# + tags=[] language="sh"
#
# cat files/roundtrip_comments.yml
# + [markdown] tags=[]
# # Modify the file
# + tags=[]
# %%writefile files/roundtrip_comments.yml
# Heading comment
foo: 42
bar: Hello, world! # Line comment
# + tags=[] language="sh"
#
# cat files/roundtrip_comments.yml
# + [markdown] tags=[]
# # Modify the object
# + tags=[]
# Mutate the synced object; datafiles presumably persists these values back
# to the YAML file while preserving the hand-written comments (round-trip).
sample.foo = 2019
sample.bar = "Hello, notebook"
# + [markdown] tags=[]
# # View merged contents
# + tags=[] language="sh"
#
# cat files/roundtrip_comments.yml
| notebooks/roundtrip_comments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <NAME> | Numerical Algorithms Assignment 3
import numpy as np
import matplotlib.pyplot as plt
# +
#Generator for x points
def point_gen(n):
    """Return the n + 1 equispaced interpolation nodes covering [-1, 1]."""
    node_indices = np.arange(n + 1, dtype=float)
    # Map index k in 0..n onto -1 + 2k/n (same formula as before).
    return -1 + 2 * (node_indices / n)
#Generator for function points
def func_point_gen(x):
    """Evaluate Runge's function f(x) = 1 / (25 x**2 + 1) elementwise."""
    denominator = 25 * x**2 + 1
    return 1 / denominator
#Generator for Newton's Difference
def diff_gen(y, x):
    """Build the divided-difference table for Newton interpolation.

    Parameters:
        y: function values at the interpolation nodes.
        x: the interpolation nodes themselves (same length as y).

    Returns:
        An (m, m) float array (m = len(x)) whose first row holds the
        Newton coefficients f[x0], f[x0,x1], ..., f[x0,...,x_{m-1}].

    Fix: the table size is now derived from the inputs; the original
    silently depended on a module-level global ``n``.
    """
    m = len(x)
    coeff = np.zeros((m, m), dtype=float)
    for i in range(m):
        coeff[i, 0] = y[i]
    for j in range(1, m):
        for i in range(0, m - j):
            coeff[i, j] = (coeff[i + 1, j - 1] - coeff[i, j - 1]) / (x[i + j] - x[i])
    return coeff
#Generator for Product Values
def prod_val(i, x, x_p):
    """Return the Newton basis product (x_p - x[0]) * ... * (x_p - x[i-1]).

    For i == 0 this is the empty product, i.e. 1 (the original's special
    case is subsumed by the empty range).
    """
    prod = 1
    for j in range(i):
        prod *= x_p - x[j]
    return prod
#For an input x, calculating value of interpolating polynomial
def get_val(coeff, x, x_p):
    """Evaluate the Newton-form polynomial at the scalar point x_p.

    coeff is the first row of the divided-difference table.  Fix: the
    number of terms is now len(coeff) instead of the module-level global
    ``n`` the original depended on (len(coeff) == n + 1 there).
    """
    fsum = 0
    for i in range(len(coeff)):
        fsum += coeff[i] * prod_val(i, x, x_p)
    return fsum
def interpolate_func(coeff, x, x_p):
    """Evaluate the interpolating polynomial at every point of array x_p."""
    p = np.zeros(x_p.size, dtype=float)
    for i in range(x_p.size):
        p[i] = get_val(coeff, x, x_p[i])
    return p
# +
# Runge phenomenon demo: interpolate 1/(25x^2+1) on n+1 equispaced nodes
# and overlay the interpolant against the true function.  The three cells
# below repeat the experiment for n = 10, 20, 40; only n changes.
# NOTE(review): the module-level n set here is also read by the helper
# functions above as a global — keep the assignments in sync.
n = 10
#Generate values for x
x = point_gen(n)
#Generate values for y
y = func_point_gen(x)
#Generate Newton's Difference Matrix
diff_matrix = diff_gen(y,x)
#Generate the required divided diff coeff from the matrix
diff_coeff = diff_matrix[0,]
#Generate an array of 100 x values for plotting the function
x_plot = np.arange(-1,1,0.02,dtype=float)
#Generate the interpolated values
p_plot = interpolate_func(diff_coeff,x,x_plot)
#Generate the values of original function for comparison
a_plot = func_point_gen(x_plot)
plt.plot(x_plot,p_plot)
plt.plot(x_plot,a_plot)
plt.xlim([-1,1])
plt.ylim([-2,4])
plt.show()
# -
# Plot for n = 10; here Orange is the original function while Blue is the interpolated function
# +
n = 20
#Generate values for x
x = point_gen(n)
#Generate values for y
y = func_point_gen(x)
#Generate Newton's Difference Matrix
diff_matrix = diff_gen(y,x)
#Generate the required divided diff coeff from the matrix
diff_coeff = diff_matrix[0,]
#Generate an array of 100 x values for plotting the function
x_plot = np.arange(-1,1,0.02,dtype=float)
#Generate the interpolated values
p_plot = interpolate_func(diff_coeff,x,x_plot)
#Generate the values of original function for comparison
a_plot = func_point_gen(x_plot)
plt.plot(x_plot,p_plot)
plt.plot(x_plot,a_plot)
plt.xlim([-1,1])
plt.ylim([-2,4])
plt.show()
# -
# Plot for n = 20; here Orange is the original function while Blue is the interpolated function
# +
n = 40
#Generate values for x
x = point_gen(n)
#Generate values for y
y = func_point_gen(x)
#Generate Newton's Difference Matrix
diff_matrix = diff_gen(y,x)
#Generate the required divided diff coeff from the matrix
diff_coeff = diff_matrix[0,]
#Generate an array of 100 x values for plotting the function
x_plot = np.arange(-1,1,0.02,dtype=float)
#Generate the interpolated values
p_plot = interpolate_func(diff_coeff,x,x_plot)
#Generate the values of original function for comparison
a_plot = func_point_gen(x_plot)
plt.plot(x_plot,p_plot)
plt.plot(x_plot,a_plot)
plt.xlim([-1,1])
plt.ylim([-2,4])
plt.show()
# -
# Plot for n = 40; here Orange is the original function while Blue is the interpolated function
| Interpolation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
print('Hello ex04')
# # 3.4 build a spam classifier (a more challenging exercise)
# ## 3.4.1 Download examples of spam and ham from Apache SpamAssassin’s public datasets.
#
# Downloaded 20021010 dataset
# ## Unzip the datasets and familiarize yourself with the data format.
import os
import glob
HAM_DIR = os.path.join('datasets', 'easy_ham')
SPAM_DIR = os.path.join('datasets', 'spam')
# Keep only the real message files; len(name) > 20 presumably filters out
# short index files such as 'cmds' — verify against the extracted archive.
ham_files = [name for name in sorted(os.listdir(HAM_DIR)) if len(name) > 20]
spam_files = [name for name in sorted(os.listdir(SPAM_DIR)) if len(name) > 20]
len(ham_files), ham_files[0], ham_files[-1]
len(spam_files), spam_files[0], spam_files[-1]
# Use `email` module
import email
import email.policy
SPM_PATH = './datasets'
def load_email(is_spam, filename, spam_path=SPM_PATH):
    """Read one message file from the ham or spam folder and parse it.

    Returns the parsed message (parsed with the modern default policy).
    """
    subdir = 'spam' if is_spam else 'easy_ham'
    full_path = os.path.join(spam_path, subdir, filename)
    parser = email.parser.BytesParser(policy=email.policy.default)
    with open(full_path, 'rb') as fh:
        return parser.parse(fh)
# Parse every listed file into an email message object.
ham_email = [load_email(False, name) for name in ham_files]
spam_email = [load_email(True, name) for name in spam_files]
# print(ham_email[13].get_content().strip())
# Spot-check one multipart ham message and one spam message.
print(ham_email[13].get_payload()[1].get_content_type())
print(spam_email[6].get_content().strip())
def get_email_structure(email):
    """Describe a message's MIME structure as a (possibly nested) string.

    Strings pass through unchanged; multipart messages are rendered as
    'multipart(child, child, ...)' recursively; simple parts yield their
    content type.
    """
    if isinstance(email, str):
        return email
    payload = email.get_payload()
    if not isinstance(payload, list):
        return email.get_content_type()
    inner = ", ".join(get_email_structure(sub) for sub in payload)
    return f'multipart({inner})'
get_email_structure(ham_email[2])
# Tally the MIME structures seen in ham vs spam.
ham_structures = list(map(get_email_structure, ham_email))
ham_structures.index('multipart(text/plain, application/pgp-signature)')
import pandas as pd
ham_df = pd.DataFrame({'type': ham_structures})
ham_df['type'].value_counts()
spam_structures = list(map(get_email_structure, spam_email))
spam_df = pd.DataFrame({'type': spam_structures})
spam_df['type'].value_counts()
# Dump all headers of the first spam message.
for header, value in spam_email[0].items():
    print(f'{header} : {value}')
spam_email[0]['Subject']
# ### Train test split
import numpy as np
from sklearn.model_selection import train_test_split
# Label convention: 0 = ham, 1 = spam.
X = np.array(ham_email + spam_email)
y = np.concatenate([np.zeros(len(ham_email)), np.ones(len(spam_email))])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# ### Preprocessing html to plain text
import re
from html import unescape
def html_to_plain_text(html):
    """Reduce an HTML document to readable plain text.

    Drops the <head> section, replaces anchors with the token HYPERLINK,
    strips remaining tags, collapses blank lines and unescapes entities.
    """
    flags = re.M | re.S
    text = re.sub(r'<head.*?>.*?</head>', '', html, flags=flags | re.I)
    text = re.sub(r'<a\s.*?>', ' HYPERLINK ', text, flags=flags | re.I)
    text = re.sub(r'<.*?>', '', text, flags=flags)
    text = re.sub(r'(\s*\n)+', '\n', text, flags=flags)
    return unescape(text)
# Find the spam email with `text/html` contents
html_spam_emails = [email for email in X_train[y_train == 1] if get_email_structure(email) == 'text/html']
sample_html_spam = html_spam_emails[7]
# Compare the raw HTML against its plain-text conversion.
sample_html_spam.get_content().strip()[:1000]
print(html_to_plain_text(sample_html_spam.get_content())[:1000])
# Return email's content as plain text
def email_to_text(email):
    """Return the message body as plain text.

    Prefers the first text/plain part; otherwise falls back to the last
    text/html part converted through html_to_plain_text().  Returns None
    when the message contains neither kind of part.
    """
    last_html = None
    for part in email.walk():
        ctype = part.get_content_type()
        if ctype not in ("text/plain", "text/html"):
            continue
        try:
            content = part.get_content()
        except:  # in case of encoding issues
            content = str(part.get_payload())
        if ctype == "text/plain":
            return content
        last_html = content
    if last_html:
        return html_to_plain_text(last_html)
def email_to_text_2(email):
    """Debug helper: per MIME part, report (content type, payload type,
    first 200 characters of the payload)."""
    parts_info = []
    for part in email.walk():
        try:
            content = part.get_content()
        except:  # in case of encoding issues
            content = str(part.get_payload())
        parts_info.append((part.get_content_type(), type(content), content[:200]))
    return parts_info
# +
def get_num_of_parts(email):
    """Count the MIME parts a walk() visits (the message itself included)."""
    return sum(1 for _ in email.walk())
def count_plain_html_part(email):
    """Count how many parts are text/plain or text/html."""
    total = 0
    for part in email.walk():
        if part.get_content_type() in ("text/plain", "text/html"):
            total += 1
    return total
# -
email_to_text_2(spam_email[466])
# Which messages are multipart, and which have no text/plain or text/html part?
[(index, get_num_of_parts(email)) for index, email in enumerate(spam_email) if get_num_of_parts(email) > 1][:5]
[(index, count_plain_html_part(email)) for index, email in enumerate(X_train) if count_plain_html_part(email) == 0]
index = 1047
print(email_to_text(X_train[index]), '...', y_train[index])
# We found an email, 1047 and it doesn't have any context. It's `spam//00467.5b733c506b7165424a0d4a298e67970f`, as you can see the in the following, it does have content.
y_train[1047]
get_email_structure(X_train[1047])
# Inspect each part's raw payload to see why email_to_text returned nothing.
for part in X_train[1047].walk():
    print(part.get_content_type())
    print(html_to_plain_text(str(part.get_payload()))[:200])
print(email_to_text(sample_html_spam)[:1000], '...')
# ### Throw in stemming
import nltk
# Module-level stemmer/extractor: the transformer class below reads these
# globals, so they must be created before it is used.
stemmer = nltk.PorterStemmer()
for word in ("Computations", "Computation", "Computing", "Computed", "Compute", "Compulsive"):
    print(f'{word} => {stemmer.stem(word)}')
import urlextract
url_extractor = urlextract.URLExtract()
print(url_extractor.find_urls("Will it detect github.com and https://youtu.be/7Pq-S557XQU?t=3m32s"))
# ### Transformer to convert emails to word counter
from sklearn.base import BaseEstimator, TransformerMixin
from collections import Counter
class EmailToWordCounterTransformer(BaseEstimator, TransformerMixin):
    """Convert raw email messages into per-message word-count Counters.

    Optional normalisations: lower-casing, URL and number replacement,
    punctuation stripping, and Porter stemming.  ``strip_headers`` is
    stored but never consulted, exactly as in the original implementation.
    Relies on the module-level ``email_to_text``, ``url_extractor`` and
    ``stemmer`` defined earlier in the notebook.
    """
    def __init__(self, strip_headers=True, lower_case=True, remove_punctuation=True,
                 replace_urls=True, replace_numbers=True, stemming=True):
        self.strip_headers = strip_headers
        self.lower_case = lower_case
        self.remove_punctuation = remove_punctuation
        self.replace_urls = replace_urls
        self.replace_numbers = replace_numbers
        self.stemming = stemming
    def fit(self, X, y=None):
        # Stateless transformer: nothing is learned from the data.
        return self
    def transform(self, X, y=None):
        return np.array([self._count_words(message) for message in X])
    def _count_words(self, message):
        """Normalise one message's text and return its word Counter."""
        text = email_to_text(message) or ''
        if self.lower_case:
            text = text.lower()
        if self.replace_urls and url_extractor is not None:
            # Replace longest URLs first so a short URL that is a substring
            # of a longer one cannot clobber it prematurely.
            found_urls = url_extractor.find_urls(text, only_unique=True)
            for url in sorted(found_urls, key=len, reverse=True):
                text = text.replace(url, ' URL ')
        if self.replace_numbers:
            text = re.sub(r'\d+(?:\.\d*(?:[eE]\d+)*)?', 'NUMBER', text)
        if self.remove_punctuation:
            text = re.sub(r'\W+', ' ', text, flags=re.M)
        counts = Counter(text.split())
        if self.stemming and stemmer is not None:
            stemmed_counts = Counter()
            for word, occurrences in counts.items():
                stemmed_counts[stemmer.stem(word)] += occurrences
            counts = stemmed_counts
        return counts
# Smoke-test the transformer on the first three training messages.
X_few = X_train[:3]
X_few_wordcounts = EmailToWordCounterTransformer().fit_transform(X_few)
X_few_wordcounts
from scipy.sparse import csr_matrix
class WordCounterToVectorTransformer(BaseEstimator, TransformerMixin):
    """Map word Counters onto sparse count vectors over a fixed vocabulary.

    fit() keeps the ``vocabulary_size`` most common words, with each
    message contributing at most 10 occurrences per word; transform()
    emits a CSR matrix in which column 0 pools every out-of-vocabulary
    word and column i (i >= 1) corresponds to the i-th vocabulary word.
    """
    def __init__(self, vocabulary_size=1000):
        self.vocabulary_size = vocabulary_size
    def fit(self, X, y=None):
        overall = Counter()
        for counter in X:
            for word, count in counter.items():
                # Cap per-message influence so one spammy repeat can't
                # dominate the vocabulary choice.
                overall[word] += min(count, 10)
        top_words = overall.most_common()[:self.vocabulary_size]
        self.most_common_ = top_words
        self.vocabulary_ = {word: rank + 1 for rank, (word, _) in enumerate(top_words)}
        return self
    def transform(self, X, y=None):
        rows, cols, data = [], [], []
        for row_idx, counter in enumerate(X):
            for word, count in counter.items():
                rows.append(row_idx)
                # Unknown words share column 0; duplicate (row, col)
                # entries are summed by the CSR constructor.
                cols.append(self.vocabulary_.get(word, 0))
                data.append(count)
        return csr_matrix((data, (rows, cols)), shape=(len(X), self.vocabulary_size + 1))
# Demonstrate that csr_matrix sums duplicate (row, col) entries: (0,0)
# receives 3 + 2 = 5.
rows = [0, 0, 0]
cols = [0, 0, 1]
data = [3, 2, 1]
m = csr_matrix((data, (rows, cols)), shape=(1, 2))
m.toarray()
vocab_transformer = WordCounterToVectorTransformer(vocabulary_size=10)
X_few_vectors = vocab_transformer.fit_transform(X_few_wordcounts)
X_few_vectors
print(vocab_transformer.most_common_)
X_few_vectors.toarray()
vocab_transformer.vocabulary_
X_few_wordcounts[1].most_common()[:10]
# ### Create a pipeline
from sklearn.pipeline import Pipeline
preprocess_pipeline = Pipeline([
    ('email_to_wordcount', EmailToWordCounterTransformer()),
    ('wordcount_to_vector', WordCounterToVectorTransformer()),
])
X_train_transformed = preprocess_pipeline.fit_transform(X_train)
X_train_transformed.toarray().shape
# ### Apply the logistic regression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
log_clf = LogisticRegression(solver='lbfgs', random_state=42)
score = cross_val_score(log_clf, X_train_transformed, y_train, cv=3, verbose=3)
score.mean()
# ### Precision and Recall score for test dataset
from sklearn.metrics import precision_score, recall_score, accuracy_score
# NOTE(review): fit_transform on the TEST set re-fits the vocabulary on
# test data (leakage) and makes the test columns inconsistent with the
# model trained on X_train_transformed.  The corrected cell further down
# uses .transform() instead.
X_test_transformed = preprocess_pipeline.fit_transform(X_test)
log_clf = LogisticRegression(solver='lbfgs', random_state=42)
log_clf.fit(X_train_transformed, y_train)
y_pred = log_clf.predict(X_test_transformed)
y_test.shape
accuracy_score(y_pred, y_test)
# NOTE(review): sklearn's convention is (y_true, y_pred); passing
# (y_pred, y_test) swaps the roles, so "precision" here actually reports
# recall and vice versa.  The cell below uses the correct order.
precision_score(y_pred, y_test)
recall_score(y_pred, y_test)
# +
from sklearn.metrics import precision_score, recall_score
X_test_transformed = preprocess_pipeline.transform(X_test)
log_clf = LogisticRegression(solver="lbfgs", random_state=42, max_iter=1000)
log_clf.fit(X_train_transformed, y_train)
y_pred = log_clf.predict(X_test_transformed)
print("Precision: {:.2f}%".format(100 * precision_score(y_test, y_pred)))
print("Recall: {:.2f}%".format(100 * recall_score(y_test, y_pred)))
# -
# Train/test accuracy of the final (leak-free) fit.
y_train_pred = log_clf.predict(X_train_transformed)
accuracy_score(y_train, y_train_pred)
y_test_pred = log_clf.predict(X_test_transformed)
accuracy_score(y_test, y_test_pred)
| HandsOnML/ch03/ex04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Let's say you are interested in how well participants can learn a normal distribution (I'm not judging,
# we all have weird hobbies).
#
# You will want to present a discrete number of values to participants, but how can you
# make sure that the values that you select will look "normal"?
#
# In this blog post, I present a simple method to ensure that the values you use as stimuli will always match the
# properties of the distribution you want to describe.
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import seaborn as sns
from IPython.display import HTML, display
# Specifically, you want to test if participants can learn a normal distribution with parameters $\mu = 25$ and $\sigma = 5$.
#
# For the sake of illustration, let's assume the following:
# * Each participant will learn 50 numbers
# * You want those 50 numbers to be integer (to facilitate learning)
# * You want participants to report the numbers that they have seen on a distribution builder with 25 bins, from 0 to 50, in increments of 2.
#
# How to create distribution of 50 integers that will match a normal distribution $\mathcal{N}(\mu, \sigma)$?
#
# ## A bad solution: the "random draw" approach
#
# It might seem obvious: let's just draw those numbers from the distribution!
np.random.seed(25330)
# Target distribution N(MU, SIGMA), N stimuli, and the distribution-builder
# bin edges (26 values from 1 to 51 in steps of 2 -> 25 bins).
MU = 25
SIGMA = 5
N = 50
BINS = np.arange(1, 51.5, 2)
numbers = np.random.normal(MU, SIGMA, size=N) # Random draw of 50 numbers
round_numbers = np.round(numbers, 0) # Rounded to nearest integet
# When we visualize it however....
fig, ax = plt.subplots(1)
ax.hist(round_numbers, bins=BINS, ec="white", align="mid")
ax.axvline(np.mean(round_numbers), color="darkred")
ax.annotate(
    r"$\mu = {:.2f}, \sigma = {:.2f}$".format(
        round_numbers.mean(), round_numbers.std()
    ),
    (5, 7),
)
ax.set_ylabel("Number of values")
ax.set_xlabel("Bins")
sns.despine()
plt.close()
display(fig, metadata=dict(filename="Fig1"))
# The mean is lower than what we'd like, the standard deviation is too high, and the distribution would not look normal at all when reported in a distribution builder.
#
# This was expected: random numbers are, by definition, random. Can we do better?
#
# ## A slightly better approach: the iterative approach
# Yes! We could repeated this sampling process several times, until we are sufficiently close to the parameters of the distribution that we want to obtain.
# Tolerances for the accept/reject loop on mean, SD and skew.
ERR_MU = 0.01
ERR_SIGMA = 0.1
SKEW_SIGMA = 0.01
numbers = np.random.normal(MU, SIGMA, size=N)
round_numbers = np.round(numbers, 0)
m = numbers.mean()
sd = numbers.std()
skew = stats.skew(numbers)
i = 1
# Resample until the draw's moments fall within all three tolerances.
# NOTE(review): the loop tests the moments of the *unrounded* draw while
# the plot below shows the rounded values — presumably intentional.
while (
    (np.abs(m - MU) > ERR_MU)
    or (np.abs(sd - SIGMA) > ERR_SIGMA)
    or (np.abs(skew) > SKEW_SIGMA)
):
    i += 1
    numbers = np.random.normal(MU, SIGMA, size=N)
    round_numbers = np.round(numbers, 0)
    m = numbers.mean()
    sd = numbers.std()
    skew = stats.skew(numbers)
HTML("After {} iterations, we have a satisfying distribution".format(i))
# That took a few seconds. Let's visualize it...
fig, ax = plt.subplots(1)
ax.hist(round_numbers, bins=BINS, ec="white", align="mid")
ax.axvline(np.mean(round_numbers), color="darkred")
ax.annotate(
    r"$\mu = {:.2f}, \sigma = {:.2f}$".format(
        round_numbers.mean(), round_numbers.std()
    ),
    (5, 7),
)
ax.set_ylabel("Number of values")
ax.set_xlabel("Bins")
sns.despine()
plt.close()
display(fig, metadata=dict(filename="Fig2"))
# The distribution now has the mean, variance and skew that we want... But it still not perfectly normal. In particular, the mode does not correspond to the mean... Do we really want to give reviewer B something to nitpick about?
# ## The correct method: binning a continuous distribution
# The trick is to follow these steps:
# 1. Obtain the CDF of the distribution that we want to copy (here the CDF of $\mathcal{N}(25, 5)$
# 2. Use this CDF to compute the probability of each random value falling in each bucket of the distribution builder. Formally, we compute for each bucket $P(l \leq X \leq h)$, where $l$ and $h$ are the lower and upper bounds of each bucket.
# 3. Convert those probabilities in a number of observations, rounding them to the nearest integer.
# 4. If this creates less observations than what we want, increase the probability of each observation by a very small amount.
#
# Putting this together into a function:
def bin_dist(dist, buckets, n):
    """
    Generate a discrete number of values that match a target distribution.
    dist:
        The Distribution object from which the CDF will be computed.
        Can be any distribution that has support on the `buckets`.
    buckets:
        The buckets of the distribution builder that will be used.
    n:
        The number of observations that should be presented.
    Returns:
        An array of length n containing the values.
    """
    # Half the distance between adjacent bucket centres.
    half_width = (buckets[1] - buckets[0]) / 2
    # P(lower <= X <= upper) for every bucket, via the CDF difference.
    lower_cdf = dist.cdf(buckets - half_width)
    upper_cdf = dist.cdf(buckets + half_width)
    bucket_probs = upper_cdf - lower_cdf
    # Scale to n observations and round each bucket to a whole count.
    counts = np.round(bucket_probs * n, 0)
    # Rounding can fall short of n; inflate probabilities slightly until
    # enough observations exist.
    scale = 1
    while counts.sum() < n:
        scale += 0.05
        counts = np.round(bucket_probs * n * scale)
    return np.repeat(buckets, counts.astype(int))
# Now if we apply this method:
# 25 observations on the 25 builder buckets (0..48 in steps of 2).
binned_numbers = bin_dist(stats.norm(MU, SIGMA), np.arange(0, 50, 2), 25)
fig, ax = plt.subplots(1)
ax.hist(binned_numbers, bins=BINS, ec="white", align="mid")
ax.axvline(np.mean(binned_numbers), color="darkred")
ax.annotate(
    r"$\mu = {:.2f}, \sigma = {:.2f}$".format(
        binned_numbers.mean(), binned_numbers.std()
    ),
    (26, 4.1),
)
ax.set_ylabel("Number of values")
ax.set_xlabel("Bins")
sns.despine()
plt.close()
display(fig, metadata=dict(filename="Fig3"))
# This is exactly what we want ! A normal-like distribution of integers.
#
# This method is also very flexible: it can be applied to any continuous distribution and any number of buckets.
#
# Here are a few illustrations of the function for different distributions, varying the number of buckets.
# +
# Normal, Chi and Beta distributions.
dists = [stats.norm(25, 9), stats.chi(1, 10, 10), stats.beta(0.5, 0.5, -1, 52)]
# 6, 11 and 26 buckets.
buckets = [np.arange(0, 51, 10), np.arange(0, 51, 5), np.arange(0, 51, 2)]
# Corresponding bins and widths
bins = [np.arange(-5, 56, 10), np.arange(-2.5, 53.5, 5), np.arange(-1, 52, 2)]
widths = [10, 5, 2]
r = np.arange(0, 50, 0.001)
fig, axes = plt.subplots(3, 3, figsize=(8, 4), sharey=True, dpi=150)
for i, ax in enumerate(axes):
for j, a in enumerate(ax):
d = dists[i]
balls = bin_dist(d, buckets[j], 50)
a.hist(
balls,
bins=bins[j],
width=widths[j],
align="mid",
rwidth=1,
density=True,
ec="white",
)
a.plot(r, d.pdf(r), ls=":", color="red")
sns.despine(left=True)
a.set_yticks([])
a.set_xticks(buckets[j])
a.tick_params(axis="x", labelrotation=90, labelsize=5)
a.tick_params(axis="y", labelrotation=0, labelsize=5)
for a, t in zip(
axes,
[r"$\mathcal{N}(25, 9)$", r"$\chi(1)$ (scaled)", r"$\beta(0.5, 0.5)$ (scaled)"],
):
a[0].set_ylabel("{}".format(t), size=8)
for a, t in zip(axes[0], ["6", "11", "26"]):
a.set_title("{} Buckets".format(t), size=8)
plt.tight_layout()
plt.close()
display(fig, metadata=dict(filename="Fig4"))
# -
# A few rules to finish:
# * Don't use too few buckets. The larger your buckets are, the less faithful the representation of the distribution will be.
# * Don't present too few observations. You also need a good number of them to faithfully represent the distribution.
# * Make sure that your buckets cover the "full" distribution: your distribution should have support on all buckets, and the buckets should cover the majority of the support of the distribution.
| content/post/generate-distributions/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# language: python
# name: python3
# ---
# +
from sympy import sympify
# Re-parse a 2x4 symbolic matrix that was exported as a string by the
# dynamics derivation (appears to be a kinematic Jacobian in the joint
# angles a1..a4 — confirm against the derivation notebook).
expor = sympify("Matrix([[-b1*l1*cos(a1) - l2*(b1 + b2)*cos(a1 + a2) - l3*(b1 + b2 + b3)*cos(a1 + a2 + a3) - l4*(b1 + b2 + b3 + b4)*cos(a1 + a2 + a3 + a4), -l2*(b1 + b2)*cos(a1 + a2) - l3*(b1 + b2 + b3)*cos(a1 + a2 + a3) - l4*(b1 + b2 + b3 + b4)*cos(a1 + a2 + a3 + a4), -l3*(b1 + b2 + b3)*cos(a1 + a2 + a3) - l4*(b1 + b2 + b3 + b4)*cos(a1 + a2 + a3 + a4), -l4*(b1 + b2 + b3 + b4)*cos(a1 + a2 + a3 + a4)], [-b1*l1*sin(a1) - l2*(b1 + b2)*sin(a1 + a2) - l3*(b1 + b2 + b3)*sin(a1 + a2 + a3) - l4*(b1 + b2 + b3 + b4)*sin(a1 + a2 + a3 + a4), -l2*(b1 + b2)*sin(a1 + a2) - l3*(b1 + b2 + b3)*sin(a1 + a2 + a3) - l4*(b1 + b2 + b3 + b4)*sin(a1 + a2 + a3 + a4), -l3*(b1 + b2 + b3)*sin(a1 + a2 + a3) - l4*(b1 + b2 + b3 + b4)*sin(a1 + a2 + a3 + a4), -l4*(b1 + b2 + b3 + b4)*sin(a1 + a2 + a3 + a4)]])")
# -
# Last expression in the cell: Jupyter renders the matrix.
expor
| o/soft_robot/derivation_of_dynamics/misc/sim.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:.conda-py37] *
# language: python
# name: python3
# ---
import os
import sys
import path
# Pin BLAS/OpenMP to a single thread — presumably to avoid thread
# oversubscription during attribution runs; must be set before the numeric
# libraries initialise to take effect.
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
# Make the project root (one directory up) importable as `src`.
sys.path.insert(0, path.Path(os.getcwd()).joinpath("..").abspath())
# +
import matplotlib.pyplot as plt
import seaborn as sns
# from IPython.display import set_matplotlib_formats
# %matplotlib inline
# set_matplotlib_formats("svg")
sns.set_theme(style="whitegrid", font_scale=2, rc={"figure.figsize": (18, 9)})
# -
# ## Common
# +
import os
import numpy as np
import pandas as pd
import torch
from captum.attr import visualization as viz
from captum.attr import (
Saliency,
IntegratedGradients,
NoiseTunnel,
LayerGradCam,
FeatureAblation,
LayerActivation,
LayerAttribution
)
from matplotlib.colors import LinearSegmentedColormap
# -
# ----
#
# ## TS-based
from src.settings import DATA_ROOT, LOGS_ROOT
from src.ts import load_ABIDE1, load_OASIS, load_FBIRN
# ### ABIDE1
# ABIDE1
# Load the ABIDE1 time-series dataset via the project loader.
features, labels = load_ABIDE1()
features = np.swapaxes(features, 1, 2) # [n_samples; seq_len; n_features]
# +
# ABIDE1
# one-sample
# Wrap subject 5 as a [1, seq_len, n_features] float tensor with gradients
# enabled so captum can attribute with respect to the input.
feature = features[5].astype(np.float32)
feature = torch.tensor(feature).unsqueeze(0)
feature.requires_grad = True
# all
# feature = features.astype(np.float32)
# feature = torch.tensor(feature)
# feature.requires_grad = True
print(feature.shape)
# -
# ABIDE1
from src.scripts.tune_ts_mlp import MLP
# AUC: 0.7030812324929973
# Rebuild the tuned MLP with the hyperparameters found during tuning, then
# restore the best checkpoint (map_location keeps the weights on CPU).
model = MLP(
    input_size=53,
    output_size=2,
    hidden_size=75,
    num_layers=1,
    dropout=0.30508830411407517,
)
logdir = LOGS_ROOT.joinpath("220428.185640-ts-mlp-qFalse/0002/model.best.pth")
checkpoint = torch.load(logdir, map_location=lambda storage, loc: storage)
# print(checkpoint)
model.load_state_dict(checkpoint)
# eval() disables dropout so attributions are deterministic for the weights.
model = model.eval()
# ATTENTION: captum model should have [bs; ...] inputs and [bs; ...] outputs
# output = torch.sigmoid(model(feature))
# +
# ABIDE1
# Saliency maps (input-gradient magnitudes) for both output classes.
saliency = Saliency(model)
model.zero_grad()
grads0 = saliency.attribute(feature, target=0)
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
# transpose to [num_features; time_len; 1]
_ = viz.visualize_image_attr(
    np.transpose(grads0.cpu().detach().numpy(), (2,1,0)),
    np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
    method='heat_map',
    # cmap=default_cmap,
    show_colorbar=True,
    # sign='negative',
    # outlier_perc=1,
    plt_fig_axis=(fig, axs),
    use_pyplot=False,
)
plt.show()
plt.close()
model.zero_grad()
grads1 = saliency.attribute(feature, target=1)
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
# transpose to [num_features; time_len; 1]
_ = viz.visualize_image_attr(
    np.transpose(grads1.cpu().detach().numpy(), (2,1,0)),
    np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
    method='heat_map',
    # cmap=default_cmap,
    show_colorbar=True,
    # sign='positive',
    # outlier_perc=1,
    plt_fig_axis=(fig, axs),
    use_pyplot=False,
)
plt.show()
plt.close()
# +
# ABIDE1
# Integrated Gradients against an all-zero baseline, for both classes;
# the convergence delta quantifies the approximation error.
ig = IntegratedGradients(model)
model.zero_grad()
attr_ig0, delta = ig.attribute(
    inputs=feature,
    target=0,
    baselines=torch.zeros_like(feature),
    return_convergence_delta=True,
)
print('Approximation delta: ', abs(delta))
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
_ = viz.visualize_image_attr(
    np.transpose(attr_ig0.cpu().detach().numpy(), (2,1,0)),
    np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
    method='heat_map',
    # cmap=default_cmap,
    show_colorbar=True,
    # sign='positive',
    # outlier_perc=1,
    plt_fig_axis=(fig, axs),
    use_pyplot=False,
)
plt.show()
plt.close()
model.zero_grad()
attr_ig1, delta = ig.attribute(
    inputs=feature,
    target=1,
    baselines=torch.zeros_like(feature),
    return_convergence_delta=True,
)
print('Approximation delta: ', abs(delta))
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
_ = viz.visualize_image_attr(
    np.transpose(attr_ig1.cpu().detach().numpy(), (2,1,0)),
    np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
    method='heat_map',
    # cmap=default_cmap,
    show_colorbar=True,
    # sign='positive',
    # outlier_perc=1,
    plt_fig_axis=(fig, axs),
    use_pyplot=False,
)
plt.show()
plt.close()
# +
# ABIDE1
# SmoothGrad-squared via NoiseTunnel over Integrated Gradients: averages
# squared IG attributions across 5 noisy copies (stdev 0.2) of the input.
ig = IntegratedGradients(model)
nt = NoiseTunnel(ig)
model.zero_grad()
attr_ig_nt0, delta = nt.attribute(
    inputs=feature,
    target=0,
    baselines=torch.zeros_like(feature),
    return_convergence_delta=True,
    nt_type='smoothgrad_sq',
    nt_samples=5,
    stdevs=0.2
)
print('Approximation delta: ', abs(delta))
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
_ = viz.visualize_image_attr(
    np.transpose(attr_ig_nt0.cpu().detach().numpy(), (2,1,0)),
    np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
    method='heat_map',
    # cmap=default_cmap,
    show_colorbar=True,
    # sign='positive',
    outlier_perc=1,
    plt_fig_axis=(fig, axs),
    use_pyplot=False,
)
plt.show()
plt.close()
model.zero_grad()
attr_ig_nt1, delta = nt.attribute(
    inputs=feature,
    target=1,
    baselines=torch.zeros_like(feature),
    return_convergence_delta=True,
    nt_type='smoothgrad_sq',
    nt_samples=5,
    stdevs=0.2
)
print('Approximation delta: ', abs(delta))
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
_ = viz.visualize_image_attr(
    np.transpose(attr_ig_nt1.cpu().detach().numpy(), (2,1,0)),
    np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
    method='heat_map',
    # cmap=default_cmap,
    show_colorbar=True,
    # sign='positive',
    outlier_perc=1,
    plt_fig_axis=(fig, axs),
    use_pyplot=False,
)
plt.show()
plt.close()
# -
# ### FBIRN
# FBIRN
# Same pipeline as ABIDE1, but for the FBIRN dataset; note that here the
# FULL dataset (not a single subject) is wrapped as the attribution input.
features, labels = load_FBIRN()
features = np.swapaxes(features, 1, 2) # [n_samples; seq_len; n_features]
# +
# FBIRN
# one-sample
# feature = features[5].astype(np.float32)
# feature = torch.tensor(feature).unsqueeze(0)
# feature.requires_grad = True
# all
feature = features.astype(np.float32)
# print(feature)
feature = torch.tensor(feature)
feature.requires_grad = True
print(feature.shape)
# -
# FBIRN
from src.scripts.tune_ts_mlp import MLP
# AUC: 0.7376688197298884
# Rebuild the tuned FBIRN MLP and restore its best checkpoint (CPU).
model = MLP(
    input_size=53,
    output_size=2,
    hidden_size=141,
    num_layers=3,
    dropout=0.5339434622121335,
)
logdir = LOGS_ROOT.joinpath("220428.220948-ts-mlp-fbirn-qFalse/0004/model.best.pth")
checkpoint = torch.load(logdir, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint)
model = model.eval()
# ATTENTION: captum model should have [bs; ...] inputs and [bs; ...] outputs
# output = torch.sigmoid(model(feature))
# +
# FBIRN
saliency = Saliency(model)
model.zero_grad()
grads0 = saliency.attribute(feature, target=0)
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
# transpose to [num_features; time_len; 1]
_ = viz.visualize_image_attr(
np.transpose(grads0.cpu().detach().numpy(), (2,1,0)),
np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
method='heat_map',
# cmap=default_cmap,
show_colorbar=True,
# sign='negative',
# outlier_perc=1,
plt_fig_axis=(fig, axs),
use_pyplot=False,
)
plt.show()
plt.close()
model.zero_grad()
grads1 = saliency.attribute(feature, target=1)
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
# transpose to [num_features; time_len; 1]
_ = viz.visualize_image_attr(
np.transpose(grads1.cpu().detach().numpy(), (2,1,0)),
np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
method='heat_map',
# cmap=default_cmap,
show_colorbar=True,
# sign='positive',
# outlier_perc=1,
plt_fig_axis=(fig, axs),
use_pyplot=False,
)
plt.show()
plt.close()
# +
# FBIRN
ig = IntegratedGradients(model)
model.zero_grad()
attr_ig0, delta = ig.attribute(
inputs=feature,
target=0,
baselines=torch.zeros_like(feature),
return_convergence_delta=True,
)
print('Approximation delta: ', abs(delta))
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
_ = viz.visualize_image_attr(
np.transpose(attr_ig0.cpu().detach().numpy(), (2,1,0)),
np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
method='heat_map',
# cmap=default_cmap,
show_colorbar=True,
# sign='positive',
# outlier_perc=1,
plt_fig_axis=(fig, axs),
use_pyplot=False,
)
plt.show()
plt.close()
model.zero_grad()
attr_ig1, delta = ig.attribute(
inputs=feature,
target=1,
baselines=torch.zeros_like(feature),
return_convergence_delta=True,
)
print('Approximation delta: ', abs(delta))
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
_ = viz.visualize_image_attr(
np.transpose(attr_ig1.cpu().detach().numpy(), (2,1,0)),
np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
method='heat_map',
# cmap=default_cmap,
show_colorbar=True,
# sign='positive',
# outlier_perc=1,
plt_fig_axis=(fig, axs),
use_pyplot=False,
)
plt.show()
plt.close()
# +
# FBIRN
ig = IntegratedGradients(model)
nt = NoiseTunnel(ig)
model.zero_grad()
attr_ig_nt0, delta = nt.attribute(
inputs=feature,
target=0,
baselines=torch.zeros_like(feature),
return_convergence_delta=True,
nt_type='smoothgrad_sq',
nt_samples=5,
stdevs=0.2
)
print('Approximation delta: ', abs(delta))
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
_ = viz.visualize_image_attr(
np.transpose(attr_ig_nt0.cpu().detach().numpy(), (2,1,0)),
np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
method='heat_map',
# cmap=default_cmap,
show_colorbar=True,
# sign='positive',
outlier_perc=1,
plt_fig_axis=(fig, axs),
use_pyplot=False,
)
plt.show()
plt.close()
model.zero_grad()
attr_ig_nt1, delta = nt.attribute(
inputs=feature,
target=1,
baselines=torch.zeros_like(feature),
return_convergence_delta=True,
nt_type='smoothgrad_sq',
nt_samples=5,
stdevs=0.2
)
print('Approximation delta: ', abs(delta))
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
_ = viz.visualize_image_attr(
np.transpose(attr_ig_nt1.cpu().detach().numpy(), (2,1,0)),
np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
method='heat_map',
# cmap=default_cmap,
show_colorbar=True,
# sign='positive',
outlier_perc=1,
plt_fig_axis=(fig, axs),
use_pyplot=False,
)
plt.show()
plt.close()
# -
# ### OASIS
# OASIS
features, labels = load_OASIS()
features = np.swapaxes(features, 1, 2) # [n_samples; seq_len; n_features]
# +
# OASIS
# one-sample
feature = features[5].astype(np.float32)
feature = torch.tensor(feature).unsqueeze(0)
feature.requires_grad = True
# all
# feature = features.astype(np.float32)
# feature = torch.tensor(feature)
# feature.requires_grad = True
# check dimensions
print(feature.shape)
# +
# OASIS
from src.scripts.tune_ts_mlp_oasis import MLP
# AUC: 0.7702583675866118
model = MLP(
input_size=53,
output_size=2,
hidden_size=152,
num_layers=2,
dropout=0.49997745820615697,
)
logdir = LOGS_ROOT.joinpath("220428.205425-ts-mlp-oasis-qFalse/0023/model.best.pth")
checkpoint = torch.load(logdir, map_location=lambda storage, loc: storage)
# print(checkpoint)
model.load_state_dict(checkpoint)
model = model.eval()
# ATTENTION: captum model should have [bs; ...] inputs and [bs; ...] outputs
# output = torch.sigmoid(model(feature))
# +
# OASIS
saliency = Saliency(model)
model.zero_grad()
grads0 = saliency.attribute(feature, target=0)
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
# transpose to [num_features; time_len; 1]
_ = viz.visualize_image_attr(
np.transpose(grads0.cpu().detach().numpy(), (2,1,0)),
np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
method='heat_map',
# cmap=default_cmap,
show_colorbar=True,
# sign='negative',
# outlier_perc=1,
plt_fig_axis=(fig, axs),
use_pyplot=False,
)
plt.show()
plt.close()
model.zero_grad()
grads1 = saliency.attribute(feature, target=1)
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
# transpose to [num_features; time_len; 1]
_ = viz.visualize_image_attr(
np.transpose(grads1.cpu().detach().numpy(), (2,1,0)),
np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
method='heat_map',
# cmap=default_cmap,
show_colorbar=True,
# sign='positive',
# outlier_perc=1,
plt_fig_axis=(fig, axs),
use_pyplot=False,
)
plt.show()
plt.close()
# +
# OASIS
ig = IntegratedGradients(model)
model.zero_grad()
attr_ig0, delta = ig.attribute(
inputs=feature,
target=0,
baselines=torch.zeros_like(feature),
return_convergence_delta=True,
)
print('Approximation delta: ', abs(delta))
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
_ = viz.visualize_image_attr(
np.transpose(attr_ig0.cpu().detach().numpy(), (2,1,0)),
np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
method='heat_map',
# cmap=default_cmap,
show_colorbar=True,
# sign='positive',
# outlier_perc=1,
plt_fig_axis=(fig, axs),
use_pyplot=False,
)
plt.show()
plt.close()
model.zero_grad()
attr_ig1, delta = ig.attribute(
inputs=feature,
target=1,
baselines=torch.zeros_like(feature),
return_convergence_delta=True,
)
print('Approximation delta: ', abs(delta))
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
_ = viz.visualize_image_attr(
np.transpose(attr_ig1.cpu().detach().numpy(), (2,1,0)),
np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
method='heat_map',
# cmap=default_cmap,
show_colorbar=True,
# sign='positive',
# outlier_perc=1,
plt_fig_axis=(fig, axs),
use_pyplot=False,
)
plt.show()
plt.close()
# +
# OASIS
ig = IntegratedGradients(model)
nt = NoiseTunnel(ig)
model.zero_grad()
attr_ig_nt0, delta = nt.attribute(
inputs=feature,
target=0,
baselines=torch.zeros_like(feature),
return_convergence_delta=True,
nt_type='smoothgrad_sq',
nt_samples=5,
stdevs=0.2
)
print('Approximation delta: ', abs(delta))
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
_ = viz.visualize_image_attr(
np.transpose(attr_ig_nt0.cpu().detach().numpy(), (2,1,0)),
np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
method='heat_map',
# cmap=default_cmap,
show_colorbar=True,
# sign='positive',
outlier_perc=1,
plt_fig_axis=(fig, axs),
use_pyplot=False,
)
plt.show()
plt.close()
model.zero_grad()
attr_ig_nt1, delta = nt.attribute(
inputs=feature,
target=1,
baselines=torch.zeros_like(feature),
return_convergence_delta=True,
nt_type='smoothgrad_sq',
nt_samples=5,
stdevs=0.2
)
print('Approximation delta: ', abs(delta))
fig, axs = plt.subplots(1, 1, figsize=(21, 9))
_ = viz.visualize_image_attr(
np.transpose(attr_ig_nt1.cpu().detach().numpy(), (2,1,0)),
np.transpose(feature.cpu().detach().numpy(), (2,1,0)),
method='heat_map',
# cmap=default_cmap,
show_colorbar=True,
# sign='positive',
outlier_perc=1,
plt_fig_axis=(fig, axs),
use_pyplot=False,
)
plt.show()
plt.close()
# -
| notebooks/intospection_ts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
pd.set_option("display.precision", 10)
# +
#coins_1dolar = pd.read_csv('1_dolar/crypto-data/export_coins.csv')
# +
#coins_50centi = pd.read_csv('50_centi/crypto-data/export_coins.csv')
# +
#coins_5centi = pd.read_csv('5_centi/crypto-data/export_coins.csv')
# -
coins = pd.read_csv('crypto-data/export_coins.csv')
coins
# +
#coins = coins[coins.initial_buy_price >= 10 ]
#coins = coins[coins.initial_buy_price < 15]
# +
#coins
# -
def coins_statistics(coins):
    """Print and return summary statistics for a coins DataFrame.

    Expects columns ``profit``, ``gained``, ``times_it_sold`` and
    ``initial_buy_price``; ``rebought_at`` is used when present.
    Prints the same report as before and additionally returns the numbers
    in a dict so callers can use them programmatically.
    """
    # sum() over the filtered Series replaces the original manual += loops
    total_profit = sum(coins[coins.profit >= 0].profit)
    total_gained = sum(coins[coins.gained >= 0].gained)
    total_sold_times = sum(coins[coins.times_it_sold >= 1].times_it_sold)
    # Investment: the initial buy for coins sold exactly once, plus the
    # re-buy price for coins sold more than once (when the column exists).
    invest = sum(coins[coins.times_it_sold == 1].initial_buy_price.values)
    if 'rebought_at' in coins.columns:
        invest += sum(coins[coins.times_it_sold > 1].rebought_at.values)
    working_coins = len(coins[coins.times_it_sold > 1])
    print('Current total gained: {}'.format(total_gained))
    print('Current total sold times: {}'.format(total_sold_times))
    print('Current mean sold per coin: {}'.format(total_sold_times/len(coins)))
    print('Current investment: {}'.format(invest))
    print('Current working coins: {}'.format(working_coins))
    return {
        # total_profit was computed but never reported in the original
        'total_profit': total_profit,
        'total_gained': total_gained,
        'total_sold_times': total_sold_times,
        'mean_sold_per_coin': total_sold_times / len(coins),
        'investment': invest,
        'working_coins': working_coins,
    }
coins_statistics(coins)
# NOTE(review): coins_50centi, coins_5centi and coins_1dolar are never defined
# in this notebook — their read_csv cells above are commented out — so every
# line below referencing them raises NameError unless those cells are
# uncommented first. TODO: confirm whether these cells are intentionally dead.
coins_statistics(coins_50centi)
coins_statistics(coins_5centi)
coins_1dolar[coins_1dolar.gained > 1]
# Rows achieving the maximum gain in each dataset.
coins_1dolar[coins_1dolar.gained >= coins_1dolar.gained.max()]
coins_50centi[coins_50centi.gained >= coins_50centi.gained.max()]
coins_5centi[coins_5centi.gained >= coins_5centi.gained.max()]
# Rows achieving the maximum number of sells in each dataset.
coins_1dolar[coins_1dolar.times_it_sold >= coins_1dolar.times_it_sold.max()]
coins_50centi[coins_50centi.times_it_sold >= coins_50centi.times_it_sold.max()]
coins_5centi[coins_5centi.times_it_sold >= coins_5centi.times_it_sold.max()]
print( 10 / coins[coins.coin =='BTC'].initial_buy_price)
print( 10 / coins[coins.coin =='BTC'].initial_buy_cost)
print( 10 / coins[coins.coin =='BTC'].updated_price)
print( 10 / coins[coins.coin =='BTC'].out_price)
print( 10 / coins[coins.coin =='BTC'].estimated_cost)
coins[coins.coin =='BTC'].initial_buy_price * coins[coins.coin =='BTC'].initial_buy_cost
coins[coins.coin =='BTC'].updated_price * coins[coins.coin =='BTC'].out_price
coins[coins.coin =='BTC'].updated_price * coins[coins.coin =='BTC'].estimated_cost
print(10/coins[coins.coin =='BTC'].initial_buy_price)
print(10/coins[coins.coin =='BTC'].updated_price)
initial_buying_prices = pd.read_csv('crypto-data/initial_buying_prices.csv')
coins_sold_history = pd.read_csv('crypto-data/coins_sold_history.csv')
#coins_sold_history = coins_sold_history[coins_sold_history.estimated_cost > 0]
#coins_sold_history = coins_sold_history[coins_sold_history.estimated_cost < 15]
coins_sold_history
# +
# for coin in coins_sold_history.coin.values:
# print('------------------------')
# print(coins_sold_history[coins_sold_history.coin == coin])
# print('------------------------')
# -
coins_rebought_history = pd.read_csv('crypto-data/coins_rebought_history.csv')
coins_rebought_history = coins_rebought_history[coins_rebought_history.initial_buy_price > 0]
coins_rebought_history = coins_rebought_history[coins_rebought_history.initial_buy_price < 15]
coins_rebought_history
# coins_rebought_history = coins_rebought_history[coins_rebought_history.gained >= 1]
for coin in coins_rebought_history.coin.values:
index = coins_rebought_history[coins_rebought_history.coin == coin].index[-1]
if coins_rebought_history.loc[index].gained.astype(float) >= 0.5:
print('------------------------')
print(coins_rebought_history.loc[index])
print('------------------------')
coins_rebought_history[coins_rebought_history.gained >= 1]
| binance/Check Cryptos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kartikay-99k/Cough-detector-app/blob/master/Spectrogram.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="LuSbIf8VFO9O" colab_type="code" colab={}
# %matplotlib inline
import pandas as pd
import os
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
# + id="MMrK3dscHERL" colab_type="code" colab={}
def create_spectrogram(filename, no):
    """Render a mel spectrogram of one audio clip and save it as a PNG.

    The clip is read from the Colab-mounted audio directory, converted to a
    dB-scaled mel spectrogram, and saved into the Cough/ or Non-Cough/ output
    folder depending on the clip's category in the global ``links`` DataFrame.

    Args:
        filename: audio file name (without directory) inside the audio folder.
        no: row index into the global ``links`` DataFrame for this clip.
    """
    plt.interactive(False)
    # NOTE(review): the read path has no leading slash while the save paths
    # below do ("Drive/..." vs "/Drive/...") — confirm both resolve correctly
    # in the Colab environment.
    path = "Drive/Drive 2/Colab Notebooks/audio/"+filename
    clip, sample_rate = librosa.load(path, sr=None)
    # Tiny 1x1-inch figure with all axes/frame hidden, so the saved image is
    # just the spectrogram pixels (suitable as CNN input).
    fig = plt.figure(figsize=[1,1])
    ax = fig.add_subplot(111)
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    ax.set_frame_on(False)
    S = librosa.feature.melspectrogram(y=clip, sr=sample_rate)
    librosa.display.specshow(librosa.power_to_db(S, ref=np.max))
    # Label lookup relies on the module-level `links` DataFrame (esc50.csv).
    check_point = links["category"][no]
    if check_point!="coughing":
        filename = '/Drive/Drive 2/Colab Notebooks/Data/Non-Cough/' + filename + '.png'
    else:
        filename = '/Drive/Drive 2/Colab Notebooks/Data/Cough/' + filename + '.png'
    plt.savefig(filename, dpi=400, bbox_inches='tight',pad_inches=0)
    # Aggressive cleanup: this function is called in a long loop, so release
    # the figure through every matplotlib channel to avoid leaking memory.
    plt.close()
    fig.clf()
    plt.close(fig)
    plt.close('all')
# + id="r_72ELMCHO1p" colab_type="code" colab={}
links = pd.read_csv("/Drive/Drive 2/Colab Notebooks/meta/esc50.csv")
# + id="IuN8R0_4Hvpa" colab_type="code" outputId="2f174d37-c223-4472-8608-9f898db0bb6c" colab={"base_uri": "https://localhost:8080/", "height": 34}
links.columns
# + id="sBL3QVdrH0WP" colab_type="code" outputId="05436056-c433-412a-b9a8-9ee8eb714a49" colab={"base_uri": "https://localhost:8080/", "height": 359}
links.head(10)
# + id="GFsRaHUEITn0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="1569b3c0-af83-458f-e13b-72bcbab12401"
len_N = len(links)
for i in range(len_N):
filename = links["filename"][i]
create_spectrogram(filename, i)
print(i)
# + id="xucYL8IFL1xG" colab_type="code" colab={}
| 01. Spectrogram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# How do you understand how a decision tree makes predictions?
# One of the strengths of decision trees is that they are relatively easy to interpret, since you can create a visualization based on your model. This is not only a powerful way to understand your model, but also a way to communicate how your model works to stakeholders.
#
#
# In this video, I'll show you how Decision Trees can be plotted with Matplotlib.
# ## Import Libraries
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
# -
# ## Load the Dataset
# The Iris dataset is one of the datasets that scikit-learn ships with, so it does not require downloading any file from an external website. The code below loads the Iris dataset.
data = load_iris()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['target'] = data.target
df.head()
# ## Split Data into Training and Test Sets
X_train, X_test, Y_train, Y_test = train_test_split(df[data.feature_names], df['target'], random_state=0)
# ## Scikit-learn 4-Step Modeling Pattern
#
# <b>Step 1:</b> Import the model you want to use
#
# In sklearn, all machine learning models are implemented as Python classes
# +
# This was already imported earlier in the notebook so commenting out
#from sklearn.tree import DecisionTreeClassifier
# -
# <b>Step 2:</b> Make an instance of the Model
clf = DecisionTreeClassifier(max_depth = 2,
random_state = 0)
# <b>Step 3:</b> Training the model on the data, storing the information learned from the data
# Model is learning the relationship between x (features: sepal width, sepal height etc) and y (labels-which species of iris)
clf.fit(X_train, Y_train)
# <b>Step 4:</b> Predict the labels of new data (new flowers)
#
# Uses the information the model learned during the model training process
# Predict for One Observation (image)
clf.predict(X_test.iloc[0].values.reshape(1, -1))
# Predict for Multiple Observations (images) at Once
clf.predict(X_test[0:10])
# ## Measuring Model Performance
# Accuracy is defined as:
# (fraction of correct predictions): correct predictions / total number of data points
score = clf.score(X_test, Y_test)
print(score)
# ## How to Visualize Decision Trees using Matplotlib
# #### Default Visualization Based on the Model
tree.plot_tree(clf);
# #### Adjust Figure Size and Dots per inch (DPI)
# +
fig, axes = plt.subplots(nrows = 1, ncols = 1, figsize = (4,4), dpi = 300)
tree.plot_tree(clf);
# -
# #### Make Tree More Interpretable
# The code below not only allows you to save a visualization based on your model, but also makes the decision tree more interpretable by adding in feature and class names.
# Putting the feature names and class names into variables
fn = ['sepal length (cm)','sepal width (cm)','petal length (cm)','petal width (cm)']
cn = ['setosa', 'versicolor', 'virginica']
# +
fig, axes = plt.subplots(nrows = 1, ncols = 1, figsize = (4,4), dpi = 300)
tree.plot_tree(clf,
feature_names = fn,
class_names=cn,
filled = True);
fig.savefig('images/plottreefncn.png')
| scikitlearn/Ex_Files_ML_SciKit_Learn/Exercise Files/02_08_How_to_Visualize_Decision_Trees.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# (MCUASINEW)=
# # 7.2 Métodos cuasi Newton
# ```{admonition} Notas para contenedor de docker:
#
# Comando de docker para ejecución de la nota de forma local:
#
# nota: cambiar `<ruta a mi directorio>` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker.
#
# `docker run --rm -v <ruta a mi directorio>:/datos --name jupyterlab_optimizacion_2 -p 8888:8888 -p 8787:8787 -d palmoreck/jupyterlab_optimizacion_2:3.0.0`
#
# password para jupyterlab: `<PASSWORD>`
#
# Detener el contenedor de docker:
#
# `docker stop jupyterlab_optimizacion_2`
#
# Documentación de la imagen de docker `palmoreck/jupyterlab_optimizacion_2:3.0.0` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/optimizacion_2).
#
# ```
# ---
# ```{admonition} Al final de esta nota el y la lectora:
# :class: tip
#
# *
#
# ```
| libro_optimizacion/temas/7.temas_selectos/7.2/Metodos_cuasi_Newton.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv), data manipulation as in SQL
#import matplotlib.pyplot as plt
import matplotlib
# %matplotlib inline
matplotlib.use("Agg")
# used for plot interactive graph. I like it most for plot
from sklearn.naive_bayes import MultinomialNB
import seaborn as sns # this is used for the plot the graph
from sklearn.model_selection import train_test_split # to split the data into two parts
from sklearn import metrics # for the check the error and accuracy of the model
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
# +
from sklearn.metrics import accuracy_score
from sklearn import svm # To import the svm classifier
import random
data = pd.read_csv('pacific.csv')
print(data.head(6))
# +
#print(data.info())
#data['Status'] = data['Status'].map({'TS':1,'HU':0})
#data.describe() # this will describe the all statistical function of our data
data.Status = pd.Categorical(data.Status)
data['Status'] = data.Status.cat.codes
print(data.head())
# +
# lets get the frequency of different typhoons
#sns.countplot(data['Status'],label="Count")
#plt.show()
# -
random.seed(2)
pred_columns = data[:]
pred_columns.drop(['Status'],axis=1,inplace=True)
pred_columns.drop(['Event'],axis=1,inplace=True)
pred_columns.drop(['Latitude'],axis=1,inplace=True)
pred_columns.drop(['Longitude'],axis=1,inplace=True)
pred_columns.drop(['ID'],axis=1,inplace=True)
pred_columns.drop(['Name'],axis=1,inplace=True)
pred_columns.drop(['Date'],axis=1,inplace=True)
pred_columns.drop(['Time'],axis=1,inplace=True)
prediction_var = pred_columns.columns
print(list(prediction_var))
# +
# now these are the variables which will use for prediction
#now split our data into train and test
train, test = train_test_split(data, test_size = 0.3)# in this our main data is splitted into train and test
# we can check their dimension
print(train.shape)
print(test.shape)
# -
train_X = train[prediction_var]# taking the training data input
train_y= train['Status']# This is output of our training data
#print(list(data.columns))
print(list(train.columns))
# same we have to do for test
test_X= test[prediction_var] # taking test data inputs
test_y =test['Status'] #output value of test dat
#RandomForest classifier
model=RandomForestClassifier(n_estimators=100)# a simple random forest model
model.fit(train_X,train_y)# now fit our model for traiing data
prediction=model.predict(test_X)# predict for the test data
# prediction will contain the predicted value by our model predicted values of diagnosis column for test inputs
print(metrics.accuracy_score(prediction,test_y)) # to check the accuracy
# here we will use accuracy measurement between our predicted value and our test output values
#Decision Tree
model = tree.DecisionTreeClassifier()
model.fit(train_X,train_y)# now fit our model for traiing data
prediction=model.predict(test_X)# predict for the test data
# prediction will contain the predicted value by our model predicted values of diagnosis column for test inputs
# +
df=pd.DataFrame(prediction,test_y)
print(df)
print(metrics.accuracy_score(prediction,test_y)) # to check the accuracy
#here we will use accuracy measurement between our predicted value and our test output values
# Fit a Gaussian Naive Bayes classifier and evaluate it on the test split.
gnb = GaussianNB()
# .fit() returns the fitted estimator itself, not predictions.
y_pred_gnb = gnb.fit(train_X, train_y)
target_pred = y_pred_gnb.predict(test_X)
# BUG FIX: the original passed the fitted estimator (y_pred_gnb) where class
# predictions are required; confusion_matrix/accuracy_score and the
# elementwise comparison all need the predict() output (target_pred).
cnf_matrix_gnb = confusion_matrix(test_y, target_pred)
print(cnf_matrix_gnb)
print(metrics.accuracy_score(target_pred,test_y)) # to check the accuracy
print("Number of mislabeled points out of a total %d points : %d"
      %(data.shape[0],(test_y != target_pred).sum()))
print(accuracy_score(test_y, target_pred))
# -
labels = gnb.predict(test_X)
mat = confusion_matrix(test_y, target_pred)
print(test_y.shape)
print(target_pred.shape)
print(labels.shape)
print(test_X.shape)
print(train_X.shape)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
xticklabels=prediction_var, yticklabels=prediction_var)
#matplotlib.xlabel('true label')
matplotlib.pyplot.show()
#matplotlib('predicted label')
gnb = GaussianNB()
mnb = MultinomialNB()
y_pred_gnb = gnb.fit(train_X, train_y).predict(test_X)
#target_pred = y_pred_mnb.predict(test_X)
print(accuracy_score(test_y, y_pred_gnb))
cnf_matrix_gnb = confusion_matrix(test_y, y_pred_gnb)
print(cnf_matrix_gnb)
print(metrics.accuracy_score(y_pred_gnb,test_y)) # to check the accuracy
print("Number of mislabeled points out of a total %d points : %d"
%(data.shape[0],(test_y != y_pred_gnb).sum()))
print(accuracy_score(test_y, y_pred_gnb))
model = svm.SVC(kernel='linear')
model.fit(train_X,train_y)
#Predict Output
predicted= model.predict(test_X)
print("SVM accuray:",accuracy_score(test_y, predicted))
| Cls6-Supervised Learning - II/Code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import sys
import pandas as pd
import os
import optuna
import matplotlib.pylab as plt
# %matplotlib inline
from lifelines.utils import concordance_index
from sklearn.metrics import r2_score
from torch.utils.data import DataLoader, TensorDataset
from torchcontrib.optim import SWA
from pytorch_lightning import Trainer, seed_everything
from argparse import ArgumentParser
sys.path.append('../data/')
sys.path.append('../data/ml_mmrf')
sys.path.append('../data/synthetic')
sys.path.append('../data/semi_synthetic')
sys.path.append('../ief_core/')
sys.path.append('../ief_core/models/')
# from ml_mmrf.ml_mmrf_v1.data import load_mmrf
from ml_mmrf.data import load_mmrf
from synthetic_data import load_synthetic_data_trt, load_synthetic_data_noisy
from ss_data import *
from models.ssm.ssm import SSM, SSMAtt
from models.ssm.ssm_baseline import SSMBaseline
from models.rnn import GRU
from models.utils import *
print(torch.__version__)
# +
# %matplotlib inline
import matplotlib.pylab as plt
import seaborn as sns
## alternate font/graph format
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
plt.rc('font', weight='heavy')
plt.rc('xtick', labelsize='x-large')
plt.rc('ytick', labelsize='x-large')
plt.rc('axes', labelsize='x-large')
def get_scolor():
    """Return the mapping from series index (0-3) to a matplotlib color code."""
    return {0: 'k', 1: 'b', 2: 'g', 3: 'r'}
# -
# # Semi-Synthetic Data
# +
print(torch.cuda.is_available())
if torch.cuda.is_available():
device = torch.device('cuda:1')
else:
device = torch.device('cpu')
fname = '../ief_core/tests/checkpoints/ssm_semi_syn_moe_20000sample_complexityepoch=00989-val_loss=-323.60.ckpt'
checkpoint = torch.load(fname, map_location=lambda storage, loc: storage)
hparams = checkpoint['hyper_parameters']
del hparams['trial']
print({'bs': hparams.bs, 'lr': hparams.lr, 'C': hparams.C, 'reg_all': hparams.reg_all, 'reg_type': hparams.reg_type, 'dim_stochastic': hparams.dim_stochastic})
trial = optuna.trial.FixedTrial({'bs': hparams.bs, 'lr': hparams.lr, 'C': hparams.C, 'reg_all': hparams.reg_all, 'reg_type': hparams.reg_type, 'dim_stochastic': hparams.dim_stochastic})
model = SSM(trial, **hparams); model.setup(1)
model.load_state_dict(checkpoint['state_dict'])
model.to(device)
print(model.hparams.ss_in_sample_dist)
print(model.hparams.ss_missing)
ddata = load_ss_data(1000, gen_fly=True, eval_mult=200, in_sample_dist=model.hparams.ss_in_sample_dist, add_missing=model.hparams.ss_missing)
print(f'eval set size: {ddata["valid"][0]["X"].shape}')
nelbos = []
for i in range(1,5):
_, valid_loader = load_ss_helper(ddata, tvt='valid', bs=model.hparams['bs'], device=device, valid_fold=i)
batch_nelbos = []
for i_batch, valid_batch_loader in enumerate(valid_loader):
(nelbo, nll, kl, _), _ = model.forward(*valid_batch_loader, anneal = 1.)
nelbo, nll, kl = nelbo.item(), nll.item(), kl.item()
batch_nelbos.append(nelbo)
# (nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
nelbos.append(np.mean(batch_nelbos))
print(f'[COMPLETE] mean nelbo: {np.mean(nelbos)}, std nelbo: {np.std(nelbos)}')
# -
# # Synthetic Data
from numpy.polynomial.polynomial import polyval
np.random.seed(0)
def g(v, params=None):
    """Evaluate a piecewise double-sigmoid response curve at time v.

    Args:
        v: time point at which to evaluate the curve.
        params: optional iterable (a1, a2, a3, b, gamma); when omitted the
            defaults (5, 0.3, 0.4, 2, 40.0) are used.

    Returns:
        Curve value: a rising sigmoid for v < gamma, a decaying one after.
    """
    a1, a2, a3, b, gamma = params if params is not None else (5, 0.3, 0.4, 2, 40.)
    # b0 anchors the rising branch so the curve starts near zero.
    b0 = -a1 / (1 + np.exp(a2*gamma / 2))
    if v < gamma:
        return b0 + a1 / (1 + np.exp(-a2*(v-(gamma/2))))
    # a0 matches the two branches at v = gamma; only needed on the tail.
    a0 = (a1 + 2*b0 - b) / (1 + np.exp(-a3*gamma/2))
    return b + a0 / (1 + np.exp(a3*(v-(3*gamma)/2)))
params = [-15, 0.7, 0.4, 2, 3]
params2 = [5, 0.1, 0.3, 2, 2]
Xvals = np.arange(20)
up = [-2, 0.0002, 0.1]
res = np.array([polyval(val,up) for val in Xvals])
res = 0.5*res+0.5*np.random.randn(*res.shape)
rx_t = np.arange(8,14)
res_copy = np.copy(res)
for t in rx_t:
re = np.arange(Xvals.shape[0] - t)
add = np.array([g(v,params) for v in re])
res_copy[np.arange(t,Xvals.shape[0])]+= add
fig, ax = plt.subplots(figsize=(8,5.2))
ax.plot(Xvals, res, color='blue', linestyle='-', label='baseline')
ax.plot(Xvals, res_copy, color='red', linestyle='-.', label='baseline+trt resp')
for t in range(len(rx_t)):
if t == 0:
ax.axvline(rx_t[t], linestyle=':', color='grey', label='treatment')
else:
ax.axvline(rx_t[t], linestyle=':', color='grey')
# ax.set_xlabel('Time', fontsize=18)
ax.tick_params(labelsize=20)
ax.set_xlabel('Time', fontsize=25)
ax.set_ylabel('Z', fontsize=25)
ax.legend(fontsize=20)
# plt.title('Baseline Progression w/ Superimposed Treatment Response', fontsize=15, pad=15)
fig.savefig('./plots/treatment_exp_syn.pdf', bbox_inches='tight')
nsamples = {'train':100, 'valid':1000, 'test':50000}
folds = [0,1,2,3,4]
alpha_1_complex = False; per_missing = 0.; add_feat = 0; num_trt = 1
ddata = load_synthetic_data_trt(fold_span = folds, \
nsamples = nsamples, \
distractor_dims_b=4, \
sigma_ys=0.7, \
include_line=True, \
alpha_1_complex=alpha_1_complex, \
per_missing=per_missing, \
add_feats=add_feat, \
num_trt=num_trt, \
sub=True)
def setup_torch_dataset(ddata, fold, tvt, device=None, oversample=True, att_mask=False, batch_size=600):
    """Build a (TensorDataset, DataLoader) pair for one fold/split of ``ddata``.

    Args:
        ddata: nested dict; ``ddata[fold][tvt]`` holds numpy arrays under keys
            'b', 'x', 'a', 'm', 'ys_seq', 'ce' and optionally 'digitized_y'.
        fold: fold index into ``ddata``.
        tvt: split name ('train'/'valid'/'test').
        device: optional torch device; tensors are moved there when given.
        oversample: unused; kept for backward compatibility with callers.
        att_mask: when True, append an attention-mask tensor (requires the
            project helper ``get_attn_mask``).
        batch_size: DataLoader batch size.  No shuffling: examples are sorted
            by the first outcome value so batches contain similar outcomes.

    Returns:
        (TensorDataset, DataLoader) with tensors (B, X, A, M, Y, CE[, Am]),
        each sorted by the first outcome value.
    """
    def _load(key):
        # All inputs are stored as numpy arrays; normalize to float32 tensors
        # and move to `device` when one is given (previously this branch was
        # duplicated for every tensor).
        t = torch.from_numpy(ddata[fold][tvt][key].astype('float32'))
        return t.to(device) if device is not None else t

    B, X, A, M = _load('b'), _load('x'), _load('a'), _load('m')
    # Sort examples by the first outcome so the loader sees them in outcome order.
    y_vals = ddata[fold][tvt]['ys_seq'][:,0].astype('float32')
    idx_sort = np.argsort(y_vals)
    if 'digitized_y' in ddata[fold][tvt]:
        print ('using digitized y')
        Y = _load('digitized_y')
    else:
        # NOTE: intentionally no astype('float32') here -- the original dtype
        # of ys_seq is preserved on the continuous-outcome path.
        Y = torch.from_numpy(ddata[fold][tvt]['ys_seq'][:,[0]]).squeeze()
        if device is not None:
            Y = Y.to(device)
    CE = _load('ce')
    if att_mask:
        attn_shape = (A.shape[0],A.shape[1],A.shape[1])
        Am = get_attn_mask(attn_shape, ddata[fold][tvt]['a'].astype('float32'), device)
        data = TensorDataset(B[idx_sort], X[idx_sort], A[idx_sort], M[idx_sort], Y[idx_sort], CE[idx_sort], Am[idx_sort])
    else:
        data = TensorDataset(B[idx_sort], X[idx_sort], A[idx_sort], M[idx_sort], Y[idx_sort], CE[idx_sort])
    data_loader = DataLoader(data, batch_size=batch_size, shuffle=False)
    return data, data_loader
# +
# Load trained synthetic-data checkpoints into `models`, keyed by name.
sname = {}; models = {}
# Example plots for one model; you should also train an SSM Linear and SSM PK-PD w/o local clock by setting clock_ablation to True in hyperparams
sname['syn_ssm_att'] = '../ief_core/tests/checkpoints/ssm_syn_att1epoch=14958-val_loss=39.48.ckpt'
if torch.cuda.is_available():
    device = torch.device('cuda:2')
else:
    device = torch.device('cpu')
for model_n in sname.keys():
    # map_location keeps the checkpoint on CPU; the model is moved to `device` below
    checkpoint = torch.load(sname[model_n], map_location=lambda storage, loc: storage)
    hparams = checkpoint['hyper_parameters']
    del hparams['trial']
    # NOTE(review): attribute access (hparams.bs) implies `hyper_parameters`
    # is a Namespace-like object, not a plain dict -- confirm against trainer.
    trial = optuna.trial.FixedTrial({'bs': hparams.bs, 'lr': hparams.lr, 'C': hparams.C, 'reg_all': hparams.reg_all, 'reg_type': hparams.reg_type, 'dim_stochastic': hparams.dim_stochastic})
    if 'alaa' in model_n:
        model = SSMBaseline(trial, **hparams); model.setup(1)
    else:
        model = SSM(trial, **hparams); model.setup(1)
    model.load_state_dict(checkpoint['state_dict'])
    models[model_n] = model
    models[model_n].to(device)
# -
# Evaluate each loaded model's test NELBO across all folds.
folds = [0,1,2,3,4]
for model_ in models:
    model = models[model_]
    fold_nelbos = []
    for fold in folds:
        # BUG FIX: this previously tested `model_n` -- a stale leftover from
        # the checkpoint-loading loop above -- instead of the current model
        # name, so the attention-mask branch was chosen by the wrong model.
        if 'alaa' in model_:
            data, data_loader = setup_torch_dataset(ddata, fold, 'test', device, att_mask=True)
        else:
            data, data_loader = setup_torch_dataset(ddata, fold, 'test', device, att_mask=False)
        batch_nelbos = []
        model.eval()
        for i_batch, data_batch_loader in enumerate(data_loader):
            (nelbo, nll, kl, _), _ = model.forward(*data_batch_loader, anneal = 1.)
            nelbo, nll, kl = nelbo.item(), nll.item(), kl.item()
            batch_nelbos.append(nelbo)
        fold_nelbos.append(np.mean(batch_nelbos))
    print('stats for %s'%model_)
    print('mean NELBO:', np.mean(fold_nelbos))
    print('std NELBO:', np.std(fold_nelbos))
# Sort the validation split of fold 1 by the first outcome value so plots
# below index patients in a deterministic order.
fold = 1
data, data_loader = setup_torch_dataset(ddata, fold, 'valid', device)
subtype = ddata[fold]['valid']['subtype']
y_vals = ddata[fold]['valid']['ys_seq'][:,0].astype('float32').ravel()
idx_sort = np.argsort(y_vals)
sorted_subtype = subtype[idx_sort]
sorted_ys = y_vals[idx_sort]
sorted_xs = ddata[fold]['valid']['x'][idx_sort]
sorted_as = ddata[fold]['valid']['a'][idx_sort]
# +
# Condition each model on T_condition steps and sample T_forward steps ahead;
# store the forward samples per model for the plots below.
(B, X, A, M, Y, CE) = data_loader.dataset.tensors
_, _, lens = get_masks(M)
# T_forward = 10; T_condition = 5
T_forward = 17; T_condition = 2
# B, X, A, M, Y, CE = B[lens>T_forward+T_condition], X[lens>T_forward+T_condition], A[lens>T_forward+T_condition], M[lens>T_forward+T_condition], Y[lens>T_forward+T_condition], CE[lens>T_forward+T_condition]
samples = {}
for name, model in models.items():
    _, _, _, _, _, tforward, _, _ = model.inspect(T_forward, T_condition, B, X, A, M, Y, CE)
    tforw_n = tforward.cpu().detach().numpy()
    samples[name] = tforw_n
# +
# Plot one patient's observed biomarker 1 plus model forward-samples; treatment
# times are marked with orange triangles and a dark-red 'line' band.
plt.rc('font', family='serif')
fig, axlist = plt.subplots(1,2,figsize=(12,5))
scolor=get_scolor()
fig.subplots_adjust(hspace = 0.4)
model_name = 'PK-PD'
model_name2 = 'Linear'
model_name3 = 'PK-PD w/o lc'
tvt = 'valid'
pidx = 0
k2alph = {}
k2alph[0] = '(a)'
k2alph[1] = '(b)'
k2alph[2] = '(c)'
k2alph[3] = '(d)'
# pt: which patient (within the subtype group) to plot; ks: subtype to select
pt = 1
ks = [1]
axs = axlist.ravel()
for k, ax in enumerate([axs[0]]):
    idx = np.where(sorted_subtype==ks[k])[0]
    pred = samples['syn_ssm_att'][idx[pt]]
    # pred = samples['syn_ssm_att_notexp'][idx[pt]]
    # pred2 = samples['syn_ssm_lin'][idx[pt]]
    # pred3 = samples['syn_ssm_att_nolc'][idx[pt]]
    data = sorted_xs[idx[pt],:,:]
    trt_idx= np.where(sorted_as[idx[pt],:,1] == 1.)[0][0]
    xvals = np.arange(data.shape[0])
    tlist_x = []; tlist_y = []
    treat_i = sorted_as[idx[pt],:,1]
    # NOTE(review): ymax is computed but never used below
    ymax = ax.get_ylim()[1]+0.05
    for t in range(treat_i.shape[0]):
        if treat_i[t] == 1:
            tlist_x.append(t)
            tlist_y.append(15)
    line1 = ax.scatter(tlist_x, tlist_y, marker='v', color='orange')
    line2 = ax.fill_between(tlist_x, np.array(tlist_y)+4, np.array(tlist_y)+6, color='darkred', alpha=0.7)
    ax.annotate('Line', xy=(40, 247), xycoords='axes points',
                size=20, bbox=dict(boxstyle='round', fc='w'))
    ax.annotate('Trt', xy=(45, 217), xycoords='axes points',
                size=20, bbox=dict(boxstyle='round', fc='w'))
    ax.scatter(xvals, data[:,0], s = 64, label = 'Data')
    ax.plot(xvals[1:18], pred[:,0],'o-', color='k', label = '%s'%(model_name), linewidth=3., alpha=0.5, markersize=8)
    # ax.plot(xvals[1:18], pred2[:,0],'x-', color='r', label = '%s'%(model_name2), linewidth=3., alpha=0.5, markersize=8)
    # ax.plot(xvals[1:18], pred3[:,0],'^-', color='darkgrey', label = '%s'%(model_name3), linewidth=3., markersize=8)
    tag = 'S[%d]'%(ks[k])
    ax.set_title('Patient [%d] (Biomarker 1)'%(ks[k]), fontsize=25, pad=10)
    ax.set_xlabel('Time', fontsize=25)
    ax.tick_params(labelsize=20)
    if k == 0:
        ax.legend(fontsize=17, loc=3)
# Same patient, second biomarker, on the right-hand axis.
for k, ax in enumerate([axs[1]]):
    idx = np.where(sorted_subtype==ks[k])[0]
    pred = samples['syn_ssm_att'][idx[pt]]
    # pred = samples['syn_ssm_att_notexp'][idx[pt]]
    # pred2 = samples['syn_ssm_lin'][idx[pt]]
    # pred3 = samples['syn_ssm_att_nolc'][idx[pt]]
    data = sorted_xs[idx[pt],:,:]
    xvals = np.arange(data.shape[0])
    trt_idx= np.where(sorted_as[idx[pt],:,1] == 1.)[0][0]
    tlist_x = []; tlist_y = []
    treat_i = sorted_as[idx[pt],:,1]
    for t in range(treat_i.shape[0]):
        if treat_i[t] == 1:
            tlist_x.append(t)
            tlist_y.append(32)
    ax.scatter(tlist_x, tlist_y, marker='v', color='orange')
    ax.fill_between(tlist_x, np.array(tlist_y)+4, np.array(tlist_y)+6, color='darkred', alpha=0.7)
    bidx = 2
    ax.scatter(xvals, data[:,1], label = 'Data', s=64)
    ax.plot(xvals[1:18], pred[:,1],'o-', color='k', label = '%s'%(model_name), linewidth=3., alpha=0.5, markersize=8)
    # ax.plot(xvals[1:18], pred2[:,1],'x-', color='r', label = '%s'%(model_name2), linewidth=3., alpha=0.5, markersize=8)
    # ax.plot(xvals[1:18], pred3[:,1],'^-', color='darkgrey', label = '%s'%(model_name3), linewidth=3., markersize=8)
    tag = 'S[%d]'%(ks[k])
    ax.set_title('Patient [%d] (Biomarker %d)'%(ks[k],bidx), fontsize=25, pad=10)
    ax.set_xlabel('Time', fontsize=25)
    ax.tick_params(labelsize=20)
plt.savefig('./plots/aaai-plots/ssm_syn_joint24_bigfonts_nolc_linetrt.pdf', bbox_inches='tight')
# -
# # MM Data
# +
# Load the cleaned MMRF multiple-myeloma dataset for a single fold.
mname = {}; models = {}; fold = 1
data_dir = '/afs/csail.mit.edu/u/z/zeshanmh/research/ief/data/ml_mmrf/ml_mmrf/output/cleaned_mm_fold_2mos_ind.pkl'
mmdata = load_mmrf(fold_span = [fold], \
                   digitize_K = 20, \
                   digitize_method = 'uniform', \
                   data_dir=data_dir, \
                   restrict_markers=[], \
                   add_syn_marker=False, \
                   window='all', \
                   data_aug=False)
## fold 0
# Checkpoint paths per fold; keys encode the model variant (lin / nl / moe /
# att / att12 / baseline) and are pattern-matched by the loading loop below.
if fold == 0:
    mname[f'ssm-lin-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold0_ssm_lin_epoch=14605-val_loss=88.48.ckpt'
    mname[f'ssm-nl-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold0_ssm_nl_epoch=07451-val_loss=83.10.ckpt'
    mname[f'ssm-moe-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold0_ssm_moe_epoch=10475-val_loss=74.12.ckpt'
    mname[f'ssm-att-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold0_ssm_att1epoch=14148-val_loss=65.06.ckpt'
    mname[f'ssm-att12-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold0_ssm_att1_att2epoch=14135-val_loss=64.56.ckpt'
    mname[f'ssm-baseline{fold}'] = '../ief_core/tests/checkpoints/mmfold0ssm_baselineepoch=14285-val_loss=99.59.ckpt'
elif fold == 1:
    # # ## fold 1
    mname[f'ssm-lin-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold1ssm_linepoch=14004-val_loss=74.14.ckpt'
    # mname[f'ssm-nl-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold1_ssm_nl_epoch=07519-val_loss=76.88.ckpt'
    # mname[f'ssm-moe-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold1_ssm_moe_epoch=12673-val_loss=71.12.ckpt'
    mname[f'ssm-att-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold1_regFalse_ssm_att1epoch=13142-val_loss=63.89.ckpt' #mmfold1_ssm_att1epoch=13696-val_loss=57.20.ckpt
    # mname[f'ssm-att12-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold1_ssm_att1_att2epoch=13623-val_loss=59.14.ckpt'
    # mname[f'ssm-baseline{fold}'] = '../ief_core/tests/checkpoints/mmfold1ssm_baselineepoch=14997-val_loss=92.22.ckpt'
elif fold == 2:
    # ## fold 2
    mname[f'ssm-lin-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold2_ssm_lin_epoch=10805-val_loss=91.49.ckpt'
    mname[f'ssm-nl-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold2_ssm_nl_epoch=07478-val_loss=84.18.ckpt'
    mname[f'ssm-moe-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold2_ssm_moe_epoch=14598-val_loss=77.14.ckpt'
    mname[f'ssm-att-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold2_ssm_att1epoch=14798-val_loss=66.73.ckpt'
    mname[f'ssm-att12-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold2_ssm_att1_att2epoch=14606-val_loss=69.02.ckpt'
    mname[f'ssm-baseline{fold}'] = '../ief_core/tests/checkpoints/mmfold2ssm_baselineepoch=12751-val_loss=97.99.ckpt'
elif fold == 3:
    # ## fold 3
    mname[f'ssm-lin-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold3_ssm_lin_epoch=08465-val_loss=80.52.ckpt'
    mname[f'ssm-nl-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold3_ssm_nl_epoch=08438-val_loss=70.79.ckpt'
    mname[f'ssm-moe-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold3_ssm_moe_epoch=07993-val_loss=63.16.ckpt'
    mname[f'ssm-att-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold3_ssm_att1epoch=13974-val_loss=53.37.ckpt'
    mname[f'ssm-att12-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold3_ssm_att1_att2epoch=11925-val_loss=55.52.ckpt'
    mname[f'ssm-baseline{fold}'] = '../ief_core/tests/checkpoints/mmfold3ssm_baselineepoch=13696-val_loss=88.13.ckpt'
elif fold == 4:
    # ## fold 4
    mname[f'ssm-lin-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold4_ssm_lin_epoch=13385-val_loss=79.35.ckpt'
    mname[f'ssm-nl-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold4_ssm_nl_epoch=07690-val_loss=74.66.ckpt'
    mname[f'ssm-moe-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold4_ssm_moe_epoch=08201-val_loss=66.47.ckpt'
    mname[f'ssm-att-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold4_ssm_att1epoch=13337-val_loss=58.12.ckpt'
    mname[f'ssm-att12-fold{fold}'] = '../ief_core/tests/checkpoints/mmfold4_ssm_att1_att2epoch=13966-val_loss=57.53.ckpt'
    mname[f'ssm-baseline{fold}'] = '../ief_core/tests/checkpoints/mmfold4ssm_baselineepoch=08013-val_loss=94.48.ckpt'
if torch.cuda.is_available():
    device = torch.device('cuda:1')
else:
    device = torch.device('cpu')
print(mname.keys())
# Load every MM checkpoint into `models`.  The model class is chosen from the
# checkpoint name; previously the FixedTrial construction and the
# load/setup/to-device boilerplate were duplicated verbatim in all branches.
for model_n in mname.keys():
    # map_location keeps the checkpoint on CPU; the model is moved to `device` below
    checkpoint = torch.load(mname[model_n], map_location=lambda storage, loc: storage)
    hparams = checkpoint['hyper_parameters']
    del hparams['trial']
    if 'baseline' in model_n:
        model_cls = SSMBaseline
    elif 'ssm' in model_n and 'att12' not in model_n:
        model_cls = SSM
    elif 'ssm' in model_n and 'att12' in model_n:
        model_cls = SSMAtt
    else:
        # unrecognized checkpoint name: skip (matches the original behavior,
        # which silently stored nothing for such keys)
        continue
    trial = optuna.trial.FixedTrial({'bs': hparams.bs, 'lr': hparams.lr, 'C': hparams.C, 'reg_all': hparams.reg_all, 'reg_type': hparams.reg_type, 'dim_stochastic': hparams.dim_stochastic})
    model = model_cls(trial, **hparams); model.setup(1)
    model.load_state_dict(checkpoint['state_dict'])
    models[model_n] = model
    models[model_n].to(device)
# -
# ## Pairwise Comparisons
# +
# Compare per-patient NELBOs between the PK-PD attention model and baselines,
# averaged over 20 sampling trials.
data, data_loader = models[f'ssm-att-fold{fold}'].load_helper('test', device=device, att_mask=True)
(B, X, A, M, Y, CE, Am) = data_loader.dataset.tensors
_, _, lens = get_masks(M)
# keep only patients with more than one observed time step
B, X, A, M, Y, CE, Am = B[lens>1], X[lens>1], A[lens>1], M[lens>1], Y[lens>1], CE[lens>1], Am[lens>1]
nelbos = {}
# prepare list of things to compare (i.e. lotc)
# lotc = [(f'ssm-att-fold{fold}', f'ssm-lin-fold{fold}'),(f'ssm-att-fold{fold}', f'ssm-nl-fold{fold}'),(f'ssm-att-fold{fold}', f'ssm-moe-fold{fold}')]
lotc = [(f'ssm-att-fold{fold}', f'ssm-lin-fold{fold}')]
for elem in lotc:
    trials1 = []; trials2 = []
    nelboP = []; nelboB = []
    for t in range(20):
        pkpd_n, base_n = elem
        nelbo_pkpd, _, _, _ = models[pkpd_n].get_loss(B, X, A, M, Y, CE, anneal=1.)
        # nelbo_base, _, _, _ = models[base_n].get_loss(B, X, A, M, Y, CE, Am, anneal=1.)
        nelbo_base, _, _, _ = models[base_n].get_loss(B, X, A, M, Y, CE, anneal=1.)
        # fraction of patients where PK-PD attains lower NELBO than the baseline
        is_pkpd_better = (nelbo_base - nelbo_pkpd) > 0.
        trials1.append(np.mean(pt_numpy(is_pkpd_better)))
        trials2.append(np.std(pt_numpy(is_pkpd_better)))
        nelboP.append(np.mean(pt_numpy(nelbo_pkpd)))
        nelboB.append(np.mean(pt_numpy(nelbo_base)))
    print(f'{pkpd_n} vs {base_n}: mean -- {np.mean(trials1)}, std -- {np.mean(trials2)}')
    print(f'{pkpd_n} nelbo: {np.mean(nelboP)}')
    print(f'{base_n} nelbo: {np.mean(nelboB)}')
print(models)
# +
# Compare per-patient importance-sampling NLL estimates; patients within
# `thres` nats of each other are counted as 'unclear'.
data, data_loader = models[f'ssm-att-fold{fold}'].load_helper('test', device=device, att_mask=True)
(B, X, A, M, Y, CE, Am) = data_loader.dataset.tensors
_, _, lens = get_masks(M)
B, X, A, M, Y, CE, Am = B[lens>1], X[lens>1], A[lens>1], M[lens>1], Y[lens>1], CE[lens>1], Am[lens>1]
nlls = {}
# lotc = [(f'ssm-att-fold{fold}', f'ssm-lin-fold{fold}'),(f'ssm-att-fold{fold}', f'ssm-nl-fold{fold}'),(f'ssm-att-fold{fold}', f'ssm-moe-fold{fold}'), (f'ssm-att-fold{fold}', f'ssm-baseline{fold}')]
lotc = [(f'ssm-att-fold{fold}', f'ssm-lin-fold{fold}')]
for comp in lotc:
    att = comp[0]
    base = comp[1]
    # print(f'MODEL COMP {att} vs ')
    for model in [base, att]:
        loss = models[model].imp_sampling(B, X, A, M, Y, CE, imp_samples = 20, idx = -1)
        mloss = pt_numpy(loss[0])
        nlls[model] = mloss
    thres = 10
    print(f'total examples: {len(nlls[att])}')
    num_pk_better = np.sum(nlls[base] > (nlls[att] + thres))
    print(f'num examples where {att} does better: {num_pk_better}, {num_pk_better / len(nlls[att])}')
    num_lin_better = np.sum(nlls[att] > (nlls[base] + thres))
    print(f'num examples where {base} does better: {num_lin_better}, {num_lin_better / len(nlls[att])}')
    t1 = np.sum((nlls[base] > (nlls[att] - thres)) & (nlls[base] < (nlls[att] + thres)))
    print(f'num examples where it is unclear: {t1}, {t1 / len(nlls[att])}')
    print()
    print()
# -
# ## Weights on $\alpha_1$ Linear Function
# Pull feature-name arrays for biomarkers (X), treatments (A) and baseline (B);
# underscores in biomarker names are replaced for nicer plot labels.
X_names_orig = mmdata[fold]['train']['feature_names_x']
X_names = mmdata[fold]['train']['feature_names_x']
A_names = mmdata[fold]['train']['feature_names_a']
B_names = mmdata[fold]['train']['feature_names']
all_names = np.concatenate([X_names, A_names, B_names],0)
print(B_names)
print (all_names.shape, X_names.shape)
X_names = [s.replace('_', ' ') for s in X_names]
print(X_names)
print(A_names)
print(all_names)
# +
from sklearn.manifold import TSNE
from pyro.distributions import Normal, Independent, Categorical, LogNormal
class ModelIntrospector:
    """Probe a trained SSM PK-PD model: stratify its test set, sample latent
    trajectories, linearize treatment effects (Jacobians) and embed latent
    quantities with TSNE.

    Args:
        model: trained SSM model exposing inf_network, transition_fxn, e_mu, ...
        data_loader: DataLoader whose dataset.tensors are (B, X, A, M, Y, CE).
        feat_names: 3-tuple of (X_names, A_names, B_names) arrays.
    """
    def __init__(self, model, data_loader, feat_names):
        self.model = model
        self.data_loader = data_loader
        self.X_names, self.A_names, self.B_names = feat_names

    def _stratify(self, stratify_params=None):
        """Return (B, X, A, M, Y, CE); when ``stratify_params`` is given,
        restrict to patients whose baseline feature ``feat`` equals ``val``
        (optionally dropping censored patients first)."""
        (B, X, A, M, Y, CE) = self.data_loader.dataset.tensors
        if not stratify_params:
            return (B, X, A, M, Y, CE)
        feat, val, keep_censored = stratify_params['feat'], stratify_params['val'], stratify_params['keep_censored']
        idx = np.where(self.B_names == feat)[0]
        if not keep_censored:
            # BUG FIX: `CEn` was previously used here without ever being
            # defined, so keep_censored=False always raised a NameError.
            CEn = pt_numpy(CE)
            obs = np.where(CEn == 1)[0]
            B, X, A, M, Y, CE = B[obs], X[obs], A[obs], M[obs], Y[obs], CE[obs]
        Bn = pt_numpy(B)
        pt_idxs = np.where(Bn[:, idx] == val)[0]
        B, X, A, M, Y, CE = B[pt_idxs], X[pt_idxs], A[pt_idxs], M[pt_idxs], Y[pt_idxs], CE[pt_idxs]
        return (B, X, A, M, Y, CE)

    def _get_con_signal(self, B, X, A, M, Y, CE, Tmax=None):
        """Build the 'control' signal fed to the transition function:
        [local clock, baseline (tiled over time), remaining treatment dims]."""
        _, _, lens = get_masks(M)
        # drop patients with fewer than two observed time steps
        B, X, A, M, Y, CE = B[lens > 1], X[lens > 1], A[lens > 1], M[lens > 1], Y[lens > 1], CE[lens > 1]
        if Tmax is None:
            Tmax = X.shape[1] - 1
        Aval = A[:, 1:Tmax, :]
        con = torch.cat([Aval[..., [0]], B[:, None, :].repeat(1, Aval.shape[1], 1), Aval[..., 1:]], -1)
        return con

    def _p_Zt(self, B, X, A, M, Y, CE, dist='posterior'):
        """Sample latent trajectories Z, either from the inference network
        ('posterior') or by rolling the generative prior forward ('prior').
        Returns Z_t[:, :-1, :] (all but the last step)."""
        _, _, lens = get_masks(M)
        B, X, A, M, Y, CE = B[lens > 1], X[lens > 1], A[lens > 1], M[lens > 1], Y[lens > 1], CE[lens > 1]
        X0 = X[:, 0, :]; A0 = A[:, 0, :]
        if dist == 'posterior':
            Z_t, _ = self.model.inf_network(X, A, M, B)
        elif dist == 'prior':
            Tmax = X.shape[1] - 1
            inp_cat = torch.cat([B, X0, A0], -1)
            mu1 = self.model.prior_W(inp_cat)
            sig1 = torch.nn.functional.softplus(self.model.prior_sigma(inp_cat))
            Z_start = torch.squeeze(Independent(Normal(mu1, sig1), 1).sample((1,)))
            Zlist = [Z_start]
            for t in range(1, Tmax):
                Ztm1 = Zlist[t - 1]
                if self.model.hparams.include_baseline:
                    Aval = A[:, t - 1, :]
                    Acat = torch.cat([Aval[..., [0]], B, Aval[..., 1:]], -1)
                    mut, sigmat = self.model.transition_fxn(Ztm1, Acat)
                else:
                    # BUG FIX: was `self.transition_fxn` -- ModelIntrospector
                    # has no such attribute; the model's transition fxn is meant.
                    mut, sigmat = self.model.transition_fxn(Ztm1, A[:, t - 1, :])
                Zlist.append(torch.squeeze(Independent(Normal(mut, sigmat), 1).sample((1,))))
            Z_t = torch.cat([k[:, None, :] for k in Zlist], 1)
        else:
            raise ValueError('bad distribution type...')
        return Z_t[:, :-1, :]

    def get_ensemble_weights(self):
        """Softmax-normalized mixture weights over the three transition experts."""
        return torch.softmax(self.model.transition_fxn.t_mu.alphas, 1)

    def get_te_alpha1(self):
        """alpha_1 of the treatment-exponential expert, evaluated on the
        (unstratified) control signal."""
        str_tensors = self._stratify()
        con = self._get_con_signal(*str_tensors)
        return self.model.transition_fxn.t_mu.treatment_exp.alpha_1_layer(con)

    def get_te_alpha1_weights(self):
        """Raw weight matrix of the alpha_1 linear layer."""
        return self.model.transition_fxn.t_mu.treatment_exp.alpha_1_layer.weight

    def _te_response(self, con):
        """Piecewise-sigmoid treatment-exponential response i3(t) on `con`
        (shared by jacobian_alpha1 and jacobian_trt)."""
        tvals = con[..., [0]]
        # NOTE(review): `keepdims` (the numpy-style alias) on Tensor.max/sum
        # requires a recent torch version; older releases accept only `keepdim`.
        tmax_lot = ((tvals * con[..., -3:]).max(1, keepdims=True)[0] * con[..., -3:]).sum(-1, keepdims=True)
        pred = self.model.transition_fxn.t_mu.treatment_exp.pred_prms(con[..., -3:])
        alpha_2, alpha_3, gamma = torch.sigmoid(pred[..., [0]]), torch.sigmoid(pred[..., [1]]), torch.sigmoid(pred[..., [2]]) * tmax_lot
        # indicator for times past gamma (switch between rise / decay branches)
        mask = (tvals - gamma)
        mask[mask <= 0] = 0
        mask[mask > 0] = 1
        res1 = (1 - mask) * (1 / (1 + torch.exp(-alpha_2 * (tvals - 0.5 * gamma))))
        res2 = mask * (1 / ((1 + torch.exp(-alpha_3 * 0.5 * gamma)) * (1 + torch.exp(alpha_3 * (tvals - 1.5 * gamma)))))
        return res1 + res2

    def jacobian_alpha1(self, stratify_params=None):
        """Jacobian of the emitted biomarkers w.r.t. alpha_1, linearized
        through the output and emission layers."""
        We = self.model.e_mu.weight
        Wo = self.model.transition_fxn.t_mu.out_layer.weight
        te_weight = self.get_ensemble_weights()[:, 2]
        print('Emission weights shape....', We.shape)
        print('Output layer weights shape...', Wo.shape)
        print('Ensemble treatment effect function weights...', te_weight.shape)
        # compute jacobian
        str_tensors = self._stratify(stratify_params)
        con = self._get_con_signal(*str_tensors, Tmax=None)
        res = self._te_response(con)
        J = torch.matmul(torch.matmul(res * te_weight[None, None, :], Wo.T), We.T) # should i have transposed??
        print('Jacobian shape...', (J.shape))
        return J

    def jacobian_trt(self, trt, expect='posterior', stratify_params=None):
        """Directional derivative of the emitted biomarkers w.r.t. a treatment
        direction ``trt``: sum of the linear-control, log-cell and
        treatment-exponential expert terms, weighted by the ensemble weights."""
        We = self.model.e_mu.weight
        Wo = self.model.transition_fxn.t_mu.out_layer.weight
        te_weight1 = self.get_ensemble_weights()[:, 0]
        te_weight2 = self.get_ensemble_weights()[:, 1]
        te_weight3 = self.get_ensemble_weights()[:, 2]
        str_tensors = self._stratify(stratify_params)
        inpx = self._p_Zt(*str_tensors, expect)
        con = self._get_con_signal(*str_tensors, Tmax=inpx.shape[1] + 1)
        # term 1: linear control path (d/dx tanh = 1 - tanh**2)
        i1 = inpx * (1 - torch.tanh(self.model.transition_fxn.t_mu.control_layer(con))**2)
        Wlin = self.model.transition_fxn.t_mu.control_layer.weight[:, -(trt.shape[-1]):]
        t1 = torch.matmul(We, torch.matmul(Wo, torch.matmul(torch.diag_embed(i1 * te_weight1[None, None, :]), Wlin)))
        # term 2: log-cell path
        a = con[..., 1:]
        # NOTE(review): unlike term 1 this tanh is NOT squared; if the log-cell
        # control is tanh-activated the derivative should use tanh(..)**2 --
        # preserved as written, confirm against the logcell definition.
        i2 = inpx * (1 - torch.tanh(self.model.transition_fxn.t_mu.logcell.controlfxn(a)))
        Wlc = self.model.transition_fxn.t_mu.logcell.controlfxn.weight[:, -(trt.shape[-1]):]
        t2 = torch.matmul(We, torch.matmul(Wo, torch.matmul(torch.diag_embed(i2 * te_weight2[None, None, :]), Wlc)))
        # term 3: treatment-exponential path (same piecewise sigmoid as jacobian_alpha1)
        i3 = self._te_response(con)
        Wte = self.model.transition_fxn.t_mu.treatment_exp.alpha_1_layer.weight[..., -(trt.shape[-1]):]
        t3 = torch.matmul(We, torch.matmul(Wo, torch.matmul(torch.diag_embed(i3 * te_weight3[None, None, :]), Wte)))
        # result (compute directional derivative)
        grad = t1 + t2 + t3
        return torch.matmul(grad, trt)

    def _death_time_bins(self, Y, CE, bins, verbose):
        """Group observed (uncensored) patients by digitized time of death.
        Returns (ys, CEn): per-bin index arrays and the numpy censoring mask.
        Previously duplicated verbatim in both TSNE methods."""
        Yn = pt_numpy(Y); CEn = pt_numpy(CE)
        Yn_obs = Yn[np.where(CEn == 1)[0]]
        Yidx_obs = np.where(Yn_obs == 1)[1]
        binY = np.digitize(Yidx_obs, bins)
        if verbose:
            print(f'time of death (observed+censored): {np.where(Yn==1)[1]}, {len(np.where(Yn==1)[0])}')
            print(f'time of death (observed): {Yidx_obs}, {len(Yidx_obs)}')
            print(f'binned Y: {binY}')
        ys = []
        for i in range(len(bins)):
            y = np.where(binY == i)[0]
            ys.append(y)
            if verbose:
                print(f'number of patients in group y{i+1}: {y.shape[0]}')
        return ys, CEn

    def get_alpha1_tsne(self, bins, time=None, verbose=True):
        """2-d TSNE embeddings of alpha_1 (restricted to heavily-attended state
        dims) for uncensored patients, grouped by binned time of death."""
        alpha_1 = self.get_te_alpha1()
        (B, X, A, M, Y, CE) = self.data_loader.dataset.tensors
        _, _, lens = get_masks(M)
        B, X, A, M, Y, CE = B[lens > 1], X[lens > 1], A[lens > 1], M[lens > 1], Y[lens > 1], CE[lens > 1]
        ys, CEn = self._death_time_bins(Y, CE, bins, verbose)
        result_CEs = []
        # CONSISTENCY FIX: use self.model instead of the notebook-global
        # `models[f'ssm-att-fold{fold}']`, so the class has no hidden globals.
        weights = self.model.transition_fxn.t_mu.attn.attn.squeeze()
        # run TSNEs for all t if no time range specified
        if time is None:
            time = np.arange(alpha_1.shape[1])
        for t in time:
            tsne = TSNE(n_components=2)
            # keep only state dims whose mean attention on the TE expert > 0.5
            idxs = np.where(np.mean(pt_numpy(weights[:, t, :, -1]), axis=0) > 0.5)[0]
            result = tsne.fit_transform(pt_numpy(alpha_1[:, t, idxs]))
            result_CE = result[np.where(CEn == 1)[0]]
            result_CEs.append(result_CE)
        return result_CEs, ys

    def get_latent_tsne(self, bins, time=None, verbose=True):
        """2-d TSNE embeddings of the posterior latent states for uncensored
        patients, grouped by binned time of death."""
        (B, X, A, M, Y, CE) = self.data_loader.dataset.tensors
        _, _, lens = get_masks(M)
        B, X, A, M, Y, CE = B[lens > 1], X[lens > 1], A[lens > 1], M[lens > 1], Y[lens > 1], CE[lens > 1]
        Zt = self._p_Zt(B, X, A, M, Y, CE, dist='posterior')
        ys, CEn = self._death_time_bins(Y, CE, bins, verbose)
        result_CEs = []
        # run TSNEs for all t if no time range specified
        if time is None:
            # BUG FIX: previously referenced the undefined name `alpha_1`.
            time = np.arange(Zt.shape[1])
        for t in time:
            tsne = TSNE(n_components=2)
            result = tsne.fit_transform(pt_numpy(Zt[:, t, :]))
            result_CE = result[np.where(CEn == 1)[0]]
            result_CEs.append(result_CE)
        return result_CEs, ys
# -
# Heatmap of the alpha_1 linear layer's weights on the last 8 (treatment) inputs.
data, data_loader = models[f'ssm-att-fold{fold}'].load_helper('test', device=device)
EI = ModelIntrospector(models[f'ssm-att-fold{fold}'], data_loader, [X_names, A_names, B_names])
alpha_weights = EI.get_te_alpha1_weights()[:,-8:]
Anames_trt = A_names[1:]
fig, ax = plt.subplots(figsize=(15,11))
sns.heatmap(pt_numpy(alpha_weights), ax=ax, cmap='Blues', xticklabels=Anames_trt, yticklabels=False)
ax.set_ylabel('alpha-1 (dim=48)', fontsize=40)
ax.tick_params(axis='x', labelsize=40)
for item in ax.get_xticklabels():
    item.set_rotation(55)
cbar = ax.collections[0].colorbar
# here set the labelsize by 20
cbar.ax.tick_params(labelsize=35)
plt.savefig('./plots/aaai-plots/ssm_alpha1_matrix.pdf',bbox_inches='tight')
# ## Attention Maps
# Re-run the transition function's attention by hand so that t_mu.attn.attn
# holds per-patient attention weights over the three experts.
data, data_loader = models[f'ssm-att-fold{fold}'].load_helper('test', device=device)
(B, X, A, M, Y, CE) = data_loader.dataset.tensors
_, _, lens = get_masks(M)
B, X, A, M, Y, CE = B[lens>1], X[lens>1], A[lens>1], M[lens>1], Y[lens>1], CE[lens>1]
m_t, m_g_t, _ = get_masks(M[:,1:,:])
Zt, q_zt = models[f'ssm-att-fold{fold}'].inf_network(X, A, M, B)
Tmax = Zt.shape[1]
X0 = X[:,0,:]; Xt = X[:,1:,:]
inp_cat = torch.cat([B, X0, A[:,0,:]], -1)
# mu1 = self.prior_W(inp_cat)[:,None,:]
# sig1 = torch.nn.functional.softplus(self.prior_sigma(inp_cat))[:,None,:]
Zinp = Zt[:,:-1,:]; Aval = A[:,1:Tmax,:]
# control signal: [local clock, tiled baseline, remaining treatment dims]
Acat = torch.cat([Aval[...,[0]],B[:,None,:].repeat(1,Aval.shape[1],1), Aval[...,1:]],-1)
inpx = Zinp; con = Acat
t_mu = models[f'ssm-att-fold{fold}'].transition_fxn.t_mu
# mu2T, sig2T = self.transition_fxn(Zinp, Acat, eps = eps)
inp = t_mu.inp_layer(inpx)
# per-expert outputs: linear control, treatment-exponential, log-cell
out_linear = inp*torch.tanh(t_mu.control_layer(con))
out_te = t_mu.treatment_exp(inp, con)
out_logcell= t_mu.logcell(inp, con)
value = torch.cat((out_linear[...,None], out_te[...,None], out_logcell[...,None]), dim=-1).transpose(-2,-1)
key = torch.cat((out_linear[...,None], out_te[...,None], out_logcell[...,None]), dim=-1).transpose(-2,-1)
query = inp[...,None,:]
# disable dropout so the stored attention weights are deterministic
t_mu.attn.dropout = None
_ = t_mu.attn(query, key, value, use_matmul=False)
# out = self.attn.forward(query, key, value, use_matmul=False).squeeze()
# p_x_mu, p_x_std = self.p_X_Z(Z_t, A[:,1:Tmax+1,[0]])
# p_zt = self.p_Zt_Ztm1(Z_t, A, B, X, A[:,0,:])
# print(models[f'ssm-att-fold{fold}'].transition_fxn.t_mu.attn.attn.squeeze()[10:12].shape)
print(models[f'ssm-att-fold{fold}'].transition_fxn.t_mu.attn.attn.squeeze()[:2].shape)
print(torch.mean(models[f'ssm-att-fold{fold}'].transition_fxn.t_mu.attn.attn.squeeze()[:2],dim=1))
# +
# Heatmaps of mean attention weights over the three experts (g1/g2/g3):
# first over the first 3 time steps, then over steps 22:24 restricted to
# patients with non-missing observations there.
print(models[f'ssm-att-fold{fold}'].transition_fxn.t_mu.attn.attn.squeeze().shape)
print(B_names)
temp1 = models[f'ssm-att-fold{fold}'].transition_fxn.t_mu.attn.attn.squeeze()
temp2 = models[f'ssm-att-fold{fold}'].transition_fxn.t_mu.attn.attn.squeeze()
weights = torch.mean(torch.mean(temp1,dim=0)[:3],dim=0)
# weights = torch.mean(models[f'ssm-att-fold{fold}'].transition_fxn.t_mu.attn.attn.squeeze()[19],dim=0)
fig_dims = (8,10)
fig, ax = plt.subplots(figsize=fig_dims)
print(pt_numpy(weights).shape)
ax.tick_params(labelsize=30)
ax2 = sns.heatmap(pt_numpy(weights), cmap="Greens", yticklabels=False, xticklabels=['$g_1$', '$g_2$', '$g_3$'], ax=ax)
# use matplotlib.colorbar.Colorbar object
cbar = ax2.collections[0].colorbar
# here set the labelsize by 20
cbar.ax.tick_params(labelsize=22)
plt.ylabel('State Space Dimension', fontsize=30)
plt.savefig('./plots/aaai-plots/ssm_pkpd_attmap_first10months.pdf',bbox_inches='tight')
# three-dimensional state; each dimension is a
print(np.unique(np.where(pt_numpy(M[:,29:30,:]))[0]))
weights = torch.mean(torch.mean(temp2[np.unique(np.where(pt_numpy(M[:,22:24,:]))[0])],dim=0)[22:24],dim=0)
fig_dims = (8,10)
fig, ax = plt.subplots(figsize=fig_dims)
ax.tick_params(labelsize=30)
ax2 = sns.heatmap(pt_numpy(weights), cmap="Greens", yticklabels=False, xticklabels=['$g_1$', '$g_2$', '$g_3$'], ax=ax)
# use matplotlib.colorbar.Colorbar object
cbar = ax2.collections[0].colorbar
# here set the labelsize by 20
cbar.ax.tick_params(labelsize=22)
plt.ylabel('State Space Dimension', fontsize=30)
plt.savefig('./plots/ssm_pkpd_attmap_last10months_nonmissing.pdf',bbox_inches='tight')
# -
# -
# ## NLL Plots
# Reload feature names (X_names was mutated to display form above).
X_names = mmdata[fold]['train']['feature_names_x']
A_names = mmdata[fold]['train']['feature_names_a']
B_names = mmdata[fold]['train']['feature_names']
all_names = np.concatenate([X_names, A_names, B_names],0)
print (all_names.shape, X_names.shape)
X_names = [s.replace('_', ' ') for s in X_names]
print(X_names)
print(A_names)
print(all_names)
# +
def stratify_by_nll(B, X, A, M, Y, CE, thr=20.):
    """Return the subset of patients on which the PK-PD (attention) model's
    loss beats the linear SSM's by at least ``thr``.

    BUG FIX: ``thr`` was previously overwritten with 20. on the first line,
    so the keyword argument silently had no effect.

    Relies on the notebook globals ``models`` and ``fold``.
    """
    model_loss = {}
    for model in models:
        loss = models[model].get_loss(B, X, A, M, Y, CE)
        mloss = pt_numpy(loss[0])
        print(model)
        model_loss[model] = mloss
    lin_loss = model_loss[f'ssm-lin-fold{fold}']; gated_loss = model_loss[f'ssm-att-fold{fold}']
    diff = (lin_loss - gated_loss)
    idxlist = np.where(diff >= thr)[0]
    return B[idxlist], X[idxlist], A[idxlist], M[idxlist], Y[idxlist], CE[idxlist]
def stratify_by_lens(B, X, A, M, Y, CE, slen=20):
    """Keep only patients whose observed sequence length exceeds ``slen``."""
    _, _, seq_lens = get_masks(M)
    keep = seq_lens > slen
    return tuple(t[keep] for t in (B, X, A, M, Y, CE))
def stratify_by_line(B, X, A, M, Y, CE):
    """Keep only patients who ever received second- or third-line therapy."""
    # the last two treatment channels flag line-2 / line-3 therapies
    hit = pt_numpy(A)[..., -2:] == 1.
    idxs = np.unique(np.where(hit)[0])
    return B[idxs], X[idxs], A[idxs], M[idxs], Y[idxs], CE[idxs]
# -
# Load the held-out test split for the attention-SSM model of this fold.
data, data_loader = models[f'ssm-att-fold{fold}'].load_helper('test', device=device)
# Unpack the per-patient tensors: baseline, biomarkers, treatments,
# masks, outcomes, censoring.
(B, X, A, M, Y, CE) = data_loader.dataset.tensors
print(B.shape)
# look at examples that PK-PD models well
# B, X, A, M, Y, CE = stratify_by_lens(B, X, A, M, Y, CE, slen=1)
# Keep only patients observed long enough to both condition on and
# forecast (T_condition + T_forward time steps).
T_forward = 10; T_condition = 10
B, X, A, M, Y, CE = stratify_by_lens(B, X, A, M, Y, CE, slen=T_forward+T_condition)
# B, X, A, M, Y, CE = stratify_by_line(B, X, A, M, Y, CE)
print(B.shape)
# +
column_names = ['NLL (imp. sampling estimate)', 'Biomarker', 'model']
df = pd.DataFrame(columns = column_names)
for idx, feat in enumerate(X_names):
nlls = {}
for model in [f'ssm-lin-fold{fold}', f'ssm-att-fold{fold}']:
loss = models[model].imp_sampling(B, X, A, M, Y, CE, imp_samples = 10, idx = idx)
mloss = pt_numpy(loss[0])
nlls[model] = mloss
for i in range(len(nlls[f'ssm-att-fold{fold}'])):
df.loc[0 if pd.isnull(df.index.max()) else df.index.max() + 1] = [nlls[f'ssm-att-fold{fold}'][i], " ".join([f for f in feat.split("_")]), f'SSM PK-PD']
df.loc[0 if pd.isnull(df.index.max()) else df.index.max() + 1] = [nlls[f'ssm-lin-fold{fold}'][i], " ".join([f for f in feat.split("_")]), f'SSM Linear']
# -
fig, ax = plt.subplots(figsize=(15,10))
a1 = sns.boxplot(ax=ax, x='Biomarker', y='NLL (imp. sampling estimate)', hue='model',
data=df, palette='muted', showfliers=False)
# a1.set_ylim(-10,10)
a1.set_ylabel('NLL (imp. sampling estimate)', fontsize=35)
a1.set_xlabel('Biomarker', fontsize=35)
a1.tick_params(axis='x', labelsize=28)
a1.tick_params(axis='y', labelsize=28)
a1.legend(fontsize=30)
for item in a1.get_xticklabels():
item.set_rotation(55)
fig.savefig('./plots/supp_fig2_bplot_nll_is.pdf',bbox_inches='tight')
# ## L1 Error Plots
# +
## visualize conditional samples (what happens if you condition on some amount of time)
## try condition on 6 months, one year, two years
pf_samples = {}
cond_samples = {}
prior_samples= {}
print(B.shape)
T_condition = 12; T_forward = 6
nelbo, pf_nelbo, p_z_mu, q_z_mu, inp_x_post, inp_x, _, _ = models[f'ssm-att-fold{fold}'].inspect(T_forward, T_condition, B, X, A, M, Y, CE, nsamples=5)
_, pf_samples[f'ssm-att-fold{fold}'], _, _, cond_samples[f'ssm-att-fold{fold}'], prior_samples[f'ssm-att-fold{fold}'] = tuple([pt_numpy(k) for k in (nelbo, pf_nelbo, p_z_mu, q_z_mu, inp_x_post, inp_x)])
nelbo, pf_nelbo, p_z_mu, q_z_mu, inp_x_post, inp_x, _, _ = models[f'ssm-lin-fold{fold}'].inspect(T_forward, T_condition, B, X, A, M, Y, CE, nsamples=5)
_, pf_samples[f'ssm-lin-fold{fold}'], _, _, cond_samples[f'ssm-lin-fold{fold}'], prior_samples[f'ssm-lin-fold{fold}'] = tuple([pt_numpy(k) for k in (nelbo, pf_nelbo, p_z_mu, q_z_mu, inp_x_post, inp_x)])
data_cond= pt_numpy(X[:,:T_condition+T_forward,:])
obs_cond = pt_numpy(M[:,:T_condition+T_forward,:])
a_cond = pt_numpy(A[:,:T_condition+T_forward,:])
# data_cond[obs_cond==0] = np.nan
data_prior= pt_numpy(X[:,:T_forward,:])
obs_prior = pt_numpy(M[:,:T_forward,:])
a_prior = pt_numpy(A[:,:T_forward,:])
print(data_prior[:,0,:].shape)
print(prior_samples[f'ssm-att-fold{fold}'][:,0,:].shape)
assert data_prior[:,0,:].shape == prior_samples[f'ssm-att-fold{fold}'][:,0,:].shape
# data_prior[obs_prior==0] = np.nan
# +
column_names = ['MSE', 'Biomarker', 'model']
df = pd.DataFrame(columns = column_names)
setup = 'cond'
if setup == 'forward':
samples = prior_samples
elif setup == 'cond':
samples = cond_samples
# model_dict = {'fomm_gated': 'FOMM PK-PD', 'fomm_linear': 'FOMM Linear', 'fomm_nl': 'FOMM NL', 'fomm_moe': 'FOMM MOE'}
model_dict = {f'ssm-att-fold{fold}': 'SSM PK-PD', f'ssm-lin-fold{fold}': 'SSM Linear', 'nl-lin': 'SSM NL', 'moe-lin': 'SSM MOE'}
for model in [f'ssm-att-fold{fold}', f'ssm-lin-fold{fold}']:
if model == 'nl-lin' or model == 'moe-lin':
continue
if setup == 'forward':
Xv = pt_numpy(X[:,1:T_forward])
Mv = pt_numpy(M[:,1:T_forward])
pred = samples[model][:,1:]
elif setup == 'cond':
Xv = pt_numpy(X[:,T_condition:T_condition+T_forward])
Mv = pt_numpy(M[:,T_condition:T_condition+T_forward])
pred = samples[model][:,-T_forward:]
diff_t = ((np.abs(pred-Xv))*Mv)
diff = diff_t.sum(1); m = Mv.sum(1)
print(diff)
for feat, feat_name in enumerate(X_names):
mse_total = diff[:,feat]
mse_true = mse_total[np.where(m[:,feat] != 0.)[0]]
for i in range(len(mse_true)):
df.loc[0 if pd.isnull(df.index.max()) else df.index.max() + 1] = [mse_true[i], " ".join([f for f in feat_name.split("_")]), f'{model_dict[model]}']
fig, ax = plt.subplots(figsize=(15,10))
a1 = sns.boxplot(ax=ax, x='Biomarker', y='MSE', hue='model',
data=df, palette='muted', linewidth=3)
a1.set_ylim(-1,10)
a1.set_ylabel('L1 Error', fontsize=35)
a1.set_xlabel('Biomarker', fontsize=35)
a1.tick_params(axis='x', labelsize=28)
a1.tick_params(axis='y', labelsize=28)
a1.legend(fontsize=30, loc='upper left')
for item in a1.get_xticklabels():
item.set_rotation(55)
fig.savefig('./plots/supp_fig2_bplot_cond2y_f6m.pdf',bbox_inches='tight')
# -
# ## Forward Samples
# +
healthy_mins_max = {
'cbc_abs_neut':(2., 7.5,1/3.), # abs neutrophil count (3.67, 1.), (2.83, 4.51)
'chem_albumin':(34, 50,1/8.), # chemical albumin (43.62, 2.77), (41.30, 45.94)
'chem_bun':(2.5, 7.1,1/5.), #BUN # reference range, (4.8, 1.15)
'chem_calcium':(2.2, 2.7,2.), #Calcium, (2.45, 0.125)
'chem_creatinine':(66, 112,1/36.), # creatinine, (83., 24.85), (62.22, 103.77)
'chem_glucose':(3.9, 6.9,1/5.), # glucose, (4.91, 0.40), (4.58, 5.24)
'cbc_hemoglobin':(13., 17.,1), # hemoglobin (12.90, 15.64), (8.86, 1.02)
'chem_ldh':(2.33, 4.67,1/3.), #LDH, (3.5, 0.585)
'serum_m_protein':(0.1, 1.1, 1), # M protein (<3 g/dL is MGUS, any presence of protein is pathological); am just using the data mean/std for this, (0.85, 1.89)
'urine_24hr_m_protein':(0.0, 0.1, 1), # Urine M protein
'cbc_platelet':(150, 400,1/60.), # platelet count (206.42, 334.57), (270.5, 76.63)
'chem_totprot':(6, 8,1/6.), # total protein, (7, 0.5)
'urine_24hr_total_protein':(0, 0.23, 1), #
'cbc_wbc':(3, 10,1/4.), # WBC (5.71, 8.44), (7.07, 1.63)
'serum_iga':(0.85, 4.99, 1.), # IgA, (2.92, 1.035)
'serum_igg':(6.10, 16.16,1/10.), # IgG, (11.13, 2.515)
'serum_igm':(0.35, 2.42,1), #IgM, (1.385, 0.518)
'serum_lambda':(0.57, 2.63, 1/2.), #serum lambda, (1.6, 0.515)
'serum_kappa':(.33, 1.94,1/8.), #serum kappa , (1.135, 0.403)
'serum_beta2_microglobulin':(0.7, 1.80, 1/3.), #serum_beta2_microglobulin,
'serum_c_reactive_protein':(0.0, 1., 1.) #serum_c_reactive_protein,
}
scaled_healthy_min_max = {}
for k,v in healthy_mins_max.items():
old_min, old_max, scale = v
new_min = (old_min - old_max)*scale
new_max = 0.
scaled_healthy_min_max[k] = (new_min, new_max)
# +
def stratify_by_nll(B, X, A, M, Y, CE, thr=20.):
    """Select patients where the linear SSM's NLL exceeds the PK-PD
    (attention) SSM's NLL by at least ``thr``.

    Uses the module-level ``models`` dict, ``fold`` and ``pt_numpy``.
    """
    nll = {}
    for name in models:
        out = models[name].get_loss(B, X, A, M, Y, CE)
        loss_np = pt_numpy(out[0])
        print(name)
        nll[name] = loss_np
    gap = nll[f'ssm-lin-fold{fold}'] - nll[f'ssm-att-fold{fold}']
    keep = np.where(gap >= thr)[0]
    return B[keep], X[keep], A[keep], M[keep], Y[keep], CE[keep]
def stratify_by_lens(B, X, A, M, Y, CE, slen=20):
    """Drop patients whose observed sequence length is <= ``slen``."""
    lens = get_masks(M)[2]
    sel = lens > slen
    return tuple(t[sel] for t in (B, X, A, M, Y, CE))
def stratify_by_line(B, X, A, M, Y, CE):
    """Filter out patients who don't have second or third line therapies."""
    # patients with any 1 in the last two (line-2 / line-3) channels
    flagged_rows = np.where(pt_numpy(A)[..., -2:] == 1.)[0]
    idxs = np.unique(flagged_rows)
    return tuple(t[idxs] for t in (B, X, A, M, Y, CE))
# +
data, data_loader = models[f'ssm-att-fold{fold}'].load_helper('valid', device=device)
(B, X, A, M, Y, CE) = data_loader.dataset.tensors
T_forward = 10; T_condition = 10
B, X, A, M, Y, CE = stratify_by_nll(B, X, A, M, Y, CE, thr=20.)
B, X, A, M, Y, CE = stratify_by_lens(B, X, A, M, Y, CE, slen=T_forward+T_condition)
# B, X, A, M, Y, CE = stratify_by_line(B, X, A, M, Y, CE)
print(B.shape)
pf_samples = {}
cond_samples = {}
prior_samples= {}
nelbo, pf_nelbo, p_z_mu, q_z_mu, inp_x_post, inp_x, _, _ = models[f'ssm-att-fold{fold}'].inspect(T_forward, T_condition, B, X, A, M, Y, CE, nsamples=3, eps=0.)
_, pf_samples[f'ssm-att-fold{fold}'], _, _, cond_samples[f'ssm-att-fold{fold}'], prior_samples[f'ssm-att-fold{fold}'] = tuple([pt_numpy(k) for k in (nelbo, pf_nelbo, p_z_mu, q_z_mu, inp_x_post, inp_x)])
nelbo, pf_nelbo, p_z_mu, q_z_mu, inp_x_post, inp_x, _, _ = models[f'ssm-lin-fold{fold}'].inspect(T_forward, T_condition, B, X, A, M, Y, CE, nsamples=3, eps=0.)
_, pf_samples[f'ssm-lin-fold{fold}'], _, _, cond_samples[f'ssm-lin-fold{fold}'], prior_samples[f'ssm-lin-fold{fold}'] = tuple([pt_numpy(k) for k in (nelbo, pf_nelbo, p_z_mu, q_z_mu, inp_x_post, inp_x)])
data_cond= pt_numpy(X[:,:T_condition+T_forward,:])
obs_cond = pt_numpy(M[:,:T_condition+T_forward,:])
a_cond = pt_numpy(A[:,:T_condition+T_forward,:])
data_cond[obs_cond==0] = np.nan
data_prior= pt_numpy(X[:,:T_forward,:])
obs_prior = pt_numpy(M[:,:T_forward,:])
a_prior = pt_numpy(A[:,:T_forward,:])
# print(data_prior[:,0,:].shape)
# print(prior_samples[0.][:,0,:].shape)
# assert data_prior[:,0,:].shape == prior_samples[0.][:,0,:].shape
data_prior[obs_prior==0] = np.nan
# -
X_names_orig = mmdata[fold]['train']['feature_names_x']
A_names = mmdata[fold]['train']['feature_names_a']
B_names = mmdata[fold]['train']['feature_names']
all_names = np.concatenate([X_names_orig, A_names, B_names],0)
print (all_names.shape, X_names_orig.shape)
X_names = [s.replace('_', ' ') for s in X_names_orig]
print(B_names)
print(X_names)
print(A_names)
print(all_names)
# +
def plot_features_alt(plotlist, data, treat, X_names, A_names, group = 'serum', nplots = 3, xpush=1,ypush=1):
    """Plot observed biomarkers vs. model predictions for the first patient
    in the batch, overlaying treatment events and line-of-therapy spans.

    Args:
        plotlist: list of (label, predictions) pairs; each predictions array
            is indexed as predictions[0][:, feature].
        data: observed biomarker array, indexed data[0][:, feature].
        treat: treatment indicators; treat[0] is (time, channels).
        X_names: display names of biomarker columns.
        A_names: names of treatment channels (drug markers + line flags).
        group: which feature panel to draw; 'serum' or 'cbc'.
        nplots: number of subplot rows.
        xpush, ypush: accepted for call-site compatibility; not used in
            the body (presumably legacy legend-offset knobs).

    Reads module-level `scaled_healthy_min_max` and `X_names_orig` for the
    healthy-range reference lines.

    Returns:
        The matplotlib Figure.
    """
    assert group in ['serum','cbc'],'bad group'
    # Choose subplot grid and which feature columns to show per group.
    if group == 'cbc':
        if nplots == 1:
            fig, axgrid = plt.subplots(nplots,5,figsize=(20,5))
            axgrid = axgrid.reshape(1, 5)
        else:
            fig, axgrid = plt.subplots(nplots,5,figsize=(20,10))
        group_idx = [3, 4, 6, 9, 7]
    elif group == 'serum':
        if nplots == 1:
            fig, axgrid = plt.subplots(nplots,2,figsize=(9,5.5))
            axgrid = axgrid.reshape(1, 2)
        else:
            fig, axgrid = plt.subplots(nplots,5,figsize=(20,10))
        # group_idx = [12, 13, 14, 15, 7]
        group_idx = [13,15]
    else:
        # unreachable: the assert above already restricts `group`
        raise ValueError('bad setting for group')
    # Per-model line style/color, keyed by position in `plotlist`.
    formatting = {}
    formatting[0] = ('--x','r')
    formatting[1] = ('--o','k')
    formatting[2] = ('--v','g')
    formatting[3] = ('--^','b')
    formatting[4] = ('--.','m')
    # Marker shape per drug channel.
    A_markers = {}
    A_markers['Bor'] = '>'
    A_markers['Car'] = '<'
    A_markers['Cyc'] = '^'
    A_markers['Len'] = 'v'
    A_markers['Dex'] = 'o'
    axlist = axgrid.ravel()
    from matplotlib.legend import Legend
    # Collected handles/labels for the extra treatment legend on axis 0.
    lines = []; labels = []
    for j, ax in enumerate(axlist):
        ctr= group_idx[j]
        x = np.arange(data.shape[1])
        # Observed data points for the first patient.
        ax.scatter(x, data[0][:,ctr].astype('float'), s=64, label='Data')
        for pltidx, (name, vals) in enumerate(plotlist):
            # x = np.arange(vals.shape[1])
            ax.plot(x, vals[0][:,ctr], formatting[pltidx][0], label=name, color=formatting[pltidx][1], alpha = 0.5, markersize=8)
        # if i==0:
        ax.set_title(X_names[ctr], fontsize=25)
        # Healthy reference band (max = dark green, min = dark grey).
        vmin, vmax = scaled_healthy_min_max[X_names_orig[ctr]]
        ax.axhline(y=vmax, color='darkgreen', linestyle='--', alpha=0.6)
        ax.axhline(y=vmin, color='darkgrey', linestyle='--', alpha=0.6)
        ax.xaxis.set_ticks(np.arange(0, data.shape[1], 2))
        ymax = ax.get_ylim()[1]+0.05
        # Plot treatments
        # import pdb; pdb.set_trace()
        treat_i = treat[0]
        # One scatter row per drug channel, stacked above the data.
        for tidx in range(treat_i.shape[1]-1):
            if A_names[tidx]=='local_clock' or 'line' in A_names[tidx]:
                continue
            tlist_x = []
            tlist_y = []
            for t in range(treat_i.shape[0]):
                if treat_i[t, tidx] ==1:
                    tlist_x.append(t)
                    tlist_y.append(ymax+0.34*tidx)
            l = ax.scatter(tlist_x, tlist_y, marker=A_markers[A_names[tidx]])
            if j == 1:
                lines.append(l); labels.append(A_names[tidx])
        # Line-of-therapy spans: the last 3 channels one-hot the current line.
        lot1list_x = []; lot1list_y = []
        lot2list_x = []; lot2list_y = []
        lot3list_x = []; lot3list_y = []
        for t in range(treat_i.shape[0]):
            line = np.where(treat_i[t][-3:] == 1.)[0]+1
            if line == 1:
                lot1list_x.append(t); lot1list_y.append(ymax+0.1*treat_i.shape[1]-1)
            elif line == 2:
                lot2list_x.append(t); lot2list_y.append(ymax+0.1*treat_i.shape[1]-1)
            elif line == 3:
                lot3list_x.append(t); lot3list_y.append(ymax+0.1*treat_i.shape[1]-1)
        # ax.fill_between(tlist_x, np.array(tlist_y)+4, np.array(tlist_y)+6, color='darkred', alpha=0.7)
        # Shade each therapy line as a colored band; legend entries are
        # only collected once (from the second axis, j == 1).
        if j == 1:
            if len(lot3list_x) != 0:
                l = ax.fill_between(lot3list_x, np.array(lot3list_y)+.7,np.array(lot3list_y)+1., color='darkgreen', alpha=0.7)
                lines.insert(0,l); labels.insert(0,'Line 3')
            if len(lot2list_x) != 0:
                l = ax.fill_between(lot2list_x, np.array(lot2list_y)+.7,np.array(lot2list_y)+1., color='darkblue', alpha=0.7)
                lines.insert(0,l); labels.insert(0,'Line 2')
            if len(lot1list_x) != 0:
                l = ax.fill_between(lot1list_x, np.array(lot1list_y)+.7,np.array(lot1list_y)+1., color='darkred', alpha=0.7)
                lines.insert(0,l); labels.insert(0,'Line 1')
        ylims = ax.get_ylim()
        if j==0:
            ax.legend(loc='best',bbox_to_anchor=(-.3,.3),
                fancybox=True, shadow=True, ncol=1, fontsize = 25)
        if j==0 or j == 1:
            ax.set_xlabel('Time (per 2 months)', fontsize=25)
    # Secondary legend with the treatment/line handles, attached to axis 0.
    leg = Legend(axlist[0], lines, labels,
            loc='best', frameon=True, fancybox = True, shadow=True, fontsize=25, bbox_to_anchor=(-.33,1.09))
    axlist[0].add_artist(leg);
    plt.subplots_adjust(top=0.9, wspace=0.25)
    # plt.xlabel('Time (per 2 months)', fontsize=25)
    return fig
# +
nplots = 1
setup = 'cond'
idx = 1
idxlist = np.arange(prior_samples[f'ssm-att-fold{fold}'].shape[0])+idx
if setup == 'forward':
l = [('Linear', prior_samples[f'ssm-lin-fold{fold}'][idx:])]
l += [(f'PK-PD', prior_samples[f'ssm-att-fold{fold}'][idx:])]
fig = plot_features_alt(l, data_prior[idx:], a_prior[idx:], X_names, A_names, group='serum', nplots = nplots, xpush=6.9, ypush=2)
fig = plot_features_alt(l, data_prior[idx:], a_prior[idx:], X_names, A_names, group='cbc', nplots = nplots, xpush=6.9, ypush=2)
else:
l = [('Linear', cond_samples[f'ssm-lin-fold{fold}'][idx:])]
l += [(f'PK-PD', cond_samples[f'ssm-att-fold{fold}'][idx:])]
fig = plot_features_alt(l, data_cond[idx:], a_cond[idx:], X_names, A_names, group='serum', nplots = nplots, xpush=8)
# fig = plot_features_alt(l, data_cond[idx:], a_cond[idx:], X_names, A_names, group='cbc', nplots = nplots, xpush=8)
# fig.suptitle('Lab for T = %d conditioned on patient %s baseline data'%(T_forward, ','.join([str(k) for k in idxlist[:nplots]])), fontsize=20)
# fig.suptitle('Lab for T = %d conditioned on patient baseline data'%(T_forward), fontsize=20)
fig.savefig('./plots/fold3_idx5_main_cond10_forw10_2markers.pdf',bbox_inches='tight')
# -
# ## Latent State Plots
# +
data, data_loader = models[f'ssm-att-fold{fold}'].load_helper('test', device=device)
X_names_orig = mmdata[fold]['train']['feature_names_x']
A_names = mmdata[fold]['train']['feature_names_a']
B_names = mmdata[fold]['train']['feature_names']
all_names = np.concatenate([X_names_orig, A_names, B_names],0)
print (all_names.shape, X_names_orig.shape)
X_names = [s.replace('_', ' ') for s in X_names_orig]
print(B_names)
print(X_names)
print(A_names)
print(all_names)
bins = np.array([10.,12.,19.])
time = [0, 4, 9, 14, 18,19, 24, 29]
EI = ModelIntrospector(models[f'ssm-att-fold{fold}'], data_loader, [X_names, A_names, B_names])
result_CEs, (y1,y2,y3) = EI.get_latent_tsne(bins, time=time, verbose=True)
(B, X, A, M, Y, CE) = data_loader.dataset.tensors
_, _, lens = get_masks(M)
B, X, A, M, Y, CE = B[lens>1], X[lens>1], A[lens>1], M[lens>1], Y[lens>1], CE[lens>1]
Bn = pt_numpy(B); Xn = pt_numpy(X); Mn = pt_numpy(M); CEn = pt_numpy(CE); An = pt_numpy(A)
Bn_obs = Bn[np.where(CEn==1)[0]]; Xn_obs = Xn[np.where(CEn==1)[0]]; Mn_obs = Mn[np.where(CEn==1)[0]]
An_obs = An[np.where(CEn==1)[0]]
a1 = np.where(An_obs[:,18,-3])[0]
a2 = np.where(An_obs[:,18,-2])[0]
a3 = np.where(An_obs[:,18,-1])[0]
a4 = np.where((An_obs[:,18,-1]+An_obs[:,18,-2]+An_obs[:,18,-3]) == 0.)[0]
for i in range(len(X_names)):
print(f'stats for {X_names[i]}')
print(f'no treatment average: {np.mean(Xn_obs[a4,18,i])}')
print(f'line 3 trt average: {np.mean(Xn_obs[a3,18,i])}')
print(f'line 2 trt average: {np.mean(Xn_obs[a2,18,i])}')
print(f'line 1 trt average: {np.mean(Xn_obs[a1,18,i])}')
a1g3 = np.array([116,110,91,61,63,64,66,60,87])
Yn = pt_numpy(Y)
Yn_obs = Yn[np.where(CEn==1)[0]]
Yidx_obs = np.where(Yn_obs==1)[1]
fig, axlist = plt.subplots(2,4,figsize=(18,9))
ax = axlist.ravel()
for i,t in enumerate(time):
result_CE = result_CEs[i]
bor = np.where(An_obs[:,t,1])[0]
car = np.where(An_obs[:,t,2])[0]
cyc = np.where(An_obs[:,t,3])[0]
dex = np.where(An_obs[:,t,4])[0]
lena = np.where(An_obs[:,t,5])[0]
a1 = np.where(An_obs[:,t,-3])[0]
a2 = np.where(An_obs[:,t,-2])[0]
a3 = np.where(An_obs[:,t,-1])[0]
a4 = np.where((An_obs[:,t,-1]+An_obs[:,t,-2]+An_obs[:,t,-3]) == 0.)[0]
a1g1= np.where(An_obs[:,t,-4]+An_obs[:,t,-3] == 2.)[0]
a1g2= np.where(An_obs[:,t,-4] == 0.)[0]
ax[i].set_title(f'T = {t}', fontsize=25)
if t == 29:
# ax[i].scatter(result_CE[a1g1,0],result_CE[a1g1,1], label = 'LEN (line1)', s=36)
# ax[i].scatter(result_CE[a1g2,0],result_CE[a1g2,1], label = 'no LEN (line1)', s=36)
ax[i].scatter(result_CE[a1,0],result_CE[a1,1], label = 'line1', s=36)
ax[i].scatter(result_CE[a2,0],result_CE[a2,1], label = 'line2', s=36)
ax[i].scatter(result_CE[a3,0],result_CE[a3,1], label = 'line3plus', s=36)
ax[i].scatter(result_CE[a4,0],result_CE[a4,1], label = 'noRx', s=36)
ax[i].legend(loc='lower right', fontsize=15)
else:
# ax[i].scatter(result_CE[a1g1,0],result_CE[a1g1,1], s=36)
# ax[i].scatter(result_CE[a1g2,0],result_CE[a1g2,1], s=36)
ax[i].scatter(result_CE[a1,0],result_CE[a1,1], s=36)
ax[i].scatter(result_CE[a2,0],result_CE[a2,1], s=36)
ax[i].scatter(result_CE[a3,0],result_CE[a3,1], s=36)
ax[i].scatter(result_CE[a4,0],result_CE[a4,1], s=36)
# plt.savefig('./plots/latent_tsne_len_time.pdf',bbox_inches='tight')
# +
fig, axlist = plt.subplots(1,2,figsize=(10,5.3))
ax = axlist.ravel()
times = [4,18]
idxs = [1,4]
for i,t in enumerate(times):
result_CE = result_CEs[idxs[i]]
a1 = np.where(An_obs[:,t,-3])[0]
a2 = np.where(An_obs[:,t,-2])[0]
a3 = np.where(An_obs[:,t,-1])[0]
a4 = np.where((An_obs[:,t,-1]+An_obs[:,t,-2]+An_obs[:,t,-3]) == 0.)[0]
a1g1= np.where(An_obs[:,t,-4]+An_obs[:,t,-3] == 2.)[0]
a1g2= np.where(An_obs[:,t,-4] == 0.)[0]
if i == 0:
ax[i].set_title(f'8 Months', fontsize=25)
if i == 1:
ax[i].set_title(f'36 Months', fontsize=25)
if i == 1:
ax[i].scatter(result_CE[a1,0],result_CE[a1,1], label = 'line1', s=36)
ax[i].scatter(result_CE[a2,0],result_CE[a2,1], label = 'line2', s=36)
ax[i].scatter(result_CE[a3,0],result_CE[a3,1], label = 'line3plus', s=36)
ax[i].scatter(result_CE[a4,0],result_CE[a4,1], label = 'noRx', s=36)
ax[i].legend(loc='upper left', bbox_to_anchor=(-1.2,1.), fontsize=15)
else:
ax[i].scatter(result_CE[a1,0],result_CE[a1,1], s=36)
ax[i].scatter(result_CE[a2,0],result_CE[a2,1], s=36)
ax[i].scatter(result_CE[a3,0],result_CE[a3,1], s=36)
ax[i].scatter(result_CE[a4,0],result_CE[a4,1], s=36)
# plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# plt.savefig('./plots/latent_tsne_2time.pdf',bbox_inches='tight')
# -
| examples/model_analyses_final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Groomer records: 'pets' maps to a one-element list holding a dict of
# pet-name -> [details]. NOTE(review): the two '<NAME>' keys collide (the
# Labrador entry overwrites the Parrot), and the last entry uses the key
# 'attributes' while the others use 'attribute' — confirm intended data.
groomer_info = { 'pets':
    [{'Ginger': [{
        'age': 5,
        'type': 'Pitbull',
        'attribute': ['Playful','Cheery','Hyper']
            }],
     'Chloe': [{
         'age': 1,
         'type': 'Maine Coon Cat',
         'attribute': ['Alert','Independent','Playful']
            }],
     '<NAME>': [{
         'age': 8,
         'type': 'Parrot',
         'attribute': ['Loud','Opinionated','Annoying']
            }],
     '<NAME>': [{
         'age': 2,
         'type': 'Labrador',
         'attributes': ['never','gives','you','up']
            }]}]
}
# BUG FIX: the original iterated over groomer_info's top-level keys
# (just 'pets') and then indexed groomer_info['pets'][0]['pets'],
# raising KeyError. Iterate the inner mapping of pet names instead.
for pet_name in groomer_info['pets'][0]:
    print(groomer_info['pets'][0][pet_name][0]['type'])
| Phase_1/Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp monte_carlo_eth2
# -
# # Monte Carlo on ETH2 Scenarios
#
#export
from nbdev.showdoc import *
import jovsatools
import fastcore
import numpy as np
from collections import Counter, defaultdict
from tqdm import tqdm
import seaborn as sns
from typing import Optional, List
import pandas as pd
import matplotlib.pyplot as plt
# ### Setup Code
# +
class NumpyDist:
"""Numpy Random Sampling Distribution
list of distributions: https://numpy.org/doc/1.16/reference/routines.random.html
"""
def __init__(self, dist, params:dict):
self.dist = dist
self.params = params
self.dry_run() # basic check
def dry_run(self):
"""Ensures that dist and params can be called correctly. """
try:
self.__call__()
except Exception as e:
raise e
def __call__(self):
return self.dist(**self.params)
class MonteCarlo:
    """Monte-Carlo simulator for ETH2 staking returns.

    Each iteration draws one value from every named input distribution and
    logs derived return/cost statistics.

    Args:
        input_dists: mapping of name -> zero-arg sampler (e.g. NumpyDist);
            must provide keys 'staking_return_pct', 'voo_returns' and
            'eth_price' for `run`.
        seed: seed for numpy's global RNG, for reproducibility.
    """
    def __init__(self, input_dists, seed):
        assert isinstance(input_dists, dict)
        self.input_dists = input_dists
        # BUG FIX: np.random.seed() returns None, and the original stored
        # that None in self.seed. Seed the RNG and keep the actual value.
        np.random.seed(seed)
        self.seed = seed
    def _get_sample(self):
        """Draw one value from every input distribution."""
        return {name: dist() for name, dist in self.input_dists.items()}
    def run(self, iters:int, constants:Optional[dict]=None):
        """Run the simulation.

        Args:
            iters: number of Monte-Carlo iterations.
            constants: required mapping with keys 'curr_eth_price',
                'total_eth_staked', 'tax_rate' and 'aws_cost'.

        Returns:
            pd.DataFrame with one row per iteration of logged statistics.

        Raises:
            ValueError: if `constants` is omitted. (BUG FIX: the original
                dereferenced `constants[...]` unconditionally, producing an
                opaque TypeError when it was left as None.)
        """
        if constants is None:
            raise ValueError("run() requires a `constants` dict")
        stats = defaultdict(list)
        for i in tqdm(range(iters)):
            # setup: draw this iteration's random inputs
            _sample = self._get_sample()
            staking_return = _sample['staking_return_pct']
            voo_return = 1+_sample['voo_returns']
            total_staked = constants['curr_eth_price'] * constants['total_eth_staked']
            gross_returns = staking_return * total_staked
            tax_due = constants['tax_rate'] * gross_returns
            sampled_eth_value = _sample['eth_price'] * constants['total_eth_staked']
            # costs: hosting + taxes
            tot_costs = constants['aws_cost'] + tax_due
            # net returns after costs
            net_returns = gross_returns - tot_costs
            # opportunity costs vs. just holding ETH or buying VOO
            eth_opportunity_cost = sampled_eth_value - total_staked
            usd_opportunity_cost = (voo_return*total_staked) - total_staked
            # logging calculations
            stats['staking_return'].append(staking_return)
            stats['voo_return'].append(voo_return)
            stats['gross_returns'].append(gross_returns)
            stats['total_staked'].append(total_staked)
            stats['sampled_eth_price'].append(_sample['eth_price'])
            stats['sampled_eth_value'].append(sampled_eth_value)
            stats['net_returns'].append(net_returns)
            stats['tax_due'].append(tax_due)
            stats['tot_costs'].append(tot_costs)
            # note: only include opportunity cost if > 0
            stats['ETH_opportunity_cost'].append(max(0, eth_opportunity_cost))
            stats['USD_opportunity_cost'].append(max(0, usd_opportunity_cost))
            stats['total_opportunity_cost'].append(max(0, eth_opportunity_cost, usd_opportunity_cost))
        return pd.DataFrame.from_dict(stats)
# -
# ### ETH Specific Values
# +
# per year estimates
input_dists = {
# based on online probability from staking calculator [1]
# online probability = [left=80%, mode=85%, right=99%]
'staking_return_pct': NumpyDist(dist=np.random.triangular, params={"left":0.0466, "mode":0.056, "right":0.0857}),
# price returns: triangular
'eth_price': NumpyDist(dist=np.random.triangular, params={"left":1000, "mode":2000, "right":5000}),
# In the last 10 years, the Vanguard S&P 500 (VOO) ETF obtained a 13.38% compound annual return, with a 13.45% standard deviation [3]
'voo_returns': NumpyDist(dist=np.random.normal, params={"loc":0.1338, "scale":0.1345}),
}
constants = {
# c5.xlarge costs $0.17 per Hour [2]
'aws_cost': 1489.2, #=0.17*24*365
'tax_rate': 0.35,
'curr_eth_price': 1700,
'total_eth_staked': 32
}
mc = MonteCarlo(input_dists=input_dists, seed=4096)
stats = mc.run(iters=1_000_000, constants=constants)
# -
# ### Analyzing Results
# +
COLS_TO_PLOT = ['gross_returns', 'net_returns', 'tax_due',
'tot_costs', 'ETH_opportunity_cost',
'USD_opportunity_cost', 'total_opportunity_cost']
stats[COLS_TO_PLOT].plot.box()
plt.rcParams['figure.figsize'] = [30, 10]
# -
stats.describe()
# ### References
# * [1] [staking calculator](https://beaconcha.in/calculator)
# * [2] [AWS pricing](https://aws.amazon.com/ec2/pricing/on-demand/)
# * [3] [VOO - S&P returns](http://www.lazyportfolioetf.com/etf/vanguard-sp-500-voo/#:~:text=In%20the%20last%2010%20years,granted%20a%201.81%25%20dividend%20yield.)
| notebooks/monte_carlo_eth2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fishing Effort
# Total fishing effort within 'national waters' and capture production by year. Most figures represent activity within exclusive economic zones; figures were also calculated for joint regimes and disputed areas, which are both held separate from single-sovereign waters.
#
# Note that this is a parameterized widget; the specification passed to the API will not be renderable without the geostore identifier being inserted.
#
# _Author: <NAME>_
# _Created: 28 Sep 2021_
# _Environment: jupyterlab_
# ## Style
# - Vega chart
# - Ocean Watch country page - parameterized chart
# - Time series
# - Line chart
# ## Data
# Underlying dataset: com.030d Fishing Effort by Zone
# Widget data: [com_030d](https://resourcewatch.carto.com/u/wri-rw/dataset/com_030d_fishing_effort_by_zone)
# ## Preparation
import json
from vega import Vega
from IPython.display import display
def Vega(spec):
    """Render a Vega v5 spec in the notebook via a raw MIME bundle.

    Shadows the imported `vega.Vega` so specs display without the package's
    renderer.
    """
    display({'application/vnd.vega.v5+json': spec}, raw=True)
# + tags=[]
widget_width = 400
widget_height = 300
# -
# # Country Widget
# For the moment, we will ignore all areas besides uncontested exclusive economic zones
# ## Demo Query
# `geostore_prod` identifier corresponds to Angola
# Fishing Effort
# ```sql
# SELECT mrgid, geoname, pol_type, gadm.gid_0, year, value FROM (
# SELECT *,
# CASE WHEN iso_ter1 IS NULL THEN iso_sov1 ELSE iso_ter1 END AS gid_0
# FROM com_030d_fishing_effort_by_zone) AS data
# LEFT OUTER JOIN gadm36_0 AS gadm ON data.gid_0 = gadm.gid_0
# WHERE pol_type = '200NM'
# AND year <= 2020
# AND gadm.geostore_prod='c0e30c8d35f81d8d19e2c0f5dd7e0798'
# ORDER BY data.year ASC
# ```
# Capture production
# ```sql
# SELECT gid_0, year, SUM(value) AS capture
# FROM foo_062_rw0_fishery_production_edit
# INNER JOIN gadm36_0 gadm on gadm.gid_0 = iso3_code
# WHERE type = 'Capture_quantity'
# AND measure = 'Q_tlw' AND geostore_prod='c0e30c8d35f81d8d19e2c0f5dd7e0798'
# GROUP BY gid_0, year ORDER BY year ASC
# ```
# ## Parameterized Query
# Fishing effort
# ```sql
# SELECT pol_type, gadm.gid_0, year, SUM(value::NUMERIC) AS value FROM (
# SELECT *,
# CASE WHEN iso_ter1 IS NULL THEN iso_sov1 ELSE iso_ter1 END AS gid_0
# FROM com_030d_fishing_effort_by_zone) AS data
# LEFT OUTER JOIN gadm36_0 AS gadm ON data.gid_0 = gadm.gid_0
# WHERE pol_type = '200NM'
# AND year <= 2020
# AND gadm.{{geostore_env}}='{{geostore_id}}'
# GROUP BY pol_type, gadm.gid_0, year
# ORDER BY data.year ASC
# ```
# Capture production
# ```sql
# SELECT gid_0, year, SUM(value) AS capture
# FROM foo_062_rw0_fishery_production_edit
# INNER JOIN gadm36_0 gadm on gadm.gid_0 = iso3_code
# WHERE type = 'Capture_quantity' AND measure = 'Q_tlw'
# AND {{geostore_env}}='{{geostore_id}}'
# GROUP BY gid_0, year ORDER BY year ASC
# ```
# ## Specification
# Vega code presumes RW-style `config` element present
spec=json.loads("""
{
"schema": "https://vega.github.io/schema/vega/v5.json",
"description": "Fishing effort per year, by zone",
"width": 400,
"height": 250,
"padding": 0,
"autosize": {"type": "fit", "contains": "padding"},
"signals": [{"name": "date_min", "update": "data('effort_table')[0]['year']"}],
"data": [
{
"name": "effort_table",
"url": "https://wri-rw.carto.com/api/v2/sql?q=SELECT pol_type, gadm.gid_0, year, SUM(value::NUMERIC) AS effort FROM (SELECT *, CASE WHEN iso_ter1 IS NULL THEN iso_sov1 ELSE iso_ter1 END AS gid_0 FROM com_030d_fishing_effort_by_zone) AS data LEFT OUTER JOIN gadm36_0 AS gadm ON data.gid_0 = gadm.gid_0 WHERE pol_type = '200NM' AND year <= 2020 AND gadm.gid_0='BRA' GROUP BY pol_type, gadm.gid_0, year ORDER BY data.year ASC",
"format": {
"type": "json",
"property": "rows",
"parse": {"year": "utc:'%Y'"}
},
"transform": [{
"type": "formula",
"expr": "utcyear(datum.year)",
"as": "time"
}]
},
{
"name": "capture_table",
"url": "https://wri-rw.carto.com/api/v2/sql?q=SELECT gid_0, year, SUM(value) AS capture FROM foo_062_rw0_fishery_production_edit INNER JOIN gadm36_0 gadm on gadm.gid_0 = iso3_code WHERE type = 'Capture_quantity' AND measure = 'Q_tlw' AND gid_0 = 'BRA' GROUP BY gid_0, year ORDER BY year ASC",
"format": {
"type": "json",
"property": "rows",
"parse": {"year": "utc:'%Y'"}
},
"transform": [{"type": "filter", "expr": "datum.year >= date_min"},{
"type": "formula",
"expr": "utcyear(datum.year)",
"as": "time"
}]
}
],
"scales": [
{
"name": "datescale",
"type": "time",
"domain": {
"fields": [
{"data": "effort_table", "field": "year"},
{"data": "capture_table", "field": "year"}
]
},
"range": "width"
},
{
"name": "yscale_left",
"type": "linear",
"domain": {"data": "effort_table", "fields": ["effort"]},
"range": "height",
"padding": 0.1
},
{
"name": "yscale_right",
"type": "linear",
"domain": {"data": "capture_table", "fields": ["capture"]},
"range": "height",
"padding": 0.1
},
{
"name": "colors",
"type": "ordinal",
"domain": ["Fishing effort", "Capture production"],
"range": ["#E9573F", "#8CC152"]
}
],
"axes": [
{
"orient": "bottom",
"scale": "datescale",
"domain": true,
"labelFlush": true,
"labelOverlap": true,
"labelBaseline": "middle",
"ticks": true,
"grid": false,
"titleFont": "Lato",
"labelPadding": 7
},
{
"orient": "left",
"scale": "yscale_left",
"domain": true,
"labelBaseline": "middle",
"labelAlign": "right",
"labelPadding": 5,
"grid": false,
"gridOpacity": 0.2,
"format": "s",
"title": "Annual fishing effort (hours)"
},
{
"orient": "right",
"scale": "yscale_right",
"domain": true,
"labelBaseline": "middle",
"labelAlign": "left",
"labelPadding": 5,
"grid": true,
"gridOpacity": 0.2,
"format": "s",
"title": "Annual capture production (tons)"
}
],
"marks": [
{
"name": "effort",
"type": "line",
"from": {"data": "effort_table"},
"interactive": false,
"encode": {
"enter": {
"x": {"scale": "datescale", "field": "year"},
"y": {"scale": "yscale_left", "field": "effort"},
"stroke": {"value": "#E9573F"},
"strokeWidth": {"value": 2.5},
"clip": {"value": true},
"interpolate": {"value": "linear"}
},
"update": {"strokeOpacity": {"value": 1}},
"hover": {"strokeOpacity": {"value": 0.5}}
}
},
{
"name": "effort_points",
"type": "symbol",
"from": {"data": "effort_table"},
"encode": {
"enter": {
"x": {"scale": "datescale", "field": "year"},
"y": {"scale": "yscale_left", "field": "effort"},
"size": {"signal": "(width*height)/1000"},
"stroke": {"value": "white"},
"strokeOpacity": {"value": 1},
"fillOpacity": {"value": 1},
"strokeWidth": {"value": 2},
"clip": {"value": true},
"shape": {"value": "circle"}
},
"update": {"fill": {"value": "#E9573F"}},
"hover": {"fill": {"value": "#666666"}}
}
},
{
"name": "capture",
"type": "line",
"from": {"data": "capture_table"},
"interactive": false,
"encode": {
"enter": {
"x": {"scale": "datescale", "field": "year"},
"y": {"scale": "yscale_right", "field": "capture"},
"stroke": {"value": "#8CC152"},
"strokeWidth": {"value": 2.5},
"clip": {"value": true},
"interpolate": {"value": "linear"}
},
"update": {"strokeOpacity": {"value": 1}},
"hover": {"strokeOpacity": {"value": 0.5}}
}
},
{
"type": "symbol",
"from": {"data": "capture_table"},
"encode": {
"enter": {
"x": {"scale": "datescale", "field": "year"},
"y": {"scale": "yscale_right", "field": "capture"},
"size": {"signal": "(width*height)/1000"},
"stroke": {"value": "white"},
"strokeOpacity": {"value": 1},
"fillOpacity": {"value": 1},
"strokeWidth": {"value": 2},
"clip": {"value": true},
"shape": {"value": "circle"}
},
"update": {"fill": {"value": "#8CC152"}},
"hover": {"fill": {"value": "#666666"}}
}
}
],
"legends": [
{
"titleFont": "Lato",
"labelFont": "Lato",
"fill": "colors",
"symbolOpacity": 0.7,
"direction": "horizontal",
"orient": "bottom"
}
],
"interaction_config": [
{
"name": "tooltip",
"config": {
"fields": [
{
"column": "time",
"property": "Year",
"type": "string",
"format": ""
},
{
"column": "effort",
"property": "Fishing effort (hours)",
"type": "number",
"format": ",.0f"
},
{
"column": "capture",
"property": "Capture production (tons)",
"type": "number",
"format": ",.0f"
}
]
}
}
]
}
""")
# Copy the parsed spec and override its size with the shared widget dimensions.
vega_view = dict(spec)
vega_view.update(width=widget_width, height=widget_height)
Vega(vega_view)
# # Global Widget
# ## Queries
# **Fishing Effort**
# Includes all zones (with collected data), but not international waters
# ```sql
# SELECT year, SUM(value::NUMERIC) AS total_value
# FROM com_030d_fishing_effort_by_zone AS data
# WHERE year <= 2020
# GROUP BY year
# ORDER BY year ASC
# ```
# **Capture Production**
# ```sql
# SELECT year, SUM(value) AS capture
# FROM foo_062_rw0_fishery_production_edit
# INNER JOIN gadm36_0 gadm on gadm.gid_0 = iso3_code
# WHERE type = 'Capture_quantity' AND measure = 'Q_tlw'
# GROUP BY year ORDER BY year ASC
# ```
# ## Specification
# Vega code presumes RW-style `config` element present
spec=json.loads("""
{
  "$schema": "https://vega.github.io/schema/vega/v5.json",
"description": "Fishing effort per year, by zone",
"width": 400,
"height": 250,
"padding": 0,
"autosize": {"type": "fit", "contains": "padding"},
"signals": [{"name": "date_min", "update": "data('effort_table')[0]['year']"}],
"data": [
{
"name": "effort_table",
"url": "https://wri-rw.carto.com/api/v2/sql?q=SELECT year, SUM(value::NUMERIC) AS effort FROM com_030d_fishing_effort_by_zone AS data WHERE year <= 2020 GROUP BY year ORDER BY year ASC",
"format": {
"type": "json",
"property": "rows",
"parse": {
"year": "utc:'%Y'"
}
},
"transform": [{
"type": "formula",
"expr": "utcyear(datum.year)",
"as": "time"
}]
},
{
"name": "capture_table",
"url": "https://wri-rw.carto.com/api/v2/sql?q=SELECT year, SUM(value) AS capture FROM foo_062_rw0_fishery_production_edit INNER JOIN gadm36_0 gadm on gadm.gid_0 = iso3_code WHERE type = 'Capture_quantity' AND measure = 'Q_tlw' GROUP BY year ORDER BY year ASC",
"format": {
"type": "json",
"property": "rows",
"parse": {"year": "utc:'%Y'"}
},
"transform": [{"type": "filter", "expr": "datum.year >= date_min"},
{
"type": "formula",
"expr": "utcyear(datum.year)",
"as": "time"
}]
}
],
"scales": [
{
"name": "datescale",
"type": "time",
"domain": {
"fields": [
{"data": "effort_table", "field": "year"},
{"data": "capture_table", "field": "year"}
]
},
"range": "width"
},
{
"name": "yscale_left",
"type": "linear",
"domain": {"data": "effort_table", "fields": ["effort"]},
"range": "height",
"padding": 0.1
},
{
"name": "yscale_right",
"type": "linear",
"domain": {"data": "capture_table", "fields": ["capture"]},
"range": "height",
"padding": 0.1
},
{
"name": "colors",
"type": "ordinal",
"domain": ["Fishing effort", "Capture production"],
"range": ["#FF794E", "#74D723"]
}
],
"axes": [
{
"orient": "bottom",
"scale": "datescale",
"domain": true,
"labelFlush": true,
"labelOverlap": true,
"labelBaseline": "middle",
"ticks": true,
"grid": false,
"titleFont": "Lato",
"labelPadding": 7
},
{
"orient": "left",
"scale": "yscale_left",
"domain": true,
"labelBaseline": "middle",
"labelAlign": "right",
"labelPadding": 5,
"grid": false,
"gridOpacity": 0.2,
"format": "s",
"title": "Annual fishing effort (hours)"
},
{
"orient": "right",
"scale": "yscale_right",
"domain": true,
"labelBaseline": "middle",
"labelAlign": "left",
"labelPadding": 5,
"grid": true,
"gridOpacity": 0.2,
"format": "s",
"title": "Annual capture production (tons)"
}
],
"marks": [
{
"name": "effort",
"type": "line",
"from": {"data": "effort_table"},
"interactive": false,
"encode": {
"enter": {
"x": {"scale": "datescale", "field": "year"},
"y": {"scale": "yscale_left", "field": "effort"},
"stroke": {"value": "#FF794E"},
"strokeWidth": {"value": 2.5},
"clip": {"value": true},
"interpolate": {"value": "linear"}
},
"update": {"strokeOpacity": {"value": 1}},
"hover": {"strokeOpacity": {"value": 0.5}}
}
},
{
"name": "effort_points",
"type": "symbol",
"from": {"data": "effort_table"},
"encode": {
"enter": {
"x": {"scale": "datescale", "field": "year"},
"y": {"scale": "yscale_left", "field": "effort"},
"size": {"signal": "(width*height)/1000"},
"stroke": {"value": "white"},
"strokeOpacity": {"value": 1},
"fillOpacity": {"value": 1},
"strokeWidth": {"value": 2},
"clip": {"value": true},
"shape": {"value": "circle"}
},
"update": {"fill": {"value": "#FF794E"}},
"hover": {"fill": {"value": "#666666"}}
}
},
{
"name": "capture",
"type": "line",
"from": {"data": "capture_table"},
"interactive": false,
"encode": {
"enter": {
"x": {"scale": "datescale", "field": "year"},
"y": {"scale": "yscale_right", "field": "capture"},
"stroke": {"value": "#74D723"},
"strokeWidth": {"value": 2.5},
"clip": {"value": true},
"interpolate": {"value": "linear"}
},
"update": {"strokeOpacity": {"value": 1}},
"hover": {"strokeOpacity": {"value": 0.5}}
}
},
{
"type": "symbol",
"from": {"data": "capture_table"},
"encode": {
"enter": {
"x": {"scale": "datescale", "field": "year"},
"y": {"scale": "yscale_right", "field": "capture"},
"size": {"signal": "(width*height)/1000"},
"stroke": {"value": "white"},
"strokeOpacity": {"value": 1},
"fillOpacity": {"value": 1},
"strokeWidth": {"value": 2},
"clip": {"value": true},
"shape": {"value": "circle"}
},
"update": {"fill": {"value": "#74D723"}},
"hover": {"fill": {"value": "#666666"}}
}
}
],
"legends": [
{
"titleFont": "Lato",
"labelFont": "Lato",
"fill": "colors",
"symbolOpacity": 0.7,
"direction": "horizontal",
"orient": "bottom"
}
],
"interaction_config": [
{
"name": "tooltip",
"config": {
"fields": [
{
"column": "time",
"property": "Year",
"type": "string",
"format": ""
},
{
"column": "effort",
"property": "Fishing effort (hours)",
"type": "number",
"format": ",.0f"
},
{
"column": "capture",
"property": "Capture production (tons)",
"type": "number",
"format": ",.0f"
}
]
}
}
]
}
""")
# Copy the parsed spec and override its size with the shared widget dimensions.
vega_view = dict(spec)
vega_view.update(width=widget_width, height=widget_height)
Vega(vega_view)
# # RW API
# Parameterized widgets created via [widget scripts](https://github.com/resource-watch/data-team-tools/tree/master/advanced_widget_writer).
# [com.030d.rw0 Fishing Effort by Zone](https://resourcewatch.org/data/explore/49b76e0f-3aad-4138-b272-87a10748f2df) ([Admin](https://resourcewatch.org/admin/data/datasets/49b76e0f-3aad-4138-b272-87a10748f2dfedit))
# `49b76e0f-3aad-4138-b272-87a10748f2df`
# [Dataset](https://api.resourcewatch.org/v1/dataset/49b76e0f-3aad-4138-b272-87a10748f2df/), [Widgets](https://api.resourcewatch.org/v1/dataset/49b76e0f-3aad-4138-b272-87a10748f2df/widget)
dataset_id = '49b76e0f-3aad-4138-b272-87a10748f2df'
name = 'Annual Fishing Effort'
description = 'Fishing effort per year, by zone'
# - [Country](https://api.resourcewatch.org/v1/widget/6f974b0f-66cf-416b-a5a9-d1012856e4c8?env=production) `6f974b0f-66cf-416b-a5a9-d1012856e4c8`
# dataset_id = '49b76e0f-3aad-4138-b272-87a10748f2df'
# name = 'Annual Fishing Effort Excluding International Waters'
# description = 'Fishing effort per year, across all nationally administered waters'
# - [Global](https://api.resourcewatch.org/v1/widget/c029173d-a568-4f1c-abb5-3835458a91bb?env=production) `c029173d-a568-4f1c-abb5-3835458a91bb`
| widgets/com_030d_fishing_effort/fishing-effort_production_visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="included-education"
# # Lecture 5:绘制正多边形
# + [markdown] id="adjacent-chapel"
# - **Name (姓名)**: [Write Your Name Here]
# - **Date (日期)**: [Write the Date on which you completed this assignment]
#
#
# - **Score (成绩)**:A+
# - **Comment By Teacher (评语)**: Super Good!
# + [markdown] id="periodic-register"
# ## Problem
#
# 1. 用已经学过的Turtle库内的绘图方法绘制正三角形、正4、5、6、7、... 边形
# 2. 绘制一个半径为50圆
# + [markdown] id="latest-contamination"
# ## Objective 学习目标
#
# - 讲解上次作业
# - 继续思考尽量不编写重复代码这一编程原则的意义,理解“循环”的意义,初步见识如何使用循环语句来避免编写重复的代码
# - 理解同一个方法名在接受不同的外部数据时会完成类似但不完全的功能。方法接受的不同的数据成为这个方法可以接受的**参数**。一个方法接受的参数的名称、次序和个数是由编写这个方法的人决定的,他人在使用这个方法时需要参考编写该方法的人所公开的关于该方法的详细介绍。
# - 继续使用已掌握的turtle库内的方法练习绘制略复杂的几何图形,思考如何在指定的位置绘制图形
# - 初步学习turtle库中的两个新方法: `goto`, `seth`,引导思考数轴、平面直角坐标系和极坐标系的思想
# - 复习正三角形以及正多边形的几何特点
# - 学习圆的简单性质
# + [markdown] id="animated-batch"
# ## Math
#
# - 组成一个正三角形或一个正多边形的每一条边的边长都相等。
# - 一个圆由一个中心位置和一个长度来确定,这个中心位置确定了圆的位置,这个长度可以用来确定圆的大小
# - 数轴是一个有方向和刻度的水平直线,每一个整数或者小数都可以在数轴上表示。在一个右侧朝向的数轴上,右侧的点表示的数比左侧的点表示的数大
# - 平面直角坐标系和极坐标系都是用一组(2个)按照事先设定的次序列出的数值的形式来表示二维平面中的某一个点的位置的坐标系统
# + [markdown] id="sorted-furniture"
# ## Python
# + id="overall-pixel"
from turtle import setup, reset, bye, pu, pd, bk, fd, left, right, st, ht, goto, seth
setup(800, 600, 0, 0)
reset()
# + id="sudden-lodging"
num_side = 10 # number of sides of the regular polygon
length_side = 100 # length of each side
angle = 360 / num_side  # exterior angle: turning by this after every side closes the shape
# + id="informative-offer"
# Draw the regular polygon WITHOUT a loop: one side per pair of lines.
# The repetition is deliberate -- the next cell redraws it with a while loop.
# steps before drawing
reset()
pu()            # pen up: reposition without drawing
goto(0, -150)   # start low so the whole shape fits the canvas
st()            # show the turtle
pd()            # pen down: start drawing
# drawing one side using each two code lines
fd(length_side)
left(angle)
fd(length_side)
left(angle)
fd(length_side)
left(angle)
fd(length_side)
left(angle)
fd(length_side)
left(angle)
fd(length_side)
left(angle)
fd(length_side)
left(angle)
fd(length_side)
left(angle)
fd(length_side)
left(angle)
fd(length_side)
left(angle)
# steps after drawing
pu()
ht()            # hide the turtle
# + id="forced-visitor"
num_side = 100 # number of sides; 100 short sides approximate a circle (exercise 2)
length_side = 1000 / num_side # length of each side: fixed perimeter of 1000
angle = 360 / num_side
# steps before drawing
reset()
pu()
goto(0, -150)
st()
pd()
# drawing sides using loop flow control -- replaces the repeated fd/left pairs above
num_side_drawn = 0
while num_side_drawn < num_side:
    fd(length_side)
    left(angle)
    num_side_drawn = num_side_drawn + 1
# steps after drawing
pu()
ht()
# + [markdown] id="ordered-devil"
# **思考**: 比较使用循环语句和不使用循环语句时,两段代码的相同与不同部分。进一步思考有没有可能继续改进以避免重复出现较多行相同的代码
# + id="qualified-suspension"
st() # show turtle (triangle) 显示小海龟
# + id="adequate-welsh"
# observe the turtle orientation when executing the following codes
# 执行该单元格代码观察绘图区海龟方向的变化
seth(0)
# + id="united-cattle"
# observe the turtle orientation when executing the following codes
# 执行该单元格代码观察绘图区海龟方向的变化
seth(90)
# + id="involved-chart"
# observe the turtle orientation when executing the following codes
# 执行该单元格代码观察绘图区海龟方向的变化
seth(180)
# -
bye()
# + [markdown] id="digital-consensus"
# ## Exercise
#
# **注意:**
# - 使用整个星期完成下面的练习
# - 本讲所有编程练习都要求练习者自行事先导入如下代码提供的方法和绘图设置
# - 如果您复制粘贴下面的代码,请注意修正其中存在的一处错误再运行
#
#
# ```python
# from turtle import setup, reset, bye, pu, pd, bk, fd, left, right, st, ht,goto
# setup(500, 400, 0, 0)
# reset()
# ```
# + [markdown] id="agreed-movie"
# 1. 尝试按照下面列举的数据多次修改`setup(500, 400, 0, 0)`方法接受的外部数据,观察代码执行的效果有什么变化。你能总结出`setup`方法接收的每一个外部数据的意义吗。在下面的代码单元格中以注释的形式写上你的回答。
# - `setup(400, 500, 0, 0)`
# - `setup(800, 400, 0, 0)`
# - `setup(400, 800, 0, 0)`
# - `setup(500, 400, 200, 100)`
# - `setup(500, 400, 200, 0)`
# - `setup(500 ,400, 0, 200)`
# + code_folding=[] id="enhanced-closer"
# 在这里写上你的答案
# The first number 500 is the amount of units for the length of the paper. The second number 400 is the amount of units for the width of the paper.
# The third number 200 is for moving the paper 200 unit to the right. The forth number 100 is for moving the paper 100 unit down.
# So the last two numbers is for moving the paper to the exact position you want.
from turtle import setup, reset, bye, pu, pd, bk, fd, left, right, st, ht, goto
setup(500, 400, 200, 100)
reset()
# + [markdown] id="confirmed-dimension"
# 2. 根据你目前学过的内容,在下面的代码单元格中以注释的形式详细说明已经学过的下列方法分别来自哪一个方法(仓)库、每一个方法完成的功能、所接受的参数(外部数据)的个数以及每一个参数的在该方法中的作用,即当这个方法中的该参数发生变化时,这个方法执行后得到的效果有什么变化。
#
# 注:如果是默认的仓库,在仓库名列下写"default"。
# + code_folding=[] id="excess-distributor"
# 方法名:print
# 所在仓库:Default
# 方法完成的功能:向屏幕输出一段字符串
# 参数个数以及意义:1个参数,表示要输出的字符串
# 方法名: input
# 所在仓库:default
# 方法完成的功能:The string is for the thing you want to show on the screen and wait for the users to type on their keyboard.
# 参数个数以及意义:1 parameter, Show the user type on the computer.
# 方法名: type
# 所在仓库:default
# 方法完成的功能:This is for showing the type of the parameter.
# 参数个数以及意义:1 parameter, the parameter you want to know the type of.
# 方法名: int
# 所在仓库:default
# 方法完成的功能:this is used for changing the type of the parameter into a integer.
# 参数个数以及意义:1 parameter, changing the parameter into a integer.
# 方法名: random
# 所在仓库:random
# 方法完成的功能:This is for generating random decimal numbers between 0 to 1.
# 参数个数以及意义:0 parameter, for generating number between 0 to 1.
# 方法名: randint
# 所在仓库:random
# 方法完成的功能:this is used to generate a random integer.
# 参数个数以及意义:2 parameters, for generating random numbers between the parameters you type.
# 方法名: goto
# 所在仓库:turtle
# 方法完成的功能:goto is used to move the turtle pointer to the coordinate (x, y) that you want it to.
# 参数个数以及意义:2 parameters, coordinate (x, y) where you want to move the pointer to.
# 方法名: setup
# 所在仓库:turtle
# 方法完成的功能:to setup a paper on your screen so you can draw.
# 参数个数以及意义:4 parameters, the first two are length and width of paper; the last two are position of the paper on your screen.
# 方法名: seth
# 所在仓库:turtle
# 方法完成的功能:Is used to turn the turtle pointer instead of using the code left and right.
# 参数个数以及意义:1 parameter, the angle you want to turn the turtle.
# + [markdown] id="educated-defendant"
# 3. 使用`turtle`绘图库中已经学过的方法(可以不使用`goto`和`seth`),在一个或多个代码单元格中编写任意行数的代码,绘制出按下面排列的一组(5个)正方形。
# <img src = "figures/L005B_E03_square_combinations.png" width = 500>
#
# 具体要求:
# - 最中间的正方形位于画布(绘图区)的正中央
# - 每一个正方形的边长为80
# - 你的代码可以使用也可以不使用`while`循环语句
# - 如果你没有使用`while`循环语句,请给你所写的每一行代码进行注释,解释每一行代码的作用。
# - 如果你使用了`while`循环语句,你仅需要给`while`循环内的每一行代码进行注释,解释每一行代码的作用。但需要整个代码结尾以注释的形式详细介绍你在使用while循环时碰到了哪些错误,你是怎么发现和修正这些错误并最终让程序正确运行的。
# + id="powered-teens"
from turtle import setup, reset, bye, pu, pd, bk, fd, left, right, st, ht, goto, speed
num_side = 4
length_side = 80
angle_turn = 360 / num_side
# + id="speaking-reunion"
# Draw the 5-square arrangement: after finishing each square, turn right and
# step forward so the next square attaches on the following side.
setup(500,400,0,0)
reset()
pu()
goto(40,-40)  # start at the lower-right corner of the central square
right(90)
pd()
squares_drawn = 0
while squares_drawn < 4:  # four identical (square + reposition) rounds
    num_side_drawn = 0  # sides completed for the current square
    while num_side_drawn < num_side:
        fd(length_side)  # draw one side (length_side = 80)
        left(angle_turn)  # angle_turn = 360 / num_side = 90 degrees
        num_side_drawn = num_side_drawn + 1
    right(90)  # reorient so the next square attaches correctly
    fd(80)     # move to the next square's starting corner
    squares_drawn = squares_drawn + 1
ht()
# Lesson learned (kept from the original): inside the while the turtle turns
# left, so between squares we must turn right, otherwise the loop wrecks the drawing.
# + id="interstate-certificate"
bye()
# + [markdown] id="legal-thomson"
# 3. 使用`turtle`绘图库中已经学过的方法,在一个或多个代码单元格中编写任意行数的代码,绘制出如下图所示的图形。
#
# 图形描述:图片中所描述的图形可以有两种构图方式:
# - 它可以认为是一个大的正三角形和其内部的一个倒立的小的正三角形组合而成,其中大三角形的边长是小三角形边长的2倍;
# - 它也可以认为仅由3个小的正三角形按照一定的规律排列而成
# <img src = "figures/L005B_E04_triangle_combinations.png" width = 500>
#
# 具体绘制要求:
# - 图中的大三角形的边长是240,相应的小三角形的变长是120
# - 整个图形在绘图区中的具体位置可随意设定,但底边需要水平不能倾斜且不能有任何部分超出绘图区而导致大三角形的显示不完整
# - 大三角形中倒立的小三角形最上方的一条边(也可以理解为最上方的小三角形的底边)也必须是水平不能倾斜
# - 所有的三角形必须是正三角形。
# - 不要求代码中出现`while`语句
# - 请给你所写的每一行代码进行注释,解释每一行代码的作用。
# - 可以有多种途径完成此图的绘制,你仅需要提供一种途径的代码即可,如果你有兴趣,欢迎你提供多套代码。
#
# **提示**:
#
# - 下面代码声明的变量和值作为**礼物**送给你,它们会帮助你精确的控制所要绘制的三角形之间的位置关系,你可以选择其中的一个使用在你的代码中。如果你觉得这个礼物对你有帮助,请在代码中以注释的形式感谢它。同时,你可以尝试将这个礼物(神秘)数字(`1.7321`)换成别的数字(例如`1.5`或者`2`), 重新运行代码,观察绘制的图形有什么变化?
# ```python
# GIFT_VALUE1 = 120 * 1.7321 / 2
# GIFT_VALUE2 = 120 * 1.7321
# GIFT_VALUE3 = 1.7321 / 2
# ```
# + id="executed-governor"
from turtle import setup, reset, bye, pu, pd, bk, fd, left, right, st, ht, goto, speed
setup(500,400,0,0) # Get a paper that have the area 200000
reset() # Reset the paper to show the pointer
pu() # Pen up
left(180) # Turn left 180 degrees
fd(60) # go foward 60 units
pd() # pen down
left(60) # turn left 60 degrees
fd(120) # forward 120 units
left(120) # turn left 120 degrees
fd(240) # forward 240 units
left(120) # turn left 120 degrees
fd(240) # forward 240 units
left(120) # turn left 120 degrees
fd(120) # forward 120 units
left(120) # turn left 120 degrees
fd(120) # forward 120 units
right(120) # turn right 120 degrees
fd(120) # forward 120 units
right(120) # turn right 120 degrees
fd(120) # forward 120 units
ht() # hide the turtle pointer
# + id="informal-foundation"
# 1 + 2 + 3 + ... + n = 2000
# (n + 1) * n / 2 = 2000
# n is roughly about 62
# (62 + 1) * 62 / 2 = 1953
# The king has 47 bags of grain left.
# He gave 1953 bags in 62 days.
# +
# Variant 1: hand out `day` bags on day `day` until the granary is exhausted.
num_days = 0    # days counted, including the day the king falls short
num_bags = 2000 # bags in the granary
sum_bags = 0    # bags promised so far
while sum_bags < num_bags:
    num_days += 1
    sum_bags += num_days
# the final day overshot the granary, so roll it back for the reported totals
final_num = sum_bags - num_days
print("The king can give",final_num ,"in", num_days - 1,"days.")
left_bags = num_bags - final_num
print("The king have",left_bags ,"left.")
# -
# Variant 2: closed-form approach using the triangular-number formula (n+1)*n/2
# instead of accumulating day by day.
num_days = 0
num_bags = 2000
sum_bags = (num_days + 1) * num_days / 2
while sum_bags < num_bags:
    # sum_bags is computed for the OLD num_days, then num_days advances,
    # so at loop exit num_days is one ahead of the overshooting day
    sum_bags = (num_days + 1) * num_days / 2
    num_days = num_days + 1
if sum_bags > num_bags :
    # bags actually given out in the (num_days - 2) fully-paid days
    final_num = (num_days - 1) * (num_days - 2) / 2
else :
    print("ho")
# NOTE(review): if sum_bags ever equals num_bags exactly, the else branch runs
# and final_num is never assigned, so the print below raises NameError --
# confirm that case cannot occur for the chosen num_bags.
print("The king can give",final_num ,"in", num_days - 2,"days.")
left_bags = num_bags - final_num
print("The king have",left_bags ,"left.")
# +
# Variant 3: count DOWN from the full granary instead of accumulating upward.
num_days = 0    # day counter
num_bags = 2000 # total number of bags
sum_bags = 2000 # bags still remaining in the granary
while sum_bags > 0:
    num_days += 1
    sum_bags -= num_days
left_bags = sum_bags + num_days   # undo the overdrawn final day
final_num = num_bags - left_bags
print("The king can give",final_num ,"in", num_days - 1,"days.")
print("The king have",left_bags ,"left.")
# +
# Variant 4: like variant 1, but the addition happens BEFORE the day counter
# advances, so the offsets in the reported numbers differ by one.
num_days = 0 # number of days
num_bags = 2000 # total number of bags
sum_bags = 0 # in the num_days' day, the number of bags that was given out
while sum_bags < num_bags:
    sum_bags = sum_bags + num_days # add the OLD day number first...
    num_days = num_days + 1        # ...then advance, so num_days ends two ahead
final_num = sum_bags - num_days + 1  # +1 compensates for the swapped order above
print("The king can give",final_num ,"in", num_days - 2,"days.")
left_bags = num_bags - final_num
print("The king have",left_bags ,"left.")
# -
| assignments/2021-05-08/Jason_assignment_006_marked.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # I. Preparation
# +
import numpy as np
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from nn_interpretability.model.model_trainer import ModelTrainer
from nn_interpretability.model.model_repository import ModelRepository
from nn_interpretability.interpretation.cam.grad_cam import GradCAMInterpreter
from nn_interpretability.visualization.mnist_visualizer import MnistVisualizer
from nn_interpretability.dataset.mnist_data_loader import MnistDataLoader
# +
model_name = 'cam_classifier.pt'
extended_model_name = 'cam_extended_classifier.pt'
train = False
# -
mnist_data_loader = MnistDataLoader()
MnistVisualizer.show_dataset_examples(mnist_data_loader.trainloader)
# # II. Grad-CAM
#
# Gradient-Weighted Class Activation Maps (Grad-CAM) is a generalization of the CAM method, which uses the gradient signal instead of the weights of the last layer for weighing the activations. This makes the method reusable for any kind of CNN models unlike the original CAM method. Furthermore, Grad-CAM can be applied to practically any CONV layer of the CNN model which produces a meaningful gradient signal.
#
# <p float="center">
# <img src="./assets/grad_cam_formula.png"/>
# </p>
#
# Grad-CAM is being computed by multiplying the activations from the forward pass of the chosen layers with global-averaged-pooled incoming gradient from the backward pass. The result of the multiplication is then run through a ReLU activation. The final result is upsampled to the dimensions of the original input.
def generate_cams(interpreter):
    """Collect one sample image per digit (0-9) and its Grad-CAM heatmap.

    Uses the module-level mnist_data_loader to fetch one image per class.
    Returns (images, cams): two parallel lists of (28, 28) numpy arrays.
    """
    images, cams = [], []
    for digit in range(10):
        sample = mnist_data_loader.get_image_for_class(digit)
        heatmap = interpreter.interpret(sample)
        images.append(sample.cpu().numpy().reshape(28, 28))
        cams.append(heatmap.numpy().reshape(28, 28))
    return images, cams
def display_cams(images, cams, show_images = True):
    """Render the ten digit CAM overlays, two panels at a time.

    images: list of 10 (28, 28) arrays -- the input digits
    cams:   list of 10 (28, 28) arrays -- the Grad-CAM heatmaps
    show_images: when False, only the heatmaps are drawn (no grayscale digit)
    """
    for i in range(5):
        # NOTE(review): a fresh figure is created per outer iteration, yet
        # subplot(5, 2, ...) indexes as if all ten panels shared one 5x2 grid,
        # so each figure gets its panels in a different row -- confirm this
        # layout is intended rather than a single shared figure.
        plt.figure(figsize=(10,10))
        for j in range(2):
            plt.subplot(5, 2, i * 2 + j + 1)
            if show_images:
                plt.imshow(images[i*2 + j], cmap='gray')
            plt.imshow(cams[i*2 + j], alpha=.5)  # heatmap overlaid at 50% opacity
            plt.title('Digit {} GRAD-CAM'.format(i * 2 + j))
            plt.colorbar()
            plt.xticks([])
            plt.yticks([])
        plt.tight_layout()
def cams(model, layer_name):
    """Build a Grad-CAM interpreter for `layer_name` and display all digit CAMs."""
    class_labels = [str(digit) for digit in range(10)]
    interpreter = GradCAMInterpreter(model, class_labels, None, (28, 28), layer_name)
    digit_images, digit_cams = generate_cams(interpreter)
    display_cams(digit_images, digit_cams)
# ## 1. Grad-CAM with model with small last activation maps (3x3)
#
# In our first experiment, we execute Grad-CAM for a model which has small activation maps being produced by the last CONV layer. Furthermore, we exhibit the resultant Grad-CAM for each of the three available CONV layers. It is noteworthy that the quality of results becomes worse as the depth of the CONV layer under test increases.
# +
# Load (or optionally retrain) the small CAM classifier.
model = ModelRepository.get_cam_classifier(model_name)
if train:
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.0005)
    model.train()
    # BUG FIX: bare `trainloader` is undefined in this notebook; the loader
    # lives on mnist_data_loader (see its use in show_dataset_examples above).
    ModelTrainer.train(model, criterion, optimizer, mnist_data_loader.trainloader)
    ModelRepository.save(model, model_name)
# -
# ### 1.1 Grad-CAM for first layer
cams(model, 'relu1')
# ### 1.2 Grad-CAM for second layer
cams(model, 'conv2')
# ### 1.3 Grad-CAM for third layer
cams(model, 'relu3')
# ## 2. Grad-CAM with model with medium-sized last activation maps (6x6)
#
# In our first experiment, we execute Grad-CAM for a model which has medium-sized activation maps being produced by the last CONV layer. Furthermore, we exhibit the resultant Grad-CAM for each of the three available CONV layers. It is noteworthy that the quality of results becomes worse as the depth of the CONV layer under test increases. However, the results here are better in comparison to the previous model mostly due to the increased size of activation maps.
# +
# Load (or optionally retrain) the extended CAM classifier (6x6 activation maps).
model = ModelRepository.get_cam_extended_classifier(extended_model_name)
if train:
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.0005)
    model.train()
    # BUG FIX: bare `trainloader` is undefined; use the loader on mnist_data_loader.
    ModelTrainer.train(model, criterion, optimizer, mnist_data_loader.trainloader)
    # BUG FIX: this cell previously saved under `model_name`, silently
    # overwriting the plain classifier's checkpoint instead of its own.
    ModelRepository.save(model, extended_model_name)
# -
# ### 2.1 Grad-CAM for first layer
cams(model, 'conv1')
# ### 2.2 Grad-CAM for second layer
cams(model, 'relu2')
# ### 2.3 Grad-CAM for third layer
cams(model, 'relu3')
| 10.2.Grad_Class_Activation_Map.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="djL0UFMoIgyt"
# # Функции
#
# <NAME>
# FB: [fb.com/obulygin91](fb.com/obulygin91)
# VK: [vk.com/obulygin91](vk.com/obulygin91)
# LinkedIn: [linkedin.com/in/obulygin](linkedin.com/in/obulygin)
# Telegram: @obulygin91
# email: <EMAIL>
#
# -
# ## План
# - Определение функции
# - Docstring
# - Параметры функции
# - Области видимости
# - lambda-функции
# Функции – это обособленный участок кода, который можно вызывать, обратившись к нему по имени, которым он был назван (подпрограмма).
# Это объект, принимающий аргументы и возвращающий значение.
# Функции помогают избежать дублирования кода, улучшить его структурированность и читаемость.
# + [markdown] id="veWjkUwNIgzL"
# ### Объявление функции
#
# Существуют некоторые правила для создания функций в Python:
#
# # + Блок функции начинается с ключевого слова def, после которого следуют название функции и круглые скобки ()
#
# # + Любые аргументы, которые принимает функция должны находиться внутри этих скобок.
#
# # + После скобок идет двоеточие ( : ) и с новой строки с отступом начинается тело функции.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 133} id="RWlt-EMpIgzN" outputId="93eb6120-a8bb-4359-b39b-1fd9f5196cb8"
def my_function():
    """Print a demo message (the lecture's first function)."""
    print('Эта наша первая функция')
my_function()
# + [markdown] id="ahaklUj0Nw3u"
# Когда запустится строка "Это наша первая функция"?
# + colab={"base_uri": "https://localhost:8080/", "height": 69} id="Gtr_IAjRNmPs" outputId="78b3839c-5f30-42d2-f53e-17722e735cec"
print('Раз')
def your_function():
print('Это наша первая функция')
print('Два')
your_function()
# + [markdown] id="9UmnsAJTN5be"
# Есть **объявление** и **вызов** функции. Это разные процессы
# + [markdown] id="4oBO1B9jIgzh"
# ### Ключевое слово return
#
# * Выражение return прекращает выполнение функции и возвращает указанное после выражения значение.
#
# * Выражение return без аргументов это то же самое, что и выражение return None.
#
# * То, что функция возвращает можно присваивать какой-либо переменной.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="n_ohL5N9ONTL" outputId="e156851f-81e0-4783-f102-8b9208e43a6d"
def square():
    """Prompt the user for an integer and RETURN its square."""
    user_input = int(input('Введите число'))
    result = user_input ** 2
    return result
res = square()
print(res)
# когда вызываем функцию внутри print на экран выводится то, что она возвращает. В jupyter можно и без print
print(square())
# +
# как думаете, что получим сейчас?
def square():
    """Prompt for an integer and PRINT its square.

    Implicitly returns None -- the surrounding cell demonstrates that
    print(square()) therefore shows the result followed by None.
    """
    user_input = int(input('Введите число'))
    result = user_input ** 2
    print(result)
print(square())
# + [markdown] id="9aIvBMyoIn8H"
# ### Задача
# Напишите функцию, которая определяет является ли слово палиндромом
# -
def is_palindrome():
    """Ask the user for a word and report whether it reads the same reversed.

    Verbose "before" version for the lesson; the next cell shows the idiomatic
    one-line return of the comparison itself.
    """
    word = input('Введите слово: ').lower()
    if word == word[::-1]:
        return True
    else:
        return False
def is_palindrome():
    """Same palindrome check, returning the comparison result directly."""
    word = input('Введите слово: ').lower()
    return word == word[::-1]
is_palindrome()
# ### Docstring
#
# (сокр. от documentation string, строка документации)
# встроенное средство документирования модулей, функций, классов и методов.
#
# Сразу после определения указывается строковое значение, которое и будет docstring'ом.
#
help(print)
# ?print
# пишем docstring к своей функции
def square():
    """
    Prompt for an integer and return its square.

    (Demonstration docstring -- it is what help(square) and ?square display.)
    """
    user_input = int(input('Введите число'))
    result = user_input ** 2
    return result
# ?square
# + [markdown] id="yQlxKfmHIgz6"
# ### Функции придают программе структуру
# + [markdown] id="1FTFp8paIgz-"
# Польза функций не только в возможности многократного вызова одного и того же кода из разных мест программы. Не менее важно, что благодаря им программа обретает четкую структуру. Функции как бы разделяют ее на обособленные части, каждая из которых выполняет свою конкретную задачу.
#
# Пусть надо написать программу, вычисляющую площади разных фигур. Пользователь указывает, площадь какой фигуры он хочет вычислить. После этого вводит исходные данные. Например, длину и ширину в случае прямоугольника. Чтобы разделить поток выполнения на несколько ветвей, следует использовать оператор if-elif-else:
# + id="dKIiW8aSIg0A" outputId="de0ebde2-be66-44db-d8e7-cf84715abf1b"
figure = input("1-прямоугольник, 2-треугольник, 3-круг: ")
if figure == '1':
a = float(input("Ширина: "))
b = float(input("Высота: "))
print(f"Площадь: {a*b}")
elif figure == '2':
a = float(input("Основание: "))
h = float(input("Высота: "))
print(f"Площадь: {0.5 * a * h}")
elif figure == '3':
r = float(input("Радиус: "))
print(f"Площадь: {3.14 * r**2}")
else:
print("Ошибка ввода")
# + colab={"base_uri": "https://localhost:8080/", "height": 86} id="X4eLEHn6Ig0G" outputId="7481df4c-4942-43d8-c2bb-391eee2e4002"
def rectangle():
    """Prompt for width and height and print the rectangle's area."""
    a = float(input("Ширина: "))
    b = float(input("Высота: "))
    print(f"Площадь: {a*b}")
def triangle():
    """Prompt for base and height and print the triangle's area."""
    a = float(input("Основание: "))
    h = float(input("Высота: "))
    print(f"Площадь: {0.5 * a * h}")
def circle():
    """Prompt for the radius and print the circle's area (pi approximated as 3.14)."""
    r = float(input("Радиус: "))
    print(f"Площадь: {3.14 * r**2}")
def main():
    """Dispatch to the matching area function based on the user's menu choice."""
    figure = input("1-прямоугольник, 2-треугольник, 3-круг: ")
    if figure == '1':
        rectangle()
    elif figure == '2':
        triangle()
    elif figure == '3':
        circle()
    else:
        print("Ошибка ввода")
main()
# + [markdown] id="v4Xh1dEWIg0e"
# Из общей логики программы как бы убраны и обособлены инструкции для нахождения площадей. Программа теперь состоит из отдельных "кирпичиков Лего". В основной ветке мы можем комбинировать их как угодно. Она играет роль управляющего механизма.
#
# Если нам когда-нибудь захочется вычислять площадь треугольника по формуле Герона, а не через высоту, то не придется искать код во всей программе (представьте, что она состоит из тысяч строк кода как реальные программы). Мы пойдем к месту определения функций и изменим тело одной из них.
#
# Если понадобиться использовать эти функции в какой-нибудь другой программе, то мы сможем импортировать их туда, сославшись на данный файл с кодом.
# -
# ### Параметры функции
# Функция может принимать более 1 параметра (а может не принимать параметры вообще).
#
# Для всех параметров функций можно указывать значения по-умолчанию, это дает возможность вызвать функцию с меньшим числом параметров.
#
#
# + [markdown] id="G6_adbf2O_hF"
# Переменные в скобках – это параметры функции, которые мы указываем при объявлении функции. Когда мы её вызываем мы передаем в вызов аргументы. В нашем случае это будут числа.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="EHK_kqH3Igzj" outputId="75f2b0f2-276b-4149-d2dd-f37668916cce"
def bigger(a, b):
    """Return the larger of two numbers (returns b when they are equal)."""
    return a if a > b else b
# присваиваем результат функции bigger переменной num
num = bigger(26, 24)
num
# -
# функция с параметром по умолчанию
def power(number, number_2=2):
    """Raise `number` to the power `number_2` (squares it by default)."""
    return number ** number_2
power(10, 6)
# Если при создании функции мы указали количество передаваемых ей аргументов и их порядок,
# то и вызывать ее мы должны с тем же количеством аргументов, заданных в нужном порядке.
#
# Если при вызове мы будем явно указывать имена аргументов, то их можно передавать в любом порядке
power(2, 3, 1)
power()
power(number_2=3, number=2)
# + [markdown] id="BF7EhPbcSVBV"
# Иногда возникает ситуация, когда вы заранее не знаете, какое количество аргументов будет необходимо принять функции. В этом случае следует использовать аргументы произвольной длины ([args и kwargs](https://habr.com/ru/company/ruvds/blog/482464/)). Они задаются произвольным именем переменной, перед которой ставится звездочка (args) или две здездочки (kwargs).
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="C6hrJTVuIg1G" outputId="673c6af6-147f-414e-b73f-222bcd161fc2"
def unknown(*args):
    """Print the whole args tuple, then each argument on its own line."""
    print(args)  # args arrives as a tuple of every positional argument
    for item in args:
        print(item)
unknown("hello","world") # напечатает оба слова, каждое с новой строки
unknown(1,2,3,4,5) # напечатает все числа, каждое с новой строки
unknown() # ничего не выведет
# -
def api_request(*params):
    """Print positional request params: start date, end date, then the rest."""
    start, end = params[0], params[1]
    print(params)
    print(f'Дата старта: {start}, дата окончания: {end}, прочие данные: {params[2:]}')
api_request('2019-01-01', '2019-01-31')
api_request('2019-01-01', '2019-01-31', 1000, 10000)
def api_requets(**params):
    """Return the collected keyword arguments as a dict.

    NOTE(review): the name is misspelled ("requets"), but the following cell
    calls it by this exact spelling, so renaming here would break that caller.
    """
    return params
api_requets(a=1, b=2, c=3)
def api_request(**params):
    """Print the raw keyword params, then a formatted start/end summary.

    Requires the keys 'date_start' and 'date_end' (KeyError otherwise).
    """
    start = params['date_start']
    end = params['date_end']
    print(params)
    print(f'Дата старта: {start}, дата окончания: {end}')
api_request(date_start='2019-01-31', date_end='2019-01-01')
# + [markdown] id="9aIvBMyoIn8H"
# ### Задача
# Напишите функцию, которая будет находить среднюю цену квартиры по всем данным (каждый спиоск – отдельный район города)
# -
dict_1 = {'flat_1': 10500, 'flat_2': 11000}
dict_2 = {'flat_3': 15000}
dict_3 = {'flat_4': 6500, 'flat_5': 7000, 'flat_6': 6000}
def mean_flat_price(*args):
    """Average flat price across any number of {flat: price} dicts (one dict per district)."""
    prices = [price for district in args for price in district.values()]
    return sum(prices) / len(prices)
mean_flat_price(dict_1)
# + [markdown] id="fPPXAGPHIg1Y"
# ### Области видимости
#
# Область видимости (scope) определяет контекст объекта, в рамках которого его можно использовать.
#
# Рассмотрим 2 типа области видимости:
#
# * Локальная область видимости
# * Глобальная область видимости
#
# Глобальная область видимости подразумевает, что переменная является глобальной, она определена вне любой из функций и доступна любой функции в программе.
#
# В отличие от глобальных переменных локальная переменная определяется внутри функции и доступна только из этой функции, то есть имеет локальную область видимости.
# + colab={"base_uri": "https://localhost:8080/", "height": 69} id="JoUGUs04Ig1a" outputId="6842bda8-e109-4f4a-d8f6-81433acffc1d"
# Global variables: defined at module level, readable from inside any function.
salary = 1000
bonus = 600
def info():
    """Reads both globals; prints 1600."""
    print(salary + bonus)
def info_2():
    """Local `bonus` shadows the global of the same name inside this function; prints 1050."""
    bonus = 50
    print(salary + bonus)
def local_info():
    """Every name here is a local; the globals stay untouched. Prints 700."""
    salary = 500
    bonus = 200
    some_number = 1
    print(salary + bonus)
info()
info_2()
local_info()
# Deliberate error for the lesson: `some_number` was local to local_info(),
# so this line raises NameError — locals are invisible outside their function.
print(some_number)
# -
# ### Анонимные функции
# [Анонимные функции](https://habr.com/ru/post/507642/) создаются при помощи инструкции lambda и используются для более краткой записи функций с одним выражением. Выполняются быстрее обычных и не требуют инструкции return.
# Anonymous (lambda) function: add two numbers.
func = lambda x, y: x + y
func(1, 8)
# NOTE(review): despite its name, this lambda returns a tuple (square root of x, square of x).
sqrt = lambda x: (x**0.5, x**2)
sqrt(9)
# ### lambda + map
# В Python функция map принимает два аргумента: функцию и аргумент составного типа данных, например, список. map применяет к каждому элементу списка переданную функцию.
# Пишем программу, которая создает список, содержащий квадраты натуральных чисел от 1 до 9
# Традиционным способом
my_list = []
for i in range(1,10):
my_list.append(i**2)
my_list
# С помощью map+lambda
list(map(lambda x: x**2, range(1,10)))
# + id="FLqx6pfXIg3j" outputId="9246f991-8d75-4b42-c087-eada6df8f825"
def miles_to_kilometers(num_miles):
    """Convert a distance in miles to kilometres using the approximate factor 1.6."""
    km = num_miles * 1.6
    return km
mile_distances = [1.0, 6.5, 17.4, 2.4, 9]
kilometer_distances = list(map(miles_to_kilometers, mile_distances))
print(kilometer_distances)
# + id="tz099Zb1Ig3n" outputId="b718a607-a0c4-4e75-e5f3-aa51e6e4893b"
# Same conversion, inlined as a lambda instead of a named function.
mile_distances = [1.0, 6.5, 17.4, 2.4, 9]
kilometer_distances = list(map(lambda m: m * 1.6, mile_distances))
kilometer_distances
# + [markdown] id="9aIvBMyoIn8H"
# ### Задача
# Напишите функцию, которая будет находить среднюю цену на товары в каждой категории по отдельности.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="Up5GPORUIg0o" outputId="ba161ce7-c191-4eef-ec60-d467930574ea"
prices = [[100, 200, 400, 600], [200, 500], [100, 200, 100, 100], [800, 900]]
# -
def mean_price(data):
    """Return the average price for each category (each inner list) of *data*."""
    return list(map(lambda category: sum(category) / len(category), data))
mean_price(prices)
list(map(lambda x: sum(x) / len(x), prices))
# ### Комплексный пример
students_list = [
{"name": "Василий", "surname": "Теркин", "gender": "м", "program_exp": True, "grade": [8, 8, 9, 10, 9], "exam": 8},
{"name": "Мария", "surname": "Павлова", "gender": "ж", "program_exp": True, "grade": [7, 8, 9, 7, 9], "exam": 9},
{"name": "Ирина", "surname": "Андреева", "gender": "ж", "program_exp": False, "grade": [10, 9, 8, 10, 10], "exam": 7},
{"name": "Татьяна", "surname": "Сидорова", "gender": "ж", "program_exp": False, "grade": [7, 8, 8, 9, 8],"exam": 10},
{"name": "Иван", "surname": "Васильев", "gender": "м", "program_exp": True, "grade": [9, 8, 9, 6, 9], "exam": 5},
{"name": "Роман", "surname": "Золотарев", "gender": "м", "program_exp": False, "grade": [8, 9, 9, 6, 9], "exam": 6}
]
# посчитаем среднюю оценку за экзамен по всей группе
def get_avg_exam_grade(students):
    """Return the average exam grade over *students*, rounded to 2 decimals.

    Each student is a dict with an 'exam' key.
    Bug fix: the original divided by ``len(students_list)`` — the module-level
    global — instead of ``len(students)``, so it returned wrong averages for
    any list other than that global.
    """
    sum_ex = 0
    for student in students:
        sum_ex += student['exam']
    return round(sum_ex / len(students), 2)
get_avg_exam_grade(students_list)
# посчитаем среднюю оценку за ДЗ по всей группе
def get_avg_hw_grade(students):
    """Return the group's average homework grade (mean of each student's mean), rounded to 2 decimals."""
    total = 0
    for student in students:
        grades = student['grade']
        total += sum(grades) / len(grades)
    return round(total / len(students), 2)
get_avg_hw_grade(students_list)
# добавим фильтр по опыту для расчетов
def get_avg_exam_grade(students, exp=False):
    """Average exam grade over students whose 'program_exp' equals *exp*, rounded to 2 decimals.

    Raises ZeroDivisionError when no student matches the filter.
    """
    grades = [s['exam'] for s in students if s['program_exp'] == exp]
    return round(sum(grades) / len(grades), 2)
get_avg_exam_grade(students_list)
get_avg_exam_grade(students_list, True)
# а как же теперь сделать расчет по всей группе?
def get_avg_exam_grade(students, exp=None):
    """Average exam grade rounded to 2 decimals; exp=None means "whole group", otherwise filter by 'program_exp'."""
    grades = [s['exam'] for s in students if exp is None or s['program_exp'] == exp]
    return round(sum(grades) / len(grades), 2)
get_avg_exam_grade(students_list)
get_avg_exam_grade(students_list, True)
get_avg_exam_grade(students_list, False)
# реализуем фильтр по полу
def get_avg_exam_grade(students, gender=None):
    """Average exam grade rounded to 2 decimals; gender=None means "whole group", otherwise filter by 'gender'."""
    total, matched = 0, 0
    for record in students:
        if gender is None or record['gender'] == gender:
            total += record['exam']
            matched += 1
    return round(total / matched, 2)
get_avg_exam_grade(students_list)
get_avg_exam_grade(students_list, 'м')
get_avg_exam_grade(students_list, 'ж')
# реализуем сразу оба фильтра
def get_avg_exam_grade(students, gender=None, exp=None):
    """Average exam grade with optional filters, rounded to 2 decimals.

    gender -- 'м'/'ж' to filter by gender, None for everyone.
    exp    -- True/False to filter by 'program_exp', None for everyone.
    """
    selected = [
        s['exam']
        for s in students
        if (gender is None or s['gender'] == gender)
        and (exp is None or s['program_exp'] == exp)
    ]
    return round(sum(selected) / len(selected), 2)
get_avg_exam_grade(students_list)
get_avg_exam_grade(students_list, 'м')
get_avg_exam_grade(students_list, exp=True)
get_avg_exam_grade(students_list, exp=False)
get_avg_exam_grade(students_list, 'ж', exp=True)
# расчет оценок за ДЗ
def get_avg_hw_grade(students, gender=None, exp=None):
    """Average homework grade (mean of each student's 'grade' list) with optional
    gender / experience filters (None = no filter), rounded to 2 decimals.

    Bug fix: the original accumulated into and returned ``sum_ex`` — a name it
    never initialised — so every call raised NameError. It now uses ``sum_hw``.
    """
    sum_hw = 0
    counter = 0
    for student in students:
        if (student['gender'] == gender or gender is None) and (student['program_exp'] == exp or exp is None):
            sum_hw += sum(student['grade']) / len(student['grade'])
            counter += 1
    return round(sum_hw / counter, 2)
# +
# пишем пользовательский интерфейс
def main(students):
    """Minimal console UI: read commands in a loop until the user enters 'q'."""
    while True:
        command = input('Введите команду')
        if command == 'q':
            print('До свидания!')
            break
        if command == '1':
            print(f'Средняя оценка за экзамен по группе: {get_avg_exam_grade(students)}')
        elif command == '2':
            print(f'Средняя оценка за ДЗ по группе: {get_avg_hw_grade(students)}')
        elif command == '3':
            print(f'Средняя оценка за экзамен у студентов без бэкграунда: {get_avg_exam_grade(students, exp=False)}')
# -
main(students_list)
# + [markdown] id="OgFcwZp-Z-ck"
# ### Спасибо за внимание буду рад ответить на ваши вопросы!
# Ссылка на форму ОС:
# https://forms.gle/y8xaFwJqtbFSjUeG8
#
# + id="RL70At9nLoq9"
| module_1/Module_suppl_notebooks/functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# <br>
#
# # Convert *shp* to *geojson*
# + pycharm={"name": "#%%\n"}
# #!pip3 install open_geodata --upgrade
# + pycharm={"name": "#%%\n"}
import geopandas as gpd
# + pycharm={"name": "#%%\n"}
from open_geodata import converts
# + pycharm={"name": "#%%\n"}
from paths import *
# + pycharm={"name": "#%%\n"}
list_shp = [x for x in os.listdir(output_path_shp) if x.endswith('.shp')]
list_shp
# + [markdown] pycharm={"name": "#%% md\n"}
# <br>
#
# ## Shp to Geojson
# + pycharm={"name": "#%%\n"}
# Convert every shapefile in the input folder to a lower-cased .geojson in WGS84.
for shp in list_shp:
    #print(shp)
    # Output name: the base filename without extension, lower-cased.
    geojson_filename = shp.split('.')[0]
    geojson_filename = geojson_filename.lower()
    print(geojson_filename)
    # Read
    # Reproject to EPSG:4326 (lat/lon) — the CRS GeoJSON conventionally uses.
    gdf = gpd.read_file(os.path.join(output_path_shp, shp))
    gdf = gdf.to_crs(epsg=4326)
    # Drop ESRI bookkeeping columns; errors='ignore' tolerates their absence.
    gdf.drop(['Shape_Area', 'Shape_Leng'], axis=1, errors='ignore', inplace=True)
    #display(gdf.head(2))
    # Write
    gdf.to_file(
        filename=os.path.join(output_path_geo, f'{geojson_filename}.geojson'),
        driver='GeoJSON',
        encoding='utf-8',
    )
# + [markdown] pycharm={"name": "#%% md\n"}
# <br>
#
# ## Geojson to GPKG
# + pycharm={"name": "#%%\n"}
converts.convert_to_7zip(output_path_geo, output_path_zips)
# + pycharm={"name": "#%%\n"}
| test/02_convert.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
from scipy import signal
from datetime import datetime, timedelta
# %matplotlib inline
plt.rcParams['font.size'] = 14
# -
GEM = xr.open_dataset('https://salishsea.eos.ubc.ca/erddap/griddap/ubcSSaSurfaceAtmosphereFieldsV1')
fn = '/home/bmoorema/Desktop/Desktop/Hakai_data/SuperCO2_FannyBay5mindata_submission.txt'
FannyBay = pd.read_csv(
fn, delim_whitespace=True, header=0, names=['Yearday', 'Year', 'mtime', 'date', 'time', 'pCO2', 'T', 'S'],
skiprows=6,
)
fn = '/home/bmoorema/Desktop/Desktop/Hakai_data/SuperCO2_quadra5mindata_submission.txt'
Quadra1 = pd.read_csv(
fn, delim_whitespace=True, header=0, names=['Yearday', 'Year', 'mtime', 'date', 'time', 'pCO2', 'T', 'S'],
skiprows=6,
)
fn = '/home/bmoorema/Desktop/Desktop/Hakai_data/QuadraBoL_5mindata_submission.txt'
Quadra2 = pd.read_csv(
fn, delim_whitespace=True, header=0, names=['Yearday', 'Year', 'mtime', 'date', 'time', 'pCO2', 'T', 'S'],
skiprows=6,
)
FBtime = [
datetime.fromordinal(int(mtime)) + timedelta(days=mtime%1) -
timedelta(days = 366) for mtime in FannyBay.mtime.values
]
Q1time = [
datetime.fromordinal(int(mtime)) + timedelta(days=mtime%1) -
timedelta(days = 366) for mtime in Quadra1.mtime.values
]
Q2time = [
datetime.fromordinal(int(mtime)) + timedelta(days=mtime%1) -
timedelta(days = 366) for mtime in Quadra2.mtime.values
]
Q1time.extend(Q2time)
pCO2 = np.concatenate((Quadra1.pCO2.values, Quadra2.pCO2.values))
# +
# Extract the meridional wind component at grid point (183, 107).
# Bug fix: this assignment originally appeared *after* the cell that filters
# v_wind, so running the file top-to-bottom raised NameError; it is now
# placed before its first use.
v_wind = GEM.v_wind.isel(gridY=183, gridX=107).values
# Low-pass filter the wind with a 2nd-order Butterworth.
# Frequencies are in Hz: nyqst assumes hourly samples (Nyquist = 1/(2*3600) Hz)
# and highcut corresponds to a 2-day period — TODO confirm the record is hourly.
nyqst = 1 / 3600 / 2
highcut = 1 / 86400 / 2
v_wind_filt = signal.filtfilt(*signal.butter(2, highcut / nyqst), v_wind)
# Plot the filtered wind (shaded by sign) with Quadra pCO2 on a twin axis.
fig, ax = plt.subplots(1, 1, figsize=(17, 5))
ax.fill_between(GEM.time.values, v_wind_filt, where=v_wind_filt>0, alpha=0.5, color='firebrick', interpolate=True, label='HRDPS wind Sentry Shoal')
ax.fill_between(GEM.time.values, v_wind_filt, where=v_wind_filt<0, alpha=0.5, color='darkslategray', interpolate=True)
ax.plot(GEM.time.values, v_wind_filt, 'k-', alpha=0.5)
ax.set_xlim(['2015 May 1', '2016 Oct 1'])
ax.set_ylim([-10, 10])
ax.set_ylabel('$v$ wind [m/s]')
ax.legend(loc=2)
ax2 = ax.twinx()
ax2.plot(Q1time, pCO2, '-', color='orange', label='Hakai $p$CO$_2$ at Quadra')
#ax2.plot(Q2time, Quadra2.pCO2, '-', color='orange')
# Reference line at 400 uatm across the full record.
ax2.plot([Q1time[0], Q2time[-1]], [400, 400], 'k--')
ax2.set_xlim(['2015 May 1', '2016 Oct 1'])
ax2.set_ylim([0, 1000])
ax2.set_ylabel('$p$CO$_2$ [$\mu$atm]')
ax2.legend(loc=1)
fig.savefig('/home/bmoorema/Desktop/Hakai.pdf', dpi=300, bbox_inches='tight')
# -
Q1time[50000]
| notebooks/Hakai_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install ctgan
import pandas as pd
pd.set_option('display.max_columns', 500)
# Load the dataset; drop the empty trailing column that the CSV export added.
data = pd.read_csv('./data/data.csv')
del data['Unnamed: 32']
data
# NOTE(review): every column is passed to CTGAN as "discrete" here — confirm
# that is intended, since the table may also contain continuous measurements.
discrete_columns = list(data.columns)
discrete_columns
data.isnull().values
# +
from ctgan import CTGANSynthesizer
# Train a CTGAN on the full table for 50 epochs.
ctgan = CTGANSynthesizer()
ctgan.fit(data, discrete_columns, epochs=50)
# -
# Draw as many synthetic rows as the original table and save them to disk.
samples = ctgan.sample(len(data))
samples
samples.to_csv('./data/synthetic.csv', index=False)
| Jupyter Simulations/2. Cancer (CSV) Classification Simulations/Synthetic Data Generation using GAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exemplo 04: Clusterização
# ## Identificação de base para grupo de veículos Uber
# ---
#
# K-Means é um dos algoritmos de agrupamento não supervisionados mais simples e populares [1]. O objetivo é encontrar grupos nos dados, com o número de grupos/clusters representados pela variável K. O algoritmo K-Means aloca iterativamente todos os pontos de dados para o cluster mais próximo. Em cada iteração do algoritmo, cada ponto de dado é atribuído ao cluster mais próximo com base em alguma métrica, que geralmente é a distância euclidiana. As saídas do algoritmo de agrupamento K-means são os centróides dos clusters K e os rótulos dos dados. Depois que o algoritmo executa e identifica todos os grupos de um conjunto de dados, qualquer novo dado pode ser facilmente atribuído a um grupo existente. O algoritmo K-Means pode ser usado para identificar grupos desconhecidos em conjuntos de dados complexos e não rotulados.
#
# Para efeito de comparação são também mostrados os agrupamentos usando os algoritmos Bisecting k-means[2], um K-means hierárquico, e o Gaussian Mixture Model (GMM)[3], que distribui os pontos a partir de uma distribuição gaussiana.
#
# Este exemplo usa uma base de dados de localização de chamados do Uber em Nova York. O algoritmo de clusterização agrupa os usuários e identifica os pontos ideais para o estabelecimento de uma base, de onde os carros ficarão aguardando os chamados.
#
# ## References:
#
# 1. <NAME>. "Extensions to the k-means algorithm for clustering large data sets with categorical values". Data mining and knowledge discovery, Vol 2, Issue 3. pp 283-304. Springer. 1998.
#
# 2. Savaresi, <NAME>; <NAME>. "On the performance of bisecting K-means and PDDP". Proceedings of the 2001 SIAM International Conference on Data Mining. 2001
#
# 3. <NAME>.; <NAME>. "Robust text-independent speaker identification using Gaussian mixture speaker models". IEEE Transactions on Speech and Audio Processing. 3 (1): 72–83. January 1995.
#
#
# Spark Lib
import findspark
findspark.init()
# +
# Load libraries
import pyspark
from pyspark.sql import SparkSession
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.ml.clustering import BisectingKMeans
from pyspark.ml.clustering import GaussianMixture
from pyspark.ml.evaluation import ClusteringEvaluator
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
start_time = time.time()
# %matplotlib inline
# -
# ## Configure parameters
# Path to dataset file
data_path='./data/'
# ## Start Spark Session
# +
# Create Spark Session
spark = SparkSession.builder \
.master("local[*]") \
.appName("Clustering") \
.getOrCreate()
# Set evaluator
evaluator = ClusteringEvaluator()
# -
# ## Reading Data
# +
# Load Iris CSV dataset to Spark Dataframe
orig_data = spark.read.format("csv").options(sep=',',header='true',inferschema='true').\
load(data_path+"uber.csv.gz")
print("Original Uber Dataframe read from CSV file")
#orig_data.dtypes
orig_data.show()
# -
# ### Create Clustering Matrix
# +
# The feature column should join all parameters as a Vector
# Set the column names that is not part of features list
ignore = ['Date/Time', 'Base']
# Feature columns: everything except the timestamp and the 'Base' label.
# Renamed from `list` — the original shadowed the built-in list() for the
# rest of the session.
feature_cols = [x for x in orig_data.columns if x not in ignore]
# VectorAssembler packs the selected columns into a single 'features' vector column.
assembler = VectorAssembler(
    inputCols=feature_cols,
    outputCol='features')
# Final dataframe: the 'Base' column plus the assembled feature vector.
data = (assembler.transform(orig_data).select("Base","features"))
print("Final Dataframe to K-Means")
data.show()
# -
#data.printSchema()
data.count()
# ## Evaluate the number of clusters: **K**
#
# Finding the number of cluster in clustering algorithms is crucial. One method is calculate the Silhouette
#
# ### Clustering Silhouette
#
# Silhouette is methodology of interpretation and validation of consistency within clusters of data. The technique provides a succinct representation of how well each object has been classified. The silhouette value is a measure of how similar an object is to its own cluster (cohesion) compared to other clusters (separation). The silhouette ranges from −1 to +1, where a high value indicates that the object is well matched to its own cluster and poorly matched to neighboring clusters. If most objects have a high value, near to +1, then the clustering configuration is appropriate. If many points have a low or negative value, then the clustering configuration may have too many or too few clusters.
# +
# Calculate the silhouette of KMeans from 2 to N clusters
# Set the max K
max_k = 10
# Initiate silhouette array with zero
silhouette = np.zeros(max_k)
# If the dataset is big, get only a sample to find the k
# geting sample 5%
k_eval = data.sample(False, 0.05)
k_eval.count()
#k_eval.show()
# -
# Calculate silhouette for each k (it may be take a long time...)
for k in range(2,max_k):
kmeans = KMeans(featuresCol='features', k=k, maxIter=5, seed=1)
model = kmeans.fit(k_eval)
predictions = model.transform(k_eval)
silhouette[k] = evaluator.evaluate(predictions)
# Plot graph cluster number x cost
fig, ax = plt.subplots(1,1, figsize =(8,6))
ax.plot(range(2,max_k),silhouette[2:max_k])
ax.set_xlabel('k')
ax.set_ylabel('silhouette')
# Set a good number os clusters (silhouette near 1 is better)
k = 3
# ## K-Means Clustering
#
# k-means is one of the most commonly used clustering algorithms that clusters the data points into a predefined number of clusters. The ML implementation includes a parallelized variant of the k-means++ method.
# +
start_time_km = time.time()
# Train a standard k-means model (the original comment said "bisecting" — copy-paste slip;
# the bisecting variant is trained in the next cell).
kmeans = KMeans().setK(k).setSeed(1)
model = kmeans.fit(data)
# Get centroids
centers = model.clusterCenters()
# Make predictions
predictions = model.transform(data)
# Evaluate clustering by computing Silhouette score
silhouette_km = evaluator.evaluate(predictions)
time_km = time.time() - start_time_km
print("Silhouette with squared euclidean distance = " + str(silhouette_km))
print()
print("Cluster Centers: ")
for center in centers:
    print(center)
print()
print("Processing time (s) = %3.3f s" % (time_km))
# -
# ## Bisecting k-means
#
# *Bisecting k-means* is a kind of hierarchical clustering using a divisive (or “top-down”) approach: all observations start in one cluster, and splits are performed recursively as one moves down the hierarchy. *Bisecting K-means* can often be much faster than regular K-means, but it will generally produce a different clustering.
# +
start_time_bk = time.time()
# Trains a bisecting k-means model.
bkm = BisectingKMeans().setK(k).setSeed(1)
model = bkm.fit(data)
# Get centroids
centers = model.clusterCenters()
# Make predictions
predictions = model.transform(data)
# Evaluate clustering by computing Silhouette score
silhouette_bk = evaluator.evaluate(predictions)
time_bk = time.time() - start_time_bk
print("Silhouette with squared euclidean distance = " + str(silhouette_bk))
print()
# Shows the result.
print("Cluster Centers: ")
for center in centers:
print(center)
print()
print("Processing time (s) = %3.3f s" % (time_bk))
# -
# ## Gaussian Mixture Model (GMM)
#
# A Gaussian Mixture Model represents a composite distribution whereby points are drawn from one of k Gaussian sub-distributions, each with its own probability. The spark.ml implementation uses the expectation-maximization algorithm to induce the maximum-likelihood model given a set of samples.
# +
start_time_gm = time.time()
# Trains a GMM model.
gmm = GaussianMixture().setK(k).setSeed(1)
model = gmm.fit(data)
# Make predictions
predictions = model.transform(data)
# Evaluate clustering by computing Silhouette score
silhouette_gm = evaluator.evaluate(predictions)
time_gm = time.time() - start_time_gm
print("Silhouette with squared euclidean distance = " + str(silhouette_gm))
print()
print("Gaussians shown as a DataFrame: ")
model.gaussiansDF.show()
print()
# Bug fix: the original printed time_bk (the bisecting k-means timer) here.
print("Processing time (s) = %3.3f s" % (time_gm))
print()
# -
# ## Sumary
print("====================================================================")
print("============= Compare Algorithm Silhouette and Time ===============")
print()
print("K-Means: silhouette = %3.2f time = %3.3f s" % (silhouette_km, time_km))
print("Bisecting k-means: silhouette = %3.2f time = %3.3f s" % (silhouette_bk, time_bk))
print("Gaussian Mixture Model (GMM): silhouette = %3.2f time = %3.3f s" % (silhouette_gm, time_gm))
print("====================================================================")
spark.stop()
print("--- Execution time: %s seconds ---" % (time.time() - start_time))
| 04-Clusterizacao_Uber.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Source catalogs
#
# `~gammapy.catalog` provides convenient access to common gamma-ray source catalogs.
# This module is mostly independent from the rest of Gammapy.
# Typically you use it to compare new analyses against catalog results, e.g. overplot the spectral model, or compare the source position.
#
# Moreover as creating a source model and flux points for a given catalog from the FITS table is tedious, `~gammapy.catalog` has this already implemented. So you can create initial source models for your analyses.
# This is very common for Fermi-LAT, to start with a catalog model.
# For TeV analysis, especially in crowded Galactic regions, using the HGPS, gamma-cat or 2HWC catalog in this way can also be useful.
#
# In this tutorial you will learn how to:
#
# - List available catalogs
# - Load a catalog
# - Access the source catalog table data
# - Select a catalog subset or a single source
# - Get source spectral and spatial models
# - Get flux points (if available)
# - Get lightcurves (if available)
# - Access the source catalog table data
# - Pretty-print the source information
#
# In this tutorial we will show examples using the following catalogs:
#
# - `~gammapy.catalog.SourceCatalogHGPS`
# - `~gammapy.catalog.SourceCatalogGammaCat`
# - `~gammapy.catalog.SourceCatalog3FHL`
# - `~gammapy.catalog.SourceCatalog4FGL`
#
# All catalog and source classes work the same, as long as some information is available. E.g. trying to access a lightcurve from a catalog and source that doesn't have that information will return ``None``.
#
# Further information is available at `~gammapy.catalog`.
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from gammapy.catalog import CATALOG_REGISTRY
# ## List available catalogs
#
# `~gammapy.catalog` contains a catalog registry ``CATALOG_REGISTRY``, which maps catalog names (e.g. "3fhl") to catalog classes (e.g. ``SourceCatalog3FHL``).
CATALOG_REGISTRY
# ## Load catalogs
#
# If you have run `gammapy download datasets` or `gammapy download tutorials`,
# you have a copy of the catalogs as FITS files in `$GAMMAPY_DATA/catalogs`,
# and that is the default location where `~gammapy.catalog` loads from.
#
# !ls -1 $GAMMAPY_DATA/catalogs
# !ls -1 $GAMMAPY_DATA/catalogs/fermi
# So a catalog can be loaded directly from its corresponding class
# +
from gammapy.catalog import SourceCatalog4FGL
catalog = SourceCatalog4FGL()
print("Number of sources :", len(catalog.table))
# -
# Note that it loads the default catalog from `$GAMMAPY_DATA/catalogs`, you could pass a different `filename` when creating the catalog.
# For example here we load an older version of 4FGL catalog:
catalog = SourceCatalog4FGL("$GAMMAPY_DATA/catalogs/fermi/gll_psc_v20.fit.gz")
print("Number of sources :", len(catalog.table))
#
# Alternatively you can load a catalog by name via `CATALOG_REGISTRY.get_cls(name)()` (note the `()` to instantiate a catalog object from the catalog class - only this will load the catalog and be useful), or by importing the catalog class (e.g. `SourceCatalog3FGL`) directly. The two ways are equivalent, the result will be the same.
#
# FITS file is loaded
catalog = CATALOG_REGISTRY.get_cls("3fgl")()
catalog
# Let's load the source catalogs we will use throughout this tutorial
catalog_gammacat = CATALOG_REGISTRY.get_cls("gamma-cat")()
catalog_3fhl = CATALOG_REGISTRY.get_cls("3fhl")()
catalog_4fgl = CATALOG_REGISTRY.get_cls("4fgl")()
catalog_hgps = CATALOG_REGISTRY.get_cls("hgps")()
# ## Catalog table
#
# Source catalogs are given as `FITS` files that contain one or multiple tables.
#
# However, you can also access the underlying `astropy.table.Table` for a catalog,
# and the row data as a Python `dict`. This can be useful if you want to do something
# that is not pre-scripted by the `~gammapy.catalog` classes, such as e.g. selecting
# sources by sky position or association class, or accessing special source information.
#
type(catalog_3fhl.table)
len(catalog_3fhl.table)
catalog_3fhl.table[:3][["Source_Name", "RAJ2000", "DEJ2000"]]
# Note that the catalogs object include a helper property that gives directly the sources positions as a `SkyCoord` object (we will show an usage example in the following).
catalog_3fhl.positions[:3]
# ## Source object
#
# ### Select a source
#
# The catalog entries for a single source are represented by a `SourceCatalogObject`.
# In order to select a source object index into the catalog using `[]`, with a catalog table row index (zero-based, first row is `[0]`), or a source name. If a name is given, catalog table columns with source names and association names ("ASSOC1" in the example below) are searched top to bottom. There is no name resolution web query.
#
source = catalog_4fgl[49]
source
source.row_index, source.name
source = catalog_4fgl["4FGL J0010.8-2154"]
source.row_index, source.name
source.data["ASSOC1"]
source = catalog_4fgl["PKS 0008-222"]
source.row_index, source.name
# Note that you can also do a `for source in catalog` loop, to find or process
# sources of interest.
#
# ### Source informations
#
# The source objects have a `data` property that contains the informations of the catalog row corresponding to the source.
source.data["Npred"]
source.data["GLON"], source.data["GLAT"]
# As for the catalog object, the source object has a `position` property.
source.position.galactic
# ## Select a catalog subset
#
# The catalog objects support selection using boolean arrays (of the same length), so one can create a new catalog as a subset of the main catalog that verify a set of conditions.
#
# In the next example we select only a few of the brightest sources in the 100 to 200 GeV energy band.
# Boolean mask over the 3FHL table: True for sources whose integral flux in
# the 100-200 GeV band exceeds 1e-10 cm-2 s-1.
mask_bright = np.zeros(len(catalog_3fhl.table), dtype=bool)
for k, source in enumerate(catalog_3fhl):
    # Integrate the source's catalog spectral model over the band.
    flux = (
        source.spectral_model()
        .integral(100 * u.GeV, 200 * u.GeV)
        .to("cm-2 s-1")
    )
    if flux > 1e-10 * u.Unit("cm-2 s-1"):
        mask_bright[k] = True
        print(f"{source.row_index:<7d} {source.name:20s} {flux:.3g}")
catalog_3fhl_bright = catalog_3fhl[mask_bright]
catalog_3fhl_bright
catalog_3fhl_bright.table["Source_Name"]
# Similarly we can select only sources within a region of interest. Here for example we use the `position` property of the catalog object to select sources within 5 degrees from "PKS 0008-222":
#
source = catalog_4fgl["PKS 0008-222"]
mask_roi = source.position.separation(catalog_4fgl.positions) < 5 *u.deg
catalog_4fgl_roi = catalog_4fgl[mask_roi]
print("Number of sources :", len(catalog_4fgl_roi.table))
# ## Source models
#
# The `~gammapy.catalog.SourceCatalogObject` classes have a `sky_model()` model
# which creates a `gammapy.modeling.models.SkyModel` object, with model parameter
# values and parameter errors from the catalog filled in.
#
# In most cases, the `spectral_model()` method provides the `gammapy.modeling.models.SpectralModel`
# part of the sky model, and the `spatial_model()` method the `gammapy.modeling.models.SpatialModel`
# part individually.
#
# We use the `gammapy.catalog.SourceCatalog3FHL` for the examples in this section.
source = catalog_4fgl["PKS 2155-304"]
model = source.sky_model()
model
print(model)
print(model.spatial_model)
print(model.spectral_model)
# + nbsphinx-thumbnail={"tooltip": "Access and explore thew most common gamma-ray source catalogs."}
energy_bounds = (100 * u.MeV, 100 * u.GeV)
opts = dict(energy_power=2, flux_unit="erg-1 cm-2 s-1")
model.spectral_model.plot(energy_bounds, **opts);
model.spectral_model.plot_error(energy_bounds, **opts);
# -
# You can create initial source models for your analyses using the `.to_models()` method of the catalog objects. Here for example we create a `Models` object from the 4FGL catalog subset we previously defined:
models_4fgl_roi = catalog_4fgl_roi.to_models()
models_4fgl_roi
# ## Specificities of the HGPS catalog
#
# Using the `.to_models()` method for the `gammapy.catalog.SourceCatalogHGPS` will return only the model components of the sources retained in the main catalog; several candidate objects appear only in the Gaussian components table (see section 4.9 of the HGPS paper, https://arxiv.org/abs/1804.02432). To access these components you can do the following:
#
discarded_ind = np.where(["Discarded" in _ for _ in catalog_hgps.table_components["Component_Class"]])[0]
discarded_table = catalog_hgps.table_components[discarded_ind]
# There is no spectral model available for these components but you can access their spatial models:
discarded_spatial = [catalog_hgps.gaussian_component(idx).spatial_model() for idx in discarded_ind]
# In addition to the source components, the HGPS catalog includes a large-scale diffuse component built by fitting a Gaussian model in a sliding window along the Galactic plane. Information on this model can be accessed via the properties `.table_large_scale_component` and `.large_scale_component` of `gammapy.catalog.SourceCatalogHGPS`.
# here we show the 5 first elements of the table
catalog_hgps.table_large_scale_component[:5]
# you can also try :
# help(catalog_hgps.large_scale_component)
# ## Flux points
#
# The flux points are available via the `flux_points` property as a `gammapy.spectrum.FluxPoints` object.
source = catalog_4fgl["PKS 2155-304"]
flux_points = source.flux_points
flux_points
flux_points.to_table(sed_type="flux")
flux_points.plot(sed_type="e2dnde");
# ## Lightcurves
#
# The Fermi catalogs contain lightcurves for each source. It is available via the `source.lightcurve()` method as a `~gammapy.estimators.LightCurve` object.
lightcurve = catalog_4fgl["4FGL J0349.8-2103"].lightcurve()
lightcurve
lightcurve.table[:3]
lightcurve.plot();
# ## Pretty-print source information
#
# A source object has a nice string representation that you can print.
#
source = catalog_hgps["MSH 15-52"]
print(source)
# You can also call `source.info()` instead and pass as an option what information to print. The options available depend on the catalog, you can learn about them using `help()`
help(source.info)
print(source.info("associations"))
| docs/tutorials/api/catalog.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Concatenation with +; single- and double-quoted literals are interchangeable.
'abc' + "xyz"
# Repetition with *: 'abcabcabc'.
3 * 'abc'
# Quick horizontal rule: 80 underscores.
80 * '_'
# Repetition is commutative — same result as 3 * 'abc'.
'abc' * 3
# A double quote inside a single-quoted literal needs no escaping...
'abc"xyz'
# ...but a single quote does.
'abc"\''
s = 'abcxyz'
# Length of the string: 6.
len(s)
# Negative indices count from the end: 'z'.
s[-1]
# Slice from index 2 up to (not including) the second-to-last character: 'cx'.
s[2:-2]
x = 2
y = 3
# f-string: expressions inside {} are evaluated, giving '2 * 3 = 6'.
f'{x} * {y} = {x*y}'
| Python/Strings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # US c-Si PV installations, Residential, Commerical, and Utility Scale
import numpy as np
import pandas as pd
import os,sys
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 28})
plt.rcParams['figure.figsize'] = (30, 15)
# This journal documents the manipulation of PV installation data for the USA. This covers selection of data, and weighting by marketshare.
cwd = os.getcwd() #grabs current working directory
df_installs_raw = pd.read_csv(cwd+"/../../../PV_ICE/baselines/SupportingMaterial/PVInstalls_USA_AllSources.csv", index_col='Year')
sources = df_installs_raw.columns
#print(len(sources))
plt.plot(df_installs_raw.index,df_installs_raw[sources[0]],lw=4,marker='*',label=sources[0])
plt.plot(df_installs_raw.index,df_installs_raw[sources[1]],lw=3,marker='o',label=sources[1])
plt.plot(df_installs_raw.index,df_installs_raw[sources[2]],lw=2,marker='o',label=sources[2])
plt.plot(df_installs_raw.index,df_installs_raw[sources[3]],lw=2,marker='o',label=sources[3])
plt.plot(df_installs_raw.index,df_installs_raw[sources[4]],lw=2,marker='o',label=sources[4])
plt.plot(df_installs_raw.index,df_installs_raw[sources[5]],lw=2,marker='o',label=sources[5])
plt.yscale('log')
plt.ylabel('PV Installed (MW)')
plt.legend(bbox_to_anchor=(0, 1, 1, 0), loc="lower left")
#plt.plot(df_installs_raw, marker='o')
# # Select the data to use for installs
# The IRENA is consistently lower than the other sources from 2012 through the present. Given that all other sources are in agreement, we will select one of these data sets to use for installation data, rather than IRENA. In this case, we will select the Wood Mackenzie Power & Renewables quarterly reports and PV forecasts from 2010 through 2019.
installs_2010_2019 = df_installs_raw.loc[(df_installs_raw.index>=2010) & (df_installs_raw.index<=2020)]
installs_recent = pd.DataFrame(installs_2010_2019[sources[0]])
installs_recent.columns = ['installed_pv_MW']
print(installs_recent)
# Only 1 dataset exists from 1995 to 2000, from IEA PVPS 2010 National Survey report. This seems to track reasonably well with Wood Mackenzie data between 2008 and 2010. Therefore, this will be used up to 2010.
installs_upto2010 = df_installs_raw.loc[(df_installs_raw.index<2010)]
installs_old = pd.DataFrame(installs_upto2010[sources[1]])
installs_old.columns = ['installed_pv_MW']
print(installs_old)
# However, there are 2 problems with this data. 1) There is an error in the original published table, such that it appears only 4 MW of PV were installed in 2006. To address this problem we will utilize SEIA & GTM/Wood Mackenzie data for 2006. 2) Due to the way the original data was presented, we had to calculate the difference between years to capture installed PV data, meaning that we don't have data for 1995. To address this problem, we will fill in the data point from “IEA PVPS Task 1 1997,” IEA-PVPS, IEA PVPS T1:1997, Mar. 1997. Accessed: Aug. 13, 2020. [Online]. Available: https://iea-pvps.org/wp-content/uploads/2020/01/tr_1995_01.pdf, which specifies the added PV in 1995 in the US (Table 2.2).
#
# +
# Patch two known problems in the pre-2010 installation series.
# 1) The 2006 value in the original IEA PVPS table is a typo (appears as only
#    4 MW); substitute the SEIA/GTM-Wood Mackenzie figure instead.
#    Use .loc rather than chained indexing (df[col][idx] = ...) so the
#    assignment hits the frame directly instead of a possible copy.
installs_old.loc[2006, 'installed_pv_MW'] = df_installs_raw[sources[5]][2006]
# 2) Year-differencing the cumulative source loses 1995; fill it from the
#    IEA PVPS Task 1 1997 report, Table 2.2.
installs_old.loc[1995, 'installed_pv_MW'] = 12.5  # MW
#print(installs_old)
# -
# ### Collect the installation data together into a single df
installs = pd.concat([installs_old,installs_recent])
plt.plot(installs)
plt.yscale('log')
plt.title('Installations of PV in the USA (MW) since 1995')
# # Marketshare weight the installation data for percent of Silicon vs Thin Film
# The installation record compiled above for 1995 through the present covers all PV technologies (cumulative totals), but the tool currently considers silicon technology only. Especially in the United States, where First Solar holds significant marketshare, this distinction is important. Currently, we also do not feel it is critical to track the circular economy of CdTe, given that First Solar already recycles all their panels.
cwd = os.getcwd() #grabs current working directory
df_raw_mrktshr_siVtf = pd.read_csv(cwd+"/../../../PV_ICE/baselines/SupportingMaterial/MarketShare_US_siliconVSthinfilm.csv", index_col='Year')
refs = df_raw_mrktshr_siVtf.columns
plt.plot(df_raw_mrktshr_siVtf.index,df_raw_mrktshr_siVtf[refs[0]],marker='o',label=refs[0])
plt.plot(df_raw_mrktshr_siVtf.index,df_raw_mrktshr_siVtf[refs[1]],marker='o',label=refs[1])
plt.plot(df_raw_mrktshr_siVtf.index,df_raw_mrktshr_siVtf[refs[2]],marker='o',label=refs[2])
plt.plot(df_raw_mrktshr_siVtf.index,df_raw_mrktshr_siVtf[refs[3]],marker='o',label=refs[3])
plt.legend(bbox_to_anchor=(0, 1, 1, 0), loc="lower left")
plt.ylim(0,1.1)
# Since there is no overlap among these resources, we will simply consolidate into a single historical marketshare.
#
# Mints does have marketshare data overlapping the late 2000s to the present; however, that data is based on MFG capacity and shipments at a global scale. This PV installation baseline is for the USA, and is focused on the install side, rather than the MFG capacity side.
#bfill function "collapses" values from all columns into a single column, then make a df of only that data
df_raw_mrktshr_siVtf['All_Marketshare'] = pd.to_numeric(df_raw_mrktshr_siVtf[refs].bfill(axis=1).iloc[:, 0], errors='coerce')
df_mrktshr_us = pd.DataFrame(df_raw_mrktshr_siVtf['All_Marketshare'])
#print( df_mrktshr_us)
df_mrktshr_us_si_complete = df_mrktshr_us.interpolate(limit_area='inside')
plt.plot(df_mrktshr_us_si_complete)
plt.title('Marketshare of Silicon PV installed since 1995')
df_mrktshr_us_si_complete.to_csv(cwd+'/../../../PV_ICE/baselines/SupportingMaterial/output_USA_Si_marketshare.csv', index=True)
# # Marketshare weight PV installs by percent Silicon
# Now we have a marketshare percentage of silicon for 1995 through 2018. We will multiply the PV installs by this silicon marketshare to get the MW of silicon PV installed in the US since 1995.
# +
dfs = [installs,df_mrktshr_us_si_complete]
df = pd.concat(dfs, axis=1, join='inner')
df_clean = df.dropna()
us_si_installs = df_clean.agg('prod', axis='columns')
#print(us_si_installs)
plt.rcParams.update({'font.size': 18})
plt.rcParams['figure.figsize'] = (15, 8)
plt.plot(installs, label='All USA PV Installed', color='orange')
plt.plot(us_si_installs, label='Silicon PV Installed, USA', color='blue')
plt.yscale('log')
plt.title('Silicon PV Installations (MW) in the USA, 1995 through 2018')
plt.legend()
# -
us_si_installs.to_csv(cwd+'/../../../PV_ICE/baselines/SupportingMaterial/output_USA_SiPV_installs.csv', index=True)
# This data only covers installs through 2019. For the remaining years through 2050, a compound annual growth rate of 8.9% was used to predict increasing installations. This compound annual growth rate was sourced from the IRENA 2016 EoL Management Report.
# # Only Commercial and Utility Scale c-Si
# The sources used to create this baseline pre-2010 are:
#
# 1. <NAME>, “U.S. Solar Market Trends 2008,” Interstate Renewable Energy Council, Jul. 2009. Accessed: Sep. 15, 2021. [Online]. Available: https://irecusa.org/wp-content/uploads/2021/07/Solar-Market-Trends-2008.pdf
# 2. <NAME> and <NAME>, “National Survey Report of PV Power Applications in the United States 2010,” IEA-PVPS, National Survey T1-19:2010, 2010. [Online]. Available: https://iea-pvps.org/national-survey-reports/.
# 3. <NAME> al., “U.S. Solar Market Insight Report: 2012 Year in Review Full Report,” Greentech Media Inc. & SEIA, 2013.
#
# Note that data from these sources were digitized from graphs, so there is approximately a +/- 10 MWdc error (based on comparison to charted data within the report).
#
# From 2010 forward we will use Wood Mackenzie Data.
#
| docs/tutorials/baseline development documentation/(baseline development) PV Installations - USA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Watch for any changes in vocabulary.py, data_loader.py, utils.py or model.py, and re-load it automatically.
# %load_ext autoreload
# %autoreload 2
# %reload_ext autoreload
# +
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import transforms
import sys
from pycocotools.coco import COCO
import math
import torch.utils.data as data
import numpy as np
import os
import requests
import time
from utils import train, validate, save_epoch, early_stopping
from data_loader import get_loader
from model import EncoderCNN, DecoderRNN
# +
# Set values for the training variables
batch_size = 32 # batch size
vocab_threshold = 5 # minimum word count threshold
vocab_from_file = True # if True, load existing vocab file
embed_size = 256 # dimensionality of image and word embeddings
hidden_size = 512 # number of features in hidden state of the RNN decoder
num_epochs = 10 # number of training epochs
# Define a transform to pre-process the training images
transform_train = transforms.Compose([
transforms.Resize(256), # smaller edge of image resized to 256
transforms.RandomCrop(224), # get 224x224 crop from random location
transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5
transforms.ToTensor(), # convert the PIL Image to a tensor
transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model
(0.229, 0.224, 0.225))])
# Define a transform to pre-process the validation images
transform_val = transforms.Compose([
transforms.Resize(256), # smaller edge of image resized to 256
transforms.CenterCrop(224), # get 224x224 crop from the center
transforms.ToTensor(), # convert the PIL Image to a tensor
transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model
(0.229, 0.224, 0.225))])
# Build data loader, applying the transforms
train_loader = get_loader(transform=transform_train,
mode='train',
batch_size=batch_size,
vocab_threshold=vocab_threshold,
vocab_from_file=vocab_from_file)
val_loader = get_loader(transform=transform_val,
mode='val',
batch_size=batch_size,
vocab_threshold=vocab_threshold,
vocab_from_file=vocab_from_file)
# The size of the vocabulary
vocab_size = len(train_loader.dataset.vocab)
# +
# Initialize the encoder and decoder
encoder = EncoderCNN(embed_size, architecture='resnet50')
# encoder = EncoderCNN(embed_size, architecture='densenet161')
decoder = DecoderRNN(embed_size, hidden_size, vocab_size)
# Move models to GPU if CUDA is available
if torch.cuda.is_available():
encoder.cuda()
decoder.cuda()
# Define the loss function
criterion = nn.CrossEntropyLoss().cuda() if torch.cuda.is_available() else nn.CrossEntropyLoss()
# Specify the learnable parameters of the model
params = list(decoder.parameters()) + list(encoder.embed.parameters()) + list(encoder.bn.parameters())
# Define the optimizer
optimizer = torch.optim.Adam(params=params, lr=0.001)
# Set the total number of training and validation steps per epoch
total_train_step = math.ceil(len(train_loader.dataset.caption_lengths) / train_loader.batch_sampler.batch_size)
total_val_step = math.ceil(len(val_loader.dataset.caption_lengths) / val_loader.batch_sampler.batch_size)
print ("Number of training steps:", total_train_step)
print ("Number of validation steps:", total_val_step)
# +
# Keep track of train and validation losses and validation Bleu-4 scores by epoch
train_losses = []
val_losses = []
val_bleus = []
# Keep track of the current best validation Bleu score
best_val_bleu = float("-INF")
start_time = time.time()
for epoch in range(1, num_epochs + 1):
train_loss = train(train_loader, encoder, decoder, criterion, optimizer,
vocab_size, epoch, total_train_step)
train_losses.append(train_loss)
val_loss, val_bleu = validate(val_loader, encoder, decoder, criterion,
train_loader.dataset.vocab, epoch, total_val_step)
val_losses.append(val_loss)
val_bleus.append(val_bleu)
if val_bleu > best_val_bleu:
print ("Validation Bleu-4 improved from {:0.4f} to {:0.4f}, saving model to best-model.pkl".
format(best_val_bleu, val_bleu))
best_val_bleu = val_bleu
filename = os.path.join("./models", "best-model.pkl")
save_epoch(filename, encoder, decoder, optimizer, train_losses, val_losses,
val_bleu, val_bleus, epoch)
else:
print ("Validation Bleu-4 did not improve, saving model to model-{}.pkl".format(epoch))
# Save the entire model anyway, regardless of being the best model so far or not
filename = os.path.join("./models", "model-{}.pkl".format(epoch))
save_epoch(filename, encoder, decoder, optimizer, train_losses, val_losses,
val_bleu, val_bleus, epoch)
print ("Epoch [%d/%d] took %ds" % (epoch, num_epochs, time.time() - start_time))
if epoch > 5:
# Stop if the validation Bleu doesn't improve for 3 epochs
if early_stopping(val_bleus, 3):
break
start_time = time.time()
# -
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
import keras
import tensorflow as tf
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
rcParams.update({'font.size': 16})
# Switch AUTORELOAD ON. Disable this when in production mode!
# %load_ext autoreload
# %autoreload 2
# +
#import deep_boltzmann
# -
from deep_boltzmann.models import DoubleWell
from deep_boltzmann.networks.invertible import create_NICERNet, create_RealNVPNet, invnet
from deep_boltzmann.sampling import GaussianPriorMCMC
from deep_boltzmann.networks.plot import test_xz_projection
from deep_boltzmann.util import count_transitions
from deep_boltzmann.sampling.analysis import free_energy_bootstrap, mean_finite, std_finite
def test_sample(network, temperature=1.0, nsample=100000, plot=True):
    """Sample x-configurations from the generator and estimate the free-energy
    profile along x1 from a histogram.

    Parameters
    ----------
    network : generator network with a ``.sample()`` method
    temperature : float
        Sampling temperature passed through to the network.
    nsample : int
        Total number of samples; drawn in chunks of 100000 when larger.
    plot : bool
        If True, overlay the estimate on the exact dimer energy curve
        (the estimate is then shifted to match the exact minimum).

    Returns
    -------
    bin_means, Eh : bin centers along x1 and the -log(histogram) energy
        estimate (shifted only when plot=True).
    """
    if nsample <= 100000:
        sample_z, sample_x, energy_z, energy_x, logw = network.sample(temperature=temperature, nsample=nsample)
    else:
        # Draw in chunks of 100000 to bound memory use.
        # Bug fix: the original passed the full `nsample` to every chunked
        # call, producing nsample**2 / 100000 samples in total.
        sample_x = []
        for i in range(int(nsample / 100000)):
            _, sample_x_, _, _, _ = network.sample(temperature=temperature, nsample=100000)
            sample_x.append(sample_x_)
        sample_x = np.vstack(sample_x)
    # xgen = network.Tzx.predict(np.sqrt(temperature) * np.random.randn(100000, 2))
    params = DoubleWell.params_default.copy()
    params['dim'] = 2
    double_well = DoubleWell(params=params)
    plt.figure(figsize=(4, 4))
    h, b = np.histogram(sample_x[:, 0], bins=100)
    bin_means = 0.5 * (b[:-1] + b[1:])
    Eh = -np.log(h) / temperature
    if plot:
        Ex, E = double_well.plot_dimer_energy(temperature=temperature)
        Eh = Eh - Eh.min() + E.min()  # align estimate with exact minimum
        plt.plot(bin_means, Eh, color='green', linewidth=2)
    return bin_means, Eh
# reweighting
def test_sample_rew(network, temperature=1.0, plot=True):
    """Importance-reweighted free-energy estimate along x1.

    Samples 100000 configurations from the generator, reweights them with the
    sample log-weights, and bootstraps the free-energy profile.

    Returns
    -------
    bin_means, Emean, Estd : bin centers, mean free energy per bin and its
        bootstrap standard deviation.
    """
    # Bug fix: the original ignored `temperature` and always sampled at 1.0.
    sample_z, sample_x, energy_z, energy_x, log_w = network.sample(temperature=temperature, nsample=100000)
    log_w -= log_w.max()  # normalize so the largest weight is exp(0) = 1
    bin_means, Es = free_energy_bootstrap(sample_x[:, 0], range=(-2.5, 2.5), bins=100, nbootstrap=100, log_weights=np.asarray(log_w))
    plt.figure(figsize=(4, 4))
    # NOTE(review): the 10.7 offset aligns the estimate with the exact curve
    # for this particular double well -- confirm if well parameters change.
    Emean = mean_finite(Es, axis=0) - 10.7
    Estd = std_finite(Es, axis=0)
    var = mean_finite(std_finite(Es, axis=0) ** 2)
    if plot:
        double_well.plot_dimer_energy()
        plt.errorbar(bin_means, Emean, Estd, linewidth=2, color='green')
    # variance
    print('Estimator Standard Error: ', np.sqrt(var))
    return bin_means, Emean, Estd
def hist_weights(network):
    """Histogram of the normalized importance weights along x1.

    Draws 100000 samples from the generator, rescales the log-weights so the
    largest weight equals one, and accumulates the weights into 99 bins
    spanning x1 in [-2.5, 2.5].

    Returns
    -------
    (bin_centers, weight_histogram) : both arrays of length 99.
    """
    _, samples, _, _, log_weights = network.sample(temperature=1.0, nsample=100000)
    log_weights -= log_weights.max()
    edges = np.linspace(-2.5, 2.5, 100)
    centers = 0.5 * (edges[:-1] + edges[1:])
    bin_index = np.digitize(samples[:, 0], edges)
    # index 0 and len(edges) collect out-of-range samples; dropped on return
    totals = np.zeros(edges.size + 1)
    for idx, w in zip(bin_index, np.exp(log_weights)):
        totals[idx] += w
    return centers, totals[1:-1]
def test_mcmc(network, nsample):
    """Run a Gaussian-prior MCMC chain through the network and plot the
    x1 trace (top panel) and its PMF against the exact dimer energy
    (bottom panel).  Uses the module-level `double_well`.
    """
    fig, axes = plt.subplots(2, 1, figsize=(7, 7))
    # test sampling
    gp_mcmc = GaussianPriorMCMC(network)#, std_z=[0.7, 1.0])
    Z, X, E, J = gp_mcmc.run(nsample)
    print('Transitions:', count_transitions(X[:, 0], -1, 1))
    axes[0].plot(X[:min(10000, nsample), 0])
    # plot PMF
    h, b = np.histogram(X[:, 0], bins=100)
    b = 0.5 * (b[:-1] + b[1:])
    F = -np.log(h)
    # Bug fix: plot_dimer_energy returns (x_grid, energies) at every other
    # call site, so the single-name assignment made Edimer a tuple and
    # Edimer.min() would raise.  Unpack and keep the energy array.
    _, Edimer = double_well.plot_dimer_energy(axes[1])
    axes[1].plot(b, F - F.min() + Edimer.min())
def plot_transformation_field_2d(transformer, bounds, ngrid=20):
    """Quiver plot of a 2D point transformation.

    Parameters
    ----------
    transformer : object with a ``.predict`` method
        Maps an (N, 2) array of points to an (N, 2) array of outputs.
    bounds : sequence (xmin, xmax, ymin, ymax)
        Extent of the evaluation grid.
    ngrid : int
        Number of grid points per axis (ngrid**2 points total).
    """
    # Build the evaluation grid; same x-major ordering as the original
    # nested loops, but without Python-level appends.
    x_coarse_grid = np.linspace(bounds[0], bounds[1], num=ngrid)
    y_coarse_grid = np.linspace(bounds[2], bounds[3], num=ngrid)
    grid = np.array([[xv, yv] for xv in x_coarse_grid for yv in y_coarse_grid])
    # compute transformation field
    grid_pred = transformer.predict(grid)
    # show field
    plt.figure(figsize=(5, 5))
    plt.quiver(grid[:, 0], grid[:, 1], grid_pred[:, 0], grid_pred[:, 1], units='width')
def getx(x):
    """Reaction coordinate: the first Cartesian component of each sample."""
    first_component = x[:, 0]
    return first_component
# Plotting function, sort of generic
# -----
def plot_network(network, weight_cutoff=1e-2):
    """Four-panel diagnostic of a trained network: x-space samples, their
    z-space transforms, the sampled (proposal) energy profile, and the
    importance-reweighted profile.

    Uses the module-level trajectories (traj_left, x_ts, traj_right) and
    `double_well`.  Bins whose importance weight falls below `weight_cutoff`
    are blanked out in both energy panels.  Returns (fig, axes).
    """
    fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(16, 3.5))
    plt.subplots_adjust(wspace=0.25)
    # Plot X distribution
    axis = axes[0]
    axis.plot(traj_left[:, 0], traj_left[:, 1], linewidth=0, marker='.', markersize=3, color='blue')
    axis.plot(x_ts[:, 0], x_ts[:, 1], linewidth=0, marker='.', markersize=3, color='orange')
    axis.plot(traj_right[:, 0], traj_right[:, 1], linewidth=0, marker='.', markersize=3, color='red')
    axis.set_xlabel('$x_1$')
    axis.set_xlim(-3, 3)
    axis.set_ylabel('$x_2$', labelpad=-12)
    axis.set_ylim(-4, 4)
    axis.set_yticks([-4, -2, 0, 2, 4]);
    # Plot Z distribution
    axis = axes[1]
    z_left = network.transform_xz(traj_left)
    z_ts = network.transform_xz(x_ts)
    z_right = network.transform_xz(traj_right)
    axis.plot(z_left[:, 0], z_left[:, 1], linewidth=0, marker='.', markersize=3, color='blue')
    axis.plot(z_ts[:, 0], z_ts[:, 1], linewidth=0, marker='.', markersize=3, color='orange')
    axis.plot(z_right[:, 0], z_right[:, 1], linewidth=0, marker='.', markersize=3, color='red')
    # shaded circles at radii 1/2/3 visualize the Gaussian prior shells
    circle = plt.Circle((0, 0), radius=1.0, color='black', alpha=0.4, fill=True)
    axis.add_artist(circle)
    circle = plt.Circle((0, 0), radius=2.0, color='black', alpha=0.25, fill=True)
    axis.add_artist(circle)
    circle = plt.Circle((0, 0), radius=3.0, color='black', alpha=0.1, fill=True)
    axis.add_artist(circle)
    axis.set_xlabel('$z_1$')
    axis.set_xlim(-4, 4)
    axis.set_ylabel('$z_2$', labelpad=-12)
    axis.set_ylim(-4, 4)
    axis.set_yticks([-4, -2, 0, 2, 4]);
    # Plot proposal distribution
    X1, Y1 = test_sample(network, temperature=1.0, plot=False);
    _, W1 = hist_weights(network)
    axis = axes[2]
    Ex, E = double_well.plot_dimer_energy(axis=axis, temperature=1.0)
    Y1 = Y1 - Y1.min() + E.min()  # align histogram estimate with exact minimum
    Inan = np.where(W1 < weight_cutoff)
    Y1[Inan] = np.nan  # blank out poorly-sampled (low-weight) bins
    #Y2 = Y2 - Y2.min() + E.min()
    #axis.plot(X2, Y2, color='#FF6600', linewidth=2, label='ML+KL+RC')
    axis.plot(X1, Y1, color='orange', linewidth=2, label='ML+KL')
    axis.set_xlim(-3, 3)
    axis.set_ylim(-12, 5.5)
    axis.set_yticks([]);
    axis.set_xlabel('$x_1$')
    axis.set_ylabel('Energy / kT')
    #plt.legend(ncol=1, loc=9, fontsize=12, frameon=False)
    # Plot reweighted distribution
    RX1, RY1, DR1 = test_sample_rew(network, temperature=1.0, plot=False);
    axis = axes[3]
    Ex, E = double_well.plot_dimer_energy(axis=axis, temperature=1.0)
    RY1 = RY1 - RY1[np.isfinite(RY1)].min() + E.min()
    RY1[Inan] = np.nan  # reuse the same low-weight mask as the proposal panel
    #RY1[RY1 > -4] = np.nan
    #RY2 = RY2 - RY2[np.isfinite(RY2)].min() + E.min()
    #axis.errorbar(RX2, RY2, DR2, color='#FF6600', linewidth=2, label='ML+KL+RC')
    axis.errorbar(RX1, RY1, DR1, color='orange', linewidth=2, label='ML+KL')
    axis.set_xlim(-3, 3)
    axis.set_ylim(-12, 5.5)
    axis.set_yticks([-12, -10, -8, -6, -4, -2, 0, 2, 4]);
    axis.set_xlabel('$x_1$')
    axis.set_ylabel('')
    return fig, axes
def low_energy_fraction(energies, Emax):
    """For each energy array in `energies`, return the fraction of entries
    strictly below `Emax`, as a float array."""
    fractions = [np.count_nonzero(E < Emax) / E.size for E in energies]
    return np.array(fractions)
def plot_convergence(hist_ML, hist_KL, enerx_cut, enerz_cut, MLcol=1, KLcol=2):
    """Plot ML loss, KL loss and low-energy fractions over training iterations.

    Parameters
    ----------
    hist_ML, hist_KL : history tuples returned by train_ML / train_flexible
        (run with return_test_energies=True).
    enerx_cut, enerz_cut : energy thresholds for the low-energy fractions in
        x- and z-space respectively.
    MLcol, KLcol : columns of hist_KL[1] holding the ML and KL losses.

    Returns (fig, axes).
    """
    fig, axes = plt.subplots(nrows=3, ncols=1, figsize=(5, 10))
    niter1 = len(hist_ML[0])
    niter2 = hist_KL[1].shape[0]
    niter = niter1 + niter2
    # ML loss: ML-only phase followed by the ML column of the flexible phase
    losses_ML = np.concatenate([hist_ML[0], hist_KL[1][:, MLcol]])
    xticks = np.arange(niter1 + niter2) + 1
    axes[0].plot(xticks, losses_ML, color='black')
    axes[0].set_xlim(0, niter + 1)
    axes[0].set_ylabel('ML loss')
    axes[0].axvline(x=200, color='red', linestyle='--', linewidth=3)  # phase boundary
    # KL loss (flexible phase only)
    losses_KL = hist_KL[1][:, KLcol]
    xticks = np.arange(niter1, niter1 + niter2) + 1
    axes[1].plot(xticks, losses_KL, color='black')
    axes[1].set_xlim(0, niter + 1)
    axes[1].set_ylabel('KL loss')
    axes[1].axvline(x=200, color='red', linestyle='--', linewidth=3)
    # low energy fractions
    enerx = hist_ML[2] + hist_KL[3]
    enerz = hist_ML[3] + hist_KL[4]
    lef_x = low_energy_fraction(enerx, enerx_cut)
    lef_z = low_energy_fraction(enerz, enerz_cut)
    axes[2].plot(lef_x, color='black', label='x')
    axes[2].plot(lef_z, color='blue', label='z')
    axes[2].set_xlim(0, niter + 1)
    axes[2].set_ylim(0, 1.05)
    axes[2].axvline(x=200, color='red', linestyle='--', linewidth=3)
    # Bug fix: 'Training iterations' was set with set_ylabel and then
    # immediately overwritten by the real y-label, so it never appeared.
    axes[2].set_xlabel('Training iterations')
    axes[2].set_ylabel('Low energy fraction')
    axes[2].legend()
    return fig, axes
# Double well
# ---
paper_dir = '/Users/noe/data/papers/NoeEtAl_BoltzmannGeneratorsRev/'
# +
x_grid = np.linspace(-3, 3, num=200)
X = np.hstack([x_grid[:, None], np.zeros((x_grid.size, 2 - 1))])
X;
# -
# energy at this point is:
# NOTE(review): the lines below look like a copy of DoubleWell.energy()
# pasted into the notebook for reference -- they use `self`, an undefined
# `x`, and a module-level `return`, so this cell cannot execute as-is.
dimer_energy = self.params['a4'] * x[:, 0] ** 4 - self.params['a2'] * x[:, 0] ** 2 + self.params['a1'] * x[:, 0]
oscillator_energy = 0.0
if self.dim == 2:
    oscillator_energy = (self.params['k'] / 2.0) * x[:, 1] ** 2
return dimer_energy + oscillator_energy
params = DoubleWell.params_default.copy()
'''
{'a4' : 1.0,
'a2' : 6.0,
'a1' : 1.0,
'k' : 1.0,
'dim' : 1}
These are all constants for the energy equation.
'''
params['dim'] = 2
double_well = DoubleWell(params=params) # creates it with all of the above but a dim of 2.
plt.figure(figsize=(5,5))
double_well.plot_dimer_energy();
#plt.savefig(paper_dir + 'figs/double_well/potential.pdf', bbox_inches='tight')
def plot_potential(cbar=True, orientation='vertical', figsize=(4, 5.5)):
    """Filled-contour plot of the 2D double-well potential.

    Uses the module-level `double_well` energy model.  Energies are clipped
    at 10 kT so the wells remain visible.  `orientation='horizontal'`
    places a compact white-styled colorbar inside the figure; any other
    value uses a standard vertical colorbar.
    """
    # 2D potential evaluated on a 100x100 grid
    xgrid = np.linspace(-3, 3, 100)
    ygrid = np.linspace(-7, 7, 100)
    Xgrid, Ygrid = np.meshgrid(xgrid, ygrid)
    X = np.vstack([Xgrid.flatten(), Ygrid.flatten()]).T
    E = double_well.energy(X)
    E = E.reshape((100, 100))
    E = np.minimum(E, 10.0)  # clip high energies for display
    plt.figure(figsize=figsize)
    plt.contourf(Xgrid, Ygrid, E, 50, cmap='jet', vmax=4)
    if cbar:
        if orientation == 'horizontal':
            cbar = plt.colorbar(orientation='horizontal', shrink=0.3, aspect=10, anchor=(0.5, 7.5), use_gridspec=False)#, anchor=(0, 0.5))
            # white outline and ticks so the bar reads against the colormap
            cbar.outline.set_linewidth(1)
            cbar.outline.set_color('white')
            cbar.outline.fill = False
            plt.setp(plt.getp(cbar.ax.axes, 'xticklabels'), color='w')
            cbar.ax.xaxis.set_tick_params(color='white')
            #cbar.set_label('Energy / kT', labelpad=0, y=0.0, color='white')
        else:
            cbar = plt.colorbar()
            cbar.set_label('Energy / kT', labelpad=-15, y=0.6)
            cbar.set_ticks([-10, 0, 10])
    plt.xticks([-2, 0, 2])
    plt.yticks([-5, 0, 5])
    plt.xlabel('$x_1$', labelpad=0)
    plt.ylabel('$x_2$', labelpad=-10)
plot_potential(orientation='horizontal')
#plt.savefig(paper_dir + 'figs/double_well/potential2D_horizontal.pdf', bbox_inches='tight')
# simulation data
from deep_boltzmann.sampling import MetropolisGauss
# +
nsteps = 10000
x0_left = np.array([[-1.8, 0.0]])
x0_right = np.array([[1.8, 0.0]])
sampler = MetropolisGauss(double_well, x0_left, noise=0.1, stride=10)
sampler.run(nsteps)
traj_left = sampler.traj.copy()
sampler.reset(x0_left)
sampler.run(nsteps)
traj_left_val = sampler.traj.copy()
sampler.reset(x0_right)
sampler.run(nsteps)
traj_right = sampler.traj.copy()
sampler.reset(x0_right)
sampler.run(nsteps)
traj_right_val = sampler.traj.copy()
# -
plt.figure(figsize=(9, 4))
ax1 = plt.subplot2grid((1, 3), (0, 0), colspan=2)
ax2 = plt.subplot2grid((1, 3), (0, 2))
ax1.plot(traj_left[:, 0], color='blue', alpha=0.7)
ax1.plot(traj_right[:, 0], color='red', alpha=0.7)
ax1.set_xlim(0, 1000)
ax1.set_ylim(-2.5, 2.5)
ax1.set_xlabel('Time / steps')
ax1.set_ylabel('$x_1$ / a.u.')
ax2.hist(traj_left[:, 0], 30, orientation='horizontal', histtype='stepfilled', color='blue', alpha=0.2);
ax2.hist(traj_left[:, 0], 30, orientation='horizontal', histtype='step', color='blue', linewidth=2);
ax2.hist(traj_right[:, 0], 30, orientation='horizontal', histtype='stepfilled', color='red', alpha=0.2);
ax2.hist(traj_right[:, 0], 30, orientation='horizontal', histtype='step', color='red', linewidth=2);
ax2.set_xticks([])
ax2.set_yticks([])
ax2.set_ylim(-2.5, 2.5)
ax2.set_xlabel('Probability')
#plt.savefig(paper_dir + 'figs/double_well/prior_trajs.pdf', bbox_inches='tight')
x = np.vstack([traj_left, traj_right])
xval = np.vstack([traj_left_val, traj_right_val])
# prepare transition state
x_ts = np.vstack([np.zeros(1000), (1.0/double_well.params['k']) * np.random.randn(1000)]).T
# Estimate transition rate
# ----
params
# change barrier but keep a/b
params_flat = params.copy()
params_flat['a2'] = 1.5
params_flat['a4'] = 0.25
double_well_flat = DoubleWell(params=params_flat)
double_well_flat.plot_dimer_energy();
#double_well_flat.plot_dimer_energy();
sampler = MetropolisGauss(double_well_flat, x0_left, noise=0.1, stride=100)
nstep = 1000000
sampler.run(nstep)
# ### all of the above is getting datapoints from the well and a flat version.
# and seeing how large their energy barriers are.
barrier_double_well = double_well.energy(np.array([[0, 0]])) - double_well.energy(np.array([[-2, 0]]))
barrier_double_well_flat = double_well_flat.energy(np.array([[0, 0]])) - double_well_flat.energy(np.array([[-2, 0]]))
from deep_boltzmann.util import count_transitions
Ntrans_low_barrier = count_transitions(sampler.traj[:, 0], -1, 1) # if it goes
#past plus or minus one when its low energy. starting from teh left state.
'''starts from the left state. then if it is detected in the right, we add one and
look for it to cross back over. '''
rate_low_barrier = float(Ntrans_low_barrier) / float(nstep)
print('rate of crossing for low rate barrier', rate_low_barrier)
dE = barrier_double_well - barrier_double_well_flat
rate_high_barrier = rate_low_barrier * np.exp(-dE)
print(1.0/rate_high_barrier)
#plt.plot(sampler.traj[:, 0])
# Training
# -------
epochsZ = 200
epochsE = 500
batchsize_ML = 128
batchsize_KL = 1000
temperature = 1.0
explore = 1.0
# Networks
# ----
# +
network_NICER_KLML = invnet(double_well.dim, 'NNNNS', double_well, nl_layers=3, nl_hidden=100,
nl_activation='relu', nl_activation_scale='tanh')
'''
nl - means non linear
layer_types : str
String describing the sequence of layers. Usage:
N NICER layer
n NICER layer, share parameters with last layer
R RealNVP layer
r RealNVP layer, share parameters with last layer
S Scaling layer
W Whiten layer
P Permute layer
Z Split dimensions off to latent space, leads to a merge and 3-way split.
Splitting and merging layers will be added automatically
'''
'''have a command to split/merge indices of the points. '''
# if scaling is set to be none then it is trainable!
# rg_splitfrac : float - Splitting fraction for Z layers
'''
if energy_model is None:
return InvNet(dim, layers, prior=prior)
else:
return EnergyInvNet(energy_model, layers, prior=prior)
'''
'''InvNet - parent class. '''
'''EnergyInvNet - I think this is used in almost every case. '''
# -
'''nchannels=2
ndim=5
lol = np.tile(np.arange(nchannels), int(ndim/nchannels)+1)[:ndim]
np.where(lol == 0)[0]
np.concatenate([ [5,3], [6,3] ]).argsort() # combines lists. this is cool. '''
# +
# maybe it splits at 0110110. 0,3,6,1,2,4,5 not sure why youd want to argsort this.
# -
hist_NICER_KLML1 = network_NICER_KLML.train_ML(x, xval=xval, epochs=epochsZ, batch_size=batchsize_ML, std=1.0,
verbose=0, return_test_energies=True)
hist_NICER_KLML2 = network_NICER_KLML.train_flexible(x, xval=xval, lr=0.001, epochs=epochsE, batch_size=batchsize_KL,
std=1.0,
weight_ML=1.0, weight_KL=1.0, weight_MC=0.0, weight_W2=0.0,
weight_RCEnt=0.0, rc_func=getx, rc_min=-2.5, rc_max=2.5,
temperature=temperature, explore=explore, verbose=0,
return_test_energies=True)
plot_convergence(hist_NICER_KLML1, hist_NICER_KLML2, 0, 2)
fig, axes = plot_network(network_NICER_KLML, weight_cutoff=1e-2);
# +
network_RNVP_KLML = invnet(double_well.dim, 'RRRR', double_well, nl_layers=3, nl_hidden=100,
nl_activation='relu', nl_activation_scale='tanh')
'''invnet(double_well.dim, 'NNNNS', double_well, nl_layers=3, nl_hidden=100,
nl_activation='relu', nl_activation_scale='tanh')'''
# -
hist_RNVP_KLML1 = network_RNVP_KLML.train_ML(x, xval=xval, epochs=epochsZ, batch_size=batchsize_ML, std=1.0, verbose=0,
return_test_energies=True)
hist_RNVP_KLML2 = network_RNVP_KLML.train_flexible(x, xval=xval, lr=0.001, epochs=epochsE, batch_size=batchsize_KL,
std=1.0,
weight_ML=1.0, weight_KL=1.0, weight_MC=0.0, weight_W2=0.0,
weight_RCEnt=0.0, rc_func=getx, rc_min=-2.5, rc_max=2.5,
temperature=temperature, explore=explore, verbose=0,
return_test_energies=True)
def energy_cut_z(ndim, nstd=3):
    """Energy threshold in latent space: mean plus `nstd` standard deviations
    of the prior energy 0.5*||z||^2 for z ~ N(0, I_ndim).

    0.5*||z||^2 is half a chi-squared variable with `ndim` degrees of
    freedom, so its mean is ndim/2 and its variance is ndim/2.  The original
    estimated these moments from 10000 random samples; using the closed form
    makes the cut deterministic and exact.
    """
    mean = 0.5 * ndim
    std = np.sqrt(0.5 * ndim)
    return mean + nstd * std
zcut = energy_cut_z(double_well.dim, nstd=3)
plot_convergence(hist_RNVP_KLML1, hist_RNVP_KLML2, 0, zcut)
#plt.savefig(paper_dir + 'figs/double_well/training_convergence_KLML.pdf', bbox_inches='tight', transparent=True)
fig, axes = plot_network(network_RNVP_KLML, weight_cutoff=1e-2);
#fig.savefig(paper_dir + 'figs/double_well/network_RNVP_KLML.pdf', bbox_inches='tight')
# HERE IT USES NICE RC
network_NICER_KLRC = invnet(double_well.dim, 'NNNNS', double_well, nl_layers=3, nl_hidden=100,
nl_activation='relu', nl_activation_scale='tanh') # THIS ONE USES REACTION COORDS
hist_NICER_KLRC1 = network_NICER_KLRC.train_ML(x, xval=xval, epochs=epochsZ, batch_size=128, std=1.0, verbose=0,
return_test_energies=True)
hist_NICER_KLRC2 = network_NICER_KLRC.train_flexible(x, xval=xval, lr=0.001, epochs=epochsE, batch_size=2048, std=1.0,
weight_ML=0.01, weight_KL=1.0, weight_MC=0.0, weight_W2=0.0,
weight_RCEnt=1.0, rc_func=getx, rc_min=-2.5, rc_max=2.5,
temperature=temperature, explore=explore, verbose=0,
return_test_energies=True)
plot_convergence(hist_NICER_KLRC1, hist_NICER_KLRC2, 0, 2)
fig, axes = plot_network(network_NICER_KLRC, weight_cutoff=1e-2);
#fig.savefig(paper_dir + 'figs/double_well/network_NICER_KLRC.pdf', bbox_inches='tight')
# NVP RC
network_RNVP_KLRC = invnet(double_well.dim, 'RRRR', double_well, nl_layers=3, nl_hidden=100,
nl_activation='relu', nl_activation_scale='tanh')
hist_RNVP_KLRC1 = network_RNVP_KLRC.train_ML(x, xval=xval, epochs=epochsZ, batch_size=batchsize_ML, std=1.0, verbose=0,
return_test_energies=True)
hist_RNVP_KLRC2 = network_RNVP_KLRC.train_flexible(x, xval=xval, lr=0.001, epochs=epochsE, batch_size=batchsize_KL,
std=1.0,
weight_ML=0.01, weight_KL=1.0, weight_MC=0.0, weight_W2=0.0,
weight_RCEnt=1.0, rc_func=getx, rc_min=-2.5, rc_max=2.5,
temperature=temperature, explore=explore, verbose=0,
return_test_energies=True) # NVP RC
# Figure-generation cells for the double-well system (NICER / RealNVP
# Boltzmann generators).  Names used here (double_well, the trained networks,
# trajectories, plotting helpers, paper_dir) are defined in earlier notebook
# cells that are not visible in this chunk.
zcut = energy_cut_z(double_well.dim, nstd=3)  # latent-energy cutoff at 3 std
plot_convergence(hist_RNVP_KLRC1, hist_RNVP_KLRC2, 0, zcut)
#plt.savefig(paper_dir + 'figs/double_well/training_convergence_KLRC.pdf', bbox_inches='tight', transparent=True)
fig, axes = plot_network(network_NICER_KLRC, weight_cutoff=1e-2);
#fig.savefig(paper_dir + 'figs/double_well/network_RNVP_KLRC.pdf', bbox_inches='tight')
# **Save Figures**
# Potential-energy surface with the MD trajectories and transition states
# overlaid in configuration (x) space.
plot_potential(cbar=True, orientation='horizontal')
#plt.savefig(paper_dir + 'figs/double_well/potential2D_horizontal.pdf', bbox_inches='tight')
#plot_potential(cbar=True)
plt.plot(traj_left[::5, 0], traj_left[::5, 1], linewidth=0, marker='.', markersize=4, color='teal')
plt.plot(x_ts[:, 0], x_ts[:, 1], linewidth=0, marker='.', markersize=3, color='yellow')
plt.plot(traj_right[::5, 0], traj_right[::5, 1], linewidth=0, marker='.', markersize=4, color='red')
plt.xlabel('$x_1$')
#plt.xlim(-3, 3)
plt.ylabel('$x_2$')
#plt.ylim(-4, 4)
plt.yticks([-4, -2, 0, 2, 4]);
#plt.savefig(paper_dir + 'figs/double_well/xdist.pdf', bbox_inches='tight', transparent=True)
# Map the trajectories into latent (z) space with the NICER network.
z_left = network_NICER_KLRC.transform_xz(traj_left)
z_ts = network_NICER_KLRC.transform_xz(x_ts)
z_right = network_NICER_KLRC.transform_xz(traj_right)
plt.figure(figsize=(4, 4))
plt.plot(z_left[:, 0], z_left[:, 1], linewidth=0, marker='.', markersize=3, color='blue')
plt.plot(z_ts[:, 0], z_ts[:, 1], linewidth=0, marker='.', markersize=3, color='orange')
plt.plot(z_right[:, 0], z_right[:, 1], linewidth=0, marker='.', markersize=3, color='red')
# Shaded circles of radius 1/2/3 visualize the latent prior's spread.
circle = plt.Circle((0, 0), radius=1.0, color='black', alpha=0.4, fill=True)
plt.gca().add_artist(circle)
circle = plt.Circle((0, 0), radius=2.0, color='black', alpha=0.25, fill=True)
plt.gca().add_artist(circle)
circle = plt.Circle((0, 0), radius=3.0, color='black', alpha=0.1, fill=True)
plt.gca().add_artist(circle)
plt.xlabel('$z_1$')
plt.xlim(-4, 4)
plt.ylabel('$z_2$')
plt.ylim(-4, 4)
plt.yticks([-4, -2, 0, 2, 4]);
#plt.savefig(paper_dir + 'figs/double_well/zdist_NICER.pdf', bbox_inches='tight')
plot_transformation_field_2d(network_NICER_KLRC.Txz, [-3, 3, -3, 3], ngrid=20)
plt.xlabel('z1');
plt.ylabel('z2');
# Draw samples from both trained generators and compare their x1 histograms.
X1, Y1 = test_sample(network_NICER_KLML, temperature=1.0, plot=False);
X2, Y2 = test_sample(network_NICER_KLRC, temperature=1.0, plot=False);
nsample=100000
# NOTE(review): `temperature` here comes from an earlier cell — confirm its value.
_, sample_x1, _, energy_x1, _ = network_NICER_KLML.sample(temperature=temperature, nsample=nsample)
_, sample_x2, _, energy_x2, _ = network_NICER_KLRC.sample(temperature=temperature, nsample=nsample)
plt.figure(figsize=(4,2))
nsample = 100000
plt.hist(sample_x1[:, 0], 1000, histtype='step', color='#00BB00', linewidth=2, label='KL+ML');
plt.hist(sample_x2[:, 0], 1000, histtype='step', color='#FF6600', linewidth=2, label='KL+RC');
plt.xlim(-3, 3)
plt.yticks([]);
plt.xlabel('$x_1$')
plt.ylabel('Frequency')
plt.xticks([]);
plt.legend(ncol=1, loc='upper right', frameon=False)
#plt.savefig(paper_dir + 'figs/double_well/hist_samplex.pdf', bbox_inches='tight', transparent=True)
# Reference MD runs started from both wells, used as ground truth energies.
nsteps = 100000
sampler.reset(x0_left)
sampler.run(nsteps)
traj_left_ref = sampler.traj.copy()
sampler.reset(x0_right)
sampler.run(nsteps)
traj_right_ref = sampler.traj.copy()
x_ref = np.vstack([traj_left_ref, traj_right_ref])
plt.figure(figsize=(4, 2))
nsample = 100000
energy_md = double_well.energy(x_ref)
plt.hist(energy_md, 100, density=True, color='grey', linewidth=1, label='MD');
plt.hist(energy_md, 100, density=True, histtype='step', color='#555555', linewidth=2);
plt.hist(energy_x1, 1000, density=True, histtype='step', color='#00BB00', linewidth=2, label='KL+ML');
plt.hist(energy_x2, 1000, density=True, histtype='step', color='#FF6600', linewidth=2, label='KL+RC');
plt.xlim(-12, 5)
plt.yticks([]);
plt.xlabel('Energy / kT')
plt.ylabel('Frequency')
plt.legend(ncol=1, loc=9, fontsize=12, frameon=False)
#plt.savefig(paper_dir + 'figs/double_well/hist_sample_energy.pdf', bbox_inches='tight', transparent=True)
# Free-energy profiles, shifted so the minima line up with the exact curve.
plt.figure(figsize=(4,4))
Ex, E = double_well.plot_dimer_energy(temperature=1.0)
Y1 = Y1 - Y1.min() + E.min()
Y2 = Y2 - Y2.min() + E.min()
plt.plot(X1, Y1, color='#00BB00', linewidth=2, label='KL+ML')
plt.plot(X2, Y2, color='#FF6600', linewidth=2, label='KL+RC')
plt.xlim(-3, 3)
plt.ylim(-12, 5.5)
plt.yticks([-12, -10, -8, -6, -4, -2, 0, 2, 4]);
plt.legend(ncol=1, loc=9, fontsize=12, frameon=False)
#plt.savefig(paper_dir + 'figs/double_well/energy_px.pdf', bbox_inches='tight')
# Reweighted free-energy estimates with error bars.
RX1, RY1, DR1 = test_sample_rew(network_NICER_KLML, temperature=1.0, plot=False);
RX2, RY2, DR2 = test_sample_rew(network_NICER_KLRC, temperature=1.0, plot=False);
Ex, E = double_well.plot_dimer_energy(temperature=1.0)
#test_sample_rew(network_NICER_KLRC, temperature=1.0);
plt.figure(figsize=(4, 5))
plt.plot(Ex, E, linewidth=3, color='black')
RY1 = RY1 - RY1[np.isfinite(RY1)].min() + E.min()
RY1[RY1 > -4] = np.nan  # mask high (poorly estimated) values on the ML+KL curve
RY2 = RY2 - RY2[np.isfinite(RY2)].min() + E.min()
plt.errorbar(RX2, RY2, DR2, color='#FF6600', linewidth=2, label='ML+KL+RC')
plt.errorbar(RX1, RY1, DR1, color='#00BB00', linewidth=2, label='ML+KL')
plt.xlim(-3, 3)
plt.ylim(-12, 4)
#plt.yticks([-12, -10, -8, -6, -4, -2, 0, 2, 4]);
plt.yticks([-12, -8, -4, 0, 4]);
plt.xticks([]);
plt.xlabel('$x_1$')
plt.ylabel('Free energy / kT', labelpad=-10)
#plt.savefig(paper_dir + 'figs/double_well/free_energy_rew.pdf', bbox_inches='tight', transparent=True)
# Same latent-space scatter as above, but for the RealNVP network.
z_left = network_RNVP_KLRC.transform_xz(traj_left)
z_ts = network_RNVP_KLRC.transform_xz(x_ts)
z_right = network_RNVP_KLRC.transform_xz(traj_right)
plt.figure(figsize=(4, 4))
plt.plot(z_left[:, 0], z_left[:, 1], linewidth=0, marker='.', markersize=3, color='blue')
plt.plot(z_ts[:, 0], z_ts[:, 1], linewidth=0, marker='.', markersize=3, color='orange')
plt.plot(z_right[:, 0], z_right[:, 1], linewidth=0, marker='.', markersize=3, color='red')
circle = plt.Circle((0, 0), radius=1.0, color='black', alpha=0.4, fill=True)
plt.gca().add_artist(circle)
circle = plt.Circle((0, 0), radius=2.0, color='black', alpha=0.25, fill=True)
plt.gca().add_artist(circle)
circle = plt.Circle((0, 0), radius=3.0, color='black', alpha=0.1, fill=True)
plt.gca().add_artist(circle)
plt.xlabel('$z_1$')
plt.xlim(-4, 4)
plt.ylabel('$z_2$')
plt.ylim(-4, 4)
plt.yticks([-4, -2, 0, 2, 4]);
#plt.savefig(paper_dir + 'figs/double_well/zdist_realNVP.pdf', bbox_inches='tight', transparent=True)
def latent_interpolation(bg, x1, x2, nstep=1000):
    """Interpolate between two configurations along a straight line in the
    generator's latent space.

    Both endpoints are mapped x -> z with `bg.transform_xz`, `nstep` evenly
    spaced points are taken on the latent segment, and the segment is mapped
    back z -> x.  Returns the x-space path as an array of shape (nstep, dim).
    """
    weights = np.linspace(0, 1, num=nstep).reshape(-1, 1)
    z_start = bg.transform_xz(np.array([x1]))
    z_end = bg.transform_xz(np.array([x2]))
    zpath = z_start + weights * (z_end - z_start)
    return bg.transform_zx(zpath)
# Interpolate between random left-well and right-well configurations in
# latent space and overlay the resulting x-space paths on the potential.
xpaths = []
for i in range(10):
    x1 = traj_left[np.random.randint(1000)]
    x2 = traj_right[np.random.randint(1000)]
    xpaths.append(latent_interpolation(network_RNVP_KLRC, x1, x2, nstep=1000))
plot_potential(cbar=False, figsize=(5, 5))
plt.xticks([])
plt.xlabel('')
plt.yticks([])
plt.ylabel('')
for xpath in xpaths:
    plt.plot(xpath[:, 0], xpath[:, 1], linewidth=2, color='white')
#plt.savefig(paper_dir + 'figs/double_well/paths.pdf', bbox_inches='tight', transparent=True)
| notebooks/.ipynb_checkpoints/Fig2_DoubleWell-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.4 64-bit (''base'': conda)'
# name: python37464bitbaseconda889e2a20be874e85ba6bccbdfb8985e1
# ---
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import os
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
# -
# Gaussian Naive Bayes on the iris dataset (loaded via seaborn).
df = sns.load_dataset('iris')
df.head()
# Feature matrix: the four numeric measurements.
X = df[['sepal_length','sepal_width','petal_length','petal_width']]
# Encode the species strings as integer class labels.
y_enc = LabelEncoder()
labels = y_enc.fit_transform(df['species'])
df['labels'] = labels
y = df['labels']
# 70/30 train/test split with a fixed seed for reproducibility.
xtrain,xtest,ytrain,ytest = train_test_split(X,y, test_size=.3, random_state = 0)
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(xtrain,ytrain)
clf.score(xtest,ytest) * 100  # held-out accuracy in percent
from sklearn.metrics import confusion_matrix
# NOTE(review): this confusion matrix is computed on the FULL dataset
# (training rows included), so it overstates generalization performance.
confusion_matrix(y , clf.predict(X))
| supervised_ml/classfication/naive_bayes/naive_bayes_algo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# The orbital parameters of the binary solar twin HIP 67620
# ---------------------
#
# `radial` is a simple program designed to do a not very trivial task: simulate radial velocities of a star orbited by a massive object or "reverse engineer" radial velocity measurements to estimate the orbital parameters of the system being studied. The formalism behind it is based on https://arxiv.org/abs/1009.1738.
#
# Our objective in this notebook is to use radial velocity data of the solar twin HIP 67620 to estimate the projected mass, separation and other orbital parameters of its companion. We start by importing the necessary packages. Notice that we will specifically import the modules `orbit`, `estimate`, and `dataset` from the `radial` package.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import astropy.units as u
from radial import estimate, dataset
# %matplotlib inline
# We then extract the data from the text files located in the `tests` folder. They will be stored in `RVDataSet` objects, which are defined in the `dataset` module.
# Load the three RV datasets for HIP 67620; times are offset to small numbers
# and each dataset's RVs are shifted to zero mean.
harps = dataset.RVDataSet(file='../../tests/HIP67620_HARPS.dat', # File name
                          t_offset=-2.45E6, # Time offset (units of days)
                          rv_offset='subtract_mean', # RV offset
                          instrument_name='HARPS',
                          target_name='HIP 67620',
                          skiprows=1, # Number of rows to skip in the data file
                          t_col=5, # Column corresponding to time in the data file
                          rv_col=6, # Column corresponding to RVs
                          rv_unc_col=7 # Column corresponding to RV uncertainties
                         )
aat = dataset.RVDataSet(file='../../tests/HIP67620_AAT.dat', t_offset=-2.45E6, rv_offset='subtract_mean',
                        instrument_name='AATPS', target_name='HIP 67620', delimiter=',')
w16 = dataset.RVDataSet(file='../../tests/HIP67620_WF16.dat', t_offset=-5E4, rv_offset='subtract_mean',
                        instrument_name='W16', target_name='HIP 67620', t_col=1,
                        rv_col=3, rv_unc_col=4)
# We can visualize the radial velocities by running the function `plot()` of a given `dataset` object. For instance:
w16.plot()
# Now that we have the data, how do we estimate the orbital parameters of the system? We use the methods and functions inside the `estimate` module. But first, we need to provide an initial guess for the orbital parameters. They are:
#
# * `k`: radial velocity semi-amplitude $K$ (in m/s)
# * `period`: orbital period $T$ (in days)
# * `t0`: time of periastron passage $t_0$ (in days)
# * `omega`: argument of periapse $\omega$ (in radians)
# * `ecc`: eccentricity of the orbit $e$
# * `gamma_X`: RV offset $\gamma$ of the dataset number $X$ (in m/s)
#
# A first guess is usually an educated guess based on either a periodogram and/or simple visual inspection of the data.
# guess is a dictionary, which is a special type of "list" in python
# Instead of being indexed by a number, the items in a dictionary
# are indexed by a key (which is a string)
# Initial guess for the orbital parameters: K in m/s, period and t0 in days,
# omega in radians, plus one RV offset (gamma) per dataset.
guess = {'k': 6000,
         'period': 4000,
         't0': 5000,
         'omega': 180 * np.pi / 180,
         'ecc': 0.3,
         'gamma_0': 0,
         'gamma_1': 0,
         'gamma_2': 0}
# Now we need to instantiate a `FullOrbit` object with the datasets and our guess, as well as the parametrization option we want to use. Then, we plot it.
estim = estimate.FullOrbit(datasets=[w16],
                           guess=guess,
                           parametrization='mc10')
plot = estim.plot_rvs(plot_guess=True, fold=False, legend_loc=2)
plt.show()
# We estimate the orbital parameters of the system using the Nelder-Mead optimization algorithm implemented in the `lmfit` package. This will compute the best solution or, in other words, the one that minimizes the residuals of the fit.
#
# It is probable that the first solutions are not good, and that is fine. Just run the estimation a couple of times until you get the satisfactory result.
result = estim.lmfit_orbit(update_guess=True)
# Now let's plot the solution we obtained.
pylab.rcParams['font.size'] = 12
fig, gs = estim.plot_rvs(plot_guess=True, fold=False, legend_loc=4)
# If the result looks good, that is great: we have the best solution of the orbit. However, we still need to estimate uncertainties for the orbital parameters. We do that using `emcee`. This is a Markov-Chain Monte Carlo (MCMC) simulation, in which we simulate a bunch of sets of orbital parameters that could still fit the data given the uncertainties of the observations, but are a little bit off from the best solution. They will make up the uncertainties of the fit.
#
# This simulation starts from the best solution and do random walks across the parameter space. We will provide the number of *walkers* (`nwalkers`) for the MCMC simulation, as well as the number of *steps* (`nsteps`) that each one will take.
#
# How do we know the number of walkers and steps to use? As a general rule of thumb, it is recommended to use at least 2 times the number of parameters for the number of walkers, and as many steps as it takes for the simulation to converge.
#
# **Note**: We can use multiprocessing in `emcee` to make the calculations somewhat faster. For that, we need to provide the number of processing threads (in the parameter `nthreads`) of your computer. Most laptops have 2 or 4 threads.
estim.emcee_orbit(nwalkers=12, nsteps=1000, nthreads=4)
# With that done, we plot the walkers to see how the simulation went.
estim.plot_emcee_sampler()
# Let's cut the beginning of the simulation (the first 500 steps) because they correspond to the *burn-in* phase.
estim.make_chains(500)
# Now we use a corner plot to analyze the posterior distributions of the parameters, as well as the correlations between them.
fig = estim.plot_corner()
plt.show()
# And that should be pretty much it. Finally, we compute the orbital parameters in a human-readable fashion.
estim.print_emcee_result(main_star_mass=0.954, # in M_sol units
                         mass_sigma=0.006)
| docs/examples/HIP67620_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from glob import glob
from pickle import load
from scipy.stats import norm
import matplotlib.pyplot as plt
from simtk import unit
import numpy as np
from tqdm import tqdm
# %matplotlib inline
# Point at the pickled near-equilibrium results for one test system; the
# commented-out alternatives switch to the alanine dataset.
pkls = glob('../data/water_cluster_rigid_near_eq/*.pkl')
#pkls = glob('../data/alanine_constrained_near_eq/*.pkl')
name = "water_cluster_rigid"
#name = "alanine_constrained"
len(pkls)
# -
# Unpickle every result file into `summary`; each entry is destructured below
# as ((scheme, dt, marginal), result_dict).
summary = []
for fname in pkls:
    with open(fname, 'rb') as f:
        summary.append(load(f))
summary[0]
def get_max_conf_dt_result(scheme='VRORV'):
    """Return the `summary` entry for `scheme` with the largest timestep.

    Scans the module-level `summary` list, whose entries look like
    ((scheme, dt, marginal), payload), and returns the entry with the
    largest dt (in femtoseconds) among those matching `scheme` and the
    'configuration' marginal.  Prints the selected timestep.

    Raises
    ------
    ValueError
        If no entry matches.  (The original code left `result` unbound in
        this case and crashed with an opaque UnboundLocalError.)
    """
    result = None
    max_dt = 0
    for result_ in summary:
        descr = result_[0]
        dt = descr[1].value_in_unit(unit.femtosecond)
        if (dt > max_dt) and (descr[0] == scheme) and (descr[2] == 'configuration'):
            result = result_
            max_dt = dt
    if result is None:
        raise ValueError("no 'configuration' entry found for scheme {!r}".format(scheme))
    print(max_dt)  # report which timestep was selected
    return result
# Pull the largest-timestep 'configuration' result for the default scheme and
# inspect its forward/reverse shadow-work distributions.
result = get_max_conf_dt_result()
result
W_F = result[1]['W_shads_F']
W_R = result[1]['W_shads_R']
# +
from benchmark.evaluation import estimate_nonequilibrium_free_energy
mean, sq_unc = estimate_nonequilibrium_free_energy(W_F, W_R)
mean, np.sqrt(sq_unc)
# -
plt.hist(W_F, bins=100, alpha=0.5);
plt.hist(W_R, bins=100, alpha=0.5);
mean, np.sqrt(sq_unc)
# +
from scipy.stats import norm
# Gaussian summary of the estimate: N(mean, sq_unc).
stdev = np.sqrt(sq_unc)
n = norm(loc=mean, scale=stdev)
x_grid = np.linspace(min(0, mean - stdev * 6), mean + stdev * 6, 1000)
y_grid = np.exp(n.logpdf(x_grid))
plt.plot(x_grid, y_grid)
# +
import seaborn.apionly as sns
# One sequential colormap per integrator scheme, indexed by timestep; `colors`
# keeps a single mid-range representative color per scheme for later plots.
schemes = sorted(['RVOVR', 'VRORV', 'OVRVO', 'ORVRO'])
color_schemes = dict(zip(schemes, ['Blues', 'Greens', 'Oranges', 'Purples']))
colormaps = dict()
dts = sorted(list(set([r[0][1].value_in_unit(unit.femtosecond) for r in summary])))
for scheme in schemes:
    colormap = sns.color_palette(color_schemes[scheme], n_colors=len(dts))
    colormaps[scheme] = dict(zip(dts, colormap))
dt_ = dts[int(len(dts)/2)]
colors = dict()
for scheme in schemes:
    colors[scheme] = colormaps[scheme][dt_]
# -
def plot(mean, sq_unc, scheme='VRORV'):
    """Draw the Gaussian density N(mean, sq_unc) on the current axes,
    colored and labeled according to the module-level `colors` mapping."""
    sigma = np.sqrt(sq_unc)
    lower = min(0, mean - 6 * sigma)
    upper = mean + 6 * sigma
    grid = np.linspace(lower, upper, 1000)
    density = np.exp(norm(loc=mean, scale=sigma).logpdf(grid))
    plt.plot(grid, density, color=colors[scheme], label=scheme)
def nan_safe(x, y):
    """Filter the pair (x, y) down to the indices where both entries are finite."""
    both_finite = np.isfinite(x) & np.isfinite(y)
    return x[both_finite], y[both_finite]
# Compare the near-equilibrium free-energy estimate across schemes, each at
# its largest available 'configuration' timestep.
for scheme in schemes:
    result = get_max_conf_dt_result(scheme)
    W_F = result[1]['W_shads_F']
    W_R = result[1]['W_shads_R']
    W_F, W_R = nan_safe(W_F, W_R)  # drop non-finite work pairs
    mean, sq_unc = estimate_nonequilibrium_free_energy(W_F, W_R)
    plot(mean, sq_unc, scheme)
plt.legend(loc='best')
plt.xlim(0,)
# +
def resample(x):
    """Bootstrap resample: draw len(x) elements of x uniformly with replacement."""
    indices = np.random.randint(0, len(x), len(x))
    return x[indices]
def estimate_delta_F_using_medians(W_F, W_R):
    """Robust (median-based) analogue of the near-equilibrium free-energy
    estimator: 0.5 * (median forward work - median reverse work)."""
    forward_median = np.median(W_F)
    reverse_median = np.median(W_R)
    return 0.5 * (forward_median - reverse_median)
def bootstrap_median_estimates(W_F, W_R, n_bootstrap=1000):
    """Bootstrap the median-based estimator `n_bootstrap` times (tqdm shows a
    progress bar) and return the replicates as an array."""
    replicates = [
        estimate_delta_F_using_medians(resample(W_F), resample(W_R))
        for _ in tqdm(range(n_bootstrap))
    ]
    return np.array(replicates)
# -
# Same comparison, but using the robust bootstrapped-median estimator.
for scheme in schemes:
    result = get_max_conf_dt_result(scheme)
    W_F = result[1]['W_shads_F']
    W_R = result[1]['W_shads_R']
    W_F, W_R = nan_safe(W_F, W_R)
    median_estimates = bootstrap_median_estimates(W_F, W_R)
    mean, sq_unc = np.mean(median_estimates), np.std(median_estimates)**2
    #mean, sq_unc = estimate_nonequilibrium_free_energy(W_F, W_R)
    plot(mean, sq_unc, scheme)
plt.legend(loc='best')
plt.xlim(0,)
median_estimates = bootstrap_median_estimates(W_F, W_R)
# NOTE(review): `normed=` was removed in matplotlib >= 3.1; use density=True there.
plt.hist(median_estimates, normed=True, bins=50);
plt.plot(x_grid, y_grid)
# +
def get_curves_with_unc(scheme='RVOVR', marginal='configuration'):
    """Collect the (timestep, estimate, uncertainty) curve for one scheme.

    Scans the module-level `summary`, whose entries are
    ((scheme, dt, marginal), result_dict), keeps the entries matching
    `scheme` and `marginal`, and returns [dts, estimates, uncertainties]
    as arrays sorted by increasing timestep (fs).
    """
    dts = []
    near_eq_estimates = []
    near_eq_unc = []
    for i, (descr, (result_dict)) in enumerate(summary):
        if descr[0] == scheme and descr[2] == marginal:
            dts.append(descr[1].value_in_unit(unit.femtosecond))
            W_F, W_R = result_dict["W_shads_F"], result_dict["W_shads_R"]
            W_F, W_R = nan_safe(W_F, W_R)  # drop non-finite work pairs
            mean, sq_unc = estimate_nonequilibrium_free_energy(W_F, W_R)
            near_eq_estimates.append(mean)
            near_eq_unc.append(np.sqrt(sq_unc))
    # Sort all three lists by timestep.
    inds = np.argsort(dts)
    sort_by_dt = lambda l : np.array(l)[inds]
    return list(map(sort_by_dt, [dts, near_eq_estimates, near_eq_unc]))
def plot_marginal(marginal='configuration', legend=True):
    """Plot KL-divergence vs timestep for every scheme on the current axes,
    with a +/- 1 sigma shaded band; solid lines mark the full (phase-space)
    marginal and dotted lines the configuration marginal."""
    plt.title(marginal)
    linestyle = {'full': 'solid',
                 'configuration': 'dotted',
                 }
    for i, scheme in enumerate(schemes):
        dts, near_eq_estimates, near_eq_unc = get_curves_with_unc(scheme, marginal)
        plt.plot(dts, near_eq_estimates, label=scheme, linestyle=linestyle[marginal], color=colors[scheme])
        plt.fill_between(dts, near_eq_estimates - near_eq_unc, near_eq_estimates + near_eq_unc, alpha=0.5, color=colors[scheme])
    if legend:
        plt.legend(loc='upper left', title='scheme')
    plt.xlabel(r'$\Delta t$ (fs)')
    if marginal == 'configuration':
        d_kl_arguments = r'$(\rho_\mathbf{x} \| \pi_\mathbf{x})$'
    else:
        d_kl_arguments = r'$(\rho \| \pi)$'
    plt.ylabel(r'$\mathcal{D}_{KL}$' + d_kl_arguments)
# Two-panel figure (log y-scale): (a) phase-space and (b) configuration-space
# KL error vs timestep; the first subplot is an intentionally empty spacer.
log = True
scale_factor = 3.2
plt.figure(figsize=(3*scale_factor,1*scale_factor))
ax = plt.subplot(1,3,1)
plt.plot(np.nan * np.ones(10))
plt.xticks([])
plt.yticks([])
ax = plt.subplot(1,3,2)
plot_marginal('full')
plt.title('(a) phase space error')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
if log:
    plt.yscale('log')
plt.legend(loc='best')
ax = plt.subplot(1,3,3, sharey=ax)
plot_marginal('configuration', legend=False)
plt.title('(b) configuration space error')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
if log:
    plt.yscale('log')
plt.legend(loc='best')
plt.ylim(0,)
plt.tight_layout()
plt.savefig('{}{}.jpg'.format(name, '_log_scale' if log else ''), dpi=300, bbox_inches='tight')
# +
# NOTE(review): this cell re-defines get_curves_with_unc (and plot_marginal
# below) identically to the cell above, so the linear-scale figure can be
# re-run independently of the log-scale one.
def get_curves_with_unc(scheme='RVOVR', marginal='configuration'):
    """Collect the (timestep, estimate, uncertainty) curve for one scheme,
    sorted by increasing timestep (duplicate of the definition above)."""
    dts = []
    near_eq_estimates = []
    near_eq_unc = []
    for i, (descr, (result_dict)) in enumerate(summary):
        if descr[0] == scheme and descr[2] == marginal:
            dts.append(descr[1].value_in_unit(unit.femtosecond))
            W_F, W_R = result_dict["W_shads_F"], result_dict["W_shads_R"]
            W_F, W_R = nan_safe(W_F, W_R)
            mean, sq_unc = estimate_nonequilibrium_free_energy(W_F, W_R)
            near_eq_estimates.append(mean)
            near_eq_unc.append(np.sqrt(sq_unc))
    inds = np.argsort(dts)
    sort_by_dt = lambda l : np.array(l)[inds]
    return list(map(sort_by_dt, [dts, near_eq_estimates, near_eq_unc]))
def plot_marginal(marginal='configuration', legend=True):
    """Plot KL-divergence vs timestep for every scheme with a +/- 1 sigma
    band (duplicate of the definition above, kept so this cell is
    self-contained)."""
    plt.title(marginal)
    linestyle = {'full': 'solid',
                 'configuration': 'dotted',
                 }
    for i, scheme in enumerate(schemes):
        dts, near_eq_estimates, near_eq_unc = get_curves_with_unc(scheme, marginal)
        plt.plot(dts, near_eq_estimates, label=scheme, linestyle=linestyle[marginal], color=colors[scheme])
        plt.fill_between(dts, near_eq_estimates - near_eq_unc, near_eq_estimates + near_eq_unc, alpha=0.5, color=colors[scheme])
    if legend:
        plt.legend(loc='upper left', title='scheme')
    plt.xlabel(r'$\Delta t$ (fs)')
    if marginal == 'configuration':
        d_kl_arguments = r'$(\rho_\mathbf{x} \| \pi_\mathbf{x})$'
    else:
        d_kl_arguments = r'$(\rho \| \pi)$'
    plt.ylabel(r'$\mathcal{D}_{KL}$' + d_kl_arguments)
# Linear-scale version of the two-panel error figure.
log = False
scale_factor = 3.2
plt.figure(figsize=(3*scale_factor,1*scale_factor))
ax = plt.subplot(1,3,1)
plt.plot(np.nan * np.ones(10))
plt.xticks([])
plt.yticks([])
ax = plt.subplot(1,3,2)
plot_marginal('full')
plt.title('(a) phase space error')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
if log:
    plt.yscale('log')
plt.legend(loc='best')
ax = plt.subplot(1,3,3, sharey=ax)
plot_marginal('configuration', legend=False)
plt.title('(b) configuration space error')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
if log:
    plt.yscale('log')
plt.legend(loc='best')
plt.ylim(0,)
plt.tight_layout()
plt.savefig('{}{}.jpg'.format(name, '_log_scale' if log else ''), dpi=300, bbox_inches='tight')
# +
# what is the exponent?
# -
# Fit a power law coeff * dt**exponent to the phase-space ('full') error curves.
ys = []
for scheme in schemes:
    _, near_eq_estimates, _ = get_curves_with_unc(scheme, "full")
    ys.append(near_eq_estimates)
[len(y) for y in ys]
dts = np.array(dts)
# +
def log_prob(theta):
    """Log-posterior of the power-law model `coeff * dts**exponent` under a
    Laplace (L1) likelihood against every curve in the module-level `ys`,
    with a flat prior restricted to non-negative parameters."""
    coeff, exponent = theta
    if coeff < 0 or exponent < 0:
        return -np.inf
    model = coeff * dts ** exponent
    return -sum(np.sum(np.abs(curve - model)) for curve in ys)
log_prob([1e-5, 4.0])  # sanity-check the likelihood at a plausible point
# +
from scipy.optimize import fmin
exponents = np.linspace(1,10)  # candidate power-law exponents to profile over
def marginal_map(exponent):
    """Profile likelihood over the coefficient: at fixed `exponent`, maximize
    log_prob over coeff (Nelder-Mead started at 1) and return the attained
    log_prob value."""
    def negative_log_prob(coeff):
        return -log_prob([coeff, exponent])
    best = fmin(negative_log_prob, 1, disp=0)
    return log_prob([best[0], exponent])
# Profile the marginal MAP over the exponent grid; vertical lines mark the
# candidate integer exponents 4 and 5.
marginal_maps = [marginal_map(exponent) for exponent in exponents]
plt.plot(exponents, marginal_maps)
#plt.yscale('log')
plt.xlabel('exponent')
plt.ylabel('marginal MAP')
plt.vlines(4,min(marginal_maps), max(marginal_maps))
plt.vlines(5,min(marginal_maps), max(marginal_maps))
plt.title('what exponent best explains dependence of phase-space error on $\Delta t$?')
# -
exponents[np.argmax(marginal_maps)]  # best exponent on the grid
# +
# Sample the (coeff, exponent) posterior with emcee: 6 walkers, 2 parameters.
import emcee
from emcee import EnsembleSampler
mcmc = EnsembleSampler(6,2,log_prob)
_ = mcmc.run_mcmc(np.random.rand(6,2), 10000)
# -
max(mcmc.flatlnprobability)
mcmc.flatchain[np.argmax(mcmc.flatlnprobability)][1]  # MAP exponent
from corner import corner
# Discard the first 10000 flattened samples as burn-in before the corner plot.
corner(mcmc.flatchain[10000:], labels=['coeff', 'exponent'])
# +
# what if we normalize by the amount of error at 2fs for ovrvo?
def plot_marginal_normalized(marginal='configuration', normalization_scheme='OVRVO', normalization_dt=2.0):
    """Plot each scheme's error curve normalized by the error of
    `normalization_scheme` at the first timestep >= `normalization_dt`
    (values above 1 mean lower error than that reference point)."""
    plt.title(marginal)
    linestyle = {'full': 'solid',
                 'configuration': 'dotted',
                 }
    # get error for normalization
    dts, near_eq_estimates, near_eq_unc = get_curves_with_unc(normalization_scheme, marginal)
    normalize = near_eq_estimates[np.argmax(dts >= normalization_dt)]
    for i, scheme in enumerate(schemes):
        dts, near_eq_estimates, near_eq_unc = get_curves_with_unc(scheme, marginal)
        plt.plot(dts, np.abs(normalize / near_eq_estimates), label=scheme, linestyle=linestyle[marginal], color=colors[scheme])
        #plt.fill_between(dts, near_eq_estimates - near_eq_unc, near_eq_estimates + near_eq_unc, alpha=0.5, color=colors[scheme])
    plt.legend(loc='upper left', title='scheme')
    plt.xlabel(r'$\Delta t$ (fs)')
    if marginal == 'configuration':
        d_kl_arguments = r'$(\rho_\mathbf{x} \| \pi_\mathbf{x})$'
    else:
        d_kl_arguments = r'$(\rho \| \pi)$'
    plt.ylabel(r'$\mathcal{D}_{KL}$' + d_kl_arguments)
# -
plot_marginal_normalized()
plt.yscale('log')
dts, near_eq_estimates, near_eq_unc = get_curves_with_unc("OVRVO", "configuration")
dts
np.argmax(dts >= 2)  # index of the normalization timestep
# let's save the near-equilibrium estimates
result_summary = {}
for scheme in schemes:
    result_summary[scheme] = {}
    for marginal in ['configuration', 'full']:
        result_summary[scheme][marginal] = {}
        dts, near_eq_estimates, near_eq_unc = get_curves_with_unc(scheme, marginal)
        keys = ('dts', 'near_eq_estimates', 'near_eq_uncertainty')
        values = (dts, near_eq_estimates, near_eq_unc)
        for (key, value) in zip(keys, values):
            result_summary[scheme][marginal][key] = value
name
# NOTE(review): loading an object .npy requires allow_pickle=True on
# NumPy >= 1.16.3 — confirm the environment, or rely on the pickle copy below.
np.save('result_summary_{}.npy'.format(name), result_summary)
result_summary_ = np.load('result_summary_{}.npy'.format(name))
from pickle import dump, load
with open('result_summary_{}.pkl'.format(name), 'wb') as f:
    dump(result_summary, f)
with open('result_summary_{}.pkl'.format(name), 'rb') as f:
    result_summary_ = load(f)
result_summary_['OVRVO']
# Mean-based vs median-based estimates side by side.
0.5 * (np.mean(W_F) - np.mean(W_R)), 0.5 * (np.median(W_F) - np.median(W_R))
| notebooks/near-eq result plotting.ipynb |
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] gradient={"editing": false} id="m3k2MwJTSZTj"
# # Hotdog Classifier
# + [markdown] id="UQZ_7VzSvAAK"
# Setting up FastAI
# + id="89pNWep9SZTe"
#hide
# !pip install -Uqq fastbook
import fastbook
fastbook.setup_book()
# + id="exnO-R5rSZTi"
#hide
from fastbook import *
from fastai.vision.widgets import *
# + id="9-fz19xDSZTm"
# Azure API Key
# NOTE(review): a real-looking API key is hard-coded as the fallback — it
# should be revoked and supplied only via the environment variable.
key = os.environ.get('AZURE_SEARCH_KEY', '88f816fb74d147e08b211baf0cceebbb')
# + id="QcdRFxVdSZTn"
# Category labels ("bear_types" is a leftover name from the fastai bear tutorial).
bear_types = 'hotdog', 'random'
path = Path('hotdogs')
# + id="uuWV1TaaSZTn"
# Loop over the specified categories and run an image search for each one.
if not path.exists():
    path.mkdir()
    for o in bear_types:
        dest = (path/o)
        dest.mkdir(exist_ok=True)
        results = search_images_bing(key, f'{o}')
        download_images(dest, urls=results.attrgot('contentUrl'))
# + id="B998hdFcSZTn" colab={"base_uri": "https://localhost:8080/"} outputId="dc430613-afd2-4b5a-9398-50c7a85caced"
# Check for downloaded images that fail verification
fns = get_image_files(path)
failed = verify_images(fns)
failed
# + id="JXaEa2KDSZTo"
# Delete the images that failed verification
failed.map(Path.unlink);
# + id="8y0gaJD5SZTo"
# Build DataLoaders for the dataset we just downloaded, specifying:
# What kinds of data we are working with
# How to get the list of items
# How to label these items
# How to create the validation set
bears = DataBlock(
    blocks=(ImageBlock, CategoryBlock),
    get_items=get_image_files,
    splitter=RandomSplitter(valid_pct=0.2, seed=42),
    get_y=parent_label,
    item_tfms=Resize(128))
# + id="NAgPmkpVxfKI"
# Tell fastai where the data source can be found
dls = bears.dataloaders(path)
# + colab={"base_uri": "https://localhost:8080/", "height": 192} id="-0KVjaEoxpTH" outputId="4e33bda2-7cd9-42e5-f683-1caf82826bb1"
dls.valid.show_batch(max_n=4, nrows=1)
# + id="FNJqmOQjzSOl"
# Training the model (re-create the DataBlock with data augmentation first)
bears = bears.new(
    item_tfms=RandomResizedCrop(224, min_scale=0.5),
    batch_tfms=aug_transforms())
dls = bears.dataloaders(path)
# + id="xteUsfeOzWVn" colab={"base_uri": "https://localhost:8080/", "height": 596} outputId="74952b6b-dfde-44c4-81e6-e28f6ce3d3c0"
learn = cnn_learner(dls, resnet18, metrics=error_rate)
learn.fine_tune(4)
# + id="wNv_zybtzbqq" colab={"base_uri": "https://localhost:8080/", "height": 311} outputId="e512d7cc-dbb2-45f9-a421-9eadf245d67c"
# Confusion matrix
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
# + id="yr5hjkg5zfwI" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="6799d792-4618-4bcc-c446-9ae635c2ce93"
interp.plot_top_losses(5, nrows=1)
# + id="j45d4CnzzmDV" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="975cd02d-ef93-4164-d03a-ca75b2e0a73e"
# Data cleaning
cleaner = ImageClassifierCleaner(learn)
# + [markdown] id="SuV3KHP8C49c"
# Exporting the trained model
# + id="z70dOXG7zu1x" colab={"base_uri": "https://localhost:8080/"} outputId="1492152a-a0a4-4187-a69c-3b7ec04d7132"
learn.export()
path = Path()
path.ls(file_exts='.pkl')
# + [markdown] id="XRZTjKrGNgk3"
# APP
# + id="ZD--pgN8z3Np"
# Reload the exported learner for inference.
learn_inf = load_learner(path/'export.pkl')
# + id="rUNV81sLQRrs" outputId="a7aa53a5-0477-43ee-c3fb-5b5d8426cb82" colab={"base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": ["e59804679966437b8e01c40da76858a0", "086eb912eac34a4299d2d29ce1c9cac1", "d44b311caa0e44d28e739da0bdf2604c"]}
btn_upload = widgets.FileUpload()
btn_upload
# + id="GMgaG99xNv0Q"
img = PILImage.create(btn_upload.data[-1])
# + id="fyj09DXlLlDH" outputId="63d317c7-f603-4960-fa74-75d39f9cfcd9" colab={"base_uri": "https://localhost:8080/", "height": 145, "referenced_widgets": ["7cc5af1369fe4a3f9fb135a864b260ec", "0a53a571863e4390af3866c63f786249"]}
out_pl = widgets.Output()
out_pl.clear_output()
with out_pl: display(img.to_thumb(128,128))
out_pl
# + id="h61_s9NlLyLl" outputId="b31dbc75-8990-4f4e-d0d8-1ea9f24ffca4" colab={"base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": ["bf9f701df37f4cd29aff8bb711534a6b", "d3a788aa1fef45a9ab3058f756c1da93", "6a7832e3d27e4f4fb59994610022cd95"]}
pred,pred_idx,probs = learn_inf.predict(img)
lbl_pred = widgets.Label()
lbl_pred.value = f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'
lbl_pred
# + id="VBloYMNQLaJH"
btn_run = widgets.Button(description='Classify')
# + id="dNmmkma90Xel"
def on_click_classify(change):
    """Button callback: classify the most recently uploaded image and update
    the output widget and the prediction label in place."""
    img = PILImage.create(btn_upload.data[-1])
    out_pl.clear_output()
    with out_pl: display(img.to_thumb(128,128))
    pred,pred_idx,probs = learn_inf.predict(img)
    lbl_pred.value = f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'
btn_run.on_click(on_click_classify)
# + id="9zlt9HZE0d5f" outputId="d44b49e2-d9ec-4a29-9995-7ec3a6567e00" colab={"base_uri": "https://localhost:8080/", "height": 273, "referenced_widgets": ["ab0925cc76314ec4b71c5b52d489be06", "d79058e232764c13af415af37ac3deb0", "2b650a21825042dcb3c2bbb764b5da36", "e59804679966437b8e01c40da76858a0", "c82009766eed419782d3b446f5750fe5", "7cc5af1369fe4a3f9fb135a864b260ec", "bf9f701df37f4cd29aff8bb711534a6b", "0b557c601504408c83abedffef726dba", "f223587a20704cab9847415ac7b401b1", "086eb912eac34a4299d2d29ce1c9cac1", "d44b311caa0e44d28e739da0bdf2604c", "c3b6e50705f4401ab79e8f6b5cedfce3", "<KEY>", "<KEY>", "<KEY>", "<KEY>"]}
VBox([widgets.Label('Select your hotdog!'),
      btn_upload, btn_run, out_pl, lbl_pred])
| _notebooks/2021-02-16-Hotdog-Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:test_env]
# language: python
# name: conda-env-test_env-py
# ---
# # Retrieve geo-locations, create maps with markers and popups
#
# Use OpenStreetMap data and the DKRZ logo.
#
# <br>
#
# geopy - Python client for several popular geocoding web services
#
# folium - visualization tool for maps
#
# <br>
from geopy.geocoders import Nominatim
import folium
# <br>
#
# ## Use Nominatim geocoder for OpenStreetMap data.
#
# <br>
geolocator = Nominatim(user_agent='any_agent')
# <br>
#
# ## Retrieve the geo-location of the given address.
#
# <br>
# +
location = geolocator.geocode('Hamburg')
print(location.address)
# -
print((location.latitude, location.longitude))
print(location.raw)
# <br>
#
# ## Create the map with the retrieved location.
#
# <br>
m = folium.Map(location=[location.latitude, location.longitude])
# <br>
#
# ## Display the map in the notebook.
#
# <br>
display(m)
# <br>
#
# ## Set marker at the center of the city.
#
# <br>
# +
tooltip = location.latitude, location.longitude
folium.Marker([location.latitude, location.longitude], tooltip=tooltip).add_to(m)
display(m)
# -
# <br>
#
# ## Zoom in.
#
# <br>
m = folium.Map(location=[location.latitude, location.longitude], zoom_start=12, zoom_control=False)
display(m)
# <br>
#
# ## Retrieve the location data of the DKRZ. Set Marker type.
#
# <br>
# +
dkrz_location = geolocator.geocode('Bundesstrasse 45a, Hamburg, Germany', language='en')
print(dkrz_location.address)
# -
# <br>
#
# ## Locate DKRZ on map
#
# <br>
# +
dkrz_map = folium.Map(location=[dkrz_location.latitude, dkrz_location.longitude], zoom_start=16, zoom_control=False)
tooltip = dkrz_location.latitude, dkrz_location.longitude
popup_name = 'Deutsches Klimarechenzentrum GmbH'
folium.Marker([dkrz_location.latitude, dkrz_location.longitude], popup=popup_name, icon=folium.Icon(icon="cloud"),).add_to(dkrz_map)
display(dkrz_map)
# -
# <br>
#
# ## Display DKRZ logo as marker popup.
#
# <br>
# +
from folium import IFrame
import base64
width, height = 700, 700
f = folium.Figure(width=width, height=height)
dkrz_map = folium.Map(location=[dkrz_location.latitude, dkrz_location.longitude],
zoom_start=16, zoom_control=False,
width=width, height=height).add_to(f)
png = 'DKRZ_Logo_plus_text_small.png'.format(42)
encoded = base64.b64encode(open(png, 'rb').read())
html = '<img src="data:image/png;base64,{}">'.format
iframe = IFrame(html(encoded.decode('UTF-8')), width=200+20, height=100+20)
popup = folium.Popup(iframe, max_width=2650)
icon = folium.Icon(color='blue', icon='cloud')
marker = folium.Marker(location=[dkrz_location.latitude, dkrz_location.longitude], popup=popup, icon=icon)
marker.add_to(dkrz_map)
display(dkrz_map)
# -
# <br>
#
# ## Retrieve location information in a different language.
#
# <br>
# +
# Partially apply the geocoder so every lookup returns Spanish-language names.
from functools import partial
geocode = partial(geolocator.geocode, language='es')
print(geocode('london'))
# +
reverse = partial(geolocator.reverse, language='es')
print(reverse('52.509669, 13.376294'))
# -
# <br>
#
# ## Calculate distances
#
# <br>
#
# +
# distance.distance computes the geodesic (ellipsoidal) distance by default.
from geopy import distance
newport_ri = (41.49008, -71.312796)
cleveland_oh = (41.499498, -81.695391)
print(distance.distance(newport_ri, cleveland_oh).miles)
# +
wellington = (-41.32, 174.81)
salamanca = (40.96, -5.50)
print(distance.distance(wellington, salamanca).km)
# -
# <br>
#
# Using great circle distance
#
# <br>
print(distance.great_circle(newport_ri, cleveland_oh).km)
# Change the ellipsoid
#
# <pre>
#                  model             major (km)   minor (km)     flattening
# ELLIPSOIDS = {'WGS-84':        (6378.137, 6356.7523142, 1 / 298.257223563),
#               'GRS-80':        (6378.137, 6356.7523141, 1 / 298.257222101),
#               'Airy (1830)':   (6377.563396, 6356.256909, 1 / 299.3249646),
#               'Intl 1924':     (6378.388, 6356.911946, 1 / 297.0),
#               'Clarke (1880)': (6378.249145, 6356.51486955, 1 / 293.465),
#               'GRS-67':        (6378.1600, 6356.774719, 1 / 298.25),
#               }
# </pre>
# +
ne, cl = newport_ri, cleveland_oh
print(distance.geodesic(ne, cl, ellipsoid='GRS-80').km)
# -
| Visualization/miscellaneous/create_street_maps_from_geolocations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Solve the hydro-thermal power system planning problem: periodical SDDP
# ===========================================
#
# The hydro-thermal power system planning problem is periodical with a period of 12. In this tutorial, we use periodical SDDP to solve the problem for infinite horizon.
# The syntax of periodical SDDP is very similar to the classical SDDP. We will only highlight the differences. More details can be found at http://www.optimization-online.org/DB_FILE/2019/09/7367.pdf
# +
# Modeling stack: gurobipy is the LP solver backend, msppy provides the
# multistage stochastic programming interface (MSLP + periodical SDDP).
import pandas
import numpy
import gurobipy
from msppy.msp import MSLP
from msppy.solver import PSDDP
from msppy.evaluation import EvaluationTrue, Evaluation
import sys
# gamma: monthly AR coefficients of the inflow model (12 rows, one column
# per region).
gamma = numpy.array(pandas.read_csv(
    "./data/gamma.csv",
    names=[0,1,2,3],
    index_col=0,
    skiprows=1,
))
# sigma: one 4x4 noise covariance matrix per calendar month.
sigma = [
    numpy.array(pandas.read_csv(
        "./data/sigma_{}.csv".format(i),
        names=[0,1,2,3],
        index_col=0,
        skiprows=1,
    )) for i in range(12)
]
# exp_mu: monthly expected inflows (12 x 4).
exp_mu = numpy.array(pandas.read_csv(
    "./data/exp_mu.csv",
    names=[0,1,2,3],
    index_col=0,
    skiprows=1,
))
# Static system data: reservoir bounds, demand, deficit penalty tiers,
# inter-region exchange capacities/costs and per-region thermal plant data.
hydro_ = pandas.read_csv("./data/hydro.csv", index_col=0)
demand = pandas.read_csv("./data/demand.csv", index_col=0)
deficit_ = pandas.read_csv("./data/deficit.csv", index_col=0)
exchange_ub = pandas.read_csv("./data/exchange.csv", index_col=0)
exchange_cost = pandas.read_csv("./data/exchange_cost.csv", index_col=0)
thermal_ = [pandas.read_csv("./data/thermal_{}.csv".format(i),
            index_col=0) for i in range(4)]
# First four INITIAL rows are initial storage, the next four initial inflow.
stored_initial = hydro_['INITIAL'][:4]
inflow_initial = hydro_['INITIAL'][4:8]
def sampler(t):
    """Return a generator of random AR-constraint data for stage ``t``.

    The returned callable draws log-normal noise from ``random_state`` and
    produces an 8-element list: the first four entries are the coefficients
    on the lagged inflow variables, the last four the constraint
    right-hand sides.  Uses the module-level ``gamma``, ``sigma`` and
    ``exp_mu`` arrays loaded above.
    """
    def inner(random_state):
        month = t % 12
        prev_month = (t - 1) % 12
        noise = numpy.exp(
            random_state.multivariate_normal(mean=[0] * 4, cov=sigma[month]))
        coef = [-noise[i] * gamma[month][i] * exp_mu[month][i] / exp_mu[prev_month][i]
                for i in range(4)]
        rhs = [noise[i] * (1 - gamma[month][i]) * exp_mu[month][i]
               for i in range(4)]
        return coef + rhs
    return inner
# -
# Build the true problem and make discretization
# --------------------------
# One stage model per t = 0..12: stage 0 is the initial stage, stages 1..12
# cover one full 12-month period of the infinite-horizon problem.
HydroThermal = MSLP(T=13, bound=0, discount=0.9906)
# Periodical SDDP algorithm solves the problem for a single period plus an initial stage. In this case, the number of stages to consider is 13, set by $T=13$.
for t in range(13):
    m = HydroThermal[t]
    # State variables: reservoir storage and inflow per region; addStateVars
    # returns the (current-stage, previous-stage) variable pair.
    stored_now,stored_past = m.addStateVars(4, ub=hydro_['UB'][:4], name="stored")
    inflow_now,inflow_past = m.addStateVars(4, name="inflow")
    # Spillage carries a small penalty; hydro generation is bounded per region.
    spill = m.addVars(4, obj=0.001, name="spill")
    hydro = m.addVars(4, ub=hydro_['UB'][-4:], name="hydro")
    # Load shedding: 4 deficit tiers per region, each tier capped at a
    # DEPTH fraction of that month's demand and priced by OBJ.
    deficit = m.addVars(
        [(i,j) for i in range(4) for j in range(4)],
        ub = [
            demand.iloc[t%12][i] * deficit_['DEPTH'][j]
            for i in range(4) for j in range(4)
        ],
        obj = [
            deficit_['OBJ'][j]
            for i in range(4) for j in range(4)
        ],
        name = "deficit")
    # Thermal generation, one variable per plant with its own bounds/cost.
    thermal = [None] * 4
    for i in range(4):
        thermal[i] = m.addVars(
            len(thermal_[i]),
            ub=thermal_[i]['UB'],
            lb=thermal_[i]['LB'],
            obj=thermal_[i]['OBJ'],
            name="thermal_{}".format(i)
        )
    # Energy exchange among the 4 regions plus an extra 5th node
    # (presumably a transshipment hub -- confirm against the data files).
    exchange = m.addVars(5,5, obj=exchange_cost.values.flatten(),
                         ub=exchange_ub.values.flatten(), name="exchange")
    thermal_sum = m.addVars(4, name="thermal_sum")
    m.addConstrs(thermal_sum[i] ==
                 gurobipy.quicksum(thermal[i].values()) for i in range(4))
    # Demand balance per region: thermal + deficit + hydro + net imports
    # must equal the month's demand.
    for i in range(4):
        m.addConstr(
            thermal_sum[i]
            + gurobipy.quicksum(deficit[(i,j)] for j in range(4))
            + hydro[i]
            - gurobipy.quicksum(exchange[(i,j)] for j in range(5))
            + gurobipy.quicksum(exchange[(j,i)] for j in range(5))
            == demand.iloc[t%12][i],
            name = 'demand',
        )
    # Flow conservation at the extra node (index 4): in-flow equals out-flow.
    m.addConstr(
        gurobipy.quicksum(exchange[(j,4)] for j in range(5))
        - gurobipy.quicksum(exchange[(4,j)] for j in range(5))
        == 0
    )
    # Water balance: storage change + spill + generation equals inflow.
    m.addConstrs(
        stored_now[i] + spill[i] + hydro[i] - stored_past[i] == inflow_now[i]
        for i in range(4)
    )
    if t == 0:
        # Deterministic initial conditions at the first stage.
        m.addConstrs(stored_past[i] == stored_initial[i] for i in range(4))
        m.addConstrs(inflow_now[i] == inflow_initial[i] for i in range(4))
    else:
        # Placeholder AR constraints; sampler(t) fills in the random data:
        # the first 4 locations are coefficients on inflow_past, the last 4
        # the constraint right-hand sides.
        TS = m.addConstrs(inflow_now[i] + inflow_past[i] == 0 for i in range(4))
        m.add_continuous_uncertainty(
            uncertainty=sampler(t),
            locations=(
                [(TS[i],inflow_past[i]) for i in range(4)]
                + [TS[i] for i in range(4)]
            ),
        )
# Discretize the continuous inflow uncertainty into 100 samples per stage.
HydroThermal.discretize(n_samples=100, random_state=888)
# Solve the problem
# ---------------------
# We now call PSDDP solver to run the periodical SDDP for 10 iterations.
#
# Backward passes of the periodical SDDP generates cuts for the first 13 stages.
#
# Forward passes of the periodical SDDP generates trial points. Trial points can be just obtained from solving these 13 stages. They can also be obtained from later stages (since the problem is periodical). It is often found solving more stages makes the algorithm converge faster. Here we set $\textrm{forward\_T}=120$, meaning that trial points are generated from the first 120 stages.
HT_psddp = PSDDP(HydroThermal)
HT_psddp.solve(max_iterations=10, forward_T=120)
# Evaluate the obtained policy
# ---------------------------------
# The obtained policy is implementable and feasible for any finite number of stages. We can, for example, set $\textrm{query\_T}=60$ as below to evaluate the policy for the first 60 stages.
result = Evaluation(HydroThermal)
result.run(n_simulations=10, query_T=60)
# Confidence interval of the simulated policy value.
result.CI
| doc/source/examples/hydro_thermal/infinity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="rpYyrvmvNvuM" executionInfo={"status": "ok", "timestamp": 1644215461479, "user_tz": -480, "elapsed": 73952, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gig8u-bJz6DvFEfKE8JoOb4YpRwSQe412ZaJ3yC0A=s64", "userId": "00819988094894161328"}} outputId="074a0205-8b56-4fe5-8cc2-2dbec5ac897d"
# We will install the malaya library package from https://github.com/huseinzol05/malaya
# !pip3 install malaya
# !pip3 install tensorflow-text==1.15.1
# Import the Malaya package, Tensorflow and PrettyPrint from Python
import malaya
import tensorflow as tf
from pprint import pprint
# Check the loaded Tensorflow version
# (the pip cell above pins tensorflow-text==1.15.1, i.e. the TF 1.x line).
tf.__version__
# + colab={"base_uri": "https://localhost:8080/"} id="N9JOcxqkQbxJ" executionInfo={"status": "ok", "timestamp": 1626623624793, "user_tz": -480, "elapsed": 26363, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj1bB00dP4qmZnq1f6qPaN6xNHosmMyHtf0vKcdHw=s64", "userId": "00819988094894161328"}} outputId="13a26e77-6284-4d13-b121-21144d248c6b"
# Load Malaya's quantized T5 transformer text-generation model.
model = malaya.generator.transformer(model = 't5', quantized = True)
# + id="HVyoW4-TUgsU"
# Key points ("isi penting") to be expanded into generated Malay prose.
isi_penting = ['Dr M perlu dikekalkan sebagai perdana menteri',
               'Muhyiddin perlulah menolong Dr M',
               'rakyat perlu menolong Muhyiddin']
# + colab={"base_uri": "https://localhost:8080/"} id="BieNBr9JQqhL" executionInfo={"status": "ok", "timestamp": 1626438388962, "user_tz": -480, "elapsed": 109794, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj1bB00dP4qmZnq1f6qPaN6xNHosmMyHtf0vKcdHw=s64", "userId": "00819988094894161328"}} outputId="b9491fc0-fd50-4070-d4cd-425f3dc22eaf"
# Greedy decoding: generate a paragraph covering the key points above.
pprint(model.greedy_decoder(isi_penting))
# + id="Qe0ET1ELUdaZ"
# A second set of key points (benefits of community clean-up activities).
isi_penting = ['Dapat memupuk semangat kerjasama',
               'Dapat mengeratkan hubungan silaturahim.',
               'Kebersihan kawasan persekitaran terpelihara.',
               'Terhindar daripada wabak penyakit seperti Denggi',
               'Mengisi masa lapang',
               'Menerapkan nilai-nilai murni dalam kehidupan']
# + colab={"base_uri": "https://localhost:8080/"} id="grJnhncVUeZq" executionInfo={"status": "ok", "timestamp": 1626623857782, "user_tz": -480, "elapsed": 228173, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj1bB00dP4qmZnq1f6qPaN6xNHosmMyHtf0vKcdHw=s64", "userId": "00819988094894161328"}} outputId="c28ce812-c30c-48ca-c930-736dd72da869"
# Generate text for the second set of key points.
pprint(model.greedy_decoder(isi_penting))
| Bahasa Text Generation w_ Malaya Python Library.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy
#import tick
from tick import hawkes
import pandas as pd
#import ncsr_import
import numpy as np
from tick.plot import plot_hawkes_kernels

# Pre-computed inputs cached as CSV files.
adj = pd.read_csv('all_adjacency_matrix.csv', index_col=0)
age = pd.read_csv('justage_vars_init.csv', index_col=0)
# BUG FIX: `import ncsr_import` is commented out above, so this call raised
# a NameError at runtime.  The derived data it produced is already cached in
# age_mdd.csv / all_vars_ncsr.csv, so the call is disabled like the other
# one-off preprocessing steps below.
#ncsr = ncsr_import.ncsr_data()
age_mdd = pd.read_csv('age_mdd.csv', index_col=0)
all_vars = pd.read_csv('all_vars_ncsr.csv', index_col=0)
# + [markdown] tags=[]
# ncsr_var_desc = pd.DataFrame(columns = ['VarName', 'Description', 'Root_DF', 'Start', 'End', 'DataFrame', 'recursion_flag'])
#
# for x in ncsr.root.iloc[:,0]:
# ncsr_var_desc = ncsr_var_desc.append(ncsr.get_value_from_string(x))
#
# ncsr_var_desc = ncsr_var_desc.reset_index(drop=True)
# ncsr_var_desc.to_csv("all_vars_ncsr.csv")
# +
# MDD is 3380
#mdd = ncsr.ncsr[ncsr.ncsr.iloc[:, 3380] == 1]
# +
#age_mdd = age.iloc[mdd.index, :].reset_index(drop = True)
# +
#age_mdd.to_csv('age_mdd.csv')
# -
# Random train/test split of the MDD subjects.
# NOTE(review): `< 0.02` keeps only ~2% of rows for *training* and 98% for
# testing -- presumably to keep HawkesEM fitting fast; confirm the split
# direction is intentional.
msk = np.random.rand(len(age_mdd)) < 0.02
mdd_train = age_mdd[msk]
mdd_test = age_mdd[~msk]
# tick expects a list of float event-time arrays, one per realization.
mdd_train = [ np.array(x) for x in mdd_train.values.tolist()]
mdd_train = [x.astype(float) for x in mdd_train]
train_dim = len(mdd_train)
train_dim
# Fit a Hawkes process with the EM kernel estimator and plot the kernels.
# NOTE(review): HawkesEM's first positional argument is the kernel support
# (a duration), not a count -- verify that passing `train_dim` is intended.
learner = hawkes.HawkesEM(train_dim, n_threads=2, verbose = True, tol = 1e-3)
learner.fit(mdd_train)
plot_hawkes_kernels(learner)
| ddp-stuff/hawkes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Use MLflow with Azure Machine Learning to Train and Deploy Keras Image Classifier
#
# This example shows you how to use MLflow together with Azure Machine Learning services for tracking the metrics and artifacts while training a Keras model to classify MNIST digit images and deploy the model as a web service. You'll learn how to:
#
# 1. Set up MLflow tracking URI so as to use Azure ML
# 2. Create experiment
# 3. Instrument your model with MLflow tracking
# 4. Train a Keras model locally with MLflow auto logging
# 5. Train a model on GPU compute on Azure with MLflow auto logging
# 6. View your experiment within your Azure ML Workspace in Azure Portal
# 7. Deploy the model as a web service on Azure Container Instance
# 8. Call the model to make predictions
#
# ### Pre-requisites
#
# If you are using a Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipynb) notebook to set up your Azure Machine Learning workspace and ensure other common prerequisites are met.
#
# Install TensorFlow and Keras, this notebook has been tested with TensorFlow version 2.1.0 and Keras version 2.3.1.
#
# Also, install azureml-mlflow package using ```pip install azureml-mlflow```. Note that azureml-mlflow installs mlflow package itself as a dependency if you haven't done so previously.
#
# ### Set-up
#
# Import packages and check versions of Azure ML SDK and MLflow installed on your computer. Then connect to your Workspace.
# +
# Core imports: mlflow.azureml bridges MLflow tracking/deployment to Azure ML.
import sys, os
import mlflow
import mlflow.azureml
import azureml.core
from azureml.core import Workspace
print("SDK version:", azureml.core.VERSION)
print("MLflow version:", mlflow.version.VERSION)
# -
# Load the workspace from the local config.json created during setup.
ws = Workspace.from_config()
ws.get_details()
# ### Set tracking URI
#
# Set the MLflow tracking URI to point to your Azure ML Workspace. The subsequent logging calls from MLflow APIs will go to Azure ML services and will be tracked under your Workspace.
mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
# ### Create Experiment
#
# In both MLflow and Azure ML, training runs are grouped into experiments. Let's create one for our experimentation.
experiment_name = "keras-with-mlflow"
mlflow.set_experiment(experiment_name)
# ### Train model locally while logging metrics and artifacts
#
# The ```scripts/train.py``` program contains the code to load the image dataset, train and test the model. Within this program, the train.driver function wraps the end-to-end workflow.
#
# Within the driver, the ```mlflow.start_run``` starts MLflow tracking. Then, MLflow's automatic logging is used to log metrics, parameters and model for the Keras run.
#
# Let's add the program to search path, import it as a module and invoke the driver function. Note that the training can take few minutes.
# +
# Make ./scripts importable, then run the training driver in-process;
# train.driver() starts an MLflow run with auto-logging enabled.
lib_path = os.path.abspath("scripts")
sys.path.append(lib_path)
import train
# -
run = train.driver()
# ### Train model on GPU compute on Azure
#
# Next, let's run the same script on GPU-enabled compute for faster training. If you've completed the [Configuration](../../../configuration.ipynb) notebook, you should have a GPU cluster named "gpu-cluster" available in your workspace. Otherwise, follow the instructions in the notebook to create one. For simplicity, this example uses a single process on a single VM to train the model.
#
# Clone an environment object from the Tensorflow 2.1 Azure ML curated environment. Azure ML curated environments are pre-configured environments to simplify ML setup, reference [this doc](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-environments#use-a-curated-environment) for more information. To enable MLflow tracking, add ```azureml-mlflow``` as pip package.
# +
from azureml.core import Environment
# Clone the curated TF 2.1 GPU environment and add the pip packages the
# training script needs (azureml-mlflow enables MLflow tracking).
env = Environment.get(workspace=ws, name="AzureML-TensorFlow-2.1-GPU").clone("mlflow-env")
env.python.conda_dependencies.add_pip_package("azureml-mlflow")
env.python.conda_dependencies.add_pip_package("keras==2.3.1")
env.python.conda_dependencies.add_pip_package("numpy")
# -
# Create a ScriptRunConfig to specify the training configuration: script, compute as well as environment.
# +
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory="./scripts", script="train.py")
src.run_config.environment = env
src.run_config.target = "gpu-cluster"
# -
# Get a reference to the experiment you created previously, but this time, as an Azure Machine Learning experiment object.
#
# Then, use the ```Experiment.submit``` method to start the remote training run. Note that the first training run often takes longer as Azure Machine Learning service builds the Docker image for executing the script. Subsequent runs will be faster as the cached image is used.
# +
from azureml.core import Experiment
# Submit the configured script run to the gpu-cluster compute target.
exp = Experiment(ws, experiment_name)
run = exp.submit(src)
# -
# You can monitor the run and its metrics on Azure Portal.
run
# Also, you can wait for run to complete.
run.wait_for_completion(show_output=True)
# ### Deploy model as web service
#
# The ```mlflow.azureml.deploy``` function registers the logged Keras+Tensorflow model and deploys the model in a framework-aware manner. It automatically creates the Tensorflow-specific inferencing wrapper code and specifies package dependencies for you. See [this doc](https://mlflow.org/docs/latest/models.html#id34) for more information on deploying models on Azure ML using MLflow.
#
# In this example, we deploy the Docker image to Azure Container Instance: a serverless compute capable of running a single container. You can tag and add descriptions to help keep track of your web service.
#
# [Other inferencing compute choices](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where) include Azure Kubernetes Service which provides scalable endpoint suitable for production use.
#
# Note that the service deployment can take several minutes.
# +
from azureml.core.webservice import AciWebservice, Webservice
# Deploy the logged model to Azure Container Instance (serverless single
# container), registering it under the name "keras_mnist".
model_path = "model"
aci_config = AciWebservice.deploy_configuration(cpu_cores=2,
                                                memory_gb=5,
                                                tags={"data": "MNIST", "method" : "keras"},
                                                description="Predict using webservice")
webservice, azure_model = mlflow.azureml.deploy(model_uri='runs:/{}/{}'.format(run.id, model_path),
                                                workspace=ws,
                                                deployment_config=aci_config,
                                                service_name="keras-mnist-1",
                                                model_name="keras_mnist")
# -
# Once the deployment has completed you can check the scoring URI of the web service.
print("Scoring URI is: {}".format(webservice.scoring_uri))
# In case of a service creation issue, you can use ```webservice.get_logs()``` to get logs to debug.
# ### Make predictions using a web service
#
# To make the web service, create a test data set as normalized NumPy array.
#
# Then, let's define a utility function that takes a random image and converts it into a format and shape suitable for input to the Keras inferencing end-point. The conversion is done by:
#
# 1. Select a random (image, label) tuple
# 2. Take the image and convert it to a NumPy array
# 3. Reshape array into 1 x 1 x N array
# * 1 image in batch, 1 color channel, N = 784 pixels for MNIST images
# * Note also ```x = x.view(-1, 1, 28, 28)``` in net definition in ```train.py``` program to shape incoming scoring requests.
# 4. Convert the NumPy array to list to make it into a built-in type.
# 5. Create a dictionary {"data", <list>} that can be converted to JSON string for web service requests.
# +
import keras
import random
import numpy as np
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_test = x_test.astype("float32") / 255
# Flatten each 28x28 image into a 784-element row vector for scoring.
x_test = x_test.reshape(len(x_test), -1)
# convert class vectors to binary class matrices
y_test = keras.utils.to_categorical(y_test, 10)
# +
# %matplotlib inline
import json
import matplotlib.pyplot as plt
# send a random row from the test set to score
# BUG FIX: np.random.randint's upper bound is exclusive, so the original
# `len(x_test)-1` could never select the last test image.
random_index = np.random.randint(0, len(x_test))
# Build the request payload with json.dumps instead of hand-concatenating
# str(list(...)) of numpy scalars; .tolist() yields plain Python floats.
input_data = json.dumps({"data": [x_test[random_index].tolist()]})
response = webservice.run(input_data)
# Sort class probabilities descending; the first item is the predicted label.
response = sorted(response[0].items(), key = lambda x: x[1], reverse = True)
print("Predicted label:", response[0][0])
plt.imshow(x_test[random_index].reshape(28,28), cmap = "gray")
# -
# You can also call the web service using a raw POST method against the web service
# +
import requests
# Raw POST against the scoring endpoint with the same JSON payload.
response = requests.post(url=webservice.scoring_uri, data=input_data,headers={"Content-type": "application/json"})
print(response.text)
# -
# ## Clean up
# You can delete the ACI deployment with a delete API call.
webservice.delete()
| extras/notebooks/train-and-deploy-keras-auto-logging/train-and-deploy-keras-auto-logging.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **집합**
# - 합집합(union) : |
# - 교집합(intersection): &
# - 차집합(difference): -
# - 여집합(complement)
# - 공집합(null set)
# - 부분집합(subset): issubset 메서드 또는 <= 연산자로 알 수 있음
# Basic set algebra with Python's built-in set type.
A = {1, 2, 3, 4}
B = set([2, 4, 6])
C = set([1, 2, 3])
D = set([2, 3, 4, 5, 6])
A.union(B)          # same as A | B
A | B
C.intersection(D)   # same as C & D
C & D
A.difference(B)     # same as A - B
A - B
# The empty set is a proper subset of every non-empty set.
empty_set = set([])
empty_set
empty_set < A
empty_set.intersection(A)   # intersection with the empty set is empty
empty_set.union(A)          # union with the empty set gives A back
# 연습문제 6.1.1 부분집합의 집합
coin2 = {'HH', 'HT', 'TH', 'TT'}
coin2
# +
# The coin-tossed-twice problem: the sample space {HH, HT, TH, TT} has
# 2**4 = 16 events; enumerate them all as frozensets (hashable, so they
# can later serve as dictionary keys).
A1 = frozenset([])
A2 = frozenset(['HH'])
A3 = frozenset(['HT'])
A4 = frozenset(['TH'])
A5 = frozenset(['TT'])
A6 = frozenset(['HH', 'HT'])
A7 = frozenset(['HH', 'TH'])
A8 = frozenset(['HH', 'TT'])
A9 = frozenset(['HT', 'TH'])
A10 = frozenset(['HT', 'TT'])
A11 = frozenset(['TH', 'TT'])
A12 = frozenset(['HH', 'HT', 'TH'])
A13 = frozenset(['HH', 'HT', 'TT'])
A14 = frozenset(['HH', 'TH', 'TT'])
A15 = frozenset(['HT', 'TH', 'TT'])
A16 = frozenset(['HH', 'HT', 'TH', 'TT'])
# BUG FIX: the original listed A15 twice and never included A16, so A_all
# had only 15 elements and was missing the full sample space.
A_all = set([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16])
A_all
# -
# 연습문제 6.1.2 집합 분배법칙
# Exercise 6.1.2: verify the set distributive laws.
A = set([1, 3, 5])
B = set([1, 2, 3])
C = set([2, 4, 6])
A | (B & C)
(A | B) & (A | C)   # equals A | (B & C)
A & (B | C)
(A & B) | (A & C)   # equals A & (B | C)
# Subsets are built as frozensets rather than sets so that they are
# hashable and can be used as dictionary keys.
# +
# The coin-tossed-once problem: all four events of the sample space {H, T}.
coin1 = {'H', 'T'}
A1 = frozenset([])
A2 = frozenset(['H'])
A3 = frozenset(['T'])
A4 = frozenset(['H', 'T'])
set([A1, A2, A3, A4])
# +
# Helper that builds the set of all subsets (the power set).
from itertools import chain, combinations
def set_of_subsets(omega):
    """Return the power set of *omega* as a set of frozensets."""
    subset_sizes = range(len(omega) + 1)
    all_subsets = chain.from_iterable(
        combinations(omega, size) for size in subset_sizes)
    return {frozenset(subset) for subset in all_subsets}
# -
# Power set of the two-toss sample space (16 events).
omega = {'HH', 'HT', 'TH', 'TT'}
set_of_subsets(omega)
# Probability assignment for the single-toss events A1..A4 defined above:
# a biased coin with P(H)=0.4 and P(T)=0.6.
P = {A1: 0, A2: 0.4, A3: 0.6, A4:1}
P
# The coin-tossed-twice problem: re-enumerate the 16 events so that a
# probability can be assigned to each one.
A1 = frozenset([])
A2 = frozenset(['HH'])
A3 = frozenset(['HT'])
A4 = frozenset(['TH'])
A5 = frozenset(['TT'])
A6 = frozenset(['HH', 'HT'])
A7 = frozenset(['HH', 'TH'])
A8 = frozenset(['HH', 'TT'])
A9 = frozenset(['HT', 'TH'])
A10 = frozenset(['HT', 'TT'])
A11 = frozenset(['TH', 'TT'])
A12 = frozenset(['HH', 'HT', 'TH'])
A13 = frozenset(['HH', 'HT', 'TT'])
A14 = frozenset(['HH', 'TH', 'TT'])
A15 = frozenset(['HT', 'TH', 'TT'])
A16 = frozenset(['HH', 'HT', 'TH', 'TT'])
# BUG FIX: the original list ended with `A15, A15`, dropping A16.
set([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16])
# Probability assignment for a fair coin tossed twice.
# BUG FIX: the original mapped A4 (={TH}) to 0.15, which is inconsistent
# with every pair event being assigned 0.5, and repeated the key A15 so
# the full sample space A16 never received probability 1.
P2 = {A1: 0,
      A2: 0.25, A3: 0.25, A4: 0.25, A5: 0.25,
      A6: 0.5, A7: 0.5, A8: 0.5, A9: 0.5, A10: 0.5, A11: 0.5,
      A12: 0.75, A13: 0.75, A14: 0.75, A15: 0.75,
      A16: 1}
P2
# +
# 연습문제 6.2.6
# P({1}) = 0.5 and P({6})} = 0 을 만족하도록 주사위의 확률을 할당하기
# +
# Exercise 6.2.6: assign probabilities to every event of a loaded die with
# P({1}) = 0.5 and P({6}) = 0; the remaining faces share the rest.
omega_dice = {1, 2, 3, 4, 5, 6}
S = set_of_subsets(omega_dice)
# Per-face weights, applied in face order so the floating-point sums match
# an additive assignment exactly.
face_weights = [(1, 0.5), (2, 0.1), (3, 0.1), (4, 0.2), (5, 0.1), (6, 0)]
P_dice = {}
for event in S:
    total = 0
    for face, weight in face_weights:
        if face in event:
            total += weight
    P_dice[event] = total
P_dice
# -
# 연습문제 6.2.7
#
# (1) 약속 날짜가 31일인가 아닌가를 결정하는 문제에서 확률을 할당해보자.
#
# (2) 사과와 오렌지만 파는 과일가게에서 손님이 선택한 과일이 어떤 과일인지 결정하는 문제에 대해 확률을 할당해보자.
#
# (3) 사과와 오렌지만 파는 과일가게에 과일이 100개가 진열되어 있고 이 중 70개가 사과, 30개가 오렌지이다. 손님이 선택한 과일이 어떤 과일인지 결정하는 문제에 대해 확률을 할당해보자.
# Exercise 6.2.7 (1): is the appointment on the 31st?  Seven days per year
# are numbered 31 (Jan, Mar, May, Jul, Aug, Oct, Dec), so P(True) = 7/365.
ome1 = {True, False}
SS = set_of_subsets(ome1)
P_1 = {}
for i in SS:
    probability = 0
    if True in i:
        probability += 7 / 365
    if False in i:
        probability += (365 - 7) / 365
    P_1[i] = probability
P_1
# +
# (2): apple-or-orange with no further information -- assign 1/2 to each.
ome2 = {'orange', 'apple'}
SS2 = set_of_subsets(ome2)
P_2 = {}
for i in SS2:
    probability = 0
    if 'orange' in i:
        probability += 0.5
    if 'apple' in i:
        probability += 0.5
    P_2[i] = probability
P_2
# -
# (3): 70 apples and 30 oranges on display -- assign empirical frequencies.
P_3 = {}
for i in SS2:
    probability = 0
    if 'orange' in i:
        probability += 30/100
    if 'apple' in i:
        probability += 70/100
    P_3[i] = probability
P_3
# ### 확률분포함수
# - **확률질량함수(pmf)**: 단순사건에 대한 확률만 정의하는 함수, $P(a, b)$
# - **누적분포함수(cdf)**: 시작점을 음의 무한대로 통일, $F(x)$
# - **확률밀도함수(pdf)**: cdf의 기울기. 특정 구간의 확률이 다른 구간에 비해 얼마나 높은지를 나타냄, 값 자체가 확률이 아니고 면적이 확률을 나타냄, $p(x)$
# * 확률질량함수(pmf)
# 주사위 눈금 6이 많이 나오도록 조작된 주사위에 대한 확률질량함수
# BUG FIX: numpy and matplotlib are never imported earlier in this notebook,
# so this cell raised NameError; import them before first use.
import numpy as np
import matplotlib.pyplot as plt

# Probability mass function of a die loaded so that face 6 has P = 0.5.
x = np.arange(1, 7)
y = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
plt.stem(x, y)
plt.title('조작된 주사위의 확률질량함수')
plt.xlabel('숫자면'); plt.ylabel('확률')
plt.xlim(0, 7); plt.ylim(-0.01, 0.6)
plt.xticks(np.arange(6) + 1)
plt.show()
# +
# 연습문제 6.4.3
# 0-180도 사이에 화살이 2배 더 잘 박히도록 조작된 원반
# 확률함수를 기술하시오
# -
def P(a, b):
    """Probability that the dart lands in the angular interval [a, b].

    The wheel is rigged so the density on [0, 180) is twice the density on
    [180, 360]: p = 2/(3*180) vs 1/(3*180).  Angles outside [0, 360] are
    clamped to the wheel's range.

    Raises ValueError if a > b.
    """
    if a > b:
        # BUG FIX: the original only printed a warning and then went on to
        # return a meaningless (possibly negative) value; fail loudly.
        raise ValueError("a must be smaller than b")
    # Clamp to the wheel's range.  BUG FIX: the original used np.maximum /
    # np.minimum although numpy is never imported in this notebook; the
    # builtins behave identically for scalars.
    a = max(0, a)
    b = min(b, 360)
    if b < 180:
        return 2/3 * ((b-a)/180)
    else:
        if a < 180:
            return 1/3 * ((b-180)/180) + 2/3 * ((180-a)/180)
        else:
            return 1/3 * ((b-a)/180)
P(0, 180)
# * 누적분포함수(cdf)
# Cumulative distribution function of the *fair* wheel: F(x) = x/360,
# clamped to 0 below 0 and to 1 above 360.
# NOTE(review): np/plt are not imported anywhere in this notebook chunk;
# they must come from earlier session state -- confirm.
t = np.linspace(-100, 500, 100)
F = t / 360
F[t < 0] = 0
F[t > 360] = 1
plt.plot(t, F, 'g--')
plt.xticks([0, 180, 360])
plt.title('누적분포함수')
plt.xlabel('$x$ (도)'); plt.ylabel('$F(x)$')
plt.show()
# +
# Exercise 6.4.4: CDF F(x) of the wheel rigged so darts land twice as
# often in the 0-180 degree half.
# -
t = np.linspace(-50, 400, 1000)
# +
# Piecewise CDF: slope 1/270 on [0, 180), slope 1/540 on [180, 360].
F = lambda x: 0 if x < 0 else 1 if x > 360 else x / 270 if x < 180 else (2/3 + (x-180)/(3*180))
# F is scalar-valued, so map it over the grid element-wise.
Fs = np.array(list(map(F, t)))
plt.plot(t, Fs, 'm')
plt.title('누적분포함수')
plt.xticks([0, 180, 360])
plt.show()
# +
def F(t):
    """Piecewise CDF of the rigged wheel (scalar input only).

    Slope 1/270 on [0, 180), slope 1/540 on [180, 360]; 0 below 0 and 1
    above 360.
    """
    if t < 0:
        return 0
    if t > 360:
        return 1
    if t < 180:
        return t/270
    return (t-180)/(180*3) + 2/3
t = np.linspace(-50, 400, 1000)
# "Why doesn't this run?" (translated from the original Korean comment) --
# NOTE(review): F compares a scalar, so calling F(t) on the NumPy array
# above raises "truth value of an array is ambiguous"; apply it
# element-wise instead, e.g. np.array(list(map(F, t))) as in the prior cell.
# -
# * 확률밀도함수(pdf)
# +
# Probability density function obtained numerically as the gradient of the
# fair-wheel CDF.
# NOTE(review): np.gradient with the default spacing returns per-sample
# differences, not a density scaled by dx -- fine for a qualitative plot.
t = np.linspace(-100, 500, 1000)
F = t/360
F[t < 0] = 0
F[t > 360] = 1
p = np.gradient(F)
plt.plot(t, p)
plt.ylim(-0.0001, p.max()*1.1)
plt.xticks([0, 180, 360])
plt.title('확률밀도함수')
plt.show()
# +
# Exercise 6.4.5: density p(x) of the wheel rigged 2:1 toward 0-180 degrees.
# -
F = lambda x: 0 if x < 0 else 1 if x > 360 else x / 270 if x < 180 else (2/3 + (x-180)/(3*180))
Fs = np.array(list(map(F, t)))
Fp = np.gradient(Fs)
plt.plot(t, Fp)
plt.xticks([0, 180, 360])
plt.title('확률밀도함수')
plt.show()
# **조건부확률**
#
# - $P(A|B)$
# - 사건 B가 발생한 경우의 사건 A의 확률
# - 표본이 B에 속한다는 사실을 알게 되었을 때, 이 표본이 A에 속한다는 사실의 신뢰도가 어떻게 변하는지 알려줌
# - $P(A|B) = {P(A, B) \over P(B)}$
#
# **B**|**A**
# ---|---
# 가정|결론
# 원인|결과
# 근거|추론
# ### pgmpy package
# - **`JointProbabilityDistribution(variables, cardinality, values)`**: 결합확률 모형을 만드는데 사용하는 클래스
# - variables: 확률변수의 이름 문자열 리스트
# - cardinality: 각 확률변수의 표본 혹은 배타적 사건의 수 리스트
# - values: 확률변수 모든 표본(조합)에 대한 (결합)확률값의 리스트
#
# - **`marginal_distribution(values, inplace=False)`**: 결합확률로부터 주변확률을 계산하는 JPD 클래스의 메서드
# - values: 주변확률을 **구할 확률변수**의 이름 문자열 리스트
# - inplace: 객체 자신을 변화시키는지의 여부
#
# - **`marginalize(values, inplace=False)`**: 결합확률로부터 주변확률을 계산하는 JPD 클래스의 메서드
# - values: 주변확률을 구하기 위해 **없앨 확률변수**의 이름 문자열 리스트
#
# - **`conditional_distribution(values, inplace=False)`**: 조건부 확률을 계산하는 JPD 클래스의 메서드
# - values: 주변확률을 구할 확률변수의 이름 문자열과 값을 묶은 **튜플의 리스트**
#
# - **`check_independence(['X'], ['Y'])`**: 두 확률변수의 독립 여부
#
# +
from pgmpy.factors.discrete import JointProbabilityDistribution as JPD
px = JPD(['X'], [2], np.array([12, 8])/20) # random variable X = gender (0: male, 1: female)
print(px)
# -
py = JPD(['Y'], [2], np.array([10, 10])/20) # random variable Y = hair length (0: long, 1: short)
print(py)
# Joint probability of the random variables X and Y.
pxy = JPD(['X', 'Y'], [2, 2], np.array([3, 9, 7, 1])/20)
print(pxy)
# A second joint distribution (this one factors into its marginals).
pxy2 = JPD(['X', 'Y'], [2, 2], np.array([6, 6, 4, 4])/20)
print(pxy2)
# +
# Marginal distributions.
# -
# Keep X: marginal_distribution names the variable to retain.
pmx = pxy.marginal_distribution(['X'], inplace=False)
print(pmx)
# Remove X: marginalize names the variable to sum out, leaving Y.
pmy = pxy.marginalize(['X'], inplace=False)
print(pmy)
# +
# Conditional probabilities.
# -
# Distribution of Y given that X=0 is known to have occurred.
py_on_x0 = pxy.conditional_distribution([('X', 0)], inplace=False)
print(py_on_x0)
# Distribution of Y given X=1.
py_on_x1 = pxy.conditional_distribution([('X', 1)], inplace=False)
print(py_on_x1)
# Conditional distributions of X given each value of Y.
px_on_y0 = pxy.conditional_distribution([('Y', 0)], inplace=False)
px_on_y1 = pxy.conditional_distribution([('Y', 1)], inplace=False)
print(px_on_y0, px_on_y1, sep='\n\n')
pxy.check_independence(['X'], ['Y'])
# The product of the marginals differs from pxy, confirming dependence.
print(px * py)
print(pxy)
# +
# 연습문제 6.5.8
# pxy2에 대한 주변확률모형, 조건부확률모형, 독립여부확인
# -
# Exercise 6.5.8: marginals, conditionals and independence check for pxy2.
print(pxy2)
px2 = pxy2.marginalize(['Y'], inplace=False)
print(px2)
py2 = pxy2.marginalize(['X'], inplace=False)
print(py2)
py2_on_x0 = pxy2.conditional_distribution([('X', 0)], inplace=False)
print(py2_on_x0)
py2_on_x1 = pxy2.conditional_distribution([('X', 1)], inplace=False)
print(py2_on_x1)
px2_on_y0 = pxy2.conditional_distribution([('Y', 0)], inplace=False)
px2_on_y1 = pxy2.conditional_distribution([('Y', 1)], inplace=False)
print(px2_on_y0, px2_on_y1, sep='\n\n')
pxy2.check_independence(['X'], ['Y'])
# The product of the marginals reproduces pxy2, confirming independence.
print(px2 * py2, pxy2, sep='\n\n')
# ### 베이즈정리
# $P(A|B) = {P(B|A)P(A) \over P(B)}$
#
# - $P(A|B)$: 사후확률(posterior). 사건 B가 발생한 후 갱신된 사건 A의 확률
# - $P(A)$: 사전확률(prior). 사건 B가 발생하기 전에 가지고 있던 사건 A의 확률
# - $P(B|A)$: 가능도(likelihood). 사건 A가 발생한 경우 사건 B의 확률
# - $P(B)$: 정규화상수 또는 증거. 확롤의 크기 조정
# - 데이터라는 조건이 주어졌을 때 조건부확률을 구하는 공식
# - 사전확률값이 데이터가 주어지면서 어떻게 변하는지
# - **`BayesianModel(variables)`**
# - 베이즈 정리에 적용하는 클래스
# - variables: 확률모형이 포함하는 확률변수 이름 문자열의 리스트
# - 변수제거법을 사용한 추정 제공
#
# - `add_cpds()`: 조건부확률을 추가
# - `check_model()`: 모형이 정상적인지 확인. True면 정상적인 모형
# -
# - **`TabularCPD(variable, variable_card, value, evidence, evidence_card)`**
# - 베이즈정리를 적용하기 전에, 사전확률과 가능도를 구하는 클래스
# - variable: 확률변수 이름 문자열
# - variable_card: 확률변수가 가질 수 있는 경우의 수
# - value: 조건부확률 배열. 하나의 열이 동일조건을 뜻하므로 합이 1이 되어야 함.
# - evidence: 조건이 되는 확률변수 이름 문자열의 리스트
# - evidence_card: 조건이 되는 확률변수가 가질 수 있는 경우의 수 리스트
#
# (cf) 사후확률계산
# - `query(variables, evidences)`: `VariableElimination`클래스의 객체의 메서드
# - variable: 사후확률을 계산할 확률변수의 이름 리스트
# - evidences: 조건이 되는 확률변수의 값을 나타내는 딕셔너리
from pgmpy.factors.discrete import TabularCPD
# Prior: X=0 no disease, X=1 disease (prevalence 0.2%).
cpd_X = TabularCPD('X', 2, [[1-0.002, 0.002]])
print(cpd_X)
# +
# Likelihood: Y=0 negative test result, Y=1 positive test result.
# Implements the conditional probability P(Y|X); each column sums to 1.
cpd_Y_on_X = TabularCPD('Y', 2, np.array([[0.95, 0.01], [0.05, 0.99]]), ['X'], [2])
print(cpd_Y_on_X)
# +
# Build the BayesianModel with edge X -> Y and attach both CPDs.
from pgmpy.models import BayesianModel
model = BayesianModel([('X', 'Y')])
model.add_cpds(cpd_X, cpd_Y_on_X)
model.check_model()
# +
# Posterior computation via variable elimination.
# +
from pgmpy.inference import VariableElimination
inference = VariableElimination(model)
posterior = inference.query(['X'], evidence={'Y': 1})
# NOTE(review): recent pgmpy versions return the factor directly from
# query(), in which case `posterior['X']` must become just `posterior` --
# verify against the installed pgmpy version.
print(posterior['X'])
| math/0208 chap6_probability .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Scraping Text and Links off of the Internet
#
# This notebook outlines scraping text from the
# web documents. We detail "spidering" or walking
# through hyperlinks to build samples of online content, and in the following notebooks we will discuss using APIs,
# Application Programming Interfaces, provided by webservices to access their
# content, as well as using doc, txt and pdf files.
# Along the way, we will use regular expressions, outlined in the
# reading, to remove unwanted formatting and ornamentation. Finally, we discuss
# various text encodings, filtering and data structures in which text can be
# placed for analysis.
#
# For this notebook we will be using the following packages:
# +
#All these packages need to be installed from pip
import requests #for http requests
import bs4 #called `beautifulsoup4`, an html parser
import pandas #gives us DataFrames
import docx #reading MS doc files, install as `python-docx`
#Stuff for pdfs
#Install as `pdfminer2`
import pdfminer.pdfinterp
import pdfminer.converter
import pdfminer.layout
import pdfminer.pdfpage
#These come with Python
import re #for regexs
import urllib.parse #For joining urls
import io #for making http requests look like files
import json #For Tumblr API responses
import os.path #For checking if files exist
import os #For making directories
# -
# We will also be working on the following files/urls
wikipedia_base_url = 'https://en.wikipedia.org'
wikipedia_content_analysis = 'https://en.wikipedia.org/wiki/Content_analysis'
content_analysis_save = 'wikipedia_content_analysis.html'
example_text_file = 'sometextfile.txt'
# # Scraping
#
# Before we can start analyzing content we need to obtain it. Sometimes it will be
# provided to us from a pre-curated text archive, but sometimes we will need to
# download it. As a starting example we will attempt to download the wikipedia
# page on content analysis. The page is located at [https://en.wikipedia.org/wiki/
# Content_analysis](https://en.wikipedia.org/wiki/Content_analysis) so lets start
# with that.
#
# We can do this by making an HTTP GET request to that url, a GET request is
# simply a request to the server to provide the contents given by some url. The
# other request we will be using in this class is called a POST request and
# requests the server to take some content we provide. While the Python standard
# library does have the ability do make GET requests we will be using the
# [_requests_](http://docs.python-requests.org/en/master/) package as it is _'the
# only Non-GMO HTTP library for Python'_...also it provides a nicer interface.
#wikipedia_content_analysis = 'https://en.wikipedia.org/wiki/Content_analysis'
# Issue an HTTP GET; the displayed Response shows only the status code
# (200 = OK), not the page contents.
requests.get(wikipedia_content_analysis)
# `'Response [200]'` means the server responded with what we asked for. If you get
# another number (e.g. 404) it likely means there was some kind of error, these
# codes are called HTTP response codes and a list of them can be found
# [here](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes). The response
# object contains all the data the server sent including the website's contents
# and the HTTP header. We are interested in the contents which we can access with
# the `.text` attribute.
# Repeat the GET, this time keeping the Response object so we can inspect it.
wikiContentRequest = requests.get(wikipedia_content_analysis)
# .text is the response body decoded to str; show only the first 1000
# characters since the full page is long.
print(wikiContentRequest.text[:1000])
# This is not what we were looking for, because it is the start of the HTML that
# makes up the website. This is HTML and is meant to be read by computers. Luckily
# we have a computer to parse it for us. To do the parsing we will use [_Beautiful
# Soup_](https://www.crummy.com/software/BeautifulSoup/) which is a better parser
# than the one in the standard library.
# But before we proceed to Beautiful Soup, a digression about Python syntax, especially about objects and functions.
# For those who are not familiar with the syntax of Python (or, if you're familiar with R programming), you might wonder what requests.get or wikiContentRequest.text mean. To understand this, you need to first understand what objects are. You may have heard that Python is an object oriented programming language (unlike a procedure oriented programming language, an example of which is R). An object is a set of variables (or, data) and functions into which you pass your data. So, in object oriented programming languages, like Python, variables and functions are bundled into objects.
#
# For example, let's look at wikiContentRequest. We use dir() function, which returns the list of attributes and functions of objects.
dir(wikiContentRequest)
# There's 'text' here. We used 'wikiContentRequest.text' to access 'text.' In other words, we use .(dot notation) to access functions from objects. wikiContentRequest has a set of functions, as shown above, and we used 'wikiContentRequest.text' to access one of them. By the way, dot notations do not necessarily refer to functions--it refers to anything that the entity contains.
#
#
# Moving on to the next step: BeautifulSoup, a Python library which extracts data from HTML and XML, and transforms HTML files into Python objects.
# Parse the raw HTML into a navigable tree; 'html.parser' selects the
# parser backend that ships with Python (no extra install needed).
wikiContentSoup = bs4.BeautifulSoup(wikiContentRequest.text, 'html.parser')
# .text on the soup strips all tags, leaving only the page's visible text.
print(wikiContentSoup.text[:200])
# This is better but there's still random whitespace and we have more than just
# the text of the article. This is because what we requested is the whole webpage,
# not just the text for the article.
#
# We want to extract only the text we care about, and in order to do this we will
# need to inspect the html. One way to do this is simply to go to the website with
# a browser and use its inspection or view source tool. If javascript or other
# dynamic loading occurs on the page, however, it is likely that what Python
# receives is not what you will see, so we will need to inspect what Python
# receives. To do this we can save the html `requests` obtained.
# +
#content_analysis_save = 'wikipedia_content_analysis.html'
# Save the raw HTML to disk so it can be opened in a browser or editor.
# mode='w' creates/truncates the file; the with-block closes it automatically.
with open(content_analysis_save, mode='w', encoding='utf-8') as f:
    f.write(wikiContentRequest.text)
# -
# open() is a function which literally opens and returns the file. This function has multiple modes, and, here, we used mode 'w', which means: open a file for writing. And then, we use the 'write' function to write to the file (content_analysis_save) that we created using open(content_analysis_save, mode='w', encoding='utf-8'). What did we write to this file? The text we got from wikiContentRequest.text
# Now let's open the file (`wikipedia_content_analysis.html`) we just created with
# a web browser. It should look sort of like the original but without the images
# and formatting.
#
# As there is very little standardization on structuring webpages, figuring out
# how best to extract what you want is an art. Looking at this page it looks like
# all the main textual content is inside `<p>`(paragraph) tags within the `<body>`
# tag.
# Collect every <p> tag inside <body>; these hold the article's main prose.
contentPTags = wikiContentSoup.body.findAll('p')
# Preview the plain text of the first three paragraphs.
for pTag in contentPTags[:3]:
    print(pTag.text)
# Another excursion for those who are not familiar with programming: the for loop. A for loop is used to iterate over a sequence. "contentPTags" contains multiple paragraphs, each of which starts and ends with `<p>`. What "for pTag in contentPTags[:3]" does here is: take each paragraph in contentPTags, which, here, we limited to the first three using contentPTags[:3], and then print each paragraph. So, we have three paragraphs. By the way, you can insert `<p>` in a Jupyter notebook!
# We now have all the text from the page, split up by paragraph. If we wanted to
# get the section headers or references as well it would require a bit more work,
# but is doable.
#
# There is one more thing we might want to do before sending this text to be
# processed, remove the references indicators (`[2]`, `[3]` , etc). To do this we
# can use a short regular expression (regex).
# +
#Strip the bracketed citation markers (e.g. [2]) from every paragraph.
#The pattern is a raw string so the backslashes reach the regex engine
#unmodified; without the r prefix it would read '\\[\\d+\\]'.
refMarker = re.compile(r'\[\d+\]')
contentParagraphs = [refMarker.sub('', pTag.text) for pTag in contentPTags]
#Wrap the cleaned paragraphs in a one-column DataFrame
contentParagraphsDF = pandas.DataFrame({'paragraph-text' : contentParagraphs})
print(contentParagraphsDF)
# -
# Since we learned how to do a for loop, you might get what we did here: using contentParagraphs = [], we made an empty list; and then, for each paragraph in contentPTags, we substituted every match of `\[\d+\]` with '', i.e., removed every bracketed reference marker, and then appended each paragraph (now without the markers) to the empty list. As we can see, we have a dataframe, each row of which is each paragraph of contentPTags, without reference indicators.
#
# By the way, what does [\d+\] mean? If you are not familiar with regex, it is a way of specifying searches in text.
# A regex engine takes in the search pattern, in the above case `'\[\d+\]'` and
# some string, the paragraph texts. Then it reads the input string one character
# at a time checking if it matches the search. Here the regex `'\d'` matches
# number characters (while `'\['` and `'\]'` capture the braces on either side).
# Now we have a `DataFrame` containing all relevant text from the page ready to be processed
# A minimal regex example: '\d' matches exactly one digit character.
findNumber = r'\d'
# re.search scans the string and returns a Match for the first hit
# (or None if nothing matches).
regexResults = re.search(findNumber, 'not a number, not a number, numbers 2134567890, not a number')
# Displaying the Match object shows the matched span and text.
regexResults
# In Python the regex package (`re`) usually returns `Match` objects (you can have
# multiple pattern hits in a single `Match`), to get the string that matched our
# pattern we can use the `.group()` method, and as we want the first one we will
# ask for the 0'th group.
print(regexResults.group(0))
# That gives us the first number, if we wanted the whole block of numbers we can
# add a wildcard `'+'` which requests 1 or more instances of the preceding
# character.
# '+' makes the preceding token repeat: one or more digits in a row,
# so the whole contiguous block of numbers is matched.
findNumbers = r'\d+'
regexResults = re.search(findNumbers, 'not a number, not a number, numbers 2134567890, not a number')
print(regexResults.group(0))
# Now we have the whole block of numbers, there are a huge number of special
# characters in regex, for the full description of Python's implementation look at
# the [re docs](https://docs.python.org/3/library/re.html) there is also a short
# [tutorial](https://docs.python.org/3/howto/regex.html#regex-howto).
#
# # Spidering
#
# What if we want to get a bunch of different pages from wikipedia. We would
# need to get the url for each of the pages we want. Typically, we want pages that
# are linked to by other pages and so we will need to parse pages and identify the
# links. Right now we will be retrieving all links in the body of the content
# analysis page.
#
# To do this we will need to find all the `<a>` (anchor) tags with `href`s
# (hyperlink references) inside of `<p>` tags. `href` can have many
# [different](http://stackoverflow.com/questions/4855168/what-is-href-and-why-is-
# it-used) [forms](https://en.wikipedia.org/wiki/Hyperlink#Hyperlinks_in_HTML) so
# dealing with them can be tricky, but generally, you will want to extract
# absolute or relative links. An absolute link is one you can follow without
# modification, while a relative link requires a base url that you will then
# append. Wikipedia uses relative urls for its internal links: below is an example
# for dealing with them.
# +
#wikipedia_base_url = 'https://en.wikipedia.org'
#Collect (absolute-url, paragraph-number, link-text) triples for every
#internal wikipedia link found inside the article's paragraphs.
otherPAgeURLS = []
#we only want hrefs that link to wiki pages
wikiLinkPattern = re.compile('/wiki/')
for paragraphNum, pTag in enumerate(contentPTags):
    #class_=False skips decorated anchors; the href filter keeps only
    #anchors whose target matches the /wiki/ pattern
    for aTag in pTag.findAll('a', href=wikiLinkPattern, class_=False):
        #urljoin resolves the relative href against wikipedia_base_url.
        #Storing a structured tuple lets us use tuple expansion (*) later.
        absoluteURL = urllib.parse.urljoin(wikipedia_base_url, aTag.get('href'))
        otherPAgeURLS.append((absoluteURL, paragraphNum, aTag.text))
print(otherPAgeURLS[:10])
# -
print(contentPTags)
# Another excursion: Why do we use enumerate() here? enumerate() takes a collection, counts through it, and returns an enumerate object yielding both the numbers and the items of the collection. For example, contentPTags (the collection we used here) is comprised of paragraphs. We want the paragraph number of each paragraph. And this is what enumerate() does: it returns the paragraph number and the paragraph.
# We will be adding these new texts to our DataFrame `contentParagraphsDF` so we
# will need to add 2 more columns to keep track of paragraph numbers and sources.
# +
# Every existing row came from the same seed page, so 'source' is that url
# repeated once per row; list multiplication builds the column.
contentParagraphsDF['source'] = [wikipedia_content_analysis] * len(contentParagraphsDF['paragraph-text'])
# 'paragraph-number' records each paragraph's position within its page.
contentParagraphsDF['paragraph-number'] = range(len(contentParagraphsDF['paragraph-text']))
contentParagraphsDF
# -
# Then we can add two more columns to our `Dataframe` and define a function to
# parse
# each linked page and add its text to our DataFrame.
# +
# Placeholder columns for rows scraped from linked pages; None marks rows
# that originate from the seed page itself.
contentParagraphsDF['source-paragraph-number'] = [None] * len(contentParagraphsDF['paragraph-text'])
contentParagraphsDF['source-paragraph-text'] = [None] * len(contentParagraphsDF['paragraph-text'])
def getTextFromWikiPage(targetURL, sourceParNum, sourceText):
    """Download a wikipedia page and return its paragraphs as a DataFrame.

    Each row records a paragraph's text and position on the target page,
    plus the paragraph number and link text on the page that linked to it.
    """
    #Accumulate per-column lists first, then build the DataFrame in one shot
    columns = {
        'source' : [],
        'paragraph-number' : [],
        'paragraph-text' : [],
        'source-paragraph-number' : [],
        'source-paragraph-text' : [],
    }
    response = requests.get(targetURL)
    parsedPage = bs4.BeautifulSoup(response.text, 'html.parser')
    #enumerate supplies each paragraph's position on the page
    for parNum, pTag in enumerate(parsedPage.body.findAll('p')):
        #same citation-stripping regex used for the seed page
        columns['paragraph-text'].append(re.sub(r'\[\d+\]', '', pTag.text))
        columns['paragraph-number'].append(parNum)
        columns['source'].append(targetURL)
        columns['source-paragraph-number'].append(sourceParNum)
        columns['source-paragraph-text'].append(sourceText)
    return pandas.DataFrame(columns)
# -
# And run it on our list of link tags
# `DataFrame.append` was deprecated in pandas 1.4 and removed in pandas 2.0;
# `pandas.concat` is the supported replacement and produces the same frame.
# Note: ignore_index=True RESETS the row index after each concatenation
# (the original comment here stated the opposite), giving a continuous
# 0..n-1 index in the combined DataFrame.
for urlTuple in otherPAgeURLS[:3]:
    # Tuple expansion passes (url, paragraphNum, linkText) as the three
    # positional arguments of getTextFromWikiPage.
    contentParagraphsDF = pandas.concat(
        [contentParagraphsDF, getTextFromWikiPage(*urlTuple)],
        ignore_index=True,
    )
contentParagraphsDF
# So we see here how we went from a source website to a pandas dataframe with the information we need - we can build on such a DF by also adding URL data!
| notebooks/chapter_3/web_scraping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python Django
# language: python
# name: django
# ---
# +
# colorlog 모듈을 설치한 뒤에 실행해주셔야 합니다
# 오류에서 확인하기 힘듭니다.
"""로그 전용 모듈"""
import logging.config
import settings
# -
def get_my_logger(name):
    """Apply the project's logging configuration and return a named logger.

    Installs the dict-style configuration from settings.LOGGING_CONF via
    logging.config.dictConfig, then returns logging.getLogger(name) so
    callers get consistently configured output.
    """
    logging.config.dictConfig(settings.LOGGING_CONF)
    return logging.getLogger(name)
# +
# Module-level logger named after this module.
logger = get_my_logger(__name__)
if __name__ == '__main__':
    """my_logging를 사용해봅니다."""
    # Emit one message at each severity level to exercise the configuration;
    # which messages actually appear depends on the configured threshold.
    logger.debug('DEBUG 레벨입니다.')
    logger.info('INFO 레벨입니다.')
    logger.warning('WARNING 레벨입니다.')
    logger.error('ERROR 레벨입니다.')
    logger.critical('CRITICAL 레벨입니다.')
# -
| Web_Crawling/python-crawler/chapter_5/log_grade.ipynb |