code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a id="title_ID"></a>
# # JWST Pipeline Validation Testing Notebook: MIRI LRS Slit spectroscopy
# # Step: flat_field() in Spec2Pipeline
#
# <span style="color:red"> **Instruments Affected**</span>: MIRI
#
# ### Table of Contents
# <div style="text-align: left">
#
# <br> [Imports](#imports_ID) <br> [Introduction](#intro_ID) <br> [Get Documentaion String for Markdown Blocks](#markdown_from_docs) <br> [Loading Data](#data_ID) <br> [Run JWST Pipeline](#pipeline_ID) <br> [Create Figure or Print Output](#residual_ID) <br> [About This Notebook](#about_ID) <br>
#
# </div>
# Create a temporary directory to hold notebook output, and change the working directory to that directory.
from tempfile import TemporaryDirectory
import os

# Hold all notebook output in a throwaway directory; it is removed when
# `data_dir` is garbage-collected or the kernel shuts down.
data_dir = TemporaryDirectory()
os.chdir(data_dir.name)
# <a id="imports_ID"></a>
# # Imports
# List the library imports and why they are relevant to this notebook.
#
#
# * os for simple operating system functions
# * gwcs.wcstools for bounding box operations
# * astropy.io for opening fits files
# * inspect to get the docstring of our objects.
# * IPython.display for printing markdown output
# * jwst.datamodels for building model for JWST Pipeline
# * jwst.module.PipelineStep is the pipeline step being tested
# * matplotlib.pyplot to generate plot
# * matplotlib.patches to plot shapes
# * crds for retrieving a reference file
# * ci_watson for data retrieval from artifactory
#
#
# [Top of Page](#title_ID)
# +
from astropy.io import fits
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import numpy as np
import os
from IPython.display import Markdown
from jwst.pipeline import Detector1Pipeline, Spec2Pipeline, collect_pipeline_cfgs
from jwst.background import BackgroundStep
from jwst.assign_wcs import AssignWcsStep
from jwst.extract_2d import Extract2dStep
from jwst.flatfield import FlatFieldStep
from gwcs.wcstools import grid_from_bounding_box
import crds
from jwst import datamodels
from ci_watson.artifactory_helpers import get_bigdata
# -
# <a id="intro_ID"></a>
# # Introduction
#
#
# For this test we are using the flat fielding step for MIRI LRS slit data. For more information on the pipeline step visit the links below.
#
# Step description: https://jwst-pipeline.readthedocs.io/en/latest/jwst/flatfield/main.html#imaging-and-non-nirspec-spectroscopic-data
#
# Pipeline code: https://github.com/spacetelescope/jwst/tree/master/jwst/flat_field
#
#
#
# ### Defining Term
# Here is where you will define terms or acronyms that may not be known to a general audience (i.e. a new employee at the institute or an external user). For example:
#
# JWST: James Webb Space Telescope
# MIRI: Mid-Infrared Instrument
# LRS: Low Resolution Spectrometer
#
#
# [Top of Page](#title_ID)
# # Run Pipeline
#
# We are using here a simulated LRS slit observation, generated with MIRISim v2.3.0 (as of Dec 2020). It is a simple along-slit-nodded observation of a point source (the input was modelled on the flux calibrator BD+60). LRS slit observations cover the full array.
#
#
# [Top of Page](#title_ID)
# +
# Retrieve the two nodded LRS slit exposures (MIRISim simulations) from the
# Artifactory data store. Requires network access and ci_watson configuration.
Slitfile1 = get_bigdata('jwst_validation_notebooks',
                        'validation_data',
                        'calwebb_spec2',
                        'spec2_miri_test',
                        'miri_lrs_slit_pt_nod1_v2.3.fits')

Slitfile2 = get_bigdata('jwst_validation_notebooks',
                        'validation_data',
                        'calwebb_spec2',
                        'spec2_miri_test',
                        'miri_lrs_slit_pt_nod2_v2.3.fits')

files = [Slitfile1, Slitfile2]
# -
# ### CalDetector1
#
# We first run the data through the Detector1 pipeline.
# Run each exposure through the Detector1 pipeline and record which nod
# position it came from (inferred from the downloaded file name).
det1_out = []
nods = []
for fname in files:
    result = Detector1Pipeline.call(fname, save_results=True)
    det1_out.append(result)
    nods.append('nod1' if 'nod1' in fname else 'nod2')
print(det1_out)
print(nods)
# ### CalSpec2
#
# Next up are the CalSpec2 steps up to flat fielding.
# Assign a WCS (and the LRS slit bounding box) to each nod exposure.
awcs_nod1 = AssignWcsStep.call(det1_out[0], save_results=True)
awcs_nod2 = AssignWcsStep.call(det1_out[1], save_results=True)
# Let's check the location of the bounding box that has been attached to the data.
# There is a dedicated notebook for this step, but top-level checks are:
# * the bounding box coordinates should be the same for both nods
# * the spectral trace should fall within the bounding box boundary
# +
# Width/height of each nod's bounding box, taken from the attached WCS.
bbox_w_n1 = awcs_nod1.meta.wcs.bounding_box[0][1] - awcs_nod1.meta.wcs.bounding_box[0][0]
bbox_ht_n1 = awcs_nod1.meta.wcs.bounding_box[1][1] - awcs_nod1.meta.wcs.bounding_box[1][0]
print('Model bbox for nod 1 = {0} '.format(awcs_nod1.meta.wcs.bounding_box))
print('Model: Height x width of bounding box for nod 1 = {0} x {1} pixels'.format(bbox_ht_n1, bbox_w_n1))

bbox_w_n2 = awcs_nod2.meta.wcs.bounding_box[0][1] - awcs_nod2.meta.wcs.bounding_box[0][0]
bbox_ht_n2 = awcs_nod2.meta.wcs.bounding_box[1][1] - awcs_nod2.meta.wcs.bounding_box[1][0]
# BUG FIX: this print previously reported awcs_nod1's bounding box for nod 2.
print('Model bbox for nod 2 = {0} '.format(awcs_nod2.meta.wcs.bounding_box))
print('Model: Height x width of bounding box for nod 2 = {0} x {1} pixels'.format(bbox_ht_n2, bbox_w_n2))
# +
# Plot both nods with the bounding box overlaid in red; the spectral trace
# should fall inside the rectangle for each nod.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=[12,10])

bbox1 = Rectangle((awcs_nod1.meta.wcs.bounding_box[0][0],awcs_nod1.meta.wcs.bounding_box[1][0]), bbox_w_n1, bbox_ht_n1, angle=0.0, ec='r', lw=2, fc='None')
ax[0].imshow(awcs_nod1.data, origin='lower', aspect='equal', interpolation='None')
ax[0].set_title('Nod 1')
ax[0].add_patch(bbox1)
# zoom in on the LRS slit region of the full array
ax[0].set_xlim([200,400])
ax[0].set_ylim([0,400])

bbox2 = Rectangle((awcs_nod2.meta.wcs.bounding_box[0][0],awcs_nod2.meta.wcs.bounding_box[1][0]), bbox_w_n2, bbox_ht_n2, angle=0.0, ec='r', lw=2, fc='None')
ax[1].imshow(awcs_nod2.data, origin='lower', aspect='equal', interpolation='None')
ax[1].set_title('Nod 2')
ax[1].add_patch(bbox2)
ax[1].set_xlim([200,400])
ax[1].set_ylim([0,400])

print(awcs_nod1.meta.filename)
# -
# Each nod serves as the background exposure for the other.
bgsub_1 = [awcs_nod2.meta.filename]
bgsub_2 = [awcs_nod1.meta.filename]
# CONSISTENCY FIX: pass the boolean True for save_results (as done for the
# other Step.call invocations in this notebook), not the string 'True'.
bgr_nod1 = BackgroundStep.call(awcs_nod1, bgsub_1, save_results=True)
bgr_nod2 = BackgroundStep.call(awcs_nod2, bgsub_2, save_results=True)
# Check the output of the background subtraction. We should now have in each exposure a positive and negative spectral trace, with the sides of pos-neg switched between them.
# +
# Display the background-subtracted nods: each should show one positive and
# one negative trace, with the pos/neg sides swapped between the nods.
fig, ax = plt.subplots(ncols=2, nrows=1, figsize=[12,10])

im1 = ax[0].imshow(bgr_nod1.data, origin='lower', aspect='equal', interpolation='None')
ax[0].set_title('Nod 1')
ax[0].set_xlim([200,400])
ax[0].set_ylim([0,400])
cbar1= fig.colorbar(im1, extend='both', shrink=0.9, ax=ax[0])

im2 = ax[1].imshow(bgr_nod2.data, origin='lower', aspect='equal', interpolation='None')
ax[1].set_title('Nod 2')
ax[1].set_xlim([200,400])
ax[1].set_ylim([0,400])
cbar2= fig.colorbar(im2, extend='both', shrink=0.9, ax=ax[1])
# -
# Run extract_2d and the flat_field step (the step under test) on both nods.
# CONSISTENCY FIX: save_results takes the boolean True (as elsewhere in this
# notebook), not the string 'True'.
e2d_nod1 = Extract2dStep.call(bgr_nod1, save_results=True)
e2d_nod2 = Extract2dStep.call(bgr_nod2, save_results=True)

ff_nod1 = FlatFieldStep.call(e2d_nod1, save_results=True)
ff_nod2 = FlatFieldStep.call(e2d_nod2, save_results=True)
# # Flat Field Step Check
# OK, so we now have the output from the flat fielding step. We will download the reference file from crds and perform an independent check that the step is correctly executed.
# +
#flat1_file = fits.open('flatfield_testing/det_image_seq1_MIRIMAGE_P750Lexp1_flat_field.fits')
#flat2_file = fits.open('flatfield_testing/det_image_seq2_MIRIMAGE_P750Lexp1_flat_field.fits')
#flat1 = flat1_file[1]
#flat2 = flat2_file[1]
# -
# Display flat, region chosen because all areas around the slit are nan's
# Display the flat-fielded nod 1 image; the [:400, 200:400] cutout covers the
# slit region (areas around the slit are NaN after flat fielding).
fig = plt.figure(figsize=[12,10])
plt.imshow(ff_nod1.data[:400, 200:400], origin='lower')
plt.title('Nod 1 - flat fielded')
plt.colorbar()
# Let's now do a manual check based on the flat field reference file. We can access the name of this file from the model metadata (or the file header). The flat reference file has non-NaN value only in the region where the spectrum is dispersed. We'd like to check that this region covers at least that covered by the bounding box definition to ensure that that entire region will be accurately flat fielded.
# +
# Look up the flat reference file actually used by the step (recorded in the
# model metadata) and open it via CRDS.
flat_reffile = ff_nod1.meta.ref_file.flat.name
basename = crds.core.config.pop_crds_uri(flat_reffile)
filepath = crds.locate_file(basename, "jwst")
reffile = datamodels.open(filepath)
#print(reffile.data[0:430,304:346])

# NOTE(review): this re-binds the name `bbox2` used in an earlier plotting
# cell; harmless here, but a distinct name would be clearer.
bbox2 = Rectangle((awcs_nod1.meta.wcs.bounding_box[0][0],awcs_nod1.meta.wcs.bounding_box[1][0]), bbox_w_n1, bbox_ht_n1, angle=0.0, ec='r', lw=2, fc='None')

# Three views: full reference flat plus zooms on the left and right bounding
# box edges, each with the nod 1 bounding box overplotted in red.
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=[12, 12])
im0 = ax[0].imshow(reffile.data, origin='lower', interpolation='None')
ax[0].add_patch(bbox2)
ax[0].set_xlim([275,375])
ax[0].set_ylim([0,400])
cbar = fig.colorbar(im0, extend='both', orientation='horizontal', shrink=0.9, ax=ax[0])

# a Rectangle patch can only be attached to one Axes, hence a fresh copy here
bbox3 = Rectangle((awcs_nod1.meta.wcs.bounding_box[0][0],awcs_nod1.meta.wcs.bounding_box[1][0]), bbox_w_n1, bbox_ht_n1, angle=0.0, ec='r', lw=2, fc='None')
ax[1].imshow(reffile.data, origin='lower', interpolation='None', aspect='auto')
ax[1].add_patch(bbox3)
ax[1].set_xlim([290,310])
ax[1].set_ylim([300,400])
ax[1].set_title('Zoom in - left edge')

bbox4 = Rectangle((awcs_nod1.meta.wcs.bounding_box[0][0],awcs_nod1.meta.wcs.bounding_box[1][0]), bbox_w_n1, bbox_ht_n1, angle=0.0, ec='r', lw=2, fc='None')
ax[2].imshow(reffile.data, origin='lower', interpolation='None')
ax[2].add_patch(bbox4)
ax[2].set_xlim([335,355])
ax[2].set_ylim([300,400])
ax[2].set_title('Zoom in - right edge')
#fig.tight_layout()

plt.suptitle('Slit flat field ({}) vs. Assigned bounding box'.format(flat_reffile))
plt.savefig('slit_flatfield_boundingbox.png')
# -
def flatfield_corners(arr):
    """Return the corner coordinates of the non-NaN region of a 2-D array.

    Parameters
    ----------
    arr : 2-D ndarray
        Flat-field reference array; pixels outside the slit region are NaN.

    Returns
    -------
    xmin, xmax, ymin, ymax : int
        Minimum and maximum column (x) and row (y) indices containing
        non-NaN values.

    Raises
    ------
    ValueError
        If `arr` contains only NaNs (the original code raised an opaque
        "zero-size array" ValueError in this case).
    """
    # boolean mask of valid (non-NaN) pixels
    valid = ~np.isnan(arr)
    if not valid.any():
        raise ValueError("array contains only NaNs; no flat-field region found")
    # extract the indices of the valid pixels and take their extremes
    rows, cols = np.where(valid)
    return cols.min(), cols.max(), rows.min(), rows.max()
# Compare the corner coordinates of the flat field region with the corners of the bounding box. We check to ensure that the flat field region is NOT SMALLER than the bounding box region. Bigger is not a problem.
# +
# get the corner coordinates of the flat field region using the above function
ff_xmin, ff_xmax, ff_ymin, ff_ymax = flatfield_corners(reffile.data)
print(ff_xmin, ff_xmax, ff_ymin, ff_ymax)

# get the bounding box coordinates
bb_xmin, bb_xmax, bb_ymin, bb_ymax = awcs_nod1.meta.wcs.bounding_box[0][0], awcs_nod1.meta.wcs.bounding_box[0][1], awcs_nod1.meta.wcs.bounding_box[1][0], awcs_nod1.meta.wcs.bounding_box[1][1]
print(bb_xmin, bb_xmax, bb_ymin, bb_ymax)

# The flat-field region must be at least as large as the bounding box in x.
# The assert is wrapped so a failure is reported loudly without stopping the
# notebook run.
try:
    assert(ff_xmin <= bb_xmin) and (ff_xmax >= bb_xmax), "Flat field region SMALLER than BBox in X"
except AssertionError as e:
    print("************************************************")
    print("")
    print("ERROR: {}".format(e))
    print("")
    print("************************************************")

# Same check in the y direction.
try:
    assert(ff_ymin <= bb_ymin) and (ff_ymax >= bb_ymax), "Flat field region SMALLER than BBox in Y"
except AssertionError as e:
    print("************************************************")
    print("")
    print("ERROR: {}".format(e))
    print("")
    print("************************************************")
# -
#
# Now we go back to the output of the step prior to flat fielding (extract_2d()), and perform the flat fielding manually. This should be a straightforward division. The variables are ``e2d_nod1`` and ``e2d_nod2``.
# Manual flat fielding: divide the extract_2d output by the reference flat.
manflat_nod1 = e2d_nod1.data / reffile.data
manflat_nod2 = e2d_nod2.data / reffile.data

# Compare the output to the pipeline flat field output (``ff_nod1`` and ``ff_nod2``). We use the ``np.allclose()`` function to compare two arrays element-wise. We accept the default tolerances ``atol = 1e-08`` and ``rtol = 1e-05``, which means that:
#
# ``abs(ff_nod1.data - manflat_nod1) <= 1e-08 + 1e-05 * manflat_nod1``
# +
fcheck_nod1 = (ff_nod1.data - manflat_nod1)
fcheck_nod2 = (ff_nod2.data - manflat_nod2)

# COMPAT FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int is the drop-in replacement.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=[12,10])
im0 = ax[0].imshow(fcheck_nod1[int(bb_ymin):int(bb_ymax), int(bb_xmin):int(bb_xmax)], origin='lower', interpolation='None')
ax[0].set_title('Nod 1 check')
cbar = fig.colorbar(im0, shrink=0.9, ax=ax[0])
im1 = ax[1].imshow(fcheck_nod2[int(bb_ymin):int(bb_ymax), int(bb_xmin):int(bb_xmax)], origin='lower', interpolation='None')
ax[1].set_title('Nod 2 check')
cbar2 = fig.colorbar(im1, shrink=0.9, ax=ax[1])

nans1 = np.isnan(fcheck_nod1)
nans2 = np.isnan(fcheck_nod2)
print('Min difference between manual & pipeline files, nod 1 = {0} -- Max = {1}'.format(np.min(fcheck_nod1[~nans1]), np.max(fcheck_nod1[~nans1])))
print('Min difference between manual & pipeline files, nod 2 = {0} -- Max = {1}'.format(np.min(fcheck_nod2[~nans2]), np.max(fcheck_nod2[~nans2])))

# perform a numeric check. Ignoring NaNs for now as these do not fully match:
assert (np.allclose(ff_nod1.data[~nans1], manflat_nod1[~nans1], equal_nan=True) and np.allclose(ff_nod2.data[~nans2], manflat_nod2[~nans2], equal_nan=True)), "Pipeline calibrated file doesn't match the manual check to within tolerances"
# -
# If all assert statements in this notebook PASS, then the test is successful.
#
# **END**
#
# Authors: <NAME> (<EMAIL>), <NAME> (<EMAIL>) -- MIRI branch
#
| jwst_validation_notebooks/flat_field/jwst_flat_field_miri_test/flat-miri-lrs-slit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Explore Dataset
# ## IQ2 Debates
# *<NAME> and <NAME>*
#
# We are seeking to explore the basic tenets of our dataset. We will later be exploring complexity of language, so we will focus on something that comes quite close to this: word and sentence length.
# ## 1. Get averages
# First, we want to gather some basic average facts:
# * What is the average word length in a debate?
# * What is the average sentence length in a debate?
# * What is the average utterance length in a debate?
# * What is the variability of word, sentence, or utterance length depending on the segment of a debate?
# * What is the variability of word, sentence, or utterance length depending on the speaker type of an utterance?
# +
# import required modules and set up environment
import json
import os
import re
# replace file path below with your own local convokit
# NOTE(review): this hard-codes one developer's machine path; the notebook
# only runs elsewhere after editing the path below.
os.chdir('/Users/marianneaubin/Documents/Classes/CS6742/Cornell-Conversational-Analysis-Toolkit')
import convokit
# -
# open created IQ2 corpus
corpus = convokit.Corpus(filename='datasets/iq2_corpus/iq2_corpus')
# print basic info about the corpus (user/utterance/conversation counts)
corpus.print_summary_stats()
# +
import re

# For each utterance, count words (whitespace-delimited) and sentences
# (split on runs of . ! ?), then report corpus-wide averages.
utter_ids = corpus.get_utterance_ids()
word_counts = []
sentence_counts = []
num_sentences = 0
for utt_id in utter_ids:
    utt = corpus.get_utterance(utt_id)
    # we simply use spaces to delineate words
    words = utt.text.split()
    word_count = len(words)
    word_counts.append(word_count)
    # we use regex to separate sentences; filter(None, ...) drops the empty
    # strings re.split leaves around leading/trailing punctuation
    sentences = re.split(r'[.!?]+', utt.text)
    sentences = list(filter(None, sentences))
    for sentence in sentences:
        words_in_sentence = sentence.split()
        words_in_sentence_count = len(words_in_sentence)
        sentence_counts.append(words_in_sentence_count)
        num_sentences = num_sentences + 1;

# get average word count per utterance
word_len_sum = sum(word_counts)
utt_num = len(list(corpus.iter_utterances()))
avg_word_len = word_len_sum/utt_num
print("average number of words per utterance is " + str(round(avg_word_len,2)))

# get average word count per sentence
word_sent_len_sum = sum(sentence_counts)
avg_sentence_len = word_sent_len_sum/num_sentences
print("average number of words per sentence is " + str(round(avg_sentence_len, 2)))
# +
# function get_averages
# Print word/sentence/letter statistics for a set of utterance ids.
# inputs: list of utterance ids, instance of corpus
# outputs: average number of letters per word, average number of words per
#          sentence (the words-per-utterance average is printed only)
def get_averages(utt_list, corp):
    texts = [corp.get_utterance(utt_id).text for utt_id in utt_list]

    # words are delineated by whitespace; sentences by runs of . ! ?
    word_counts = [len(text.split()) for text in texts]
    letter_counts = [len(word) for text in texts for word in text.split()]
    sentence_counts = [
        len(sentence.split())
        for text in texts
        for sentence in filter(None, re.split(r'[.!?]+', text))
    ]
    num_sentences = len(sentence_counts)

    word_sum = sum(word_counts)
    avg_word_len = word_sum / len(utt_list)
    print("average number of words per utterance is " + str(round(avg_word_len,2)))

    avg_sentence_len = sum(sentence_counts) / num_sentences
    print("average number of words per sentence is " + str(round(avg_sentence_len, 2)))

    # NOTE: avg_word_len is deliberately re-bound here, so the first returned
    # value is letters-per-word (matching the original behaviour), not
    # words-per-utterance.
    avg_word_len = sum(letter_counts) / word_sum
    print("average number of letters per word is " + str(round(avg_word_len,2)))

    return avg_word_len, avg_sentence_len
# +
# function get_segment_utterances
# this function takes in a whole corpus and will return a list of utterance
# ids belonging only to the segment specified
# inputs: corpus, desired segment
# outputs: list of utterance ids in that segment
def get_segment_utterances(corp, seg):
    return [
        utt_id
        for utt_id in corp.get_utterance_ids()
        if corp.get_utterance(utt_id).meta['segment'] == seg
    ]
# +
print("overall segment stats:")
avg_word_len, avg_sentence_len = get_averages((corpus.get_utterance_ids()), corpus)
print("\nintro segment stats:")
#get intro segment id
seg_utt_1 = get_segment_utterances(corpus, 0)
avg_word_len_1, avg_sentence_len_1 = get_averages(seg_utt_1, corpus)
print("\ndiscussion segment stats:")
#get discussion segment id
seg_utt_2 = get_segment_utterances(corpus, 1)
avg_word_len_2, avg_sentence_len_2 = get_averages(seg_utt_2, corpus)
print("\nconclusion segment stats:")
#get conclusion segment id
seg_utt_3 = get_segment_utterances(corpus, 2)
avg_word_len_3, avg_sentence_len_3 = get_averages(seg_utt_3, corpus)
# +
# function: get_stance
# function that will yield whether a given utterance was from a 'for',
# 'against' or neutral side
# inputs: utterance
# outputs: 0 for neutral, 1 for 'for', -1 for 'against'
def get_stance(utterance):
    codes = {'for': 1, 'against': -1}
    return codes.get(utterance.user.meta['stance'], 0)
# +
# function: get_winner
# Determine whether a given utterance was spoken by a winner or a loser of
# the debate, using the pre/post audience vote swing stored in the
# conversation metadata.
# inputs: corpus, utterance
# outputs: 0 for tie, 1 for winner, -1 for loser, -2 for neutral speaker
def get_winner(corp, utterance):
    results = corp.conversations[utterance.root].meta['results']
    # vote swing for each side:
    # if delta1 > delta2, the winning side was 'against'
    # if delta2 > delta1, the winning side was 'for'
    delta1 = int(results['post']['against']) - int(results['pre']['against'])
    delta2 = int(results['post']['for']) - int(results['pre']['for'])

    stance = get_stance(utterance)
    # neutral speakers (e.g. moderators) belong to neither side
    if stance == 0:
        return -2
    if delta1 == delta2:
        return 0
    # FIX: the original ended with an unreachable `return None`; the branching
    # is flattened here with identical results for every reachable input.
    against_won = delta1 > delta2
    if stance == -1:
        return 1 if against_won else -1
    return -1 if against_won else 1
# -
def get_utt_averages(utt_list):
    """Print word/sentence/letter statistics for a list of utterance objects.

    Returns (average letters per word, average words per sentence); the
    words-per-utterance average is printed but not returned.
    """
    texts = [utt.text for utt in utt_list]

    # words are whitespace-delimited; sentences split on runs of . ! ?
    word_counts = [len(text.split()) for text in texts]
    letter_counts = [len(word) for text in texts for word in text.split()]
    sentence_counts = [
        len(sentence.split())
        for text in texts
        for sentence in filter(None, re.split(r'[.!?]+', text))
    ]
    num_sentences = len(sentence_counts)

    word_sum = sum(word_counts)
    avg_word_len = word_sum / len(utt_list)
    print("average number of words per utterance is " + str(round(avg_word_len,2)))

    avg_sentence_len = sum(sentence_counts) / num_sentences
    print("average number of words per sentence is " + str(round(avg_sentence_len, 2)))

    # avg_word_len is re-bound: the returned value is letters-per-word,
    # matching the original behaviour.
    avg_word_len = sum(letter_counts) / word_sum
    print("average number of letters per word is " + str(round(avg_word_len,2)))

    return avg_word_len, avg_sentence_len
# +
# now we try and see if there is a correlation between winning utterances and losing utterances
# we also include neutral stances for completion
# Bucket every utterance by its debate outcome, then compare statistics of
# winning vs. losing (plus tied and neutral) utterances.
winning_utts = []
losing_utts = []
tied_utts = []
neutral_utts = []

# iterate through each utterance id in the corpus
for utt in corpus.utterances:
    utterance = corpus.get_utterance(utt)
    # PERF FIX: call get_winner once per utterance instead of up to three
    # times (it re-reads conversation metadata on every call).
    outcome = get_winner(corpus, utterance)
    if outcome == 1:
        winning_utts.append(utterance)
    elif outcome == 0:
        tied_utts.append(utterance)
    elif outcome == -1:
        losing_utts.append(utterance)
    else:
        neutral_utts.append(utterance)

print(len(winning_utts))
print(len(losing_utts))
print(len(tied_utts))
print(len(neutral_utts))

print("winning stats:")
get_utt_averages(winning_utts)
print("\nlosing stats")
get_utt_averages(losing_utts)
print("\ntied stats")
get_utt_averages(tied_utts)
print("\nneutral stats")
get_utt_averages(neutral_utts)
# -
| datasets/iq2_corpus/explore_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Clustering using TFIDF
import spacy
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import ward, dendrogram, single, complete
import pandas as pd
# ### Load Data and their titles for the use cases 3 & 4
# +
# with open('C:/Users/anast/Desktop/Thesis/MachineLearning/datasetTitles.txt') as t:
# titles = t.read().splitlines()
# # Use Case 3
# with open("C:/Users/anast/Desktop/Thesis/MachineLearning/Ontology/DatasetOntology/all.txt") as f:
# data = f.read().splitlines()
# # Use Case 4
# with open("C:/Users/anast/Desktop/Thesis/MachineLearning/Data/datasetProjects.txt") as f:
# data = f.read().splitlines()
# -
# ### Load Data and their titles for the use cases 7 & 8
# +
# NOTE(review): hard-coded local Windows paths; edit before running elsewhere.
with open('C:/Users/anast/Desktop/Thesis/MachineLearning/Th-Ur-Titles.txt') as t:
    titles = t.read().splitlines()

# # Use Case 7
# with open("C:/Users/anast/Desktop/Thesis/MachineLearning/Ontology/DatasetOntology/Th-Ur-all.txt") as f:
#     data = f.read().splitlines()

# Use Case 8
with open("C:/Users/anast/Desktop/Thesis/MachineLearning/Data/Th-Ur-Projects.txt") as f:
    data = f.read().splitlines()
# -
# ### Number of clusters
# number of clusters requested from the agglomerative model fitted below
n_clusters = 10
# ### Preprocessing of data
# - Exclude the words of common functionality according to the use cases 3, 4, 7, 8
# - Clean from numbers, punctuation and stop words
# - Lemmatize the words
# +
nlp = spacy.load('en_core_web_lg')

# Build the exclusion list: terms appearing on either side of high-support
# association rules (common functionality across projects), plus a few
# hand-picked generic words.
exclude = []
rules = pd.read_csv('C:/Users/anast/Desktop/Results/results-all1.csv')
rules = rules[(rules['Support']>0.2)][['Left Hand Side', 'Right Hand Side']]
exclude.extend(rules['Left Hand Side'].tolist())
exclude.extend(rules['Right Hand Side'].tolist())
# de-duplicate while preserving order
exclude = list(dict.fromkeys(exclude))
exclude.extend(['datum', 'administrator', 'log', 'know', 'able', 'ability'])

# Clean the data from numbers, punctuation and stop words and lemmatize
all_docs = []
for line in data:
    doc = nlp(line)
    cleanData = []
    for token in doc:
        if not token.is_alpha:
            continue
        elif token.is_stop:
            continue
        # NOTE(review): non-alphabetic tokens are already skipped above, so
        # this PUNCT branch appears unreachable -- confirm.
        elif token.pos_ == "PUNCT":
            continue
        elif token.text in exclude:
            continue
        elif token.lemma_ in exclude:
            continue
        else:
            cleanData.append(token.lemma_)
    all_docs.append(" ".join(cleanData))
# -
# ### Load the testing project (Stereo)
# Test Data: the held-out project ('Stereo') to be assigned to a cluster
with open("C:/Users/anast/Desktop/testDataLDA.txt") as f:
    testdata = f.read().splitlines()
# ### Apply the same preprocessing steps as the training data
# +
# Clean the data from numbers, punctuation and stop words
clean_corpus_test = []
for line in testdata:
doc = nlp(line)
cleanData = []
for token in doc:
if not token.is_alpha:
continue
elif token.is_stop:
continue
elif token.pos_ == "PUNCT":
continue
elif token.text in exclude:
continue
elif token.lemma_ in exclude:
continue
else:
cleanData.append(token.lemma_)
cleanData = " ".join(cleanData)
all_docs.append(cleanData)
titles.append('Stereo')
cleanData
# -
# ### Use the TF-IDF algorithm to vectorize the data
# Tf-Idf Vectorizer: fit on all (training + test) cleaned documents.
vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.14, stop_words='english',
                             use_idf=True, norm=None)
tfidf_matrix = vectorizer.fit_transform(all_docs)
# COMPAT FIX: get_feature_names() was deprecated in scikit-learn 1.0 and
# removed in 1.2; prefer get_feature_names_out() and fall back on old versions.
try:
    feature_names = vectorizer.get_feature_names_out()
except AttributeError:
    feature_names = vectorizer.get_feature_names()
dense = tfidf_matrix.todense()
denselist = dense.tolist()
print(feature_names)
# print(denselist)
print(tfidf_matrix.shape)

# Similarity: turn cosine similarity into a distance matrix.
dist = 1 - cosine_similarity(tfidf_matrix)
# print(dist)
# ### Train a hierarchical clustering model
from sklearn.cluster import AgglomerativeClustering
# Ward-linkage hierarchical clustering.
# NOTE(review): `affinity` was renamed to `metric` in scikit-learn 1.2 and
# removed in 1.4. Also, `dist` (a cosine-distance matrix) is passed to
# fit_predict as if it were a feature matrix -- confirm this is intended.
hc = AgglomerativeClustering(n_clusters = n_clusters, affinity = 'euclidean', linkage = 'ward')
y_hc = hc.fit_predict(dist)
# ### Organize the results in a data frame
# Pair each project title with its assigned cluster label.
titlesDF = pd.DataFrame(titles, columns = ['Project'])
clusterDF = pd.DataFrame(y_hc, columns = ['Cluster'])
results = pd.concat([titlesDF, clusterDF], axis =1)
# Find which projects belong to the cluster of the last one (testing project)
results[results.Cluster == results.iloc[-1, 1]]
# ### Cluster of the testing project (project names only)
results[results.Cluster == results.iloc[-1, 1]]['Project']
| Text Clustering/clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from skimage.draw import disk
from misfit import MisfitFunction, DataMisfit, RegMisfit
from optimization import BBiter, CGiter
from core import ForwardSolver, getA, getP
from scipy.sparse import linalg
from scipy.ndimage import gaussian_filter
from scipy.io import loadmat
def slowness_model(n, vback=2.0, vanom=2.25, radius=20):
    """Build true and background squared-slowness models on an `n` grid.

    The true model is a constant background with a circular anomaly in the
    centre; the background model omits the anomaly. The defaults reproduce
    the original hard-coded model exactly.

    Parameters
    ----------
    n : array_like of int, shape (2,)
        Grid size.
    vback : float, optional
        Background velocity.
    vanom : float, optional
        Velocity inside the circular anomaly.
    radius : int, optional
        Radius of the anomaly in grid points.

    Returns
    -------
    m, m0 : ndarray, shape (n[0]*n[1], 1)
        True and background squared slowness (1 / v**2), flattened to columns.
    """
    vel = vback * np.ones(n)
    # circular velocity anomaly centred in the model
    rr, cc = disk((n[0] // 2, n[1] // 2), radius=radius, shape=vel.shape)
    vel[rr, cc] = vanom
    m = 1. / (vel.reshape(-1, 1)) ** 2
    m0 = 1. / (vback * np.ones(n).reshape(-1, 1)) ** 2
    return m, m0
# +
h = np.array([10., 10.])              # grid spacing in (z, x)
f = np.array([5., 10., 15.])          # source frequencies
xs = np.arange(h[0], 1.e3, 2 * h[0])  # source x positions, every other node
zs = 2 * h[0] * np.ones_like(xs)      # sources at depth 2*h
xr = np.arange(h[0], 1.e3, 2 * h[0])  # receiver x positions
zr = h[0] * np.ones_like(xr)          # receivers at depth h
n = np.array([101, 101])              # grid size
z = np.arange(0, 1e3 + h[0], h[0])    # depth axis coordinates
x = np.arange(0, 1e3 + h[0], h[1])    # lateral axis coordinates
q = np.eye(len(xs))                   # one unit source per column
# -
# Bundle the acquisition geometry and grid parameters for the solver.
model = {
    'h': h,
    'f': f,
    'xr': xr,
    'zr': zr,
    'xs': xs,
    'zs': zs,
    'n': n,
    'z': z,
    'x': x,
    'q': q,
}
# +
m, m0 = slowness_model(n)

# Forward-model the "observed" data Dobs from the true model m.
Fm = ForwardSolver(model)
Dobs, Jo = Fm.solve(m)

# Combined misfit: data term plus regularization about the background m0.
dataMisfit = DataMisfit(Dobs, model)
regMisfit = RegMisfit(n, alpha=0.5, m0=m0)
misfitFn = MisfitFunction(dataMisfit, regMisfit)
# display the observed data (notebook cell output)
Dobs
# Invert with Barzilai-Borwein iterations (CG variant left for reference).
history, mk, gk = BBiter(misfitFn, m0, tol=1.e-3, maxit=20)
# history, mk, gk = CGiter(misfitFn, m0, Dobs, Fm, tol=1.e-6, maxit=10)
# Convergence history: misfit and gradient norm, normalized to their
# starting values, against iteration number.
plt.figure()
plt.semilogy(history[:, 0], history[:, 1] / history[0, 1], "b-", label='misfit')
plt.semilogy(history[:, 0], history[:, 2] / history[0, 2], "r-", label='norm(g)')
# FIX: label the axes for what is actually plotted (iteration number vs.
# normalized values) instead of the copy-pasted 'x'/'y'.
plt.xlabel('iteration')
plt.ylabel('normalized value')
plt.title('History', fontsize=15, fontweight='bold')
plt.legend()
plt.grid('on', which='both')
plt.axis('tight')
plt.show()
# +
# plot model: true (left) and initial/background (right), with receiver (^)
# and source (*) positions overplotted in grid coordinates.
# NOTE(review): these panels show the squared slowness m = 1/v**2, although
# the titles say 'Velocity model'.
plt.figure(figsize=(20, 8))
plt.subplot(121)
plt.imshow(m.reshape(n))
plt.plot(xr // h[0], zr // h[1], 'w^')
plt.plot(xs // h[0], zs // h[1], 'r*')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Velocity model', fontsize=15, fontweight='bold')
plt.colorbar()
plt.grid('on', which='both')
plt.axis('tight')

plt.subplot(122)
plt.imshow(m0.reshape(n))
plt.plot(xr // h[0], zr // h[1], 'w^')
plt.plot(xs // h[0], zs // h[1], 'r*')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Initial Velocity model', fontsize=15, fontweight='bold')
plt.colorbar()
plt.grid('on', which='both')
plt.axis('tight')
plt.show()
# +
# plot model: reconstructed velocity 1/sqrt(mk) (left) and the final gradient
# gk (right), with receiver (^) and source (*) positions overplotted.
plt.figure(figsize=(20, 8))
plt.subplot(121)
plt.imshow(1./np.sqrt(mk).reshape(n))
plt.plot(xr // h[0], zr // h[1], 'w^')
plt.plot(xs // h[0], zs // h[1], 'r*')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Reconstructed Velocity model', fontsize=15, fontweight='bold')
plt.colorbar()
plt.grid('on', which='both')
plt.axis('tight')

plt.subplot(122)
plt.imshow(gk.reshape(n))
plt.plot(xr // h[0], zr // h[1], 'w^')
plt.plot(xs // h[0], zs // h[1], 'r*')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Gradient model', fontsize=15, fontweight='bold')
plt.colorbar()
plt.grid('on', which='both')
plt.axis('tight')
plt.show()
| Example_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.append('..')
import explain_utils
import explainer
# +
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
from torch_geometric.nn import GATConv
# -
# ### Load data
# dataset = 'Cora'
dataset = 'Citeseer'
# NOTE: `dataset` is re-bound from the name string to the Planetoid object.
dataset = Planetoid('data', dataset, transform=T.NormalizeFeatures())
data = dataset[0]
# ### Build Model
class Model(nn.Module):
    """Two-layer GAT that returns log-probabilities and per-layer attention."""

    def __init__(self, feature_num: int, hidden_size: int = 8, heads: int = 8, dropout: float = 0.5):
        super().__init__()
        self.dropout = dropout
        self.conv1 = GATConv(feature_num, hidden_size, heads=heads, dropout=dropout)
        # NOTE(review): with concat=True the second layer outputs
        # hidden_size * heads features, not the number of classes --
        # confirm this is intended for the classification loss used below.
        self.conv2 = GATConv(hidden_size * heads, hidden_size, heads=heads, concat=True, dropout=dropout)

    def forward(self, x, edge_index):
        # collect the (edge_index, attention_weight) pair from each GAT layer
        attentions = []
        x = nn.Dropout(p=self.dropout)(x)
        x, (attn_edge_index, attn_weight) = self.conv1(x, edge_index, return_attention_weights=True)
        attentions.append((attn_edge_index.detach(), attn_weight.detach()))
        x = nn.ELU()(x)
        x = nn.Dropout(p=self.dropout)(x)
        x, (attn_edge_index, attn_weight) = self.conv2(x, edge_index, return_attention_weights=True)
        attentions.append((attn_edge_index.detach(), attn_weight.detach()))
        return nn.LogSoftmax(dim=1)(x), attentions
# Instantiate once here; a fresh instance is created again before training.
model = Model(dataset.num_node_features, hidden_size=16, heads=4, dropout=0.3)
# ### Train
# +
@torch.no_grad()
def eval(model, data):
    """Return [train, val, test] accuracy for `model` on `data`.

    NOTE(review): this shadows the builtin `eval` at module level.
    """
    model.eval()
    (logits, _), accs = model(data.x, data.edge_index), []
    for _, mask in data('train_mask', 'val_mask', 'test_mask'):
        # predicted class = argmax of the log-probabilities
        pred = logits[mask].max(1)[1]
        acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()
        accs.append(acc)
    return accs
def train(model, data, num_epochs, lr=0.005):
    """Train *model* full-batch with Adam + weight decay, logging metrics every 10 epochs.

    Args:
        model: module returning (log-probabilities, attentions) from (x, edge_index).
        data: graph data object exposing x, edge_index, y and train_mask.
        num_epochs: number of optimization steps (one full-batch step per epoch).
        lr: Adam learning rate.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=5e-4)
    for epoch in range(1, num_epochs + 1):
        model.train()
        optimizer.zero_grad()
        logits, attentions = model(data.x, data.edge_index)
        # NLLLoss pairs with the model's LogSoftmax output
        loss = nn.NLLLoss()(logits[data.train_mask], data.y[data.train_mask])
        loss.backward()
        optimizer.step()
        if epoch % 10 == 0:
            train_acc, val_acc, test_acc = eval(model, data)
            # fix: train acc previously used format spec ':4f' (missing dot); now ':.4f' like the others
            print(f'Epoch: {epoch}, train loss: {loss.item():.4f}, train acc: {train_acc:.4f}, Val: {val_acc:.4f}, Test: {test_acc:.4f}')
# -
model = Model(dataset.num_node_features, hidden_size=16, heads=4, dropout=0.5)
train(model, data, 400, lr=0.01)
# ### Get Attentions
model.eval()
with torch.no_grad():
logits, attentions = model(data.x, data.edge_index)
# +
print('======Layer 1======')
attn1 = attentions[0][1]
print(attn1.min(-1)[0].mean())
print((attn1.max(-1)[0] - attn1.min(-1)[0]).mean())
print(attn1.mean(-1).mean())
print(attn1.mean(-1).std())
print('======Layer 2======')
attn2 = attentions[1][1]
print(attn2.min(-1)[0].mean())
print((attn2.max(-1)[0] - attn2.min(-1)[0]).mean())
print(attn2.mean(-1).mean())
print(attn2.mean(-1).std())
# -
def get_attention_tensors(attentions):
    """Convert per-layer (edge_index, head_weights) pairs to numpy, mean-pooling heads.

    Edge indices are transposed to shape (num_edges, 2) for the explainer utilities.
    """
    return [
        (edge_idx.numpy().T,
         explain_utils.attention_weights_pooling(weights.numpy(), 'mean'))
        for edge_idx, weights in attentions
    ]
# ### Top paths cumulative importance
topk = 32
top_path_scores = explain_utils.viterbi(get_attention_tensors(attentions), topk=topk)
cum_imp = []
for i, (node, path_scores) in enumerate(top_path_scores.items()):
#if i > 100:
# break
#print(sum(top_scores[node]))
cum = []
for path, score in path_scores.items():
if cum:
cum.append(cum[-1] + score)
else:
cum.append(score)
cum += [1] * (topk - len(cum))
cum_imp.append(cum)
# print(path, score)
#print()
cum_imp = np.array(cum_imp)
plt.plot(cum_imp.mean(0))
# ### Top paths (remove duplicated) cumulative importance
# +
cum_imp = []
cum_node_cnt = []
for i, (node, path_scores) in enumerate(top_path_scores.items()):
cum = []
cum2 = []
cum_nodes = set()
norm_scores = {}
for path, score in path_scores.items():
path = tuple(list(sorted(set(path))))
norm_scores[path] = norm_scores.get(path, 0) + score
norm_scores = list(sorted(list(norm_scores.items()), key=lambda _: _[1], reverse=True))
for path, score in norm_scores:
if cum:
cum.append(cum[-1] + score)
else:
cum.append(score)
cum_nodes |= set(path)
cum2.append(len(cum_nodes))
cum += [1] * (32 - len(cum))
cum2 += [cum2[-1]] * (32 - len(cum2))
cum_imp.append(cum)
cum_node_cnt.append(cum2)
# print(path, score)
#print()
cum_imp = np.array(cum_imp)
cum_node_cnt = np.array(cum_node_cnt)
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.plot(cum_imp.mean(0))
plt.subplot(1,2,2)
plt.plot(cum_node_cnt.mean(0))
# -
# ### Explainer
ex = explainer.NodeClassificationExplainer()
ex.fit(get_attention_tensors(attentions), topk=16)
ex.explain(1)
| examples/node_classfication.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import jax.numpy as jnp
import matplotlib.pyplot as plt
import jax
from jax import lax
from deluca.envs import Pendulum
from deluca.agents import LQR
# + pycharm={"name": "#%%\n"}
def loop(context, x):
    """One closed-loop environment step: ask the agent for a control, apply it.

    *context* is an (env, agent) pair; the scan variable *x* is unused.
    Returns the (env, agent) carry and the step reward.
    """
    env, agent = context
    action = agent(env.state)
    _, step_reward, _, _ = env.step(action)
    return (env, agent), step_reward
# -
# LQR
# specify dynamics matrices
A = jnp.array([[0,1],[-30,0]])
B = jnp.array([[0],[3]])
agent = LQR(A,B)
env = Pendulum()
# +
# for loop version
T = 50
xs = jnp.array(jnp.arange(T))
print(env.reset())
reward = 0
for i in range(T):
(env, agent), r = loop((env, agent), 0)
reward += r
reward_forloop = reward
print('reward_forloop = ' + str(reward_forloop))
'''
# scan version # TODO: need to address problem of LQR with jax.lax.scan
env = Pendulum()
print(env.reset())
_,reward_scan = lax.scan(loop, (env, agent), xs)
# correctness test
print('reward_scan sum = ' + str(jnp.sum(reward_scan)))'''
# -
| examples/agents/pendulum_lqr.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# %reload_ext autoreload
# %autoreload 2
from fastai.dataloader import *
from fastai.dataset import *
from fastai.transforms import *
from fastai.models import *
from fastai.conv_learner import *
DIR = Path('data/imagenet/')
TRAIN_CSV='train.csv'
from pathlib import Path
# # Load ImageNet sample and format data
# !wget --header="Host: files.fast.ai" --header="User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36" --header="Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8" --header="Accept-Language: en-GB,en-US;q=0.9,en;q=0.8" "http://files.fast.ai/data/imagenet-sample-train.tar.gz" -O "imagenet-sample-train.tar.gz" -c
# !mkdir data/imagenet
# !tar -xzf imagenet-sample-train.tar.gz -C data/imagenet
# ls data/imagenet/train/n01558993
TRAIN = Path('data/imagenet/train')
CATEGORIES = list(Path(TRAIN).iterdir())
cats_to_files = {cat : list(cat.iterdir()) for cat in CATEGORIES}
a= [1,2]
a.append([3])
files = []
for k,v in cats_to_files.items():
for file in v:
files.append(file)
# mkdir data/imagenet/train1
import shutil
for file in files:
shutil.move(str(file), 'data/imagenet/train1')
ALL_FILES = Path('data/imagenet/train1')
afiles = list(ALL_FILES.iterdir())
import pandas as pd
IMAGE='image'
CATEGORY='category'
df = pd.DataFrame(afiles, columns=[IMAGE])
df[IMAGE] = df[IMAGE].apply(lambda x: str(x)[str(x).rfind('/')+1:])
df['category'] = df[IMAGE].apply(lambda x: x[:x.find('_')])
df.to_csv(DIR/TRAIN_CSV, index=False)
# # Actual Model
# ls {DIR}
# Build a ResNet-34 learner on the ImageNet sample and train for one epoch.
arch = resnet34
tfms = tfms_from_model(arch, 256, aug_tfms=transforms_side_on)
bs = 128
data = ImageClassifierData.from_csv(DIR, 'train1', DIR/TRAIN_CSV, tfms=tfms, bs=bs)
# fix: use the `arch` variable instead of repeating resnet34 (same object, consistent)
models = ConvnetBuilder(arch, data.c, data.is_multi, data.is_reg, pretrain_w8s=False)
learner = ConvLearner(data, models)
learner.unfreeze()
# run the LR finder, then inspect the LR-vs-loss curve to pick a learning rate
learner.lr_find()
learner.sched.plot()
learner.metrics = ['accuracy']
learner.fit(0.5,1)
# fix: removed dangling "learner." which was an incomplete statement (SyntaxError)
| python_scripts/Imagenet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import warnings
warnings.filterwarnings('ignore')
from sklearn.pipeline import Pipeline
# %run query_features.py
# %run scripts/helper.py
# %run scripts/model_train_plus_test.py
crowd_train = load_file('./data/train.csv/train.csv', None)
crowd_test = load_file('./data/test.csv/test.csv', None)
# +
# crowd_train = crowd_train[crowd_train.relevance_variance < 0.5]
# -
target = crowd_train.median_relevance.values
# +
# train_index, test_index = ssSplit(target, train_size=8000, random_state=44)
# train_index, test_index = ssSplit(target, train_size=500, random_state=44)
# +
# NOTE(review): train_index / test_index are only defined by the commented-out
# ssSplit(...) calls in the cell above — uncomment one of them before running
# this cell, otherwise these lookups raise NameError.
Xt = crowd_train.iloc[train_index]
Xv = crowd_train.iloc[test_index]
# Xt = crowd_train
# Xv = crowd_test
# +
yt = target[train_index]
yv = target[test_index]
# yt = target
# -
correct_map = build_query_correction_map(Xt, crowd_test)
# +
def spell_correct_query(x):
    """Return the corrected spelling for query *x*, or *x* itself when no correction exists."""
    if x in correct_map:
        return correct_map[x]
    return x
Xt['query'] = Xt['query'].map(spell_correct_query)
# -
Xv['query'] = Xv['query'].map(spell_correct_query)
Xt_tweaked = tweak_text(Xt)
Xv_tweaked = tweak_text(Xv)
Xfitted, tfv = TFIDF(Xt_tweaked, None)
# +
svd = TruncatedSVD(n_components=200, algorithm='randomized', n_iter=5, random_state=None, tol=0.0)
scl = StandardScaler(copy=True, with_mean=True, with_std=True)
clf = SVC(C=10.0, kernel='rbf', degree=3,
gamma=0.0, coef0=0.0, shrinking=True, probability=False,
tol=0.001, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, random_state=None)
keywords = keyword_counter(Xt)
# -
features = stack([keywords, Xfitted])
pipeline = Pipeline([('svd', svd), ('scl', scl), ('clf', clf)])
pipeline.fit(features, yt)
keywords_test = keyword_counter(Xv)
Xtest = tfv.transform(Xv_tweaked)
features_test = stack([keywords_test, Xtest])
preds_new_model = pipeline.predict(features_test)
# +
# make_submission(crowd_test.id.values.astype(int), preds_new_model, 'spell_correct_rel.csv')
# -
print 'Kappa score on validation set ', (quadratic_weighted_kappa(yv, preds_new_model))
# ### Linear model
linear_model, select = build_linear_model(features, yt)
features_test_selected = select.transform(features_test)
linear_preds = linear_model.predict(features_test_selected)
# +
# print 'Kappa score on validation set ', (quadratic_weighted_kappa(yv, linear_preds))
# -
ensemble_lin_svm = (preds_new_model + linear_preds) / 2
# +
# print 'Kappa score on validation set ', (quadratic_weighted_kappa(yv, ensemble_lin_svm))
# -
make_submission(crowd_test.id.values.astype(int), ensemble_lin_svm, 'ensemble_lin_svm_title.csv')
# ## Best score
best_score_df = pd.read_csv('./submissions/spell_correct_final_only_title.csv')
best_score = best_score_df.prediction
ensemble = (preds_new_model + best_score) / 2
ensemble_int = [int(score) for score in ensemble]
make_submission(crowd_test.id.values.astype(int), ensemble_int, 'spell_correct_title_relevance.csv')
| Kaggle-Competitions/CrowdFlower/SpellCorrection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# Convolutional Constrained MOD
# ============================
#
# This example demonstrates the use of [ccmod.ConvCnstrMOD_Consensus](http://sporco.rtfd.org/en/latest/modules/sporco.admm.ccmod.html#sporco.admm.ccmod.ConvCnstrMOD_Consensus) for computing a convolutional dictionary update via the convolutional constrained method of optimal directions problem [[1]](http://sporco.rtfd.org/en/latest/zreferences.html#id44) [[26]](http://sporco.rtfd.org/en/latest/zreferences.html#id25). This problem is mainly useful as a component within convolutional dictionary learning, but its use is demonstrated here since a user may wish to construct such objects as part of a custom convolutional dictionary learning algorithm, using [dictlrn.DictLearn](http://sporco.rtfd.org/en/latest/modules/sporco.dictlrn.dictlrn.html#sporco.dictlrn.dictlrn.DictLearn).
# +
from __future__ import print_function
from builtins import input
import pyfftw # See https://github.com/pyFFTW/pyFFTW/issues/40
import numpy as np
from sporco.admm import cbpdn
from sporco.admm import ccmod
from sporco import util
from sporco import signal
from sporco import plot
plot.config_notebook_plotting()
# -
# Load training images.
exim = util.ExampleImages(scaled=True, zoom=0.25, gray=True)
S1 = exim.image('barbara.png', idxexp=np.s_[10:522, 100:612])
S2 = exim.image('kodim23.png', idxexp=np.s_[:, 60:572])
S3 = exim.image('monarch.png', idxexp=np.s_[:, 160:672])
S4 = exim.image('sail.png', idxexp=np.s_[:, 210:722])
S5 = exim.image('tulips.png', idxexp=np.s_[:, 30:542])
S = np.dstack((S1, S2, S3, S4, S5))
# Highpass filter training images.
npd = 16
fltlmbd = 5
sl, sh = signal.tikhonov_filter(S, fltlmbd, npd)
# Load initial dictionary.
D0 = util.convdicts()['G:12x12x36']
# Compute sparse representation on current dictionary.
lmbda = 0.1
opt = cbpdn.ConvBPDN.Options({'Verbose': True, 'MaxMainIter': 100,
'HighMemSolve': True})
c = cbpdn.ConvBPDN(D0, sh, lmbda, opt)
X = c.solve()
# Update dictionary for training image set.
opt = ccmod.ConvCnstrMOD_Consensus.Options({'Verbose': True,
'MaxMainIter': 100, 'rho': 1e1})
c = ccmod.ConvCnstrMOD_Consensus(X, sh, D0.shape, opt)
c.solve()
D1 = c.getdict().squeeze()
print("ConvCnstrMOD_Consensus solve time: %.2fs" % c.timer.elapsed('solve'))
# Display initial and final dictionaries.
fig = plot.figure(figsize=(14, 7))
plot.subplot(1, 2, 1)
plot.imview(util.tiledict(D0), title='D0', fig=fig)
plot.subplot(1, 2, 2)
plot.imview(util.tiledict(D1), title='D1', fig=fig)
fig.show()
# Get iterations statistics from CCMOD solver object and plot functional value, ADMM primary and dual residuals, and automatically adjusted ADMM penalty parameter against the iteration number.
its = c.getitstat()
fig = plot.figure(figsize=(20, 5))
plot.subplot(1, 3, 1)
plot.plot(its.DFid, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(1, 3, 2)
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['Primal', 'Dual'], fig=fig)
plot.subplot(1, 3, 3)
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)
fig.show()
| cdl/ccmod_cns_gry.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tensorflow_gpuenv
# language: python
# name: tensorflow_gpuenv
# ---
# +
# TRAINING
# split into train and test set
from os import listdir
from xml.etree import ElementTree
from numpy import zeros
from numpy import asarray
import os
from Mask_RCNN.mrcnn.utils import Dataset
from Mask_RCNN.mrcnn.utils import extract_bboxes
from Mask_RCNN.mrcnn.visualize import display_instances
from numpy import expand_dims
from numpy import mean
from mrcnn.config import Config
from mrcnn.model import MaskRCNN
from mrcnn.utils import Dataset
from mrcnn.utils import compute_ap
from mrcnn.model import load_image_gt
from mrcnn.model import mold_image
from matplotlib import pyplot
from our_dataset import OurDataset
ann_file_path = '/home/test/data/nightowls/nightowls_validation_small.json'
images_path = '/home/test/data/nightowls/validation/nightowls_validation'
# train set
train_set = OurDataset()
train_set.load_dataset(images_path, ann_file_path, is_train=True)
train_set.prepare()
print('Train: %d' % len(train_set.image_ids))
# val set
val_set = OurDataset()
val_set.load_dataset(images_path, ann_file_path, is_train=False)
val_set.prepare()
print('Validation: %d' % len(val_set.image_ids))
'''
k = 0
# plot first few images
for i in range(18,27):
# define subplot
pyplot.subplot(3,3,1+k)
k = k+1
# plot raw pixel data
image = train_set.load_image(i)
pyplot.imshow(image)
# plot all masks
mask, _ = train_set.load_mask(i)
for j in range(mask.shape[2]):
pyplot.imshow(mask[:, :, j], cmap='gray', alpha=0.3)
# show the figure
pyplot.show()
#useful debug tool:
# enumerate all images in the dataset
i = 0
for image_id in train_set.image_ids:
if i > 9:
continue
# load image info
info = train_set.image_info[image_id]
# display on the console
print(info)
i = i+1
'''
'''
# print all the images with the masks in the dataset
for i in range(len(train_set.image_ids)):
# define image id
image_id = i
# load the image
image = train_set.load_image(image_id)
# load the masks and the class ids
mask, class_ids = train_set.load_mask(image_id)
# extract bounding boxes from the masks
bbox = extract_bboxes(mask)
# display image with masks and bounding boxes
display_instances(image, bbox, mask, class_ids, train_set.class_names)
'''
from Mask_RCNN.mrcnn.config import Config
from Mask_RCNN.mrcnn.model import MaskRCNN
# define a configuration for the model
class TrainConfig(Config):
    # Mask R-CNN training configuration for the nightowls dataset.
    # NOTE(review): the config name is a leftover from a kangaroo tutorial;
    # model checkpoints are written under this name.
    NAME = "kangaroo_cfg"
    # Number of classes: 3 object classes + 1 background
    NUM_CLASSES = 3 +1
    # Number of training steps per epoch
    STEPS_PER_EPOCH = 150
# prepare config
config = TrainConfig()
# define the model
model = MaskRCNN(mode='training', model_dir='/home/test/data/trained_models/', config=config)
# load weights (mscoco)
model.load_weights('/home/test/data/mask_rcnn_coco.h5', by_name=True, exclude=["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"])
# train weights (output layers or 'heads')
model.train(train_set, val_set, learning_rate=config.LEARNING_RATE, epochs=1, layers='heads')
# +
# Plot actual images and predicted images
from os import listdir
from xml.etree import ElementTree
from numpy import zeros
from numpy import asarray
import os
from Mask_RCNN.mrcnn.utils import Dataset
from Mask_RCNN.mrcnn.utils import extract_bboxes
from Mask_RCNN.mrcnn.visualize import display_instances
from numpy import expand_dims
from numpy import mean
from mrcnn.config import Config
from mrcnn.model import MaskRCNN
from mrcnn.utils import Dataset
from mrcnn.utils import compute_ap
from mrcnn.model import load_image_gt
from mrcnn.model import mold_image
from matplotlib import pyplot
# %matplotlib inline
from our_dataset import OurDataset
# define the prediction configuration
class PredictionConfig(Config):
    # Inference-time Mask R-CNN configuration.
    # NOTE(review): the config name is a leftover from a kangaroo tutorial.
    NAME = "kangaroo_cfg"
    # Number of classes: 3 object classes + 1 background
    NUM_CLASSES = 3 +1
    # simplify GPU config: single GPU, one image per batch
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
ann_file_path = '/home/test/data/nightowls/nightowls_validation_small.json'
images_path = '/home/test/data/nightowls/validation/nightowls_validation'
# train set
train_set = OurDataset()
train_set.load_dataset(images_path, ann_file_path, is_train=True)
train_set.prepare()
print('Train: %d' % len(train_set.image_ids))
# val set
val_set = OurDataset()
val_set.load_dataset(images_path, ann_file_path, is_train=False)
val_set.prepare()
print('Validation: %d' % len(val_set.image_ids))
# create config
cfg = PredictionConfig()
# define the model
model = MaskRCNN(mode='inference', model_dir='./', config=cfg)
# load model weights
model.load_weights('./trained_model/kangaroo_cfg20191028T1042/mask_rcnn_kangaroo_cfg_0001.h5', by_name=True)
from matplotlib.patches import Rectangle
# plot a number of photos with ground truth and predictions
def plot_actual_vs_predicted(dataset, model, cfg, n_images=1):
    """Plot ground truth next to model predictions for the first *n_images* images.

    Left subplot: raw image with GT masks overlaid. Right subplot: same image
    with the model's predicted boxes drawn in red. One figure is shown per image.
    """
    for i in range(n_images):
        # load the image and mask
        image = dataset.load_image(i)
        mask, _ = dataset.load_mask(i)
        # convert pixel values (e.g. center) to the network's input statistics
        scaled_image = mold_image(image, cfg)
        # convert image into one sample (add a batch dimension)
        sample = expand_dims(scaled_image, 0)
        # make prediction
        yhat = model.detect(sample, verbose=0)[0]
        # define subplot
        fig = pyplot.figure(figsize=(150, 150))
        pyplot.subplot(n_images, 2, i*2+1)
        # plot raw pixel data
        pyplot.imshow(image)
        pyplot.title('Actual')
        # overlay each ground-truth instance mask, semi-transparent
        for j in range(mask.shape[2]):
            pyplot.imshow(mask[:, :, j], cmap='gray', alpha=0.3)
        # get the context for drawing boxes
        pyplot.subplot(n_images, 2, i*2+2)
        # plot raw pixel data
        pyplot.imshow(image)
        pyplot.title('Predicted')
        ax = pyplot.gca()
        # plot each predicted box; rois are [y1, x1, y2, x2]
        for box in yhat['rois']:
            # get coordinates
            y1, x1, y2, x2 = box
            # calculate width and height of the box
            width, height = x2 - x1, y2 - y1
            # create the shape
            rect = Rectangle((x1, y1), width, height, fill=False, color='red')
            # draw the box
            ax.add_patch(rect)
        # show the figure
        pyplot.show()
# plot predictions for train dataset
plot_actual_vs_predicted(train_set, model, cfg)
# plot predictions for test dataset
plot_actual_vs_predicted(val_set, model, cfg)
'''
# calculate the mAP for a model on a given dataset
def evaluate_model(dataset, model, cfg):
APs = list()
for image_id in dataset.image_ids:
# load image, bounding boxes and masks for the image id
image, image_meta, gt_class_id, gt_bbox, gt_mask = load_image_gt(dataset, cfg, image_id, use_mini_mask=False)
# convert pixel values (e.g. center)
scaled_image = mold_image(image, cfg)
# convert image into one sample
sample = expand_dims(scaled_image, 0)
# make prediction
yhat = model.detect(sample, verbose=0)
# extract results for first sample
r = yhat[0]
# calculate statistics, including AP
AP, _, _, _ = compute_ap(gt_bbox, gt_class_id, gt_mask, r["rois"], r["class_ids"], r["scores"], r['masks'])
# store
APs.append(AP)
# calculate the mean AP across all images
mAP = mean(APs)
return mAP
# evaluate model on training dataset
train_mAP = evaluate_model(train_set, model, cfg)
print("Train mAP: %.3f" % train_mAP)
# evaluate model on val dataset
test_mAP = evaluate_model(val_set, model, cfg)
print("Test mAP: %.3f" % test_mAP)
'''
# +
# create the file for the output
# that file is a JSON file containing a list of objects like this
# {"image_id":7000000,"category_id":1,
# "bbox":[645.55109817521941,229.397521973,21.598609313961,52.6795349121],"score":0.0935320705175}
from os import listdir
from xml.etree import ElementTree
from numpy import zeros
from numpy import asarray
import os
from Mask_RCNN.mrcnn.utils import Dataset
from Mask_RCNN.mrcnn.utils import extract_bboxes
from Mask_RCNN.mrcnn.visualize import display_instances
from numpy import expand_dims
from numpy import mean
from mrcnn.config import Config
from mrcnn.model import MaskRCNN
from mrcnn.utils import Dataset
from mrcnn.utils import compute_ap
from mrcnn.model import load_image_gt
from mrcnn.model import mold_image
import json
from matplotlib import pyplot
# %matplotlib inline
from our_dataset import OurDataset
def fromOutputToAnn(image_id,out):
    """Convert one MaskRCNN detection output dict into COCO-style annotation records.

    Boxes are rewritten in place from corner form to [x, y, width, height].
    NOTE(review): MaskRCNN 'rois' are typically ordered [y1, x1, y2, x2] —
    confirm the downstream evaluator expects this axis order.
    """
    records = []
    for class_id, box, score in zip(out['class_ids'], out['rois'], out['scores']):
        # corner -> size representation
        box[2] = box[2] - box[0]
        box[3] = box[3] - box[1]
        for k in range(4):
            box[k] = float(box[k])
        records.append({
            "category_id": int(class_id),
            "bbox": box.tolist(),
            "score": float(score),
            "image_id": int(image_id),
        })
    return records
def generateAnnotations(dataset,model):
    """Run the detector over every image in *dataset* and collect COCO-style records.

    NOTE(review): uses the module-level ``cfg`` for image molding, and assumes
    ``dataset.image_ids`` iterates in the same order as the positional counter
    ``i`` passed to ``load_image`` — confirm the two always coincide.
    """
    i = 0
    all_outputs = []
    for image_id in dataset.image_ids:
        # load image info (carries the dataset's original 'real_id')
        info = dataset.image_info[image_id]
        image = dataset.load_image(i)
        # mask is loaded but unused here; detection operates on the raw image
        mask, _ = dataset.load_mask(i)
        scaled_image = mold_image(image, cfg)
        # convert image into one sample (add a batch dimension)
        sample = expand_dims(scaled_image, 0)
        # make prediction
        yhat = model.detect(sample, verbose=0)[0]
        out = fromOutputToAnn(info['real_id'],yhat)
        all_outputs.extend(out)
        i = i+1
    return all_outputs
# define the prediction configuration
class PredictionConfig(Config):
    # Inference-time Mask R-CNN configuration (duplicate of the cell above;
    # each notebook cell is self-contained).
    # NOTE(review): the config name is a leftover from a kangaroo tutorial.
    NAME = "kangaroo_cfg"
    # Number of classes: 3 object classes + 1 background
    NUM_CLASSES = 3 +1
    # simplify GPU config: single GPU, one image per batch
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
ann_file_path = '/home/test/data/nightowls/nightowls_validation_small.json'
images_path = '/home/test/data/nightowls/validation/nightowls_validation'
# val set
val_set = OurDataset()
val_set.load_dataset(images_path, ann_file_path, is_train=False)
val_set.prepare()
print('Validation: %d' % len(val_set.image_ids))
i = 0
# create config
cfg = PredictionConfig()
# define the model
model = MaskRCNN(mode='inference', model_dir='./', config=cfg)
# load model weights
model.load_weights('/home/test/data/trained_models/kangaroo_cfg20191030T0853/mask_rcnn_kangaroo_cfg_0001.h5', by_name=True)
json_output = generateAnnotations(val_set,model)
#print(json_output)
with open('out.json', 'w') as outfile:
json.dump(json_output, outfile)
print('DONE')
# +
'''
Function that takes in input the ground truth file of the annotation
and the output of a network in the json format and outputs the
miss rates in the output file
'''
def evaluation(annFile,resFile,outFile = "results.txt"):
    """Evaluate detections against ground truth and write miss rates to *outFile*.

    Runs the nightowls COCO multi-setup evaluation for each of the four
    benchmark setups (id_setup 0..3), appending each summary to the file.
    """
    from coco import COCO # IMPORT THEIR COCO, not pycocotools
    from eval_MR_multisetup import COCOeval
    # fix: honor the outFile argument (was hard-coded to "results.txt")
    res_file = open(outFile, "w")
    try:
        for id_setup in range(0,4):
            cocoGt = COCO(annFile)
            cocoDt = cocoGt.loadRes(resFile)
            imgIds = sorted(cocoGt.getImgIds())
            cocoEval = COCOeval(cocoGt,cocoDt,'bbox')
            cocoEval.params.imgIds = imgIds
            cocoEval.evaluate(id_setup)
            cocoEval.accumulate()
            cocoEval.summarize(id_setup,res_file)
    finally:
        # ensure the results file is closed even if an evaluation setup fails
        res_file.close()
# Ground truth
annFile = '/home/test/data/nightowls/nightowls_validation_small.json'
#annFile = '/home/test/data/nightowls/nightowls_validation.json'
# Detections
resFile = './out.json'
#resFile = '../sample-Faster-RCNN-nightowls_validation.json'
evaluation(annFile,resFile)
# -
| Code/Maskrcnn-keras/base_implementation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import time
from sklearn.model_selection import KFold
from keras import backend as K
### Custom Py Script from src folder
from src import Pipeline, Toxic_Models, Model_trainer
# -
# # Load Pretrained Embedding Model
# emb_model = Pipeline.load_emb_model('./emb_model/crawl-300d-2M.vec') # FastText Embeddings
emb_model = Pipeline.load_emb_model('./emb_model/glove.840B.300d.txt') # Glove Embeddings
# # Hyper parameter
# +
### classes names
list_classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
### preprocessing parameter
maxlen = 180
max_features = 100000
embed_size = 300
### model parameter
cell_size = 64 ### Cell unit size
cell_type_GRU = True ### Cell Type: GRU/LSTM
filter_size = 64
kernel_size = 2
stride = 1
### K-fold cross-validation
k= 5
kf = KFold(n_splits=k, shuffle=False)
### training protocol
epochs= 13
batch_size = 128
lr_s = True ### Use of Learning Schedule
# -
# # Load data
submission = pd.read_csv("./input/sample_submission.csv")
X_tr, Y_tr, X_te, emb_matrix = Pipeline.load_data_2path(emb_model, max_features = max_features, maxlen = maxlen)
# # Model
# +
model_name = 'rnn'
### ================================================================== ###
oofs = []
res = np.zeros_like(submission[list_classes])
for train_index, val_index in kf.split(X_tr[0], Y_tr):
mdl = Toxic_Models.get_model_rnn(emb_matrix, cell_size=cell_size, maxlen=maxlen, cell_type_GRU=cell_type_GRU)
pred, oof = Model_trainer.model_train_cv(mdl, X_tra = [X_tr[0][train_index], X_tr[1][train_index]], X_val = [X_tr[0][val_index], X_tr[1][val_index]],
y_tra= Y_tr[train_index], y_val= Y_tr[val_index], x_test=X_te,
model_name=model_name, batch_size=batch_size, epochs=epochs, lr_schedule=lr_s)
res += pred
oofs.append(oof)
K.clear_session()
time.sleep(20)
res = res/k
### Collect result & Report
submission[list_classes] = res
submission.to_csv("submission_{}.csv".format(model_name), index = False)
np_oofs = np.array(oofs)
pd_oofs = pd.DataFrame(np.concatenate(np_oofs), columns=list_classes)
pd_oofs.to_csv("oofs_{}.csv".format(model_name), index=False)
# +
model_name = 'rnncnn'
### ================================================================== ###
oofs = []
res = np.zeros_like(submission[list_classes])
for train_index, val_index in kf.split(X_tr[0], Y_tr):
mdl = Toxic_Models.get_model_rnn_cnn(emb_matrix, cell_size=cell_size, maxlen=maxlen, cell_type_GRU=cell_type_GRU,
filter_size=filter_size, kernel_size=kernel_size, stride=stride)
pred, oof = Model_trainer.model_train_cv(mdl, X_tra = [X_tr[0][train_index], X_tr[1][train_index]], X_val = [X_tr[0][val_index], X_tr[1][val_index]],
y_tra= Y_tr[train_index], y_val= Y_tr[val_index], x_test=X_te,
model_name=model_name, batch_size=batch_size, epochs=epochs, lr_schedule=lr_s)
res += pred
oofs.append(oof)
K.clear_session()
time.sleep(20)
res = res/k
### Collect result & Report
submission[list_classes] = res
submission.to_csv("submission_{}.csv".format(model_name), index = False)
np_oofs = np.array(oofs)
pd_oofs = pd.DataFrame(np.concatenate(np_oofs), columns=list_classes)
pd_oofs.to_csv("oofs_{}.csv".format(model_name), index=False)
# +
model_name = 'rnn_caps'
### ================================================================== ###
oofs = []
res = np.zeros_like(submission[list_classes])
for train_index, val_index in kf.split(X_tr[0], Y_tr):
mdl = Toxic_Models.get_model_rnn_caps(emb_matrix, cell_size=cell_size, maxlen=maxlen, cell_type_GRU=cell_type_GRU)
pred, oof = Model_trainer.model_train_cv(mdl, X_tra = [X_tr[0][train_index], X_tr[1][train_index]], X_val = [X_tr[0][val_index], X_tr[1][val_index]],
y_tra= Y_tr[train_index], y_val= Y_tr[val_index], x_test=X_te,
model_name=model_name, batch_size=batch_size, epochs=epochs, lr_schedule=lr_s)
res += pred
oofs.append(oof)
K.clear_session()
time.sleep(20)
res = res/k
### Collect result & Report
submission[list_classes] = res
submission.to_csv("submission_{}.csv".format(model_name), index = False)
np_oofs = np.array(oofs)
pd_oofs = pd.DataFrame(np.concatenate(np_oofs), columns=list_classes)
pd_oofs.to_csv("oofs_{}.csv".format(model_name), index=False)
# +
model_name = '2rnn'
### ================================================================== ###
oofs = []
res = np.zeros_like(submission[list_classes])
for train_index, val_index in kf.split(X_tr[0], Y_tr):
mdl = Toxic_Models.get_model_2rnn(emb_matrix, cell_size=cell_size, maxlen=maxlen, cell_type_GRU=cell_type_GRU)
pred, oof = Model_trainer.model_train_cv(mdl, X_tra = [X_tr[0][train_index], X_tr[1][train_index]], X_val = [X_tr[0][val_index], X_tr[1][val_index]],
y_tra= Y_tr[train_index], y_val= Y_tr[val_index], x_test=X_te,
model_name=model_name, batch_size=batch_size, epochs=epochs, lr_schedule=lr_s)
res += pred
oofs.append(oof)
K.clear_session()
time.sleep(20)
res = res/k
### Collect result & Report
submission[list_classes] = res
submission.to_csv("submission_{}.csv".format(model_name), index = False)
np_oofs = np.array(oofs)
pd_oofs = pd.DataFrame(np.concatenate(np_oofs), columns=list_classes)
pd_oofs.to_csv("oofs_{}.csv".format(model_name), index=False)
# +
model_name = '2rnncnn'
### ================================================================== ###
oofs = []
res = np.zeros_like(submission[list_classes])
for train_index, val_index in kf.split(X_tr[0], Y_tr):
mdl = Toxic_Models.get_model_2rnn_cnn(emb_matrix, cell_size=cell_size, maxlen=maxlen, cell_type_GRU=cell_type_GRU,
filter_size=filter_size, kernel_size=kernel_size, stride=stride)
pred, oof = Model_trainer.model_train_cv(mdl, X_tra = [X_tr[0][train_index], X_tr[1][train_index]], X_val = [X_tr[0][val_index], X_tr[1][val_index]],
y_tra= Y_tr[train_index], y_val= Y_tr[val_index], x_test=X_te,
model_name=model_name, batch_size=batch_size, epochs=epochs, lr_schedule=lr_s)
res += pred
oofs.append(oof)
K.clear_session()
time.sleep(20)
res = res/k
### Collect result & Report
submission[list_classes] = res
submission.to_csv("submission_{}.csv".format(model_name), index = False)
np_oofs = np.array(oofs)
pd_oofs = pd.DataFrame(np.concatenate(np_oofs), columns=list_classes)
pd_oofs.to_csv("oofs_{}.csv".format(model_name), index=False)
# -
# # END
| [Kaggle] Toxic_Comment_Classification_Challenge/Main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.3 64-bit
# name: python_defaultSpec_1600614429847
# ---
# ### Задание 1
# +
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
# fix: removed the incomplete "from sklearn" line, which was a SyntaxError
boston = load_boston()
X = pd.DataFrame(data = boston['data'], columns = boston['feature_names'])
y = boston['target']
# -
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=42)
# +
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = pd.DataFrame(scaler.fit_transform(X_train), columns=X_train.columns)
X_test_scaled = pd.DataFrame(scaler.transform(X_test), columns=X_test.columns)
# +
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, learning_rate=250, random_state=42)
X_train_tsne = tsne.fit_transform(X_train_scaled)
plt.style.use('fivethirtyeight')
# %config InlineBackend.figure_format = 'svg'
# %matplotlib inline
plt.scatter(X_train_tsne[:, 0], X_train_tsne[:, 1])
plt.show()
# -
# ### Задание 2
# +
from sklearn.cluster import KMeans
# Cluster the 2-D t-SNE embedding of the training split into 3 groups.
model = KMeans(n_clusters=3, random_state=100)
labels_train = model.fit_predict(X_train_tsne)
plt.scatter(X_train_tsne[:, 0], X_train_tsne[:, 1], c=labels_train)
plt.show()
# + tags=[]
# Per-cluster summary: mean house price (y) and mean crime rate (CRIM).
print('Кластер 0 средняя цена: {}'.format(y_train[labels_train == 0].mean()))
print('Кластер 1 средняя цена: {}'.format(y_train[labels_train == 1].mean()))
print('Кластер 2 средняя цена: {}'.format(y_train[labels_train == 2].mean()))
print('Кластер 0 средий CRIM: {}'.format(X_train.loc[labels_train == 0, 'CRIM'].mean()))
print('Кластер 1 средий CRIM: {}'.format(X_train.loc[labels_train == 1, 'CRIM'].mean()))
print('Кластер 2 средий CRIM: {}'.format(X_train.loc[labels_train == 2, 'CRIM'].mean()))
# -
# ### Задание 3
# + tags=[]
# BUG FIX: the original embedded X_train_scaled here, so labels_test had the
# length of the *training* split and could not correctly mask y_test / X_test.
# t-SNE has no transform() for new data, so the test split gets its own
# fit_transform (embeddings of the two splits are therefore not comparable).
X_test_tsne = tsne.fit_transform(X_test_scaled)
labels_test = model.fit_predict(X_test_tsne)
print('Кластер 0 средняя цена: {}'.format(y_test[labels_test == 0].mean()))
print('Кластер 1 средняя цена: {}'.format(y_test[labels_test == 1].mean()))
print('Кластер 2 средняя цена: {}'.format(y_test[labels_test == 2].mean()))
print('Кластер 0 средий CRIM: {}'.format(X_test.loc[labels_test == 0, 'CRIM'].mean()))
print('Кластер 1 средий CRIM: {}'.format(X_test.loc[labels_test == 1, 'CRIM'].mean()))
print('Кластер 2 средий CRIM: {}'.format(X_test.loc[labels_test == 2, 'CRIM'].mean()))
| python_data_science/ex8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# ###### Created by [<NAME>](https://github.com/romogo17)
#
# `pgcv` is a PostgreSQL extension for Computer Vision from the database server. It aims to be an extension that implements several image manipulation algorithms to store and manipulate images from the database server.
#
# ## Background
#
# As part of the research process in extending PostgreSQL, a first prototype of this extension was made using C hoping to study how the native extensions with base types worked. Here are some things I learnt from this prototype:
#
# 1. Base extensions only make sense when the datatype itself and its representation have a semantic meaning without needing additional functions.
#
# One example of this is the datatype that the PostgreSQL documentation uses as example of a base type: `text`.
# ```c
# typedef struct {
# int32 length;
# char data[FLEXIBLE_ARRAY_MEMBER];
# } text;
# ```
# The `text` datatype has a meaning of its own. We don't need to ask for any attribute given a `text` type, its meaning is axiomatic.
#
# Defining complex structures as base types would require creating several functions to access their attributes, since PostgreSQL treats these structures as chunks of memory without any consideration of their internal structure.
#
# 2. The process of using dynamic arrays, returning arrays, returning sets... requires a lot of boilerplate and careful memory management.
#
# Using dynamic arrays of structured base types from C isn't an easy task. It requires a lot of considerations only to use the types: TOAST, `FLEXIBLE_ARRAY_MEMBER`, `varlena` structures (header, data, etc.), deTOAST, storage, copying, array constructing and deconstructing, `Oid`s, memory corruption, testing.
# All these considerations have to be taken into account when implementing the data manipulation algorithms required by the extension, which made the development process a lot slower.
# With this in mind, when starting to design the real extension I decided not to make it a 100% native extension and instead implement most of it using PL/pgSQL.
#
# The approach I was going to use for this purpose was to create a "Tensor-like" datatype in PL/pgSQL that would describe an N dimensional array to represent the images.
#
# However, I thought this would require me to create a set of functions for N dimensional array manipulation and then create the specific algorithms required by the project's domain so I started to look for options and, after having read about a hundred pages of the PostgreSQL documentation about extending SQL, I arrived to the PL/Python section, which immediately caught my attention because this means I could use the wonderful N dimensional arrays provided by `numpy.ndarray`
#
# This made manipulating the images, structures and arrays a lot easier and faster. Also, I'm quite interested in Deep Learning so I've been wanting to use Python more for a while
# +
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from PIL import Image
# Open an image file, apply a median filter to suppress noise, and display
# the filtered result in grayscale.
img = Image.open('data/mdb147lx.png')
img = np.array(img)
med = signal.medfilt(img, 3)  # kernel size 3 -> 3x3 median filter
plt.imshow(med, cmap = plt.get_cmap('gray'))
print("image shape: ", list(img.shape), " image data: ", np.ravel(img))
# -
# ## Packages used
#
# This extension uses `numpy`, `scipy` and `scikit-image`. From the documentation:
#
# - NumPy’s array type augments the Python language with an efficient data structure useful for numerical work, e.g., manipulating matrices. NumPy also provides basic numerical routines, such as tools for finding eigenvectors.
# - SciPy contains additional routines needed in scientific work: for example, routines for computing integrals numerically, solving differential equations, optimization, and sparse matrices.
#
# Additionally, `Pillow` is used in order to support multiple file formats for reading images into `ndarray`s. The following is a list of the file formats supported by Pillow:
#
# - Fully supported formats
# 1. BMP
# 1. EPS
# 1. GIF
# 1. ICNS
# 1. ICO
# 1. IM
# 1. JPEG and JPEG 2000
# 1. MSP
# 1. PCX
# 1. PNG
# 1. PPM (which includes PGM files)
# 1. SGI
# 1. SPIDER
# 1. TGA
# 1. TIFF
# 1. WebP
# 1. XBM
# - Read-only formats
# 1. BLP
# 1. CUR
# 1. DCX
# 1. DDS
# 1. FLI, FLC
# 1. FPX
# 1. FTEX
# 1. GBR
# 1. GD
# 1. IMT
# 1. IPTC / NAA
# 1. MCIDAS
# 1. MIC
# 1. MPO
# 1. PCD
# 1. PIXAR
# 1. PSD
# 1. WAL
# 1. XPM
# - Write-only formats
# 1. BUFR
# 1. FITS
# 1. GRIB
# 1. HDF5
# 1. MPEG
# 1. WMF
#
| notebooks/01-Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# 说明:
# 给定一棵二叉树,在树的最后一行中找到最左边的值。
# 注意:
# 您可以假设树(即给定的根节点)不是NULL。
# + active=""
# 思路:
# 都是一样的套路,求每一行树节点的val
# 最后返回最后一层level的第一个值即可。
# -
# Definition for a binary tree node.
class TreeNode:
    """A single node of a binary tree: a value plus left/right children."""

    def __init__(self, val=0, left=None, right=None):
        # Tuple assignment keeps the three attribute writes together.
        self.val, self.left, self.right = val, left, right
class Solution:
    def findBottomLeftValue(self, root: "TreeNode") -> int:
        """Return the value of the leftmost node in the last row of the tree.

        Level-order traversal; the problem guarantees root is not None.
        IMPROVEMENT: the original accumulated every level's values and then
        read vals[-1][0]; we keep only the most recent non-empty level, so
        extra memory is O(max width) instead of O(n).
        """
        current = [root]        # nodes on the level being scanned (may hold None)
        last_level = []         # values of the last non-empty level seen so far
        while current:
            next_level = []
            level_vals = []
            for node in current:
                if node:
                    level_vals.append(node.val)
                    next_level.append(node.left)
                    next_level.append(node.right)
            if level_vals:
                last_level = level_vals
            current = next_level
        # First value of the deepest level == bottom-left value.
        return last_level[0]
| Tree/0831/513. Find Bottom Left Tree Value.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h2 style="text-align: center;">Basics of Data Science Using Python</h2>
#
# In this session, we will learn the basics of data science using Python. In the previous sessions, we already discussed about the basics of data science and the applications of data science. So, we will move forward to today's session where we will be covering many python modules and their applications.
#
# The modules covered in this session are,
# <ol>
# <li>Numpy</li>
# <li>Pandas</li>
# <li>Matplotlib</li>
# </ol>
# <h3 style="text-align: center;"> 1. Learning NumPy </h3>
#
# #### What is NumPy?
# <p> NumPy is a python library used for working with arrays. It also has functions for working in domain of linear algebra, fourier transform, and matrices. NumPy stands for Numerical Python.</p>
#
# #### What are we going to learn?
# <p>We are going to learn the following </p>
# <ol>
# <li>Numpy Arrays</li>
# <li>Aggregation Functions</li>
# <li>Comparison, Mask and Boolean Logic</li>
# <li>Fancy Indexing</li>
# <li>Sorting Arrays</li>
# <li>Structured Data</li>
# </ol>
# ### Import NumPy
import numpy as np
# ### 1. NumPy Arrays
# +
# Seed NumPy's global RNG so the random arrays below are reproducible.
np.random.seed(42)
arrLen_1d = 6
x1 = np.random.randint(5, size=arrLen_1d)  # Creating 1D array of ints in [0, 5)
arrShape_2d = (3, 4)
x2 = np.random.randint(8, size=arrShape_2d)  # creating 2D array of ints in [0, 8)
arrShape_3d = (4, 3, 2)
x3 = np.random.randint(low=0, high=30, size=arrShape_3d)  # creating 3D array of ints in [0, 30)
# -
print('x1:\n', x1.tolist())  # printing array x1 as list
print('x2:\n', x2.tolist())  # printing array x2 as list
print('x3:\n', x3.tolist())  # printing array x3 as list
def print_array_property(arr):
    """Print the structural properties of a NumPy array.

    Reports rank, shape, element count, dtype, per-element size and total
    memory footprint, one labelled line each.
    """
    rows = [
        ('Number of Dimension:', (arr.ndim,)),          # rank
        ('Array Shape:', (arr.shape,)),                 # size of each dimension
        ('Array Size:', (arr.size,)),                   # total element count
        ('Array Datatype:', (arr.dtype,)),              # element data type
        ('Array Item Size:', (arr.itemsize, 'bytes')),  # bytes per element
        ('Array total Size:', (arr.nbytes, 'bytes')),   # bytes for the whole array
    ]
    for label, values in rows:
        print(label, *values)
print_array_property(x2)
# ### Array Indexing
# Array indexing in NumPy is almost the same as list indexing in Python. If you are familiar with lists in Python, this section won't take much of your time.
# NOTE: slices exclude the end index, so x1[2:4] yields the elements at indices 2 and 3.
print('Array x1 from index 2 to 4:', x1[2:4])
print('Element of array x3 in index (2,1):', x3[2][1])
print('Last element of array x2:', x2[-1])
print('First 3 elements of array x1:', x1[:3])
print('Last 3 elements of array x1:', x1[-3:])
# ### 2. Aggregation Functions
# <p>To get the statistical summary of a data, the aggregation functions like mean, median, mode plays an important role. Some of these operations are demonstrated below.</p>
# +
# Demonstrate NumPy's aggregation/reduction functions on a random 1-D array.
arr = np.random.randint(low=1, high=10, size=(10))
print('Created array is:', arr.tolist())
print('Sum of all array elements in arr:', np.sum(arr))
print('Product of all array elements in arr:', np.prod(arr))
print('Mean of all array elements in arr:', np.mean(arr))
print('Median of all array elements in arr:', np.median(arr))
print('Average of all array elements in arr:', np.average(arr))
print('Maximum value of all array elements in arr:', np.max(arr))
print('Index of maximum value in arr:', np.argmax(arr))
print('Minimum value of all array elements in arr:', np.min(arr))
print('Index of minimum value in arr:', np.argmin(arr))
print('Standard Deviation of all array elements in arr:', np.std(arr))
print('Variance of all array elements in arr:', np.var(arr))
print('If any of the array elements are true in arr:', np.any(arr))
print('If all of the array elements are true in arr:', np.all(arr))
# -
# ### 3. Comparison, Mask and Boolean Logic
# +
x = np.array([7, 2, 9, 8, 5, 10, 1, 3, 4, 6])  # Creating a numpy array
print('[MASKS] Print False if the condition is not satisfied. Else print true\n')
print('Elements in x that are less than 4 OR greater than 8', ((x<4) | (x>8)))  # OR OPERATOR
# NOTE: no value can be both <4 and >8, so this AND mask is all-False by construction.
print('Elements in x that are less than 4 AND greater than 8', ((x<4) & (x>8)))  # AND OPERATOR
print('Number of elements in x that are not equal to 4', np.sum(x != 4 ))  # counting via a boolean mask
print('XOR Operation', (x ^ 7))  # bitwise XOR of each element with 7
print('\n')
print('The elements in x are less than 4:', x<4)
print('The elements in x are less than equal to 2:', x<=2)
print('The elements in x are greater than 7:', x>7)
print('The elements in x are greater equal to 5:', x>=5)
print('The elements in x are equal to 5:', x==5)
print('For the elements in x, 2x = x^2', 2*x==x**2)
print('\n')
print('Every elements in x that are less than 5:', (x<5).all())
print('Any elements in x that is less than 5:', (x<5).any())
# -
# DEFINING ARRAYS TO PERFORM BOOLEAN OPERATIONS
A = np.array([1, 0, 1, 1, 0])
B = np.array([0, 1, 1, 0, 1])
print('A OR B', (A | B))   # element-wise OR
# BUG FIX: this line's label said "A OR B" but the operation is element-wise AND.
print('A AND B', (A & B))
print('A XOR B', (A ^ B))  # element-wise XOR
# ### 4. Fancy Indexing
# As of now, we saw how to access portions of arrays using simple indices. Now, we'll look at another style of array indexing, known as fancy indexing. Fancy indexing is like the simple indexing we've already seen, but we pass arrays of indices in place of single scalars. This allows us to very quickly access and modify complicated subsets of an array’s values.
# Fancy indexing: pick arbitrary elements/rows by listing their indices.
print('Fancy Indexed Array from array X1:', [x1[1], x1[4], x1[0]])
print('Fancy Indexed Array from array X2:', [x2[0].tolist(), x2[2].tolist()])
print('Fancy Indexed Array from array X3:', [x3[1][0][1], x3[2][1][0], x3[0][0][1]])
# ### 5. Sorting Arrays
# +
# WE CAN SORT AN ARRAY IN TWO WAYS..
# sort method - 1
a = np.array([1, 2, 9, 6, 2, 4, 1, 0, 1])
a = np.sort(a)  # np.sort returns a sorted *copy*; the input array is untouched
print(a)
# sort method - 2: ndarray.sort() sorts the array *in place* and returns None
b = np.array([9, 2, 8, 6, 20, 4, 11, 2, 6])
b.sort()
print(b)
# +
# NUMPY ALSO ALLOWS US TO SORT A MULTIDIMENSIONAL MATRIX ALONG IT'S AXIS (ROW WISE, COLUMN WISE...)
# sorting array through axis
rand = np.random.RandomState(42)  # local RandomState keeps the demo reproducible
X = rand.randint(0, 10, (4, 6))
print('Unsorted Array:\n', X)
X = np.sort(X, axis=0)
print('Sorted Array along axis 0:\n',X)  # axis=0 sorts each column independently, so the printed label matches the code
# -
# ### 6. Structured and Record Arrays
#
# #### Structured Array
# While often our data can be well represented by a homogeneous array of values, sometimes this is not the case. This code cells demonstrates the use of NumPy’s structured arrays and record arrays, which provide efficient storage for compound, hetero geneous data. While the patterns shown here are useful for simple operations, scenarios like this often lend themselves to the use of Pandas DataFrames. We will see Pandas in the next part.
# +
# Three parallel plain-Python lists describing four people.
name = ['Alex', 'Andrew', 'Casey', 'Halen']  # 4 PEOPLE ARE THERE
age = [25, 45, 37, 19]  # THEIR AGES ARE DEFINED BY age ARRAY
weight = [55.0, 85.5, 68.0, 61.5]  # THEIR WEIGHTS ARE DEFINED USING weight ARRAY
# Compound dtype: 10-char unicode name, 4-byte int age, 8-byte float weight.
data = np.zeros(4, dtype={
    'names': ('name', 'age', 'weight'),
    'formats': ('U10', 'i4', 'f8')
})
# ASSIGNING VALUES
data['name'] = name
data['age'] = age
data['weight'] = weight
print('Complete Structured Data:\n', data.tolist())  # PRINTED AS LIST
# ACCESSING USING THE KEYS
print('All names:', data['name'])
# USING SOME OPERATIONS: boolean mask on one field, then select another field
print('Name of people whose age is less than 30:', data[data['age'] < 30]['name'])
# -
# #### RecordArrays
# NumPy also provides the np.recarray class, which is almost identical to the structured arrays just described, but with one additional feature: fields can be accessed as attributes rather than as dictionary keys
# A recarray view exposes the fields as attributes (data_rec.age) in addition
# to dictionary-style keys (data_rec['age']); no data is copied.
data_rec = data.view(np.recarray)
print(data_rec.age)
print(data_rec.weight)
print(data_rec.name)
# The downside is that for record arrays, there is some extra overhead involved in accessing the fields, even when using the same syntax. We can see this here:
# %timeit data['age']
# %timeit data_rec['age']
# %timeit data_rec.age
# +
# THIS CONCLUDES OUR INTRODUCTION TO NUMPY
# THANK YOU FOR WATCHING. PLEASE LIKE THIS VIDEO AND SUBSCRIBE TO THIS CHANNEL.
| DataScience/DS_4_Data_Science_Using_Python/[DS-Tutorial-1] Basics_of_Data_Science - 1 - Numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Tce3stUlHN0L"
# ##### Copyright 2018 The TensorFlow Authors.
#
# + cellView="form" colab={} colab_type="code" id="tuOe1ymfHZPu"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="MfBg1C5NB3X0"
# # Use TPUs
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/guide/tpu"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/tpu.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/tpu.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/tpu.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="Ys81cOhXOWUP"
# Experimental support for Cloud TPUs is currently available for Keras
# and Google Colab. Before you run this Colab notebooks, ensure that
# your hardware accelerator is a TPU by checking your notebook settings:
# Runtime > Change runtime type > Hardware accelerator > TPU.
# + [markdown] colab_type="text" id="ek5Hop74NVKm"
# ## Setup
# + colab={} colab_type="code" id="Cw0WRaChRxTL"
import tensorflow as tf
import os
import tensorflow_datasets as tfds
# + [markdown] colab_type="text" id="yDWaRxSpwBN1"
# ## TPU Initialization
# TPUs are usually on Cloud TPU workers which are different from the local process running the user python program. Thus some initialization work needs to be done to connect to the remote cluster and initialize TPUs. Note that the `tpu` argument to `TPUClusterResolver` is a special address just for Colab. In the case that you are running on Google Compute Engine (GCE), you should instead pass in the name of your CloudTPU.
# + [markdown] colab_type="text" id="dCqWMqvtwOLs"
# Note: The TPU initialization code has to be at the beginning of your program.
# + colab={} colab_type="code" id="dKPqF8d1wJCV"
# Connect to the Colab TPU worker (address comes from the COLAB_TPU_ADDR env
# var) and bring up the TPU system before any other TPU work.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be at the beginning.
tf.tpu.experimental.initialize_tpu_system(resolver)
print("All devices: ", tf.config.list_logical_devices('TPU'))
# + [markdown] colab_type="text" id="Mv7kehTZ1Lq_"
# ## Manual device placement
# After the TPU is initialized, you can use manual device placement to place the computation on a single TPU device.
#
# + colab={} colab_type="code" id="XRZ4kMoxBNND"
# Pin a single matmul to the first TPU core via explicit device placement.
a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
with tf.device('/TPU:0'):
    c = tf.matmul(a, b)  # (2x3) @ (3x2) -> 2x2 result computed on TPU core 0
print("c device: ", c.device)
print(c)
# + [markdown] colab_type="text" id="_NJm-kgFO0cC"
# ## Distribution strategies
# Most times users want to run the model on multiple TPUs in a data parallel way. A distribution strategy is an abstraction that can be used to drive models on CPU, GPUs or TPUs. Simply swap out the distribution strategy and the model will run on the given device. See the [distribution strategy guide](./distributed_training.ipynb) for more information.
# + [markdown] colab_type="text" id="DcDPMZs-9uLJ"
# First, creates the `TPUStrategy` object.
# + colab={} colab_type="code" id="7SO23K8oRpjI"
strategy = tf.distribute.TPUStrategy(resolver)
# + [markdown] colab_type="text" id="JlaAmswWPsU6"
# To replicate a computation so it can run in all TPU cores, you can simply pass it to `strategy.run` API. Below is an example that all the cores will obtain the same inputs `(a, b)`, and do the matmul on each core independently. The outputs will be the values from all the replicas.
# + colab={} colab_type="code" id="-90CL5uFPTOa"
@tf.function
def matmul_fn(x, y):
    """Compute the matrix product of `x` and `y` (traced as a tf.function)."""
    return tf.matmul(x, y)

# Replicate the computation: every TPU core receives the same (a, b) inputs
# and runs the matmul independently; z holds the per-replica results.
z = strategy.run(matmul_fn, args=(a, b))
print(z)
# + [markdown] colab_type="text" id="uxgYl6kGHJLc"
# ## Classification on TPUs
# As we have learned the basic concepts, it is time to look at a more concrete example. This guide demonstrates how to use the distribution strategy `tf.distribute.TPUStrategy` to drive a Cloud TPU and train a Keras model.
#
# + [markdown] colab_type="text" id="gKRALGgt_kCo"
# ### Define a Keras model
# Below is the definition of MNIST model using Keras, unchanged from what you would use on CPU or GPU. Note that Keras model creation needs to be inside `strategy.scope`, so the variables can be created on each TPU device. Other parts of the code is not necessary to be inside the strategy scope.
# + colab={} colab_type="code" id="DiBiN-Z_R7P7"
def create_model():
    """Build a small CNN classifier for 28x28x1 MNIST images (10 logit outputs)."""
    return tf.keras.Sequential(
        [tf.keras.layers.Conv2D(256, 3, activation='relu', input_shape=(28, 28, 1)),
         tf.keras.layers.Conv2D(256, 3, activation='relu'),
         tf.keras.layers.Flatten(),
         tf.keras.layers.Dense(256, activation='relu'),
         tf.keras.layers.Dense(128, activation='relu'),
         tf.keras.layers.Dense(10)])  # raw logits: the loss is built with from_logits=True
# + [markdown] colab_type="text" id="qYOYjYTg_31l"
# ### Input datasets
# Efficient use of the `tf.data.Dataset` API is critical when using a Cloud TPU, as it is impossible to use the Cloud TPUs unless you can feed them data quickly enough. See [Input Pipeline Performance Guide](./data_performance.ipynb) for details on dataset performance.
#
# For all but the simplest experimentation (using `tf.data.Dataset.from_tensor_slices` or other in-graph data) you will need to store all data files read by the Dataset in Google Cloud Storage (GCS) buckets.
#
# For most use-cases, it is recommended to convert your data into `TFRecord` format and use a `tf.data.TFRecordDataset` to read it. See [TFRecord and tf.Example tutorial](../tutorials/load_data/tfrecord.ipynb) for details on how to do this. This, however, is not a hard requirement and you can use other dataset readers (`FixedLengthRecordDataset` or `TextLineDataset`) if you prefer.
#
# Small datasets can be loaded entirely into memory using `tf.data.Dataset.cache`.
#
# Regardless of the data format used, it is strongly recommended that you use large files, on the order of 100MB. This is especially important in this networked setting as the overhead of opening a file is significantly higher.
#
# Here you should use the `tensorflow_datasets` module to get a copy of the MNIST training data. Note that `try_gcs` is specified to use a copy that is available in a public GCS bucket. If you don't specify this, the TPU will not be able to access the data that is downloaded.
# + colab={} colab_type="code" id="noAd416KSCo7"
def get_dataset(batch_size, is_training=True):
    """Load MNIST from TFDS (public GCS copy) as a batched tf.data pipeline.

    Pixel values are scaled to [0, 1]. Training pipelines are shuffled and
    repeated indefinitely; evaluation pipelines are a single pass.
    """
    split = 'train' if is_training else 'test'
    dataset, info = tfds.load(name='mnist', split=split, with_info=True,
                              as_supervised=True, try_gcs=True)

    def scale(image, label):
        # Normalize pixel values from [0, 255] to [0.0, 1.0].
        image = tf.cast(image, tf.float32)
        image /= 255.0
        return image, label

    dataset = dataset.map(scale)

    # Only shuffle and repeat the dataset in training. The advantage of an
    # infinite dataset for training is to avoid the potential last partial batch
    # in each epoch, so users don't need to think about scaling the gradients
    # based on the actual batch size.
    if is_training:
        dataset = dataset.shuffle(10000)
        dataset = dataset.repeat()

    dataset = dataset.batch(batch_size)
    return dataset
# + [markdown] colab_type="text" id="mgUC6A-zCMEr"
# ### Train a model using Keras high level APIs
#
# You can train a model simply with Keras fit/compile APIs. Nothing here is TPU specific, you would write the same code below if you had mutliple GPUs and where using a `MirroredStrategy` rather than a `TPUStrategy`. To learn more, check out the [Distributed training with Keras](https://www.tensorflow.org/tutorials/distribute/keras) tutorial.
# + colab={} colab_type="code" id="ubmDchPqSIx0"
# Create and compile the model inside the strategy scope so its variables are
# placed on the TPU replicas, then train with the standard Keras fit API.
with strategy.scope():
    model = create_model()
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['sparse_categorical_accuracy'])

batch_size = 200
steps_per_epoch = 60000 // batch_size   # MNIST train set size / batch
validation_steps = 10000 // batch_size  # MNIST test set size / batch

train_dataset = get_dataset(batch_size, is_training=True)
test_dataset = get_dataset(batch_size, is_training=False)

model.fit(train_dataset,
          epochs=5,
          steps_per_epoch=steps_per_epoch,
          validation_data=test_dataset,
          validation_steps=validation_steps)
# + [markdown] colab_type="text" id="8hSGBIYtUugJ"
# To reduce python overhead, and maximize the performance of your TPU, try out the **experimental** `experimental_steps_per_execution` argument to `Model.compile`. Here it increases throughput by about 50%:
# + colab={} colab_type="code" id="M6e3aVVLUorL"
# Recompile with experimental_steps_per_execution so Keras dispatches several
# steps per TPU round-trip, reducing Python-side overhead between steps.
with strategy.scope():
    model = create_model()
    model.compile(optimizer='adam',
                  # Anything between 2 and `steps_per_epoch` could help here.
                  experimental_steps_per_execution = 50,
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['sparse_categorical_accuracy'])

model.fit(train_dataset,
          epochs=5,
          steps_per_epoch=steps_per_epoch,
          validation_data=test_dataset,
          validation_steps=validation_steps)
# + [markdown] colab_type="text" id="0rRALBZNCO4A"
# ### Train a model using custom training loop.
# You can also create and train your models using `tf.function` and `tf.distribute` APIs directly. `strategy.experimental_distribute_datasets_from_function` API is used to distribute the dataset given a dataset function. Note that the batch size passed into the dataset will be per replica batch size instead of global batch size in this case. To learn more, check out the [Custom training with tf.distribute.Strategy](https://www.tensorflow.org/tutorials/distribute/custom_training) tutorial.
#
# + [markdown] colab_type="text" id="DxdgXPAL6iFE"
# First, create the model, datasets and tf.functions.
# + colab={} colab_type="code" id="9aHhqwao2Fxi"
# Create the model, optimizer and metrics inside strategy scope, so that the
# variables can be mirrored on each device.
with strategy.scope():
    model = create_model()
    optimizer = tf.keras.optimizers.Adam()
    training_loss = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)
    training_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
        'training_accuracy', dtype=tf.float32)

# Calculate per replica batch size, and distribute the datasets on each TPU
# worker.
per_replica_batch_size = batch_size // strategy.num_replicas_in_sync

train_dataset = strategy.experimental_distribute_datasets_from_function(
    lambda _: get_dataset(per_replica_batch_size, is_training=True))

@tf.function
def train_step(iterator):
    """Run one distributed training step: forward pass, loss, backward pass, metrics."""
    def step_fn(inputs):
        """The computation to run on each TPU device."""
        images, labels = inputs
        with tf.GradientTape() as tape:
            logits = model(images, training=True)
            loss = tf.keras.losses.sparse_categorical_crossentropy(
                labels, logits, from_logits=True)
            # Average over the *global* batch so gradient magnitudes are
            # correct regardless of how many replicas split the batch.
            loss = tf.nn.compute_average_loss(loss, global_batch_size=batch_size)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(list(zip(grads, model.trainable_variables)))
        # Undo the per-replica scaling when accumulating the loss metric.
        training_loss.update_state(loss * strategy.num_replicas_in_sync)
        training_accuracy.update_state(labels, logits)

    strategy.run(step_fn, args=(next(iterator),))
# + [markdown] colab_type="text" id="Ibi7Z97V6xsQ"
# Then run the training loop.
# + colab={} colab_type="code" id="1du5cXWt6Vtw"
# Custom training loop: 5 epochs of `steps_per_epoch` distributed steps each,
# printing and resetting the metric accumulators after every epoch.
steps_per_eval = 10000 // batch_size

train_iterator = iter(train_dataset)
for epoch in range(5):
    print('Epoch: {}/5'.format(epoch))
    for step in range(steps_per_epoch):
        train_step(train_iterator)
    print('Current step: {}, training loss: {}, accuracy: {}%'.format(
        optimizer.iterations.numpy(),
        round(float(training_loss.result()), 4),
        round(float(training_accuracy.result()) * 100, 2)))
    training_loss.reset_states()
    training_accuracy.reset_states()
# + [markdown] colab_type="text" id="TnZJUM3qIjKu"
# ### Improving performance by multiple steps within `tf.function`
# The performance can be improved by running multiple steps within a `tf.function`. This is achieved by wrapping the `strategy.run` call with a `tf.range` inside `tf.function`, AutoGraph will convert it to a `tf.while_loop` on the TPU worker.
#
# Although with better performance, there are tradeoffs comparing with a single step inside `tf.function`. Running multiple steps in a `tf.function` is less flexible, you cannot run things eagerly or arbitrary python code within the steps.
#
# + colab={} colab_type="code" id="2grYvXLzJYkP"
@tf.function
def train_multiple_steps(iterator, steps):
    """Run `steps` training steps inside a single tf.function call.

    The tf.range loop is converted by AutoGraph into a tf.while_loop on the
    TPU worker, reducing per-step Python dispatch overhead.
    """
    def step_fn(inputs):
        """The computation to run on each TPU device."""
        images, labels = inputs
        with tf.GradientTape() as tape:
            logits = model(images, training=True)
            loss = tf.keras.losses.sparse_categorical_crossentropy(
                labels, logits, from_logits=True)
            # Average over the global batch (see train_step above).
            loss = tf.nn.compute_average_loss(loss, global_batch_size=batch_size)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(list(zip(grads, model.trainable_variables)))
        training_loss.update_state(loss * strategy.num_replicas_in_sync)
        training_accuracy.update_state(labels, logits)

    for _ in tf.range(steps):
        strategy.run(step_fn, args=(next(iterator),))

# Convert `steps_per_epoch` to `tf.Tensor` so the `tf.function` won't get
# retraced if the value changes.
train_multiple_steps(train_iterator, tf.convert_to_tensor(steps_per_epoch))

print('Current step: {}, training loss: {}, accuracy: {}%'.format(
    optimizer.iterations.numpy(),
    round(float(training_loss.result()), 4),
    round(float(training_accuracy.result()) * 100, 2)))
# + [markdown] colab_type="text" id="WBKVhMvWjibf"
# ## Next steps
#
# * [Google Cloud TPU Documentation](https://cloud.google.com/tpu/docs/) - Set up and run a Google Cloud TPU.
# * [Distributed training with TensorFlow](./distributed_training.ipynb) - How to use distribution strategy and links to many example showing best practices.
# * [TensorFlow Official Models](https://github.com/tensorflow/models/tree/master/official) - Examples of state of the art TensorFlow 2.x models that are Cloud TPU compatible.
# * [The Google Cloud TPU Performance Guide](https://cloud.google.com/tpu/docs/performance-guide) - Enhance Cloud TPU performance further by adjusting Cloud TPU configuration parameters for your application.
| site/en/guide/tpu.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import data_analysis
import pandas as pd
import tensorflow as tf
import string
import numpy as np
import spacy
import data_analysis
import re
from spacy.lang.en.stop_words import STOP_WORDS
# +
# Load the merged dataset and print basic diagnostics (dtypes, stats, NaNs).
df = pd.read_json(open("final_dataset/merged_dset.json", "r", encoding="utf8"))
print(df.info())
print('\n')
print(df.describe())
print('\n')
print(df.isna().sum())
print('\n')
print(df['themes'])
# -
# Scrape the list of known topics and build an integer encoding for them.
# NOTE(review): semantics of the data_analysis helpers are inferred from their
# names only — verify against the data_analysis module.
topics = data_analysis.scrape_topics()
print(topics)
encoded_topics = data_analysis.encode_topics(topics)
print(encoded_topics)
# Sanity-check the encoder on a single row before applying it to the column.
row = df['themes'][1]
print(row)
encoded = data_analysis.encode_column(row)
print(encoded)
# Keep only rows that have themes, then encode the theme column.
df = df[df['themes'].notna()]
df['themes'] = df['themes'].apply(data_analysis.encode_column)
print(df['themes'])
# #### Using added functions to clear df and encode authors
df.head()
df.shape
df = data_analysis.clear_nans(df)
df.shape
df.isna().sum()
df
df = data_analysis.init_author_encoding(df)
df.head()
# ### Text preprocessing
# +
# set up data types
df = df.astype({'text': 'str', 'year': 'int32'})
# change the texts to lowercase
df['text'] = df['text'].str.lower()
df['title'] = df['title'].str.lower()
# Drop empty texts
df['text'].replace('', np.nan, inplace=True)
df.dropna(subset=['text'], inplace=True)
# Remove punctuation: the translation table maps every punctuation character
# to None, i.e. deletes it.
table = str.maketrans('', '', string.punctuation)
df['text'] = [row['text'].translate(table) for index, row in df.iterrows()]
df['title'] = [row['title'].translate(table) for index, row in df.iterrows()]
# -
# Plot the word-frequency distribution of the corpus.
freq = pd.Series(' '.join(df['text']).split(' ')).value_counts()
freq[:50].plot(figsize=(25, 15))
# Remove stopwords
df['text'] = df['text'].apply(lambda x: " ".join(x for x in x.split() if x not in STOP_WORDS))
# Word distribution after stop-word removal.
freq = pd.Series(' '.join(df['text']).split(' ')).value_counts()
freq[:100].plot(figsize=(25, 15))
# Text-length distribution (characters per document).
texts_len = df['text'].apply(len)
texts_len.hist(bins=200, figsize=(20, 10))
# Cap texts at 1000 characters, then drop the trailing fragment after the last
# space so no word is cut in half by the truncation.
df['text'] = [row['text'][:1000] for index, row in df.iterrows()]
df['text'] = [row['text'].rsplit(' ', 1)[0] for index, row in df.iterrows()]
# Text-length distribution after the cap is applied.
texts_len = df['text'].apply(len)
texts_len.hist(bins=200, figsize=(20, 10))
# +
# Lemmatization: replace each token with its spaCy lemma.
nlp = spacy.load("en_core_web_sm")
lemmatizer = nlp.get_pipe("lemmatizer")  # NOTE(review): unused below — lemmas come from token.lemma_
df['text'] = [
    [token.lemma_ for token in nlp(row['text'])]
    for index, row in df.iterrows()
]
df['title'] = [
    [token.lemma_ for token in nlp(row['title'])]
    for index, row in df.iterrows()
]
print(df.head())
# +
# Fragments left over after punctuation removal (e.g. "don't" -> "do" + "nt"),
# mapped to their expanded full forms.
_CONTRACTION_FULL_FORMS = {
    'nt': 'not',
    're': 'are',
    's': 'is',
    'd': 'would',
    'll': 'will',
    't': 'not',
    've': 'have',
    'm': 'am',
}


def full_form(word):
    """Return the expanded form of a contraction fragment, or *word* unchanged.

    Replaces the original chain of eight sequential ``if`` statements with a
    single O(1) dictionary lookup; behavior is identical for every input.
    """
    return _CONTRACTION_FULL_FORMS.get(word, word)
# Expand contraction fragments token-by-token. Series.apply over the token
# lists replaces the original iterrows() loop, which re-assigned the column
# positionally and paid Python-level per-row overhead.
df['text'] = df['text'].apply(lambda tokens: [full_form(w) for w in tokens])
print(df.head())
# -
df.head()
# +
### Texts to digits transforming
# -
max_features=5001 # set maximum number of words to 5000 (Tokenizer keeps num_words-1 most frequent)
tok = tf.keras.preprocessing.text.Tokenizer(num_words=max_features)
tok.fit_on_texts(df['text'])
import json
from keras_preprocessing.text import tokenizer_from_json
# Persist the fitted tokenizer so inference code can reload the same vocabulary.
# NOTE(review): tok.to_json() already returns a JSON string, so json.dumps()
# here encodes it a second time (the file holds a quoted/escaped string) --
# confirm the loading side expects this double encoding.
tokenizer_json = tok.to_json()
with open('tokenizer.json', 'w', encoding='utf-8') as f:
    f.write(json.dumps(tokenizer_json, ensure_ascii=False))
# NOTE(review): get_config()['word_counts'] is a JSON *string*, so this slice
# shows the first 500 characters, not the first 500 word counts.
tok.get_config()['word_counts'][:500]
# create sequences (lists of word indices) for texts and titles
df['text'] = tok.texts_to_sequences(df['text'])
df['title'] = tok.texts_to_sequences(df['title'])
df.head()
# Save the fully preprocessed dataframe for downstream notebooks.
data = df.to_json()
with open('data.json', 'w') as f:
    f.write(data)
| data/research_and_preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Build Up My Own Recommend Playlist from Scratch
# Collaborative filtering is usually the first type of method tried for recommender systems. It has two approaches: the first is the user-user based model, which uses the similarity between users to recommend new items; the other is the item-item based model, which uses the similarity between items instead of the similarity between users to recommend new items.
#
# Here, we are going to go beyond collaborative filtering and introduce latent factor model in the application of recommender system. In this article, we are going to use [Million Song Dataset](https://labrosa.ee.columbia.edu/millionsong/). It contains users and song data. The main motivation behind is that when we using music streaming service like Spotify, kkbox, and youtube, the recommended songs often catch my eye. Take Spotify for example, there is a feature called *Discover Weekly*, which automatically generate a recommended playlist weekly. Very often, I enjoyed listening to the recommended songs. Therefore, I think it will be a great idea if I can build up a recommend playlist or songs using different methods, and to see what the result will be.
#
# Here are the steps that I take for this experiment:
# * Take [Million Song Dataset](https://labrosa.ee.columbia.edu/millionsong/)
# * Use user-user based collaborative filtering to build up a recommended playlist
# * Use item-item based collaborative filtering to build up a recommended playlist
# * Use Latent Factor Model to build up a recommended playlist
# * Measure the performance using Root Mean Square Error(RMSE)
# * Compare the result of different approachs
# For collaborative filtering, we follow the same step as the [previous notebook for recommending movies](https://github.com/johnnychiuchiu/Machine-Learning/blob/master/RecommenderSystem/collaborative_filtering.ipynb).
# Firstly, in order to calculate the similarity, we need to get a utility matrix using the song dataframe. For illustration purpose, I also manually append three rows into the utility matrix. Each row represent a person with some specific music taste.
#
# We use two methods to compare the results of the different approaches. The first is to calculate the Mean Squared Error. Since both collaborative filtering and the latent factor model need the full dataset to calculate their predictions, the way we generate the train and test sets differs from the usual approach of randomly selecting rows for the test set.
#
# In the song data, we randomly take 3 listen_count entries from each user and place them in the test dataset. Then we use only the train dataset to build the recommended playlist. After obtaining the predicted scores for all songs, we compare the nonzero values in the test dataset with the corresponding predicted values and calculate the MSE. I have also made sure that each user has listened to at least 5 different songs in the song data.
# ---
# ## Implementing Collaborative Filtering to build up a Recommended Playlist
# +
# %matplotlib inline
import pandas as pd
# BUGFIX: sklearn.cross_validation was deprecated in scikit-learn 0.18 and
# removed in 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
import numpy as np
import os
from sklearn.metrics import mean_squared_error
# +
def compute_mse(y_true, y_pred):
    """Return the MSE computed only over positions where ``y_true`` is non-zero.

    Zero cells in the utility matrix mean "no rating", so they are excluded
    from scoring rather than being treated as true zero ratings.
    """
    rated = np.nonzero(y_true)
    return mean_squared_error(y_true[rated], y_pred[rated])
def create_train_test(ratings):
    """Split a utility matrix into disjoint train/test matrices.

    For every user (row), three rated entries are moved out of the training
    copy into an otherwise-zero test matrix, so that the two splits never
    share a cell.
    """
    train = ratings.copy()
    test = np.zeros(ratings.shape)
    n_users = ratings.shape[0]
    for user_idx in range(n_users):
        rated_cols = np.flatnonzero(ratings[user_idx])
        held_out = np.random.choice(rated_cols, size=3, replace=False)
        test[user_idx, held_out] = ratings[user_idx, held_out]
        train[user_idx, held_out] = 0.0
    # sanity check: no cell may appear in both splits
    assert np.all(train * test == 0)
    return (train, test)
# -
class collaborativeFiltering():
    """User-user / item-item collaborative filtering over a song play-count matrix.

    Bundles data loading, utility-matrix construction, cosine-style similarity
    computation and top-N recommendation extraction.
    """

    def __init__(self):
        pass

    def readSongData(self, top):
        """
        Read song data, preferring a local pickle cache over the remote URLs.

        :param top: keep only the first `top` rows of the merged dataframe
        :return: dataframe restricted to users with more than 5 song entries
        """
        if 'song.pkl' in os.listdir('../../data/'):
            song_df = pd.read_pickle('../../data/song.pkl')
        else:
            # Read userid-songid-listen_count triplets
            # This step might take time to download data from external sources
            triplets_file = 'https://static.turi.com/datasets/millionsong/10000.txt'
            songs_metadata_file = 'https://static.turi.com/datasets/millionsong/song_data.csv'
            song_df_1 = pd.read_table(triplets_file, header=None)
            song_df_1.columns = ['user_id', 'song_id', 'listen_count']
            # Read song metadata
            song_df_2 = pd.read_csv(songs_metadata_file)
            # Merge the two dataframes above to create input dataframe for recommender systems
            song_df = pd.merge(song_df_1, song_df_2.drop_duplicates(['song_id']), on="song_id", how="left")
            # Merge song title and artist_name columns to make a merged column
            song_df['song'] = song_df['title'].map(str) + " - " + song_df['artist_name']
            n_users = song_df.user_id.unique().shape[0]
            n_items = song_df.song_id.unique().shape[0]
            print(str(n_users) + ' users')
            print(str(n_items) + ' items')
            # BUGFIX: write the cache to the same path that is checked above
            # (was '../data/processed/song.pkl', so the cache was never reused).
            song_df.to_pickle('../../data/song.pkl')
        # keep top_n rows of the data
        song_df = song_df.head(top)
        song_df = self.drop_freq_low(song_df)
        return(song_df)

    def drop_freq_low(self, song_df):
        """Drop all rows of users having 5 or fewer song entries."""
        freq_df = song_df.groupby(['user_id']).agg({'song_id': 'count'}).reset_index(level=['user_id'])
        below_userid = freq_df[freq_df.song_id <= 5]['user_id']
        new_song_df = song_df[~song_df.user_id.isin(below_userid)]
        return(new_song_df)

    def utilityMatrix(self, song_df):
        """
        Transform dataframe into utility matrix, return both dataframe and matrix format

        :param song_df: a dataframe that contains user_id, song_id, and listen_count
        :return: dataframe, matrix
        """
        song_reshape = song_df.pivot(index='user_id', columns='song_id', values='listen_count')
        song_reshape = song_reshape.fillna(0)
        # BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0;
        # to_numpy() is the supported equivalent.
        ratings = song_reshape.to_numpy()
        return(song_reshape, ratings)

    def fast_similarity(self, ratings, kind='user', epsilon=1e-9):
        """
        Calculate the cosine-style similarity of the rating matrix.

        :param ratings: utility matrix
        :param kind: 'user' for user-user sim, 'item' for item-item sim
        :param epsilon: small number to avoid divide-by-zero errors
        :return: correlation matrix
        """
        if kind == 'user':
            sim = ratings.dot(ratings.T) + epsilon
        elif kind == 'item':
            sim = ratings.T.dot(ratings) + epsilon
        norms = np.array([np.sqrt(np.diagonal(sim))])
        return (sim / norms / norms.T)

    def predict_fast_simple(self, ratings, kind='user'):
        """
        Calculate the predicted score of every song for every user.

        :param ratings: utility matrix
        :param kind: 'user' for user-user sim, 'item' for item-item sim
        :return: matrix containing the predicted scores
        """
        similarity = self.fast_similarity(ratings, kind)
        if kind == 'user':
            return similarity.dot(ratings) / np.array([np.abs(similarity).sum(axis=1)]).T
        elif kind == 'item':
            return ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])

    def get_overall_recommend(self, ratings, song_reshape, user_prediction, top_n=10):
        """
        Get the top_n predicted result of every user. Notice that the recommended
        items are only songs the user hasn't listened to before (rating == 0).

        :param ratings: utility matrix
        :param song_reshape: utility matrix in dataframe format
        :param user_prediction: matrix with predicted scores
        :param top_n: the number of recommended songs
        :return: a dict containing recommended songs for every user_id
        """
        result = dict({})
        for i, row in enumerate(ratings):
            user_id = song_reshape.index[i]
            result[user_id] = {}
            zero_item_list = np.where(row == 0)[0]
            # reuse the indices computed above (the original re-ran np.where here)
            prob_list = user_prediction[i][zero_item_list]
            song_id_list = np.array(song_reshape.columns)[zero_item_list]
            result[user_id]['recommend'] = sorted(zip(song_id_list, prob_list), key=lambda item: item[1], reverse=True)[
                0:top_n]
        return (result)

    def get_user_recommend(self, user_id, overall_recommend, song_df):
        """
        Get the recommended songs for a particular user, joined with metadata
        from song_df.

        :param user_id: index of the user in the utility matrix
        :param overall_recommend: dict produced by get_overall_recommend
        :param song_df: song dataframe supplying title/release/artist columns
        :return: dataframe of recommended songs with scores and metadata
        """
        user_score = pd.DataFrame(overall_recommend[user_id]['recommend']).rename(columns={0: 'song_id', 1: 'score'})
        user_recommend = pd.merge(user_score,
                                  song_df[['song_id', 'title', 'release', 'artist_name', 'song']].drop_duplicates(),
                                  on='song_id', how='left')
        return (user_recommend)

    def createNewObs(self, artistName, song_reshape, index_name, song_df=None):
        """
        Append a new row simulating a user interested in some specific artists
        (listen count 10 for every song by those artists, 0 otherwise).

        :param artistName: a list of artist names
        :param song_reshape: utility matrix in dataframe format
        :param index_name: label for the appended user row
        :param song_df: song dataframe used to look up the artists' songs;
            defaults to the module-level ``song_df`` for backward compatibility
            (the original implementation silently relied on that global)
        :return: dataframe, matrix
        """
        if song_df is None:
            song_df = globals()['song_df']
        # Hoist the artist lookup out of the per-column loop: the original
        # recomputed unique() for every column of the utility matrix.
        artist_song_ids = set(song_df[song_df.artist_name.isin(artistName)]['song_id'].unique())
        interest = []
        for i in song_reshape.columns:
            if i in artist_song_ids:
                interest.append(10)
            else:
                interest.append(0)
        print(pd.Series(interest).value_counts())
        newobs = pd.DataFrame([interest],
                              columns=song_reshape.columns)
        newobs.index = [index_name]
        new_song_reshape = pd.concat([song_reshape, newobs])
        # as_matrix() was removed in pandas 1.0 -- see utilityMatrix
        new_ratings = new_song_reshape.to_numpy()
        return (new_song_reshape, new_ratings)
# ## Take Million Song Dataset
# We only keep the first 50000 rows for this notebook. Otherwise it will take too long to execute it. As following, we can see that there are around **17k** users and **93k** different songs out of the first 50k rows.
cf = collaborativeFiltering()
# Download/merge (or load cached) song data; keep the first 100k rows and drop light listeners.
song_df = cf.readSongData(100000)
song_df.head()
# Top 100 artists by number of song entries.
artist_df= song_df.groupby(['artist_name']).agg({'song_id':'count'}).reset_index(level=['artist_name']).sort_values(by='song_id',ascending=False).head(100)
n_users = song_df.user_id.unique().shape[0]
n_items = song_df.song_id.unique().shape[0]
print(str(n_users) + ' users')
print(str(n_items) + ' songs')
# • **Get the utility matrix**
song_reshape, ratings = cf.utilityMatrix(song_df)
# • **Append new rows to simulate users who love different kinds of musicians**
# Each synthetic user gets a listen count of 10 for every song by the listed artists.
song_reshape, ratings = cf.createNewObs(['Beyoncé', '<NAME>', 'Alicia Keys'], song_reshape, 'GirlFan')
song_reshape, ratings = cf.createNewObs(['Metallica', 'Guns N\' Roses', 'Linkin Park', 'Red Hot Chili Peppers'],
                                        song_reshape, 'HeavyFan')
song_reshape, ratings = cf.createNewObs(['Daft Punk','<NAME>','Hot Chip','Coldplay'],
                                        song_reshape, 'Johnny')
# • **Create train test dataset**
# Hold out 3 rated songs per user as the test set.
train, test = create_train_test(ratings)
song_reshape.shape
# ## Calculate user-user collaborative filtering
user_prediction = cf.predict_fast_simple(train, kind='user')
user_overall_recommend = cf.get_overall_recommend(train, song_reshape, user_prediction, top_n=10)
user_recommend_girl = cf.get_user_recommend('GirlFan', user_overall_recommend, song_df)
user_recommend_heavy = cf.get_user_recommend('HeavyFan', user_overall_recommend, song_df)
user_recommend_johnny = cf.get_user_recommend('Johnny', user_overall_recommend, song_df)
# ## Calculate item-item collaborative filtering
item_prediction = cf.predict_fast_simple(train, kind='item')
item_overall_recommend = cf.get_overall_recommend(train, song_reshape, item_prediction, top_n=10)
item_recommend_girl = cf.get_user_recommend('GirlFan', item_overall_recommend, song_df)
item_recommend_heavy = cf.get_user_recommend('HeavyFan', item_overall_recommend, song_df)
item_recommend_johnny = cf.get_user_recommend('Johnny', item_overall_recommend, song_df)
# ---
# The main idea behind Latent Factor Model is that we can transform our utility matrix into the multiple of two lower rank matrix. For example, if we have 5 users and 10 songs, then our utility matrix is 5 * 10. We can transform the matrix in to two matrixs, say 5 x 3 (say Q) and 3 x 10 (say P). Each user can be represented by a vector in 3 dimension, and each song can als obe represented by a vector in 3 dimension. The meaning of each dimension for Q can be, for example, do the user like jazz related music; each dimension for P can be, for example, is it a jazz song. The picture copied from google search result visualize it more clearly:
#
# 
# In order to get all the values in the Q and P, we need some optimization method to help us. The optimization method suggested by the winner of Netflix is called **Alternating Least Squares with Weighted Regularization (ALS-WR)**.
#
# Our cost function is as follows:
#
# $$ \begin{align} L &= \sum\limits_{u,i \in S}( r_{ui} - \textbf{x}_{u} \textbf{y}_{i}^{T} )^{2} + \lambda \big( \sum\limits_{u} \left\Vert \textbf{x}_{u} \right\Vert^{2} + \sum\limits_{i} \left\Vert \textbf{y}_{i} \right\Vert^{2} \big) \end{align} $$
# We will try to minimize the loss function to get our optimal $x_u$ and $y_i$ vectors. The main idea behind ALS-WR method is that we try to get the optimal Q and P matrix by holding one vector to be fixed at a time. We alternate back and forth until the value of Q and P converges. The reason why we don't optimize both vector at the same time is that it is hard to get optimal vectors at the same time. By holding one vector to be fixed and optimize another vector alternately, we can find the optimal Q and P more efficiently.
#
# For a detailed explaination on how to
# please check [Ethen's Alternating Least Squares with Weighted Regularization (ALS-WR) from scratch](http://nbviewer.jupyter.org/github/ethen8181/machine-learning/blob/master/recsys/1_ALSWR.ipynb).
# ## Recommend using Latent Factor Model
class ExplicitMF:
    """
    Matrix factorization trained with Alternating Least Squares (ALS).

    Adapted from Ethen's notebook
    (http://nbviewer.jupyter.org/github/ethen8181/machine-learning/blob/master/recsys/1_ALSWR.ipynb):
    the rating matrix is approximated by the product of a user-factor and an
    item-factor matrix, learned by alternately re-solving one regularized
    least-squares problem while the other factor matrix is held fixed.

    Parameters
    ----------
    n_iters : int
        Number of alternating update rounds.
    n_factors : int
        Rank of the factorization (number of latent factors); some
        machine-learning libraries call this "rank".
    reg : float
        L2 regularization strength for both factor matrices
        (named ``reg`` because ``lambda`` is a Python keyword).
    """

    def __init__(self, n_iters, n_factors, reg):
        self.reg = reg
        self.n_iters = n_iters
        self.n_factors = n_factors

    def fit(self, train):
        """Learn user and item factors from a User x Item rating matrix."""
        self.n_user, self.n_item = train.shape
        # Uniform random initialization; users first, then items (the call
        # order matters only for reproducibility under a fixed numpy seed).
        self.user_factors = np.random.random((self.n_user, self.n_factors))
        self.item_factors = np.random.random((self.n_item, self.n_factors))
        for _round in range(self.n_iters):
            # Alternate: solve for users with items fixed, then vice versa.
            self.user_factors = self._als_step(train, self.user_factors, self.item_factors)
            self.item_factors = self._als_step(train.T, self.item_factors, self.user_factors)
            # Kept from the original implementation: the per-iteration
            # predictions were used for (now commented-out) MSE tracking.
            predictions = self.predict()
        return self

    def _als_step(self, ratings, solve_vecs, fixed_vecs):
        """One half-iteration: re-solve ``solve_vecs`` while ``fixed_vecs`` stays frozen."""
        gram = fixed_vecs.T.dot(fixed_vecs) + np.eye(self.n_factors) * self.reg
        rhs = ratings.dot(fixed_vecs)
        return rhs.dot(np.linalg.inv(gram))

    def predict(self):
        """Return the dense predicted rating matrix (users x items)."""
        return self.user_factors.dot(self.item_factors.T)
# • **Fit using Alternating Least Square Method**
# Train ALS for 200 alternating rounds with 10 latent factors and light regularization.
als = ExplicitMF(n_iters=200, n_factors=10, reg=0.01)
als.fit(train)
latent_prediction = als.predict()
# Build top-10 recommendations per user from the latent-factor scores.
latent_overall_recommend = cf.get_overall_recommend(train, song_reshape, latent_prediction, top_n=10)
latent_recommend_girl = cf.get_user_recommend('GirlFan', latent_overall_recommend, song_df)
latent_recommend_heavy = cf.get_user_recommend('HeavyFan', latent_overall_recommend, song_df)
latent_recommend_johnny = cf.get_user_recommend('Johnny', latent_overall_recommend, song_df)
# ## Measure the performance using Root Mean Square Error(RMSE)
# +
# Score each method on the held-out entries only (zero cells are masked out by compute_mse).
user_mse = compute_mse(test, user_prediction)
item_mse = compute_mse(test, item_prediction)
latent_mse = compute_mse(test, latent_prediction)
print("MSE for user-user approach: "+str(user_mse))
print("MSE for item-item approach: "+str(item_mse))
print("MSE for latent factor model: "+str(latent_mse))
# -
# We can see that even though the latent factor model is a somewhat more advanced model, its MSE is not the lowest for some reason. It is something that I should keep in mind.
# ## Compare the results of the different approaches
# ### > Recommend Playlist for someone who is a big fan of *Beyoncé*, *Katy Perry* and *Alicia Keys*
# • **User-user approach**
user_recommend_girl
# • **Item-item approach**
item_recommend_girl
# • **Latent Factor Model**
latent_recommend_girl
# ### > Recommend Playlist for someone who is a big fan of *Metallica*, *Guns N' Roses*, *Linkin Park* and *Red Hot Chili Peppers*
# Same three methods, in the same order: user-user, item-item, latent factor.
user_recommend_heavy
item_recommend_heavy
latent_recommend_heavy
# ### > Recommend Playlist for myself, I like *Daft Punk*, *<NAME>*, *Hot Chip* and *Coldplay*
user_recommend_johnny
item_recommend_johnny
latent_recommend_johnny
# We see that all the recommended playlists actually make sense. In a following notebook, I will continue to try some other methods to build up custom recommended playlists.
# ### Reference
#
# * http://nbviewer.jupyter.org/github/ethen8181/machine-learning/blob/master/recsys/1_ALSWR.ipynb
# * https://github.com/dvysardana/RecommenderSystems_PyData_2016/blob/master/Song%20Recommender_Python.ipynb
| src/notebooks/latentFactorModel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Mit Widgets interagieren
import ipywidgets as widgets
from IPython.display import display
# +
# Integer slider widget (values 0..10, starting at 7).
slider = widgets.IntSlider(
    value=7,
    min=0,
    max=10,
    # step=1,
    description='X-Wert:',
    orientation='horizontal',
    slider_color='white'
)
def on_slider_move(b):
    """Observer callback: `b` is a change dict; b["new"] holds the new slider value."""
    print(b["new"])
    print("Slider wurde bewegt!")
# Fire the callback whenever the slider's `value` trait changes.
slider.observe(on_slider_move, names="value")
display(slider)
# -
| UDEMY_Datavis_Python/08 - jupyter widgets/Mit Widgets interagieren.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # `smlb` mini demonstration:<br>Compare different optimization techniques on the same response surface.
#
# Scientific Machine Learning Benchmark:<br>
# A benchmark of regression models in chem- and materials informatics.<br>
# 2019-2020, Citrine Informatics.
# +
import smlb
import numpy as np
import matplotlib.pyplot as plt
# -
# ## Setup
#
# Generate a stream of pseudo-random number generators.
prng = smlb.Random(rng=0)
# One seed per component that needs randomness; popped off the list as used.
seeds = list(np.flip(prng.random.split(30)))
# ## The dataset: Friedman-Silverman (1989)
#
# Load a 10-dimensional dataset
from smlb.datasets.synthetic import FriedmanSilverman1989Data
dataset = FriedmanSilverman1989Data(dimensions=10)
# ## Identity learner
#
# First, test optimization algorithms against the Friedman-Silverman function itself. An `IdentityLearner` learns to perfectly reproduce the provided dataset (must be of type `VectorSpaceData`). The `ExpectedValue` scorer then returns the value of the function as the score. In this case we attempt to maximize the function.
from smlb.learners import IdentityLearner
model_identity = IdentityLearner(dataset)
score_ev = smlb.ExpectedValue(maximize=True)
# ## Optimizers
#
# For this demonstration, we compare three optimizers: a random sampler, differential evolution as implemented in Scipy, and dual annealing as implemented in Scipy. For the random optimizer we must specify the number of samples. Here we take 1000. To keep things comparable we also specify 1000 function evaluations for dual annealing, though the algorithm will finish out its current iteration when it passes that threshold. Differential evolution does not expose the number of function evaluations as a parameter, but we can set the number of iterations and find that 10 yields good results.
from smlb.optimizers import RandomOptimizer, ScipyDifferentialEvolutionOptimizer, ScipyDualAnnealingOptimizer
max_evals = 1e3
max_de_iters = 10
optimizers = [
    RandomOptimizer(num_samples=max_evals, rng=seeds.pop()),
    ScipyDifferentialEvolutionOptimizer(rng=seeds.pop(), maxiter=max_de_iters),
    ScipyDualAnnealingOptimizer(rng=seeds.pop(), maxfun=max_evals)
]
# Display names, kept parallel to the optimizers list above.
labels = [
    "Random Samples",
    "Differential Evolution",
    "Dual Annealing"
]
# ## Running the workflow
#
# The `OptimizationTrajectoryPlot` is the only evaluation currently implemented. It draws the median trajectory, shades the quantiles, and optionally draws the extremal results as well. Here we shade the 0.25 to the 0.75 quantile and choose to draw the best/worst trajectory at each point. We run 6 trials for each optimizer.
from smlb.workflows import OptimizationTrajectoryComparison
num_trials = 6
fig, ax = plt.subplots()
trajectory_plot = smlb.OptimizationTrajectoryPlot(
    target=ax,
    optimizer_names=labels,
    log_scale=True,
    quantile_width=0.5,
    show_extrama=True  # NOTE(review): "extrama" looks misspelled but may match the smlb keyword -- verify against the installed API
)
workflow = OptimizationTrajectoryComparison(
    data=dataset,
    model=model_identity,
    scorer=score_ev,
    optimizers=optimizers,
    evaluations=[trajectory_plot,],
    num_trials=num_trials
)
workflow.run()
ax.set_title("Friedman-Silverman function (1989)")
ax.legend()
plt.show()
# Dual annealing does the best, finding the optimum within a few dozen function evaluations. Differential evolution doesn't do much better than random sampling at first, but pulls ahead after a few hundred evaluations and eventually finds the optimum.
# ## Trained Learner
#
# Next, we train a learner on some data drawn from the Friedman-Silverman function and optimize a score applied to that model.
from smlb.learners import RandomForestRegressionSklearn
model_rf = RandomForestRegressionSklearn(rng=seeds.pop(), uncertainties="naive")
# Train the random forest on 50 randomly sampled points from the dataset.
num_train = 50
sampler = smlb.RandomVectorSampler(size=num_train, rng=seeds.pop())
training_data = sampler.fit(dataset).apply(dataset)
model_rf.fit(training_data)
# In this example we assume that the goal is to minimize the function. The lowest value in the training data is taken to be the target and we calculate the probability of exceeding that target. The goal of "minimize" in the score indicates that the score calculates the probability of being _below_ the target.
min_value = min(training_data.labels())
score_pi = smlb.ProbabilityOfImprovement(target=min_value, goal="minimize")
# ## Running the workflow
#
# Use the same optimizers as above and similar plotting settings. To demonstrate some different settings, here the plot is on a linear scale and does not include the extremal trajectories.
num_trials = 6
fig, ax = plt.subplots()
trajectory_plot = smlb.OptimizationTrajectoryPlot(
    target=ax,
    optimizer_names=labels,
    log_scale=False,
    quantile_width=0.5,
    show_extrama=False  # NOTE(review): see spelling note above
)
workflow = OptimizationTrajectoryComparison(
    data=dataset,
    model=model_rf,          # optimize the Probability of Improvement under the trained model
    scorer=score_pi,
    optimizers=optimizers,
    evaluations=[trajectory_plot,],
    num_trials=num_trials
)
workflow.run()
ax.set_title("Friedman-Silverman function (1989)")
ax.legend()
plt.show()
# Dual annealing is once again the best performer.
| examples/mini_demo_optimization_trajectories.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
print("starting 2_occlusion_activations_in_Inception_V1")
# # Investigate Occlusion Stimuli
# ## Imports
# general imports
import numpy as np
import math
import random
import pandas as pd
import tensorflow as tf
import torch
import os
import csv
import time
import copy
import matplotlib.pyplot as plt
import argparse
# lucid imports
import lucid.modelzoo.vision_models as models
from render import import_model
# custom imports
import occlusion_utils as ut
# ## Load model
# import InceptionV1 from the Lucid modelzoo
model = models.InceptionV1()
model.load_graphdef()
# ## Parameters
# Number of occluded image variants pushed through the network per sess.run call.
batch_size_forward_pass = 128
# setting seeds (tf.set_random_seed is the TF1.x API, consistent with tf.Session used below)
tf.set_random_seed(1234)
random.seed(0)
np.random.seed(0)
# NOTE(review): argparse requires CLI arguments, so this notebook-derived .py
# is intended to be executed as a script, not run cell-by-cell.
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--stimuli-dir", required=True, help="Path to save stimuli to.")
parser.add_argument("-t", "--trial-type", required=True, help="instruction_practice_catch or sampled_trials.")
args = parser.parse_args()
print(args)
stimuli_dir = args.stimuli_dir
trial_type = args.trial_type
# ## Load experiment specification
# read in unit specifications from csv into pandas dataframe
# (header=1 skips the first file line and uses the second as column names)
path_to_csv_file = os.path.join(stimuli_dir, f"layer_folder_mapping_{trial_type}.csv")
unit_specs_df = pd.read_csv(path_to_csv_file, header=1)
unit_specs_df
# +
# For every unit spec x image batch x occlusion size: run the network on a batch
# of occluded image variants and save the unit's activations to disk.
for _, row in unit_specs_df.iterrows():
    # load unit specification
    layer_number = row["layer_number"]
    kernel_size_number = row["kernel_size_number"]
    channel_number = row["channel_number"]
    feature_map_number = row["feature_map_number"]
    layer_name = row["layer_name"]
    pre_post_relu = row["pre_post_relu"]
    print(row)
    for batch in range(ut.n_batches):
        print(f"batch {batch}")
        start = time.time()
        # dataloader
        data_dir = os.path.join(
            stimuli_dir,
            ut.objective,
            trial_type,
            f"layer_{layer_number}",
            f"kernel_size_{kernel_size_number}",
            f"channel_{channel_number}",
            "natural_images",
            f"batch_{batch}",
        )
        data_loader = ut.get_data_loader(os.path.join(data_dir, "val"))
        for heatmap_size_i, occlusion_size_i, percentage_side_length_i in zip(ut.heatmap_sizes_list, ut.occlusion_sizes_list, ut.percentage_side_length_list):
            print(heatmap_size_i, occlusion_size_i, percentage_side_length_i)
            list_of_positions = ut.get_list_of_occlusion_positions(heatmap_size_i, occlusion_size_i)
            # NOTE(review): 'session' is never assigned in this script (the TF
            # session below is named 'sess'), so this guard is always a no-op --
            # presumably left over from an interactive-session workflow.
            if 'session' in locals() and session is not None:
                print('Close interactive session')
                session.close()
            with tf.Graph().as_default() as graph, tf.Session() as sess:
                image = tf.placeholder(tf.float32, shape=(batch_size_forward_pass, 224, 224, 3))
                print("image.shape", image.shape)
                model_instance = import_model(model, image)
                tf_activations_list, unique_layer_str_list = ut.get_tf_activations_list(model_instance, layer_name, pre_post_relu)
                # iterate over all occlusion positions in batches
                list_of_activations_for_occlusions = []
                n_iterations = math.ceil(len(list_of_positions) / batch_size_forward_pass)
                for iteration_i in range(n_iterations):
                    cur_list_of_positions = list_of_positions[iteration_i*batch_size_forward_pass:iteration_i*batch_size_forward_pass+batch_size_forward_pass]
                    images, targets, paths = next(iter(data_loader))
                    # forward pass
                    images_np_transformed = images.numpy().transpose(0,2,3,1)  # (1, 224, 224, 3)
                    images_np_transformed_copied = copy.deepcopy(images_np_transformed)
                    # Tile the single image so each batch slot gets its own occlusion position.
                    images_np_transformed_ready_for_occlusion = np.tile(images_np_transformed_copied, (batch_size_forward_pass, 1, 1, 1))  # (batch_size_forward_pass, 224, 224, 3)
                    # loop through occlusion positions: replace each patch by its spatial mean color
                    for idx, (x_start, x_end, y_start, y_end) in enumerate(cur_list_of_positions):
                        images_np_transformed_ready_for_occlusion[idx, x_start:x_end, y_start:y_end, :] = np.mean(np.mean(images_np_transformed_ready_for_occlusion[idx, x_start:x_end, y_start:y_end, :], axis=0), axis=0)
                    # # plot patches to check if occlusion worked fine
                    # plt.imshow(images_np_transformed_ready_for_occlusion[idx, :, :, :])
                    # plt.show()
                    activations_list = sess.run(tf_activations_list, {image: images_np_transformed_ready_for_occlusion})
                    # unpack single list item
                    activations_np = activations_list[0]
                    unit_activations = ut.get_activation_according_to_objective(ut.objective, activations_np, feature_map_number)
                    list_of_activations_for_occlusions.append(list(unit_activations))
                # after having calculated all occlusions: save activations
                final_list_of_activations_for_occlusions = [item for sublist in list_of_activations_for_occlusions for item in sublist]
                destination_dir = os.path.join(data_dir, f"{percentage_side_length_i}_percent_side_length")
                os.makedirs(destination_dir, exist_ok=True)
                filename = os.path.join(destination_dir, f"activations_for_occlusions_of_{percentage_side_length_i}_percent.npy")
                # NOTE(review): the +1 keeps one extra activation from the padded (un-occluded)
                # batch slots -- possibly an intentional clean-image reference, possibly an
                # off-by-one; confirm against the downstream consumer before changing.
                np.save(filename, final_list_of_activations_for_occlusions[:len(list_of_positions)+1])
                # BUGFIX: the original f-string contained no placeholder and printed the
                # literal "(unknown)"; report the actual save path instead.
                print(f"activations saved under {filename}")
                print('Done!!!')
        end = time.time()
        print(f" time for one batch: {end-start}")
print("Completely done!")
# -
| tools/data-generation/causal-occlusion/2_occlusion_activations_in_Inception_V1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Questão 1: Considerando o programa abaixo e sabendo que o usuário digitou 3 e 6 (nessa ordem), preencha no campo resposta o valor final da variável total:
# +
num1 = int(input("Digite um número"))
num2 = int(input("Digite outro número"))
# Division binds tighter than addition, so this is num1 + (num2 / 3).
total = num1 + num2/3
print("O novo número é ", total)
# -
# Questão 2: Considerando o programa abaixo e sabendo que o usuário digitou 200 e 40 (nessa ordem), o que seria impresso na ordem exata de execução?
# +
salario = float(input("Digite o valor do salário"))
aumento = float(input("Digite o percentual de aumento"))
# New salary = salary * (1 + percent/100); the original salary stays unchanged.
novoSalario = salario*(1+aumento/100)
print(novoSalario)
print(salario)
# -
# Questão 3: O programa abaixo foi executado diversas vezes e, após o usuário digitar um valor, o programa imprimiu a situação do aluno. Para cada valor digitado, coloque do lado a situação do aluno (“Reprovado”, “Recuperação” ou “Aprovado”):
# +
media = float(input("Qual a média do aluno?"))
# Grades in (0, 5] fail; values <= 0 skip both conditions and fall through
# to the final else branch, printing "Aprovado" (as the answer table shows).
if 0 < media <=5:
    print("Reprovado")
else:
    if 5 < media < 6:
        print("Recuperação")
    else:
        print("Aprovado")
# -
# | Valor Digitado | Situação |
# | --- |--- |
# | 5 | Reprovado |
# | 8 | Aprovado |
# | 1 | Reprovado |
# | -5.5 | Aprovado |
# | 4 | Reprovado |
# Questão 4: Para cada execução do programa abaixo, indique na tabela os valores finais das variáveis precoA e precoB:
# +
precoA = int(input("Digite o valor do produto 1"))
precoB = int(input("Digite o valor do produto 2"))
# Exactly one branch's assignment runs; note precoA is recomputed from the
# (unmodified) precoB when A is the cheaper product.
if precoA>precoB:
    precoB = precoB*2
else:
    if precoA==precoB:
        precoB = precoB*1.5
    else:
        precoA = precoB*2 + 10
print(precoA)
print(precoB)
# -
# | Valor Digitado 1 | Valor Digitado 2 | precoA | precoB
# | --- |--- | --- |--- |
# | 100 | 200 |410 | 200 |
# | 50 | 20 |50 | 40 |
# | 60 | 60 |60 | 90 |
# | 70 | 20 |70 | 40 |
# | 40 | 50 |110 | 50|
# Questão 5: Ao executar o programa abaixo, qual é a média final do aluno?
# Note: `soma` deliberately starts at 10 (not 0), so the printed "average"
# is (10 + sum(notas)) / 4 -- that quirk is the point of the exercise.
notas = (10,6,5,9)
soma = 10
for nota in notas:
    soma = soma + nota
print("Média", soma/4)
# Questão 6: Ao executar o programa abaixo, o usuário digitou DF e SP (nessa ordem). Qual será o valor final da variável percentual?
# +
origem = input("Digite um estado")
destino = input("Digite outro estado")
percentual = 18
# Two independent ifs (not if/elif): the second can overwrite the first's assignment.
if origem=="SP" and destino=="DF":
    percentual = 12
if origem=="GO" or destino=="DF":
    percentual = 7
print("Percentual", percentual)
# -
# Questão 7: Um usuário pretendia digitar os números 2, 3, 2, 1, 4, -3, 1, 4, 1, 2, 0, mas o programa encerrou antes que ele digitasse todos os valores. Qual foi o último valor digitado?
soma = 0
rodadas = 10
num = 0
# Loop continues while the running total is <= 10; the break only fires past 20,
# so the loop usually ends via the while condition, not the break.
while soma <= rodadas:
    num = int(input("Digite um número de 1 a 10"))
    soma = soma + num
    if soma > 20:
        break
print(soma/4)
# Questão 8: Ao executar o programa abaixo, qual será o valor final da variável mediaFinal:
# +
notas = (10,5,4,5)
mediaFinal = 0
soma = 0
# A grade below 4 resets mediaFinal but NOT soma, and later grades keep
# accumulating -- the subtle behaviour the exercise asks about.
for i in notas:
    if i < 4:
        mediaFinal = 0
    else:
        soma = soma + i
        mediaFinal = soma / 4
print(mediaFinal)
# -
# Questão 9: Uma faculdade deseja implementar um programa para cálculo de médias de notas de provas, de modo que seja calculada a média independente de quantas notas o aluno terá. Ou seja, o programa deve calcular a média para uma turma com 3 provas e para outra turma com 4 provas. Continue a implementação abaixo desse programa considerando os requisitos acima, sabendo que você só precisa implementar de 3 a 4 linhas para o cálculo e mais uma para imprimir a média.
# +
notas = (10,6,4,8,2,6)
# len() makes the average independent of how many grades the tuple holds,
# satisfying the requirement of working for 3, 4 or any number of exams.
quantidade = len(notas)
soma = 0
for nota in notas:
    soma += nota
print(soma/quantidade)
| Fontes-UDF/programming/cadernos_provas/Prova_TP_1_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Groupby operations
# + [markdown] slideshow={"slide_type": "slide"}
# Some imports:
# + slideshow={"slide_type": "-"}
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
try:
    import seaborn
except ImportError:
    # seaborn is optional here -- importing it only restyles matplotlib plots.
    pass
pd.options.display.max_rows = 10
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Recap: the groupby operation (split-apply-combine)
#
# The "group by" concept: we want to **apply the same function on subsets of your dataframe, based on some key to split the dataframe in subsets**
#
# This operation is also referred to as the "split-apply-combine" operation, involving the following steps:
#
# * **Splitting** the data into groups based on some criteria
# * **Applying** a function to each group independently
# * **Combining** the results into a data structure
#
# <img src="img/splitApplyCombine.png">
#
# Similar to SQL `GROUP BY`
# + [markdown] slideshow={"slide_type": "subslide"}
# The example of the image in pandas syntax:
# -
df = pd.DataFrame({'key':['A','B','C','A','B','C','A','B','C'],
'data': [0, 5, 10, 5, 10, 15, 10, 15, 20]})
df
# Using the filtering and reductions operations we have seen in the previous notebooks, we could do something like:
#
#
# df[df['key'] == "A"].sum()
# df[df['key'] == "B"].sum()
# ...
#
# But pandas provides the `groupby` method to do this:
# + slideshow={"slide_type": "subslide"}
df.groupby('key').aggregate('sum') # np.sum
# -
df.groupby('key').sum()
# Pandas does not only let you group by a column name. In `df.groupby(grouper)` can be many things:
#
# - Series (or string indicating a column in df)
# - function (to be applied on the index)
# - dict : groups by values
# - levels=[], names of levels in a MultiIndex
#
#
df.groupby(lambda x: x % 2).mean()
# + [markdown] slideshow={"slide_type": "subslide"}
# ## And now applying this on some real data
# -
# These exercises are based on the [PyCon tutorial of <NAME>](https://github.com/brandon-rhodes/pycon-pandas-tutorial/) (so all credit to him!) and the datasets he prepared for that. You can download these data from here: [`titles.csv`](https://drive.google.com/open?id=0B3G70MlBnCgKajNMa1pfSzN6Q3M) and [`cast.csv`](https://drive.google.com/open?id=0B3G70MlBnCgKal9UYTJSR2ZhSW8) and put them in the `/data` folder.
# `cast` dataset: different roles played by actors/actresses in films
#
# - title: title of the film
# - name: name of the actor/actress
# - type: actor/actress
# - n: the order of the role (n=1: leading role)
cast = pd.read_csv('data/cast.csv')
cast.head()
titles = pd.read_csv('data/titles.csv')
titles.head()
# <div class="alert alert-success">
# <b>EXERCISE</b>: Using groupby(), plot the number of films that have been released each decade in the history of cinema.
# </div>
# + clear_cell=true
titles.groupby(titles.year // 10 * 10).size().plot(kind='bar')
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: Use groupby() to plot the number of "Hamlet" films made each decade.
# </div>
# + clear_cell=true
hamlet = titles[titles['title'] == 'Hamlet']
hamlet.groupby(hamlet.year // 10 * 10).size().plot(kind='bar')
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: How many leading (n=1) roles were available to actors, and how many to actresses, in each year of the 1950s?
# </div>
# + clear_cell=true
cast1950 = cast[cast.year // 10 == 195]
cast1950 = cast1950[cast1950.n == 1]
cast1950.groupby(['year', 'type']).size()
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: List the 10 actors/actresses that have the most leading roles (n=1) since the 1990's.
# </div>
# + clear_cell=true
cast1990 = cast[cast['year'] >= 1990]
cast1990 = cast1990[cast1990.n == 1]
cast1990.groupby('name').size().nlargest(10)
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: Use groupby() to determine how many roles are listed for each of The Pink Panther movies.
# </div>
# + clear_cell=true
c = cast
c = c[c.title == 'The Pink Panther']
c = c.groupby(['year'])[['n']].max()
c
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: List, in order by year, each of the films in which <NAME> has played more than 1 role.
# </div>
# + clear_cell=true
c = cast
c = c[c.name == '<NAME>']
g = c.groupby(['year', 'title']).size()
g[g > 1]
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: List each of the characters that <NAME> has portrayed at least twice.
# </div>
# + clear_cell=true
c = cast
c = c[c.name == '<NAME>']
g = c.groupby(['character']).size()
g[g > 1].sort_values()
# -
# ## Transforms
# Sometimes you don't want to aggregate the groups, but transform the values in each group. This can be achieved with `transform`:
df
df.groupby('key').transform('mean')
def normalize(group):
    """Standardize *group*: subtract its mean, divide by its sample std."""
    centered = group - group.mean()
    return centered / group.std()
df.groupby('key').transform(normalize)
df.groupby('key').transform('sum')
# <div class="alert alert-success">
# <b>EXERCISE</b>: Add a column to the `cast` dataframe that indicates the number of roles for the film.
# </div>
# + clear_cell=true
cast['n_total'] = cast.groupby('title')['n'].transform('max')
cast.head()
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: Calculate the ratio of leading actor and actress roles to the total number of leading roles per decade.
# </div>
#
# Tip: you can to do a groupby twice in two steps, once calculating the numbers, and then the ratios.
# + clear_cell=true
leading = cast[cast['n'] == 1]
sums_decade = leading.groupby([cast['year'] // 10 * 10, 'type']).size()
sums_decade
# + clear_cell=true
#sums_decade.groupby(level='year').transform(lambda x: x / x.sum())
ratios_decade = sums_decade / sums_decade.groupby(level='year').transform('sum')
ratios_decade
# + clear_cell=true
ratios_decade[:, 'actor'].plot()
ratios_decade[:, 'actress'].plot()
# -
# ## Intermezzo: string manipulations
# Python strings have a lot of useful methods available to manipulate or check the content of the string:
s = 'Bradwurst'
s.startswith('B')
# In pandas, those methods (together with some additional methods) are also available for string Series through the `.str` accessor:
s = pd.Series(['Bradwurst', 'Kartoffelsalat', 'Sauerkraut'])
s.str.startswith('B')
# For an overview of all string methods, see: http://pandas.pydata.org/pandas-docs/stable/api.html#string-handling
# <div class="alert alert-success">
# <b>EXERCISE</b>: We already plotted the number of 'Hamlet' films released each decade, but not all titles are exactly called 'Hamlet'. Give an overview of the titles that contain 'Hamlet', and that start with 'Hamlet':
# </div>
# + clear_cell=true
hamlets = titles[titles['title'].str.contains('Hamlet')]
hamlets['title'].value_counts()
# + clear_cell=true
hamlets = titles[titles['title'].str.match('Hamlet')]
hamlets['title'].value_counts()
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: List the 10 movie titles with the longest name.
# </div>
# + clear_cell=true
title_longest = titles['title'].str.len().nlargest(10)
title_longest
# + clear_cell=true
pd.options.display.max_colwidth = 210
titles.loc[title_longest.index]
# -
# ## Value counts
# A useful shortcut to calculate the number of occurrences of certain values is `value_counts` (this is somewhat equivalent to `df.groupby(key).size()`)
#
# For example, what are the most occurring movie titles?
titles.title.value_counts().head()
# <div class="alert alert-success">
# <b>EXERCISE</b>: Which years saw the most films released?
# </div>
# + clear_cell=true
t = titles
t.year.value_counts().head(3)
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: Plot the number of released films over time
# </div>
# + clear_cell=true
titles.year.value_counts().sort_index().plot()
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: Plot the number of "Hamlet" films made each decade.
# </div>
# + clear_cell=true
t = titles
t = t[t.title == 'Hamlet']
(t.year // 10 * 10).value_counts().sort_index().plot(kind='bar')
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: What are the 11 most common character names in movie history?
# </div>
# + clear_cell=true
cast.character.value_counts().head(11)
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: Which actors or actresses appeared in the most movies in the year 2010?
# </div>
# + clear_cell=true
cast[cast.year == 2010].name.value_counts().head(10)
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: Plot how many roles <NAME> has played in each year of his career.
# </div>
# + clear_cell=true
cast[cast.name == '<NAME>'].year.value_counts().sort_index().plot()
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: What are the 10 most film titles roles that start with the word "The Life"?
# </div>
# + clear_cell=true
c = cast
c[c.title.str.startswith('The Life')].title.value_counts().head(10)
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>: How many leading (n=1) roles were available to actors, and how many to actresses, in the 1950s? And in 2000s?
# </div>
# + clear_cell=true
c = cast
c = c[c.year // 10 == 195]
c = c[c.n == 1]
c.type.value_counts()
# + clear_cell=true
c = cast
c = c[c.year // 10 == 200]
c = c[c.n == 1]
c.type.value_counts()
# -
| solved - 04b - Advanced groupby operations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from os.path import join, split, isdir, dirname
from os import mkdir
from glob import glob
from cell_mrcnn.utils import get_image_description, calc_layers, convert_to_bit8, get_cell_mrcnn_path_from_config_file, get_image_paths, load_image
from skimage.io import imread
import tensorflow as tf
from cell_mrcnn import cell
import pandas as pd
import numpy as np
import cell_mrcnn.model as modellib
from PIL import Image
from datetime import datetime
from matplotlib import pyplot as plt
from itertools import product
data_path = join(get_cell_mrcnn_path_from_config_file(), 'data')
MODEL_DIR = join(get_cell_mrcnn_path_from_config_file(), "logs")
config = cell.CellInferenceConfig()
mask_dir = join(data_path, '20201210/preprocessed/venus')
dataset_dir = join(data_path, '20201210')
# +
genotypes = ['L','Q','wt']
stim = ['st', 'nst']
wells = ['_'.join(prod) for prod in product(genotypes, stim)]
# get the CB2 channel paths for each well
cb2_paths = get_image_paths(dataset_dir, channels=('Venus'), genotypes=('wt', 'Q', 'L'), stims=('st', 'nst'))
cb2_path_dict = {}
# get the image paths for mask prediction (could be the same, could be different)
mask_image_paths = glob(join(mask_dir, '*.png'))
mask_path_dict = {}
# initialise other dicts
result_dict = {}
df_dict = {}
for well in wells:
cb2_path_dict[well] = {}
mask_path_dict[well] = {}
result_dict[well] = {}
df_dict[well] = pd.DataFrame()
for path in cb2_paths:
ch, gen, st, pos = get_image_description(path)
well = '_'.join((gen,st))
cb2_path_dict[well][pos] = path
for path in mask_image_paths:
ch, gen, st, pos = get_image_description(path)
well = '_'.join((gen,st))
mask_path_dict[well][pos] = path
# -
config.DETECTION_MIN_CONFIDENCE = 0.9
DEVICE = "/cpu:0"
with tf.device(DEVICE):
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR,
config=config)
weights_path = join(MODEL_DIR, "cell20201216T2001/mask_rcnn_cell_0020.h5")
print("Loading weights ", weights_path)
model.load_weights(weights_path, by_name=True)
# Run the Mask R-CNN model on every position of every well, collect the
# detection results, and append per-cell layer measurements to the
# per-well DataFrame.
for i, well in enumerate(wells):
    for j,(pos, path) in enumerate(cb2_path_dict[well].items()):
        # single-line progress indicator (\r rewrites the same line)
        print('\r{}/{} well: {}/{} position...'.format(i+1, len(wells), j+1, len(cb2_path_dict[well].items())), end='')
        layers_image = imread(path)  # image used for the layer measurement
        mask_image = load_image(mask_path_dict[well][pos])  # image fed to the segmentation model
        results = model.detect([mask_image], verbose=0)[0]
        result_dict[well][pos] = results
        # per-mask layer values — exact semantics defined by the project
        # helper calc_layers; presumably one row per detected cell (TODO confirm)
        layers = calc_layers(layers_image, results['masks'])
        df = pd.DataFrame(layers)
        df_dict[well] = pd.concat([df_dict[well], df], axis=0, ignore_index = True)
    # append a 'mean' summary row over all positions of this well
    df_dict[well].loc['mean',:] = df_dict[well].mean(axis=0)
rows=2; cols=3; size = 6
fig, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows), sharey = True)
row, col = 0, 0
for j, well in enumerate(wells):
ax[row, col].plot(np.arange(len(df_dict[well].loc['mean',:])), df_dict[well].loc['mean',:])
ratio = df_dict[well].loc['mean',:][:20].sum() / df_dict[well].loc['mean',:][20:40].sum()
ax[row, col].set_title(f"{well}; n= {df_dict[well].shape[0]-1}; ratio = {ratio:.2f}")
col += 1
if (j+1) % 3 == 0:
row += 1
col = 0
# save the results
results_dir = join(get_cell_mrcnn_path_from_config_file(),'results')
if not isdir(results_dir):
mkdir(results_dir)
date = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
output_folder = join(results_dir, date)
if not isdir(output_folder):
mkdir(output_folder)
for well in result_dict.keys():
well_dir = join(output_folder, well)
if not isdir(well_dir):
mkdir(well_dir)
with open(join(well_dir, 'results.csv'), 'w') as f:
f.write(df_dict[well].to_csv())
for pos, r in result_dict[well].items():
for m in range(r['masks'].shape[2]):
mask_ = Image.fromarray((r['masks'][:,:,m]*255).astype(np.uint8),
mode = 'L')
mask_ = mask_.convert(mode='1')
mask_.save(join(well_dir, pos + '_mask_' + str(m) +
'.png'))
| notebooks/Analyse.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rselent/DS-Unit-2-Sprint-2-Kaggle-Challenge/blob/master/module4/2_2_4_assignment_kaggle_challenge_4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="ZEce0B9VaAx-" colab_type="text"
# Lambda School Data Science
#
# *Unit 2, Sprint 2, Module 4*
#
# ---
# + [markdown] colab_type="text" id="nCc3XZEyG3XV"
# # Classification Metrics
#
# ## Assignment
# - [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.
# - [ ] Plot a confusion matrix for your Tanzania Waterpumps model.
# - [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 70% accuracy (well above the majority class baseline).
# - [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_
# - [ ] Commit your notebook to your fork of the GitHub repo.
# - [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student <NAME>. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook.
#
#
# ## Stretch Goals
#
# ### Reading
# - [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_
# - [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)
# - [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by <NAME>, with video
# - [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415)
#
#
# ### Doing
# - [ ] Share visualizations in our Slack channel!
# - [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)
# - [ ] More Categorical Encoding. (See module 2 assignment notebook)
# - [ ] Stacking Ensemble. (See below)
#
# ### Stacking Ensemble
#
# Here's some code you can use to "stack" multiple submissions, which is another form of ensembling:
#
# ```python
# import pandas as pd
#
# # Filenames of your submissions you want to ensemble
# files = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']
#
# target = 'status_group'
# submissions = (pd.read_csv(file)[[target]] for file in files)
# ensemble = pd.concat(submissions, axis='columns')
# majority_vote = ensemble.mode(axis='columns')[0]
#
# sample_submission = pd.read_csv('sample_submission.csv')
# submission = sample_submission.copy()
# submission[target] = majority_vote
# submission.to_csv('my-ultimate-ensemble-submission.csv', index=False)
# ```
# + colab_type="code" id="lsbRiKBoB5RE" colab={}
# %%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
# !pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
# + colab_type="code" id="BVA1lph8CcNX" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9ceed340-3d5b-43ef-db9f-031f67788788"
import numpy as np
import pandas as pd
import seaborn as sns
import category_encoders as ce
import matplotlib.pyplot as plt
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.multiclass import unique_labels
from sklearn.model_selection import train_test_split
def dangleWrangle( x):
    """Clean the Tanzania waterpump features DataFrame.

    Replaces sentinel zeros with NaN (adding *_MISSING indicator columns),
    splits date_recorded into year/month/day, drops duplicate/unusable
    columns, and derives a pump 'age' feature.

    Parameters:
        x: raw features DataFrame (train, validation, or test split).

    Returns:
        A cleaned copy; the input DataFrame is not modified.
    """
    x = x.copy()
    # -2e-08 is a near-zero placeholder for missing latitudes in this dataset.
    x['latitude'] = x['latitude'].replace( -2e-08, 0)
    zerodCols = ['latitude', 'longitude',
                 'construction_year',
                 'gps_height','population']
    for cols in zerodCols:
        # 0 is a missing-value sentinel in these columns.
        x[cols] = x[cols].replace( 0, np.nan)
        x[cols + '_MISSING'] = x[cols].isnull()
    x['date_recorded'] = pd.to_datetime( x['date_recorded'],
                                         infer_datetime_format= True)
    x['year_recorded'] = x['date_recorded'].dt.year
    x['month_recorded'] = x['date_recorded'].dt.month
    x['day_recorded'] = x['date_recorded'].dt.day
    ## moved date stratification up so df.drop only needs to be called once
    x = x.drop( columns= ['quantity_group', 'payment_type', ## duplicates
                          'recorded_by', 'id', ## unusable
                          'date_recorded']) ## superfluous
    # Pump age in years; NaN wherever construction_year was missing.
    x['age'] = ( x['year_recorded'] - x['construction_year'] )
    x['years_MISSING'] = x['age'].isnull()
    return x
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
target = 'status_group'
train, validate = train_test_split( train, test_size= .5,
stratify= train['status_group'],
random_state= 16)
train = dangleWrangle( train) ## Defining vars
validate = dangleWrangle( validate)
test = dangleWrangle( test)
xTrain = train.drop( columns= target)
yTrain = train[ target]
xValidate = validate.drop( columns= target)
yValidate = validate[ target]
xTest = test
popeline = make_pipeline( ## Nanananananananana, Popeline!
ce.OrdinalEncoder(),
SimpleImputer( strategy= 'mean'),
RandomForestClassifier( n_estimators= 160, random_state= 16, n_jobs= -1)
)
popeline.fit( xTrain, yTrain)
yPred = popeline.predict( xValidate)
print( 'Validation accuracy: {:.5f}%'.format(
accuracy_score( yValidate, yPred) *100) )
# + id="iONOvE0MaAyN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 422} outputId="4aef2d98-d8e7-45f3-ffe5-7076ea538b25"
def confuMatrixPlot( yTrue, yPred):
    """Plot a labeled confusion matrix as a seaborn heatmap.

    Parameters:
        yTrue: array-like of true class labels.
        yPred: array-like of predicted class labels (same length as yTrue).

    Returns:
        The matplotlib Axes produced by seaborn.heatmap.
    """
    labeled = unique_labels( yTrue)
    # Row/column labels: actual classes down the side, predictions across the top.
    columned = [f'Predicted {label}' for label in labeled]
    indexed = [f'Actual {label}' for label in labeled]
    tabled = pd.DataFrame( confusion_matrix( yTrue, yPred),
                           columns= columned, index = indexed)
    return sns.heatmap( tabled, annot= True, fmt= 'd', cmap= 'viridis')
confuMatrixPlot( yValidate, yPred);
# + id="bHPIjQjRabzZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="726b7d7e-22d8-4b44-d95b-b838e31c13b9"
print( classification_report( yValidate, yPred) )
# + id="-_-56GgViCa5" colab_type="code" colab={}
yPred = popeline.predict( xTest) ## For export
# + id="T8-Th-_nabw5" colab_type="code" colab={}
from google.colab import files
mySub = sample_submission.copy()
mySub[ 'status_group'] = yPred
mySub.to_csv( 'dsx-2-2-4-kaggle-rselent.csv', index= False)
# + id="ltAhJuOMabtE" colab_type="code" colab={}
# + id="XtJcSQDLabqF" colab_type="code" colab={}
| module4/2_2_4_assignment_kaggle_challenge_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# what is a contrast map? (or image) after we have estimated our model (found the $\beta$s that best fit our input data to response signal) for _each_ voxel. remembering that $\beta$ is a vector and ***we are only interested in relative values (absolute values are meaningless)***. So we make a contrast map by subtracting one $\beta^i$ from another $\beta^j$. We find those voxels that for which the $\beta_v^i - \beta_v^j$ is statistically significant relevant to the RSS - residual squared sum of errors and after error correction for multiple comparisons, or possibly clustering.
#
# ------------
#
#
# what is the difference between a T-contrast and a F-contrast? [mit mindhive response here](http://mindhive.mit.edu/node/60). My synopsis: a t-statistic is the parameter estimate of a nearly gaussian distributed variable divided by its standard error. Remembering that a linear combination of Gaussian variables is also a Gaussian variable, and that contrast maps are just that. So a T-contrast is a t-statistic of a contrast map.
#
# -----------
# ----------
# An F-stastistic is an _OR_ of conditions such as $(A > B) \cup (A > C) \cup (B > C)$. While general enough to account for multiple comparaisons it does not tell us which of these conditions is significant, only that at least one of them is.
# -----------
# reading [nilearn plot decoding tutorial](http://localhost:8888/notebooks/jnotebooks/plot_decoding_tutorial%20(1).ipynb). It would seem that a `mask` is not binary, rather it seems to be a gaussian variable. Mask probably refers to a t-statistic of a voxel over time. but why would these be normalized over time, and
# _______________
# Functions accept either 3D or 4D images, and we need to use on the one hand `nilearn.image.index_img` or `nilearn.image.iter_img` to break down 4D images into 3D images, and on the other hand `nilearn.image.concat_imgs` to group a list of 3D images into a 4D image.
# ## some goals!
#
# - make a function that can make a "significant" signal. so it would have to be stimulus aware. Said function needs to take a parameter that would let us vary the noise to signal.
# why don't we just make a signal and 10 kinds of noise, and dot them with some weighting vector.
# Except, we want the matrix to be brain aware. so it produces the right signal in the right place.
#
# - how do we make the right signal happen at the right place? ah, there's the rub.. easy, we look at a billion studies and we grap their results. but here's the thing, the brain is, ideally, this GLM of signals. that when we stimulate it the correct way, it sends us a coded message by lighting up in different places. Whence the term: _decoding_. of course the message is ambiguous to us, so we compare its responses with other responses hoping to be able to figure what's going on.
#
# - back to locations and signals. location space is easy-ish 30 * 64 * 64 = 122880 voxels, but we can group them somehow. or have seeds that grow somehow. but here's the thing what is our "signal space" $SS$ and what is our "location space" $LS$. location space would be any subset of voxels or super-voxels.
# do we simply make up an arbitrary point in $LS$ and assign it a signal? -
# other goodies might be time lag, recursive things (eg associations) eg. that may be a house to you buts its extreme wealth to me. (which is why we do contrast maps, right?). so in general, individual differences.
#
# - we might want to use have options for different stim signals as well. that's the other thing, when using different stim signals,
#
#
# - we might want to make not only BOLD spatially dependent but also some kinds of noise
#
# - so what
#
#
#
#
# %matplotlib inline
import math
import numpy as np
# +
# let's make a signal
def signal( stim_a):
    """Placeholder for turning a stimulus array into a BOLD-like signal.

    Not yet implemented; currently a no-op that returns None.
    """
    pass
def gammaHRF(k,tau_h,t):
    """Gamma-shaped hemodynamic response function.

    Computes (t/tau_h)**k * exp(-t/tau_h) / (k * tau_h * (k-1)!).

    The original put the (t/tau_h)**k and exp terms in the DENOMINATOR
    (operator precedence after `1/(...)`), which diverges at t -> 0 and is
    not a valid HRF shape; this version applies the normalization constant
    only, so the response starts at 0 and rises/decays as intended.

    Parameters:
        k: integer shape parameter of the gamma variate.
        tau_h: time constant (seconds).
        t: time point or numpy array of time points.

    Returns:
        Scalar or numpy array of HRF values matching the shape of t.
    """
    norm = k * tau_h * math.factorial(k - 1)
    return (t / tau_h)**k * np.exp(-t / tau_h) / norm
# -
def stim(totaltime, onsets, durations, accuracy=1):
    """
    Build a boxcar (0/1) stimulus time course.

    Parameters:
        totaltime: total length in seconds of stimulus signal
        onsets: numpy array of times when stimulus starts
        durations: numpy array of stimulus lengths after each onset;
            a single duration is broadcast to all onsets
        accuracy: time resolution (seconds per sample)

    Returns:
        1-D numpy array of length int(totaltime // accuracy) with 1s
        during stimulation and 0s elsewhere.
    """
    assert onsets[-1] + durations[-1] <= totaltime
    n_samples = int(totaltime // accuracy)
    s = np.zeros(n_samples)  # dtype?
    os = onsets / accuracy
    dur = durations / accuracy
    # If there is only one duration we assume it generalizes to all onsets.
    # todo: generalize to tuples
    if len(durations) == 1:
        dur = dur * np.ones(len(onsets))
    # Check the expanded durations, not the raw input: the original asserted
    # len(durations) == len(onsets), which always failed on the broadcast path.
    assert len(dur) == len(onsets)
    offsets = os + dur
    for i in range(len(os)):
        start = int(os[i])
        # Clamp to the buffer end; keep the slice index an int (the original
        # min(int, float) could hand a float to the slice and raise).
        stop = min(int(offsets[i]), n_samples)
        s[ start : stop ] = 1
    return s
plt.plot( stim(80, np.arange(5)*5, np.ones(5)*2, 0.5))
plt.plot( stim(80, np.arange(5)*5, np.ones(5)*2, 1))
(3 // 0.3)
xs = np.linspace(0,200,200)
k = 3
fwhm = 1
tau_h = 0.242 * fwhm
plt.plot(gammaHRF(k,tau_h, xs))
xs
signal([1,0,0,1])
# # make a testing-led suite
# eg. make some tests, and then pass them. If you don't know what tests to write, that's probably the problem...
# already in docker image. cool.
import pytest
# ## Test1 - Communication interface
#
# The self-identifying _unqualified_ participant in any research project faces the challenge of seeking information from the _qualified_ participants while not becoming more of a burden than an assistant. A high-reward but high-risk strategy is to reduce communication to a minimum, and thus the burden, while attempting to solve problems autonomously. The strategy is frequently doomed, as the newcomer often does not have the theoretical background to completely understand the requests that are made of them.
#
import nilearn
import niwidgets
from nilearn._utils.testing import generate_fake_fmri
im_1,mask_1 = generate_fake_fmri()
import matplotlib.pyplot as plt
data = im_1.get_data()
dir(mask_1)
mask_1.shape
im_1.shape
# > why is there a shape file with the fmri??
#
# it's there to reduce computation time, and improve chances of getting a result: Bonferroni correction requires that you divide your significance threshold by the number of voxels - so you want to choose the right voxels to get the right contrasts...(i think)
plt.imshow(mask_1.get_data()[:,:,5], cmap='Greys_r')
plt.show()
plt.imshow(data[:,:,5,0], cmap='Greys_r')
plt.show()
plt.plot(data[:,4,5,0])
plt.show()
# voxel 4 4 5 over time
plt.plot(data[4,4,5,:])
plt.show()
# total energy over time
plt.plot( data.mean(axis=(0,1,2)))
plt.show()
# total energy over time
plt.plot( data.mean(axis=(0,1,2)))
plt.show()
voxel
| notebooks/simulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: vrb-know
# language: python
# name: vrb-know
# ---
# +
# Data Source
# https://www.kaggle.com/lislejoem/us-minimum-wage-by-state-from-1968-to-2017?select=Minimum+Wage+Data.csv
# -
import pandas as pd
# +
data_path = 'Minimum Wage Data.csv'
min_wage = pd.read_csv(data_path, encoding="ISO-8859-1")
print(min_wage.shape)
min_wage.head()
# -
# Keep only the 2020 rows of the minimum-wage table.
min_wage_2020 = min_wage[min_wage['Year'] == 2020]
min_wage_2020.sort_values(by=['State.Minimum.Wage'], ascending=False).head()
# Sanity check: for 2020 rows the inflation-adjusted ("2020 Dollars") column
# should equal the nominal column, making it redundant.
list(min_wage_2020['State.Minimum.Wage.2020.Dollars']) == list(min_wage_2020['State.Minimum.Wage'])
# Drop the redundant adjusted columns plus Year (constant after the filter).
min_wage_2020 = min_wage_2020.drop(['Year',
                                    'State.Minimum.Wage.2020.Dollars',
                                    'Federal.Minimum.Wage.2020.Dollars',
                                    'Effective.Minimum.Wage.2020.Dollars'], axis=1)
# Reorder the remaining columns for export.
min_wage_2020 = min_wage_2020[['State',
                               'State.Minimum.Wage',
                               'Federal.Minimum.Wage',
                               'Effective.Minimum.Wage',
                               'CPI.Average'
                               ]]
min_wage_2020.head()
min_wage_2020.to_csv('cols_infra_min_wage.csv')
| data/infrastructure_cost/min_wage_data_explore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# Generalized Linear Mixed Models
# ===============================
#
# The UCBadmit data is sourced from the study [1] of gender biased in graduate admissions at
# UC Berkeley in Fall 1973:
#
# .. table:: UCBadmit dataset
# :align: center
#
# ====== ====== ============== =======
# dept male applications admit
# ====== ====== ============== =======
# 0 1 825 512
# 0 0 108 89
# 1 1 560 353
# 1 0 25 17
# 2 1 325 120
# 2 0 593 202
# 3 1 417 138
# 3 0 375 131
# 4 1 191 53
# 4 0 393 94
# 5 1 373 22
# 5 0 341 24
# ====== ====== ============== =======
#
# This example replicates the multilevel model `m_glmm5` at [3], which is used to evaluate whether
# the data contain evidence of gender biased in admissions accross departments. This is a form of
# Generalized Linear Mixed Models for binomial regression problem, which models
#
# - varying intercepts accross departments,
# - varying slopes (or the effects of being male) accross departments,
# - correlation between intercepts and slopes,
#
# and uses non-centered parameterization (or whitening).
#
# A more comprehensive explanation for binomial regression and non-centered parameterization can be
# found in Chapter 10 (Counting and Classification) and Chapter 13 (Adventures in Covariance) of [2].
#
# **References:**
#
# 1. <NAME>., <NAME>., and <NAME>. (1975), "Sex Bias in Graduate Admissions:
# Data from Berkeley", Science, 187(4175), 398-404.
# 2. <NAME>. (2018), "Statistical Rethinking: A Bayesian Course with Examples in R and Stan",
# Chapman and Hall/CRC.
# 3. https://github.com/rmcelreath/rethinking/tree/Experimental#multilevel-model-formulas
#
# +
import argparse
import os
import matplotlib.pyplot as plt
import numpy as onp
from jax import random
import jax.numpy as np
from jax.scipy.special import expit
import numpyro
import numpyro.distributions as dist
from numpyro.examples.datasets import UCBADMIT, load_dataset
from numpyro.infer import MCMC, NUTS, Predictive
def glmm(dept, male, applications, admit=None):
    """Binomial GLMM: varying intercepts and male-effect slopes per department.

    Parameters:
        dept: integer department index per row.
        male: male indicator per row.
        applications: binomial total count per row.
        admit: observed admit counts; when None the model runs in
            predictive mode and records admit probabilities instead.
    """
    # Population-level means for (intercept, male effect).
    v_mu = numpyro.sample('v_mu', dist.Normal(0, np.array([4., 1.])))
    sigma = numpyro.sample('sigma', dist.HalfNormal(np.ones(2)))
    # Cholesky factor of the intercept/slope correlation matrix (LKJ prior).
    L_Rho = numpyro.sample('L_Rho', dist.LKJCholesky(2, concentration=2))
    scale_tril = sigma[..., np.newaxis] * L_Rho
    # non-centered parameterization: sample standard normals, then scale
    num_dept = len(onp.unique(dept))
    z = numpyro.sample('z', dist.Normal(np.zeros((num_dept, 2)), 1))
    v = np.dot(scale_tril, z.T).T  # per-department (intercept, slope) offsets
    logits = v_mu[0] + v[dept, 0] + (v_mu[1] + v[dept, 1]) * male
    if admit is None:
        # we use a Delta site to record probs for predictive distribution
        probs = expit(logits)
        numpyro.sample('probs', dist.Delta(probs), obs=probs)
    numpyro.sample('admit', dist.Binomial(applications, logits=logits), obs=admit)
def run_inference(dept, male, applications, admit, rng_key, args):
    """Draw posterior samples from the glmm model with NUTS and return them."""
    # Hide the progress bar when a Sphinx docs build drives this script.
    show_progress = "NUMPYRO_SPHINXBUILD" not in os.environ
    sampler = MCMC(NUTS(glmm), args.num_warmup, args.num_samples, args.num_chains,
                   progress_bar=show_progress)
    sampler.run(rng_key, dept, male, applications, admit)
    return sampler.get_samples()
def print_results(header, preds, dept, male, probs):
columns = ['Dept', 'Male', 'ActualProb', 'Pred(p25)', 'Pred(p50)', 'Pred(p75)']
header_format = '{:>10} {:>10} {:>10} {:>10} {:>10} {:>10}'
row_format = '{:>10.0f} {:>10.0f} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f}'
quantiles = onp.quantile(preds, [0.25, 0.5, 0.75], axis=0)
print('\n', header, '\n')
print(header_format.format(*columns))
for i in range(len(dept)):
print(row_format.format(dept[i], male[i], probs[i], *quantiles[:, i]), '\n')
def main(args):
    """Fit the model on the UCBAdmit data, print a posterior-predictive table
    and save a posterior-predictive-check plot to ucbadmit_plot.pdf."""
    _, fetch_train = load_dataset(UCBADMIT, split='train', shuffle=False)
    dept, male, applications, admit = fetch_train()
    rng_key, rng_key_predict = random.split(random.PRNGKey(1))
    zs = run_inference(dept, male, applications, admit, rng_key, args)
    # 'probs' is recorded by the model's Delta site in predictive mode.
    pred_probs = Predictive(glmm, zs)(rng_key_predict, dept, male, applications)['probs']
    header = '=' * 30 + 'glmm - TRAIN' + '=' * 30
    print_results(header, pred_probs, dept, male, admit / applications)
    # make plots
    fig, ax = plt.subplots(1, 1)
    ax.plot(range(1, 13), admit / applications, "o", ms=7, label="actual rate")
    ax.errorbar(range(1, 13), np.mean(pred_probs, 0), np.std(pred_probs, 0),
                fmt="o", c="k", mfc="none", ms=7, elinewidth=1, label=r"mean $\pm$ std")
    ax.plot(range(1, 13), np.percentile(pred_probs, 5, 0), "k+")
    ax.plot(range(1, 13), np.percentile(pred_probs, 95, 0), "k+")
    ax.set(xlabel="cases", ylabel="admit rate", title="Posterior Predictive Check with 90% CI")
    ax.legend()
    # BUG FIX: tight_layout() must run before savefig(); the original called it
    # after saving, so the written PDF never got the adjusted layout.
    plt.tight_layout()
    plt.savefig("ucbadmit_plot.pdf")
if __name__ == '__main__':
    # This example is pinned to numpyro 0.2.4; the MCMC constructor signature
    # used in run_inference changed in later releases.
    assert numpyro.__version__.startswith('0.2.4')
    cli = argparse.ArgumentParser(description='UCBadmit gender discrimination using HMC')
    cli.add_argument('-n', '--num-samples', nargs='?', default=2000, type=int)
    cli.add_argument('--num-warmup', nargs='?', default=500, type=int)
    cli.add_argument('--num-chains', nargs='?', default=1, type=int)
    cli.add_argument('--device', default='cpu', type=str, help='use "cpu" or "gpu".')
    parsed = cli.parse_args()
    # Configure the JAX backend before any computation happens.
    numpyro.set_platform(parsed.device)
    numpyro.set_host_device_count(parsed.num_chains)
    main(parsed)
| numpyro/_downloads/e9427083d2450dd3ada73ea1d9d694f0/ucbadmit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ss-shankar/ml-classification-ottoProduct/blob/main/ottoproduct.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="qmAuMs8S787I"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# + id="7DxhtfyuBVob"
# Load the Otto product training set directly from GitHub.
df = pd.read_csv('https://raw.githubusercontent.com/ss-shankar/ml-otto_product-classification/main/Data/train.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 253} id="Q1oB2AfABbb1" outputId="e88dd9f1-a9ad-4c09-dba7-c5142f31a4f3"
# Quick look at the first rows.
df.head()
# + id="xibZ32iFBmlB" colab={"base_uri": "https://localhost:8080/"} outputId="9e339a14-272a-4945-bd01-ce9082f7924a"
# Column dtypes and non-null counts.
df.info()
# + colab={"base_uri": "https://localhost:8080/"} id="40bDXMUGEyEl" outputId="b313ae0f-47ea-4ad6-c1ab-929305601a4b"
df.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 346} id="KEKsm0n4EzsZ" outputId="2fa3e825-6df9-4344-bfaf-1baf5ff07a5e"
# Summary statistics of the numeric columns.
df.describe()
# + [markdown] id="qiGRRuHGFBXM"
# **Observation:**
#
#
# * We can drop the id column
# * We need to encode the target variable
#
#
# + id="CfeWGYk4E3_y"
# `id` is only a row identifier, so it carries no predictive signal.
df = df.drop(['id'], axis=1)
# + id="hBxNO0B4FU4w"
from sklearn.preprocessing import LabelEncoder
# Map the string class labels to integer codes.
le = LabelEncoder()
df['target'] = le.fit_transform(df['target'])
# + colab={"base_uri": "https://localhost:8080/", "height": 350} id="vTq8WwN6Frj7" outputId="e0860822-8a0c-4841-e6be-3120d57e713a"
# Class distribution of the encoded target.
sns.countplot(df['target'])
# + [markdown] id="xgckrHQ5F5i4"
# **Observation:**
#
#
# * We can observe that classes 1, 5 and 7 dominate
#
#
# + id="lJDx_TLeF0My"
# Split the frame into features (x) and the encoded target (y).
y = df['target']
x = df.drop(['target'], axis=1)
# + id="-3ZF0UqCGdqf"
from sklearn.model_selection import train_test_split
# 80/20 train/test split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0, test_size=0.20)
# + id="JkgrX1vaG0sw"
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# Baseline 1: logistic regression (large max_iter so the solver converges).
lr=LogisticRegression(max_iter=100000)
lr.fit(x_train,y_train)
pred_1=lr.predict(x_test)
score_1=accuracy_score(y_test,pred_1)
# + colab={"base_uri": "https://localhost:8080/"} id="LGiQa1bqHgPu" outputId="4e702f2e-f695-4181-8938-6751b58bd9f3"
score_1
# + id="Ukefc2WBH3NN"
from sklearn.ensemble import RandomForestClassifier
# Baseline 2: random forest with default hyper-parameters.
rfc=RandomForestClassifier()
rfc.fit(x_train,y_train)
pred_2=rfc.predict(x_test)
score_2=accuracy_score(y_test,pred_2)
# + colab={"base_uri": "https://localhost:8080/"} id="QevsgzGJH_Xx" outputId="80971aed-e857-48ae-a7a1-7c611e06b682"
score_2
# + id="0h5Uu-pGIE7W"
from sklearn.svm import SVC
# Baseline 3: support vector classifier with default settings.
svm=SVC()
svm.fit(x_train,y_train)
pred_3=svm.predict(x_test)
score_3=accuracy_score(y_test,pred_3)
# + colab={"base_uri": "https://localhost:8080/"} id="AeoeF_BTINRr" outputId="d75b3a98-be4b-49bf-9b40-911d1a3a9035"
score_3
# + id="UmQ-14bIJ2vo"
from xgboost import XGBClassifier
# Baseline 4: gradient-boosted trees (XGBoost).
xgb=XGBClassifier()
xgb.fit(x_train,y_train)
pred_4=xgb.predict(x_test)
score_4=accuracy_score(y_test,pred_4)
# + colab={"base_uri": "https://localhost:8080/"} id="IEZZqNrWJ4v3" outputId="f5f0f8bd-b237-4177-efdb-6163db9c3734"
score_4
# + id="jR6Rf8vXLelp"
# + id="jR6Rf8vXLelp"
| ottoproduct.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# p - populacija
# SI - swarm intelligence
# PSO - particle swarm optimization
# swarm = [Particle()]
# globalno
# while not stop_condition:
# for svaka cestica:
# r_l, r_g ~ U(0,1)
# izracunaj novu brzinu: v = c_i*inercija + r_l*c_l*lokalno + r_g*c_g*globalno
# pomeri cesticu: p = p + v
# usput azuriraj ako treba lokalni i globalni minimum
# resenje je: globalni minimum
# -
import numpy as np
import random
import matplotlib.pyplot as plt
def rastrigin(x):
    """Rastrigin test function, generalized to any dimensionality.

    f(x) = A*n + sum_i (x_i**2 - A*cos(2*pi*x_i)); the global minimum is 0 at
    the origin. The original hard-coded the 2-D term as A*2, which made the
    value wrong for inputs of any other length; using len(x) is backward
    compatible for the 2-D case used elsewhere in this notebook.
    """
    A = 10
    return A * len(x) + sum(x_i ** 2 - A * np.cos(2 * np.pi * x_i) for x_i in x)
def rosenbrock(x):
    """2-D Rosenbrock "banana" function; global minimum 0 at (1, 1)."""
    a, b = 1, 100
    first, second = x[0], x[1]
    return (a - first) ** 2 + b * (second - first ** 2) ** 2
# Per-dimension search box (the classic Rastrigin domain), reused for both objectives.
bounds = [(-5.12, 5.12), (-5.12, 5.12)]
class Particle:
    """One PSO particle: current position/velocity plus its personal best.

    objective -- callable mapping a position (list of floats) to a scalar
    bounds    -- list of (low, high) tuples, one per dimension
    w         -- inertia weight
    c1        -- cognitive (personal-best) acceleration coefficient
    c2        -- social (global-best) acceleration coefficient
    """
    def __init__(self, objective, bounds, w=0.75, c1=1, c2=2):
        # Random start inside the box; the velocity range spans the box width.
        self.position = [random.uniform(bounds[i][0], bounds[i][1]) for i in range(len(bounds))]
        self.velocity = [random.uniform(-(bounds[i][1] - bounds[i][0]),
                                        bounds[i][1] - bounds[i][0]) for i in range(len(bounds))]
        self.bestPosition = list(self.position)
        self.currentValue = objective(self.position)
        self.bestValue = self.currentValue
        self.objective = objective
        self.bounds = bounds
        self.w = w
        # BUG FIX: the original read `self.c1 = c2`, silently discarding the
        # caller's cognitive coefficient and doubling the cognitive term's weight.
        self.c1 = c1
        self.c2 = c2

    def updatePosition(self, globalBestPosition, globalBestValue):
        """Move by the current velocity (clamped to the bounds), re-evaluate the
        objective, refresh the personal best, and return the (possibly improved)
        global best position/value pair."""
        for i in range(len(self.position)):
            self.position[i] += self.velocity[i]
            # Clamp each coordinate back into the search box.
            if self.position[i] > self.bounds[i][1]:
                self.position[i] = self.bounds[i][1]
            elif self.position[i] < self.bounds[i][0]:
                self.position[i] = self.bounds[i][0]
        self.currentValue = self.objective(self.position)
        if self.currentValue < self.bestValue:
            self.bestValue = self.currentValue
            self.bestPosition = list(self.position)
        if self.currentValue < globalBestValue:
            globalBestValue = self.currentValue
            globalBestPosition = list(self.position)
        return globalBestPosition, globalBestValue

    def updateVelocity(self, globalBestPosition):
        """Standard PSO velocity update: inertia + cognitive + social terms."""
        for i in range(len(self.velocity)):
            r1 = random.random()
            r2 = random.random()
            cognitive_velocity = r1 * self.c1 * (self.bestPosition[i] - self.position[i])
            social_velocity = r2 * self.c2 * (globalBestPosition[i] - self.position[i])
            self.velocity[i] = self.w * self.velocity[i] + cognitive_velocity + social_velocity
# +
SWARM_SIZE = 50
MAX_ITER = 300

# Build the swarm, then seed the global best from the best initial particle.
swarm = [Particle(rosenbrock, bounds) for _ in range(SWARM_SIZE)]
leader = min(swarm, key=lambda p: p.currentValue)
globalBestValue = leader.currentValue
globalBestPosition = list(leader.position)

# Record the best objective value seen after every iteration for plotting.
bests = []
for _ in range(MAX_ITER):
    for particle in swarm:
        particle.updateVelocity(globalBestPosition)
        globalBestPosition, globalBestValue = particle.updatePosition(globalBestPosition, globalBestValue)
    bests.append(globalBestValue)

print('Solution: {}, value: {}'.format(globalBestPosition, globalBestValue))
plt.plot(range(MAX_ITER), bests)
# -
| 2020_2021/live/08_particle_swarm_optimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/madhurpatle/MalwareDetection/blob/main/Maincnn.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="JfMJo2TJlYIp" outputId="789c09cb-41e2-404b-a7da-6883037638bb"
from google.colab import drive
# Mount Google Drive at /gdrive so the CSV and image paths used below resolve.
drive.mount('/gdrive')
# + id="abg-EstQlkvV"
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.utils import to_categorical
from keras.preprocessing import image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tqdm import tqdm
# + id="6kPaHq98rESe"
#Not to Be executed!!!!!
# One-off helper: copy every entry of the first directory into the second one.
import subprocess as sbp
import os
path=input('Please enter a path\n')
fol = os.listdir(path)
p2 = input('Please enter a path\n')
for i in fol:
    p1 = os.path.join(path,i)
    # FIX: pass an argument vector instead of interpolating the paths into a
    # shell command string. shell=True on interactively-entered paths is a
    # command-injection risk and also breaks on paths containing spaces.
    sbp.Popen(['cp', '-r', p1, p2 + '/.'])
# + id="t0j5gYgLz2cE"
# Training labels: one row per sample with its Id and Class.
train = pd.read_csv('/gdrive/MyDrive/kaggle/trainLabels.csv')
# + colab={"base_uri": "https://localhost:8080/"} id="xJyspASGmKOv" outputId="0cf159de-bb54-4b98-f781-c91873350604"
# We have grayscale images, so while loading the images we will keep grayscale=True, if you have RGB images, you should set grayscale as False
train_image = []
for i in tqdm(range(train.shape[0])):
    # Load each sample resized to 28x28, single channel, scaled to [0, 1].
    img = image.load_img('/gdrive/MyDrive/kaggle/tt/'+train['Id'][i]+'.jpg', target_size=(28,28,1), color_mode='grayscale')
    img = image.img_to_array(img)
    img = img/255
    train_image.append(img)
X = np.array(train_image)
# + id="JINpDucmK6MZ"
# One-hot encode the class labels and hold out 20% for validation.
# NOTE(review): if the Class values start at 1, to_categorical also creates a
# column for class 0 — confirm this matches the Dense(10) output layer below.
y=train['Class'].values
y = to_categorical(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.2)
# + id="eTp3dSS3KO1J"
# Small CNN: two conv layers -> max-pool -> dropout -> dense softmax head.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=(28,28,1)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
# + id="_nU9_kRy1avM"
model.compile(loss='categorical_crossentropy',optimizer='Adam',metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="HSZIU8-a1cST" outputId="f14188ca-e194-4781-e47b-70928596038d"
# Train for 20 epochs; the held-out split doubles as validation data.
history=model.fit(X_train, y_train, epochs=20, validation_data=(X_test, y_test))
# + colab={"base_uri": "https://localhost:8080/"} id="PvhRkcLuPOmV" outputId="50c31494-8512-4c7f-cb0e-58a7cc8e9217"
print(history.history.keys())
# + id="w8QGDx2fPUDO" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="dfbf2388-800c-4e41-8e6c-71fb3b40d3ee"
# Learning curves: accuracy per epoch, train vs. validation.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="Pid__0vDPZvV" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="b4082835-f5a9-4d45-9911-688bffbfdc74"
# Learning curves: loss per epoch, train vs. validation.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
| Maincnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import pytablewriter
writer = pytablewriter.MarkdownTableWriter()
writer.table_name = "Table formatting for Jupyter Notebook."
writer.header_list = ["int", "float", "str", "bool", "mix", "time"]
writer.value_matrix = [
[0, 0.1, "hoge", True, 0, "2017-01-01 03:04:05+0900"],
[2, "-2.23", "foo", False, None, "2017-12-23 12:34:51+0900"],
[3, 0, "bar", "true", "inf", "2017-03-03 22:44:55+0900"],
[-10, -9.9, "", "FALSE", "nan", "2017-01-01 00:00:00+0900"],
]
# All of the table writer classes in pytablewriter can be formatting in Jupyter Notebook table.
writer
# -
| test/data/jupyter_notebook_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from bs4 import BeautifulSoup as bs
import numpy as np
import pandas as pd
import re, os
# +
path = 'data'
dirs = []
for dirpath, dirnames, filenames in os.walk(path):
dirs.append(dirpath)
dirs = sorted(dirs[1:])
print(dirs)
# -
for filedir in dirs:
for dirpath, dirnames, filenames in os.walk(path):
filelist = [os.path.join(dirpath, f) for f in filenames if f.startswith('file')]
for htmlfile in filelist:
fob = open(htmlfile,'r')
f_text = "".join(fob.readlines())
# f_text = f_text.decode("utf-8").encode('ascii','ignore')
# print("".join(f_text).strip())
print(f_text)
soup = bs(f_text, 'html.parser')
# hotels = soup.findAll(class_="search-snippet-card")
# print(hotels)
break
break
fob = open('data/blr/file-0','r')
f_text = "".join(fob.readlines())
soup = bs(f_text, 'html.parser')
a = [x for x in soup.find_all("div") if x.has_attr("search-snippet-card")]
a[0]
| mishtidoi_parser.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="nXxZV5PQfjWF"
# ## AICE_ learning practice
#
#
#
#
# + [markdown] id="DkLDujJifjWG"
# This notebook is a practice exercise and a deep dive
# into the knowledge I am receiving from AICE training.
# I have done:
# 1. Data cleaning
# 2. EDA( Exploratory Data Analysis)
# 3. Data preprocessing
# 4. Predictive modelling
# + [markdown] id="knVyDE3zfjWH"
# Import all necessary packages for the work.
# + id="OOTS1pWnfjWI"
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas_profiling
from sklearn.preprocessing import StandardScaler
# + id="A1luMDKJfjWJ"
import sys
# Silence all warnings unless the user explicitly enabled them via -W.
if not sys.warnoptions:
    import warnings
    warnings.simplefilter("ignore")
# + id="dHZB7iTffjWJ" outputId="ef4d900f-e5e8-4079-9be5-8c53e5243d67"
# Bank marketing dataset; fields are semicolon-separated.
data = pd.read_csv("bank-additional-full.csv", sep=";")
data.head()
# + id="0Lw5QGk7fjWK" outputId="9a15b0fd-87ab-4fb4-cd03-c0d251f5a06a"
# Distribution of the target column y.
data.y.value_counts()
# + id="WBtVrBnCfjWK" outputId="b103f3ec-1530-45ce-f419-4284c0796a0a"
print("The data has {} rows with {} features/columns".format(data.shape[0], data.shape[1]))
# + id="cBDfX2lpfjWL" outputId="c9d88b00-54f5-4b6b-f6b6-a3c9b1e618ec"
data.info()
# + [markdown] id="IMBdZewQfjWL"
# #### Check if there are missing values in the data
#
# + id="BQEQSDyzfjWM" outputId="64220709-40c6-4f78-95eb-d72cf4fc40bc"
print("The number of missing value(s): {}".format(data.isnull().sum().sum()))
# + id="5QJkXrY9fjWM" outputId="4c93b363-2738-4992-ad6e-cc39ef18c04f"
# Summary statistics for the numeric columns only.
data.select_dtypes(include=["int64", "float64"]).describe().T
# + [markdown] id="4xSpN98QfjWN"
# From the data, we observe that the mean age is $40$ years with a maximum age of $98$ years and minimum age of $17$ years. The balance is the average yearly balance which is in euros. Try to understand the rest of the data descriptions. While trying to understand the data, you could try to answer these questions:
#
# + [markdown] id="B1uvgdqEfjWN"
# ### Univariate Analysis
# + id="O3avAuevfjWN"
def catplot(x, data):
    """Draw a seaborn count plot for column ``x`` of ``data`` and return it.

    The x tick labels are rotated for readability; the title follows the
    original "counts of <column>" wording.
    """
    plot = sns.catplot(x, kind="count", data=data, palette="Set1")
    plt.xticks(rotation=45, horizontalalignment='right')
    plt.title("counts" + " " + "of" + " " + " " + x)
    # FIX: the original assigned the FacetGrid and then discarded it with a
    # bare `return`; return it so callers can customise the figure further.
    return plot
# + [markdown] id="xk0rfzGdfjWO"
# ### Target
#
# The first thing to cross-check is whether the target class is balanced or not.
# + id="5h5BSk9sfjWO" outputId="bac9b13c-6c8e-4993-dc01-fcf202443983"
catplot("y",data=data)
# + [markdown] id="B40EgYDbfjWO"
# There seem to be a lot more clients that have not subscribed to a term deposit. This is certainly a class-imbalance problem.
#
# 1. How does this class imbalance problem impact your model performance?
#
# 2. What data techniques or approaches could be useful?
#
# Here is a link on how to deal with imbalanced datasets
#
# https://towardsdatascience.com/methods-for-dealing-with-imbalanced-data-5b761be45a18
#
#
# + [markdown] id="Uz7c6eemfjWP"
# **Marital Status**
# + id="SIyVNC1LfjWP" outputId="42243fef-86e1-4dca-93fe-fddda621183d"
catplot("marital", data=data)
# + [markdown] id="n9m--zD4fjWP"
# **Education**
# + id="cpgvWBrufjWP" outputId="d77e67e5-9e93-4e4d-8b2f-0f93702b8b48"
# Raw counts plus a normalized bar chart of education levels.
print(data['education'].value_counts())
plt.figure(figsize=(10,5))
data['education'].value_counts(normalize=True).plot(kind='bar')
plt.ylabel('counts')
plt.xlabel('education')
# + [markdown] id="52Jg7IvXfjWQ"
# **Job**
# + id="4vJLbJmHfjWQ" outputId="c84077f7-8036-4ccc-f8a4-ec65a3e2e2b2"
# Raw counts plus a normalized bar chart of job categories.
print(data['job'].value_counts())
plt.figure(figsize=(10,5))
data['job'].value_counts(normalize=True).plot(kind='bar')
plt.ylabel('counts')
plt.xlabel('job')
# + [markdown] id="Qe_VH8wufjWR"
# **Age Distribution**
# + id="I4s_f1THfjWR" outputId="85c52a1e-2e7a-4345-9d32-636971020bf1"
plt.figure(figsize=(8, 6))
data.age.hist( color="orange")
plt.xlabel('Ages')
plt.title("Age Distribution")
# + [markdown] id="NmBButS-fjWR"
# ### Bivariate Analysis
# + id="Wn9WU8pQfjWS"
def boxplot(x, y, data=data, hue="y"):
    """Draw a seaborn box plot of ``y`` grouped by ``x``, split by ``hue``,
    with rotated tick labels; returns the Axes object."""
    ax = sns.boxplot(x=x, y=y, hue=hue, data=data)
    plt.xticks(rotation=45, horizontalalignment='right')
    # The double spaces reproduce the original title text exactly.
    plt.title("Boxplot of  " + x.upper() + " and  " + y.upper())
    return ax
# + [markdown] id="YbXZ-vAAfjWS"
# **Marital vs. age and target**
# + id="akaNANfOfjWS" outputId="102e17ac-fb42-4e62-f53d-c29d62356fb7"
boxplot("marital", "age", data=data, hue= "y")
# + [markdown] id="g7c7sLUVfjWS"
# **Education vs. age and target**
# + id="dgsp47IlfjWT" outputId="f5e4f9e4-ad51-4953-e7cc-6aee93efed51"
boxplot("education", "age", data=data, hue= "y")
# + [markdown] id="N4sKsHnifjWT"
# **Correlation**
# + id="IAMQZX91fjWT" outputId="98146e29-9408-4f88-c8aa-74665734c8fa"
# Pairwise correlation of the numeric columns.
data.corr()
# + [markdown] id="zgSLcFatfjWU"
# ### Data Preprocessing
# + [markdown] id="z6c7zHp4fjWU"
# **Label Encoding for all Categorical Data**
# + id="0ZBXeez3fjWU"
from sklearn.preprocessing import LabelEncoder
# One shared encoder instance, re-fitted on each categorical column in turn.
# NOTE(review): label-encoding nominal features imposes an arbitrary ordering;
# one-hot encoding may suit the linear model below better — confirm intent.
encoder = LabelEncoder()
# + id="Hw_ESVKrfjWU" outputId="c3f68c6a-979b-4755-973b-b4043be58650"
data.poutcome.value_counts()
# + id="uQfdqp1RfjWV" outputId="b35de8eb-b9ae-4c39-d549-296009bc770e"
data.dtypes
# + id="UX_kV33IfjWV"
# Encode every object-typed column, including the target y, to integer codes.
data["job"] = encoder.fit_transform(data["job"])
data['education'] = encoder.fit_transform(data["education"])
data["marital"] = encoder.fit_transform(data["marital"])
data['default'] = encoder.fit_transform(data["default"])
data['housing'] = encoder.fit_transform(data["housing"])
data['loan'] = encoder.fit_transform(data["loan"])
data['contact'] = encoder.fit_transform(data["contact"])
data['month'] = encoder.fit_transform(data["month"])
data['poutcome'] = encoder.fit_transform(data["poutcome"])
data['day_of_week'] = encoder.fit_transform(data["day_of_week"])
data['y'] = encoder.fit_transform(data["y"])
# + id="vVL4ckE8fjWW"
# Continuous columns to be standardized (zero mean, unit variance).
num_cols = ['emp.var.rate',"pdays","age", 'cons.price.idx','cons.conf.idx', 'euribor3m', 'nr.employed']
# + id="_P_Xp0rkfjWW"
scaler = StandardScaler()
data[num_cols] = scaler.fit_transform(data[num_cols])
# + id="osH-eUUzfjWW" outputId="4a721481-9927-4994-e834-3827b230fadd"
data
# + id="y6GksT0VfjWX" outputId="b7520fc5-3934-40a9-80ea-edf80c8790ca"
# Feature matrix X and target vector y.
X = data.drop(columns=[ "y"])
y = data["y"]
print(X.shape)
print(y.shape)
# + [markdown] id="qD6x2dl2fjWX"
# ### Split Data using sklearn train test split
# + id="4Bum8lIBfjWX" outputId="6632c4f4-c015-45e7-f4af-86e830507642"
from sklearn.model_selection import train_test_split,cross_val_score
# 80/20 split with a fixed seed so results are reproducible.
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2,random_state=1)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# + [markdown] id="DDA9MJ_PfjWX"
# ### Machine Learning Model
# + [markdown] id="951f5bJDfjWY"
# **1.Logistic Regression**
# + id="2MjMus-mfjWY" outputId="c836512c-bafb-41bb-9492-4218aebbcc6b"
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
model_logreg = logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(logreg.score(X_test, y_test)))
# + [markdown] id="fhYLiA2dfjWY"
# **Confusion Matrix**
# + id="kS87bgXufjWZ"
from sklearn.metrics import accuracy_score,confusion_matrix,recall_score,precision_recall_curve, f1_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# FIX: bind the result to `cm` instead of rebinding the name
# `confusion_matrix`, which shadowed the sklearn function that was just
# imported and would break any later call to it.
cm = confusion_matrix(y_test, y_pred)
# + id="dA41XA8efjWZ" outputId="d69dacb9-41eb-46fc-bc40-7b94d40d9a58"
cm
# + [markdown] id="GQoJ9bOVfjWZ"
# **Interpreting the Confusion Matrix**
#
#
# | | Predicted: No | Predicted Yes |
# | --- | --- | --- |
# | Actual No |TN: 7069 | FP: 225 |
# | | | |
# | Actual Yes |FN: 553 | TP: 391 |
#
# 1. Correct predictions: 7069 + 391 = 7460
#
# 2. Wrong Predictions: 553 + 225 = 778
# + [markdown] id="0yldapVWfjWa"
# **Classification Report**
# + id="6oMbu8sbfjWa" outputId="a25f8bb8-8a57-4a3c-d031-ce7ba01f1aed"
# Per-class precision/recall/F1 for the logistic-regression predictions.
print(classification_report(y_test, y_pred))
# + [markdown] id="pcvImRO5fjWa"
# **ROC Curve:**
#
# The ROC curve shows the trade-off between sensitivity (or TPR) and specificity (1 – FPR). Classifiers that give curves closer to the top-left corner indicate a better performance
# + id="0-9_ENwafjWi" outputId="1f25bfc0-4641-4704-83c6-e7bb753e06bf"
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# NOTE(review): roc_auc_score is fed hard predict() labels here while the curve
# uses predict_proba scores — the AUC is therefore a single-threshold value;
# confirm this is intended.
logit_roc_auc = roc_auc_score(y_test, logreg.predict(X_test))
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
# Diagonal reference line = a random classifier.
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show()
# + [markdown] id="vupQiFUmfjWj"
# **2. Decision Trees**
# + id="Ld2VphuBfjWj"
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
# + id="qiYf9x5hfjWj" outputId="57ded753-a06b-47b5-8156-4718df65a1b9"
# Shallow entropy-based tree (depth 3) so the rendered tree stays readable.
clf_decision = DecisionTreeClassifier(criterion="entropy", max_depth=3)
clf_decision = clf_decision.fit(X_train, y_train)
y_pred = clf_decision.predict(X_test)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
# + [markdown] id="ENRwcLjlfjWk"
# **Important Features**
# + id="agUQWtvufjWk" outputId="e926dce7-40b5-4956-8568-c9dcc383e66b"
# Rank features by the tree's impurity-based importances.
import_decision= pd.DataFrame({'feature': list(X_train.columns),
                               'importance': clf_decision.feature_importances_}).\
    sort_values('importance', ascending = False)
import_decision.head(4)
# + [markdown] id="WbMYWWVbfjWk"
# **Tree Plotting**
# + id="VY-9-rd5fjWk"
from sklearn.tree import export_graphviz
from io import StringIO
import pydotplus
from IPython.display import Image
# + id="SiCMdhOffjWl" outputId="0971fe83-19c1-416e-b7ea-631ea8ae9742"
feature_cols = ['age', 'job', 'marital', 'education', 'default', 'housing', 'loan',
                'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays',
                'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx',
                'cons.conf.idx', 'euribor3m', 'nr.employed']
dot_data = StringIO()
# Render the fitted tree to PNG via graphviz.
# NOTE(review): class_names=['Yes','No'] — verify this order matches
# clf_decision.classes_ (LabelEncoder assigns codes alphabetically).
export_graphviz(clf_decision, out_file=dot_data,
                filled=True, rounded=True,
                special_characters=True,feature_names = feature_cols,class_names=['Yes','No'])
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('term_deposit.png')
Image(graph.create_png())
| DS_practice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# ## Amazon SageMaker Data-Processing & Training Job
#
# With Amazon SageMaker, you can leverage a simplified, managed experience to run data pre- or post-processing and model evaluation workloads on the Amazon SageMaker platform.
#
# A processing job downloads input from Amazon Simple Storage Service (Amazon S3), then uploads outputs to Amazon S3 during or after the processing job.
#
# This notebook shows how you can:
#
# 1. Run a processing job to run a scikit-learn script that cleans, pre-processes, performs feature engineering, and splits the input data into train and test sets.
# 2. Run a training job on the pre-processed training data to train a model
# 3. Predict on the trained model
#
# The dataset used here is the [Census-Income KDD Dataset](https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29). You select features from this dataset, clean the data, and turn the data into features that the training algorithm can use to train a binary classification model, and split the data into train and test sets. The task is to predict whether rows representing census responders have an income greater than `$50,000`, or less than `$50,000` by training a logistic regression model.
# ## Mounting the EFS filesystem
# +
import boto3
# EFS client, used below to discover the filesystem's NFS mount target.
client = boto3.client('efs')
# -
# IP address of the first mount target of the (hard-coded) filesystem fs-7b1a6df8.
ip_addr = client.describe_mount_targets(FileSystemId='fs-7b1a6df8')['MountTargets'][0]['IpAddress']
ip_addr
# + language="sh"
# mkdir efs
# sudo mount -t nfs \
# -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 \
# 10.0.2.136:/ \
# ./efs
#
# sudo chmod go+rw ./efs
# -
# ## Data pre-processing and feature engineering
# To run the scikit-learn preprocessing script as a processing job, create a `SKLearnProcessor`, which lets you run scripts inside of processing jobs using the scikit-learn image provided.
# !jupyter kernelspec list
# !pip install pandas==0.25.3
# !pip install scikit-learn==0.21.3
# +
import boto3
import sagemaker
from sagemaker import get_execution_role
from sagemaker.sklearn.processing import SKLearnProcessor

# Region and IAM role come from the notebook's own execution context.
region = boto3.session.Session().region_name
role = get_execution_role()

# Managed scikit-learn 0.20.0 container that will run preprocessing.py below.
sklearn_processor = SKLearnProcessor(framework_version='0.20.0',
                                     role=role,
                                     instance_type='ml.m5.xlarge',
                                     instance_count=1)
# -
# Before introducing the script you use for data cleaning, pre-processing, and feature engineering, inspect the first 20 rows of the dataset. The target is predicting the `income` category. The features from the dataset you select are `age`, `education`, `major industry code`, `class of worker`, `num persons worked for employer`, `capital gains`, `capital losses`, and `dividends from stocks`.
# +
import pandas as pd

# Peek at the first rows of the raw census CSV straight from the sample-data bucket.
input_data = 's3://sagemaker-sample-data-{}/processing/census/census-income.csv'.format(region)
df = pd.read_csv(input_data, nrows=10)
df.head(n=10)
# -
# This notebook cell writes a file `preprocessing.py`, which contains the pre-processing script. You can update the script, and rerun this cell to overwrite `preprocessing.py`. You run this as a processing job in the next cell. In this script, you
#
# * Remove duplicates and rows with conflicting data
# * transform the target `income` column into a column containing two labels.
# * transform the `age` and `num persons worked for employer` numerical columns into categorical features by binning them
# * scale the continuous `capital gains`, `capital losses`, and `dividends from stocks` so they're suitable for training
# * encode the `education`, `major industry code`, `class of worker` so they're suitable for training
# * split the data into training and test datasets, and saves the training features and labels and test features and labels.
#
# Our training script will use the pre-processed training features and labels to train a model, and our model evaluation script will use the trained model and pre-processed test features and labels to evaluate the model.
# +
# %%writefile preprocessing.py
import argparse
import os
import warnings
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelBinarizer, KBinsDiscretizer
from sklearn.preprocessing import PolynomialFeatures
from sklearn.compose import make_column_transformer
from pickle import dump
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)

# Feature subset used for modelling, plus the binary target column `income`.
columns = ['age', 'education', 'major industry code', 'class of worker', 'num persons worked for employer',
           'capital gains', 'capital losses', 'dividends from stocks', 'income']
# Raw label strings as they appear in the CSV; mapped to 0/1 during cleaning.
class_labels = [' - 50000.', ' 50000+.']
def print_shape(df):
    """Log the dataframe dimensions together with its income class balance."""
    neg, pos = np.bincount(df['income'])
    print(f'Data shape: {df.shape}, {pos} positive examples, {neg} negative examples')
if __name__=='__main__':
    parser = argparse.ArgumentParser()
    # Fraction of rows held out for the test split (default 30%).
    parser.add_argument('--train-test-split-ratio', type=float, default=0.3)
    args, _ = parser.parse_known_args()
    print('Received arguments {}'.format(args))
    # SageMaker Processing mounts the job input under /opt/ml/processing/input.
    input_data_path = os.path.join('/opt/ml/processing/input', 'census-income.csv')
    print('Reading input data from {}'.format(input_data_path))
    df = pd.read_csv(input_data_path)
    df = pd.DataFrame(data=df, columns=columns)
    # Basic cleaning: drop missing/duplicate rows and map the income labels to 0/1.
    df.dropna(inplace=True)
    df.drop_duplicates(inplace=True)
    df.replace(class_labels, [0, 1], inplace=True)
    negative_examples, positive_examples = np.bincount(df['income'])
    print('Data after cleaning: {}, {} positive examples, {} negative examples'.format(df.shape, positive_examples, negative_examples))
    split_ratio = args.train_test_split_ratio
    print('Splitting data into train and test sets with ratio {}'.format(split_ratio))
    X_train, X_test, y_train, y_test = train_test_split(df.drop('income', axis=1), df['income'], test_size=split_ratio, random_state=0)
    # NOTE(review): the (columns, transformer) tuple order matches the old
    # scikit-learn 0.20-style make_column_transformer signature used by the
    # pinned 0.20.0 container; newer releases expect (transformer, columns).
    preprocess = make_column_transformer(
        (['age', 'num persons worked for employer'], KBinsDiscretizer(encode='onehot-dense', n_bins=10)),
        (['capital gains', 'capital losses', 'dividends from stocks'], StandardScaler()),
        (['education', 'major industry code', 'class of worker'], OneHotEncoder(sparse=False))
    )
    print('Running preprocessing and feature engineering transformations')
    # Fit the transformers on the train split only, then apply to both splits.
    train_features = preprocess.fit_transform(X_train)
    test_features = preprocess.transform(X_test)
    print('Train data shape after preprocessing: {}'.format(train_features.shape))
    print('Test data shape after preprocessing: {}'.format(test_features.shape))
    # Output locations that SageMaker Processing uploads back to S3.
    train_features_output_path = os.path.join('/opt/ml/processing/train', 'train_features.csv')
    train_labels_output_path = os.path.join('/opt/ml/processing/train', 'train_labels.csv')
    test_features_output_path = os.path.join('/opt/ml/processing/test', 'test_features.csv')
    test_labels_output_path = os.path.join('/opt/ml/processing/test', 'test_labels.csv')
    print('Saving training features to {}'.format(train_features_output_path))
    pd.DataFrame(train_features).to_csv(train_features_output_path, header=False, index=False)
    print('Saving test features to {}'.format(test_features_output_path))
    pd.DataFrame(test_features).to_csv(test_features_output_path, header=False, index=False)
    print('Saving training labels to {}'.format(train_labels_output_path))
    y_train.to_csv(train_labels_output_path, header=False, index=False)
    print('Saving test labels to {}'.format(test_labels_output_path))
    y_test.to_csv(test_labels_output_path, header=False, index=False)
    # Persist the fitted transformer so inference can reuse the same encoding.
    dump(preprocess, open('/opt/ml/processing/processor/preprocessor.pkl', 'wb'))
# -
# Run this script as a processing job. Use the `SKLearnProcessor.run()` method. You give the `run()` method one `ProcessingInput` where the `source` is the census dataset in Amazon S3, and the `destination` is where the script reads this data from, in this case `/opt/ml/processing/input`. These local paths inside the processing container must begin with `/opt/ml/processing/`.
#
# Also give the `run()` method a `ProcessingOutput`, where the `source` is the path the script writes output data to. For outputs, the `destination` defaults to an S3 bucket that the Amazon SageMaker Python SDK creates for you, following the format `s3://sagemaker-<region>-<account_id>/<processing_job_name>/output/<output_name/`. You also give the ProcessingOutputs values for `output_name`, to make it easier to retrieve these output artifacts after the job is run.
#
# The `arguments` parameter in the `run()` method are command-line arguments in our `preprocessing.py` script.
# +
from sagemaker.processing import ProcessingInput, ProcessingOutput

# Run the preprocessing script as a SageMaker Processing job. The census CSV
# is staged under /opt/ml/processing/input and the three output directories
# are uploaded to S3 under the SDK's default bucket.
job_inputs = [
    ProcessingInput(source=input_data,
                    destination='/opt/ml/processing/input'),
]
job_outputs = [
    ProcessingOutput(output_name='train_data',
                     source='/opt/ml/processing/train'),
    ProcessingOutput(output_name='test_data',
                     source='/opt/ml/processing/test'),
    ProcessingOutput(output_name='saved_processor',
                     source='/opt/ml/processing/processor'),
]
sklearn_processor.run(code='preprocessing.py',
                      inputs=job_inputs,
                      outputs=job_outputs,
                      arguments=['--train-test-split-ratio', '0.2'])

# Resolve the S3 URIs of the three named outputs from the job description.
preprocessing_job_description = sklearn_processor.jobs[-1].describe()
output_config = preprocessing_job_description['ProcessingOutputConfig']
for output in output_config['Outputs']:
    name = output['OutputName']
    uri = output['S3Output']['S3Uri']
    if name == 'train_data':
        preprocessed_training_data = uri
    elif name == 'test_data':
        preprocessed_test_data = uri
    elif name == 'saved_processor':
        preprocessor = uri
# -
preprocessor
# + language="sh"
# mkdir efs/ml/sagemaker_model
# # NOTE(review): the original cell also contained the Python call
# #   os.system("aws s3 cp {} efs/ml/sagemaker_model".format(preprocessor))
# # which is not valid shell and will fail inside a %%sh cell. Run it from a
# # Python cell instead, or invoke `aws s3 cp <preprocessor-uri> efs/ml/sagemaker_model` here directly.
#
# -
# Now inspect the output of the pre-processing job, which consists of the processed features.
training_features = pd.read_csv(preprocessed_training_data + '/train_features.csv', nrows=10)
print('Training features shape: {}'.format(training_features.shape))
training_features.head(n=10)
# ## Training using the pre-processed data
#
# We create a `SKLearn` instance, which we will use to run a training job using the training script `train.py`.
# +
from sagemaker.sklearn.estimator import SKLearn
sklearn = SKLearn(
entry_point='train.py',
train_instance_type="ml.m5.xlarge",
role=role)
# -
# The training script `train.py` trains a logistic regression model on the training data, and saves the model to the `/opt/ml/model` directory, which Amazon SageMaker tars and uploads into a `model.tar.gz` file into S3 at the end of the training job.
# +
# %%writefile train.py
# Training entry point executed inside the SageMaker SKLearn container.
# Reads the preprocessed features/labels from the 'train' channel, fits a
# logistic regression, and writes the model to /opt/ml/model so SageMaker
# packages it into model.tar.gz at the end of the job.
import os
import pandas as pd
from sklearn.linear_model import LogisticRegression
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
# this import only works on the older scikit-learn pinned in the container.
# Confirm the container version, or switch to `import joblib` when upgrading.
from sklearn.externals import joblib
if __name__=="__main__":
    training_data_directory = '/opt/ml/input/data/train'
    train_features_data = os.path.join(training_data_directory, 'train_features.csv')
    train_labels_data = os.path.join(training_data_directory, 'train_labels.csv')
    print('Reading input data')
    # The preprocessing job wrote these CSVs without header rows.
    X_train = pd.read_csv(train_features_data, header=None)
    y_train = pd.read_csv(train_labels_data, header=None)
    # class_weight='balanced' compensates for the skewed income classes.
    model = LogisticRegression(class_weight='balanced', solver='lbfgs')
    print('Training LR model')
    model.fit(X_train, y_train)
    model_output_directory = os.path.join('/opt/ml/model', "model.joblib")
    print('Saving model to {}'.format(model_output_directory))
    joblib.dump(model, model_output_directory)
# -
# Run the training job using `train.py` on the preprocessed training data.
sklearn.fit({'train': preprocessed_training_data})
training_job_description = sklearn.jobs[-1].describe()
model_data_s3_uri = '{}{}/{}'.format(
training_job_description['OutputDataConfig']['S3OutputPath'],
training_job_description['TrainingJobName'],
'output/model.tar.gz')
model_data_s3_uri
os.system("aws s3 cp {} efs/ml/sagemaker_model".format(model_data_s3_uri))
os.system("tar -xzf efs/ml/sagemaker_model/model.tar.gz --directory efs/ml/sagemaker_model")
# ## Creating Sample Test Data For Inference
os.system('mkdir test_data')
import pandas as pd
df = pd.read_csv('s3://sagemaker-sample-data-us-east-1/processing/census/census-income.csv')
df1 =df.sample(5)
df1['income']
pd.DataFrame(df1).to_csv("test_data/test_data.csv", index=False)
# + language="sh"
#
# aws s3 cp test_data/test_data.csv s3://lambdatestbucket
# -
df2 = pd.read_csv("s3://lambdatestbucket/test_data.csv")
df2
# ## Inference
#
# +
import json
import os
import tarfile
from pickle import load
import pandas as pd
from sklearn.externals import joblib
from sklearn.metrics import classification_report, roc_auc_score, accuracy_score
model_path = os.path.join('efs/ml/sagemaker_model', 'model.joblib')
preprocessor_path = os.path.join('efs/ml/sagemaker_model','preprocessor.pkl' )
preprocessor = load(open(preprocessor_path, 'rb'))
print("Preprocessor Loaded")
print('Loading model')
model = joblib.load(model_path)
# -
columns = ['age', 'education', 'major industry code', 'class of worker', 'num persons worked for employer',
'capital gains', 'capital losses', 'dividends from stocks', 'income']
class_labels = [' - 50000.', ' 50000+.']
print('Loading test input data')
test_data = "test_data/test_data.csv"
df = pd.read_csv(test_data)
df = pd.DataFrame(data=df, columns=columns)
df.dropna(inplace=True)
df.drop_duplicates(inplace=True)
df.replace(class_labels, [0, 1], inplace=True)
X_test = df.drop('income', axis=1)
y_test = df['income']
X_test
# +
print('Running preprocessing and feature engineering transformations')
test_features = preprocessor.transform(X_test)
test_features_output_path = os.path.join('test_data', 'test_features.csv')
test_labels_output_path = os.path.join('test_data', 'test_labels.csv')
print('Saving test features to {}'.format(test_features_output_path))
pd.DataFrame(test_features).to_csv(test_features_output_path, header=False, index=False)
print('Saving test labels to {}'.format(test_labels_output_path))
y_test.to_csv(test_labels_output_path, header=False, index=False)
X_test = pd.read_csv(test_features_output_path, header=None)
actual_values = pd.read_csv(test_labels_output_path, header=None)
# -
# Run the trained model on the held-out features and map the 0/1 class codes
# back to human-readable income labels for side-by-side display.
predictions = model.predict(X_test)
predictions_df = pd.DataFrame(predictions)
predictions_df.replace([0,1], ["Less than 50K", "Greater than 50K"], inplace=True)
actual_values.replace([0,1], ["Less than 50K", "Greater than 50K"], inplace=True)
print("Actual Values:")
actual_values
print("Model Predictions:")  # fixed typo: was "Model Predicitons:"
predictions_df
| SageMaker_EFS_Lambda_Integration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="mUKVrZFTWcav"
'''
<NAME>, <NAME>
MSc student in Artificial Intelligence
@ Alma Mater Studiorum, University of Bologna
March, 2021
'''
# + [markdown] id="I5ZafFAkWfqy"
# ## Import and parameters
# + id="VcKjsZbL8WKi"
# Doc: https://segmentation-models.readthedocs.io/
# !pip install -U segmentation-models
# + id="-T_wmed9Sxgk" colab={"base_uri": "https://localhost:8080/"} outputId="b6c553c3-61e1-4228-abda-4b6eb00ab7be"
# %env SM_FRAMEWORK = tf.keras
# + id="TKV-D10pmvoJ" colab={"base_uri": "https://localhost:8080/"} outputId="684cf712-8dce-4f8e-ff2f-f5517e3497f3"
import os
import random
import numpy as np
import tensorflow as tf
import keras
import segmentation_models
from tqdm import tqdm
from pycocotools.coco import COCO
import pandas as pd
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# + id="TBBuo048QjQN" colab={"base_uri": "https://localhost:8080/"} outputId="f9598e74-b34e-4938-efa6-65ab108edac9"
''' Upload files from the drive '''
from google.colab import drive
drive.mount('/content/drive')
# + id="9ppvMBWgnAdJ"
'''Image parameters'''
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_CHANNELS = 3
CLASSES = 18 # 16 categories + background + other categories
# + [markdown] id="6EIDoBDLwSUZ"
# # Loading data
# + colab={"base_uri": "https://localhost:8080/"} id="Y2wF3L1DnZkw" outputId="4676f317-b233-4095-ce76-1f8ad4a1d471"
'''Loading annotations file for the training set'''
annFile = '/content/drive/MyDrive/deep_learning_project/train/annotations.json'
coco_train = COCO(annFile)
# + id="az9rj11onZkw"
'''Display COCO categories'''
cats = coco_train.loadCats(coco_train.getCatIds())
nms = [cat['name'] for cat in cats]
# + id="KOq3qlCUqYtr" colab={"base_uri": "https://localhost:8080/"} outputId="379bcfcc-38dd-4ecb-d0ae-2e2c818c07be"
'''Getting all categories with respect to their total images and showing the 16 most frequent categories'''
no_images_per_category = {}
for n, i in enumerate(coco_train.getCatIds()):
imgIds = coco_train.getImgIds(catIds=i)
label = nms[n]
no_images_per_category[label] = len(imgIds)
img_info = pd.DataFrame(coco_train.loadImgs(coco_train.getImgIds()))
'''Most frequent categories'''
categories = pd.DataFrame(no_images_per_category.items()).sort_values(1).iloc[::-1][0][:30].tolist()[0:16]
print(categories)
'''Dict with most frequent categories, the ones we chose'''
category_channels = dict(zip(categories, range(1, len(categories) + 1)))
print(category_channels)
# + colab={"base_uri": "https://localhost:8080/"} id="kodNHwphnZkx" outputId="87157376-8219-4d16-b8b7-b751d42b5e10"
'''Extraction of COCO annotations for the selected categories'''
# Walk the per-category folders of the reduced training set and collect the
# COCO image record for every image file found.
image_directory = '/content/drive/MyDrive/deep_learning_project/train_reduced/images/'
folder_cats = os.listdir(image_directory)
coco_imgs_train = []
for i, folder in tqdm(enumerate(folder_cats), total = len(folder_cats), position = 0, leave = True):
    if not folder.startswith('.'):  # skip hidden entries such as .ipynb_checkpoints
        images_train = os.listdir(image_directory + folder)
        for image_name in images_train:
            # File names are zero-padded image ids (hence the lstrip("0")).
            imgId = int(coco_train.getImgIds(imgIds = [image_name.split('.')[0]])[0].lstrip("0"))
            coco_imgs_train.append(coco_train.loadImgs([imgId])[0])
TRAINING_SET_SIZE = len(coco_imgs_train)
# + [markdown] id="HwU6CS1SWKqg"
# # Generators
# + id="iLJPdPVbnZky"
'''Creating masks splitted out in channels: each channel corresponds to one category'''
def read_resize_image(coco_img, path):
    """Load a COCO image from disk as an RGB array resized to the model input size.

    coco_img : COCO image record (dict with a 'file_name' key)
    path     : directory prefix that the file name is appended to
    """
    bgr = cv2.imread(path + coco_img['file_name'], cv2.IMREAD_COLOR)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    resized = cv2.resize(rgb, (IMAGE_WIDTH, IMAGE_HEIGHT))
    return np.asarray(resized)
def generate_mask(coco_img, coco_annotations):
    """Build a (height, width, CLASSES) mask for one COCO image.

    Channel 0 is background, channels 1..len(category_channels) hold the
    selected categories, and the last channel accumulates every other
    category. The mask is resized to (IMAGE_WIDTH, IMAGE_HEIGHT) at the end.
    """
    annIds = coco_annotations.getAnnIds(imgIds=coco_img['id'], iscrowd = None)
    anns = coco_annotations.loadAnns(annIds)
    mask = np.zeros((coco_img['height'], coco_img['width'], CLASSES), dtype=np.float32)
    # Setting all pixels of the background channel to 1
    mask[:,:,0] = np.ones((coco_img['height'], coco_img['width']), dtype=np.float32)
    for ann in anns:
        # Resolve the annotation's category id to its name via the global `cats`.
        catName = [cat['name'] for cat in cats if cat['id'] == ann['category_id']][0]
        if catName in category_channels:
            # Selected category: write its binary mask into its own channel
            # and carve the same pixels out of the background channel.
            mask[:,:,category_channels[catName]] = coco_annotations.annToMask(ann)
            mask[:,:,0] -= mask[:,:,category_channels[catName]]
        else:
            # Any non-selected category is accumulated in the last channel.
            mask[:,:,-1] += coco_annotations.annToMask(ann)
            mask[:,:,0] -= mask[:,:,-1]
    # Clamp to [0, 1]: overlapping annotations can push values outside the range.
    mask[mask < 0] = 0
    mask[mask > 1] = 1
    mask = (cv2.resize(mask, (IMAGE_WIDTH, IMAGE_HEIGHT)))
    return mask
def dataset_generator(coco_imgs, path, coco_annotations, cats, category_channels, dataset_size, batch_size):
    """Infinite generator of (images, masks) batches for Keras-style training.

    Yields tuples of:
      batch_features : (batch_size, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)
      batch_labels   : (batch_size, IMAGE_WIDTH, IMAGE_HEIGHT, CLASSES)
    Reshuffles `coco_imgs` in place and restarts from the beginning whenever
    the batch after the current one would run past `dataset_size`.
    NOTE(review): `cats` and `category_channels` are accepted but never used
    here (generate_mask reads the module-level globals) — confirm before
    cleaning up. The same two numpy buffers are reused for every yield.
    """
    batch_features = np.zeros((batch_size, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS))
    batch_labels = np.zeros((batch_size, IMAGE_WIDTH, IMAGE_HEIGHT, CLASSES), dtype = np.float64)
    c = 0  # cursor: index of the first image of the next batch
    random.shuffle(coco_imgs)
    while True:
        for i in range(c, c + batch_size):
            coco_img = coco_imgs[i]
            batch_features[i - c] = read_resize_image(coco_img, path)
            batch_labels[i - c] = generate_mask(coco_img, coco_annotations)
        c = c + batch_size
        # Restart (and reshuffle) once the next batch would overrun the dataset.
        if(c + batch_size >= dataset_size):
            c = 0
            random.shuffle(coco_imgs)
        yield (batch_features, batch_labels)
# + colab={"base_uri": "https://localhost:8080/"} id="mCNvWPDgs0tK" outputId="cf8ca1ed-48f9-4cbd-c20e-3bd13bcafefa"
'''Loading annotations file for the validation set'''
annFile = '/content/drive/MyDrive/deep_learning_project/val/annotations.json'
coco_val = COCO(annFile)
# + colab={"base_uri": "https://localhost:8080/"} id="sUdjPNXws0tM" outputId="ca604794-bc64-4725-9935-8b9695e190be"
'''Extraction of COCO annotations for the selected categories in the validation set'''
# Keep only validation images that contain at least one annotation belonging
# to one of the selected categories.
image_directory = '/content/drive/MyDrive/deep_learning_project/val/images/'
images_val = os.listdir(image_directory)
coco_imgs_val = []
for i, image in tqdm(enumerate(images_val), total = len(images_val), position = 0, leave = True):
    # File names are zero-padded image ids (hence the lstrip("0")).
    imgId = int(coco_val.getImgIds(imgIds = [image.split('.')[0]])[0].lstrip("0"))
    coco_img_val = coco_val.loadImgs([imgId])[0]
    annIds = coco_val.getAnnIds(imgIds=coco_img_val['id'], iscrowd = None)
    anns = coco_val.loadAnns(annIds)
    for ann in anns:
        # Resolve the annotation's category id to its name via the global `cats`.
        catName = [cat['name'] for cat in cats if cat['id'] == ann['category_id']][0]
        if catName in category_channels.keys():
            coco_imgs_val.append(coco_val.loadImgs([imgId])[0])
            break  # one matching annotation is enough; avoids duplicates
VALIDATION_SET_SIZE = len(coco_imgs_val)
# + [markdown] id="N6PtpsUcvl5n"
# # Loss functions and metrics
# + [markdown] id="vE-0G6h2Yqv2"
# ## Smoothed Jaccard distance loss
# + id="NQtH2qq-fhzO"
def jaccard_distance(y_true, y_pred, smooth = 100):
    """Smoothed Jaccard (IoU) distance, averaged over the batch, as a loss.

    The `smooth` term keeps the ratio well-defined when a class is absent
    from both the target and the prediction, and scales the final distance.
    """
    overlap = tf.reduce_sum(y_true * y_pred, axis=-1)
    total = tf.reduce_sum(y_true + y_pred, axis=-1)
    iou = (overlap + smooth) / (total - overlap + smooth)
    return tf.reduce_mean((1 - iou) * smooth)
# + id="jIEI2JFz5aRb"
''' Metrics '''
IoU_metric = segmentation_models.metrics.IOUScore()
F_metric = segmentation_models.metrics.FScore()
''' Losses '''
crossentropy_loss = tf.keras.losses.CategoricalCrossentropy(label_smoothing = 0.5, name = 'categorical_crossentropy')
jaccard_loss = segmentation_models.losses.JaccardLoss()
dice_loss = segmentation_models.losses.DiceLoss()
focal_loss = segmentation_models.losses.CategoricalFocalLoss() # Non ha righe ma la IoU rimane bassa
# Dice + Focal loss
combined_loss = dice_loss + (1 * focal_loss)
# + [markdown] id="wAVWjtxrcNfV"
# # Evaluation
# For a known ground truth mask A, you propose a mask B, then we first compute IoU (Intersection Over Union).
#
# IoU measures the overall overlap between the true region and the proposed region. Then we consider it a **true detection** when there is at least half an overlap, namely when IoU > 0.5
#
# Then we can define the following parameters :
# * Precision (IoU > 0.5);
# * Recall (IoU > 0.5).
#
# The final scoring parameters:
# * AP{IoU > 0.5};
# * AR{IoU > 0.5};
#
# are computed by averaging over all the precision and recall values for all known annotations in the ground truth.
#
# Guide 1: https://www.jeremyjordan.me/evaluating-image-segmentation-models/
#
# Guide 2: https://towardsdatascience.com/metrics-to-evaluate-your-semantic-segmentation-model-6bcb99639aa2
# + id="f8aXJYo-3ukP"
'''Generator initialization'''
path_val = "/content/drive/MyDrive/deep_learning_project/val/images/"
gen_val = dataset_generator(coco_imgs = coco_imgs_val,
path = path_val,
coco_annotations = coco_val,
cats = cats,
category_channels = category_channels,
dataset_size = VALIDATION_SET_SIZE,
batch_size = VALIDATION_SET_SIZE)
model = tf.keras.models.load_model('/content/drive/MyDrive/deep_learning_project/trained_models/[dA]-model_for_segmentation_deep-modella-128-32-jaccard_distance.h5',
custom_objects = {'jaccard_distance': jaccard_distance})
# + id="Luvd_w-G845j"
validation_set = next(gen_val)
images_val_set = validation_set[0] # Images
masks_val_set = validation_set[1] # Masks
# + colab={"base_uri": "https://localhost:8080/", "height": 268} id="ZHmCsL64vDmQ" outputId="ebd4c363-0eef-4564-deb0-c190ce0f4df6"
plt.imshow(images_val_set[4].astype(np.uint8))
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="yxJ6-WvS6PIb" outputId="876a57c1-c481-4707-d327-342834a08692"
predictions = model.predict(images_val_set, verbose = 1)
# + id="JmLSWxgnvSai"
def show_masks_threshold(prediction):
    """Binarize a predicted mask at 0.4 and plot every non-empty channel."""
    labels = ["background"] + list(category_channels.keys()) + ["other"]
    binary = prediction.copy()
    binary[binary >= 0.4] = 1.
    binary[binary < 0.4] = 0.
    for channel in range(CLASSES):
        layer = binary[:, :, channel]
        if np.max(layer) == 0:
            continue  # nothing detected for this class
        plt.imshow(layer)
        plt.title(labels[channel])
        plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 809} id="HQx6S2HI7d_r" outputId="c0f507a1-4783-4569-a2a6-78f7f0813fc2"
'''Showing the predicted masks'''
show_masks_threshold(predictions[4,:,:,:])
# + id="7-KPpveIwxLb"
def show_mask_overlapping(prediction):
    """Collapse a multi-channel prediction into one class-index image and plot it.

    Each channel is binarized at 0.4 and multiplied by its channel index, the
    channels are summed into a single (IMAGE_WIDTH, IMAGE_HEIGHT) plane, and
    the running sum is clamped so it never exceeds the current channel index.
    A legend patch is drawn for every class index present in the result.
    """
    labels = list(category_channels.keys())
    labels.insert(0, "background")
    labels.append("other")
    prediction_threshold = prediction.copy()
    prediction_threshold[prediction_threshold >= 0.4] = 1.
    prediction_threshold[prediction_threshold < 0.4] = 0.
    mask_plot = np.zeros((IMAGE_WIDTH, IMAGE_HEIGHT), dtype = np.float32)
    '''Preparing the mask with overlapping'''
    for i in range(CLASSES):
        # Turn channel i's binary mask into the value i, accumulate, then
        # clamp so overlapping classes resolve to the current index.
        prediction_threshold[:,:,i] = prediction_threshold[:,:,i] * i
        mask_plot += prediction_threshold[:,:,i]
        mask_plot[mask_plot >= i] = i
    # Class indices actually present — used to pick which legend entries to show.
    values = np.array(np.unique(mask_plot), dtype=np.uint8)
    plt.figure(figsize=(8,4))
    im = plt.imshow(mask_plot, interpolation='none')
    # Sample the image's own colormap so legend colors match the plot.
    colors = [ im.cmap(im.norm(value)) for value in range(len(labels))]
    patches = [ mpatches.Patch(color=colors[i], label=labels[i] ) for i in values ]
    plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )
    plt.axis('off')
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 248} id="9Oj3p30YxrrP" outputId="87de5181-6efa-48f4-8ee7-b33bd8bcfb91"
show_mask_overlapping(predictions[4,:,:,:])
# + colab={"base_uri": "https://localhost:8080/"} id="3n-3rE6zBncC" outputId="248ccdeb-aaa1-4824-d8cd-a6083f92a146"
'''Computing the IoU, Recall and Precision metrics'''
def compute_results(mean_iou, recall, precision, mask, prediction):
    """Feed one (mask, prediction) pair into the three stateful metrics and
    return their current values as (recall, precision, mean_iou) scalars."""
    for metric in (mean_iou, recall, precision):
        metric.update_state(mask, prediction)
    return recall.result().numpy(), precision.result().numpy(), mean_iou.result().numpy()
mean_iou = tf.keras.metrics.MeanIoU(num_classes = 17)
recall = tf.keras.metrics.Recall()
precision = tf.keras.metrics.Precision()
mean_iou_results = []
recall_results = []
precision_results = []
threshold = 0.5
for i in range(VALIDATION_SET_SIZE):
mask = masks_val_set[i,:,:,:-1]
prediction = predictions[i,:,:,:-1]
recall_res, precision_res, mean_iou_res = compute_results(mean_iou, recall, precision, mask, prediction)
mean_iou_results.append(mean_iou_res)
mean_iou.reset_states()
if mean_iou_res >= threshold:
precision_results.append(precision_res)
precision.reset_states()
recall_results.append(recall_res)
recall.reset_states()
print('Mean precision: {}.'.format(np.average(precision_results)))
print('Mean recall: {}.'.format(np.average(recall_results)))
print('Calculated on {} samples, over {} total samples, that passed the IoU test.'.format(len(np.asarray(mean_iou_results)[np.asarray(mean_iou_results) >= threshold]), VALIDATION_SET_SIZE))
print(mean_iou_results)
# + colab={"base_uri": "https://localhost:8080/"} id="jXbQq-xhWeUF" outputId="e031914b-0df4-4e6d-a658-37a38dd572a7"
print(np.max(mean_iou_results), np.min(mean_iou_results))
# + colab={"base_uri": "https://localhost:8080/"} id="WuAUqADmE5s1" outputId="6f13daa5-c3f7-46e6-e65a-2c9a1c73d9a8"
print(np.mean(mean_iou_results))
| demo_evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os,datetime,re
from utils import News, Site
# +
class EngENNews(News):
    # One news item scraped from the School of Engineering English news table.

    def __init__(self, tag, base_url):
        '''
        <parameter>
        tag (bs4.element.Tag) : single topic object
        base_url (str) : site root used to absolutize relative links
        '''
        self.tag = tag
        self.base_url = base_url
        self.summary()

    # this should be overrided
    # because the format of news will be different from the others
    def summary(self):
        # The date cell holds a 'YYYY/MM/DD' string (see timeobj's format).
        time_tag = self.tag.find_next("th")
        time_split = time_tag.text.split("/")
        print(f"time_split is {time_split}")  # NOTE(review): debug print left in
        time = "{}/{}/{}".format(time_split[0], time_split[1], time_split[2])
        a_tag = self.tag.find("a")
        if a_tag is not None:
            href = a_tag.get("href")
            if href[0:4] != "http":
                href = self.base_url + href  # absolutize relative links
            self.content = f"《{time}》\n{a_tag.text}\n{href}"
        else:
            # No link: fall back to the text of the next cell plus the
            # listing-page URL.
            content = "\n".join(time_tag.find_next("th").text.split())
            url = "https://www.eng.tohoku.ac.jp/english/news/news4/"
            self.content = f"《{time}》\n{content}\n{url}"
        self.time = self.timeobj(time)

    def timeobj(self, timestr=""):
        # Parse 'YYYY/MM/DD' into a datetime.date.
        tmp = datetime.datetime.strptime(timestr, "%Y/%m/%d")
        return datetime.date(tmp.year, tmp.month, tmp.day)
class EngEN(Site):
    # Scraper for the School of Engineering English news page.
    # NOTE(review): `path` is the same pickle file as the Japanese Eng class
    # below — confirm the two scrapers are not meant to share state.
    path = os.path.join("..", os.path.join("sites_db", "eng.pickle"))
    url = "https://www.eng.tohoku.ac.jp/english/news/news4/"
    base_url = "https://www.eng.tohoku.ac.jp/english"
    majors = ["School of Engineering", "Graduate School of Engineering"]

    def get(self):
        # Fetch the page and turn each table row into an EngENNews item.
        soup = self.request()
        # From here on, rewrite to match the target site's markup.
        info_list = soup.find(class_="table nt news").find_all("tr")
        print(f"info_list is {info_list}")  # NOTE(review): debug print left in
        info_list = self.abstract(info_list)
        info_list = [EngENNews(info, self.base_url) for info in info_list]
        return self.dic(info_list)

    def abstract(self, tags=[]):
        # De-duplicate tags by their text, preserving order.
        # NOTE(review): mutable default argument — harmless only because it is
        # never mutated when no argument is passed; prefer tags=None later.
        result = []
        exception = []
        for tag in tags:
            if tag.text not in exception:
                result.append(tag)
                exception.append(tag.text)
        return result
# -
engen = EngEN()
engen.update()
engen.now
engen.new
# +
class EngNews(News):
    # One news item scraped from the School of Engineering (Japanese) page.

    def __init__(self, tag, base_url):
        '''
        <parameter>
        tag (bs4.element.Tag) : single topic object
        base_url (str) : site root used to absolutize relative links
        '''
        self.tag = tag
        self.base_url = base_url
        self.summary()

    # this should be overrided
    # because the format of news will be different from the others
    def summary(self):
        # The date cell is split on '.'; only the digits of the second part
        # are kept, giving a 'M.D'-style string.
        time_tag = self.tag.find_next("td")
        time_split = time_tag.text.split(".")
        print(f"time_split is {time_split}")  # NOTE(review): debug print left in
        time = "{}.{}".format(time_split[0], re.search(
            r'\d+', (time_split[1])).group())
        a_tag = self.tag.find("a")
        if a_tag is not None:
            href = a_tag.get("href")
            if href[0:4] != "http":
                href = self.base_url + href  # absolutize relative links
            self.content = f"《{time}》\n{a_tag.text}\n{href}"
        else:
            # No link: fall back to the text of the next cell plus a fixed
            # URL for the news page.
            content = "\n".join(time_tag.find_next("td").text.split())
            url = "https://www.eng.tohoku.ac.jp/news/detail-,-id,1561.html"
            self.content = f"《{time}》\n{content}\n{url}"
        self.time = self.timeobj(time)

    def timeobj(self, timestr=""):
        # NOTE(review): the year is hard-coded to 2020; parsed dates will be
        # wrong in any other year — confirm before reuse.
        year = "2020."
        tmp = datetime.datetime.strptime(year + timestr, "%Y.%m.%d")
        return datetime.date(tmp.year, tmp.month, tmp.day)
class Eng(Site):
    # Scraper for the School of Engineering COVID-19 news page (Japanese).
    path = os.path.join("..", os.path.join("sites_db", "eng.pickle"))
    url = "https://www.eng.tohoku.ac.jp/news/detail-,-id,1561.html"
    base_url = "https://www.eng.tohoku.ac.jp"
    majors = ["工学部", "工学研究科"]

    def get(self):
        # Fetch the page, scrape the table rows, and prepend two hand-written
        # pinned announcements before the scraped items.
        soup = self.request()
        # From here on, rewrite to match the target site's markup.
        info_list = soup.find(id="main").find_all("tr")[1:]
        info_list = self.abstract(info_list)
        # Pinned announcements (content is hard-coded, not scraped).
        stick1 = EngNews(info_list[0], self.base_url)
        contents = ["《4/10》",
                    "【新型コロナウイルス感染拡大防止のための自宅待機のお願い】",
                    "令和2年4月9日(木)から5月6日(水)まで、原則として登校を禁止し、研究室活動を制限します",
                    "https://www.eng.tohoku.ac.jp/news/detail-,-id,1582.html",
                    "https://www.eng.tohoku.ac.jp/news/detail-,-id,1581.html"]
        stick1.time = stick1.timeobj(timestr="4.10")
        stick1.content = "\n".join(contents)
        stick2 = EngNews(info_list[0], self.base_url)
        contents = ["《4/10》",
                    "【全学生 要回答】東北大ID受取確認(新入生対象), 遠隔授業の受講環境等の調査を実施しています。",
                    "https://www.eng.tohoku.ac.jp/news/detail-,-id,1576.html#survey"]
        # NOTE(review): stick1.timeobj is called here instead of stick2.timeobj;
        # harmless (same method, no per-instance state), but likely a
        # copy-paste slip.
        stick2.time = stick1.timeobj(timestr="4.10")
        stick2.content = "\n".join(contents)
        sticks = [stick1, stick2]
        ###
        info_list = [EngNews(info, self.base_url) for info in info_list]
        info_list = sticks + info_list
        return self.dic(info_list)

    def abstract(self, tags=[]):
        # De-duplicate tags by their text, preserving order.
        # NOTE(review): duplicated verbatim in EngEN.abstract — consider
        # hoisting into the shared Site base class.
        result = []
        exception = []
        for tag in tags:
            if tag.text not in exception:
                result.append(tag)
                exception.append(tag.text)
        return result
# +
eng = Eng()
# -
eng.now
eng.new
# +
class LawNews:
    """Minimal news record for the Law school page.

    Unlike the scraped news classes, instances start empty and are filled in
    by Law.get().
    """

    def __init__(self):
        # Populated later by the caller (see Law.get()).
        self.time = ""
        self.content = ""

    def timeobj(self, timestr="", year=2020):
        """Parse an 'M/D' string into a datetime.date.

        The page omits the year; it previously was hard-coded to 2020, which
        is kept as the default but can now be overridden via `year`.
        """
        tmp = datetime.datetime.strptime("{}/{}".format(year, timestr), "%Y/%m/%d")
        return datetime.date(tmp.year, tmp.month, tmp.day)
class Law(Site):
    # Scraper for the Law school COVID-19 information page.
    path = os.path.join("..", os.path.join("sites_db", "law.pickle"))
    url = "http://www.law.tohoku.ac.jp/covid19/"
    base_url = "http://www.law.tohoku.ac.jp"
    majors = ["法学部", "法学研究科", "法科大学院", "公共政策大学院"]

    def get(self):
        # This page has no item list; only the "last updated M/D" stamp is
        # scraped and wrapped in a single fixed announcement.
        soup = self.request()
        info_list = []
        permanent1 = LawNews()
        time = soup.find(class_="law-sub-contents pos-left").find("p").text
        # Extract the 'M/D' part after the '更新:' (updated:) marker.
        time = re.search(r"更新:\d+/\d+", time).group().split("更新:")[-1]
        permanent1.content = "《{}》\n新コロナウイルス感染症(COVID-19)への対応についてが更新されました\n{}".format(
            time, self.url)
        permanent1.time = permanent1.timeobj(timestr=time)
        info_list.append(permanent1)
        return self.dic(info_list)
# +
law = Law()
# -
law.now
law.new
| superviser/test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_kaggle)
# language: python
# name: conda_kaggle
# ---
# # UberEats配達データマージ
#
import pandas as pd
# Merge one newly exported raw trips CSV into the master trip table.
MASTER_FILE_PATH = './../data/trip_master.csv'
master = pd.read_csv(MASTER_FILE_PATH, index_col='id')
TARGET_FILE_PATH = "./../rawdata/trips/191230_130421_trips.csv"
df = pd.read_csv(TARGET_FILE_PATH, index_col="id")
# De-duplicate on the trip `url`, preferring the newly read rows (keep="last").
data = pd.concat([master, df], sort=False).drop_duplicates(subset="url", keep="last")
# Written to a _tmp file — presumably promoted to trip_master.csv manually
# after inspection (TODO confirm workflow).
data.to_csv("./../data/trip_master_tmp.csv", index=True)
| notebooks/trip_merge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Final Project Submission
#
# Please fill out:
# * Student name:
# * Student pace: self paced / part time / full time
# * Scheduled project review date/time:
# * Instructor name:
# * Blog post URL:
#
# +
# Your code here - remember to use markdown cells for comments as well!
| student.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Stress Test Instructions
# ## Instructions for stress test #1
# Stress test #1 is to be completed after finishing the following steps:
#
# Step 1: Problem Statement <br>
# Step 2: Data Acquisition <br>
# Step 3: Data Dictionary <br>
#
#
# Here are the main steps you will need to complete to finish stress test #1
#
# 1) Work with your team and decide on the final content you need to present for all the three steps outlined above.
# 2) Create a notebook main.ipynb (if it does not already exist) in the notebooks folder of your project repository, and in this notebook add all three steps outlined above. You should have one markdown cell with each step number and title (just like above) and another markdown cell with your content.
#
# 3) Make sure you commit this to the main/master branch and update the github branches of all the teammates.
# 4) You will find the instructions to Ali’s Project Creation template here https://app.colaberry.com/app/network/network/206/projectinstructions
#
# 5) In Ali’s project creation template, follow the instructions for creating basecamp tickets. Your team will need to create a ticket in the Data Science center for excellence and start putting your updates.
#
# 6) As you move forward in your project, keep updating the ticket that your team creates. This part is essential.
# 7) Take a screenshot of the head of your dataset. I.e., put it in a pandas data frame and take a screenshot.
#
# 8) Add the Problem statement and Data Dictionary to the Readme.md of your Repo.
#
#
# For the stress test submission, upload the stress test document the same way you did for the DS1 projects (as messages in the Data Science Center of excellence, follow the instructions on Ali’s project creation document)
#
#
#
#
# ## Instructions for stress test #2
# Stress test #2 is to be completed after finishing the following steps:
#
# Step 4: Feature extraction <br>
# Step 5: Data Cleaning <br>
# Step 6: EDA and Data Visualizations <br>
# Step 7: Deriving Key insights from EDA and Data visualizations <br>
#
# Step 7 is the equivalent of writing down a summary of all the things that you learnt from EDA and data visualizations.
#
# Deliverables and core things to keep in mind:
# 1) Similar to what you did in stress test #1 write a message in the Data Science center for excellence.
#
# 2) First part of your deliverable will be to put out Data visualizations. Make sure you put a pair plot and discuss some of the general things that you inferred from it. Then you can put all the other interesting plots. Please do not share all the plots (you may generate dozens based on how many variables you have). Try to limit the number of plots you share to a max of 10. Pick the 10 most interesting plots and discuss them.
#
# 3) Make sure that you follow the instructions for plotting data as given in the data visualization section. No plots without title and axis labels. No fancy colors unless the color means something. Remember color must provide meaning!
#
# 4) Your core deliverable will be step 7 where you will write down a summary of all the insights that you have gathered from the EDA. What did you learn about the features? How strong are the relationships between the target variable and the features? How much missing data did you have? What part did you have to clean. You need to present this in a sequential, clear and concise manner. The outline of the summary should follow the same sequence of steps as you carried out during the analysis i.e the first part of your summary should be about data cleaning, what did you learn in this process? Then on the second part about EDA and then third part wrapping all of this up together.
#
# 5) Make sure that you update your Main.ipynb in your main/master branch with all the content. That way we can take a look at all the steps that you have worked on
#
# 6) PLEASE write a lot of comments. Write explanations for everything you are doing. Remember you are using jupyter notebooks meaning you can easily mix markdown and code so please write a small intro to what you are doing in your analysis. It's helpful for the reader and more than that. It's helpful for you in the future!
#
#
# If you have any questions on this contact: <EMAIL>
#
| {{ cookiecutter.repo_name }}/notebooks/Stress_test_instructions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""
Mask R-CNN
Train on the toy Balloon dataset and implement color splash effect.
Copyright (c) 2018 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=coco
# Resume training a model that you had trained earlier
python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=last
# Train a new model starting from ImageNet weights
python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=imagenet
# Apply color splash to an image
python3 balloon.py splash --weights=/path/to/weights/file.h5 --image=<URL or path to file>
# Apply color splash to video using the last weights you trained
python3 balloon.py splash --weights=last --video=<URL or path to file>
"""
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2
from mrcnn.visualize import display_instances
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = 'Desktop/pothole/Mask_RCNN-master/'
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# +
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class CustomConfig(Config):
    """Configuration for training on the pothole dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "pothole"
    # We use a GPU with 6GB memory, which can fit only one image.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 1
    # Number of classes (including background)
    NUM_CLASSES = 1 + 1 # background + pothole
    # Number of training steps per epoch
    STEPS_PER_EPOCH = 100
    # Skip detections with < 90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9
############################################################
# Dataset
############################################################
# +
class CustomDataset(utils.Dataset):
    """Dataset of pothole images annotated with VGG Image Annotator (VIA)
    polygon regions."""

    def load_custom(self, dataset_dir, subset):
        """Load a subset of the dataset.
        dataset_dir: Root directory of the dataset.
        subset: Subset to load: "train" or "val".
        """
        # Add classes. We have only one class to add.
        self.add_class("pothole", 1, "pothole")
        # Train or validation dataset?
        assert subset in ["train", "val"]
        # Proper os.path.join (the original concatenated the strings, which
        # silently required dataset_dir to end with a path separator).
        dataset_dir = os.path.join(dataset_dir, subset)
        # Load annotations
        # VGG Image Annotator saves each image in the form:
        # { 'filename': '28503151_5b5b7ec140_b.jpg',
        #   'regions': {
        #       '0': {
        #           'region_attributes': {},
        #           'shape_attributes': {
        #               'all_points_x': [...],
        #               'all_points_y': [...],
        #               'name': 'polygon'}},
        #       ... more regions ...
        #   },
        #   'size': 100202
        # }
        # We mostly care about the x and y coordinates of each region.
        # 'with' guarantees the file handle is closed (original leaked it).
        with open(os.path.join(dataset_dir, "via_region_data.json"),
                  'r', encoding="utf8", errors='ignore') as f:
            annotations1 = json.load(f)
        annotations = list(annotations1.values())  # don't need the dict keys
        # The VIA tool saves images in the JSON even if they don't have any
        # annotations. Skip unannotated images.
        annotations = [a for a in annotations if a['regions']]
        # Add images
        for a in annotations:
            # Get the x, y coordinates of the points of the polygons that
            # make up the outline of each object instance. They are stored
            # in the shape_attributes (see JSON format above).
            polygons = [r['shape_attributes'] for r in a['regions'].values()]
            # load_mask() needs the image size to convert polygons to masks.
            # Unfortunately, VIA doesn't include it in the JSON, so we must
            # read the image. Only manageable because the dataset is tiny.
            image_path = os.path.join(dataset_dir, a['filename'])
            image = skimage.io.imread(image_path)
            height, width = image.shape[:2]
            self.add_image(
                "pothole",  # for a single class just add the name here
                image_id=a['filename'],  # use file name as a unique image id
                path=image_path,
                width=width, height=height,
                polygons=polygons)

    def load_mask(self, image_id):
        """Generate instance masks for an image.
        Returns:
         masks: A bool array of shape [height, width, instance count] with
             one mask per instance.
         class_ids: a 1D array of class IDs of the instance masks.
        """
        # If not a pothole dataset image, delegate to the parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "pothole":
            return super().load_mask(image_id)
        # Convert polygons to a bitmap mask of shape
        # [height, width, instance_count]
        info = self.image_info[image_id]
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=np.uint8)
        for i, p in enumerate(info["polygons"]):
            # Get indexes of pixels inside the polygon and set them to 1
            rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
            mask[rr, cc, i] = 1
        # Return mask, and array of class IDs of each instance. Since we have
        # one class ID only, we return an array of 1s.
        # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin bool is the documented replacement.
        return mask.astype(bool), np.ones([mask.shape[-1]], dtype=np.int32)

    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        if info["source"] == "pothole":
            return info["path"]
        # Propagate the parent's value (the original dropped the return,
        # yielding None for non-pothole sources).
        return super().image_reference(image_id)
def train(model):
    """Train the network heads of `model` on the pothole dataset."""
    # Build and prepare the training and validation datasets from the
    # CLI-supplied dataset root (train first, then val, as before).
    prepared = {}
    for subset in ("train", "val"):
        ds = CustomDataset()
        ds.load_custom(args.dataset, subset)
        ds.prepare()
        prepared[subset] = ds
    # *** This training schedule is an example. Update to your needs ***
    # Since we're using a very small dataset, and starting from
    # COCO trained weights, we don't need to train too long. Also,
    # no need to train all layers, just the heads should do it.
    # Download mask_rcnn_coco.h5 weights before starting the training.
    print("Training network heads")
    model.train(prepared["train"], prepared["val"],
                learning_rate=config.LEARNING_RATE,
                epochs=15,
                layers='heads')
def color_splash(image, mask):
    """Apply color splash effect.
    image: RGB image [height, width, 3]
    mask: instance segmentation mask [height, width, instance count]
    Returns result image (uint8).
    """
    # Make a grayscale copy of the image. The grayscale copy still
    # has 3 RGB channels, though.
    gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
    # Guard on the instance count *before* collapsing the mask. The original
    # checked mask.shape[0] (the image height, always > 0) after collapsing
    # the last axis to size 1, so its else-branch was unreachable.
    if mask.shape[-1] > 0:
        # We're treating all instances as one, so collapse the mask into one layer
        mask = (np.sum(mask, -1, keepdims=True) >= 1)
        # Copy color pixels from the original color image where mask is set
        splash = np.where(mask, image, gray).astype(np.uint8)
    else:
        # No detections: the whole frame goes grayscale.
        splash = gray.astype(np.uint8)
    return splash
def detect_and_color_splash(model, image_path=None, video_path=None):
    """Run detection on an image or video and save a color-splash copy.

    model: Mask R-CNN model in inference mode.
    image_path: path or URL of an image (takes precedence if both given).
    video_path: path or URL of a video.

    The original signature lacked video_path, so the call site
    `detect_and_color_splash(model, image_path=..., video_path=...)`
    raised TypeError; the added keyword is backward-compatible.
    """
    assert image_path or video_path, "Provide image_path or video_path"
    if image_path:
        # Run model detection and generate the color splash effect
        print("Running on {}".format(image_path))
        # Read image
        image = skimage.io.imread(image_path)
        # Detect objects
        r = model.detect([image], verbose=1)[0]
        # Color splash
        splash = color_splash(image, r['masks'])
        # Save output
        file_name = "splash_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
        skimage.io.imsave(file_name, splash)
    else:
        # Video capture
        vcapture = cv2.VideoCapture(video_path)
        width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = vcapture.get(cv2.CAP_PROP_FPS)
        # Define the codec and create a writer matching the source geometry.
        file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(datetime.datetime.now())
        vwriter = cv2.VideoWriter(file_name,
                                  cv2.VideoWriter_fourcc(*'MJPG'),
                                  fps, (width, height))
        count = 0
        success = True
        while success:
            print("frame: ", count)
            # Read the next frame
            success, image = vcapture.read()
            if success:
                # OpenCV returns images as BGR; convert to RGB for the model.
                image = image[..., ::-1]
                # Detect objects
                r = model.detect([image], verbose=0)[0]
                # Color splash
                splash = color_splash(image, r['masks'])
                # RGB -> BGR to write the frame back out
                vwriter.write(splash[..., ::-1])
                count += 1
        vwriter.release()
    print("Saved to ", file_name)
# +
############################################################
# Training
############################################################
if __name__ == '__main__':
    # CLI entry point: `python3 <script> train|splash [--dataset ...] --weights ...`
    import argparse
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect custom class.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'splash'")
    parser.add_argument('--dataset', required=False,
                        metavar="/path/to/custom/dataset/",
                        help='Directory of the custom dataset')
    parser.add_argument('--weights', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--image', required=False,
                        metavar="path or URL to image",
                        help='Image to apply the color splash effect on')
    parser.add_argument('--video', required=False,
                        metavar="path or URL to video",
                        help='Video to apply the color splash effect on')
    args = parser.parse_args()
    # -
    # Validate arguments
    if args.command == "train":
        assert args.dataset, "Argument --dataset is required for training"
    elif args.command == "splash":
        assert args.image or args.video,\
               "Provide --image or --video to apply color splash"
    print("Weights: ", args.weights)
    print("Dataset: ", args.dataset)
    print("Logs: ", args.logs)
    # Configurations
    if args.command == "train":
        config = CustomConfig()
    else:
        class InferenceConfig(CustomConfig):
            # Set batch size to 1 since we'll be running inference on
            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
        config = InferenceConfig()
    config.display()
    # Create model
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.logs)
    # Select weights file to load
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
        # Download weights file
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == "last":
        # Find last trained weights
        # NOTE(review): newer mrcnn releases return the checkpoint path
        # directly from find_last(); the [1] index assumes the older
        # tuple return -- confirm against the pinned mrcnn version.
        weights_path = model.find_last()[1]
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights
    # Load weights
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])
    else:
        model.load_weights(weights_path, by_name=True)
    # Train or evaluate
    if args.command == "train":
        train(model)
    elif args.command == "splash":
        detect_and_color_splash(model, image_path=args.image,
                                video_path=args.video)
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'splash'".format(args.command))
| Untitled2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys ###motion_test_header
sys.path.append('../scripts/')
from robot import *  # NOTE(review): project module; presumably provides World, Agent, Robot (and np/math via *) -- confirm in ../scripts/robot.py
# +
# NOTE(review): World(20.0, 0.1) -- presumably (time span [s], time step [s]); confirm in robot.py.
world = World(20.0, 0.1) ###motion_test_linear
initial_pose = np.array([0, 0, 0]).T
rot = Agent(0.0, 0.1)
# Run 100 robots with the same initial pose and the same agent.
robots = []
for i in range(100):
    r = Robot(initial_pose, sensor=None, agent=rot)
    world.append(r)
    robots.append(r)
### run the animation ###
world.draw()
# -
import pandas as pd ###motion_test_stats (through the cell below; show roughly the first 5 rows of data)
# For each robot's final pose: distance from the origin (r) and heading (theta).
poses = pd.DataFrame([ [math.sqrt(r.pose[0]**2 + r.pose[1]**2), r.pose[2]] for r in robots], columns=['r', 'theta'])
poses
poses.mean()
poses.var()
poses.std()
| section_particle_filter/parameter/motion_test_2rad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.10 64-bit (''base'': conda)'
# name: python3
# ---
# +
import tensorflow as tf
# Let TensorFlow grow GPU memory on demand instead of reserving it all.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # Currently, memory growth needs to be the same across GPUs
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Memory growth must be set before GPUs have been initialized
        print(e)
train_dir='../../day4/dataset/cats_and_dogs_small/train'
validation_dir='../../day4/dataset/cats_and_dogs_small/validation'
test_dir='../../day4/dataset/cats_and_dogs_small/test'
# Training images: rescale to [0, 1] plus light geometric augmentation.
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True,
    fill_mode='nearest')
# No augmentation on the validation data -- rescaling only.
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
# +
train_generator = train_datagen.flow_from_directory(
        # target directory
        train_dir,
        # resize all images to 150 x 150
        target_size=(150, 150),
        batch_size=20,
        # binary labels, since we use the binary_crossentropy loss
        class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='binary')
# +
## Backbone: VGG16 convolutional base.
conv_base=tf.keras.applications.VGG16(
    weights='imagenet', ## checkpoint to initialize the model from (here: ImageNet-pretrained weights)
    include_top=False ## whether to keep the top FC classifier (ImageNet's 1000-class head is useless for cats vs. dogs, so drop it)
) ## the input image tensor size is left unspecified here
## Inspect the architecture.
## The final feature map is (4, 4, 512), i.e. 4*4*512 = 8192 values; Flatten() turns it into a (batch, 8192) vector for the FC head.
## A fully-connected classifier is attached on top to finish the cats-vs-dogs model.
conv_base.summary()
## Model: VGG16 base -> Flatten -> Dense(512, relu) -> Dense(1, sigmoid).
input_Layer = tf.keras.layers.Input(shape=(150,150,3))
x=conv_base(input_Layer)
x=tf.keras.layers.Flatten()(x)
x= tf.keras.layers.Dense(512, activation='relu')(x)
Out_Layer= tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs=[input_Layer], outputs=[Out_Layer])
# model.summary()
# (message: "number of trainable weight tensors before freezing conv_base")
print(' 동결하기 전 conv_base를훈련되는 가중치의 수:',
      len(model.trainable_weights))
## Train only the fully-connected classifier head, not the backbone.
conv_base.trainable = False ## True would also fine-tune the pretrained backbone weights; False trains only the layers added after it
# Example of enabling/disabling training per layer:
#print(conv_base.layers)
#set_trainable = False
#for layer in conv_base.layers:
#    if layer.name == 'block5_conv1': ## every layer after block5_conv1 becomes trainable
#        set_trainable = True
#    if set_trainable:
#        layer.trainable = True
#    else:
#        layer.trainable = False
# (message: "number of trainable weight tensors after freezing conv_base")
print('conv_base를 동결한 후 훈련되는 가중치의 수:',
      len(model.trainable_weights)) ## 2 Dense layers remain trainable; each has a kernel and a bias -> 4 trainable tensors
model.summary()
# +
loss_function=tf.keras.losses.binary_crossentropy
optimize=tf.keras.optimizers.RMSprop(learning_rate=0.0001)
metric=tf.keras.metrics.binary_accuracy
model.compile(loss=loss_function,
              optimizer=optimize,
              metrics=[metric])
# 100 steps x batch 20 = 2000 train images/epoch; 50 x 20 = 1000 validation images.
history = model.fit(
      train_generator,
      steps_per_epoch=100,
      epochs=30,
      validation_data=validation_generator,
      validation_steps=50,
      verbose=2)
model.save('cats_and_dogs_small_3.h5')
# -
print(history.history.keys())
# +
acc = history.history['binary_accuracy']
val_acc = history.history['val_binary_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
import matplotlib.pyplot as plt
# Training vs. validation accuracy curves.
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
# Training vs. validation loss curves.
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# -
| tensorflow/day5/practice/P_05_00_pretraind_fine_tuning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: desdeo-problem
# language: python
# name: desdeo-problem
# ---
# # Data Based Problems
#
# The DESDEO framework provides handling of data-driven optimization problems. Some methods, such as E-NAUTILUS in `desdeo-mcdm`, find the most preferred solution from a provided dataset. Other methods, such as most of the EAs from `desdeo-emo`, require a surrogate model to be trained for each of the objectives. The `desdeo_problem` package provides support for both of these cases.
#
# For data based problems, use the data specific objective/problem classes
import pandas as pd
import numpy as np
# VectorDataObjective is an objective class that can handle data, as well as multi-objective evaluators.
#
# The GaussianProcessRegressor here is the same as the one in scikit-learn, with one small difference: the predict method has been replaced to return uncertainty values (in the form of the standard deviation of the prediction) by default. It supports hyperparameters in the same format as the sklearn method.
from desdeo_problem import VectorDataObjective as VDO
from desdeo_problem.surrogatemodels.SurrogateModels import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
# ### Creating some random data
#
# 'a' and 'b' are randomly generated between 0 and 1.
#
# f1 = a + b
# f2 = a * b
#
# For data-driven problems, make sure that the input dataset is in the pandas DataFrame format, with the column names being the same as the variable/objective names.
# +
# 100 samples of two decision variables a, b drawn uniformly from [0, 1).
data = np.random.rand(100,2)
# Objectives as column vectors: f1 = a + b, f2 = a * b.
f1 = (data[:,0]+data[:,1]).reshape(-1,1)
f2 = (data[:,0]*data[:,1]).reshape(-1,1)
data = np.hstack((data, f1, f2))
X = ['a','b']
y = ['f1','f2']
# Column names must match the variable/objective names.
datapd = pd.DataFrame(data, columns=X+y)
datapd.head()
# -
# ### Using VectorDataObjective class
#
# The `VectorDataObjective` class takes as its input the data in a dataframe format and the objective names in a list.
# Data-driven multiobjective objective over columns 'f1', 'f2' of datapd.
obj = VDO(data=datapd, name=y)
# ### Training surrogate models
#
# Pass the surrogate modelling technique and the model parameters to the train method of the objective instance.
#
# If only one modelling technique is passed, the `model_parameters` should be a dict (or None) and this will be used for all the objectives.
#
# If multiple modelling techniques are passed, `models` should be the list of modelling techniques, and `model_parameters` should be a list of dicts. The length of these lists should be the same as the number of objectives and each list element will be used to train one objective in order.
# One modelling technique + one parameter dict: the same Matern-kernel GP is
# trained for every objective.
obj.train(models=GaussianProcessRegressor, model_parameters={'kernel': Matern(nu=1.5)})
# ### Using surrogate models to evaluate objective values
#
# Use the obj.evaluate method to get predictions. Note that `use_surrogates` should be true.
# Predict through the surrogates; input shape is (n_samples, n_variables).
print(obj.evaluate(np.asarray([[0.5,0.3]]), use_surrogate=True))
# Internal flag recording which objectives have a trained model.
obj._model_trained
# ### Creating data problem class
#
# Creating the objective class should be bypassed for now, use `DataProblem` class directly with the data in a dataframe.
#
# The `DataProblem` provides a `train` method which trains all the objectives sequentially. The input arguments for this train method is the same as that of the `VectorDataObjective` class.
#
# To make sure that the `evaluate` method uses the surrogate models for evaluations, pass the `use_surrogate=True` argument.
from desdeo_problem import DataProblem
# f1 is maximized, f2 minimized.
maximize = pd.DataFrame([[True, False]], columns=['f1','f2'])
prob = DataProblem(data=datapd, objective_names=y, variable_names=X, maximize=maximize)
# Trains a surrogate for every objective sequentially.
prob.train(GaussianProcessRegressor)
print(prob.evaluate(np.asarray([[0.1,0.8], [0.5,0.3]]), use_surrogate=True))
# ## Lipschitian models
from desdeo_problem.surrogatemodels.lipschitzian import LipschitzianRegressor
# Same workflow with a Lipschitzian surrogate.
# NOTE(review): maximize is not passed here -- presumably all objectives
# default to minimization; confirm against DataProblem's signature.
prob = DataProblem(data=datapd, objective_names=y, variable_names=X)
prob.train(LipschitzianRegressor)
print(prob.evaluate(np.asarray([[0.1,0.8], [0.5,0.3]]), use_surrogate=True))
| docs/notebooks/Defining_a_data_based_problem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Portfolio Value at Risk and Conditional Value at Risk
#
# By <NAME> and <NAME>.
#
# Part of the Quantopian Lecture Series:
#
# * [www.quantopian.com/lectures](https://www.quantopian.com/lectures)
# * [github.com/quantopian/research_public](https://github.com/quantopian/research_public)
#
# Notebook released under the Creative Commons Attribution 4.0 License.
#
# ---
#
# Value at Risk (VaR) is a key concept in portfolio risk management. It uses the past observed distribution of portfolio returns to estimate what your future losses might be at difference likelihood levels. Let's demonstrate this concept through an example.
# +
import numpy as np
import pandas as pd
from scipy.stats import norm
import time
import matplotlib.pyplot as plt
# -
# ## Simulated Data Example
#
# Let's simulate some returns of 10 hypothetical assets.
#
# ####NOTE
#
# We use normal distributions to simulate the returns, in practice real returns will almost never follow normal distributions and usually have weird behavior including fat tails. We'll discuss this more later.
# +
# mu = 0.01, std = 0.10, 1000 bars, 10 assets
mu = 0.01
sigma = 0.10
bars = 1000
num_assets = 10
# (bars, num_assets) matrix of i.i.d. normal returns.
returns = np.random.normal(mu, sigma, (bars, num_assets))
# Fake asset names
names = ['Asset %s' %i for i in range(num_assets)]
# Put in a pandas dataframe
returns = pd.DataFrame(returns, columns=names)
# Plot the first 50 bars (head); the original comment said "last" but
# head(50) selects the first 50 rows.
plt.plot(returns.head(50))
plt.xlabel('Time')
plt.ylabel('Return');
# -
# The Value at Risk (VaR) for coverage $\alpha$ is defined as the maximum amount we could expect to lose with likelihood $p = 1 - \alpha$. Put another way, on no more than $100 \times p \%$ of days should we expect to lose more than the VaR. There are many ways to estimate VaR and none of them are perfect. In fact you should not put complete trust in VaR; it is rather intended as a way to get a sense of how much might be lost in different levels of extreme scenarios, and provide this info to people responsible for risk management.
#
# VaR for a high $\alpha$ is a measure of worst case outcomes. For example one might track their $\alpha = 0.999$ VaR to understand how a 1/1000 crisis event might affect them. Because real distributions tend to diverge and become less and less consistent the further along the tail we go, extreme VaR should be taken with a grain of salt.
#
# ### Relationship to Confidence Intervals
#
# For those familiar with confidence intervals, VaR is very similar. The idea of trying to cover a set of possible values with an interval specified by $\alpha$ is similar to how VaR tries to cover a set of possible losses. For those unfamiliar there is a lecture available [here](https://www.quantopian.com/lectures/confidence-intervals).
# ### Historical (Non-Parametric) VaR
#
# We'll use historical VaR, which looks at previous returns distributions and uses that to compute the $p$ percentile. This percentile is the amount of loss you could reasonably expect to experience with probability $p$, assuming future returns are close to past returns. Again, this isn't perfect, and requires that there is no regime change in which the returns distribution changes. For instance, if your historical window doesn't include any crisis events, your VaR estimate will be far lower than it should be.
#
# To compute historical VaR for coverage $\alpha$ we simply take the $100 \times (1 - \alpha)$ percentile of the lowest observed returns and multiply that by our total value invested.
#
# Now let's compute the VaR of this set of 10 assets. To do this we need a set of portfolio weights. We'll start super simple.
# Equal weights across the 10 simulated assets.
weights = np.ones((10, 1))
# Normalize
weights = weights / np.sum(weights)
def value_at_risk(value_invested, returns, weights, alpha=0.95, lookback_days=520):
    """Historical (non-parametric) VaR for coverage `alpha`.

    Takes the 100*(1 - alpha) percentile of the trailing `lookback_days`
    window of weighted portfolio returns and scales it by the invested value.
    NaN returns are treated as zero.
    """
    recent = returns.fillna(0.0).iloc[-lookback_days:]
    # Collapse the per-asset returns into one weighted portfolio return series.
    portfolio_returns = recent.dot(weights)
    loss_percentile = np.percentile(portfolio_returns, 100 * (1 - alpha))
    return loss_percentile * value_invested
# We'll compute the VaR for $\alpha = 0.95$.
# +
# Dollar value of the portfolio; 95%-coverage historical VaR.
value_invested = 1000000
value_at_risk(value_invested, returns, weights, alpha=0.95)
# -
# Interpreting this, we say that historically no more than $5\%$ of days resulted in losses more extreme than this, or that on each day your probability of losing this much is less than $5\%$. Keeping in mind that any forecast like this is just an estimate.
#
# ## Normal vs. Non-Parametric Historical VaR
#
# ### Normal Case
#
# A special case of VaR is when you assume that the returns follow a given distribution rather than non-parametrically estimating it historically. In this case a normal VaR would fit our data, because all our returns were simulated from a normal distribution. We can check this by using a normal distribution Cumulative Distribution Function (CDF), which sums the area under a normal curve to figure out how likely certain values are. We'll use an inverse CDF, or PPF, which for a given likelihood will tell us to which value that likelihood corresponds.
#
# Specifically, the closed form formula for Normal VaR is
#
# $$VaR_{\alpha}(x) = \mu - \sigma N^{-1}(\alpha)$$
# +
# Portfolio mean return is unchanged, but std has to be recomputed
# This is because independent variances sum, but std is sqrt of variance
# Equal-weight portfolio of independent assets: std = sqrt(n * sigma^2) / n.
portfolio_std = np.sqrt( np.power(sigma, 2) * num_assets ) / num_assets
# manually
(mu - portfolio_std * norm.ppf(0.95)) * value_invested
# -
# Seems close enough to within some random variance. Let's visualize the continuous normal case. Notice that the VaR is expressed as a return rather than an absolute loss. To get the absolute loss we just need to multiply by the value invested.
# +
def value_at_risk_N(mu=0, sigma=1.0, alpha=0.95):
    """Closed-form VaR (as a return) for N(mu, sigma) at coverage alpha."""
    z = norm.ppf(alpha)
    return mu - sigma * z
# Plot the normal return density and mark the closed-form VaR (as a return).
x = np.linspace(-3*sigma,3*sigma,1000)
y = norm.pdf(x, loc=mu, scale=portfolio_std)
plt.plot(x,y);
plt.axvline(value_at_risk_N(mu = 0.01, sigma = portfolio_std, alpha=0.95), color='red', linestyle='solid');
plt.legend(['Return Distribution', 'VaR for Specified Alpha as a Return'])
plt.title('VaR in Closed Form for a Normal Distribution');
# -
# ### Historical (Non-Parametric) Case
#
# Historical VaR instead uses historical data to draw a discrete Probability Density Function, or histogram. Then finds the point at which only $100 \times (1-\alpha)\%$ of the points are below that return. It returns that return as the VaR return for coverage $\alpha$.
# +
lookback_days = 520
# NOTE: the calls below hard-code alpha=0.95 rather than using this local.
alpha = 0.95
# Multiply asset returns by weights to get one weighted portfolio return
portfolio_returns = returns.fillna(0.0).iloc[-lookback_days:].dot(weights)
portfolio_VaR = value_at_risk(value_invested, returns, weights, alpha=0.95)
# Need to express it as a return rather than absolute loss
portfolio_VaR_return = portfolio_VaR / value_invested
plt.hist(portfolio_returns, bins=20)
plt.axvline(portfolio_VaR_return, color='red', linestyle='solid');
plt.legend(['VaR for Specified Alpha as a Return', 'Historical Returns Distribution'])
plt.title('Historical VaR');
# -
# ### Underlying Distributions Are Not Always Normal
#
# In real financial data the underlying distributions are rarely normal. This is why we prefer historical VaR as opposed to an assumption of an underlying distribution. Historical VaR is also non-parametric, so we aren't at risk of overfitting distribution parameters to some data set.
#
# #### Real Data Example
#
# We'll show this on some real financial data.
# OEX components as of 3/31/16
# http://www.cboe.com/products/indexcomponents.aspx?DIR=OPIndexComp&FILE=snp100.doc
# OEX components as of 3/31/16
# http://www.cboe.com/products/indexcomponents.aspx?DIR=OPIndexComp&FILE=snp100.doc
# Fixed ticker typos from the original list: 'CSOT' -> 'COST' (Costco),
# 'MDZL' -> 'MDLZ' (Mondelez), and 'MET,' (stray comma inside the string) -> 'MET'.
oex = ['MMM','T','ABBV','ABT','ACN','ALL','GOOGL','GOOG','MO','AMZN','AXP','AIG','AMGN','AAPL','BAC',
       'BRK-B','BIIB','BLK','BA','BMY','CVS','COF','CAT','CELG','CVX','CSCO','C','KO','CL','CMCSA',
       'COP','COST','DHR','DOW','DUK','DD','EMC','EMR','EXC','XOM','FB','FDX','F','GD','GE','GM','GILD',
       'GS','HAL','HD','HON','INTC','IBM','JPM','JNJ','KMI','LLY','LMT','LOW','MA','MCD','MDT','MRK',
       'MET','MSFT','MDLZ','MON','MS','NKE','NEE','OXY','ORCL','PYPL','PEP','PFE','PM','PG','QCOM',
       'RTN','SLB','SPG','SO','SBUX','TGT','TXN','BK','PCLN','TWX','FOXA','FOX','USB','UNP','UPS','UTX',
       'UNH','VZ','V','WMT','WBA','DIS','WFC']
# symbols() / get_pricing() are Quantopian research-environment builtins.
tickers = symbols(oex)
num_stocks = len(tickers)
start = time.time()
data = get_pricing(tickers, fields='close_price', start_date='2014-01-01', end_date='2016-04-04')
end = time.time()
# Parenthesized print of a single value behaves identically on Python 2 and 3;
# the original bare `print` statement is a SyntaxError on Python 3.
print("Time: %0.2f seconds." % (end - start))
# +
# Daily close-to-close returns, then de-meaned column-wise.
returns = data.pct_change()
returns = returns - returns.mean(skipna=True) # de-mean the returns
data.plot(legend=None);
returns.plot(legend=None);
# -
# Now we need to generate some weights.
# +
def scale(x):
    """Normalize x so its absolute values sum to 1 (signs preserved)."""
    total_abs = np.sum(np.abs(x))
    return x / total_abs
# Random long-only weights normalized to sum to 1.
weights = scale(np.random.random(num_stocks))
plt.bar(np.arange(num_stocks),weights);
# -
# Now let's compute the VaR for $\alpha = 0.95$. We'll write this as $VaR_{\alpha=0.95}$ from now on.
value_at_risk(value_invested, returns, weights, alpha=0.95, lookback_days=520)
# Let's visualize this.
# +
lookback_days = 520
# NOTE: the call below hard-codes alpha=0.95 rather than using this local.
alpha = 0.95
# Multiply asset returns by weights to get one weighted portfolio return
portfolio_returns = returns.fillna(0.0).iloc[-lookback_days:].dot(weights)
portfolio_VaR = value_at_risk(value_invested, returns, weights, alpha=0.95)
# Need to express it as a return rather than absolute loss
portfolio_VaR_return = portfolio_VaR / value_invested
plt.hist(portfolio_returns, bins=20)
plt.axvline(portfolio_VaR_return, color='red', linestyle='solid');
plt.legend(['VaR for Specified Alpha as a Return', 'Historical Returns Distribution'])
plt.title('Historical VaR');
plt.xlabel('Return');
plt.ylabel('Observation Frequency');
# -
# The distribution looks visibly non-normal, but let's confirm that the returns are non-normal using a statistical test. We'll use Jarque-Bera, and our p-value cutoff is 0.05.
# +
from statsmodels.stats.stattools import jarque_bera
# Jarque-Bera tests normality via skewness/kurtosis; p > 0.05 means we
# cannot reject normality at the 5% level.
_, pvalue, _, _ = jarque_bera(portfolio_returns)
# Parenthesized print of a single string behaves identically on Python 2
# and 3; the original bare `print` statement is a SyntaxError on Python 3.
if pvalue > 0.05:
    print('The portfolio returns are likely normal.')
else:
    print('The portfolio returns are likely not normal.')
# -
# Sure enough, they're likely not normal, so it would be a big mistake to use a normal distribution to underlie a VaR computation here.
# ## We Lied About 'Non-Parametric'
#
# You'll notice the VaR computation conspicuously uses a lookback window. This is a parameter to the otherwise 'non-parametric' historical VaR. Keep in mind that because lookback window affects VaR, it's important to pick a lookback window that's long enough for the VaR to converge. To check if our value has seemingly converged let's run an experiment.
#
# Also keep in mind that even if something has converged on a say 500 day window, that may be ignoring a financial collapse that happened 1000 days ago, and therefore is ignoring crucial data. On the other hand, using all time data may be useless for reasons of non-stationarity in returns varaince. Basically as returns variance changes over time, older measurements may reflect state that is no longer accurate. For more information on non-stationarity you can check out [this lecture](https://www.quantopian.com/lectures/integration-cointegration-and-stationarity).
# +
N = 1000
VaRs = np.zeros((N, 1))
# VaR as a function of the lookback window length.
# NOTE: at i = 0, iloc[-0:] selects the entire frame, not zero rows.
for i in range(N):
    VaRs[i] = value_at_risk(value_invested, returns, weights, lookback_days=i)
plt.plot(VaRs)
plt.xlabel('Lookback Window')
plt.ylabel('VaR');
# -
# We can see here that VaR does appear to converge within a 400-600 lookback window period. Therefore our 520 day parameter should be fine. In fact, 1000 may be better as it uses strictly more information, but more computationally intensive and prey to stationarity concerns.
#
# It can be useful to do analyses like this when evaluating whether a VaR is meaningful. Another check we'll do is for stationarity of the portfolio returns over this time period.
# +
from statsmodels.tsa.stattools import adfuller
# Augmented Dickey-Fuller: null hypothesis is a unit root (non-stationary);
# p < 0.05 rejects the null, i.e. the series is likely stationary.
results = adfuller(portfolio_returns)
pvalue = results[1]
# Parenthesized print of a single string behaves identically on Python 2
# and 3; the original bare `print` statement is a SyntaxError on Python 3.
if pvalue < 0.05:
    print('Process is likely stationary.')
else:
    print('Process is likely non-stationary.')
# -
# ## Conditional Value at Risk (CVaR)
#
# CVaR is what many consider an improvement on VaR, as it takes into account the shape of the returns distribution. It is also known as Expected Shortfall (ES), as it is an expectation over all the different possible losses greater than VaR and their corresponding estimated likelihoods.
#
# If you are not familiar with expectations, much content is available online. However we will provide a brief refresher.
#
# ### Expected Value
#
# Say you have a fair six sided die. Each number is equally likely. The notion of an expectation, written as $\mathrm{E}(X)$, is what should you expect to happen out of all the possible outcomes. To get this you multiply each event by the probability of that event and add that up, think of it as a probability weighted average. With a die we get
#
# $$1/6 \times 1 + 1/6 \times 2 + 1/6 \times 3 + 1/6 \times 4 + 1/6 \times 5 + 1/6 \times 6 = 3.5$$
#
# When the probabilities are unequal it gets more complicated, and when the outcomes are continuous we have to use integration in closed form equations. Here is the formula for CVaR.
#
# $$CVaR_{\alpha}(x) \approx \frac{1}{(1-\alpha)} \int_{f(x,y) \geq VaR_{\alpha}(x)} f(x,y)p(y)dy dx$$
def cvar(value_invested, returns, weights, alpha=0.95, lookback_days=520):
    """Conditional VaR (expected shortfall): the mean loss over all
    observed returns worse than the VaR cutoff, scaled by value invested."""
    # Reuse the historical VaR as the tail cutoff.
    var = value_at_risk(value_invested, returns, weights, alpha,
                        lookback_days=lookback_days)
    portfolio_returns = returns.fillna(0.0).iloc[-lookback_days:].dot(weights)
    var_pct_loss = var / value_invested  # cutoff as a return, not a dollar loss
    tail = portfolio_returns[portfolio_returns < var_pct_loss]
    return value_invested * np.nanmean(tail)
# Let's compute CVaR on our data and see how it compares with VaR.
# Both calls return an absolute dollar loss on `value_invested`.
cvar(value_invested, returns, weights, lookback_days=500)
value_at_risk(value_invested, returns, weights, lookback_days=500)
# CVaR is higher because it is capturing more information about the shape of the distribution, AKA the moments of the distribution. If the tails have more mass, this will capture that. In general it is considered to be a far superior metric compared with VaR and you should use it over VaR in most cases.
#
# Let's visualize what it's capturing.
# +
lookback_days = 520
alpha = 0.95
# Multiply asset returns by weights to get one weighted portfolio return
portfolio_returns = returns.fillna(0.0).iloc[-lookback_days:].dot(weights)
portfolio_VaR = value_at_risk(value_invested, returns, weights, alpha=0.95)
# Need to express it as a return rather than absolute loss
portfolio_VaR_return = portfolio_VaR / value_invested
portfolio_CVaR = cvar(value_invested, returns, weights, alpha=0.95)
# Need to express it as a return rather than absolute loss
portfolio_CVaR_return = portfolio_CVaR / value_invested
# Plot only the observations > VaR on the main histogram so the plot comes out
# nicely and doesn't overlap.
plt.hist(portfolio_returns[portfolio_returns > portfolio_VaR_return], bins=20)
plt.hist(portfolio_returns[portfolio_returns < portfolio_VaR_return], bins=10)
plt.axvline(portfolio_VaR_return, color='red', linestyle='solid');
plt.axvline(portfolio_CVaR_return, color='red', linestyle='dashed');
# NOTE(review): plt.legend(labels) pairs labels with artists in creation order
# (the two histograms were drawn before the two axvlines) — confirm the label
# order below matches; it looks like the histogram/line labels are swapped.
plt.legend(['VaR for Specified Alpha as a Return',
'CVaR for Specified Alpha as a Return',
'Historical Returns Distribution',
'Returns < VaR'])
plt.title('Historical VaR and CVaR');
plt.xlabel('Return');
plt.ylabel('Observation Frequency');
# -
# ### Checking for Convergence Again
#
# Finally, we'll check for convergence.
# +
N = 1000
CVaRs = np.zeros((N, 1))
# Sweep the lookback window length to check CVaR convergence.
# NOTE(review): lookback_days=0 makes `returns.iloc[-0:]` select the *entire*
# history inside cvar/value_at_risk — consider starting the sweep at 1.
for i in range(N):
    CVaRs[i] = cvar(value_invested, returns, weights, lookback_days=i)
plt.plot(CVaRs)
plt.xlabel('Lookback Window')
# Fixed: this plot shows CVaR estimates (not VaR) as the lookback grows.
plt.ylabel('CVaR');
# -
# # Sources
#
# * http://www.wiley.com/WileyCDA/WileyTitle/productCd-1118445597.html
# * http://www.ise.ufl.edu/uryasev/publications/
# * http://www.ise.ufl.edu/uryasev/files/2011/11/VaR_vs_CVaR_CARISMA_conference_2010.pdf
# * http://faculty.washington.edu/ezivot/econ589/me20-1-4.pdf
# *This presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. ("Quantopian"). Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, Quantopian, Inc. has not taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information, believed to be reliable, available to Quantopian, Inc. at the time of publication. Quantopian makes no guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances.*
| Notebooks/quantopian_research_public/notebooks/lectures/VaR_and_CVaR/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # 自动并行
#
# 在[延后执行](./lazy-evaluation.md)里我们提到后端系统会自动构建计算图。通过计算图系统可以知道所有计算的依赖关系,有了它系统可以选择将没有依赖关系任务同时执行来获得性能的提升。
#
# 仍然考虑下面这个例子,这里`a = ...`和`b = ...`之间没有数据依赖关系,从而系统可以选择并行执行他们。
#
# 
#
# 通常一个运算符,例如`+`或者`dot`,会用掉一个计算设备上所有计算资源。`dot`同样用到所有CPU的核(即使是有多个CPU)和单GPU上所有线程。因此在单设备上并行运行多个运算符可能效果并不明显。自动并行主要的用途是多设备的计算并行,和计算与通讯的并行。
#
# 【注意】本章需要至少一个GPU才能运行。
#
# ## 多设备的并行计算
#
# 我们首先定义一个函数,它做10次矩阵乘法。
# +
from mxnet import nd
def run(x):
    """Queue ten x·x matrix multiplications on x's device and return them."""
    products = []
    for _ in range(10):
        products.append(nd.dot(x, x))
    return products
# -
# 我们分别计算在CPU和GPU上运行时间
# +
from mxnet import gpu
from time import time
# One workload held in CPU memory, a larger one on the first GPU.
x_cpu = nd.random.uniform(shape=(2000,2000))
x_gpu = nd.random.uniform(shape=(6000,6000), ctx=gpu(0))
nd.waitall()
# warm up
run(x_cpu)
run(x_gpu)
nd.waitall()
start = time()
run(x_cpu)
nd.waitall()  # block until the asynchronously queued ops actually finish
print('Run on CPU: %f sec'%(time()-start))
start = time()
run(x_gpu)
nd.waitall()
print('Run on GPU: %f sec'%(time()-start))
# -
# 我们去掉两次`run`之间的`waitall`,希望系统能自动并行这两个任务:
# Time both workloads together: with no intermediate waitall, the backend is
# free to schedule the CPU and GPU batches concurrently.
start = time()
run(x_cpu)
run(x_gpu)
nd.waitall()
print('Run on both CPU and GPU: %f sec'%(time()-start))
# 可以看到两个一起执行时,总时间不是分开执行的总和。这个表示后端系统能有效并行执行它们。
#
# ## 计算和通讯的并行
#
# 在多设备计算中,我们经常需要在设备之间复制数据。例如下面我们在GPU上计算,然后将结果复制回CPU。
# +
from mxnet import cpu
def copy_to_cpu(x):
    """Copy every NDArray in `x` to CPU memory; returns the new copies."""
    return [arr.copyto(cpu()) for arr in x]
# Time the GPU computation and the device-to-host copy separately first.
start = time()
y = run(x_gpu)
nd.waitall()
print('Run on GPU: %f sec'%(time()-start))
start = time()
copy_to_cpu(y)
nd.waitall()
print('Copy to CPU: %f sec'%(time() - start))
# -
# 同样我们去掉运行和复制之间的`waitall`:
# With no waitall between compute and copy, communication overlaps compute.
start = time()
y = run(x_gpu)
copy_to_cpu(y)
nd.waitall()
t = time() - start
# Fixed: report the elapsed time captured in `t` instead of re-reading the
# clock — the second time() call measured extra formatting overhead and
# left `t` unused.
print('Run on GPU then Copy to CPU: %f sec'%t)
# 可以看到总时间小于前面两者之和。这个任务稍微不同于上面,因为运行和复制之间有依赖关系。就是`y[i]`必须先计算好才能复制到CPU。但在计算`y[i]`的时候系统可以复制`y[i-1]`,从而获得总运行时间的减少。
#
# ## 总结
#
# MXNet能够自动并行执行没有数据依赖关系的任务从而提升系统性能。
#
# ## 练习
#
# - `run`里面计算了10次运算,他们也没有依赖关系。看看系统有没有自动并行执行他们
# - 试试有更加复杂数据依赖的任务,看看系统能不能得到正确的结果,而且性能有提升吗?
#
# **吐槽和讨论欢迎点**[这里](https://discuss.gluon.ai/t/topic/1883)
| chapter_gluon-advances/auto-parallelism.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Calculate skill of a MJO Index of S2S models as function of daily lead time
# linting
# %load_ext nb_black
# %load_ext lab_black
# +
import xarray as xr
# Render xarray reprs as HTML in the notebook.
xr.set_options(display_style="html")
import numpy as np
import matplotlib.pyplot as plt
from climpred import HindcastEnsemble
import climpred
# -
# IRIDL hosts various subseasonal initialized forecast and hindcast simulations:
#
# - `S2S project`:
# - http://iridl.ldeo.columbia.edu/SOURCES/.ECMWF/.S2S/
# - hindcast/reforecast: one variable, one model: ~ 80 GB
# - login required
# - `SubX project`:
# - http://iridl.ldeo.columbia.edu/SOURCES/.Models/.SubX/
# - hindcast/reforecast: one variable, one model: ~ 100 GB
# - login not required
#
# ---
# Here, we demonstrate how to set a cookie for IRIDL and access the skill of RMM1 subseasonal reforecasts.
#
# ---
# Here are instructions for configuring xarray to open protected Data Library datasets, after you have created a Data Library account and accepted the terms and conditions for the dataset.
# 1. Visit https://iridl.ldeo.columbia.edu/auth/genkey . Log in to the Data Library. Copy the key from the response.
#
# 2. Create a file with the following content, substituting the key from step 1 for `"xxxx"`:
# `Set-Cookie: __dlauth_id=xxxx; domain=.iridl.ldeo.columbia.edu`
#
# 3. Put the following in `~/.daprc`, which is `/home/jovyan/.daprc` on renku, substituting the path to the above file for `/path/to/cookie/file`:
# `HTTP.COOKIEJAR=/path/to/cookie/file`. You may need to copy `.daprc` to `/home/jovyan` on renku, because `/home/jovyan` is not tracked by `git`.
# !cat ~/.daprc
# +
# #%writefile ~/.cookie_iridl
# Set-Cookie: __dlauth_id=xxxx; domain=.iridl.ldeo.columbia.edu
# -
# ### Get observations
# pre-computed
# RMM1 (first Real-time Multivariate MJO index component) observations,
# shipped with climpred's tutorial data.
obsds = climpred.tutorial.load_dataset("RMM-INTERANN-OBS")[
    "rmm1"
].to_dataset()  # only until 2017
obsds = obsds.dropna("time").sel(time=slice("1995", None))  # Get rid of missing times.
# ### Get `on-the-fly` reforecasts
#
# S2S models:
#
# - `ECMF`
# - `ECCC`
# - `HMCR`
# - `KMA`
# - `UKMO`
# There are a set of reforecasts of the ECMWF model that match each real time forecast. They are made "on the fly" when a real time forecast is issued. So for S=0000 8 Feb 2021, there are reforecasts initialized on 0000 8 Feb 2020 and the 19 previous years on 8 Feb.
# %%time
# Fetch the ECMWF on-the-fly reforecast ensemble from IRIDL (requires the
# cookie set up above); .compute() pulls it eagerly into memory.
fcstds = xr.open_dataset(
    "https://iridl.ldeo.columbia.edu/SOURCES/.ECMWF/.S2S/.ECMF/.reforecast/.RMMS/.ensembles/.RMM1/dods",
    decode_times=False,
    chunks=None,
).compute()
# calendar '360' not recognized, but '360_day'
# Patch the non-CF calendar attribute so xr.decode_cf below can parse hdate.
if fcstds.hdate.attrs["calendar"] == "360":
    fcstds.hdate.attrs["calendar"] = "360_day"
# The S2S data dimensions correspond to the following `climpred` dimension definitions: `M=member`, `S=init`. We will rename the dimensions to their `climpred` names.
# rename to match climpred dims: https://climpred.readthedocs.io/en/stable/setting-up-data.html
fcstds = fcstds.rename({"S": "init", "L": "lead", "M": "member", "RMM1": "rmm1"})
fcstds = xr.decode_cf(fcstds, use_cftime=True)
fcstds.coords
# #### Skill for a single real-time forecast from corresponding reforecasts
# +
# assessing the skill of the reforecasts done annually from 8 Feb 2001 to 8 Feb 2020
# for the real-time forecast 8 Feb 2021
d = "08"
m = "02"
y = "2021"
# Ensemble-mean RMM1 reforecast trace for the chosen real-time init date.
fcstds.sel(init=f"{y}-{m}-{d}").squeeze().rmm1.mean("member").plot()
# +
import cftime
# create a new init coordinate
# Turn the hindcast-year coordinate (hdate) into proper init datetimes that
# share the chosen month/day across all reforecast years.
new_init = xr.concat(
    [
        xr.DataArray(
            cftime.DatetimeProlepticGregorian(int(h.dt.year.values), int(m), int(d))
        )
        for h in fcstds.hdate
    ],
    "init",
)
# select new inits for same dayofyear, drop all NaNs
fcstds_date = (
    fcstds.sel(init=f"{y}-{m}-{d}", drop=True)
    .squeeze(drop=True)
    .assign_coords(hdate=new_init)
    .rename({"hdate": "init"})
    .dropna("init", how="all")
)
# -
hindcast = HindcastEnsemble(fcstds_date)
hindcast = hindcast.add_observations(obsds)
# NOTE: `skill` is created by the %time cell magic on the next line when run
# as a notebook; as a plain script this line is a comment and skill is unset.
# %time skill = hindcast.verify(metric='acc', comparison='e2o', dim='init', alignment='maximize')
skill.rmm1.plot()
plt.title(f"ACC: RMM1 daily initialized {m}-{d}")
# #### skill over many initializations
# create large `xr.DataArray` with all `hdate` stacked into `init`
# restricting myself to years 2001-2003 for faster computation
# Restrict the period to keep the download/computation manageable.
fcstds = fcstds.sel(hdate=slice("2001", "2003"))
obs_ds = obsds.sel(time=slice("2001", "2004"))
# +
# %%time
fcstds_dates = []
# loop over all inits, ignoring leap day
for s in fcstds.init:
    d = str(s.init.dt.day.values).zfill(2)
    m = str(s.init.dt.month.values).zfill(2)
    y = s.init.dt.year.values
    if d == "29" and m == "02":
        continue
    # Build proper init datetimes for this month/day across all hindcast years.
    new_init = xr.concat(
        [
            xr.DataArray(
                cftime.DatetimeProlepticGregorian(int(h.dt.year.values), int(m), int(d))
            )
            for h in fcstds.hdate
        ],
        "init",
    )
    # select new inits for same dayofyear, drop all NaNs
    fcstds_date = (
        fcstds.sel(init=f"{y}-{m}-{d}", drop=True)
        .squeeze(drop=True)
        .assign_coords(hdate=new_init)
        .rename({"hdate": "init"})
        .dropna("init", how="all")
    )
    if fcstds_date.init.size > 0:  # not empty
        fcstds_dates.append(fcstds_date)
# Stack every per-date slice into one init dimension, chronologically sorted.
fcstds_dates = xr.concat(fcstds_dates, "init")
fcstds_dates = fcstds_dates.sortby(fcstds_dates.init)
# +
# drop duplicates, unnecessary?
# _, index = np.unique(fcstds_dates.init, return_index=True)
# fcstds_dates = fcstds_dates.isel(init=index)
# -
hindcast = HindcastEnsemble(fcstds_dates)
hindcast = hindcast.add_observations(obs_ds)
# +
# %time skill_all = hindcast.verify(metric='acc', comparison='e2o', dim='init', alignment='maximize')
skill_all.rmm1.plot()
# -
# #### skill when initialized in different months
# +
import warnings
warnings.filterwarnings(
"ignore"
) # ignore climpred UserWarnings triggered by verification.sel(init)
# -
# %%time
# ACC skill grouped by (every third) initialization month, overlaid on the
# all-months curve for reference.
for m in np.arange(1, 13, 3):
    hindcast_month = hindcast.sel(init=fcstds_dates.init.dt.month == m)
    month_name = hindcast_month.get_initialized().init[:2].to_index().strftime("%b")[0]
    skill = (
        hindcast_month.verify(metric="acc", comparison="e2o", dim="init", alignment="maximize")
    )
    skill.rmm1.plot(label=f"month = {month_name}")
skill_all.rmm1.plot(label='all months',c='k')
plt.legend()
# ### Get reforecasts without `on-the-fly`
# very similar workflow as in the `SubX` examples as there is no `hdate` coordinate: [subseasonal SubX examples](examples.html#subseasonal)
#
# S2S models:
#
# - `CNRM`
# - `CMA`
# - `BOM`
# - `ISAC`
# - `JMA`
# - `NCEP`
# +
# %%time
# Fixed-reforecast models carry init dates directly in S, so no hdate
# manipulation is needed.
fcstds = xr.open_dataset(
    "https://iridl.ldeo.columbia.edu/SOURCES/.ECMWF/.S2S/.CNRM/.reforecast/.RMMS/.ensembles/.RMM1/dods",
    decode_times=True,
).compute()
fcstds = fcstds.dropna("S", how="all")
# -
# rename to match climpred dims: https://climpred.readthedocs.io/en/stable/setting-up-data.html
fcstds = fcstds.rename({"S": "init", "L": "lead", "M": "member", "RMM1": "rmm1"})
hindcast = HindcastEnsemble(fcstds)
hindcast = hindcast.add_observations(obsds)
# %time skill = hindcast.verify(metric='acc', comparison='e2o', dim='init', alignment='maximize')
skill.rmm1.plot()
| docs/source/examples/subseasonal/daily-S2S-IRIDL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="SRXuMG2eqbKW" colab_type="text"
# Lambda School Data Science
#
# *Unit 2, Sprint 1, Module 4*
#
# ---
# + [markdown] colab_type="text" id="7IXUfiQ2UKj6"
# # Logistic Regression
#
#
# ## Assignment 🌯
#
# You'll use a [**dataset of 400+ burrito reviews**](https://srcole.github.io/100burritos/). How accurately can you predict whether a burrito is rated 'Great'?
#
# > We have developed a 10-dimensional system for rating the burritos in San Diego. ... Generate models for what makes a burrito great and investigate correlations in its dimensions.
#
# - [ ] Do train/validate/test split. Train on reviews from 2016 & earlier. Validate on 2017. Test on 2018 & later.
# - [ ] Begin with baselines for classification.
# - [ ] Use scikit-learn for logistic regression.
# - [ ] Get your model's validation accuracy. (Multiple times if you try multiple iterations.)
# - [ ] Get your model's test accuracy. (One time, at the end.)
# - [ ] Commit your notebook to your fork of the GitHub repo.
#
#
# ## Stretch Goals
#
# - [ ] Add your own stretch goal(s) !
# - [ ] Make exploratory visualizations.
# - [ ] Do one-hot encoding.
# - [ ] Do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html).
# - [ ] Get and plot your coefficients.
# - [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).
# + colab_type="code" id="o9eSnDYhUGD7" colab={}
# %%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/'
    # !pip install category_encoders==2.*
# If you're working locally:
else:
    DATA_PATH = '../data/'
# + id="COIFfemWqbKb" colab_type="code" colab={}
# Load data downloaded from https://srcole.github.io/100burritos/
import pandas as pd
df = pd.read_csv(DATA_PATH+'burritos/burritos.csv')
# + id="O_xWZOIrqbKd" colab_type="code" colab={}
# Derive binary classification target:
# We define a 'Great' burrito as having an
# overall rating of 4 or higher, on a 5 point scale.
# Drop unrated burritos.
df = df.dropna(subset=['overall'])
df['Great'] = df['overall'] >= 4
# + id="x1KNuZLCqbKf" colab_type="code" colab={}
# Clean/combine the Burrito categories
df['Burrito'] = df['Burrito'].str.lower()
# Compute every substring mask *before* rewriting any labels, so a later
# match is not affected by an earlier replacement (same semantics as the
# original four precomputed masks).
masks = [
    (df['Burrito'].str.contains('california'), 'California'),
    (df['Burrito'].str.contains('asada'), 'Asada'),
    (df['Burrito'].str.contains('surf'), 'Surf & Turf'),
    (df['Burrito'].str.contains('carnitas'), 'Carnitas'),
]
matched = masks[0][0] | masks[1][0] | masks[2][0] | masks[3][0]
for mask, canonical in masks:
    df.loc[mask, 'Burrito'] = canonical
# Anything that matched none of the categories becomes 'Other'.
df.loc[~matched, 'Burrito'] = 'Other'
# + id="uSo1xW8EqbKh" colab_type="code" colab={}
# Drop some high cardinality categoricals
df = df.drop(columns=['Notes', 'Location', 'Reviewer', 'Address', 'URL', 'Neighborhood'])
# + id="epqFzGtnqbKi" colab_type="code" colab={}
# Drop some columns to prevent "leakage"
df = df.drop(columns=['Rec', 'overall'])
# + id="WUhujGnMqbKk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 232} outputId="f0ec500e-acab-4e42-ed84-86d083c3405c"
df.head()
# + id="nExCFrCOX49Y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="24d4b163-ede5-4b58-cb33-9574b8543451"
df.dtypes
# + id="_iJT4tEiX85a" colab_type="code" colab={}
# Extract the review year for the time-based train/val/test split below.
df["year"] = pd.to_datetime(df["Date"]).apply(lambda x: x.year)
# + id="611dshwbamOG" colab_type="code" colab={}
# handle NaNs
# number columns: NaN -> median
# object columns: looks like NaN means "no", switch to 0/1 representation
import numpy as np
number_cols = df.select_dtypes(include="number").columns
object_cols = df.select_dtypes(include="object").columns
df[number_cols] = df[number_cols].fillna(df[number_cols].median(skipna=True))
# + id="SaBnpLq-fjpD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="ba9341ca-46fb-4a72-c37d-5e5be82ab138"
# Inspect the raw categorical values before mapping them to 0/1 below.
for col in object_cols:
    print(df[col].value_counts())
# + id="DZ8PzaqrgEK5" colab_type="code" colab={}
# Map the free-form yes/no markers onto 0/1; missing means "not present".
df[object_cols] = df[object_cols].replace(
    {
        "x": 1,
        "X": 1,
        "Yes": 1,
        "No": 0,
        # Fixed: the np.NaN alias was removed in NumPy 2.0; np.nan is the
        # canonical (and behaviorally identical) spelling.
        np.nan: 0,
    }
)
# + id="fnaZpaMFdy4-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 232} outputId="c9944cc7-016d-4567-8178-aaf18291ee0d"
df.head()
# + id="bnldKBaWcfDl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="31e5b507-e3ec-420f-b6f4-46312a0daa60"
df.isnull().sum()
# + id="gAk89WHtcv3y" colab_type="code" colab={}
# 'Queso' dropped — presumably uninformative/mostly-missing; TODO confirm.
df = df[df.columns.drop(["Queso"])]
# + id="h9nYTAVdKSER" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 617} outputId="ed8a7eeb-72ec-42f0-b9d9-ce0fa496b02a"
from plotly import express as px
# Mean Yelp/Google rating per burrito category, plotted as bars.
ratings_df = df.pivot_table(values=["Yelp", "Google"], index=["Burrito"]).reset_index(["Burrito"])
px.bar(ratings_df, x="Burrito", y="Google")
# + id="_uGHW98IYP-z" colab_type="code" colab={}
# Do train/validate/test split. Train on reviews from 2016 & earlier. Validate on 2017. Test on 2018 & later.
train = df[df["year"] <= 2016]
val = df[df["year"] == 2017]
test = df[df["year"] >= 2018]
# + id="qEy_BT3eYoXX" colab_type="code" colab={}
# Begin with baselines for classification.
# NOTE(review): the assignment's target is the 'Great' column, but 'Google'
# (a continuous rating) is used here — confirm this is intentional; the
# mode of a continuous column is a weak baseline.
target = "Google"
y_train = train[target]
y_val = val[target]
y_test = test[target]
mode = y_train.mode()[0]
y_pred = [mode] * len(y_val)
# + id="nQMe4SLuQAcl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="75fb28db-efa8-4c72-df46-386638d8d855"
from sklearn.metrics import accuracy_score, mean_absolute_error
# MAE is symmetric, so the (pred, true) argument order is harmless here.
mean_absolute_error(y_pred, y_val)
# + id="yNHGvqj0Voah" colab_type="code" colab={}
# Use scikit-learn for logistic regression.
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(solver="lbfgs", max_iter=4000)
# Keep numeric 0/1 features; drop identifiers, dates, targets and
# sparse/leaky numeric columns.
features = train.columns.drop(["Burrito", "Date", "Yelp", "Google", "Hunger", "Chips", "Cost", "Mass (g)", "Density (g/mL)"])
X_train = train[features]
X_val = val[features]
X_test = test[features]
# + id="9b5sStJqF7Xj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 438} outputId="97656494-8b9f-42b7-d158-128d003fd0c4"
X_train
# + id="9FtzVWsWdI0c" colab_type="code" colab={}
from sklearn.impute import SimpleImputer
# NOTE(review): the imputed matrices below are never used — the model is fit
# on the raw X_train later; either fit on the imputed arrays or drop this cell.
imputer = SimpleImputer()
X_train_imputed = imputer.fit_transform(X_train)
X_val_imputed = imputer.transform(X_val)
X_test_imputed = imputer.transform(X_test)
# + id="Z6Pt4V65nT71" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 218} outputId="14ecfe4e-a9e3-41d8-edef-0aac53438b46"
# Exploratory: ratings scaled to integers (not used downstream).
y_train.apply(lambda x: np.int64(x*10))
# + id="E--iK16qb3_H" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="c34441c1-237e-4695-9caa-c106e283ee1e"
def standardizeY(y):
    """Binarize a rating series: 1 where the value is at or above the
    series median, 0 otherwise."""
    threshold = np.median(y)
    return (y >= threshold).astype(int)
# Fit on the binarized target. NOTE(review): this uses the raw X_train, not
# the imputed arrays created above — confirm which was intended.
model.fit(X_train, standardizeY(y_train))
y_pred = model.predict(X_val)
y_pred
# + id="MPzaXu1vMdLC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="dac39d1c-257e-4930-c850-d1ea50a9dad7"
# With 0/1 labels, MAE equals the misclassification rate (1 - accuracy).
mean_absolute_error(y_pred, standardizeY(y_val))
# + id="mxEL06mgk4CO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bf9a73d9-51ec-4eb8-ccab-109252afc16c"
# Final, one-time test-set evaluation.
mean_absolute_error(
    model.predict(X_test),
    standardizeY(y_test)
)
# + id="ZhxU3kqWlDEr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="1add3da5-9856-4ace-c1bd-7d24ab809db8"
model.predict(X_test)
# + id="u1-Qo8EalHGX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 252} outputId="cc90464f-0d1f-4ee7-9433-59fc85a216d1"
model.predict(X_train)
| module4-logistic-regression/LS_DS_214_assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
import os
import pathlib
import pandas as pd
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"]="0";
# -
# !pip install ktrain
import ktrain
from ktrain import text
MODEL_NAME="Musixmatch/umberto-wikipedia-uncased-v1"
# +
train_path = "../input/dati-di-training-e-di-test/haspeede2_dev_taskAB.csv"
test_path = "../input/dati-di-training-e-di-test/haspeede2_reference_taskAB(misto).csv"
tr_path = pathlib.Path(train_path)
te_path = pathlib.Path(test_path)
# Validate both input files up front: print a confirmation for each, or
# abort the whole run early with a clear message if one is missing.
for candidate, ok_msg, err_msg in (
    (tr_path, "Train data path set.", "Training Data Path does not exist."),
    (te_path, "Test data path set.", "Test Data Path does not exist."),
):
    if candidate.exists():
        print(ok_msg)
    else:
        raise SystemExit(err_msg)
# -
# Tab-separated training file; drop the header row (index 0) and the
# unused fourth column.
train_df= pd.read_csv(train_path, encoding='utf-8', sep="\t", header=None)
train_df=train_df.drop([0])
train_df=train_df.drop([3], axis=1)
train_df.head()
# NOTE(review): the test file does not get its row 0 dropped like the train
# file does — confirm it really has no header row, otherwise the first
# "label" is the header string.
test_df= pd.read_csv(test_path, encoding='utf-8', sep=";",header=None)
test_df=test_df.drop([3], axis=1)
test_df.head()
# +
# Column 1 holds the tweet text, column 2 the hate-speech label.
x_train=train_df[1].tolist()
y_train=train_df[2].tolist()
x_test=test_df[1].tolist()
y_test=test_df[2].tolist()
t=text.Transformer(MODEL_NAME,maxlen=100)
trn=t.preprocess_train(x_train,y_train)
val=t.preprocess_test(x_test,y_test)
# +
# Build the transformer classifier, sweep learning rates, then train with
# triangular autofit (early stopping after 5 stagnant epochs).
model=t.get_classifier()
learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=32)
learner.lr_find(show_plot=True,max_epochs=10)
learner.autofit(2e-4,early_stopping=5,epochs=10)
learner.validate(class_names=t.get_classes())
# +
# Collect the misclassified test examples and dump them to CSV for review.
predictor = ktrain.get_predictor(learner.model, preproc=t)
data = test_df[1].tolist()
label = test_df[2].tolist()
total = len(data)
true_lab = []
pred_lab = []
# Fixed: renamed from `text` — that name shadowed the `text` module imported
# from ktrain at the top of the file.
wrong_texts = []
# Fixed: iterate texts and labels together instead of a manual index counter.
for dt, gold in zip(data, label):
    result = predictor.predict(dt)
    # NOTE(review): `result` is cast to int while `gold` keeps its CSV dtype;
    # confirm the labels are ints, otherwise this comparison is always False.
    if not int(result) == gold:
        wrong_texts.append(dt)
        pred_lab.append(result)
        true_lab.append(gold)
# Derive both counters from one source of truth.
wrong = len(wrong_texts)
correct = total - wrong
name_dict = {
    'Name': wrong_texts,
    'Gold Label': true_lab,
    'Predicted Label': pred_lab
}
wrong_data = pd.DataFrame(name_dict)
wrong_data.to_csv("wrong_results.csv", sep=';')
print("Correct: ", correct,"/",total,"\nWrong: ", wrong,"/",total)
| Test sul misto/umberto-wikipedia-test-misto.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Partial bond orders and benzene
#
# This notebook applies a SMIRNOFF FFXML involving partial bond orders (reducing the number of parameters) to assign bond parameters for benzene.
# Import stuff we need
from openff.toolkit.typing.engines.smirnoff import *
import openeye.oechem as oechem
import openeye.oeiupac as oeiupac
import openeye.oeomega as oeomega
# NBVAL_SKIP
# Load our force field from this directory. Use of partial bond orders here means there is only one [#6X3]~[#6X3]
# bond parameter line rather than three which otherwise would be required
ffxml = 'Frosst_AlkEthOH_extracarbons.offxml'
ff = ForceField(ffxml)
# Initialize benzene as a test molecule
mol = oechem.OEMol()
oeiupac.OEParseIUPACName(mol, 'benzene')
# Generate a 3D conformer and assign Tripos atom names (required downstream).
omega = oeomega.OEOmega()
omega(mol)
oechem.OETriposAtomNames(mol)
# NBVAL_SKIP
topology = generateTopologyFromOEMol(mol)
system = ff.createSystem(topology, [mol], chargeMethod = 'OECharges_AM1BCCSym', verbose = True)
| examples/deprecated/partial_bondorder/test_partialbondorder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Answer the following question:
#
# People in DC constantly complain that the metro consistently runs an average of 10 minutes late. You actually think it’s less than this, so you gather data for ten different trains at a specific location in DC. The following is your data in minutes of lateness: [4, 12, 6, 2, 1, 6, 7, 3, 16, 0]. Based on your data, are the people in DC correct?
import numpy as np
from scipy import stats
# Observed minutes of lateness for ten trains.
lateness = np.array([4, 12, 6, 2, 1, 6, 7, 3, 16, 0])
x_bar = np.mean(lateness)
x_bar
mu = 10  # claimed average lateness (null hypothesis)
s = np.std(lateness, ddof=1)  # sample standard deviation
s
n = len(lateness)
n
# One-sample t statistic for testing H1: mean lateness < 10 minutes.
t = (x_bar - mu) / (s / n ** 0.5)
t
alpha = 1.0 - 0.95
alpha
# Lower-tail p-value (the alternative is "true mean is less than mu").
p = stats.t.cdf(t, n - 1)
p
# Reject the claim of 10-minute average lateness if p <= alpha.
p <= alpha
| boulder/DC_Trains_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/zhouchun0105/Bitcoin-Price-and-Movement-Prediction/blob/main/Copy_of_Overall_LSTM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="W1jg_InS27i2"
# Here, we are trying to see if using LSTM can help predict the price of bitcoin
# + [markdown] id="CCFMXshdyTqs"
# Import Needed *Libraries*
# + id="kt_VmMr-yMzL"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
# + [markdown] id="Zgjq6jOqyXkV"
# Import Dataset
# + colab={"base_uri": "https://localhost:8080/"} id="lk0oq0W4yagU" outputId="454362c1-6f7a-4127-ec98-a50b42e92417"
from google.colab import drive
drive.mount('/content/drive')
# + id="FeB25y-aycda"
bitcoin=pd.read_csv("bitcoin.csv")
# + [markdown] id="UYtBbdpNyeY0"
# Overall View of Dataset
#
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="gGxU_pNbyh9y" outputId="78e13238-1ac1-4280-f463-8b23f32ab134"
bitcoin.head(5)
# + [markdown] id="8JFUufjDyoUC"
# Clean up the data and use certain variables such as 'High','Low', and Timestamp. Do note that I am using the average of high and low, thus the values have been added up and divided by 2
#
# + id="G_CGifkzykpq"
# Derive a single price per row (mid of High/Low), drop everything else,
# and resample the minute data to 6-hour means.
bitcoin['price'] = (bitcoin['High']+ bitcoin['Low'])/2
bitcoin.drop(['Open','Close','Volume_(BTC)','Volume_(Currency)', 'Weighted_Price','High','Low'],axis=1, inplace=True)
bitcoin['Timestamp'] = pd.to_datetime(bitcoin['Timestamp'],unit='s')
bitcoin = bitcoin.set_index('Timestamp')
bitcoin = bitcoin.resample('6H').mean()
bitcoin = bitcoin.dropna()
# + [markdown] id="p8pM96_pyyNk"
# View data after unnecessary data has been dropped
# + colab={"base_uri": "https://localhost:8080/", "height": 394} id="jVym2_H8yy_Y" outputId="2a3200dd-1d6d-4bbd-e40a-151e6ea9b02e"
bitcoin.head(10)
# + [markdown] id="EA-4-AK0y1Jo"
# View of what the data looks like in plt plot
# + colab={"base_uri": "https://localhost:8080/", "height": 625} id="Zq-Ug83wy4Ev" outputId="30243e10-18e5-4fb6-f001-7877c559981d"
plt.figure(figsize=(20,10))
plt.plot(bitcoin)
plt.title('Bitcoin price',fontsize=30)
plt.xlabel('year',fontsize=20)
plt.ylabel('price',fontsize=20)
plt.show()
# + [markdown] id="TurHQY__y7dC"
# Additional Information about the data for better understanding
# + colab={"base_uri": "https://localhost:8080/"} id="uYe2pbXmy6uq" outputId="f8ab47b9-9383-452d-a00a-208f659671a7"
# Summary statistics of the 6-hour mid prices computed above.
price = bitcoin['price']
max_value = price.max()
min_value = price.min()
avg_value=price.mean()
print(max_value, ' is the maximum price of bitcoin ')
print(min_value, ' is the minimum price of bitcoin')
print(avg_value, 'is the average price of bitcoin')
# + [markdown] id="8G50LLITzCJY"
# Using Scalar to get values in 0's and 1's for price
# + id="b2raQ15LzEiM"
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from keras import models, layers
# + id="_xEFqUNyzG9H"
# Scale prices into [0, 1]. NOTE(review): the scaler is fit on the full
# series, which leaks test-period min/max into training — fit on the
# training slice only to avoid this.
scaler = MinMaxScaler()
price = scaler.fit_transform(np.array(bitcoin['price']).reshape(-1,1))
bitcoin['price'] = price
# + [markdown] id="5wocX6gL0gdl"
# Testing 1 date based on what done above to check if code works. In this test I will predict price for 1 date based on price of 100 dates.
# + colab={"base_uri": "https://localhost:8080/"} id="CZHhlJBc0hQQ" outputId="80eabdfa-986a-472d-a0f0-95cf55141a70"
X_l = []
y_l = []
Number = len(bitcoin)
Dates = 100
# Sliding window: each sample is 100 consecutive observations; the target is
# the observation immediately after that window.
for i in range(Number-Dates-1):
    X_l.append(bitcoin.iloc[i:i+Dates])
    y_l.append(bitcoin.iloc[i+Dates])
X = np.array(X_l)
y = np.array(y_l)
print(X.shape, y.shape)
# 12646 is the price for 1 day based on 100 days
# + [markdown] id="n5Jpu4h70ni-"
# Split Data
# + id="oG_zAEqB0oKd"
# NOTE(review): a random split of overlapping windows puts near-duplicate
# sequences in both train and test; a chronological split is safer for
# time-series evaluation.
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2, random_state= 100)
# + [markdown] id="e8AED4LG0zfn"
# Model for Overall Dataset
# + colab={"base_uri": "https://localhost:8080/"} id="WYmIS-L30rkR" outputId="5c4a7e2b-c2e1-4526-ae20-08c24ecadbb6"
# Single-layer LSTM (10 units) regressing the next price from a window.
m_x = layers.Input(shape = X_train.shape[1:])
m_h = layers.LSTM(10)(m_x)
m_y = layers.Dense(1)(m_h)
m = models.Model(m_x,m_y)
m.compile('adam','mse')
m.summary()
# + [markdown] id="o_RrgRVk1O6I"
# Evaluate model (100 times)
#
# + id="x9dERRsU1Pcs"
# Train for 100 epochs, tracking loss on the held-out split each epoch.
history = m.fit(X_train, y_train, epochs=100, validation_data=(X_test, y_test),verbose=0)
# + [markdown] id="X7Uf0anj1SN_"
# Show Model
# + colab={"base_uri": "https://localhost:8080/", "height": 351} id="Wb1PqPgz1Uww" outputId="787b6ef0-7507-468f-d21e-53dcca499e5f"
# Train vs. test loss curves over the 100 training epochs.
plt.figure(figsize=(15,5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['Train','Test'])
plt.title("The model's evaluation", fontsize=14)
plt.xlabel('Epoch')
plt.xlim(2,100)
plt.ylabel('Loss')
plt.show()
# + [markdown] id="-wvil2lo1hfP"
# Predict price on 200 values
# + colab={"base_uri": "https://localhost:8080/"} id="9eXtEbLA1ied" outputId="89aad172-fef2-450d-ae13-014c50f4f41d"
# Autoregressive rollout: the first 40 steps mix the tail of the real series
# with model outputs; the remaining 160 steps feed predictions back in only.
# NOTE(review): the model was trained on 100-step windows, but these inputs
# start at 40 observations and shrink/grow differently — confirm this input
# shape is what the model expects.
pred =[]
pr = m.predict(np.array(bitcoin[-40:]))
pred.append(pr[0])
for i in range(1,40):
    pr = m.predict(np.concatenate((np.array(bitcoin[-40+i:]), pred[:]), axis=0))
    pred.append(pr[0])
for i in range(0,160):
    pr = m.predict(np.concatenate(pred[i:],axis=0).reshape(-1,1))
    pred.append(pr[0])
# + colab={"base_uri": "https://localhost:8080/", "height": 517} id="N-Cxj2-H1vsn" outputId="c52a4216-e693-4e19-8710-d22805356b08"
# Append the 200 rollout predictions after the historical series and plot
# both segments (history in one color, forecast in another).
pred = pd.DataFrame(pred)
pred = pred.reset_index()
pred.columns = ['z','price']
pred.drop(['z'],axis=1,inplace=True)
data = pd.concat([bitcoin.reset_index().drop('Timestamp',axis=1),pred],ignore_index=True)
plt.figure(figsize=(17,7))
plt.plot(data[-1300:-300])
plt.title("Bitcoin predict",fontsize=20)
plt.text(13200,1,"predict data",fontsize=14)
plt.text(13015,1,"- 2020/12/31",fontsize=14)
plt.plot(data[-300:])
plt.show()
# + [markdown] id="2_V2MA8N3G-x"
# When looking at the overall results, it is clear that we cannot predict the price of bitcoin with this model. But what if we break the model down to 2 parts and see if the results are better
| Copy_of_Overall_LSTM.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.5 with Spark
# language: python3
# name: python3
# ---
# This is the last assignment for the Coursera course "Advanced Machine Learning and Signal Processing"
#
# Just execute all cells one after the other and you are done - just note that in the last one you should update your email address (the one you've used for coursera) and obtain a submission token, you get this from the programming assignment directly on coursera.
#
# Please fill in the sections labelled with "###YOUR_CODE_GOES_HERE###"
#
# The purpose of this assignment is to learn how feature engineering boosts model performance. You will apply Discrete Fourier Transformation on the accelerometer sensor time series and therefore transforming the dataset from the time to the frequency domain.
#
# After that, you’ll use a classification algorithm of your choice to create a model and submit the new predictions to the grader. Done.
#
# Please make sure you run this notebook from an Apache Spark 2.3 notebook.
#
# So the first thing we need to ensure is that we are on the latest version of SystemML, which is 1.3.0 (as of 20th March'19) Please use the code block below to check if you are already on 1.3.0 or higher. 1.3 contains a necessary fix, that's we are running against the SNAPSHOT
#
# Verify the installed SystemML version; 1.3.0+ is required for this assignment.
from systemml import MLContext
ml = MLContext(spark)
ml.version()
#
#
# If you are below version 1.3.0, or you got the error message "No module named 'systemml'", please execute the next two code blocks and then
#
# # PLEASE RESTART THE KERNEL !!!
#
# Otherwise your changes won't take effect, just double-check every time you run this notebook if you are on SystemML 1.3
#
!pip install https://github.com/IBM/coursera/blob/master/systemml-1.3.0-SNAPSHOT-python.tar.gz?raw=true
#
#
# Now we need to create two symlinks so that the newest version is picked up - this is a workaround and will be removed as soon as SystemML 1.3 is pre-installed on Watson Studio once officially released.
#
!ln -s -f ~/user-libs/python3/systemml/systemml-java/systemml-1.3.0-SNAPSHOT-extra.jar ~/user-libs/spark2/systemml-1.3.0-SNAPSHOT-extra.jar
!ln -s -f ~/user-libs/python3/systemml/systemml-java/systemml-1.3.0-SNAPSHOT.jar ~/user-libs/spark2/systemml-1.3.0-SNAPSHOT.jar
# # Please now restart the kernel and start from the beginning to make sure you've installed SystemML 1.3
#
# Let's download the test data since it's so small we don't use COS (IBM Cloud Object Store) here
!wget https://github.com/IBM/coursera/blob/master/coursera_ml/shake.parquet?raw=true
!mv shake.parquet?raw=true shake.parquet
# Now it’s time to read the sensor data and create a temporary query table.
# Read the accelerometer recordings and register them as a SQL temp view "df".
df=spark.read.parquet('shake.parquet')
df.show()
!pip install pixiedust
# + pixiedust={"displayParams": {"handlerId": "tableView"}}
import pixiedust
display(df)
# -
df.createOrReplaceTempView("df")
# We’ll use Apache SystemML to implement Discrete Fourier Transformation. This way all computation continues to happen on the Apache Spark cluster for advanced scalability and performance.
from systemml import MLContext, dml
ml = MLContext(spark)
# As you’ve learned from the lecture, implementing Discrete Fourier Transformation in a linear algebra programming language is simple. Apache SystemML DML is such a language and as you can see the implementation is straightforward and doesn’t differ too much from the mathematical definition (Just note that the sum operator has been swapped with a vector dot product using the %*% syntax borrowed from R
# ):
#
# <img style="float: left;" src="https://wikimedia.org/api/rest_v1/media/math/render/svg/1af0a78dc50bbf118ab6bd4c4dcc3c4ff8502223">
#
#
# DML implementation of the DFT: build the NxN angle matrix M = 2*pi*n*k/N and
# project `signal` onto its cosine (real, Xa) and sine (imaginary, Xb) parts.
# Executed later by dft_systemml().
dml_script = '''
PI = 3.141592654
N = nrow(signal)
n = seq(0, N-1, 1)
k = seq(0, N-1, 1)
M = (n %*% t(k))*(2*PI/N)
Xa = cos(M) %*% signal
Xb = sin(M) %*% signal
DFT = cbind(Xa, Xb)
'''
# Now it’s time to create a function which takes a single row Apache Spark data frame as argument (the one containing the accelerometer measurement time series for one axis) and returns the Fourier transformation of it. In addition, we are adding an index column for later joining all axis together and renaming the columns to appropriate names. The result of this function is an Apache Spark DataFrame containing the Fourier Transformation of its input in two columns.
#
# +
from pyspark.sql.functions import monotonically_increasing_id
def dft_systemml(signal, name):
    """Run the DML DFT script on a single-column Spark DataFrame `signal`.

    Returns a Spark DataFrame with columns `<name>a` (cosine/real part),
    `<name>b` (sine/imaginary part) and a monotonically increasing `id`
    column used for joining the per-axis results later.
    """
    prog = dml(dml_script).input('signal', signal).output('DFT')
    # Execute inside the SystemML engine on top of Spark, then pull the
    # resulting matrix back out of the execution context.
    dft_matrix = ml.execute(prog).get('DFT')
    # Convert the SystemML matrix into a Spark DataFrame and rename the
    # default C1/C2 columns to axis-specific names.
    renamed = dft_matrix.toDF().selectExpr('C1 as %sa' % (name), 'C2 as %sb' % (name))
    # Attach a unique row id so all axes can be joined row-by-row.
    return renamed.withColumn("id", monotonically_increasing_id())
# -
# Now it’s time to create DataFrames containing for each accelerometer sensor axis and one for each class. This means you’ll get 6 DataFrames. Please implement this using the relational API of DataFrames or SparkSQL.
#
# One single-column DataFrame per accelerometer axis (X/Y/Z) and class (0/1).
x0 = spark.sql("SELECT X from df where class = 0")
y0 = spark.sql("SELECT Y from df where class = 0")
z0 = spark.sql("SELECT Z from df where class = 0")
x1 = spark.sql("SELECT X from df where class = 1")
y1 = spark.sql("SELECT Y from df where class = 1")
z1 = spark.sql("SELECT Z from df where class = 1")
# Since we’ve created this cool DFT function before, we can just call it for each of the 6 DataFrames now. And since the result of this function call is a DataFrame again we can use the pyspark best practice in simply calling methods on it sequentially. So what we are doing is the following:
#
# - Calling DFT for each class and accelerometer sensor axis.
# - Joining them together on the ID column.
# - Re-adding a column containing the class index.
# - Stacking both Dataframes for each classes together
#
#
# +
from pyspark.sql.functions import lit
# DFT-transform each axis, join the three axes on the shared row id, and
# tag each row with its class label.
df_class_0 = dft_systemml(x0,'x') \
    .join(dft_systemml(y0,'y'), on=['id'], how='inner') \
    .join(dft_systemml(z0,'z'), on=['id'], how='inner') \
    .withColumn('class', lit(0))
df_class_1 = dft_systemml(x1,'x') \
    .join(dft_systemml(y1,'y'), on=['id'], how='inner') \
    .join(dft_systemml(z1,'z'), on=['id'], how='inner') \
    .withColumn('class', lit(1))
# Stack both classes into a single training DataFrame.
df_dft = df_class_0.union(df_class_1)
df_dft.show()
# -
# Please create a VectorAssembler which consumes the newly created DFT columns and produces a column “features”
#
from pyspark.ml.feature import VectorAssembler
# Pack the six DFT columns (real/imaginary part per axis) into one vector.
vectorAssembler = VectorAssembler(
    inputCols = ["xa","xb","ya","yb","za","zb"],
    outputCol = "features")
# Please instantiate a classifier from the SparkML package and assign it to the classifier variable. Make sure to set the “class” column as target.
#
from pyspark.ml.classification import RandomForestClassifier
classifier = RandomForestClassifier(labelCol="class", featuresCol="features", numTrees=6)
# Let’s train and evaluate…
#
from pyspark.ml import Pipeline
pipeline = Pipeline(stages=[vectorAssembler, classifier])
model = pipeline.fit(df_dft)
# NOTE(review): evaluation is done on the training data itself, so the
# reported accuracy is optimistic.
prediction = model.transform(df_dft)
prediction.show()
# +
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
binEval = MulticlassClassificationEvaluator().setMetricName("accuracy") .setPredictionCol("prediction").setLabelCol("class")
binEval.evaluate(prediction)
# -
# If you are happy with the result (I’m happy with > 0.8), please submit your solution to the grader by executing the following cells. Please don’t forget to obtain an assignment submission token (secret) from Coursera’s grader web page and paste it into the “secret” variable below, along with the email address you’ve used for Coursera.
#
!rm -Rf a2_m4.json
prediction = prediction.repartition(1)
prediction.write.json('a2_m4.json')
!rm -f rklib.py
!wget wget https://raw.githubusercontent.com/IBM/coursera/master/rklib.py
from rklib import zipit
zipit('a2_m4.json.zip','a2_m4.json')
!base64 a2_m4.json.zip > a2_m4.json.zip.base64
# +
from rklib import submit
key = <KEY>"
part = "IjtJk"
email = "<EMAIL>"
submission_token = "<PASSWORD>"
with open('a2_m4.json.zip.base64', 'r') as myfile:
data=myfile.read()
submit(email, submission_token, key, part, [part], data)
| Advanced Data Science with IBM/Advanced Machine Learning and Signal Processing/Week 4 Programming Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dev and Test notebook
# ## Imports
# %matplotlib inline
# %reload_ext autoreload
# %autoreload 2
# +
import os
from pathlib import Path
import skimage.external.tifffile as tiff
from common import Statistics, dataset_source
from resources.conv_learner import *
from resources.plots import *
from pprint import pprint
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# Dataset location and training hyper-parameters for the yeast_v10 experiments.
PATH = "datasets/yeast_v10/"
data_path = Path(PATH)
# Genotype classes present in the dataset (WT = wild type, *KO = knockouts).
CLASSES = ('WT', 'mfb1KO', 'mmr1KO','mmm1KO', 'num1KO','mfb1KO_mmr1KO')
NUM_CLASSES = len(CLASSES)
BATCH_SIZE = 64
SIZE = 200  # image side length in pixels
# +
# stats_name = "yeast_v5.3_per_class.dict"
# Gather per-class normalisation statistics over the paired train/val splits.
classes = Statistics.source_class(data_path)
train_val = zip(classes['train'], classes['val'])
# Xtest = zip(classes['test'])
main_stats = Statistics.per_class(train_val, inspect = False)
# test_stats = Statistics.per_class(Xtest, save_name=stats_name)
# -
for keys in main_stats.keys():
    print(f"{keys}: \t \t \t {main_stats[keys]}")
print(main_stats['01_WT'])
# # Bug in balancing batch composition:
# Inspect the sampler weights while reproducing the batch-balancing bug.
wgths = to_np(data.trn_dl.sampler.weights) #__dict__.keys()
# Deduplicate the weight values via dict keys (preserves insertion order).
ws = dict.fromkeys(list(wgths)).keys()
# NOTE(review): this expression is only displayed by the notebook; the
# result is not stored anywhere.
list(ws)[0] /sum(ws)
print(set(wgths))
# +
# +
#raw code from dataset.compute_adjusted_weights()
# Recompute per-sample weights: start from inverse-frequency probabilities,
# then nudge each class toward the uniform target probability.
dataset = data.trn_dl.dataset.y
weights = np.zeros(len(dataset))
labels = list(set(dataset))
occurrences = np.bincount(dataset)
# Percentage of the dataset occupied by each class.
probs = [100 * count/sum(occurrences) for count in occurrences]
for idx, label in enumerate(labels):
    weights[dataset == label] = probs[idx] / occurrences[idx]
desired = 100 / len(labels) # desired probability per class
for idx, prob in enumerate(probs):
    delta = desired - prob # unlikely to be 0
    correction = delta / occurrences[idx]
    weights[dataset == labels[idx]] += correction
# -
print(sorted(set(probs), reverse = True))
print(desired)
# print(weights)
print(occurrences)
print(labels)
# delta/correction hold the values from the final loop iteration only.
print(delta)
print(correction)
print(sorted(set(weights)))
def analyze_batch_composition():
    """Print the mean per-class fraction of samples over one epoch of batches.

    Iterates every batch of `data.trn_dl` (the fastai training loader),
    counts how many samples of each class appear per batch, and prints the
    per-class mean count divided by the batch size of 64.
    """
    batches = iter(data.trn_dl)
    class_counts = None
    for _ in range(len(data.trn_dl)):
        _, y = next(batches)  # images are not needed here
        counts = np.array([list(to_np(y)).count(j) for j in range(NUM_CLASSES)])
        # Fix: removed a stray bare `print` statement (a no-op expression).
        # Stack per-batch counts row by row.
        if class_counts is None:
            class_counts = counts
        else:
            class_counts = np.vstack((class_counts, counts))
    # NOTE(review): 64 mirrors BATCH_SIZE defined above; the final batch may
    # be smaller, which slightly biases the estimate.
    means = np.mean(class_counts, axis = 0)/64
    print(means)
analyze_batch_composition()
# # Analyzing and visualizing inference results:
# +
# Need to define and warm-up model...
# -
# Run inference on the test set and collect predictions per source folder.
test_log_preds, targs = learn.predict_with_targs(is_test=True)
testprobs = np.exp(test_log_preds)  # log-probabilities -> probabilities
preds = np.argmax(testprobs, axis=1)
# Rebuild the label->index mappings so test folders can be matched to slices.
_, lbl2idx_, test_lbl2idx_ = ImageClassifierData.prepare_from_path(PATH, val_name='val', bs=64, num_workers=1, test_name='test', test_with_labels=True)
# +
# make predictions dictionary
# Slice the flat `preds` array into per-folder chunks: test images are stored
# contiguously per source folder, so [l:h) covers folder i.
h = 0
preds_dict = {}
for i, key in enumerate(test_lbl2idx_.keys()):
    l = h
    h = h + list(data.test_dl.dataset.src_idx).count(i)
    preds_dict[key] = list(preds[l:h])
    print(f"{key} predictions ready ({h - l} elements)")
# -
# Relative class frequency of the predictions for each test folder.
preds_rel = {
    key: {cls: counts.count(i) / len(counts)
          for i, cls in enumerate(data.classes)}
    for key, counts in preds_dict.items()
}
def plot_test_preds(targets, preds_rel):
    """Draw horizontal bar charts of relative prediction frequencies.

    `targets` may be a single folder name or a list of names; one subplot
    is drawn per target on a two-column grid.
    """
    if not isinstance(targets, list):
        targets = [targets]
    n_rows = math.ceil(int(len(targets)) / 2)  # dynamic scaling of GridSpec
    fig_height = 4 * n_rows                    # dynamic scaling of figure size
    # plotting:
    plt.figure(figsize=(12, fig_height))
    grid = plt.GridSpec(n_rows, 2)
    grid.update(wspace = 0.4)
    for idx, target in enumerate(targets):
        fractions = [preds_rel[target][cls] for cls in data.classes]  # extracting data
        axis = plt.subplot(grid[idx])
        axis.barh(data.classes, fractions)
        axis.set_title(target)
        axis.set_xlim(0, 1)
    plt.show()
# +
# Plot the prediction distribution for every test folder.
test_classes = list(test_lbl2idx_.keys())
plot_test_preds(test_classes, preds_rel)
# plot_test_preds(['01_WT', '03_WT', '03_fzo1KO', '01_mfb1KO'], preds_rel)
# -
# # Plotting means of dataset
def source_classX(root_path: Path):
    """Map each split directory (train/test/val) under `root_path` to the
    list of class sub-directories it contains."""
    split_markers = ('test', 'train', 'val')
    result = {}
    for entry in root_path.iterdir():
        # Keep only entries whose path mentions one of the split names.
        if any(marker in str(entry) for marker in split_markers):
            result[entry.name] = list(entry.iterdir())
    return result
# +
inspect = True
src_folders = source_classX(data_path)
zipped = zip(src_folders['train'], src_folders['val'])
norm_value=65536  # 16-bit TIFF full-scale value
stats = {}
# For each (train_dir, val_dir) pair, pool the images from both splits and
# compute channel statistics.
for t in zipped:
    class_images = []
    class_stats = []  # NOTE(review): unused
    for class_dir in t: # t is a tuple
        # NOTE(review): class_name keeps the name of the LAST dir in the pair;
        # assumes train/val dirs of a pair share the same name — confirm.
        class_name = class_dir.name
        # read from each dir and append to the images
        for file in class_dir.iterdir():
            image = tiff.imread(str(file))
            class_images.append(image)
        # print(f"working on: {class_name}")
        # print(f"working on: {class_dir}")
    print(len(class_images))
    if inspect is False:
        # Dataset-level per-channel stats, scaled to [0, 1].
        mean = np.mean(class_images, axis=(0, 2, 3)) / norm_value
        stdev = np.std(class_images, axis=(0, 2, 3)) / norm_value
    else:
        # Per-image per-channel stats, kept in raw intensity units.
        mean = np.mean(class_images, axis=(2, 3)) #/ norm_value
        stdev = np.std(class_images, axis=(2, 3)) #/ norm_value
    stats[class_name] = (mean, stdev)
# +
# d = [key.split('_')[0] for key in stats.keys()]
# ds = list(dict.fromkeys(d))
# Per-image channel means standardised with the per-class dataset statistics:
# scale to [0, 1], subtract the class mean, divide by the class stdev.
ims_means = {key: stats[key][0] for key in stats.keys()}
ims_means_scaled = {key: (((ims_means[key] / norm_value) - main_stats[key][0]) / main_stats[key][1]) for key in ims_means.keys()}
ims_to_plot = ims_means_scaled
plt.style.use('seaborn-whitegrid')
fig = plt.figure()
ax = plt.subplot(111)
# Colour-code by the experiment prefix ('01'/'02'/'03') in the folder name.
for k in stats.keys():
    if '01' in k:
        ax.plot(ims_to_plot[k][:,0], ims_to_plot[k][:,1], 'o', label=k, color = 'C0', alpha = 0.2)
    elif '02' in k:
        ax.plot(ims_to_plot[k][:,0], ims_to_plot[k][:,1], 'o', label=k, color = 'C1', alpha = 0.2)
    elif '03' in k:
        ax.plot(ims_to_plot[k][:,0], ims_to_plot[k][:,1], 'o', label=k, color = 'C2', alpha = 0.2)
# box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel('Mean DIC intensity')
plt.ylabel('Mean Cit1-mCherry intensity');
# plt.xlim(0,1750)
# plt.ylim(0,550);
# plt.legend(fancybox=True, frameon=True, loc='lower center', ncol=2);
# -
ims_means = {key: stats[key][0] for key in stats.keys()}
# ims_means_scaledX = {x: (((ims_means[x] / norm_value) - main_stats[x][0]) / main_stats[x][1]) for x in im_means.keys()}
# Single-class sanity check of the scaling formula above.
dude = ((ims_means['03_dnm1KO'] / norm_value) - main_stats['03_dnm1KO'][0]) / main_stats['03_dnm1KO'][1]
# +
# Same scatter plot as above but with raw (unscaled) per-image means,
# coloured by whether the folder belongs to experiment '01'.
plt.style.use('seaborn-whitegrid')
d = [key.split('_')[0] for key in stats.keys()]
ds = list(dict.fromkeys(d))
for k in stats.keys():
    if '01' in k:
        plt.plot(stats[k][0][:,0], stats[k][0][:,1], 'o', label=k, color = 'C0', alpha = 0.2)
    else:
        plt.plot(stats[k][0][:,0], stats[k][0][:,1], 'o', label=k, color = 'C1', alpha = 0.2)
plt.xlabel('Mean Cit-mCherry intensity')
plt.ylabel('Mean DIC intensity')
plt.xlim(0,1500)
plt.ylim(0,500)
plt.legend(fancybox=True, frameon=True, loc='lower center', ncol=2)
# -
# ## Allowing normalization according to both dataset (source) and class
def read_dirsX(path, folder):
    '''
    Fetch the relative filenames of all images under `path/folder`, with the
    label of each file extrapolated from its directory name.

    Returns:
    -------
    fnames: paths of the form `folder/label/filename`
    lbls: the label of each entry in `fnames` (parallel list)
    all_lbls: the sorted list of label directories found
    '''
    lbls, fnames, all_lbls = [], [], [] #(!) Need to modify here for z label
    full_path = os.path.join(path, folder)
    for lbl in sorted(os.listdir(full_path)):
        if lbl not in ('.ipynb_checkpoints', '.DS_Store'):
            all_lbls.append(lbl)
            for fname in os.listdir(os.path.join(full_path, lbl)):
                # Fix: `fname not in ('.DS_Store')` compared against a plain
                # string, so it performed a SUBSTRING test and silently dropped
                # any filename that happened to be a substring of ".DS_Store".
                # Compare against a real one-element tuple instead.
                if fname not in ('.DS_Store',):
                    fnames.append(os.path.join(folder, lbl, fname))
                    lbls.append(lbl)
    return fnames, lbls, all_lbls
# +
path = PATH
folder = 'val'
fnames, lbls, all_lbls = read_dirsX(path, folder)
# Folder names look like '<experiment>_<genotype>'; the class is the genotype.
lbls2classes = {l: l.split('_')[1] for l in all_lbls} # (!) dict mapping lbls (folders) to classes (genotype)
u_classes = list(dict.fromkeys(list(lbls2classes.values()))) # (!) get unique classes
d = {}
# `d` is freshly created above, so this guard is always true here; it mirrors
# the reuse logic inside folder_sourceX below.
if d == {}:
    d_cls = {label: idx for idx, label in enumerate(u_classes)}
    d_src = {label_: idx_ for idx_, label_ in enumerate(lbls2classes)}
# for idx, label in enumerate(u_classes):
#     d_cls[label] = idx
# for idx_, label_ in enumerate(lbls2classes):
#     d_src[label_] = idx_
# (!) populating d with lbls: class, class_idx
for key, value in lbls2classes.items():
    d[key] = [d_src[key], d_cls[value], lbls2classes[key]]
src_idxs = [d[lbl][0] for lbl in lbls] # (!) using d to generate class-lables for each image
cls_idxs = [d[lbl][1] for lbl in lbls] # (!) using d to generate class-lables for each image
src_idx_arr = np.array(src_idxs, dtype=int)
cls_idx_arr = np.array(cls_idxs, dtype=int)
# -
cls_idx_arr.shape
print(d)
print(src_idx_arr)
print(cls_idx_arr)
lbl2indexX = {}
# NOTE(review): folder_sourceX is defined in the NEXT cell; this line only
# works when the notebook cells are re-run out of order.
trnX = folder_sourceX(PATH, 'val', lbl2indexX)
def folder_sourceX(path, folder, d):
    """
    Returns the filenames and labels for a folder within a path.
    (!) Modified to accommodate multiple source folders for a single class, e.g. WT from Experiment 01 and 02.

    Returns:
    -------
    fnames: a list of the filenames within `folder`
    cls_idx_arr: a numpy array of the label indices in `u_classes` (indices per unique classes)
    u_classes: a list of unique classes (e.g. genotypes) present in all_lbls
    src_idx_arr: a numpy array of the label indices in 'all_lbls' (indices per labels in 'folder'), useful for normalization.
    all_lbls: a list of all of the labels in `folder`, where the # of labels is determined by the # of directories within `folder`
    """
    fnames, lbls, all_lbls = read_dirsX(path, folder)
    # Folder names look like '<experiment>_<genotype>'; the class is the genotype.
    # NOTE(review): assumes every label contains at least one underscore — confirm.
    lbls2classes = {l: l.split('_')[1] for l in all_lbls} # (!) dict mapping lbls (folders) to classes (genotype)
    u_classes = list(dict.fromkeys(list(lbls2classes.values()))) # (!) get unique classes
    # `d` is mutated in place the first time (empty dict passed in) so later
    # calls for other splits can reuse the same label->index mapping.
    if d == {}:
        d_cls = {label: idx for idx, label in enumerate(u_classes)}
        d_src = {label_: idx_ for idx_, label_ in enumerate(all_lbls)}
        # (!) populating d with lbls: class, class_idx
        for key, value in lbls2classes.items():
            d[key] = [d_src[key], d_cls[value], lbls2classes[key]]
    src_idxs = [d[lbl][0] for lbl in lbls] # (!) using d to generate class-lables for each image
    cls_idxs = [d[lbl][1] for lbl in lbls] # (!) using d to generate source-lables (which dataset) for each image
    src_idx_arr = np.array(src_idxs, dtype=int)
    cls_idx_arr = np.array(cls_idxs, dtype=int)
    # temp = [idxs.index(i) for i in range(len(all_lbls))]
    # for ii in temp: print(f"{idxs[ii]} maps to {fnames[ii]}")
    return fnames, cls_idx_arr, u_classes, src_idx_arr, all_lbls
# Re-run the loader against the v10.1 dataset to sanity-check the return value.
PATH = "datasets/yeast_v10.1/"
data_path = Path(PATH)
lbl2indexX = {}
trnX = folder_sourceX(PATH, 'test', lbl2indexX)
isinstance(trnX, tuple)  # displayed by the notebook; expected True
# ## data_inspect with data_loader
# +
# Plot the per-image channel means for one epoch of the training loader.
dl_ = data.trn_dl
batch_ = iter(dl_)
plt.style.use('seaborn-whitegrid')
fig = plt.figure()
ax = plt.subplot(111)
# Fix: the loop bound referenced the undefined name `dl` (NameError);
# the loader was assigned to `dl_` above.
for b in range(0, len(dl_)):
    x_, y_ = next(batch_)
    x_np = to_np(x_)
    y_np = to_np(y_)
    # Mean over the spatial axes -> one (channel0, channel1) point per image.
    im_means = np.mean(x_np, axis=(2, 3))
    ax.plot(im_means[:, 0], im_means[:, 1], 'o', color='C1', alpha=0.5)
| YNet_dev/01_Dev_and_testing_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:temp9]
# language: python
# name: conda-env-temp9-py
# ---
# # Basic objects
# A `striplog` depends on a hierarchy of objects. This notebook shows the objects and their basic functionality.
#
# - [Lexicon](#Lexicon): A dictionary containing the words and word categories to use for rock descriptions.
# - [Component](#Component): A set of attributes.
# - [Interval](#Interval): One element from a Striplog — consists of a top, base, a description, one or more Components, and a source.
#
# Striplogs (a set of `Interval`s) are described in [a separate notebook](Striplog_object.ipynb).
#
# Decors and Legends are also described in [another notebook](Display_objects.ipynb).
# +
import striplog
striplog.__version__
# If you get a lot of warnings here, just run it again.
# -
# <hr />
# ## Lexicon
from striplog import Lexicon
print(Lexicon.__doc__)
# Build the default lexicon shipped with striplog and inspect it.
lexicon = Lexicon.default()
lexicon
lexicon.synonyms
# Most of the lexicon works 'behind the scenes' when processing descriptions into `Rock` components.
lexicon.find_synonym('Halite')
# Expand mnemonic abbreviations in a rock description into full words.
s = "grysh gn ss w/ sp gy sh"
lexicon.expand_abbreviations(s)
# <hr />
# ## Component
# A set of attributes. All are optional.
from striplog import Component
print(Component.__doc__)
# We define a new rock with a Python `dict` object:
r = {'colour': 'grey',
     'grainsize': 'vf-f',
     'lithology': 'sand'}
rock = Component(r)
rock
# The Rock has a colour:
rock['colour']
# And it has a summary, which is generated from its attributes.
rock.summary()
# We can format the summary if we wish (`!u` upper-cases the field):
rock.summary(fmt="My rock: {lithology} ({colour}, {grainsize!u})")
# The formatting supports the usual `s`, `r`, and `a`:
#
# * `s`: `str`
# * `r`: `repr`
# * `a`: `ascii`
#
# Also some string functions:
#
# * `u`: `str.upper`
# * `l`: `str.lower`
# * `c`: `str.capitalize`
# * `t`: `str.title`
#
# And some numerical ones, for arrays of numbers:
#
# * `+` or `∑`: `np.sum`
# * `m` or `µ`: `np.mean`
# * `v`: `np.var`
# * `d`: `np.std`
# * `x`: `np.product`
# +
# A component whose attributes hold lists/arrays, to exercise the numeric
# format codes listed above.
x = {'colour': ['Grey', 'Brown'],
     'bogosity': [0.45, 0.51, 0.66],
     'porosity': [0.2003, 0.1998, 0.2112, 0.2013, 0.1990],
     'grainsize': 'VF-F',
     'lithology': 'Sand',
     }
X = Component(x)
# This is not working at the moment.
#fmt = 'The {colour[0]!u} {lithology!u} has a total of {bogosity!∑:.2f} bogons'
#fmt += 'and a mean porosity of {porosity!µ:2.0%}.'
fmt = 'The {lithology!u} is {colour[0]!u}.'
X.summary(fmt)
# -
X.json()
# We can compare rocks with the usual `==` operator:
rock2 = Component({'grainsize': 'VF-F',
                   'colour': 'Grey',
                   'lithology': 'Sand'})
rock == rock2
rock
# In order to create a Component object from text, we need a lexicon to compare the text against. The lexicon describes the language we want to extract, and what it means.
rock3 = Component.from_text('Grey fine sandstone.', lexicon)
rock3
# Components support double-star-unpacking:
"My rock: {lithology} ({colour}, {grainsize})".format(**rock3)
# <hr />
# ## Position
#
# Positions define points in the earth, like a top, but with uncertainty. You can define:
#
# * `upper` — the highest possible location
# * `middle` — the most likely location
# * `lower` — the lowest possible location
# * `units` — the units of measurement
# * `x` and `y` — the _x_ and _y_ location (these don't have uncertainty, sorry)
# * `meta` — a Python dictionary containing anything you want
#
# Positions don't have a 'way up'.
from striplog import Position
print(Position.__doc__)
# +
# A position with explicit uncertainty bounds plus free-form metadata.
params = {'upper': 95,
          'middle': 100,
          'lower': 110,
          'meta': {'kind': 'erosive', 'source': 'DOE'}
          }
p = Position(**params)
p
# -
# Even if you don't give a `middle`, you can always get `z`: the central, most likely position:
params = {'upper': 75, 'lower': 85}
p = Position(**params)
p
p.z
# <hr />
# ## Interval
#
# Intervals are where it gets interesting. An interval can have:
#
# * a top
# * a base
# * a description (in natural language)
# * a list of `Component`s
#
# Intervals don't have a 'way up', it's implied by the order of `top` and `base`.
from striplog import Interval
print(Interval.__doc__)
# I might make an `Interval` explicitly from a Component...
Interval(10, 20, components=[rock])
# ... or I might pass a description and a `lexicon` and Striplog will parse the description and attempt to extract structured `Component` objects from it.
Interval(20, 40, "Grey sandstone with shale flakes.", lexicon=lexicon).__repr__()
# Notice I only got one `Component`, even though the description contains a subordinate lithology. This is the default behaviour, we have to ask for more components:
interval = Interval(20, 40, "Grey sandstone with black shale flakes.", lexicon=lexicon, max_component=2)
print(interval)
# `Interval`s have a `primary` attribute, which holds the first component, no matter how many components there are.
interval.primary
# Ask for the summary to see the thickness and a `Rock` summary of the primary component. Note that the format code only applies to the `Rock` part of the summary.
interval.summary(fmt="{colour} {lithology}")
# We can change an interval's properties:
interval.top = 18
interval
interval.top
# <hr />
#
# ## Comparing and combining intervals
# +
# Fixture intervals: i1–i5 use depth ordering (top < base), i6–i8 use
# elevation ordering (top > base).
# Depth ordered
i1 = Interval(top=61, base=62.5, components=[Component({'lithology': 'limestone'})])
i2 = Interval(top=62, base=63, components=[Component({'lithology': 'sandstone'})])
i3 = Interval(top=62.5, base=63.5, components=[Component({'lithology': 'siltstone'})])
i4 = Interval(top=63, base=64, components=[Component({'lithology': 'shale'})])
i5 = Interval(top=63.1, base=63.4, components=[Component({'lithology': 'dolomite'})])
# Elevation ordered
i8 = Interval(top=200, base=100, components=[Component({'lithology': 'sandstone'})])
i7 = Interval(top=150, base=50, components=[Component({'lithology': 'limestone'})])
i6 = Interval(top=100, base=0, components=[Component({'lithology': 'siltstone'})])
# -
i2.order
# **Technical aside:** The `Interval` class is a `functools.total_ordering`, so providing `__eq__` and one other comparison (such as `__lt__`) in the class definition means that instances of the class have implicit order. So you can use `sorted` on a Striplog, for example.
#
# It wasn't clear to me whether this should compare tops (say), so that '>' might mean 'above', or if it should be keyed on thickness. I chose the former, and implemented other comparisons instead.
print(i3 == i2) # False, they don't have the same top
print(i1 > i4) # True, i1 is above i4
print(min(i1, i2, i5).summary()) # 0.3 m of dolomite
i2 > i4 > i5 # True
# We can combine intervals with the `+` operator. (However, you cannot subtract intervals.)
i2 + i3
# Adding a rock adds a (minor) component and adds to the description.
interval + rock3
# Spatial relationships between intervals:
i6.relationship(i7), i5.relationship(i4)
print(i1.partially_overlaps(i2)) # True
print(i2.partially_overlaps(i3)) # True
print(i2.partially_overlaps(i4)) # False
print()
print(i6.partially_overlaps(i7)) # True
print(i7.partially_overlaps(i6)) # True
print(i6.partially_overlaps(i8)) # False
print()
print(i5.is_contained_by(i3)) # True
print(i5.is_contained_by(i4)) # True
print(i5.is_contained_by(i2)) # False
# Merging returns a striplog; adjust the base of its last interval.
x = i4.merge(i5)
x[-1].base = 65
x
# Set-like operations on overlapping intervals:
i1.intersect(i2, blend=False)
i1.intersect(i2)
i1.union(i3)
i3.difference(i5)
# <hr />
#
# <p style="color:gray">©2015 Agile Geoscience. Licensed CC-BY. <a href="https://github.com/agile-geoscience/striplog">striplog.py</a></p>
| tutorial/Basic_objects.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Introduction to Statistical Learning
# ### introdução
# ### Capítulo: Statistical Learning
# #### error irredutível - irreducible error
# nossa previsão
# ainda teria algum erro nele! Isso ocorre porque Y também é uma função de
# , que, por definição, não pode ser previsto usando X. Portanto, a variabilidade
# associado também afeta a precisão de nossas previsões.
# ## Suporte e Decisão
# # Clusters: Verdadeiro ou Falso
# ## Os algoritmos hierárquicos single e complet linkage tem complexidade computacional O(n) e, por este motivo, são aconselhados para grandes quantidades de dados.
# ### Falso
# Para mínima árvore de decisão mínima, ele tem complexidade Operacional indicado maior que indicado na questão a partir pela ordem de N²
# ## O algoritmo Bisecting K-Means é um exemplo de algoritmo hierárquico do tipo top-down (divisivos).
# ### Verdadeiro
# O algoritmo Bisecting k-means consiste em uma variação hierárquica do algoritmo k-means, que em cada iteração, seleciona um grupo e o divide, de forma a gerar umahierarquia [Fontana e Naldi, 2009].
# ## Sob a perspectiva de otimização, o algoritmo K-Means pode ser escrito como um minimizador da distância intra-cluster.
# ## O algoritmo K-Medians é menos sensível a outliers e tem menor complexidade computacional que o algoritmo K-Means.
# ## A distância Manhattan (ou city-block) tem a mesma formulação da distância Minkowski com grau 2 (p=2).
# ## A distância Mahalanobis tem um alto custo computacional devido o cálculo da inversa da matriz de covariância.
# ## No algoritmo DBScan, o resultado final é pouco sensível aos seus hiper-parâmetros (i)raio e (ii)número mínimo de vizinhos.
# ## os algoritmos baseados em densidade, o conceito de partição rígida não é obedecido devido à detecção de outliers que não são agrupados.
# ## A soma das distâncias dos pontos para seus respectivos centroides pode ser utilizada para determinar o número de clusters (este critério é popularmente conhecido como joelho ou cotovelo).
# ## A métrica "Silhueta Simplificada" tem complexidade computacional linear e é uma boa aproximação para a métrica "Silhueta".
| conceitos_ML&Stastics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:pDL] *
# language: python
# name: conda-env-pDL-py
# ---
# # Extending PyTorch differentiable functions
#
# In this notebook you'll see how to add your custom differentiable function for which you need to specify `forward` and `backward` passes.
# Import some libraries
import torch
import numpy
# For a gentle introduction see [PyTorch extension](https://pytorch.org/docs/stable/notes/extending.html) tutorial.
#
# Source for `torch.autograd.Function` available [here](https://github.com/pytorch/pytorch/blob/master/torch/autograd/function.py).
# These are the two that we have to override:
#
# ```python
# @staticmethod
# def forward(ctx, *args, **kwargs):
# """Performs the operation.
# This function is to be overridden by all subclasses.
# It must accept a context ctx as the first argument, followed by any
# number of arguments (tensors or other types).
# The context can be used to store tensors that can be then retrieved
# during the backward pass.
# """
# raise NotImplementedError
#
# @staticmethod
# def backward(ctx, *grad_outputs):
# """Defines a formula for differentiating the operation.
# This function is to be overridden by all subclasses.
# It must accept a context :attr:`ctx` as the first argument, followed by
# as many outputs did :func:`forward` return, and it should return as many
# tensors, as there were inputs to :func:`forward`. Each argument is the
# gradient w.r.t the given output, and each returned value should be the
# gradient w.r.t. the corresponding input.
# The context can be used to retrieve tensors saved during the forward
# pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple
# of booleans representing whether each input needs gradient. E.g.,
# :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the
# first input to :func:`forward` needs gradient computated w.r.t. the
# output.
# """
# raise NotImplementedError
# ```
# Custom addition module
class MyAdd(torch.autograd.Function):
    """Differentiable elementwise addition with an explicit backward pass."""

    @staticmethod
    def forward(ctx, x1, x2):
        # Stash the inputs so backward() can recover their shapes.
        ctx.save_for_backward(x1, x2)
        return x1 + x2

    @staticmethod
    def backward(ctx, grad_output):
        a, b = ctx.saved_tensors
        # d(a+b)/da = d(a+b)/db = 1, so each input receives the upstream
        # gradient times a tensor of ones shaped like itself.
        grad_a = grad_output * torch.ones_like(a)
        grad_b = grad_output * torch.ones_like(b)
        # Gradients must be returned in the same order as forward()'s
        # inputs (excluding ctx).
        return grad_a, grad_b
# Let's try out the addition module
# Exercise MyAdd end-to-end: forward, reduce to a scalar, backpropagate.
x1 = torch.randn((3), requires_grad=True)
x2 = torch.randn((3), requires_grad=True)
print(f'x1: {x1}')
print(f'x2: {x2}')
myadd = MyAdd.apply # aliasing the apply method
y = myadd(x1, x2)
print(f' y: {y}')
z = y.mean()
print(f' z: {z}, z.grad_fn: {z.grad_fn}')
z.backward()
# Each gradient entry is 1/3: the mean distributes 1/len over the sum.
print(f'x1.grad: {x1.grad}')
print(f'x2.grad: {x2.grad}')
# Custom split module
class MySplit(torch.autograd.Function):
    """Fan a tensor out into two identical copies; gradients from both
    branches are summed on the way back."""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        # Clone so each branch owns independent storage.
        first = x.clone()
        second = x.clone()
        return first, second

    @staticmethod
    def backward(ctx, grad_x1, grad_x2):
        x = ctx.saved_tensors[0]
        print(f'grad_x1: {grad_x1}')
        print(f'grad_x2: {grad_x2}')
        # A value used twice accumulates the gradient from both uses.
        return grad_x1 + grad_x2
# Let's try out the split module
# Exercise MySplit: both branches feed into y, so x.grad is 1+1 = 2 per
# element before the mean's 1/len scaling.
x = torch.randn((4), requires_grad=True)
print(f' x: {x}')
split = MySplit.apply
x1, x2 = split(x)
print(f'x1: {x1}')
print(f'x2: {x2}')
y = x1 + x2
print(f' y: {y}')
z = y.mean()
print(f' z: {z}, z.grad_fn: {z.grad_fn}')
z.backward()
print(f' x.grad: {x.grad}')
# Custom max module
class MyMax(torch.autograd.Function):
    """Global max, computed via NumPy to demonstrate a non-torch forward."""

    @staticmethod
    def forward(ctx, x):
        # example where we explicitly use non-torch code
        peak = x.detach().numpy().max()
        # Mask of the entries equal to the max. NOTE: with ties, every
        # maximal entry receives the full upstream gradient (torch.max
        # would route it to a single winner).
        mask = x.detach().eq(peak).float()
        ctx.save_for_backward(mask)
        return torch.tensor(peak)

    @staticmethod
    def backward(ctx, grad_output):
        mask = ctx.saved_tensors[0]
        return grad_output * mask
# Let's try out the max module
# Exercise MyMax: the gradient lands only on the argmax element(s).
x = torch.randn((5), requires_grad=True)
print(f'x: {x}')
mymax = MyMax.apply
y = mymax(x)
print(f'y: {y}, y.grad_fn: {y.grad_fn}')
y.backward()
print(f'x.grad: {x.grad}')
| PyTorch_examples/pytorch-Deep-Learning-master/extra/b-custom_grads.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Ganymede 1.1.0.20210614 (Java 11)
// language: java
// name: ganymede-1.1.0.20210614-java-11
// ---
// +
import java.util.stream.DoubleStream;
var x =
DoubleStream.concat(DoubleStream.iterate(- Math.PI, t -> t < Math.PI, t -> t + 0.01),
DoubleStream.of(Math.PI))
.toArray();
var cosx = DoubleStream.of(x).map(t -> Math.cos(t)).toArray();
var sinx = DoubleStream.of(x).map(t -> Math.sin(t)).toArray();
// -
// # JFreeChart
//
// https://zetcode.com/java/jfreechart/
//
// https://www.jfree.org/jfreechart/api/javadoc/
// %%pom
---
dependencies:
- org.jfree:jfreechart:1.5.3
// +
import java.awt.BasicStroke;
import java.awt.Color;
import org.jfree.chart.ChartFactory;
import org.jfree.chart.JFreeChart;
import org.jfree.chart.plot.PlotOrientation;
import org.jfree.chart.plot.XYPlot;
import org.jfree.chart.renderer.xy.XYLineAndShapeRenderer;
import org.jfree.data.xy.XYDataItem;
import org.jfree.data.xy.XYDataset;
import org.jfree.data.xy.XYSeries;
import org.jfree.data.xy.XYSeriesCollection;
var sin = new XYSeries("sin");
for (int i = 0; i < x.length; i += 1) {
sin.add(new XYDataItem(x[i], sinx[i]));
}
// Fill the cosine series. BUG FIX: this loop previously called
// `sin.add(...)`, so the "cos" series stayed empty and the sine series
// held both curves' points.
XYSeries cos = new XYSeries("cos");
for (var i = 0; i < x.length; i += 1) {
    cos.add(new XYDataItem(x[i], cosx[i]));
}
var collection = new XYSeriesCollection();
collection.addSeries(sin);
collection.addSeries(cos);
var jfreechart =
ChartFactory.createXYLineChart("Trig", "X", "Y", collection,
PlotOrientation.VERTICAL, true, true, false);
var plot = jfreechart.getXYPlot();
var renderer = new XYLineAndShapeRenderer();
plot.setRenderer(renderer);
plot.setBackgroundPaint(Color.gray);
plot.setRangeGridlinesVisible(true);
plot.setDomainGridlinesVisible(true);
renderer.setSeriesPaint(0, Color.RED);
renderer.setSeriesStroke(0, new BasicStroke(2.0f));
renderer.setSeriesPaint(1, Color.BLUE);
renderer.setSeriesStroke(1, new BasicStroke(2.0f));
print(jfreechart);
// -
// # Tablesaw
//
// https://jtablesaw.github.io/tablesaw/userguide/Introduction_to_Plotting
// %%pom
---
dependencies:
- tech.tablesaw:tablesaw-jsplot:0.38.2
// +
import tech.tablesaw.api.DoubleColumn;
import tech.tablesaw.api.Table;
import tech.tablesaw.plotly.components.Figure;
import tech.tablesaw.plotly.api.LinePlot;
var table =
Table.create(DoubleColumn.create("x", x),
DoubleColumn.create("sinx", sinx),
DoubleColumn.create("cosx", cosx));
print(LinePlot.create("Trig", table, "x", "sinx"));
print(LinePlot.create("Trig", table, "x", "cosx"));
/*
* Note: Javascript will not be visible in GitHub's static HTML rendering.
*/
// -
// # XChart
//
// https://knowm.org/open-source/xchart/xchart-example-code/
// %%pom
dependencies:
- org.knowm.xchart:xchart:3.8.0
// +
import org.knowm.xchart.XYChart;
import org.knowm.xchart.XYChartBuilder;
var xchart = new XYChartBuilder().title("Trig").build();
xchart.addSeries("sin", x, sinx);
xchart.addSeries("cos", x, cosx);
print(xchart);
| trig.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import torch
from scipy.io import loadmat
import matplotlib.pylab as plt
# DeepMoD stuff
from deepymod_torch import DeepMoD
from deepymod_torch.model.func_approx import NN
from deepymod_torch.model.library import Library1D
from deepymod_torch.model.constraint import LeastSquares
from deepymod_torch.model.sparse_estimators import Threshold, PDEFIND
from deepymod_torch.training import train_split_full
from deepymod_torch.training.sparsity_scheduler import TrainTestPeriodic
if torch.cuda.is_available():
device ='cuda'
else:
device = 'cpu'
# Settings for reproducibility
np.random.seed(42)
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# -
# Load the measured traces for electrodes V2..V15; column 0 of each file is
# the time axis, the remaining column(s) hold the measured values.
_v2_runs = [
    np.loadtxt(f'data/data_17_09/V1_V{i}/data_V{i}.csv', delimiter=',', skiprows=3)
    for i in range(2, 16)
]
# Keep the individual per-electrode names: later plotting cells use them.
(V2_2, V2_3, V2_4, V2_5, V2_6, V2_7, V2_8, V2_9,
 V2_10, V2_11, V2_12, V2_13, V2_14, V2_15) = _v2_runs
# Concatenate the data columns of all runs side by side (time column dropped).
delta_V = np.concatenate([run[:, 1:] for run in _v2_runs], axis=1)
#delta_V_1 = np.concatenate((V10_2[:, 1:], V10_3[:, 1:], V10_4[:, 1:], V10_5[:, 1:], V10_6[:, 1:], V10_7[:, 1:], V10_8[:, 1:], V10_9[:, 1:], V10_10[:, 1:]), axis=1)
delta_V.shape
#plt.figure(figsize=(6, 6))
#plt.subplot(121)
plt.contourf(delta_V[:,:])
plt.figure(figsize=(9, 6))
plt.plot(V2_2[560:2000, 1])
plt.plot(V2_3[560:2000, 1])
plt.plot(V2_4[560:2000, 1])
plt.plot(V2_5[560:2000, 1])
plt.plot(V2_6[560:2000, 1])
plt.plot(V2_7[560:2000, 1])
plt.plot(V2_8[560:2000, 1])
plt.plot(V2_9[560:2000, 1])
plt.plot(V2_10[560:2000, 1])
plt.plot(V2_11[560:2000, 1])
plt.plot(V2_12[560:2000, 1])
#plt.plot(time, V2_4[:, 1])
plt.plot(V2_2[560:3000, 1])
plt.plot(V2_3[560:3000, 1])
plt.plot(V2_4[560:3000, 1])
output_data = delta_V[560:2000,:].T
output_data.shape
plt.contourf(output_data)
x = np.linspace(0, 1, output_data.shape[0])
t = np.linspace(0, 1, output_data.shape[1])
x_grid, t_grid = np.meshgrid(x, t, indexing='ij')
X = np.transpose((t_grid.flatten(), x_grid.flatten()))
y = np.real(output_data).reshape((output_data.size, 1))
y = y/np.max(y)
# +
number_of_samples = 2500
idx = np.random.permutation(y.shape[0])
X_train = torch.tensor(X[idx, :][:number_of_samples], dtype=torch.float32, requires_grad=True)
y_train = torch.tensor(y[idx, :][:number_of_samples], dtype=torch.float32)
# -
network = NN(2, [30, 30, 30, 30, 30, 30, 30], 1)
library = Library1D(poly_order=1, diff_order=2) # Library function
estimator = PDEFIND(lam=1e-4) # Sparse estimator
constraint = LeastSquares() # How to constrain
model = DeepMoD(network, library, estimator, constraint).to(device) # Putting it all in the model
sparsity_scheduler = TrainTestPeriodic(periodicity=50, patience=8, delta=1e-5) # in terms of write iterations
optimizer = torch.optim.Adam(model.parameters(), betas=(0.99, 0.99), amsgrad=True, lr=1e-3) # Defining optimizer
train_split_full(model, X_train, y_train, optimizer,sparsity_scheduler, log_dir='runs/exp_data_3/', split=0.8, test='full', write_iterations=25, max_iterations=100000, delta=1e-5, patience=20)
train_split_full(model, X_train, y_train, optimizer,sparsity_scheduler, log_dir='runs/exp_data_4/', split=0.8, test='full', write_iterations=25, max_iterations=100000, delta=1e-5, patience=20)
train_split_full(model, X_train, y_train, optimizer,sparsity_scheduler, log_dir='runs/exp_data_5/', split=0.8, test='full', write_iterations=25, max_iterations=100000, delta=1e-5, patience=20)
| paper/Cable_equation/old/testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.10 64-bit (''pygeostat_dev'': conda)'
# name: python361064bitpygeostatdevconda214fe2f8e703488293b6f095560eab08
# ---
# # Importing and Exporting GSLIB (GEO-EAS) Files
# - categories: [Jupyter, GSLIB, Pandas, Geostatistics, Python]
# - comments: true
# Though a bit dated, GSLIB remains the standard in many geostatistical workflows; unfortunately, the GSLIB data format can be a bit of a hassle.
# The standard GSLIB aka GEO-EAS data format as described on [gslib.com](http://www.gslib.com/gslib_help/format.html):
#
# > * The first line in the file is taken as a title and is possibly transferred to output files.
# > * The second line should be a numerical value specifying the number of numerical variables nvar in the data file.
# > * The next nvar lines contain character identification labels and additional text (optional) that describe each variable.
# > * The following lines, from nvar+3 until the end of the file, are considered as data points and must have nvar numerical values per line. Missing values are typically considered as large negative or positive numbers (e.g., less than -1.0e21 or greater than 1.0e21). The number of data will be the number of lines in the file minus nvar+2 minus the number of missing values. The programs read numerical values and not alphanumeric characters; alphanumeric variables may be transformed to integers or the source code modified.
#
#
#
#
# The header is informative, but not convenient for importing into Pandas.
# It should be noted that line #2 in the header can often contain grid definition information in addition to ncols, and in the case of multiple simulations nsim is commonly given after the grid definition (this is overlooked in the read/write functions to follow).
#
# The goal here is just to provide a couple simple functions to save a little time for anyone who needs to do this.
#
# ## Reading GSLIB data
#
# Importing GSLIB data really happens in 2 steps.
# 1. read the header
# 2. read all the data to a dataframe.
#
# > side note: I've found `skip_rows` and `delim_whitespace` are useful when it comes to reading ASCII data from other scientific software (MODFLOW, PEST, TOUGH2 etc.)
#hide
import pandas as pd
def read_gslib(filename: str) -> pd.DataFrame:
    """Read a GSLIB (GEO-EAS) formatted file into a pandas DataFrame.
    Header layout: line 1 is a free-text title, line 2 begins with the
    number of variables ``nvar``, the next ``nvar`` lines are the column
    labels, and every following line is a whitespace-separated data row.
    """
    with open(filename, "r") as f:
        lines = f.readlines()
    # first token of line 2 is nvar; anything after it (e.g. a grid
    # definition) is ignored here
    ncols = int(lines[1].split()[0])
    # one label line per variable (kept verbatim, including optional text)
    col_names = [lines[i + 2].strip() for i in range(ncols)]
    # sep=r"\s+" replaces the deprecated delim_whitespace=True (same behavior)
    df = pd.read_csv(filename, skiprows=ncols + 2, sep=r"\s+", names=col_names)
    return df
df = read_gslib(filename="data/example.dat")
df.head()
# Now go about your business analyzing data, making plots and doing all the other things python does well until you need re-export to GSLIB to run specific Geostatistical algorithm.
# ## Writing a Pandas DataFrame to GSLIB Format
#
# As with reading in the data, I'm sure there are a number of ways this can be done. Below is one rather simple approach where I write the header, then iterate over each row as a tuple.
#
# > If speed is a consideration when iterating over a pandas DataFrame use `.itertuples` its noticeably faster than `.iterrows`.
def write_gslib(df: pd.DataFrame, filename: str, title: str = "GSLIB Example Data"):
    """Write *df* to *filename* in GSLIB (GEO-EAS) format.
    The title line is configurable (it was previously hard-coded). Every
    value is written with three decimals, so the frame is assumed numeric.
    """
    with open(filename, "w") as f:
        f.write(f"{title}\n")
        f.write(f"{len(df.columns)}\n")
        f.write("\n".join(df.columns) + "\n")
        # .itertuples is noticeably faster than .iterrows; row[0] is the
        # index, so the data starts at row[1]
        for row in df.itertuples():
            row_data = "\t".join(f"{i:.3f}" for i in row[1:])
            f.write(f"{row_data}\n")
write_gslib(df, "data/exported_data.dat")
# Now, just have a quick look at the file to be sure it's correct:
# + tags=[]
with open("data/exported_data.dat","r") as f:
for i in range(10):
print(f.readline().strip())
# -
# Really the whole purpose here is to have these functions readily available to copy/paste when you need them.
| _notebooks/2020-09-14-pandas-and-gslib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Run interventions in loop with OpenABM
# +
import sys
sys.path.insert(0,'./src/')
sib_folder = "../sib" # sib path
abm_folder = "../OpenABM-Covid19/src" #Open ABM path
sys.path.insert(0,sib_folder)
sys.path.insert(0,abm_folder)
from pathlib import Path
import log, logging
from importlib import reload
import loop_abm, abm_utils, scipy
#logging
output_dir = "./output/"
fold_out = Path(output_dir)
if not fold_out.exists():
fold_out.mkdir(parents=True)
reload(log)
logger = log.setup_logger()
num_threads = 40 #number of threads used for sib
# -
# ## Epidemic model
#
# #### Set the parameters of the OpenABM forward simulation
# +
N=50000 #Number of individuals
T=100 #Total time of simulations
seed = 1 #seed of the random number generator
n_seed_infection = 10 #number of patient zero
params_model = {
"rng_seed" : seed,
"end_time" : T,
"n_total" : N,
"days_of_interactions" : T,
"n_seed_infection" : n_seed_infection,
}
# -
# ## Set testing and quarantine rules
fraction_SM_obs = 0.5 #fraction of Symptomatic Mild tested positive
fraction_SS_obs = 1 #fraction of Symptomatic Severe tested positive
initial_steps = 12 #starting time of intervention
quarantine_HH = True #Households quarantine
test_HH = True #Tests the households when quarantined
adoption_fraction = 1 #app adoption (fraction)
num_test_random = 0 #number of random tests per day
num_test_algo = 200 #number of tests using by the ranker per day
fp_rate = 0.0 #test false-positive rate
fn_rate = 0.0 #test false-negative rate
# ## Choose the rankers algorithms class
# +
from rankers import dotd_rank, tracing_rank, mean_field_rank, sib_rank
import sib
prob_seed = 1/N
prob_sus = 0.55
pseed = prob_seed / (2 - prob_seed)
psus = prob_sus * (1 - pseed)
pautoinf = 1/N
dotd = dotd_rank.DotdRanker()
tracing = tracing_rank.TracingRanker(
tau=5,
lamb=0.014
)
MF = mean_field_rank.MeanFieldRanker(
tau = 5,
delta = 10,
mu = 1/30,
lamb = 0.014
)
sib.set_num_threads(num_threads)
sib_ranker = sib_rank.SibRanker(
params = sib.Params(
# faster implementation
#prob_i = sib.Cached(sib.Scaled(sib.PDF(sib.Gamma(k=5.76, mu=0.96)), scale=0.25), T+1),
#prob_r = sib.Cached(sib.Gamma(k = 10, mu = 1/1.7452974337097158), T+1),
prob_i = sib.PiecewiseLinear(sib.RealParams(list(0.25*abm_utils.gamma_pdf_array(T+1,6,2.5)))),
prob_r = sib.PiecewiseLinear(sib.RealParams(list(scipy.stats.gamma.sf(range(T+1), 10., scale=1.7452974337097158)))),
pseed = pseed,
psus = psus,
fp_rate = fp_rate,
fn_rate = fn_rate,
pautoinf = pautoinf),
maxit0 = 20,
maxit1 = 20,
tol = 1e-3,
memory_decay = 1e-5,
window_length = 21,
tau=7
)
ress = {}
# -
rankers = {
"RG" : dotd,
"CT": tracing,
"SMF" : MF,
"BP": sib_ranker,
}
# ### Set up figure
# +
# %matplotlib widget
import matplotlib.pyplot as plt
import plot_utils
import time
plots = plot_utils.plot_style(N, T)
save_path_fig = f"./output/plot_run_N_{N}_SM_{fraction_SM_obs}_test_{num_test_algo}_n_seed_infection_{n_seed_infection}_seed_{seed}_fp_{fp_rate}_fn_{fn_rate}.png"
fig, callback = plot_utils.plotgrid(rankers, plots, initial_steps, save_path=save_path_fig)
time.sleep(0.5)
fig.canvas
# +
import importlib  # `imp` is deprecated since Python 3.4 and removed in 3.12
importlib.reload(loop_abm)
for s in rankers:
data = {"algo":s}
loop_abm.loop_abm(
params_model,
rankers[s],
seed=seed,
logger = logging.getLogger(f"iteration.{s}"),
data = data,
callback = callback,
initial_steps = initial_steps,
num_test_random = num_test_random,
num_test_algo = num_test_algo,
fraction_SM_obs = fraction_SM_obs,
fraction_SS_obs = fraction_SS_obs,
quarantine_HH = quarantine_HH,
test_HH = test_HH,
adoption_fraction = adoption_fraction,
fp_rate = fp_rate,
fn_rate = fn_rate,
name_file_res = s + f"_N_{N}_T_{T}_obs_{num_test_algo}_SM_obs_{fraction_SM_obs}_seed_{seed}"
)
ress[s] = data
# saves a bit of memory: rankers[s] = {}
# -
# ## Results
# [see complete results in the output folder]
# %matplotlib inline
to_plot = "I"
for s in ress.keys():
plt.plot(ress[s][to_plot], label = s)
plt.semilogy()
plt.ylabel("Infected")
plt.xlabel("days")
plt.legend()
plt.show()
| epi_mitigation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Methods & Results
# We are going to use multiple analysis to classify the type of the animals using 16 variables including hair, feathers, eggs, milk, airborne, aquatic, predator, toothed, backbone, breathes, venomous, fins, legs, tail, domestic, catsize as our predictors. To predict the class of a new observation, the algorithms of each type will be further explained before implementation.
# + tags=["remove-cell"]
# NOTE(review): removed a stray no-op dict literal ({"tags": ["hide-cell"]})
# that was cell metadata accidentally pasted into the code cell; it had no
# effect at runtime.
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append( '..' )
from src.pre_processing import *
from src.train_and_predict_model import *
from src.line_plot import *
from src.para_optimize import *
from src.std_acc import *
from src.line_plot import *
# -
# The first thing is to import the data. The data set is downloaded from [UCI repository]("https://archive-beta.ics.uci.edu/ml/datasets/zoo"). It is then saved as a csv file in this project repository. Some exploratory data analysis needs to be run before running the actual analyses on the data set. Here is a preview of pre-processed data set:
# + tags=["remove-input"]
# (removed a stray no-op metadata dict literal that was pasted into this cell)
# Preview of the pre-processed data set.
zoo_data = pd.read_csv("../results/csv/head.csv")
zoo_data.columns.values[0] = "index"  # name the first (unnamed) column
zoo_data
# -
# It is checked that there aren't missing values in the data set, we can clearly deduce that the data set is clean according to the data summary we generated above. Since most features are binary and categorical, there is no need to do normalization and standardization.
# ```{figure-md} f1
# <img src="../results/figures/fig1.png" alt="num" class="bg-primary mb-1" width="800px">
#
# A summary table of the data set
# ```
# As shown in [fig.1](f1), the histograms of each feature are generated. The ones with skewed distribution might be more decisive in the prediction. However, since the data set is relatively small, all the features except the `animalName` are going to be used to predict. In the next part, we are going to split the data, into the training set and testing set. After that, different classification models will be trained and evaluated.
# ## Classification
# Now we will use the training set to build an accurate model, whereas the testing set is used to report the accuracy of the models. Here is a list of algorithms we will use in the following section:
#
# - K Nearest Neighbor(KNN)
# - Decision Tree
# - Support Vector Machine
# - Logistic Regression
# To train and evaluate each model, we split the dataset into training and testing sets. We use 80% of the total data to train the models, and the rest of the data is aimed to test the models.
# + [markdown] tags=[]
# ### KNN
# KNN captures the idea of similarity (sometimes called distance, proximity, or closeness) with some basic mathematics we might have learned earlier. Basically in terms of geometry we can always calculate the distance between points on a graph. Similarly, using KNN we can group similar points together and predict the target with our feature variables(x).
#
# First of all, we have to train the model for different set of K values and finding the best K value.
# Then we want to plot the accuracy for different K values.
# -
# ```{figure-md} f2
# <img src="../results/figures/k_accuracy.png" alt="num" class="bg-primary mb-1" width="500px">
#
# A plot reveals the relationship between K and corresponding accuracy
# ```
# As shown in [fig.2](f2), less K values provide higher accuracy. To find the best K value, we tuned the hyperparameter using GridSearch algorithm. After tuning, the best K value is 1.
# ### KNN final model & Evaluation
# After fitting the model using `K=1`, we evaluate the KNN model by Cross Validation and calculating the precision, recall, f1-score and support.
# KNN Cross Validation Result:
# + tags=["remove-input"]
# (removed a stray no-op metadata dict literal that was pasted into this cell)
# KNN cross-validation scores, precomputed by the analysis scripts.
knn_cross_validate_result = pd.read_csv("../results/csv/knn_cross_validate_result.csv")
knn_cross_validate_result.columns = ["criteria", "score"]
knn_cross_validate_result
# -
# KNN Classification Report:
# + tags=["remove-input"]
# (removed a stray no-op metadata dict literal that was pasted into this cell)
# KNN per-class precision/recall/f1/support report.
knn_classification_report = pd.read_csv("../results/csv/knn_classification_report.csv")
knn_classification_report.columns.values[0] = "index"  # name the first column
knn_classification_report
# + [markdown] tags=["remove-input"]
# ### Decision Tree
# A decision tree is a decision support tool that uses a tree-like model of decisions and their possible consequences, including chance event outcomes, resource costs, and utility The goal of using a Decision Tree is to create a training model that can use to predict the class or value of the target variable by learning simple decision rules inferred from prior data(training data).
# -
# ```{figure-md} f3
# <img src="../results/figures/dt_accuracy.png" alt="num" class="bg-primary mb-1" width="500px">
#
# A plot reveals the relationship between deepth and corresponding accuracy
# ```
# As shown in the [fig.3](f3), the best depth of the Decision Tree is around small. We can confirm that the best value of the depth is 5 after tuning the hyperparameter and calculating the accuracy.
# ### Decision Tree final model & evaluation
# After training the model, we obtain the Cross Validation score, as well as the precision, recall, f1-score and support.
# DT Cross Validation Result:
# + tags=["remove-input"]
# (removed a stray no-op metadata dict literal that was pasted into this cell)
# Decision-tree cross-validation scores.
dt_cross_validate_result = pd.read_csv("../results/csv/dt_cross_validate_result.csv")
dt_cross_validate_result.columns = ["criteria", "score"]
dt_cross_validate_result
# -
# DT Cross Validation Result:
# + tags=["remove-input"]
# (removed a stray no-op metadata dict literal that was pasted into this cell)
# Decision-tree per-class precision/recall/f1/support report.
dt_classification_report = pd.read_csv("../results/csv/dt_classification_report.csv")
dt_classification_report.columns.values[0] = "index"  # name the first column
dt_classification_report
# -
# ### Support Vector Machine
# SVM or Support Vector Machine is a linear model for classification and regression problems. It can solve linear and non-linear problems and work well for many practical problems. The idea of SVM is simple: The algorithm creates a line or a hyperplane which separates the data into classes{cite:p}`towards-dsci`.
#
# Final SVM is here used the splited test part to train again for better training, and better prediction. An svm evaluation as well as the final model is also provided below.
# ### SVM training model Jaccard Score, final model and evaluation
# SVM Classification Report:
# + tags=["remove-input"]
# (removed a stray no-op metadata dict literal that was pasted into this cell)
# SVM per-class precision/recall/f1/support report.
svm_classification_report = pd.read_csv("../results/csv/svm_classification_report.csv")
svm_classification_report.columns.values[0] = "index"  # name the first column
svm_classification_report
# -
# ### Logistic Regression
# Logistic Regression is a "Supervised machine learning" algorithm that can be used to model the probability of a certain class or event. It is used when the data is linearly separable and the outcome is binary or dichotomous in nature. That means Logistic regression is usually used for Binary classification problems{cite:p}`ibm-dsci`.
# ### Logistic Regression training model Jaccard Score, final model and evaluation
#
# LR Classification Report:
# + tags=["remove-input"]
# (removed a stray no-op metadata dict literal that was pasted into this cell)
# Logistic-regression per-class precision/recall/f1/support report.
lr_classification_report = pd.read_csv("../results/csv/lr_classification_report.csv")
lr_classification_report.columns.values[0] = "index"  # name the first column
lr_classification_report
# -
| analysis/_build/jupyter_execute/analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h2> ======================================================</h2>
# <h1>MA477 - Theory and Applications of Data Science</h1>
# <h1>Lesson 15: Tokenization, Speech Tagging, Chunking</h1>
#
# <h4>Dr. <NAME></h4>
# United States Military Academy, West Point
# AY20-2
# <h2>======================================================</h2>
# <h2>Lecture Outline</h2>
#
# <ul>
# <li>Tokenization</li>
# <li> Normalizing</li>
# <li>Tagging Part of Speech</li>
# <li>Chunking</li>
#
#
# </ul>
import nltk
# <h2>Tokenization</h2>
#
# Tokenization is the process of breaking a text down into words or sentences. When dealing with text, typically they don't come already broken down into words or sentences, so it's up to us to do so. NLTK has a built-in method that easily breaks text down into words or sentences.
#
# Below we'll use some text describing the coronavirus as our example
text="""What are Coronaviruses? Coronaviruses (CoV) are a large family of viruses that cause illness
ranging from the common cold to more severe diseases such as Middle East Respiratory Syndrome (MERS-CoV) and
Severe Acute Respiratory Syndrome (SARS-CoV). A novel coronavirus (nCoV) is a new strain that has not
been previously identified in humans!
Coronaviruses are zoonotic; meaning they are transmitted between animals and people.
Detailed investigations found that SARS-CoV was transmitted from civet cats to humans and MERS-CoV from
dromedary camels to humans. Several known coronaviruses are circulating in animals that have not yet infected
humans.
Common signs of infection include respiratory symptoms, fever, cough, shortness of breath and breathing
difficulties. In more severe cases, infection can cause pneumonia, severe acute respiratory syndrome, kidney
failure and even death.
"""
print(text)
# First we'll tokenize the text into words. In other words, we will break-down the text at each whitespace and punctuation signs.
word_tokens=nltk.word_tokenize(text)
word_tokens[:10]
# We can also tokenize by sentences. That is, we can break-down text at every punctuation mark that indicates the end of a sentence(e.g a period, question mark, exclamation mark etc.)
sents_tokens=nltk.sent_tokenize(text)
sents_tokens
# If we wanted to tokenize into words each of the sentences then one way to do so is via a list comprehension
# +
#[nltk.word_tokenize(item) for item in sents_tokens]
# -
tokens=[nltk.word_tokenize(sent) for sent in sents_tokens]
tokens[:3]
# <h2>Text Normalization</h2>
#
# When performing text analysis, we often want to look only at the words and get rid of all the punctuation and other meaningless characters. The process of extracting only the words out of a text is typically known as text normalization.
covid_1=tokens[1]
# +
#tokens
# -
covid_1
for word in covid_1:
if word.isalpha():
print(word)
# Let's normalize the entire `text`. You can either use a `for` loop or use list `comprehension`
tokens
# +
#[[word for word in item if word.isalpha()]for item in tokens]
# -
text_norm=[[word for word in item if word.isalpha()] for item in tokens]
text_norm[5]
# Often times when doing text analysis, such as frequency distribution, we don't want to distinguish between Data, data, DATA, dAta, etc. In other words, we don't want them to count as different tokens but rather as the same token. To avoid things like this, we often may want to convert the entire text to either lower or upper case.
#
# For example, let's turn everything in our text tokens into lower case
tokens
tokens_lower=[[word.lower() for word in item if word.isalpha()] for item in tokens]
tokens_lower
# Sometimes we don't want to distinguish between words such as dog and dogs, or woman and women, lie and lying or liar etc. In other words, maybe we only care if the words have the same stem so to speak. In this case, we may try to first use some normalization technique that corrects for the different references to essentially the same word.
#
# This may be accomplished via what's known as `Stemmers`. As we will shortly see though, they are imperfect and not always yield great results, so we have to be cautious when using stemmers.
#
# Let's begin by creating a list first
list1=['cat','cats','dog','dogs','doggies','woman','women','lie','liar','lying',
'week','weekly','break','breaking']
porter=nltk.PorterStemmer()
list1_porter=[porter.stem(word) for word in list1]
list1_porter
# SO, the Porter Stemmer did fairly well, however it missed some. Let's see if another stemmer can do better:
lan=nltk.LancasterStemmer()
list1_lan=[lan.stem(word) for word in list1]
list1_lan
# In an attempt to increase the accuracy of the stemmers, we can try to use them sequentially, one after the other:
[lan.stem(word) for word in list1_porter]
[porter.stem(word) for word in list1_lan]
# <h2>Tagging Parts of Speech</h2>
#
# Often times when performing text analysis, and especially text summary etc., it may become very important knowing the parts of speech. So, being able to correctly identify the parts of speech and tag them accordingly may be a crucial step in providing a sophisticated solution to a problem concerning a large amount of text. For example maybe we want to extract only nouns from a text, or we want to count how many adjectives per sentence are there etc....tagging the text with the appropriate speech tag allowes us to do all of these and much more.
text2=text_norm[3]
# +
#text_norm
# -
text2
nltk.download('averaged_perceptron_tagger')
nltk.download('tagsets')
text2_tag=nltk.pos_tag(text2)
text2_tag
# To understand what each of these abbreviations mean we can do so as follows:
nltk.help.upenn_tagset()
# <font color='red' size=4>Exercise</font>
#
# Find the most common nouns in the text above.
print(text)
nltk.download('universal_tagset')
nltk.word_tokenize(text)
# +
#nltk.pos_tag(nltk.word_tokenize(text),tagset='universal')
# -
text_tag=[item[0] for item in nltk.pos_tag(nltk.word_tokenize(text),tagset='universal') if item[0].isalpha()
and item[1]=='NOUN']
# +
# text_tag=[word for word in nltk.pos_tag(nltk.word_tokenize(text),
# tagset='universal') if word[0].isalpha() and
# word[1]=='NOUN']
# -
text_tag
fd=nltk.FreqDist([word for word in text_tag])
fd.most_common(3)
cfd=nltk.ConditionalFreqDist((len(word[0]),word[0]) for word in text_tag)
[cfd[i].most_common(1) for i in range(3,8)]
cfd[5].most_common()
# <font color='red' size=5>Exercise 1</font>
#
# As we know, often the same word may be used as a different part of speech.
#
# In the text Alice: Find all the parts of speech used for the words <b>well, like</b> and <b>out</b>, of any case (upper or lower).
alice=nltk.corpus.gutenberg.words("carroll-alice.txt")
alice_tag=[word for word in nltk.pos_tag(alice, tagset='universal') if word[0].isalpha()]
alice_tag
alice_cfd=nltk.ConditionalFreqDist(alice_tag)
alice_cfd
alice_cfd['like']
alice_cfd['well']
alice_cfd['out']
# <font size=5 color='red'>Exercise 2</font>
#
# In the text Alice:
#
# <ul>
# <li>Find all the cases where there was a choice between two nouns. For example, <b> water</b> or <b>food</b></li>
# <li> Find all the cases where there is a noun followed by the word <b>and</b> and another noun. For example, <b>apple</b> and <b>sword</b></li>
# </ul>
#
# <h2>Chunking</h2>
#
# Often times words come in pairs, for example <b> New Orleans, Coffee Shop, Star Wars, Coffee Table, TV Stand </b> etc., so we don't want to tokenize them separately, but rather we would want to keep them together so as not to lose meaning. We can do so via chunking, by specifying the type of word structures we'd like to chunk together.
text="""We are taking a Data Science course at the Military Academy in the state of New York. We brought five
coffee tables via two separate jet planes from New Orleans.We ran into cbs news station"""
# Let's tag and tokenize to see what kind of tags our words of interest have
text_tag=nltk.pos_tag(nltk.word_tokenize(text))
text_tag
# What we are looking for is a noun followed by a noun without anything in between separating them.
sequence="""
chunk:
{<NN>+}
{<NNP>+}
{<NNPS>+}
"""
chunk=nltk.RegexpParser(sequence)
results=chunk.parse(text_tag)
print(results)
# <h2>Stop Words</h2>
#
# When trying to extract only the meaningful words from a text, we want to get rid of what's known as non-descriptive or stop-words, such as `the, a, an, is ` etc.
nltk.download('stopwords')
stop_words=nltk.corpus.stopwords.words('english')
stop_words
# <font color='red' size=5>Exercise</font>
#
# Extract the top 10 most common words in the Alice book, that are descriptive to the text itself.
| MA477 - Theory and Applications of Data Science/Lessons/Lesson 15 - Tokenization, Speech Taging, Chunking/Lesson 15 - Tokenization, Speech Tagging, Chunking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Here we'll be making a GAN to generate preferences.
#
# Followed along to a tutorial from <NAME> on Medium.com.
# +
# %load_ext autoreload
# %matplotlib inline
# %autoreload 2
from IPython import display
from loggingutils import Logger
from IPython.core.interactiveshell import InteractiveShell
# InteractiveShell.ast_node_interactivity = "last"
import numpy as np
import torch
from torch import nn, optim
from torch.autograd.variable import Variable
from torchvision import transforms, datasets
import tqdm
from tqdm import tqdm_notebook
# -
# ## Load Data
# #### Define the dataloader structure
# %autoreload 2
import importlib
# preference_loader is a project-local module that parses .soi election files.
import preference_loader as pl
# Each dataset item appears to pair a pairwise-comparison vector (data.pairs)
# with its vote (data.votes) -- TODO confirm against preference_loader.
data = pl.Dataset('../data_in/Practice/ED-01-03.soi')
# shuffle=False keeps batch contents reproducible across runs.
data_loader = torch.utils.data.DataLoader(data, batch_size=20, shuffle=False)
num_batches = len(data_loader)
# Peek at the first sample (pairwise vector, raw vote).
data.pairs[0], data.votes[0]
# +
# testing graph_visualization
# %autoreload 2
# graph_visualize is a project-local helper that renders votes / pairwise
# vectors as graphs.
import graph_visualize as gv
# Spot-check one sample: print its vote and pairwise vector, then render it.
i = 90
print(data.votes[i])
print(data.pairs[i])
gv.vec_to_graph(data.pairs[i])
# Render a full strict ranking (6 > 5 > ... > 1) for comparison.
nums_graph = gv.vote_to_graph([6,5,4,3,2,1])
nums_graph
# -
# #### Shape of the data
# InteractiveShell.ast_node_interactivity = "last_expr'"
# Dataset dimensions: number of votes (samples) and the length of the
# pairwise-comparison feature vector.
num_votes = len(data)
num_features = len(data.pairs[-1])
num_votes, num_features
print(data_loader)
print(data_loader.dataset[0])
# ## Networks
# +
'''
Originally was N -- 1024 -- 512 -- 256 -- 1
'''
N = num_features
class DiscriminatorNet(torch.nn.Module):
    """
    Discriminator: maps an N-dimensional pairwise-preference vector to a
    single probability (real vs. generated).

    Two hidden blocks are defined (hidden0, hidden1), each N -> N with
    LeakyReLU and dropout; see the NOTE in forward() about how hidden1
    is applied.
    """
    def __init__(self):
        super(DiscriminatorNet, self).__init__()
        n_features = N   # input width (unused local; layers reference N directly)
        n_out = 1        # single real/fake score
        self.hidden0 = nn.Sequential(
            nn.Linear(N, N),
            nn.LeakyReLU(0.2),
            nn.Dropout(0.3)
        )
        self.hidden1 = nn.Sequential(
            nn.Linear(N, N),
            nn.LeakyReLU(0.2),
            nn.Dropout(0.3)
        )
        self.out = nn.Sequential(
            torch.nn.Linear(N, n_out),
            torch.nn.Sigmoid()   # probability in (0, 1), matching BCELoss
        )
    def forward(self, x):
        x = self.hidden0(x)
        # NOTE(review): hidden1 is applied three times with *shared* weights.
        # If three independent hidden layers were intended (as the original
        # "three hidden-layer" docstring suggested), this is a bug -- confirm.
        x = self.hidden1(x)
        x = self.hidden1(x)
        x = self.hidden1(x)
        x = self.out(x)
        return x
# def matrix_to_vectors(images):
# return images.view(images.size(0), 784)
# def vectors_to_images(vectors):
# return vectors.view(vectors.size(0), 1, 28, 28)
;
# +
'''
original architecture was in -- 256 -- 512 -- 1024 -- out
new architecture is -- in -- in -- in -- out
'''
N = num_features
class GeneratorNet(torch.nn.Module):
    """
    Generator: maps a 100-dimensional latent noise vector to an
    N-dimensional pairwise-preference vector with entries in (0, 1).

    Hidden widths expand then contract symmetrically
    (h -> 2h -> 4h -> 8h -> 8h -> 4h -> 2h) before projecting to N,
    replacing the tutorial's original in -- 256 -- 512 -- 1024 -- out stack.
    """
    def __init__(self):
        super(GeneratorNet, self).__init__()
        n_features = 100   # latent (noise) dimension; must match noise()
        # hidden layer size
        h = 100
        n_out = N          # one output per pairwise comparison
        self.first = nn.Sequential(
            nn.Linear(n_features, h),
            nn.LeakyReLU(0.2)
        )
        self.hidden0 = nn.Sequential(
            nn.Linear(h,2*h),
            nn.LeakyReLU(0.2)
        )
        self.hidden1 = nn.Sequential(
            nn.Linear(2*h, 4*h),
            nn.LeakyReLU(0.2)
        )
        self.hidden2 = nn.Sequential(
            nn.Linear(4*h, 8*h),
            nn.LeakyReLU(0.2)
        )
        self.hidden3 = nn.Sequential(
            nn.Linear(8*h, 8*h),
            nn.LeakyReLU(0.2)
        )
        self.hidden4 = nn.Sequential(
            nn.Linear(8*h, 4*h),
            nn.LeakyReLU(0.2)
        )
        self.hidden5 = nn.Sequential(
            nn.Linear(4*h, 2*h),
            nn.LeakyReLU(0.2)
        )
        self.out = nn.Sequential(
            nn.Linear(2*h, N),
            nn.Sigmoid()   # squash outputs into (0, 1)
        )
    def forward(self, x):
        x = self.first(x)
        x = self.hidden0(x)
        x = self.hidden1(x)
        x = self.hidden2(x)
        x = self.hidden3(x)
        x = self.hidden4(x)
        x = self.hidden5(x)
        x = self.out(x)
        return x
# Noise
def noise(size):
    """Draw `size` latent vectors of dimension 100 from a standard normal.

    Returns a CUDA tensor when a GPU is available, otherwise a CPU tensor.
    """
    sample = Variable(torch.randn(size, 100))
    return sample.cuda() if torch.cuda.is_available() else sample
;
# -
# ## Send networks to GPU if available
# Instantiate the two adversaries; move both to the GPU when one is present.
discriminator = DiscriminatorNet()
generator = GeneratorNet()
if torch.cuda.is_available():
    print('GPU available')
    discriminator.cuda()
    generator.cuda()
;
# ## Optimization
# +
# Optimizers
# for soi_07_01 these values work best around .0002 and .0001
# for soi_01_03 these values work best around .00004 and .00002
# Adam for both networks; the discriminator learns at twice the generator's rate.
d_optimizer = optim.Adam(discriminator.parameters(), lr=0.0002)
g_optimizer = optim.Adam(generator.parameters(), lr=0.0001)
# Loss function
# Binary cross-entropy pairs with the sigmoid output of the discriminator.
loss = nn.BCELoss()
# Number of steps to apply to the discriminator
d_steps = 1  # In Goodfellow et. al 2014 this variable is assigned to 1
# -
# ## Training
# +
'''
The author of this blog post
https://medium.com/@utk.is.here/keep-calm-and-train-a-gan-pitfalls-and-tips-on-training-generative-adversarial-networks-edd529764aa9
suggests using real=0 and fake=1 for improved 'gradient flow in the early generations'
'''
def real_data_target(size):
    """Target labels for *real* samples: a (size, 1) tensor of zeros.

    The real/fake convention is deliberately flipped (real = 0, fake = 1)
    to improve gradient flow early in training, per the blog post cited
    in this notebook.
    """
    target = Variable(torch.zeros(size, 1))
    return target.cuda() if torch.cuda.is_available() else target
def fake_data_target(size):
    """Target labels for *fake* samples: a (size, 1) tensor of ones.

    Counterpart of real_data_target with the same flipped-label
    convention (real = 0, fake = 1).
    """
    target = Variable(torch.ones(size, 1))
    return target.cuda() if torch.cuda.is_available() else target
# +
def train_discriminator(optimizer, real_data, fake_data):
    """One discriminator update: score a real and a fake batch, backprop
    both BCE errors, then step the optimizer.

    Returns (total error, predictions on real, predictions on fake).
    Relies on the module-level ``discriminator`` and ``loss`` objects.
    """
    optimizer.zero_grad()

    # Real batch: push predictions toward the "real" target labels.
    pred_real = discriminator(real_data)
    err_real = loss(pred_real, real_data_target(real_data.size(0)))
    err_real.backward()

    # Fake batch: push predictions toward the "fake" target labels.
    pred_fake = discriminator(fake_data)
    err_fake = loss(pred_fake, fake_data_target(real_data.size(0)))
    err_fake.backward()

    # Apply the gradients accumulated over both passes.
    optimizer.step()
    return err_real + err_fake, pred_real, pred_fake
def train_generator(optimizer, fake_data):
    """One generator update: reward fakes the discriminator scores as real.

    Returns the generator's BCE error.  Relies on the module-level
    ``discriminator`` and ``loss`` objects.
    """
    optimizer.zero_grad()
    # The generator wins when the discriminator's predictions on its
    # output match the *real* target labels.
    judged = discriminator(fake_data)
    gen_error = loss(judged, real_data_target(judged.size(0)))
    gen_error.backward()
    optimizer.step()
    return gen_error
# -
# ### Generate Samples for Testing
num_test_samples = 5
test_noise = [noise(num_test_samples) for i in range(num_test_samples)]
# test_noise
# ### Start training
# +
# %%time
# Number of epochs
num_epochs = 1
logger = Logger(model_name='PrefGAN', data_name='soi_01_03')
import time
# Timestamped directory name for this run's graph logs.
timestr = time.strftime("%Y%m%d-%H%M%S")
print('Logging in graph_logs//{}'.format(timestr))
g_display = None
# One history list per fixed test-noise vector, to watch samples evolve.
test_vote_logs = [[] for i in test_noise]
for epoch in range(num_epochs):
    for n_batch, real_batch in enumerate(data_loader):
        # 1. Train Discriminator
        real_data = Variable(real_batch)
        if torch.cuda.is_available(): real_data = real_data.cuda()
        # Generate fake data
        # detach() so the discriminator pass does not update the generator.
        fake_data = generator(noise(real_data.size(0))).detach()
        # Train D
        d_error, d_pred_real, d_pred_fake = train_discriminator(d_optimizer,
                                                                real_data, fake_data)
        # 2. Train Generator
        # Generate fake data (fresh batch, not detached this time)
        fake_data = generator(noise(real_batch.size(0)))
        # Train G
        g_error = train_generator(g_optimizer, fake_data)
        logger.log(d_error, g_error, epoch, n_batch, num_batches)
        # Display Progress every 10 batches and for the last 10 batches.
        if (n_batch) % 10 == 0 or num_batches - n_batch <= 10:
            display.clear_output(True)
            # Record a sample from each fixed noise vector for inspection.
            for i, noise_vector in enumerate(test_noise):
                test_vote = generator(noise_vector).data.cpu()[0]
                test_vote_logs[i].append(test_vote)
                # g_display = gv.vec_to_graph(test_vote)
                # g_display.save('epoch{}batch{}graph{}'.format(epoch,n_batch,i),'../graph_logs/{}'.format(timestr))
                # # g_display.render('../graph_logs/{}/epoch{}batch{}graph{}'.format(timestr,epoch,n_batch,i))
            # Display status Logs
            logger.display_status(
                epoch, num_epochs, n_batch, num_batches,
                d_error, g_error, d_pred_real, d_pred_fake
            )
# -
len(test_vote_logs)
# +
# torch.save(test_vote_logs,'test_votes_big')
# -
vote0 = test_vote_logs[0][-1]
vote1 = test_vote_logs[1][-1]
vote2 = test_vote_logs[2][-1]
vote3 = test_vote_logs[3][-1]
vote4 = test_vote_logs[4][-1]
gv.vec_to_graph(vote0)
gv.vec_to_graph(vote1)
gv.vec_to_graph(vote2)
gv.vec_to_graph(vote3)
gv.vec_to_graph(vote4)
test_vote_logs[0];
# +
# def pairwise_matrix(vote):
# n = len(vote)
# occurance_matrix = np.full(shape = (n,n), dtype = int, fill_value = 0)
# for i, v in enumerate(vote):
# if v == 0:
# continue
# # list of alts that the current alt is better than
# better_than = [i+1 for i in range(n)]
# before = vote[:i+1]
# for b in before:
# better_than.remove(b)
# for p in better_than:
# occurance_matrix[v-1][p-1] = 1
# occurance_matrix[p-1][v-1] = -1
# return occurance_matrix
# # Converts an upper triangular matrix to a vector
# def matrix_to_vec(matrix):
# vec = []
# n = len(matrix[0])
# vec_length = n * (n-1) / 2
# offset = 1
# for inner in matrix:
# vec.extend(inner[offset:])
# offset += 1
# return np.array(vec)
# # Creates an upper triangular matrix from a vector
# def vec_to_matrix(vec_):
# data_type = vec_.dtype
# vec = list(vec_)
# m = len(vec)
# n = math.floor(math.sqrt(2*m))
# prob_matrix = np.full(shape = (n+1,n+1), dtype = data_type, fill_value = 0)
# row_offset = 1
# for row in prob_matrix:
# for i in range(row_offset,n+1):
# row[i] = vec.pop(0)
# row_offset += 1
# return prob_matrix
| src/PrefGAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Some functions
def run_regression(models, economies, df, x, y):
    """
    Perform linear regression for one or multiple economies.

    models = {economy: LinearRegression() for economy in economies}
    df = dataframe indexed by economy, holding regressor columns x and
         response column(s) y
    x, y = column label(s) selecting regressors and response

    Returns the same dictionary of economy-model pairs with each model
    fitted on its own economy's rows, i.e. each economy gets its own set
    of coefficients.

    NOTE(review): the ``economies`` parameter is never used -- economies
    are taken from the keys of ``models``; kept for interface
    compatibility with the other helpers.
    """
    for economy, model in models.items():
        # Fit on the rows belonging to this economy only.
        (model.fit(df.loc[economy, x],
                   df.loc[economy, y]))
    return models
def run_prediction(models, economies, df, ResultsColumn):
    """
    Use coefficients from run_regression to generate predictions.

    models = fitted {economy: model} mapping from run_regression
    df = dataframe with the X data, indexed by economy, including 'Year'
    ResultsColumn = name of prediction results (list-like, e.g. ['Pred'],
        since it is passed to pd.DataFrame(columns=...))

    Predictions are exponentiated (np.exp), so the models are assumed to
    have been fitted on log-transformed targets -- TODO confirm.

    NOTE(review): ``economies`` is unused (economies come from ``models``),
    and ``df.loc[economy, :]`` passes every column -- including 'Year' --
    to model.predict; verify that matches how the models were fitted.
    """
    df_list =[]
    # run predictions
    for economy, model in models.items():
        years = df['Year']
        years.reset_index(drop=True, inplace=True)
        prediction = model.predict(df.loc[economy,:])
        df_name = pd.DataFrame(np.exp(prediction), columns=ResultsColumn)
        df_name.insert(loc=0,column='Year',value=years)
        df_name.insert(loc=0,column='Economy',value=economy)
        df_list.append(df_name)
    # combine individual economy dataframes to one dataframe
    dfResults = pd.concat(df_list, sort=True)
    return dfResults
def cagr(start_value, end_value, num_periods):
    """Compound annual growth rate across num_periods observations.

    num_periods counts data points, so there are num_periods - 1 growth
    intervals between the first and last observation.
    """
    growth_intervals = num_periods - 1
    return (end_value / start_value) ** (1 / growth_intervals) - 1

def calcCAGR(df,economies):
    """
    Calculate CAGR for all economies.

    df = dataframe with an 'Economy' column, a 'Year' column and one or
         more numeric data columns
    economies = array of economy names (flattened before iteration)

    Returns a dataframe with columns A (economy), B (data column name)
    and C (the CAGR of that column for that economy).
    """
    records = []
    for economy in economies.flatten():
        subset = df[df['Economy'] == economy]
        # Every column except the identifiers holds a data series.
        for col in subset.drop(['Economy', 'Year'], axis=1):
            series = subset[col]
            records.append((
                economy,
                col,
                cagr(float(series.iloc[0]), float(series.iloc[-1]), len(series)),
            ))
    return pd.DataFrame(records, columns=['A', 'B', 'C'])
def calcYOY(df,economies):
    """
    Calculate year-over-year growth for all economies.

    df = dataframe indexed by economy with numeric data columns
    economies = array of economy names (flattened before iteration)

    Returns the per-economy percentage changes stacked into a single
    dataframe, with the economy name inserted as the first column.
    """
    pieces = []
    for economy in economies.flatten():
        yoy = df.loc[economy].pct_change()
        yoy.reset_index(inplace=True)
        # Tag every row with its economy before stacking.
        yoy.insert(loc=0, column='Economy', value=economy)
        pieces.append(yoy)
    return pd.concat(pieces)
| notebooks/Some-functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
class Graph:
    """AO* search over an AND-OR graph.

    ``graph`` maps a node to a list of AND-arcs; each AND-arc is a list
    of (child, edge_cost) tuples that must all be solved together.
    ``H`` holds the mutable heuristic estimate per node, ``status``
    tracks solve state (-1 means solved), and ``solutionGraph``
    accumulates the chosen child list of every solved node.
    """
    def __init__(self, graph, heuristicNodeList, startNode):
        self.graph = graph
        self.H = heuristicNodeList
        self.start = startNode
        self.parent = {}          # child -> parent, used when backtracking
        self.status = {}          # node -> solve status (default 0, -1 = solved)
        self.solutionGraph = {}   # node -> chosen children once solved

    def applyAOStar(self):
        """Run AO* from the start node."""
        self.aoStar(self.start, False)

    def getNeighbors(self, v):
        # '' (an empty iterable) doubles as "no outgoing AND-arcs".
        return self.graph.get(v, '')

    def getStatus(self, v):
        return self.status.get(v, 0)

    def setStatus(self, v, val):
        self.status[v] = val

    def getHeuristicNodeValue(self, n):
        return self.H.get(n, 0)

    def setHeuristicNodeValue(self, n, value):
        self.H[n] = value

    def printSolution(self):
        """Print the solution sub-graph accumulated by the search."""
        print("FOR GRAPH SOLUTION, TRAVERSE THE GRAPH FROM THE START NODE:", self.start)
        print(" ")
        print(self.solutionGraph)
        print(" ")

    def computeMinimumCostChildNodes(self, v):
        """Return (cost, children) of the cheapest AND-arc leaving v.

        An arc's cost is the sum over its children of edge weight plus
        the child's current heuristic.  With no outgoing arcs the result
        is (0, []).
        """
        best_cost = 0
        best_children = []
        first_arc = True
        for and_arc in self.getNeighbors(v):
            arc_cost = 0
            arc_children = []
            for child, weight in and_arc:
                arc_cost += self.getHeuristicNodeValue(child) + weight
                arc_children.append(child)
            # Keep the first arc unconditionally, then any strictly cheaper one.
            if first_arc or arc_cost < best_cost:
                best_cost = arc_cost
                best_children = arc_children
                first_arc = False
        return best_cost, best_children

    def aoStar(self, v, backTracking):
        """Expand/revise node v: refresh its heuristic from the cheapest
        AND-arc, propagate changes to its parent, and (unless
        backtracking) recurse into the chosen children."""
        print("HEURISTIC VALUES :", self.H)
        print("SOLUTION GRAPH :", self.solutionGraph)
        print("PROCESSING NODE :", v)
        print(" ")
        if self.getStatus(v) < 0:
            return  # already solved; nothing to revise
        min_cost, children = self.computeMinimumCostChildNodes(v)
        print(min_cost, children)
        self.setHeuristicNodeValue(v, min_cost)
        self.setStatus(v, len(children))
        # v is solved only when every child of the chosen arc is solved.
        all_solved = True
        for child in children:
            self.parent[child] = v
            if self.getStatus(child) != -1:
                all_solved = False
        if all_solved:
            self.setStatus(v, -1)
            self.solutionGraph[v] = children
        if v != self.start:
            # Propagate the revised cost/status upward.
            self.aoStar(self.parent[v], True)
        if not backTracking:
            # Expand each chosen child in turn.
            for child in children:
                self.setStatus(child, 0)
                self.aoStar(child, False)
# Worked example 1: heuristic table and AND-OR graph; each inner list is
# one AND-arc of (child, edge_cost) pairs.
h1 = {'A': 1, 'B': 6, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 5, 'H': 7, 'I': 7, 'J': 1}
graph1 = {
    'A': [[('B', 1), ('C', 1)], [('D', 1)]],
    'B': [[('G', 1)], [('H', 1)]],
    'C': [[('J', 1)]],
    'D': [[('E', 1), ('F', 1)]],
    'G': [[('I', 1)]]
}
G1= Graph(graph1, h1, 'A')
G1.applyAOStar()
G1.printSolution()
# Worked example 2: a smaller graph with the same start node.
h2 = {'A': 1, 'B': 6, 'C': 12, 'D': 10, 'E': 4, 'F': 4, 'G': 5, 'H': 7}
graph2 = {
    'A': [[('B', 1), ('C', 1)], [('D', 1)]],
    'B': [[('G', 1)], [('H', 1)]],
    'D': [[('E', 1), ('F', 1)]]
}
G2 = Graph(graph2, h2, 'A')
G2.applyAOStar()
G2.printSolution()
| AO*/aostar.py.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Db2 Connection Document
# This notebook contains the connect statement that will be used for connecting to Db2. The typical way of connecting to Db2 within a notebooks it to run the db2 notebook (`db2.ipynb`) and then issue the `%sql connect` statement:
# ```sql
# # %run db2.ipynb
# # %sql connect to sample user ...
# ```
#
# Rather than having to change the connect statement in every notebook, this one file can be changed and all of the other notebooks will use the value in here. Note that if you do reset a connection within a notebook, you will need to issue the `CONNECT` command again or run this notebook to re-connect.
#
# The `db2.ipynb` file is still used at the beginning of all notebooks to highlight the fact that we are using special code to allow Db2 commands to be issues from within Jupyter Notebooks.
# ### Connect to Db2
# This code will connect to Db2 locally.
# %sql CONNECT TO BANKING USER db2inst1 USING db2inst1 HOST localhost PORT 50001
# #### Credits: IBM 2019, <NAME> [<EMAIL>]
| ml/connectiondb2ml-banking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # 实战Kaggle比赛——使用Gluon预测房价和K折交叉验证
#
# 本章介绍如何使用``Gluon``来实战[Kaggle比赛](https://www.kaggle.com)。我们以[房价预测问题](https://www.kaggle.com/c/house-prices-advanced-regression-techniques)为例,为大家提供一整套实战中常常需要的工具,例如**K折交叉验证**。我们还以``pandas``为工具介绍如何对**真实世界**中的数据进行重要的预处理,例如:
#
# * 处理离散数据
# * 处理丢失的数据特征
# * 对数据进行标准化
#
# 需要注意的是,本章仅提供一些基本实战流程供大家参考。对于数据的预处理、模型的设计和参数的选择等,我们特意只提供最基础的版本。希望大家一定要通过动手实战、仔细观察实验现象、认真分析实验结果并不断调整方法,从而得到令自己满意的结果。
#
# 这是一次宝贵的实战机会,我们相信你一定能从动手的过程中学到很多。
#
# > Get your hands dirty。
#
# ## Kaggle中的房价预测问题
#
# [Kaggle](https://www.kaggle.com)是一个著名的供机器学习爱好者交流的平台。为了便于提交结果,请大家注册[Kaggle](https://www.kaggle.com)账号。请注意,**目前Kaggle仅限每个账号一天以内10次提交结果的机会**。所以提交结果前务必三思。
#
# 
#
#
#
#
# 我们以[房价预测问题](https://www.kaggle.com/c/house-prices-advanced-regression-techniques)为例教大家如何实战一次Kaggle比赛。请大家在动手开始之前点击[房价预测问题](https://www.kaggle.com/c/house-prices-advanced-regression-techniques)了解相关信息。
#
# 
#
#
#
# ## 读入数据
#
# 比赛数据分为训练数据集和测试数据集。两个数据集都包括每个房子的特征,例如街道类型、建造年份、房顶类型、地下室状况等特征值。这些特征值有连续的数字、离散的标签甚至是缺失值'na'。只有训练数据集包括了我们需要在测试数据集中预测的每个房子的价格。数据可以从[房价预测问题](https://www.kaggle.com/c/house-prices-advanced-regression-techniques)中下载。
#
# [训练数据集下载地址](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/train.csv)
# [测试数据集下载地址](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/test.csv)
#
# 我们通过使用``pandas``读入数据。请确保安装了``pandas`` (``pip install pandas``)。
# +
import pandas as pd
import numpy as np
train = pd.read_csv("../data/kaggle_house_pred_train.csv")
test = pd.read_csv("../data/kaggle_house_pred_test.csv")
all_X = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'],
test.loc[:, 'MSSubClass':'SaleCondition']))
# -
# 我们看看数据长什么样子。
train.head()
# 数据大小如下。
train.shape
test.shape
# ## 预处理数据
#
# 我们使用pandas对数值特征做标准化处理:
#
# $$x_i = \frac{x_i - \mathbb{E} x_i}{\text{std}(x_i)}。$$
numeric_feats = all_X.dtypes[all_X.dtypes != "object"].index
all_X[numeric_feats] = all_X[numeric_feats].apply(lambda x: (x - x.mean())
/ (x.std()))
# 现在把离散数据点转换成数值标签。
all_X = pd.get_dummies(all_X, dummy_na=True)
# 把缺失数据用本特征的平均值估计。
all_X = all_X.fillna(all_X.mean())
# 下面把数据转换一下格式。
# +
# Split the combined feature frame back into train/test NumPy arrays.
num_train = train.shape[0]
# .values replaces the long-deprecated DataFrame.as_matrix(), which was
# removed in pandas 1.0; the result is the same underlying NumPy array.
X_train = all_X[:num_train].values
X_test = all_X[num_train:].values
y_train = train.SalePrice.values
# -
# ## 导入NDArray格式数据
#
# 为了便于和``Gluon``交互,我们需要导入NDArray格式数据。
# +
from mxnet import ndarray as nd
from mxnet import autograd
from mxnet import gluon
X_train = nd.array(X_train)
y_train = nd.array(y_train)
y_train.reshape((num_train, 1))
X_test = nd.array(X_test)
# -
# 我们把损失函数定义为平方误差。
square_loss = gluon.loss.L2Loss()
# 我们定义比赛中测量结果用的函数。
def get_rmse_log(net, X_train, y_train):
    """Competition metric: RMSE between log(predictions) and log(labels).

    Predictions are clipped to [1, inf) so the logarithm is defined.
    The factor 2 undoes the 1/2 coefficient inside gluon's L2Loss.
    """
    num_train = X_train.shape[0]
    clipped_preds = nd.clip(net(X_train), 1, float('inf'))
    return np.sqrt(2 * nd.sum(square_loss(
        nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
# ## 定义模型
#
# 我们将模型的定义放在一个函数里供多次调用。这是一个基本的线性回归模型。
def get_net():
    """Build the baseline model: a single Dense(1) layer, i.e. plain
    linear regression, with freshly initialised parameters."""
    net = gluon.nn.Sequential()
    with net.name_scope():
        net.add(gluon.nn.Dense(1))
    net.initialize()
    return net
# 我们定义一个训练的函数,这样在跑不同的实验时不需要重复实现相同的步骤。
# +
# %matplotlib inline
import matplotlib as mpl
mpl.rcParams['figure.dpi']= 120
import matplotlib.pyplot as plt
def train(net, X_train, y_train, X_test, y_test, epochs,
          verbose_epoch, learning_rate, weight_decay):
    """Train *net* with Adam and plot the per-epoch RMSE-log curve(s).

    When X_test/y_test are provided the test curve is tracked and the
    final (train_loss, test_loss) pair is returned; otherwise only the
    final train loss is returned.  Epoch losses are printed once
    epoch > verbose_epoch.
    """
    train_loss = []
    if X_test is not None:
        test_loss = []
    batch_size = 100
    dataset_train = gluon.data.ArrayDataset(X_train, y_train)
    data_iter_train = gluon.data.DataLoader(
        dataset_train, batch_size,shuffle=True)
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': learning_rate,
                             'wd': weight_decay})
    # Re-initialise so repeated calls with the same net start from scratch.
    net.collect_params().initialize(force_reinit=True)
    for epoch in range(epochs):
        for data, label in data_iter_train:
            with autograd.record():
                output = net(data)
                loss = square_loss(output, label)
            loss.backward()
            trainer.step(batch_size)
        # Track the competition metric (RMSE of logs), not the raw L2 loss.
        cur_train_loss = get_rmse_log(net, X_train, y_train)
        if epoch > verbose_epoch:
            print("Epoch %d, train loss: %f" % (epoch, cur_train_loss))
        train_loss.append(cur_train_loss)
        if X_test is not None:
            cur_test_loss = get_rmse_log(net, X_test, y_test)
            test_loss.append(cur_test_loss)
    plt.plot(train_loss)
    plt.legend(['train'])
    if X_test is not None:
        plt.plot(test_loss)
        plt.legend(['train','test'])
    plt.show()
    if X_test is not None:
        return cur_train_loss, cur_test_loss
    else:
        return cur_train_loss
# -
# ## K折交叉验证
#
# 在[过拟合](underfit-overfit.md)中我们讲过,过度依赖训练数据集的误差来推断测试数据集的误差容易导致过拟合。事实上,当我们调参时,往往需要基于K折交叉验证。
#
# > 在K折交叉验证中,我们把初始采样分割成$K$个子样本,一个单独的子样本被保留作为验证模型的数据,其他$K-1$个样本用来训练。
#
# 我们关心K次验证模型的测试结果的平均值和训练误差的平均值,因此我们定义K折交叉验证函数如下。
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
                       learning_rate, weight_decay):
    """K-fold cross validation.

    Splits the data into k contiguous folds; each fold is held out once
    as validation while a fresh net trains on the rest.  Returns the
    average (train, test) RMSE-log over the k folds.
    """
    assert k > 1
    fold_size = X_train.shape[0] // k
    train_loss_sum = 0.0
    test_loss_sum = 0.0
    for test_i in range(k):
        # Fold test_i is held out for validation this round.
        X_val_test = X_train[test_i * fold_size: (test_i + 1) * fold_size, :]
        y_val_test = y_train[test_i * fold_size: (test_i + 1) * fold_size]
        val_train_defined = False
        for i in range(k):
            if i != test_i:
                X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
                y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
                if not val_train_defined:
                    X_val_train = X_cur_fold
                    y_val_train = y_cur_fold
                    val_train_defined = True
                else:
                    # Stack every remaining fold into the training split.
                    X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
                    y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
        net = get_net()
        train_loss, test_loss = train(
            net, X_val_train, y_val_train, X_val_test, y_val_test,
            epochs, verbose_epoch, learning_rate, weight_decay)
        train_loss_sum += train_loss
        print("Test loss: %f" % test_loss)
        test_loss_sum += test_loss
    return train_loss_sum / k, test_loss_sum / k
# ### 训练模型并交叉验证
#
# 以下的模型参数都是可以调的。
k = 5
epochs = 100
verbose_epoch = 95
learning_rate = 5
weight_decay = 0.0
# 给定以上调好的参数,接下来我们训练并交叉验证我们的模型。
train_loss, test_loss = k_fold_cross_valid(k, epochs, verbose_epoch, X_train,
y_train, learning_rate, weight_decay)
print("%d-fold validation: Avg train loss: %f, Avg test loss: %f" %
(k, train_loss, test_loss))
# 即便训练误差可以达到很低(调好参数之后),但是K折交叉验证上的误差可能更高。当训练误差特别低时,要观察K折交叉验证上的误差是否同时降低并小心过拟合。我们通常依赖K折交叉验证误差结果来调节参数。
#
#
#
# ## 预测并在Kaggle提交预测结果(选学)
#
# 本部分为选学内容。网络不好的同学可以通过上述K折交叉验证的方法来评测自己训练的模型。
#
# 我们首先定义预测函数。
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
          weight_decay):
    """Train on the full training set, predict on the test set and write
    a Kaggle submission file (submission.csv).

    NOTE(review): predictions use the module-level ``X_test`` NDArray,
    not the ``test`` dataframe parameter -- the two must stay in sync.
    """
    net = get_net()
    train(net, X_train, y_train, None, None, epochs, verbose_epoch,
          learning_rate, weight_decay)
    preds = net(X_test).asnumpy()
    test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)
# 调好参数以后,下面我们预测并在Kaggle提交预测结果。
learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay)
# 执行完上述代码后,会生成一个`submission.csv`文件。这是Kaggle要求的提交格式。这时我们可以在Kaggle上把我们预测得出的结果提交并查看与测试数据集上真实房价的误差。你需要登录Kaggle网站,打开[房价预测问题地址](https://www.kaggle.com/c/house-prices-advanced-regression-techniques),并点击下方右侧`Submit Predictions`按钮提交。
#
# 
#
#
#
# 请点击下方`Upload Submission File`选择需要提交的预测结果。然后点击下方的`Make Submission`按钮就可以查看结果啦!
#
# 
#
# 再次温馨提醒,**目前Kaggle仅限每个账号一天以内10次提交结果的机会**。所以提交结果前务必三思。
#
# ## 作业([汇报作业和查看其他小伙伴作业](https://discuss.gluon.ai/t/topic/1039)):
#
# * 运行本教程,目前的模型在5折交叉验证上可以拿到什么样的loss?
# * 如果网络条件允许,在Kaggle提交本教程的预测结果。观察一下,这个结果能在Kaggle上拿到什么样的loss?
# * 通过重新设计模型、调参并对照K折交叉验证结果,新模型是否比其他小伙伴的更好?除了调参,你可能发现我们之前学过的以下内容有些帮助:
# * [多层感知机 --- 使用Gluon](mlp-gluon.md)
# * [正则化 --- 使用Gluon](reg-gluon.md)
# * 如果不使用对数值特征做标准化处理能拿到什么样的loss?
# * 你还有什么其他办法可以继续改进模型?小伙伴们都期待学习到你独特的富有创造力的解决方案。
#
# **吐槽和讨论欢迎点**[这里](https://discuss.gluon.ai/t/topic/1039)
| chapter_supervised-learning/kaggle-gluon-kfold.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Desafio 2
#
# ## Instruções básicas
#
# O desafio 2 é sobre a extração, limpeza e manipulação de dados no INEP sobre o Censo da Educação Superior. Estão apresentados abaixo um artigo de 2016 sobre a divulgação dos dados, assim como um vídeo sobre esta mesma divulgação, da edição de 2015. Além disso, está oferecido o Manual de Instruções da última edição publicada, de 2017.
#
# Ao final, são colocadas as questões do desafio em duas partes, a primeira coletiva para ser feita em grupo, e a segunda para ser feita individual. Qualquer dúvida ou comentário, por favor procure o corpo docente.
# # MEC e Inep divulgam dados do Censo da Educação Superior 2016
#
# <br>
# <img src="img/inep.png" width="550" />
# <br>
#
#
# Em 2016, 34.366 cursos de graduação foram ofertados em 2.407 instituições de educação superior (IES) no Brasil para um total de 8.052.254 estudantes matriculados. Os dados são do Censo da Educação Superior e foram divulgados nesta quinta feira (31) em coletiva de imprensa com a presença do ministro da Educação, Mendonça Filho, da secretária executiva do Ministério da Educação (Mec), <NAME>, e da presidente do Instituto Nacional de Estudos e Estatísticas Educacionais Anísio Teixeira (Inep), <NAME>.
#
# Segundo as estatísticas apresentadas, as 197 universidades existentes no país equivalem a 8,2% do total de IES, mas concentram 53,7% das matrículas em cursos de graduação.
#
# No ano passado, o número de matrículas na educação superior (graduação e sequencial) continuou crescendo, mas essa tendência desacelerou quando se comparado aos últimos anos. Entre 2006 e 2016, houve aumento de 62,8%, com uma média anual de 5% de crescimento. Porém, em relação a 2015, a variação positiva foi de apenas 0,2%.
#
# ## Cursos
#
# Os cursos de bacharelado mantêm sua predominância na educação superior brasileira com uma participação de 69% das matrículas. Os cursos de licenciatura tiveram o maior crescimento (3,3%) entre os graus acadêmicos em 2016, quando comparado a 2015.
#
# ## Vagas
#
# Em 2016, foram oferecidas mais de 10,6 milhões de vagas em cursos de graduação, sendo 73,8% vagas novas e 26,0%, vagas remanescentes. Das novas vagas oferecidas no ano passado, 33,5% foram preenchidas, enquanto apenas 12,0% das vagas remanescentes foram ocupadas no mesmo período.
#
# ## Ingressantes
#
# Em 2016, quase 3 milhões de alunos ingressaram em cursos de educação superior de graduação. Desse total, 82,3% em instituições privadas.
# Após uma queda observada em 2015, o número de ingressantes teve um crescimento de 2,2% em 2016. Isso ocorreu porque a modalidade a distância aumentou mais de 20% entre os dois anos, enquanto nos cursos presenciais houve um decréscimo no número de ingressantes de 3,7%.
#
# ## Concluintes
#
# No ano passado, mais de um 1,1 milhão de estudantes concluíram a educação superior. O número de concluintes em cursos de graduação presencial teve aumento de 2,4% em relação a 2015. A modalidade a distância diminuiu -1,3% no mesmo período.
#
# Entre 2015 e 2016, o número de concluintes na rede pública aumentou 2,9%. Já na rede privada a variação positiva foi de 1,4%. No período de 2006 a 2016, a variação percentual do número de concluintes em cursos de graduação foi maior na rede privada, com 62,6%, enquanto na pública esse crescimento foi de 26,5% no mesmo período.
#
# ## Censo da Educação Superior
#
# O Censo da Educação Superior, realizado anualmente pelo Instituto Nacional de Estudos e Pesquisas Educacionais Anísio Teixeira (Inep), constitui-se importante instrumento de obtenção de dados para a geração de informações que subsidiam a formulação, o monitoramento e a avaliação das políticas públicas, além de ser elemento importante para elaboração de estudos e pesquisas sobre o setor. O Censo coleta informações sobre as Instituições de Educação Superior (IES), os cursos de graduação e sequenciais de formação específica e sobre os discentes e docentes vinculados a esses cursos.
#
# Os resultados coletados subsidiam o Sistema Nacional de Avaliação da Educação Superior (Sinaes), seja no cálculo dos indicadores de Conceito Preliminar de Curso (CPC) e do Índice Geral de Cursos (IGC), seja no fornecimento de informações, como número de matrículas, de ingressos, de concluintes, entre outras. As estatísticas possibilitam ainda, através da justaposição de informações de diferentes edições da pesquisa, a análise da trajetória dos estudantes a partir de seu ingresso em determinado curso de graduação, e, consequentemente, a geração de indicadores de acompanhamento e de fluxo na educação superior.
# +
# Vídeo sobre a divulgação dos resultados do Censo da Educação Superior da edição de 2015
from IPython.display import HTML
HTML('<iframe width="640" height="360" src="https://www.youtube.com/embed/31rWZN5D_YE" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
# -
# # Microdados do Censo da Educação Superior 2017
#
# ## Manual do Usuário
#
# ### Ministério da Educação Instituto Nacional de Estudos e Pesquisas Educacionais Anísio Teixeira – INEP Diretoria de Estatísticas Educacionais
#
# O Instituto Nacional de Estudos e Pesquisas Educacionais Anísio Teixeira (Inep), autarquia vinculada ao Ministério da Educação, tem como missão institucional produzir e disseminar informações educacionais. As estatísticas e avaliações produzidas pelo Inep visam fornecer os subsídios para a formulação e implementação de políticas voltadas para a melhoria contínua da educação no país.
#
# Entre as informações educacionais produzidas pelo Inep, destacam-se os dados coletados no Censo da Educação Superior, levantamento de âmbito nacional, realizado anualmente pela Diretoria de Estatísticas Educacionais em todas as Instituições de Educação Superior (IES), públicas e privadas do país.
#
# O Inep desenvolve uma série de ações de disseminação de dados que se encontram divulgados de forma ativa no portal do instituto na internet (www.inep.gov.br ). O presente documento figura entre os instrumentos utilizados por este Instituto para garantir a transparência requerida pela sociedade e promover a participação dos diferentes agentes sociais envolvidos nas ações educativas.
#
# Nesse contexto, os Microdados foram estruturados em formato CSV (CommaSeparated Values) e seus dados estão delimitados por Pipe ( | ), de modo a garantir que praticamente qualquer software estatístico, inclusive open source, consiga importar e carregar as bases de dados.
#
# Por meio dos Microdados é possível obter um amplo panorama da educação brasileira e, como importante ferramenta de transparência, é indubitavelmente um rico acervo sobre a educação superior do nosso país e uma fonte segura e eficaz de obtenção de dados, acessíveis aos pesquisadores, estudantes, gestores e sociedade em geral.
#
# #### DADOS
#
# Estão disponíveis no site do Inep os Microdados do Censo da Educação Superior 2017 (DM_IES, DM_CURSO, DM_DOCENTE, DM_ALUNO, DM_LOCAL_OFERTA e TB_AUX_AREA_OCDE) em formato CSV delimitados por Pipe ( | ). Os arquivos encontram-se compactados (em formato .zip) pelo software 7-zip e devem ser descompactados no diretório C:\ do seu disco rígido.
#
# #### LEIA-ME
#
# Este presente instrumento traz instruções fundamentais para a correta extração e manipulação dos Microdados, como orientações para abrir os arquivos nos softwares R, SPSS e SAS. É, portanto, uma espécie de “Manual do Usuário” que traz os Dicionários das Variáveis do Censo da Educação Superior 2017.
#
# #### FILTROS DA EDUCAÇÃO SUPERIOR
#
# É um documento que norteia o usuário na obtenção de dados em sintonia com os valores publicados nas Sinopses Estatísticas produzidas pelo INEP. Nele, são elencados, de forma simples, os principais filtros e funções que devem ser aplicados.
#
# #### ANEXO I – Dicionários de dados e Tabelas Auxiliares
#
# Contém, em formato .xlsx (Excel), o Dicionário de Dados do Censo da Educação Superior 2017 e também uma tabela auxiliar com o código e o nome dos países:
#
# 1.DICIONÁRIO DE DADOS
#
# - TABELA DE ALUNO
# - TABELA DE CURSO
# - TABELA DE IES
# - TABELA DE LOCAL DE OFERTA
# - TABELA DE DOCENTE
# - TABELA AUXILIAR OCDE
#
# 2.TABELA CONTENDO O NOME DO PAÍS DE ORIGEM OU NATURALIZAÇÃO
#
# #### ANEXO II – Questionários do Censo da Educação Superior
#
# Contém, em formato .pdf (Portable Document Format), os seguintes questionários do Censo da Educação Superior 2017 e estão disponíveis para download na pasta anexos:
#
# - MÓDULO IES
# - MÓDULO CURSO
# - MÓDULO DOCENTE
# - MÓDULO ALUNO
# # DESAFIO 2
#
# <br>
# <img src="img/dh.png" width="550" />
# <br>
#
# ## Parte Coletiva
#
# ### Obrigatório:
#
# 1) Tabelas com número de universidades (DM_ALUNO.CO_IES) públicas e privadas (DM_ALUNO.TP_CATEGORIA_ADMINISTRATIVA) por estado (DSM_CURSO.CO_UF) e número de alunos por universidade (DM_ALUNO.CO_ALUNO)
#
# 2) Pergunta-se: é verdade que existe menos mulheres (DM_ALUNO.TP_SEXO) nos cursos de exatas (DM_CURSO.CO_OCDE_AREA_GERAL CO_OCDE_AREA_ESPECIFICA CO_OCDE_AREA_DETALHADA CO_OCDE)? Explique com os dados.
#
# 3) Quantos cursos (DM_CURSO.DT_INICIO_FUNCIONAMENTO / DM_CURSO.CO_CURSO) novos abrem por ano?
#
# 4) Se usarmos a taxa de concluientes (DM_ALUNO.TP_SITUACAO = 6.Formado/DM_ALUNO.TP_SITUACAO) de um curso como variável de dificuldade dos cursos, eles tem ficado mais faceis ou mais duros ao longo do tempo (DM_CURSO.NU_ANO_INGRESSO)? Quais as dificuldades para uma afirmação dessas?
#
# 5) Rode uma regressão multipla que explique o abandono dos cursos (DM_ALUNO.TP_SITUACAO), será que professores mais/menos preparados influência nessas taxas (DM_DOCENTE.TP_REGIME_TRABALHO IN_BOLSA_PESQUISA IN_ATUACAO_PESQUISA TP_ESCOLARIDADE)?
#
# 6) Quais os cursos com maior crescimento de matriculas (DM_ALUNO.IN_MATRICULA / DM_CURSO.NU_ANO_INGRESSO) por região (CRIAR)? E quais os com maior queda? Como você explicaria isso.
#
# 7) Construa uma variável "Signo" (DM_ALUNO.NU_ANO_NASCIMENTO NU_MES_NASCIMENTO NU_DIA_NASCIMENTO) dos estudantes e explique porque ela é correlacionada com a variável "probabilidade de formação" (construir)
#
# ### Facultativo:
#
# 1) Crie um mapa das universidades no pais.
#
# 2) Alunos estrangeiros são melhores resilientes (para terminar um curso) que os brasileiros? Quais as dificuldades dessa análise?
#
# 3) Se você tivesse que abrir um curso (apenas um curso de graduação), qual seria, onde ele seria, qual a modalidade.
#
# ## Parte Individual.
#
# ### Obrigatório:
#
# 1) Pense na Hipótese de 2 universidades A e B onde X é a média de notas delas e elas tem exatamente os mesmos cursos. É possível que A_X > B_X e TODOS os cursos de B sejam melhores (notas maiores) que os de A? Explique (não são necessários códigos, apenas a explicação). Dica: paradoxo de simpson.
#
# 2) Rode uma regressão multipla que use, entre outras, a carga horária de um curso como variavel independente para explicar a idade dos alunos formados.
#
# 3) Qual a probabilidade de pegarmos um professor ao acaso no Brasil e ele ter mais do que o dobro da sua idade dado que ele é homem.
#
# Obs: Não usar informações que não estejam na pasta Desafios nas partes **Obrigatórias**
#
# ## Data da entrega: 22 de Abril (terça-feira)
# # DESAFIO 2 - Grupo:
#
# ## Nome dos integrantes do grupo:
#
# <NAME>
#
# <NAME>
#
# <NAME>
#
# Kallita
#
# <NAME>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import statsmodels.formula.api as smf
# +
# Outra forma de fazer amostra de uma base enorme
#n = sum(1 for line in open('DM_ALUNO.csv')) - 1
#s = 1000000
#skip = sorted(random.sample(range(1, n+1), n-s))
#df_aluno = pd.read_csv('DM_ALUNO.csv', skiprows=skip, sep='|', encoding='latin1')
# -
# Absolute paths to the raw INEP higher-education census extracts
# (pipe-delimited CSVs in latin1 encoding).
end_aluno = r"D:\Dropbox\Digital House - Aulas Gravadas e Bases\Desafio 2 - Bases\DADOS\DM_ALUNO.CSV"
end_curso = r"D:\Dropbox\Digital House - Aulas Gravadas e Bases\Desafio 2 - Bases\DADOS\DM_CURSO.CSV"
end_curso_depara = r"D:\Dropbox\Digital House - Aulas Gravadas e Bases\Desafio 2 - Bases\DADOS\TB_AUX_AREA_OCDE.CSV"
end_docente = r"D:\Dropbox\Digital House - Aulas Gravadas e Bases\Desafio 2 - Bases\DADOS\DM_DOCENTE.CSV"

def _count_rows(path):
    """Return the number of data rows in *path* (line count minus the header).

    Fix: the original `open()` calls never closed their file handles;
    the context manager guarantees closure. latin1 matches the encoding
    used by the pd.read_csv calls below.
    """
    with open(path, encoding='latin1') as f:
        return sum(1 for _ in f) - 1

rows_aluno = _count_rows(end_aluno)
rows_curso = _count_rows(end_curso)
rows_docente = _count_rows(end_docente)
print(rows_aluno, rows_curso, rows_docente)
# +
# Load DM_ALUNO in 1M-row chunks and keep a 100k random sample of each
# chunk, so the full file never has to fit in memory at once.
colunas_aluno = ['CO_IES','CO_ALUNO','TP_CATEGORIA_ADMINISTRATIVA','TP_SEXO','CO_OCDE_AREA_GERAL','CO_OCDE_AREA_ESPECIFICA','CO_OCDE_AREA_DETALHADA','CO_OCDE','CO_CURSO','NU_ANO_INGRESSO','TP_SITUACAO','NU_ANO_NASCIMENTO','NU_MES_NASCIMENTO','NU_DIA_NASCIMENTO','IN_MATRICULA','IN_APOIO_BOLSA_TRABALHO','IN_APOIO_MATERIAL_DIDATICO','IN_APOIO_MORADIA','IN_APOIO_TRANSPORTE','IN_ATIVIDADE_EXTRACURRICULAR','IN_COMPLEMENTAR_ESTAGIO','IN_COMPLEMENTAR_EXTENSAO','IN_COMPLEMENTAR_MONITORIA','IN_COMPLEMENTAR_PESQUISA','IN_BOLSA_ESTAGIO','IN_BOLSA_EXTENSAO','IN_BOLSA_MONITORIA','IN_BOLSA_PESQUISA']
reader = pd.read_csv(end_aluno, usecols=colunas_aluno, sep="|", encoding='latin1', chunksize=1000000)
# One pass over the file; each element is a 100k-row random sample of a chunk.
sampled_chunks = [piece.sample(100000) for piece in reader]
df_aluno = pd.concat(sampled_chunks, ignore_index=True)
del sampled_chunks
df_aluno
# -
df_aluno.info()
# Course-level attributes: opening date, state code and OCDE area codes.
colunas_curso = ['CO_IES','DT_INICIO_FUNCIONAMENTO','CO_UF', 'CO_CURSO','CO_OCDE_AREA_GERAL','CO_OCDE_AREA_ESPECIFICA','CO_OCDE_AREA_DETALHADA','CO_OCDE']
df_curso = pd.read_csv(end_curso, usecols = colunas_curso, sep = "|", encoding='latin1')
df_curso
# Lookup table — presumably maps OCDE codes to course-area names (TODO confirm).
df_curso_depara =pd.read_csv(end_curso_depara, sep = "|", encoding='latin1')
df_curso_depara
# Lookup table — presumably maps state codes (CO_UF) to state/region names.
end_estado_depara = r"D:\Dropbox\Digital House - Aulas Gravadas e Bases\Desafio 2 - Bases\DADOS\depara_estado.CSV"
df_estado_depara = pd.read_csv(end_estado_depara, sep=";")
df_estado_depara
# Faculty table; dtype=pd.Int64Dtype() loads every column as nullable
# integer — assumes all columns are integer-valued (TODO confirm).
df_docente = pd.read_csv(end_docente,dtype=pd.Int64Dtype(), sep="|")
df_docente
# Categorical faculty columns to one-hot encode, and numeric / 0-1 flag
# columns to keep as-is.
colunas_dummies = ['TP_CATEGORIA_ADMINISTRATIVA','TP_ORGANIZACAO_ACADEMICA','TP_SITUACAO','TP_REGIME_TRABALHO','TP_SEXO','TP_COR_RACA']
colunas_docente = ['CO_DOCENTE','NU_IDADE','IN_DEFICIENCIA_CEGUEIRA','IN_DEFICIENCIA_BAIXA_VISAO','IN_DEFICIENCIA_SURDEZ','IN_DEFICIENCIA_AUDITIVA','IN_DEFICIENCIA_FISICA','IN_DEFICIENCIA_SURDOCEGUEIRA','IN_DEFICIENCIA_MULTIPLA','IN_DEFICIENCIA_INTELECTUAL','IN_ATUACAO_EAD','IN_ATUACAO_EXTENSAO','IN_ATUACAO_GESTAO','IN_ATUACAO_GRAD_PRESENCIAL','IN_ATUACAO_POS_EAD','IN_ATUACAO_POS_PRESENCIAL','IN_ATUACAO_SEQUENCIAL','IN_ATUACAO_PESQUISA','IN_BOLSA_PESQUISA','IN_SUBSTITUTO','IN_EXERCICIO_DATA_REFERENCIA','IN_VISITANTE']
# Collect all dummy frames first and concatenate once: the original grew
# the frame with one pd.concat per column, which copies the accumulated
# frame every iteration (quadratic in the number of columns).
dummy_frames = [df_docente['CO_IES']]
for x in colunas_dummies:
    dummy_frames.append(pd.get_dummies(df_docente[x], prefix=x))
df_docente_dummies = pd.concat(dummy_frames, axis=1)
del dummy_frames
# Final faculty frame: untouched numeric columns + all dummies.
df_docente_continuo = df_docente[colunas_docente]
df_docente_completo = pd.concat([df_docente_continuo, df_docente_dummies], axis=1)
df_docente_completo
# Enrich courses with the OCDE-area and state lookup tables, then attach the
# course attributes to each student row. No `on=` is given, so each merge
# joins on all column names the two frames share — TODO confirm keys.
df_curso_completo = pd.merge(df_curso, df_curso_depara, how='left')
df_curso_completo = pd.merge(df_curso_completo, df_estado_depara, how='left')
df_aluno_completo = pd.merge(df_aluno, df_curso_completo, how='left')
# The raw sample is no longer needed; free the memory.
del df_aluno
df_aluno_completo
# ## 1) Tabelas com número de universidades (DM_ALUNO.CO_IES) públicas e privadas (DM_ALUNO.TP_CATEGORIA_ADMINISTRATIVA) por estado (DSM_CURSO.CO_UF) e número de alunos por universidade (DM_ALUNO.CO_ALUNO)
# Codes 1-3 are public, 4+ private — presumably per the INEP data
# dictionary; TODO confirm.
df_aluno_completo.loc[df_aluno_completo['TP_CATEGORIA_ADMINISTRATIVA'] < 4, 'tipo_faculdade'] = 'pública'
df_aluno_completo.loc[df_aluno_completo['TP_CATEGORIA_ADMINISTRATIVA'] >= 4, 'tipo_faculdade'] = 'privada'
df_aluno_completo
# Distinct institutions and distinct students per (state, public/private).
# Fix: select the two columns with a list — `groupby(...)['A','B']` tuple
# indexing is deprecated and removed in pandas >= 2.0.
questao_um = df_aluno_completo.groupby(['UF','tipo_faculdade'])[['CO_IES','CO_ALUNO']].aggregate([pd.Series.nunique])
questao_um.unstack()
# ## 2) Pergunta-se: é verdade que existe menos mulheres (DSM_ALUNO.TP_SEXO) nos cursos de exatas (DSM_CURSO.CO_OCDE_AREA_GERAL CO_OCDE_AREA_ESPECIFICA CO_OCDE_AREA_DETALHADA CO_OCDE)? Explique com os dados.
# STEM ("exatas") flag: OCDE general area 4 excluding detailed areas
# 421/422, or general area 5 — presumably sciences/computing/engineering;
# confirm against the OCDE area lookup table.
df_aluno_completo['curso_exatas'] = (df_aluno_completo['CO_OCDE_AREA_GERAL'].isin([4]) & ~df_aluno_completo['CO_OCDE_AREA_DETALHADA'].isin([421,422]) ) | df_aluno_completo['CO_OCDE_AREA_GERAL'].isin([5])
df_aluno_completo
# NOTE(review): soh_exatas is computed but never used below.
soh_exatas = df_aluno_completo[df_aluno_completo['curso_exatas'] == True][['CO_CURSO','TP_SEXO']]
# Share of each sex within every course; the rename assumes TP_SEXO
# 1=female, 2=male — TODO confirm against the data dictionary.
prop_sexo = df_aluno_completo.groupby(['CO_CURSO'])['TP_SEXO'].value_counts(normalize=True)
prop_sexo = prop_sexo.unstack().rename(columns={1: "Mulheres", 2: "Homens"})
prop_sexo
prop_sexo['Mulheres'].plot.hist(alpha=0.7);
prop_sexo['Homens'].plot.hist(alpha=0.7);
# +
# mean, sample standard deviation and number of observations
mean = prop_sexo['Mulheres'].mean()
std = prop_sexo['Mulheres'].std()
n = len(prop_sexo['Mulheres'])
# 95% confidence interval for the mean share of women per course
# from the Z table: 0.95 / 2 = 0.4750 --> 1.96
limite_inferior , limite_superior = mean - 1.96*std/np.sqrt(n), mean + 1.96*std/np.sqrt(n)
limite_inferior , limite_superior
# -
# Is the H0 value (equal shares, 0.5) inside the 95% interval?
p_H0 = 0.5
limite_inferior < p_H0 < limite_superior
# ## 3) Quantos cursos (DM_CURSO.DT_INICIO_FUNCIONAMENTO / DM_CURSO.CO_CURSO) novos abrem por ano?
df_aluno_completo.DT_INICIO_FUNCIONAMENTO.head(10)
# Parse the course opening date (day-first); unparseable values become NaT.
df_aluno_completo.DT_INICIO_FUNCIONAMENTO = pd.to_datetime(df_aluno_completo.DT_INICIO_FUNCIONAMENTO,errors='coerce',dayfirst=True)
df_aluno_completo.DT_INICIO_FUNCIONAMENTO.head(10)
ano_funcionamento = df_aluno_completo.DT_INICIO_FUNCIONAMENTO.dt.year
# Number of distinct courses opened per year.
questao_tres = df_aluno_completo.groupby([ano_funcionamento])['CO_CURSO'].aggregate([pd.Series.nunique]).reset_index()
questao_tres = questao_tres.rename(columns={'DT_INICIO_FUNCIONAMENTO': "Ano de Funcionamento", 'nunique':'Cursos Novos'})
questao_tres
questao_tres.plot.line(x='Ano de Funcionamento', y = 'Cursos Novos')
questao_tres.loc[questao_tres['Ano de Funcionamento'] >= 1950].plot.line(x='Ano de Funcionamento', y = 'Cursos Novos');
# Cumulative number of courses over time.
questao_tres['Acumulado'] = questao_tres['Cursos Novos'].cumsum()
questao_tres.plot.line(x='Ano de Funcionamento', y = 'Acumulado')
# $$
# \frac{d f}{dx} = \frac{\Delta y}{\Delta x} = \frac{(y + \epsilon) - (y - \epsilon)}{(x + \epsilon) - (x-\epsilon)}
# $$
# Year-over-year change of the cumulative total; then its average since 2000.
questao_tres['Dif Cursos Novos'] = questao_tres['Acumulado'] - questao_tres['Acumulado'].shift(1)
questao_tres[questao_tres['Ano de Funcionamento'] >= 2000]['Dif Cursos Novos'].mean()
# ## 5) Rode uma regressão múltipla que explique o abandono dos cursos (DM_ALUNO.TP_SITUACAO), será que professores mais/menos preparados influenciam essas taxas (DM_DOCENTE.TP_REGIME_TRABALHO IN_BOLSA_PESQUISA IN_ATUACAO_PESQUISA TP_ESCOLARIDADE)?
# +
# Students with TP_SITUACAO 3/4/5 are counted as dropouts — TODO confirm
# the code meanings against the data dictionary.
df_aluno_completo['abandono'] = df_aluno_completo['TP_SITUACAO'].isin([3,4,5])
# Per institution: total students and total dropouts. Fix: the dict passed
# to .agg already selects the columns, so the deprecated
# `groupby(...)['A','B']` tuple indexing (removed in pandas >= 2.0) is dropped.
questao_cinco_aluno = df_aluno_completo.groupby(['CO_IES']).agg({'CO_ALUNO' : 'count', 'abandono':'sum'})
# -
# Dropout rate per institution; drop the intermediate counts.
questao_cinco_aluno['taxa_abandono'] = questao_cinco_aluno['abandono'] / questao_cinco_aluno['CO_ALUNO']
del questao_cinco_aluno['abandono']
del questao_cinco_aluno['CO_ALUNO']
questao_cinco_aluno.reset_index()
# Faculty-frame columns excluding the two keys; split them into the
# continuous/flag columns (averaged per institution later) and the dummy
# columns (summed per institution later).
lista_docente = list(df_docente_completo.columns)
lista_docente.remove('CO_DOCENTE')
lista_docente.remove('CO_IES')
lista1 = [coluna for coluna in lista_docente if coluna in colunas_docente]
lista2 = [coluna for coluna in lista_docente if coluna not in colunas_docente]
print(lista1)
print(lista2)
# Faculty headcount per institution.
# NOTE(review): this is a Series (named CO_DOCENTE, indexed by CO_IES);
# merging it below with on='CO_IES' relies on pandas accepting a named
# Series in pd.merge — verify on the target pandas version
# (.reset_index() here would make the key an explicit column).
questao_cinco_docente = df_docente_completo.groupby('CO_IES')['CO_DOCENTE'].agg('size')
# Continuous columns: institution-level mean.
for y in lista1:
    temp = df_docente_completo.groupby('CO_IES',as_index=False)[y].agg('mean')
    questao_cinco_docente = pd.merge(questao_cinco_docente,temp,how='inner',on='CO_IES')
    del temp
# Dummy columns: institution-level total.
for y in lista2:
    temp = df_docente_completo.groupby('CO_IES',as_index=False)[y].agg('sum')
    questao_cinco_docente = pd.merge(questao_cinco_docente,temp,how='inner',on='CO_IES')
    del temp
questao_cinco_docente
# Regressors: every column of questao_cinco except the merge key and the target.
questao_cinco_var = list(questao_cinco.columns)
questao_cinco_var.remove('CO_IES')
questao_cinco_var.remove('taxa_abandono')
# Build the patsy formula with a join instead of stringifying the list and
# stripping brackets/quotes — clearer and independent of list repr details.
function = 'taxa_abandono ~ ' + ' + '.join(questao_cinco_var)
# OLS of the institution-level dropout rate on the faculty aggregates.
model = smf.ols(function, questao_cinco).fit()
results_summary = model.summary()
results_summary
# Convert the coefficient table of the statsmodels summary into a DataFrame
# so it can be filtered programmatically.
results_as_html = results_summary.tables[1].as_html()
results_tabela = pd.read_html(results_as_html, header=0, index_col=0)[0]
results_tabela
# Keep only regressors significant at the 5% level.
results_tabela = results_tabela[results_tabela['P>|t|'] < 0.05]
results_tabela
# Re-run the regression with a hand-picked subset of the significant variables.
function = 'taxa_abandono ~ NU_IDADE + IN_ATUACAO_EXTENSAO + IN_ATUACAO_POS_PRESENCIAL'
model = smf.ols(function, questao_cinco).fit()
model.summary()
# ## 6) Quais os cursos com maior crescimento de matriculas (DM_ALUNO.IN_MATRICULA) por região (CRIAR)? E quais os com maior queda? Como você explicaria isso.
# Enrollment totals per (region, course area) for students entering from 2000 on.
questao_seis = df_aluno_completo[df_aluno_completo['NU_ANO_INGRESSO'] >= 2000].groupby(['Região','NO_OCDE'])['IN_MATRICULA'].agg(['sum','count']).reset_index(level=[0,1])
questao_seis['taxa_matricula'] = questao_seis['sum'].div(questao_seis['count'])
questao_seis.sort_values(by=['Região','sum','taxa_matricula'],ascending=False, inplace=True)
questao_seis
# For each region, keep the 5 course areas with the most and the fewest enrollments.
top_blocks = []
bottom_blocks = []
for regiao in questao_seis['Região'].unique():
    por_regiao = questao_seis.loc[questao_seis['Região'] == regiao]
    top_blocks.append(por_regiao.nlargest(5, columns=['sum']))
    bottom_blocks.append(por_regiao.nsmallest(5, columns=['sum']))
lista1 = pd.concat(top_blocks)
lista2 = pd.concat(bottom_blocks)
pd.concat([lista1, lista2]).sort_values(by=['Região','sum'],ascending=False)
# 7) Construa uma variável "Signo" (DM_ALUNO.NU_ANO_NASCIMENTO NU_MES_NASCIMENTO NU_DIA_NASCIMENTO) dos estudantes e explique porque ela é correlacionada com a variável "probabilidade de formação" (construir)
# Zodiac-sign boundary table. Dates carry only day/month, so pd.to_datetime
# fills in its default year (1900) — harmless for sign comparisons.
end_signos = r"D:\Dropbox\Digital House - Aulas Gravadas e Bases\Desafio 2 - Bases\DADOS\tabela_signos.CSV"
tabela_signos = pd.read_csv(end_signos, sep = ";")
tabela_signos['nova_data_nasc'] = pd.to_datetime(tabela_signos['Data_nascimento'],format='%d/%m')
tabela_signos
| desafio_2_jonatas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Recognition of hailstorm events using the hail reports collected by the MeteoSwiss smartphone application
# <p>
# <NAME>, February 2019
# </p>
#
# <p>
# AM: This work is based on the Tutorial V, given by <NAME>:
# </p>
# <p>
# Bern Winter School on Machine Learning, 28.01-01.02 2019<br>
# <NAME>
# </p>
#
# This work is licensed under a <a href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License</a>.
# +
# AM: Libraries AND hail image files are unpacked at the first usage of the code, otherwise the following line shoud remain commented
# #!tar -xvzf ./material.tgz
# #!tar -xvzf ./hailimages_daily.tar
# +
# AM: Importing required libraries
import sys
import os
import csv
import math
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
import IPython.display as ipyd
import tensorflow as tf
from PIL import Image
# We'll tell matplotlib to inline any drawn figures like so:
# %matplotlib inline
plt.style.use('ggplot')
from utils import gr_disp
from utils import inception
from IPython.core.display import HTML
# Notebook cosmetics only: style inline `code` spans in rendered markdown cells.
HTML("""<style> .rendered_html code {
padding: 2px 5px;
color: #0000aa;
background-color: #cccccc;
} </style>""")
# -
# AM: helper that builds TF sessions with bounded GPU memory usage
def tfSessionLimited(graph=None):
    """Return a tf.Session for *graph* capped at 85% of the memory of GPU 0."""
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.85)
    session_config = tf.ConfigProto(gpu_options=gpu_options)
    # Expose only the first GPU to this session.
    session_config.gpu_options.visible_device_list = str(0)
    return tf.Session(graph=graph, config=session_config)
# Load the pre-trained Inception graph definition and its class labels.
net, net_labels = inception.get_inception_model()
# +
#get model graph definition and do NOT change it to use GPU
gd = net
str_dg = gd.SerializeToString()
#uncomment next line to use GPU acceleration
# AM: the code was run on CPUs, so the next line stays commented:
#str_dg = str_dg.replace(b'/cpu:0', b'/gpu:0') #a bit extreme approach, but works =)
gd = gd.FromString(str_dg)
#gr_disp.show(gd)
# -
# Keep only the subgraph needed to compute the 'output' node, and import it
# into a fresh graph under the 'inception' name scope.
gd2 = tf.graph_util.extract_sub_graph(gd, ['output'])
g2 = tf.Graph() # full graph
with g2.as_default():
    tf.import_graph_def(gd2, name='inception')
names = [op.name for op in g2.get_operations()]
# ## 4. Build own regressor on top
# We will now create a fully connected regressor the same way as in the previous session. The only difference is that instead of raw image data as input we will use 2048 image features that Inception is trained to detect. We will classify images in 2 classes.
# +
#
def fully_connected_layer(x, n_output, name=None, activation=None):
    """Fully connected layer.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to connect
    n_output : int
        Number of output neurons
    name : None, optional
        TF Scope to apply
    activation : None, optional
        Non-linear activation function

    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of the fully connected layer and the weight matrix
    """
    # Flatten any higher-rank input down to (batch, features).
    # NOTE(review): `flatten` is not defined in this cell — presumably it
    # comes from an earlier cell or a helper module; confirm.
    if len(x.get_shape()) != 2:
        x = flatten(x, reuse=None)
    n_input = x.get_shape().as_list()[1]
    with tf.variable_scope(name or "fc", reuse=None):
        # Weight matrix, Xavier-initialized.
        W = tf.get_variable(
            name='W',
            shape=[n_input, n_output],
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        # Bias vector, initialized to zero.
        b = tf.get_variable(
            name='b',
            shape=[n_output],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0))
        # Affine transform h = xW + b.
        h = tf.nn.bias_add(
            name='h',
            value=tf.matmul(x, W),
            bias=b)
        if activation:
            h = activation(h)
        return h, W
# -
with g2.as_default():
    # Inception's input placeholder and its 2048-feature bottleneck tensor,
    # used as a frozen feature extractor for the new classification head.
    x = g2.get_tensor_by_name('inception/input:0')
    features = g2.get_tensor_by_name('inception/head0_bottleneck/reshape:0')
    #placeholder for the true one-hot label
    Y = tf.placeholder(name='Y', dtype=tf.float32, shape=[None, 2])
    #two hidden layers with 512 sigmoid neurons each, then a 2-unit softmax output
    L1, W1 = fully_connected_layer(features, 512, 'FC1', tf.nn.sigmoid )
    L2, W2 = fully_connected_layer(L1, 512, 'FC2', tf.nn.sigmoid )
    L3, W3 = fully_connected_layer(L2 , 2, 'FC3')
    Y_onehot = tf.nn.softmax(L3, name='Logits')
    Y_pred = tf.argmax(Y_onehot, axis=1, name='YPred')
    #cross-entropy used as a measure for quality of each image
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=L3, labels=Y)
    #mean cross_entropy - for a set of images
    loss = tf.reduce_mean(cross_entropy)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)
    #Accuracy is defined as fraction of correctly recognized images.
    Y_true = tf.argmax(Y, 1)
    Correct = tf.equal(Y_true, Y_pred, name='CorrectY')
    Accuracy = tf.reduce_mean(tf.cast(Correct, dtype=tf.float32), name='Accuracy')
# +
## Dataset
# -
# AM: two labels: no radar hail and presence of radar hail
text_label = ['No Hail', 'Hail']
# +
labels0 = []
images0 = []
labels1 = []
images1 = []
#NoHail: read every class-0 image, force RGB, sanity-check size/channels,
#and run the Inception preprocessing
filenames=os.listdir("./NoHail_daily_80/")
#np.array(img.convert('RGB'))
for filename in filenames:
    img = np.array(Image.fromarray(plt.imread('./NoHail_daily_80/' + filename)).convert('RGB'))
    assert(img.shape[0]>=50 and img.shape[1]>=50 and len(img.shape)==3)
    images0.append(inception.prepare_training_img(img))
    labels0.append([1,0])
#Hail: same for the class-1 directory
filenames=os.listdir("./Hail_daily_80/")
for filename in filenames:
    img = np.array(Image.fromarray(plt.imread('./Hail_daily_80/' + filename)).convert('RGB'))
    assert(img.shape[0]>=50 and img.shape[1]>=50 and len(img.shape)==3)
    images1.append(inception.prepare_training_img(img))
    labels1.append([0,1])
#AM: permutation: different number of "hail" and "no hail" cases, while equal numbers are required.
#Ad hoc solution: permutation of "no hail", then additional permutation; only a number equal to that of "hail" cases is retained
#AM: saving the full no hail image set for a manual validation check
# NOTE: images0_full aliases the same list object — safe only because
# images0 is rebound (not mutated) below.
images0_full=images0
idx1 = np.random.permutation(len(labels1))
labels1 = np.array(labels1)[idx1]
images1 = np.array(images1)[idx1]
idx0 = np.random.permutation(len(labels0))
labels0 = np.array(labels0)[idx0]
images0 = np.array(images0)[idx0]
# Indexing the (already shuffled) class-0 arrays with idx1 keeps exactly
# len(labels1) of them — assumes class 0 has at least as many samples as
# class 1 (TODO confirm).
labels0 = np.array(labels0)[idx1]
images0 = np.array(images0)[idx1]
# -
# Preview one prepared training image per class (left: no hail, right: hail).
_, axes = plt.subplots(1, 2, figsize=(20, 20))
for axis, sample_set in zip(axes, (images0, images1)):
    preview = inception.training_img_to_display(sample_set[20])
    axis.imshow(preview)
    axis.grid(False)
plt.show()
# +
#We will take 80% from each class for training and 20% for validation
#AM: separation between "hail" and "no hail" events: not finished yet
n_half0 = images0.shape[0]
n_train_half0 = n_half0*80//100
# NOTE(review): n_train0/n_train1 double the per-class training counts and
# are used below only as permutation sizes; this matches len(x_train) only
# because class 0 was downsampled to the size of class 1 above — confirm.
n_train0 = n_train_half0*2
n_half1 = images1.shape[0]
n_train_half1 = n_half1*80//100
n_train1 = n_train_half1*2
# First 80% of each class goes to training, the remainder to validation.
x_train = np.r_[images0[:n_train_half0], images1[:n_train_half1]]
y_train = np.r_[labels0[:n_train_half0], labels1[:n_train_half1]]
x_valid = np.r_[images0[n_train_half0:], images1[n_train_half1:]]
y_valid = np.r_[labels0[n_train_half0:], labels1[n_train_half1:]]
mini_batch_size = 10
#directory where the model will be stored
try:
    os.mkdir('Hail_80_output')
except:
    # NOTE(review): bare except silently ignores *any* failure, not just
    # "directory already exists"; os.makedirs(..., exist_ok=True) is safer.
    pass
with tfSessionLimited(graph=g2) as sess:
    #initialize all the variables
    a_tr = []      # training accuracy per epoch
    a_vld = []     # validation accuracy per epoch
    losses_t = []  # training loss (last minibatch of each epoch)
    losses_v = []  # validation loss per epoch
    #create saver
    saver = tf.train.Saver(tf.global_variables())
    sess.run(tf.global_variables_initializer())
    saver.export_meta_graph(os.path.join('Hail_80_output', 'model.meta'))
    for epoch in range (80):
        #shuffle the data and perform stochastic gradient descent by running over all minibatches
        idx0 = np.random.permutation(n_train0)  # NOTE(review): unused below
        idx1 = np.random.permutation(n_train1)
        for mb in range(n_train1//mini_batch_size):
            sub_idx = idx1[mini_batch_size*mb:mini_batch_size*(mb+1)]
            _, l = sess.run((optimizer, loss), feed_dict={x:x_train[sub_idx], Y:y_train[sub_idx]})
        l_v = sess.run(loss, feed_dict={x:x_valid, Y:y_valid})
        losses_t.append(np.mean(l))
        losses_v.append(np.mean(l_v))
        #get accuracy on the training set and test set
        accuracy_train = sess.run(Accuracy, feed_dict={x:x_train, Y:y_train})
        accuracy_valid = sess.run(Accuracy, feed_dict={x:x_valid, Y:y_valid})
        #every 10th epoch print accuracies and current loss
        # if epoch%10 == 0:
        print(epoch,accuracy_train, accuracy_valid, l,l_v)
        a_tr.append(accuracy_train)
        a_vld.append(accuracy_valid)
    #save the graph state, checkpoint ch-0
    checkpoint_prefix = os.path.join('Hail_80_output', 'ch')
    saver.save(sess, checkpoint_prefix, global_step=0, latest_filename='ch_last')
# Learning curves: accuracy, then loss.
plt.plot(a_tr)
plt.plot(a_vld)
plt.legend(('training accuracy', 'validation accuracy'), loc='lower right')
plt.show()
plt.plot(losses_t)
plt.plot(losses_v)
plt.legend(('training loss','validation loss'), loc='upper right')
plt.show()
# +
# Reload the trained head from checkpoint ch-0 and run inference on the
# full (pre-downsampling) "no hail" image set for a manual sanity check.
with tfSessionLimited(graph=g2) as sess:
    #create saver and restore values
    saver = tf.train.Saver()
    saver.restore(sess, os.path.join('Hail_80_output', 'ch-0'))
    #check that we still get proper performance on the full "no hail" dataset
    r0f = sess.run(Y_onehot, feed_dict={x:images0_full[:]})
    print("NoHail")
    print(r0f)
    # #check that we still get proper performance on a random image: no hail
    # r0 = sess.run(Y_onehot, feed_dict={x:images0[:]})
    # print("NoHail")
    # print(r0)
    # #check that we still get proper performance on a random image: hail
    # r1 = sess.run(Y_onehot, feed_dict={x:images1[:]})
    # print("Hail")
    # print(r1)
# -
| M3/Project/Martynov_Hailstorm_recognition.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.1.0
# language: julia
# name: julia-1.1
# ---
#
# <a id='lake-model'></a>
# <div id="qe-notebook-header" style="text-align:right;">
# <a href="https://quantecon.org/" title="quantecon.org">
# <img style="width:250px;display:inline;" src="https://assets.quantecon.org/img/qe-menubar-logo.svg" alt="QuantEcon">
# </a>
# </div>
# # A Lake Model of Employment and Unemployment
#
#
# <a id='index-0'></a>
# ## Contents
#
# - [A Lake Model of Employment and Unemployment](#A-Lake-Model-of-Employment-and-Unemployment)
# - [Overview](#Overview)
# - [The Model](#The-Model)
# - [Implementation](#Implementation)
# - [Dynamics of an Individual Worker](#Dynamics-of-an-Individual-Worker)
# - [Endogenous Job Finding Rate](#Endogenous-Job-Finding-Rate)
# - [Exercises](#Exercises)
# - [Solutions](#Solutions)
# ## Overview
#
# This lecture describes what has come to be called a *lake model*
#
# The lake model is a basic tool for modeling unemployment
#
# It allows us to analyze
#
# - flows between unemployment and employment
# - how these flows influence steady state employment and unemployment rates
#
#
# It is a good model for interpreting monthly labor department reports on gross and net jobs created and jobs destroyed
#
# The “lakes” in the model are the pools of employed and unemployed
#
# The “flows” between the lakes are caused by
#
# - firing and hiring
# - entry and exit from the labor force
#
#
# For the first part of this lecture, the parameters governing transitions into
# and out of unemployment and employment are exogenous
#
# Later, we’ll determine some of these transition rates endogenously using the [McCall search model](dynamic_programming/mccall_model.html)
#
# We’ll also use some nifty concepts like ergodicity, which provides a fundamental link between *cross-sectional* and *long run time series* distributions
#
# These concepts will help us build an equilibrium model of ex ante homogeneous workers whose different luck generates variations in their ex post experiences
# ### Prerequisites
#
# Before working through what follows, we recommend you read the [lecture
# on finite Markov chains](tools_and_techniques/finite_markov.html)
#
# You will also need some basic [linear algebra](tools_and_techniques/linear_algebra.html) and probability
# ## The Model
#
# The economy is inhabited by a very large number of ex ante identical workers
#
# The workers live forever, spending their lives moving between unemployment and employment
#
# Their rates of transition between employment and unemployment are governed by the following parameters:
#
# - $ \lambda $, the job finding rate for currently unemployed workers
# - $ \alpha $, the dismissal rate for currently employed workers
# - $ b $, the entry rate into the labor force
# - $ d $, the exit rate from the labor force
#
#
# The growth rate of the labor force evidently equals $ g=b-d $
# ### Aggregate Variables
#
# We want to derive the dynamics of the following aggregates
#
# - $ E_t $, the total number of employed workers at date $ t $
# - $ U_t $, the total number of unemployed workers at $ t $
# - $ N_t $, the number of workers in the labor force at $ t $
#
#
# We also want to know the values of the following objects
#
# - The employment rate $ e_t := E_t/N_t $
# - The unemployment rate $ u_t := U_t/N_t $
#
#
# (Here and below, capital letters represent stocks and lowercase letters represent flows)
# ### Laws of Motion for Stock Variables
#
# We begin by constructing laws of motion for the aggregate variables $ E_t,U_t, N_t $
#
# Of the mass of workers $ E_t $ who are employed at date $ t $,
#
# - $ (1-d)E_t $ will remain in the labor force
# - of these, $ (1-\alpha)(1-d)E_t $ will remain employed
#
#
# Of the mass of workers $ U_t $ workers who are currently unemployed,
#
# - $ (1-d)U_t $ will remain in the labor force
# - of these, $ (1-d) \lambda U_t $ will become employed
#
#
# Therefore, the number of workers who will be employed at date $ t+1 $ will be
#
# $$
# E_{t+1} = (1-d)(1-\alpha)E_t + (1-d)\lambda U_t
# $$
#
# A similar analysis implies
#
# $$
# U_{t+1} = (1-d)\alpha E_t + (1-d)(1-\lambda)U_t + b (E_t+U_t)
# $$
#
# The value $ b(E_t+U_t) $ is the mass of new workers entering the labor force unemployed
#
# The total stock of workers $ N_t=E_t+U_t $ evolves as
#
# $$
# N_{t+1} = (1+b-d)N_t = (1+g)N_t
# $$
#
# Letting $ X_t := \left(\begin{matrix}U_t\\E_t\end{matrix}\right) $, the law of motion for $ X $ is
#
# $$
# X_{t+1} = A X_t
# \quad \text{where} \quad
# A :=
# \begin{pmatrix}
# (1-d)(1-\lambda) + b & (1-d)\alpha + b \\
# (1-d)\lambda & (1-d)(1-\alpha)
# \end{pmatrix}
# $$
#
# This law tells us how total employment and unemployment evolve over time
# ### Laws of Motion for Rates
#
# Now let’s derive the law of motion for rates
#
# To get these we can divide both sides of $ X_{t+1} = A X_t $ by $ N_{t+1} $ to get
#
# $$
# \begin{pmatrix}
# U_{t+1}/N_{t+1} \\
# E_{t+1}/N_{t+1}
# \end{pmatrix}
# =
# \frac1{1+g} A
# \begin{pmatrix}
# U_{t}/N_{t}
# \\
# E_{t}/N_{t}
# \end{pmatrix}
# $$
#
# Letting
#
# $$
# x_t :=
# \left(\begin{matrix}
# u_t\\ e_t
# \end{matrix}\right)
# = \left(\begin{matrix}
# U_t/N_t\\ E_t/N_t
# \end{matrix}\right)
# $$
#
# we can also write this as
#
# $$
# x_{t+1} = \hat A x_t
# \quad \text{where} \quad
# \hat A := \frac{1}{1 + g} A
# $$
#
# You can check that $ e_t + u_t = 1 $ implies that $ e_{t+1}+u_{t+1} = 1 $
#
# This follows from the fact that the columns of $ \hat A $ sum to 1
# ## Implementation
#
# Let’s code up these equations
#
# Here’s the code:
# ### Setup
# + hide-output=false
using InstantiateFromURL
activate_github("QuantEcon/QuantEconLecturePackages", tag = "v0.9.8");
# + hide-output=true
using LinearAlgebra, Statistics, Compat
using Distributions, Expectations, NLsolve, Parameters, Plots
using QuantEcon, Roots, Random
# + hide-output=false
gr(fmt = :png);
# + hide-output=false
# Parameter container (named tuple via Parameters.@with_kw) with baseline
# monthly rates: λ job finding, α dismissal, b labor-force entry, d exit.
LakeModel = @with_kw (λ = 0.283, α = 0.013, b = 0.0124, d = 0.00822)
function transition_matrices(lm)
    # A drives the stocks, X_{t+1} = A X_t; Â = A/(1+g) drives the rates,
    # where g = b - d is the labor-force growth rate.
    @unpack λ, α, b, d = lm
    g = b - d
    A = [(1 - λ) * (1 - d) + b (1 - d) * α + b
         (1 - d) * λ (1 - d) * (1 - α)]
    Â = A ./ (1 + g)
    return (A = A, Â = Â)
end
function rate_steady_state(lm)
    # Steady state of the (u, e) rates: the fixed point x̄ = Â x̄,
    # found by fixed-point iteration from (0.5, 0.5).
    @unpack Â = transition_matrices(lm)
    sol = fixedpoint(x -> Â * x, fill(0.5, 2))
    # Bug fix: the error message interpolated an undefined variable
    # `result`; the solver result is bound to `sol`.
    converged(sol) || error("Failed to converge in $(sol.iterations) iterations")
    return sol.zero
end
function simulate_stock_path(lm, X0, T)
    # Record T iterates of the stock law of motion X_{t+1} = A X_t,
    # starting from X0 (column t holds the state entering period t).
    @unpack A = transition_matrices(lm)
    path = zeros(eltype(X0), 2, T)
    state = copy(X0)
    for period in 1:T
        path[:, period] .= state
        state = A * state
    end
    return path
end
function simulate_rate_path(lm, x0, T)
    # Iterate the rate law of motion x_{t+1} = Â x_t for T periods and
    # return the 2×T path (row 1: unemployment rate, row 2: employment rate).
    @unpack Â = transition_matrices(lm)
    x_path = zeros(eltype(x0), 2, T)
    x = copy(x0)
    for t in 1:T
        x_path[:, t] = x
        x = Â * x
    end
    return x_path
end
# -
# Let’s observe these matrices for the baseline model
# + hide-output=false
# Baseline parameterization: stock matrix A and rate matrix Â.
lm = LakeModel()
A, Â = transition_matrices(lm)
A
# + hide-output=false
Â
# -
# And a revised model
# + hide-output=false
# Same matrices after overriding the dismissal-rate parameter α.
lm = LakeModel(α = 2.0)
A, Â = transition_matrices(lm)
A
# + hide-output=false
Â
# -
# ### Aggregate Dynamics
#
# Let’s run a simulation under the default parameters (see above) starting from $ X_0 = (12, 138) $
# + hide-output=false
lm = LakeModel()
N_0 = 150      # population
e_0 = 0.92     # initial employment rate
u_0 = 1 - e_0  # initial unemployment rate
T = 50         # simulation length
# Convert the initial rates into stocks and iterate X_{t+1} = A X_t.
U_0 = u_0 * N_0
E_0 = e_0 * N_0
X_0 = [U_0; E_0]
X_path = simulate_stock_path(lm, X_0, T)
x1 = X_path[1, :]                               # unemployed stock
x2 = X_path[2, :]                               # employed stock
x3 = dropdims(sum(X_path, dims = 1), dims = 1)  # total labor force
plt_unemp = plot(title = "Unemployment", 1:T, x1, color = :blue, lw = 2, grid = true, label = "")
plt_emp = plot(title = "Employment", 1:T, x2, color = :blue, lw = 2, grid = true, label = "")
plt_labor = plot(title = "Labor force", 1:T, x3, color = :blue, lw = 2, grid = true, label = "")
plot(plt_unemp, plt_emp, plt_labor, layout = (3, 1), size = (800, 600))
# -
# The aggregates $ E_t $ and $ U_t $ don’t converge because their sum $ E_t + U_t $ grows at rate $ g $
#
# On the other hand, the vector of employment and unemployment rates $ x_t $ can be in a steady state $ \bar x $ if
# there exists an $ \bar x $ such that
#
# - $ \bar x = \hat A \bar x $
# - the components satisfy $ \bar e + \bar u = 1 $
#
#
# This equation tells us that a steady state level $ \bar x $ is an eigenvector of $ \hat A $ associated with a unit eigenvalue
#
# We also have $ x_t \to \bar x $ as $ t \to \infty $ provided that the remaining eigenvalue of $ \hat A $ has modulus less than 1
#
# This is the case for our default parameters:
# + hide-output=false
# Eigenvalues of Â: one is 1 (the steady-state direction); convergence of
# the rates requires the other to have modulus below 1.
lm = LakeModel()
A, Â = transition_matrices(lm)
e, f = eigvals(Â)
abs(e), abs(f)
# -
# Let’s look at the convergence of the unemployment and employment rate to steady state levels (dashed red line)
# + hide-output=false
lm = LakeModel()
e_0 = 0.92    # initial employment rate
u_0 = 1 - e_0 # initial unemployment rate
T = 50        # simulation length
xbar = rate_steady_state(lm)
x_0 = [u_0; e_0]
x_path = simulate_rate_path(lm, x_0, T)
# Plot each rate's path against its steady-state level (dashed red line).
plt_unemp = plot(title ="Unemployment rate", 1:T, x_path[1, :],color = :blue, lw = 2,
                 alpha = 0.5, grid = true, label = "")
plot!(plt_unemp, [xbar[1]], color=:red, linetype = :hline, linestyle = :dash, lw = 2, label = "")
plt_emp = plot(title = "Employment rate", 1:T, x_path[2, :],color = :blue, lw = 2, alpha = 0.5,
               grid = true, label = "")
plot!(plt_emp, [xbar[2]], color=:red, linetype = :hline, linestyle = :dash, lw = 2, label = "")
plot(plt_unemp, plt_emp, layout = (2, 1), size=(700,500))
# -
# ## Dynamics of an Individual Worker
#
# An individual worker’s employment dynamics are governed by a [finite state Markov process](tools_and_techniques/finite_markov.html)
#
# The worker can be in one of two states:
#
# - $ s_t=0 $ means unemployed
# - $ s_t=1 $ means employed
#
#
# Let’s start off under the assumption that $ b = d = 0 $
#
# The associated transition matrix is then
#
# $$
# P = \left(
# \begin{matrix}
# 1 - \lambda & \lambda \\
# \alpha & 1 - \alpha
# \end{matrix}
# \right)
# $$
#
# Let $ \psi_t $ denote the [marginal distribution](tools_and_techniques/finite_markov.html#mc-md) over employment / unemployment states for the worker at time $ t $
#
# As usual, we regard it as a row vector
#
# We know [from an earlier discussion](tools_and_techniques/finite_markov.html#mc-md) that $ \psi_t $ follows the law of motion
#
# $$
# \psi_{t+1} = \psi_t P
# $$
#
# We also know from the [lecture on finite Markov chains](tools_and_techniques/finite_markov.html)
# that if $ \alpha \in (0, 1) $ and $ \lambda \in (0, 1) $, then
# $ P $ has a unique stationary distribution, denoted here by $ \psi^* $
#
# The unique stationary distribution satisfies
#
# $$
# \psi^*[0] = \frac{\alpha}{\alpha + \lambda}
# $$
#
# Not surprisingly, probability mass on the unemployment state increases with
# the dismissal rate and falls with the job finding rate
# ### Ergodicity
#
# Let’s look at a typical lifetime of employment-unemployment spells
#
# We want to compute the average amounts of time an infinitely lived worker would spend employed and unemployed
#
# Let
#
# $$
# \bar s_{u,T} := \frac1{T} \sum_{t=1}^T \mathbb 1\{s_t = 0\}
# $$
#
# and
#
# $$
# \bar s_{e,T} := \frac1{T} \sum_{t=1}^T \mathbb 1\{s_t = 1\}
# $$
#
# (As usual, $ \mathbb 1\{Q\} = 1 $ if statement $ Q $ is true and 0 otherwise)
#
# These are the fraction of time a worker spends unemployed and employed, respectively, up until period $ T $
#
# If $ \alpha \in (0, 1) $ and $ \lambda \in (0, 1) $, then $ P $ is [ergodic](tools_and_techniques/finite_markov.html#ergodicity), and hence we have
#
# $$
# \lim_{T \to \infty} \bar s_{u, T} = \psi^*[0]
# \quad \text{and} \quad
# \lim_{T \to \infty} \bar s_{e, T} = \psi^*[1]
# $$
#
# with probability one
#
# Inspection tells us that $ P $ is exactly the transpose of $ \hat A $ under the assumption $ b=d=0 $
#
# Thus, the percentages of time that an infinitely lived worker spends employed and unemployed equal the fractions of workers employed and unemployed in the steady state distribution
# ### Convergence rate
#
# How long does it take for time series sample averages to converge to cross sectional averages?
#
# We can use [QuantEcon.jl’s](http://quantecon.org/julia_index.html)
# MarkovChain type to investigate this
#
# Let’s plot the path of the sample averages over 5,000 periods
# + hide-output=false
using QuantEcon, Roots, Random
# + hide-output=false
# Shut down entry (b) and exit (d) so worker flows form a proper Markov chain.
lm = LakeModel(d = 0, b = 0)
T = 5000 # Simulation length

@unpack α, λ = lm
# Single-worker transition matrix: row/state 1 = unemployed, 2 = employed.
P = [(1 - λ) λ
     α (1 - α)]
# + hide-output=false
Random.seed!(42)  # reproducible sample path
mc = MarkovChain(P, [0; 1]) # 0=unemployed, 1=employed
xbar = rate_steady_state(lm)  # stationary (u, e) rates for comparison

s_path = simulate(mc, T; init=2)  # init=2: start in the employed state
# Running fractions of time spent employed/unemployed up to each period t
# (s_path values are 0/1, so the cumulative sum counts employed periods).
s̄_e = cumsum(s_path) ./ (1:T)
s̄_u = 1 .- s̄_e
s_bars = [s̄_u s̄_e]

# Plot each running average against its stationary value (dashed red line).
plt_unemp = plot(title = "Percent of time unemployed", 1:T, s_bars[:,1],color = :blue, lw = 2,
                 alpha = 0.5, label = "", grid = true)
plot!(plt_unemp, [xbar[1]], linetype = :hline, linestyle = :dash, color=:red, lw = 2, label = "")
plt_emp = plot(title = "Percent of time employed", 1:T, s_bars[:,2],color = :blue, lw = 2,
               alpha = 0.5, label = "", grid = true)
plot!(plt_emp, [xbar[2]], linetype = :hline, linestyle = :dash, color=:red, lw = 2, label = "")
plot(plt_unemp, plt_emp, layout = (2, 1), size=(700,500))
# -
# The stationary probabilities are given by the dashed red line
#
# In this case it takes much of the sample for these two objects to converge
#
# This is largely due to the high persistence in the Markov chain
# ## Endogenous Job Finding Rate
#
# We now make the hiring rate endogenous
#
# The transition rate from unemployment to employment will be determined by the McCall search model [[McC70]](zreferences.html#mccall1970)
#
# All details relevant to the following discussion can be found in [our treatment](dynamic_programming/mccall_model.html) of that model
# ### Reservation Wage
#
# The most important thing to remember about the model is that optimal decisions
# are characterized by a reservation wage $ \bar w $
#
# - If the wage offer $ w $ in hand is greater than or equal to $ \bar w $, then the worker accepts
# - Otherwise, the worker rejects
#
#
# As we saw in [our discussion of the model](dynamic_programming/mccall_model.html), the reservation wage depends on the wage offer distribution and the parameters
#
# - $ \alpha $, the separation rate
# - $ \beta $, the discount factor
# - $ \gamma $, the offer arrival rate
# - $ c $, unemployment compensation
# ### Linking the McCall Search Model to the Lake Model
#
# Suppose that all workers inside a lake model behave according to the McCall search model
#
# The exogenous probability of leaving employment remains $ \alpha $
#
# But their optimal decision rules determine the probability $ \lambda $ of leaving unemployment
#
# This is now
#
#
# <a id='equation-lake-lamda'></a>
# $$
# \lambda
# = \gamma \mathbb P \{ w_t \geq \bar w\}
# = \gamma \sum_{w' \geq \bar w} p(w') \tag{1}
# $$
# ### Fiscal Policy
#
# We can use the McCall search version of the Lake Model to find an optimal level of unemployment insurance
#
# We assume that the government sets unemployment compensation $ c $
#
# The government imposes a lump sum tax $ \tau $ sufficient to finance total unemployment payments
#
# To attain a balanced budget at a steady state, taxes, the steady state unemployment rate $ u $, and the unemployment compensation rate must satisfy
#
# $$
# \tau = u c
# $$
#
# The lump sum tax applies to everyone, including unemployed workers
#
# Thus, the post-tax income of an employed worker with wage $ w $ is $ w - \tau $
#
# The post-tax income of an unemployed worker is $ c - \tau $
#
# For each specification $ (c, \tau) $ of government policy, we can solve for the worker’s optimal reservation wage
#
# This determines $ \lambda $ via [(1)](#equation-lake-lamda) evaluated at post tax wages, which in turn determines a steady state unemployment rate $ u(c, \tau) $
#
# For a given level of unemployment benefit $ c $, we can solve for a tax that balances the budget in the steady state
#
# $$
# \tau = u(c, \tau) c
# $$
#
# To evaluate alternative government tax-unemployment compensation pairs, we require a welfare criterion
#
# We use a steady state welfare criterion
#
# $$
# W := e \, {\mathbb E} [V \, | \, \text{employed}] + u \, U
# $$
#
# where the notation $ V $ and $ U $ is as defined in the [McCall search model lecture](dynamic_programming/mccall_model.html)
#
# The wage offer distribution will be a discretized version of the lognormal distribution $ LN(\log(20),1) $, as shown in the next figure
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/lake_distribution_wages.png" style="width:80%;height:80%">
#
#
# We take a period to be a month
#
# We set $ b $ and $ d $ to match monthly [birth](http://www.cdc.gov/nchs/fastats/births.htm) and [death rates](http://www.cdc.gov/nchs/fastats/deaths.htm), respectively, in the U.S. population
#
# - $ b = 0.0124 $
# - $ d = 0.00822 $
#
#
# Following [[DFH06]](zreferences.html#davis2006flow), we set $ \alpha $, the hazard rate of leaving employment, to
#
# - $ \alpha = 0.013 $
# ### Fiscal Policy Code
#
# We will make use of (with some tweaks) the code we wrote in the [McCall model lecture](dynamic_programming/mccall_model.html), embedded below for convenience
# + hide-output=false
function solve_mccall_model(mcm; U_iv = 1.0, V_iv = ones(length(mcm.w)), tol = 1e-5,
                            iter = 2_000)
    # Solve the McCall model `mcm` by iterating its Bellman operator to a
    # fixed point. Returns (V = value of employment at each wage,
    # U = value of unemployment, w̄ = reservation wage).
    @unpack α, β, σ, c, γ, w, E, u = mcm

    # Per-wage flow utility of employment, and of unemployment compensation.
    u_w = u.(w, σ)
    u_c = u(c, σ)

    # Bellman operator T. Fixed point is x* s.t. T(x*) = x*
    # The state is stacked as x = [V; U].
    function T(x)
        V = x[1:end-1]
        U = x[end]
        [u_w + β * ((1 - α) * V .+ α * U); u_c + β * (1 - γ) * U + β * γ * E * max.(U, V)]
    end

    # value function iteration
    x_iv = [V_iv; U_iv] # initial x val
    xstar = fixedpoint(T, x_iv, iterations = iter, xtol = tol).zero
    V = xstar[1:end-1]
    U = xstar[end]

    # Reservation wage: first grid wage whose employment value covers U.
    # searchsortedfirst returns length(w) + 1 when no element of V .- U is
    # nonnegative, so only that case means "never accept". (The previous
    # `>= length(w)` test wrongly mapped an acceptable top grid wage to Inf.)
    w_barindex = searchsortedfirst(V .- U, 0.0)
    if w_barindex > length(w) # no wage on the grid is ever accepted
        w̄ = Inf
    else
        w̄ = w[w_barindex] # otherwise, return the number
    end

    # return a NamedTuple, so we can select values by name
    return (V = V, U = U, w̄ = w̄)
end
# -
# And the McCall object
# + hide-output=false
# A default utility function: CRRA with risk-aversion σ. Non-positive
# consumption receives a large negative penalty so it is never chosen.
# (The transcribed -10e-6 was a vanishing penalty, which would make
# negative consumption look better than small positive consumption.)
u(c, σ) = c > 0 ? (c^(1 - σ) - 1) / (1 - σ) : -10e6

# model constructor (named tuple with defaults, via Parameters.@with_kw)
McCallModel = @with_kw (α = 0.2,                              # job separation rate
                        β = 0.98, # discount rate
                        γ = 0.7,                              # offer arrival rate
                        c = 6.0, # unemployment compensation
                        σ = 2.0,                              # CRRA coefficient
                        u = u, # utility function
                        w = range(10, 20, length = 60), # wage values
                        E = Expectation(BetaBinomial(59, 600, 400))) # distribution over wage values
# -
# Now let’s compute and plot welfare, employment, unemployment, and tax revenue as a
# function of the unemployment compensation rate
# + hide-output=false
# some global variables that will stay constant
α = 0.013                 # monthly separation hazard
α_q = (1 - (1 - α)^3)     # hazard compounded over 3 months
b_param = 0.0124          # labor-force entry (birth) rate
d_param = 0.00822         # labor-force exit (death) rate
β = 0.98                  # discount factor
γ = 1.0                   # offer arrival rate
σ = 2.0                   # CRRA coefficient

# the default wage distribution: a discretized log normal
log_wage_mean, wage_grid_size, max_wage = 20, 200, 170
w_vec = range(1e-3, max_wage, length = wage_grid_size + 1)  # bin edges
logw_dist = Normal(log(log_wage_mean), 1)
cdf_logw = cdf.(logw_dist, log.(w_vec))
# Probability mass of each bin, renormalized to sum to one.
pdf_logw = cdf_logw[2:end] - cdf_logw[1:end-1]
p_vec = pdf_logw ./ sum(pdf_logw)
w_vec = (w_vec[1:end-1] + w_vec[2:end]) / 2  # replace edges by bin midpoints
E = expectation(Categorical(p_vec)) # expectation object
function compute_optimal_quantities(c, τ)
    # Solve the worker's McCall problem at policy (c, τ) and return
    # (reservation wage, job finding rate, value functions V and U).
    model = McCallModel(α = α_q,
                        β = β,
                        γ = γ,
                        c = c - τ,       # post-tax compensation
                        σ = σ,
                        w = w_vec .- τ,  # post-tax wages
                        E = E)           # expectation operator
    @unpack V, U, w̄ = solve_mccall_model(model)

    # Acceptance rule: take any post-tax wage strictly above the reservation
    # wage; λ is the arrival rate times the acceptance probability, eq. (1).
    accepts = wage -> wage > w̄
    λ = γ * E * accepts.(w_vec .- τ)

    return w̄, λ, V, U
end
function compute_steady_state_quantities(c, τ)
    # Steady-state unemployment, employment and welfare at policy (c, τ).
    w̄, λ_param, V, U = compute_optimal_quantities(c, τ)

    # compute steady state employment and unemployment rates
    lm = LakeModel(λ = λ_param, α = α_q, b = b_param, d = d_param)
    x = rate_steady_state(lm)
    u_rate, e_rate = x

    # compute steady state welfare
    # (the acceptance indicator was previously defined twice on consecutive
    # lines; the redundant duplicate definition has been removed)
    indicator(wage) = wage > w̄
    decisions = indicator.(w_vec .- τ)
    # Mean value of employment conditional on an accepted wage offer.
    w = (E * (V .* decisions)) / (E * decisions)
    welfare = e_rate .* w + u_rate .* U

    return u_rate, e_rate, welfare
end
function find_balanced_budget_tax(c)
    # Budget residual at tax t: revenue t minus benefit outlays u(c, t) * c.
    budget_gap = t -> begin
        u_rate, _, _ = compute_steady_state_quantities(c, t)
        t - u_rate * c
    end
    # Bracketed root search for the balanced-budget tax on [0, 0.9c].
    return find_zero(budget_gap, (0.0, 0.9c))
end
# levels of unemployment insurance we wish to study
Nc = 60
c_vec = range(5, 140, length = Nc)  # grid of compensation levels

tax_vec = zeros(Nc)
unempl_vec = similar(tax_vec)
empl_vec = similar(tax_vec)
welfare_vec = similar(tax_vec)

# For each c: find the budget-balancing tax, then record the implied
# steady-state unemployment, employment and welfare.
for i in 1:Nc
    t = find_balanced_budget_tax(c_vec[i])
    u_rate, e_rate, welfare = compute_steady_state_quantities(c_vec[i], t)
    tax_vec[i] = t
    unempl_vec[i] = u_rate
    empl_vec[i] = e_rate
    welfare_vec[i] = welfare
end

# Plot the four outcomes against the compensation grid.
plt_unemp = plot(title = "Unemployment", c_vec, unempl_vec, color = :blue, lw = 2, alpha=0.7,
                 label = "",grid = true)
plt_tax = plot(title = "Tax", c_vec, tax_vec, color = :blue, lw = 2, alpha=0.7, label = "",
               grid = true)
plt_emp = plot(title = "Employment", c_vec, empl_vec, color = :blue, lw = 2, alpha=0.7, label = "",
               grid = true)
plt_welf = plot(title = "Welfare", c_vec, welfare_vec, color = :blue, lw = 2, alpha=0.7, label = "",
                grid = true)
plot(plt_unemp, plt_emp, plt_tax, plt_welf, layout = (2,2), size = (800, 700))
# -
# Welfare first increases and then decreases as unemployment benefits rise
#
# The level that maximizes steady state welfare is approximately 62
# ## Exercises
# ### Exercise 1
#
# Consider an economy with initial stock of workers $ N_0 = 100 $ at the
# steady state level of employment in the baseline parameterization
#
# - $ \alpha = 0.013 $
# - $ \lambda = 0.283 $
# - $ b = 0.0124 $
# - $ d = 0.00822 $
#
#
# (The values for $ \alpha $ and $ \lambda $ follow [[DFH06]](zreferences.html#davis2006flow))
#
# Suppose that in response to new legislation the hiring rate reduces to $ \lambda = 0.2 $
#
# Plot the transition dynamics of the unemployment and employment stocks for 50 periods
#
# Plot the transition dynamics for the rates
#
# How long does the economy take to converge to its new steady state?
#
# What is the new steady state level of employment?
# ### Exercise 2
#
# Consider an economy with initial stock of workers $ N_0 = 100 $ at the
# steady state level of employment in the baseline parameterization
#
# Suppose that for 20 periods the birth rate was temporarily high ($ b = 0.0025 $) and then returned to its original level
#
# Plot the transition dynamics of the unemployment and employment stocks for 50 periods
#
# Plot the transition dynamics for the rates
#
# How long does the economy take to return to its original steady state?
# ## Solutions
# ### Exercise 1
#
# We begin by constructing an object containing the default parameters and assigning the
# steady state values to x0
# + hide-output=false
# Baseline model and its steady-state (u, e) rates.
lm = LakeModel()
x0 = rate_steady_state(lm)
println("Initial Steady State: $x0")
# -
# Initialize the simulation values
# + hide-output=false
N0 = 100  # initial stock of workers
T = 50    # simulation horizon
# -
# New legislation changes $ \lambda $ to $ 0.2 $
# + hide-output=false
lm = LakeModel(λ = 0.2)
# + hide-output=false
xbar = rate_steady_state(lm) # new steady state
# Stocks start from the old steady-state rates scaled to N0 workers;
# rates start from the old steady-state rates.
X_path = simulate_stock_path(lm, x0 * N0, T)
x_path = simulate_rate_path(lm, x0, T)
println("New Steady State: $xbar")
# -
# Now plot stocks
# + hide-output=false
x1 = X_path[1, :]  # unemployed stock
x2 = X_path[2, :]  # employed stock
x3 = dropdims(sum(X_path, dims = 1), dims = 1)  # total labor force
plt_unemp = plot(title = "Unemployment", 1:T, x1, color = :blue, grid = true, label = "",
                 bg_inside = :lightgrey)
plt_emp = plot(title = "Employment", 1:T, x2, color = :blue, grid = true, label = "",
               bg_inside = :lightgrey)
plt_labor = plot(title = "Labor force", 1:T, x3, color = :blue, grid = true, label = "",
                 bg_inside = :lightgrey)
plot(plt_unemp, plt_emp, plt_labor, layout = (3, 1), size = (800, 600))
# -
# And how the rates evolve
# + hide-output=false
# Rate paths with the new steady-state rates as dashed reference lines.
plt_unemp = plot(title = "Unemployment rate", 1:T, x_path[1,:], color = :blue, grid = true,
                 label = "", bg_inside = :lightgrey)
plot!(plt_unemp, [xbar[1]], linetype = :hline, linestyle = :dash, color =:red, label = "")
plt_emp = plot(title = "Employment rate", 1:T, x_path[2,:], color = :blue, grid = true,
               label = "", bg_inside = :lightgrey)
plot!(plt_emp, [xbar[2]], linetype = :hline, linestyle = :dash, color =:red, label = "")
plot(plt_unemp, plt_emp, layout = (2, 1), size = (800, 600))
# -
# We see that it takes 20 periods for the economy to converge to its new
# steady state levels
# ### Exercise 2
#
# This next exercise has the economy experiencing a boom in entrances to
# the labor market and then later returning to the original levels
#
# For 20 periods the economy has a new entry rate into the labor market
#
# Let’s start off at the baseline parameterization and record the steady
# state
# + hide-output=false
lm = LakeModel()
x0 = rate_steady_state(lm)
# -
# Here are the other parameters:
# + hide-output=false
b̂ = 0.003
T̂ = 20
# -
# Let’s increase $ b $ to the new value and simulate for 20 periods
# + hide-output=false
lm = LakeModel(b=b̂)
X_path1 = simulate_stock_path(lm, x0 * N0, T̂) # simulate stocks
x_path1 = simulate_rate_path(lm, x0, T̂) # simulate rates
# -
# Now we reset $ b $ to the original value and then, using the state
# after 20 periods for the new initial conditions, we simulate for the
# additional 30 periods
# + hide-output=false
lm = LakeModel(b = 0.0124)
X_path2 = simulate_stock_path(lm, X_path1[:, end-1], T-T̂+1) # simulate stocks
x_path2 = simulate_rate_path(lm, x_path1[:, end-1], T-T̂+1) # simulate rates
# -
# Finally we combine these two paths and plot
# + hide-output=false
x_path = hcat(x_path1, x_path2[:, 2:end]) # note [2:] to avoid doubling period 20
X_path = hcat(X_path1, X_path2[:, 2:end])
# + hide-output=false
x1 = X_path[1,:]
x2 = X_path[2,:]
x3 = dropdims(sum(X_path, dims = 1), dims = 1)
plt_unemp = plot(title = "Unemployment", 1:T, x1, color = :blue, lw = 2, alpha = 0.7,
grid = true, label = "", bg_inside = :lightgrey)
plot!(plt_unemp, ylims = extrema(x1) .+ (-1, 1))
plt_emp = plot(title = "Employment", 1:T, x2, color = :blue, lw = 2, alpha = 0.7, grid = true,
label = "", bg_inside = :lightgrey)
plot!(plt_emp, ylims = extrema(x2) .+ (-1, 1))
plt_labor = plot(title = "Labor force", 1:T, x3, color = :blue, alpha = 0.7, grid = true,
label = "", bg_inside = :lightgrey)
plot!(plt_labor, ylims = extrema(x3) .+ (-1, 1))
plot(plt_unemp, plt_emp, plt_labor, layout = (3, 1), size = (800, 600))
# -
# And the rates
# + hide-output=false
plt_unemp = plot(title = "Unemployment Rate", 1:T, x_path[1,:], color = :blue, grid = true,
label = "", bg_inside = :lightgrey, lw = 2)
plot!(plt_unemp, [x0[1]], linetype = :hline, linestyle = :dash, color =:red, label = "", lw = 2)
plt_emp = plot(title = "Employment Rate", 1:T, x_path[2,:], color = :blue, grid = true,
label = "", bg_inside = :lightgrey, lw = 2)
plot!(plt_emp, [x0[2]], linetype = :hline, linestyle = :dash, color =:red, label = "", lw = 2)
plot(plt_unemp, plt_emp, layout = (2, 1), size = (800, 600))
| multi_agent_models/lake_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.13 64-bit (''tensorflow'': conda)'
# language: python
# name: python3613jvsc74a57bd07257430773ff2786ada9528d4be6e0e9fe11a1482846f09bd681c7e1b20b89d3
# ---
# +
"""
This is a implementation of Word2Vec using numpy. Uncomment the print functions to see Word2Vec in action! Also remember to change the number of epochs and set training_data to training_data[0] to avoid flooding your terminal. A Google Sheet implementation of Word2Vec is also available here - https://docs.google.com/spreadsheets/d/1mgf82Ue7MmQixMm2ZqnT1oWUucj6pEcd2wDs_JgHmco/edit?usp=sharing
Have fun learning!
Author: <NAME>
Email: <EMAIL>
"""
import numpy as np
from collections import defaultdict
## Randomly initialise
getW1 = [[0.236, -0.962, 0.686, 0.785, -0.454, -0.833, -0.744, 0.677, -0.427, -0.066],
[-0.907, 0.894, 0.225, 0.673, -0.579, -0.428, 0.685, 0.973, -0.070, -0.811],
[-0.576, 0.658, -0.582, -0.112, 0.662, 0.051, -0.401, -0.921, -0.158, 0.529],
[0.517, 0.436, 0.092, -0.835, -0.444, -0.905, 0.879, 0.303, 0.332, -0.275],
[0.859, -0.890, 0.651, 0.185, -0.511, -0.456, 0.377, -0.274, 0.182, -0.237],
[0.368, -0.867, -0.301, -0.222, 0.630, 0.808, 0.088, -0.902, -0.450, -0.408],
[0.728, 0.277, 0.439, 0.138, -0.943, -0.409, 0.687, -0.215, -0.807, 0.612],
[0.593, -0.699, 0.020, 0.142, -0.638, -0.633, 0.344, 0.868, 0.913, 0.429],
[0.447, -0.810, -0.061, -0.495, 0.794, -0.064, -0.817, -0.408, -0.286, 0.149]]
getW2 = [[-0.868, -0.406, -0.288, -0.016, -0.560, 0.179, 0.099, 0.438, -0.551],
[-0.395, 0.890, 0.685, -0.329, 0.218, -0.852, -0.919, 0.665, 0.968],
[-0.128, 0.685, -0.828, 0.709, -0.420, 0.057, -0.212, 0.728, -0.690],
[0.881, 0.238, 0.018, 0.622, 0.936, -0.442, 0.936, 0.586, -0.020],
[-0.478, 0.240, 0.820, -0.731, 0.260, -0.989, -0.626, 0.796, -0.599],
[0.679, 0.721, -0.111, 0.083, -0.738, 0.227, 0.560, 0.929, 0.017],
[-0.690, 0.907, 0.464, -0.022, -0.005, -0.004, -0.425, 0.299, 0.757],
[-0.054, 0.397, -0.017, -0.563, -0.551, 0.465, -0.596, -0.413, -0.395],
[-0.838, 0.053, -0.160, -0.164, -0.671, 0.140, -0.149, 0.708, 0.425],
[0.096, -0.995, -0.313, 0.881, -0.402, -0.631, -0.660, 0.184, 0.487]]
class word2vec():
    """Skip-gram Word2Vec trained with plain numpy full-batch SGD.

    Training weights come from the module-level ``getW1`` / ``getW2``
    matrices so runs are reproducible; swap in the commented-out
    ``np.random.uniform`` lines in :meth:`train` for random initialisation.
    """

    def __init__(self, config=None):
        """Read hyperparameters from *config* (dict with keys ``n``,
        ``learning_rate``, ``epochs``, ``window_size``).

        *config* defaults to the module-level ``settings`` dict for
        backward compatibility with the original script-style usage.
        """
        if config is None:
            config = settings
        self.n = config['n']                  # embedding / hidden-layer size
        self.lr = config['learning_rate']
        self.epochs = config['epochs']
        self.window = config['window_size']   # context words on each side

    def generate_training_data(self, settings, corpus):
        """Build one-hot (target, contexts) pairs for every word in *corpus*.

        Also populates ``v_count``, ``words_list``, ``word_index`` and
        ``index_word``. Returns an object-dtype numpy array: each row is
        ``[target_onehot, [context_onehot, ...]]`` and context lists are
        ragged at sentence edges.
        """
        # Count unique words in the corpus.
        word_counts = defaultdict(int)
        for row in corpus:
            for word in row:
                word_counts[word] += 1

        self.v_count = len(word_counts)  # vocabulary size
        # Vocabulary lookups in first-seen order.
        self.words_list = list(word_counts.keys())
        self.word_index = {word: i for i, word in enumerate(self.words_list)}
        self.index_word = {i: word for i, word in enumerate(self.words_list)}

        training_data = []
        for sentence in corpus:
            sent_len = len(sentence)
            for i, word in enumerate(sentence):
                w_target = self.word2onehot(sentence[i])
                # One-hot vectors of the in-window neighbours, skipping the
                # target itself and indices outside the sentence.
                w_context = [self.word2onehot(sentence[j])
                             for j in range(i - self.window, i + self.window + 1)
                             if j != i and 0 <= j <= sent_len - 1]
                training_data.append([w_target, w_context])

        # dtype=object is required: rows mix a flat list with a ragged list
        # of lists, and numpy >= 1.24 raises on such input without it.
        return np.array(training_data, dtype=object)

    def word2onehot(self, word):
        """Return the one-hot list (length ``v_count``) for *word*."""
        word_vec = [0] * self.v_count
        word_vec[self.word_index[word]] = 1
        return word_vec

    def train(self, training_data):
        """Run full-batch SGD for ``self.epochs`` epochs, printing the loss.

        ``w1`` has shape (v_count, n); ``w2`` has shape (n, v_count).
        """
        # Deterministic weights from the module-level matrices; uncomment
        # the lines below for random initialisation instead.
        self.w1 = np.array(getW1)
        self.w2 = np.array(getW2)
        # self.w1 = np.random.uniform(-1, 1, (self.v_count, self.n))
        # self.w2 = np.random.uniform(-1, 1, (self.n, self.v_count))

        for i in range(self.epochs):
            self.loss = 0
            # w_t = one-hot target word, w_c = one-hot context words.
            for w_t, w_c in training_data:
                # Forward pass: softmax prediction, hidden layer, raw scores.
                y_pred, h, u = self.forward_pass(w_t)
                # Prediction error summed over all context words.
                EI = np.sum([np.subtract(y_pred, word) for word in w_c], axis=0)
                # SGD update of both weight matrices.
                self.backprop(EI, h, w_t)
                # Loss = -(sum of raw scores of the true context words)
                #        + |context| * log(sum(exp(u)));
                # word.index(1) is the vocabulary index of a context word.
                self.loss += -np.sum([u[word.index(1)] for word in w_c]) \
                             + len(w_c) * np.log(np.sum(np.exp(u)))
            print('Epoch:', i, "Loss:", self.loss)

    def forward_pass(self, x):
        """One-hot *x* -> (softmax probabilities, hidden h, raw scores u)."""
        h = np.dot(x, self.w1)   # (n,) embedding of the target word
        u = np.dot(h, self.w2)   # (v_count,) raw output scores
        y_c = self.softmax(u)
        return y_c, h, u

    def softmax(self, x):
        """Numerically stable softmax (subtracts the max before exp)."""
        e_x = np.exp(x - np.max(x))
        return e_x / e_x.sum(axis=0)

    def backprop(self, e, h, x):
        """One SGD step: *e* summed context error, *h* hidden layer,
        *x* one-hot input."""
        dl_dw2 = np.outer(h, e)                     # (n, v_count)
        dl_dw1 = np.outer(x, np.dot(self.w2, e.T))  # (v_count, n)
        self.w1 = self.w1 - (self.lr * dl_dw1)
        self.w2 = self.w2 - (self.lr * dl_dw2)

    # Get vector from word
    def word_vec(self, word):
        """Return the learned embedding (row of ``w1``) for *word*."""
        return self.w1[self.word_index[word]]

    # Input word, prints nearest word(s)
    def vec_sim(self, word, top_n):
        """Print the *top_n* vocabulary words most cosine-similar to *word*."""
        v_w1 = self.word_vec(word)
        word_sim = {}
        for i in range(self.v_count):
            v_w2 = self.w1[i]
            # Cosine similarity between the two embeddings.
            theta_sum = np.dot(v_w1, v_w2)
            theta_den = np.linalg.norm(v_w1) * np.linalg.norm(v_w2)
            # Loop variable renamed so the *word* argument is not shadowed.
            other = self.index_word[i]
            word_sim[other] = theta_sum / theta_den
        words_sorted = sorted(word_sim.items(), key=lambda kv: kv[1], reverse=True)
        for other, sim in words_sorted[:top_n]:
            print(other, sim)
#####################################################################
# Hyperparameters for the demo run.
settings = {
    'window_size': 2,      # context window +- center word
    'n': 10,               # dimensions of word embeddings, also refer to size of hidden layer
    'epochs': 50,          # number of training epochs
    'learning_rate': 0.01  # learning rate
}

text = "natural language processing and machine learning is fun and exciting"

# Note the .lower() as upper and lowercase does not matter in our implementation
# [['natural', 'language', 'processing', 'and', 'machine', 'learning', 'is', 'fun', 'and', 'exciting']]
corpus = [[word.lower() for word in text.split()]]

# Initialise object
w2v = word2vec()

# Numpy ndarray with one-hot representation for [target_word, context_words]
training_data = w2v.generate_training_data(settings, corpus)

# Training (prints the loss once per epoch)
w2v.train(training_data)

# Get vector for word
word = "machine"
vec = w2v.word_vec(word)
print(word, vec)

# Find similar words (top 3 by cosine similarity)
w2v.vec_sim("machine", 3)
# -
| w8/w8d3/word2vec_numpy_full.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="UEBilEjLj5wY"
# *Accompanying code examples of the book "Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python" by [<NAME>](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).*
#
# Other code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning).
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 119} colab_type="code" executionInfo={"elapsed": 536, "status": "ok", "timestamp": 1524974472601, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-cxK6yOSQ6uE/AAAAAAAAAAI/AAAAAAAAIfw/P9ar_CHsKOQ/s50-c-k-no/photo.jpg", "userId": "118404394130788869227"}, "user_tz": 240} id="GOzuY8Yvj5wb" outputId="c19362ce-f87a-4cc2-84cc-8d7b4b9e6007"
# %load_ext watermark
# %watermark -a '<NAME>' -v -p torch
# + [markdown] colab_type="text" id="rH4XmErYj5wm"
# # Model Zoo -- CNN Gender Classifier (ResNet-18 Architecture, CelebA) with Data Parallelism
# -
# ### Network Architecture
# The network in this notebook is an implementation of the ResNet-18 [1] architecture on the CelebA face dataset [2] to train a gender classifier.
#
#
# References
#
# - [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 770-778). ([CVPR Link](https://www.cv-foundation.org/openaccess/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html))
#
# - [2] <NAME>., <NAME>., <NAME>., & <NAME>. (2016). Gender and smile classification using deep convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops (pp. 34-38).
#
# The following figure illustrates residual blocks with skip connections such that the input passed via the shortcut matches the dimensions of the main path's output, which allows the network to learn identity functions.
#
# 
#
#
# The ResNet-18 architecture also uses residual blocks with skip connections such that the input passed via the shortcut is resized to match the dimensions of the main path's output. Such a residual block is illustrated below:
#
# 
# For a more detailed explanation see the other notebook, [resnet-ex-1.ipynb](resnet-ex-1.ipynb).
# + [markdown] colab_type="text" id="MkoGLH_Tj5wn"
# ## Imports
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="ORj09gnrj5wp"
import os
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import time
import matplotlib.pyplot as plt
from PIL import Image
# -
# ## Settings
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 85} colab_type="code" executionInfo={"elapsed": 23936, "status": "ok", "timestamp": 1524974497505, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-cxK6yOSQ6uE/AAAAAAAAAAI/AAAAAAAAIfw/P9ar_CHsKOQ/s50-c-k-no/photo.jpg", "userId": "118404394130788869227"}, "user_tz": 240} id="NnT0sZIwj5wu" outputId="55aed925-d17e-4c6a-8c71-0d9b3bde5637"
##########################
### SETTINGS
##########################

# Hyperparameters
RANDOM_SEED = 1
LEARNING_RATE = 0.001
NUM_EPOCHS = 10

# Architecture
NUM_FEATURES = 128*128  # images are resized to 128x128 below
NUM_CLASSES = 2         # binary gender label
# Scale the batch with the number of GPUs used for data parallelism, but
# never below one device: torch.cuda.device_count() returns 0 on CPU-only
# machines, which would otherwise yield BATCH_SIZE == 0 and break the
# DataLoader construction.
BATCH_SIZE = 256 * max(1, torch.cuda.device_count())
DEVICE = 'cuda:0' # default GPU device
GRAYSCALE = False
# -
# ## Dataset
# ### Downloading the Dataset
# Note that the ~200,000 CelebA face image dataset is relatively large (~1.3 Gb). The download link provided below was provided by the author on the official CelebA website at http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html.
# 1) Download and unzip the file `img_align_celeba.zip`, which contains the images in jpeg format.
#
# 2) Download the `list_attr_celeba.txt` file, which contains the class labels
#
# 3) Download the `list_eval_partition.txt` file, which contains training/validation/test partitioning info
# ### Preparing the Dataset
# +
# Read the 'Male' attribute column of the CelebA attribute file.
# r"\s+" (raw string) avoids the invalid-escape-sequence warning that the
# plain "\s+" literal triggers on modern Python; the pattern is unchanged.
df1 = pd.read_csv('list_attr_celeba.txt', sep=r"\s+", skiprows=1, usecols=['Male'])

# Make 0 (female) & 1 (male) labels instead of -1 & 1
df1.loc[df1['Male'] == -1, 'Male'] = 0

df1.head()
# +
# Train/validation/test partition file: one (filename, partition) row per image.
df2 = pd.read_csv('list_eval_partition.txt', sep=r"\s+", skiprows=0, header=None)
df2.columns = ['Filename', 'Partition']
df2 = df2.set_index('Filename')

df2.head()
# -
# Join labels with partition info on the filename index and persist the result.
df3 = df1.merge(df2, left_index=True, right_index=True)
df3.head()

df3.to_csv('celeba-gender-partitions.csv')
df4 = pd.read_csv('celeba-gender-partitions.csv', index_col=0)
df4.head()

# Partition codes: 0 = train, 1 = validation, 2 = test.
df4.loc[df4['Partition'] == 0].to_csv('celeba-gender-train.csv')
df4.loc[df4['Partition'] == 1].to_csv('celeba-gender-valid.csv')
df4.loc[df4['Partition'] == 2].to_csv('celeba-gender-test.csv')

# Quick visual sanity check on the first image.
img = Image.open('img_align_celeba/000001.jpg')
print(np.asarray(img, dtype=np.uint8).shape)
plt.imshow(img);
# ### Implementing a Custom DataLoader Class
class CelebaDataset(Dataset):
    """Custom Dataset for loading CelebA face images.

    Reads image file names and gender labels (column 'Male') from the CSV at
    `csv_path`, then loads the JPEGs from `img_dir` lazily in `__getitem__`.
    """

    def __init__(self, csv_path, img_dir, transform=None):
        df = pd.read_csv(csv_path, index_col=0)
        self.img_dir = img_dir
        self.csv_path = csv_path
        self.img_names = df.index.values
        self.y = df['Male'].values
        self.transform = transform

    def __getitem__(self, index):
        # Open only the requested image so memory use stays constant.
        image = Image.open(os.path.join(self.img_dir, self.img_names[index]))
        if self.transform is not None:
            image = self.transform(image)
        return image, self.y[index]

    def __len__(self):
        return self.y.shape[0]
# +
# Note that transforms.ToTensor()
# already divides pixels by 255. internally
# Crop the (aligned) face region, then resize to the 128x128 network input.
custom_transform = transforms.Compose([transforms.CenterCrop((178, 178)),
                                       transforms.Resize((128, 128)),
                                       #transforms.Grayscale(),
                                       #transforms.Lambda(lambda x: x/255.),
                                       transforms.ToTensor()])
# One dataset per partition CSV written by the data-prep cell above.
train_dataset = CelebaDataset(csv_path='celeba-gender-train.csv',
                              img_dir='img_align_celeba/',
                              transform=custom_transform)
valid_dataset = CelebaDataset(csv_path='celeba-gender-valid.csv',
                              img_dir='img_align_celeba/',
                              transform=custom_transform)
test_dataset = CelebaDataset(csv_path='celeba-gender-test.csv',
                             img_dir='img_align_celeba/',
                             transform=custom_transform)
# Only the training loader shuffles; evaluation order is fixed.
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=BATCH_SIZE,
                          shuffle=True,
                          num_workers=4)
valid_loader = DataLoader(dataset=valid_dataset,
                          batch_size=BATCH_SIZE,
                          shuffle=False,
                          num_workers=4)
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=BATCH_SIZE,
                         shuffle=False,
                         num_workers=4)
# +
# Smoke-test the DataLoader: fetch one batch per epoch, move it to the
# device, and stop (`break`) without training anything.
torch.manual_seed(0)
for epoch in range(2):
    for batch_idx, (x, y) in enumerate(train_loader):
        print('Epoch:', epoch+1, end='')
        print(' | Batch index:', batch_idx, end='')
        print(' | Batch size:', y.size()[0])
        x = x.to(DEVICE)
        y = y.to(DEVICE)
        time.sleep(1)
        break
# + [markdown] colab_type="text" id="I6hghKPxj5w0"
# ## Model
# -
# The following code cell that implements the ResNet-34 architecture is a derivative of the code provided at https://pytorch.org/docs/0.4.0/_modules/torchvision/models/resnet.html.
# +
##########################
### MODEL
##########################
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 conv layer with padding 1 and no bias.

    Padding of 1 keeps the spatial size unchanged for stride 1; the bias is
    omitted because a BatchNorm layer always follows in this architecture.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-convolution residual block (ResNet-18/34 style).

    Computes relu(bn2(conv2(relu(bn1(conv1(x))))) + shortcut), where the
    shortcut is the identity unless a `downsample` module is supplied
    (needed whenever the stride or channel count changes).
    """

    # Basic blocks do not widen their output: out channels == planes.
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet classifier (derived from torchvision's ResNet implementation).

    Parameters
    ----------
    block : type
        Residual block class (e.g. BasicBlock); its ``expansion`` attribute
        scales per-stage channel counts.
    layers : sequence of int
        Number of residual blocks in each of the four stages.
    num_classes : int
        Output units of the final fully connected layer.
    grayscale : bool
        If True, the stem expects 1-channel input; otherwise 3-channel RGB.
    """

    def __init__(self, block, layers, num_classes, grayscale):
        self.inplanes = 64
        if grayscale:
            in_dim = 1
        else:
            in_dim = 3
        super(ResNet, self).__init__()
        # Stem: 7x7 conv (stride 2) + BN + ReLU + 3x3 max pool (stride 2).
        self.conv1 = nn.Conv2d(in_dim, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four stages; stages 2-4 halve the spatial size via stride 2.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # NOTE(review): AvgPool2d(7, stride=1, padding=2) and the
        # 2048-unit fc input appear tuned to 128x128 inputs (4x4 feature
        # map -> 2x2 pooled -> 512*2*2 = 2048 features); verify before
        # feeding other input sizes.
        self.avgpool = nn.AvgPool2d(7, stride=1, padding=2)
        self.fc = nn.Linear(2048 * block.expansion, num_classes)
        # Conv weights: zero-mean Gaussian with std sqrt(2/n) (He-style,
        # fan-out); BatchNorm starts as identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, (2. / n)**.5)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        # A 1x1-conv + BN projection shortcut is needed whenever the first
        # block of the stage changes resolution or channel count.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return (logits, softmax probabilities) for input batch x."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        logits = self.fc(x)
        probas = F.softmax(logits, dim=1)
        return logits, probas
def resnet18(num_classes):
    """Construct a ResNet-18 model with `num_classes` output units.

    The number of input channels follows the module-level GRAYSCALE flag.
    BUG FIX: the `num_classes` argument was previously ignored in favor of
    the global NUM_CLASSES; it is now honored. Call sites pass NUM_CLASSES,
    so existing behavior is unchanged.
    """
    model = ResNet(block=BasicBlock,
                   layers=[2, 2, 2, 2],
                   num_classes=num_classes,
                   grayscale=GRAYSCALE)
    return model
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="_lza9t_uj5w1"
# Seed before model construction so weight initialization is reproducible.
torch.manual_seed(RANDOM_SEED)
##########################
### COST AND OPTIMIZER
##########################
model = resnet18(NUM_CLASSES)
#### DATA PARALLEL START ####
# Replicate the model across all visible GPUs; each replica receives a
# slice of the batch (hence BATCH_SIZE scales with device count above).
if torch.cuda.device_count() > 1:
    print("Using", torch.cuda.device_count(), "GPUs")
    model = nn.DataParallel(model)
#### DATA PARALLEL END ####
model.to(DEVICE)
# CrossEntropyLoss expects raw logits (softmax is applied internally).
cost_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# + [markdown] colab_type="text" id="RAodboScj5w6"
# ## Training
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 1547} colab_type="code" executionInfo={"elapsed": 2384585, "status": "ok", "timestamp": 1524976888520, "user": {"displayName": "<NAME>", "photoUrl": <KEY>", "userId": "118404394130788869227"}, "user_tz": 240} id="Dzh3ROmRj5w7" outputId="5f8fd8c9-b076-403a-b0b7-fd2d498b48d7"
def compute_accuracy(model, data_loader, device):
    """Return the classification accuracy (percent) of `model` on `data_loader`.

    Every batch is moved to `device`, run through the model, and the argmax
    of the predicted class probabilities is compared with the integer targets.
    """
    num_correct, num_seen = 0, 0
    for features, targets in data_loader:
        features = features.to(device)
        targets = targets.to(device)
        logits, probas = model(features)
        predictions = probas.argmax(dim=1)
        num_seen += targets.size(0)
        num_correct += (predictions == targets).sum()
    return num_correct.float() / num_seen * 100
# Standard training loop: forward, cross-entropy cost, backprop, Adam step;
# after each epoch report train/validation accuracy and elapsed time.
start_time = time.time()
for epoch in range(NUM_EPOCHS):
    model.train()
    for batch_idx, (features, targets) in enumerate(train_loader):
        features = features.to(DEVICE)
        targets = targets.to(DEVICE)
        ### FORWARD AND BACK PROP
        logits, probas = model(features)
        cost = cost_fn(logits, targets)
        optimizer.zero_grad()
        cost.backward()
        ### UPDATE MODEL PARAMETERS
        optimizer.step()
        ### LOGGING
        # Log every 50th batch to keep output readable.
        if not batch_idx % 50:
            print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
                   %(epoch+1, NUM_EPOCHS, batch_idx,
                     len(train_loader), cost))
    model.eval()
    with torch.set_grad_enabled(False): # save memory during inference
        print('Epoch: %03d/%03d | Train: %.3f%% | Valid: %.3f%%' % (
              epoch+1, NUM_EPOCHS,
              compute_accuracy(model, train_loader, device=DEVICE),
              compute_accuracy(model, valid_loader, device=DEVICE)))
    print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
# + [markdown] colab_type="text" id="paaeEQHQj5xC"
# ## Evaluation
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 6514, "status": "ok", "timestamp": 1524976895054, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-cxK6yOSQ6uE/AAAAAAAAAAI/AAAAAAAAIfw/P9ar_CHsKOQ/s50-c-k-no/photo.jpg", "userId": "118404394130788869227"}, "user_tz": 240} id="gzQMWKq5j5xE" outputId="de7dc005-5eeb-4177-9f9f-d9b5d1358db9"
# Final held-out evaluation with gradients disabled.
with torch.set_grad_enabled(False): # save memory during inference
    print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader, device=DEVICE)))
# +
# Grab the first test batch and display its first image
# (transpose CHW -> HWC for matplotlib).
for batch_idx, (features, targets) in enumerate(test_loader):
    features = features
    targets = targets
    break
plt.imshow(np.transpose(features[0], (1, 2, 0)))
# -
# Predict on that single image; class 0 is 'female' per the label encoding.
model.eval()
logits, probas = model(features.to(DEVICE)[0, None])
print('Probability Female %.2f%%' % (probas[0][0]*100))
| code/model_zoo/pytorch_ipynb/convnet-resnet18-celeba-dataparallel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:openpiv] *
# language: python
# name: conda-env-openpiv-py
# ---
# # Ensemble correlation concept using OpenPIV
# Ensemble correlation should work in places where the flow is really steady and repeatable
# or could be phase averaged in the sense that the correlation map in a single interrogation
# window represents displacements from a statistically stationary distribution.
#
# In such case, the noisy position of the correlation peak is due to randomness that can
# be averaged out like white noise, and the averaging of the correlation maps will
# yield a high quality peak that has great signal to noise ratio and close to Gaussian
#
# In this case the velocity estimate in the interrogation window will approach the mean
# velocity value at that location.
from openpiv.pyprocess import *
from openpiv.tools import *
from glob import glob
from pylab import *
# Collect the image pairs; sorting ensures consecutive frames pair up below.
imlist = glob('../test12/*.tif')
imlist.sort()
print(imlist)
# just a quick look at the data
from openpiv.piv import simple_piv
simple_piv(imlist[0], imlist[1]);
# ## Ensemble averaged correlation using FFT based correlation from OpenPIV
# Correlate each consecutive image pair (frames 0-1, 2-3, ...) with 64-px
# windows and 32-px overlap, keeping every correlation map.
corrs = []
for i,j in zip(imlist[::2],imlist[1::2]):
    # print(i,j)
    corrs.append(fft_correlate_images(moving_window_array(imread(i),64,32),
                                      moving_window_array(imread(j),64,32),
                                      normalized_correlation=True))
corrs = np.array(corrs) # save also single image pair correlations
mean_correlation = corrs.mean(axis=0) # ensemble average
# Let's compare the result with instantaneous results
# NOTE(review): the ensemble map is shown for window 23 while the
# instantaneous maps below use window 252 — confirm this is intentional.
contourf(mean_correlation[23,:,:])
colorbar()
for i in range(corrs.shape[0]):
    figure()
    contourf(corrs[i,252,:,:])
    colorbar()
im = imread(imlist[0])
im.shape
# Displacements and window-center coordinates from the ensemble correlation.
grid = get_field_shape(im.shape,search_area_size=64,overlap=32)
nrows, ncols = grid[0], grid[1]
u,v = correlation_to_displacement(mean_correlation, nrows, ncols)
x,y = get_coordinates(im.shape, 64, 32)
fig, ax = subplots(figsize=(8,8))
ax.quiver(x,y,u,v,scale=80,width=.003)
ax.invert_yaxis()
plot(u.mean(axis=1)*80+400,y[:,0])
# +
# another way is the averaging of velocity fields
# +
# Alternative: compute a displacement field per image pair, then average
# the velocity fields (rather than averaging correlation maps first).
U = []
V = []
for i in range(corrs.shape[0]):
    tmpu,tmpv = correlation_to_displacement(corrs[i,:,:,:], nrows, ncols)
    U.append(tmpu)
    V.append(tmpv)
    fig, ax = subplots(figsize=(6,6))
    ax.quiver(x,y,tmpu,tmpv,scale=200)
    ax.invert_yaxis()
    plot(tmpu.mean(axis=1)*80+400,y[:,0])
U = np.array(U)
V = np.array(V)
meanU = np.mean(U, axis=0)
meanV = np.mean(V, axis=0)
# -
# Plot the averaged velocity field for comparison with the ensemble result.
fig, ax = subplots(figsize=(8,8))
ax.quiver(x,y,meanU,meanV,scale=200)
ax.invert_yaxis()
plot(meanU.mean(axis=1)*80+400,y[:,0])
| notebooks/ensemble_correlation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Circuit Generation for Surface Codes
#
# Surface Codes are CSS codes which encode logical states in a two dimensional lattice of qubits. Syndromes are measured along all the different squares as $X_1 X_2 Z_1 Z_2$ on each edge.
#
# The graph below is a surface code patch of distance d=3 such that number of qubits required is $d^2$ and number of syndrome qubits required is $d^2-1$. Here, blue patches are Z stabilizers and red patches are X stabilizers. Z stabilizers entangle qubits on data qubits (grey) in corners as control, with syndrome qubits (black) in the centre as target.
#
# Similarly, X stabilizers entangle qubits with controlled not on data qubits (grey) in corners as target, with syndrome qubits (black) in the centre as control. The Z and N marked in each patch determines the order of CX labeled in black. Grey coordinate labels are data qubit locations and black labels are syndrome qubit locations.
#
# The straight line marked Z<sub>L</sub> signifies that a logical Z is applied by operating Z on each qubit, on any horizontal line in the lattice. Similarly, the straight line marked X<sub>L</sub> signifies that a logical X is applied by operating X on each qubit, on any vertical line in the lattice. We choose one convention and say top edge signifies a Z logical operation and left edge signifies X logical operation!
#
# <p align="center">
# <img width="672" alt="Lattice" src="https://user-images.githubusercontent.com/293681/86267952-7541f700-bb95-11ea-8292-240bf344f7f8.png">
# </p>
#
# The code assigns node locations to the lattices using `SurfaceCode(d, T).lattice()` and then assigns the order of controlled not for each square using `SurfaceCode(d, T).connection()`. This is then used by the syndrome measurement to apply controlled nots for each square according to the numbers and type of stabilizer the square execute (red: X, blue:Z).
#
# Thus, the above circuit is created and each syndrome qubit is measured. This is called syndrome measurement and is repeated T=d times. The results from each syndrome measurement are then processed to extract error nodes i.e. nodes which were flipped in consecutive syndrome measurements. This information is then utilized by the classes in syndrome_graph and fitter.py files to create error graphs and perform matching (see the [decoder tutorial](2_surface_code_decoding.ipynb)) to deduce the most probable error.
#
# Finally, a logical Z error is concluded by checking whether there were an odd number of qubits with errors on the top (Z<sub>L</sub>) edge, and a logical X error is concluded if there were an odd number of qubits with errors on the left (X<sub>L</sub>) edge.
# # Circuit Execution (Logical 0)
#
# +
from qiskit import execute

# Prefer the (faster, C++) Aer simulator when it is installed; fall back to
# the pure-Python BasicAer backend otherwise.
try:
    from qiskit import Aer
    HAS_AER = True
except ImportError:
    from qiskit import BasicAer
    HAS_AER = False
if HAS_AER:
    simulator = Aer.get_backend('qasm_simulator')
else:
    simulator = BasicAer.get_backend('qasm_simulator')
# +
import sys
sys.path.insert(0, '../')
from surface_code.circuits import SurfaceCode
# Set up a d=3, T=3 code
code = SurfaceCode(3, 3)
job = execute(code.circuit['0'], simulator)
raw_results = {}
raw_results['0'] = job.result().get_counts()
processed_results={}
processed_results=code.process_results(raw_results['0'])
j=0
for i in (processed_results):
print("results from circuit execution round",j,":", processed_results[2])
j=j+1
nodesX,nodesZ = code.extract_nodes(processed_results)
print("error nodes in X", nodesX)
print("error nodes in Z", nodesZ)
print("No Z error as logical 0 state is an eigenstate of logical Z (given no noise is added to the system)")
# -
# # Visualize the SurfaceEncoder Circuit
# Each round of the syndrome measurement runs the following measurements between the ancilla qubits on top and the N,S,E,W data qubits:
#
# <p align="center">
# <img width="549" alt="Screen Shot 2020-07-01 at 3 43 38 PM" src="https://user-images.githubusercontent.com/293681/86285090-02934480-bbb2-11ea-8b18-57eda706b206.png">
# </p>
#
# - <NAME>., <NAME>., <NAME>. & <NAME>. Threshold error rates for the toric and surface codes. arXiv:0905.0531 [quant-ph] (2009).
#
# We can visualize the output of the "linearized" circuit in Qiskit (below):
# Render the linearized encoding circuit for the logical |0> preparation.
code.circuit["0"].draw(output='mpl', fold=50)
| tutorials/1_surface_code_encoding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Analysis
# ## Cycle Share
# ### <NAME>
# ### September 03, 2019
import pandas as pd
# Load the three cycle-share tables: station metadata, daily weather,
# and individual trips (skipping malformed rows in trip.csv).
station_data = pd.read_csv('station.csv')
weather_data = pd.read_csv('weather.csv')
# NOTE(review): error_bad_lines is deprecated/removed in newer pandas;
# the modern equivalent is on_bad_lines='skip' — confirm pandas version.
trip_data = pd.read_csv('trip.csv', error_bad_lines=False)
# #### What is the average trip duration for a borrowed bicycle?
# Mean trip duration across all recorded trips.
trip_data.tripduration.mean()
# #### What’s the most common age of a bicycle-sharer?
# Age as of 2019, derived from the modal birth year.
age = (2019 - (trip_data.birthyear.mode()))
age
# ##### Answer: 32
# #### Given all the weather data here, find the average precipitation per month, and the median precipitation.
# Parse the dates and derive a Month column for per-month grouping.
weather_data['Date'] = pd.to_datetime(weather_data['Date'], errors='coerce')
weather_data['Month'] = weather_data['Date'].dt.month
# Average precipitation per calendar month, shown highest first.
average_precip = pd.DataFrame(weather_data[['Month','Precipitation_In']]).groupby(weather_data.Month).mean()
average_precip.sort_values('Precipitation_In', ascending=False)
# Mean of the monthly averages.
mean_precip = average_precip['Precipitation_In'].mean()
mean_precip
# BUG FIX: the question also asks for the median precipitation, which the
# original cell never computed.
median_precip = weather_data['Precipitation_In'].median()
median_precip
# #### What’s the average number of bikes at a given bike station?
#
# Average number of docks currently installed per station.
station_data['current_dockcount'].mean()
# ##### Answer: About 16
# #### When a bike station is modified, is it more likely that it’ll lose bikes or gain bikes? How do you know?
# +
# Positive difference: the station has fewer docks now than at install
# (it lost capacity); negative difference: it gained capacity.
bike_mod_stat = station_data['install_dockcount'] - station_data['current_dockcount']
lose = 0
gain = 0
for val in bike_mod_stat:
    # BUG FIX: the original counted these the wrong way around
    # (install - current > 0 means docks were LOST, not gained).
    if val > 0:
        lose += 1
    elif val < 0:
        gain += 1
# BUG FIX: the final branch was misspelled `ele:`, a SyntaxError.
if gain > lose:
    print('A bike station is likely to gain bikes when modified.')
else:
    print('a bike station is likely to lose bikes when modified')
# -
| bike-stats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Esercizio1 - Pysam
# Prendere in input un BAM file che contiene allineamenti ad un'unica reference (cromosoma X) e:
#
# - controllare se sono presenti *paired-end* reads
# - determinare il numero e le lunghezze di tutti gli introni supportati dagli allineamenti del BAM file
# - determinare la base della reference a copertura massima e produrre un file SAM contenenti i soli allineamenti che coprono tale base
# Importare il modulo `pysam`
# Caricare il file `BAM` in input
# #### Controllare se ci sono dei reads paired-end.
# #### Determinare il numero e le lunghezze di tutti gli introni supportati dagli allineamenti
#
# Determinare la lista delle lunghezze degli introni supportati con duplicati.
# Determinare il set delle lunghezze degli introni supportati.
# #### Trovare la base a copertura massima e produrre il SAM file contenente tutti gli allineamenti che coprono questa base.
#
# Determinare la lista delle *pileup columns*.
# Estrarre la colonna di *pileup* con il maggior numero di reads.
# Settare a 0 la qualità minima della colonna di *pileup* (altrimenti le reads non vengono tenute perché hanno una qualità bassa).
# Estrarre la lista dei read di *pileup* relativi alla colonna di altezza massima.
# Estrarre la lista degli allineamenti relativi alla colonna di altezza massima.
# Produrre gli allineamenti in un SAM file utilizzando la stessa Header Section del BAM file.
| laboratorio/lezione18-17dic21/.ipynb_checkpoints/esercizio1-pysam-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/giswqs/GEE-Courses/blob/master/docs/gee_intro/Analysis/zonal_stats_by_group.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"/></a>
#
# Uncomment the following line to install [geemap](https://geemap.org) if needed.
# +
# # !pip install geemap
# -
import os
import ee
import geemap
# ## Analyzing U.S. Land Cover
Map = geemap.Map()
Map
# ### Add NLCD data
#
# NLCD: USGS National Land Cover Database
#
# https://developers.google.com/earth-engine/datasets/catalog/USGS_NLCD_RELEASES_2016_REL
# +
# Load the 2016 National Land Cover Database image and overlay US state
# boundaries for the zonal statistics below.
dataset = ee.Image('USGS/NLCD/NLCD2016')
landcover = ee.Image(dataset.select('landcover'))
Map.addLayer(landcover, {}, 'NLCD 2016')
states = ee.FeatureCollection("TIGER/2018/States")
Map.addLayer(states, {}, 'US States')
# -
# Show the built-in NLCD class legend on the map.
Map.add_legend(builtin_legend='NLCD')
# ### Calculate land cover compostion of each US state
# Save outputs to the user's Downloads folder. exist_ok=True avoids the
# race between a separate existence check and the creation call.
out_dir = os.path.expanduser('~/Downloads')
os.makedirs(out_dir, exist_ok=True)
# +
# Per-state land-cover area sums, written to CSV (areas in km^2).
nlcd_stats = os.path.join(out_dir, 'nlcd_stats_sum.csv')
# statistics_type can be either 'SUM' or 'PERCENTAGE'
# denominator can be used to convert square meters to other areal units, such as square kilometers
geemap.zonal_statistics_by_group(
    landcover,
    states,
    nlcd_stats,
    statistics_type='SUM',
    denominator=1000000,
    decimal_places=2,
)
# -
# Same statistics expressed as a percentage of each state's area.
nlcd_stats = os.path.join(out_dir, 'nlcd_stats_pct.csv')
geemap.zonal_statistics_by_group(
    landcover, states, nlcd_stats, statistics_type='PERCENTAGE'
)
# ## Analyzing Global Land Cover
# ### Add MODIS global land cover data
#
# MCD12Q1.006 MODIS Land Cover Type Yearly Global 500m
#
# https://developers.google.com/earth-engine/datasets/catalog/MODIS_006_MCD12Q1
# +
Map = geemap.Map()

# 2019 MODIS land cover (IGBP classification, band LC_Type1).
landcover = ee.Image('MODIS/006/MCD12Q1/2019_01_01').select('LC_Type1')

# Visualization parameters: one palette color per IGBP class (1..17).
igbpLandCoverVis = {
    'min': 1.0,
    'max': 17.0,
    'palette': [
        '05450a',
        '086a10',
        '54a708',
        '78d203',
        '009900',
        'c6b044',
        'dcd159',
        'dade48',
        'fbff13',
        'b6ff05',
        '27ff87',
        'c24f44',
        'a5a5a5',
        'ff6d4c',
        '69fff8',
        'f9ffa4',
        '1c0dff',
    ],
}
Map.setCenter(6.746, 46.529, 2)
Map.addLayer(landcover, igbpLandCoverVis, 'MODIS Land Cover')
# Country boundaries used as zones for the global statistics below.
countries = ee.FeatureCollection('users/giswqs/public/countries')
Map.addLayer(countries, {}, "Countries")
Map
# -
Map.add_legend(builtin_legend='MODIS/051/MCD12Q1')
# +
out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
# Per-country land-cover area sums, written to CSV (areas in km^2).
global_stats = os.path.join(out_dir, 'global_stats_sum.csv')
# statistics_type can be either 'SUM' or 'PERCENTAGE'
# denominator can be used to convert square meters to other areal units, such as square kilometers
geemap.zonal_statistics_by_group(
    landcover,
    countries,
    global_stats,
    statistics_type='SUM',
    denominator=1000000,
    decimal_places=2,
)
# -
# Same statistics expressed as a percentage of each country's area.
global_stats = os.path.join(out_dir, 'global_stats_pct.csv')
geemap.zonal_statistics_by_group(
    landcover, countries, global_stats, statistics_type='PERCENTAGE'
)
| docs/gee_intro/Analysis/zonal_stats_by_group.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Outlier detection on a real data set
#
#
# This example illustrates the need for robust covariance estimation
# on a real data set. It is useful both for outlier detection and for
# a better understanding of the data structure.
#
# We selected two sets of two variables from the Boston housing data set
# as an illustration of what kind of analysis can be done with several
# outlier detection tools. For the purpose of visualization, we are working
# with two-dimensional examples, but one should be aware that things are
# not so trivial in high-dimension, as it will be pointed out.
#
# In both examples below, the main result is that the empirical covariance
# estimate, as a non-robust one, is highly influenced by the heterogeneous
# structure of the observations. Although the robust covariance estimate is
# able to focus on the main mode of the data distribution, it sticks to the
# assumption that the data should be Gaussian distributed, yielding some biased
# estimation of the data structure, but yet accurate to some extent.
# The One-Class SVM does not assume any parametric form of the data distribution
# and can therefore model the complex shape of the data much better.
#
# First example
# -------------
# The first example illustrates how robust covariance estimation can help
# concentrating on a relevant cluster when another one exists. Here, many
# observations are confounded into one and break down the empirical covariance
# estimation.
# Of course, some screening tools would have pointed out the presence of two
# clusters (Support Vector Machines, Gaussian Mixture Models, univariate
# outlier detection, ...). But had it been a high-dimensional example, none
# of these could be applied that easily.
#
# Second example
# --------------
# The second example shows the ability of the Minimum Covariance Determinant
# robust estimator of covariance to concentrate on the main mode of the data
# distribution: the location seems to be well estimated, although the covariance
# is hard to estimate due to the banana-shaped distribution. Anyway, we can
# get rid of some outlying observations.
# The One-Class SVM is able to capture the real data structure, but the
# difficulty is to adjust its kernel bandwidth parameter so as to obtain
# a good compromise between the shape of the data scatter matrix and the
# risk of over-fitting the data.
#
#
#
# +
print(__doc__)

# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause

import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston  # unused here; kept from the original sklearn example
import pandas as pd

# Get data.
# BUG FIX: the original kept these as DataFrames, but the plotting code
# below indexes them positionally (X1[:, 0]), which raises on a DataFrame.
# Convert to plain NumPy arrays up front; the estimators accept either.
X1 = pd.read_csv("AS1.csv").to_numpy()  # two clusters
X2 = pd.read_csv("AS2.csv").to_numpy()  # "banana"-shaped

# Define "classifiers" to be used: a non-robust covariance envelope, a
# robust (MCD) covariance envelope, and a One-Class SVM.
classifiers = {
    "Empirical Covariance": EllipticEnvelope(support_fraction=1.,
                                             contamination=0.261),
    "Robust Covariance (Minimum Covariance Determinant)":
    EllipticEnvelope(contamination=0.261),
    "OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}

# Learn a frontier for outlier detection with several classifiers.
# The meshgrids cover each dataset's range; the zero level set of the
# decision function is the learned inlier/outlier frontier.
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
    plt.figure(1)
    clf.fit(X1)
    Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
    Z1 = Z1.reshape(xx1.shape)
    legend1[clf_name] = plt.contour(
        xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
    plt.figure(2)
    clf.fit(X2)
    Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
    Z2 = Z2.reshape(xx2.shape)
    legend2[clf_name] = plt.contour(
        xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])

legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())

# Plot the results (= shape of the data points cloud)
plt.figure(1)  # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
             xycoords="data", textcoords="data",
             xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
            legend1_values_list[1].collections[0],
            legend1_values_list[2].collections[0]),
           (legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")

legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())

plt.figure(2)  # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
            legend2_values_list[1].collections[0],
            legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
# -
| Assignments/hw3/Failed_to_perform_with_dataset/HW3_boston/plot_outlier_detection_housing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: all
# language: python
# name: all
# ---
# +
# %pylab inline
import pandas as pd
import pysumma as ps
import xarray as xr
from matplotlib import cm
import seaborn as sns
from pathlib import Path
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.lines import Line2D
from statsmodels.api import OLS
import numpy as np
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
import warnings
# Silence noisy dependency warnings in notebook output.
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning)
warnings.simplefilter(action='ignore', category=RuntimeWarning)
# %run ../lib/summa_snow_layering.py
# Plot styling; `mpl` is injected into the namespace by %pylab above.
sns.set_context('poster')
mpl.style.use('seaborn-bright')
mpl.rcParams['figure.figsize'] = (18, 12)
S_PER_HOUR = 3600  # seconds per hour, used for rate conversions
# +
dana = xr.open_dataset('../processed/dana_perturbations.nc').load()
coldeport = xr.open_dataset('../processed/coldeport_perturbations.nc').load()
reynolds = xr.open_dataset('../processed/reynolds_perturbations.nc').load()
dana['scalarSnowTemp'] -= 273.16
reynolds['scalarSnowTemp'] -= 273.16
coldeport['scalarSnowTemp'] -= 273.16
dana['scalarSurfaceTemp'] -= 273.16
reynolds['scalarSurfaceTemp'] -= 273.16
coldeport['scalarSurfaceTemp'] -= 273.16
# -
dana_years = np.arange(dana.time.dt.year.values[0], dana.time.dt.year.values[-1]-1)
cdp_years = np.arange(coldeport.time.dt.year.values[0], coldeport.time.dt.year.values[-1])
reynolds_years = np.arange(reynolds.time.dt.year.values[0], reynolds.time.dt.year.values[-1])
colors_2l = ['orange', 'chocolate', 'saddlebrown']
colors_3l = ['skyblue', 'dodgerblue', 'royalblue']
colors_4l = ['violet', 'deeppink', 'crimson']
colors_all = [ *colors_2l, *colors_3l, *colors_4l, 'lime']
year = lambda x, y: slice('{}/10/01'.format(x), '{}/09/30'.format(y))
# +
year_dict = {'dana': dana_years,
'cdp': cdp_years,
'reynolds': reynolds_years}
ds_dict = {'dana': dana,
'cdp': coldeport,
'reynolds': reynolds}
# +
sites = ['dana', 'cdp', 'reynolds']
temps = ['-2.0K', '+0.0K', '+2.0K', '+4.0K']
layers = ['2L_thin' , '2L_mid', '2L_thick', '3L_thin', '3L_mid', '3L_thick', '4L_thin', '4L_mid', '4L_thick', 'CLM-like']
site_years = [f'{s}_{y}' for s in sites for y in year_dict[s][:-1]]
site_model_years = [f'{s}_{m}_{y}' for s in sites for m in layers for y in year_dict[s][:-1]]
legend_elements = [Line2D([0], [0], marker='o', color='w', markerfacecolor=c, label=m, markersize=15) for m, c in zip(layers, colors_all)]
layers = ['2L_thin' , '2L_mid', '2L_thick', '3L_thin', '3L_mid', '3L_thick', '4L_thin', '4L_mid', '4L_thick', 'CLM']
# -
met_snow = {s: {dt: [] for dt in temps} for s in sites}
for site in ['dana', 'cdp', 'reynolds']:
for dt in temps:
for y in year_dict[site][1:-1]:
tds = ds_dict[site]['scalarSnowfall'].sel(time=year(y, y+1), dt=dt, model='JRDN')
tbs = np.diff(np.argwhere((tds > 0).values).flatten())
tbs = tbs[tbs > 1]
met_snow[site][dt].append(tbs)
# +
# Mean bias error (MBE) of each layering scheme relative to the 'JRDN'
# reference model, collected per site / layering / warming perturbation
# across water years.  `mbe` is defined in ../lib/summa_snow_layering.py
# (loaded via %run above).
mbe_swe = {s: {l: {dt: [] for dt in temps} for l in layers} for s in sites }
mbe_cc = {s: {l: {dt: [] for dt in temps} for l in layers} for s in sites }
mbe_sst = {s: {l: {dt: [] for dt in temps} for l in layers} for s in sites }
# NOTE(review): this rebinds `met_snow`, discarding the storm-spacing data
# built in the previous cell — confirm that is intentional.
met_snow = {s: {l: {dt: [] for dt in temps} for l in layers} for s in sites }
site_year_strings = []  # NOTE(review): never appended to in this cell
for site in ['dana', 'cdp', 'reynolds']:
    for dt in temps[0:2]:  # only the '-2.0K' and '+0.0K' perturbations
        for layer in layers:
            for y in year_dict[site][1:-1]:
                # Candidate-model and reference ('JRDN') series for one water year.
                sim_swe = ds_dict[site]['scalarSWE'].sel(time=year(y, y+1), dt=dt, model=layer)
                jrdn_swe = ds_dict[site]['scalarSWE'].sel(time=year(y, y+1), dt=dt, model='JRDN')
                sim_sst = ds_dict[site]['scalarSurfaceTemp'].sel(time=year(y, y+1), dt=dt, model=layer)
                jrdn_sst = ds_dict[site]['scalarSurfaceTemp'].sel(time=year(y, y+1), dt=dt, model='JRDN')
                sim_cc = ds_dict[site]['scalarColdContent'].sel(time=year(y, y+1), dt=dt, model=layer)
                jrdn_cc = ds_dict[site]['scalarColdContent'].sel(time=year(y, y+1), dt=dt, model='JRDN')
                sim_sf = ds_dict[site]['scalarSnowfall'].sel(time=year(y, y+1), dt=dt, model=layer)
                # Index of peak SWE, first nonzero SWE before the peak, and
                # (if any) first fully snow-free timestep after the peak.
                peaksnow = int(sim_swe.argmax().values[()])
                firstsnow = np.where(sim_swe.isel(time=slice(0, peaksnow)) != 0)[0][0]
                nosnow = np.where(sim_swe.isel(time=slice(peaksnow, None)) == 0)[0]
                if len(nosnow):
                    nosnow = nosnow[0] + peaksnow
                else:
                    nosnow = None  # snow never fully disappears this year
                melt_season = slice(peaksnow, nosnow)
                accum_season = slice(firstsnow, peaksnow)
                snow_season = slice(firstsnow, nosnow)  # NOTE(review): unused below
                # SWE bias is evaluated over the melt season; cold content and
                # surface temperature over the accumulation season.
                sim_swe = sim_swe.isel(time=melt_season).values
                jrdn_swe = jrdn_swe.isel(time=melt_season).values
                sim_cc = sim_cc.isel(time=accum_season).values / 1e6  # to MJ (axis label below)
                jrdn_cc = jrdn_cc.isel(time=accum_season).values / 1e6
                sim_sst = sim_sst.isel(time=accum_season).values
                jrdn_sst = jrdn_sst.isel(time=accum_season).values
                # Compare cold content only where both series are defined.
                mask_cc = np.logical_and(~np.isnan(sim_cc), ~np.isnan(jrdn_cc))
                met_snow[site][layer][dt].append(np.sum(sim_sf > 0) / len(sim_sf))
                mbe_swe[site][layer][dt].append(mbe(sim_swe, jrdn_swe))
                mbe_cc[site][layer][dt].append( mbe(sim_cc[mask_cc], jrdn_cc[mask_cc]))
                mbe_sst[site][layer][dt].append(mbe(sim_sst, jrdn_sst))
# -
mpl.rcParams['figure.figsize'] = (12, 12)
fig, axes = plt.subplots(1, 3, figsize=(18, 6), sharey=False, sharex=False)
site_name = ['Dana Meadows', 'Col de Porte', 'Reynolds Creek']
for k, s in enumerate(sites):
for i, layer in enumerate(layers):
for j, dt in enumerate(temps):
met_list = mbe_swe[s][layer][dt]
sf_list = mbe_sst[s][layer][dt]
axes[k].scatter(sf_list, met_list, color=colors_all[i], alpha=0.8)
axes[k].set_title(site_name[k])
axes[k].axhline(0, linestyle='--', color='grey')
axes[k].axvline(0, linestyle='--', color='grey')
axes[0].set_ylabel('')
axes[1].set_xlabel('')
plt.legend(handles=legend_elements, bbox_to_anchor=(1.1, 1.))
mpl.rcParams['figure.figsize'] = (12, 12)
fig, axes = plt.subplots(1, 3, figsize=(18, 6), sharey=False, sharex=False)
site_name = ['Dana Meadows', 'Col de Porte', 'Reynolds Creek']
for k, s in enumerate(sites):
for i, layer in enumerate(layers):
for j, dt in enumerate(temps):
met_list = mbe_swe[s][layer][dt]
sf_list = mbe_cc[s][layer][dt]
axes[k].scatter(sf_list, met_list, color=colors_all[i], alpha=0.8)
axes[k].set_title(site_name[k])
axes[k].axhline(0, linestyle='--', color='grey')
axes[k].axvline(0, linestyle='--', color='grey')
axes[0].set_ylabel(r'$\Delta$SWE from SNTHERM-like (mm)')
axes[1].set_xlabel(r'$\Delta$ Cold Content from SNTHERM-like (MJ)')
plt.legend(handles=legend_elements, bbox_to_anchor=(1.1, 1.))
# +
fig, axes = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(12, 12))
mbe_cc_list = []
mbe_swe_list = []
for k, s in enumerate(sites[0:2]):
for i, l in enumerate(layers):
for j, dt in enumerate(temps):
axes.scatter(mbe_cc[s][l][dt],
mbe_swe[s][l][dt], color=colors_all[i],
alpha=0.7)
mbe_cc_list.append(np.array(mbe_cc[s][l][dt]))
mbe_swe_list.append(np.array(mbe_swe[s][l][dt]))
mbe_cc_list = np.hstack(mbe_cc_list).reshape(-1, 1)
mbe_swe_list = np.hstack(mbe_swe_list).reshape(-1, 1)
ols = OLS(mbe_swe_list, mbe_cc_list, )
res = ols.fit(cov_type='HC1')
res.params[0]
res.rsquared_adj
res.f_pvalue
fig.text(0.5, 0.04, r'$Cold\ content$ (MJ)', ha='center', fontsize=22)
fig.text(0.0, 0.5, r'SWE (mm)', va='center', rotation='vertical', fontsize=22)
plt.legend(handles=legend_elements, bbox_to_anchor=(1., 1.))
# +
fig, axes = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(12, 12))
mbe_cc_list = []
mbe_swe_list = []
for k, s in enumerate(sites[0:2]):
for i, l in enumerate(layers):
for j, dt in enumerate(temps):
axes.scatter(mbe_sst[s][l][dt],
mbe_swe[s][l][dt], color=colors_all[i],
alpha=0.7)
mbe_cc_list.append(np.array(mbe_cc[s][l][dt]))
mbe_swe_list.append(np.array(mbe_swe[s][l][dt]))
mbe_cc_list = np.hstack(mbe_cc_list).reshape(-1, 1)
mbe_swe_list = np.hstack(mbe_swe_list).reshape(-1, 1)
ols = OLS(mbe_swe_list, mbe_cc_list, )
res = ols.fit(cov_type='HC1')
res.params[0]
res.rsquared_adj
res.f_pvalue
fig.text(0.5, 0.04, r'$\Delta SST$ (K)', ha='center', fontsize=22)
fig.text(0.0, 0.5, r'SWE (mm)', va='center', rotation='vertical', fontsize=22)
plt.legend(handles=legend_elements, bbox_to_anchor=(1., 1.))
# -
| gen_plots/scatter_plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Circular habitat uniform
#
# **authors:** <NAME>
#
# Here I simulate genetic data under the coalescent in a circular habitat and explore the fit of different ways to compute expected genetic distances on simulated genotypes.
#
# Lets load the necessary packages and modules to get started
# +
# %load_ext autoreload
# %autoreload 2
import numpy as np
import networkx as nx
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import sys
sys.path.append("../code/")
from habitat import *
from genotype_simulator import *
# -
# Plot size configuration
sns.set_style('white')
plt.style.use('bmh')
mpl.rcParams['font.size'] = 14
mpl.rcParams['figure.figsize'] = 8, 6
# ## Setup the habitat
# Here we define a circular habitat with 64 demes in total
d = 64
hab = Circle(d)
# Next we need to define a migration surface which is a function on the nodes of the graph that define edge weights. Here i just set all migration rates uniformly
def uniform_migration(self):
    """Assign a uniform migration rate to every edge of the habitat graph.

    Sets the edge attribute ``'m'`` to 1.0 on each edge of ``self.g`` and
    stores the row-normalized dense adjacency matrix on ``self.m`` so that
    every row sums to one.
    """
    for node_a, node_b in self.g.edges():
        self.g[node_a][node_b]['m'] = 1.
    dense = nx.adjacency_matrix(self.g, weight='m').toarray()
    # normalize each row so outgoing rates sum to 1
    row_totals = np.sum(dense, axis=1, keepdims=True)
    self.m = dense / row_totals
# We then assign this method to the habitat object
# +
# assign the migration method
hab.migration_surface = uniform_migration
# create the migration matrix
hab.migration_surface(hab)
# compute graph laplacian
hab.get_graph_lapl()
# -
# Let's visualize the circular habitat with edge widths proportional to the defined edge weights; note that we multiply the weights by a constant just for visualization purposes. Additionally, the nodes are colored by their position on the map — the x position difference is emphasized more than the y. This coloring is not very informative for the circular habitat, but it is useful for grid-based simulations. As expected, we see that the edge weights are all the same
hab.plot_habitat(120, 2, False)
# As expected we see that the migration matrix $\mathbf{M}$ is extremely sparse as only neighboring nodes are connected. Each node only has two neighbors!
hab.plot_migration_matrix()
# ## Simulate genotypes
# Here we simulate genotypes under the coalescent using msprime ... this may take a bit of time. Specifically we simulate 10 haploid individuals per deme in 5000 independent regions of the genome. See `../code/genotype_simulator.py` for default params and implementation of the simulation object.
# +
sim_path = path = "../output/simulations/cir_unif.pkl"
geno = GenotypeSimulator(hab, sim_path, n_rep=5e3)
print(geno.y.shape)
# -
# Here we visualize the site frequency spectrum, which seems to have a slight enrichment of common variants, suggesting a divergence from the neutral expectation. This is expected, as we are simulating a structured population!
geno.plot_sfs()
# Let's filter out the rarest variants, leaving us with fewer SNPs
# +
geno.filter_rare_var()
print(geno.y.shape)
# -
# Lets perform PCA on the genotype matrix and visualize the first two PCs. Note that I center and scale the data matrix before running PCA.
geno.pca()
geno.plot_pca(geno.pcs, geno.pves)
# Wow, PCA strongly recapitulates population structure here!
# ## Expected genetic distances
# We can see the graph laplacian is sparse as $\mathbf{M}$ is sparse. We can think of $\mathbf{L}$ here as a sparse precision matrix
hab.plot_precision_matrix(hab.l)
# We can see that $\mathbf{L}\mathbf{L}^T$ is also sparse but not as sparse. It seems to have an additional off-diagonal band
hab.plot_precision_matrix(hab.l @ hab.l.T)
# Compute observed genetic distances and different models to compute expected genetic distances. Note that I center the data matrix before computing genetic distances but I do not scale.
# +
# lower triangular indicies
tril_idx = np.tril_indices(geno.n, -1)
# observed genetic distance
d_geno = geno.geno_dist()
d_geno_tril = d_geno[tril_idx]
# geographic distance
d_geo = geno.node_to_obs_mat(hab.geo_dist(), geno.n, geno.v)
d_geo_tril = d_geo[tril_idx]
# resistence distance
d_res = geno.node_to_obs_mat(hab.rw_dist(hab.l), geno.n, geno.v)
d_res_tril = d_res[tril_idx]
# random-walk distance
d_rw = geno.node_to_obs_mat(hab.rw_dist(hab.l @ hab.l.T), geno.n, geno.v)
d_rw_tril = d_rw[tril_idx]
# +
# coalescent distance
d_coal = geno.node_to_obs_mat(hab.coal_dist(), geno.n, geno.v)
d_coal_tril = d_coal[tril_idx]
geno.plot_dist(d_coal_tril, d_geno_tril, "Coalescent Distance", "Genetic Distance")
print('coal r2 = {}'.format(np.corrcoef(d_coal_tril, d_geno_tril)[0, 1]))
# +
geno.plot_dist(d_geo_tril, d_geno_tril, "Geographic Distance", "Genetic Distance")
print('geo r2 = {}'.format(np.corrcoef(d_geo_tril, d_geno_tril)[0, 1]))
# +
geno.plot_dist(d_res_tril, d_geno_tril, "Resistence Distance", "Genetic Distance")
print('res r2 = {}'.format(np.corrcoef(d_res_tril, d_geno_tril)[0, 1]))
# -
geno.plot_dist(d_rw_tril, d_geno_tril, "Random Walk Distance", "Genetic Distance")
print('rw r2 = {}'.format(np.corrcoef(d_rw_tril, d_geno_tril)[0, 1]))
# In summary ...
print('coal r2 = {}'.format(np.corrcoef(d_coal_tril, d_geno_tril)[0, 1]))
print('geo r2 = {}'.format(np.corrcoef(d_geo_tril, d_geno_tril)[0, 1]))
print('res r2 = {}'.format(np.corrcoef(d_res_tril, d_geno_tril)[0, 1]))
print('rw r2 = {}'.format(np.corrcoef(d_rw_tril, d_geno_tril)[0, 1]))
# Interestingly, in this case the random-walk distance computed using $\mathbf{L}\mathbf{L}^T$ seems to have a worse fit compared to both geographic distance and resistance distance which should match exactly the coalescent model I simulate under.
#
# *I need to check is how the circular layout in `networkx` assigns its positions and if the euclidean distance metric I'm using is appropriate*
geno.plot_dist(d_res_tril, d_coal_tril, "Resistence Distance", "Coalescent Distance")
# As expected the coalescent distance is exactly the resistance distance!
# Label fix: this panel plots the random-walk distance (d_rw_tril), not the
# resistance distance — the previous cell already shows resistance vs. coalescent.
geno.plot_dist(d_rw_tril, d_coal_tril, "Random Walk Distance", "Coalescent Distance")
#
| analysis/circ-unif.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.append('../')
import numpy as np
import pandas as pd
from source.signal_proc import car_filter, get_windows, extract_attributes
subjects = ['carlos', 'george', 'guilherme', 'harlei', 'heiko', 'joao', 'luis', 'luisa', 'sarah', 'thiago']
freqs = [0, 6, 12, 20, 30, 75]
sampling_f = 256
df = pd.Series(index=subjects, dtype='object')
# structure data
for subject in subjects:
df[subject] = pd.Series(index=freqs, dtype='object')
for freq in freqs:
df[subject][freq] = np.genfromtxt(f'../datasets/carvalho/{subject}_{freq}Hz.csv', delimiter=',', dtype=None).T
# apply Common Average Reference filter
df_car = car_filter(df)
# split data into windows of 3s, each window corresponds to one input
windows = get_windows(df, 3, sampling_f)
windows_car = get_windows(df_car, 3, sampling_f)
# extract attributes
attributes = [extract_attributes(windows[subject], freqs[1:], sampling_f, 3) for subject in subjects]
attributes_car = [extract_attributes(windows_car[subject], freqs[1:], sampling_f, 3) for subject in subjects]
# join results
attributes = pd.concat(attributes, axis=0, sort=False)
attributes_car = pd.concat(attributes_car, axis=0, sort=False)
# save result in csv files
attributes.to_csv(f'../extracted_attributes/carvalho.csv', index=False)
attributes_car.to_csv(f'../extracted_attributes/carvalho_car.csv', index=False)
| notebooks/extract_attributes.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# ## Epidemiological SEIR model
# %% [markdown]
# In compartmental modeling in epidemiology, SEIR (Susceptible, Exposed, Infectious, Recovered) is a simplified set of equations to model how an infectious disease spreads through a population.
# See for example [the Wikipedia article](https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology) for more information.
#
# The form we consider here, the model consists of a system of four non-linear differential equations:
#
# \begin{align*}
# \tfrac{\mathrm{d}S}{\mathrm{d}t} &= - \beta IS \tag{\% Susceptible} \\
# \tfrac{\mathrm{d}E}{\mathrm{d}t} &= \beta IS - \alpha E \tag{\% Exposed} \\
# \tfrac{\mathrm{d}I}{\mathrm{d}t} &= -\gamma I + \alpha E \tag{\% Infectious} \\
# \tfrac{\mathrm{d}R}{\mathrm{d}t} &= \gamma I \tag{\% Recovered}
# \end{align*}
#
# where $S(t)$, $E(t)$, $I(t)$ and $R(t)$ are stochastic processes varying in time.
# The model has three model parameters: $\alpha$, $\beta$ and $\gamma$, which determine how fast the disease spreads through the population and are different for every infectious disease, so they have to be estimated.
#
# We can implement the relationship of these ordinary equations in terms of Python code:
# %%
def ode_seir(variables, coordinates, parameters):
    """Right-hand side of the SEIR system of ODEs.

    Args:
        variables: current state ``(S, E, I, R)``.
        coordinates: time coordinate (unused, but required by the ODE solver).
        parameters: model parameters ``(alpha, beta, gamma)``.

    Returns:
        Tuple of derivatives ``(dS/dt, dE/dt, dI/dt, dR/dt)``.
    """
    var_s, var_e, var_i, var_r = variables
    alpha, beta, gamma = parameters
    # Fluxes between compartments.
    exposure = beta*var_i*var_s   # S -> E
    onset = alpha*var_e           # E -> I
    recovery = gamma*var_i        # I -> R
    return -exposure, exposure-onset, onset-recovery, recovery
# %% [markdown]
# ### Initial condition
#
# The initial condition $(S(0), E(0), I(0), R(0)) = (1-\delta, \delta, 0, 0)$ for some small $\delta$. Note that the state $(1,0,0,0)$ implies that nobody has been exposed, so we must assume $\delta>0$ to let the model to actually model spread of the disease. Or in terms of code:
# %%
def initial_condition(delta):
    """Initial SEIR state ``(S, E, I, R)`` seeded with exposed fraction *delta*."""
    susceptible = 1-delta
    return susceptible, delta, 0, 0
# %% [markdown]
# ### Model parameters
#
# The model parameters $\alpha$, $\beta$ and $\gamma$ are assumed to have a value, but are in all practical applications unknown. Because of this, it make more sense to assume that the parameters are inherently uncertain and can only be described through a probability distribution. For this example, we will assume that all parameters are uniformly distributed with
#
# \begin{align*}
# \alpha &\sim \mathcal{U}(0.15, 0.25) & \beta &\sim \mathcal{U}(0.95, 1.05) & \gamma &\sim \mathcal{U}(0.45, 0.55)
# \end{align*}
#
# Or using `chaospy`:
# %%
import chaospy
alpha = chaospy.Uniform(0.15, 0.25)
beta = chaospy.Uniform(0.95, 1.05)
gamma = chaospy.Uniform(0.45, 0.55)
distribution = chaospy.J(alpha, beta, gamma)
# %% [markdown]
# ### Deterministic model
#
# To have a base line of how this model works, we will first assume the uncertain parameters have some fixed value.
# For example the expected value of the uncertain parameters:
# %%
parameters = chaospy.E(distribution)
parameters
# %% [markdown]
# We then solve the SEIR model on the time interval $[0, 200]$ using $1000$ steps using `scipy.integrate`:
# %%
import numpy
from scipy.integrate import odeint
time_span = numpy.linspace(0, 200, 1000)
responses = odeint(ode_seir, initial_condition(delta=1e-4), time_span, args=(parameters,))
# %% [markdown]
# We then use `matplotlib` to plot the four processes:
# %%
from matplotlib import pyplot
labels = ['Susceptible', 'Exposed', 'Infectious', 'Recovered']
for response, label in zip(responses.T, labels):
pyplot.plot(time_span, response, label=label)
pyplot.title('SEIR model')
pyplot.xlabel('Time (days)')
pyplot.ylabel('% of population')
pyplot.legend()
# %% [markdown]
# ### Stochastic model
#
# We now have our deterministic base line model, and can observe that it works.
# Let us change the assumption to assume that the parameters are random, and model it using polynomial chaos expansion (PCE).
#
# We start by generating a PCE bases:
# %%
polynomial_order = 3
polynomial_expansion = chaospy.generate_expansion(
polynomial_order, distribution)
polynomial_expansion[:5].round(5)
# %% [markdown]
# Generate our quadrature nodes and weights:
# %%
quadrature_order = 8
abscissas, weights = chaospy.generate_quadrature(
quadrature_order, distribution, rule="gaussian")
# %% [markdown]
# We wrap the deterministic model solution into a function of the model parameters:
# %%
def model_solver(parameters, delta=1e-4):
    """Integrate the SEIR system over the module-level *time_span*.

    Args:
        parameters: ``(alpha, beta, gamma)`` model parameters.
        delta: initial exposed fraction used to seed the epidemic.

    Returns:
        Array of shape ``(len(time_span), 4)`` with the S, E, I, R trajectories.
    """
    y0 = initial_condition(delta)
    return odeint(ode_seir, y0, time_span, args=(parameters,))
# %% [markdown]
# Now we're going to evaluate the model taking parameters from the quadrature. To reduce the computational load, we use multiprocessing to increase the computational speed.
# %%
from multiprocessing import Pool
with Pool(4) as pool:
evaluations = pool.map(model_solver, abscissas.T)
# %% [markdown]
# And finally we're calculating the PCE Fourier coefficients:
# %%
model_approx = chaospy.fit_quadrature(
polynomial_expansion, abscissas, weights, evaluations)
# %% [markdown]
# With a model approximation we can calculate the mean and the standard deviations:
# %%
expected = chaospy.E(model_approx, distribution)
std = chaospy.Std(model_approx, distribution)
# %% [markdown]
# Finally we can plot the data with uncertainty intervals:
# %%
for mu, sigma, label in zip(expected.T, std.T, labels):
pyplot.fill_between(
time_span, mu-sigma, mu+sigma, alpha=0.3)
pyplot.plot(time_span, mu, label=label)
pyplot.xlabel("Time (days)")
pyplot.ylabel("% of population")
pyplot.title('Stochastic SEIR model')
pyplot.legend()
| docs/user_guide/advanced_topics/seir_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dl_py3
# language: python
# name: dl_py3
# ---
# +
# %matplotlib inline
import os
import ipywidgets as ipy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from skimage.color import label2rgb
DATA_DIR = '/public/dsb_2018_data'
PREDICTION_TRAIN = 'predictions/unet/train/outputs'
PREDICTION_VALID = 'predictions/unet/valid/outputs'
PREDICTION_TEST = 'predictions/unet/test/outputs'
SUBMISSION_PATH = os.path.join('/output/submission.csv')
# -
meta = pd.read_csv(os.path.join(DATA_DIR,'stage1_metadata.csv'))
meta_train = meta[meta['is_train']==1 & (~meta['vgg_features_clusters'].isin([0,1]))]
meta_valid = meta[meta['is_train']==1 & (meta['vgg_features_clusters'].isin([0,1]))]
meta_test = meta[meta['is_train']==0]
# +
def plot_with_labels(mask, contour, postprocessed, label, idx):
    """Display prediction panels next to the ground-truth labels.

    Shows, left to right: predicted mask, predicted contour, postprocessed
    instance labels, and ground-truth instance labels for sample *idx*.
    """
    panels = (mask[idx],
              contour[idx],
              label2rgb(postprocessed[idx]),
              label2rgb(label[idx]))
    plt.figure(figsize=(16, 12))
    for offset, image in enumerate(panels):
        plt.subplot(141 + offset)
        plt.imshow(image)
    plt.show()
def plot_predictions(mask, contour, postprocessed, idx):
    """Display prediction panels (no ground truth available).

    Shows, left to right: predicted mask, predicted contour, and
    postprocessed instance labels for sample *idx*.
    """
    panels = (mask[idx],
              contour[idx],
              label2rgb(postprocessed[idx]))
    plt.figure(figsize=(16, 12))
    for offset, image in enumerate(panels):
        plt.subplot(131 + offset)
        plt.imshow(image)
    plt.show()
# -
# # Predictions and labels on train
labels_train = joblib.load(os.path.join(DATA_DIR,'ground_truth','train','labels.pkl'))
masks_train = joblib.load(os.path.join(DATA_DIR,PREDICTION_TRAIN,'unet_multitask'))['mask_prediction']
contours_train = joblib.load(os.path.join(DATA_DIR,PREDICTION_TRAIN,'unet_multitask'))['contour_prediction']
postprocessed_train = joblib.load(os.path.join(DATA_DIR,PREDICTION_TRAIN,'binary_fill'))['filled_images']
ipy.interact(plot_with_labels, mask = ipy.fixed(masks_train),
contour = ipy.fixed(contours_train),
postprocessed = ipy.fixed(postprocessed_train),
label = ipy.fixed(labels_train),
idx = ipy.IntSlider(min=0, max=50, value=0, step=1))
# # Predictions and labels on valid
labels_valid = joblib.load(os.path.join(DATA_DIR,'ground_truth','valid','labels.pkl'))
masks_valid = joblib.load(os.path.join(DATA_DIR,PREDICTION_VALID,'unet_multitask'))['mask_prediction']
contours_valid = joblib.load(os.path.join(DATA_DIR,PREDICTION_VALID,'unet_multitask'))['contour_prediction']
postprocessed_valid = joblib.load(os.path.join(DATA_DIR,PREDICTION_VALID,'binary_fill'))['filled_images']
ipy.interact(plot_with_labels, mask = ipy.fixed(masks_valid),
contour = ipy.fixed(contours_valid),
postprocessed = ipy.fixed(postprocessed_valid),
label = ipy.fixed(labels_valid),
idx = ipy.IntSlider(min=0, max=50, value=0, step=1))
# # Predictions and submission on test
masks_test = joblib.load(os.path.join(DATA_DIR,PREDICTION_TEST,'unet_multitask'))['mask_prediction']
contours_test = joblib.load(os.path.join(DATA_DIR,PREDICTION_TEST,'unet_multitask'))['contour_prediction']
postprocessed_test = joblib.load(os.path.join(DATA_DIR,PREDICTION_TEST,'binary_fill'))['filled_images']
ipy.interact(plot_predictions, mask = ipy.fixed(masks_test),
contour = ipy.fixed(contours_test),
postprocessed = ipy.fixed(postprocessed_test),
idx = ipy.IntSlider(min=0, max=50, value=0, step=1))
# +
def decompose(labeled):
    """Split a labeled instance image into one binary mask per instance.

    Each returned mask is 255 where that instance is present and 0 elsewhere,
    preserving the input dtype. If the image contains no instances, the input
    image itself is returned as the only element.
    """
    n_instances = labeled.max()
    if n_instances < 1:
        # No foreground objects: fall back to the (all-background) input.
        return [labeled]
    masks = []
    for instance_id in range(1, n_instances + 1):
        binary = labeled.copy()
        binary[binary != instance_id] = 0.
        binary[binary == instance_id] = 255.
        masks.append(binary)
    return masks
def run_length_encoding(x):
    """Run-length encode a binary mask.

    Pixels are scanned in column-major (Fortran) order with 1-based indexing,
    the convention used by the DSB submission format. Returns a flat list
    ``[start1, length1, start2, length2, ...]``.
    """
    positions = np.where(x.T.flatten() == 1)[0]
    runs = []
    last = -2
    for pos in positions:
        if pos == last + 1:
            # Extends the current run.
            runs[-1] += 1
        else:
            # Starts a new run of length 1 (1-indexed start).
            runs.extend((pos + 1, 1))
        last = pos
    return runs
def generate_submission(predictions, meta):
    """Build the run-length-encoded submission DataFrame.

    Every instance in every predicted labeled image becomes one row pairing
    the image id with its space-separated RLE string.
    """
    records = []
    for image_id, prediction in zip(meta['ImageId'].values, predictions):
        for mask in decompose(prediction):
            encoding = ' '.join(str(value) for value in run_length_encoding(mask > 128.))
            records.append((image_id, encoding))
    submission = pd.DataFrame({'ImageId': [rec[0] for rec in records],
                               'EncodedPixels': [rec[1] for rec in records]})
    return submission
# -
submission = generate_submission(postprocessed_test, meta_test)
submission.head()
submission.to_csv(SUBMISSION_PATH, index=None)
| devbook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # RNN Step Forward
# +
from code_base.rnn_layers import rnn_step_forward
from code_base.layer_utils import rel_error
import numpy as np
N, D, H = 3, 10, 4
x = np.linspace(-0.4, 0.7, num=N*D).reshape(N, D)
prev_h = np.linspace(-0.2, 0.5, num=N*H).reshape(N, H)
Wx = np.linspace(-0.1, 0.9, num=D*H).reshape(D, H)
Wh = np.linspace(-0.3, 0.7, num=H*H).reshape(H, H)
b = np.linspace(-0.2, 0.4, num=H)
next_h, _ = rnn_step_forward(x, prev_h, Wx, Wh, b)
expected_next_h = np.asarray([
[-0.58172089, -0.50182032, -0.41232771, -0.31410098],
[ 0.66854692, 0.79562378, 0.87755553, 0.92795967],
[ 0.97934501, 0.99144213, 0.99646691, 0.99854353]])
print('next_h error: ', rel_error(expected_next_h, next_h))
# +
from code_base.rnn_layers import rnn_step_forward
import numpy as np
x_shape = (3, 874)
Wx_shape = (874, 128)
h_shape = (3, 128)
Wh_shape = (128, 128)
b_shape = (128,)
x = np.loadtxt('./input_files/x.csv', delimiter=',')
x = x.reshape(x_shape)
Wx = np.loadtxt('./input_files/Wx.csv', delimiter=',')
Wx = Wx.reshape(Wx_shape)
prev_h = np.loadtxt('./input_files/prev_h.csv', delimiter=',')
prev_h = prev_h.reshape(h_shape)
Wh = np.loadtxt('./input_files/Wh.csv', delimiter=',')
Wh = Wh.reshape(Wh_shape)
b = np.loadtxt('./input_files/b.csv', delimiter=',')
out, _ = rnn_step_forward(x, prev_h, Wx, Wh, b)
np.savetxt('./output_files/rnn_step_forward_out.csv', out.ravel(), delimiter=',')
# -
# # RNN Step Backward
# +
from code_base.rnn_layers import rnn_step_forward, rnn_step_backward
from code_base.gradient_check import *
from code_base.layer_utils import rel_error
import numpy as np
N, D, H = 4, 5, 6
x = np.random.randn(N, D)
h = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
out, cache = rnn_step_forward(x, h, Wx, Wh, b)
dnext_h = np.random.randn(*out.shape)
fx = lambda x: rnn_step_forward(x, h, Wx, Wh, b)[0]
fh = lambda prev_h: rnn_step_forward(x, h, Wx, Wh, b)[0]
fWx = lambda Wx: rnn_step_forward(x, h, Wx, Wh, b)[0]
fWh = lambda Wh: rnn_step_forward(x, h, Wx, Wh, b)[0]
fb = lambda b: rnn_step_forward(x, h, Wx, Wh, b)[0]
dx_num = eval_numerical_gradient_array(fx, x, dnext_h)
dprev_h_num = eval_numerical_gradient_array(fh, h, dnext_h)
dWx_num = eval_numerical_gradient_array(fWx, Wx, dnext_h)
dWh_num = eval_numerical_gradient_array(fWh, Wh, dnext_h)
db_num = eval_numerical_gradient_array(fb, b, dnext_h)
dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache)
print('dx error: ', rel_error(dx_num, dx))
print('dprev_h error: ', rel_error(dprev_h_num, dprev_h))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
# +
from code_base.rnn_layers import rnn_step_forward, rnn_step_backward
import numpy as np
x_shape = (3, 874)
Wx_shape = (874, 128)
h_shape = (3, 128)
Wh_shape = (128, 128)
b_shape = (128,)
x = np.loadtxt('./input_files/x.csv', delimiter=',')
x = x.reshape(x_shape)
Wx = np.loadtxt('./input_files/Wx.csv', delimiter=',')
Wx = Wx.reshape(Wx_shape)
prev_h = np.loadtxt('./input_files/prev_h.csv', delimiter=',')
prev_h = prev_h.reshape(h_shape)
Wh = np.loadtxt('./input_files/Wh.csv', delimiter=',')
Wh = Wh.reshape(Wh_shape)
b = np.loadtxt('./input_files/b.csv', delimiter=',')
out, cache = rnn_step_forward(x, prev_h, Wx, Wh, b)
dhout = np.loadtxt('./input_files/dho.csv', delimiter=',')
dx, dh, dWx, dWh, db = rnn_step_backward(dhout, cache)
np.savetxt('./output_files/rnn_step_backward_out_dx.csv', dx.ravel(), delimiter=',')
np.savetxt('./output_files/rnn_step_backward_out_dh.csv', dh.ravel(), delimiter=',')
np.savetxt('./output_files/rnn_step_backward_out_dwx.csv', dWx.ravel(), delimiter=',')
np.savetxt('./output_files/rnn_step_backward_out_dwh.csv', dWh.ravel(), delimiter=',')
np.savetxt('./output_files/rnn_step_backward_out_db.csv', db.ravel(), delimiter=',')
# -
# # RNN Forward
# +
from code_base.rnn_layers import rnn_forward
from code_base.layer_utils import *
import numpy as np
N, T, D, H = 2, 3, 4, 5
x = np.linspace(-0.1, 0.3, num=N*T*D).reshape(N, T, D)
h0 = np.linspace(-0.3, 0.1, num=N*H).reshape(N, H)
Wx = np.linspace(-0.2, 0.4, num=D*H).reshape(D, H)
Wh = np.linspace(-0.4, 0.1, num=H*H).reshape(H, H)
b = np.linspace(-0.7, 0.1, num=H)
h, _ = rnn_forward(x, h0, Wx, Wh, b)
expected_h = np.asarray([
[[-0.42070749, -0.27279261, -0.11074945, 0.05740409, 0.22236251],
[-0.39525808, -0.22554661, -0.0409454, 0.14649412, 0.32397316],
[-0.42305111, -0.24223728, -0.04287027, 0.15997045, 0.35014525],],
[[-0.55857474, -0.39065825, -0.19198182, 0.02378408, 0.23735671],
[-0.27150199, -0.07088804, 0.13562939, 0.33099728, 0.50158768],
[-0.51014825, -0.30524429, -0.06755202, 0.17806392, 0.40333043]]])
print('h error: ', rel_error(expected_h, h))
# +
from code_base.rnn_layers import rnn_forward
import numpy as np
# Expected array shapes for the serialized test fixtures.
x_all_shape = (3, 5, 874)
Wx_shape = (874, 128)
h_shape = (3, 128)
Wh_shape = (128, 128)
b_shape = (128,)
x_all = np.loadtxt('./input_files/x_all.csv', delimiter=',')
x_all = x_all.reshape(x_all_shape)
Wx = np.loadtxt('./input_files/Wx.csv', delimiter=',')
Wx = Wx.reshape(Wx_shape)
h0 = np.loadtxt('./input_files/prev_h.csv', delimiter=',')
# Bug fix: reshape the freshly loaded h0, not the stale `prev_h` left over
# from an earlier cell (this matches the backward-pass cell, which already
# does `h0 = h0.reshape(h_shape)`).
h0 = h0.reshape(h_shape)
Wh = np.loadtxt('./input_files/Wh.csv', delimiter=',')
Wh = Wh.reshape(Wh_shape)
b = np.loadtxt('./input_files/b.csv', delimiter=',')
out, _ = rnn_forward(x_all, h0, Wx, Wh, b)
np.savetxt('./output_files/rnn_forward_out.csv', out.ravel(), delimiter=',')
# -
# # RNN Backward
# +
from code_base.rnn_layers import rnn_forward, rnn_backward
from code_base.gradient_check import *
from code_base.layer_utils import *
import numpy as np
N, D, T, H = 2, 3, 10, 5
x = np.random.randn(N, T, D)
h0 = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
out, cache = rnn_forward(x, h0, Wx, Wh, b)
dout = np.random.randn(*out.shape)
dx, dh0, dWx, dWh, db = rnn_backward(dout, cache)
fx = lambda x: rnn_forward(x, h0, Wx, Wh, b)[0]
fh0 = lambda h0: rnn_forward(x, h0, Wx, Wh, b)[0]
fWx = lambda Wx: rnn_forward(x, h0, Wx, Wh, b)[0]
fWh = lambda Wh: rnn_forward(x, h0, Wx, Wh, b)[0]
fb = lambda b: rnn_forward(x, h0, Wx, Wh, b)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
dh0_num = eval_numerical_gradient_array(fh0, h0, dout)
dWx_num = eval_numerical_gradient_array(fWx, Wx, dout)
dWh_num = eval_numerical_gradient_array(fWh, Wh, dout)
db_num = eval_numerical_gradient_array(fb, b, dout)
print('dx error: ', rel_error(dx_num, dx))
print('dh0 error: ', rel_error(dh0_num, dh0))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
# +
# Run rnn_backward on the provided CSV fixtures and dump each gradient
# (flattened) to its own CSV file for the auto-grader.
from code_base.rnn_layers import rnn_forward, rnn_backward
import numpy as np
x_all_shape = (3, 5, 874)   # (N, T, D) input word vectors
Wx_shape = (874, 128)       # input-to-hidden weights
h_shape = (3, 128)          # (N, H) initial hidden state
Wh_shape = (128, 128)       # hidden-to-hidden weights
b_shape = (128,)            # bias
dh_all_shape = (3, 5, 128)  # (N, T, H) upstream gradient
def _load_csv(path, shape=None):
    # Load a flat CSV fixture, optionally reshaping it to its true shape.
    arr = np.loadtxt(path, delimiter=',')
    return arr if shape is None else arr.reshape(shape)
x_all = _load_csv('./input_files/x_all.csv', x_all_shape)
Wx = _load_csv('./input_files/Wx.csv', Wx_shape)
h0 = _load_csv('./input_files/prev_h.csv', h_shape)
Wh = _load_csv('./input_files/Wh.csv', Wh_shape)
b = _load_csv('./input_files/b.csv')
out, cache = rnn_forward(x_all, h0, Wx, Wh, b)
dhout = _load_csv('./input_files/dho_all.csv', dh_all_shape)
dx_all, dh0, dWx, dWh, db = rnn_backward(dhout, cache)
# Write gradients in the same order and to the same files as before.
for fname, grad in [('rnn_backward_out_dx.csv', dx_all),
                    ('rnn_backward_out_dh0.csv', dh0),
                    ('rnn_backward_out_dwx.csv', dWx),
                    ('rnn_backward_out_dwh.csv', dWh),
                    ('rnn_backward_out_db.csv', db)]:
    np.savetxt('./output_files/' + fname, grad.ravel(), delimiter=',')
# -
# # (Optional) Temporal Bi-directional Concatenation Forward
# +
# Small fixed-size sanity check for bidirectional_rnn_concatenate_forward:
# concatenate forward (h) and reverse (hr) hidden states and compare
# against a hand-computed reference.
from code_base.rnn_layers import bidirectional_rnn_concatenate_forward
from code_base.layer_utils import *
import numpy as np
N, T, H = 2, 4, 3  # batch size, max sequence length, hidden dim
h = np.linspace(-0.5, 0, num=N*T*H).reshape(N, T, H)   # forward hidden states
hr = np.linspace(0, 0.5, num=N*T*H).reshape(N, T, H)   # reverse hidden states
mask = np.ones((N,T))  # 1 = real time step, 0 = padding
mask[0][3] = 0 # length of s1 is 3
mask[1][2] = mask[1][3] = 0 # length of s2 is 2
ho, _ = bidirectional_rnn_concatenate_forward(h, hr, mask)
# Hand-computed reference; rows at masked (padded) time steps are all zeros.
expected_ho = np.array([[
[-0.5, -0.47826087, -0.45652174, 0.13043478, 0.15217391, 0.17391304],
[-0.43478261, -0.41304348, -0.39130435, 0.06521739, 0.08695652, 0.10869565],
[-0.36956522, -0.34782609, -0.32608696, 0., 0.02173913, 0.04347826],
[0., 0., 0., 0., 0., 0.]],
[[-0.23913043, -0.2173913 , -0.19565217, 0.32608696, 0.34782609, 0.36956522],
[-0.17391304, -0.15217391, -0.13043478, 0.26086957, 0.2826087, 0.30434783],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.]]])
print('ho error: ', rel_error(expected_ho, ho, mask))
# +
# Run bidirectional_rnn_concatenate_forward on the provided fixtures and
# save the flattened result for the auto-grader.
from code_base.rnn_layers import bidirectional_rnn_concatenate_forward
from code_base.gradient_check import *
import numpy as np
h_shape = (3, 5, 128)  # (N, T, H) hidden-state fixtures
mask_shape = (3, 5)    # (N, T) validity mask
h = np.loadtxt('./input_files/h_all.csv', delimiter=',')
h = h.reshape(h_shape)
hr = np.loadtxt('./input_files/h_all_r.csv', delimiter=',')
hr = hr.reshape(h_shape)
mask = np.loadtxt('./input_files/mask.csv', delimiter=',')
mask = mask.reshape(mask_shape)
hout, _ = bidirectional_rnn_concatenate_forward(h, hr, mask)
np.savetxt('./output_files/bidirectional_rnn_concatenate_forward_out.csv', hout.ravel(), delimiter=',')
# -
# # (Optional) Temporal Bi-directional Concatenation Backward
# +
# Gradient check for bidirectional_rnn_concatenate_backward: compare the
# analytic gradients with finite-difference estimates on a tiny example.
from code_base.rnn_layers import bidirectional_rnn_concatenate_forward, bidirectional_rnn_concatenate_backward
from code_base.layer_utils import *
from code_base.gradient_check import *
import numpy as np
N, T, H = 2, 4, 3  # batch size, max sequence length, hidden dim
h = np.linspace(-0.5, 0, num=N*T*H).reshape(N, T, H)   # forward hidden states
hr = np.linspace(0, 0.5, num=N*T*H).reshape(N, T, H)   # reverse hidden states
mask = np.ones((N,T))  # 1 = real time step, 0 = padding
mask[0][3] = 0 # length of s1 is 3
mask[1][2] = mask[1][3] = 0 # length of s2 is 2
ho, cache = bidirectional_rnn_concatenate_forward(h, hr, mask)
dho = np.linspace(0., 0.5, num=N*T*2*H).reshape(N, T, 2*H)  # upstream gradient
dh, dhr = bidirectional_rnn_concatenate_backward(dho, cache)
# Forward-pass closures for the numerical gradient w.r.t. h and hr.
fh = lambda h: bidirectional_rnn_concatenate_forward(h, hr, mask)[0]
fhr = lambda hr: bidirectional_rnn_concatenate_forward(h, hr, mask)[0]
dh_num = eval_numerical_gradient_array(fh, h, dho)
dhr_num = eval_numerical_gradient_array(fhr, hr, dho)
# Relative errors should be small if the backward pass is correct.
print('dh error: ', rel_error(dh_num, dh, mask))
print('dhr error: ', rel_error(dhr_num, dhr, mask))
# +
# Run bidirectional_rnn_concatenate_backward on the provided fixtures and
# save both gradients (flattened) for the auto-grader.
from code_base.rnn_layers import bidirectional_rnn_concatenate_forward, bidirectional_rnn_concatenate_backward
import numpy as np
h_shape = (3, 5, 128)  # (N, T, H) hidden-state fixtures
mask_shape = (3, 5)    # (N, T) validity mask
h = np.loadtxt('./input_files/h_all.csv', delimiter=',')
h = h.reshape(h_shape)
hr = np.loadtxt('./input_files/h_all_r.csv', delimiter=',')
hr = hr.reshape(h_shape)
mask = np.loadtxt('./input_files/mask.csv', delimiter=',')
mask = mask.reshape(mask_shape)
hout, cache = bidirectional_rnn_concatenate_forward(h, hr, mask)
dhout = np.loadtxt('./input_files/dhc_all.csv', delimiter=',')
dhout = dhout.reshape(3, 5, 256)  # (N, T, 2*H): upstream gradient of the concatenation
dh, dhr = bidirectional_rnn_concatenate_backward(dhout, cache)
np.savetxt('./output_files/bidirectional_rnn_concatenate_backward_out_h.csv', dh.ravel(), delimiter=',')
np.savetxt('./output_files/bidirectional_rnn_concatenate_backward_out_hr.csv', dhr.ravel(), delimiter=',')
# -
# # RNN for Sentiment Analysis - Forward Pass
# +
# Forward-pass sanity check for the sentiment-analysis RNN: with all
# parameters pinned to fixed linspace values, the loss on a tiny two-sentence
# batch must match a precomputed reference value.
from code_base.classifiers.rnn import *
# If you do brnn, please import from code_base.classifiers.brnn instead
import numpy as np
N, H, A, O = 2, 6, 5, 2  # batch size, hidden dim, fc dim, output classes
word_to_idx = { 'awesome': 0, 'reading':1, 'pretty': 2, 'dog': 3, 'movie': 4,
'liked': 5, 'most': 6, 'admired': 7, 'bad': 8, 'fucking': 9}
V = len(word_to_idx)  # vocabulary size
T = 4                 # max sequence length
model = SentimentAnalysisRNN(word_to_idx,
hidden_dim=H,
fc_dim=A,
output_dim=O,
cell_type='rnn',
dtype=np.float64)
# Set all model parameters to fixed values
for k, v in model.params.items():
model.params[k] = np.linspace(-1.4, 1.3, num=v.size).reshape(*v.shape)
labels = np.array([1, 0], dtype=np.int32)
# One-hot word vectors: sentence 0 uses 4 words, sentence 1 only 3.
wordvecs = np.zeros((N, T, V))
wordvecs[0, 0, 0] = wordvecs[0, 1, 5] = wordvecs[0, 2, 2] = wordvecs[0, 3, 7] = 1
wordvecs[1, 0, 4] = wordvecs[1, 1, 8] = wordvecs[1, 2, 5] = 1
mask = np.ones((N, T))
mask[1, 3] = 0  # mark the padded final step of the shorter sentence
loss, grads = model.loss(wordvecs, labels, mask)
expected_loss = 2.99619226823
# For brnn, the expected_loss should be 2.9577205234
print('loss: ', loss)
print('expected loss: ', expected_loss)
print('difference: ', abs(loss - expected_loss))
# -
# # RNN for Sentiment Analysis - Backward Pass
# +
# Backward-pass gradient check for the sentiment-analysis RNN: compare each
# analytic parameter gradient against a numerical estimate.
from code_base.classifiers.rnn import *
# If you do brnn, please import from code_base.classifiers.brnn instead
from code_base.gradient_check import *
from code_base.layer_utils import rel_error
import numpy as np
N, T, H, A, O = 2, 4, 6, 5, 2  # batch, seq length, hidden, fc, output dims
word_to_idx = {'awesome': 0, 'reading':1, 'pretty': 2, 'dog': 3, 'movie': 4,
'liked': 5, 'most': 6, 'admired': 7, 'bad': 8, 'fucking': 9}
V = len(word_to_idx)  # vocabulary size
labels = np.array([1, 0], dtype=np.int32)
# One-hot word vectors: sentence 0 uses 4 words, sentence 1 only 3.
wordvecs = np.zeros((N, T, V))
wordvecs[0, 0, 0] = wordvecs[0, 1, 5] = wordvecs[0, 2, 2] = wordvecs[0, 3, 7] = 1
wordvecs[1, 0, 4] = wordvecs[1, 1, 8] = wordvecs[1, 2, 5] = 1
mask = np.ones((N, T))
mask[1, 3] = 0  # mark the padded final step of the shorter sentence
model = SentimentAnalysisRNN(word_to_idx,
hidden_dim=H,
fc_dim=A,
output_dim=O,
cell_type='rnn',
dtype=np.float64,
)
loss, grads = model.loss(wordvecs, labels, mask)
# Numerically check each parameter's gradient; relative errors should be small.
for param_name in sorted(grads):
f = lambda _: model.loss(wordvecs, labels, mask)[0]
param_grad_num = eval_numerical_gradient(f, model.params[param_name],
verbose=False, h=1e-6)
e = rel_error(param_grad_num, grads[param_name])
print('%s relative error: %e' % (param_name, e))
# # Training/Inference on Small Data
# +
# Overfit the sentiment RNN on a small data sample, run inference on the
# same batch, save the predicted probabilities, and plot the loss curve.
from code_base.sentiment_analysis_solver import *
from code_base.classifiers.rnn import *
# If you do brnn, please import from code_base.classifiers.brnn instead
from code_base.data_utils import *
import matplotlib.pyplot as plt
import numpy as np
download_corpus()  # fetch the dataset if it is not present locally
small_data = load_data('code_base/datasets/train.csv', sample=True)
small_rnn_model = SentimentAnalysisRNN(
cell_type='rnn',
word_to_idx=load_dictionary('code_base/datasets/dictionary.csv')
)
# SGD with a fixed learning rate (lr_decay=1.0 disables decay).
small_rnn_solver = SentimentAnalysisSolver(small_rnn_model,
small_data,
update_rule='sgd',
num_epochs=100,
batch_size=100,
optim_config={
'learning_rate': 5e-3,
},
lr_decay=1.0,
verbose=True,
print_every=10,
)
small_rnn_solver.train()
# we will use the same batch of training data for inference
# this is just to let you know the procedure of inference
preds = small_rnn_solver.test(split='train')
np.savetxt('./output_files/rnn_prediction_prob.csv', preds.ravel(), delimiter=',')
# If you do brnn, please save result to ./output_files/brnn_prediction_prob.csv
# Plot the training losses
plt.plot(small_rnn_solver.loss_history)
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Training loss history')
plt.show()
# -
| assignment3/assignment3-RNN/SentimentAnalysis.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .scala
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Scala
// language: scala
// name: scala
// ---
// <a name="top"></a><img src="images/chisel_1024.png" alt="Chisel logo" style="width:480px;" />
// # Module 2.1: Your First Chisel Module
// **Prev: [Introduction to Scala](1_intro_to_scala.ipynb)**<br>
// **Next: [Combinational Logic](2.2_comb_logic.ipynb)**
//
// ## Motivation
// Now that you are familiar with Scala, let's start carving out some hardware! Chisel stands for **C**onstructing **H**ardware **I**n a **S**cala **E**mbedded **L**anguage. That means it is a DSL in Scala, allowing you to take advantage of both Scala and Chisel programming within the same code. It is important to understand which code is "Scala" and which code is "Chisel", but we will discuss that more later. For now, think of Chisel and the code in Module 2 as a better way to write Verilog. This module throws an entire Chisel `Module` and tester at you. Just get the gist of it for now. You'll see plenty more examples later.
// ## Setup
// The following cell downloads the dependencies needed for Chisel. You will see it in all future notebooks. **Run this cell now**.
// Resolve the load-ivy helper script relative to the notebook's working
// directory, then load it into the Ammonite session to fetch Chisel's
// dependencies.
val path = System.getProperty("user.dir") + "/source/load-ivy.sc"
interp.load.module(ammonite.ops.Path(java.nio.file.FileSystems.getDefault().getPath(path)))
// As mentioned in the last module, these statements are needed to import Chisel. **Run this cell now** before running any future code blocks.
import chisel3._
import chisel3.util._
import chisel3.iotesters.{ChiselFlatSpec, Driver, PeekPokeTester}
// ---
// # Your First Module
// This section will present your first hardware module, a test case, and how to run it. It will contain many things that you will not understand, and that is OK. We want you to take away the broad strokes, so you can continually return to this complete and working example to reinforce what you've learned.
//
// <span style="color:blue">**Example: A Module**</span><br>
// Like Verilog, we can declare module definitions in Chisel. The following example is a Chisel `Module`, `Passthrough`, that has one 4-bit input, `in`, and one 4-bit output, `out`. The module combinationally connects `in` and `out`, so `in` drives `out`.
// Chisel Code: Declare a new module definition
// Chisel Code: Declare a new module definition
// Passthrough: a purely combinational module with one 4-bit input and one
// 4-bit output, where the input directly drives the output.
class Passthrough extends Module {
val io = IO(new Bundle {
val in = Input(UInt(4.W))   // 4-bit input port
val out = Output(UInt(4.W)) // 4-bit output port
})
io.out := io.in // `:=` makes the right-hand signal drive the left-hand one
}
// There's a lot here! The following explains how to think of each line in terms of the hardware we are describing.
//
// ```scala
// class Passthrough extends Module {
// ```
// We declare a new module called `Passthrough`. `Module` is a built-in Chisel class that all hardware modules must extend.
//
// ```scala
// val io = IO(...)
// ```
// We declare all our input and output ports in a special `io` `val`. It must be called `io` and be an `IO` object or instance, which requires something of the form `IO(_instantiated_bundle_)`.
//
// ```scala
// new Bundle {
// val in = Input(...)
// val out = Output(...)
// }
// ```
// We declare a new hardware struct type (Bundle) that contains some named signals `in` and `out` with directions Input and Output, respectively.
//
// ```scala
// UInt(4.W)
// ```
// We declare a signal's hardware type. In this case, it is an unsigned integer of width 4.
//
// ```scala
// io.out := io.in
// ```
// We connect our input port to our output port, such that `io.in` *drives* `io.out`. Note that the `:=` operator is a ***Chisel*** operator that indicates that the right-hand signal drives the left-hand signal. It is a directioned operator.
//
// The neat thing about hardware construction languages (HCLs) is that we can use the underlying programming language as a scripting language. For example, after declaring our Chisel module, we then use Scala to call the Chisel compiler to translate Chisel `Passthrough` into Verilog `Passthrough`. This process is called ***elaboration***.
// Scala Code: Elaborate our Chisel design by translating it to Verilog.
// Don't worry about understanding this code; it is very complicated Scala.
// getVerilog elaborates a fresh Passthrough instance and returns the
// emitted Verilog as a String, which we print.
println(getVerilog(new Passthrough))
// Note that the Name of our module is `cmd<#>WrapperHelperPassthrough`, which is an artifact of running this tutorial in Jupyter. In your normal code, its name should just be `Passthrough`. This is an important lesson though - although Chisel does its best to preserve the names of your modules and other hardware components, sometimes it fails to do so.
//
// <span style="color:blue">**Example: A Module Generator**</span><br>
// If we apply what we learned about Scala to this example, we can see that a Chisel module is implemented as a Scala class. Just like any other Scala class, we could make a Chisel module take some construction parameters. In this case, we make a new class `PassthroughGenerator` which will accept an integer `width` that dictates the widths of its input and output ports:
// +
// Chisel Code, but pass in a parameter to set widths of ports
// `width` is an ordinary Scala constructor parameter: each
// `new PassthroughGenerator(w)` elaborates a module whose ports are w bits.
class PassthroughGenerator(width: Int) extends Module {
val io = IO(new Bundle {
val in = Input(UInt(width.W))   // parameterized-width input
val out = Output(UInt(width.W)) // parameterized-width output
})
io.out := io.in // combinational passthrough: in drives out
}
// Let's now generate modules with different widths
println(getVerilog(new PassthroughGenerator(10)))
println(getVerilog(new PassthroughGenerator(20)))
// -
// Notice that the generated Verilog uses different bitwidths for the input/output depending on the value assigned to the `width` parameter. Let's dig into how this works. Because Chisel Modules are normal Scala classes, we can use the power of Scala's class constructors to parameterize the elaboration of our design.
//
// You may notice that this parameterization is enabled by *Scala*, not *Chisel*; Chisel has no extra APIs for parameterization, but a designer can simply leverage Scala features to parameterize his/her designs.
//
// Because `PassthroughGenerator` no longer describes a single Module, but instead describes a family of modules parameterized by `width`, we refer to this `Passthrough` as a ***generator***.
// ---
// # Testing Your Hardware
//
// No hardware module or generator should be complete without a tester. Chisel has built-in test features that you will explore throughout this bootcamp. The following example is a Chisel test harness that passes values to an instance of `Passthrough`'s input port `in`, and checks that the same value is seen on the output port `out`.
//
// <span style="color:blue">**Example: A Tester**</span><br>
// There is a ***LOT*** of crazy Scala going on here, so no need to understand anything except the `poke` and `expect` commands. You can think of the rest of the code as simply boilerplate to write these simple tests.
// Scala Code: Calling Driver to instantiate Passthrough + PeekPokeTester and execute the test.
// Don't worry about understanding this code; it is very complicated Scala.
// Think of it more as boilerplate to run a Chisel test.
// Driver returns true iff every expect() inside the tester passed.
val testResult = Driver(() => new Passthrough()) {
c => new PeekPokeTester(c) {
poke(c.io.in, 0) // Set our input to value 0
expect(c.io.out, 0) // Assert that the output correctly has 0
poke(c.io.in, 1) // Set our input to value 1
expect(c.io.out, 1) // Assert that the output correctly has 1
poke(c.io.in, 2) // Set our input to value 2
expect(c.io.out, 2) // Assert that the output correctly has 2
}
}
assert(testResult) // Scala Code: if testResult == false, will throw an error
println("SUCCESS!!") // Scala Code: if we get here, our tests passed!
// What's going on? The test accepts a `Passthrough` module, assigns values to the module's inputs, and checks its outputs. To set an input, we call `poke`. To check an output, we call `expect`. If we don't want to compare the output to an expected value (no assertion), we can `peek` the output instead.
//
// If all `expect` statements are true, then our boilerplate code will return true (see `testResult`).
// <span style="color:red">**Exercise: Writing Your Own Testers**</span><br>
// Write and execute two tests, one that tests `PassthroughGenerator` for a width of 10 and a second that tests `PassthroughGenerator` for a width of 20. Check at least two values for each: zero and the maximum value supported by the specified width. Note that the triple question mark has a special meaning in Scala. You may see it frequently in these bootcamp exercises. Running code with the `???` will produce the `NotImplementedError`. Replace `???` with your testers.
// +
// Exercise solution: test PassthroughGenerator at widths 10 and 20,
// checking zero and the maximum representable unsigned value for each width.
val test10result = Driver (() => new PassthroughGenerator(width = 10)) {
c => new PeekPokeTester(c) {
poke(c.io.in, 0) // Checking zero
expect(c.io.out, 0)
poke(c.io.in, 1023) // Max value of a 10-bit int, 2^10-1 = 1023
expect(c.io.out, 1023)
}
}
val test20result = Driver (() => new PassthroughGenerator(width = 20)) {
c => new PeekPokeTester(c) {
poke(c.io.in, 0) // Checking zero
expect(c.io.out, 0)
poke(c.io.in, 1048575) // Max value of a 20-bit int, 2^20-1 = 1048575
expect(c.io.out, 1048575)
}
}
// Booleans need no comparison against `true`; assert on them directly.
assert(test10result && test20result)
println("SUCCESS!!") // Scala Code: if we get here, our tests passed!
// -
// <div id="container"><section id="accordion"><div>
// <input type="checkbox" id="check-1" />
// <label for="check-1"><strong>Solution</strong> (click to toggle displaying it)</label>
// <article>
// <pre style="background-color:#f7f7f7">
// val test10result = Driver(() => new PassthroughGenerator(10)) {
// c => new PeekPokeTester(c) {
// poke(c.io.in, 0)
// expect(c.io.out, 0)
// poke(c.io.in, 1023)
// expect(c.io.out, 1023)
// }
// }
//
// val test20result = Driver(() => new PassthroughGenerator(20)) {
// c => new PeekPokeTester(c) {
// poke(c.io.in, 0)
// expect(c.io.out, 0)
// poke(c.io.in, 1048575)
// expect(c.io.out, 1048575)
// }
// }
//
// </pre></article></div></section></div>
// ---
// # Looking at Generated Verilog/FIRRTL
//
// If you are having trouble understanding the generated hardware and are comfortable with reading structural Verilog and/or FIRRTL (Chisel's IR which is comparable to a synthesis-only subset of Verilog), then you can try looking at the generated Verilog to see the result of Chisel execution.
//
// Here is an example of generating the Verilog (which you've seen already) and the FIRRTL.
// Viewing the Verilog for debugging
println(getVerilog(new Passthrough))
// Viewing the FIRRTL (Chisel's intermediate representation) for debugging
println(getFirrtl(new Passthrough))
// ---
// # You're done!
//
// [Return to the top.](#top)
// ## <span style="color:red"> Appendix: A Note on "printf" Debugging</span>
// [Debugging with print statements](https://stackoverflow.com/a/189570) is not always the best way to debug, but is often an easy first step to see what's going on when something doesn't work the way you expect.
// Because Chisel generators are programs generating hardware, there are some extra subtleties about printing generator and circuit state.
// It is important to remember when your print statement executes and what is being printed.
// The three common scenarios where you might want to print have some important differences:
// * Chisel generator prints during circuit generation
// * Circuit prints during circuit simulation
// * Tester prints during testing
//
// `println` is a built-in Scala function that prints to the console. It **cannot** be used to print during circuit simulation because the generated circuit is FIRRTL or Verilog- not Scala.
//
// The following code block shows different styles of printing.
// +
// PrintingModule demonstrates the three kinds of printing: Chisel printf
// (fires during simulation), Scala println inside the module body (fires
// once, during generation/elaboration), and println inside the tester.
class PrintingModule extends Module {
val io = IO(new Bundle {
val in = Input(UInt(4.W))
val out = Output(UInt(4.W))
})
io.out := io.in
printf("Print during simulation: Input is %d\n", io.in)
// chisel printf has its own string interpolator too
printf(p"Print during simulation: IO is $io\n")
println(s"Print during generation: Input is ${io.in}")
}
// Tester: drive the input, step the clock (simulation printf fires each
// cycle), then print the peeked value from the test harness.
class PrintingModuleTester(c: PrintingModule) extends PeekPokeTester(c) {
poke(c.io.in, 3)
step(5) // circuit will print
println(s"Print during testing: Input is ${peek(c.io.in)}")
}
chisel3.iotesters.Driver( () => new PrintingModule ) { c => new PrintingModuleTester(c) }
| 2.1_first_module.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 01 - Introduction to seismic modelling
#
# This notebook is the first in a series of tutorials highlighting various aspects of seismic inversion based on Devito operators. In this first example we aim to highlight the core ideas behind seismic modelling, where we create a numerical model that captures the processes involved in a seismic survey. This forward model will then form the basis for further tutorials on the implementation of inversion processes using Devito operators.
#
# ## Modelling workflow
#
# The core process we are aiming to model is a seismic survey, which consists of two main components:
#
# - **Source** - A source is positioned at a single or a few physical locations where artificial pressure is injected into the domain we want to model. In the case of land survey, it is usually dynamite blowing up at a given location, or a vibroseis (a vibrating engine generating continuous sound waves). For a marine survey, the source is an air gun sending a bubble of compressed air into the water that will expand and generate a seismic wave.
# - **Receiver** - A set of microphones or hydrophones are used to measure the resulting wave and create a set of measurements called a *Shot Record*. These measurements are recorded at multiple locations, and usually at the surface of the domain or at the bottom of the ocean in some marine cases.
#
# In order to create a numerical model of a seismic survey, we need to solve the wave equation and implement source and receiver interpolation to inject the source and record the seismic wave at sparse point locations in the grid.
#
#
# <img src='./survey-ship-diagram.png' width=400>
# ## The acoustic seismic wave equation
# The acoustic wave equation for the square slowness $m$, defined as $m=\frac{1}{c^2}$, where $c$ is the speed of sound in the given physical media, and a source $q$ is given by:
#
# \begin{cases}
# &m \frac{d^2 u(x,t)}{dt^2} - \nabla^2 u(x,t) = q \ \text{in } \Omega \\
# &u(.,t=0) = 0 \\
# &\frac{d u(x,t)}{dt}|_{t=0} = 0
# \end{cases}
#
# with the zero initial conditions to guarantee uniqueness of the solution.
# The boundary conditions are Dirichlet conditions:
# \begin{equation}
# u(x,t)|_\delta\Omega = 0
# \end{equation}
#
# where $\delta\Omega$ is the surface of the boundary of the model $\Omega$.
#
#
# # Finite domains
#
# The last piece of the puzzle is the computational limitation. In the field, the seismic wave propagates in every direction to an "infinite" distance. However, solving the wave equation in a mathematically (or discretely) infinite domain is not feasible. In order to compensate, Absorbing Boundary Conditions (ABC) or Perfectly Matched Layers (PML) are required to mimic an infinite domain. These two methods allow us to approximate an infinite medium by damping and absorbing the waves at the limit of the domain to avoid reflections.
#
# The simplest of these methods is the absorbing damping mask. The core idea is to extend the physical domain and to add a Sponge mask in this extension that will absorb the incident waves. The acoustic wave equation with this damping mask can be rewritten as:
#
# \begin{cases}
# &m \frac{d^2 u(x,t)}{dt^2} - \nabla^2 u(x,t) + \eta \frac{d u(x,t)}{dt}=q \ \text{in } \Omega \\
# &u(.,0) = 0 \\
# &\frac{d u(x,t)}{dt}|_{t=0} = 0
# \end{cases}
#
# where $\eta$ is the damping mask equal to $0$ inside the physical domain and increasing inside the sponge layer. Multiple choice of profile can be chosen for $\eta$ from linear to exponential.
# # Seismic modelling with devito
#
# We describe here a step by step setup of seismic modelling with Devito in a simple 2D case. We will create a physical model of our domain and define a single source and an according set of receivers to model for the forward model. But first, we initialize some basic utilities.
import numpy as np
# %matplotlib inline
# ## Define the physical problem
#
# The first step is to define the physical model:
#
# - What are the physical dimensions of interest
# - What is the velocity profile of this physical domain
#
# We will create a simple velocity model here by hand for demonstration purposes. This model essentially consists of two layers, each with a different velocity: $1.5km/s$ in the top layer and $2.5km/s$ in the bottom layer. We will use this simple model a lot in the following tutorials, so we will rely on a utility function to create it again later.
# +
#NBVAL_IGNORE_OUTPUT
from examples.seismic import Model, plot_velocity
# Define a physical size
shape = (101, 101) # Number of grid point (nx, nz)
spacing = (10., 10.) # Grid spacing in m. The domain size is now 1km by 1km
origin = (0., 0.) # What is the location of the top left corner. This is necessary to define
# the absolute location of the source and receivers
# Define a velocity profile. The velocity is in km/s
v = np.empty(shape, dtype=np.float32)
v[:, :51] = 1.5  # top layer: 1.5 km/s
v[:, 51:] = 2.5  # bottom layer: 2.5 km/s
# With the velocity and model size defined, we can create the seismic model that
# encapsulates these properties. We also define the size of the absorbing layer as 10 grid points
model = Model(vp=v, origin=origin, shape=shape, spacing=spacing,
space_order=2, nbl=10, bcs="damp")
plot_velocity(model)
# -
# # Acquisition geometry
#
# To fully define our problem setup we also need to define the source that injects the wave to model and the set of receiver locations at which to sample the wavefield. The source time signature will be modelled using a Ricker wavelet defined as
#
# \begin{equation}
# q(t) = (1-2\pi^2 f_0^2 (t - \frac{1}{f_0})^2 )e^{- \pi^2 f_0^2 (t - \frac{1}{f_0})^2}
# \end{equation}
#
# To fully define the source signature we first need to define the time duration for our model and the timestep size, which is dictated by the CFL condition and our grid spacing. Luckily, our `Model` utility provides us with the critical timestep size, so we can fully discretize our model time axis as an array:
# +
from examples.seismic import TimeAxis
t0 = 0. # Simulation starts at t=0
tn = 1000. # Simulation lasts 1 second (1000 ms)
dt = model.critical_dt # Time step from model grid spacing (CFL-limited)
time_range = TimeAxis(start=t0, stop=tn, step=dt)
# -
# The source is positioned at a $20m$ depth and at the middle of the $x$ axis ($x_{src}=500m$), with a peak wavelet frequency of $10Hz$.
# +
#NBVAL_IGNORE_OUTPUT
from examples.seismic import RickerSource
f0 = 0.010 # Source peak frequency is 10Hz (0.010 kHz)
# Single source point (npoint=1) with a Ricker wavelet time signature.
src = RickerSource(name='src', grid=model.grid, f0=f0,
npoint=1, time_range=time_range)
# First, position source centrally in all dimensions, then set depth
src.coordinates.data[0, :] = np.array(model.domain_size) * .5
src.coordinates.data[0, -1] = 20. # Depth is 20m
# We can plot the time signature to see the wavelet
src.show()
# -
# Similarly to our source object, we can now define our receiver geometry as a symbol of type `Receiver`. It is worth noting here that both utility classes, `RickerSource` and `Receiver` are thin wrappers around the Devito's `SparseTimeFunction` type, which encapsulates sparse point data and allows us to inject and interpolate values into and out of the computational grid. As we have already seen, both types provide a `.coordinates` property to define the position within the domain of all points encapsulated by that symbol.
#
# In this example we will position receivers at the same depth as the source, every $10m$ along the x axis. The `rec.data` property will be initialized, but left empty, as we will compute the receiver readings during the simulation.
# +
#NBVAL_IGNORE_OUTPUT
from examples.seismic import Receiver
# Create symbol for 101 receivers
rec = Receiver(name='rec', grid=model.grid, npoint=101, time_range=time_range)
# Prescribe even spacing for receivers along the x-axis
rec.coordinates.data[:, 0] = np.linspace(0, model.domain_size[0], num=101)
rec.coordinates.data[:, 1] = 20. # Depth is 20m, same as the source
# We can now show the source and receivers within our domain:
# Red dot: Source location
# Green dots: Receiver locations (every 4th point)
plot_velocity(model, source=src.coordinates.data,
receiver=rec.coordinates.data[::4, :])
# -
# # Finite-difference discretization
#
# Devito is a finite-difference DSL that solves the discretized wave-equation on a Cartesian grid. The finite-difference approximation is derived from Taylor expansions of the continuous field after removing the error term.
#
# ## Time discretization
#
# We only consider the second order time discretization for now. From the Taylor expansion, the second order discrete approximation of the second order time derivative is:
# \begin{equation}
# \begin{aligned}
# \frac{d^2 u(x,t)}{dt^2} = \frac{\mathbf{u}(\mathbf{x},\mathbf{t+\Delta t}) - 2 \mathbf{u}(\mathbf{x},\mathbf{t}) + \mathbf{u}(\mathbf{x},\mathbf{t-\Delta t})}{\mathbf{\Delta t}^2} + O(\mathbf{\Delta t}^2).
# \end{aligned}
# \end{equation}
#
# where $\mathbf{u}$ is the discrete wavefield, $\mathbf{\Delta t}$ is the discrete
# time-step (distance between two consecutive discrete time points) and $O(\mathbf{\Delta
# t}^2)$ is the discretization error term. The discretized approximation of the
# second order time derivative is then given by dropping the error term. This derivative is represented in Devito by `u.dt2` where u is a `TimeFunction` object.
#
# ## Spatial discretization
#
# We define the discrete Laplacian as the sum of the second order spatial
# derivatives in the three dimensions:
# \begin{equation}
# \begin{aligned}
# \Delta \mathbf{u}(\mathbf{x},\mathbf{y},\mathbf{z},\mathbf{t})= \sum_{j=1}^{j=\frac{k}{2}} \Bigg[\alpha_j \Bigg(&
# \mathbf{u}(\mathbf{x+jdx},\mathbf{y},\mathbf{z},\mathbf{t})+\mathbf{u}(\mathbf{x-jdx},\mathbf{y},\mathbf{z},\mathbf{t}) + \\
# &\mathbf{u}(\mathbf{x},\mathbf{y+jdy},\mathbf{z},\mathbf{t})+\mathbf{u}(\mathbf{x},\mathbf{y-jdy},\mathbf{z}\mathbf{t}) + \\
# &\mathbf{u}(\mathbf{x},\mathbf{y},\mathbf{z+jdz},\mathbf{t})+\mathbf{u}(\mathbf{x},\mathbf{y},\mathbf{z-jdz},\mathbf{t})\Bigg) \Bigg] + \\
# &3\alpha_0 \mathbf{u}(\mathbf{x},\mathbf{y},\mathbf{z},\mathbf{t}).
# \end{aligned}
# \end{equation}
#
# This derivative is represented in Devito by `u.laplace` where u is a `TimeFunction` object.
#
# ## Wave equation
#
# With the space and time discretization defined, we can fully discretize the wave-equation with the combination of time and space discretizations and obtain the following second order in time and $k^{th}$ order in space discrete stencil to update one grid point at position $\mathbf{x}, \mathbf{y},\mathbf{z}$ at time $\mathbf{t}$, i.e.
# \begin{equation}
# \begin{aligned}
# \mathbf{u}(\mathbf{x},\mathbf{y},\mathbf{z},\mathbf{t+\Delta t}) = &2\mathbf{u}(\mathbf{x},\mathbf{y},\mathbf{z},\mathbf{t}) - \mathbf{u}(\mathbf{x},\mathbf{y}, \mathbf{z},\mathbf{t-\Delta t}) +\\
# & \frac{\mathbf{\Delta t}^2}{\mathbf{m(\mathbf{x},\mathbf{y},\mathbf{z})}} \Big(\Delta \mathbf{u}(\mathbf{x},\mathbf{y},\mathbf{z},\mathbf{t}) + \mathbf{q}(\mathbf{x},\mathbf{y},\mathbf{z},\mathbf{t}) \Big).
# \end{aligned}
# \end{equation}
# +
# In order to represent the wavefield u and the square slowness we need symbolic objects
# corresponding to time-space-varying field (u, TimeFunction) and
# space-varying field (m, Function)
from devito import TimeFunction
# Define the wavefield with the size of the model and the time dimension
u = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=2)
# We can now write the PDE: m * u_tt - laplacian(u) + damp * u_t,
# where the damping term implements the absorbing sponge layer.
pde = model.m * u.dt2 - u.laplace + model.damp * u.dt
# The PDE representation is as on paper
pde
# +
# This discrete PDE can be solved in a time-marching way updating u(t+dt) from the previous time step
# Devito has a shortcut for u(t+dt) which is u.forward. We can then rewrite the PDE as
# a time marching updating equation known as a stencil using customized SymPy functions
from devito import Eq, solve
stencil = Eq(u.forward, solve(pde, u.forward))
# -
# # Source injection and receiver interpolation
#
# With a numerical scheme to solve the homogenous wave equation, we need to add the source to introduce seismic waves, and to implement the measurement operator (receiver interpolation). This operation is linked to the discrete scheme and needs to be done at the proper time step. The semi-discretized in time wave equation with a source reads:
#
# \begin{equation}
# \begin{aligned}
# \mathbf{u}(\mathbf{x},\mathbf{y},\mathbf{z},\mathbf{t+\Delta t}) = &2\mathbf{u}(\mathbf{x},\mathbf{y},\mathbf{z},\mathbf{t}) - \mathbf{u}(\mathbf{x},\mathbf{y}, \mathbf{z},\mathbf{t-\Delta t}) +\\
# & \frac{\mathbf{\Delta t}^2}{\mathbf{m(\mathbf{x},\mathbf{y},\mathbf{z})}} \Big(\Delta \mathbf{u}(\mathbf{x},\mathbf{y},\mathbf{z},\mathbf{t}) + \mathbf{q}(\mathbf{x},\mathbf{y},\mathbf{z},\mathbf{t}) \Big).
# \end{aligned}
# \end{equation}
#
# It shows that in order to update $\mathbf{u}$ at time $\mathbf{t+\Delta t}$ we have to inject the value of the source term $\mathbf{q}$ of time $\mathbf{t}$. In Devito, it corresponds the update of $u$ at index $t+1$ (t = time implicitly) with the source of time $t$.
# On the receiver side, the problem is easier, as it only requires recording the data at the given time step $t$ for the receiver at time $time=t$.
#
# +
# Finally we define the source injection and receiver read function to generate the corresponding code
src_term = src.inject(field=u.forward, expr=src * dt**2 / model.m)
# Create interpolation expression for receivers
rec_term = rec.interpolate(expr=u.forward)
# -
# # Devito operator and solve
# After constructing all the necessary expressions for updating the wavefield, injecting the source term and interpolating onto the receiver points, we can now create the Devito operator that will generate the C code at runtime. When creating the operator, Devito's two optimization engines will log which performance optimizations have been performed:
# * **DSE:** The Devito Symbolics Engine will attempt to reduce the number of operations required by the kernel.
# * **DLE:** The Devito Loop Engine will perform various loop-level optimizations to improve runtime performance.
#
# **Note**: The argument `subs=model.spacing_map` causes the operator to substitute values for our current grid spacing into the expressions before code generation. This reduces the number of floating point operations executed by the kernel by pre-evaluating certain coefficients.
# +
#NBVAL_IGNORE_OUTPUT
from devito import Operator
op = Operator([stencil] + src_term + rec_term, subs=model.spacing_map)
# -
# Now we can execute the created operator for a number of timesteps. We specify the number of timesteps to compute with the keyword `time` and the timestep size with `dt`.
#NBVAL_IGNORE_OUTPUT
op(time=time_range.num-1, dt=model.critical_dt)
# After running our operator kernel, the data associated with the receiver symbol `rec.data` has now been populated due to the interpolation expression we inserted into the operator. This allows us to visualize the shot record:
# +
#NBVAL_IGNORE_OUTPUT
from examples.seismic import plot_shotrecord
plot_shotrecord(rec.data, model, t0, tn)
# -
assert np.isclose(np.linalg.norm(rec.data), 370, rtol=1)
| examples/seismic/tutorials/01_modelling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Hindsight Experience Replay (HER) with Highway environment
# In this notebook we will be using the **Highway-env**
# - [Documentation](https://highway-env.readthedocs.io/en/latest/)
# ### HER
# HER is a method wrapper that works with off-policy methods like **DQN, SAC, TD3, DDPG**
# setup the Highway environment by uncommenting the line below!!
# +
# pip install --user git+https://github.com/eleurent/highway-env
# +
## Imports
import gym
import highway_env
import numpy as np
from stable_baselines import HER, SAC, DDPG, TD3
from stable_baselines.ddpg import NormalActionNoise
# +
# env = gym.make("highway-v0")
# -
# ## DDPG
# setting up the environment
env = gym.make("parking-v0")
# +
# Create 4 artificial transitions per real transition
n_sampled_goal = 4
# DDPG Hyperparams:
n_actions = env.action_space.shape[0]
noise_std = 0.2
action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=noise_std * np.ones(n_actions))
model = HER('MlpPolicy', env, DDPG, n_sampled_goal=n_sampled_goal,
goal_selection_strategy='future',
verbose=1, buffer_size=int(1e6),
actor_lr=1e-3, critic_lr=1e-3, action_noise=action_noise,
gamma=0.95, batch_size=256,
policy_kwargs=dict(layers=[256, 256, 256]))
# let the model train
model.learn(int(2e5))
# save the model
model.save('her_DDPG_highway')
# +
model = HER.load('her_DDPG_highway', env=env)
obs = env.reset()
# Evaluate the agent
episode_reward = 0
for _ in range(1000):
action, _ = model.predict(obs)
obs, reward, done, info = env.step(action)
env.render()
episode_reward += reward
if done or info.get('is_success', False):
print("Reward:", episode_reward, "Success?", info.get('is_success', False))
episode_reward = 0.0
obs = env.reset()
# -
# ## Lets look at the trained agent!!
# +
# lets open and look at the video saved in our system while running the previous tab
# import lib to display
from IPython.display import Video
#locate the file
Video("video/highway-env 2020-12-19 00-05-08.mp4")
| Stable Baselines/.ipynb_checkpoints/Highway Env-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import networkx as nx
G = nx.Graph()
G.add_node('A', pos = (4,-2))
G.add_node('B', pos = (8,0))
G.add_node('C', pos = (7,6))
G.add_node('D', pos = (3,4))
G.add_node('E', pos = (1,7))
G.add_node('F', pos = (0,-2))
G.add_node('G', pos = (5,8))
G.add_edge('A', 'B', weight=1)
G.add_edge('A', 'C', weight=2)
G.add_edge('A', 'D', weight=1)
G.add_edge('B', 'C', weight=1)
G.add_edge('B', 'E', weight=2)
G.add_edge('C', 'E', weight=2)
G.add_edge('C', 'F', weight=2)
G.add_edge('C', 'D', weight=1)
G.add_edge('D', 'F', weight=10)
G.add_edge('E', 'F', weight=25)
G.add_edge('E', 'G', weight=14)
G.add_edge('F', 'G', weight=4)
# -
options = {
'node_color': 'blue',
'node_size': 1000,
'width': 3,
'arrowstyle': '-|>',
'arrowsize': 12,
}
pos = nx.get_node_attributes(G,'pos')
labels = nx.get_edge_attributes(G,'weight')
nx.draw_networkx_edge_labels(G,pos,edge_labels=labels)
nx.draw_networkx(G, pos , **options,with_labels=True) #TODO pone los nombres de los nodos.
#FIXME no dibuja los edges directed
# Shortest A -> G path computed two ways; both runs use Bellman-Ford.
x = nx.shortest_path(G, 'A', 'G', weight='weight', method='bellman-ford')
y = nx.bellman_ford_path(G, 'A', 'G', weight='weight')
# NOTE(fix): the original labels were misleading — `x` was labelled "shortest"
# and `y` was labelled "dijsktra" [sic] even though `y` comes from
# bellman_ford_path. Print the actual function used for each result.
print("Resultado con shortest_path (method='bellman-ford'): ", x)
print("Resultado con bellman_ford_path: ", y)
| tarea 6/notebook_tarea4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# from https://github.com/socrata/dev.socrata.com/blob/39c6581986466edb5e7f72f5beea5ce69238f8de/snippets/pandas.py
import pandas as pd
from sodapy import Socrata
# Unauthenticated client only works with public data sets. Note 'None'
# in place of application token, and no username or password:
client = Socrata("data.cityofchicago.org", None)
# First 50000 results, returned as JSON from API
# Connverted to Python list of dictionaries by sodapy.
# Column names converted to snake case, special chars removed
# Dates and location formatted
results = client.get("9ksk-na4q", limit=50000)
# Convert to pandas DataFrame
carts = pd.DataFrame.from_records(results)
# -
# Download the remaining rows; the API returns at most 50000 records per call,
# so keep paging with `offset` until a call comes back empty.
start = 50000
while results:
    print(start)
    results = client.get("9ksk-na4q", limit=50000, offset=start)
    # NOTE(fix): DataFrame.append was deprecated and removed in pandas 2.0;
    # pd.concat is the supported equivalent on all pandas versions.
    carts = pd.concat([carts, pd.DataFrame.from_records(results)])
    start += 50000
# Drop rows with missing data
carts.dropna(subset=["latitude", "longitude", "creation_date"], inplace=True)
# Filter by status
carts = carts[carts.status.isin(["Completed", "Open"])]
# Convert latitude & longitude to floats
carts.latitude = carts.latitude.astype(float)
carts.longitude = carts.longitude.astype(float)
# +
import os.path
root_path = os.path.dirname(os.getcwd())
# Save result
carts.to_csv(os.path.join(root_path, "DATA/garbage_carts.csv"), index=False)
# -
| CODE/14_garbage_download.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import date
import pandas_datareader as pdr
start='2015-01-01'
end= date.isoformat(date.today())
dataset=pdr.DataReader('AAPL','yahoo',start,end)
dataset=dataset.reset_index()
dataset=dataset.drop(['Date','Adj Close'],axis=1)
plt.plot(dataset.Close)
#moving avg 100
mv100=dataset.Close.rolling(100).mean()
mv200=dataset.Close.rolling(200).mean()
#plotting the moving average 100
plt.figure(figsize=(12,6))
plt.plot(dataset.Close)
plt.plot(mv100,'r')
plt.plot(mv200,'g')
dataset.shape
training_set= pd.DataFrame(dataset["Close"][0:int(len(dataset)*.70)])
testing_set= pd.DataFrame(dataset["Close"][int(len(dataset)*.70): int(len(dataset))])
training_set.shape
#remember that for LSTM model we have to ensure that the data is scaled between 0&1
from sklearn.preprocessing import MinMaxScaler
sc=MinMaxScaler(feature_range=(0,1))
dt_train_sc=sc.fit_transform(training_set)
x_train=[]
y_train=[]
for i in range(70,dt_train_sc.shape[0]):
x_train.append(dt_train_sc[i-70:i])
y_train.append(dt_train_sc[i,0])
x_train,y_train=np.array(x_train),np.array(y_train)
x_train.shape
from keras.models import Sequential
from keras.layers import Dense,LSTM,Dropout
# +
#defining the model
regressor=Sequential()
regressor.add(LSTM(units=50,activation='relu',return_sequences=True,input_shape=(x_train.shape[1],1)))
regressor.add(Dropout(0.3))
# `units` is the dimensionality of each LSTM layer's output (the number of LSTM cells in the layer)
regressor.add(LSTM(units=60,activation='relu',return_sequences=True))
regressor.add(Dropout(0.3))
regressor.add(LSTM(units=80,activation='relu',return_sequences=True))
regressor.add(Dropout(0.3))
regressor.add(LSTM(units=120,activation='relu'))
regressor.add(Dropout(0.3))
regressor.add(Dense(units=25))
regressor.add(Dense(units=1))
# -
regressor.summary()
regressor.compile(optimizer='adam',loss='mean_squared_error') #adam is the optimizer generally used for big data
# the loss value roughly tracks the accuracy of the model, i.e. how much error remains after each training cycle
#training the model
import tensorflow as tf
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3,restore_best_weights=True)
regressor.fit(x_train,y_train,epochs=60,batch_size=32,callbacks=[callback])
sc_test=MinMaxScaler(feature_range=(0,1))
prev_data=training_set.tail(70)
f_test=prev_data.append(testing_set,ignore_index=True)
f_test_sc=sc_test.fit_transform(f_test)
x_test=[]
y_test=[]
for i in range(70,f_test_sc.shape[0]):
x_test.append(f_test_sc[i-70:i])
y_test.append(f_test_sc[i])
x_test,y_test=np.array(x_test),np.array(y_test)
# +
#predicting the data
y_predicted= regressor.predict(x_test)
# -
y_test=sc_test.inverse_transform(y_test)
y_predicted=sc_test.inverse_transform(y_predicted)
plt.figure(figsize=(12,6))
plt.plot(y_test,'b',label="TRUE PRICE")
plt.plot(y_predicted,'r',label="Predicted Price")
plt.xlabel("Time")
plt.ylabel("Price")
plt.legend()
plt.show()
regressor.save('stock_model1.h5')
| LSTM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import getpass
import requests
import os

# Vault connection settings: prefer the environment, fall back to a prompt.
VAULT_ADDR = os.getenv('VAULT_ADDR') or getpass.getpass("Enter Vault URL: ")
VAULT_SECRET_PATH = "/secrets/data/incident-response/jupyter-notebooks"
# NOTE(fix): the original `open(...).read()` raised FileNotFoundError when
# ~/.vault-token did not exist (and never closed the file handle). Read the
# token file defensively with a context manager, then fall back to the
# environment variable or an interactive prompt.
try:
    with open(os.path.expanduser('~/.vault-token')) as token_file:
        VAULT_TOKEN = token_file.read()
except OSError:
    VAULT_TOKEN = None
VAULT_TOKEN = VAULT_TOKEN or os.getenv('VAULT_TOKEN') or getpass.getpass("Enter Vault token: ")
def getVaultSecrets(VAULT_ADDR, VAULT_SECRET_PATH, VAULT_TOKEN):
    """Fetch the secret payload stored at VAULT_SECRET_PATH from Vault.

    NOTE(fix): the first parameter was named ``VAULT_ADD``, so the URL below
    silently resolved ``VAULT_ADDR`` from the module-level global and the
    address passed by the caller was ignored. Renaming the parameter makes
    the function use its argument (the call site passes positionally, so it
    stays compatible). The debug ``print(url)`` was also removed.
    """
    auth_header = {
        "X-Vault-Token": VAULT_TOKEN
    }
    url = f"{VAULT_ADDR}/v1{VAULT_SECRET_PATH}"
    r = requests.get(url=url, headers=auth_header)
    # KV v2 responses nest the secret key/values under data.data.
    return r.json()['data']['data']
def getVTIresults(VTI_API_KEY, sha256_file_hash):
    """Look up a file report on the VirusTotal v3 API by SHA-256 hash."""
    headers = {
        "x-apikey": VTI_API_KEY
    }
    endpoint = f"https://www.virustotal.com/api/v3/files/{sha256_file_hash}"
    response = requests.get(url=endpoint, headers=headers)
    return response.json()
vault_secrets = getVaultSecrets(VAULT_ADDR, VAULT_SECRET_PATH, VAULT_TOKEN)
sha256_file_hash = input("Enter SHA256 file hash: ")
vti_results = getVTIresults(vault_secrets['vti-api-key'], sha256_file_hash)
print (f"Malicious score: {vti_results['data']['attributes']['last_analysis_stats']['malicious']}/{vti_results['data']['attributes']['last_analysis_stats']['malicious'] + vti_results['data']['attributes']['last_analysis_stats']['undetected']}")
# -
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cs221-pommerman
# language: python
# name: cs221-pommerman
# ---
# # Pommerman Demo.
#
# This notebook demonstrates how to train Pommerman agents. Please let us know at <EMAIL> if you run into any issues.
# +
# Two-layer fully-connected network specification used by the agents below.
network_spec = [
    dict(type='dense', size=64),
    dict(type='dense', size=64)
]
# -
# NOTE(fix): removed the stray `network_spec.apply()` call — `network_spec`
# is a plain list, which has no `.apply` method, so that line always raised
# AttributeError.
# +
import os
import sys
import numpy as np
import time
from pommerman.agents import SimpleAgent, RandomAgent, PlayerAgent, BaseAgent
from pommerman.configs import ffa_v0_fast_env
from pommerman.envs.v0 import Pomme
from pommerman.characters import Bomber
from pommerman import utility
# -
# # Random agents
#
# The following codes instantiates the environment with four random agents who take actions until the game is finished. (This will be a quick game.)
# Instantiate the environment
config = ffa_v0_fast_env()
env = Pomme(**config["env_kwargs"])
# Add four random agents
agents = {}
for agent_id in range(4):
agents[agent_id] = RandomAgent(config["agent"](agent_id, config["game_type"]))
env.set_agents(list(agents.values()))
env.set_init_game_state(None)
# +
# Seed and reset the environment
env.seed(0)
obs = env.reset()
# Run the random agents until we're done
done = False
while not done:
env.render()
actions = env.act(obs)
obs, reward, done, info = env.step(actions)
env.render(close=True)
env.close()
print(info)
# -
# # Human Agents
#
# The following code runs the environment with 3 random agents and one agent with human input (use the arrow keys on your keyboard). This can also be called on the command line with:
#
# `python run_battle.py --agents=player::arrows,random::null,random::null,random::null --config=PommeFFACompetition-v0`
#
# You can also run this with SimpleAgents by executing:
#
# `python run_battle.py --agents=player::arrows,test::agents.SimpleAgent,test::agents.SimpleAgent,test::agents.SimpleAgent --config=PommeFFACompetition-v0`
# +
# Instantiate the environment
config = ffa_v0_fast_env()
env = Pomme(**config["env_kwargs"])
# Add 3 random agents
agents = {}
for agent_id in range(3):
agents[agent_id] = RandomAgent(config["agent"](agent_id, config["game_type"]))
# Add human agent
agents[3] = PlayerAgent(config["agent"](agent_id, config["game_type"]), "arrows")
env.set_agents(list(agents.values()))
env.set_init_game_state(None)
# +
# Seed and reset the environment
env.seed(0)
obs = env.reset()
# Run the agents until we're done
done = False
while not done:
env.render()
actions = env.act(obs)
obs, reward, done, info = env.step(actions)
env.render(close=True)
env.close()
# Print the result
print(info)
# -
# # Training an Agent
#
# The following code uses Tensorforce to train a PPO agent. This is in the train_with_tensorforce.py module as well.
# Make sure you have tensorforce installed: pip install tensorforce
from tensorforce.agents import PPOAgent
from tensorforce.execution import Runner
from tensorforce.contrib.openai_gym import OpenAIGym
# +
def make_np_float(feature):
    """Convert *feature* (scalar sequence or array) to a float32 numpy array."""
    return np.array(feature, dtype=np.float32)

def featurize(obs):
    """Flatten a Pommerman observation dict into a single float32 vector.

    Missing teammate/enemy slots are encoded as -1; enemies are padded to
    exactly three entries.
    """
    # Flatten the three board-shaped planes.
    planes = [
        obs[key].reshape(-1).astype(np.float32)
        for key in ("board", "bomb_blast_strength", "bomb_life")
    ]
    # Scalar features, each wrapped as a small float32 array.
    scalars = [
        make_np_float(obs["position"]),
        make_np_float([obs["ammo"]]),
        make_np_float([obs["blast_strength"]]),
        make_np_float([obs["can_kick"]]),
    ]
    mate = obs["teammate"]
    mate_id = -1 if mate is None else mate.value
    enemy_ids = [enemy.value for enemy in obs["enemies"]]
    enemy_ids += [-1] * (3 - len(enemy_ids))  # pad to three (no-op if >= 3)
    return np.concatenate(
        planes + scalars + [make_np_float([mate_id]), make_np_float(enemy_ids)]
    )
class TensorforceAgent(BaseAgent):
    # Placeholder agent: its actions are supplied externally by the
    # Tensorforce runner (see WrappedEnv.execute), so act() decides nothing.
    def act(self, obs, action_space):
        pass
# +
# Instantiate the environment
config = ffa_v0_fast_env()
env = Pomme(**config["env_kwargs"])
env.seed(0)
# Create a Proximal Policy Optimization agent
agent = PPOAgent(
states=dict(type='float', shape=env.observation_space.shape),
actions=dict(type='int', num_actions=env.action_space.n),
network=[
dict(type='dense', size=64),
dict(type='dense', size=64)
],
batching_capacity=1000,
step_optimizer=dict(
type='adam',
learning_rate=1e-4
)
)
# Add 3 random agents
agents = []
for agent_id in range(3):
agents.append(SimpleAgent(config["agent"](agent_id, config["game_type"])))
# Add TensorforceAgent
agent_id += 1
agents.append(TensorforceAgent(config["agent"](agent_id, config["game_type"])))
env.set_agents(agents)
env.set_training_agent(agents[-1].agent_id)
env.set_init_game_state(None)
# -
class WrappedEnv(OpenAIGym):
    """Adapt a Pommerman environment to the Tensorforce OpenAIGym interface."""

    def __init__(self, gym, visualize=False):
        self.gym = gym
        self.visualize = visualize
        self.gym_id = gym.gym_id

    def execute(self, action):
        # Optionally draw the board before stepping the simulation.
        if self.visualize:
            self.gym.render()
        learner_action = self.unflatten_action(action=action)
        observations = self.gym.get_observations()
        # Let every scripted agent pick its move, then slot the learner's
        # action into its own position before stepping.
        joint_actions = self.gym.act(observations)
        joint_actions.insert(self.gym.training_agent, learner_action)
        state, reward, terminal, _ = self.gym.step(joint_actions)
        idx = self.gym.training_agent
        # Returned in (state, terminal, reward) order.
        return featurize(state[idx]), terminal, reward[idx]

    def reset(self):
        # assumes slot 3 holds the training agent -- TODO confirm
        return featurize(self.gym.reset()[3])
def episode_finished(base_runner, task_id, interval=100):
    """Runner callback: print a summary every *interval* episodes.

    Reads ``global_episode``, ``episode_rewards`` and ``episode_times`` from
    *base_runner*. ``task_id`` is unused but required by the callback
    signature. Always returns True so the runner keeps going.
    """
    # Only report on episode numbers that fall on the interval boundary.
    if base_runner.global_episode % interval:
        return True
    # NOTE(fix): dropped the dead `end_episode`/`start_episode` locals —
    # both were assigned the same value and never read.
    print('=========================')
    print('Episode: {}'.format(base_runner.global_episode))
    print('Episode rewards', base_runner.episode_rewards[-interval:])
    print('Episode times', base_runner.episode_times[-interval:])
    print('=========================')
    return True
# Instantiate and run the environment for 5 episodes.
wrapped_env = WrappedEnv(env, True)
runner = Runner(agent=agent, environment=wrapped_env)
runner.run(episodes=1, max_episode_timesteps=2000, episode_finished=lambda a, b: episode_finished(a, b, 500))
print("Finished All Run, Stats: ", runner.episode_rewards, runner.episode_timesteps, runner.episode_times)
try:
runner.close()
except AttributeError as e:
pass
# # Distributed Training
from tensorforce.execution import ThreadedRunner
class WrappedEnv(OpenAIGym):
    # Tracing variant of the earlier wrapper (prints on execute/reset); note
    # gym_id is a literal 0 here instead of being read from the wrapped env.
    def __init__(self, gym, visualize=False):
        self.gym = gym
        self.visualize = visualize
        self.gym_id = 0
    def execute(self, action):
        print('Executing...')
        if self.visualize:
            self.gym.render()
        # Collect every agent's action, then insert the learner's action at
        # its slot before stepping the environment.
        actions = self.unflatten_action(action=action)
        obs = self.gym.get_observations()
        all_actions = self.gym.act(obs)
        all_actions.insert(self.gym.training_agent, actions)
        state, reward, terminal, _ = self.gym.step(all_actions)
        agent_state = featurize(state[self.gym.training_agent])
        agent_reward = reward[self.gym.training_agent]
        # Returned in (state, terminal, reward) order.
        return agent_state, terminal, agent_reward
    def reset(self):
        print('Resetting....')
        obs = self.gym.reset()
        # assumes slot 3 holds the training agent -- TODO confirm
        agent_obs = featurize(obs[3])
        return agent_obs
# +
def episode_finished(stats):
    """Log one finished episode from a worker thread; always keep running."""
    message = "Thread {t}. Finished episode {ep} after {ts} timesteps. Reward {r}".format(
        t=stats['thread_id'],
        ep=stats['episode'],
        ts=stats['timestep'],
        r=stats['episode_reward'],
    )
    print(message)
    return True
def summary_report(r):
    """Print aggregate runner statistics: steps/sec and reward summaries.

    Reads ``global_step``, ``global_episode``, ``start_time`` and
    ``episode_rewards`` from the runner *r*.
    """
    et = time.time()
    print('=' * 40)
    print('Current Step/Episode: {}/{}'.format(r.global_step, r.global_episode))
    print('SPS: {}'.format(r.global_step / (et - r.start_time)))
    reward_list = r.episode_rewards
    if len(reward_list) > 0:
        print('Max Reward: {}'.format(np.max(reward_list)))
        # NOTE(fix): divide by the number of rewards actually present in the
        # window; the old code divided by a constant 500/100 even when fewer
        # episodes had completed, understating the averages.
        last_500 = reward_list[-500:]
        last_100 = reward_list[-100:]
        print("Average of last 500 rewards: {}".format(sum(last_500) / len(last_500)))
        print("Average of last 100 rewards: {}".format(sum(last_100) / len(last_100)))
    print('=' * 40)
# +
wrapped_env = WrappedEnv(env, True)
threaded_runner = ThreadedRunner(
[agent],
[wrapped_env],
repeat_actions=1,
save_path='save_path/',
save_episodes=2
)
# -
print("Starting {agent} for Environment '{env}'".format(agent=agent, env=wrapped_env))
threaded_runner.run(episodes=1, summary_interval=1, episode_finished=episode_finished, summary_report=summary_report)
try:
threaded_runner.close()
except AttributeError as e:
pass
| playground/notebooks/Playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.datasets import load_iris
iris = load_iris()
print(iris.data)
# print(iris.feature_names)
# print(iris.target)
print(iris.target_names)
X = iris.data #features
y = iris.target # response
# Using KNN
from sklearn.neighbors import KNeighborsClassifier
# Making an instance of the scikit-learn model(estimator)
knn = KNeighborsClassifier(n_neighbors=1)
# Model Training
knn.fit(X, y)
# Predicting the response for one new, unseen sample.
a = [3, 4, 5, 2]
# NOTE(fix): scikit-learn estimators expect a 2-D array of shape
# (n_samples, n_features); passing the bare 1-D list raises a ValueError
# on modern versions, so the single sample is wrapped in a list.
knn.predict([a])
# +
# Using Logistic Regression on the same features/labels.
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
logreg = LogisticRegression()
logreg.fit(X, y)
# NOTE(fix): predict expects a 2-D (n_samples, n_features) array, so the
# single sample `a` is wrapped in a list.
logreg.predict([a])
# -
# Evaluating the model
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
# +
y_pred = logreg.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred))
# using Logistic Regression
# -
# Using KNN with k = 5
knn = KNeighborsClassifier(n_neighbors = 5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred))
# +
# Checking the accuracy for different values of k
k_range = range(1,26)
score = []
for k in k_range:
knn = KNeighborsClassifier(n_neighbors = k)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
score.append(metrics.accuracy_score(y_test, y_pred))
# +
import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(k_range, score)
plt.xlabel('Value of k for knn')
plt.ylabel('Testing accuracy')
# -
# Printing RMS error
import numpy as np
print(np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# K-Fold cross validation on iris dataset
from sklearn.cross_validation import cross_val_score
knn = KNeighborsClassifier(n_neighbors=5)
score = cross_val_score(knn, X, y, cv=10, scoring = 'accuracy')
print(score)
# mean accuracy
print(score.mean())
# +
# searching for an optimal value of k for KNN
k_range = range(1,31)
k_scores = []
for k in k_range:
knn = KNeighborsClassifier(n_neighbors=k)
score = cross_val_score(knn, X, y, scoring = 'accuracy')
k_scores.append(score.mean())
# +
import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(k_range, k_scores)
plt.xlabel('value of k for KNN')
plt.ylabel('Cross validation accuracy')
# +
# We can use gridsearchcv to automatically select the best parameter for our model by passing in a dictionary
from sklearn.grid_search import GridSearchCV
k_range =list(range(1, 31))
#print(kk_range)
#create a dictionary which map the values which are to be searched
param_grid = dict(n_neighbors = k_range)
print(param_grid)
grid = GridSearchCV(knn, param_grid, cv=10, scoring = 'accuracy')
grid.fit(X, y) # fit the grid with data
grid.grid_scores_
# -
# Create a list of mean scores
grid_mean_scores = [result.mean_validation_score for result in grid.grid_scores_]
print(grid_mean_scores)
plt.plot(k_range, grid_mean_scores)
plt.xlabel('value of k for KNN')
plt.ylabel('Cross validation accuracy')
# +
# Examine the best model
print(grid.best_score_)
print(grid.best_params_)
print(grid.best_estimator_)
# hence we get the best model among values of k we provided
# -
| Iris dataset sklearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/b15145456/6th-ML-Marathon/blob/main/Day_015_HW.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="sGVimTKk1SRS"
# # [作業目標]
# - 請同學試著使用 pandas.corr() 這個函數來顯示相關係數並加以觀察結果
# - 思考1 : 使用 pandas 有沒有什麼寫法, 可以顯示欄位中最大的幾筆, 以及最小幾筆呢? (Hint: 排序後列出前幾筆/後幾筆)
# - 思考2 : 試著使用散佈圖, 顯示相關度最大/最小的特徵與目標值的關係, 如果圖形不明顯, 是否有調整的方法?
# + [markdown] id="gzIglmbq1SRU"
# # [作業重點]
# - 綜合前幾單元的作法, 試試看是否能夠用繪圖顯示出特徵與目標的相關性
# + id="_VNeLded1SRV"
# 載入需要的套件
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# 設定 data_path
# dir_data = './data/'
# + colab={"base_uri": "https://localhost:8080/"} id="ToyHV2TZ1SRW" outputId="98efa397-9a7b-4ba3-e89d-69eb312b590e"
# 讀取資料檔
# f_app_train = os.path.join(dir_data, 'application_train.csv')
app_train = pd.read_csv('application_train.csv')
app_train.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 338} id="B5BPWFHr1SRX" outputId="9184916c-45dc-44e5-f60e-c320fb45b1b8"
# 將只有兩種值的類別型欄位, 做 Label Encoder, 計算相關係數時讓這些欄位可以被包含在內
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
# 檢查每一個 column
for col in app_train:
if app_train[col].dtype == 'object':
# 如果只有兩種值的類別型欄位
if len(list(app_train[col].unique())) <= 2:
# 就做 Label Encoder, 以加入相關係數檢查
app_train[col] = le.fit_transform(app_train[col])
print(app_train.shape)
app_train.head()
# + id="DusFNYd61SRX"
# 受雇日數為異常值的資料, 另外設一個欄位記錄, 並將異常的日數轉成空值 (np.nan)
app_train['DAYS_EMPLOYED_ANOM'] = app_train["DAYS_EMPLOYED"] == 365243
app_train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True)
# 出生日數 (DAYS_BIRTH) 取絕對值
app_train['DAYS_BIRTH'] = abs(app_train['DAYS_BIRTH'])
# + [markdown] id="2j_7ou-41SRY"
# ### 相關係數
# 一樣,pandas 很貼心地讓我們可以非常容易計算相關係數
# + id="fR_DiS0Y1SRY" colab={"base_uri": "https://localhost:8080/"} outputId="262c087f-db99-427c-fd25-092aadacaf22"
# 觀察相關係數
# 移除NAN,並以遞增排序
app_train_co = app_train.corr()['TARGET'].dropna().sort_values(ascending=True)
app_train_co
# + [markdown] id="88SsCywb1SRZ"
# ## 練習時間
# 列出目標 (TARGET) 與所有欄位之間相關係數,數值最大以及最小各 15 個
#
# 通過相關係數的結果觀察有興趣的欄位與 TARGET 或其他欄位的相關係數,並嘗試找出有趣的訊息
# - 最好的方式當然是畫圖,舉例來說,我們知道 EXT_SOURCE_3 這個欄位和 TARGET 之間的相關係數是 -0.178919 (在已經這個資料集已經是最負的了!),那我們可以 EXT_SOURCE_3 為 x 軸, TARGET 為 y 軸,把資料給畫出來
# + colab={"base_uri": "https://localhost:8080/"} id="NcrX0JE01SRZ" outputId="5ecc1a7f-76de-4759-87c7-e3efa49cb530"
# 顯示相關係數最大 / 最小的各15個欄位名稱
print('Most Positive Correlations:\n', app_train_co.tail(15))
print('\nMost Negative Correlations:\n', app_train_co.head(15))
# + id="UGCwjCH-3KGt" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="89f693d0-cded-42e1-ecb1-8fae0ac62de2"
plt.scatter(app_train['DAYS_EMPLOYED'], app_train['TARGET'])
# + colab={"base_uri": "https://localhost:8080/", "height": 372} id="WgJMJfKzyfxk" outputId="e0124424-3496-4134-8ff2-74325e45cbfc"
app_train.boxplot(by='TARGET', column='DAYS_EMPLOYED')
# + id="YBn2Qutjylxp"
| Day_015_HW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn import preprocessing
import sklearn.svm
from sklearn.model_selection import GridSearchCV
from sklearn.multioutput import MultiOutputRegressor
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import cross_val_score
import numpy as np
from sklearn.ensemble import RandomForestRegressor
import joblib
from sklearn.metrics import r2_score
from matplotlib import pyplot as plt
from sklearn.metrics import mean_squared_error as rmse
# # Procesamiento de la datos
#
# ### Lectura de datos.
# - Dataset(Accidentes Medellin)
# - Dataset(Fechas especiales)
# - Data(Del proceso clustering)
# Load the raw accident records, the special-dates calendar and the
# neighbourhood clustering produced in a previous step.
datos_bruto = pd.read_csv("DatosBrutoCorregidos.csv")
fechas_especiales = pd.read_excel("fechas_especiales.xlsx")
data_clustering = pd.read_excel("clustering.xlsx")
# ### Select the columns to work with.
datos_bruto = datos_bruto[['PERIODO','MES','DIA','HORA','CLASE','LONGITUD','LATITUD','GRAVEDAD','BARRIO']]
data_clustering = data_clustering[['BARRIO','cluster']]
# ### Add the cluster classification and whether the day is special or not.
# Left-join the cluster label (by neighbourhood) and the special-date flag
# (by year/month/day); rows without a match are filled with 0.
joindata = pd.merge(datos_bruto, data_clustering, how='left', left_on=['BARRIO'], right_on=['BARRIO'])
joindata = pd.merge(joindata, fechas_especiales, how='left', left_on=['PERIODO','MES','DIA'], right_on=['PERIODO','MES','DIA'])
joindata = joindata.fillna(0)
joindata = joindata.rename(columns={'PERIODO': 'ANIO'})
joindata = joindata.rename(columns={'cluster': 'CLUSTER'})
# BUG FIX: the original wrote `joindata.head`, which only references the
# bound method object and never calls it; the preview was never rendered.
joindata.head()
# # Data exploration
# +
# Aggregate accident counts per class, per severity, per day+class and per
# day+cluster. `.count()` is applied after fillna(0) above, so any surviving
# column works as the counter; it is then renamed to TOTAL.
grupoCLase = joindata.groupby(['CLASE']).count().reset_index()[['CLASE', 'ANIO']]
grupoCLase = grupoCLase.rename(columns={'ANIO': 'TOTAL'})

grupoGravedad = joindata.groupby(['GRAVEDAD']).count().reset_index()[['GRAVEDAD', 'ANIO']]
grupoGravedad = grupoGravedad.rename(columns={'ANIO': 'TOTAL'})

grupoCLASEDIA = joindata.groupby(['ANIO', 'MES', 'DIA', 'CLASE']).count().reset_index()[['ANIO', 'MES', 'DIA', 'CLASE', 'CLUSTER']]
grupoCLASEDIA = grupoCLASEDIA.rename(columns={'CLUSTER': 'TOTAL'})
# Build a real datetime column from the YYYY*10000 + MM*100 + DD integer.
grupoCLASEDIA['FECHA'] = pd.to_datetime(
    grupoCLASEDIA['ANIO'] * 10000 + grupoCLASEDIA['MES'] * 100 + grupoCLASEDIA['DIA'],
    format='%Y%m%d')

grupoGRAVEDADDIA = joindata.groupby(['ANIO', 'MES', 'DIA', 'GRAVEDAD']).count().reset_index()[['ANIO', 'MES', 'DIA', 'GRAVEDAD', 'CLUSTER']]
grupoGRAVEDADDIA = grupoGRAVEDADDIA.rename(columns={'CLUSTER': 'TOTAL'})
grupoGRAVEDADDIA

grupoCLUSTER = joindata.groupby(['ANIO', 'MES', 'DIA', 'CLUSTER']).count().reset_index()[['ANIO', 'MES', 'DIA', 'CLUSTER', 'GRAVEDAD']]
grupoCLUSTER = grupoCLUSTER.rename(columns={'GRAVEDAD': 'TOTAL'})
grupoCLUSTER['FECHA'] = pd.to_datetime(
    grupoCLUSTER['ANIO'] * 10000 + grupoCLUSTER['MES'] * 100 + grupoCLUSTER['DIA'],
    format='%Y%m%d')

# Keep only the columns used by the plots below.
grupoCLUSTER = grupoCLUSTER[['FECHA', 'CLUSTER', 'TOTAL']]
grupoCLASEDIA = grupoCLASEDIA[['FECHA', 'CLASE', 'TOTAL']]
grupoCLase
# -
# ## Charts
# Bar chart: total accidents per class.
fig, ax = plt.subplots()
ax.set_ylabel('Total')
ax.set_xlabel('Clases')
ax.set_title('ATROPELLOS VS CLASE')
ax.bar(grupoCLase['CLASE'], grupoCLase['TOTAL'].values, label='TOTAL')
ax.legend(loc="upper left", title="Ventas Paises", frameon=False)
ax.tick_params(axis='x', rotation=90)
plt.show()

# Bar chart: total accidents per severity.
fig, ax = plt.subplots()
ax.set_ylabel('Total')
ax.set_xlabel('Gravedad')
ax.set_title('ATROPELLOS VS Gravedad')
ax.bar(grupoGravedad['GRAVEDAD'], grupoGravedad['TOTAL'].values, label='TOTAL')
ax.legend(loc="upper center", title="Gravedad Totales", frameon=False)
ax.tick_params(axis='x', rotation=90)
plt.show()
# +
# Daily totals (first 32 days) for each accident class.
# BUG FIX: the original assigned atropello/caidao/choque twice (dead code)
# and plotted incendio/otro/volcamiento against choque's FECHA column, which
# silently misaligns dates with totals (and breaks outright when the series
# lengths differ). Each series is now drawn on its own date axis.
atropello = grupoCLASEDIA[grupoCLASEDIA['CLASE']=='atropello'][:32]
caidao = grupoCLASEDIA[grupoCLASEDIA['CLASE']=='caida ocupante'][:32]
choque = grupoCLASEDIA[grupoCLASEDIA['CLASE']=='choque'][:32]
incendio = grupoCLASEDIA[grupoCLASEDIA['CLASE']=='incendio']
otro = grupoCLASEDIA[grupoCLASEDIA['CLASE']=='otro'][:32]
volcamiento = grupoCLASEDIA[grupoCLASEDIA['CLASE']=='volcamiento'][:32]
fig, ax = plt.subplots()
ax.set_ylabel('Total')
ax.set_xlabel('Dia')
ax.set_title('CLASE VS DIA')
plt.plot(atropello['FECHA'], atropello['TOTAL'], label='Atropello')
plt.plot(caidao['FECHA'], caidao['TOTAL'], label='Caida ocupante')
plt.plot(choque['FECHA'], choque['TOTAL'], label='Choque')
plt.plot(incendio['FECHA'], incendio['TOTAL'], label='Incendio')
plt.plot(otro['FECHA'], otro['TOTAL'], label='Otro')
plt.plot(volcamiento['FECHA'], volcamiento['TOTAL'], label='volcamiento')
plt.xticks(rotation=90)
ax.legend(loc="upper left", title="Total categoria", frameon=False)
plt.show()
# +
# Daily totals per cluster (first 32 days of each cluster).
# BUG FIX: clusters 2 and 3 were plotted against cluster1's FECHA column,
# pairing their totals with the wrong dates; each series now uses its own
# date axis.
cluster1 = grupoCLUSTER[grupoCLUSTER['CLUSTER']==0][:32]
cluster2 = grupoCLUSTER[grupoCLUSTER['CLUSTER']==1][:32]
cluster3 = grupoCLUSTER[grupoCLUSTER['CLUSTER']==2][:32]
fig, ax = plt.subplots()
ax.set_ylabel('Total')
ax.set_xlabel('Dia')
ax.set_title('CLUSTER VS DIA')
plt.plot(cluster1['FECHA'], cluster1['TOTAL'], label='cluster1')
plt.plot(cluster2['FECHA'], cluster2['TOTAL'], label='cluster2')
plt.plot(cluster3['FECHA'], cluster3['TOTAL'], label='cluster3')
plt.xticks(rotation=90)
ax.legend(loc="upper left", title="Clusters", frameon=False)
plt.show()
# -
# ### Analisis
# - Como vemos en la tabla y en la categoria choque y tropello solo tiene un registro, por tanto puede presentar problemas a la hora del diseño del modelo.
# - De igual manera la categoria incendio tiene muy pocos datos a comparacion del resto de categorias.
#
# ### Decisiones
# - Como vimos anteriormente en las graficas y las tablas, choque es la categoria con mas datos; "choque y atropello" tiene un solo dato, incendio tiene 32 datos, y una prueba preliminar hizo que los modelos alcanzaran grandes cotas de error y overfitting.
# - El dato de choque y atropello se agregara a la categoria choque.
# - Los datos de incendio se agregaran a categoria otros.
#
#
#
| directorio de trabajo/Tacho/TrabajoTAE/.ipynb_checkpoints/Procesamiento de datos-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Data-Science-and-Data-Analytics-Courses/MITx---Machine-Learning-with-Python-From-Linear-Models-to-Deep-Learning-Jun-11-2019/blob/master/Project%202%3A%20Digit%20recognition%20(Part%201)/Linear_Regression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="-jKf749g6256" colab_type="text"
# # Linear Regression
#
# ---
#
#
# + [markdown] id="LiqX2dH_HnE0" colab_type="text"
# ## Setup
# + colab_type="code" id="Xw3SOTRf9Nz_" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="f8e7b7ec-05a5-4ce8-e522-8a38c6358277"
# Clone the shared notebook library and the course repository, put both on
# sys.path, and change into the part1 working directory.
# NOTE(review): the `# !...` and `# %...` lines are Jupyter shell/line magics
# executed by the notebook runtime — they are code, not comments.
import os, sys
from pathlib import Path
# Notebook Library
url = "https://github.com/Data-Science-and-Data-Analytics-Courses/Notebook-Library"
repo = Path("/nblib")
# !git clone "{url}" "{repo}"
if repo.parent.as_posix() not in sys.path:
  sys.path.append(repo.parent.as_posix())
# %run "{repo}/.Importable.ipynb"
from nblib import Git
# Remote
URL = "https://github.com/Data-Science-and-Data-Analytics-Courses/MITx---Machine-Learning-with-Python-From-Linear-Models-to-Deep-Learning-Jun-11-2019"
REPO = Git.clone(URL, dest="/content")
if REPO.as_posix() not in sys.path:
  sys.path.append(REPO.as_posix())
# Working directory, for running modules in part1
part1dir = REPO / "Project 2: Digit recognition (Part 1)/mnist/part1"
os.chdir(part1dir)
from setup.Setup import *
# + [markdown] id="PwYhRjtqgiMC" colab_type="text"
# ## Test Error on Linear Regression
# Apply the linear regression model on the test set. For classification purpose, you decide to round the predicted label into numbers 0-9.
#
# Note: For this project we will be looking at the error rate defined as the fraction of labels that don't match the target labels, also known as the "gold labels" or ground truth. (In other context, you might want to consider other performance measures such as precision and recall, which we have not discussed in this course).
#
# + id="74alqoxjhesw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 696} outputId="8839c3dc-0e92-46c8-80ab-28028132b37f"
# %run "{part1dir}/main.py"
| project2_digit_recognition/Linear_Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Exercise 34: Even or Odd?
#
# Write a program that reads an integer from the user. Then your program should display a message indicating whether the integer is even or odd.
# +
# Read an integer and report whether it is even or odd.
num = int(input('Please enter a number'))
if num % 2 != 0:
    print('{0} is an Odd number'.format(num))
else:
    print('{0} is an Even number'.format(num))
# -
# Exercise 35: Dog Years
#
# Write a program that implements the conversion from human years to dog years (1 human year = 7 dog years). Ensure that your program works correctly for conversions of less than two human years and for conversions of two or more human years. Your program should display an appropriate error message if the user enters a negative number
# Exercise 36: Vowel or Consonant
#
# Write a program that reads a letter of the alphabet from the user. If the user enters a, e, i, o or u, then your program should display a message indicating that the entered letter is a vowel. If the user enters y then your program should display a message indicating that sometimes y is a vowel. and sometimes y is a consonant. Otherwise your program should display a message indicating that the letter is a consonant
# +
# Classify a letter: vowel, sometimes-vowel (y) or consonant.
letter = input('Please input a letter of the alphabet:')
if letter in {'a', 'e', 'i', 'o', 'u'}:
    print('The letter is a vowel!')
elif letter == 'y':
    print("Sometimes it's a vowel...Sometimes it's a consonant.")
else:
    print("The letter is a consonant")
# -
# Exercise 37: Name that Shape(Solved—31 Lines)
#
# Write a program that determines the name of a shape from its number of sides. Read
# the number of sides from the user and then report the appropriate name as part of
# a meaningful message. Your program should support shapes with anywhere from 3
# up to (and including) 10 sides. If a number of sides outside of this range is entered
# then your program should display an appropriate error message.
# +
# Map a number of sides (3-10) to the shape's name; report an error for any
# other count.
# BUG FIX: the original initialised `name = ''` but tested `name == " "`
# (a single space), so unsupported side counts printed "That's a " instead
# of the error message. A dict lookup replaces the if/elif ladder.
SHAPE_NAMES = {
    3: 'triangle',
    4: 'quadrilateral',
    5: 'pentagon',
    6: 'hexagon',
    7: 'heptagon',
    8: 'octagon',
    9: 'nonagon',
    10: 'decagon',
}
num_sides = int(input('Please enter the number of sides:'))
name = SHAPE_NAMES.get(num_sides, '')
if not name:
    print("That number of sides is not supported by this program.")
else:
    print("That's a", name)
# -
# Exercise 38: Month Name to Number of Days (Solved—18 Lines)
#
# The length of a month varies from 28 to 31 days. In this exercise you will create a program that reads the name of a month from the user as a string. Then your program should display the number of days in that month. Display “28 or 29 days” for February so that leap years are addressed.
# +
# Report how many days the named month has (February -> '28 or 29').
month = input('Please enter the name of a month:')
if month in ('April', 'June', 'September', 'November'):
    days = 30
elif month == 'February':
    days = '28 or 29'
else:
    # every remaining month has 31 days
    days = 31
print(month, 'has', days, 'days in it.')
# -
# Exercise 39: Sound Levels: (solved 30 lines)
#
# The following table lists the sound level in decibels for several common noises:
#
#
# | Noise | Decibel level(DB) |
# |-------------|-------------------|
# | jackhammer | 130 |
# | Gas lawnmower | 106 |
# | Alarm Clock | 70 |
# | Quiet Room | 40 |
#
# Write a program that reads a sound level in decibels from the user. If the user enters a decibel level that matches one of the noiess in the table then your program should display a message containing only that noise. If the user enters a number of decibels between the noises listed then your program should display a message indicating which noises the level is between. Ensure that your program also generates
# reasonable output for a value smaller than the quietest noise in the table, and for a value larger than the loudest noise in the table.
# Exercise 40: Name that Triangle(Solved—20 Lines)
#
# A triangle can be classified based on the lengths of its sides as equilateral, isosceles or scalene. All 3 sides of an equilateral triangle have the same length. An isosceles triangle has two sides that are the same length, and a third side that is a different length. If all of the sides have different lengths then the triangle is scalene. Write a program that reads the lengths of 3 sides of a triangle from the user. Display a message indicating the type of the triangle.
# Exercise 41: Note To Frequency (Solved—39 Lines)
#
# The following table lists an octave of music notes, beginning with middle C, along
# with their frequencies.
#
# | Note | Frequency (Hz) |
# |-------|---------------|
# | C4 | 261.63 |
# | D4 | 293.66 |
# | E4 | 329.63 |
# | F4 | 349.23 |
# | G4 | 392.00 |
# | A4 | 440.00 |
# | B4 | 493.88 |
#
# Begin by writing a program that reads the name of a note from the user and displays the note’s frequency. Your program should support all of the notes listed previously. Once you have your program working correctly for the notes listed previously you should add support for all of the notes from C0 to C8. While this could be done by adding many additional cases to your if statement, such a solution is cumbersome, inelegant and unacceptable for the purposes of this exercise. Instead, you should
# exploit the relationship between notes in adjacent octaves. In particular, the frequency of any note in octave n is half the frequency of the corresponding note in octave n+1. By using this relationship, you should be able to add support for the additional notes without adding additional cases to your if statement. Hint: To complete this exercise you will need to extract individual characters from the two-character note name so that you can work with the letter and the octave number separately. Once you have separated the parts, compute the frequency of the note in the fourth octave using the data in the table above. Then divide the frequency by 24−x , where x is the octave number entered by the user. This will halve or double the frequency the correct number of times.
# Exercise 42: Frequency To Note (Solved—40 Lines)
#
# In the previous question you converted from note name to frequency. In this question you will write a program that reverses that process. Begin by reading a frequency from the user. If the frequency is within one Hertz of a value listed in the table in the previous question then report the name of the note. Otherwise report that the frequency does not correspond to a known note. In this exercise you only need to consider the notes listed in the table. There is no need to consider notes from other
# octaves.
# Exercise 43: Faces on Money(31 Lines)
#
# It is common for images of a country’s previous leaders, or other individuals of historical significance, to appear on its money. The individuals that appear on banknotes in the United States are listed in Table 2.1. Write a program that begins by reading the denomination of a banknote from the
# user. Then your program should display the name of the individual that appears on the banknote of the entered amount. An appropriate error message should be displayed if no such note exists. While two dollar banknotes are rarely seen in circulation in the United States, they are legal tender that can be spent just like any other denomination. The United States has also issued banknotes in denominations of $500, $1,000, $5,000, and $10,000 for public use. However, high denomination banknotes have not been printed since 1945 and were officially discontinued in 1969. As a result, we will not consider them in this exercise.
#
# | Individual | Amount |
# | -----------|---------|
# | <NAME> | 1.00 |
# | <NAME> | 2.00 |
# | <NAME> | 5.00 |
# | <NAME> | 10.00 |
# |<NAME> | 20.00 |
# | <NAME> | 50.00 |
# | <NAME> | 100.00 |
# Exercise 44: Date to Holiday Name (18 Lines)
#
# Canada has three national holidays which fall on the same dates each year. Write a program that reads a month and day from the user. If the month and day match one of the holidays listed previously then your program should display the holiday’s name. Otherwise your program should indicate that the entered month and day do not correspond to a fixed-date holiday. Canada has two additional national holidays, Good Friday and Labour Day, whose dates vary from year to year. There are also numerous provincial and
# territorial holidays, some of which have fixed dates, and some of which have variable dates. We will not consider any of these additional holidays in this exercise.
#
# |Holiday | Date |
# |--------|------|
# | New Year's Day | January 1 |
# | Canada Day | July 1 |
# |Christmas Day | Decemeber 25 |
# Exercise 45:What Color is that Square? (22 Lines)
#
# Positions on a chess board are identified by a letter and a number. The letter identifies the column, while the number identifies the row, as shown below:
#
# 
#
# Write a program that reads a position from the user. Use an if statement to determine if the column begins with a black square or a white square. Then use modular arithmetic to report the color of the square in that row. For example, if the user enters a1 then your program should report that the square is black. If the user enters d5 then your program should report that the square is white. Your program may assume that a valid position will always be entered. It does not need to perform any error
# checking.
# Exercise 46: Season from Month and Day (Solved—40 Lines)
#
# The year is divided into four seasons: spring, summer, fall and winter. While the exact dates that the seasons change vary a little bit from year to year because of the way that the calendar is constructed, we will use the following dates for this exercise:
#
# | Season | First Day |
# |--------|----------|
# | Spring | March 20 |
# | Summer | June 21 |
# | Fall | September 22 |
# | Winter | December 21 |
#
# Create a program that reads a month and day from the user. The user will enter the name of the month as a string, followed by the day within the month as an integer. Then your program should display the season associated with the date that was entered.
# Exercise 47: Birth Date to Astrological Sign (47 Lines)
#
# The horoscopes commonly reported in newspapers use the position of the sun at the time of one’s birth to try and predict the future. This system of astrology divides the year into twelve zodiac signs, as outline in the table below:
#
# |Zodiac Sign | Date Range |
# |------------|-----------|
# | Capricorn | December 22 to January 19 |
# | Aquarius | January 20 to February 18 |
# | Pisces | February 19 to March 20 |
# | Aries | March 21 to April 19 |
# | Taurus | April 20 to May 20 |
# | Gemini | May 21 to June 20 |
# | Cancer | June 21 to July 20 |
# | Leo | July 23 to August 22 |
# | Virgo | August 23 to September 22 |
# | Libra | September 23 to October 22 |
# | Scorpio | October 23 to November 21 |
# | Sagitarius | November 22 to December 21 |
#
# Write a program that asks the user to enter his or her month and day of birth. Then
# your program should report the user’s zodiac sign as part of an appropriate output
# message.
# Exercise 48: Chinese Zodiac (Solved—35 Lines)
#
# The Chinese zodiac assigns animals to years in a 12 year cycle. One 12 year cycle is shown in the table below. The pattern repeats from there, with 2012 being another year of the dragon, and 1999 being another year of the hare.
#
# | Year | Animal|
# |-------|------|
# | 2000 | Dragon|
# |2001|Snake|
# |2002|Horse|
# |2003|Sheep|
# |2004|Monkey|
# |2005|Rooster|
# |2006|Dog|
# |2007|Pig|
# |2008|Rat|
# |2009|Ox|
# |2010|Tiger|
# |2011|Hare|
#
# Write a program that reads a year from the user and displays the animal associated
# with that year. Your program should work correctly for any year greater than or equal
# to zero, not just the ones listed in the table
# Exercise 49: Richter Scale (30 Lines)
#
# The following table contains earthquake magnitude ranges on the Richter scale and their descriptors:
#
# |Magnitude|Descriptor|
# |---------|---------|
# |Less than 2.0|Micro|
# |2.0 to less than 3.0| Very minor|
# |3.0 to less than 4.0|Minor|
# |4.0 to less than 5.0|Light|
# |5.0 to less than 6.0|Moderate|
# |6.0 to less than 7.0|Strong|
# |7.0 to less than 8.0|Major|
# |8.0 to less than 10.0|Great|
# |10.0 or more|Meteoric|
#
# Write a program that reads a magnitude from the user and displays the appropriate descriptor as part of a meaningful message. For example, if the user enters 5.5 then your program should indicate that a magnitude 5.5 earthquake is considered to be a moderate earthquake.
# Exercise 50: Roots of a Quadratic Function (24 Lines)
#
# A univariate quadratic function has the form f (x) = ax2 + bx + c, where a, b and c are constants, and a is non-zero. The roots of a quadratic function can be found by finding the values of x that satisfy the quadratic equation ax2 + bx + c = 0. A quadratic function may have 0, 1 or 2 real roots. These roots can be computed using the quadratic formula, shown below:
# root = −b ± √b2 − 4ac/2a
#
# The portion of the expression under the square root sign is called the discriminant. If the discriminant is negative then the quadratic equation does not have any real roots. If the discriminant is 0, then the equation has one real root. Otherwise the equation has two real roots, and the expression must be evaluated twice, once using a plus sign, and once using a minus sign, when computing the numerator. Write a program that computes the real roots of a quadratic function. Your program
# should begin by prompting the user for the values of a, b and c. Then it should display a message indicating the number of real roots, along with the values of the real roots (if any)
# Exercise 51: Letter Grade to Grade Points (solved in 52 lines)
#
# At a particular univeristy, letter grades are mapped to grade points in the following manner:
#
# |Letter|Grade Points|
# |------|------------|
# |A+|4.0|
# |A|4.0|
# |A-|3.7|
# |B+|3.3|
# |B|3.0|
# |C+|2.3|
# |C|2.0|
# |C-|1.7|
# |D+|1.3|
# |D|1.0|
# |F|0|
#
# Write a program that begins by reading a letter grade from the user. Then your program should compute and display the equivalent number of grade points. Ensure that your program generates an appropriate error message if the user enters an invalid letter grade.
# Exercise 52: Grade Points to Letter Grade(47 Lines)
#
# In the previous exercise you created a program that converts a letter grade into the equivalent number of grade points. In this exercise you will create a program that reverses the process and converts from a grade point value entered by the user to a letter grade. Ensure that your program handles grade point values that fall between letter grades. These should be rounded to the closest letter grade. Your program should report A+ for a 4.0 (or greater) grade point average.
# Exercise 53: Assessing Employees(Solved—28 Lines)
#
# At a particular company, employees are rated at the end of each year. The rating scale begins at 0.0, with higher values indicating better performance and resulting in larger raises. The value awarded to an employee is either 0.0, 0.4, or 0.6 or more. Values between 0.0 and 0.4, and between 0.4 and 0.6 are never used. The meaning associated with each rating is shown in the following table. The amount of an employee’s raise is 2400.00 multiplied by their rating
#
# |Rating|Meaning|
# |------|-------|
# |0.0|Unacceptable performance|
# |0.4|Acceptable performance|
# |0.6 or more|Meritorious performance|
#
# Write a program that reads a rating from the user and indicates whether the performance was unacceptable, acceptable or meritorious. The amount of the employee’s
# raise should also be reported. Your program should display an appropriate error
# message if an invalid rating is entered.
# Exercise 54:Wavelengths of Visible Light (38 Lines)
#
# The wavelength of visible light ranges from 380 to 750 nanometers (nm). While the spectrum is continuous, it is often divided into 6 colors as shown below:
#
# |Color|Wavelength (nm)|
# |------|--------------|
# |Violet|380 to less than 450|
# |Blue|450 to less than 495|
# |Green|495 to less than 570|
# |Yellow|570 to less than 590|
# |Orange|590 to less than 620|
# |Red|620 to 750|
#
# Write a program that reads a wavelength from the user and reports its color. Display an appropriate error message if the wavelength entered by the user is outside of the visible spectrum.
# Exercise 55: Frequency to Name (31 Lines)
#
# Electromagnetic radiation can be classified into one of 7 categories according to its frequency, as shown in the table below:
#
# |Name|Frequency range (Hz)|
# |---------|---------------|
# |Radio Waves|Less than 3 x 10<sup>9</sup>|
# |Microwaves|3 x 10<sup>9</sup> to less than 3 x 10 <sup>12</sup>|
# |Infrared light|3 x 10<sup>12</sup> to less than 4.3 x 10<sup>14</sup> |
# |Visible light|4.3 x 10<sup>14</sup> to less than 7.5 x 10<sup>14</sup> |
# |Ultraviolet light| 7.5 x 10<sup>14</sup> to less than 3 x 10<sup>17</sup> |
# |X - rays| 3 x 10<sup>17</sup> to less than 3 x 10<sup>19</sup> |
# |Gamma rays | 3 x 10<sup>19</sup> or more|
#
# Write a program that reads the frequency of the radiation from the user and displays the appropriate name.
# Exercise 56: Cell Phone Bill(44 Lines)
#
# A particular cell phone plan includes 50 minutes of air time and 50 text messages for 15.00 a month. Each additional minute of air time costs 0.25, while additional text messages cost 0.15 each. All cell phone bills include an additional charge of 0.44 to support 911 call centers, and the entire bill (including the 911 charge) is subject to 5 percent sales tax.
#
# Write a program that reads the number of minutes and text messages used in a month from the user. Display the base charge, additional minutes charge (if any), additional text message charge (if any), the 911 fee, tax and total bill amount. Only display the additional minute and text message charges if the user incurred costs in these categories. Ensure that all of the charges are displayed using 2 decimal places.
# Exercise 57: Is it a Leap Year?(Solved—22 Lines)
#
# Most years have 365 days. However, the time required for the Earth to orbit the Sun is actually slightly more than that. As a result, an extra day, February 29, is included in some years to correct for this difference. Such years are referred to as leap years. The rules for determining whether or not a year is a leap year follow:
#
# • Any year that is divisible by 400 is a leap year.
#
# • Of the remaining years, any year that is divisible by 100 is not a leap year.
#
# • Of the remaining years, any year that is divisible by 4 is a leap year.
#
# • All other years are not leap years.
#
# Write a program that reads a year from the user and displays a message indicating whether or not it is a leap year.
# Exercise 58: Next Day(50 Lines)
#
# Write a program that reads a date from the user and computes its immediate successor. For example, if the user enters values that represent 2013-11-18 then your program should display a message indicating that the day immediately after 2013-11-18 is 2013-11-19. If the user enters values that represent 2013-11-30 then the program should indicate that the next day is 2013-12-01. If the user enters values that represent 2013-12-31 then the program should indicate that the next day is 2014-01-01. The date will be entered in numeric form with three separate input statements; one for the year, one for the month, and one for the day. Ensure that your program works correctly for leap years.
# Exercise 59: Is a License Plate Valid?(Solved—28 Lines)
#
# In a particular jurisdiction, older license plates consist of three uppercase letters followed by three numbers. When all of the license plates following that pattern had Exercise 59: Is a License Plate Valid? 27 been used, the format was changed to four numbers followed by three uppercase letters. Write a program that begins by reading a string of characters from the user. Then your program should display a message indicating whether the characters are valid for an older style license plate or a newer style license plate. Your program should display an appropriate message if the string entered by the user is not valid for either style of license plate.
# Exercise 60: Roulette Payouts(Solved—45 Lines)
#
# A roulette wheel has 38 spaces on it. Of these spaces, 18 are black, 18 are red, and two are green. The green spaces are numbered 0 and 00. The red spaces are numbered 1, 3, 5, 7, 9, 12, 14, 16, 18, 19, 21, 23, 25, 27, 30 32, 34 and 36. The remaining integers between 1 and 36 are used to number the black spaces.
#
# Many different bets can be placed in roulette. We will only consider the following subset of them in this exercise:
#
# - Single number (1 to 36, 0, or 00)
# - Red versus Black
# - Odd versus Even (Note that 0 and 00 do not pay out for even)
# - 1 to 18 versus 19 to 36
#
# Write a program that simulates a spin of a roulette wheel by using Python’s random number generator. Display the number that was selected and all of the bets that must be payed. For example, if 13 is selected then your program should display:
#
# The spin resulted in 13...
# Pay 13
# Pay Black
# Pay Odd
# Pay 1 to 18
#
# If the simulation results in 0 or 00 then your program should display Pay 0 or Pay 00 without any further output.
| Python Workbook If Statement Exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## California house price prediction
# ### Load the data and explore it
import pandas as pd
# Load the California housing dataset.
# NOTE(review): hard-coded absolute Windows path — assumes this exact local
# layout; consider a relative path or a config value.
housing = pd.read_csv(r"E:\GIS_Data\file_formats\CSV\housing.csv")
housing.head()
housing.info()
# find unique values in ocean proximity column
housing.ocean_proximity.value_counts()
#describe all numerical rows - basic stats
housing.describe()
# %matplotlib inline
import matplotlib.pyplot as plt
# Quick distribution overview of every numeric column.
housing.hist(bins=50, figsize=(20,15))
# ## Create a test set
# Random 80/20 split; random_state pins the shuffle for reproducibility.
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
for part in (train_set, test_set):
    print(part.shape)
# ## Do stratified sampling
# Assuming median_income is an important predictor, we need to categorize it. It is important to build categories such that there are a sufficient number of data points in each strata, else the stratum's importance is biased. To make sure, we need not too many strata (like it is now with median income) and strata are relatively wide.
# +
# scale the median income down by dividing it by 1.5 and rounding up those which are greater than 5 to 5.0
import numpy as np
housing['income_cat'] = np.ceil(housing['median_income'] / 1.5) #up round to integers
#replace those with values > 5 with 5.0, values < 5 remain as is
housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True)
# -
housing['income_cat'].hist()
# +
# Stratified 80/20 split on income_cat so both partitions share its
# distribution. n_splits=1 yields exactly one (train, test) index pair.
from sklearn.model_selection import StratifiedShuffleSplit
splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
train_index, test_index = next(splitter.split(housing, housing['income_cat']))
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
# -
# Now remove the `income_cat` field used for this sampling. We will learn on the `median_income` data instead
strat_test_set.drop("income_cat", axis=1, inplace=True)
strat_train_set.drop("income_cat", axis=1, inplace=True)
# Write the train and test data to disk
strat_test_set.to_csv('./housing_strat_test.csv')
strat_train_set.to_csv('./housing_strat_train.csv')
# ## Exploratory data analysis
# Geographic scatter: marker size tracks population, colour tracks house value.
# BUG FIX: the value-to-colormap mapping must be passed as `c=` (column name
# or values); `color=` is a fixed-colour argument and rejects a numeric Series.
strat_train_set.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4,
                     s=strat_train_set['population']/100, label='population',
                     figsize=(10,7), c='median_house_value',
                     cmap=plt.get_cmap('jet'), colorbar=True)
plt.legend()
# Do pairwise plot to understand how each feature is correlated to each other
import seaborn as sns
sns.pairplot(data=strat_train_set[['median_house_value','median_income','total_rooms','housing_median_age']])
# Focussing on relationship between income and house value
strat_train_set.plot(kind='scatter', x='median_income', y='median_house_value', alpha=0.1)
# #### Creating new features that are meaningful and also useful in prediction
# Create the number of rooms per household, bedrooms per household, ratio of bedrooms to the rooms, number of people per household. We do this on the whole dataset, then collect the train and test datasets.
# Derived ratio features: rooms/bedrooms per household and bedroom share of rooms.
housing['rooms_per_household'] = housing['total_rooms'] / housing['households']
housing['bedrooms_per_household'] = housing['total_bedrooms'] / housing['households']
housing['bedrooms_per_rooms'] = housing['total_bedrooms'] / housing['total_rooms']
# Correlate only the numeric columns: calling corr() on the full frame raises a
# TypeError in modern pandas because of the text column 'ocean_proximity'
# (older pandas silently dropped it, so the result is unchanged).
corr_matrix = housing.select_dtypes(include='number').corr()
corr_matrix['median_house_value'].sort_values(ascending=False)
housing.plot(kind='scatter', x='bedrooms_per_household',y='median_house_value', alpha=0.5)
# ### Prepare data for ML
#create a copy without the house value column
housing = strat_train_set.drop('median_house_value', axis=1)
#create a copy of house value column into a new series, which will be the labeled data
# BUG FIX: the labels must come from the *training* set so they line up row-for-row
# with `housing` (the original read them from strat_test_set, a different, smaller set).
housing_labels = strat_train_set['median_house_value'].copy()
# #### Fill missing values using `Imputer` - using median values
# Use SimpleImputer: sklearn.preprocessing.Imputer was removed in scikit-learn 0.22.
from sklearn.impute import SimpleImputer
housing_imputer = SimpleImputer(strategy='median')
#drop text columns and let the imputer learn the per-column medians
housing_numeric = housing.drop('ocean_proximity', axis=1)
housing_imputer.fit(housing_numeric)
housing_imputer.statistics_
_x = housing_imputer.transform(housing_numeric)
# Keep the original row index: with the default RangeIndex the assignment of
# housing['ocean_proximity'] below would align on index and fill the column with NaNs,
# because the stratified sample does not have labels 0..n-1.
housing_filled = pd.DataFrame(_x, columns=housing_numeric.columns, index=housing_numeric.index)
housing_filled['ocean_proximity'] = housing['ocean_proximity']
housing_filled.head()
# #### Transform categorical features to numeric - OneHotEncoder
# We can use `LabelEncoder` from scikit-learn to enumerate the `ocean_proximity` category. However, an ML algorithm should not associate higher values as more desirable, nor should it think that values 1 and 2 are closer together than values 1 and 4. So we transform the categories into new boolean columns using `OneHotEncoder`.
#
# We can do both steps — enumeration and then conversion to booleans — in one go using `LabelBinarizer`.
# source notebook: ml/CA_house_price_prediction.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Логически оператори
#
# ### Булев тип
# Вече разгледахме изразите с аритметични операции.
# Освен това използвахме изрази с операции за сравнение в контекста на if и while конструкции.
# Но в Python може да имаме самостоятелни изрази с операции за сравнение.
#
# *Въпрос*: Какви стойности имат изразите с операции за сравнение и колко на брой могат да бъдат те?
# A comparison expression evaluates to a boolean: True here...
1 < 2
# ...and False here.
1 > 2
# Можем да присвояваме булеви стойности на променливи.
# +
is_ready = (1 < 2)  # a comparison yields a bool that we can store in a variable
print(is_ready)
is_ready = False    # ...and we can overwrite it with a boolean literal
print(is_ready)
# -
# Можем да използваме булеви стойности като условие на условен оператор и цикъл.
a = 1
b = 2
# Store the comparison result first, then branch on the named boolean.
a_less_than_b = (a < b)
if a_less_than_b:
    print("A is less than b")
# Print 1..10: the loop condition is recomputed into `loop` at the end of each pass.
counter = 1
loop = (counter <= 10)
while loop:
    print(counter)
    counter += 1
    loop = (counter <= 10)
# ### Логически оператори
# Логическите оператори се изпълняват върху булеви изрази.
#
# Ще представим основните логически операции чрез таблица на истинност.
#
# #### Логическо И (and)
print(25 * "-")
print("|%7s|%7s|%7s|" % ("x", "y", "x and y"))
print(25 * "-")
# Enumerate all four input combinations instead of spelling out each row by hand.
for x in (False, True):
    for y in (False, True):
        print("|%7s|%7s|%7s|" % (x, y, x and y))
print(25 * "-")
# #### Логическо ИЛИ (or)
print(25 * "-")
print("|%7s|%7s|%7s|" % ("x", "y", "x or y"))
print(25 * "-")
# Enumerate all four input combinations instead of spelling out each row by hand.
for x in (False, True):
    for y in (False, True):
        print("|%7s|%7s|%7s|" % (x, y, x or y))
print(25 * "-")
# #### Логическо отрицание (not)
print(17 * "-")
print("|%7s|%7s|" % ("x", "not x"))
print(17 * "-")
# A single loop covers both inputs of the unary operator.
for x in (False, True):
    print("|%7s|%7s|" % (x, not x))
print(17 * "-")
# ### Примери
1 < 2 and 3 > 4
# *Упражнение*: Напишете програма, която показва знака (+ или -) от произведението
# на две числа, без да го пресмята. Решението трябва да се събира на 4 реда.
a = 1
b = -1
# The product is non-negative exactly when both factors have the same sign,
# i.e. both are >= 0 or both are < 0 — equivalent to comparing the two sign tests.
if (a >= 0) == (b >= 0):
    print("+")
else:
    print("-")
| archive/2015/week4/Boolean-operators.ipynb |