# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python tf>=2.0
#     language: python
#     name: tf2gpu
# ---
# ## Change detector in gas hydrates
#
# Set up some scripts to visualize change using a simple change detector.
import h5py
import numpy as np
from tomo_encoders import DataFile, Patches
from tomo_encoders.misc.voxel_processing import modified_autocontrast, normalize_volume_gpu
import matplotlib.pyplot as plt
import tqdm
from pdb import set_trace
# +
fpath = '/data02/MyArchive/tomo_datasets/gas_hydrates/data/exp2_time_19p6_101_102_107to110_113to185.h5'
def get_tsteps(fpath):
    # list the integer time-step keys stored as groups in the HDF5 file
    hf = h5py.File(fpath, 'r')
    tsteps = [int(key) for key in hf.keys()]
    hf.close()
    return np.asarray(tsteps)
# -
time_steps = get_tsteps(fpath)
print(time_steps)
def load_datasets(fpath, tsteps = None):
    if tsteps is None:
        tsteps = get_tsteps(fpath)
    vols = []
    for ii, tstep in enumerate(tqdm.tqdm(tsteps)):
        ds = DataFile(fpath, data_tag = "%03d"%tstep, tiff = False, VERBOSITY = 0)
        vol = ds.read_full()
        vol = (vol/255.0).astype(np.float16)
        vols.append(vol)
    return vols
sel_tsteps = time_steps[::20]
vols = load_datasets(fpath, tsteps = sel_tsteps)
# ### What's next?
#
# So we have the subsampled time-steps loaded up and we can now train whatever we like.
from tsne_detector import params, change_detector
p = Patches(vols[0].shape, initialize_by='regular-grid', patch_size = (32,32,32))
p_sel = change_detector(vols[:2], tuple([64]*3), 1, verbosity = True)
from vis_utils import show_in_volume
show_in_volume(vols[:2], p_sel, 0)
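# As a point of comparison, here is a crude baseline sketch (my own assumption, not the
# tsne_detector implementation): score change between two co-registered, normalized volumes
# by the mean absolute difference inside non-overlapping patches. The patch size and
# threshold below are illustrative values, not tuned for this dataset.

# +
def patchwise_change(vol_a, vol_b, patch_size = (32,32,32), threshold = 0.05):
    """Return corner indices of patches whose mean |vol_a - vol_b| exceeds threshold."""
    diff = np.abs(vol_a.astype(np.float32) - vol_b.astype(np.float32))
    pz, py, px = patch_size
    flagged = []
    for z in range(0, diff.shape[0] - pz + 1, pz):
        for y in range(0, diff.shape[1] - py + 1, py):
            for x in range(0, diff.shape[2] - px + 1, px):
                if diff[z:z+pz, y:y+py, x:x+px].mean() > threshold:
                    flagged.append((z, y, x))
    return flagged

# quick check on synthetic volumes (stand-ins for vols[0] and vols[1])
_rng = np.random.default_rng(0)
_a = _rng.random((64,64,64), dtype = np.float32)
_b = _a.copy(); _b[:32,:32,:32] += 0.2  # inject a localized change in one octant
print(patchwise_change(_a, _b))
# -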
# +
# len(p_sel)
# -
# scratchpad/gas_hydrates_beamtime/change_detector.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---
# + id="eyOw4mAuxpaB" executionInfo={"status": "ok", "timestamp": 1640999079690, "user_tz": 360, "elapsed": 186, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
import re
import numpy as np
from collections import Counter
# + id="oRPBvADCo_Bl" executionInfo={"status": "ok", "timestamp": 1640999080068, "user_tz": 360, "elapsed": 244, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
def readFasta(string):
    # parse a FASTA-formatted string into {record name: concatenated sequence}
    keys = re.findall(r">Rosalind_\d+", string)
    keys = [i[1:] for i in keys]
    values = re.split(r">Rosalind_\d+", string)[1:]
    values = [i.replace("\n", "") for i in values]
    return dict(zip(keys, values))
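
# Quick sanity check of readFasta on a tiny made-up FASTA string; the record names
# below are placeholders for illustration, not part of the actual Rosalind input.
demo = """>Rosalind_0001
ACGT
ACGTA
>Rosalind_0002
TTTT"""
print(readFasta(demo))  # {'Rosalind_0001': 'ACGTACGTA', 'Rosalind_0002': 'TTTT'}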
# + id="_9KLLH-WpbYl" executionInfo={"status": "ok", "timestamp": 1640999087254, "user_tz": 360, "elapsed": 6645, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
string = """>Rosalind_6727
ATACGACAATGGCGCTCCCGCGTGTTGAGGGGGGTGGTGGAGCAGTCAACTTCCATTTAG
AACGCACGCCATGAAAGATCATACCTGCTTAGCCCGCTCGCTTGGGAACCTGGTACCCCG
TGAAAAAATCAAGACCTGTTCAAGGCCCCCCTCCTACCAGTGATCGAAGGTGGACTCGTC
GATTGACTATGTACATCCCTTTCTGAACAACCACCTACAAAAGCTAGCATAGACCTTTTT
TCTCGCTCCATACAAACACTGTATTAAGGTGTCCCTGCAGTAAAGACGCAGCCCGACTCC
TCGCAACAAGTTGTATATCCCCGCACTACTCGTAGTCGTGTAAATACATTGCCTCGTAGA
ATGTGACCCCAACATCAACAAACTAGTGTCTCTTTTCTTGAATTAAGTCTATGGTGAGGC
GCCGTTAGGGCAGAAGGCCCATTATTATGGAGCGCCTCTCACAACCGAAGTCAGAGAATC
CAGGTGGCCCATGGGCCCTTCCCACTACGTTCAACGGCCCTCACTCCCTCGGGTGAGGTC
AACGAGTGCCCGCCATAGGCTATCCAGCTGCAGTGTGGGGTGTATTGAGTTAGGCCCCCC
GTAGGACAACAGGTACCTAGATAGGGTTTGATCAGATAGACATTAAAGGAACTTCTTTCG
AGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCGGACA
CAAGTTACAGTGTAGCCTGTTCACTGGACTGCTCAGTAGTATCGATGAAACATGTTGTGT
GTCTCTGCACATTCTATTCAGCACGGTTCGCCGTAGCTCCGTAGACGGAGACGTCCTGAA
GCTCCATATTCAGCCACGTTTAAAGTCCTGGCGACCGGATAGAGGCCCGGTCCTTACTGG
CATCCGAGTAGAATCGATGGCGGGCGTGGCGGCCGGACTTTTTACGTGAACAGCCCTGAT
AAGCTGCCTTCATCAATGCAGCTACTAAACACTTGAGACA
>Rosalind_0096
GAAGGGACCTACATGCAGCCTTGAGGTTAAATGTCGTGTAAAATGGGATGTCTATGAGTT
CAGGCGCCCCCTCGATTCGTTTCTGAAAAAATTCCATCGTCCAAATTCTACGGCTTGCAT
AATCAGAGCATCAGGGATGTCACGATACGACGAACACTCACTAGGGGCGAAGCTATAATC
TCTTAAGAGAGGCCCGCGCATGCATAGGTTATAAGAGGGTATACGTCACTTCCTTATCAG
GACAAAGCGCTGTCTGAACGACATGCGGAGCGCGCCCCGATCGTGCCTAGCTACAGAAGG
GCCGGTTCGGAATGTGAAACCAGACTCCACAGGTAGCCGTGCGTACCCAGATAGACTTAA
GTCCACGCGTCGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTG
CACCCGCAAAGAAGTGAGACGGTGAATCTCCAGCGCGGTCCAAAGTGCACACGCGGCACA
TCCAAATCTTAGTGGGTCCGGCATAACGGTATGCGAGTTACGCGGTACATGGAGTTATTA
GTCGCGGGCCCATACCATCCCAACCACGATATTTGTAAGATGAAAAATAAGTTGGTGAGC
TCCAGCGTCACGACGTAACGCTCTTAAGGATGAGTATTAGTTGGGTGGCCGACGCGGTGT
AATACATAACGATCGCCACCGGTGGGGGCGTAAGCCCTGGCACCGTATAGTTCGCGCATC
TCCCTTCCGGGTGCCACCACGGACAGTGATGAAATTACCCTCCCAATCCGCATCTAAAGC
GTTTCTTGGCAGCCCAGGAACCAACTGTATTAGATCGACATGAACTTCTTGGACTGACCC
GGCCCACCCGTCCAATGTGCACACTAGCGTCAAGAGTTCTGAAAAGCTTATAACGCAACA
AGACTGAGTTAGTGACAAAATCTCCGGATTCGCTAATCCCGAAATATACTTTTTTCTGAG
GGTCGCTAAAGAGTCAGTAAGCATGACAAGTCAATGGGAT
>Rosalind_1354
ACTACGATTCTATAACGGTGTCTCGGGATCGAAGACGACCGGGGTCTTACCGCATACTCC
GCATATGCTCCCGTGAATACTACTCGCGGCTCCCAAGACACACGTATCACGTCGCAGGGC
AGCACTAGGGGAGTGTTGCCGACTATCTTGTTTGTTAACTAGATGTCAGCCACAATCGGC
CGGTTAATTCGCTCTAGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTG
CTGTGCACCCGCAAAGAAGTGAGACGGTGAATCGCAAGCGTATGGTATGCAATCTACGGC
TCAGACCCTTATTTCACTTAACAAACCTGTCGCTCCAGGCGATCGTGCAAGATGGTTTTC
TATTAAGGATCGGCCTATACTGTTTCCCCAACGGCAAATGCCGAGTGGACCCCCTTAGCT
TCGTGCGCTCCACTCTATGTCAAATGCGTGCGGCCACGAGTATTAGTCGAATAAAGCCAG
GCGGCGTAGACCAGAGATTGTTTCGAGTTCTGGTTGTCTTTCAACTGCCTCACGGCGTTA
GGCTTGCAGAGGGAGCTCAGTTTTTATGAATCACGTGTCGTCGGGATTGAACCGTTCTGT
GTTTCAGGCGTGAGAGCTCTCTAGATACCGAATACGGCATTAGCCGCAGCGTAACCCATG
GTCAACCGATTGCTCTCCATTATGGTTACCTGTATTTCGTCCCGAGGTCATTCCGGCATC
TAAGTATAAACCGACATGTTCGGGAAGAACTAGATCTAACCATCTTTCATATTAAGCTTC
GTGCTAGGACCTTCGTCAGAGACTTTTCCGATAGGGGGTCGGTATTGAAAATAACACGGT
TCGTAGCTGATCAGAGACGAGATGACCAAAACCGAGATTGGCTTGGGCAGAGACTTTCGG
GATAGGAATCGAGCAGGCTATTTTCTACGTCATGATCAATAGGGCCTGGGGCCATACGTA
CGACTGCGAATGCTGCCTGCAACAACTAGACGGGATAACT
>Rosalind_5565
TGGGCTTATGTCAGGACTAGAAAACGCTCAGATTCCTGCGGCCCCCTCCCTTCGCCCTTA
TGCGCTAGGTTCCAAGCCACTTCAGTCGTCTGCTATGAGCTGGGACTCAACAATCTGCCC
GCGAGCAGCAGGTAATATGCGCGCCTATGTCAACATCTATCCTTTCGGGAGACGCGGGAC
AGGATATCGTTAGTATCCTAGCAGTAGGACCTAAAGACATTAAAGGAACTTCTTTCGAGA
CATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTAGGACGC
GGCTACATTGTTATCTGGGCAGAACAGCGAGCTGTTTTCCACTCGCTCACTAGACTCTGA
GAGACTTCCTACTCCGGTCAGCTTGACCAAAAATCGTTTCTCCGCCAACAGGCATCTATC
CTAAGGTGCTGTTAGTGAACAGTAGCCTGCGAATACTAATCCTCGCGGGTGAACTAAAGA
AATAGTTCAACAGAGGAGTGCATCATGCAAAGAACCCCTAGAGACAAGTCGAGGAATCGA
ATGTTCCTGACCCGAATTACTCCGACTTTGGAATGCTAACTCTGACCGAAAGCGTTGCAT
CGTGTGAGACTACTCCTGGTTGGACCGAGCAACTACCACCGGACACCGAATGTAACGTCA
GCCCGCCCTAGCTCGGGGATCTCAAGGTCGCGAGAATACACCTACGCCCTGGAGCTATCG
AAAACAGAAACCCTCCAATCATTTGTGCGTCCTAACGGTCGGGCTCGTACGTCTGGCCTT
CGCCAAGGCTAACCGCCCCGTGATGCCGCCGCGCAATTTAAGACAAGCACAGGCCTAAGG
CAAGCAATTGAAATCTATCATCAAGTAGTCGTGGGCTAGGATTCCGTAAGCCCCTATGGA
ATTGGGGGTTTGGTCCTACCTATGGTCTCTGAACCTTCGTTCAGGGAAAAGAGGTCAAAC
TTCAGTTTTCCACCTACATGCCTCACAATGTGCCACACCG
>Rosalind_8399
ATTTCTTGGGCTCGTTAGCAGGTGACTTTGTTCCTATCTTCTCACTAAGGCTTCGTAGAG
TTTCCGTACTGCTGCCGCAGTCGAACTAATCACCAAAAAATCGCTGATATGTGTGGAGTA
ACCGCCCTAGAGAACGTGGTAAGCGCCAGTGCGTTGATTGGATTCATCTAATGCTCTCGG
ATTTCTTAACCTCTTTTTTATAACAGCGATTAGCCTCTTACGTCACTCAGCGGGAATCTG
TTTTTTCTGAATGAGAGATAGCATATAATGTACGAAAGCTTCGGCAAAGGAGTATCTGAT
AGCTGCGTTGAGACGTCGCCGCGGTATTCGACGGATAAGGAAATTATACGTATCATCTAG
TAACATTTGACAGCGCTACCTAGATATAGTGTGGGTAAATTAGGGCGTCCCACTCGCGCG
AGGGCTGAAGGTTAAAGTGAGGTATGATATGTGGCTTAAGGCCACTCAGCGCACACGTGG
ACATTTGCCGACGCTGGCACGGGATGAAAGTGAAACATAAACGGCTGTCGCAACCTGGGA
CTCCGGGAGCATCTATGCAAACAGAGAGGTTGTCGATATGCTCGCTGTGGCAATATTTTG
GACTTTATACGAAGTTACACCAGACAATTGTGTGCGGTTGCGTTTTAGGGGTTGGAATAG
ATCAGGTATAACGGAGGGATAGATAAGTAGTTAGATTCTGAGAACGTTTCCATGGCCCTC
TGATCACGTTCTAAACCTAGAAGAAGTGAGCCGATAGCAGGATAGCCGTACGTCTACAGA
GTCAGGCCCATGCTCTCGTCTTACCCTAGGTTAACACCCTCCTGAGGTGCAGCTCGTCAC
GCCTATCATCGTGATCCTAGCGAACCGATCTTGACATTAAAGGAACTTCTTTCGAGACAT
AAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCGATTCCTTTTC
CATTTTCGGTTAGACTGCGCTAGATCCGGACGCGTATGTC
>Rosalind_2215
TTATTACGCGTAGAGCCTGTGGATTTAAAAGGGACGTTAGGCACGCTGCTTTGATCAATA
GATTATAATGACCACGTCCCAGTTTCTTGGACCTTTCGGTCACTGCTGCTCAGATGCATG
GCAAGGTCAATTCAAGGTCTTTTGAGTCGGCGCTGTCTGAACGTAAATGACAATGATTAG
GAGGGACGGTAGTCTAGGCGTGCATCACCCCAACTCGTGGTTCGTGTGGCGTATCGCAAG
GCCGTATGGAGGACGTACGAGTATTGATTTTAGCATCTGCAGGCTCGCTCTGCTTATACA
TGTCAAGTATTACACGATGTTGATAAGTGCATCCGCATGATCGGCGAGCGTACTAAGAAG
AAATGCCAAAAATTAATCGCCATGCTCGCAAGGACGACCAGGCGCGTCAACTCCTATTAG
GGGCGCATCCAAGATGACTATGCTCCAGTGACGAGCAGTTACCGATCTCTATTCGCATCG
GCCAAGGACTCTAACCCCCTCTGCCAAGCCTACAGATAAACTACTCCATGTTTCGTGTCG
GCAATTTATAACCGTGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGC
TGTGCACCCGCAAAGAAGTGAGACGGTGAATCCAGGGTGAGAGGCCAAGTGTTCAATGGT
CAAATGACCATTGTGCGCATCAACAACCGGTGGCCCTTAATGGCCAACGGTTCATCTCTA
CCTACTGAATACTAGATCCCAAGGTCCAGGGCGTTCTGGAGGATCCTCGGGATCAACCGG
AAACTACTAGACATCCATAAACCCTTACTCTTCCTTAACAACCATGAATCCTGGGGCCCT
CCAGAGTGACCCTTCTCTTACAAAATTACGAGACGGCACACCGTGATGTCTTGCCATGCA
CTGCCTTTCACACGCCGTAAGAGCGATTTCTATCGTCTGAGGGACATGGCTGGAGGGATT
GGTAACCGCTGAGAGTCTGCATCTTTGCCAACCCGATGCA
>Rosalind_6932
TGCCGCCTCTGCCCGGGTGTTCGTCAATTTCAAGCCGTCGCCTGTAGCGATACGACGCTG
AGAATATCGTTCGTCTTAATAGCGCTTAGACAGACGCGAGTCGGAATGCAAGTAGAGCTC
CTTGCGCAACAGTTTGGTAGCTCTGACTACATAAGCCCGCCACGTCGGGCCCATACTTGC
AGCTTTCAAATTCTATTGCTCCCGTGCAGTGGCTGGCGCGCAATCATACTTCGCCCACTG
AGTCCGTTCCCATCCCGCGGTTGTATGACCGTGATACGCTTTTGACACCTGGGACTGGGG
ATTAATGACACTACTGCGTCGAACAAAAGGTGCACTGGCCCGAATGTAACTTTTTACCCT
GATTTTACACTGCTCTCAGCACGCCTGCGTCCGATTGAGTTATCGGTCACAGGTAAAGAT
GACGTTCCCCTTAGTTTGCTCAGGTGGGATTTACGAGCAGTGATGTGCAAAATATCTATA
TGTTACTATACAAGCTCAATTTTTAGCGAGAATACCCCCTACAGATCCGGATAGCATCAA
CATACGTTGACCTGTGATGGGCTATTACACCAGTAGTAACTGACGGTTTTGTGAAGGTGG
TGAACTTGTCAGTTTCATGAAGCCACATAGTTGAGAACTGCCGCTCGTAGGCCATCGCGA
GTGTTCGTATTTGTTGCTAGAGCGGAGGCAACTCAGGTACTACCCCGACGTTATACATTA
AGGAGGGTATGCGAGCCACAGCACGCGACTCCCATCATTGACTCCTAATTTGAAGCCAAT
GTACGGAGCTCCCTATAAGAAATCTGCAATCGGACGGGCACCGCTCACACTGCTGCACCC
GACGGGTGATTGGAGTGAGATAGCTGTCGGAGCTCGGGACATTAAAGGAACTTCTTTCGA
GACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCGCCCCA
TTCTTACTCACCGTTCGAAGCGACTCTGCATAGTATCGTA
>Rosalind_9157
TAGATCGTGGGCACAATAGATAAGGACATTGGAGCCCCTGGTATTGGTGACTTCCGAGAT
TTAATGAAAGAAAATACCAGCAACTGTCCAGAGACTGTTTTTGCGGGTACAAAGCTTGTT
TGACCTAATTCTCCGAGGGATCGACTACAACTAACCACCTGCACCAAAATTTGCGGACAT
CACGATACCCCCGAAGGTTAGAACCCTTGCTGGAAGTAATTTCCTAGGTCCATCACGCAA
AAGCAACTCCCATTCGTTGCTCCCAAGTAGCCAGGACGACCATATTTCAGATTCCAAACT
TAGGCCGTTAGATGTAATGCATGATCTGACACAGGCATCTTATGCCCAATTGGTGAATTA
TCGCGTCGCGCGCTTAAGGACTCTTTCCGTGTTGCCTATGCTCTGGAGACCACAATGGAT
CTGACATCTGACCCTGAAAGTCGGAATTGTTTCGGCAGACTTCGCGGCCTGACACCACGG
CATAAGGTCTAGTCCCAAGTACGCCTAGTAGGGTCCGATCGGCACACACACGGCTGTTGT
GCAAGTTTATATTCACTGCAGGTTTTCTTCCTAGAACCTACTCTCTGATACCTCCAGTTT
GCCAAGAATTCATATAGGGAGTCCTAGGAAGTCTAAGAACAGTAAGCAACAATATCAGGT
TCGCACTTATCTAACCGCGCCACCAGACAACACTCTACGTGACGTAGATCATGATGGGCC
CCTGCCCGGTGTGAAACGGGTAAGTATACCCTGTAAGTCATATGGACATTAAAGGAACTT
CTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAAT
CTAACTGGACGAGAGCCACCTTCGCGTCGTGTTCCTTAGTAGTCGAACGGTGTTTGGTGC
CTGACCTGCTTATAAGTGAGAAACTGGTCGGACTATAGACTCAGATAGCAGACGTTCGGT
TTCTATAAAGTGTCTGCAAGGCGTAGCTGGCACTAAGTAG
>Rosalind_9713
AACATAAGCGTATATGTCCTTGGATGTGGAAATTATGATGCCTTAATCCTCATCTTCCTC
CCGCGACCGACCTGTCAAGTGGTCACTTCATGATATCCGGCTAAACCTCGCCGAAGGCAG
TGGCGCCTCGTTGTCTCTAAAAGGATAGGAGTCGCTCCCGTCCATTTTTCTCGGTAGGGC
TCCATTCAGTTTTTTCGGGGAAGTTTGAGCGTCCCATAGGCGTGAGAGCAGCCAGAATTC
TCACCCGGCCGTCATTAGTTCGGGTGGACACCTCGGGACATGTCCTAGATCCCTTCCGAG
CCCTTGTGATGACCGTAAACGTAGCAACAGCATGTAAGTCCTTTGTTCAGACCATGACGT
AGGAAGGGGACCTCGCCTACCTGGCTGCCTAACTGTAGAGAGCCTCGGCTAACGGGGATC
TGTGGCCTTGGAGTCATAGCCGTCGGACGTGACTTACGCATTACTCCAAAGGCAACGTTG
TTCACAAGCAGACCTTAAGACTTAATCGGCCTACAGCATAAATAGTAACGATTGGAGCCA
AAATGTGTCCCTGACGTTACACCGGATCAATTCGTTCGTCTCAGTAAGACTTTCCCCTGT
CGGTTGTGACTTCCACAACTCATCACAAGGAACGCGATACGACTTCTCTCTGTACGCTGG
AGAAACAGCCTGATAATGGATCAGCTTGGGTCAGTTAATTTAGGGGTACAAGGTGCGCTC
CACGGGTGAAATACTAGGTAGGGTAACGCCGGAAGCGCCAGCTTTTGTCTCGCCATAGTT
TATGAGGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCC
GCAAAGAAGTGAGACGGTGAATCGTGGTGGTTACTCGGACCATAGAAATGAAACAAAACG
AGTGGATGATCATAACTTTAGACCCGAAATGCTTTATGGGTGGCGGAAGCTTTTGGCGGA
TTGTGACTGATATGACAAACCCATCAGAGGGAAGTATTGA
>Rosalind_3393
CCTGCGCAAGTACGCCCTGGGTAACAAAATGAAGTGTGTCCCTAGAGCGTCAGACCACAG
CTGATCAGCGGCTACTGTCACGGAGGGATGCTGTTAGACAGATTGGTCCGAATGTTACTC
TCGTCGCCCCACCTGATCTTTTCGCATTGAACGGCGGTTGGGACACATTCCGCGCCCTGC
TAGAGCGCGTCGATAACCATGAACAAATCAGGATTTGACTCGTCCCAACCCGGGGGTCCC
TTTGAGGCGCGAACATCGGTATTTCGTGCAGTGTCCATGCATATGCAGTGGCCAACGAAT
TCTATTGAGGGTACCAAAACTGAGTCTTCTGGAAACGTGGACATTAAAGGAACTTCTTTC
GAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCCCTA
CCCCACGAGCGTGAGTTTGGGAGAAAACTATATCCAAGGAAAGCGATACGACCCCCCTCC
AGAGCCTTTACCGTTCGCAATTGAACCTCTGAGTCGCCACAAACCGACGCCGGCGGGGCG
TGTACCTTAATCAAGGCTTCTCGAGGAGATAGGGGCAACTACCACCTGCCCGATTTGCCT
AAATCGGGGAGTAGTCCAACACCCAAAGACAGCTGGCCCAGACACGCATGTACCGATCTG
GGACATTTTCCAGCCGTCACTGTCCTGCAGCGAACAGCTTGATCCAGGGGCGTTAATCCC
TGGGGGGGCGTGTTTGCAAGGGTCGCCTACTGGAAGATGAGAAAACGATTTACGGTGGTT
CTGTAAAAGAAGTGCGGTGTCGATAAAAACTCGACAGGGACCAGCGTAGTTTAGTTTAGC
AGGCGACCATTGAACCTGATGGCGCTGCTATGATAGAAAGCTGGATAAGGTCTAGTTGTA
CTCAACTTATACACAAGGGTCTATCTAATAAACCCGAAACACTACCATCGGCCAAAGAGA
CCAGTTAAGCATATCATTTTCCGTAACCTGTACCGGAGAG
>Rosalind_3596
ACAGTGTGGGAATACTCTATGTCACCAGGACCTCATCACGATGGGATTCCGATCCGTCTC
CGACATCACGGTCATCTGGATGAGACGTGTTAAACAACACGGATCAGCATTCTCTTGGGT
AGACAACACGAGTTTCTTAAACTCCCAGAGAGCTTGAAGGTCAACTACGGATGTTACCGG
CAGTCCGGAAGGGAGAACTGGAGCTCAATACGTAGACGGTGGGGCCCCGTATACGACGTC
GACTCGAGTACTTCGTTAAAAATCGATACTCACTCTAGACCCAGACAGATCCTTGCTGTC
CAGTGTTCCTGCAACTGATCCCACTAATTCGGAGGTATAAAATAAATAGACGGAGACATC
CGACGTCCGTAACCTGGGCCAAAGACTCTAGGCCTGCGCGCTCATACCACGGATCTTGGA
ACCCTCCAAACTACCCATAAGCGAAGGCGTACTCGGGCCCGACGACGGGCTTATCTCGTA
GAAACGCCTCTCCACTGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTG
CTGTGCACCCGCAAAGAAGTGAGACGGTGAATCAGGTTCCACTTGTCTAGCATTTGACTC
GAGTTGTAATTGCTACTAACGACTCTACCGTTGTCTCCCCAAGACATCAATTAAATCTCA
TACTCTAAGGGGGCGTCCCTTATTTCACTCGGAGCACGGGGTGTAACTTTTACAACCCCA
CGTTATCGAAAATTTGCCGTCATCAGGTCCCCAGGAATCCGGGATCTTCAAGGCAGCCTT
TTTAGGATTCTTTCAAAGACTCCGAATACGGGGCCCTAACACACACATTCTGCAGTGCTT
AATGTCCAAGTCCATGGAATCAGTCTACATTGGTAAGTGGCCTGTATATCACACTCGCCG
GTGCCACGCAAGGCTAGGATCTCCTCTCTAGTAGTGCAGAAGTGGAGGAACAGAGATTTG
CTTGTCGGATAATTTGATTAGTACCCGAGGGCGGTTTAGT
>Rosalind_8773
CGATCTATTGCATTTGGAGGAGCATTTATGACATTAAAGGAACTTCTTTCGAGACATAAA
CCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTGATAGGCTTCCGA
CACGCGAGGGGAAACGGCCTAGCCTAAATAGTAGCGTCAAGCCCTGCCGACATGCAGTCC
ATCGTTCCGCTTTCGTCACTTCGCGACTATTCGAACCCGTGCTTTATTACCGTATATTCG
TGGCCGCCGCGTTAGTTATAGTACTCTTGACCCAACTAAGGTCTCGGTGATGTCTGTCAG
CCTAATCATCACAAACGTACTCGGCGTGCTTGAGCGTTATTAAATGATTTAGCCGGTGCG
CTTTGTCCCAGTTAGAGGACGATCCCACTCTCAGTTAGGATGATAGGAAGTGTATCCTGA
AGTAAATACCAGAGTACTCCGGGGCGCGCCACCGTCCTATATGACGTACTAACAGGGTTG
AGGGGGTAGCCCCTATCTCTAAAATATAAGGCTCAACGGGAATTCGGGCCCACCGAACTA
ATACAACTAGCGTTCGAAGCTACAACAAAGTCGAAGATTGTAGTTTCTGAGTAGCGTCAG
ATCTGTCGCCCCCGTTAGACCCTAGCGCTATAGTGGTTGGACCCATCCCAAGTTCATCTG
TATAGCTTTAGGACTTCGTCGTCTCGTTCCCTTTTGGTAACGTAGCAAGATTCATCAGAT
TTTCAGAAATGCTCATCACCGCTGGCGATTAACCGGGGCCGGAACACTAAATACATGCCT
CATAGTCTCTCTCCAGGCTTAGTTAATCCCGCTGGTACCCATTAGGATGATAGTGCCTGG
TTCCCAGCGCATGCATCATCACTCGCATCTACAATGCGCCGTTTGGCCGTATTTCGTGTG
ACTCCCCGTGGAGATGTATGGCTAAGTTATGGTGGTACAGCGTTGCTTACTCGGGCAATC
TAACGGTGGGTTAGTCTCAAATAAAGATTGTAGCGATAAT
>Rosalind_1088
TCGCCTTAGCGGCTTGCGACCCGGGATCATGTCATACCCCCGGGCCTCCTATCGTTATCT
ATGAGAATATTAAATCCATTGCCGACGAGTGGGTCGTTTTGCTACTCCTAACGTTGAACC
GACAAGGGTTTGCGATTACCCCGAAAGGTGGGACCACGCGCCGCTTGTTGCTGATCATTG
ACCGCTTGATACAGGGGCAAGTTTCAGTTGTTGTCGACTAATACGGTAATGACATGATCA
TCCCCCGCCATTGGGCTGTGCAAGTGAAAAGCTAACTCATGGCCTTCACTCTAGAGCTAT
ATAATAGGGCATGCAATATCTGCAGGCCGGACCAAGGCGGATATCTTACATCCCCATGGC
CCCATCTATCGACAGTCGTACTGATTCGCCCAGGAGCCGATCAGAGATAGCGCGCCGCGT
ACAGTGGGGGGAGGTCAGAACAATTCGTCATTTAACGTTAGCAGTTCTGTATCTCTATAG
TCAATGAGAGGGAATCTCGGTGAGGTGACATCCTAGGACTAAAGCAGGATACAGACTAGA
TTGGGTCTTTTGCAGGTCAGTATATAAGGTGGTTACGGTTCCATGACAGGCTTCGATCTC
CACCGGTCTCTACCGGCATCCATGCATACTACGCACTGTGACTAAAATCTCTTAAGGACA
TTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGT
GAGACGGTGAATCTTGTAATGCCACTGACGGCAGACGCAGGCAGTGTTAACGGCGTAGAG
GTGTACTTGGCTATAATCAATTGAATCGATCAGTTCGGTCTACACCTAAAGAGAGGGGAC
ACTCCTAATAATACGTGAGAGCAAAGTACTATGCTGGGTTGGCTATACAAGCCTTCGCGT
AGGGCATAGTCGAACCGTATTGGTCATCTCGGCCGACTGGGAACCCGGCCGGTCCAAAAC
AGCTCCTACGTTGGATAAATTGGTCGCTGAATGGTGTAAG
>Rosalind_1854
GAATTTCGAAGCTTGTATCGCTGGACAGGTGGAACGGGTCTCAATGCTCGAGCATTCTAA
CTTAATGTGTCTCCCTTACTTCGCGAATTAAGCGCATGGTCCTTAGTTCGCCTTTCATTT
AAATAATCCTACGGAGATATTGAAAAGGATGACACTCTTTTAGCCTATTGGTTTCGTACA
ACCAGGTTTTTCGCTAACTTGACCGACCCCTGGGTAGTCAGCTTCACTCATTGAGTGCTT
CTTTTCCATGTGACCTCTAACGGACGGGTGATCGCAACTCAAGCTAGCCCCTCCGTGTAG
AGAGCCGTGTCAGTGTGATTCCTTGAGTCGGCTACGGCCGAGCGATAGATAGATAAAGGA
GACGTTATTAAATGTGCTCACCACTGACCGAGACGTAAATTAGTAAGTTCTAGCGACATG
GGAGGCCGCGATTGTCAATCGTGACTTTGGGCCCTCCTTAGCCGCGGCTCCGAGGCACTC
CAGCGGACGAAGATATTCACGAACAGCATGAAATGATGTCACTATGATATCTAACACGGT
CCTACCTTGAGCCCCAATGTAGCGCAAGACTCGCATAACCTTCCGCACCGTACAGAGCCA
GGTAACCCTACAGGATTTCATGATTAAACCTGTGCTCAGAGTCGAATAGCCGGGGGATAC
GTTTAAGGATAGATCACGAACGTGCCTTAGGGCGCAATAACCGCACTATTCGCGCCTACC
AGCGTACCCGCTGGGGCAGACGACAATCTGCCAAAAAGACCAAAGTCCGGGATGAGTACG
TGTTGAAATCGTCCATTAAGTCAATGACATTAAAGGAACTTCTTTCGAGACATAAACCAT
AGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCCGCGGAGGCGGTCACTGC
CGCTGGCACATGCTGTAATGTAAACGCGGAGAGGCAACGTTTGACGCTTCCAAGAGCGTA
AACTACAGTCTTATTTGTTATGCACGTCGATGAGGACTTG
>Rosalind_6677
GGCCCAACCAGGTCACGTCAATCGCTCACAAGGGCCCGTATAGGGCAGAGTGTCGTTCTG
CTCTTCGAGTGGCATACGGTCTCGTGTAGTTCGTCTGTTTATGTACGCGCCAGGAGATAG
CCATTGAGCGTTGCGCTAACCATCGGCCGGGAGGTCGCTAATGTATTCATTTATAGGTGA
CCTTCAACGTAGACTGCTCCATGCTTCGACTATGTAATAGACCCTTCCTACAAATGAATA
ACCCATCAGGGAGTAACCAGTTCAAGATATGGGGGTCATCGCTCTATAGGCGTTAGAACT
CTGCCTGTTTCTGCGGGGGCACTAAATGAGCTCGTGCCTTAGAGACAGAATACCTCTCAG
CTCGTTCGTTGCTACGATATTCTTCTCGGCTAGGGCTTGACTCCTCCCACTGAGACGGCT
GAAGCCTCGGGACCACGGTTCCCCACGTGTTTTACAGGTGACGTCTACCGCTGCAGGTTA
GGATTCGAGAAGGACCCAGCTGCTATAGCTGCATGACCAGACTCTTTGTACGGTAGCCGC
CGTTTCAGCTGGTCCTTTGCGGATTTCGAGTAACTCGTATATAACGCGAGAGACGCGACT
GTGAAAACCCCGCCTACCTTCGATAAGGGCGGGTCACCCCCAATATGGGCGCGTTCGTCC
AGTAGCGCGTGGCTGTCTAGACGTCAATCCAGAAGCCACACCTGACTGAAATTTATTTGT
GATCCAGACTCAGTTAAATAGTGAAGGACATTAAAGGAACTTCTTTCGAGACATAAACCA
TAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCCGTGTTCGGCCACGCCC
GGCGATAATGTCTTCTGATAGCTGCCCGGAACCGCCAGCCATTTCACTTCTGGCTAACAA
TAACCCCAAAGCGCCCGAACTCCTACGTCGAGGACACAGTCGAACACTATAAAAACCACT
CATGTTGACCAGTACGTCAGTGGCCGTGTAACTTGACTGA
>Rosalind_5979
AACATGGTACGGCGCGGTTGATAAAACTGTTCTGCATTGATTTTGGCTAGAGGCAGTACA
TGTCAAATACACTATAAAATAACTGTGACTTCCTTGTACATAGGTCAGAACACACACGTC
ATGCGGAACCCACGCAGAGCGTCCCGAGTGGGATTCATTAACGCTCAATACGCCTTCCAA
ACGCAACAGGACAAACGATCAATTCTTCGTACCTCATCTCACATTACGCAGCACCTTTCC
CCTGCTTGAGGGGGTATCGGGTACCTAGACCGCTTTGTACTTTCTAGGTTGTTCTGTTGA
CTGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAA
AGAAGTGAGACGGTGAATCAAAAACAATGCTCAGGTCAGCCGCGTCATGCCACTCTCTAA
CACTGTTATCATGTGTTGCATCGAGATTAACTCCTGTACTGCATTGGGTAGAAGCTTCGA
CCATAAATGCATCCGCACAGCTCACCTCCTCCGTCGGATTTAGTGTTGCCCTCCACATTC
GAACCCGCTATGCCCGTTAGTCTGGCACCGAGCATTAAGCTGAGGACTACAGATGTCGTA
CACTACAGAGCGCCGAAAACATTACTGTACGGCCTACCTAATGATTCCGAGCAAGCTCCT
GTGAGCAGCCCCCTATTTTCAACCGGAGCTTGTTCTTCCCTTCCTGCATCGAAGCAGGTC
AACCTAGCCTCGACGAAGCACGATCAAACGTGCTTGGGCGGAGGTCATCAAGCCGGGTGG
TCTTAAGCTTATACGGATGCAACTGGTAGAGTGTTAACGGGCGCACCTCGAGGCAGTCCT
CCAGGTCCTTCAGTTCGAAGACATAGGTCGTAGGTGGAAAGGGAGTTTTACCTCTTGTTA
TCATAGGCAGGAGGTTCCCCCATTTCTGGCGGCGCGGAATCTACCCGGTGGTGCTCGAAC
AGATGAGCGCATCAGCGGATGTAGAGAAATTAGGGTGCGC
>Rosalind_2437
GAGTTAGCCAAGAATCCAATCTCTTTGCACAGACCTGTCATGCACATGTACCGCACGCTT
CTTATCACTGCGAAGAAACCTAGATGACCGCGGCTTCTCCAGCGCATGAATCAATTTTTT
AACTTATTCGTCGCAAGGCATTCTGTATGGGCGACTGCGAGTCACGGGTGACGCCTGTTC
TCTGACCCCGTCGCCCCCCGGGCTTGGTCATAGAGCTGCGCGGCGTACTAAAATTGCAGT
GCATGACTCCGAGCCCCGTGGACACGGTATAGATTCAACACGCGGTATACTCATCTGCGG
TCTGCAATCGAAGTAGGGGATGTAGGACCTGAATGATAGCCTGGTGCTCCTTTCACGACT
TATTCTGAACCCACGGAACGAGGCAGTGTCACACCTACGCCGACATACACCTTTTATACC
TTCTAACCGCCTACACTACCAGTTACCAGTTGCCATCAAGGTCCCCTTACAACGTTGTTA
AATGACTCACTATGACGACTGTCCATGCCTCTCTCCTCCGGTCAGTACTATTATCACAGA
TCATTGGTGCTCCAAGAGCAGCGAGTGAGTAGCTTAATCATGTGATACGTAACACGTGTC
GTAGGTAACGCAATGTCGTCTAGAGTTTGCTATGGCAAACATGATAGGGGGGCCCAGACA
TCCGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCA
AAGAAGTGAGACGGTGAATCAGCCACGGGCTAAGGAGCATCCGCGAGCAAGGACTCGGCA
AGGTACACATTAATGGGTGCTTGTCCTCACGTACTGGGAGCAGCGGAACCTTTTTTGGCT
GTACGTTCGGTCTTACACTGTAGGATCGCTCGCGGAAGTGCCAGAACTACACAGCAAGGA
GGGCAACGAGTCTGTTCCATTATAGAACGGGGCGGCTCCTCGCTCCAAGAGCAGTACACC
TTCGCATATCGCATTACAGATGTCGAGATGGCGTGACGGT
>Rosalind_5555
GCTATTAGGATAGCGAGCACAAGTAACTGGACCCTTAAGCCGCAATGGTAGGACTTTCAA
TCCTCAGAGGGCCTCTGCACATGCGCGGGCTAGACTTGACATATTGCCCGAAGGGCCGGT
GATTTATCTCCCGCTATCCTCATGAAAGCCGTGCCACTGCTGTAGCGACGCTCGGATCAA
CATCATTAGGGAAGGTCCCGGACGTACCTAGGGCAGGTGACCGAGGCTCTCTGGACGACC
CGCGAACAATTACCCTAAGGGGTGACATGTTATGAGAAGCGTTCCTCGGGCGAGATTTGC
ATGCTACCTTCTCCCATTACAGAGCCTGGGTCTGAGCAAACGGATTCGTTCTGAGAAAGG
TGTTTGAACAAATAAGAATTTACCTGTAACCTGTTATCAAACTTGCTCCTTAAGCAGGAG
GTATTTTGTGGGTGCTGGCCTTCAAATAGTCTGAACTGCGCGCTACGCGCATCCTTAAGT
CATTATATTCAAGTGGGCAGCTCCTTTAATGAACTGAAATCATCGAGTCCCTCTCCCTCA
CAGTCGCAAACTTCGCCGCATAAGACCGCATGCGCTTCGCGGTACTCCAGTATAATAGTC
TATAGTCGGGCCATACATCCGGACGACACTGTCTAATGGCCGCCCTTATGGTGCAAAACG
AGCGTGCCACCTATAACTAAGCGGATTAAACTAGGATGCGAGCTGGAATAGATTACCCTG
AGAAGTTGCGGCCTATCTGTACCAGCCAGTGTCCGGACGCTCACACCCATATCTTTATAT
CCCTTGTACGCTACATTGAACTAGGGCGTTTAAGATTGTTTACGCTCCTTCCCCCGCCAT
GCCACGAGTGCCGTATGTAACTTTCGTTAATAGAGTATCGTGAGGTATGCCAACGACATT
AAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGA
GACGGTGAATCGCCAAGTTCATTACCGTTTCTTGTTTACT
>Rosalind_9504
AGGAACTTACCCGGGGACGGGGGGGCGCCGTGGCTTGAATAACCTGTGTTGGAACCTCGG
CAGTAGACTATGCCAGTTGGCCCCCCATTACTGATACTGGACATTAAAGGAACTTCTTTC
GAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCAACA
AAACCTCGCACGAGGAAAGGTGCGTCGAAGCTTTGCCGGGGTCTCAGCGCATAGACTCGT
GACACCGGTGGCAGGTCACCTTATCGGGTATTGGCCATCGCTATATTAAAGGGTTCACAG
ACGTGTGCCGGCACCTCTGTCACGTTGCCGGTCGTCGAGACGGAGATTATCGATTAGCTT
TCGCTCGACTGGGACAGCTTTCTTGCTGGTTTAGGACGGTTCGCTGCCCAGTTCGTCACT
TAACGCCTAGTGGGTTTTTTTATGGAGTCAAGTGTTCACATACCTAATGAGCACGGCCGC
TCCTTTAGGACAAGGTCTCGAACTAAAAAACTTCCCGTTTTGAAGTTCTGAGCTTGCCAC
AGCCGGGGCGCATCCAAAGCACGGAAATCGCATGTGATTGAAGCAGGGCTTAGAGCAATA
GAGCCTGGCGCTACCGCCGCCGGCTACAACGGTCTCGCATTTCTCATATGCTGGTTATCT
GGGGGCCTCGGAGGGACCACATAACCCTAGCCGTCAGCATGATCCTCATAGTTGAGGACT
CTCCGTAGCCTTTAATCAAGATACGGGCTTAGGCATTGACCCACTAAGCTTATTGCCACG
CCGTATTGGGTAGTTGGGCAGCCGGACTAGAATCGTTTTTATGGGGCACAAGTCACATAT
CTAATCTGCTCACCGACTGCTTAAGTACGGTTGAGGGGCAAGACGTCGCGTCCTTTAGCC
GGAAGTGACATTTTTGGCGAGAAACTCTCTGACACAAAAATTTTAATTGTGTCGTGGATT
TAACTCGTCGACCGGTGCTCACTGAGATAGCCGGAGCTGG
>Rosalind_1355
CAGGTCAATAGGGTGTGGCCCGCACCAAAGACGTAATGAACGCCACGTTCGCCGGGCAAC
ACGACCTCCCGTTTCCAAAGTTAATAACAATGTTGGCTCAAATTACGCGTTAAACGAATT
ACCGAAATACGTTATGCGCGCGAAGGACGTGTAAAAGCCAAAAGAATCATCCTAACAGAC
ATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAG
TGAGACGGTGAATCCCGCCGAGAATGTTCCCATAGTTAGTCATTTGTAAAGTTAAACCGC
AAAATGAGATGGGACCTTATCTTCAGACTGCGCGGCACCAGAATACTTGACGTGAACCGG
AACTCTCGGAGACCATAGTCTGGCAGTCGAGCGATCTACACTTAGTATATCCCGCAGTCG
GACCCGGTGCCTACCCGGTCCCCTTAGATGGGCACTCGGGTGCGACCGCCAAGCCGGCCA
GAAGCAATGCAATTCAGATCATCACCCACAATGATCGCGGCTTGAAAGTCGAATCGCCGG
ACAGCTAAGCAGAACCGTAAACGCGAAATAGTGCTCTTGTAGGTCACATCGTTAGAGGAA
CAGCCCACCCACGGAACTGATAAATGTATCCGGAGCATAACTGTAGCAACGTGTTTTTAG
CAATTAATTCGCCTGGCGCGCGTTAGAAGGTTTTCGGACCTAGTCGACGAGCACAACCCT
TCTTGACCGATGGACCAGTCAACTGTTTCCCCACCACTACGCCAGCATTCATAATCCTGC
TGAATCACTACCCGCTGGTTTAATCACGCCAAACTCTTACCTGTAGCTTCCCGTCACAGG
GTTTCCGAACGTGAGCAGTTGACCCCTATGCTATCAACCTCAGTTTTACCCGGCGGACCC
CATGGGAGAGCTACCCGGGTGCAACGGAAGTATCTTATAGCTACCTTTCTTGTAGATAAC
AACATTGCTTTATGCCAAATGGTATTTGCTATGCGATCTA
>Rosalind_5113
AAATAGGGATCTCTTAATGGTACTAACGGTGGGGCATGTATTAATCCGATCTTGTGGTCG
GCGTTGTGGCGCCAAAACCCGGAAAGTGGGATTTTACGACTATGAATTGTCTACGGTGTA
CGGGACGGAGAGTTGAGGTGCGGCTAATACACCAGAGAATGTGCTCCTCTATCCGTCATA
GATGACGGCGTATGCGCAGAATCCTCGCGCCCCCGAGTAGAGGCAAAAGCACCTGGACTA
CACGGAAGCCGTGTGCGGGTGCTGAGCACAATGAACAGGAATTAGGTTTGTGCTCAGGCA
AGATGGTCCCCGATGGGTGAGACTGAGGTTGTCACCCCCGCGCCATGCCTAGCTTCGCAC
GGGAGGTAGAGAGATACGGTTTCGCGTAAATGAACGCTATTCGATATCTCTTCGTGGAGT
AGGCCAGGAGAGCCGCTTGACCAGATCCAGTGCGTTGCACTCCCGACATTAAAGGAACTT
CTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAAT
CTCCGAACTGTATCACGCTAAGCGACTGCGAACGATTCGTGCATTTAAGGTGGCATGCAG
TTTGTGCGTACGTACCCTTTAATACGGACGTGACCCCAAACGCGAAGAATAGTTAATCTG
GAGCAAAGCTCTATTGACATGGCTGTTCGGTGAACTGGCGATCTACCCGGAGTCCAGGAT
AACCTTTTAGAATACTCTCCCTGTCGCAGGGACTCACCAATGATTGAACCCGTTAACGGT
CTATTGGAAGATACGGGCTGTTTCAAGCCAGGAGTGCAGGCGTAATAAAAATACGGCGTG
ACGACGGGCCTTCATAGGACTGCGAAGAACGGCTTTAGGAGTCGATCCCGTTGAATGGAG
GGTGACCCAACGTATAGTTCGCCAAGATTGGTGTGCTCACTGCGGATTGGTGTTCACACT
GAATGAGTCAAGAAATCGCGCTAAAAAGTTACTTCTGGAG
>Rosalind_0499
GTTCTTAAAGCAGTCGCCAGATGAACAGCTGGCTAGAGCGAACCTTGGTCCTAGATGCAT
GCATATAGTATTGTAACGGCCGGGCGGTCGAGTTGTGACCTAATTCTACATACAAGCTTA
GCCTTATACGTGCGTGCACCCTCGAGTGCTGGACTCACTTCATTGCTCCCGGGGGCGAAA
TATCAAATCAGTTACGAATTCTTAACGAGCAGTAACTGTTCTGCGGCCAAAGGTCTTCTA
CCATAGGGAGCTGGTGCAGGCTCATGTAGGCCCTCCTTAGCACCGAGATAGATACACGAA
AGGCGGCATTCGCCTCGTGTGATACGAGGTATAGGAACATAACAAGCTCATTAATTGCAC
AGTTGTAAAACCCATACGCTTAGTTTTCCTGGCCTAATATACCTAAAGTCTCCCAGTTAG
GTGTCGTTGTTACGTACCTTTCAGCTCCTTATGGCCGGCCAGAACTGTGTGTCCGGACCA
AAATCACACCCTGAACACATTGACCGCGGCAGAGGTTGTGAGTCATCCTATCGTGTGTGC
GCTTTGCCTCCCAGGGGCAGTATTTACGAAGGGATTATCACGTATTTCCACCTCGGAGGC
CTTCAGTTAGAAAACGTCGATACCAGCAAATTGCTGAGGGGTCAATCATGCCATGGCGCC
CTCGGCCCTTCATGCGAGATTCTGACCTTCTCACGGAAACCCGGCTCCGTACAGTACTGA
ATGCGTCTAATACAGGATCCTCACGTAATACGTATCCCATCGGATCATACAACCTCGTCA
TACGTGGCAAGTGGCAGTTCACTAGTTTACTCGTTAATTCCAAACGGTATGCGTAGACAT
TAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTG
AGACGGTGAATCTACGATCTGTGCTGCTGATACCCGCCGTTAGTTTAGCGAGGACATCTT
TCGCGGTGTCACGCCACGGCGTCGGCGCGCTTATAATTAG
>Rosalind_3559
TCATAACTTTCGCCTCCCGAAGACGGTTGATTATCTTATGTGCTCGTTGCTAATTTGGGC
ACCACAGGGTTCACAAAAAGCGCCGTCTCACGACTACCTCAAGCGCCGTTCACACGTATC
TACCGCCTGCTCGAAAAGCCTTACTGATCGCCAAGATACTTATCAATTATAGATAAAGAC
AAGAGCTGCCCAGGGTGGTTGTCAAAGAGCAGCTTACTTCATTTTCGCACCTCACTGACA
CGGAGCGTTAATGATAAGATTCTCTACCGGACCCTCGAACAATAAAGGAGCCTCCGGGAC
TCATAGGAAGATGTTGCTCGTTTGTATGGTGCTACGATGGGCCGCGCTTGGTATGCGCCT
GCCTAAGGGACATAGTGTCCCGGCTCTTGGACTCGCGCAATTTTGACATTAAAGGAACTT
CTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAAT
CCATGCAGATGGAACCTCGGCGTCATACGTCTGTTTTATCAGACGACTACTCCCTGCCCT
CCAGTGAAGAAAACAGGTAGAGCTGGCCCCTATCCACGTCCTGACCCGTCCCGCCGAGAC
ACAGTATGCACCCGACGTCGGGGGGCGAAAAGAAAAACCATCCGAAGAGCGATGGAGGTC
TCAACCTACCCTTTCATAGGCCACTCGGCCTGCGATGAAGGTGTGCCCGCGTTGGGCGGG
AACCGTTAAGCAGCCTGTACTGGAAGGTGTTCCTACAGCACATGTCAGCTTTACTTAGTT
ATCAGCCGCATAACCGCTTCCTGCCCGGGACCTTAAGGAGCTCTCCGTACGCACCTATGA
TCCGGCCCTTAAAGTTTGTTATCGCCACCAGTGCAAGCTTAACACATCCCTCTGTAGAGT
CTTAGATCTTTGCGATACCCCAAGCGACCGCAAGAGTTCACAACTACGAACGTAAAACGA
CATGTCGCTGTCTAGAGCGGAGAATCTGACTAGGGTTTGT
>Rosalind_6988
GCCCAGGCGCTAATTGGCAATTATGATTGCAAACACAAATCTGTTGAACAGGCCGAACTG
AGTGGATGTTTTGAACTCGGTAGATGGTTCCTCCCATTCTAGACGGGTTACGACACGCCA
GTATCGTTCATATTCGATGTCTCCTCTTACAGCTTCGTGATCGTGACAGAAGCCTGGGAC
GCCGTATGCGTCTAAACCAAAGCAAGCGAATGGACGGTGCTGGAAATAATGGAGCTTCTT
CTTCTCATATTTGTAGTTAGCTCGTGTCTGTTGGCTATTCCAATGCCGAAGTCTTGTCGA
ATCTCATTCTCTAATCGATGCCATATTAGCAGTAAAGGCACTGTCGGGACAATGGGAGGA
TTAGGGTTTAAGATGCTGCGTTTTATCTCAAGTATTTAGCGGTTAGGTGGTTAAATACTA
TTCCCCTTACGTCTACTAGACGCGGAACTTGCGAGGCCTGCGGCGCGAACGAAAGTCGTG
CAGCAGCCTCTCAATGTAACCGGTATGTCCGCGAGCTGATTCCCTGAAGTAAGTGTTGTA
TAGCCCTAGAAGCTGTGCTCTAGTGGCAAGTAAATAAGTGCCTCGAGTAGCTATCGCATT
ATATCTTGGCAGATGTGGGGTCTTCGGACAAGTGAATACACGAGTGTGCGACAGGCCACG
CGAAACGCCTCTTACCAACGCACCATAAGACGCGCAAGAAATGATGTTGTCCGACATTAA
AGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGA
CGGTGAATCACCGTTTCAGTAGCCACTAATCTAATCACGCTAGTAACTACTTTCACGAGA
ACGGACTGATGCTGACTATCGCCGGTAGCTACAATCGTTTGCGGATAATCCTCGACTATC
CTTGTCGGATGATCCGGATGGAGCCGTTGATGATCCGTGGGCGGCGGTGGTAGTACCAGC
GACGACATCCCTCGCCAATTAATTAATTCCTTCTGCCACC
>Rosalind_1273
AGTCGCCGTTCTTGATTAACACCCGCACCGTACCGTTTGCCTATCCAACTATAATATGAT
GAACTTCATCTACGATTTGCCATGACAAATATACCCGCCCTAACCCGGGCCGAAAGCTTG
GATTTCCCCCATCTGCTGAGCAAGGGTACTACATAACTGAAAGGTATTACGGAATCTCGC
GGAACTGAGGGCCTTTTTTTATTCCCCTCTCTGAGATTGACGAAAGTGGTGCGAAAGGCA
GCGTGGACAATTCGGTTGCATCACTTTTTTCTGTCAATCTGACTACGGCATCACCAATCG
CTAGTAGCTTATTGGATATGTTGTGTCTCTTTAGAGGCGCAAATCCAGGTAGTTGGGTAA
AACTTGCCCGGTCGAGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGC
TGTGCACCCGCAAAGAAGTGAGACGGTGAATCTAAGAGTCAGTGGGTCCCGTTTCCCCCT
GAAATAAAAGGTGCAACCATTACGCACGAAAGTAAGGGAATAAGCAACCCGAAGAAGGTA
CTTGATCCGTTGTTACGGTGCTCTAATCCGTCGGGGCCGGAACGGTTCGCGTGCTCAAAC
AGGATTTTGACGCTGGGTGGCACGTGCGGCTTTTAATGTTACTACTACATGGTTCGCCGT
ATGGAGTAATGGTCGAGTGTGTGATTAGCAGCAGTTAGGTATAATTCGGTGGATGTCGGC
CCGCGATACGACCACTAGTTTAAAGTTACTTATTGCACACCTGACAAGAATTGTGGTGAG
CACTCAACCCAGGCACATCAAATGCGGCGCTACGATAGCACTTAATTTCCTGCGCCATCG
TGAGCCAGGTAAAAGGACACAATCGTCCGGTAAATTAATTACGCATACTACGTTATTATT
GTGCGTACGGCTGGGATGTCGGCGTATCACCGACTGTACCTACTTCCGCATCGGCAAAGG
AATGTCTGCGTCTGTTCCCGGACCGCTCTTGTAATCGAGG
>Rosalind_3167
TCAGGCATAAAAAGCAGCCGGGATGCCGCTAAGGGGACATCCTGAGCCCCCCTAGCGCTC
TTGCCTGCGACTTTAATTGATACGCTCTGCCTTATCTTCTCTTCTGGGTGAAAACTGTGT
CTGTGTACTAATTGTCTGCGGAGGCCCGGTGCACGGCAAAGCGGTGTGCGGTATTTGCGA
GGTGATGACCTGAATGCCACCTCCCAGTGACTGAAGAGCCTTAACTTGCACACGTCTCGG
CCGTTTGCGTTAACTTCGTACGCGAAGGTTGAGTCGACTACAAGGAGTGACTATGCCAAT
CACGTCGAGCCCTCCCCACAAAATAGTCAGGTTCTGTGCTACATCTGAGGAGCACTTCTA
ATTTGGAGCTGAACAATATACGGTACGTAGTCCGTCTACGAATACACCTCTATTTTACGT
CTAGCAGGGAAGTTGAATATCGATAGCGGCCGCTATGAGCAATTGATCAAGTAGCAGACG
ATAACATATGGTCCTCTTACCAACCTGACACCTCGCCCTACGCTGCGAGACTCAGGTCCC
ACGCCGTGCTATTATTGTGTCGACATGTCACAGTGTGGTCAGAATGCAGTAATGACCGTG
CAGATACAATAGACAATTCAATCATTGATTGACGGTTGCGATTACACTGCGTCGCTGCGT
AGCTTAGGCCCGCGGCAACAAATTAGGGTCAAGCGTGGCTCATTCGGGATCGAAGTCAAC
CTACATCAGGTAATGGTTTTGCTCCACCGAAACTGTAGGCGCGTAGATCCGCCAGAAATT
GAACTTGAGATACGAGCGGCGTAGAGAGCCCGTGACGCATCTCCCGGGTTTAATGTCGCA
CGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAA
GAAGTGAGACGGTGAATCTCCCGCTGCTGTGGGCGGATACCCCAATATTCTACCCGCCAG
GCTCGGCGGACGACGTGCTAGAAGTGTCGTCTACCAACAA
>Rosalind_4491
ATGTTTTAGATCGTGTTCTCAGACAGACGGGTGTGGTCGACACTACGGTCTTGACGTCAT
ATTGTGATATATGTGCGTCTCGGTCCACGCCGATTTATTGGATTCTCGTATATTACAGCG
CAAGTCCATTGACTGACGTATGGGAGCATCTAACATCAAGAAATTGAAGAGTAAGGTACT
AGAACGGAGTCGAGTAGACTGAATTTTCGATTACTAGTGCAGATAGTCAGCAGTCACTGC
ATAAGGCATTGAAGTTGATTCCGAATGGGCTCACGGTAGCGCCCCTCATGTTGTTAACCA
CCCTCCCCCGAGTAGTGGCCCCTGGCTTATACTCAGGTGCATCTCAAAAAGCCTGACCGG
TAAATTTAGGAAACGGAGCACTCCCGAATGAGCCTAGAGTGTTTTTAAGAACTTCCTCAG
AGCAACACAACACGCCCCGTATCCAGTCATATAGTGTGACAGGGTCTCACCCAGTACAAT
TTGGTCTACCTCGGGACATAGAAAAGACCGAGCGTTCGTCAAACACCGCAATGCCTACAA
TGTTCCAGTGTACCGCGATCCTTATAGATACTCTACGACGGAAGGGCTTCTAACAATCCA
CGTCGCTAGGGCCCAGGCCCGGCCTGAATCCTTATCCCAGAGCAAATTCCTCGAATGGTG
ATTTGGGCATTGCGAATCGGACCAACCCGCCGCAAAGCTTTCGTGGACCAGATTGGCAAA
AGACTGAGTTATTCCTAGCAAACCCCAGTCGGATCAAATGGAAATGTAAGCCCGGGTTAG
GACTCTGGTTTAAGTGCTCTGGGAGATGACGCAGGCGTAACATGGCCACCTTTTTAAGAG
CGTCTACGTTACCTAAAGATTGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAG
TGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCCGTGGACCTGGTGGAAAGACCC
ATCGCTAAGTTTCGTGAGGGTTCGTCTTCGAGACTCGGAC
>Rosalind_0292
CACATTAGTCCTAGGAGATAGCATGCCATCTGAGGTAGGTTGCGGTTACTACTTAGTCCA
CCGGTTTTCTGGGCCGGCCCTAGAGCGATGGCAGTACGGGCCTAGACTTCCGAAAGGAAG
TCAGGTATACGATGTGCTTGTCCTCGGTCCACGTCCCTGATGCTTGGAGGGAGCAGCCTC
TGCGGCGTTTGAAGCTACTACCATCGCGGTAATTGGTAAGCACAATGATCTAACAGAACT
TGCGCATCACTGGCTGGTAGGACTCCGTTTACATAAGCCCTAGCACTAATCAGCGCGCAT
CACTGCTCCTGCGCCCCGAACTTTTTCATAGGTTCCTTACGCCACTACTCTCTTCCTAGC
ATTAAAGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCC
GCAAAGAAGTGAGACGGTGAATCTGGGTATACATAACACTAGGTCGAACCGTCCATAGGT
GTATCGGCAAAGACCCGCCCGCTCCGGGGACATATGCTGTTACCACCGACCGTAGAAGAA
AATATTAAGAATCAATTACATTCCTAAAGTCCGACACTACACCGAACCACTTAATATAAG
CTCGCACGGTAACTCAAACGCCGGTCGTCTTCCTTTCGGGGCTTAGATTACGCTAAAAGG
CTCAAAATACCTTATGGCATGCAAGGAAATGATGGACACGTCCACCCGGTCTCCCTGTTC
TACGAAATAAGGGGTAAGACGACGTCTCGGAGGGTGCCCGCCCCTTCCCCTATTCCGGGG
CGCTTCGCGGTAGTGTAACCGTACGTGAGTTCTTGCAGTCGCAAGCGGACGTCATTACTC
TCCAGGGCGGGACCACGCCTCCACCGTTGAACCTTCTGTGGAAGTGGAGCCCAATATGAG
GCGTGGGCGGGGCCGCTATAGATTACGTGTAGCCCGCAACGACAACTATCCATATACACG
TGTCCTTACGTATACCAGAGAGAAGGATGTCTATCTCGCA
>Rosalind_3670
GTGTACACGCTTATAGACTCCTCCGTGAAAAAAGCATAAAGTACAGGATCAATAGATAGA
TTGAGGGCGTTAATGAAATCTTCAGACCGGGCTGCTTATCCACAAGCCTCGATAATGGAT
ACGCAACAGTTTTCGTGGGTTGAAGGATAGGCGAAGGACCAAGCTCAAAATCGGGTGAAA
GCCACCTGCTTATCATAACTTGAGAAAATATAGCGAGCACTTGCATATAGTGGGCCCAGC
TACCTTTAAAAACACATGGTTTATAAAATCTCGCATTCATCTGAGTGTTTGCCATGGGAT
GCCAGAAGTGCCGATTGGGCATTGCCCAATCGCTGGGTGAAGGCAGAGATGGGGCGCCTG
CGATGTACGAGAAACCTATATTTGGTAATATATGTTACCTCTGCTCTATTCTGCTTCTAC
ACTGGGTGTTGCTAGGAACGATGATATGTGAGTCCCCTTAACTCTGCCCCGGTCGCTTGA
ATAAACTCCCTAAGACGGACCTGCGTAGCTTACCTGTGCTAATTGAAGGGAAGTTGCTTA
ATTGATCTTAGGACAGTTTAAGTGCAGGACAGGGGAATAGTATGAAGACCTATGTTACAG
GTCCGATGTCCAGTATAAAGGCCTGAGCCCGCGCCCCAGGCGTCATGGTAGAAGGCGTTA
TCCCGACTCTTAGCTTAATTGGGCTCGTTGCGTTTGACATTAAAGGAACTTCTTTCGAGA
CATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCGACATGCG
ATTCGAGCCGACAGAGGAGCCTATGACGGGAAACTATCTTACAATCCTTAATGCAGTAAT
CCCTTAGTGGTTATTTAGGGAATCAGTACAGACTTTGTAATAACTTTTCTAATCCTAACG
ATCACCTAGACAGTGTTTGCACAAGCTTTTCCTGGGACATTCAAAGTTCAGCGGCCTGCG
TCACAGAAGGCCATCAGGGGCTTTATTGTAACGTCTAGGA
>Rosalind_5853
CGCCCTTTGGGCAGTTCGAATTAAAAACGATTGCTCTGCCTCCCTCGCAAGGGAACCTAG
AGCGACTGCTCACTCAAAGGATTCGTCGCAATAGGAGTTTGCTCAGGCTGAACAAGCGGG
CTAACGATCTTGTTGGGATAGACGGCAGGAGCAATTCGACGATACTTTATCCTTGCTGCT
TCCGGACGCCCATTTCGATGAATAGGGACCAAAGTATCTTTATAGCTTAGTAGCGACATT
AAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGA
GACGGTGAATCCATACGATGACGCAAATGCGCGGGTATCGTGCGAGCGAATTGTCTCTTA
ACACATACGCTCACAAAGAATGGGTACTAGCGCTCGGCTACTGTTGACGAATTAACTGTG
TTTTAGCAGACAGACTGTGATCCTTAATTACTATTTTGGACACTATGTGCTCAGCGTGAC
TCTTCTTCACTGATGAGCACTCGTGAGAGTGGAGCCCTGGCGCTATTAGTTCCGCATCCC
ACAAGGAAATCATTATGACTGGGAAGGGCCCGATTTTATTGTCTGGGGACAGAGCGTAGG
TGTATAGTAAAGCTAATCATTACAGTTCCAAAACTGTGTTTACTGGACACCTACGAGCTG
ACTAAGTCCTGCCTTCCCACGGCTTTGTACCGCCGAGGTTATTCGCACGTAGTAACTACC
AAGGGCTATCGTCCGTGAGTACGTATGTCTTAGAAGCGCGTTTCAAGAATACGGTTAACA
CTCAATATCAGTTGCGCCCACCCGGCTAAGTCATCTTCAAAGACCCAGTGCTATACTGAC
CGTGACAAAATGGCTACTCATTATTTTACATAGTGAAAGTCCAACCCCATCTAACTCACG
TGCTCAGGTTGGGTACTTTTCAGGGTTATGTACGCATCCCAAAGTAACTCTATAATCATC
ATTGCGTAGAAGTAAACATGGGACTAAATAGTCTCTTGCC
>Rosalind_8594
GATCTTAATGTCTGTGTCTCCCGGAGATCGCGAGCTTTTCATTTACATTTATCGCAGTAT
CGGGCCAGCACAACATTGCTCATTTACTAGGAAACCCAAACTAGGGGGTAGATACGCTGC
TGATGATGCACGGAGCTCTGAATTAGACCCTGGAATGCTCTTGGGTTCAGTAGGGGGACT
TTTATTGCGCTCTCAGGCAAACCTTCTACGAAACTTGCGCCGGCAGGTCTCTGAAAGTTG
TACTCATATTATAGGCGAAACACGAAAACACCCTGAATTGCTTGTGCTCAGCGGAGTGAT
CCATATTGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACC
CGCAAAGAAGTGAGACGGTGAATCTGGAAATTCAAGCTATGATGCTCGAGCTGCCCCGGG
CACTAACTTTTGCGCTCCAGCTGAAGTACTCGATACAATGCGGCTCTAGCTCGGTCGTGG
ACTGCTCTTTTGATAAAACCTGGTAGCACCATCAGGGGCTCTTTCTCCCCCCTGAGCCGA
CGCGCCCGGTTGCCGGCCTGACGGCAGTCGGGGGAGTGGCAATCTAGCCTTTCGAGCACC
CCGAGATTCGTGTTTATAACCAAGCGGATACGTCCTGAAAAATCGTAAGTAACACGCTTA
GCGAGTTCTCCCCTGAGGGGTATGTCCGGGTCTAAAAGTATAGCGAAGTAATGGACGCTA
AAATTTGATTCGCCGCCCCTCAATCCCGCGAGTGATATCCCGTCGAAAGCTAAAGCCACA
CCGATCGCGCTCAGCTAATCTCTGGCAGACTGAACGACCCCTTTCTACCTGGCCCTTAAT
ACACTGAGCTCCCAGTAGCAGGTATTTAGCCAGTTCAGTATGGGTAACGTGACAGGATTG
CCCATCGAGCTTGGAATCGGCCATTCATAAACGTGACTGTCCTCAGTCACAACTGAATGG
TTGCGGATCAATGAGCGCTCAGTCGAACGGTCGAATAGAC
>Rosalind_4469
GCCGACACGTTTCGACGTCGTGATCCATTTGTAATTACCGGCACGAGGCATCCGAGTGCC
TTGTATAACACGGTGCCCGGCATGCTGTATGTCACAACTTTGTGCCTTGAAGATCGGCAT
GGTTCCAGAACGACGGGGAATGATTACGGCATGGGGCCCACAGACCGGAGTGTCAGTGCA
GACATGATATATCGGATAAGTCGAGGCACGCTCTATTCAAAAGCGGCCACTCAGAATGCC
AATCGATAGGAATTGTGTCGGATAACTCGCTCGAAGCAATTTTGGACTATGTACAGGACG
AGCATGCTCGCTGAGAATCACTAGACTCTCCGTGACGATGCACGCTAATGGGGGTATAAC
TCGTTGTCTAGACCATGGGTATGGAGCTGAAGTTGCCCGGCGTAGCATTACAGGTCGAAC
CCGGCCACCGTCATCTACTACGTCACTTATCTGCTTGAACCGATTAAGGGCTACATTACG
TGTGCTCGCTTTCCTAATGCAGACCAGTACTGGAGGTCGAAGGAGAACATGGTTAAGTTA
AAGCATCGTCAAGAGAATTCAGGGGTTAATATGGCTGAAGTGCAGCCTACCAGCTTGGTT
ATTACCATTCGTATTGGCGACGGAACTACGTAGTGGGATACGCATGTGCGCACACCAAGG
GACCCGTTTGGGTGAATTTTGCGCCTCGCACTTATACGCTATTAAGTTCTGGAGTCACGC
CCCACTGCTTCCCGAACGACGCCTACCAACACGGTAGCATGGGCGAACTGAGCCTATCCG
CGTAGTAGCTCTGTCTGAGCACCGGAGAAATCTAGCCGTTATTGCAGTCGCAGATCGTAC
GCTTAGGTTGTCAGACCTTTATCCAACTCTTCTGAAAAAAGACATTAAAGGAACTTCTTT
CGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTAA
CTGCCCTGCTGTACTATATGGCTCGCTATGGCTGTTTACA
>Rosalind_8480
TATGGAAGTAGGACTGTTTGACACGGCCCTTATGCGCGTTCGAGGATAAATGACCTCGAC
CTGACACCCCAGCGCCGGTTATTACTGTTTTAGGTGTATATTCACTGATTGAGTCTGGTG
CTGCCATGTAGAGTAGTTCAGATTGGAACGTCCGCTTCGAAAGCAACACCTATAGCTAAA
ACTCACAGAATAGTTGTGATGTACGGCGCATATTCCGGGGTTAGTTATCCACAGTGTTCT
AGGTCTCAACGGCGTCTCTTCTCTCAGGACCAGTGTTTGTGCTGCGCCCTGCTCAAGGGT
AAAGGGACTTTCGTTCGATTGACCGGAGAGAGTGCATGGCGCAAGAAAATGGCACGATGA
GTGGCCCCTTTTTATTCAATACCGAGTCGCGGGTTCTTTGGTCTCGAGTAAGCGACATAC
GCATAAGCGACTAAAGCTGCGCGGTGGTCGTACCTGGAGGCGAGTGAATCATAGAGCTTC
GTTTTGTAGTGGTGTTCTCATGATGCATCGATCTGTCCCACCAACACTTCACGTTCCGGC
GAGCGTCAGTAGAAAAGTGATTTCCTTGCATTACGCCATGTTCGGTCCACTCGCGGTGTG
ATTCCAAACTAGGTAACGTGATGTACGTGACATTAAAGGAACTTCTTTCGAGACATAAAC
CATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTATGTCTTAAGCGGA
GCCCCACAGTCCACTATGTGTGCAGGCCTGCTTGGCGGGATTCATGATGATACCATTCGG
CGGCCGGTTCGCACCGAGCCAAAAAGCAATGACAGACATGGAGGCTCGCTCGAGGGTCGA
GATGACGTCATGCCTGGGCTAAATTTTGCATCCGGGCGAGATGCCGCGTGCAGACCGAAT
GACGAAGCCGGACCTTGTGACCTAGTTCCATTCATTTCTAGAGTTCGCAGACCTGCAAGA
CGTCTTCTCTCATGGAGGATAGAGGCAACTTAAATCGTGC
>Rosalind_8137
ACGCTCTATTCGTAGGTGGTATACAACTTCTCCAAGAGATTATGGTATCGCTCATTGATT
TTACGAGGAGGTCCTCACCTTCTCTAAAATAATGGGGGGTCGAAGCTAGTTGTACAGTTC
AGGCCGTCGGGGTTCTGGCTTTACAGGCAACCGCCGACATTAAAGGAACTTCTTTCGAGA
CATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCATTTCACG
TCGACAGGAACGTCTCTATCAATTCGAAGACGTTCTGGGTTTATTTTTGGCAGAATAGGC
CTACACAGCGGCTAGCTGGCACAGCCGCTGGTTACTTGGCTACATCTCTCTACTGCCACT
TCTAGCCTGCAACCATTGATATAAAATATTGATTTGATGTATATTGGACCGAGTTTAATA
GAGATTAGATAATTCTCTTATTAAGTAGCGCCTCTGCCCCAACATACCTGTCTGAGAATT
CGTCATGCACTATACTTACAATCCTTCTCTCTTCATCCATGATTTTACGTGAGTGAATTT
ACCGCGGTGGTGGGATACTTGAGGTTCCAGGTTCTGAGGCCGAGCCGATTCTGCGCGGAC
GTCCGCTGATCGCGAGTCAGTGGTACACTCTATCATGTCGAACTATTCACCCCGTTTGGG
CCTGTTAGTAACCCTCCCGGACCTTCCCTCAGATAGACGGCTACATTGCTAGCTTTCAAG
TCGTCCGCGCGCTTACCTCTACGTAACCGTATGGTAGTTGTAGCCGGCAGAGGATCCCTC
CCTATTACCTCCCTATCTCTAACGACTGGCGAGAGATGCACGCCGAGCGAAATGACCGTG
TACAAGTAAACGTGCTGAGTCTCGGTCTCTGAGCCTAGATAGATACGGCGTCAGTACAGA
CGGTCCGTCCGTCCATCGATGGGACGCACCCAACTCCAATAGGTGTTTCCGCCGACATGG
GTTACGGGAAAGCTTAGCGATCGTGTGGGATAGTTAAAGG
>Rosalind_5957
AGGAGCATTTTACCTTAACTGCAACCTGGTCTGTGCCCAGGTAAAACCTGATTGTGGGTA
GCATTAAGGTAGGCATCCTCTCCTGTAACAGTAGACGCAGGAACTTTTGCATGCGTAAAC
GTCGACGCGCAGTCCAATACGCCGTAGGGTCGCACACTGGCTTATATGTCTTGGCCCGCC
TGCACAGAGGTAGAGCTTCGATTACTATGGCGTCTTTCGCACAGTTTTCTACACATACTA
CCAACATTCGTTACATAGTGATGCGCTCCGTCCTCATCCAAAAGTATAGATGCACCCTTA
TGAAGGGGTGTCTCGTAGTATGGTTTACAGATCACCGCTGTTACCCGGCGTGTTCGACCA
CAGTATCGTGCCGTTGTGGATGCATATGGCCCAAAAGAACATATCGTAAGAGACGAACTC
CTAGATTTTTTAGGGCAAACGAGTGGGCAGAGGGGCAAGCTGTGAATTCCGCAGCAACCG
TACTTCATGCCCCTGGGGCCAGGCAGCGTTCATTACGTGATTGAACCCCTGGGCTTCACA
TAGTCCGAAAAATTTGTAGCGGACCAGCTTGTCACCTGACTGTGCTCGACATTAAAGGAA
CTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTG
AATCTCCGCGTGCCAGTCGTTGGTTATCAATCCGAAAACCGTATTTGGGGAACATAGACT
GGGTAAAATCCACTTTGAATTGCTTAGACCCCAACGTGGTGGTATGGCACTTTGTGCTTC
CGTTGGTGATGACCGGCCTACCTGTTTCTGTACATAATTCAGCGGCACACTCGGTACAAT
ATTATTGAGGGAACTAAGTTATCTGTTCTACCACCTTACTCTTGTATGCGAGAAGAGTGT
CTGTTAACGTCCTTGCACGCGCTTCGCCAGCCCAAGAGATTTGACGTATCCGGCACGTTT
GGCAACGTACGTTGTCCGACAGAACTCCCTGAAGAATTTA
>Rosalind_0370
TTATTTGATGCTACATGACGGAACGATGGAGTTGCGCACGCGGCATTGCAACGTTTTCAT
CACAGATTGCCAACCATTGAATTTTAACGGTCATATTCATCTTCGCTTAGTAGCTGGAAG
TTTTTAGCCTCTTCGGCCGCAACCCTAGCCTGCGCATTATCTATTCTCTACTGCACGTCT
CAGGCGGGCCCAGTCTCTGCGATTAACAATACAACTAGTTGACTTTTTACCGGAGTCCGT
TAGTTATCTGGCTCAGTAAGCATGTCGCGAACCCTCGTCGGATCTCAGAAGTACAAAGGG
CAGTGGTGTATTGTTGTAACTTGTCGTTGCGTTCAACGTAATTGCTTCGGGGACCCCTTT
GACCTTAACATTTCTGCGGGAAATATGGCACATGTTCTGTGTCAAAGCGACATTAAAGGA
ACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGT
GAATCTTCGTCCTGGACGCTTCCTTCAGCGCCTGGTTGAGGGTCGTCACGTAGACTCTAG
TACCTTAAGCGCCTGTATACGTTGACCACATCCCCGACACAGACCCGGAGTGCTGTATTA
TACATTATCTTCCGAATGCGAGGCTTTATAGGCCAATCTTGGACGCACTCCTCCCCTCAT
AGCTAGTTGGAGCTAGGCACCTAGATCGCCAGCTCGGTATGAAGTACTGACTTTCATTGA
GGGACATCGCGAGGTCCACTGAGGGTCTCGTGTTCCGCGCCCGAGTGTCTGTCTTTACTT
TTAAATGTAGGTTTAGAGTGCTAGATAACTATCCACCCTTACCTCTCTCAAGCAGGACTC
TCGTAACACCCGTTAAGCCCATTGTCAACCTTACGGTCAATGGCCGCTCTATGAGTGGTA
AAATGGGAGAGAGGCTACGAGCATTGACCCTGAAATGGCCCTAGTACTCCCTGTCGGATG
GACACCGCTGTCTCCGGATGGAAACTCATACAGTTTCGCT
>Rosalind_7980
AGCCAGTCTACTTGTTACCTAATGCTCGCGTGTTCGGTTTGACGTTGTGGCTGCGTGTCG
ACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGA
AGTGAGACGGTGAATCGCCTCGGCGAATTTGGTGGTGCCGCACGGCAAGGGACCCATATT
CAACATGGGCCGGTTAGTCTGACTCACGGCTAAGGTATCTTATGGATGTCGGAGCTATAC
CTACCTACTGCTCGCGATTCGTCATCATCCCTAATTATGCGCTGGCTGTGCGCTAATAAT
TGCAAATCCTCATGCGAAGCACGATTCTGGGGGTTCAGACTTCTATTTCACAAATGCGAC
AATCTCTATACGTGTTCCCGATGTAGGCTCGACCGTTCCGGTGCCCTTCGTAATGGCATG
GACACTTTGTGCATATCCGGCTCGTTTGCCATTCCCAGCGGATCATTGACCGGGTCTCAC
GATCACCGAGGTTGATGCGGTTCTATCATTAGTTTAGCATTAAAGGGCAGGGATCCTCAA
TTCCGGGTCCGTAACAACGTCGCATAAGCGTTGACTGCGAGGAAGACCACGTTCCGGTGC
CGAAGTGCTGTCAATTCGTCTTTACTCTTTATCACCATGGCTTCTATACTCCCTGATCAC
GATCCTCATCCTAGCCCCCCCACGAGAGGATGGACATTGCCACCCATTACTCACTAAGGA
GCACCGGAAATAGGTGAACCTCCGCGATCTCGTCCAGAGACCGTAGCCTTCTTTGAGCAC
CCTGGGAAGAATCCTATGGCTAGTCTAGCTCAGAACAATTTCTTCCGATTGTACCTTCGC
CGTTGAGCTCCAATTGTTAATAGGTAACGTCCCTGAGTCTCAACAGGCATCAACGATCCT
CGGCCTACTTGACACGCTCATGTTTTCTACGTAAATCACGTCTACACCTCTGCACAAGGT
TGTCTGGGTCCATTACTACAAATACTGTAAAATGCGACCC
>Rosalind_3796
ACGGTAGGCTGAAGTGAATCTGCGGAGCTGGAGTCCAGCCCCAGACTCCATTACGTCTCA
TATCACTAACTTTAGGAACTTACAAGGTGCTTCGACGACGCGTAGCCGAGCGCTCGGTGT
TTATGCGAACGAGGAAAGCGGCGTTGGGGCTGTTTGTATACGTTATCTATTGGTATTCTG
GAGAGTGTCGGCTAGCGCAAGACTATCTAGCTTCACACGCATCACTGTCAACAACTTACA
CTTTCCTATGAAGGCACCCGGAGTGTCGAAGTGCCGACAGATATCGAAAGAGACTCGCGC
TCTAGACACGCCAGCACATCCTTGTAGCTCACTTTCTATATTGCGGGGCATATTCGCCCG
GTTACATAAGAAGCTGGAGGTTCGCTTACTTCCCCGCCAGGCGCTTAGTGAGCAGAGAGC
ATTAGACCACCGTAAACGCCTCCCTTGTCAGTGGGTCCCCGGCGACATATCTCTATTGAA
ATTTCAAGCGTACCAAGTGTATCGATCGGAGGATCACAGGCACTACCACTTAATCTTGGT
TGACAAGAGAACTGCAAGTTGGTCAATCAGTGTTCATGTGAGATGCTTAAGGCAGCATCA
TTTAAGCGTGGCAGGGGATACTAGCGCCTTCAGTTATAGAGCCGCGCCCTTCGAGAGTGT
ATTGTGCTGGCCGCTTCTTATTGAACCTACCCAAAGGGCTGACGCGGGAGATACTGGCCG
CCACGCCATGAAAATCACGGCGATCCCGAAGTGATTGACTAGGATTAGGAGTTATTGGTC
GCTAGCGTATAGGCCCACGAACGAGGCCTATCTCAGGTTCCCTAAGTACTTACCCCGCAG
TCCAAGACCGTATTCCTCCAACTCGCGGACATTAAAGGAACTTCTTTCGAGACATAAACC
ATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCCCGCGTGAGATGCGAT
TGGGACTGGCCGTGCACCCGGCCTTGACCCATTACGAGTG
>Rosalind_0718
TTGGTTATAGGAGTTTTCAGATCTACGCCCGTTTATGTCTCAAAGTATGACATTAAAGGA
ACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGT
GAATCAGGACCTTCAATGCAGGCGCCCTGACACCCGACGCCTAGTGAGTGTTCCCGGAGA
GATCTTACCTTAGTGTCACGCCCCCCCCCGGGTGCAAGAGGCTTGTTAATCCCACGGTGT
CGCGCGCTACCGGCCAGCTCGGATTTAACGTGGAGTCCGGCCTGGCACGAACCTGGTGAA
TCCTAGTTCCCGAGATGGCCCAGGCGGGCTCCTACGACACTGTCAATCACCACAATTCGA
TTATAAAGCACCCGAGACGAGGGAATACAGTTGATTGTGATATACCCTGCCAAAACGAGC
CCCCCAAGGACAGAGGTTGTCGACCTCAACATAAATAGTATGCTCCAGGCGGCCGCAGGC
GCCGGGTGCCAACGTTATCGAGTTTGCCCATTTTTATTCTTGTGCCCCAACCCCGTTGGC
ACGAGGCCAAAATACTGGAGGATTCGGTGCGTAGTGCGGGGGAAACATACGTCGCCCGTG
CGGACTCCCGTATTAGCACGCAATTGGCGAACTGGTCATGTTATCTCTTAAATCACAAAT
GAATCTTACCGATGATATCCGGGAAAGTGACATCCCCAGGTTGAGGCCCTTCCGACGACA
AGACGCCGACTCGCTGTCATGAAAAGGAACGGGCTGTTAGGTGACCCTGGCTCTCTATTA
GCAGGTAGCGAATGGCAACGTTGCCGATGGTCCATGTCAAGTTGGGGCCTCGCATATCCA
TAGCCTCAAGTTAGATTGAGAGAAGATTTCGACAACCATCCTAGTACTATGATAAAAGCT
GGAAAGGATTGAGACTGTAGATAACTCAAGCACTGAGCTCTTCCGAGAGCAACATTACCG
AACCGGCGCAGATGAGTAGGGGAGCTGCGCCCCGCAGTAG
>Rosalind_7897
GAGATGTTGATTAGGATTCTTGCCCCCCGAAATCTGCCGGGCCTACTACACATCTCGTGT
TATCATTTATTGAACTAAGTGCGTTTCGCCAAGTACCTACGACCTTGTATTCGCTTGAAT
CGAAGTTTCTGTCGGGTTATTCTACCGAACTTGGGAACGAACTGTCGTCCTCCAGGGAAT
ACCGACACCCGCCTCAGTTCTAGCTTAACGCGACGGAATCGTTTCGGTGTCCTCACTTTT
TGGCAACTGGGACAGTAACAATTGTAACAGACTTTGTACTAGACACTCCAGCGGAGCCTC
CAAGCGGGCCTCGAAAGGCGAACCCTAATAGAAAGGCCTGGCTGTCTGCGTAAACTGCGA
GCCTCGATCTTGTCCAAAGGACAAAACCGGGTATCTACGTGGAGACTGTGGAGGAAGGTT
TGGTGCGAACAAATCGACAATATTTCCCTCAGTCCGATTCCACTGGATTCGATTTCCCGT
CCGAGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGC
AAAGAAGTGAGACGGTGAATCTAGCTCTCGTTACAAACCGTCGACACCTGCAAGTTACGG
GGTCTTCAGTAAAATATAGCGTTTCGCCTTGTCCGTGTCGTCTTGGCTATAGTGCAGATA
CCCGCAACTGTAGTCAAAATCGATACTCGGCCGCAATCCATCAGACCGAACGGGGTTCGA
AAGAACGTAGAACACCAGTGATCTCACTAATCTTTACCCGCACAACACCTAAGATTCAGC
GGCGTACTAACCGCCTAAGTGCGGAGAATATCGCAGTCACCCCCCATGTGAAGAATTATA
TTACACCATTAAGCCCGGTTCCAGTTGCCCCGGCCTGAGACTGCGGTTATAATAATCTAA
TCGATCATTGCGGAGGGGGATTGAGTTATGTATACAGGGTTCCGTGTATACTCGGATGCG
AGATATGCTATGGTAGGGAAATGCAGCGTCATACTATCTG
>Rosalind_7963
AGTGAGCGGCACCTTGTCATTGTGGTACGAAGAATAGGAATGCGTACGTAACGGGAAGCT
CACTTATAAAGCCCACTCGGCTGGCCTCACCGACGTTCCAAAGCGTTACTGTTAAACCTG
CTCATCCACGTGTGCTTGGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGG
TGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCCGATCCTGTCAGAGCCAATTGATAG
CAAACGCTATCTAGGGTACACTCCCGAACCAAATTGCGAGCTGACCGGGAGGAACGATCT
TTATTGCATCAATGTCACTCAACAAAGCGAGCCGGCCCCGCGTTCCCGTGCCACAGTCCG
TGAAAGCCTTTATAACAGCGGTCCAGCCAATGAACGTCACGTTGGCACAAAGGGACATCG
TAGGTCATTACAACGGGTGACAGCCCTAAAGCCACTTCCTCGGTTATGGTCGGGCAAACA
TACCATTGATCCCACCAAAACGGCTAGCTACTGACGTTTTCAGGAGTTGCATAAATTCGG
CACTAAGGTCGCACTATGTATGGCATTCGCTAGGCCGTCGTTCAAATAGCGGCCTTTGTG
AAGGACCGACTATCACCACCGGCCTCTGGGGATATGTGGGGCAAAAGACACTATACTTTA
TATTCATTCACCTCATGTTCGCTAAAGTCCCACAGGGATCGACTAACCCAGTTCCGGTAA
GGTAATTAAAGTGCAAACCAAACTCCTAACATGTCATCCGAAACCTGCTTTTAGGGTCGA
CTGATGGCGCCCAGCATTGTTCTCGTCCCTGTCGGGAGCCTTGTTCGTTCGGCCCGATCG
TATGTGAGCCAAAGCCTACTCATTTGTTGCGAAATCTCGCAAATACTATAGCGATGGACC
GTGCCCATAGAATCGGAGCGAGGCGTGCGTTGAGTCCCCTCCCTGCGGGATCAGCATGAC
ATTGGAGGCAGCAAACTCAGCGAAGCGACACGGCAATGCG
>Rosalind_0514
TACAGGTTTAGAGGCGAACGACGACAAGGTCGCGCCCATCCCCGAAGATGAAAATCACCG
AACTTCGGCGAATGAGTGGCATGAGGAAATCCAGTGGGCTGTTCAACTTTGCTCATTTCC
TGTTGTGACCGCAGATACTACACGCTCTGACGCAATGGGAGTTGAAGCACACCGAAAAAC
CACATTGAGATTCGGACAACAGGGGTATGCATACAGCGCTCCACCTGACCGCTCGCCCGA
GGCTGGCCAATCGGCGGGGGCCAAGCGTGATAATTCGACGTGGCCCCCTTAGGATCGAGG
GTGTCACCTCACAAACCTTTCCGAACCCACCAGGTCTGTGACATTAAAGGAACTTCTTTC
GAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCACAT
TTCCTAATCCGGTTTAAACCCTACCTTTTCCGGCCCCCTCATTATGTGTGCAACCGGTGT
ACATGGGTTGGGCGGAAAATCTTAGAAAGATTTAGACTGTTTCGGGGTGGGTCGGGGCTT
GTGTTTGTAGAACCTCTATGCTCCTAATTTAGCGATTAGAACATTTATTGTCTGCTAAGG
TTTCTTCGTAATTGGCGATTCCCGTGATTGAGGAGCTCCGGTGGGATATCGTAGTGCGAG
GCCTTCATTACCACTTTCCTACAAACTAATCGCGCAAAAGGTCAGACTCCTATAGGATCT
TGCGTGTTTCATATCAGCTAGACCAGCAACCTGCCTTCATGAGGAATCTTTATACCTTTA
GCAGCTTAATCCGGCCGAGCGCGGCTCGCGGACGAGTTATAAAAGCGTTTGGGGAGGGAT
CGCCTTTTTAAATCGGTGACGAAAGCTCGACCCATGGAGTGACGCTGGAGAAGACATAGG
TCCAGCGCCTCTATCTAAAGCGGATGGCCCCGTCTTCACTCTATGCGCTTTTTTATTACT
CGCTAATGGAGGGTGTAGAATCGTGTGCTACTCACGTCGA
>Rosalind_2370
AGGATTTCATCTGTGTTAACTACAAGGGCGTTCGCCTGGATTGACAAATACTGGGTGAGG
GCAACTAAGGAAAAGTCGGACACCCCGGAGGATATGACTCCTTGTAGCGAGCGGTTCATG
TATCCTAGTCCAAAATCTTTTAACGAACGTGTTGGGAACCGCACCGACCCCCTACAACCC
TACGGCTTTCCACCCTCCTAGACGGGTGTCTAAGCTTAGCCGCCGTATTAATCCAATAGG
ATCGTGTCGTGGGGGTAACTGTCTGCCTTATCCGAGAGCGAAACACTAAGTGAAATGTTG
TCCACATGGTTGCAGTTTGACAATCCGTGTGGAGGCACCGAATAAAGGCTGTCTCGCTGG
TAGACCCCCACTGCGGAGAGGAGCCAAGCAAACATACGTTGTCTGTATAATCTCGGGCCG
GTCGCATCATGTCACCGGGTTCGGTCAGTTTCTCCGTCCTATTGACAGGGAATCACATGG
TAGCAGCCGCGTACAGCAGCGGAAGAGTTCGACAATGGAGATTGACCGTCCCGAGTTAGC
CTATTACGGATGCCACACTAACCGTGGGTCGCTTGCTCCCAATACGTAGCAACGCCGTTC
TGTAGAAAGAAGGTGCAGTCACCCAACTGCAGTTGATCGTGCCATCGGAAACCGTTCTTG
AAGGTAGAATCCACTTGCAGACGTGGGCGTGGCTCACTAATGTAGAGGATAAGGTAAGCT
CAGCAGGCGGATCTCCGGAGCAACACTCTGTGTCAGTAACTACCTTTAATGATCCCTGTG
AACAGTAGGCGACCCAAAGGTCACGTGAGCGTCGTTCACGGGAGATGAGGTCATAAAGGG
TCACCTAGGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCAC
CCGCAAAGAAGTGAGACGGTGAATCTCTGAGTAGACAGGGGAGTATTCATGCTTGCCCCG
CCAAATACCCTCTACAGGCACACACGACGAACCAACATCA
>Rosalind_1439
TGTTCTCTACGGCGTGACTGATAAGAACTTACGCCCGGATACGCATATGACATTAGATTT
CCTATGTCTTCCGAATCGCACAATTACGTTAACCTGACTGCTTGAGTAAATTGAGCTACT
CCCTGCGGATGGAAGCTACATTTTCGGGCAGAATATACGTAGTGAAACCCTACCCTCACG
ACTATGGGCTTATGTTCACCTTGACGGTAGAACTTTGGCGGGCGCCAAAAACCCCTTGTT
ACTTCGAGCATCCAATAAACCGACATCTCATAGAAGGAAACCTCAAATACAATCGAAGAA
TAGTTAGCTTCGGACTGAAGGATCGATGGTAGAGTAGCTAAGACTCCCCGCTTATTTAAA
ATATCTGTTAGTGGTACTTGGACTCCCTCAGACGGTAGTGGCTTTTATTCTCATCAGCTC
CTCTCCCCATCACGCCAACGGGCGGGTCACGCCCCGTGGCGCCATACCTTGGGGAGTCTG
ACAAATCACCGAGGGTTCCTTACAGAGATCACCCCTGTGTTATGTTAAAAATTTCAAGCT
GGCGCCGAAGCATGGAGGAGTTTACACCCTACAAACTGTAGTCACTTGAGAAGGTATTCG
ACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGA
AGTGAGACGGTGAATCTGTGACGGCAGGTCCGCTGTCCATTCTCGCTTAGCAATTACCGC
ATGTCGGATCCATAAGTACGCTCTCCTATCCGATCTTACGCCCCGAACATTTTGCCAGAC
TAAGACATCCTGTTGGACCCTAGTATATGCCAAGGTAATGTCGATCCCGCTACCAATTAT
AATACTGCGCCCATACAACAGGATAACATGTGCGCACTCGGTCGAGTTTTGAGGTTAGGT
GGCAGTTAAGATACAGAGTCCTCCGATGTTGCACCAACTGAAATTGGGGCCTTGTCGCGG
CTATGCGTAGGTCCGCCATACACTGTAGATGAACGGCATA
>Rosalind_2946
GGGATATATGATTCAATGAAGGACTAATTGGGTGCAAAAAGCCAACATCTAGTGGTGGTG
AAGCTCTGTTTTATCCGCCTGACCGCTACCCGCAGAACAAAAACCGTATGTACCGCGTGG
AGAAGGGATGTCTGTAAATAAGTGCCAAACAGCTTGTTAGAAACTAGGTGATGTTGAGCC
CCGGACAATATATACATGTCCGTACGGGTGACGAGCGCGCTCGAGAAGGTCTGCACCAAG
TTTGTCTGTATCTGGATTCAACTGCTCGCTCACGGAGATAGGGGTAAGCGCGGGGCTCAC
GACCTTCATGAGGTGCATCCTGCTCTCCTATGGGAACGAGTGTATCGCGCTCCCCGCTTA
AGAGCTGGGTTGATAGAGCTGCGGCAATCTCGGTCTGTACGAAGCACAAAGCGGACGATA
AAGCGGCTCAGTGACAGTACCTTTATGTTCTTTTTGTATGTTAGAGCGAGTTGAGAAGAT
AACGAACATCCTACCTGGGCATGCATAACCTCACTTAGGTGGCAAGATTTTGTCTGGCTA
TGTTGACACCTTGGCCATTACGGGAAGCATCCCGGAAAGCTCTCTTATAATTCCATTAGA
CTTCCCCTGGTGGTGAGGTAACAGGTGGTATTCGTGGACGATCGATCCGCGACATTAAAG
GAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACG
GTGAATCGGGGGATCACGGTGCAAAGAAGGCTAGCCCCCAAGTTACCAGTCACAGCAGCC
TTTTGATACGTACGCATTATTTGTGCCCGTACTACAGCCGAGTATTGTCTCAAATTGGAG
CAATCCAGGAAGTTTGCCTGATGAGATAGTGTGTTAGTAATGGGTCTCCGTATCTGGGTC
TGCCCCTAAGGTGAACCGATGCTCCGCCTTATTTGTGATACAGCAGGGATTATGCGAGCA
CAATAACCAGGATAGCACTTTCGTGTCGAGTGATTTAGAT
>Rosalind_4025
TCGCGTCGTAGTTATCTTACCAAAGCGTTCGTCAACGGGGTGCCCCCGACCCGACCAATG
GGTCACTTCTTCTTGAATCTAGTCATCTTCCCATCCGTGGCACTGCACGAGACCCGTCCT
TGGTTGACAAGTCATCCGAGCGAAAGACGCCAACATAATGCTTAACCTCGTGCATTATGC
GATCAATTGAATCTCTCGCCGCCGTAATAAGCCTGCGCGCTAGTGCCTCTATTGAAAAGT
TGGCTTCTTGTGCAACTCCCACACGCCACACATCTTGAACTCGAGGCCGATTAAGATACG
TTCAATCTAGCTGTTCTCAACGCACCGGGCGAATGAGACTCGGGTGGTGAATATCCGTTA
ACGGAGTAACCCAATTAAACGGCTGTCTGAGAAATCTCTTGTGTACTCACACGTGTGTCA
GCCGGGTGTCCTATATACAGCCCAGATATACAGGTGAACTTTCTCGTCCCGATCGTCTAC
TGGGAGTGAGACCTCTAAACTCTGAAAGTCGAAGAACTAGATTGCGGCGCTCCTCTATCA
CCCGCAACGAATATCAGTAAATCAATATCATTCAGCTAGTCAATTGGTCAAGCGCTCGCC
ATCGTTCTGTTTTGAATGCTGAAGGCTCGATTGTAGATCGGCGTGACATTAAAGGAACTT
CTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAAT
CTGGGTACCGGTGAGATTAGACGTCCAGGCGAGCGCGTGCGAGTTGTACAACTCCAATGA
TATCCCGACACTTCCGCGAGCACCTGAGGAATCCAATCGTTAAGGAGGATGCCGTCGCGT
TTCGCCAAATCTGATCTAGAAATTAATAAACCTTTCTGCCGCCTCAATTATTACCAAATC
CATGGCGAGAATACGGATTTGAGTAGCAAGAGATATTGCTTTACTACAGTATAACGATTG
CTCCGTGTTTGGATTCCTAATGTCACTGGGTTCCGCTCGA
>Rosalind_7062
TGCTATCATGACTCATGCGGAGCCCTGGTTACTCCTGTGAACCTATCTAGAAATACAGGG
GGGACTGTTGCGGAATACATGGCCCCACATTCGGATGCCTCAGGTTCCCAACTGGTACTG
CTAAGCTGGAATTGCTAGAGTTTTACCTCTAGATGGCGATGATAATGAAGCGTCTAACGC
CTTGCATTGAAGCACACGGACCTAGGTCGTTCGTTCCGCAAACTAACTCGTTCTAGCATG
TAGCCGCCGACATGTCACATGCTGGCCGCCATAGCCAAAGTTCGCCCACGGTTTGGGACG
TTGTCCTGGACTCATACCCTCAGGTGCTAGACATTAAAGGAACTTCTTTCGAGACATAAA
CCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTCGGTGCAGAAAAT
CACAATCTGGTACAGAAAGTGAACAGGGGTCGCTGTAACCTCAATCGGTACCGTTAGGGT
GACTTGGATCGCGGATCTCGGCTAAACAGGCCCATAGCACCCCTCCTAATAACCAGGCAT
GACCTATCGCGCAATCAGTGCAGCCCAACCACAGGCTTGTCATCAACTTTTTTCTGGAGA
AATGCATTCCCGTGCCTTTCGCTGCTGCTTGCCACCCCCCATGGTTGCCCTCCGGCTGGC
CTAAACACGCATCCAGCGCAGGCCAGTCGGGTCTGTACAGGTTCGTTCAGGAAGACTTAT
GGGTCTTGCGTTATTGTAATATCGCACCCCTGAAAGGATCTTTTCCCCCATCCAGCAATA
TGAGTCCTTAAACCACAGTGTCCGTACCGGCTAATCACGTAGTGATATTCAAGCCGAATA
ATAGAGCGCAGATTGGCAAGGGGGTCGCACTGTACTCCCTAAAGTCTAAGATGTGAGGAA
CCTATGAACTGTAGCCCGGGCATTACGCCAATTAAGGAATTGTATAGGCGAAGAGGCTAT
CTCTGAACTATACCCTAGCTCTCTGTCGAAGTAGAATGAA
>Rosalind_3102
TTTATCTCTCAGATTTTAAATCGGCTGCACCCAATGACCTTCGTAAATCGGCAACAAAGT
CCGCTCAAAGTTCTCAACTTCATTAGTTTGTCTTGTCTGCTCGAACGGCCGAGATGAATC
AAAGTCGTTCCAGGCTCCCTCTGAGTCGCACGCCGCCGGGTCGACGTCCTGTGAGCTCTC
CTAACGGTGACCGACGGGCAAAGATCATTTACCATATGAAAGCGGTTTGGAAGCGACAGG
CACTATATTCTGGGGTGCCAGGAATGGCGTTGCTGTCCGAGCTCGCGACGCAGAATGTGA
TATACGGCGGCTCTTCTTAATATTTCATACTATCATGTAACTAAAATACTCCCAAAGGAC
GTGTCTGTTAACTGGCTTTCGACCGGCTGCGCTTGATCTAGAGACTGGGGGACCTCGTGC
AAACCGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCG
CAAAGAAGTGAGACGGTGAATCCCTGCCCAACTAGCAACATATTAGGCCAGCTATTCTTG
CCGATTTGTCCACACTCTACATAATAACGCCGTGGCGGATCATCTTCGACTGGTCTCCTC
GGGGCGATTTGCTGTGGACCCACTTTTGTTAGTTAGACGACGTGTCGGTACCGTTTACAC
CGTGTTGTGTGCACATACAGGGCTTTGGGCTGAGGCCTACTTAGGGTATTATCTTGTTTT
AAGGTCTACACTGGACATTCGCCGCTCCGTTGGAACAATTGGTGGGGGGAGGGTTACGCC
TTATCGGCCCGTACGTACTCCGTGCATTAATCATGCACCGCCGATCCCTCTCGACCGAGG
CTTAATATCGTTCAATTACCACACGTTCTCACAGCGGAAGAGGCCCACCGATCTGTGATA
TTTATGAAGCTTGACGCGTCGTACGACAGGGATTGGGCAGCTTAGGGAGCCTACGGCGAC
CCTAAATCCGAGATGTTAGATCAGAGTACGGTGTAGCGAC
>Rosalind_0240
CTGACATAATATGACAGTCAAGGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGA
GTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCGGGAGGCGAGCTCAGTAACCC
GAAGCGTCCTGTCGAATCACGGACGCCAGCTCGGACTCATCGCTAAACAGTGGCCTGGGA
GTTAATGAGTTGCGTCTGTTTCTTACAAACGCTCGAGGACTGACTGCAGGGGAGATACCA
GGATCTATTCTGACGAAAGATTGACAGGGCCCATAAATCCTAGGCACCAAGTCACTTTGG
ACGGTACCAAGAAAGCTGGGGGCACCTTAGACTTTGGCGCCTTGTGGATCATTTAGTCGT
GGTCCCTGAGTAAATTTTCCGTCTTAATCGAATCGTTCTGTCGCACACCGTGTTTTAGAC
CTGGCGTATAGCCACAAGCAACAGCTCGATCATTGTGGTGTATCCCATACCGTACCATAT
TTTAGTGGCTGGTTACCGAGTTGGATAATCTTATTTTGGTTGAGACCAAACCTGGGAGTA
CAGTGCGTCTACCAAAGTTTCGTCTGAACGTCAACCCGGCTCAGAAAGAGCCGGCTTAAG
GAAGCTGGGATTGAGCGTCGTCGTAACATCAGATTCTTAGCGAGGAATGTAGAATCCCGT
GCTCATTATAATGTGATTGGCGGCATGCGACTCACGTTGTATGCCGGGGTTGTATCAGCG
ATTTACCGCCGTCCGGGGGGGCACTCCCAAGATTCGATAGGTTCTTGGTTGCGTCCGCCC
CCGCGACTATCATGACAGATGTCTATTAGGTGAGTGCTTGGCTTTTCGCACCAATCTCAA
TGACGAGTTCTAGAATTAGATGTAAACATTCGGAGTTGCACGTGCCTCTTGCCGCCAATA
GCCCTCCACATTTTGGCAATGGGAGGTGCCGTTGTTTCAGTCCCTCTAGACGGAATCCAT
AGCCTCGGACAGGGTCCAGTCTCCCTAGAAGATTCAGATT
>Rosalind_2691
CAACTAAGATAAATTGGGACAAAATGGCTACCGGACTTTTAGGACTGCCCTTTGGTTAGC
CTGGGTTCGCCAGAAGGAAGAGTAGACGACTATATGCGGGCTTTCATGGTTCAAGGGATA
GTAATGAATTTGTGCCCCCACGTTACAGGCCAAGCGTTAGATGGCATGGGCGCACACCCT
GTCCTACATCCATTGTTTACGAATTTACATAAACCTATAAAGAGGCAGTGGCGTAGAAAT
TCGTTACATCTACCTTCTGCACGCGCAGGACTCTGAGACACTCTAGCTAAGAACTCAATG
CCGGCATAGGACGCAACTCTAGGTTCTACTCGTAATCAGCAGGAAAGGCGCGATCCCGAG
TTACAGGCCGGCCGACCCAGTGGCAGCCGGACGGGACTGCCGCTGGTGTTTATTCAGATC
CAGACCATAAAGTATAACTGTTGAGACCCCTTTCCATGACTCATACTCGACCAACGTCTT
CCCTCTTCTATACGTTGAAATCTAACACACTGATACGAGGAAGTCTAAGAACTACTAAGT
CCCTATGAGGCTCAGGGTGGTACGGCAGGTGTGAGATAAGGGGTGTGATAGGAGGGAAAA
CCTGCTAATCTAAAAATGGTAACATCAGACATTAAAGGAACTTCTTTCGAGACATAAACC
ATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCGCTAAACATTCTACCA
GGGCAGCAGGGGATTTCTAACGCAAGGCCCATTCACTCCTTCTCACATGGGTCTAAGTTT
TAGCTCCGCTACCCCAACTTGATCGCGGACCACTATTGAGATCTCTCACTTCATGTGCGT
GGCACTCGGTGGCTCATCTTGATAGTCAGTACCCGTACAGCATTTGTCCGCGCGGATCCA
CTCAACGGCTCATAAGGAAAGGAGTGAGCGCTGACCTCTGAACGAAAAGCCCGAGGCGCG
AGCTACTTCCCCCTCTCATCGACCGGCAAGAGGCAGGAGG
>Rosalind_9472
GCGGTAATCCTTCCTTGCTTCGGACTGCCCCGGGGCTGGCTATCACTGGAGGCAGATGCT
CCGATTGGCCCTGAGTACTCTTTATACACAGTCTCTTTTGTCCAGCCGATTATGGTCCGA
GAGTTTCGACATGTTTGGGTATTCCACCAATGAGTTCCTGCGTTGTGAACAGACGGTGGA
TGCCGGATTGCACTCTCTGTGGCCAGCCCTGACGGGACTGAAGTGTCGTGAATGAAATGA
CGAGCCTTGCCACATGCTGGCAGACCCATTCCATACTTCACCCTGTTCGCAAACATCGGA
CTAGCATTACGTATGCGCAAAAAAGTTGAGAGAATTCTGTATCTCCCGGTTTTCATCTTT
TGTAATGGCTAAGAGCATCATTCTGTCTTCAGATCTCAGGCCCCGCAGACACGAAACAGA
CGCGTTAGTATGCGAGTGTGCCGCGGAACGGGGCTTCCCCGGCGAAATTTCACGCCTACT
CTCCACTGTCTACTATCCATAAAGGTGAGACATACGTGCCATTGTATCTTGCCGGTTGTC
TCGAGCGAGCACCCGCACCTGGATCAGGCCCGAGCCTTCACTTCGCGATTGTGAATCTCC
TCTTCGTGTCCATGTGGCAGGTAATATACCTCCGGGTCGACCTATCAAACCAGCTTTGTA
AGCAACATTAGAGTTAAAGTAAATAGACGAGATGACGGAGTCTGCAGATGCTACCCTACA
AGCAGAATGTTTTCTTCTTAGTCCAGCGGCATCCCACCGGCCCCACCCTGACCCCTAGAA
GCGTGTTTCAGCCTATACCTAGTGCCAGAGAGCCGACATTAAAGGAACTTCTTTCGAGAC
ATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCGGGCAAGGA
GTATCCATCAAGCTATTGGTTCCGAGTGTTGTGGTCGAGGCAACGACCCTCTACTCGATA
CCTGATGTTAGGGGGCGTCAGTATAACCCTTCCGCTTTCG
>Rosalind_6378
CGCGAGCTACGATATAGACGAGGTAACAGTGGCCTTCGTAACATCACTTGGTATCGGCCC
AGATACTTGAAAGGGCATTTGAGCTAATACGCGACACGGGGAGTTTCGACTAGCAAGGGT
AACACGCTAGGTATATTGGAGGACATCTAAACGATACAGCGAACCCTCGCCCGATTGTCT
TAGCGTGACGACGAAGACGCCGTTGGGCGCGTGATACCCTGATCCATTAAGTAGCGATCT
TAGCGTCCTCCTGTCACAGCGAATTACTCCTCGTTCAGACCGCCAGCCACGCCGTGTTGA
GGCTGTCCAACCTACATCCAAGGACGAATATGACCAATGTGATAGTTTTCTATGCTTGCT
GTATGTGCAGGGCGCGAGCAATCGGTGTAATACCAGCACGCGCCTCTCACACCCAGACAG
CGCGGGTGTGGATAGCAGGCGTGACCAATGAGTTGGCGAGGTCCCATGTCGCTCCTGGGG
TGGCGGTCGCAGCGGACGTAATATACCTGATAGCGTATGCGTCTCTGTCATTCCCTGCCA
CATCGTTAAGCTCAATACTCCGCAATAGTATGCACATGGTCGCATTTTACCCACTCTTTT
GGACGGCGTGCCGGAGGTTCCCCAGGATCGTAGTGAAAGCGGGAGCGACTACCATAATCC
ACTGTATATGTCTCAAGAAGTACCTGTCGTATTGTCCTACGCTCTCTACGAGGCGCGGAT
TCGAAGTACCATCGGGGCAACGGCTATGGCAGTTTTCGCAAGCCTATAGACGGGACGCCA
CGCGACAGGCGTCAAGCTCGTACGCTACACGGAAACTCGTGCGAAGTGGAAAGGGGTGCT
TAGTTTGCAACGGGTACCGGTGTGCGACATTAAAGGAACTTCTTTCGAGACATAAACCAT
AGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCAAAGAACTTGGTCCACCC
ATCTAGCTTCAGGAGTTAGGCGAGGTTCGATAGTATATTG
>Rosalind_2101
GATCGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGC
AAAGAAGTGAGACGGTGAATCTAGGGTGCCCAGCCCTCACGCATTCATAAGTCGATCTAA
GAGACCTAAGCGTGGCAACGGGACCAATCCGGGTGTCTTGGTTCTTCTCGCTATGCTTGT
GCGATCGCGAAAGTATACCGTACATCGTCTCCATTCATGATTGTTTCGAAAGTTGAACAA
CTTCTGTGCTCTCGGTAGGTAGTTATTCGTGCGCTTGTTGATCGTTCGGAGCGTTCTCGC
GTAGCATTCATTCAACCCCGGTACAATCATGGAAATCCACTATGCCCGCCGCTATGTGGC
TTGACGCGACGAGAGTCAGTGAAGAACCGAAATCGAGACGAATATTGAGCAGTGTGCACT
ATCGCTCTTGTAGGCCGCATAGGATCGCCGCGGAAAGACCTACCTTCTTCCAGGTACAAT
GAGCAGATCGAAGTTCAACCTAAAGTTGTTATAGCTTGGGCGTCTGCAGGTTAGGGTGAC
TGAGGCCGCATCTCACCAGTGCCGGGAAACAGTGATCGCGTCCACGGTGCATTGAGTTTT
TGACGGACGGAGATTTGGGAGGCTGTAGTTCTTGTCTTCCATGCGATTCCATTACCCCGC
CCGCTAGTATCCGAACCTGGCGCCCATGTTTCTCCGGCGGCATTCGGGTAGACGATGTGG
TGACTACAGAACTGAGGTGTGGCGACCCGGTGTTGAGAACGGTCGTGCATATTGAGGACT
CATCGCCACCTAAAAGGAGCGCGTTCAAGGAGACCCTTACAGTGTCGACCTAAGAGCCCG
CTGCCACTGGCAACCTTCTGGACTGACGTGTTCAATCTGTCTTCTTGTTATGACGGGCCC
CCCCGCGGAAAGACGTTCGCCTTTAATCTTTGAGTGCCACGGTCTGAATGCATCCGCTCA
AGACCGCTACCTAGATCATAAGCTCGCACAAATAATCCAT
>Rosalind_9682
GCACCTGAAGACACGCCGGGTCTAACACTAACGAACGATTCCGTCAGGCCTAGACCAGGT
CAACCTACCCAAATATAGCTAAGCGTCATCCTGGTCAACGATATCGTCAAAATCGTAGCG
ATGAGGTACAACCTCCGCTGAACTCCCAAGTATACATACTGTGGGTAGCTTCGTTGTTTG
TTCCATGCTGAGTCGTGCGACTCTACACCGTATAAGTACGCTGGATATGTTGCATGGGTA
ACCGCCCCGTGTCAGCGGATATGTGATGTCACAACGCTAGTTTGACCCACATACCAGGCC
CTACATTCACTAGGCGGTATCGGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGA
GTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTCCCGACTCGCCATTGAGGTC
CCGTCATTCAAGAAACGGAACAAAGCTGGGACGGGCTCGAGATTCTAATGTGAGGAACGT
GAATTCCCGATAACATCCGCCACTACATCCATTCACAATCGCCGGCGCGAAGACGCCTAA
TCTTCTCTGTCCCTCAAATGGAGGGCGCAATTGGGATGAGGCCTAATGCACTATAGTATG
ATAAGTTTGGAGATGTAACCGATACCATGTACCCACTGTGCAGAAAGTACAGCTATTATC
AGGGTGGACCCTTTGCATGCGAAATCTCAAGTGATTATCCCCGCTGTGTCTCTCGATGCC
AATCACTCCCAAGGGTTATTGCGGGTGAAAGGTTGTCTTCATCGCGGCTCGTGGTATAAC
GTTAGTCAGCCTTCGGGTACTGTGTTCGTTTACACGCGACCCTAGGGGGTTGATCTGTCG
TCGTCATCAGGAGATTCCTACAATACAGAAGTTCACACTACACGTCTTTGCGCTCAGCCG
ATCTTCTTGCCGCTTATAAGATCCCGGACGTATCCATGCTTGTCGGCAAACTTGGGTCTC
TACCTTTCTGCTTATTTAGAAAGGTTTTCGTGAGTTAAGA
>Rosalind_4217
TTAACCAAAAGGCCAAGTAAAATCTACTACTCATGGATGTTATTAATACTTATATTTAAC
TTCGCCACTTATCTCTCGGCGGTATACGCTACAGGGCAGTGCATGTCCCACGTTTTCAAT
TTCACCTGGTCCAATACAAAGGAAGCCTTCAGTACAACAAAGTAGGGTGCCGTGGTTATA
CCTGATCACTACAGAGTACACGGCTCTGGTGACTAACCTCCGTAAGGCTCTACTAGACTC
TCGTGGCTCGCTCCTAAAAGCCGAAAAATGCTCGGAGCTGGCTAGCCGGCCATAGGTTGA
TCTTATTTGACTGGGATGCCCCTGACACCGGCGATTGTGTATGTTGTATTACGAGGATCC
CCCAACCAGAGCATCAACAAGGGCATAATGCGAAACAGCCTCCAGGAGCTCGCATGATCC
CCCCCTTAGTTTTTGGTTTAAGAGCACATGTTATAACGAGGCTGTTCCAGGGGGTGTGTT
CGAGATAGCCGAAGAACACAGTGGTTTACTTCCAATTCCCGAACCTGTAAAGATAGAAGT
ATGTCGATCCCTTACTTTGCGTTCAGGTTCGCAAAAGGGGCGTCCCGCGTCTATTCATGA
TTAGCTCCGTCTGGTCAACCAGTGGGTGGTGATTAGCGACTATATGTACTGGCGATAATG
AAATCAAATGTCCCTCAACGAGATATTAACACTCCTGCTAATAAACCCCTTTGACCGAAC
TGTTACATTAGGGGAGTGGACGACGTGTCAGCCAAGGACATTAAAGGAACTTCTTTCGAG
ACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTTGTGCA
AGTCCTAGCATCATCATGCGTGACGATACTTAAGTAAGGGAATCTTACTTAGGACCGGGA
TCCATCCAACATAGGTGCATTATACTCCGCGATGTATGCGGTAGTGCGAGAGTGTACCGC
CTTGAACTCCCTAGACCGCCGCTTCCTCCTAGAGGTTAAT
>Rosalind_6521
CGGCCCTGGGATATTAGCGTCGGAAACCCGCCAAACCTATCGCTGGGTTGGAGGAACACC
AACCCGTTTATGCTGTGCATGGAGATAAACTGTTTGGTGCCAGCCCAGCTAGCCAAATTT
ACATCTCAACTAGCATGTGGGGAGGTTGCATGGACGGTAGACTCATGTGCTCTATGCTGA
CGGAGGATAGTGCACGCTATCATTACCTGGAGGGAGAAGGCCCGCACGACGTACCAGTGT
ATTCAGGGGCGATGTACGAGTGACAAGTAGAATGAGCCCGAACAGATTCGACGAAAGCCC
TCGCTGGCCATGCTCTGACTCCTAATCTTGGCTCAGATCCGCTTTCTACACTGATGATTT
TCTGGCGTAAGTCGTCGATGGCAAATGTAAAGTATCAGTCTTCGAACTAAACCAAAAAGC
CCCGGCTATGCTGATTTTCTTGCCCACAAAGTGCCCCTCACGATGCCTCCTGAGTGAACT
CGATAGCACAAAACACTTTGATCGGCTCGTGATCATCCTAATATCGGCCTTTACCTATTT
CTGCGTTGGTAAGTAGGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTG
CTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTTTTAGCCACGTGGTTTATAGTGGAGC
ACAAGTCTGTTAGGTTCTTACGAACTCCCTACCCGCCCCGGTAACTACCGGTCTAGCAGA
CGTATACCCACAGTTTCTTTGCCCATCACATTTAAGTTCCTGCCCTGGGCACGACCCGGG
AAAAAGGCGACGGCCGTACGCTATAAACTTCGAGAAAGCAACGCGTAACCTTATGGTTAG
AAGGTGCTGATTTGGTGTTTTCTCATAGATCTACGAGCGCATCCCCGCTTCAACATGTGC
GATGACATCGGAGAAAAAACCACTCAGCAAGCGAGGTTTGACTCGTGCTGCACGCGCTAC
CTACGTCGTACCTATGTAGACCGGCGTCCGGAGTGTCCCT
>Rosalind_1280
GATTTACCGCCATTCGTGCGACTCCTTATTTTTGTGAACTACCGGCGCCGAAGCTTCTTA
CTTAGGCAGATTCTTAACGTCGGTACTCGGGCTTGATGGGGGCCCTTCAGAGTTTTAGGT
TCTGGACCTGCGTTGCGGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGT
GCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTGCAGTTCTGTGCAGCGATTAGGGCT
AATCAACCAGTGTGAGTTGTGGGGCTCTGCGACATAAAAGCGACCTCCAAGGGATACCCA
GACTCCCTGGAAAAAGTCGCTGAGTCCACTAAATGGTAGCACAGCGAATACTACCATGCC
CGTTGCTCTACGCGCGGTAAATAGCATTCCGCACTAACAAAGATCGCAGTAGGATGAGAG
CTGTAGAACTGAACACATACATTTTTCACACAACTTATCTACCCTGCCCAAGAGCACTCT
TTAAATTTCTGGCTTTTACCGCTCCGCCTGTCTGCGCCTCAAGAAAAACTTAGCTCCGCT
TTGAGCACTAGAGGATAGTGGGACTAAGACTATATGCAGTTGTTAACACATAAAAGGAGG
GACTCAACATACCGGAATTAGATAGAGATCACCAGGAATTACTCTGATACATGTGCGGCT
TCGTAGGACTGCCTCATCGCGATTTAACCGATAAACTATAGTTAGATCGACATTTCGGAC
CTAGACAGGCCTCGCGTAAGCAATGAAATGCTACCTGGGTTCGCAGGATAGATCGTACAA
AAGGCGCTGAATTGGTCCACCTACCTGTCTGGTCGGGCCTCCCCTTTCGAATTCCACCAT
ACTTCTTCGTGTTATGGGCGTGTCAGAGTTTGGTGTACTGTTCTTGCCCTGGGGTGGCTA
CTGCCCGCAAAAGGCTTAACGACTCGAAGCTTGGTTACGTGGAAGCCCAGCCGGCTCAAG
TGCGCCCTCTGGCCTACGCTCCCCGTCGCCGACCTTGTTT
>Rosalind_6170
GCGGCTCATTTGTTAGTTAGATTCGGGTACGGGACATACCTTTGTCTGTAATATACTGGC
GAGAGGGGAGACATTTTGATACCATCGCTGTGAGGGAGTGTCCTGGGAATTAGCCGCAAG
CCCCGCCACGCACTCAAGAACTCTAGAGGTCTCCACTTGAGCCCAGGTCCATCCCCGGCC
GGTGATACAACGGACGTACTGACACCCGGACAAGGGAAACGCATTTGATGGTGCCAAATT
TCGCACGCATATCTGGCACCGAAACGGCGACGGCACACTCTGATCGGCAGACGGGTTAAT
CAGAGGCTTTTAGGTAGGGTTGGGCTACGATCTACAGCCCAGAGCTGACGGCCACAGCTG
ACGCCGAGCCAGAGTTATAAATATGCACAGGTAGAGCGTAATTAGCCTACTATCCGTTTA
ACGAGTAGCTTGCAACGTTCGGTAGGGCGACCACGAATGTCCGGGAGATTTGCCGGGTTA
ACGCATAGAATCCTACGCACGACATGAATACACCGAAAGTATCCTTCCTTCTCGGTGCAC
CCCAGCATAAGAATGCCGAGACGCATGGGACTTAGGGCAGCGACAAACAGGACTGGGGCT
CAATATTGAATGCGGTCTGTCCATAAGAATGTTATGATCTGTTTACCGTTGGCTATTCCT
AGGGGGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCG
CAAAGAAGTGAGACGGTGAATCCCTAGCGGCCTAGAACCATGGCCGAAAGTTTACGAATC
ATGCATTGACGAGTGCACACCAACCTGGCCGATAATCAGACGGTTTCAAGTCGCTCGGTA
ATAGCCACAGGGTTAAACGGCAACTTTCAACGTGTGCATGCCTGTTAAGCTTTGCTACAC
ACATATGCCGCTCTTATCGTGCCAATGACTCCTTGGGCTGCGAGTAGCGTCGTATCAGCT
TCGCCCACACCCCATACACAAAATCGGCTCCGGGGTTCGA
>Rosalind_1402
AGGAGTCTACCATACTGCTACAGGAGGATACTGGTGGCCCCCGACTACGGGTTGCAGCAG
TTAGTTAAGACCAAGTCCACAGTTGCGCCACTCGCGACCCAAGCGCACTGACTGTGAAGG
CGTGGACCTAGTCAGAGGGCAGGGACCGTTTCCAACCTCTGAGGTAGGCAACCTCCAGGA
TGTTACGGACAGCGCTTGCCGTTCCACGCACCCAGGAGAACTTTGGGTACCAGTGTTTCA
CTTAGGTTATGCAGCGAAAAACGGAGGCTCGTACACGGCGCCGCCAATGTGGTCAGGCAC
ACGCAACTCAGTAGCATGGGGGTTATAATTTTTCTCCATGATAGGCGCTCTCGCTAGTGT
GAATAGACGCACGCCCGACAGCCTCCGCGACCATTCGATCCGCGAAGTTGCAGGATTGAT
TGATTGCAACTAGTGTGTTATTGTCTGCGAGGGCGTGTGGTTGTGGAGCGTCGAAATAAT
TGCCTATCCATATGGAACACTGCCCGAATCCTCTCTTTACGACGCGAGAGAACAAAGGGG
GCTTTCATCATCTGCTTGGCGGTTGTCTTTCATTACCAACACGTTATGTAGCAGTGCTTG
CCTCTCTCGGACTATATTTGCGAGCGAAGATACACCATTCTTGTGAACATTGGTGTGACC
TACTGATACCCCCGCGAACGTCGAAGGAGTTGGTTGTTCCGGTAATCCCTTGAATTGAGG
TCCTGGAAGATTAGAGTACATAAGCTACTTGTGAAGTGAATAAGCACTACCGTATCAACG
CTAGTGATCGAAAACACTTCATCCAATAGGGCTCACTATAGCTGACCACATGGCGTTTCG
CACGAATGGAAGGACAAGCCTCAGAGTACAAACCAAGTTCGCATGTCTGTCAAAGAAATG
GACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAG
AAGTGAGACGGTGAATCTTGCTACATTAGTGGCGCGGATG
>Rosalind_2068
TATTAAGTAAATAGCGCGCGAGGACTATGTGCTTTGCGTAAACGCCCGGGGCTTTAGACC
TTACCTCAGGGGGGCGATCGCAATTCCGACTCCGGCTTCTGAATTAGTGTTCCAACTTGT
AAGTTTGCTCCAATTTGGTTCATACTAGTAAACGTTGCGCCGAGCCCTCACCTCTTCGGT
AGGATCACATCACCTTGTGGCCGACTCTGACCGGTCCCATTTATACTTAGATGCACACAA
AATACAGGAACAGCTAGCATCTGGGCACTCAGGGTGTTGCTTGCAGAGATGATGCGCTAT
GACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAG
AAGTGAGACGGTGAATCCGCTAGAGCTTTCTCTGATGAGCATTCGCCACTGAATGAGTTT
CATTGTAATGCTGGTCTTAGCAGTACAAGCATTTCAAAAGTGTGAAATGCTCTACCCCCC
CACCATCCAAACTTGGCCGATCGGCCAATTGGCGAGTTTACGGTTCGACTATAGGTGTAT
CGCTGTTTATCCCTCCACATAGGTAAAGCGATGCACGAGCAGAGGTACCCTACCGGTTAG
TCAAATTCATGCCAGGGACGTGCCAATCCTGCCGGCGGTATTTTGTCGGCCCGGACCTCT
GGCACAGAGAAGTCTCGTCAGCGGGAGATGGAGTGTATCTCTGCGATCAGGAGTTTAAAC
CCATGCGTTGGTTAGTTTCTAGACACCAAATTACTGAACACAAAATGTCCACTGCAGACA
GGGTGAGGTTGGATCCCCGCCGAATTGGTGCGTATTTGCGGGCTCCTGTAGGGCGGGTAA
TACGGTGGATCAGCATCCTGACACACGTAGTTGTCAATCCTGGTCGCCCCGTTTACCTCC
TTACGATAACATTTAGCATCGAGCGCCGGGGCAGACCGTGAGACCTGCGCTATCTTTCAT
TTAGCCGGTCGGGCCGGATGCCTTCAGAGGACAATTAGAA
>Rosalind_6029
CCGTCCTTCAGTGAGTTTCACGGCCGGTGCACAGGGGATTGTAAGTTACTGCGGTCGGAA
ACTCATTTGGGCATTCATAAGCGTTTAGACACAGGGAGGCAAGTTGAGAAGCCTGTGGTG
TTTACGAGTTGGGATAATGAGAAGGTCAAGAGGATGTGGCTAAACATCTCTGCGGCCAAG
ATTTAGGGATTGGTGTTGGGCTCATCTTATCCCAATCGTGGTACGCGCTGGAAACTGGGG
GGATCCCCTATCCTCGACTAGAAACCTAGCAACTGAATGCCCAATCCGGATTCTGTTGGT
GTACATTAACCAGGTGAGCAAACACTGCCTCCCCGAAGCACTTTCCCAACTCTCCCTCAC
TTATATTCACCTAAACTACACGGATTGATCAGTCCTTATCTTTCCTTTGGTCTCCCAAGG
CGTCCTGCCCGAGGGGGCCTTCATCCCTTATTGAGCAATCGTGCATGGTTCCGCCGTTAT
ATCCCAGGACGTCTTAATGGTACACCCGAATCACTTGATTTTCCGGTTCCGGGACGGGGC
ATGAATAGATGTAGGCGCTCGTAAGCAGGATTCAATACAAGTACCTTCTTTTATGGGTCA
CGAGGCGAAAGAACGCGGACACATATAGTTTTATCTTGCTAGGGGTCGTGTTCTCCGAAT
CCCTAGCTGCAAATCGATAACGCATGCTGGCATGAACCTAAATGATGTAGTTTCTCAGTC
TTGCCATCGTGTGGCTATCGGAACGCGGCGACCCGAGGCGCCTCTTCGCGTAAGGCGAAT
CAGCCGGGGAATATACACCCTTAGGTGTGTTGGAGTTCGGACATTAAAGGAACTTCTTTC
GAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTTCT
GAAATCACCTCGTGTGAGGAGCGTTCACCAGAGTGCCTCGACGGCGCTCTATGAACGTCT
TTGACTAGGTCCGCTGCGTGAGGATAGAGAAAAAAGTAGG
>Rosalind_2238
CCGCAGCCGAGCATGAGGTACCTATGTTGATGTAGGCAAATAGGGACCGACTCTTATTAC
CGAACGGTAAGAGTTAGCCGTGGCCGAAATGGGGAATGCCCCCCTTGCATTTATACTGTA
TATGTACTCGGCTAGGGCCCTGTTTTCATGGGTAAGAGCGCAAACGGCATCAGGCAGTTC
CAAGAGCGCCAAGGAATGTTCGCTCGTGAAACCAAATGGACCGTCAACCCAGGGAGGGTT
GATGTGGCCGCCAGTGGGTATGACGGTTACCCTTAATGATAATAGCGAGTCTACTATAAG
CAGAGGTCTTCGGTTCTAGACCGCGTGAGTGTACGGCAGTCGTCATACTGGTTTCAAAGC
GGTTGTATCTAACAGAAATTGCAAAGGTTTAGATTGCGTGCAGAATTAAGAAACTGAAAC
AGTTACCCGTAGAATTGAGAGTCGGTTGATATCTAGCTAAAGAGAGACAAGAGAGAGAGC
TGGTTAATCCGCCCATCCGCGTCTTGGAGCCGTGGCGCACGCTGCTGCTGAAGGGTGAAT
GTCACAGCGTGGGTTTCTTGACGATGAAAAATTTTAGCTCGTTTCGAGTACTCACAAGGA
TTTCGTCCATCTCGCTCGGTTCGTGCTCATAGCTGATCTTAACGAAGGCAGTTACCCCCG
CAATATGAGATTGTCAAGTTGTTGTTAACCGCTTGCGTGCCTTGTTGGGAGTATGGAACT
AGATGATTACCGTCCTTTACGGCGAAGGCTACTCCTTCATTGCTAACATAGACGCATGTC
TCCAGGAACGCATGGGGATGAACGACTTTCAACCTTGTTATTTGTGCACAACCCTGTCAC
CGCGACAAGAGACCAAAGTTTAAACGTTTCGGGGAGCCCTTTAGTACTGACATTAAAGGA
ACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGT
GAATCTAAGCCTATGTCGTGAAGCTCGCGAGACGTAGGTA
>Rosalind_8353
AGTAGCCTCAAACGCTAATACAGAGACCAGCTGGCTGCCGTCCCATGCCCTTTTATGCTC
CTAAGCAGGGGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGC
ACCCGCAAAGAAGTGAGACGGTGAATCTCTAGATCTAGGTGCCGGTTCTTCCATTACATC
TGTCTTAAAAATGTCCAACTTTAATACTGCTTACAAGTCGGTCCTGTAAGACTATCGATG
TTTGCCCACCCCGGATTAAGCAAGAGTGTTAACTTAGGCCAATTTTTTCCAGGCGGCCAC
TTAGGAACACTGTTGATAAGTTGCAGGCCGGTAAACCGGGGGGTGTCTTTGGCGTCTTTT
CAATAGCAGCAGCTACTTCTCATCAGAAGACGTAGATTAGGATTGACTAGTCTCGAATAA
GGAGCGTTACTCGGATGAACATAATGCGTACGCATCAGGACTCAGTTGTCGGCACAGTCG
TTATTAATATGCCGTAAGGGAAGTTATACTTCAAATGACCTCTCACCTTTGCAGTATGCG
TTCCATATAACCAGAGGGCTGAACATTTCGTCACAGGTGGTGGCTGGTTCCTCTGTGTAG
CCATACGATTTGTACGGCACGACCGAATCCTTACAAACCTCATGGTTTCGTAGAAAATCT
GGGTAATTATGGGCAAGAAGTGCCCCTCGAATCGAGGTCCAGGACAAGTCGCAGGATGAG
ACCCGGAGAATGCGTTGCACCATCTCGCCTCGCAGGGGCAAAACCCCTCTAACACGTCGA
CGGCAGGCCCTCGATCATGTCTAACGCAATATTAGTAATATGGGCCCTCCGTGACGTTTT
CGACTCTGAAAATATGTGGAGTCGCCTACATCTTAAGCGCAATTGAAGTCAAACACTCGT
TCGAGAAGCCGTAACACGCCAATGTAAAACTACACTACAGGGTCTCCAGCAGAGCCCCAA
ATATTCCGACCCGCTATTGGTCGTATCTGTGCGCTTCCAA
>Rosalind_8180
AATCCCGAGGCTGCGCGCTTTTAAGTTCATCTGCGCACGCGCACCCAGGATCGACATTAA
AGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGA
CGGTGAATCAAGCAAAATCTGTTCGGTGGTTTCCTCCGTAGAAGGACTTCTGATAACTGC
AGTTTACCTATAAATGACGCGGGATGACTCGTTGGTAGCATACGGTTGTGGCTCACGTAT
ACATATGATTGTCGGTAGATAATAGAGTTCACTGGATTTCATAGCAGGTCTTGGCCGCTG
AGGCAGCCGAGATTTGACCTCATCCCTCGCTATTAGCGGGAAAATATAGGATTGACGTTT
GTCGTGCAACGTAGGTTCGCCTCCACATAGAGAACGGGTATCGTATTCTTGTTACCTGGG
CAGACTTTGTGGGAAGTTGTGGTTGCGTCCAGGATTATCCCACGAGCTATTACATATGCT
TGCCATCAAAACAAGCCAAACATACCAGACCTGAAGTGAGACCAGATTCAATCTCTACTT
GCGGTGCCAGTGCTACTGGAGTTAAGTGCCAATAAATACGGCCTCGGGTAAAGAAAAAGA
ATGTGCTACCTAACTTCCCTTCTATAATTGTGATTATTTCTATCGAATGTGATTCCAAAT
GCTCGGGTACAACTCCTAAGCCTCTAGGTGACCTGAAGTACCCCCGCAACTATCGGCTAA
GCCAATCCCCGTCCCCATAGGTTGGCCAGGGACCCTTTACTCTTACGCCGCCCGATTTTT
TGGATTTTGTTGCATCCACATGCGCGGTGTTAATTCACCTGCTATTAACGTCTATTCAAC
GCGTTCGATTCCTGACATCATCAATAACTGCAAAACCGAACTATGAATTTCTAAACCCCC
CTTTATAATAGCATCAATATCCGGCAAGGATCTTATCCATCTGGCGCTCCTCTGAGTTCT
GTGTAGGCTTTCGTTCACAAATAGAGTTGTCTGCAAGCAG
>Rosalind_8905
CTGTATCTATTACATTACTAATCGTGTTTACGTGGAGCCTCTGCTAGGTTGTAGGCTTGA
GTCAGGCGCAACAATGAGTATGCTAGGCTTTATGCCAATCCCACGTATCAATATGAGTAG
TTTTGTTAGTCTCAAACCACCCCTTACCCGAATTACACGGCAAAATGGTCCGCCCTACAC
ACCTACGCAATTCACAGCTCGCGTAACCTTGAATGTGACTTGGTGGTGCGCCTTGATAAT
CTGGGCAACACCGGGGTTAGTCTCTGGCGAACCGCTCAAATTTCCGTTCTTCAGGTGATA
TCTCCTCATACGATGCCGCAACGCGTCCATTTAGAAAAGAGAGATCTCGGACCTCAACGA
CCGTCTCCCGGCCTTCGCACTAGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGA
GTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCCGCCCTTACCAGGATTGTCTC
GCTCTTACTCGACATGCTAATCGCTTGCCTGTGGTTTGGGGTAACGACACTGTTAGTTTC
GTGCCAGTATGACGTCTAGCACGTTCCTTTACTGTTTCAGTATTAGTTCGAAATTATCTA
GTCCCCCGTAGGTCCATCAAGCGCCTATTAATGTAGGGCTACTTAATCCGCGTTATGAGA
ACATAAAGGCATTGAGTCTAGTTTTGGAGGTTCCAAATAGACACGGATAATACCAGCGAG
GGCAATCCTGGTCGGAAGCTTCTATGAGCACCAAGCCGGGGTTCGTATATCTATTATGTA
TTAGAAGCCCGATACGCCGTGGAGATCCGTCCCTCCATTCGGAACTTACTATAGAGCCTC
GCAGCATTAGCGCGCCGTTAGCGTGTTCTCCCTGGATAACCGGAGTTAGTAACTGGTATC
CATTACACACGTATTCAGAATTGCGGGTCATTTTAAGGCAATCTTGGAATGACCAAAACG
ATGGTCGATACCATACCCCCTGCAACGCCGTTACTGAGCC
>Rosalind_3278
GCTGCCTATGCTTCTCAACTTTCCCCCAATCCCAACCCTGTTTTCTCTGCGTGTTCAGAT
AAGTATGTGGTCGCCATTGTCGTAACGGAAGCTGCACTCTTCGCGTTACTATGGTCATCC
GATTCACAGAGACCTAGAGTCTGTGTGCACGAGACTGCTAAGAATTATAATTTGATTACA
AGGAACCAATTAAATCAAATAACACGAAGGCGTGCATGTTTAAAAGACTAACGAGTCTGA
AGGAATTTCAGTCATCATCGCCAGAGCCTACGGACGCTGAAGTCCACACTAGCTGTAATA
CAGAAGTATGAAGTAACCGGTCATCGGGTCTGTCCAGCGAACCGAAATAGTCGCCAACAG
AGCGCATTAGCGAAACCGCGGAGGGCAACACATCCTGTTCGCTGTGCAGTCTTTAAAGCG
GGATTGTTGCTTCGGTGTCTCCATCAGAAATTGGCGACATCTAGGGACTATTTTTGTATG
GATTACCACTCTAAGGTAGTCAGAGCATCTAATACCAAAGAGTCTTAAAACGTCCTATTT
TCGTTGTGCGGTAGCGCCACCGTGCTTCGTTACTGAGCCATAGGTCAATGTGTCCTCGAC
TGAAATTTACGTTCCCTTCCGGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAG
TGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCAAGGCGTATCCTCTCCGAAGCC
CGTGGAAATGTGATTAACTGTTTCATATCAGGATGCTGGATAATGATAACACTGTACCGA
ACCGTTATCAGGTGTTCCGCAGGCAGCTGGTCGGGCATGAGTACTCATGCCACCAAACCC
GAAAGAAATAGACAGAGATCACGCTGGATGCCGTTCACCGCGGTGCGAATATCGCGTTTT
ATCCAAGCATCGGCGTATTGGCTACGGTGCGTTAGCACACTTTCGGAGTAAACCACATGA
ATCCCGTGACGTTGGCGTTCCGGGGGCATAATAATTGCTC
>Rosalind_6913
ACGCTATTGTTAGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGT
GCACCCGCAAAGAAGTGAGACGGTGAATCAACGAACGGATCGGCTGTCGTGATAGATTTT
GAAAAGTCCGTCCTCGCAGGGTCTGTTCACCCCAGAAGGCTACGGTAGCCTGCGCCTGAG
GAAAACTTAATTCGGTTCCCCGCAGGATAAGCGCGTAAGGAGACTTGCAGTACACATTAT
TCGATCGGACGAGCAAGGGCGGAACCACCTAGTGCAGATTCGGAATGTATATAATTTGTT
TCGAGGGGAAGAATAACCCGTATTGCTCGCAGTATAGACCCGCGTAGGAGCCGACTACAC
GGACAGATCGAGCTTACGATGGGGAGTTATGTCGCGTGTAGCAGCTACTCTATATGAGCT
GTCCACTTCGGGTATACCGATCAATGCACGTAAGTGGCGGCCGCGTAAACGATACGTTCT
TGATTGTTATTTGTAGTTAGAAAGGGATAGGGTGGGTACTGCGTTACCACCTCCCGGTTC
CATAGCACCCTGGCGACCGGACCCTGTTCGTTTTATACCTAACTGGTCCCATATTCTAAA
TCGCTATCCGCGGTTGCTTCGCATCTAGTCCTTGGCGGACCTCCCGTAAGTTCTGTTTCT
ACATATTTGAACTAGCGCATGTTCGTGGCAGCAGGTATTAAAATTCACTTAAAGTTTAGA
GGATAGACCGCATGGGATCTCTACACGTCGCTGCATGGGTATAGATATGTATTTCGCGGC
GTCGATCATGGACAATGGCACTGCCGAGAAGTCGATGCATTAATCCAGTGATGTCGAAAA
AGGTCAAAGTGAGGGTGTATTCACGGGGCGCGGACTTTCTCGCCCGTTTCTACTGATGGT
AAGTTTGAAACTTAGCGTAGCTGCTTGCGGGCTCGAGTAGCAGATGAACTGTCTAGGTGA
AACCAAGGGGCCGGGAGGGGATAATTCCGGCTTACTGACA
>Rosalind_3478
ACTGGTGGTGGCCGCGCCAGACCACCGGGGAATATAGAATAGCAGAACGGCCAGTTGCTC
GGGTAGAGTTCTGCCCCTGAACTTTTCTATGCGCATGAACTTCGAGTGTTTGTGCGAGAA
CTCTAAATTATTGTTATGGACCTTTAGGGGGTATGACCTAGTTTTCTGAGACAGTCTCGG
GGCGGGACCGTAGTGAAATTCGGAGCTAAGTTTACTTCGATTCACTTAATCTTAGACATG
CCGTAAGACCCAAAACCGATCGTGGTCTCTTCTACTTGTATTTTGTTGGTCAAGTTAGGA
GACCATGGAAGTTCGGTAGTATAGGATGAGGCGGTCCGGTGTTATATCGCCGCCCCCCCT
TATGCCATGAGCTTGATAATATAAAAGGGTATCGTGACACCTTGTTTCACACTGTAGGTG
AGACAGAATCCAGTGCCGCGGGACGCATCTAAGTCATGCCCAGTATTCCCCGTAGCTAGC
GACGGCCGGTGTGCGAGACTTAGTTCACTGATAGCGTTTTTAAGGTCTGATTGTTTCGTA
CGTAATGCAATCGCGGGATCCAGGATCACCCTCGACCTAGCACCTCTAGTGCCCCGCCTT
CAACCAGTGCCAGACTAGTCTTTTGTCACTGGCCTATGATGATGTCGTAGGACACAGGTG
ACAAAGGTGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCAC
CCGCAAAGAAGTGAGACGGTGAATCCAGACCTGATAAAATTACAAGCGATGGCCCTACTA
AGAAGCGTTTTGTACCTTGTCGATGGTTTCATCGCTCGCCCCTCTCTAAGTTCTTCCCAA
TTACTTACCCCTCAGCCCCAGAGGTGCACTCCGCCGGAACATACTAAGACCCCGCCTTTG
CATCCCTTGAGACTCGGTGAACCACGGTGGACCGACTTGGACCATTGGAGAAGCTTCCTG
GGTCCCAGCAGATACACAATGCGTAGCCTGCTAGAGTTCA
>Rosalind_4447
ATCCGGTGGCTTGTGTGAGCGACACCCTTCCGTTATGGACGCCTCCACATGAGAGGCGGC
TCAGATGCTATCTACGTTCGCTAAAATTTTTGTGCTGGGCGACTCATGGGTTGCATGTAA
GGTAACTTGCCATGTGTTATTACGATTCAAGCCGTATAATCACTGACATTAAAGGAACTT
CTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAAT
CCGAAAAACTATCTGCATGCTTATAAGTATGAGCGACTTTCCGAACTATGAAGCCGCTTA
TCATTATCAGTTTACTCTTGGATTGCTTCTCTCGTCAATAGAGCCTCCGACAAGGACGTA
ACGCAAATTGTCTGCCGTTCGCCTGTCTGCATCTCAGTGGGCGGATGAGACATAGTTGGA
CCCCGAACTGAGGCCGAAAATGTGATTGTAGTCCGCTAACCACAATATGCCCTCGGACGG
ACCCTCTACAGTAAGTGATCCACTAAGTACGGTCCTCATGAAGGCGTACAATGTCGTTCA
CAAAAAACTAGATGACACTCGCGGCCTGGCTGTGGGGCCAAACATTACATTAAACGTTGT
ACTGGCCTGCTCATGGGGCCAAAGTGTGCGATGTTTGGGTTTTCCCGGGAGTCGCCCCAG
GTGCGACATCGGTTTGAATCCATCCAAGTCTACGGCGACTTCGTTCGTTTTGCCTTCCCT
GGATCCGCCGTTGCCGGCTATCAGCAAAGCCGGGACGCTCGGTGCTAGTACGAGTCGTAA
TCTGTCGGCCGATAATGACCAAAACGATAGCGTCCGCTCGGTTCTACTAGTCATTTGATC
GGACTTTCGCAAGTATATCTCCCCCTTCTGCTAAACCCCGCCTTGTGACTTAACGTGCTT
TTGCAGGGTCCGACCTATCCCGATCCAGTTTACCGATCGTGTCAGCAATCCTCGTGATCG
CCAGATGTATAATACGAGCCCCGGGGCGTATTCAGTAAAC
>Rosalind_5510
TGAGGCAGGCATGGATTAACTACCGACTTGTTGCGACTATCCGCTCTGAAGGGTTTGTTG
GGGTCTAAACTTACCGTCAGGACAGAAACCCTATCGCTCGACTGGTAACATGGTACGTGT
GGTCTCAACACACTCGTCATCGGGAACGCTTACGCTACACAGGGCCGCGGGAAGTGACAC
AACCCGTCCGCACTCATGGGTAGTTTTGCGACGACGCTGATATTTGATCTCACTTCTTAA
GGGGACCGTGACTGCTATTAGAGTTCCCCAATCGGGGTATTGACCCCGTCTTCCTAAGGA
AGAGATAGGCCATGTACAGTTTTAAGGCACAGACTGGGATAACTGGCGGAGATCGGGTCT
GGTCTTACTCATGTGCCGAACAATTGGTTAGGAAAATCGCTAAATTGGGGAGTCTTAGGC
TTTAGAATCCCCGAACGCCTCGGCAACATCGAAAATAATGATTAGCTGTTCTCGACCGCG
GCTCCGACCTTTTTATGGGAGCTCTGATGACTGAATAACGAATCATAGGAACATATACAC
TATAGGGACGTGCTTCAGCCTGCTAAAGTCGGGATTAAACTCCGTACGACCTCTATGCGA
CTTCTTTTGACAGAGTTACCCATGGCTACCTGAGAGGGTTTATTCGTCAGGGATATTTCC
GACTTGGAATATAACGGGGTTACGCTCCCAAGTCCAGCGCCAGGGTTCGAACTAAGTGAT
GCATTTTTAGAACCAGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGC
TGTGCACCCGCAAAGAAGTGAGACGGTGAATCGTCTTCGTTTTGAATCCTCTCGCATTAT
CCTTGGGCGAGCCAATTCTGTCGGGACGGCCCGCCAATAGGCGCCAGCAGCATCTGCGGA
ACGTGTTAGAGATAGACTCCCGGAAAAGCAAGTACCGCAAGCGGCTAAATTTTAAGCCCT
CCAGCATGCAGGATGCCCGGTTTCCACAGTTCTTTGTCAA
>Rosalind_0072
AAGAAAGGCAAAGGTCTTTAGCGGTTGCAGCTCAAGCTAGTATATGACATGAGGCTTGGT
GTCATGAACAAAACTCCATTCCTCATCTCGAGTGAGCATTTGAGGATGGGGGTCGGATAG
GCGGGCACTCGTTAGGAAGTACTTATGGGAGCAGTGATACGGCTTTAGAGGCTAGTGCGA
AAGTTTTACAGACTCTCGTTGGCAGCACAAGCGTAGGCCCTTTCAATCTAAAGGCGTTGC
GTGGCCTGGGGTTCCTCAAGTTCCTGCCAAGGATACGCACGCCGTAGTACACAACTCTTG
GCTGAAGACCGCTCAATTTGCACCGACCTGGAGTTTCCAACCCATACACTGGGGGATGAT
GCGCGTTCTATTACGAGTCCAATAATTCTTAAGACGTTCACAAAGGGTTTCACAGCACAT
ACCGCCTACCGAAAGGTTGATAACCCGAATAAAAGATAGAACATCCTTTTTCAGTGGTGG
AACGTGGCACGACTTCGAGCAGGACCACGAACGTGAGTCATCAGCAACGTCGTGCAGACC
AGACAAGTGACTTACGTGGGTGCGTTGAAAGAGAGGATATCTTTTTGCGTAGATAGACTC
AAAGCAGCTGTGTCGAAGCTAAGGGTGCCGCGGTGCGTGAAAATTCGCGTGATTCCCAAG
CCTTACCTGGTGCCCACCGGTTAGATATGTGGTCCAAAATGCAGTGCTAGTTGTTTTGGC
CTTAGTCCGTCGTAATCCGCTGCTGTGAGGAATAACGCCTAACATAACCTCTCCGACATT
AAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGA
GACGGTGAATCCGTCGCTTGGAGTGCCGGGCTGGAGGCAAAATATGTTTATTGGGATAGA
ACTCCATCAGCATTGGCAGGTCGAAGCTGATATCGTATGAGTTCGTCAGGGATTGCAGCT
CCGTCGAAACTAAAGCTAGACTACCCCTTAGACATGCGTA
>Rosalind_5032
TCAGCCACGGCACAGATACACCCTGGCCAAACTCAAACGGCTGGGGACGATGTGTACAGC
ACGGGAGTGAACATCGCCGGCAGTTCGCGTGCGCCTTGGGTGCTCGGCTCACCCGTATCA
AACCGTGTGTTACTGGGCGTCGCGCACAGGAAGTAGGGTGCGGGTTTAAACTATCGTCTT
CGGGCTTATGGGCATTCGATATTAGGCGCCTACCTTTAGCCCCTGTATCCCTCCTTAGTA
GAACGACGCACCCAATCTCATCCGATCCGGCTAACCGACTCAAGTCCGCTTCGAGTTGTC
CGTATGATGGGGGATTATCTAGGCCGGCCGGGTTCTCTGGCCTGTTCGCTCCTACACATT
TAAGAGGAGTCAGCATGAACCAAGCACCGACAGGACTACCAGAGCCATTATCGAAGTTTC
GGTGAATGTTGTTGCCGAGCTGAGACGCCAGTCGATAAATCCAGTTGGTGACACGTCCGC
GCGTGGGGTACCCCTGTACCACGGTTACGCATCTTGGACAAGACCAGATATCGGGGTGTC
GCTTTAAATTTAACAACGTTCTCATGTGCTTTCTAGGAGTCGCCGTGCTCATGTGGAGGA
TTACTCGGGAAGGGATGTTCCGCCGTGTGCGTTAAGCTTGGCTGCGGTACACGGGTTACC
TTAATGGGCGAATCACATATGTATTAGAATGCACGCCGGTTTGGATACGACTAATTCGTT
CGTGCAGGTGATATCTGCCCGGGAGTTGGTAAACGGGCTATCTGTTAGCGCCATATCTTC
TTGTCCCTAGCTGGTTTGTGAGCATACGGTGTTGGAGTAGCTGGTGTATCAGGCCTACTT
ATCCTGTCGGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCA
CCCGCAAAGAAGTGAGACGGTGAATCAATGCTTGCACGCATCCTGATCTGCTTGTCGGAA
CGAGCTACTGCCGTTGATCCTCCCTCGCGTATCGGGACAC
>Rosalind_3781
GTAACTCTCGAATACTATCCTTCTCAAAAATTAGTTCCATCTTTTCGGTTAATGGTCCTA
CATCGGGGTAGCATAACTGAAGTAGAACTTACCGCAAGGCGCTTCCCATACCCTAACGAT
ATCCCGAGTTCTTTTCATATCGTCTGATCTAGTTTCACGAGAAGGCATGAAAAGGGGCCA
CGACCAGAGCGGTTAAGACCACGTCTGCGACCAACATGAGTCATTACGCTCACGTAAGCT
GTGACCCCCGGCGCAACAGCGATTATAGCCCGTAGTCGGACACATCTGCCTATGATGCCA
AACTCGGTTCGCGTTCCGGGTTGATCGGACGGAAGACATTAAAGGAACTTCTTTCGAGAC
ATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCATACCATGT
GTATTGACCACCCTCCACATCTGTACACCTTTTGAGACATAGTTGTCCGATGTGTTGCAG
TTACTGAAGAGACAGTACGCCCGATCTGAAAAGAATTCAATCCTGTCTAAGGACCCATGG
ATCTTTGTCTAAGGTCCTGACATCGAGCGTCACCTGGTAAGCTTCTTATAGGGTCCCTAC
AGCGATCGCGGCTAATCCGAGTCCTAGGCCTAACACCGTCCGCCCTCCAACCGCGATACT
GACGTATCACCTCTAAGTGGACGAGACTAGGTCGATACGCGAGCCAACATCTGAACCTCG
TCTACTATCTAATAAAATTAGACCGATTTCGGAGAAACCTAGCTACGCCGTCGCTGACAC
TTGTACCTCCCACGGCCCTCATCGATTTAAGAAACAACAACTTTGTAAGTAAGAGGCTAA
GATTGGCCGTGGGGTTGTATGGTTTACCAAGGTAGGGCCGATTTACATTTACAGCCAAGA
CTGTTGCTGGAGGAGGAGACTGGGTATAAATAACCAAGCGACGGACCGGAGCAGAAGTAA
ACGGGGTTAAGTCCGGTTCTTCTGCGGCGCCCGCGTGATG
>Rosalind_8441
TCCAAACCTGTTCCAGGATTTAACATGATACAACCCAGGGCCGATGGACTTACCCGAGAG
TGTCGCGCTCGGTATGCTTCAGCGCTGTCCGAATGTCAGAGCGCATGGACAACATTCACG
TTCCAGCCTTACGACTTGGCGATATGTTTGCTGATATAGATTGCTGGCGGCTATGTCCAG
AGATTGGCTGCTGCCCCTCTAATAGTGGGGTTGAGCCTCTATACGGGACGAACTTTTGAT
GCTGTTACGGATCGCGTGAAGCGTCATTGGAGAATTACCTGGCTGTTAAGCGTTCAGTTC
GTAATGGATGCCGGGGGAAGGATCCGATAGGGCTAAAACCTAAGCACTCAGTTGGTGTAC
TTGCATATAATTGTCCTGGACCAGTAGTGAGTGATCATATAACAAACGTAATTACTGACA
AGTTTTGGTCATTCACGGATAGTACGTTCTGTTCCACGCTCCGAAATCCTGGACATTAAA
GGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGAC
GGTGAATCTTTGTGCTACGGTGATAGATGAGACGGGATAAAAAGTCGTTAGCAATAACCA
TACGGCAATTGACGGGGTATGTATTGAAAGTTAAGGTTGTGTAGGGCTCAGCCGATGCGT
TACCGGCACCCAGACATCGATCAATAATGGTGCTGTGATTTGTCCGTTCCTACGAATTCA
ATCGCTGCAAGGTAAGCTCGCGCCTGCGATGGCTTTCTTGAATTGCAGCCGCTCCGAATT
TTCTCCACGCTTGAATGCGTATTAAAAGGTCGGATTACATGCACAACCGACTGGTATAGA
TTCTTACGACTACGGAACTCTAGTGCATTCAAGATGGTGGGCACCCAGGTTCTAGACTCT
GGCCATAAGATAGTGTAACTATGGGTTTCCAAACCCTACCCTCGCCGATCGACCGGGATA
TCGGCGGGAGAAATGGACGCGTCTAATGACGTACATGTTC
>Rosalind_6088
GGAGCACCAAACTAAGAAGAGACGTCTACCCTCGCCCCGAATAACACCGGCCTCGCGTGA
ATTCGACGAGGTAACTGAGTCACCTCCTAAAGTCGCCCTAACCCGTCTACTTAAGCACGC
GTTGCCGAGCAGAGCCGATGATTTGAGTCCCGCTTTAGAAGTCACGTCCAGTAGAGTACT
TCAATGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCG
CAAAGAAGTGAGACGGTGAATCAATAACTTTGCTCGTGGGGGTCCGGCTAAGTCATTGTG
CTCCCCCACGGAACTGATTTTAATAACGTGATCGAGATCTGCTATTGATTTATTTCCGAG
ACCTCGTAAATTCTCCATGCTAGTTAAGAGGGGAATCCCTGTCGAATAAGGGCTGCTATT
ACCAGAGAGCGGTTGTCTGTCCCTCGCGATTACGACTGTTACTGTTCACCAACGGGGCGT
GTGGTTATCTCGTCATCAGGTAGGGCCTATGTGATCGATATGCGCGGCTGTTCTCCACCC
AAATGCACGAAGTTACCCACATCAGTTTAGGGTCGGTCCCGGGACCGGCATGTGATGAAT
CTGTGTTTTGCCTGAATAAAAACACGCGAGTTTTCGGCCCTACGATCTGTCAGAGGATAT
GCAGCCTCACGTTCGTCTCTGCTGGTCTTGACAGCTATATTCAAGTGAAGTCCGATAAAT
GAGTCTTTCCGGCCGGATAAAGTAAAGAAGAAATAGTTGTTTTTATTAGATTTACACCGA
TAGAAGCACTTGAGATAGGTTATGCACTAGCTCAGATAGACGGATCTATTCTGAGTTGGA
AGCGGTGGGCTTTTCTTGAATCAGTTCCTCTTAGACTTTTGCTCTCGCAGTTCCAACACC
ACAATATCGGCCGCTCCATCGTGAAAGTTGAATTCCAGATAACAGTTCTATGAAGGGCAT
GGGAAGAGTGCGTTTACGCAAAGTCGTTCGAAAAGGCTGA
>Rosalind_4874
TACAGTAATATAATTGCTCGCAGGGAGTCTACTAAGAGGTCAAGCAGCTATCGAAAGCTC
ATCTCTACATATAAACAACGCCTGTATTCCGGTCTGCTATTGGTCAGCGCGGGAGTGCCA
TATGCGTTCCGGCAAGGTTGTTTAAGTGTGCCGATAGATGAATCGTTAGATTCGCAGGAA
GGGCCTTCCATCATCCGATGCAGGGCGACCTAGCAAGGATAACGGGAACCTTGATGAAAC
CTGGCATGGGTCCCGACGACATTATCACGCGGGGATGTCGATGTCTGTCCGTGCTAAGTC
CCGAACTACCGCAACTACGTATTTTCCCTACGTCTTTGCTCAACGGTGACGCGGTGCTTA
CTAGGGGATTTATTTAATTGCCGTTAGAGCTTCCGACGTGACTAACCATGACCCCGTTCT
TATACACCCGTCCCCGCTCCGATTCAACACTGGAGTATGGATTTCGTCCACCCATGTGGA
TGGCAGCTTACTCCATCTATCAAACAGACCTACCGACTAAAGTCTCGTGGGACATATCCT
GGGGTGGGCATCTATTGCCAGGGTTTGGATACGACTTGGAGGGCTTTCATAGAAGAGATT
AGACTGGTAGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCA
CCCGCAAAGAAGTGAGACGGTGAATCCCTGAAGGCACCATACAGCTAGAAGCAAAGAAAG
CGCGTAGACGTGTCCTGCCACTGCGTCTACTACGACGACTCTTGCTTCTGCTGGCGCGAA
CGAGCAGCTATGGTTCAGGCTATTTCTGAGTACGCACTTTTGGAAGACCTCACGAGGTAC
CCAACGACGGAATGTTGTGCTAGTTGACTTGCTTCATCGCCTGGTAGACCGCTTCTCCCG
CCGGGATAGCACATTTGACAGGAACCTTGTCCATGATAATAATGTTATAAAGGTTCGAAG
TCCACAGCTGAATTATTTCACGAGTCAGTAGCTCTAATAC
>Rosalind_8091
TACTCGACTAAAATATCAGACCGTGACTCGTTGGACCAAGCGTCGCAGCAACCTATTTGC
GCATGCACAATAGGCTACCCGACGCGGTTCGCTTAAGATGTCATGTGACTTGTTATGTGA
TAAAGTCCCGACACCTATGTTGTGCTTGGTGATTATCCCAGTGGTGGGAGACTCCTTCCT
CTGGCGGCCAAAATTTACTGGACTCTTGCCTAGATTTGAGAAACCAGACCGCCTTCGATG
TGAATGTACAATTTGAACGATATAGCTTGCACACGACATTAAAGGAACTTCTTTCGAGAC
ATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCAGGCTACAT
TACCGCAGTGTTGGATGCGTCAAAGTACTCAACTGGTGCTTATCATGCTGCAACGCCGCC
CGCGAAGGTCCAATCCCTGTGTAAAGACGCAGAAAACTGCTGAAGGCCCATGTACACACT
GACGGATCAGTGTCGGAGGTCCTTCCTTTTGGTCCTGGGAATGGGGCGTGTTTGAGGTTT
TAAGCCAGAATTCCGGCCGGCCCAATCACGCCCCAGCAGCCTGCAACTTGTATACACGAA
GACCGGGCGCGGCGTCGATACAGGCAGCCAGGTGCTGGGAGTGGAGGGAGCTCTGTATCA
CTCGGGGTCCGCTGGACGAAAAATCGAAGTTCCTAGACCCTGTCGCTTCAGCCTTACTCG
CTCGTAAAGACTGATGCGAGACTTAAATGCAACTGTTGTATTTCATTCCGAGAAGGATAG
ACTTTGCAGATACAACCTTACGGCGCGTCCTACTCCCGCTCTTCAGTGTAAGACGTGCGA
ATTTCCTGGAGCAACACCCGGGCTCGCAAACGCGGGTCCCAGCCAGCTGAATGTGAATCA
CCCCTGGGATCCCGGACTGCTAATCATTAAGCTAGATCTATAATACATAAGGGACCAGCG
TCGATCATTAACTCTCGTTGGATACACTTTCTGCAAAAAC
>Rosalind_2615
GATGGGCGAAGGAACGCTAGGGTCAGGACATTAAAGGAACTTCTTTCGAGACATAAACCA
TAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCCTATCGCGCAAATTAGA
GAAACGAGAACCAGGGACTATACGCGGTTAGGCGGAAAATAGTGCCATCACCTTGTTGCA
GTAGCTAACCACACAAATACCCCCAGGCGTCCCGAAGTATCACCCGTGGTCGTACGATAT
CACAGATCAATCCGCTCGGGGCCACATTAGAACAAGTCTCGGATACAAAAATGAATTTAC
ACGCATAGGAGAGGGTACATGTAACCATGGGTCCCGACCTTGGGGTATCTATAGTCGTCA
AGTAGAGCGCTTGGCACCGGCTCGCGAAGTTAATGATATAATAGAGCTTCGGACTACGTC
TCGAAAATGATGGTTCGCGGGAGTAGATAAGGCCAAGTTTAGACGTGCGAATTCCTCTTT
GCGGTCCAGTCTGATACCCCCTAGGGAAGCGATAAAACGACCTCGGATTTTCCGCATACC
ACGCAGGTAGATAGAGGCGTCGCCTTAGACGTGTAGGGAGTCCTTGGGACTTTGTCTCAG
TTCTGCACTTGCCAGCTATAAAAGGAGGCGTTCTGGACGCGGTATAGAGGTCGGCTGTGG
CCAATACGTACAGAATTTATCGCTACTGACCCCGGACCGAGCGTGTTCATGCTACGGGTG
ATAGGAGTCTTGAGGCGTGAAGAACGACGTATTAAACGACGTGAAAAAACGGAATAGTAA
GATTTTAGCTAAGCCCCGCTACATACCGCTTGAGAGACGAACCCGCCAGAGACTTTTATT
GCGCCCTAAGGGTTTCAGCCTGGTCTTGTTCGAGTCTATCTCCCCACGCTGGTTAGGGCC
TAAAATATGTATTTTTTGTCAGACTATATAATACTCTTCGACACCAACTGTGTCTTAAGA
CGTGTGTCGGGGCCCCACTACGGGCGGGTATGCACGCTAG
>Rosalind_9358
GGGGACAAGGCCTGTTAGCGCCATATGTCTGGGTGCTTATAGGGTGTGCTACTACATCCA
AGCAGCTGGGGCATCCCTGGAAGTATTGGCTCATGCGGTCCGACGCTACATTGGCTGGGG
TTCGATATTAAGGAAGTGCCGACTCCGGCTTCTTGCCGGTACAAGAATCGCTCTCTCCCG
AGGAGTCAGCGCGGATATGTAACAGCACTCCGCGGTCCGCCTATGGGTCGCTTTGTCCAT
CGTGTGTCCGATTCAGCAACACTCGAGAATGCTGAACAGAGAAAAATGTCATCAAAGTCT
TTTTACCGCTAGACTCTGCTTCAGGTCAAAGGGGGGCTTTATCATTGCGCGCGCCAGTCA
CTCTGTTGGGGTATAAATGAACAGTCCTATGCGGAGACATTAAAGGAACTTCTTTCGAGA
CATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCCTACACAC
CATTAAGATGTTCGTGATTGTCGTGTGTGAGGGCTTGACCGATCCAGAACGTGTGTAAGC
GCACATTATGGCACAACAGCTCCACCGATAACTACATGTTAACCGAATAAGATTACACGC
ACACAGGCACGGCCTGAGTAGTTATCGATTTTGTATGTAGTGCGCTACAAAGTTCTGCGT
GCCGCCATTCCGCATAGCCCTAACCGATGGGAGGTGAGCATGGGCGTAGACGCAGGGCAT
CCTGAATCCTGCGCGTGCCGGGAATGTCGAACCCACCTTCGGAGTCTAGAAACATAGGCT
TAATACACTGGCTCGCCGTGGGAGCGTTCTTCGGAGCCGCCCCTACTCTCCGCGCATCAG
CACATGCTCGAGAAGCGCCCTAAGTTAGATCCCTGTAACAAGATTGTAATGAAGTCCTAC
TGTCGTTGGTCAGTACTGCCGTGAATATCGGGGTTTTCGCGGGGCAGTCTGAGAATGGGT
AGGAACCTGGAGGGAGTCCAGAAGGCAGTCTTTGGGAGAA
>Rosalind_2397
ACAGACTCTGGACCGTGATAGGGGCGGTCTTGAGTGATACTGATCTGCTGCACTGTTACG
GTTTTTAGACCATTCTATACACATTGGGCCCTCAAGTCAAAAATTATCTCGTCAGTTTCG
ATCGCAACCCCTTGCGGTATGCTCCTACTCGCCTTACCCATGCGGATGCACAGCTTCGAT
AGACCGTACTTGGAGTCTAAGGGACGTGAAGAGTTGACGTATTGATCGTAGGTTCAAGGG
GGAGGGACCGTGACAGGGATGACGAAGTTTTTCCGGATAGGTTTCGGAAATAACGTAAAA
TTGTTAGTGGGTAAACGCAACTAAGTCATTGGCTCACTCCAAGGTAGCAAATTGAAGAAT
GGATGGTCTATTATTCTATCTGCTGCTATCCGCCGCCGGAGACCCTCCTCCGAATCAAAA
TTGTGAGTATGAGATAAGGTGCTAGCATCTAGGAGAGAATAATAGGTTTAGTCTGAACCC
GACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAG
AAGTGAGACGGTGAATCGGGCCCACGGCCGTTTCGGGCCTTCTTCCTCCGATCGAAGTTC
TCCGGCTGACGAAAGTAGCTGCGTCTGTCAGCGTCCGTGACGTGCGAACCCTGTGCGCTG
GAGCTAGTAAGCTCTCGGCAATTCGAGTCGAATTAGATAAGAATAGCGTGAAAACAGCAT
TATCAACCATAACACCATGTGTCGCTCAGCACTAGTAGTGATGGATGGATTCGCTTTACA
CACGAGTTGATCTTGGTAAAACGGCGTTTGCCTGGGTATTTGTTATGTCACCGAGCATAG
GCGCAAGCCGCTCCGAGCGCAAAACAGGGTATTCCTCCCCTCACCGCGCAGGTGGCGGTC
GCCAACCAAGTGCTTTGGACTATCACTTAAATCTCTAGGACATTACTGCTCACCATATGG
GCCGCGGATGCTACGGCTGTAAATTACGTTGGAAATGTAC
>Rosalind_2736
AATATAAACTAACTTTTGATTTATGTCCTAGCCGTCCAAGGCCCATCTCCCCCCCGGATG
CGTTGAGAATGGACAATGGCGCTCCGGAGAGAAGAATTAAACCCCCAGCAACCGGTTTCG
ATAATATCCATTGGGATGTGCACACTTGCACGGCCCGCTTGCATACCCGCCTGATGCAGT
AGCGGTAGAGTACCCGTCACCTATGGCGCAGACAGTTGTTAGGCTCTACTGCGCTTACAC
GCTAGTATCTGACATATCTGGTCACTTACTCCATGGCAGGCCGACAAGTACTTTCTGTGT
CTATCTCCGAACATGTGACGCTACGATTGGTCGGACTGCGACCATCTCGTGCTCCGATAG
GTACACCACACAGCCGGCGGACATTGCTCTCCACGTATTTTTACCCTGCATGTACTCGCG
GAGTCTAGAGACTCATGAGAGTTTGACATTAAAGGAACTTCTTTCGAGACATAAACCATA
GAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTAACATTATTCGGCACAAT
TGTTTAATCCTCGCGCTAACTCCACCCAGGCTTATTGCGGTTCAGCGGTATCAAAGCTGT
AGAGATACAACGGAATAAGTAGATAAACCGCAAAGGTGACGGCGTAAAGCTGTGCAGTAG
CTCGGCGAGCACAAACTGTATATCCTCTTTAGCCTGAATTCCACTTGAGGGAACCACAAA
TGAAGCACTGCGAATCCTGTATTAGGGGGCACCCATACCGATCCCGTAATATTTGTGGCA
TTCGCGACACCTTGCGTCATTTTAGGATCTCAGGTTTACTCTTAACGCCACGTATGCGGT
AAAAGCATAGGCCGATTAAGATACGACTTAGACATGCGCTCATTGGTTCAAACGATTCCT
TATAATAGAGGTCGAACCAGCAAGTCGCAATACAAGTATTTCACGAACATGATCGGGATA
TGGCTGCACGGTCCCAAATCCTGTATGGGAATAAGGATTA
>Rosalind_5997
GCTACTACTCTTATTAAATTGGCGAGGACCTGTGGATACCGTATTGCGCATTAGGGTAAT
TCGGACTGCAAGCCGAGTGCAAGGCACGGAGAAGAGCAGGTACCCGTACCTACAAATAGA
GGTCTAGTCGCCTCTGATCGTAACGGGGGGATTCCGCGACACCGCCGTCCAGTCCTGTGA
TGCTCTGCGTGATCACGATCCCATTTGATATGCTGTACCAACTAAAACGATGGAATGCGG
GACCACTCACCGGCGTACCTGTATCAACGTTTAGGAAGTTATTCACTCGGTGTCCTATTT
TTCACTGCCTCGTGCGCAGTTTTTGGCGATATATTGAAAAAGTGCGACGAGAGATCGGCT
TCAGGGTAAGGTAACTAGCCGGAAGACATTAAAGGAACTTCTTTCGAGACATAAACCATA
GAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCATAGGGCTCCATTAGAGTT
TGGCTCCGTATCATGGACCCGAGAGATCTCGCTTAAAGCCAGTGGGCACGTGGACAGGGC
CATGCGGGTTGGGGCGTTCTTATTTCTTCGGCATTGAGCCTTAACTCACTTCTGTTAGGC
CAGGCACCTACTCCGGACGACGATTGCGTCAGTAGTTGACAGATTTGAGATCATGTGGAA
AGCACTAGAGCCGATAGGGAAAACCATCCCCGGGCACTTATCTCTAAGTCATAGTCAGCT
GGGTAGATACGACTAGCTGCGTATCGCGGCGCTTCCGTTATAGTCCTTCTCTCAACTAGT
ACTGGCGCATATGCTAGGATGAGGAGCTCCCAAATGACAGATGCCAGAAGTGCGCGTACC
GTCATGAGCTAAAGCAACACGCTTTGATCGCATAATGAATTTGGGTCGATAGATTAGATT
GGATGGGCGTACATGTTGGTCCTTCTATAGGTAGCCTTTCAGCTATGCGGCCCTGTACCT
CCGAATTCACCCTGTCGTGTGAAAGTACGAACCTGATCGT
>Rosalind_8735
AAGCTAAGTTTGTTATGAAAACAACCTCCGTTTGAGGCTAACAGGGTTTCGGACGGCCAT
CGGCTTGATCACAGTTGCTGTGGGAAAAGTCGCCCTACGACTTTAACCAGTTATTTTCTA
AAACCTGTATTGTTGCGCCGCCCGATCATGTGTGTTCCCAACGAGTGTATCCACTAATGG
TTTACATGGCACAAGAATAATAGTTGACGCTATGTTTAAGCCGTAGTAAGCTTCACCCAG
GGGACGTCGAGCGAGCCCATTTGCAACTTATGAGCTCACCACGTGATGGCTTTGGGAAAA
GTAATTACCCTAAACGGGTTCGTGTACATTCTCCAAAAACTCTGTCCTACATTCCCAGCG
TAATAGTCGGCTATTCGAGTCACCTGCACCCCCCCGACACAAACGAACGTTAACGACAAG
AAGTTCTTCTGTTCGGATACGCTACCCTGAGGTGCCGATATTCTGATTGTAACCAGATGG
CTCGATCCGGGTGCCTCGGGGTCTGCCAATAGTGGATTCCACCTATCCGTGGTCACTGAT
AACGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCA
AAGAAGTGAGACGGTGAATCAGGTTAAGCTGGTCCGGGGTGCATTTTGGGAGATCCTATA
GAAGGGGAAGACGAGAGATGGGCTTAAAAGGGGGCCTATCCAAGACTATCTCATGTCCCC
ATCGCGATACAACCGATCGGTAATACGACCGAAATGGGCGCATGAATTGCTCTAACCGCA
CAAGCGTGTCGGTAGCGTCGTCCGGGCATATATTATGCCTTCGTAAGATTTCGGTCGTAC
TTGTGAGGAATGAGTTTCACACGTCCAATCCTTGCGAGAGCGCGCCACAGGTCGGCCCCC
GATCGACATGATACGTCTATGACCTTTCCAAAGCGGTTCATCCTGCACCGAAACTCATAA
TTAGTCGCCCACTCGAAGCCGGAAATGAATACTGGGTGGT
>Rosalind_5462
GAACCTCCCAACTCCTCGCTCCTGATAAATCCTTAATTCCTCCCAACGTACTGTGTAGTA
TGTGGCATACGAATTCGAAAGCATATCACTGATCGTCTCAAGCCCTTGTTGTACAGCGCT
TAACGCAAGTATGCGGGAAATTCGCGTTCTATGTTCCGTTCCTCCTGGGGACGCGCGTGA
CGGGGTTAACGCACCTCCTGCGAAACAGAACTCTCGACACGTGGTGACAGCATTGGCTAC
GAGTGGCTAACTATATATAACCCAGCCTACCAACGACTGAGGTCGACGCCAGCAGTAGTA
GCCACCCACTACTATTTTGGCGGCTATACAGTCCTACTTCAAACGTGCTCGGACGAGTGG
AATCCGAGTGGGCAAAACGAGCGATATGTTATGGTGTAGTCGTCGCCTGCTAATTCAGAA
TTGTTACTTAAACGCTCTGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGG
TGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCACCTCGTCCGCGGATCGAAATGAGC
ATGTCTGAGTCACATACTTTGACGAAAAGACCTATTCGCCTCTGAGGAGTGCGGTGCCCA
TTACTAACTGTACGAACGTCCTGCTAACTTAGTGCTCGCTAAGGCTCCCCGCTGGTTACT
CCATAGCAGGCTGGTGGAAGAGGTCTCTCACATCAAGGCGCAACTGGCTCGGGCCGGAAC
GACAGAATATCCGGTGCCGAATATCAGCTTCACAATGAGACAGCAATGTACAACACCAGC
CGCCAGATAAGGCAGATCTTACAGCATCAGTCCGCAGCCTTTATACGCGTGAACTTAACT
TTTATGCCTTTCCTATCGGATAGGCACAGACTAATCGTTCAAAGGCACAGTAGAAGGAGA
TGACGCTGATACTTTGCTATAACAATGCGGCTTTCGAATAATGAGGCCTGATTCTCTGTA
AGTTTTAGTCAAGGGCGTGGCATCTATAGACTACTGCCAT
>Rosalind_2286
TATTCAGCGGCGATTATCAGCCTTACTCTCAGCCATATGAAACCGCCGCTTCCCTACAGG
TTGACAGGATTGGTTCCACCTCAGAAACGCTTCACCGACCCAAATAAATACACGTTTGCG
ACGAGAGTTTCGCCCCACGCAATTAGGTGGGTGCAGCATTTAGTGACCGGCTGGTATGAG
ATGTACGTCGTCCTGTCGACCCCCATAGTCCGGCGTCTTAATGGGTTTTTGAGGAAGGGA
GGATGTACGTCTGGCTGGTGTGTGTCAAACACTGTTCGCTCAGTAGGCGTAGCCAGTTAT
AGCTAGCTGGACACGCTGCTGCCGTCCGCGGCTCGTGCCTCTCTTAGGACGCCCTCCACA
TTTCAACGCAACACATTGGGATAGACTTGTTCTTTGTATTACGATCCGCAGAGGGGCGTA
GGACCGTTCCCAGTGCGCGGAGCCACTTTGTATTGACTCAGCATCGAACCTTTTTGGTAG
CGATTGGTGGAAAAATCGTGCGTCCGGAACAATCCTTAGTCCATACATAGCCGCCTTAGT
TATTAAACATAATTTATAACAGCCGTTGCGGGAAGGCCGTGCTGTACGCCCATTAGCGGC
ATCGAATGTATCGTTCCATTTCATAAGATTCTATACAGCCACACGCACAAATGTATGCCT
GACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAG
AAGTGAGACGGTGAATCTTTGACCGTGTCAAATCACCGAAGCGCAGTTTTTCCCGCCTAG
TAGGTGTCTGGCCATTAGGTCACGGTTGGTGTACGGGATAAAGCTTTCTATTGGTATGCA
TGTACACTGGCCGGGGTCGAGGATATGTTTCTTGCATAAACCCTGTAAGAATAACTCCAC
CTCCGCACATCAACGAGAATCTAATCCTCGCAACAAATGTCGCTCGTGGATCCTGGCAGC
TTCGGCCAGTTGCTAAACAGTACTCTTCATCGGTACATCA
>Rosalind_2472
AATTTTTTACGGCGGGAAGAGTCTTCTCTGTGCGGCCCAGTTCAGAATCTAGGAGTGAAC
AATGTACCAAACCCGTCTAGTTCACGGCCCTGGTTTGTATCGGTAGTTGAAATCAAGACC
AGTTACCTTGGATTTCAGCACTTGCCTAACTTTGTATGTTCCTCAGACGCTCGTATCGGT
GTGACGGGTCTGGGCGGAGGGGCACCGGTGTATCAATGTCTCGGGCCTCTGTCGATAGTA
GATTTCGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCC
GCAAAGAAGTGAGACGGTGAATCGTTTTTGGACTTTAAGACAAAAACACGGGCGTATAAG
ACCGTGTCGAACCTTAAATTCCACGCCTCACTATCGGAACGGTGCAGATCTATCCTTCTC
CCTGATAGAACCACGCTGAAATTGTAAGGTCCCCTGTGCCGTGTGGAACACCCCACTGAT
TCCACATCTCGATGAGCATTCGAGGGCCGAGTCCTCATCCCCAACCATGAAGGCACGGCA
ATCGTCAAGGTTGCGGTCATAAACACCAACTTTTCGACACTGTGGGGCATACTGAAGGGT
TGACGGCAGCTTACACTCGAGAACTGGAAGGGATTATACAGGGACGGGCGCTCATTGTAA
TCTTCACGTGATGTCGACGCGAGAGCATCGCTAAAGGTTATGCTCGCCCTGTTCAGATTT
GGGAGTCCCCGCCAGAACTATCAGGCGTCAGTGCTTCCCCGCAGATCTCTGTAACAGGGT
TACCTGTGTAATTGGTATACCCAATCATCCACTAGGCGAACACAAATAAAAGATACCGTG
TCCCATTGAATGCTATGGTGGAACCAGTAAGCATCTCTATAAGAAGGCTATTCGACGGAG
TCTCTCGATACAAGGGTATGATGTTTCATTCCTCGCGACCGGAGACATTTTGATCAGGAT
AGCTGCCGCAAGAGGCTCAGGCATACGGCCTAGAGCGATA
>Rosalind_2607
GGACGACATCAGGAAGGTTCATCCGTTACTATGTGTCCGTAAAAACATTCGTGTTCCATT
AACAGGAGTTATGATTCTTAGCGTCCCTGTAAGCCATTACCAGCGACCACTCAACTAAGA
GGTGCGTCATGTCCCTATGGCTGACATACTCGGAACGTCTCTCGACAAACAAACCCCTTC
GCGCGAAGGCTGTTTATCAGTGTTATTGCCCATTTTAACCGACAATTATTGTTGCCTCGG
TTAGATGCTTTTTGGTTACTTGTCGCCTGGTGAATTATAGACCCTGTCGTGGTTATGTAC
CATGGTCTCTTCGCAAACCGTCCTTTTACCCGTCATGGTCAATGTCGTTGTACCAGCCGT
GGGTATTAGTGTCACGCTATATTGAGGCACCGATAGGCACCTATATACAGCAGGCACTGG
ACTTTCCTGGCTAACTAATCTGTGCACTCCCGATTGGATTACACGTAGTCCAGCGCTTGG
TGGGTACTTGCACAACCGGTTATGCGCTGGCCATGTGAAGTTCATATATCCACACTCGGG
TCCACAATTCGGTGGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCT
GTGCACCCGCAAAGAAGTGAGACGGTGAATCGAGACATTTACAGCTCCATATTTGTAATC
AAGCCCCTGCATATTGCAGGGCGTCTAGTGAAAGCGGTAATTGAGCATATAATTCGTTTA
GTCTTCTACATCTTTAGTTCGACACAATCATGGTGAAGCGGTAGAGGTCTCTTCTTAGCT
GGGCGAGGCATATATTCCAACAAACAAACACGACATCAGGGACATAAGCAGTAGCAGGTT
ATGATGACTATGTTCACGGGCGGGCCCCGAATAATCGAAATGACTTATGTGGAGAATAAG
CAACGTCGTTGAAACATTACCGAGTCTTTACAGCGGATTGGTGTACCCAAACACGGTGAG
GAAGGGCACCAAAGCAGAAACGCAGGTGTGCCCATAATCG
>Rosalind_1835
TAGATTCATTCACGCGGAGTTCAAGTAGGTTCTCACCGTTACTAATCGCGTCGCCCGTTA
TTACAGGTTAGACGCCTTAGGCCTGCCTATTTACTATCGTCCGACAGTAGACACGCCGTC
AATACGTCGTTGGAGAGTTTGTCTACGATACGGGATTCCGGGCCGAAGTAGCCACGCCAT
TATTGAGAAGTGAGAGTGTAACATCTTATGGAAGTGATAGAGCTTCCGTACCCTTGTCGG
CGGACGTGCCTTCATCGATGCACTTCCGTGGCCGCGCTAAGGTTCTCTTATACGACGGCG
TAAAGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGC
AAAGAAGTGAGACGGTGAATCCACATGCTTACTACGGTGAGTCAGTTCATTAAAGCGGCG
TTACCACGGGGCCGCATTTTTACATACTGTAGCCCAAGAAATGACCCATTCGATTATAGA
AGGGTTGTGAGCCACATTGTAATCCTGCCATTCGCCTCTCAGGTTTAAGGTGACGTAATA
TGGCCCAGCCTCGCGGGGCTTCGATGTCATCACGTTATTTGGACCACGAGTTACGAGAAG
AGTATTGATGAGGCATAAAGGTCTGACATGATGAACCTAACGGTACCCGTAACAAAGAAG
TCGCTGAGATGGGGGCTGTCTAGTGGGGCCTGTGAGCGATGACCGGCGCACACGCCGATA
CGACGAGGTCTCTGCCAGAGGGACGGCTCCTGATACAGTATACCTCCCATCGTCACAACC
AGTTGTCAGTGAGCGGTTGTCGACCGCCCGTGGTCGGCTCGCAAGACAGAGCCTGCCGAC
GGAGGTTTTGGTATAGGACCTCACTGTATCGACCTGTAGCGCGCTACGCAGAAAGAGCGT
TAAAGGGGATCGTTACGGAACCGCCCCCTGCGAGCTATAGAAACGACACATACAAGAAAC
GATGTAGCGCGTCGGCTCTATAAGCAACCCCACACTACTG
>Rosalind_4623
ACGTACGAGGTTAAGGAGGTACTAAAACGTTGGACACCCGCGTCTGTCAATGCACGTGCG
CGGTGGTGGATTACCCGTATCTAGGACTCTATCAAATGGTCGCAGCGGCGAACAGGTCTT
GATACGGCAACAGTCCAGTCCTCAATTTTCCGCACACTGCCAATTGACATTAAAGGAACT
TCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAA
TCGTAGCACTGCGTCAACGATGGCATGATCGATTTCCGAAGTAGACAGATACAAAGAATT
CCTAGCAATTACACTGACCTCATGCTATCTCAAGCCTCTTAAACTACGTATTGTCCGTTG
TGGAACACAGACCATAAACGTACCCATCCCGACGTGGAGCAGCGGCTCCCCCTCTTCGGA
AGATCGGCGCGTGTCGAAACGCATGGCAATTCCAGTAGACAGCAGATGACTTACTCAGTA
GTATAGCATCGGGCAAGAAGAGCAGAGTTATGTCTGTGCAGGGGCCGCAGGTCAGATCGG
ATGCACTTTCCCGCATGGAAACACATACTGTCTCATATAATGACTGTACGGAGTTTGATT
GTGATTTTTGCTACAATGTTTGTTAGTAACTACTCATCTCTTCTTTGGACTACGCCTGGA
ATAGCAGAGAACAAGTGCAAATATTAGAAGTCAGCACCTATCCTTAGAAATCTTCGACAA
ATTTTCATTTCTACTTAGTGGCACATGCTTTGTAGAGACGCGACTAGGTGGCCCAGCGAG
ATCGCCGGTTCTTTTAGCGAGCGCAGTGGAGACTGGTCCGAAACGAGGAGAGTGTACTCG
ACCTATTACGCGGAGCTCAGGGGCAATACCTGCGTTGCGGCCGACGTGTGGGAGTAGTCT
GTCTGTAGGTTGGACTGGGGAACAGTTTGTGTTTTAGTCAAAGAATTCAACCGAACGAGG
AATTGGTCATTGCGCTTCAGGCTATGATATGATTCACTAG
>Rosalind_1123
ACCGAATAGTATCGAAGCTTTATACTAATCTACAGTATTTAGCTCCCAACGTCACGGGAA
GATACTAACATCTCTTACTTGTGTAACCGTAACTGATAACCGGAGAAAATCTTCTAAAAT
CTATAATATACTCCTCAGCTGTAACGAAGCTATCAGTTTTCCTTTGGAGGGTCTTGCCGG
AGTTAGAGAGCGTGGGCACTCGACTTCACAGCCCTCGGACATTAAAGGAACTTCTTTCGA
GACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCGCTGTA
ATGACCTGGAATTGCATGTGAGCTCATAGTACGGCTTAGCCCCTCGTTGCCGCACGACTC
GCATAGACAAGAATGATCCAGGGCACAGAGAGCACCAACGAAGTACCTACAATCACGGTG
AAATACACGACGGCAAGGTTTCGAGTCAGACCAATTCTATACATCGCCGTCCAACCGTGA
TTTACCGAGTCAAGTATACGAGTTGATTGTGTTATCACCGAGGGAAGCGATCACCGAGAC
CCTATTTATAGTAGTCGTGTAGGCTAATCCAGTGGTGCGAACATTCGGAAGAATCCCCTT
CATACAAGTTTCGTTGGATAAGGTATCGTTAAGTTATGTATGCTGGGCCGCTTATTTGTA
AGCTTCCCATTCACAAGCAGGAGCCTGGATGCTCGCTATTAGGAACTTTTTCGTCTAGCT
ACAAGAGTATAGTCACCTGACGTCTCGGATCACTAGCAAACCGGTGTCCAAACCCCTGAT
AATACCAAGTCCGAAACCCATGCAGGGACTCGATCTACTACATCGAACATCCGTAAATGA
TAGCATACTAGCGTGCCCAAAACGCAGCAGATTGTAAGCCTCATAGCCATGTATGAAGGG
TTGGAATCCAGTCGGTAGACAGTAGTCGTAACCATCCGTACATGAACTCCGTTTTAAGCA
AAGCGGACCACGAGCGGGATGCCGTTTAGACATAGATACG
>Rosalind_2877
TTCTGACGATGTAAGTCGATCGAGGCGTCCCACTAAGTTGGGATGCCACTCGTACCTCAG
TAAACAGTATGTGCGGCACTGTAGAGGCGCCCACGTACTCGGTTATGAGGAACTAATCCC
TGCCCCGTTAGACTCCCTACGCGGATGCTCGAGGGGATGCTTGATATGATTTTCCCTTGT
AACTCCTGATCTGTAGCCTCGGTCATGAACGGAATGCTTAAAAGCCACTGGAATCCTCCG
CTAAACGCCGGTATGTGAAGCGAATGCGCGGGGCAAAGCAACGCGGCTACTGAATGGGCG
CTAAGTCGTACGAGATAATCGCCTCCTGTGGGTCTTCGGCTGGAACGTTACACAGCGGAG
GTCAAGTATGAAATTGGTAATCGATGCTAAACGAATCGCGGTCGCCGCTCTTATCGCAGG
GATCCGCCTGTCGCTAAATGATCGACTAGCATAACAAGGCATTTGCAGATGGTCGAAGCT
TACGATCTGCTGAGGCGGAGTGGGCTCACGCACTCCGAGAAGACTAAGAATCAAATAAGA
AAGCAGGAGCTTCACAGTGTAATATTGACATTTTTAATGTTATCGCGTCCATACGTAAAA
CACTGAGCACGCACGTAGTCGAGAGCTCCCACCCGTATGTAGTAGCCGAGCTTACACTTT
CTTCGTGACGTATCCGCGGTAAATCAGCTAGGCCCTCACAAATTAGCGTTGGTTGCCGCA
TATTATTATCAAACGGTTGATTTTCCTTGAGCAATCAGTATCCATCCATAAACCTTGGAA
TAGCTAGTAAGTATTAAGGCAACGCTGGCGAATGATTGGTCCTACACCGATTGTAATTGT
CCTGTTCGAACCAAAGGCGTTACGACATTAAAGGAACTTCTTTCGAGACATAAACCATAG
AGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTTTCCCTGTAGTAAGAGCAC
GTAATACTAGCGGGCAGTCTAGCGGGTGAGATGGGCGAAG
>Rosalind_9349
TCGGTGAACTAACTTCCAGCGTGGCACTGACCTTACGATGAATCGCTACACCACCGCGCA
ACAATACTAGGGCTCAGTCTACAGGTAAGCACGATCGATGCAGCAGGGTGTGACCTGAGA
TACGTGGCTGAAACATTCAAAACGCAAGTCGAAAGCGTACTAGCGCTTGGTCTCGCGTTG
TGGTAAAATTGGTCGTGTGTAACTAAACTGTAAACCCCAGCTAAGCGCATCTAGCCCATC
TCAGTTTCCACATTTCACCATAGACTTATCGCCGTGTCACGCGCTGGGCTAGAAAGGTGG
TTGTTCACGCCGGGCAGAGCCGTCTCCAGAAATGTATGAAGGAGTGAACATGGCCGCGTT
TTGCTCTCTTGCTGATTGTTAGATATGTCGTGTCATTATATGGTATTAAGCCTGAAATTT
ACAATTTAGTCGGACAGTCCTCCGTTGATGTAAGTTGTGCGACATTAAAGGAACTTCTTT
CGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCACG
ACAGTCGTCTGCCAAGGTGTGTCACGGAGGATCAAGGGGCAATTTGCCAAGGTGATGCTA
GTAAGCCGTTGTATGAACGGCAACCAATTTAGACGTCTTCATCCAATCTCTACAATGTGA
GGCTGGAGTGCGCCTCATGGACCTCCTGGTCATTCGGACGGGTGTATAGTTATATTGTTC
ATCGTCCACGAACGTTCCTATCGACTCGATTGTGCGGCGGACGTTATATCCCGGCGGGAT
TGAGCGACGCGTGATCTAGGGCATACTATGAGGACGGTGTGTGTTTCCAATTGCTAATCA
TATCAAACACAAGGCCCTAAAACGAACGTCGTGGGGGACGAGCGCGGGACCAAGACACGG
CTTACTCGCTTTGAATACTCCTTCTCCACATCACCTCAAATATAACAGGACTTAAGGTCC
TGTCCAGGCCTCGATAAGATTCGTCTGGTGATCATAGGAG
>Rosalind_6308
GATTGATGGCGCAAGAGTATTTCCCACTAGCGTTGACTTATAGTCTAGTTGACATTAAAG
GAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACG
GTGAATCGGTAGATAGTTCGCAGTTGAACCGTTAAGTCTTCACGCCTACTCAGCCCCTAC
AAATAAAGGGTCACACAGCATGATGTCATTGTGAGTGTTGGCTGGGTGCACCGGCCATAA
TGCATCAAGTGGTCTCCCTCCGTTGATCCCCTCTTCCACGATAAAGTAAGACTACCCTGT
GAGGCTGCGCACACCCTAATCGGAAATGTCGCTTTAGCCTACCTGCCTCTGTAAGGTGCA
CAGCCCGTTGGAGCTGTTGTAACGCGACTGTTGTGCGGACTTGTAATTAATAGGTTAAGG
GAGTCATTGATTTCGTACGCAGTGTCATCTGCTATTACTCTATACCCTCTGTACTGTCAT
GGTAGCAGAAGAGGGCAGTCCGATTAGACCGTTGTCGTAGTTGACTACTTATGCATCAAA
ATGGCCACTCCTGAGCTTCACCCCTAGAGAAACCATGTAAGATCTTTTGTTAAACCCTTG
CTTGCAATGCGGATATATAGGATAGTTGTTCGCATTTAAGCTCAGAATTTTGCTTATGGT
ACGGGATAACGGGCACAATCTATCTAGGTCCTCGACAGTTGCACCCGGTGGTCACCAGGG
CAGGCTGATAGGGTATTTTCTACTAGGAGAGGTACGCTGCTCATCTACTGGAGAGGCGGA
CCGCCCCCGCCTTATTGTTCTGCCGACTATACGCATACCAGCACAAGAATCGCGCTGGGA
TGCTCTAGTTCCTTGGACATCACTGGTCTGCTCTCCATTTTCCTATAAGGAAATGTGCCG
GACCTAGCGGGGGGGGTCTTGATCTTCTACCATTCAGTTCACGATCGAAGTTCCAGTCAC
TTGGTACCTGGAGGTGCTCTTGCACGGATGCGTCTTTCCC
>Rosalind_6508
AGAACAGCAAACTTTATAGTTCCGAGCGGTACCAGCAGCCCACATCTGAAACAGACGGCT
AGCTGAAGGATGTCAATGGCGCAAGGCGGTTGGGACACACGTACTTAGCGCCAGTCTCGT
GGTATTCGTGGTGTTCCGTCCTCATTTGTCTTTACCAGAGGTTTGTGCTACCCACACGTC
GAATCGTGCCATTTCTACCTACGCGGAGCCTATGAATATGTGCGTTGTTTTGGTTATTGG
CCTAATCATTTTCCGAACAGCCCCCTGTGAGTGCGGAATGTCGCCTTAGCAAGCATTTGC
CCGAGCAGTAACTCCTACGCAGTCGTCTGATGTATACAGGTCTTACGAGTTTAATTAGTA
ATGACCACCATTATTTCTGCACCACGCGGTGGACCACAACGATCTCACCCAATCTTAACT
CGTACGTGGCTCAAGACGTCTACCGTATGTAACTTGTAACTAATCCAGCGGTCCGACATT
AAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGA
GACGGTGAATCATCTAAAATGGACCCATCGTTTATGGTCCGTCTCCGCGCTTCCTTGTGG
GTTATTGTAAGGGGTGGTGACTCCCTATACACGTCAATATATTCACCGAACACGAGGAGT
CCTCGAAGGTACAACTCCAGACTCATTGTAAATTTTCAACGAATTGGGGTCCCCCCCGTG
TGTTACCTACCAGCGATCAGTTTTTCTAGCGAGATAGAGTGGCGTATGCTTCTGGTTCCC
TCAATACGTTTCTGTTGTCATGGTTGACCAGTCACCTGCTTTTCCTCCTTCTTGCGGCGG
ACGGTGGTCTCAGTAATCTATGCGTTTGTTACCCTTAGTGGACAGATTTTGCTACTTGCA
ACCCACTTTGATCGTGGGACTAGGGCAAGCCGGCTTAGTTGGAGGTCTCGCACACGGTAC
TCCGGGCTGGCGTACAAGCCACACTGAACGACCTCACGGC
>Rosalind_1117
ATTTTATGCTAAAGTAGCCCGGTGCTGGGGATTTTGTAGGCAGTTAACTATGAGTCGTAG
GGGCGTGATTCATATGTGCCAGAGTGCTGTGACCCCCCGCGTAATGCTGTTTGGGACTAG
CGAGAGGAATATGCTCATTCTCCGGTGTTTACATGATGCTAAGATAGCAAATGTAATGCG
TATCGTAACAAATTTCGAGGAACACCCCCTTAAGTGTCCTAGTCGGATCTTGATGCAAGA
TTGATTATGCTGCATTCTCTGCAACCTTCGCCCAGGTAGCATGGTCTTTACTAAGGGATG
TCACACTGATCGTCCACTTTTGACTATTTCATCAGACATGGGTTGCGGGCATGTTATCCC
ACACTCGGGTAGGAGAACCTCCCTCAATGCTTACAGACGGAGAGCGAGTAACGGTAGATG
TGTCCAGTGATCACGCTATAATTGAGCCCGGGTAGGCTTATAGATAATCGGCTCGATGGG
CGCGTGGTGCGTCTTCGAGCCAAAGTGGTACCTAGTTGGCTAATAGAAATCAGGTCTAGA
GTATGTGTATTCTTAATCTTTAATATTTGAGCACCGCTGTAAGGCGATCCCGACATTAAA
GGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGAC
GGTGAATCGACACCGGCAGACGCTCCCCAAATGAGACACCCACGACCGGCTGACCATTGG
CGAGTCCGAGGGAGTCTGATGATTTCATTAGAATATGATTGCGCGGAAGTTTATACCGAC
TGGCTTGTTCTCCTCAGGCTCCCTAGTTACCCTGCAATCATGTGTGGAGCGGGATTGATT
AAATATTGCGCACTGAATCTGTAGAGCATCTCCAGTGAACAGGGCGCGAGGATTTAAATT
TGGAGGAAGTGAGTATAGACTAACCGTTGGGCGTCTTGTGTTTGCTTAGGAGAAGCACAT
ACTCGAAAAGCACTTTTACTTTCCTCAGAATATGTTCCTA
>Rosalind_7872
GGATATATGCACCTCGATTTCGGACTCATCATCCGTGTTTGTCCCCAGGTAACCATGAAT
GGATTACACCCCTTGCGTAGATGTTTCTAGTTCCCATGTTCCGCTATATGAGTCGTCACG
CAAGTTACTAATACAGCCTTCTGGACGCGTCGTTGAGTCTTAGAAGGGGTGAAAGAATTC
CGGCGAGGAGTCGCAACTGAGCGGCTTAACGCGCTACGACGGTCATAGTAGGAAAAGAAC
CGTCAGGATGCCGAACAAGAAGAACAGTAGCCAATAGGCAGATCATCCGGTTAGTGCGTC
GACAGACGAACACCAGAATGCGAATCATAGCCCCTGCGCGCAGCGGTCACTATCATGTTA
AGTCCCATCCTCCAAGAAGGACGATTAACAATTTAAGCAGACATTAAAGGAACTTCTTTC
GAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTTAC
GTTGATGTACAGAAACTGCGATCCTGATAGTCATAGTGGGTGTCGATAAGCTTAGCAACA
AAGCCAACGTGAACCACTCTTCTTGATCATATTCCCCGTACGTATGTATTGAGACCGCAC
CATGCCGAATCCGGTAGATGTGGGGTGATGGGAGGCATAACAATGCCCGCCTGCTGTACT
GGCAGCGTGGAGCAATATTCTCAACTTGAAAGACACGCTTGACCCAAATTGCCCGTCTAG
AGGGTGTAAAGCCAAACTCAATCGTTATTATGAGCCATTCGGAGCCAGAGGTGGTATACG
AAGCAGGCCAGCTACCAAGGGCACGCTACATGAAGTTACCGTGCCGCTTGAGTTATACTA
CCGCACGGGGCAGCCTGGACAGGCAATGCGTCGAGAGTTCCGCAGTTTAAATGTTCAGCT
TGCAATATACGATGTCCTGTGCACCTGTTTGACACTACGAAGTATTGGTCGGAGAAAGGG
CATGTCAATTTTACCGCTGTCCGGCCGTATTTCGGTTAGC
>Rosalind_9394
AACCCCTCCAACCATTGCATTAAGGGCACCAGCATAACTGCCACGTCGCTTGCGCATCTG
ACGAGAGATAAGCGAAGACCTATAGCTTCGGCAAGCCGTTAACCGTTCAGCCGGCATGCT
TTTGTGCTAGAAGTCTTATGCACCTTGTGGAAACATACAATAGCATAAAGGATAATACTG
CCAAGGACCCTCGCAAGACTCGTACGGTTGTCAGAACTATACATTGCGGTCTCGACTGTT
TTTAATCAAACCGATAAGACCGCGTTGTCGTACGTCCGGAGACGGGAGGGTCGGGCACAG
CAGCGCCCTAGCGCACTATGAGAGTATGTTAAATACAGAGAGTAGTTGGTTAAAACCCCC
ATTAGAGGACCCGGAAAAAAGCGGACAATATAGGACTGGCGATCCTTTGCGTCGTCCCGT
GCGCGCACTCCCACTTGTCAAAGCCACTAGGGGGGTGAATAAAAATTTAACGATTTTACA
GCCAGGGTGACACTACCGCTAATCCTGAACACTTAGCATTCGAGCCAGTCAGGACATGTC
ACGTTCTACCTTCACCTCCAATAAACAGTGATGTCTCAAATCCCCGCTGCCCCGGGGTCG
TCACCATCTCATGTACGGTTTCCTCTGAGACGTCGTGGTTTACTGAAACGTGAACGTACC
CAATTCCAGCAGCGGAAATCAGAACCAGGTATTGCCGAAATCGGTGGCCACACGCCTTGT
ACAGCAATGTGCTGGCCCGGACAGTAAGTGCCAACGTCAATAGGAGTTCCCTTGGTTTTG
CTCAGGGGGCGCTGACTTTCTGTTGATAATGTCGATGAGTCGCATTATATATAGTTGGAA
CAAACTCGACTGCTTTTGAACTAGACATTAAAGGAACTTCTTTCGAGACATAAACCATAG
AGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCGGACGCTTGCCCTATCACCT
TGATCGCAGCTATTGTCTGGCTTCTGAAAAAAGACGCCGG
>Rosalind_1116
GATGCGCTAGGAGCGCCTGTGAGTCTATCTTCAGCCATACAGACTCTCGTGAAGAGTCGA
ATCCCCGATTGCCCCACGATTCTGGAGATTTACGAGCTTCGCTCTCGGTGAAGACGGGCC
ATATCCCCTGCGCACCACGGCTTTCTCAATATACCCTTCCACGCAGATAACCATAAGTAT
CTTTACTACCAGCTTGACTGAAAAATGTAGTCGGGCACTCCAACCAGCATCTCTTCTGGA
GGGGTGACGCAAGGTGTCCATTGCATCTATTTCGGCCCAAACCGACTAATGTTGCATACG
TCAGCATCTTCGAGCTGTCCGCAGTTCACCTGTATGCGCCCAGATTCGATGGGTCCAGTC
GTTGACATCAAGTACCATGGTGTTGTCGTGAGAGGAATTAACCTCGGATGGCTGGTTATG
CGACACCTCTTAATAGGCACACTTCAAGTCTCCCGGGACGTTCCCAACAGCTACTCAGGT
GACCCTCTGCGTGTTGGCACATGCTGGACGCCCCACGACTGGTATTCGTCTACGGTTGTC
TGCCCAGACGATTTGGGCAACGCTGTATTCTGCTGTTATCGTTCGAGGAGTTGCTGGGCC
ATCTCAAAAATCGTTATCCCGCGGCAGTCCGGCTGGGGCGATAACACCTGGTAATATCAG
CGACTGATCGACCTGAAGACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGT
GCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCTTGCCAATTCACCAGTCTGCTTGCTG
TCCGACGCTCACATTCTGTTCTTCCTCTAGGCACCAGATCAATCAAGAGCTTCTCGAGGA
ACGATGGACGTAAATATGCACGAGACAAAGAGTATGTGTTGGGATCCCCCAATAGAACCA
GACTATTGCTTGTGGGGTATTAAAATGTGTGTTATTCTCCATATGTTTCGCGTTTCACAC
CACCATTATTCGGGGAAAGCCGTTACCTGGGAATGCATTC
>Rosalind_4233
CACAGGTTTCCCGCGCTCCCATCGAGTACTCGGGGCCAATCCAGGTACACGTGCTCAAAG
ACATTAAAGGAACTTCTTTCGAGACATAAACCATAGAGTGGTGCTGTGCACCCGCAAAGA
AGTGAGACGGTGAATCGACGTATGGAAATGCGGGCCGCTAATCGCGAGACATCGGTTAGT
CAACAGCACTACCTGTACTCTTCAATTCCAACATGGTGGGCGAGACGCGAAGGAGTGTTG
AATATACATTTACAGAATGTGATGTCATCCCGCAGGTTGCCAAAAACGACAACCGATTTC
GCCTACGTATTTGGTGATGATCATCGCTTGTAAAGAGGACTCCAAGACAAACACCGCCAA
CTTTCGACGATCTGGTGGAATTGATGGGTTTTCTTAGTTAAATCCAAGCTCACTATTCTA
CCTGAAGAACAACAATTACTATAGTTGCATCTTAACCCTTAACCCAAAGCTGTTTGAAGG
ATACCCTAACTTGCTCAGGCCAAACCCTTATGGTCACGATCTTCGGGTCCGTTGACCCGC
GGGGCGTCTGCGAAGGGGTCAGAGTAAGTTACTTGAGACGTTACTATATAGCGGCGCCAA
AAATGGCGAGGATGCCTTCTTCGCATGCCGGGCTCCGTCTGAGCTCAACGGCGTTGCCAT
GACCTTTAACACCCTAACTCATCATAACCGGCCCTAGAACTACTGAATCAACCGGTTCCC
GGACTCACCGAAGGCTGTGACGAATGGGTTGCGTCGGACAAGGCCAGTTAATGAAGAAAA
TCTCAGATCGAGAGGATCTCACTCGTGAGAGACACGTTCTGCGGCATGGCTGAGCTCGTT
CGGAATCGAGGACGACCCCTTGCCCCCGGCCAACATCCGGTATCTGCGGCGGATCTGGGT
TCTTTGGCGTATACTACTAACCTCATTTATTGTAACCTAGTACAAGCCTAATGCCCATTC
GATGCAATAGAGTCCGGTTTAATAAACACGCGACTGTAGG
>Rosalind_8272
GAGTCAAATCGGAAAAGGTGTCGCCTCCCACCTGCAATTGGACTTCATCCTCTGTCTGTC
CGCCAGGCTACGATCTATGGGGCAGAGCGGCCTGCAGACGGGGGAACCGGGCGAATATAA
GAAACACAGTTTTCACAGTGGTGGGTCGGATGCGTAGTTTACCGCGTGATTCTTTGGAGC
TTGGCTGGCCCCTGACAGACCTGGACTGGCGGTTAATCCACCCATCGAGGCTCGTAAAAT
GATGATATAGAGTTAATCAGGAGAGACATTAAAGGAACTTCTTTCGAGACATAAACCATA
GAGTGGTGCTGTGCACCCGCAAAGAAGTGAGACGGTGAATCCGGTGCGCACGAACACTTC
CTGCTGCGGTAATATGGGAGATTATAAACCCTGAGAAGCATTCAATTTCCGGTCAGGACA
TCTCTACGCAGGGTTCCTACAAGTTGGATGAGTAAAAATTTCGACTATCTTGAACAAGTC
TGTTGGTAGGCTAGAGTGCCCCCCACGTGCGCCTTTTAGAGAATACCCGCCTGAGTGACA
GATCATAAGCCCGTAATTGGAGGATACGCCAGGAGCAGTGACTAGCTAGAAACCCTGGCG
ATAACCGTCCGGCGGCAATAAATGTTACTGTAATACACAATGCTGATAATCGACTGACGT
ACGCTGTTGCCAAGTCCCTTGGCCGAGACCCATTCCTTAGACCTTAGATTCAATGCGCTT
TCGCCCTCCTCTTCGTGTTCTGCATCATACGATCATCATTCTCTCCGCATTCCGAAAGTC
GGGGTATGCTAATGCGTTAGGGCGATGAAGTGGTGACGCGTAGGTAAAAGAAAGCGAGTT
AATCATGTACTGTCTATTGGACTAGGTGACAAGCCCCCATGCTATAACATAGCGCGCTTC
GTAGGGTCGACGACAACACTAGGACCAGATAAGTCCCACATCCAGACGACTAGTGTACTT
AGTGAAAAGAGAGGGGATTAACAAGTCATGTGGTGCAACT"""
# + id="Bn_lDsnzphYU" executionInfo={"status": "ok", "timestamp": 1640999087255, "user_tz": 360, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
fasta_dict = readFasta(string)
# + id="6BTmLWkJplfE" executionInfo={"status": "ok", "timestamp": 1640999087255, "user_tz": 360, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
# Naive dynamic programming approach (longest common substring, adapted from Wikipedia)
# Pairwise comparison: the table L is initialised to -1, so L[i, j] stores
# (run length - 1) and the slice A[i - z : i + 1] recovers the full matching substring.
def LCS(string1, string2):
    A = string1
    B = string2
    L = np.full((len(A), len(B)), -1)
    z = 0
    lcs = []
    for i in range(len(A)):
        for j in range(len(B)):
            if A[i] == B[j]:
                if (i == 0 or j == 0):
                    L[i, j] = 0
                else:
                    L[i, j] = L[i-1, j-1] + 1
                if L[i, j] > z:
                    z = L[i, j]
                    lcs = [A[i-int(z):i+1]]
                elif L[i, j] == z:
                    lcs.append(A[i-int(z):i+1])
                else:
                    continue
            else:
                continue
    if len(lcs) > 0:
        return lcs
    else:
        return None
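# Quick sanity check on toy strings (illustrative only, not part of the Rosalind
# input): the longest common substrings of "GATTACA" and "TAGACCA" are 2-mers
# such as "TA", "GA", "AC" and "CA", so the call below should return a list of
# length-2 strings (possibly with repeats, since every maximal run is recorded).
# LCS("GATTACA", "TAGACCA")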
# + id="tfJGIT8Kweib" executionInfo={"status": "ok", "timestamp": 1640999090640, "user_tz": 360, "elapsed": 142, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
# Comparison across N strings: seed with the common substrings of the first two
# strings, then repeatedly intersect the running candidates with the next string.
def gen_LCS(iter_strings, lcs = None):
    if lcs is None:
        lcs = LCS(iter_strings[0], iter_strings[1])
        return gen_LCS(iter_strings[2:], lcs)
    elif len(iter_strings) > 0:
        lcs_list = []
        for ss in lcs:
            lcs2 = LCS(ss, iter_strings[0])
            if lcs2 is not None:
                lcs_list.extend(lcs2)
        return gen_LCS(iter_strings[1:], lcs_list)
    else:
        return lcs
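# Illustrative usage on the Rosalind sample dataset (assumed toy input):
# gen_LCS(["GATTACA", "TAGACCA", "ATACA"]) should yield 2-mers such as "AC",
# "CA" or "TA" -- any one of them is an acceptable shared motif.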
# + colab={"base_uri": "https://localhost:8080/"} id="waWHEGODwn7t" executionInfo={"status": "ok", "timestamp": 1640999095694, "user_tz": 360, "elapsed": 3766, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}} outputId="52ae7073-59ab-4b96-9df9-6a3d17e5e45d"
gen_LCS(list(fasta_dict.values()))
# + id="HQzDtHYC3niZ" executionInfo={"status": "ok", "timestamp": 1640999098000, "user_tz": 360, "elapsed": 195, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
# Suffix Array Approach: concatenate all sequences with unique separator
# characters, sort the suffixes, build the LCP array, then slide a window over
# the suffix array until it covers suffixes from all k source sequences.
def argsort(seq):
    return sorted(range(len(seq)), key = seq.__getitem__)
def concat_string(strings, sep):
    if len(sep) < len(strings):
        print("You need more separator characters")
    else:
        mod_strings = [strings[i] + sep[i] for i in range(len(strings))]
        concat_string = "".join(mod_strings)
        return concat_string
def constructSA(concat_string):
    suffix_array = [concat_string[i:] for i in range(len(concat_string))]
    suffix_idx = argsort(suffix_array)
    return suffix_idx
def len_common_prefix(string1, string2):
    n = 0
    for (i, j) in zip(string1, string2):
        if i == j:
            n += 1
        else:
            break
    return n
def construct_lcp(concat_string, suffix_idx):
    array = [0]
    suffix_idx = suffix_idx
    for (i, j) in zip(suffix_idx[:-1], suffix_idx[1:]):
        array.append(len_common_prefix(concat_string[i:], concat_string[j:]))
    return array
def hash_window(concat_string, suffix_idx, sep, k):
    intersections = ["".join(list(set(concat_string[idx:]).intersection(set(sep)))) for idx in suffix_idx]
    hash_table = Counter(intersections)
    return len(hash_table) == k
def window(concat_string, lcp_array, sep, i, n, k):
    i = int(i)
    n = int(n)
    if hash_window(concat_string, lcp_array[i:i+n, 0], sep, k):
        len_prefix = min(lcp_array[i+1:i+n, 1])
        idx = lcp_array[i, 0]
        substring = concat_string[idx:idx+len_prefix]
        return substring
    else:
        return window(concat_string, lcp_array, sep, i, n+1, k)
def gcs(concat_string, lcp_array, sep, n):
    lcs_list = []
    z = 0
    for i in range(len(lcp_array)-n):
        substring = window(concat_string, lcp_array, sep, i, n, n)
        if len(substring) > z:
            z = len(substring)
            lcs_list = [substring]
        elif len(substring) == z:
            lcs_list.append(substring)
        else:
            continue
    return lcs_list
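# Illustrative sketch of how these pieces fit together on toy data ("1", "2",
# "3" are assumed single-character separators standing in for the emojis used
# further down; the call mirrors the invocation on the real data below):
# toy = ["GATTACA", "TAGACCA", "ATACA"]
# toy_concat = concat_string(toy, "123")
# toy_sa = constructSA(toy_concat)
# toy_lcp = np.vstack([np.array(toy_sa), np.array(construct_lcp(toy_concat, toy_sa))]).T
# gcs(toy_concat, toy_lcp, "123", 3)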
# + id="wP_VuA4y4SSj" executionInfo={"status": "ok", "timestamp": 1640999100155, "user_tz": 360, "elapsed": 126, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
emoji_table = """🙂 Slightly smiling face U+1F642
😀 Smiling face U+1F600
😃 Smiling face with big eyes U+1F603
😄 Smiling face with smiling eyes U+1F604
😁 Beaming face with smiling eyes U+1F601
😅 Smiling face with tears U+1F605
😆 Grinning face U+1F606
🤣 Rolling on the floor laughing U+1F923
😂 Laughing with tears U+1F602
🙃 Upside down face U+1F643
😉 Winking face U+1F609
😊 Smiling face with smiling eyes U+1F60A
😇 Smiling face with halo U+1F607
😎 Smiling face with sunglasses U+1F60E
🤓 Nerdy face U+1F913
🧐 Face with monocle U+1F9D0
🥳 Partying face U+1F973
🥰 Smiling face with hearts U+1F970
😍 Smiling face with heart eyes U+1F60D
🤩 Star-struck U+1F60D
😘 Face blowing kiss U+1F618
😗 Kissing face U+1F617
☺ Smiling face U+263A
😚 Kissing face with closed eyes U+1F61A
😙 Kissing face with smiling eyes U+1F619
🥲 Smiling face with tears U+1F972
😋 Yummy face U+1F60B
😛 Face with tongue U+1F61B
😜 Winking face with tongue U+1F61C
🤪 Zany face U+1F92A
😝 Squinting face with tongue U+1F61D
🤑 Money face with money tongue U+1F911
🤗 Hugs U+1F917
🤭 Face with hand in mouth U+1F92D
🤫 Shushing face U+1F92B
🤔 Thinking face U+1F914
😐 Neutral face U+1F610
🤐 Zipped mouth U+1F910
🤨 Face with raised eyebrow U+1F928
😑 Expressionless face U+1F611
😶 Face with no mouth U+1F636
😏 Smirking face U+1F60F
😒 Unamused face U+1F612
🙄 Face with rolling eyes U+1F644
😬 Grimacing face U+1F62C
😮💨 Grimacing face U+1F4A8
🤥 Lying face U+1F925
😪 Sleepy face U+1F62A
😴 Sleeping face U+1F634
😌 Relieved face U+1F60C
😔 Pensive face U+1F614
🤤 Drooling face U+1F924
😷 Face with mask U+1F637
🤒 Face with thermometer U+1F912
🤕 Face with bandage U+1F915
🤢 Nauseous face U+1F922
🤮 Vomiting face U+1F92E
🤧 Sneezing face U+1F927
🥵 Hot face U+1F975
🥶 Cold face U+1F976
🥴 Woozy face U+1F974
😵 Face with crossed-out eyes U+1F635
🤯 Face with exploding head U+1F92F
😕 Confused face U+1F615
😟 Worried face U+1F61F
🙁 Slightly frowning face U+1F641
☹ Frowning face U+2639
😮 Face with open mouth U+1F62E
😯 Hushed face U+1F62F
😲 Astonished face U+1F632
😳 Flushed face U+1F633
🥺 Begging face U+1F97A
😦 Frowning face with open mouth U+1F626
😧 Anguished face U+1F627
😨 Fearful face U+1F628
😰 Anxious face with sweat U+1F630
😥 Sad but relieved face U+1F625
😢 Crying face U+1F622
😭 Loudly crying face U+1F62D
😱 Screaming face U+1F631
😖 Confounded face U+1F616
😣 Persevering face U+1F623
😞 Disappointed face U+1F61E
😓 Downcast face with sweat U+1F613
😩 Weary face U+1F629
😫 Tired face U+1F62B
🥱 Yawning face U+1F971
😤 Face with steam U+1F624
😡 Pouting face U+1F621
😠 Angry face U+1F620
🤬 Face with symbols on mouth U+1F92C
😈 Smiling face with horns U+1F608
👿 Angry face with horns U+1F47F
💀 Skull U+1F480
☠ Skull and cross-bone U+2620
💩 Pile of poo U+1F4A9
🤡 Clown U+1F921
👹 Ogre U+1F479
👺 Goblin U+1F47A
👻 Ghost U+1F47B
👽 Alien U+1F47D
👾 Alien monster U+1F47E
🤖 Robot U+1F916
😺 Grinning cat U+1F63A
😸 Grinning cat with smiling eyes U+1F638
😹 Grinning cat with tears U+1F639
😻 Smiling cat with heart eyes U+1F63B
😼 Cat with wry smile U+1F63C
😽 Kissing cat U+1F63D
🙀 Weary cat U+1F640
😿 Crying cat U+1F63F
😾 Pouting cat U+1F63E
🙈 See no evil monkey U+1F648
🙉 Hear no evil monkey U+1F649
🙊 Speak no evil monkey U+1F64A
💋 Kiss U+1F48B
💌 Love letter U+1F48C
💘 Heart with arrow U+1F498
💝 HEart with ribbon U+1F49D
💖 Sparking heart U+1F496
💗 Growing heart U+1F497
💓 Beating heart U+1F493
💞 Revolving heart U+1F49E
💕 Two hearts U+1F495
💟 Heart decoration U+1F49F
❣ Heart exclamation U+2763
💔 Broken heart U+1F494
❤️🔥 Heart on fire U+2764
❤️🩹 Mending heart U+2764
❤ Red heart U+2764
🧡 Orange heart U+1F9E1
💛 Yellow heart U+1F49B
💚 Green heart U+1F49A
💙 Blue heart U+1F499
💜 Purple heart U+1F49C
🤎 Brown heart U+1F90E
🖤 Black heart U+1F5A4
🤍 White heart U+1F90D
💯 Hundred(correct) U+1F4AF
💢 Anger U+1F4A2
💥 collision U+1F4A5
💫 Dizzy U+1F4AB
💦 Sweat droplets U+1F4A6
💨 Dashing away U+1F4A8
🕳 Hole U+1F573
💣 Bomb U+1F4A3
💬 Message baloon U+1F4AC
👁️🗨️ Eye in speech bubble U+1F441
🗨 Left speech bubble U+1F5E8
🗯 Anger bubble U+1F5EF
💭 Thought baloon U+1F4AD
💤 zzz U+1F4A4"""
# + id="ErMF0lRO-ceG" executionInfo={"status": "ok", "timestamp": 1640999101365, "user_tz": 360, "elapsed": 158, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
emojis = [i.split("\t")[0] for i in emoji_table.split("\n")]
# + id="86mzrxR__jKy"
concat = concat_string(list(fasta_dict.values()), emojis)
suffix_array = constructSA(concat)
lcp = np.array(construct_lcp(concat, suffix_array))
lcp_array = np.vstack([np.array(suffix_array), np.array(lcp)]).T
gcs(concat, lcp_array, emojis, 3, )
# + id="ziu7WRywCgIy"
| solution_notebooks/Finding a Shared Motif.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="../images/dask_horizontal.svg" align="right" width="30%">
# # Parallelize code with `dask.delayed`
#
#
#
# In this section we parallelize simple for-loop style code with Dask and `dask.delayed`. Often, this is the only function that you will need to convert functions for use with Dask.
#
# This is a simple way to use `dask` to parallelize existing codebases or build [complex systems](https://blog.dask.org/2018/02/09/credit-models-with-dask). This will also help us to develop an understanding for later sections.
#
#
# ## Learning Objectives
#
# - Deploy a local Dask Distributed Cluster and access the diagnostics dashboard
# - Work with `dask.delayed` to parallelize custom functions/workloads
#
# ## Prerequisites
#
#
# | Concepts | Importance | Notes |
# | --- | --- | --- |
# | Familiarity with Python | Necessary | |
# | Familiarity with xarray | Helpful | |
#
#
# - **Time to learn**: *25-35 minutes*
#
#
#
# ## Deploy a local Dask Distributed Cluster
#
# As we'll see in the [distributed scheduler notebook](11-dask-distributed.ipynb), Dask has several ways of executing code in parallel. We'll use the distributed scheduler by creating a `dask.distributed.LocalCluster` and then passing that to the `dask.distributed.Client`. For now, this will provide us with some nice diagnostics. We'll talk about schedulers in depth later.
#
from dask.distributed import Client, LocalCluster
cluster = LocalCluster()
client = Client(cluster)
client
# **Note**:
#
# - A cluster is a set of "workers". In the `LocalCluster` case, these workers are all on a single machine
# - A client allows us to connect our jupyter notebook or script to the cluster
# You may want to look at the keyword arguments available on `LocalCluster` to understand the options available to you on handling the mixture of threads and processes, etc... by un-commenting the cell below:
# +
# # LocalCluster?
# -
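# As a sketch only (the values below are illustrative, not recommendations), an explicit mixture of workers, threads, and processes could be requested like this; it is left commented out so it does not replace the cluster created above:
# +
# # cluster = LocalCluster(n_workers=2, threads_per_worker=2, processes=True)
# # client = Client(cluster)
# -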
# ## Basics
#
# First let's make some toy functions, `square`, `add`, and `square_root` that sleep for a while to simulate work. We'll then time running these functions normally.
#
# In the next section we'll parallelize this code.
# +
import time
import dask
# +
def square(x):
time.sleep(1)
return x ** 2
def add(x, y):
time.sleep(1)
return x + y
def square_root(x):
time.sleep(1)
return x ** (1 / 2)
# -
# We time the execution of this normal code using the `%%time` magic, which is a special function of the Jupyter Notebook.
# +
# %%time
x = square(3)
y = square(4)
z = add(x, y)
r = square_root(z)
r
# -
# This takes `~4 seconds` to run because we call each function sequentially, one after the other.
# Those two `square` calls *could* be called in parallel, because they are totally independent of one-another.
#
# We'll transform the `square`, `add`, and `square_root` functions using the `dask.delayed` function. When we call the delayed version by passing the arguments, exactly as before, the original function isn't actually called yet - which is why the cell execution finishes very quickly.
# Instead, a *delayed object* is made, which keeps track of the function to call and the arguments to pass to it.
#
# +
# %%time
delayed_square = dask.delayed(square)
delayed_add = dask.delayed(add)
delayed_square_root = dask.delayed(square_root)
x = delayed_square(3)
y = delayed_square(4)
z = delayed_add(x, y)
r = delayed_square_root(z)
r
# -
# **This ran immediately, since nothing has really happened yet.**
#
# To get the result, call `compute`.
# +
# %%time
r.compute()
# -
#
#
# <div class="admonition alert alert-success">
# <p class="admonition-title" style="font-weight:bold"></p>
# Notice that this runs faster than the original code.
# </div>
# ## What just happened?
#
# The `r` object is a lazy `Delayed` object. This object holds everything we need to compute the final result, including references to all of the functions that are required and their inputs and relationship to one-another. We can evaluate the result with `.compute()` as above or we can visualize the task graph for this value with `.visualize()`.
r.visualize()
# <div class="admonition alert alert-info">
# <p class="admonition-title" style="font-weight:bold">Reminder: Task and Task Graphs</p>
# <ul>
# <li> A task is a function that you want to call and its corresponding inputs. </li>
# <li> A task graph is a collection of (1) the functions we want to call + their inputs (2) their dependencies. </li>
# </ul>
# </div>
#
#
# <img src="../images/dask-task-stream.gif">
#
#
#
#
# By default the task graph is rendered from top to bottom. In the case that you prefer to visualize it from left to right, pass `rankdir="LR"` as a keyword argument to `.visualize()`.
r.visualize(rankdir="LR")
# Notice that this includes the names of the functions from before, and the logical flow of the outputs of the `square` functions to the inputs of `add` and `square_root`.
# ### Some questions to consider:
#
# - Why did we go from 4s to 3s? Why weren't we able to parallelize down to 2s?
# - What would have happened if the `square`, `add`, and `square_root` functions didn't include the `sleep(1)`? Would Dask still be able to speed up this code?
# - What if we have multiple outputs or also want to get access to x or y?
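# For the last question, one option (shown again in a later exercise; this is just a sketch) is `dask.compute`, which evaluates several delayed objects in a single call and shares any common intermediate results:
# +
x_val, y_val, r_val = dask.compute(x, y, r)
x_val, y_val, r_val
# -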
# ## Exercise: Parallelize a for loop
#
# `for` loops are one of the most common things that we want to parallelize. Use `dask.delayed` on our custom `square` function and the built-in `sum` function to parallelize the computation below:
data = list(range(1, 11))
data
# +
# %%time
results = []
for x in data:
y = square(x)
results.append(y)
total = sum(results)
# -
total
# %%time
# Your parallel code here...
# + tags=[]
# %%time
# Solution
results = []
for x in data:
y = delayed_square(x)
results.append(y)
total = dask.delayed(sum)(results)
print(f"Before computing: {total}") # Let's see what type of thing total is
result = total.compute()
print(f"After computing : {result}", result) # After it's computed
# -
# How do the graph visualizations compare with the given solution, compared to a version with the `sum` function used directly rather than wrapped with `delayed`? Can you explain the latter version? You might find the result of the following expression illuminating
#
#
# ```python
#
# delayed_square(1) + delayed_square(2)
# ```
# + tags=[]
z = delayed_square(1) + delayed_square(2)
z.visualize()
# + tags=[]
sum(results).visualize()
# + tags=[]
total.visualize()
# -
# ## Exercise: Parallelizing a for-loop code with control flow
#
# Often we want to delay only *some* functions, running a few of them immediately. This is especially helpful when those functions are fast and help us to determine what other slower functions we should call. This decision, to delay or not to delay, is usually where we need to be thoughtful when using `dask.delayed`.
#
# In the example below we iterate through a list of inputs. If that input is even then we want to call `square`. If the input is odd then we want to call `double`. This `is_even` decision to call `square` or `double` has to be made immediately (not lazily) in order for our graph-building Python code to proceed.
# +
def double(x):
time.sleep(1)
return x * 2
def square(x):
time.sleep(1)
return x ** 2
def is_even(x):
return not x % 2
data = list(range(1, 11))
data
# +
# %%time
# Sequential code
results = []
for x in data:
if is_even(x):
y = double(x)
else:
y = square(x)
results.append(y)
total = sum(results)
print(total)
# -
# %%time
# Your parallel code here...
# TODO: parallelize the sequential code above using dask.delayed
# You will need to delay some functions, but not all
# +
results = []
for x in data:
    if is_even(x):  # even (this decision must be made immediately, not lazily)
y = dask.delayed(double)(x)
else: # odd
y = dask.delayed(square)(x)
results.append(y)
total = dask.delayed(sum)(results)
total.visualize()
# -
# %%time
total.compute()
# ### Some questions to consider:
#
# - What are other examples of control flow where we can't use delayed?
# - What would have happened if we had delayed the evaluation of `is_even(x)` in the example above?
# - What are your thoughts on delaying `sum`? This function is both computational but also fast to run.
# ## Exercise: Parallelizing climatology and anomaly computations with xarray and `dask.delayed`
#
# In this exercise we read four netCDF files for 4 ensemble members of CESM2 output submitted to the CMIP6 project. We then use xarray to compute anomalies for each ensemble member in parallel i.e. compute the climatology and use xarray's groupby arithmetic to remove this climatology from our original data for each member.
#
# We are given sequential code to do this and parallelize it with `dask.delayed`.
#
# The computation we will parallelize is to compute the anomalies for each ensemble member from the input data. We will do this by using `dask.delayed` together with `xarray`. In a future section we will do this same exercise with an xarray dataset backed by `dask.array`.
# ### Download data
#
# To download the necessary data, make sure to run the [Download Data Notebook](00-download-data.ipynb). This will download all necessary input files for four ensemble members (`r11i1p1f1`, `r7i1p1f1`, `r8i1p1f1`, `r9i1p1f1`) from the [esgf-node](https://esgf-node.llnl.gov/search/cmip6/).
# ### Inspect data
# +
import pathlib
data_dir = pathlib.Path("data/")
files = sorted(data_dir.glob("tos_Omon_CESM2*"))
files
# -
# ### Read one file with `xarray.open_dataset` and compute anomaly
import xarray as xr
ds = xr.open_dataset(files[0], engine="netcdf4")
ds
# Compute anomaly
gb = ds.tos.groupby('time.month')
tos_anom = gb - gb.mean(dim='time')
tos_anom
tos_anom.sel(lon=310, lat=50, method='nearest').plot();
# ### Sequential code: Anomaly for each ensemble member
#
# The above cell computes the anomaly for one ensemble member during the period spanning `2000 - 2014`. Here we expand that to all four ensemble members using a sequential for loop.
# +
# %%time
results = {}
for file in files:
# Read in file
ds = xr.open_dataset(file, engine='netcdf4')
# Compute anomaly
gb = ds.tos.groupby('time.month')
tos_anom = gb - gb.mean(dim='time')
# Save the computed anomaly and record the name of the ensemble member
results[file.stem.split('_')[-3]] = tos_anom
# Combine the results in our dataarray by concatenating the results across a new dimension `ensemble_member`
dset_anom = xr.concat(list(results.values()), dim='ensemble_member')
dset_anom['ensemble_member'] = list(results.keys())
dset_anom
# -
dset_anom.sel(lon=310, lat=50, method='nearest').plot(col='ensemble_member', col_wrap=2, size=4);
# ### Parallelize the code above
#
# Use `dask.delayed` to parallelize the code above. Some extra things you will need to know.
#
# 1. Methods and attribute access on delayed objects work automatically, so if you have a delayed object you can perform normal arithmetic, slicing, and method calls on it and it will produce the correct delayed calls.
#
# ```python
# ds = dask.delayed(xr.open_dataset)(files[0], engine='netcdf4')
# ds.isel(time=0).sum() # everything here was delayed
# ds.groupby('time.season').mean() # everything here was delayed
# ```
#
# 2. Calling the `.compute()` method works well when you have a single output. When you have multiple outputs you might want to use the `dask.compute` function:
#
# ```python
# >>> from dask import delayed, compute
# >>> x = delayed(np.arange)(10)
# >>> y = x ** 2
# >>> min_, max_ = compute(y.min(), y.max())
# >>> min_, max_
# (0, 81)
# ```
#
# This way Dask can share the intermediate values (like `y = x**2`)
#
# So your goal is to parallelize the code above (which has been copied below) using `dask.delayed`. You may also want to visualize a bit of the computation to see if you're doing it correctly.
# + tags=[]
# %%time
# This is just one possible solution, there are
# several ways to do this using `delayed`
results = {}
for file in files:
# Read in file
ds = dask.delayed(xr.open_dataset)(file, engine='netcdf4')
# Compute anomaly
gb = ds.tos.groupby('time.month')
tos_anom = gb - gb.mean(dim='time')
# Save the computed anomaly and record the name of the ensemble member
results[file.stem.split('_')[-3]] = tos_anom
# Compute the results
# dask.compute() returns a tuple here with a single item. So, ensure to grab this one item by using the 0 index
computed_results = dask.compute(results)[0]
# Combine the results in our dataarray by concatenating the results across a new dimension `ensemble_member`
dset_anom = xr.concat(list(computed_results.values()), dim='ensemble_member')
dset_anom['ensemble_member'] = list(computed_results.keys())
dset_anom
# -
# Make a quick plot to ensure the results still match
dset_anom.sel(lon=310, lat=50, method='nearest').plot(col='ensemble_member', col_wrap=2, size=4);
results
results['r11i1p1f1'].visualize(rankdir="LR")
# ### Some questions to consider:
#
# - How much speedup did you get? Is this how much speedup you'd expect?
#
# ## Close the Cluster and Client
#
# Before moving on to the next notebook, make sure to close your cluster, and client or stop this kernel.
client.close()
cluster.close()
# %load_ext watermark
# %watermark --time --python --updated --iversion
# ---
# ## Summary
#
# - `dask.delayed` is a handy mechanism for creating the Dask graph, but the adventurous may wish to play with the full flexibility afforded by building the graph dictionaries directly. Detailed information can be found [here](https://dask.pydata.org/en/latest/graphs.html).
#
# ## Learn More
#
# Visit the [Delayed documentation](https://docs.dask.org/en/latest/delayed.html). In particular, this [delayed screencast](https://www.youtube.com/watch?v=SHqFmynRxVU) will reinforce the concepts you learned here and the [delayed best practices](https://docs.dask.org/en/latest/delayed-best-practices.html) document collects advice on using `dask.delayed` well.
# +
from IPython.display import YouTubeVideo
YouTubeVideo(id="SHqFmynRxVU", width=600, height=300)
# -
# ## Resources and references
#
# * Reference
# * [Docs](https://dask.org/)
# * [Examples](https://examples.dask.org/)
# * [Code](https://github.com/dask/dask/)
# * [Blog](https://blog.dask.org/)
# * Ask for help
# * [`dask`](http://stackoverflow.com/questions/tagged/dask) tag on Stack Overflow, for usage questions
# * [github discussions](https://github.com/dask/dask/discussions) for general, non-bug, discussion, and usage questions
# * [github issues](https://github.com/dask/dask/issues/new) for bug reports and feature requests
#
# * Pieces of this notebook are adapted from the following sources
# * https://github.com/dask/dask-tutorial/blob/main/01_dask.delayed.ipynb
#
#
# <div class="admonition alert alert-success">
# <p class="title" style="font-weight:bold">Previous: <a href="./07-dask-intro.ipynb">Introducing Dask</a></p>
# <p class="title" style="font-weight:bold">Next: <a href="./09-dask-array.ipynb">Dask Array</a></p>
#
# </div>
| notebooks/08-dask-delayed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Homedepot5/DataScience/blob/deeplearning/MeanSquareError.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="LbSolS5k_CGL" colab_type="code" colab={}
import numpy as np
y_predicted = np.array([1,1,0,0,1])
y_true = np.array([0.30,0.7,1,0,0.5])
# + [markdown] id="kswjIv4v_Vsa" colab_type="text"
# Using NumPy
# + id="JrJ7C_uq_VNI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0f6ff8db-f3c0-48a7-83db-5cf3f47d1b0b"
np.mean(np.square(y_predicted-y_true))
# + [markdown] id="gO0uR7Mw_-he" colab_type="text"
# With plain Python
# + id="FeETAXlbAAzq" colab_type="code" colab={}
def mse(y_predicted, y_true):
    total_error = 0
    for yp, yt in zip(y_predicted, y_true):
        total_error += (yp - yt)**2           # squared error for each prediction
    print("Total squared error is:", total_error)
    mse_value = total_error/len(y_true)       # average over all samples
    print("Mean squared error is:", mse_value)
    return mse_value
# + id="XUX4fpUHADNn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="66c2e5a0-b86e-4cc3-9a4d-b186bb58b262"
mse(y_predicted, y_true)
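# As a quick sanity check (this assumes scikit-learn is available in the environment), the same value can be computed with `sklearn.metrics.mean_squared_error`:
# +
from sklearn.metrics import mean_squared_error
mean_squared_error(y_true, y_predicted)
# -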
| MeanSquareError.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Week 3
#
# I hope you're getting the hang of things. Today we're going on with the prinicples of data visualization!
# ## Overview
#
# Once again, the lecture has three parts:
#
# * First you will watch a video on visualization and solve a couple of exercises.
# * After that, we'll be reading about *scientific data visualization*, and the huge number of things you can do with just one variable. Naturally, we'll be answering questions about that book.
# * And finally reproducing some of the plots from that book.
# ## Part 1: Fundamentals of data visualization
# Last week we had a small introduction of data visualization. Today, we are going to be a bit more specific on data analysis and visualization. Digging a bit more into the theory with the next video.
#
# <mark>*It's important to highlight that these lectures are quite important. We don't have a formal book on data visualization. So the only source of knowledge about the **principles**, **theories**, and **ideas**, that are the foundation for good data viz, comes from the videos*. So watch them 🤓 </mark>
#
# [](https://www.youtube.com/watch?v=yiU56codNlI)
# > *Excercise 1.1:* Questions for the lecture
# > * As mentioned earlier, visualization is not the only way to test for correlation. We can (for example) calculate the Pearson correlation. Explain in your own words how the Pearson correlation works and write down it's mathematical formulation. Can you think of an example where it fails (and visualization works)?
#
# Wikipedia: Pearson's correlation coefficient is the covariance of the two variables divided by the product of their standard deviations.
# \begin{equation}
# r =
# \frac{ \sum_{i=1}^{n}(x_i-\bar{x})(y_i-\bar{y}) }{%
# \sqrt{\sum_{i=1}^{n}(x_i-\bar{x})^2}\sqrt{\sum_{i=1}^{n}(y_i-\bar{y})^2}}
# \end{equation}
#
# it fails (and visualization works)? -> A classic example is Anscombe's quartet: four small datasets with (nearly) identical Pearson correlations that look completely different when plotted. More generally, Pearson's r only measures linear association, so a strong non-linear relationship can still give r close to 0.
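# A small sketch of the formula above on made-up numbers (purely illustrative), comparing a manual computation with `np.corrcoef`:
# +
import numpy as np
x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([2.1, 3.9, 6.2, 8.1, 9.8])
r_manual = np.sum((x - x.mean()) * (y - y.mean())) / (
    np.sqrt(np.sum((x - x.mean()) ** 2)) * np.sqrt(np.sum((y - y.mean()) ** 2)))
r_numpy = np.corrcoef(x, y)[0, 1]
r_manual, r_numpy
# -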
#
# > * What is the difference between a bar-chart and a histogram?
#
# e.g. https://keydifferences.com/difference-between-histogram-and-bar-graph.html
#
# histogram -> shows the distribution of a single continuous variable (values grouped into bins)
# bar graph -> displays counts/values for distinct categories
#
# > * I mention in the video that it's important to choose the right bin-size in histograms. But how do you do that? Do a Google search to find a criterion you like and explain it.
#
# https://www.statisticshowto.com/choose-bin-sizes-statistics/#:~:text=Choose%20between%205%20and%2020,more%20useful%20with%2020%20bins.&text=For%20example%2C%20if%20you%20have,instead%20of%206%20or%207.
#
# (As a first guess, you can start with Scott's rule for the bin width, $w = 3.5\sigma / n^{1/3}$, where σ is the standard deviation for the entire data set and n is the number of points. This rule assumes that the data follows a Gaussian distribution; otherwise, it is likely to give a bin width that is too wide. See the end of this chapter for more information on the standard deviation.)
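# A quick sketch of Scott's rule on simulated data (illustrative only):
# +
import numpy as np
rng = np.random.default_rng(0)
sample = rng.normal(loc=0.0, scale=1.0, size=1000)
width = 3.5 * sample.std() / len(sample) ** (1 / 3)   # Scott's rule bin width
n_bins = int(np.ceil((sample.max() - sample.min()) / width))
width, n_bins
# -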
# Ok, now that we've talked a bit about correlation and distributions, we are going to compute/visualize them while also testing some hypotheses along the way. Until now, we have analysed data at an explorative level, but we can use statistics to verify whether relationships between variables are significant. We'll do this in the following exercise.
#
# > *Exercise 1.2:* Hypothesis testing. We will look into correlations between number of steps and BMI, and differences between two data samples (Females vs Males). Follow the steps below for success:
# >
# > * First, we need to get some data. Download and read the data from the Female group [here](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data9b_f.csv) and the one from the Male group [here](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data9b_m.csv).
# > * Next, we are going to verify the following hypotheses:
# > 1. <mark>*H1: there is a statistically significant difference in the average number of steps taken by men and women*</mark>. Is there a statistically significant difference between the two groups? What is the difference between their mean number of steps? Plot two histograms to visualize the step-count distributions, and use the criterion you chose in Ex.1.1 to define the right bin-size.
# **Hint** you can use the function `ttest_ind()` from the `scipy.stats` package to test the hypothesis and consider a significance level $\alpha=0.05$.
# > 2. <mark>*H2: there is a negative correlation between the number of steps and the BMI for women*.</mark> We will use Pearson's correlation here. Is there a negative correlation? How big is it?
# > 3. <mark>*H3: there is a positive correlation between the number of steps and the BMI for men*.</mark> Is there a positive correlation? Compare it with the one you found for women.
# > * We have now gathered the results. Can you find a possible explanation for what you observed? You don't need to come up with a grand theory about mobility and gender, just try to find something (e.g. theory, news, papers, further analysis etc.) to support your conclusions and write down a couple of sentences.
#
# > *Exercise 1.3:* scatter plots. We're now going to fully visualize the data from the previous exercise.
# >
# > * Create a scatter plot with both data samples. Use `color='#f6756d'` for one <font color=#f6756d>sample</font> and `color='#10bdc3'` for the other <font color=#10bdc3>sample</font>. The data is in front of you, what do you observe? Take a minute to think about these exercises: what do you think the point is?
# * After answering the questions above, have a look at this [paper](https://genomebiology.biomedcentral.com/track/pdf/10.1186/s13059-020-02133-w.pdf) (in particular, read the *Not all who wander are lost* section).
# > * The scatter plot made me think of another point we often overlook: *color-vision impairments*. When visualizing and explaining data, we need to think about our audience:
# > * We used the same colors as in the paper, try to save the figure and use any color-blindness simulator you find on the web ([this](https://www.color-blindness.com/coblis-color-blindness-simulator/) was the first that came out in my browser). Are the colors used problematic? Explain why, and try different types of colors. If you are interested in knowing more you can read this [paper](https://www.tandfonline.com/doi/pdf/10.1179/000870403235002042?casa_token=<KEY>).
# > * But, are colors the only option we have? Find an alternative to colors, explain it, and change your scatter plot accordingly.
# +
# 1.2.0 import data
import requests
import pandas as pd
import io
data = pd.DataFrame()
for i in ['f','m']:
URL = f'https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data9b_{i}.csv'
urlData = requests.get(URL).content
rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8')))
rawData.columns = ['steps', 'bmi']
if i == 'f':
rawData['data_set'] = 'female'
else:
rawData['data_set'] = 'male'
rawData = rawData[['data_set', 'steps', 'bmi']]
    if i == 'f':
        data = rawData
    else:
        data = pd.concat([data, rawData])
del i, rawData, URL, urlData
# +
# 1.2.1
from scipy.stats import ttest_ind
male = data[data['data_set'] == 'male']
female = data[data['data_set'] == 'female']
male_stats = male.describe()
female_stats = female.describe()
diff_mean_steps = round(male_stats.at['mean', 'steps'] - female_stats.at['mean', 'steps'], 0)
ttest_steps = ttest_ind(male['steps'], female['steps'])
if ttest_steps[1] < 0.05:
    print("There is a statistically significant difference in the average number of steps taken by men and women.")
else:
    print("There is no statistically significant difference in the average number of steps taken by men and women.")
import matplotlib.pyplot as plt
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(20, 10), sharex=True, sharey=True)
min_steps_male = male['steps'].min()
max_steps_male = male['steps'].max()
axs[0].hist(male['steps'], bins = 5, range = (0, 15000))
axs[0].set_title('Distribution of Steps by Men')
axs[0].set_xlabel('Steps')
axs[0].set_ylabel('Frequency')
min_steps_female = female['steps'].min()
max_steps_female = female['steps'].max()
axs[1].hist(female['steps'], bins = 5, range = (0, 15000))
axs[1].set_title('Distribution of Steps by Women')
axs[1].set_xlabel('Steps')
axs[1].set_ylabel('Frequency')
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.show()
# +
# 1.2.2
import scipy
Pearson_correlation_steps_BMI_women = scipy.stats.pearsonr(female['steps'],female['bmi'])[0]
Pearson_correlation_steps_BMI_men = scipy.stats.pearsonr(male['steps'],male['bmi'])[0]
# source:
# https://www.researchgate.net/publication/6747371_The_inverse_relationship_between_number_of_steps_per_day_and_obesity_in_a_population-based_sample_-_The_AusDiab_study
# 1.3
# Create a scatter plot with both data samples. Use `color='#f6756d'` for one <font color=#f6756d>sample</font> and `color='#10bdc3'`
import matplotlib.pyplot as plt
plt.scatter(x=female['steps'], y=female['bmi'], c='#377eb8')
plt.scatter(x=male['steps'], y=male['bmi'], c='#ff7f00')
# good colors to use, so color blind people can still see the results
# CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3', '#999999', '#e41a1c', '#dede00']
# -
# ## Part 2: Reading about the theory of visualization
#
# Since we can go deeper with the visualization this year, we are going to read the first couple of chapters from [*Data Analysis with Open Source Tools*](http://shop.oreilly.com/product/9780596802363.do) (DAOST). It's pretty old, but I think it's a fantastic resource and one that is pretty much as relevant now as it was back then. The author is a physicist (like Sune) so he likes the way he thinks. And the books takes the reader all the way from visualization, through modeling to computational mining. Anywho - it's a great book and well worth reading in its entirety.
#
# As part of this class we'll be reading the first chapters. Today, we'll read chaper 2 (the first 28 pages) which supports and deepens many of the points we made during the video above.
#
# To find the text, you will need to go to **DTU Learn**. It's under "Course content" $\rightarrow$ "Content" $\rightarrow$ "Lecture 3 reading".
# > *Excercise 2*: Questions for DAOST
# > * Explain in your own words the point of the jitter plot.
#
# Points are plotted and then randomly displaced ("jittered") a little so they do not overlap -> it becomes easy to see when the same value occurs multiple times
#
# > * Explain in your own words the point of figure 2-3. (I'm going to skip saying "in your own words" going forward, but I hope you get the point; I expect all answers to be in your own words).
#
# The anchoring (alignment) of the bin edges matters -> the same data with the same bin width but shifted bins can produce histograms that make quite different statements
#
# > * The author of DAOST (<NAME>) likes KDEs (and think they're better than histograms). And we don't. Sune didn't give a detailed explanation in the video, but now that works to our advantage. We'll ask you to think about this and thereby create an excellent exercise: When can KDEs be misleading?
#
# KDEs can be misleading when the sample is small or the bandwidth is chosen poorly: the smooth curve can suggest peaks, or density beyond hard cutoffs in the data, that the data do not really support. They should be combined with a Cumulative Distribution Function to get a better feeling for the significance of certain peaks
#
# > * Sune discussed some strengths of the CDF - there are also weaknesses. Janert writes "CDFs have less intuitive appeal than histograms or KDEs". What does he mean by that?
#
# Our eye works really well for spotting significant peaks in a histogram or a KDE, because it is good at judging distances, but it struggles more with reading the changing slope of a CDF
#
# > * What is a *Quantile plot*? What is it good for.
#
# It's a technique to graphically check whether a data set follows a given distribution (or whether two data sets follow the same distribution) -> plot their quantiles against each other -> points falling on a line with 45° slope -> same distribution
#
# > * How is a *Probablity plot* defined? What is it useful for? Have you ever seen one before?
#
# - probability plot for assessing how closely two data sets agree, which plots the two cumulative distribution functions against each other
# - P-P plots are widely used to evaluate the skewness of a distribution.
# - nope
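# A minimal sketch of such a plot on simulated normal data (illustrative only), using `scipy.stats.probplot`:
# +
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
stats.probplot(np.random.normal(size=500), dist="norm", plot=plt)
plt.show()
# -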
#
# > * One of the reasons we like DAOST is that Janert is so suspicious of mean, median, and related summary statistics. Explain why one has to be careful when using those - and why visualization of the full data is always better.
#
# - summary statistics can be misleading -> e.g. a shopping cart where 5 items cost 1 dollar each and one costs 20 -> the mean suggests a "typical" price that matches none of the items
# - for a skewed distribution, the mean and median alone give a wrong impression; visualizing the full data also reveals outliers, skew, and multiple modes
#
# > * Sune loves box plots (but not enough to own one of [these](https://images.app.goo.gl/rpozyRX3xu5oFobt8) 😂). When are box plots most useful?
#
# quick overview of the data + finding outliers
#
# > * The book doesn't mention [violin plots](https://en.wikipedia.org/wiki/Violin_plot). Are those better or worse than box plots? Why?
#
# Violin plots are similar to box plots, except that they also show the probability density of the data at different values, usually smoothed by a kernel density estimator. Typically a violin plot will include all the data that is in a box plot: a marker for the median of the data; a box or marker indicating the interquartile range; and possibly all sample points, if the number of samples is not too high.
# -> better, since they convey more information (the full shape of the distribution)
# ## Part 3: *Finally*! Let's create some visualizations
# > *Excercise 3.1*: Connecting the dots and recreating plots from DAOST but using our own favorite dataset.
# > * Let's make a jitter-plot (that is, code up something like **Figure 2-1** from DAOST from scratch), but based on *SF Police data*. My hunch from inspecting the file is that the police-folks might be a little bit lazy in noting down the **exact** time down to the second. So choose a crime-type and a suitable time interval (somewhere between a month and 6 months depending on the crime-type) and create a jitter plot of the arrest times during a single hour (like 13-14, for example). So let time run on the $x$-axis and create vertical jitter.
# > * Last time, we did lots of bar-plots. Today, we'll play around with histograms (creating two crime-data based versions of the plot-type shown in DAOST **Figure 2-2**). I think the GPS data could be fun to see this way.
# > * This time, pick two crime-types with different geographical patterns **and** a suitable time-interval for each (you want between 1000 and 10000 points in your histogram)
# > * Then take the latitude part of the GPS coordinates for each crime and bin the latitudes so that you have around 50 bins across the city of SF. You can use your favorite method for binning. I like `numpy.histogram`. This function gives you the counts and then you do your own plotting.
# > * Next up is using the plot-type shown in **Figure 2-4** from DAOST, but with the data you used to create Figure 2.1. To create the kernel density plot, you can either use `gaussian_kde` from `scipy.stats` ([for an example, check out this stackoverflow post](https://stackoverflow.com/questions/4150171/how-to-create-a-density-plot-in-matplotlib)) or you can use [`seaborn.kdeplot`](https://seaborn.pydata.org/generated/seaborn.kdeplot.html).
# > * Now grab 25 random timepoints from the dataset (of 1000-10000 original data) you've just plotted and create a version of Figure 2-4 based on the 25 data points. Does this shed light on why I think KDEs can be misleading?
#
# +
# 3.1 import and preprocessing
import pandas as pd
import numpy as np
from pathlib import Path
# read police incidence reports, historical 2003 to may 2018
police_incidence_reports = pd.read_csv(Path.cwd().parent/"files"/"Police_Department_Incident_Reports__Historical_2003_to_May_2018.csv")
police_incidence_reports = pd.DataFrame(police_incidence_reports)
police_incidence_reports_2003_till_2017 = police_incidence_reports[~(pd.DatetimeIndex(police_incidence_reports['Date']).year > 2017)]
police_incidence_reports_2003_till_2017['Datetime'] = pd.to_datetime(police_incidence_reports_2003_till_2017['Date']+ ' ' +\
police_incidence_reports_2003_till_2017['Time'])
police_incidence_reports_2003_till_2017.sort_values(['Category', 'Datetime']).reset_index()
larceny_theft = police_incidence_reports_2003_till_2017.loc[police_incidence_reports_2003_till_2017['Category'].isin(['LARCENY/THEFT'])]\
.sort_values(['Datetime']).reset_index()
# +
# 3.1 jitter plot
import seaborn as sns
import matplotlib.pyplot as plt
larceny_theft_january_1st_2003_from_1_till_6_pm = larceny_theft.loc[(larceny_theft['Datetime'] >= '01-01-2003 13:00:00') &\
(larceny_theft['Datetime'] <= '01-01-2003 17:59:59')]
plt.figure(figsize=(10,10))
ax = sns.stripplot(x=larceny_theft_january_1st_2003_from_1_till_6_pm['Datetime'])
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.show()
# +
# 3.1 histograms
vehicle_theft = police_incidence_reports_2003_till_2017.loc[police_incidence_reports_2003_till_2017['Category']\
.isin(['VEHICLE THEFT'])].sort_values(['Datetime'])
vehicle_theft_2003_jan_till_jun = vehicle_theft.loc[(vehicle_theft['Datetime'] >= '01-01-2003 00:00:00') & \
(vehicle_theft['Datetime'] <= '30-06-2003 23:59:59')]
larceny_theft_2003_jan_till_jun = larceny_theft.loc[(larceny_theft['Datetime'] >= '01-01-2003 00:00:00') & \
(larceny_theft['Datetime'] <= '30-06-2003 23:59:59')]
# plot
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(20, 10), sharex=True, sharey=True)
min_latitude_larceny = larceny_theft_2003_jan_till_jun['Y'].min()
max_latitude_larceny = larceny_theft_2003_jan_till_jun['Y'].max()
axs[0].hist(larceny_theft_2003_jan_till_jun['Y'], bins = 50, range = (37.7, 37.82))
axs[0].set_title('Distribution of Larceny from January till June 2003')
axs[0].set_xlabel('Larceny')
axs[0].set_ylabel('Frequency')
min_latitude_vehicle_theft = vehicle_theft_2003_jan_till_jun['Y'].min()
max_latitude_vehicle_theft = vehicle_theft_2003_jan_till_jun['Y'].max()
axs[1].hist(vehicle_theft_2003_jan_till_jun['Y'], bins = 50, range = (37.7, 37.82))
axs[1].set_title('Distribution of Vehicle Theft from January till June 2003')
axs[1].set_xlabel('Vehicle Theft')
axs[1].set_ylabel('Frequency')
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.show()
# -
# 3.1 kernel density plot with data of jitter plot
plt.figure(figsize=(10,10))
ax = sns.kdeplot(x=larceny_theft_january_1st_2003_from_1_till_6_pm['Datetime'])
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.show()
# +
# 3.1 kernel density plot with latitude data
plt.figure(figsize=(10,10))
ax = sns.kdeplot(x=larceny_theft_2003_jan_till_jun['Y'].sample(n = 25))
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.show()
# at histogram values were e.g. max 37.81, but here you cannot see the cutoff
# -
# >
# > Let's take a break. Get some coffee or water. Stretch your legs. Talk to your friends for a bit. Breathe. Get relaxed so you're ready for the second part of the exercise.
#
# > *Exercise 3.2*. Ok. Now for more plots 😊
# > * Now we'll work on creating two versions of the plot in **Figure 2-11**, but using the GPS data you used for your version of Figure 2-2. Comment on the result. It is not easy to create this plot from scracth.
# **Hint:** Take a look at the `scipy.stats.probplot` function.
# > * OK, we're almost done, but we need some box plots. Here, I'd like you to use the box plots to visualize fluctuations of how many crimes happen per day. We'll use data from the 15 focus crimes defined last week.
# > * For the full time-span of the data, calulate the **number of crimes per day** within each category for the entire duration of the data.
# > * Create a box-and whiskers plot showing the mean, median, quantiles, etc for all 15 crime-types side-by-side. There are many ways to do this. I like to use [matplotlibs's built in functionality](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.boxplot.html), but you can also achieve good results with [seaborn](https://seaborn.pydata.org/generated/seaborn.boxplot.html) or [pandas](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.boxplot.html).
# > * What does this plot reveal that you can't see in the plots from last time?
# > * Also I want to show you guys another interesting use of box plots. To get started, let's calculate another average for each focus-crime, namely what time of day the crime happens. So this time, the distribution we want to plot is the average time-of-day that a crime takes place. There are many ways to do this, but let me describe one way to do it.
# * For datapoint, the only thing you care about is the time-of-day, so discard everything else.
# * You also have to deal with the fact that time is annoyingly not divided into nice units that go to 100 like many other numbers. I can think of two ways to deal with this.
# * For each time-of-day, simply encode it as seconds since midnight.
# * Or keep each whole hour, and convert the minute/second count to a percentage of an hour. So 10:15 $\rightarrow$ 10.25, 8:40 $\rightarrow$ 8.67, etc.
# * Now you can create box-plots to create an overview of *when various crimes occur*. Note that these plot have quite a different interpretation than ones we created in the previous exercise. Cool, right?
# +
# Exercise 3.2 probability plot
from scipy import stats
stats.probplot(x=vehicle_theft_2003_jan_till_jun[vehicle_theft_2003_jan_till_jun['Y'] < 90.0]['Y'], dist=stats.beta, sparams=(40,80,10), plot=plt)
# it feels impossible -> also histograms show that they do not follow any common distribution like a normal distribution
# +
# preprocessing data focuscrimes
focuscrimes = pd.DataFrame(['WEAPON LAWS', 'PROSTITUTION', 'DRIVING UNDER THE INFLUENCE', 'ROBBERY', 'BURGLARY', 'ASSAULT', 'DRUNKENNESS', 'DRUG/NARCOTIC', 'TRESPASS', 'LARCENY/THEFT', 'VANDALISM', 'VEHICLE THEFT', 'STOLEN PROPERTY', 'DISORDERLY CONDUCT'])
focuscrimes.columns = ['Category']
focuscrimes = pd.merge(focuscrimes, police_incidence_reports_2003_till_2017, on = 'Category')
focuscrimes.sort_values(by=['Category', 'Datetime'])
# +
focuscrimes_by_unique_day = focuscrimes.groupby(['Category', focuscrimes['Datetime'].dt.floor('d')]).agg({'PdId':'count'})\
.sort_values(by=['Category', 'Datetime']).rename(columns={'PdId':'Counts'}).reset_index()
# focuscrimes by category and weekday
focuscrimes_by_category_weekday = focuscrimes.groupby(['Category', 'DayOfWeek']).agg({'PdId':'count'})\
.sort_values(by=['Category']).rename(columns={'PdId':'Counts'}).reset_index()
fig, axs = plt.subplots(nrows=7, ncols=2, figsize=(20, 20))
m = 0 # row index: when n == 1, increment m and reset n to 0 after plotting
n = 0 # column index: 0 or 1
for i in focuscrimes['Category'].unique():
if n == 1:
y = focuscrimes_by_category_weekday.loc[focuscrimes_by_category_weekday['Category'].isin([i])]
axs[m,n].boxplot(x = y['Counts'])
axs[m,n].set_title(i)
axs[m,n].set_ylabel('Crime Count')
m = m + 1
n = 0
else:
y = focuscrimes_by_category_weekday.loc[focuscrimes_by_category_weekday['Category'].isin([i])]
axs[m,n].boxplot(x = y['Counts'])
axs[m,n].set_title(i)
axs[m,n].set_ylabel('Crime Count')
n = 1
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
fig.suptitle('Boxplots of the Number of Crimes per Day by Category')
plt.show()
# +
# boxplot
focuscrimes['hour_decimal'] = focuscrimes['Datetime'].dt.hour+focuscrimes['Datetime'].dt.minute/60.0
focuscrimes.sort_values(by=['hour_decimal'])
fig, axs = plt.subplots(nrows=7, ncols=2, figsize=(20, 20))
m = 0 # row index: when n == 1, increment m and reset n to 0 after plotting
n = 0 # column index: 0 or 1
for i in focuscrimes['Category'].unique():
if n == 1:
y = focuscrimes.loc[focuscrimes['Category'].isin([i])]
axs[m,n].boxplot(x = y['hour_decimal'])
axs[m,n].set_title(i)
        axs[m,n].set_ylabel('Time of the Day')
m = m + 1
n = 0
else:
y = focuscrimes.loc[focuscrimes['Category'].isin([i])]
axs[m,n].boxplot(x = y['hour_decimal'])
axs[m,n].set_title(i)
axs[m,n].set_ylabel('Time of the Day')
n = 1
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
fig.suptitle('Boxplots of the Number of Crimes per Hour of the Day by Category')
plt.show()
| lectures/Week3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Ice - Albedo Feedback and runaway glaciation
# Here we will use the 1-dimensional diffusive Energy Balance Model (EBM) to explore the effects of albedo feedback and heat transport on climate sensitivity.
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import climlab
from climlab import constants as const
from climlab import legendre
from climlab.domain.field import global_mean
# ## Annual-mean model with albedo feedback: adjustment to equilibrium
# A version of the EBM in which albedo adjusts to the current position of the ice line, wherever $T < T_f$
model1 = climlab.EBM_annual( num_points = 180, a0=0.3, a2=0.078, ai=0.62)
print model1
model1.integrate_years(5)
Tequil = np.array(model1.Ts)
ALBequil = np.array(model1.albedo)
OLRequil = np.array(model1.OLR)
ASRequil = np.array(model1.ASR)
# Let's look at what happens if we perturb the temperature -- make it 20ºC colder everywhere!
model1.Ts -= 20.
model1.compute_diagnostics()
# Let's take a look at how we have just perturbed the absorbed shortwave:
# +
my_ticks = [-90,-60,-30,0,30,60,90]
lat = model1.domains['Ts'].axes['lat'].points
fig = plt.figure( figsize=(12,5) )
ax1 = fig.add_subplot(1,2,1)
ax1.plot(lat, Tequil, label='equil')
ax1.plot(lat, model1.Ts, label='pert' )
ax1.grid()
ax1.legend()
ax1.set_xlim(-90,90)
ax1.set_xticks(my_ticks)
ax1.set_xlabel('Latitude')
ax1.set_ylabel('Temperature (degC)')
ax2 = fig.add_subplot(1,2,2)
ax2.plot( lat, ASRequil, label='equil')
ax2.plot( lat, model1.ASR, label='pert' )
ax2.grid()
ax2.legend()
ax2.set_xlim(-90,90)
ax2.set_xticks(my_ticks)
ax2.set_xlabel('Latitude')
ax2.set_ylabel('ASR (W m$^{-2}$)')
plt.show()
# -
# So there is less absorbed shortwave now, because of the increased albedo. The global mean difference is:
global_mean( model1.ASR - ASRequil )
# Less shortwave means that there is a tendency for the climate to cool down even more! In other words, the shortwave feedback is **positive**.
#
# Recall that the net feedback for the EBM can be written
#
# $\lambda = - B + \frac{\Delta <(1-\alpha) Q >}{\Delta <T>}$
#
# where the second term is the change in the absorbed shortwave per degree global mean temperature change.
#
# Plugging these numbers in gives
#
# $\lambda = - 2 + \frac{-20.4}{-20} = -2 + 1 = -1$ W m$^{-2}$ $^{\circ}$C$^{-1}$
#
# The feedback is negative, as we expect! The tendency to warm up from reduced OLR outweighs the tendency to cool down from reduced ASR. A negative net feedback means that the system will relax back towards the equilibrium.
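# We can check this estimate numerically (a rough sketch; it assumes the same longwave parameter B = 2 W m$^{-2}$ $^{\circ}$C$^{-1}$ used above):
# +
B = 2.
shortwave_feedback = global_mean(model1.ASR - ASRequil) / global_mean(model1.Ts - Tequil)
-B + shortwave_feedback
# -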
# Let's let the temperature evolve one year at a time and add extra lines to the graph:
# +
plt.plot( lat, Tequil, 'k--', label='equil' )
plt.plot( lat, model1.Ts, 'k-', label='pert' )
plt.grid()
plt.xlim(-90,90)
plt.legend()
for n in range(5):
model1.integrate_years(years=1.0, verbose=False)
plt.plot(lat, model1.Ts)
# -
# Temperature drifts back towards equilibrium, as we expected!
# What if we cool the climate **so much** that the entire planet is ice covered?
model1.Ts -= 40.
model1.compute_diagnostics()
# Look again at the change in absorbed shortwave:
global_mean( model1.ASR - ASRequil )
# It's much larger because we've covered so much more surface area with ice!
#
# The feedback calculation now looks like
#
# $\lambda = - 2 + \frac{-109}{-40} = -2 + 2.7 = +0.7$ W m$^{-2}$ $^{\circ}$C$^{-1}$
#
# What? Looks like the **positive** albedo feedback is so strong here that it has outweighed the **negative** longwave feedback. What will happen to the system now? Let's find out...
# +
plt.plot( lat, Tequil, 'k--', label='equil' )
plt.plot( lat, model1.Ts, 'k-', label='pert' )
plt.grid()
plt.xlim(-90,90)
plt.legend()
for n in range(5):
model1.integrate_years(years=1.0, verbose=False)
plt.plot(lat, model1.Ts)
# -
# Something **very different** happened! The climate drifted towards an entirely different equilibrium state, in which the entire planet is cold and ice-covered.
#
# We will refer to this as the **SNOWBALL EARTH**.
#
# Note that the warmest spot on the planet is still the equator, but it is now about -33ºC rather than +28ºC!
# ## Here Comes the Sun! Where is the ice edge?
# The ice edge in our model is always where the temperature crosses $T_f = -10^\circ$C. The system is at **equilibrium** when the temperature is such that there is a balance between ASR, OLR, and heat transport convergence everywhere.
#
# Suppose that sun was hotter or cooler at different times (in fact it was significantly cooler during early Earth history). That would mean that the solar constant $S_0 = 4Q$ was larger or smaller. We should expect that the temperature (and thus the ice edge) should increase and decrease as we change $S_0$.
#
# $S_0$ during the Neoproterozoic Snowball Earth events is believed to be about 93% of its present-day value, or about 1270 W m$^{-2}$.
#
# We are going to look at how the **equilibrium** ice edge depends on $S_0$, by integrating the model out to equilibrium for lots of different values of $S_0$. We will start by slowly decreasing $S_0$, and then slowly increasing $S_0$.
model2 = climlab.EBM_annual(num_points = 360, a0=0.3, a2=0.078, ai=0.62)
S0array = np.linspace(1400., 1200., 200.)
#S0array = np.linspace(1400., 1200., 10.)
#print S0array
model2.integrate_years(5)
print model2.icelat
icelat_cooling = np.empty_like(S0array)
icelat_warming = np.empty_like(S0array)
# First cool....
for n in range(S0array.size):
model2.subprocess['insolation'].S0 = S0array[n]
model2.integrate_years(10, verbose=False)
icelat_cooling[n] = np.max(model2.icelat)
# Then warm...
for n in range(S0array.size):
model2.subprocess['insolation'].S0 = np.flipud(S0array)[n]
model2.integrate_years(10, verbose=False)
icelat_warming[n] = np.max(model2.icelat)
# For completeness: also start from present-day conditions and warm up.
model3 = climlab.EBM_annual(num_points = 360, a0=0.3, a2=0.078, ai=0.62)
S0array3 = np.linspace(1350., 1400., 50.)
#S0array3 = np.linspace(1350., 1400., 5.)
icelat3 = np.empty_like(S0array3)
for n in range(S0array3.size):
model3.subprocess['insolation'].S0 = S0array3[n]
model3.integrate_years(10, verbose=False)
icelat3[n] = np.max(model3.icelat)
fig = plt.figure( figsize=(10,6) )
ax = fig.add_subplot(111)
ax.plot(S0array, icelat_cooling, 'r-', label='cooling' )
ax.plot(S0array, icelat_warming, 'b-', label='warming' )
ax.plot(S0array3, icelat3, 'g-', label='warming' )
ax.set_ylim(-10,100)
ax.set_yticks((0,15,30,45,60,75,90))
ax.grid()
ax.set_ylabel('Ice edge latitude', fontsize=16)
ax.set_xlabel('Solar constant (W m$^{-2}$)', fontsize=16)
ax.plot( [const.S0, const.S0], [-10, 100], 'k--', label='present-day' )
ax.legend(loc='upper left')
ax.set_title('Solar constant versus ice edge latitude in the EBM with albedo feedback', fontsize=16)
plt.show()
# There are actually up to 3 different climates possible for a given value of $S_0$!
# ### How to un-freeze the Snowball
# The graph indicates that if the Earth were completely frozen over, it would be perfectly happy to stay that way even if the sun were brighter and hotter than it is today.
#
# Our EBM predicts that (with present-day parameters) the equilibrium temperature at the equator in the Snowball state is about -33ºC, which is much colder than the threshold temperature $T_f = -10^\circ$C. How can we melt the Snowball?
#
# We need to increase the avaible energy sufficiently to get the equatorial temperatures above this threshold! That is going to require a much larger increase in $S_0$ (could also increase the greenhouse gases, which would have a similar effect)!
#
# Let's crank up the sun to 1830 W m$^{-2}$ (about a 34% increase from present-day).
# +
from climlab.process.process import process_like
model4 = process_like(model2) # initialize with cold Snowball temperature
model4.subprocess['insolation'].S0 = 1830.
model4.integrate_years(40)
#lat = model4.domains['Ts'].axes['lat'].points
plt.plot(model4.lat, model4.Ts)
plt.xlim(-90,90)
plt.ylabel('Temperature')
plt.xlabel('Latitude')
plt.grid()
plt.xticks(my_ticks)
plt.show()
print('The ice edge is at ' + str(model4.icelat) + ' degrees latitude.')
# -
# Still a Snowball... but just barely! The temperature at the equator is just below the threshold.
#
# Try to imagine what might happen once it starts to melt. The solar constant is huge, and if it weren't for the highly reflective ice and snow, the climate would be really really hot!
#
# We're going to increase $S_0$ one more time...
# +
model4.subprocess['insolation'].S0 = 1845.
model4.integrate_years(10)
plt.plot(lat, model4.state['Ts'])
plt.xlim(-90,90)
plt.ylabel('Temperature')
plt.xlabel('Latitude')
plt.grid()
plt.xticks(my_ticks)
plt.show()
# -
# Suddenly the climate looks very very different again! The global mean temperature is
print( model4.global_mean_temperature() )
# A roasty 60ºC, and the poles are above 20ºC. A tiny increase in $S_0$ has led to a very drastic change in the climate.
# +
S0array_snowballmelt = np.linspace(1400., 1900., 50)
icelat_snowballmelt = np.empty_like(S0array_snowballmelt)
icelat_snowballmelt_cooling = np.empty_like(S0array_snowballmelt)
for n in range(S0array_snowballmelt.size):
model2.subprocess['insolation'].S0 = S0array_snowballmelt[n]
model2.integrate_years(10, verbose=False)
icelat_snowballmelt[n] = np.max(model2.diagnostics['icelat'])
for n in range(S0array_snowballmelt.size):
model2.subprocess['insolation'].S0 = np.flipud(S0array_snowballmelt)[n]
model2.integrate_years(10, verbose=False)
icelat_snowballmelt_cooling[n] = np.max(model2.diagnostics['icelat'])
# -
# Now we will complete the plot of ice edge versus solar constant.
fig = plt.figure( figsize=(10,6) )
ax = fig.add_subplot(111)
ax.plot(S0array, icelat_cooling, 'r-', label='cooling' )
ax.plot(S0array, icelat_warming, 'b-', label='warming' )
ax.plot(S0array3, icelat3, 'g-', label='warming' )
ax.plot(S0array_snowballmelt, icelat_snowballmelt, 'b-' )
ax.plot(S0array_snowballmelt, icelat_snowballmelt_cooling, 'r-' )
ax.set_ylim(-10,100)
ax.set_yticks((0,15,30,45,60,75,90))
ax.grid()
ax.set_ylabel('Ice edge latitude', fontsize=16)
ax.set_xlabel('Solar constant (W m$^{-2}$)', fontsize=16)
ax.plot( [const.S0, const.S0], [-10, 100], 'k--', label='present-day' )
ax.legend(loc='upper left')
ax.set_title('Solar constant versus ice edge latitude in the EBM with albedo feedback', fontsize=16)
plt.show()
# The upshot:
#
# - For extremely large $S_0$, the only possible climate is a hot Earth with no ice.
# - For extremely small $S_0$, the only possible climate is a cold Earth completely covered in ice.
# - For a large range of $S_0$ including the present-day value, more than one climate is possible!
# - Once we get into a Snowball Earth state, getting out again is rather difficult!
| courseware/Snowball Earth in the EBM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## This notebook prepares several datasets for testing NN impls while varying the number of features, records, etc. using only integers between 0 and 50
import numpy as np
import pandas as pd
df = pd.DataFrame(np.random.randint(0,10,size=(1000, 4)), columns=['Feature1', 'Feature2', 'Feature3', 'Feature4'])
print(df.shape)
df.head()
# write out the data, the tribuo loader uses the header
df.to_csv('/data/src/Oracle/tribuo/Clustering/Core/src/test/resources/integers-5K-4features.csv', index=False, header=True)
df = pd.DataFrame(np.random.randint(0,25,size=(250000, 8)), columns=['Feature1', 'Feature2', 'Feature3', 'Feature4', 'Feature5', 'Feature6', 'Feature7', 'Feature8'])
print(df.shape)
df.head()
# write out the data, the tribuo loader uses the header
df.to_csv('/data/src/Oracle/tribuo/Clustering/Core/src/test/resources/integers-250K-8features.csv', index=False, header=True)
| notebooks/Data Setup/pandas n-D integers Data Setup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# +
NLP_DATASETS_PATH = '../../../../NLP_Datasets/'
LCQMC_PATH = os.path.join(NLP_DATASETS_PATH, 'LCQMC')
LCQMC_TRAIN_CSV = os.path.join(LCQMC_PATH, 'train.txt')
LCQMC_DEV_CSV = os.path.join(LCQMC_PATH, 'dev.txt')
LCQMC_TEST_CSV = os.path.join(LCQMC_PATH, 'test.txt')
LCQMC_SEP = '\t'
LCQMC_COL_NAMES = ['text_1', 'text_2', 'label']
# -
train_df = pd.read_csv(LCQMC_TRAIN_CSV, sep = LCQMC_SEP, header = None, names = LCQMC_COL_NAMES)
dev_df = pd.read_csv(LCQMC_DEV_CSV, sep = LCQMC_SEP, header = None, names = LCQMC_COL_NAMES)
test_df = pd.read_csv(LCQMC_TEST_CSV, sep = LCQMC_SEP, header = None, names = LCQMC_COL_NAMES)
all_df = pd.concat([train_df, dev_df, test_df], ignore_index=True)
train_df.head()
dev_df.head()
test_df.head()
# ## Missing values
all_df.count()
all_df.shape
all_df['label'].unique()
# ## Target Rate
all_df['label'].mean(), train_df['label'].mean(), dev_df['label'].mean(), test_df['label'].mean()
# ## Text Length (Char-level, non-preprocessed)
text_1_lens = all_df['text_1'].apply(lambda x: len(x.strip()))
text_2_lens = all_df['text_2'].apply(lambda x: len(x.strip()))
text_lens = pd.concat([text_1_lens, text_2_lens], ignore_index=True)
text_lens.head()
text_lens.describe([0.05, 0.1, .80, 0.90, .95, .99])
| fine_tune/notebooks/LCQMC/EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MP1: CS 498 DSG (Spring 2020)
# ##### <NAME>
#
# # Task 0 – Getting to know the analysis environment
# ## Question 1- Import csv data into jupyter notebook
import pandas as pd
import seaborn as sns
import numpy as np
import datetime
from scipy import stats as st
from statsmodels.stats import weightstats as stests
disf=pd.read_csv('mp1_av_disengagements.csv')
disf['Month']=pd.to_datetime(disf['Month'],format='%y-%b')
milesf=pd.read_csv('mp1_av_totalmiles.csv')
milesf['Month']=pd.to_datetime(milesf['Month'],format='%y-%b')
disf.head()
milesf.head()
# ## Question 2. Summarize the following information
# ### a. Total number of AV disengagements over the entire duration of available data
# $$P(\mathrm{DPM}) = \frac{\text{Total number of disengagements}}{\text{Total number of miles}}$$
sum(milesf['total number of disengagements'])
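# The ratio defined above can also be evaluated directly (a sketch, using the column names from mp1_av_totalmiles.csv):
# +
dpm = sum(milesf['total number of disengagements']) / sum(milesf['miles driven in autonomous mode'])
dpm
# -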
# ### b. Number of unique months that have recorded AV disengagements
milesf.Month.unique()
len(milesf.Month.unique())
disf.head()
# ### c. List of unique locations of AV disengagements
disf.Location.unique()
# ### d. Number of unique causes for AV disengagements
disf.Cause.nunique()
# ### e. Which columns in the datasets (if any) have missing values? How many missing values do these column(s) have? (NAs (not valid entries) commonly occur in real world datasets…)
null_columns=disf.columns[disf.isnull().any()] #Check for null values in dataset
disf[null_columns].isnull().sum()
disf.columns[disf.isnull().any()]
# ## Question 3: Plot a pie chart for the causes of AV disengagement. Based on the pie-chart, list the top 2 leading causes of disengagement?
# +
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['figure.figsize'] = [12.0, 8.0]
import matplotlib as mpl
mpl.rcParams['font.size'] = 20.0
causes = disf['Cause'].value_counts()
patches, texts, _ = plt.pie(causes,
# grab labels from frame keys
labels=causes.keys(),
# some formatting
autopct='%1.2f%%', shadow=True)
plt.legend(patches, labels=causes.keys(), bbox_to_anchor=(1.025,1.725),loc="upper left")
#plt.axis('equal',fontsize=40)
plt.show()
# -
# #### According to the pie chart the top 2 leading causes of disengagement are Unwanted Driver Discomfort & Recklessly behaving agent
causes = disf['Cause'].value_counts() #verifying the plot
causes
# ## Question 4. Visualize the trend of disengagement/mile over time with monthly granularity. How would you describe the trend? Are AV’s maturing over time?
trend = milesf.groupby('Month').sum().reset_index()
trend['disengagement per mile'] = trend['total number of disengagements']/trend['miles driven in autonomous mode']
ax = trend.plot(x='Month',y='disengagement per mile',figsize=(10,5), linewidth=5, fontsize=20,legend=False)
ax.set_xlabel('Time (month)', fontsize=15)
ax.set_ylabel('disengagement per mile', fontsize=15)
ax.set_title('Trend', fontsize=20)
# AV disengagements are decreasing over time.
# # Task 1- Basic Analysis of AV Disengagements
# ## Question 1 - What do the following distributions signify about samples drawn from them?
# ### a. Gaussian distribution
# PDF: ${\displaystyle f(x\mid \mu ,\sigma ^{2})={\frac {1}{\sqrt {2\pi \sigma ^{2}}}}e^{-{\frac {(x-\mu )^{2}}{2\sigma ^{2}}}}}$
#
# Normal distributions are widely used in the natural and social sciences to represent real-valued random variables whose underlying distributions are not known. Their importance is largely due to the central limit theorem, which states that, under some conditions, the average of many samples of a random variable with finite mean and variance is itself a random variable whose distribution tends towards a normal distribution as the number of samples increases.
# This distribution is also known as the "bell curve", and it has several properties that make it convenient to work with:
# 1. The mean, median, and mode of a normal distribution are equal, and the distribution is symmetric around the mean.
# 2. The curve is fully determined by the mean and standard deviation of the data; it is dense at the center and sparse at the tails.
# 3. Approximately 95% of the area under the curve lies within 2 standard deviations of the mean.
#
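# As a quick illustrative check of property 3 above (a sketch using an arbitrary standard normal sample, unrelated to the AV data):
# +
samples_demo = np.random.normal(loc=0.0, scale=1.0, size=100000)
frac_within_2std = np.mean(np.abs(samples_demo - samples_demo.mean()) <= 2 * samples_demo.std())
print("Fraction of samples within 2 standard deviations:", frac_within_2std)  # should be close to 0.954
# -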
# ### b. Exponential Distribution
# PDF: $ f(x;\lambda) = \begin{cases}
# \lambda e^{-\lambda x} & x \ge 0, \\
# 0 & x < 0.
# \end{cases}$
#
# The exponential distribution occurs naturally when describing the lengths of the inter-arrival times in a homogeneous Poisson process, i.e., it describes the amount of time between occurrences.
# Its expected value is E[X] = 1/λ, where λ is the rate parameter of the distribution.
# This means that as λ gets larger, the expected time between occurrences gets shorter.
# In a Poisson process, the exponential distribution is therefore a useful model for random arrival patterns.
#
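# A small illustrative sketch (synthetic data, unrelated to the AV dataset): inter-arrival times drawn from an exponential distribution with rate lam_demo have an empirical mean close to 1/lam_demo.
# +
lam_demo = 2.0
inter_arrival_demo = np.random.exponential(scale=1.0 / lam_demo, size=100000)
print("Empirical mean:", inter_arrival_demo.mean(), "| Expected 1/lambda:", 1.0 / lam_demo)
# -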
# ### c. Weibull distribution
#
# $
# {\displaystyle f(x;\lambda ,k)={\begin{cases}{\frac {k}{\lambda }}\left({\frac {x}{\lambda }}\right)^{k-1}e^{-(x/\lambda )^{k}}&x\geq 0,\\0&x<0,\end{cases}}}$
#
# It is widely used for lifetime (reliability) data analysis.
# It has 2 parameters: beta, the shape parameter, and a scale parameter.
# If beta is less than 1, the probability density tends to infinity as time approaches zero and the failure rate decreases over time.
# If beta is equal to 1, the failure rate is constant (the distribution reduces to the exponential distribution).
# If beta is greater than 1, the failure rate increases as time increases.
#
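# An illustrative sketch of how the shape parameter changes the Weibull pdf (synthetic curves with the scale fixed to 1, not derived from the AV data):
# +
x_grid = np.linspace(0.01, 3, 300)
for shape_k in [0.5, 1.0, 2.0]:
    plt.plot(x_grid, st.weibull_min.pdf(x_grid, shape_k), label='shape = %.1f' % shape_k)
plt.xlabel('x')
plt.ylabel('pdf')
plt.title('Weibull pdf for different shape parameters')
plt.legend()
plt.show()
# -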
# ## Question 2 - If the AV suddenly disengages, there may not be enough time for the human to react.It is also possible, that the human is not sufficiently attentive while in the AV becauseof reliance on the technology. To understand the human alertness level, we measure the reaction time of the human driver in the field. Plot the probability distribution of reaction times. Does this distribution fit any known distributions (Gaussian, Weibull, Exponential)? What does the fit distribution signify?
x = disf['ReactionTime'].dropna()  # drop missing reaction times (manual disengagements) before plotting
plt.hist(x, density=True, bins=100)
plt.xlabel('Reaction time')
plt.ylabel('Frequency')
plt.title('Distribution of Reaction Times')
# According to the plot, the distribution appears to follow a Weibull distribution, which indicates that the probability of a very long human reaction time is low.
# ## Question 3 - Compute the average reaction time
# ### a. For the entire duration of the dataset
avg_reaction_time = disf['ReactionTime'].mean()
print(avg_reaction_time)
# ### b. For the entire duration of the dataset differentiated by the location of disengagement
avg_reaction_time = disf.groupby('Location')['ReactionTime'].mean()
print(avg_reaction_time)
# ## Question 4 - It is known that the mean reaction time for humans in non-AV cars is 1.09 seconds. Is the mean reaction time for humans in AV cars different from non-AV cars? Perform a hypothesis testing at a 0.05 significance level.
reaction_time = list(disf[disf['ReactionTime'].notnull()]['ReactionTime'])
one_sample = st.ttest_1samp(reaction_time, 1.09)
print("t-statistic = %.3f \np-value = %.3f." % one_sample)
if one_sample[1] < 0.05:
print("Reaction time for humans in AV cars is different from non-AV cars.")
else:
print("Reaction time for humans in AV cars is not different from non-AV cars.")
# ## Question 5 - Plot the probability distribution of disengagements/mile with monthly granularity. Does this distribution fit any known distributions (Gaussian, Weibull, Exponential)? What does the distribution that fits signify?
#ax = sns.kdeplot(milesf['diss/miles'])
trend['disengagement per mile'].plot(kind='hist',bins=40)
plt.xlabel('Disengagement')
plt.ylabel('Frequency')
plt.title('Distribution of Disengagement per Mile', fontsize=20)
# The distribution mostly fits an exponential distribution. The disengagements per mile decrease continuously except for a few rising values, which can be considered outliers. A larger dataset would give a much better idea of the fit.
# # Task 2 - Probabilistic Analysis of AV Disengagement
# ## Question 1
#
# ### a.
# The assumption on maximum number of disengagements in a mile allows us to treat the occurrence of a disengagement in a mile as a random variable with a **bernoulli distribution**.
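# Illustrative sketch only (the probability p_demo below is arbitrary, not the value estimated from the data in the next cells): if each mile is a Bernoulli trial with success probability p, the total number of disengagements in n miles follows a Binomial(n, p) distribution.
# +
p_demo, n_miles_demo = 0.01, 10000
simulated_disengagements = np.random.binomial(n=1, p=p_demo, size=n_miles_demo).sum()
print("Simulated disengagements in", n_miles_demo, "miles:", simulated_disengagements,
      "(expected around", n_miles_demo * p_demo, ")")
# -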
# ### b. Based on the above assumptions, calculate the probability of disengagement per mile on a cloudy day.
# $$P(dpm) = \frac{\text{Total Disengagements}}{\text{Total Miles}}$$
#
# Using Bayes Theorem,
#
# $$P(dpm|cloudy) = {{P(cloudy|dpm)P(dpm)} \over {P(cloudy)}}$$
total_diseng = milesf['total number of disengagements'].sum()
miles_drives= milesf['miles driven in autonomous mode'].sum()
prob_dis= total_diseng/miles_drives
#print("P(dpm):",prob_dis)
prob_cloudy = 1-0.72 #Given prob clear weather is 0.72
cloudy = disf['Weather']=='cloudy'
prob_dpm_cloudy = (sum(cloudy) / len(disf)) * prob_dis / prob_cloudy
print("P(dpm|cloudy):",prob_dpm_cloudy)
# ### c. Based on the above assumptions, calculate the probability of disengagement per mile on a clear day.
# Using Bayes theorem,
# $$P(dpm|clear) = {{P(clear|dpm)P(dpm)} \over {P(clear)}}$$
clear= ~cloudy
prob_dpm_clear =(sum(clear)/len(disf))*prob_dis/0.72
print("P(dpm|clear): ",prob_dpm_clear)
prob_clear = 0.72
# ### d. Similarly, calculate the probability of an automatic disengagement per mile on a cloudy day, and the probability of an automatic disengagement per mile on a clear day.
# Using the axioms of probability we can rearrange the formulas as follows:
#
# $$P(auto,dpm|cloudy) = \frac{{P(auto,dpm,cloudy)}}{{P(cloudy)}} = \frac{{P(auto,cloudy|dpm)P(dpm)}}{{P(cloudy)}}$$
#
# $$P(auto,dpm|clear) = \frac{{P(auto,dpm,clear)}}{{P(clear)}} = \frac{{P(auto,clear|dpm)P(dpm)}}{{P(clear)}}$$
auto = disf['TypeOfTrigger']=='automatic'
cloudyauto=(cloudy & auto)
likelihood_cloudy=(sum(cloudyauto)/len(disf))
prob_dpm_cloudy_auto=likelihood_cloudy*prob_dis/prob_cloudy
clearauto=(clear & auto)
likelihood_clear=(sum(clearauto) / len(disf))
prob_dpm_clear_auto=likelihood_clear*prob_dis/prob_clear
print("P(auto,dpm|cloudy):",(prob_dpm_cloudy_auto))
print("P(auto,dpm|clear):",(prob_dpm_clear_auto))
# ### e. How likely is it that in 12000 miles, there are 150 or more disengagements under cloudy conditions? [Hint: Think of an appropriate approximation that makes the computation feasible/easier.]
# Since the number of miles is large, we can approximate the binomial distribution of the number of disengagements with a normal distribution. For this normal distribution, we know that
# $$\mu = np = 12000 \times P(dpm |cloudy)$$
# $${\sigma ^2} = np(1 - p) = 12000 \times P(dpm |cloudy)(1 - P(dpm |cloudy))$$
#
# Then we can use mean and std to calculate the z-value which then be used to calculate the p-value.
mean = 12000*prob_dpm_cloudy
std = np.sqrt(12000*prob_dpm_cloudy*(1-prob_dpm_cloudy))
z=(150-mean)/std
print('Mean=',mean)
print('Std=',std)
print('z-score=',z)
p_value = st.norm.sf(abs(z))
print('p-value=',p_value)
print('Probability of 150 or more disengagements in 12000 miles under cloudy conditions is',p_value)
# ## Question 2
# ### Answer the following question about hypothesis testing:
# ### a. What does the normal distribution represent in the hypothesis testing?
# In hypothesis testing, tests of a population mean are commonly performed using the normal distribution, which lets us generalize the test results from the sample to the population. The normal test is valid if the data come from a simple random sample and the population is approximately normally distributed, or if the sample size is large. The normal distribution defines the critical regions: if the test statistic computed from the sample falls in a critical region, the null hypothesis is rejected and the alternative hypothesis is considered. A 'two-tailed' test is obtained by considering the area under both tails of the normal distribution. Assuming a normal distribution also lets us quantify how meaningful an observed result is: the more extreme the z-score, the less likely the result is to have occurred by chance, and the more likely it is to be meaningful.
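# A tiny illustrative example (the z-score below is made up): converting a z-score into a two-tailed p-value with the standard normal distribution, as described above.
# +
z_demo = 1.8
p_two_tailed_demo = 2 * st.norm.sf(abs(z_demo))
print("z =", z_demo, "-> two-tailed p-value =", round(p_two_tailed_demo, 4))
# -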
# ### b. Does rejecting the null hypothesis mean accepting the alternative hypothesis?Explain your answer.
# In hypothesis testing, Ho and Ha are formulated as two complementary statements, i.e., either Ho or Ha holds. Since the null hypothesis states that there is no variation (no statistically significant effect) in the set of observations considered, rejecting this hypothesis leaves only the other possibility, i.e., Ha.
# ## Question 3
# ### At the 0.05 significance level, test the following hypothesis: The AV has more disengagements (automatic and manual) on cloudy days than clear days. Based on the result of the hypothesis test, what can you conclude about the impact of weather conditions on AV safety?
#
# $$\begin{aligned} H_o &: \text{Number of disengagements in cloudy weather} \leq \text{Number of disengagements in clear weather} \\
# H_a &: \text{Number of disengagements in cloudy weather} > \text{Number of disengagements in clear weather} \end{aligned}$$
from statsmodels.stats.proportion import proportions_ztest
count1 = 0
count2 = 0
i=0
while i<len(disf):
if disf.Weather.iloc[i]=="cloudy":
count1 = count1+1
if disf.Weather.iloc[i]=="clear":
count2 = count2+1
i=i+1
print("Count of cloudy ",count1)
print("Count of clear ",count2)
counts=[count1,count2]
total_dis= milesf['total number of disengagements'].sum()
total_miles = milesf['miles driven in autonomous mode'].sum()
cloudy_miles = prob_cloudy*total_miles
clear_miles = prob_clear*total_miles
stat, pval = proportions_ztest(counts, [cloudy_miles, clear_miles],alternative='larger')
print('z-value',stat)
print('p-value(approx):',pval)
# Since the p-value is low, we can reject the null hypothesis and conclude that the number of disengagements on cloudy days is higher than on clear days; weather conditions therefore appear to have an impact on AV safety.
# ## Question 4
# ### What’s the conditional probability that the reaction time is: (Hint, there might be multiple conditions to consider.)
# ### a. Greater than 0.6s given that the weather was cloudy? Reaction time is measured only in cases where there was an automatic disengagement.
#
# $P(RT>0.6s|cloudy,automatic)$ =
rt=disf['ReactionTime']>0.6
prob_reaction_cloudy = sum(cloudyauto & rt )/sum(cloudyauto)
print("P(Reaction Time > 0.6s | Cloudy):",prob_reaction_cloudy)
# ### b. What’s the conditional probability that the reaction time is greater than 0.9s given that the weather was clear?
# $P(RT>0.9s|clear,automatic)$ =
#
prob_reaction09_clear = sum(clearauto & (disf['ReactionTime']>0.9))/sum(clearauto)
print("P(Reaction Time > 0.9s | Clear):",prob_reaction09_clear)
# ## Question 5
# ### A study found that an automatic AV disengagement will result in an accident if the human driver is slow in reacting. Following reactions are considered slow: (i) a reaction time greater than 0.6s under cloudy conditions and, (ii) a reaction time greater than 0.9s under clear conditions. Find the probability of an accident per mile involving an AV disengagement.
# $$P(acc/mile) = P(rt>0.9s|clear, dpm)P(dpm|clear)P(clear) + P(rt>0.6s|cloudy, dpm)P(dpm|cloudy)P(cloudy))$$
prob_reaction = (prob_reaction09_clear * prob_dpm_clear_auto* prob_clear ) + (prob_reaction_cloudy * prob_dpm_cloudy_auto* (1 - prob_clear) )
print("P(acc/mile):",prob_reaction)
# ## Question 6
# ### The probability of a human driver causing a car accident is $2 \times 10^{-6}$ [4]. How do AVs compare to human drivers? Justify your conclusion and explain its consequences.
prob_human = 2e-6
print("P(Accident|Human):",prob_human)
print("P(Accident|AV):",prob_reaction)
if prob_reaction>prob_human:
print("The probability of a human driver causing car accident is lesser than AVs.")
elif prob_reaction == prob_human:
print("The probability of a human driver causing car accident is same as AVs.")
else:
print("The probability of a human driver causing car accident is more than AVs.")
# ## Question 7
# ### The hypothesis test you performed in this task is an example of a parametric test that assumes that the observed data is distributed similarly to some other well-known distribution (such as a normal distribution). However, sometimes, we need to compare two distributions of data that don’t follow any such well-known distributions. Perform a two-sample Kolmogorov-Smirnov test (using the ks_2samp package from Scipy) to compare the following two distributions: (1) distribution of disengagement reaction time when the weather is cloudy and (2) distribution of disengagement reaction time when the weather is clear. What are your null and alternative hypotheses? Assuming a significance level threshold of 0.1, what can you conclude from the test results about the impact of weather conditions on disengagement reaction time?
# Null_H: Both distributions are of same type
# Alternate_H: The distributions are not same
from scipy.stats import ks_2samp
reaction_cloudy = disf.ReactionTime[(disf['Weather'] == 'cloudy') & (disf['TypeOfTrigger'] == 'automatic')]
reaction_clear = disf.ReactionTime[(disf['Weather'] == 'clear') & (disf['TypeOfTrigger'] == 'automatic')]
ks_2samp(reaction_cloudy,reaction_clear)
# Since the p-value is high, we fail to reject the null hypothesis and conclude that the two distributions are similar, which suggests that whether the weather is cloudy or clear has no significant effect on the disengagement reaction time.
# # Task 3 - Using the Naive Bayes Model
# ## Question 1
#
# ### Though there are 10 different causes for disengagement, they can be grouped into the following 3 classes – (i) Controller, (ii) Perception System, and (iii) Computer System. The mapping from Disengagement Cause to Class is given in the table below. You will use these 3 classes as the labels in the NB model. Modify your pandas data frame to include a ‘Class’ column.
# +
# replace the cause values with corresponding class label
disf['Class'] = disf['Cause']
disf['Class'].replace(['Incorrect behavior prediction of others','Recklessly behaving agent','Unwanted Driver Discomfort'],'Controller',inplace=True)
disf['Class'].replace(['Adverse road surface conditions','Emergency Vehicle','Position Estimation Failure','Incorrect Traffic Light Detection'],'Perception System',inplace=True)
disf['Class'].replace(['System Tuning and Calibration','Hardware Fault','Software Froze'],'Computer System',inplace=True)
disf.head()
# -
def get_values(data):
colname = data.columns
c_label = data[colname[-1]].unique()
f_name = colname[:-1]
# create a dict of categorical values for each feature
f_values = {}
for f in f_name:
f_values[f] = data[f].unique()
return c_label,f_name,f_values
c_label,f_name,f_values = get_values(disf[['Location','Weather','TypeOfTrigger','Class']])
data = disf[['Location','Weather','TypeOfTrigger','Class']].values
# ## Question 2
# ### Split the data randomly into training and testing (80-20 split). Use the fields ‘Location’, ‘Weather’, and ‘TypeOfTrigger’ as features and use ‘Class’ as assigned in the previous question as the label.
# function that splits dataset into train and test data
def split(data,train_ratio):
k = int(len(data) * train_ratio)
# randomly shuffle the dataset
np.random.shuffle(data)
train, test = data[:k,:], data[k:,:]
return train, test
train, test = split(data,0.8)
print("Size of Train Data: {0}".format(len(train)))
print("Size of Test Data: {0}".format(len(test)))
# ## Question 3
# ### Using the training dataset, create a NB model to identify the cause of disengagement based on the features ‘Location’, ‘Weather’, and ‘TypeOfTrigger’. Show the conditional probability tables from the training dataset.
# +
# function accepts the data values in the form of a list.
def train_NB(data,c_label,f_name,f_values):
c_prob = {}
f_prob = {}
N = len(data)
for label in c_label:
# separate data values for class label
separated = data[data[:,-1]==label]
Ns = len(separated)
# calculate class probability
c_prob[label] = Ns/N
temp2 = {}
        for i, f in enumerate(f_name): # repeat the procedure for each feature
# create a temporary dictionary
temp1 = {}
for val in f_values[f]:
# calculate conditional probability
#temp1[val] = sum(separated[f]==val)/Ns
temp1[val] = round(sum(separated[:,i]==val)/Ns,4)
temp2[f]=temp1
f_prob[label]=temp2
# return a dict containing individual class probabilities
# return a dict containing conditional probabilities of each categorical value
return c_prob,f_prob
c_prob,f_prob = train_NB(train,c_label,f_name,f_values)
c_prob
# -
pd.DataFrame(f_prob)
# ## Question 4
# ### Using the model to predict the cause of the disengagement for the test dataset. Compute the accuracy achieved by your model.
# +
def NB_pred(data,c_prob,f_prob,c_label,f_name,f_values):
pred = []
for row in data:
score = {}
# calculate conditional probability for each class
for c in c_label:
CP = 1
# calculate probability using Bayes Rule assuming that features are independent
for i,f in enumerate(f_name):
CP = CP*f_prob[c][f][row[i]]
score[c]= CP*c_prob[c]
# use MAP rule to get class label
# select the class with maximum probability
pred.append(max(score, key=score.get))
return pred
# function that calculates accuracy of a model given the predictions
def get_accuracy(true,pred):
accuracy = round(sum(true==pred)/len(true),4)
return accuracy
# -
pred = NB_pred(test,c_prob,f_prob,c_label,f_name,f_values)
accuracy = get_accuracy(test[:,-1], pred)
print("Accuracy: {0}".format(accuracy))
# ## Question 5
# ### To get a better estimate of the model performance, perform cross-validation. Repeat sub-questions 2, 3 and 4 five times for different splits of training and test data, and report the average accuracy.
accuracy = []
for i in range(10):
train, test = split(data, train_ratio=0.8)
c_prob,f_prob = train_NB(train,c_label,f_name,f_values)
pred = NB_pred(test,c_prob,f_prob,c_label,f_name,f_values)
acc = get_accuracy(test[:,-1], pred)
accuracy.append(acc)
avg_acc = round(np.mean(accuracy),4)
print("Accuracy List: {0}".format(accuracy))
print("Average Accuracy: {0}".format(avg_acc))
| AV_DataAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit
# name: python3
# ---
# # Use the Shirt Class
#
# You've seen what a class looks like and how to instantiate an object. Now it's your turn to write code that instantiates a shirt object.
#
# # Explanation of the Code
# This Jupyter notebook is inside of a folder called 1.OOP_syntax_shirt_practice. You can see the folder if you click on the "Jupyter" logo above the notebook. Inside the folder are three files:
# - shirt_exercise.ipynb, which is the file you are currently looking at
# - answer.py containing answers to the exercise
# - tests.py, tests for checking your code - you can run these tests using the last code cell at the bottom of this notebook
#
# # Your Task
# The shirt_exercise.ipynb file, which you are currently looking at if you are reading this, has an exercise to help guide you through coding with an object in Python.
#
# Fill out the TODOs in each section of the Jupyter notebook. You can find a solution in the answer.py file.
#
# First, run this code cell below to load the Shirt class.
class Shirt:
def __init__(self, shirt_color, shirt_size, shirt_style, shirt_price):
self.color = shirt_color
self.size = shirt_size
self.style = shirt_style
self.price = shirt_price
def change_price(self, new_price):
self.price = new_price
def discount(self, discount):
return self.price * (1 - discount)
### TODO:
# - instantiate a shirt object with the following characteristics:
# - color red, size S, style long-sleeve, and price 25
# - store the object in a variable called shirt_one
#
#
###
shirt_one = Shirt('red', 'S', 'long-sleeve', 25)
### TODO:
# - print the price of the shirt using the price attribute
# - use the change_price method to change the price of the shirt to 10
# - print the price of the shirt using the price attribute
# - use the discount method to print the price of the shirt with a 12% discount
#
###
print(shirt_one.price)
shirt_one.change_price(10)
print(shirt_one.price)
print(shirt_one.discount(.12))
### TODO:
#
# - instantiate another object with the following characteristics:
# - color orange, size L, style short-sleeve, and price 10
# - store the object in a variable called shirt_two
#
###
shirt_two = Shirt('orange', 'L', "short-sleeve", 10)
### TODO:
#
# - calculate the total cost of shirt_one and shirt_two
# - store the results in a variable called total
#
###
total = shirt_one.price + shirt_two.price
### TODO:
#
# - use the shirt discount method to calculate the total cost if
# shirt_one has a discount of 14% and shirt_two has a discount
# of 6%
# - store the results in a variable called total_discount
###
total_discount = shirt_one.discount(.14) + shirt_two.discount(.06)
# # Test your Code
#
#
# The following code cell tests your code.
#
# There is a file called tests.py containing a function called run_tests(). The run_tests() function executes a handful of assert statements to check your work. You can see this file if you go to the Jupyter Notebook menu and click on "File->Open" and then open the tests.py file.
#
# Execute the next code cell. The code will produce an error if your answers in this exercise are not what was expected. Keep working on your code until all tests are passing.
#
# If you run the code cell and there is no output, then you passed all the tests!
#
# As mentioned previously, there's also a file with a solution. To find the solution, click on the Jupyter logo at the top of the workspace, and then enter the folder titled 1.OOP_syntax_shirt_practice
# +
# Unit tests to check your solution
from tests import run_tests
run_tests(shirt_one, shirt_two, total, total_discount)
# -
| lessons/ObjectOrientedProgramming/JupyterNotebooks/1.OOP_syntax_shirt_practice/shirt_exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fitting PAT measurements
# Authors: <NAME>, <NAME>
#
# The core package for this example is `qtt.algorithms.pat_fitting`. We use this package to analyse the data from a photon-assisted-tunneling measurement performed on 2-dot system, and extract the tunnel coupling and lever arm from the measurement.
#
# For more information on PAT measurements and fitting see "Automated tuning of inter-dot tunnel coupling in double quantum dots", https://doi.org/10.1063/1.5031034
# Import the modules used in this program:
# +
import os, sys
import qcodes
import scipy.constants
import matplotlib.pyplot as plt
import numpy as np
from qcodes.plots.qcmatplotlib import MatPlot
import qtt
from qtt.data import load_example_dataset
from qtt.algorithms.tunneling import fit_pol_all, polmod_all_2slopes
from qtt.algorithms.pat_fitting import fit_pat, plot_pat_fit, pre_process_pat, show_traces, detect_peaks
# %matplotlib inline
# -
# ## Load dataset
dataset_pat = load_example_dataset('PAT_scan') # main dataset for PAT analysis
dataset_pol = load_example_dataset('PAT_scan_background') # 1D trace of the background data
# Set some parameters from the data.
la = 74.39 # [ueV/mV], lever arm
sweep_detun = {'P1': -1.1221663904980717, 'P2': 1.262974805193041} # [mV on gate / mV swept], sweep_detun * la = detuning in ueV
kb = scipy.constants.physical_constants['Boltzmann constant in eV/K'][0]*1e6 # [ueV/K]
Te = 98e-3*kb # [ueV], electron temperature
ueV2GHz = 1e15*scipy.constants.h/scipy.constants.elementary_charge # [ueV/GHz]
# Show the PAT scan and the background data.
# +
MatPlot(dataset_pat.default_parameter_array(), num=5)
plt.title('PAT scan')
pol_fit, pol_guess, _ = fit_pol_all(la*dataset_pol.sweepparam.ndarray, dataset_pol.measured1, kT=Te) # 1 indicates fpga channel
fig_pol = plt.figure(10)
plt.plot(la*dataset_pol.sweepparam.ndarray, dataset_pol.measured1)
plt.plot(la*dataset_pol.sweepparam.ndarray, polmod_all_2slopes(la*dataset_pol.sweepparam.ndarray, pol_fit, kT=Te), 'r--')
plt.xlabel('%.2f*%s (ueV)' % (la,str({plg: '%.2f' % sweep_detun[plg] for plg in sweep_detun})))
plt.ylabel('signal')
plt.title('t: %.2f ueV, kT: %.2f ueV, la: %.2f ueV/mV' % (np.abs(pol_fit[0]), Te, la))
_=plt.suptitle(dataset_pol.location)
# -
# ## Fit PAT model
# +
x_data = dataset_pat.sweepparam.ndarray[0]
y_data = np.array(dataset_pat.mwsource_frequency)
z_data = np.array(dataset_pat.measured)
background = np.array(dataset_pol.default_parameter_array())
pp, pat_fit = fit_pat(x_data, y_data, z_data, background)
imq=pat_fit['imq']
# -
pat_fit_fig = plt.figure(100); plt.clf()
plot_pat_fit(x_data, y_data, imq, pp, fig=pat_fit_fig.number, label='fitted model')
plt.plot(pat_fit['xd'], pat_fit['yd'], '.m', label='detected points')
plt.title('t: %.2f ueV = %.2f GHz, la: %.2f ueV/mV' % (np.abs(pp[2]), np.abs(pp[2]/ueV2GHz), pp[1]))
plt.suptitle(dataset_pat.location)
plt.xlabel('%s (meV)' % (str({plg: '%.2f' % sweep_detun[plg] for plg in sweep_detun})))
plt.ylabel('MW frequency (Hz)')
_=plt.legend()
# ## Fit 2-electron model
dataset_pat = load_example_dataset(r'2electron_pat/pat')
dataset_pol = load_example_dataset(r'2electron_pat/background')
# +
x_data = dataset_pat.sweepparam.ndarray[0]
y_data = np.array(dataset_pat.mwsource_frequency)
z_data = np.array(dataset_pat.measured)
background = np.array(dataset_pol.default_parameter_array())
pp, pat_fit = fit_pat(x_data, y_data, z_data, background, trans='two_ele', even_branches=[True, False, False])
imq=pat_fit['imq']
plot_pat_fit(x_data, y_data, imq, pp, fig=pat_fit_fig.number, label='fitted model', trans='two_ele')
plt.plot(pat_fit['xd'], pat_fit['yd'], '.m', label='detected points')
plt.title('t: %.2f ueV = %.2f GHz, la: %.2f ueV/mV' % (np.abs(pp[2]), np.abs(pp[2]/ueV2GHz), pp[1]))
_=plt.legend()
# -
# ## Show pre-processing and intermediate steps
imx, imq, _ = pre_process_pat(x_data, y_data, background, z_data, fig=100)
show_traces(x_data, z_data, fig=101, direction='h', title='Traces of raw PAT scan')
xx, _ = detect_peaks(x_data, y_data, imx, sigmamv=.05, fig=200)
| docs/notebooks/analysis/example_PAT_fitting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training evaluation
# %load_ext autoreload
# %autoreload 2
# +
import warnings
warnings.filterwarnings('ignore')
import os
import sys
import random
import math
import re
import time
import tensorflow as tf
import matplotlib
import matplotlib.patches as patches
import json
import skimage.io
from IPython.display import clear_output
import numpy as np
import utils
import visualize
from visualize import display_images
import model as modellib
from model import log
import xavi
import coco
ROOT_DIR = os.getcwd() # Root directory of the project
XAVI_DIR = os.path.join(ROOT_DIR, "XAVI_Dataset") # Xavi Dataset directory
MODEL_DIR = os.path.join(XAVI_DIR, "model") # Directory to save trained model
DEFAULT_LOGS_DIR = os.path.join(MODEL_DIR, "logs") # Directory to save logs
XAVI_MODELS = os.path.join(DEFAULT_LOGS_DIR, "xavi51220210430T1046") # Local path to trained weights file
TRAINING_SUBSET = "train512"
VALIDATION_SUBSET = "val512"
COCO_DIR = os.path.join(os.getcwd(), "COCO_Dataset") # COCO Dataset directory
COCO_VALIDATION_SUBSET = "valminusminival"
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ## Load datasets
# ### XAVI dataset
# +
# Build validation dataset
json_categories = json.load(open(os.path.join(XAVI_DIR, VALIDATION_SUBSET, "categories.json")))["Categories"]
xavi_dataset = xavi.XaviDataset()
xavi_dataset.load_xavi(XAVI_DIR, VALIDATION_SUBSET, xavi.MasksType["PNG"], json_categories, (512, 512))
xavi_dataset.prepare()
print("Images: {}\nClasses: {}".format(len(xavi_dataset.image_ids), xavi_dataset.class_names))
# -
# ### COCO dataset
# +
# Build validation dataset
coco_dataset = coco.CocoDataset()
coco_dataset.load_coco(COCO_DIR, COCO_VALIDATION_SUBSET, class_ids=[1,3])
coco_dataset.prepare()
clear_output(wait=True)
print("Images: {}\nClasses: {}".format(len(coco_dataset.image_ids), coco_dataset.class_names))
# -
# ## Evaluation of the training
# For each class, for each model (output of each epoch) and each dataset, compute the mAP:
# +
N_IMAGES = 100
# Load config and model in inference mode
config = xavi.XaviConfig(json_categories)
xavi_model = modellib.MaskRCNN(mode="inference", model_dir=DEFAULT_LOGS_DIR, config=config)
APS_XaviDataset = []
APS_XaviDataset_humans = []
APS_XaviDataset_cars = []
APS_CocoDataset = []
APS_CocoDataset_humans = []
APS_CocoDataset_cars = []
weights_files = [f for f in os.listdir(XAVI_MODELS) if f.endswith(".h5")]
for i, weights_file in enumerate(weights_files):
# Load model
xavi_model.load_weights(os.path.join(XAVI_MODELS, weights_file), by_name=True)
# Pick random images from validation test
xavi_ids = np.random.choice(xavi_dataset.image_ids, N_IMAGES, replace=False)
coco_ids = np.random.choice(coco_dataset.image_ids, N_IMAGES, replace=False)
# Compute AP for XAVI Dataset
(APs, class_APs) = utils.compute_batch_ap(xavi_dataset, config, xavi_model, xavi_ids, show_images = False, target_classes=(1,2))
APS_XaviDataset.append(np.mean(APs))
APS_XaviDataset_humans.append(np.mean(class_APs[1]))
APS_XaviDataset_cars.append(np.mean(class_APs[2]))
# Compute AP for COCO Dataset
(APs, class_APs) = utils.compute_batch_ap(coco_dataset, config, xavi_model, coco_ids, show_images = False, target_classes=(1,2))
APS_CocoDataset.append(np.mean(APs))
APS_CocoDataset_humans.append(np.mean(class_APs[1]))
APS_CocoDataset_cars.append(np.mean(class_APs[2]))
clear_output(wait=True)
print(i, " / ", len(weights_files))
print("APS Xavi Dataset: ", APS_XaviDataset)
print("APS COCO Dataset: ", APS_CocoDataset)
clear_output(wait=True)
print("APS Xavi Dataset: ", APS_XaviDataset)
print("APS Xavi Dataset (humans): ", APS_XaviDataset_humans)
print("APS Xavi Dataset (cars): ", APS_XaviDataset_cars)
print("APS COCO Dataset: ", APS_CocoDataset)
print("APS COCO Dataset (humans): ", APS_CocoDataset_humans)
print("APS COCO Dataset (cars): ", APS_CocoDataset_cars)
| XAVI - 6 - Training evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# **Chapter 10 – Introduction to Artificial Neural Networks**
# _This notebook contains all the sample code and solutions to the exercises in chapter 10._
# # Setup
# First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
# +
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# To plot pretty figures
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "ann"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
# -
# # Perceptrons
# +
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
X = iris.data[:, (2, 3)] # petal length, petal width
y = (iris.target == 0).astype(np.int)
per_clf = Perceptron(random_state=42)
per_clf.fit(X, y)
y_pred = per_clf.predict([[2, 0.5]])
# -
y_pred
# +
a = -per_clf.coef_[0][0] / per_clf.coef_[0][1]
b = -per_clf.intercept_ / per_clf.coef_[0][1]
axes = [0, 5, 0, 2]
x0, x1 = np.meshgrid(
np.linspace(axes[0], axes[1], 500).reshape(-1, 1),
np.linspace(axes[2], axes[3], 200).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_predict = per_clf.predict(X_new)
zz = y_predict.reshape(x0.shape)
plt.figure(figsize=(10, 4))
plt.plot(X[y==0, 0], X[y==0, 1], "bs", label="Not Iris-Setosa")
plt.plot(X[y==1, 0], X[y==1, 1], "yo", label="Iris-Setosa")
plt.plot([axes[0], axes[1]], [a * axes[0] + b, a * axes[1] + b], "k-", linewidth=3)
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#9898ff', '#fafab0'])
plt.contourf(x0, x1, zz, cmap=custom_cmap, linewidth=5)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="lower right", fontsize=14)
plt.axis(axes)
save_fig("perceptron_iris_plot")
plt.show()
# -
# # Activation functions
# +
def logit(z):
return 1 / (1 + np.exp(-z))
def relu(z):
return np.maximum(0, z)
def derivative(f, z, eps=0.000001):
return (f(z + eps) - f(z - eps))/(2 * eps)
# +
z = np.linspace(-5, 5, 200)
plt.figure(figsize=(11,4))
plt.subplot(121)
plt.plot(z, np.sign(z), "r-", linewidth=2, label="Step")
plt.plot(z, logit(z), "g--", linewidth=2, label="Logit")
plt.plot(z, np.tanh(z), "b-", linewidth=2, label="Tanh")
plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
plt.legend(loc="center right", fontsize=14)
plt.title("Activation functions", fontsize=14)
plt.axis([-5, 5, -1.2, 1.2])
plt.subplot(122)
plt.plot(z, derivative(np.sign, z), "r-", linewidth=2, label="Step")
plt.plot(0, 0, "ro", markersize=5)
plt.plot(0, 0, "rx", markersize=10)
plt.plot(z, derivative(logit, z), "g--", linewidth=2, label="Logit")
plt.plot(z, derivative(np.tanh, z), "b-", linewidth=2, label="Tanh")
plt.plot(z, derivative(relu, z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
#plt.legend(loc="center right", fontsize=14)
plt.title("Derivatives", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
save_fig("activation_functions_plot")
plt.show()
# +
def heaviside(z):
return (z >= 0).astype(z.dtype)
def sigmoid(z):
return 1/(1+np.exp(-z))
def mlp_xor(x1, x2, activation=heaviside):
return activation(-activation(x1 + x2 - 1.5) + activation(x1 + x2 - 0.5) - 0.5)
# +
x1s = np.linspace(-0.2, 1.2, 100)
x2s = np.linspace(-0.2, 1.2, 100)
x1, x2 = np.meshgrid(x1s, x2s)
z1 = mlp_xor(x1, x2, activation=heaviside)
z2 = mlp_xor(x1, x2, activation=sigmoid)
plt.figure(figsize=(10,4))
plt.subplot(121)
plt.contourf(x1, x2, z1)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("Activation function: heaviside", fontsize=14)
plt.grid(True)
plt.subplot(122)
plt.contourf(x1, x2, z2)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("Activation function: sigmoid", fontsize=14)
plt.grid(True)
# -
# # FNN for MNIST
# ## using tf.learn
# +
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
# -
X_train = mnist.train.images
X_test = mnist.test.images
y_train = mnist.train.labels.astype("int")
y_test = mnist.test.labels.astype("int")
# +
import tensorflow as tf
config = tf.contrib.learn.RunConfig(tf_random_seed=42) # not shown in the config
feature_cols = tf.contrib.learn.infer_real_valued_columns_from_input(X_train)
dnn_clf = tf.contrib.learn.DNNClassifier(hidden_units=[300,100], n_classes=10,
feature_columns=feature_cols, config=config)
dnn_clf = tf.contrib.learn.SKCompat(dnn_clf) # if TensorFlow >= 1.1
dnn_clf.fit(X_train, y_train, batch_size=50, steps=40000)
# +
from sklearn.metrics import accuracy_score
y_pred = dnn_clf.predict(X_test)
accuracy_score(y_test, y_pred['classes'])
# +
from sklearn.metrics import log_loss
y_pred_proba = y_pred['probabilities']
log_loss(y_test, y_pred_proba)
# -
# ## Using plain TensorFlow
# +
import tensorflow as tf
n_inputs = 28*28 # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
# +
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int64, shape=(None), name="y")
# -
def neuron_layer(X, n_neurons, name, activation=None):
with tf.name_scope(name):
n_inputs = int(X.get_shape()[1])
stddev = 2 / np.sqrt(n_inputs)
init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev)
W = tf.Variable(init, name="kernel")
b = tf.Variable(tf.zeros([n_neurons]), name="bias")
Z = tf.matmul(X, W) + b
if activation is not None:
return activation(Z)
else:
return Z
with tf.name_scope("dnn"):
hidden1 = neuron_layer(X, n_hidden1, name="hidden1",
activation=tf.nn.relu)
hidden2 = neuron_layer(hidden1, n_hidden2, name="hidden2",
activation=tf.nn.relu)
logits = neuron_layer(hidden2, n_outputs, name="outputs")
with tf.name_scope("loss"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
# +
learning_rate = 0.01
with tf.name_scope("train"):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
# -
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 40
batch_size = 50
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for iteration in range(mnist.train.num_examples // batch_size):
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_test = accuracy.eval(feed_dict={X: mnist.test.images,
y: mnist.test.labels})
print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)
save_path = saver.save(sess, "./my_model_final.ckpt")
with tf.Session() as sess:
saver.restore(sess, "./my_model_final.ckpt") # or better, use save_path
X_new_scaled = mnist.test.images[:20]
Z = logits.eval(feed_dict={X: X_new_scaled})
y_pred = np.argmax(Z, axis=1)
print("Predicted classes:", y_pred)
print("Actual classes: ", mnist.test.labels[:20])
# +
from IPython.display import clear_output, Image, display, HTML
def strip_consts(graph_def, max_const_size=32):
"""Strip large constant values from graph_def."""
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = b"<stripped %d bytes>"%size
return strip_def
def show_graph(graph_def, max_const_size=32):
"""Visualize TensorFlow graph."""
if hasattr(graph_def, 'as_graph_def'):
graph_def = graph_def.as_graph_def()
strip_def = strip_consts(graph_def, max_const_size=max_const_size)
code = """
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:600px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
""".format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
iframe = """
<iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
""".format(code.replace('"', '"'))
display(HTML(iframe))
# -
show_graph(tf.get_default_graph())
# ## Using `dense()` instead of `neuron_layer()`
# Note: the book uses `tensorflow.contrib.layers.fully_connected()` rather than `tf.layers.dense()` (which did not exist when this chapter was written). It is now preferable to use `tf.layers.dense()`, because anything in the contrib module may change or be deleted without notice. The `dense()` function is almost identical to the `fully_connected()` function, except for a few minor differences (a short, illustrative comparison sketch follows the list below):
# * several parameters are renamed: `scope` becomes `name`, `activation_fn` becomes `activation` (and similarly the `_fn` suffix is removed from other parameters such as `normalizer_fn`), `weights_initializer` becomes `kernel_initializer`, etc.
# * the default `activation` is now `None` rather than `tf.nn.relu`.
# * a few more differences are presented in chapter 11.
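# The cell below is a small, self-contained illustration of the renamed parameters. It is not part of the chapter's model: the placeholder `X_demo` and the layer sizes are made up for the example, and it assumes a TensorFlow 1.x installation where `tf.contrib` is still available.
# +
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected

tf.reset_default_graph()
X_demo = tf.placeholder(tf.float32, shape=(None, 20), name="X_demo")
# contrib version: `scope` and `activation_fn` (ReLU by default)
h_contrib = fully_connected(X_demo, 10, scope="h_contrib")
# tf.layers version: `name` and `activation` (None by default, so pass it explicitly)
h_dense = tf.layers.dense(X_demo, 10, name="h_dense", activation=tf.nn.relu)
# -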
n_inputs = 28*28 # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
# +
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int64, shape=(None), name="y")
# -
with tf.name_scope("dnn"):
hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1",
activation=tf.nn.relu)
hidden2 = tf.layers.dense(hidden1, n_hidden2, name="hidden2",
activation=tf.nn.relu)
logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
with tf.name_scope("loss"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
# +
learning_rate = 0.01
with tf.name_scope("train"):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
# -
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# +
n_epochs = 20
n_batches = 50
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for iteration in range(mnist.train.num_examples // batch_size):
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_test = accuracy.eval(feed_dict={X: mnist.test.images, y: mnist.test.labels})
print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)
save_path = saver.save(sess, "./my_model_final.ckpt")
# -
show_graph(tf.get_default_graph())
# # Exercise solutions
# ## 1. to 8.
# See appendix A.
# ## 9.
# _Train a deep MLP on the MNIST dataset and see if you can get over 98% precision. Just like in the last exercise of chapter 9, try adding all the bells and whistles (i.e., save checkpoints, restore the last checkpoint in case of an interruption, add summaries, plot learning curves using TensorBoard, and so on)._
# First let's create the deep net. It's exactly the same as earlier, with just one addition: we add a `tf.summary.scalar()` to track the loss and the accuracy during training, so we can view nice learning curves using TensorBoard.
n_inputs = 28*28 # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
# +
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int64, shape=(None), name="y")
# -
with tf.name_scope("dnn"):
hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1",
activation=tf.nn.relu)
hidden2 = tf.layers.dense(hidden1, n_hidden2, name="hidden2",
activation=tf.nn.relu)
logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
with tf.name_scope("loss"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
loss_summary = tf.summary.scalar('log_loss', loss)
# +
learning_rate = 0.01
with tf.name_scope("train"):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
# -
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
accuracy_summary = tf.summary.scalar('accuracy', accuracy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# Now we need to define the directory to write the TensorBoard logs to:
# +
from datetime import datetime
def log_dir(prefix=""):
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
root_logdir = "tf_logs"
if prefix:
prefix += "-"
name = prefix + "run-" + now
return "{}/{}/".format(root_logdir, name)
# -
logdir = log_dir("mnist_dnn")
# Now we can create the `FileWriter` that we will use to write the TensorBoard logs:
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
# Hey! Why don't we implement early stopping? For this, we are going to need a validation set. Luckily, the dataset returned by TensorFlow's `input_data()` function (see above) is already split into a training set (55,000 instances, already shuffled for us), a validation set (5,000 instances) and a test set (10,000 instances). So we can easily define `X_valid` and `y_valid`:
X_valid = mnist.validation.images
y_valid = mnist.validation.labels
m, n = X_train.shape
# +
n_epochs = 10001
batch_size = 50
n_batches = int(np.ceil(m / batch_size))
checkpoint_path = "/tmp/my_deep_mnist_model.ckpt"
checkpoint_epoch_path = checkpoint_path + ".epoch"
final_model_path = "./my_deep_mnist_model"
best_loss = np.infty
epochs_without_progress = 0
max_epochs_without_progress = 50
with tf.Session() as sess:
if os.path.isfile(checkpoint_epoch_path):
# if the checkpoint file exists, restore the model and load the epoch number
with open(checkpoint_epoch_path, "rb") as f:
start_epoch = int(f.read())
print("Training was interrupted. Continuing at epoch", start_epoch)
saver.restore(sess, checkpoint_path)
else:
start_epoch = 0
sess.run(init)
for epoch in range(start_epoch, n_epochs):
for iteration in range(mnist.train.num_examples // batch_size):
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
accuracy_val, loss_val, accuracy_summary_str, loss_summary_str = sess.run([accuracy, loss, accuracy_summary, loss_summary], feed_dict={X: X_valid, y: y_valid})
file_writer.add_summary(accuracy_summary_str, epoch)
file_writer.add_summary(loss_summary_str, epoch)
if epoch % 5 == 0:
print("Epoch:", epoch,
"\tValidation accuracy: {:.3f}%".format(accuracy_val * 100),
"\tLoss: {:.5f}".format(loss_val))
saver.save(sess, checkpoint_path)
with open(checkpoint_epoch_path, "wb") as f:
f.write(b"%d" % (epoch + 1))
if loss_val < best_loss:
saver.save(sess, final_model_path)
best_loss = loss_val
else:
epochs_without_progress += 5
if epochs_without_progress > max_epochs_without_progress:
print("Early stopping")
break
# -
os.remove(checkpoint_epoch_path)
with tf.Session() as sess:
saver.restore(sess, final_model_path)
accuracy_val = accuracy.eval(feed_dict={X: X_test, y: y_test})
accuracy_val
| 10_introduction_to_artificial_neural_networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7
# language: python
# name: python3
# ---
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# <center>
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/Logos/organization_logo/organization_logo.png" width="300" alt="cognitiveclass.ai logo" />
# </center>
#
# <h1 align="center"><font size="5">Classification with Python</font></h1>
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# In this notebook we try to practice all the classification algorithms that we learned in this course.
#
# We load a dataset using the Pandas library, apply the following algorithms, and find the best one for this specific dataset using accuracy evaluation methods.
#
# Let's first load the required libraries:
#
# + button=false new_sheet=false run_control={"read_only": false}
import itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import pandas as pd
import matplotlib.ticker as ticker
from sklearn import preprocessing
# %matplotlib inline
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### About dataset
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# This dataset is about past loans. The **Loan_train.csv** data set includes details of 346 customers whose loan are already paid off or defaulted. It includes following fields:
#
# | Field | Description |
# | -------------- | ------------------------------------------------------------------------------------- |
# | Loan_status    | Whether a loan is paid off or in collection                                             |
# | Principal | Basic principal loan amount at the |
# | Terms | Origination terms which can be weekly (7 days), biweekly, and monthly payoff schedule |
# | Effective_date | When the loan got originated and took effects |
# | Due_date | Since it’s one-time payoff schedule, each loan has one single due date |
# | Age | Age of applicant |
# | Education | Education of applicant |
# | Gender | The gender of applicant |
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Let's download the dataset
#
# + button=false new_sheet=false run_control={"read_only": false}
# !wget -O loan_train.csv https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/FinalModule_Coursera/data/loan_train.csv
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Load Data From CSV File
#
# + button=false new_sheet=false run_control={"read_only": false}
df = pd.read_csv('loan_train.csv')
df.head()
# -
df.shape
df.info()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Convert to date time object
#
# + button=false new_sheet=false run_control={"read_only": false}
df['due_date'] = pd.to_datetime(df['due_date'])
df['effective_date'] = pd.to_datetime(df['effective_date'])
df.head()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# # Data visualization and pre-processing
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Let’s see how many of each class is in our data set
#
# + button=false new_sheet=false run_control={"read_only": false}
df['loan_status'].value_counts()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# 260 people have paid off the loan on time while 86 have gone into collection
#
# -
# Let's plot some columns to understand the data better:
#
# +
import seaborn as sns
bins = np.linspace(df.Principal.min(), df.Principal.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'Principal', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# + button=false new_sheet=false run_control={"read_only": false}
bins = np.linspace(df.age.min(), df.age.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'age', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# # Pre-processing: Feature selection/extraction
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Let's look at the day of the week people get the loan
#
# + button=false new_sheet=false run_control={"read_only": false}
df['dayofweek'] = df['effective_date'].dt.dayofweek
bins = np.linspace(df.dayofweek.min(), df.dayofweek.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'dayofweek', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# We see that people who get the loan at the end of the week tend not to pay it off, so let's use feature binarization with a threshold at day 4 (day of week greater than 3 is flagged as a weekend loan)
#
# + button=false new_sheet=false run_control={"read_only": false}
df['weekend'] = df['dayofweek'].apply(lambda x: 1 if (x>3) else 0)
df.head()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ## Convert Categorical features to numerical values
#
# -
# #### Payment term shows a significant difference between the 7-day term and the 15/30-day terms
df.groupby(['terms'])['loan_status'].value_counts(normalize=True)
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Let's look at gender:
#
# + button=false new_sheet=false run_control={"read_only": false}
df.groupby(['Gender'])['loan_status'].value_counts(normalize=True)
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# 86% of females pay off their loans while only 73% of males pay off their loans
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Let's convert male to 0 and female to 1:
#
# + button=false new_sheet=false run_control={"read_only": false}
df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True)
df.head()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ## One Hot Encoding
#
# #### How about education?
#
# + button=false new_sheet=false run_control={"read_only": false}
df.groupby(['education'])['loan_status'].value_counts(normalize=True)
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# #### Features before One Hot Encoding
#
# + button=false new_sheet=false run_control={"read_only": false}
df[['Principal','terms','age','Gender','education']].head()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# #### Use the one hot encoding technique to convert categorical variables to binary variables and append them to the feature DataFrame
#
# + button=false new_sheet=false run_control={"read_only": false}
Feature = df[['Principal','terms','age','Gender','weekend']]
Feature = pd.concat([Feature,pd.get_dummies(df['education'])], axis=1)
Feature.drop(['Master or Above'], axis = 1,inplace=True)
Feature.head()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Feature selection
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Let's define the feature set, X:
#
# + button=false new_sheet=false run_control={"read_only": false}
X = Feature
X[0:5]
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# What are our labels?
#
# + button=false new_sheet=false run_control={"read_only": false}
y = df['loan_status'].values
y[0:5]
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ## Normalize Data
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Data standardization gives the data zero mean and unit variance (technically it should be done after the train/test split).
#
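# As a small, non-authoritative sketch of that note (assuming the usual scikit-learn API and an illustrative 80/20 split), the scaler would be fit on the training portion only and then reused for the test portion:
# +
# Illustrative only: fit the scaler on the training split and reuse its
# statistics for the test split. The split below is hypothetical and not used later.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X_tr, X_te, y_tr, y_te = train_test_split(Feature, y, test_size=0.2, random_state=4)
demo_scaler = StandardScaler().fit(X_tr)
X_tr_std = demo_scaler.transform(X_tr)
X_te_std = demo_scaler.transform(X_te)
X_tr_std[0:2]
# -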
# + button=false new_sheet=false run_control={"read_only": false}
X= preprocessing.StandardScaler().fit(X).transform(X)
X[0:5]
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# # Classification
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Now it is your turn: use the training set to build an accurate model, then use the test set to report the accuracy of the model.
# You should use the following algorithms:
#
# - K Nearest Neighbor(KNN)
# - Decision Tree
# - Support Vector Machine
# - Logistic Regression
#
# **Notice:**
#
# - You can go above and change the pre-processing, feature selection, feature-extraction, and so on, to make a better model.
# - You should use either scikit-learn, Scipy or Numpy libraries for developing the classification algorithms.
# - You should include the code of the algorithm in the following cells.
#
# -
# ## Train-Test Split
# +
from sklearn.model_selection import train_test_split
#encode categorical columns, concatenate & DROP 1 encoded-category
num_cols = ['Principal', 'terms', 'age', 'Gender']
cat_col = ['education']
X = pd.concat([df[num_cols], pd.get_dummies(df[cat_col])], axis=1)
X.drop('education_Master or Above', axis = 1, inplace=True)
#replacing character values with binary values
df['loan_status'].replace(to_replace=['COLLECTION', 'PAIDOFF'], value=[0, 1], inplace=True)
y = df['loan_status']
seed = 42
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=seed)
# -
y_test
# ## Normalize data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_trn_norm = scaler.fit_transform(X_train)
X_test_norm = scaler.transform(X_test)  # reuse the statistics fitted on the training data
print(f"Train-dataset shape: {X_trn_norm.shape}\nTest-dataset shape: {X_test_norm.shape}")
# # K Nearest Neighbor(KNN)
#
# Notice: You should find the best k to build the model with the best accuracy.
# **Warning:** You should not use **loan_test.csv** for finding the best k; however, you can split your train_loan.csv into train and test sets to find the best **k**.
#
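# One way to respect that warning, sketched below as an assumption rather than the lab's prescribed method, is to pick k purely by cross-validation on the training split:
# +
# Sketch: score each candidate k with 5-fold cross-validation on the training
# data only (loan_test.csv is never touched), then keep the best-scoring k.
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
cv_means = {}
for k in range(2, 16):
    cv_means[k] = cross_val_score(KNeighborsClassifier(n_neighbors=k), X_trn_norm, y_train, cv=5).mean()
best_k = max(cv_means, key=cv_means.get)
print(f"Best k by cross-validation: {best_k} (mean accuracy {cv_means[best_k]:.3f})")
# -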
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import jaccard_score, f1_score, log_loss
from sklearn.model_selection import cross_val_score
# +
#Fitting and testing the Training data
score = []
j_score = []
f1 = []
loss = []
for k in range(2, 16):
knn = KNeighborsClassifier(n_neighbors=k, n_jobs=-1)
knn.fit(X_trn_norm, y_train)
y_pred = knn.predict(X_test_norm)
score.append(knn.score(X_test_norm, y_test))
j_score.append(jaccard_score(y_test, y_pred)*100)
f1.append(f1_score(y_test, y_pred)*100)
loss.append(log_loss(y_test, knn.predict_proba(X_test_norm)))
print(f"Best-Model Scores:\nModel Accuracy: {max(score)} for {score.index(max(score))+2}neighbors\nJaccard-score: {max(j_score)} for {j_score.index(max(j_score))+2} neighbors\nF1-score: {max(f1)} for {f1.index(max(f1))+2} neighbors\nLog-Loss: {min(loss)} for {loss.index(min(loss))+2} neighbors")
# -
#Cross validation of Model
knn = KNeighborsClassifier(n_neighbors=11, n_jobs=-1)
cv_score = cross_val_score(knn, X_trn_norm, y_train, cv=10)
print(f"MEAN Cross-validation score of Training data:\n{cv_score.mean()}")
# # Decision Tree
#
from sklearn.tree import DecisionTreeClassifier
#Fitting and testing the Training data
dtc = DecisionTreeClassifier()
dtc.fit(X_trn_norm, y_train)
y_pred = dtc.predict(X_test_norm)
score = dtc.score(X_test_norm, y_test)
j_score = jaccard_score(y_test, y_pred)*100
f1 = f1_score(y_test, y_pred)*100
loss = log_loss(y_test, dtc.predict_proba(X_test_norm))
print(f"Model Accuracy: {score*100}\nJaccard-score: {j_score}\nF1-score: {f1}\nLog-Loss: {loss}")
#Cross validation of Model
dtc = DecisionTreeClassifier()
cv_score = cross_val_score(dtc, X_trn_norm, y_train, cv=10)
print(f"MEAN Cross-validation score of Training data:\n{cv_score.mean()}")
# # Support Vector Machine
#
from sklearn.svm import SVC
# +
#Fitting and testing the Training data
score = []
j_score = []
f1 = []
for n in np.arange(0.1, 1, 0.1):
svc = SVC(C=n, kernel='rbf')
svc.fit(X_trn_norm, y_train)
y_pred = svc.predict(X_test_norm)
score.append(svc.score(X_test_norm, y_test))
j_score.append(jaccard_score(y_test, y_pred)*100)
f1.append(f1_score(y_test, y_pred)*100)
print(f"Best-Model scores:\nModel Accuracy: {max(score)*100} for C={score.index(max(score))+0.1}\nJaccard-score: {max(j_score)} for C={j_score.index(max(j_score))+0.1}\nF1-score: {max(f1)} for C={f1.index(max(f1))+0.1}")
# -
#Cross validation of Model
svc = SVC(C=0.1, kernel='rbf')
cv_score = cross_val_score(svc, X_trn_norm, y_train, cv=10)
print(f"MEAN Cross-validation score of Training data:\n{cv_score.mean()}")
# # Logistic Regression
#
from sklearn.linear_model import LogisticRegression
# +
#Fitting and Testing the Training data
score = []
j_score = []
f1 = []
loss = []
for n in np.arange(0.1, 1, 0.1):
log = LogisticRegression(C=n, solver='liblinear')
log.fit(X_trn_norm, y_train)
y_pred = log.predict(X_test_norm)
score.append(log.score(X_test_norm, y_test))
j_score.append(jaccard_score(y_test, y_pred)*100)
f1.append(f1_score(y_test, y_pred)*100)
loss.append(log_loss(y_test, log.predict_proba(X_test_norm)))
print(f"Best-Model scores:\nModel Accuracy: {max(score)*100} for C={score.index(max(score))+0.1}\nJaccard-score: {max(j_score)} for C={j_score.index(max(j_score))+0.1}\nF1-score: {max(f1)} for C={f1.index(max(f1))+0.1}\nLog-Loss: {min(loss)} for C={loss.index(min(loss))+0.1}")
# -
#Cross validation of Model
log = LogisticRegression(C=0.1, solver='liblinear')
cv_score = cross_val_score(log, X_trn_norm, y_train, cv=10)
print(f"MEAN Cross-validation score of Training data:\n{cv_score.mean()}")
# ## Cross-Validation score of All Models with loan_train.csv
models = [knn, dtc, svc, log]
for model in models:
print(f"Best Cross-Validation score of {model}:\n{max(cross_val_score(model, X, y, cv=10))}\n\n")
# # Model Evaluation using Test set
#
from sklearn.pipeline import Pipeline
# First, download and load the test set:
#
# !wget -O loan_test.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_test.csv
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Load Test set for evaluation
#
# + button=false new_sheet=false run_control={"read_only": false}
test_df = pd.read_csv('loan_test.csv')
test_df.head()
# -
# ### Preparing Test data for Prediction
# +
#one-hot encoding categorical columns
test_df['loan_status'].replace(to_replace=['COLLECTION', 'PAIDOFF'], value=[0, 1], inplace=True)
test_df['Gender'].replace(to_replace=['male', 'female'], value=[0, 1], inplace=True)
#encode categorical column, concatenate & DROP 1 encoded-category
num_cols = ['Principal', 'terms', 'age', 'Gender']
test_X = pd.concat([test_df[num_cols], pd.get_dummies(test_df['education'])], axis=1)
test_X.drop('Master or Above', axis = 1, inplace=True)
test_y = test_df['loan_status']
# -
test_X.head()
# #### Models that support log-loss calculation (they expose `predict_proba`)
#
# +
models = [knn, dtc, log]
#Initializing & iterating every model through a Pipeline
for model in models:
pipe = Pipeline([('scaler',StandardScaler()), ('clf', model)])
#Fitting loan_train.csv
pipe.fit(X, y)
#Predicting with loan_test.csv
y_pred = pipe.predict(test_X)
#Calculating model scores
score= pipe.score(test_X, test_y)*100
j_score = jaccard_score(test_y, y_pred)*100
f1 = f1_score(test_y, y_pred)*100
loss = log_loss(test_y, pipe.predict_proba(test_X))
print(f"Scores for {model}:\nModel score: {score}\nJaccard-score: {j_score}\nF1_score: {f1}\nLog-Loss: {loss}\n\n")
# -
#Initializing SVC model in Pipeline
pipe = Pipeline([('scaler',StandardScaler()), ('clf', svc)])
#Fitting loan_train.csv
pipe.fit(X, y)
#Predicting with loan_test.csv
y_pred = pipe.predict(test_X)
#Calculating model scores
score= pipe.score(test_X, test_y)*100
j_score = jaccard_score(test_y, y_pred)*100
f1 = f1_score(test_y, y_pred)*100
print(f"Scores for {model}:\nModel score: {score}\nJaccard-score: {j_score}\nF1_score: {f1}")
# # Report
#
# You should be able to report the accuracy of the built model using different evaluation metrics:
#
# | Algorithm | Jaccard | F1-score | LogLoss |
# | ------------------ | ------------ | ------------- | ---------- |
# | KNN | 74.074 | 85.106 | 0.580 |
# | Decision Tree | 74.074 | 84.090 | 7.171 |
# | SVM | 74.074 | 85.106 | NA |
# | LogisticRegression | 74.074 | 85.106 | 0.575 |
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# <h2>Want to learn more?</h2>
#
# IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="http://cocl.us/ML0101EN-SPSSModeler">SPSS Modeler</a>
#
# Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://cocl.us/ML0101EN_DSX">Watson Studio</a>
#
# <h3>Thanks for completing this lesson!</h3>
#
# <h4>Author: <a href="https://ca.linkedin.com/in/saeedaghabozorgi"><NAME></a></h4>
# <p><a href="https://ca.linkedin.com/in/saeedaghabozorgi"><NAME></a>, PhD is a Data Scientist in IBM with a track record of developing enterprise level applications that substantially increases clients’ ability to turn data into actionable knowledge. He is a researcher in data mining field and expert in developing advanced analytic methods like machine learning and statistical modelling on large datasets.</p>
#
# <hr>
#
# ## Change Log
#
# | Date (YYYY-MM-DD) | Version | Changed By | Change Description |
# | ----------------- | ------- | ------------- | ------------------------------------------------------------------------------ |
# | 2020-10-27 | 2.1 | <NAME> | Made changes in import statement due to updates in version of sklearn library |
# | 2020-08-27 | 2.0 | <NAME> | Added lab to GitLab |
#
# <hr>
#
# ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/>
#
# <p>
#
| Classification_Loan-defaulter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
import os
import math
import requests
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.stats import zscore
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression, Lasso, LinearRegression, SGDClassifier
from sklearn import model_selection
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics.pairwise import cosine_similarity
import pubchempy as pcp
os.chdir('../')
from util.util import *
from modules.logit_predictor import PlastPredictor
os.chdir('notebooks')
import IPython.display as Disp
np.set_printoptions(suppress=True)
# -
# Making sure the new `logit_predictor` model is working properly
# Load and format data
pl_full = pd.read_pickle('../database/old_pl/plasticizer_data_v10_polarity.pkl')
org_full = pd.read_pickle('../database/old_org/org_polarity_v2.pkl')
pl_pol = pd.concat([pl_full[pl_full.columns[1:195]], pl_full['Polarity']], axis=1)
all_cols = pl_pol.columns.to_numpy()
pl_data = pl_pol[all_cols].to_numpy()
org_data = org_full[all_cols].to_numpy()
lin_data = pd.read_pickle('../database/linolein_test.pkl')
lin_data['Polarity'] = 0.048856
lin_data = lin_data[all_cols].to_numpy()
pp = PlastPredictor()
pp.fit_model(pl_data, org_data)
pp.pl_train_acc, pp.pl_test_acc, pp.org_train_acc, pp.org_test_acc
org_acc = pp.predict(org_data, type='binary', class_id='neg')
pl_acc = pp.predict(pl_data, type='binary', class_id='pos')
lin_prob = pp.predict(lin_data)
org_acc, pl_acc, lin_prob
# Looks like it works. Now we just need to generate likelihood data for all plasticizers and PubChem organics.
#
# **NOTE:** This is without filtering organics by least similar. The final model should do this to ensure the negative samples are definitely not plasticizers
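# One hedged sketch of such a filter (an assumption about how it could be done, not the project's actual implementation): rank each organic by its highest cosine similarity to any plasticizer and keep only the least similar half as negatives.
# +
# Hypothetical filtering sketch: drop the organics that look most like plasticizers.
from sklearn.metrics.pairwise import cosine_similarity
sim = cosine_similarity(org_data, pl_data)       # shape: (n_org, n_pl)
max_sim_to_pl = sim.max(axis=1)                  # closest plasticizer for each organic
keep = max_sim_to_pl < np.median(max_sim_to_pl)  # keep the least similar 50%
org_data_filtered = org_data[keep]
org_data.shape, org_data_filtered.shape
# -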
pl_probs = pp.predict(pl_data)
pl_smiles = pl_full['SMILES'].to_numpy()
org_probs = pp.predict(org_data)
org_smiles = org_full['SMILES'].to_numpy()
sns.distplot(pl_probs, hist=False)
sns.distplot(org_probs, hist=False)
plt.show()
best_org_probs, best_org_smiles = zip(*reversed(sorted(zip(org_probs, org_smiles))))
worst_pl_probs, worst_pl_smiles = zip(*sorted(zip(pl_probs, pl_smiles)))
# +
# org_ll = {'SMILES': org_smiles, 'Likelihood': org_probs}
# org_ll = pd.DataFrame(org_ll)
# org_ll.to_pickle('../database/org_likelihoods_v1.pkl')
# +
# pl_ll = {'SMILES': pl_smiles, 'Likelihood': pl_probs}
# pl_ll = pd.DataFrame(pl_ll)
# pl_ll.to_pickle('../database/pl_likelihoods_v1.pkl')
# -
| notebooks/logit_predictor_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import torch
import os
import numpy as np
from torch.distributions.uniform import Uniform
# + pycharm={"name": "#%%\n"}
lunarc = 0
# + pycharm={"name": "#%%\n"}
# Set wd
print(os.getcwd())
# set the wd to the base folder for the project
if lunarc == 1:
os.chdir('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev/two_moons')
else:
os.chdir('/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev/two_moons')
sys.path.append('./')
print(os.getcwd())
# + pycharm={"name": "#%%\n"}
import functions as func
from sbi.utils import BoxUniform
# + pycharm={"name": "#%%\n"}
prior = BoxUniform(low=-1*torch.ones(2), high=1*torch.ones(2))
x_o, model = func.set_up_model(prior,
mean_radius=0.1,
sd_radius=0.01,
baseoffset=0.25)
#mean_radius=0.1,
#sd_radius=0.01,
#baseoffset=0.25
# + pycharm={"name": "#%%\n"}
post_samples = model.gen_posterior_samples(x_o,10000)
# + pycharm={"name": "#%%\n"}
post_samples
# + pycharm={"name": "#%%\n"}
# Plot post samples
import matplotlib.pyplot as plt
fig = plt.figure(figsize = (10,10))
plt.scatter(post_samples.numpy()[:,0],post_samples.numpy()[:,1],alpha = 0.5,color = "g")
#plt.ylim((-2, 2))
#plt.xlim((-2, 2))
# -
np.savetxt('data/post_greenberg19_10k.csv', post_samples.numpy(), delimiter=",")
# + pycharm={"name": "#%%\n"}
import TwoMoons
# + pycharm={"name": "#%%\n"}
x_model = model.model_sim(post_samples)
# but wait this is the posterior pred!
# + pycharm={"name": "#%%\n"}
import matplotlib.pyplot as plt
fig = plt.figure(figsize = (10,10))
plt.scatter(x_model.detach().numpy()[:,0],x_model.detach().numpy()[:,1],alpha = 0.5,color = "g")
#plt.ylim((-2, 3))
#plt.xlim((-2, 2))
# + pycharm={"name": "#%%\n"}
dim = 2
seed = 1
import numpy as np
torch.manual_seed(seed)
post_samples = model.gen_posterior_samples(x_o,1000)
np.savetxt('data/true_posterior.csv', post_samples.numpy(), delimiter=",")
# + pycharm={"name": "#%%\n"}
post_pred = model.model_sim(post_samples)
np.savetxt('data/true_post_pred.csv', post_pred.numpy(), delimiter=",")
| two_moons/sample_analytical_post.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualizing Image Data
#
# This notebook will demonstrate how to load and visualize astronomical images in the pywwt viewer.
# ---
#
# ## Step 1: Starting up pywwt
#
# The first thing to do is to open up a pywwt window in this notebook session. As is generally the case, we have to start with some Python imports:
from pywwt.jupyter import WWTJupyterWidget
# We'll also set up a utility function to help us load data files stored alongside this notebook:
def datapath(*args):
from os.path import join
return join('data', *args)
# Next, create a widget and display it inline. (That's why the final line is a bare `wwt`.)
wwt = WWTJupyterWidget()
wwt
# If everything is working correctly, the above command should create a pywwt viewer that looks mostly like a black box. If you’re using the JupyterLab environment rather than a plain Jupyter notebook, it is *strongly recommended* that you move the viewer to its own window pane so that you can have your code and viz side-by-side:
#
# 
#
# If you don't get a menu or the menu doesn’t look like the one pictured, you are probably not using JupyterLab and will have to move the viewer cell down as you work your way through the notebook. See the [First Steps](./First%20Steps.ipynb) notebook for more information and troubleshooting tips if you don’t get a viewer at all.
# ---
#
# ## Step 2: Visualizing a Local FITS file
#
# We'll start by visualizing a WISE 12µm image towards the [Westerhout 5 star forming region](https://en.wikipedia.org/wiki/Westerhout_5) and taking a look at some of the advanced visualization options.
#
# Images, like data tables, are represented in WWT as "layers" that can be added to the view. With a standard FITS file, all you need to do is provide a pathname:
layer = wwt.layers.add_image_layer(datapath('w5.fits'))
# The viewer will automatically center and zoom to the image you've loaded. You may get a warning from the `reproject` module; this can safely be ignored.
#
# "Printing" the following variable will create a set of widgets that let you adjust how the data are visualized:
layer.controls
# The image color scaling is controlled by the sliders in the "Fine min/max" row; the "Coarse min/max" boxes control the bounds that are placed on the range of those sliders.
#
# You should try sliding the image opacity back and forth to check the agreement between the morphology of the W5 image and the WWT all-sky map.
#
# All of the parameters that are controlled by the widgets above can be manipulated programmatically as well. Let's set a bunch of them at once:
layer.cmap = 'plasma'
layer.vmin = 400
layer.vmax = 1000
layer.stretch = 'sqrt'
layer.opacity = 0.9
# Note that the settings in the widgets adjusted automatically to match what you entered. Fancy!
#
# After you're done playing around, let's reset the WWT widget:
wwt.reset()
# ---
#
# ## Step 3: Loading data from remote sources
#
# Because pywwt is a Python module, not a standalone application, it gains a lot of power by being able to integrate with other components of the modern, Web-oriented astronomical software ecosystem.
#
# For instance, it is easy to use the Python module [astroquery](https://astroquery.readthedocs.io/en/latest/) to load in data directly from archive queries, without the requirement to save any files locally. Let's fetch 2MASS Ks-band images of the field of supernova 2011fe. This might take a little while since the Python kernel needs to download the data from MAST.
# +
from astroquery.skyview import SkyView
img_list = SkyView.get_images(
position='SN 2011FE',
survey='2MASS-K',
pixels=500 # you can adjust the size if you want
)
assert len(img_list) == 1 # there's only one matching item in this example
twomass_img = img_list[0]
twomass_img.info()
# -
#
# Once the FITS data are available, we can display them in pywwt using the same command as before:
twomass_layer = wwt.layers.add_image_layer(twomass_img)
# Once again you should see the view automatically center on your image. Let's adjust the background imagery to be more relevant:
wwt.background = wwt.imagery.ir.twomass
wwt.foreground_opacity = 0
# pywwt provides interactive controls to let you adjust the parameters of the contextual imagery that's being shown. Try choosing different sets of all-sky imagery and adjusting the blend between them:
wwt.layer_controls
# Here are some settings that we like:
wwt.background = wwt.imagery.visible.sdss
wwt.foreground = wwt.imagery.gamma.fermi
wwt.foreground_opacity = .5
# Now we'll load up another image of the same field that came from *Swift*, this time stored as a local file as in the previous step:
swift_layer = wwt.layers.add_image_layer(datapath('m101_swiftx.fits'))
# Create controls to adjust all of the visualization parameters. If you want to go wild, you can overlay data from four different wavelengths in this one view!
wwt.layer_controls
twomass_layer.controls
swift_layer.controls
# ---
#
# ## Next Steps
#
# To learn how to display data tables along with your imagery, start with the [NASA Exoplanet Archive](./NASA%20Exoplanet%20Archive.ipynb) tutorial
# ---
#
# ## Credits
#
# This notebook was prepared by:
#
# - <NAME>
# - <NAME>
# - <NAME>
| work/notebooks/WWT-VisualizingImagery.ipynb |
(* -*- coding: utf-8 -*-
(* --- *)
(* jupyter: *)
(* jupytext: *)
(* text_representation: *)
(* extension: .ml *)
(* format_name: light *)
(* format_version: '1.5' *)
(* jupytext_version: 1.14.4 *)
(* kernelspec: *)
(* display_name: OCaml *)
(* language: ocaml *)
(* name: iocaml *)
(* --- *)
(* + [markdown] deletable=true editable=true
(* <h1> Defective products </h1> *)
(* <h2> Problem statement </h2> *)
(* *)
(* A factory produces, using three machines $M_1$, $M_2$ and $M_3$, parts that have: *)
(* <ul> *)
(* <li> for machine $M_1$ a defect $a$ in 5% of cases; *)
(* *)
(* <li> for machine $M_2$ a defect $b$ in 3% of cases; *)
(* *)
(* <li> for machine $M_3$ a defect $c$ in 2% of cases. *)
(* </ul> *)
(* *)
(* A machine $M$ builds an object by assembling one part coming from $M_1$, one part coming from $M_2$ and one part coming from $M_3$. It picks the parts at random from three stocks containing a large number of parts. The individual parts are drawn at random and independently of one another. *)
(* *)
(* We denote by $X$ the random variable that associates, with each object drawn at random from the production of $M$, its number of defects. We want to determine the distribution of $X$. *)
(* *)
(* Run 100 000 simulations to estimate $X$. *)
(* *)
(* Recover this result theoretically. *)
(* + [markdown] deletable=true editable=true
(* <h2> Solution </h2> *)
(* + deletable=true editable=true
open Random;;
Random.self_init;;
(* + deletable=true editable=true
let taux_defauts=[|0.05;0.03;0.02|];;
let rec compte_defauts num_machine nbre_defauts =
"à compléter"
"à compléter"
(* + deletable=true editable=true
| Produits_defectueux/Produits_defectueux_OCaml_sujet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ### Trade Demo
#
# #### Goal:
# - Load the trade data for the country `Canada`
# - Launch a domain node for canada
# - Login into the domain node
# - Format the `Canada` trade dataset and convert to Numpy array
# - Convert the dataset to a private tensor
# - Upload `Canada's` trade on the domain node
# - Create a Data Scientist User
# +
# %load_ext autoreload
# %autoreload 2
import pandas as pd
canada = pd.read_csv("../../trade_demo/datasets/ca - feb 2021.csv")
# -
# ### Step 1: Load the dataset
#
# We have trade data for the country, which has provided data from Feb 2021. The key columns are:
#
# - Commodity Code: the official code of that type of good
# - Reporter: the country claiming the import/export value
# - Partner: the country being claimed about
# - Trade Flow: the direction of the goods being reported about (imports, exports, etc)
# - Trade Value (US$): the declared USD value of the good
#
# Let's have a quick look at the top five rows of the dataset.
canada.head()
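# As an extra sanity check (illustrative only; it assumes the 'Trade Flow' and 'Trade Value (US$)' column names listed above), we can also total the declared value per trade-flow direction:
canada.groupby("Trade Flow")["Trade Value (US$)"].sum()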
# ### Step 2: Spin up the Domain Node (if you haven't already)
#
# SKIP THIS STEP IF YOU'VE ALREADY RUN IT!!!
#
# The main requirement of this demo is to perform analysis on Canada's trade dataset, so we need to spin up a domain node for Canada.
#
# Assuming you have [Docker](https://www.docker.com/) installed and configured with >=8GB of RAM, navigate to PySyft/packages/hagrid and run the following commands in separate terminals (can be done at the same time):
#
#
# ```bash
# # install hagrid cli tool
# pip install -e .
# ```
#
# ```bash
# hagrid launch Canada domain
# ```
#
# <div class="alert alert-block alert-info">
# <b>Quick Tip:</b> Don't run this now, but later when you want to stop these nodes, you can simply run the same argument with the "stop" command. So from the PySyft/grid directory you would run. Note that these commands will delete the database by default. Add the flag "--keep_db=True" to keep the database around. Also note that simply killing the thread created by ./start is often insufficient to actually stop all nodes. Run the ./stop script instead. To stop the nodes listed above (and delete their databases) run:
#
# ```bash
# hagrid land Canada
# ```
# </div>
# ### Step 3: Login into the Domain as the Admin User
# +
import syft as sy
# Let's login into the domain node
domain_node = sy.login(email="<EMAIL>", password="<PASSWORD>", port=8081)
# + tags=[]
canada.head()
# -
canada[canada["Partner"] == "Egypt"]
# For simplicity, we will upload the first 10000 rows of the dataset.
canada = canada[:10000]
# ### Step 4: Format dataset and convert to numpy array
# +
# In order to convert the whole dataset into a numpy array,
# we need to map the string values to integer values.
# +
# Let's create a function that converts string to int.
import hashlib
from math import isnan, nan
hash_db = {}
hash_db[nan] = nan
def convert_string(s: str, digits: int = 15):
"""Maps a string to a unique hash using SHA, converts it to a hash or an int"""
if type(s) is str:
new_hash = int(hashlib.sha256(s.encode("utf-8")).hexdigest(), 16) % 10 ** digits
hash_db[s] = new_hash
return new_hash
else:
return s
# +
# Let's filter out the string/object type columns
string_cols = []
for col, dtype in canada.dtypes.items():
if dtype in ['object', 'str']:
string_cols.append(col)
# Convert string values to integer
for col in canada.columns:
canada[col] = canada[col].map(lambda x: convert_string(x))
# -
# Let's checkout the formatted dataset
canada.head()
# +
# Great!!! Now let's convert the whole dataset to a numpy array.
np_dataset = canada.values
# Type cast to float values to prevent overflow
np_dataset = np_dataset.astype(float)
# -
# ### Step 5: Converting the dataset to private tensors
from syft.core.adp.entity import Entity
# +
# The 'Partner' column, i.e. the countries to which the goods are exported,
# is private, therefore let's create an entity for each partner listed.
entities = [Entity(name=partner) for partner in canada["Partner"]]
# +
# Let's convert the whole dataset to a private tensor
private_dataset_tensor = sy.Tensor(np_dataset).private(0.01, 1e15, entity=Entity(name="Canada")).tag("private_canada_trade_dataset")
# -
private_dataset_tensor[:, 0]
# ### Step 6: Upload Canada's trade data on the domain
# +
# Awesome, now let's upload the dataset to the domain.
# For simplicity, we will upload the first 10000 rows of the dataset.
domain_node.load_dataset(
assets={"feb2020": private_dataset_tensor},
name="Canada Trade Data - First 10000 rows",
description="""A collection of reports from Canada's statistics
bureau about how much it thinks it imports and exports from other countries.""",
)
# -
private_dataset_tensor.send(domain_node)
# Cool !!! The dataset was successfully uploaded onto the domain.
# Now, let's check datasets available on the domain.
domain_node.store.pandas
# ### Step 7: Create a Data Scientist User
#
# Open http://localhost:8081, log in as the root user (username: <EMAIL>, password: <PASSWORD>), and create a user with the following attributes:
#
# - Name: <NAME>
# - Email: <EMAIL>
# - Password: <PASSWORD>
# Alternatively, you can create the same from the notebook itself.
domain_node.users.create(
**{
"name": "<NAME>",
"email": "<EMAIL>",
"password": "<PASSWORD>",
},
)
domain_node.users.create(
**{
"name": "<NAME>",
"email": "<EMAIL>",
"password": "<PASSWORD>",
},
)
# ```
# Great !!! We were successfully able to create a new user.
# Now, let's move to the Data Scientist notebook, to check out their experience.
# ```
# ### Step 8: Decline request to download entire dataset
# Let's check if there are any requests pending for approval.
domain_node.requests.pandas
domain_node.requests[-1].accept()
# Looks like the DS wants to download the whole dataset. We cannot allow that.
# Let's select and deny this request.
domain_node.requests[0].deny()
# ### STOP: Return to Data Scientist - Canada.ipynb - STEP 3!!
| notebooks/Experimental/Ishan/ADP Demo/Data Owner - Canada.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="ZpU8UlEisTZt"
# !pip install autokeras -q
# + [markdown] id="_7PWbzmdsTZ3"
# ## 8.1.1 Loading image classification dataset
# + colab={"base_uri": "https://localhost:8080/"} id="VQLw4tctMD36" outputId="160aecb1-761c-4c72-cb7a-ffd4095c5000"
# !wget https://github.com/datamllab/automl-in-action-notebooks/raw/master/data/mnist.tar.gz
# !tar xzf mnist.tar.gz
# + [markdown] id="irnT7kbEH8zA"
# ```
# train/
# 0/
# 1.png
# 21.png
# ...
# 1/
# 2/
# 3/
# ...
#
# test/
# 0/
# 1/
# ...
# ```
# + id="_N3m_ublsTZ4" colab={"base_uri": "https://localhost:8080/"} outputId="3abd9bc2-edfc-438f-bf3f-a008d99a5915"
import os
import autokeras as ak
batch_size = 32
img_height = 28
img_width = 28
parent_dir = 'data'
test_data = ak.image_dataset_from_directory(
os.path.join(parent_dir, 'test'),
seed=123,
color_mode="grayscale",
image_size=(img_height, img_width),
batch_size=batch_size,
)
for images, labels in test_data.take(1):
print(images.shape, images.dtype)
print(labels.shape, labels.dtype)
# + [markdown] id="Y4k6bzo-LK2X"
# ## 8.1.2 Splitting the loaded dataset
# + colab={"base_uri": "https://localhost:8080/"} id="iQs1QVgNjAiF" outputId="05e9bc98-44df-46ed-bfc9-f0b996d9ec2a"
all_train_data = ak.image_dataset_from_directory(
os.path.join(parent_dir, 'train'),
seed=123,
color_mode="grayscale",
image_size=(img_height, img_width),
batch_size=batch_size,
)
train_data = all_train_data.take(int(60000 / batch_size * 0.8))
validation_data = all_train_data.skip(int(60000 / batch_size * 0.8))
# + colab={"base_uri": "https://localhost:8080/"} id="1IoxdxBVVS4v" outputId="b4218668-6a48-4fa9-d98e-f9187aedc0e4"
train_data = ak.image_dataset_from_directory(
os.path.join(parent_dir, 'train'),
validation_split=0.2,
subset="training",
seed=123,
color_mode="grayscale",
image_size=(img_height, img_width),
batch_size=batch_size,
)
validation_data = ak.image_dataset_from_directory(
os.path.join(parent_dir, 'train'),
validation_split=0.2,
subset="validation",
seed=123,
color_mode="grayscale",
image_size=(img_height, img_width),
batch_size=batch_size,
)
# + id="utYsWdI9yHpt"
import tensorflow as tf
train_data = train_data.prefetch(5)
validation_data = validation_data.prefetch(5)
test_data = test_data.prefetch(tf.data.AUTOTUNE)
# + [markdown] id="RWnnl5ZysTZ5"
# Then we just do one quick demo of AutoKeras to make sure the dataset works.
#
# + id="Qoadht1XsTZ5" colab={"base_uri": "https://localhost:8080/"} outputId="247ae09f-7e25-4144-bcd4-58ddcaa5fd95"
clf = ak.ImageClassifier(overwrite=True, max_trials=1)
clf.fit(train_data, epochs=1, validation_data=validation_data)
print(clf.evaluate(test_data))
# + [markdown] id="tSUxx-N3sTZ6"
# ## 8.1.3 Loading text classification dataset
# You can also load text datasets in the same way.
#
# + colab={"base_uri": "https://localhost:8080/"} id="ryaY6xGAx2Gd" outputId="65d1fe6c-d551-4a06-927c-4af8e52f910c"
# !wget https://github.com/datamllab/automl-in-action-notebooks/raw/master/data/imdb.tar.gz
# !tar xzf imdb.tar.gz
# + [markdown] id="j_69yOoAsTZ7"
# For this dataset, the data is already split into train and test.
# We just load them separately.
#
# + id="rj4K8dcTsTZ7" colab={"base_uri": "https://localhost:8080/"} outputId="6267dc03-c625-4aec-9ecf-59598186ce37"
import os
import autokeras as ak
import tensorflow as tf
train_data = ak.text_dataset_from_directory(
"imdb/train",
validation_split=0.2,
subset="training",
seed=123,
max_length=1000,
batch_size=32,
).prefetch(1000)
validation_data = ak.text_dataset_from_directory(
"imdb/train",
validation_split=0.2,
subset="validation",
seed=123,
max_length=1000,
batch_size=32,
).prefetch(1000)
test_data = ak.text_dataset_from_directory(
"imdb/test",
max_length=1000,
).prefetch(1000)
# + colab={"base_uri": "https://localhost:8080/"} id="-tgDzl9s1wIB" outputId="5ca9e88b-1d19-48dd-c628-f909727555f2"
clf = ak.TextClassifier(overwrite=True, max_trials=1)
clf.fit(train_data, epochs=2, validation_data=validation_data)
print(clf.evaluate(test_data))
# + [markdown] id="hwhWMAQCLnci"
# ## 8.1.4 Handling large dataset in general format
# + colab={"base_uri": "https://localhost:8080/"} id="YF4JhrBZp5Tc" outputId="e637bd94-373c-4a2d-d2f9-e0eab458b805"
data = [5, 8, 9, 3, 6]
def generator():
for i in data:
yield i
for x in generator():
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="vB4fgU0Atwa4" outputId="5ca907df-93a1-49be-e796-e06556400154"
dataset = tf.data.Dataset.from_generator(
generator,
output_types=tf.int32)
for x in dataset:
print(x.numpy())
# + colab={"base_uri": "https://localhost:8080/"} id="AATInn4xZfXg" outputId="24d7d23e-2cbe-4956-9f5b-3e99ace61d96"
import numpy as np
parent_dir = "imdb"  # the generator example below reads the IMDB text data (pos/neg folders)
def load_data(path):
data = []
for class_label in ["pos", "neg"]:
for file_name in os.listdir(os.path.join(path, class_label)):
data.append((os.path.join(path, class_label, file_name), class_label))
data = np.array(data)
np.random.shuffle(data)
return data
def get_generator(data):
def data_generator():
for file_path, class_label in data:
text_file = open(file_path, "r")
text = text_file.read()
text_file.close()
yield text, class_label
return data_generator
all_train_np = load_data(os.path.join(parent_dir, "train"))
def np_to_dataset(data_np):
return tf.data.Dataset.from_generator(
get_generator(data_np),
output_types=tf.string,
output_shapes=tf.TensorShape([2]),
).map(lambda x: (x[0], x[1])).batch(32).prefetch(5)
train_data = np_to_dataset(all_train_np[:20000])
validation_data = np_to_dataset(all_train_np[20000:])
test_np = load_data(os.path.join(parent_dir, "test"))
test_data = np_to_dataset(test_np)
for texts, labels in train_data.take(1):
print(texts.shape)
print(labels.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="gQyp1Ohfg2iL" outputId="19a91a71-39ce-4c91-8a49-20bb7548ec02"
clf = ak.TextClassifier(overwrite=True, max_trials=1)
clf.fit(train_data, epochs=2, validation_data=validation_data)
print(clf.evaluate(test_data))
| 8.1-Handling-large-scale-datasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.core.display import HTML
with open('../style.css', 'r') as file:
css = file.read()
HTML(css)
# # The Prince and the Tigers
# Once upon a time there was a king who wanted to marry his daughter to a prince. He had it proclaimed throughout the land that he was looking for a husband for his daughter. One day a prince came by to apply. Since the king did not want to marry his daughter to just any fool, he led the prince into a room with 9 doors. The king told the prince that the princess was behind one of the doors, but that there were other rooms behind which hungry tigers were waiting. Some rooms were also empty. If the prince were to open a door with a tiger behind it, that would presumably be his last mistake.
#
# The king further said that every door carried a sign with a statement on it. These statements behave as follows:
# <ul>
# <li>In the rooms that contain a tiger, the statement on the sign is false. </li>
# <li>In the room that contains the princess, the statement is true. </li>
# <li>For the empty rooms the situation is a bit more complicated, because there are two possibilities:
# <ol>
# <li>Either <b>all</b> labels on empty rooms are true,</li>
# <li>or <b>all</b> labels on empty rooms are false. </li>
# </ol>
# </li>
# </ul>
# The prince then read the labels. They were as follows:
# <ol>
# <li> Room: The princess is in a room with an odd room number.
# There is no tiger in the rooms with even numbers.</li>
# <li> Room: This room is empty.</li>
# <li> Room: The label on room no. 5 is true, the label on room no. 7
# is false, and there is a tiger in room no. 3. </li>
# <li> Room: The label on room no. 1 is false, there is no tiger in room no. 8,
# and the label on room no. 9 is true.</li>
# <li> Room: If the label on room no. 2 or on room no. 4 is true,
# then there is no tiger in room no. 1.</li>
# <li> Room: The label on room no. 3 is false, the princess is in room no. 2
# and there is no tiger in room no. 2.</li>
# <li> Room: The princess is in room no. 1 and the label on room no. 5 is true.</li>
#
# <li> Room: There is no tiger in this room and room no. 9 is empty.</li>
#
# <li> Room: There is a tiger neither in this room nor in room no. 1, and moreover
# the label on room no. 6 is true.</li>
# </ol>
#
# <b>Hint:</b> The task becomes easy if you use the right propositional variables to encode the king's statements and the labels on the rooms. In my solution I used the following variables:
# <ol>
# <li> $\texttt{Prinzessin<}i\texttt{>}$ is true if and only if the princess is in the $i$-th room. The index $i$ is an
# element of the set $\{1,\cdots,9\}$.</li>
# <li> $\texttt{Tiger<}i\texttt{>}$ is true if and only if there is a tiger in the $i$-th room.</li>
# <li> $\texttt{Zimmer<}i\texttt{>}$ is true if and only if the label on the $i$-th room is true.</li>
# <li> $\texttt{empty}$ is true if and only if <b>all</b> labels on empty rooms are true.</li>
# </ol>
# ## Setting up Required Modules
# We will use the parser for propositional logic which is implemented in the module <tt>propLogParser</tt>.
import propLogParser as plp
# We will also need a function that turns a formula given as a nested tuple into *conjunctive normal form*. Therefore we import the module <tt>cnf</tt>.
import cnf
# The function $\texttt{parseKNF}(s)$ takes a string $s$ that represents a formula from propositional logic, parses this string as a propositional formula and then turns this formula into a set of clauses. We have used this function already in the previous exercise sheet.
def parseKNF(s):
nestedTuple = plp.LogicParser(s).parse()
Clauses = cnf.normalize(nestedTuple)
return Clauses
parseKNF('(p ∧ ¬q → r) ↔ ¬r ∨ p ∨ q')
# Finally, we use the Davis-Putnam algorithm to find a solution for a given set of clauses. This algorithm is provided by the module <tt>davisPutnam</tt>.
import davisPutnam as dp
# ## Auxilliary Functions
# The functions defined below make it convenient to create the propositional variables $\texttt{Prinzessin<}i\texttt{>}$, $\texttt{Tiger<}i\texttt{>}$, and $\texttt{Zimmer<}i\texttt{>}$ for $i \in \{1,\cdots,n\}$.
def P(i):
"Return the string 'P<i>'"
return f'Prinzessin<{i}>'
P(1)
parseKNF(f'{P(1)} ∨ {P(2)}')
def T(i):
"Return the string 'T<i>'"
return f'Tiger<{i}>'
T(2)
def Z(i):
"Return the string 'Z<i>'"
return f'Zimmer<{i}>'
Z(3)
# Given a set of propositional variables $S$, the function $\texttt{atMostOne}(S)$ computes a set of clauses expressing the fact that at most one of the variables of $S$ is <tt>True</tt>.
def atMostOne(S):
return { frozenset({('¬',p), ('¬', q)}) for p in S
for q in S
if p != q
}
s = parseKNF(f'{Z(1)} → ({P(1)} ∨ {P(3)} ∨ {P(5)} ∨ {P(7)} ∨ {P(9)}) ∧ ¬{T(2)} ∧ ¬{T(4)} ∧ ¬{T(6)} ∧ ¬{T(8)}')
# ## Generating the set of Clauses describing the Problem
# ##### Below, you might need the following symbols: ¬ ∧ ∨ → ↔
# The function $\texttt{computeClauses}$ computes the set of clauses that encode the given problem.
def computeClauses():
# The princess has to be somewhere, i.e. there is a room containing the princess.
Clauses = { frozenset({ P(x) for x in range(1, 10) }) }
# There is just one princess.
Clauses |= atMostOne({ P(x) for x in range(1, 10) })
for i in range(1, 9+1):
# In the room containing the princess, the label at the door is true.
Clauses |= parseKNF(f'{P(i)} → {Z(i)}')
# In thoses rooms where there are tigers, the label is false.
Clauses |= parseKNF(f'{T(i)} → ¬{Z(i)}')
# Either all labels of empty rooms are true or all those labels are false.
Clauses |= parseKNF(f'¬{P(i)}∧¬{T(i)}→({Z(i)}↔empty)')
# Room Nr.1: The princess is in a room with an odd room number.
# The rooms with even numbers do not have tigers.
#s = f'{Z(1)} → {P(1)} ∨ {P(3)} ∨ {P(5)} ∨ {P(7)} ∨ {P(9)}) ∧ ¬{T(2)} ∧ ¬{T(4)} ∧ ¬{T(6)} ∧ ¬{T(8)}'
Clauses |= parseKNF(f'{Z(1)} ↔ ({P(1)} ∨ {P(3)} ∨ {P(5)} ∨ {P(7)} ∨ {P(9)}) ∧ ¬{T(2)} ∧ ¬{T(4)} ∧ ¬{T(6)} ∧ ¬{T(8)}')
# Room Nr.2: This room is empty.
Clauses |= parseKNF(f'{Z(2)} ↔ ¬{T(2)} ∧ ¬{P(2)}')
# Room Nr.3: The label at room number 5 is true, the label at room number 7 is false
# and there is a tiger in room number 3
Clauses |= parseKNF(f'{Z(3)} ↔ ¬{Z(7)} ∧ {Z(5)} ∧ {T(3)}')
# Room Nr.4: The label at room number 1 is false, there is no tiger in room number 8
# and the label at room number 9 is true.
Clauses |= parseKNF(f'{Z(4)} ↔ ¬{Z(1)} ∧ ¬{T(8)} ∧ {Z(9)}')
# Room Nr.5: If the label at room number 2 or room number 4 is true,
# then there is no tiger in room number 1.
Clauses |= parseKNF(f'{Z(5)} ↔ (({Z(2)} ∨ {Z(4)}) → ¬{T(1)})')
# Room Nr.6: The label on room number 3 is false, the princess is in room number 2
# and there is no tiger in room number 2.
Clauses |= parseKNF(f'{Z(6)} ↔ ¬{Z(3)} ∧ {P(2)} ∧ ¬{T(2)}')
# Room Nr.7: The princess is in room number 1 and the label of room number 5 is true.
Clauses |= parseKNF(f'{Z(7)} ↔ {P(1)} ∧ {Z(5)}')
# Room Nr.8: There is no tiger in this room and room number 9 is empty.
Clauses |= parseKNF(f'{Z(8)} ↔ ¬{T(8)} ∧ ¬{T(9)} ∧ ¬{P(9)}')
# Room Nr.9: Neither this room nor room number 1 has a tiger and
# the label of room number 6 is true.
Clauses |= parseKNF(f'{Z(9)} ↔ ¬{T(9)} ∧ ¬{T(1)} ∧ {Z(6)}')
return Clauses
Clauses = computeClauses()
Clauses
s = f'{Z(1)} → ({P(1)} ∨ {P(3)} ∨ {P(5)} ∨ {P(7)} ∨ {P(9)}) ∧ ¬{T(2)} ∧ ¬{T(4)} ∧ ¬{T(6)} ∧ ¬{T(8)}'
s
# There are 110 clauses.
len(Clauses)
# Finally, we call the function <tt>solve</tt> from the module <tt>davisPutnam</tt> to solve the problem.
solution = dp.solve(Clauses, set())
# The function $\texttt{getSolution}(S)$ takes a set of unit clauses representing the solution of the problem and returns the room where the princess is located.
def getSolution(S):
"Print only the positive literals from the set S."
for Unit in S:
for l in Unit:
if isinstance(l, str) and l[:10] == 'Prinzessin':
return l
# We print the solution.
princess = getSolution(solution)
print(f'The princess is in room number {princess[11]}.')
# Finally, we check whether the solution is unique. If the solution is not unique, then you have missed to code some of the requirements.
def checkUniqueness(Clauses, princess):
Clauses.add(frozenset({('¬', princess)}))
alternative = dp.solve(Clauses, set())
if alternative == { frozenset() }:
print('The solution is unique.')
else:
print('ERROR: The solution is not unique.')
checkUniqueness(Clauses, princess)
| Python/Exercises/Blatt-09-Gruppenloesung.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import syft as sy
import numpy as np
from syft.core.adp.entity import DataSubject
# Instantiate Domain Object & DataSubject
ishan = DataSubject(name="Ishan")
uk = sy.login(email="<EMAIL>", password="<PASSWORD>", port=8081)
uk.store.pandas
data = np.random.rand(1, 10)
private_tensor = sy.Tensor(data).private(0, 10, entity=ishan).tag("private")
print(private_tensor)
priv_ptr = private_tensor.send(uk)
uk.store.pandas
private_autograd_tensor = priv_ptr.get()  # fetch the tensor back through the pointer created above
private_autograd_tensor
y = private_tensor + private_tensor
y
new_acc = sy.core.adp.adversarial_accountant.AdversarialAccountant()
y.publish(acc=new_acc, sigma=0.1)
uk.store.pandas
public_tensor = sy.Tensor(np.random.randn(1, 3) * 10).tag("Fresh Public Data")
public_tensor
ag_ptr = uk.store['autograd']
ag_tensor = ag_ptr.get()
ag_tensor
ag_tensor + public_tensor
ag_tensor * public_tensor
y = sy.core.adp.publish.publish(private_tensor + private_tensor, acc=new_acc, sigma=0.1)
uk.domain.id
| notebooks/Experimental/Ishan/ADP Demo/Debugging/Debug_Publish.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Q#
# language: qsharp
# name: iqsharp
# ---
# # Phase Estimation
#
# The **"Phase Estimation"** quantum kata is a series of exercises designed
# to teach you the basics of using phase estimation algorithms.
#
# It covers the following topics:
# * quantum phase estimation,
# * iterative phase estimation,
# * preparing necessary inputs to phase estimation routines and applying them.
#
# Each task is wrapped in one operation preceded by the description of the task.
# Your goal is to fill in the blank (marked with the `// ...` comments)
# with some Q# code that solves the task. To verify your answer, run the cell using Ctrl+Enter (⌘+Enter on macOS).
#
# Within each section, tasks are given in approximate order of increasing difficulty;
# harder ones are marked with asterisks.
# ## Part I. Quantum Phase Estimation (QPE)
# ### Task 1.1. Inputs to QPE: eigenstates of Z/S/T gates.
#
# **Inputs:**
#
# 1. A qubit in the $|0\rangle$ state.
#
# 2. An integer `state` indicating which eigenstate to prepare.
#
# **Goal:**
#
# Prepare one of the eigenstates of Z gate (which are the same as eigenstates of S or T gates):
# eigenstate $|0\rangle$ if `state = 0`, or eigenstate $|1\rangle$ if `state = 1`.
# +
%kata T11_Eigenstates_ZST
operation Eigenstates_ZST (q : Qubit, state : Int) : Unit is Adj {
if state == 1 {
X(q);
}
}
# -
# ### Task 1.2. Inputs to QPE: powers of Z/S/T gates.
#
# **Inputs:**
#
# 1. A single-qubit unitary U.
#
# 2. A positive integer `power`.
#
# **Output:**
#
# A single-qubit unitary equal to U raised to the given power.
#
# <br/>
# <details>
# <summary><b>Need a hint? Click here</b></summary>
# Remember that you can define auxiliary operations. To do that, you'll need to create an extra code cell for each new operation and execute it before returning to this cell.
# </details>
operation UnitaryPower_Impl(U : (Qubit => Unit is Adj + Ctl), power : Int, q : Qubit) : Unit is Adj + Ctl {
for i in 1 .. power {
U(q);
}
}
# +
%kata T12_UnitaryPower
function UnitaryPower (U : (Qubit => Unit is Adj + Ctl), power : Int) : (Qubit => Unit is Adj + Ctl) {
return UnitaryPower_Impl(U, power, _);
}
# -
# ### Task 1.3. Validate inputs to QPE.
#
# <span style="color:red"><b>This task is temporarily not available in Notebook format; please use Q# project version of the PhaseEstimation kata to complete it.</b></span>
#
# **Inputs:**
#
# 1. A single-qubit unitary U.
#
# 2. A single-qubit state $|\psi\rangle$ represented by a unitary P such that $|\psi\rangle = P|0\rangle$
# (i.e., applying the unitary P to state $|0\rangle$ prepares state $|\psi\rangle$).
#
# **Goal:**
#
# Assert that the given state is an eigenstate of the given unitary,
# i.e., do nothing if it is, and throw an exception if it is not.
# ### Task 1.4. QPE for single-qubit unitaries.
#
# **Inputs:**
#
# 1. A single-qubit unitary U.
#
# 2. A single-qubit state $|\psi\rangle$ represented by a unitary P such that $|\psi\rangle = P|0\rangle$
# (i.e., applying the unitary P to state $|0\rangle$ prepares state $|\psi\rangle$).
#
# 3. An integer `n`.
#
# **Output:**
#
# The phase of the eigenvalue that corresponds to the eigenstate $|\psi\rangle$, with `n` bits of precision.
# The phase should be between 0.0 and 1.0.
operation Oracle_Reference (U : (Qubit => Unit is Adj + Ctl), power : Int, target : Qubit[]) : Unit is Adj + Ctl{
for _ in 1 .. power {
U(target[0]);
}
}
# +
%kata T14_QPE
open Microsoft.Quantum.Arithmetic;
open Microsoft.Quantum.Characterization;
open Microsoft.Quantum.Convert;
open Microsoft.Quantum.Oracles;
operation QPE (U : (Qubit => Unit is Adj + Ctl), P : (Qubit => Unit is Adj), n : Int) : Double {
// Construct a phase estimation oracle from the unitary
let oracle = DiscreteOracle(Oracle_Reference(U, _, _));
// Allocate qubits to hold the eigenstate of U and the phase in a big endian register
use (eigenstate, phaseRegister) = (Qubit[1], Qubit[n]);
let phaseRegisterBE = BigEndian(phaseRegister);
// Prepare the eigenstate of U
P(eigenstate[0]);
// Call library
QuantumPhaseEstimation(oracle, eigenstate, phaseRegisterBE);
// Read out the phase
let phase = IntAsDouble(MeasureInteger(BigEndianAsLittleEndian(phaseRegisterBE))) / IntAsDouble(1 <<< n);
ResetAll(eigenstate);
return phase;
}
# -
# ### Task 1.5. Test your QPE implementation.
#
# **Goal:**
# Use your QPE implementation from task 1.4 to run quantum phase estimation
# on several simple unitaries and their eigenstates.
# This task is not covered by a test and allows you to experiment with running the algorithm.
#
# > This is an open-ended task, and is not covered by a unit test. To run the code, execute the cell with the definition of the `Run_QPE` operation first; if it compiled successfully without any errors, you can run the operation by executing the next cell (`%simulate Run_QPE`).
# +
operation Run_QPE () : Unit {
}
# -
%simulate Run_QPE
# ## Part II. Iterative Phase Estimation
#
# Unlike quantum phase estimation, which is a single algorithm,
# iterative phase estimation is a whole class of algorithms based on the same idea:
# treating phase estimation as a classical algorithm which learns the phase via a sequence of measurements
# (the measurement performed on each iteration can depend on the outcomes of previous iterations).
#
# A typical circuit for one iteration has the following structure:
#
# 
#
# ($\psi$ is the procedure to prepare the eigenstate $|\psi\rangle$, R is a rotation gate, and M is a power of the unitary U;
# both depend on the current information about the phase).
#
# The result of the measurement performed on the top qubit defines the next iteration.
# ### Task 2.1. Single-bit phase estimation.
#
# **Inputs:**
#
# 1. A single-qubit unitary U that is guaranteed to have an eigenvalue $+1$ or $-1$
# (with eigenphases $0.0$ or $0.5$, respectively).
#
# 2. A single-qubit state $|\psi\rangle$ represented by a unitary P such that $|\psi\rangle = P|0\rangle$
# (i.e., applying the unitary P to state $|0\rangle$ prepares state $|\psi\rangle$).
#
# **Output:**
#
# The eigenvalue which corresponds to the eigenstate $|\psi\rangle$ ($+1$ or $-1$).
#
# You are allowed to allocate exactly two qubits and call `Controlled U` exactly once.
#
# > It is possible to use the QPE implementation from task 1.4 to solve this task,
# but we suggest you implement the circuit by hand for the sake of learning.
# +
%kata T21_SingleBitPE
operation SingleBitPE (U : (Qubit => Unit is Adj + Ctl), P : (Qubit => Unit is Adj)) : Int {
// ...
}
# -
# ### Task 2.2. Two bit phase estimation.
#
# **Inputs:**
#
# 1. A single-qubit unitary U that is guaranteed to have an eigenvalue $+1$, $i$, $-1$ or $-i$
# (with eigenphases $0.0$, $0.25$, $0.5$ or $0.75$, respectively).
#
# 2. A single-qubit state $|\psi\rangle$ represented by a unitary P such that $|\psi\rangle = P|0\rangle$
# (i.e., applying the unitary P to state $|0\rangle$ prepares state $|\psi\rangle$).
#
# **Output:**
#
# The eigenphase which corresponds to the eigenstate $|\psi\rangle$ ($0.0$, $0.25$, $0.5$ or $0.75$).
# The returned value has to be accurate within the absolute error of 0.001.
#
# You are allowed to allocate exactly two qubits and call `Controlled U` multiple times.
#
# <br/>
# <details>
# <summary><b>Need a hint? Click here</b></summary>
# Start by applying the same circuit as in task 2.1.
# What are the possible outcomes for each eigenvalue?
# What eigenvalues you can and can not distinguish using this circuit?
# </details>
#
# <br/>
# <details>
# <summary><b>Need another hint? Click here</b></summary>
# What eigenvalues you can and can not distinguish using this circuit?
# What circuit you can apply to distinguish them?
# </details>
# +
%kata T22_TwoBitPE
operation TwoBitPE (U : (Qubit => Unit is Adj + Ctl), P : (Qubit => Unit is Adj)) : Double {
// ...
return -1.0;
}
# -
# To be continued...
| PhaseEstimation/PhaseEstimation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="MhoQ0WE77laV"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" id="_ckMIh7O7s6D"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="jYysdyb-CaWM"
# # tf.distribute.Strategy with training loops
# + [markdown] id="S5Uhzt6vVIB2"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/tr/r1/tutorials/distribute/training_loops.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/tr/r1/tutorials/distribute/training_loops.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] id="-adnPCDYkI8e"
# Note: These documents have been translated by volunteer TensorFlow users.
# Because community translations are maintained on a best-effort basis, we cannot
# guarantee that they exactly match the [official English documentation](https://www.tensorflow.org/?hl=en).
# If you have suggestions for improving these translations, please send a pull
# request to the [tensorflow/docs](https://github.com/tensorflow/docs) repository.
# To volunteer to contribute translations, please contact the
# [<EMAIL>](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-tr)
# mailing list.
# + [markdown] id="FbVhjPpzn6BM"
# This guide demonstrates how to use [`tf.distribute.Strategy`](https://www.tensorflow.org/r1/guide/distribute_strategy) with custom training loops. We will train a simple CNN model on the Fashion MNIST dataset. This dataset contains 60000 training images of size 28x28 and 10000 test images of size 28x28.
#
# We use custom training loops here because they give us flexibility and more control over training. Moreover, these custom loops also make it easier to debug the model and the training loop.
# + id="dzLKpmZICaWN"
# Import TensorFlow
import tensorflow.compat.v1 as tf
# Helper libraries
import numpy as np
import os
print(tf.__version__)
# + [markdown] id="MM6W__qraV55"
# ## Download the Fashion MNIST dataset
# + id="7MqDQO0KCaWS"
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Add a new dimension to the arrays -> new shape == (28, 28, 1)
# We do this because the first layer of our model is convolutional
# and it needs a 4D input (batch_size, height, width, channels).
# We will add the batch_size dimension later.
train_images = train_images[..., None]
test_images = test_images[..., None]
# Scale the images to the [0, 1] range.
train_images = train_images / np.float32(255)
test_images = test_images / np.float32(255)
train_labels = train_labels.astype('int64')
test_labels = test_labels.astype('int64')
# + [markdown] id="4AXoHhrsbdF3"
# ## Create a strategy to distribute the variables and the graph
# + [markdown] id="5mVuLZhbem8d"
# How does `tf.distribute.MirroredStrategy` work?
#
# * All of the variables and the model graph are replicated across the replicas.
# * The input is distributed evenly across the replicas.
# * Each replica calculates a loss and gradients for the input it receives.
# * The gradients are summed across all the replicas, and the values on each replica are updated using this sum.
# * After this, the same update is applied to the copies of the variables on every replica.
#
# Note: You can put all of the code below inside a single scope, but we split it here to make it more explanatory.
#
# + id="F2VeZUWUj5S4"
# If the devices to be used are not specified in the `tf.distribute.MirroredStrategy`
# constructor, they will be detected automatically.
strategy = tf.distribute.MirroredStrategy()
# + id="ZngeM_2o0_JO"
print ('Number of devices: {}'.format(strategy.num_replicas_in_sync))
# + [markdown] id="k53F5I_IiGyI"
# ## Setting up the input pipeline
# + [markdown] id="0Qb6nDgxiN_n"
# If a model is trained on multiple GPUs, the batch size should be increased proportionally so that the extra computing power is used efficiently. The learning rate should also be tuned accordingly.
# + id="jwJtsCQhHK-E"
BUFFER_SIZE = len(train_images)
BATCH_SIZE_PER_REPLICA = 64
BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
EPOCHS = 10
# + [markdown] id="J7fj3GskHC8g"
# `strategy.make_dataset_iterator` creates an iterator that distributes the data evenly across the replicas.
#
#
# Note: This API will change in the near future.
# + id="WYrMNNDhAvVl"
with strategy.scope():
train_dataset = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels)).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
train_iterator = strategy.make_dataset_iterator(train_dataset)
test_dataset = tf.data.Dataset.from_tensor_slices(
(test_images, test_labels)).batch(BATCH_SIZE)
test_iterator = strategy.make_dataset_iterator(test_dataset)
# + [markdown] id="bAXAo_wWbWSb"
# ## Creating the model
#
# Let's create our model with `tf.keras.Sequential`. We could also do this using the Model Subclassing API.
# + id="9ODch-OFCaW4"
with strategy.scope():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(64, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
optimizer = tf.train.GradientDescentOptimizer(0.001)
# + [markdown] id="e-wlFFZbP33n"
# ## Define the loss function
#
# Normally, on a machine with a single GPU/CPU, the loss is divided by the number of examples in the input batch.
#
# *So, how should the loss be calculated when using `tf.distribute.Strategy`?*
#
# > For example, suppose we have 4 GPUs and an input batch of size 64. The input is split evenly across the 4 GPUs (4 replicas), so the batch size that reaches each replica is 16.
#
# > The model on each replica does a forward pass with its share of the input and calculates the loss. Now, instead of dividing the loss by the number of examples on that replica (16), it divides it by the global batch size (64).
#
# *Why is it done this way?*
#
# > Because after the gradients are calculated on each replica, they are synced across all the replicas by summing them.
#
# *How can we do this in TensorFlow?*
#
# If you are writing a custom training loop, you should sum the per-example losses and divide the sum by the global batch size (GLOBAL_BATCH_SIZE):
#
# ```
# scale_loss = tf.reduce_sum(loss) * (1. / GLOBAL_BATCH_SIZE)
# ```
#
# * We do not recommend using `tf.reduce_mean`. It divides the loss by the per-replica batch size, which may change from step to step.
#
# * This reduction and scaling is done automatically in Keras with model.compile and model.fit.
# * If you are using the `tf.keras.losses` classes, the loss reduction needs to be explicitly specified as `NONE` or `SUM`. `AUTO` and `SUM_OVER_BATCH_SIZE` are disallowed together with `tf.distribute.Strategy`, because users should first make sure that the reduction they want is correct in the current distributed setting before using `AUTO`. `SUM_OVER_BATCH_SIZE` is disallowed because, as currently implemented, it would only divide by the per-replica batch size and leave dividing by the global batch size to the user, which is very easy to miss. Instead, we ask users to do the reduction themselves explicitly.
#
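# As a tiny numeric sanity check (this cell is illustrative only and not part of the original tutorial), we can mimic the 4-replica example above with plain numpy: each replica divides the sum of its 16 per-example losses by the global batch size of 64, and summing the per-replica results gives exactly the mean over all 64 examples.
per_example_losses = np.random.rand(64)             # pretend per-example losses for a global batch of 64
replica_losses = per_example_losses.reshape(4, 16)  # split evenly across 4 replicas
per_replica = replica_losses.sum(axis=1) / 64.0     # each replica divides by the GLOBAL batch size
print(np.isclose(per_replica.sum(), per_example_losses.mean()))  # prints True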
# + [markdown] id="iuKuNXPORfqJ"
# ## Training loop
# + id="47BLVkRkVQDO"
with strategy.scope():
def train_step():
def step_fn(inputs):
images, labels = inputs
logits = model(images)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
loss = tf.reduce_sum(cross_entropy) * (1.0 / BATCH_SIZE)
train_op = optimizer.minimize(loss)
with tf.control_dependencies([train_op]):
return tf.identity(loss)
per_replica_losses = strategy.experimental_run(
step_fn, train_iterator)
mean_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
return mean_loss
# + id="7x7s5iYAYSGD"
with strategy.scope():
iterator_init = train_iterator.initialize()
var_init = tf.global_variables_initializer()
loss = train_step()
with tf.Session() as sess:
sess.run([var_init])
for epoch in range(EPOCHS):
sess.run([iterator_init])
for step in range(10000):
if step % 1000 == 0:
print('Epoch {} Step {} Loss {:.4f}'.format(epoch+1,
step,
sess.run(loss)))
# + [markdown] id="6hEJNsokjOKs"
# ## What's next?
#
# Now try out the `tf.distribute.Strategy` API on your own models.
| site/tr/r1/tutorials/distribute/training_loops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PySpark
# language: python
# name: pyspark
# ---
# 
# +
import twitter
import nltk
import time
from pymongo import MongoClient
__version__ = '1.0'
__all__ = []
__author__ = '<NAME> - <EMAIL>'
# +
def prepare_tweet(tweet_json):
tweet_dic = dict(tweet_json)
user_id = tweet_dic["user"]["id"]
tweet_dic["user"] =str(user_id)
return tweet_dic
def insert_timeline_into_mongo(twitter_user_id, api, MONGO_URL):
mongo_client = MongoClient(MONGO_URL )
users_collection = mongo_client.twitter.twitterUsers
tweets_collection = mongo_client.twitter.tweets
max_id = None # max_id is used to page backwards through the timeline; None starts from the most recent tweets.
count = 200 # We retrieve 200 tweets at a time
current_count = 0
max_count = 1500
timeline_json = []
# Spark output isn't idempotent so, although the insert occurs in a transaction,
# it's possible for it to succeed in both tasks before one can be cancelled.
if users_collection.find({"user.id": twitter_user_id}).count() == 0:
users_collection.insert_one({"id": twitter_user_id})
while current_count <= max_count:
# We retrieve the first chunk of tweets
timeline_chunk = api.GetUserTimeline(twitter_user_id, max_id=max_id, count=count)
if len(timeline_chunk) == 1 :
current_count = max_count + 1
else:
max_id = timeline_chunk[-1].id
timeline_json.extend(timeline_chunk)
# We insert the tweets into the collection
tweets_collection.insert_many([ prepare_tweet(timeline_chunk[i]._json) for i in range(len(timeline_chunk))])
current_count += len(timeline_chunk)
time.sleep(1)
# We insert our user to the user collection
users_collection.update_one({'id': twitter_user_id}, {"$set": dict(timeline_json[0].user._json)}, upsert=False)
# we close our mongo connection
mongo_client.close()
return "OK"
# -
def process_user(twitter_user_id, api, MONGO_URL):
tweets_inserted_status = insert_timeline_into_mongo(twitter_user_id, api, MONGO_URL)
return (twitter_user_id , tweets_inserted_status)
# # Main Program
#
# We will now set all the parameters required to access Twitter and the MongoDB database.
# +
# Twitter key and secret for OAuth
consumer_key = "XXX"
consumer_secret = "YYY"
access_token = "AAA"
access_token_secret = "BBB"
api = twitter.Api(consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token_key=access_token,
access_token_secret=access_token_secret)
# The users chosen are
user_ids = ["25073877", "813286", "1339835893", "52544275", "409486555", "759251", "3235334092"]
# Address of the mongo cluster
MONGO_URL = "mongodb://"
# -
# We retrieve the timelines for the specified users and print out "OK" when the task is completed by the worker.
users_ids_rdd = sc.parallelize(user_ids)
insertion = users_ids_rdd.map(lambda user_id : process_user(user_id, api, MONGO_URL))
insertion.collect()
# # Natural Language Processing
#
# ### We do some basic language processing on the tweets and insert them back into a new collection.
# If you are interested in exploring the nltk library further: http://www.nltk.org/
def process_tweets_for_user(twitter_user_id, MONGO_URL):
mongo_client = MongoClient(MONGO_URL )
tweets_collection = mongo_client.twitter.tweets
tweets_processed = mongo_client.twitter.processedTweets
for tweet in tweets_collection.find({"user": twitter_user_id}):
text = tweet["text"]
tokens = nltk.word_tokenize(text)
tagged = nltk.pos_tag(tokens)
doc = {"text": text,
"tokens": tokens,
"tagged": tagged
}
tweets_processed.insert_one(doc)
return "Processed"
process_status = users_ids_rdd.map(lambda user_id : process_tweets_for_user(user_id, MONGO_URL))
process_status.collect()
# # Exercises
#
# **The reference documentation for pymongo is available at that address:** https://api.mongodb.com/python/current/
#
# Queries:
# * Count the number of tweets and users
# * Print out the name of all the users inserted
# * Find the most retweeted tweet
# * Find the shortest tweet
# * Count all the words used in the tweets and find the top 5 most used
#
#
# +
mongo_client = MongoClient(MONGO_URL)
# Count the number of tweets
mongo_client.twitter.tweets.count()
# Count the number of users
mongo_client.twitter.twitterUsers.count()
# +
# Print out the name of all the users inserted
cursor = mongo_client.twitter.twitterUsers.find({},{"name": 1})
for document in cursor:
    print(document["name"])
# +
from pymongo import DESCENDING
# Find the most retweeted tweet
most_retweeted = mongo_client.twitter.tweets.find().sort("retweet_count", DESCENDING).limit(1)
for t in most_retweeted:
    print(t["retweet_count"])
    print(t)
# +
# Find the shortest tweet
def text_length(tweet_id, MONGO_URL):
mongo_client = MongoClient(MONGO_URL)
tweet = mongo_client.twitter.tweets.find_one({"_id": tweet_id["_id"]})
return (tweet["text"], len(tweet["text"]))
def compare_length(tweet1, tweet2):
if(tweet1[1]<tweet2[1]):
return tweet1
else:
return tweet2
tweets_ids = sc.parallelize(list(mongo_client.twitter.tweets.find({},{"_id": 1})))
shortest_tweet = tweets_ids.map(lambda tweet_id : text_length(tweet_id, MONGO_URL)).reduce(lambda t1,t2: compare_length(t1,t2))
print "The shortest tweet is " + str(shortest_tweet[0]) + "\nAnd the length is " + str(shortest_tweet[1])
# +
# Count all the words used in the tweets and find the top 5 most used
from bson.code import Code
mapper = Code("""
function () {
this.tokens.forEach(function(z) {
emit(z, 1);
});
}
""")
reducer = Code("""
function (key, values) {
var total = 0;
for (var i = 0; i < values.length; i++) {
total += values[i];
}
return total;
}
""")
result = mongo_client.twitter.processedTweets.map_reduce(mapper, reducer, "myresults")
for doc in result.find().sort("value", DESCENDING).limit(5):
    print(doc)
| Twitter Mongo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # A Quick Introduction to Data Analysis and Manipulation with Python and pandas
#
#
# ## What is pandas?
#
# If you're getting into machine learning and data science and you're using Python, you're going to use pandas.
#
# [pandas](https://pandas.pydata.org/) is an open source library which helps you analyse and manipulate data.
#
# <img src="../images/pandas-6-step-ml-framework-tools-highlight.png" alt="a 6 step machine learning framework along will tools you can use for each step" width="700"/>
#
# ## Why pandas?
#
# pandas provides a simple-to-use but very capable set of functions you can use on your data.
#
# It's integrated with many other data science and machine learning tools which use Python so having an understanding of it will be helpful throughout your journey.
#
# One of the main use cases you'll come across is using pandas to transform your data in a way which makes it usable with machine learning algorithms.
#
# ## What does this notebook cover?
#
# Because the pandas library is vast, there's often many ways to do the same thing. This notebook covers some of the most fundamental functions of the library, which are more than enough to get started.
#
# ## Where can I get help?
# If you get stuck or think of something you'd like to do which this notebook doesn't cover, don't fear!
#
# The recommended steps you take are:
# 1. **Try it** - Since pandas is very friendly, your first step should be to use what you know and try to figure out the answer to your own question (getting it wrong is part of the process). If in doubt, run your code.
# 2. **Search for it** - If trying it on your own doesn't work, since someone else has probably tried to do something similar, try searching for your problem. You'll likely end up in 1 of 2 places:
# * [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/) - the best place for learning pandas, this resource covers all of the pandas functionality.
# * [Stack Overflow](https://stackoverflow.com/) - this is the developers' Q&A hub; it's full of questions and answers to different problems across a wide range of software development topics, and chances are there's one related to your problem.
#
# An example of searching for a pandas function might be:
#
# > "how to fill all the missing values of two columns using pandas"
#
# Searching this on Google leads to this post on Stack Overflow: https://stackoverflow.com/questions/36556256/how-do-i-fill-na-values-in-multiple-columns-in-pandas
#
# The next steps here are to read through the post and see if it relates to your problem. If it does, great, take the code/information you need and **rewrite it** to suit your own problem.
#
# 3. **Ask for help** - If you've been through the above 2 steps and you're still stuck, you might want to ask your question on Stack Overflow. Remember to be specific as possible and provide details on what you've tried.
#
# Remember, you don't have to learn all of these functions off by heart to begin with.
#
# What's most important is remembering to continually ask yourself, "what am I trying to do with the data?".
#
# Start by answering that question and then practicing finding the code which does it.
#
# Let's get started.
# ## 0. Importing pandas
#
# To get started using pandas, the first step is to import it.
#
# The most common way (and method you should use) is to import pandas as the abbreviation `pd`.
#
# If you see the letters `pd` used anywhere in machine learning or data science, it's probably referring to the pandas library.
import pandas as pd
# ## 1. Datatypes
#
# pandas has two main datatypes, `Series` and `DataFrame`.
# * `Series` - a 1-dimensional column of data.
# * `DataFrame` (most common) - a 2-dimensional table of data with rows and columns.
#
# You can create a `Series` using `pd.Series()` and passing it a Python list.
# Creating a series of car types
cars = pd.Series(["BMW", "Toyota", "Honda"])
cars
# Creating a series of colours
colours = pd.Series(["Blue", "Red", "White"])
colours
# You can create a `DataFrame` by using `pd.DataFrame()` and passing it a Python dictionary.
#
# Let's use our two `Series` as the values.
# Creating a DataFrame of cars and colours
car_data = pd.DataFrame({"Car type": cars,
"Colour": colours})
car_data
# You can see the keys of the dictionary became the column headings (text in bold) and the values of the two `Series`'s became the values in the DataFrame.
#
# It's important to note, many different types of data could go into the DataFrame.
#
# Here we've used only text but you could use floats, integers and more.
# ### Exercises
#
# 1. Make a `Series` of different foods.
# 2. Make a `Series` of different dollar values (these can be integers).
# 3. Combine your `Series`'s of foods and dollar values into a `DataFrame`.
#
# Try it out for yourself first, then see how your code goes against the solution.
#
# **Note:** Make sure your two `Series` are the same size before combining them in a DataFrame.
# +
# Your code here
# +
# Example solution
# Make a Series of different foods
foods = pd.Series(["Almond butter", "Eggs", "Avocado"])
# Make a Series of different dollar values
prices = pd.Series([9, 6, 2])
# Combine your Series of foods and dollar values into a DataFrame
food_data = pd.DataFrame({"Foods": foods,
"Price": prices})
food_data
# -
# ## 2. Importing data
#
# Creating `Series` and `DataFrame`'s from scratch is nice but what you'll usually be doing is importing your data in the form of a `.csv` (comma separated value) or spreadsheet file.
#
# pandas allows for easy importing of data like this through functions such as `pd.read_csv()` and `pd.read_excel()` (for Microsoft Excel files).
#
# Say you wanted to get this information from this Google Sheet document into a pandas `DataFrame`.
#
# <img src="../images/pandas-car-sales-csv.png" alt="spreadsheet with car sales information" width="600">
#
# You could export it as a `.csv` file and then import it using `pd.read_csv()`.
#
# In this case, the exported `.csv` file is called `car-sales.csv`.
# Import car sales data
car_sales = pd.read_csv("data/data-analysis/car-sales.csv") # takes a filename as string as input
car_sales
# Now we've got the same data from the spreadsheet available in a pandas `DataFrame` called `car_sales`.
#
# Having your data available in a `DataFrame` allows you to take advantage of all of pandas functionality on it.
#
# Another common practice you'll see is data being imported to `DataFrame` called `df` (short for `DataFrame`).
# Import the car sales data and save it to df
df = pd.read_csv("data/data-analysis/car-sales.csv")
df
# Now `car_sales` and `df` contain the exact same information, the only difference is the name. Like any other variable, you can name your `DataFrame`'s whatever you want. But best to choose something simple.
#
# ### Anatomy of a DataFrame
#
# Different functions use different labels for different things. This graphic sums up some of the main components of `DataFrame`'s and their different names.
#
# <img src="../images/pandas-dataframe-anatomy.png" alt="pandas dataframe with different sections labelled" width="800"/>
#
# ## 3. Exporting data
#
# After you've made a few changes to your data, you might want to export it and save it so someone else can access the changes.
#
# pandas allows you to export `DataFrame`'s to `.csv` format using `.to_csv()` or spreadsheet format using `.to_excel()`.
#
# We haven't made any changes yet to the `car_sales` `DataFrame` but let's try export it.
# Export the car sales DataFrame to csv
car_sales.to_csv("data/data-analysis/exported-car-sales.csv")
# Running this will save a file called `exported-car-sales.csv` to the current folder.
#
# <img src="../images/pandas-exported-car-sales-csv.png" alt="folder with exported car sales csv file highlighted" width="600"/>
# ## Exercises
#
# 1. Practice importing a `.csv` file using `pd.read_csv()`; you can download `heart-disease.csv`. This file contains anonymous patient medical records and whether or not each patient has heart disease.
# 2. Practice exporting a `DataFrame` using `.to_csv()`. You could export the heart disease `DataFrame` after you've imported it.
#
# **Note:**
# * Make sure the `heart-disease.csv` file is in the same folder as your notebook, or be sure to use the filepath where the file is.
# * You can name the variables and exported files whatever you like but make sure they're readable.
# Your code here
# ### Example solution
# Importing heart-disease.csv
patient_data = pd.read_csv("data/intro/heart-disease.csv")
patient_data
# Exporting the patient_data DataFrame to csv
patient_data.to_csv("data/data-analysis/exported-patient-data.csv")
# <img src="../images/pandas-exported-patient-data-csv.png" alt="folder containing exported patient data csv file" width="600"/>
# ## 4. Describing data
#
# One of the first things you'll want to do after you import some data into a pandas `DataFrame` is to start exploring it.
#
# pandas has many built in functions which allow you to quickly get information about a `DataFrame`.
#
# Let's explore some using the `car_sales` `DataFrame`.
car_sales
# `.dtypes` shows us what datatype each column contains.
car_sales.dtypes
# Notice how the `Price` column isn't an integer like `Odometer` or `Doors`. Don't worry, pandas makes this easy to fix.
# `.describe()` gives you a quick statistical overview of the numerical columns.
car_sales.describe()
# `.info()` shows a handful of useful information about a `DataFrame` such as:
# * How many entries (rows) there are
# * Whether there are missing values (if a columns non-null value is less than the number of entries, it has missing values)
# * The datatypes of each column
car_sales.info()
# You can also call various statistical and mathematical methods such as `.mean()` or `.sum()` directly on a `DataFrame` or `Series`.
# Calling .mean() on a DataFrame
car_sales.mean()
# Calling .mean() on a Series
car_prices = pd.Series([3000, 3500, 11250])
car_prices.mean()
# Calling .sum() on a DataFrame
car_sales.sum()
# Calling .sum() on a Series
car_prices.sum()
# Calling these on a whole `DataFrame` may not be as helpful as targeting an individual column. But it's helpful to know they're there.
#
# `.columns` will show you all the columns of a `DataFrame`.
car_sales.columns
# You can save them to a list which you could use later.
# Save car_sales columns to a list
car_columns = car_sales.columns
car_columns[0]
# `.index` will show you the values in a `DataFrame`'s index (the column on the far left).
car_sales.index
# pandas `DataFrame`'s, like Python lists, are 0-indexed (unless otherwise changed). This means they start at 0.
#
# <img src="../images/pandas-dataframe-zero-indexed.png" alt="dataframe with index number 0 highlighted" width="700"/>
# Show the length of a DataFrame
len(car_sales)
# So even though the length of our `car_sales` dataframe is 10, this means the indexes go from 0-9.
# ## 5. Viewing and selecting data
# * `head()`
# * `tail()`
# * `loc`
# * `iloc`
# * `columns` - `df['A']`
# * boolean indexing - `df[df['A'] > 5]`
# * `crosstab()`
# * `.plot()`
# * `hist()`
#
# In practice, you'll constantly be making changes to your data, and viewing it. Changing it, viewing it, changing it, viewing it.
#
# You won't always want to change all of the data in your `DataFrame`'s either. So there are just as many different ways to select data as there is to view it.
#
# `.head()` allows you to view the first 5 rows of your `DataFrame`. You'll likely be using this one a lot.
# Show the first 5 rows of car_sales
car_sales.head()
# Why 5 rows? Good question. I don't know the answer. But 5 seems like a good amount.
#
# Want more than 5?
#
# No worries, you can pass `.head()` an integer to display more than or less than 5 rows.
# Show the first 7 rows of car_sales
car_sales.head(7)
# `.tail()` allows you to see the bottom 5 rows of your `DataFrame`. This is helpful if your changes are influencing the bottom rows of your data.
# Show bottom 5 rows of car_sales
car_sales.tail()
# You can use `.loc[]` and `.iloc[]` to select data from your `Series` and `DataFrame`'s.
#
# Let's see.
# Create a sample series
animals = pd.Series(["cat", "dog", "bird", "snake", "ox", "lion"],
index=[0, 3, 9, 8, 67, 3])
animals
# `.loc[]` takes a label as input (here the labels happen to be integers) and selects every row of your `Series` or `DataFrame` whose index matches that label.
# Select all indexes with 3
animals.loc[3]
# Select index 9
animals.loc[9]
# Let's try with our `car_sales` DataFrame.
car_sales
# Select row at index 3
car_sales.loc[3]
# `iloc[]` does a similar thing but works with exact positions.
#
animals
# Select row at position 3
animals.iloc[3]
# Even though `'snake'` appears at index 8 in the series, it's shown using `.iloc[3]` because it's at the 3rd (starting from 0) position.
#
# Let's try with the `car_sales` `DataFrame`.
# Select row at position 3
car_sales.iloc[3]
# You can see it's the same as `.loc[]` because the index is in order, position 3 is the same as index 3.
#
# You can also use slicing with `.loc[]` and `.iloc[]`.
# Get all rows up to position 3
animals.iloc[:3]
# Get all rows up to (and including) index 3
car_sales.loc[:3]
# When should you use `.loc[]` or `.iloc[]`?
# * Use `.loc[]` when you're referring to **indexes**.
# * Use `.iloc[]` when you're referring to **positions** in the `DataFrame` (index is out of order).
# If you want to select a particular column, you can use `['COLUMN_NAME']`.
# Select Make column
car_sales['Make']
# Select Colour column
car_sales['Colour']
# Boolean indexing works with column selection too. Using it will select the rows which fulfill the condition in the brackets.
# Select cars with over 100,000 on the Odometer
car_sales[car_sales["Odometer (KM)"] > 100000]
# Select cars which are made by Toyota
car_sales[car_sales["Make"] == "Toyota"]
# `pd.crosstab()` is a great way to view two different columns together and compare them.
# Compare car Make with number of Doors
pd.crosstab(car_sales["Make"], car_sales["Doors"])
# If you want to compare more columns in the context of another column, you can use `.groupby()`.
car_sales
# Group by the Make column and find the mean of the other columns
car_sales.groupby(["Make"]).mean()
# pandas even allows for quick plotting of columns so you can see your data visually. To plot, you'll have to import `matplotlib`. If your plots aren't showing, try running the two lines of code below.
#
# `%matplotlib inline` is a special command which tells Jupyter to show your plots. Commands with `%` at the front are called magic commands.
# Import matplotlib and tell Jupyter to show plots
import matplotlib.pyplot as plt
# %matplotlib inline
# You can visualize a column by calling `.plot()` on it.
car_sales["Odometer (KM)"].plot()
# You can see the distribution of a column by calling `.hist()` on it.
#
# The distribution of something is a way of describing the spread of different values.
car_sales["Odometer (KM)"].hist()
# In this case, the majority of the **distribution** (spread) of the `"Odometer"` column is more towards the left of the graph. And there are two values which are more to the right. These two values to the right could be considered **outliers** (not part of the majority).
#
# Now what if we wanted to plot our `"Price"` column?
#
# Let's try.
car_sales["Price"].plot()
# Trying to run it leaves us with an error. This is because the `"Price"` column of `car_sales` isn't in numeric form. We can tell this because of the `TypeError: no numeric data to plot` at the bottom of the cell.
#
# We can check this with `.info()`.
car_sales.info()
# So what can we do?
#
# We need to convert the `"Price"` column to a numeric type.
#
# How?
#
# We could try a few different things on our own. But let's practice researching.
#
#
#
# **1.** Open up a search engine and type in something like "how to convert a pandas column price to integer".
#
# In the first result, I found this [Stack Overflow question and answer](https://stackoverflow.com/questions/44469313/price-column-object-to-int-in-pandas), where someone has had the same problem as us and someone else has provided an answer.
#
# **Note:** Sometimes the answer you're looking for won't be in the first result, or the 2nd or the 3rd. You may have to combine a few different solutions.
#
# **2.** In practice, you'd read through this and see if it relates to your problem.
#
# **3.** If it does, you can adjust the code from what's given in the Stack Overflow answer(s) to your own problem.
#
# <img src="../images/pandas-steps-in-stack-overflow-process.png" alt="steps in researching a problem you have using Google and Stack Overflow" width="1000"/>
#
# What's important in the beginning is not to remember every single detail off by heart but to know where to look. Remember, if in doubt, write code, run it, see what happens.
#
# Let's copy the answer code here and see how it relates to our problem.
#
# Answer code: ```dataframe['amount'] = dataframe['amount'].str.replace('[\$\,\.]', '').astype(int)```
#
# There's a lot going on here but what we can do is change the parts which aren't in our problem and keep the rest the same.
#
# Our `DataFrame` is called `car_sales` not `dataframe`.
#
# ```car_sales['amount'] = car_sales['amount'].str.replace('[\$\,\.]', '').astype(int)```
#
# And our `'amount'` column is called `"Price"`.
#
# ```car_sales["Price"] = car_sales["Price"].str.replace('[\$\,\.]', '').astype(int)```
#
# That looks better. What the code on the right of `car_sales["Price"]` is saying is "remove the $ sign and comma and change the type of the cell to int".
#
# Let's see what happens.
# Change Price column to integers
car_sales["Price"] = car_sales["Price"].str.replace('[\$\,\.]', '').astype(int)
car_sales
# Beautiful! Now let's try to plot it again.
car_sales["Price"].plot()
# This is one of the many ways you can manipulate data using pandas.
#
# When you see a number of different functions in a row, it's referred to as **chaining**. This means you add together a series of functions all to do one overall task.
#
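# As a quick illustration of chaining (a sketch that just reuses the car_sales DataFrame from above):
# filter to Toyotas, select the Price column and take its mean, all in one line
car_sales[car_sales["Make"] == "Toyota"]["Price"].mean()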
# Let's see a few more ways of manipulating data.
# ## 6. Manipulating data
#
# You've seen an example of one way to manipulate data but pandas has many more. How many more? Put it this way, if you can imagine it, chances are, pandas can do it.
#
# Let's start with string methods. Because pandas is based on Python, however you can manipulate strings in Python, you can do the same in pandas.
#
# You can access the string value of a column using `.str`. Knowing this, how do you think you'd set a column to lowercase?
# Lower the Make column
car_sales["Make"].str.lower()
# Notice how it doesn't change the values of the original `car_sales` `DataFrame` unless we assign the result back to the column.
# View top 5 rows, Make column not lowered
car_sales.head()
# Set Make column to be lowered
car_sales["Make"] = car_sales["Make"].str.lower()
car_sales.head()
# Reassigning the column changes it in the original `DataFrame`. This trend occurs throughout all kinds of data manipulation with pandas.
#
# Some functions have a parameter called `inplace` which means a `DataFrame` is updated in place without having to reassign it.
#
# Let's see what it looks like in combination with `.fillna()`, a function which fills missing data. But the thing is, our table isn't missing any data.
#
# In practice, it's likely you'll work with datasets which aren't complete. What this means is you'll have to decide how to fill the missing data, or whether to remove the rows which have data missing.
#
# Let's check out what a version of our `car_sales` `DataFrame` might look like with missing values.
# Import car sales data with missing values
car_sales_missing = pd.read_csv("data/data-analysis/car-sales-missing-data.csv")
car_sales_missing
# Missing values are shown by `NaN` in pandas. This can be considered the equivalent of `None` in Python.
#
# Let's use the `.fillna()` function to fill the `Odometer` column with the average of the other values in the same column.
#
# We'll do it with and without `inplace`.
# Fill Odometer column missing values with mean
car_sales_missing["Odometer"].fillna(car_sales_missing["Odometer"].mean(),
inplace=False) # inplace is set to False by default
# Now let's check the original `car_sales_missing` `DataFrame`.
car_sales_missing
# Because `inplace` is set to `False` (default), there's still missing values in the `"Odometer"` column. Let's try setting `inplace` to `True`.
# Fill the Odometer missing values to the mean with inplace=True
car_sales_missing["Odometer"].fillna(car_sales_missing["Odometer"].mean(),
inplace=True)
# Now let's check the `car_sales_missing` `DataFrame` again.
car_sales_missing
# The missing values in the `Odometer` column have been filled with the mean value of the same column.
#
# In practice, you might not want to fill a column's missing values with the mean, but this example was to show the difference between `inplace=False` (default) and `inplace=True`.
#
# Whichever you choose to use will depend on how you structure your code. All you have to remember is `inplace=False` returns a copy of the `DataFrame` you're working with. This is helpful if you want to make a duplicate of your current `DataFrame` and save it to another variable.
#
# We've filled some values but there's still missing values in `car_sales_missing`. Let's say you wanted to remove any rows which had missing data and only work with rows which had complete coverage.
#
# You can do this using `.dropna()`.
# Remove missing data
car_sales_missing.dropna()
# It appears the rows with missing values have been removed, now let's check to make sure.
car_sales_missing
# Hmm, they're still there, can you guess why?
#
# It's because `.dropna()` has `inplace=False` as default. We can either set `inplace=True` or reassign the `car_sales_missing` `DataFrame`.
# The following two lines do the same thing
car_sales_missing.dropna(inplace=True) # Operation happens inplace without reassignment
car_sales_missing = car_sales_missing.dropna() # car_sales_missing gets reassigned to same DataFrame but with dropped values
# Now if we check again, the rows with missing values are gone and the index numbers have been updated.
car_sales_missing
# Instead of removing or filling data, what if you wanted to create it?
#
# For example, creating a column called `Seats` for number of seats.
#
# pandas allows for simple extra column creation on `DataFrame`'s. Three common ways are adding a `Series`, Python list or by using existing columns.
# Create a column from a pandas Series
seats_column = pd.Series([5, 5, 5, 5, 5, 5, 5, 5, 5, 5])
car_sales["Seats"] = seats_column
car_sales
# Creating a column is similar to selecting a column, you pass the target `DataFrame` along with a new column name in brackets.
# Create a column from a Python list
engine_sizes = [1.3, 2.0, 3.0, 4.2, 1.6, 1, 2.0, 2.3, 2.0, 3.0]
car_sales["Engine Size"] = engine_sizes
car_sales
# You can also make a column by directly combining the values of other columns. Such as, price per kilometre on the Odometer.
# Column from other columns
car_sales["Price per KM"] = car_sales["Price"] / car_sales["Odometer (KM)"]
car_sales
# Now can you think why this might not be a great column to add?
#
# It could be confusing when a car with fewer kilometres on the odometer appears to cost more per kilometre than one with more. When buying a car, fewer kilometres on the odometer is usually better.
#
# This kind of column creation is called **feature engineering**. If `Make`, `Colour`, `Doors` are features of the data, creating `Price per KM` could be another. But in this case, not a very good one.
#
# As for column creation, you can also create a new column setting all values to one standard value.
# Column to all 1 value (number of wheels)
car_sales["Number of wheels"] = 4
car_sales
car_sales["Passed road safety"] = True
car_sales
# Now you've created some columns, you decide to show your colleague what you've done. When they ask about the `Price per KM` column, you tell them you're not really sure why it's there.
#
# You decide you better remove it to prevent confusion.
#
# You can remove a column using `.drop('COLUMN_NAME', axis=1)`.
# Drop the Price per KM column
car_sales = car_sales.drop("Price per KM", axis=1)
car_sales
# Why `axis=1`? Because that's the axis columns live on. Rows live on `axis=0`.
#
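# For comparison, here's a quick sketch of dropping a row instead: pass the row's index label and axis=0
car_sales.drop(0, axis=0)  # returns a copy with the row at index 0 removed; car_sales itself is unchanged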
# Let's say you wanted to shuffle the order of your `DataFrame` so you could split it into train, validation and test sets. And even though the order of your samples was random, you wanted to make sure.
#
# To do so you could use `.sample(frac=1)`.
#
# `.sample()` randomly samples different rows from a `DataFrame`. The `frac` parameter dictates the fraction, where 1 = 100% of rows, 0.5 = 50% of rows, 0.01 = 1% of rows.
# Sample car_sales
car_sales_sampled = car_sales.sample(frac=1)
car_sales_sampled
# Notice how the rows remain intact but their order is mixed (check the indexes).
#
# `.sample(frac=X)` is also helpful when you're working with a large `DataFrame`. Say you had 2,000,000 rows.
#
# Running tests, analysis and machine learning algorithms on 2,000,000 rows could take a long time. And since being a data scientist or machine learning engineer is about reducing the time between experiments, you might begin with a sample of rows first.
#
# For example, you could use `forty_k_rows = two_mil_rows.sample(frac=0.02)` to work on 40,000 rows from a `DataFrame` called `two_mil_rows` containing 2,000,000 rows.
#
# What if you wanted to get the indexes back in order?
#
# You could do so using `.reset_index()`.
# Reset the indexes of car_sales_sampled
car_sales_sampled.reset_index()
# Calling `.reset_index()` on a `DataFrame` resets the index numbers to their defaults. It also creates a new `Index` column by default which contains the previous index values.
#
# Finally, what if you wanted to apply a function to a column. Such as, converting the `Odometer` column from kilometers to miles.
#
# You can do so using the `.apply()` function and passing it a lambda function. We know there's about 1.6 kilometers in a mile, so if you divide the value in the `Odometer` column by 1.6, it should convert it to miles.
# Change the Odometer values from kilometres to miles
car_sales["Odometer (KM)"].apply(lambda x: x / 1.6)
# Now let's check our `car_sales` `DataFrame`.
car_sales
# The `Odometer` column didn't change. Can you guess why?
#
# We didn't reassign it.
# Reassign the Odometer column to be miles instead of kilometers
car_sales["Odometer (KM)"] = car_sales["Odometer (KM)"].apply(lambda x: x / 1.6)
car_sales
# If you've never seen a lambda function they can be tricky. What the line above is saying is "take the value in the `Odometer (KM)` column (`x`) and set it to be itself divided by 1.6".
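# For comparison, here's a sketch of the same conversion written with a named function instead of a lambda
# (shown without reassigning, so it doesn't change the DataFrame again).
# +
def km_to_miles(x):
    return x / 1.6  # same operation as the lambda above

car_sales["Odometer (KM)"].apply(km_to_miles)
# -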
# ## Summary
#
# ### Main topics we covered
# * [Series](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.html) - a single column (can be multiple rows) of values.
# * [DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/frame.html) - multiple columns/rows of values (a DataFrame is comprised of multiple Series).
# * [Importing data](https://pandas.pydata.org/pandas-docs/stable/reference/io.html) - we used [`pd.read_csv()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html#pandas.read_csv) to read in a CSV (comma-separated values) file but there are multiple options for reading data.
# * [Exporting data](https://pandas.pydata.org/pandas-docs/stable/reference/io.html) - we exported our data using `to_csv()`, however there are multiple methods of exporting data.
# * [Describing data](https://pandas.pydata.org/pandas-docs/stable/getting_started/basics.html#attributes-and-underlying-data)
# * `df.dtypes` - find the datatypes present in a dataframe.
# * `df.describe()` - find various numerical features of a dataframe.
# * `df.info()` - find the number of rows and whether or not any of them are empty.
# * [Viewing and selecting data](https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html#viewing-data)
# * `df.head()` - view the first 5 rows of `df`.
# * `df.loc` & `df.iloc` - select specific parts of a dataframe.
# * `df['A']` - select column `A` of `df`.
# * `df[df['A'] > 1000]` - select the rows of `df` where column `A` has values over 1000.
# * `df['A'].plot()` - plot values from column `A` using matplotlib (defaults to a line graph).
# * [Manipulating data and performing operations](https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html#operations) - pandas has many built-in functions you can use to manipulate data, also many of the Python operators (e.g. `+`, `-`, `>`, `==`) work with pandas.
#
# ### Further reading
# Since pandas is such a large library, it would be impossible to cover it all in one go.
#
# The following are some resources you might want to look into for more.
# * [Python for Data Analysis by <NAME>](https://www.amazon.com.au/Python-Data-Analysis-Wrangling-IPython-ebook/dp/B075X4LT6K) - possibly the most complete text of the pandas library (apart from the documentation itself) written by the creator of pandas.
# * [Data Manipulation with Pandas (section of Python Data Science Handbook by Jake VanderPlas)](https://jakevdp.github.io/PythonDataScienceHandbook/03.00-introduction-to-pandas.html) - a very hands-on approach to many of the main functions in the pandas library.
#
# ### Exercises
# After completing this notebook, your next step should be to try out some more pandas code of your own.
#
# I'd suggest at least going through number 1 (write out all the code yourself), a couple from number 2 (again, write out the code yourself) and spend an hour reading number 3 (this is vast but keep it in mind).
#
# 1. [10-minute introduction to pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/10min.html) - go through all the functions here and be sure to write out the code yourself.
# 2. [Pandas getting started tutorial](https://pandas.pydata.org/pandas-docs/stable/getting_started/intro_tutorials/index.html) - pick a couple from here which spark your interest and go through them, writing out the code yourself.
# 3. [Pandas essential basic functionality](https://pandas.pydata.org/pandas-docs/stable/user_guide/basics.html) - spend an hour reading this and bookmark it for whenever you need to come back for an overview of pandas.
| introduction-to-pandas-theory.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="GIaCGX4o2QHD"
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchtext
from torchtext import data
from sklearn.metrics import confusion_matrix,f1_score
import matplotlib.pyplot as plt
import random
import seaborn as sns
from sklearn.metrics import classification_report
# + id="THkWQr0P27hg"
SEED = 1234
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# + id="fZLJVu-X9U1W"
def generate_bigrams(x):
n_grams = set(zip(*[x[i:] for i in range(2)]))
for n_gram in n_grams:
x.append(" ".join(n_gram))
return x
# + id="GRymEiKE910U"
TEXT = data.Field(tokenize="spacy",include_lengths=True)
LABEL = data.LabelField()
fields = [(None,None),('text', TEXT),('label',LABEL), (None,None)]
# + id="QEVph_8F-4dE"
train_data, test_data = data.TabularDataset.splits(
path = '/content/drive/MyDrive/data/benchmarking_data',
train = 'train.csv',
test = 'valid.csv',
format = 'csv',
fields = fields,
skip_header = True
)
# + id="TureHuYa_DAZ"
train_data,valid_data = train_data.split(random_state=random.seed(SEED))
# + colab={"base_uri": "https://localhost:8080/"} id="qSBU2giH_V8n" outputId="ea04ed4d-807a-4250-edd9-1a0dfda438e6"
MAX_VOCAB_SIZE = 25_000
TEXT.build_vocab(train_data,max_size=MAX_VOCAB_SIZE,
vectors="glove.6B.100d",
unk_init = torch.Tensor.normal_)
LABEL.build_vocab(train_data)
# + id="3GbN3aCb_X62"
BATCH_SIZE = 64
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_dl,valid_dl,test_dl = data.BucketIterator.splits(
(train_data,valid_data,test_data),
batch_size=BATCH_SIZE,
device=device,
sort_within_batch=True,
sort_key=lambda x:len(x.text)
)
# + id="FafeIbW5C-pb"
class LSTM(nn.Module):
def __init__(self,input_dim,embedding_dim,hidden_dim,output_dim,n_layers,
bidirectional,dropout):
super().__init__()
self.embedding = nn.Embedding(input_dim,embedding_dim)
self.lstm = nn.LSTM(embedding_dim,hidden_dim,num_layers=n_layers,
bidirectional=bidirectional,
dropout=dropout)
self.fc = nn.Linear(hidden_dim*2,output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self,text,text_lengths):
#text = [sent_len,batch_size]
embedded = self.dropout(self.embedding(text))
#embedded = [sent_len,batch_size,emb_dim]
#pack sequence
packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded,text_lengths)
packed_output, (hidden,cell) = self.lstm(packed_embedded)
#unpack sentence
output,output_lengths = nn.utils.rnn.pad_packed_sequence(packed_output)
#output = [sent len, batch size, hid dim * num directions]
#output over padding tokens are zero tensors
#hidden = [num layers * num directions, batch size, hid dim]
#cell = [num layers * num directions, batch size, hid dim]
#concat the final forward (hidden[-2,:,:]) and backward (hidden[-1,:,:]) hidden layers and apply dropout
hidden = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=1))
#hidden = [batch_size,hid_dim*num_directions]
return self.fc(hidden)
# + id="M6l84616GD88"
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 256
OUTPUT_DIM = len(LABEL.vocab)
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.5
model = LSTM(INPUT_DIM,
EMBEDDING_DIM,
HIDDEN_DIM,
OUTPUT_DIM,
N_LAYERS,
BIDIRECTIONAL,
DROPOUT)
# + colab={"base_uri": "https://localhost:8080/"} id="I-IaIPZAGHkf" outputId="6e996181-eb64-4a2b-b6c6-562e124c849c"
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"The model has {count_parameters(model):,} trainable parameters")
# + id="lF6-BxMBcL_3"
import torch.optim as optim
optimizer = optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss()
model = model.to(device)
criterion = criterion.to(device)
# + id="4ZyiYyAkchc6"
def accuracy(y_pred,y):
"""
Returns an accuracy score (the fraction of predictions that match the labels)
"""
max_preds = y_pred.argmax(dim=1,keepdim=True)
correct = max_preds.squeeze(1).eq(y)
return correct.sum()/torch.FloatTensor([y.shape[0]])
# + id="AL9RFdN3Gjmt"
#train the model
def train(model,iterator,optimizer,criterion):
epoch_loss = 0
epoch_acc = 0
model.train()
for batch in iterator:
optimizer.zero_grad()
text,text_lengths = batch.text
predictions = model(text,text_lengths)
loss = criterion(predictions,batch.label)
acc = accuracy(predictions,batch.label)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss/len(iterator),epoch_acc/len(iterator)
# + id="KrhK2QPMV1Im"
def evaluate(model,iterator,criterion):
epoch_loss = 0
epoch_acc = 0
model.eval()
with torch.no_grad():
for batch in iterator:
text,text_lengths = batch.text
predictions = model(text,text_lengths)
loss = criterion(predictions,batch.label)
acc = accuracy(predictions,batch.label)
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss/len(iterator),epoch_acc/len(iterator)
# + id="aCponiF4dSQ8"
import time
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
# + colab={"base_uri": "https://localhost:8080/"} id="VL35mFt-dVrR" outputId="97e31005-3c00-45ae-a1c5-58a6635a720b"
N_EPOCHS = 5
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss, train_acc = train(model, train_dl, optimizer, criterion)
valid_loss, valid_acc = evaluate(model, valid_dl, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), '/content/drive/MyDrive/Models/INTENT/lstm-model.pt')
print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
# + id="RVpri4_zrBpk"
def plot_confusion_matrix(test_y, predict_y):
C = confusion_matrix(test_y, predict_y)
# C = 9,9 matrix, each cell (i,j) represents number of points of class i are predicted class j
A =(((C.T)/(C.sum(axis=1))).T)
#divid each element of the confusion matrix with the sum of elements in that column
# C = [[1, 2],
# [3, 4]]
# C.T = [[1, 3],
# [2, 4]]
# C.sum(axis = 1) axis=0 corresonds to columns and axis=1 corresponds to rows in two diamensional array
# C.sum(axix =1) = [[3, 7]]
# ((C.T)/(C.sum(axis=1))) = [[1/3, 3/7]
# [2/3, 4/7]]
# ((C.T)/(C.sum(axis=1))).T = [[1/3, 2/3]
# [3/7, 4/7]]
# sum of row elements = 1
B =(C/C.sum(axis=0))
#divid each element of the confusion matrix with the sum of elements in that row
# C = [[1, 2],
# [3, 4]]
# C.sum(axis = 0) axis=0 corresonds to columns and axis=1 corresponds to rows in two diamensional array
# C.sum(axix =0) = [[4, 6]]
# (C/C.sum(axis=0)) = [[1/4, 2/6],
# [3/4, 4/6]]
labels = [1,2,3,4,5,6,7,8,9]
# representing A in heatmap format
print("-"*20, "Confusion matrix", "-"*20)
plt.figure(figsize=(20,7))
sns.heatmap(C, annot=True, cmap="YlGnBu", fmt=".3f", xticklabels=labels, yticklabels=labels)
plt.xlabel('Predicted Class')
plt.ylabel('Original Class')
plt.show()
print("-"*20, "Precision matrix (Columm Sum=1)", "-"*20)
plt.figure(figsize=(20,7))
sns.heatmap(B, annot=True, cmap="YlGnBu", fmt=".3f", xticklabels=labels, yticklabels=labels)
plt.xlabel('Predicted Class')
plt.ylabel('Original Class')
plt.show()
# representing B in heatmap format
print("-"*20, "Recall matrix (Row sum=1)", "-"*20)
plt.figure(figsize=(20,7))
sns.heatmap(A, annot=True, cmap="YlGnBu", fmt=".3f", xticklabels=labels, yticklabels=labels)
plt.xlabel('Predicted Class')
plt.ylabel('Original Class')
plt.show()
# + id="bHKqlcp7dZbG"
def get_predictions(model,iterator):
y_pred = []
y_true = []
model.eval()
with torch.no_grad():
for batch in iterator:
text,text_lengths = batch.text
predictions = model(text,text_lengths)
y_pred.extend(torch.argmax(predictions,axis=-1).tolist())
y_true.extend(batch.label.tolist())
return y_pred,y_true
# + id="7_WhPQvli2UK"
y_pred,y_true = get_predictions(model,test_dl)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Li0tbflqrPD5" outputId="1f4d07ad-29ee-4e4e-83b1-11b589e641d0"
plot_confusion_matrix(y_true,y_pred)
# + colab={"base_uri": "https://localhost:8080/"} id="P3SIDCeCjGN8" outputId="e63002e6-8f67-4dfd-9480-153ee2de45e6"
print('Classification Report:')
print(classification_report(y_true, y_pred))
# + id="NRjpUyy3rm2m"
| intent-recogntion/nbs/LSTM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Spectral Vertex Nomination
#
# This demo shows how to use the Spectral Vertex Nomination (SVN) class. We will use SVN to nominate vertices in a Stochastic Block Model (SBM)
# + pycharm={"name": "#%%\n"}
# imports
import numpy as np
from graspologic.nominate import SpectralVertexNomination
from graspologic.simulations import sbm
from graspologic.plot import heatmap
from matplotlib import pyplot as plt
# %matplotlib inline
# + [markdown] pycharm={"name": "#%% md\n"}
# ## What is Spectral Vertex Nomination?
#
# Given a graph $G=(V,E)$ and a subset of $V$ called $S$ (the "seed"), Single Graph Vertex Nomination is the problem of ranking all vertices in $V$ by their degree of relation to the members of $S$.
# Spectral Vertex Nomination (SVN) solves the Vertex Nomination problem by embedding a graph's adjacency matrix into a low-dimensional Euclidean space, then using distance-based methods to establish relationships between vertices. (See the embedding tutorials.) A short illustrative sketch of this embed-then-rank idea is included after the example graph is constructed below.
#
# SVN has the advantage of being extremely efficient on very large graphs, and requires very little prior knowledge of a graph's properties or structure. However, it does somewhat conflate the notion of community membership with vertex similarity.
# This might not be appropriate for all vertex nomination use cases. Synthetic SBM data, with inherently defined communities, is therefore a good setting for demonstrating SVN.
# + pycharm={"name": "#%%\n"}
# construct graph from sbm
n_verts = 100
p = np.array([[0.5, 0.35, 0.2],
[0.35, 0.6, 0.3],
[0.2, 0.3, 0.65]])
labels = np.array([0] * n_verts + [1] * n_verts + [2] * n_verts)
adj = np.array(sbm(3 * [n_verts], p), dtype=int)
fig = heatmap(adj, title='3-block SBM (undirected)', inner_hier_labels=labels)
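# Conceptually (an illustrative sketch only, not necessarily the exact internals of the class),
# SVN boils down to: embed the adjacency matrix, then rank vertices by distance in the embedding space.
from graspologic.embed import AdjacencySpectralEmbed
from sklearn.neighbors import NearestNeighbors
embedding = AdjacencySpectralEmbed().fit_transform(adj)
nn = NearestNeighbors(n_neighbors=5).fit(embedding)
distances, nominees = nn.kneighbors(embedding[[0]])  # the 5 vertices closest to vertex 0 in the embedding
nominees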
# + [markdown] pycharm={"name": "#%% md\n"}
# There are two cases of single graph vertex nomination. In the unattributed case, the seed $S$ is simply a list of any number of vertices in the graph $G$. For each seed vertex $s \in S$, SVN will return a list of the other vertices in order of distance to $s$.
# Any distance metric supported by sklearn's `NearestNeighbors` can be used, euclidean is default and is generally sufficient.
#
# The `n_neighbors` argument can be used to specify how many vertices should be nominated for each seed, otherwise all vertices will be ranked for each seed. Setting `n_neighbors` will improve runtime on large graphs and might also improve prediction accuracy in some cases.
# + pycharm={"name": "#%%\n"}
# instantiate a default SVN
svn = SpectralVertexNomination(n_neighbors=5)
# fit to the adjacency matrix
svn.fit(X=adj)
# + [markdown] pycharm={"name": "#%% md\n"}
# The predict method takes a numpy array of seed vertex indices as an argument. It generates the nomination list, and returns a tuple of the nomination list itself and the associated distance matrix.
#
# Here, we select a seed of 5 random vertices, and the top 5 vertices nominated for each seed are plotted along the first two dimensions of the embedded space.
# + pycharm={"name": "#%%\n"}
# define the seed as a random sample of 5 vertices from V.
seed = np.random.choice(3 * n_verts, size=5, replace=False).astype(int)
nomination_matrix, distance_matrix = svn.predict(seed)
color = ['red', 'lightgreen', 'gold', 'cyan', 'pink']
seed_color = ['firebrick', 'green', 'tan', 'darkblue', 'purple']
plt.figure(figsize=(12, 9))
for i in range(nomination_matrix.shape[1]):
plt.scatter(svn.embedding_[nomination_matrix[:, i], 0],
svn.embedding_[nomination_matrix[:, i], 1],
c=color[i])
plt.scatter(svn.embedding_[seed[i], 0],
svn.embedding_[seed[i], 1],
c=seed_color[i])
plt.xticks([], [])
plt.yticks([], [])
plt.title('Top 5 Nominees For Each Seed (black) in First Two Embeded Dimensions')
# + [markdown] pycharm={"name": "#%% md\n"}
# The attributed case is not currently handled by Graspologic, and may be added in the future.
| docs/tutorials/vertex_nomination/SpectralVertexNomination.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Qiskit
# Welcome to the Quantum Challenge! Here you will be using Qiskit, the open source quantum software development kit developed by IBM Quantum and community members around the globe. The following exercises will familiarize you with the basic elements of Qiskit and quantum circuits.
#
# To begin, let us define what a quantum circuit is:
#
# > **"A quantum circuit is a computational routine consisting of coherent quantum operations on quantum data, such as qubits. It is an ordered sequence of quantum gates, measurements, and resets, which may be conditioned on real-time classical computation."** (https://qiskit.org/textbook/ch-algorithms/defining-quantum-circuits.html)
#
# While this might be clear to a quantum physicist, don't worry if it is not self-explanatory to you. During this exercise you will learn what a qubit is, how to apply quantum gates to it, and how to measure its final state. You will then be able to create your own quantum circuits! By the end, you should be able to explain the fundamentals of quantum circuits to your colleagues.
#
# Before starting with the exercises, please run *Cell 1* below by clicking on it and pressing 'shift' + 'enter'. This is the general way to execute a code cell in the Jupyter notebook environment that you are using now. While it is running, you will see `In [*]:` in the top left of that cell. Once it finishes running, you will see a number instead of the star, which indicates how many cells you've run. You can find more information about Jupyter notebooks here: https://qiskit.org/textbook/ch-prerequisites/python-and-jupyter-notebooks.html.
#
# ---
# For useful tips to complete this exercise as well as pointers for communicating with other participants and asking questions, please take a look at the following [repository](https://github.com/qiskit-community/may4_challenge_exercises). You will also find a copy of these exercises, so feel free to edit and experiment with these notebooks.
#
# ---
# +
# Cell 1
import numpy as np
from qiskit import Aer, QuantumCircuit, execute
from qiskit.visualization import plot_histogram
from IPython.display import display, Math, Latex
from may4_challenge import plot_state_qsphere
from may4_challenge.ex1 import minicomposer
from may4_challenge.ex1 import check1, check2, check3, check4, check5, check6, check7, check8
from may4_challenge.ex1 import return_state, vec_in_braket, statevec
# -
# ## Exercise I: Basic Operations on Qubits and Measurements
#
# ### Writing down single-qubit states
# Let us start by looking at a single qubit. The main difference between a classical bit, which can take the values 0 and 1 only, is that a quantum bit, or **qubit**, can be in the states $\vert0\rangle$, $\vert1\rangle$, as well as a linear combination of these two states. This feature is known as superposition, and allows us to write the most general state of a qubit as:
#
# $$\vert\psi\rangle = \sqrt{1-p}\vert0\rangle + e^{i \phi} \sqrt{p} \vert1\rangle$$
#
# If we were to measure the state of this qubit, we would find the result $1$ with probability $p$, and the result $0$ with probability $1-p$. As you can see, the total probability is $1$, meaning that we will indeed measure either $0$ or $1$, and no other outcome exists.
#
# In addition to $p$, you might have noticed another parameter above. The variable $\phi$ indicates the relative quantum phase between the two states $\vert0\rangle$ and $\vert1\rangle$. As we will discover later, this relative phase is quite important. For now, it suffices to note that the quantum phase is what enables interference between quantum states, resulting in our ability to write quantum algorithms for solving specific tasks.
#
# If you are interested in learning more, we refer you to [the section in the Qiskit textbook on representations of single-qubit states](https://qiskit.org/textbook/ch-states/representing-qubit-states.html).
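# As a quick numerical illustration (a minimal sketch using only NumPy, separate from the numbered cells of this exercise), we can build the state above for chosen values of $p$ and $\phi$ and confirm that the measurement probabilities are indeed $1-p$ and $p$:
# +
# Minimal sketch: build |psi> = sqrt(1-p)|0> + e^(i*phi)*sqrt(p)|1> and check the probabilities
p, phi = 0.3, np.pi / 2  # example values, chosen arbitrarily
psi = np.array([np.sqrt(1 - p), np.exp(1j * phi) * np.sqrt(p)])
print("P(0) =", abs(psi[0])**2, " P(1) =", abs(psi[1])**2)  # expected: approximately 0.7 and 0.3
# -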
#
# ### Visualizing quantum states
# We visualize quantum states throughout this exercise using what is known as a `qsphere`. Here is how the `qsphere` looks for the states $\vert0\rangle$ and $\vert1\rangle$, respectively. Note that the top-most part of the sphere represents the state $\vert0\rangle$, while the bottom represents $\vert1\rangle$.
#
# <img src="qsphere01.png" alt="qsphere with states 0 and 1" style="width: 400px;"/>
#
# It should be no surprise that the superposition state with quantum phase $\phi = 0$ and probability $p = 1/2$ (meaning an equal likelihood of measuring both 0 and 1) is shown on the `qsphere` with two points. However, note also that the size of the circles at the two points is smaller than when we had simply $\vert0\rangle$ and $\vert1\rangle$ above. This is because the size of the circles is proportional to the probability of measuring each one, which is now reduced by half.
#
# <img src="qsphereplus.png" alt="qsphere with superposition 1" style="width: 200px;"/>
#
# In the case of superposition states, where the quantum phase is non-zero, the qsphere allows us to visualize that phase by changing the color of the respective blob. For example, the state with $\phi = 90^\circ$ (degrees) and probability $p = 1/2$ is shown in the `qsphere` below.
#
# <img src="qspherey.png" alt="qsphere with superposition 2" style="width: 200px;"/>
#
# ### Manipulating qubits
# Qubits are manipulated by applying quantum gates. Let's go through an overview of the different gates that we will consider in the following exercises.
#
# First, let's describe how we can change the value of $p$ for our general quantum state. To do this, we will use two gates:
#
# 1. **$X$-gate**: This gate flips between the two states $\vert0\rangle$ and $\vert1\rangle$. This operation is the same as the classical NOT gate. As a result, the $X$-gate is sometimes referred to as a bit flip or NOT gate. Mathematically, the $X$ gate changes $p$ to $1-p$, so in particular from 0 to 1, and vice versa.
#
# 2. **$H$-gate**: This gate allows us to go from the state $\vert0\rangle$ to the state $\frac{1}{\sqrt{2}}\left(\vert0\rangle + \vert1\rangle\right)$. This state is also known as the $\vert+\rangle$ state. Mathematically, this means going from $p=0, \phi=0$ to $p=1/2, \phi=0$. As the final state of the qubit is a superposition of $\vert0\rangle$ and $\vert1\rangle$, the Hadamard gate represents a true quantum operation.
#
# Notice that both gates changed the value of $p$, but not $\phi$. Fortunately for us, it's quite easy to visualize the action of these gates by looking at the figure below.
#
# <img src="quantumgates.png" alt="quantum gates" style="width: 400px;"/>
#
# Once we have the state $\vert+\rangle$, we can then change the quantum phase by applying several other gates. For example, an $S$ gate adds a phase of $90$ degrees to $\phi$, while the $Z$ gate adds a phase of $180$ degrees to $\phi$. To subtract a phase of $90$ degrees, we can apply the $S^\dagger$ gate, which is read as S-dagger, and commonly written as `sdg`. Finally, there is a $Y$ gate which applies a sequence of $Z$ and $X$ gates.
#
# You can experiment with the gates $X$, $Y$, $Z$, $H$, $S$ and $S^\dagger$ to become accustomed to the different operations and how they affect the state of a qubit. To do so, you can run *Cell 2* which starts our circuit widget. After running the cell, choose a gate to apply to a qubit, and then choose the qubit (in the first examples, the only qubit to choose is qubit 0). Watch how the corresponding state changes with each gate, as well as the description of that state. It will also provide you with the code that creates the corresponding quantum circuit in Qiskit below the qsphere.
#
# If you want to learn more about describing quantum states, Pauli operators, and other single-qubit gates, see chapter 1 of our textbook: https://qiskit.org/textbook/ch-states/introduction.html.
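# Before turning to the widget in *Cell 2*, the effect of a few of these gates can also be checked directly in code. The following minimal sketch (an extra illustration, not one of the numbered challenge cells) applies an $X$ gate, an $H$ gate, and an $H$ gate followed by an $S$ gate to a fresh qubit, and prints the resulting states with the helper functions imported in *Cell 1*:
# +
# Extra illustration: apply a few single-qubit gates and inspect the resulting statevectors
for label, gates in [("X", ["x"]), ("H", ["h"]), ("H then S", ["h", "s"])]:
    qc = QuantumCircuit(1)
    for g in gates:
        getattr(qc, g)(0)  # e.g. qc.x(0), qc.h(0), qc.s(0)
    print(label + ":")
    display(Math(vec_in_braket(statevec(qc).data)))
# -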
# Cell 2
# press shift + return to run this code cell
# then, click on the gate that you want to apply to your qubit
# next, you have to choose the qubit that you want to apply it to (choose '0' here)
# click on clear to restart
minicomposer(1, dirac=True, qsphere=True)
# Here are four small exercises to attain different states on the qsphere. You can either solve them with the widget above and copy paste the code it provides into the respective cells to create the quantum circuits, or you can directly insert a combination of the following code lines into the program to apply the different gates:
#
# qc.x(0) # bit flip
# qc.y(0) # bit and phase flip
# qc.z(0) # phase flip
# qc.h(0) # superposition
# qc.s(0) # quantum phase rotation by pi/2 (90 degrees)
# qc.sdg(0) # quantum phase rotation by -pi/2 (-90 degrees)
#
# The '(0)' indicates that we apply this gate to qubit 'q0', which is the first (and in this case only) qubit.
#
# Try to attain the given state on the qsphere in each of the following exercises.
# ### I.i) Let us start by performing a bit flip. The goal is to reach the state $\vert1\rangle$ starting from state $\vert0\rangle$. <img src="state1.png" width="300">
#
#
# If you have reached the desired state with the widget, copy and paste the code from *Cell 2* into *Cell 3* (where it says "FILL YOUR CODE IN HERE") and run it to check your solution.
# +
# Cell 3
def create_circuit():
qc = QuantumCircuit(1)
#
#
# FILL YOUR CODE IN HERE
#
#
qc.x(0)
return qc
# check solution
qc = create_circuit()
state = statevec(qc)
check1(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
# -
# ### I.ii) Next, let's create a superposition. The goal is to reach the state $|+\rangle = \frac{1}{\sqrt{2}}\left(|0\rangle + |1\rangle\right)$. <img src="stateplus.png" width="300">
# Fill in the code in the lines indicated in *Cell 4*. If you prefer the widget, you can still copy the code that the widget gives in *Cell 2* and paste it into *Cell 4*.
# +
# Cell 4
def create_circuit2():
qc = QuantumCircuit(1)
#
#
# FILL YOUR CODE IN HERE
#
#
qc.h(0)
return qc
qc = create_circuit2()
state = statevec(qc)
check2(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
# -
# ### I.iii) Let's combine those two. The goal is to reach the state $|-\rangle = \frac{1}{\sqrt{2}}\left(|0\rangle - |1\rangle\right)$. <img src="stateminus.png" width="300">
# Can you combine the above two tasks to come up with the solution?
# +
# Cell 5
def create_circuit3():
qc = QuantumCircuit(1)
#
#
# FILL YOUR CODE IN HERE
#
#
qc.h(0)
qc.z(0)
return qc
qc = create_circuit3()
state = statevec(qc)
check3(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
# -
# ### I.iv) Finally, we move on to the complex numbers. The goal is to reach the state $|\circlearrowleft\rangle = \frac{1}{\sqrt{2}}\left(|0\rangle - i|1\rangle\right)$. <img src="stateleft.png" width="300">
# +
# Cell 6
def create_circuit4():
qc = QuantumCircuit(1)
#
#
# FILL YOUR CODE IN HERE
#
#
qc.h(0)
qc.sdg(0)
return qc
qc = create_circuit4()
state = statevec(qc)
check4(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
# -
#
# ## Exercise II: Quantum Circuits Using Multi-Qubit Gates
#
# Great job! Now that you've understood the single-qubit gates, let us look at gates operating on multiple qubits. The basic gates on two qubits are given by
#
# qc.cx(c,t) # controlled-X (= CNOT) gate with control qubit c and target qubit t
# qc.cz(c,t) # controlled-Z gate with control qubit c and target qubit t
# qc.swap(a,b) # SWAP gate that swaps the states of qubit a and qubit b
#
# If you'd like to read more about the different multi-qubit gates and their relations, visit chapter 2 of our textbook: https://qiskit.org/textbook/ch-gates/introduction.html.
#
# As before, you can use the two-qubit circuit widget below to see how the combined two qubit state evolves when applying different gates (run *Cell 7*) and get the corresponding code that you can copy and paste into the program. Note that for two qubits a general state is of the form $a|00\rangle + b |01\rangle + c |10\rangle + d|11\rangle$, where $a$, $b$, $c$, and $d$ are complex numbers whose absolute values squared give the probability to measure the respective state; e.g., $|a|^2$ would be the probability to end in state '0' on both qubits. This means we can now have up to four points on the qsphere.
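# As a quick example of this notation (a small sketch, separate from the numbered cells), applying a single $H$ gate to qubit 0 of a fresh two-qubit register leaves only two of the four amplitudes non-zero, each with absolute value $\frac{1}{\sqrt{2}}$; the helper functions from *Cell 1* let us check this directly:
# +
# Small sketch: inspect the amplitudes of a simple two-qubit state
qc = QuantumCircuit(2)
qc.h(0)  # superposition on qubit 0 only, the other qubit stays in |0>
state = statevec(qc)
display(Math(vec_in_braket(state.data)))
# -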
# +
# Cell 7
# press shift + return to run this code cell
# then, click on the gate that you want to apply followed by the qubit(s) that you want it to apply to
# for controlled gates, the first qubit you choose is the control qubit and the second one the target qubit
# click on clear to restart
minicomposer(2, dirac = True, qsphere = True)
# -
# We start with the canonical two qubit gate, the controlled-NOT (also CNOT or CX) gate. Here, as with all controlled two qubit gates, one qubit is labelled as the "control", while the other is called the "target". If the control qubit is in state $|0\rangle$, it applies the identity $I$ gate to the target, i.e., no operation is performed. Instead, if the control qubit is in state $|1\rangle$, an X-gate is performed on the target qubit. Therefore, with both qubits in one of the two classical states, $|0\rangle$ or $|1\rangle$, the CNOT gate is limited to classical operations.
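# The following small sketch (an extra illustration, not one of the numbered cells) shows this classical behaviour explicitly: when the control qubit starts in $\vert0\rangle$ the CNOT leaves the register in $\vert00\rangle$, and when the control starts in $\vert1\rangle$ the target is flipped as well, giving $\vert11\rangle$.
# +
# Small sketch: the CNOT gate flips the target qubit only when the control qubit is |1>
for control_value in [0, 1]:
    qc = QuantumCircuit(2)
    if control_value:
        qc.x(0)  # put the control qubit (qubit 0) in |1>
    qc.cx(0, 1)  # controlled-X with control qubit 0 and target qubit 1
    display(Math(vec_in_braket(statevec(qc).data)))
# -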
#
# This situation changes dramatically when we first apply a Hadamard gate to the control qubit, bringing it into the superposition state $|+\rangle$. The action of a CNOT gate on this non-classical input can produce highly entangled states between control and target qubits. If the target qubit is initially in the $|0\rangle$ state, the resulting state is denoted by $|\Phi^+\rangle$, and is one of the so-called Bell states.
#
# ### II.i) Construct the Bell state $|\Phi^+\rangle = \frac{1}{\sqrt{2}}\left(|00\rangle + |11\rangle\right)$. <img src="phi+.png" width="300">
# For this state we would have probability $\frac{1}{2}$ to measure "00" and probability $\frac{1}{2}$ to measure "11". Thus, the outcomes of both qubits are perfectly correlated.
# +
# Cell 8
def create_circuit():
qc = QuantumCircuit(2)
#
#
# FILL YOUR CODE IN HERE
#
#
qc.h(0)
qc.cnot(0,1)
return qc
qc = create_circuit()
state = statevec(qc) # determine final state after running the circuit
display(Math(vec_in_braket(state.data)))
check5(state)
qc.draw(output='mpl') # we draw the circuit
# -
# Next, try to create the state of perfectly anti-correlated qubits. Note the minus sign here, which indicates the relative phase between the two states.
# ### II.ii) Construct the Bell state $\vert\Psi^-\rangle = \frac{1}{\sqrt{2}}\left(\vert01\rangle - \vert10\rangle\right)$. <img src="psi-.png" width="300">
# +
# Cell 9
def create_circuit6():
qc = QuantumCircuit(2,2) # this time, we not only want two qubits, but also
# two classical bits for the measurement later
#
#
# FILL YOUR CODE IN HERE
#
#
qc.h(0)
qc.x(1)
qc.cx(0, 1)
qc.z(0)
return qc
qc = create_circuit6()
state = statevec(qc) # determine final state after running the circuit
display(Math(vec_in_braket(state.data)))
check6(state)
qc.measure(0, 0) # we perform a measurement on qubit q_0 and store the information on the classical bit c_0
qc.measure(1, 1) # we perform a measurement on qubit q_1 and store the information on the classical bit c_1
qc.draw(output='mpl') # we draw the circuit
# -
# As you can tell from the circuit (and the code) we have added measurement operators to the circuit. Note that in order to store the measurement results, we also need two classical bits, which we have added when creating the quantum circuit: `qc = QuantumCircuit(num_qubits, num_classicalbits)`.
#
# In *Cell 10* we have defined a function `run_circuit()` that will run a circuit on the simulator. If the right state is prepared, we have probability $\frac{1}{2}$ to measure each of the two outcomes, "01" and "10". However, performing the measurement with 1000 shots does not imply that we will measure exactly 500 times "01" and 500 times "10". Just like flipping a coin multiple times, it is unlikely that one will get exactly a 50/50 split between the two possible output values. Instead, there are fluctuations about this ideal distribution. You can call `run_circuit` multiple times to see the variance in the output.
#
# +
# Cell 10
def run_circuit(qc):
backend = Aer.get_backend('qasm_simulator') # we choose the simulator as our backend
result = execute(qc, backend, shots = 1000).result() # we run the simulation
counts = result.get_counts() # we get the counts
return counts
counts = run_circuit(qc)
print(counts)
plot_histogram(counts) # let us plot a histogram to see the possible outcomes and corresponding probabilities
# -
# ### II.iii) You are given the quantum circuit described in the function below. Swap the states of the first and the second qubit.
# This should be your final state: <img src="stateIIiii.png" width="300">
# +
# Cell 11
def create_circuit7():
qc = QuantumCircuit(2)
qc.rx(np.pi/3,0)
qc.x(1)
return qc
qc = create_circuit7()
#
#
# FILL YOUR CODE IN HERE
#
#
qc.swap(0,1)
state = statevec(qc) # determine final state after running the circuit
display(Math(vec_in_braket(state.data)))
check7(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
# -
# ### II.iv) Write a program from scratch that creates the GHZ state (on three qubits), $\vert \text{GHZ}\rangle = \frac{1}{\sqrt{2}} \left(|000\rangle + |111 \rangle \right)$, performs a measurement with 2000 shots, and returns the counts. <img src="ghz.png" width="300">
# If you want to track the state as it is evolving, you could use the circuit widget from above for three qubits, i.e., `minicomposer(3, dirac=True, qsphere=True)`. For how to get the counts of a measurement, look at the code in *Cell 9* and *Cell 10*.
# +
# Cell 12
#
#
# FILL YOUR CODE IN HERE
#
#
qc = QuantumCircuit(3,3)
qc.h(0)
qc.cx(0,1)
qc.cx(0,2)
state = statevec(qc) # determine final state after running the circuit
display(Math(vec_in_braket(state.data)))
qc.measure(0, 0) # we perform a measurement on qubit q_0 and store the information on the classical bit c_0
qc.measure(1, 1) # we perform a measurement on qubit q_1 and store the information on the classical bit c_1
qc.measure(2, 2)
qc.draw(output='mpl') # we draw the circuit
backend = Aer.get_backend('qasm_simulator') # we choose the simulator as our backend
result = execute(qc, backend, shots = 2000).result() # we run the simulation
counts = result.get_counts() # we get the counts
print(counts)
check8(counts)
plot_histogram(counts)
# -
# Congratulations for finishing this introduction to Qiskit! Once you've reached all 8 points, the solution string will be displayed. You need to copy and paste that string on the IBM Quantum Challenge page to complete the exercise and track your progress.
#
# Now that you have created and run your first quantum circuits, you are ready for the next exercise, where we will make use of the actual hardware and learn how to reduce the noise in the outputs.
| IBMquantumchallenge2020/Challenge1_BasicQuantumCircuits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import libraries
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
# load the train and test dataset
train = pd.read_csv("train_u6lujuX_CVtuZ9i.csv")
# make a copy of original data
train_original = train.copy()
# take a look at the top 5 rows of the train set, notice the column "Loan_Status"
train.head()
# show the shape of the dataset i.e. no of rows, no of columns
train.shape
# show the data types for each column of the train set
train.dtypes
# concise summary of the dataset, info about index dtype, column dtypes, non-null values and memory usage
train.info()
# ## Hypothesis Generation
### Data visualization: categorical features vs Loan_Status
categorical_columns = ['Gender', 'Married', 'Dependents', 'Education', 'Self_Employed', 'Property_Area','Credit_History','Loan_Amount_Term']
fig,axes = plt.subplots(4,2,figsize=(12,15))
for idx,cat_col in enumerate(categorical_columns):
row,col = idx//2,idx%2
sns.countplot(x=cat_col,data=train,hue='Loan_Status',ax=axes[row,col])
plt.subplots_adjust(hspace=1)
# +
train['Total_Income'] = train['ApplicantIncome'] + train['CoapplicantIncome']
bins = [0,2500,4000,6000,81000]
group = ['Very Low','Low','Average', 'High']
train['Total_Income_bin'] = pd.cut(train['Total_Income'],bins,labels=group)
Total_Income_bin = pd.crosstab(train['Total_Income_bin'],train['Loan_Status'])
Total_Income_bin.div(Total_Income_bin.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True)
plt.xlabel('Total_Income')
P = plt.ylabel('Percentage')
bins = [0,100,200,700]
group = ['Low','Average','High']
train['LoanAmount_bin'] = pd.cut(train['LoanAmount'],bins,labels=group)
LoanAmount_bin = pd.crosstab(train['LoanAmount_bin'],train['Loan_Status'])
LoanAmount_bin.div(LoanAmount_bin.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True)
plt.xlabel('LoanAmount')
P = plt.ylabel('Percentage')
# -
# ### Hypothesis generation from the above graphs
# - Men have a higher rate of receiving a loan than women. <br>
# - Married people have a better chance of getting a loan than unmarried people. <br>
# - As the number of dependents increases, the chance of getting a loan decreases. <br>
# - The higher the education level (i.e. graduate level), the higher the rate of loan approval. <br>
# - People with jobs have an easier time getting a loan approved than self-employed people. <br>
# - While the semiurban area has the highest approval rate, the rejection rate is comparatively the same across all three property-area categories. <br>
# - Applicants who have repaid their previous debts should have higher chances of loan approval. <br>
# - The majority of the loans taken are for 360 months. <br>
# - Applicants with a high income should have better chances of loan approval. <br>
# - The chances of loan approval should be higher when the loan amount is smaller. <br>
train= train.drop([ 'LoanAmount_bin', 'Total_Income_bin','Total_Income'], axis=1)
# replacing 3+ in Dependents variable with 3 for both train and test set
train['Dependents'].replace('3+', 3, inplace=True)
# replacing Y and N in Loan_Status variable with 1 and 0 respectively
train['Loan_Status'].replace('N', 0, inplace=True)
train['Loan_Status'].replace('Y', 1, inplace=True)
train.head()
# # Data Pre-processing
#
# ### Missing value imputation
# check for missing values
train.isnull().sum()
# +
def isNaN(value):
    # NaN is the only value that is not equal to itself
    return value != value
# Replace categorical missing values in train and test with the mode
train['Gender'] = train['Gender'].replace(np.nan, train['Gender'].mode()[0], regex=True)
train['Married'] = train['Married'].replace(np.nan, train['Married'].mode()[0], regex=True)
train['Dependents'] = train['Dependents'].replace(np.nan, train['Dependents'].mode()[0], regex=True)
train['Self_Employed'] = train['Self_Employed'].replace(np.nan, train['Self_Employed'].mode()[0], regex=True)
train['Credit_History'] = train['Credit_History'].replace(np.nan, train['Credit_History'].mode()[0], regex=True)
# -
train.isnull().sum()
# +
# Replace missing LoanAmount values with the median of the train set
median_loanAmount = train['LoanAmount'].median()
median_loanAmount

train['LoanAmount'] = train['LoanAmount'].fillna(median_loanAmount)

# Replace missing Loan_Amount_Term values with the most frequent term (360 months)
train['Loan_Amount_Term'].value_counts()
train['Loan_Amount_Term'].fillna(360, inplace=True)
# -
train.isnull().sum()
print(train.apply(lambda x: len(x.unique())))
# ## Feature Engineering
#
# Based on the domain knowledge, we can come up with new features that might affect the target variable. We will create the following three new features:
#
# - <div style="text-align: justify"> <b>Total Income</b> - As discussed during the bivariate analysis, we will combine the Applicant Income and Coapplicant Income. If the total income is high, the chances of loan approval might also be high. </div>
# - <div style="text-align: justify"> <b>Equated Monthly Installment</b> - EMI is the monthly amount to be paid by the applicant to repay the loan. The idea behind making this variable is that people who have high EMIs might find it difficult to pay back the loan. We can calculate the EMI as the ratio of the loan amount to the loan amount term. </div>
# - <div style="text-align: justify"> <b>Balance Income</b> - This is the income left after the EMI has been paid. The idea behind creating this variable is that if this value is high, the person is more likely to repay the loan, which increases the chances of loan approval. </div>
#calculate total income
train['Total_Income']=train['ApplicantIncome'] + train['CoapplicantIncome']
# create EMI feature
train['EMI'] = train['LoanAmount'] / train['Loan_Amount_Term']
# create new "Balance Income" variable
train['Balance Income'] = train['Total_Income'] - (train['EMI']*1000) # Multiply by 1000 to make the units equal
train.head()
# drop Loan_ID
train = train.drop(['Loan_ID','ApplicantIncome','CoapplicantIncome','Loan_Amount_Term'], axis=1)
# adding dummies to the dataset
train = pd.get_dummies(train)
train.head()
# drop "Loan_Status" and assign it to target variable
X = train.drop('Loan_Status', axis=1)
y = train.Loan_Status
# ## Split dataset into test and train
# split the data into train and cross validation set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,random_state=42)
# ## We will build the following models in this section.
#
# - Logistic Regression
# - Decision Tree
# - Random Forest
# - Naive Bayes
# - Support Vector Machine
# # Logistic Regression
model = LogisticRegression()
model.fit(X_train, y_train)
# make prediction
predictions = model.predict(X_test)
#accuracy
score = model.score(X_test, y_test)
print(score*100)
# +
cm = confusion_matrix(y_test, model.predict(X_test))
fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(cm)
ax.grid(False)
ax.xaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.yaxis.set(ticks=(0, 1), ticklabels=('Actual 0s', 'Actual 1s'))
ax.set_ylim(1.5, -0.5)
for i in range(2):
for j in range(2):
ax.text(j, i, cm[i, j], ha='center', va='center', color='red')
plt.show()
# -
# ### with K fold cross validation with 10 folds
model = LogisticRegression(random_state=1)
accuracy = cross_val_score(model, X, y, scoring='accuracy', cv = 10)
print(accuracy)
#get the mean of each fold
print("Accuracy of Model with Cross Validation is:",accuracy.mean() * 100)
# # Decision Tree
# import library
from sklearn import tree
model = tree.DecisionTreeClassifier(random_state=1)
model.fit(X_train, y_train)
# make prediction
predictions = model.predict(X_test)
#accuracy
score = model.score(X_test, y_test)
print(score*100)
# +
cm = confusion_matrix(y_test, model.predict(X_test))
fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(cm)
ax.grid(False)
ax.xaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.yaxis.set(ticks=(0, 1), ticklabels=('Actual 0s', 'Actual 1s'))
ax.set_ylim(1.5, -0.5)
for i in range(2):
for j in range(2):
ax.text(j, i, cm[i, j], ha='center', va='center', color='red')
plt.show()
# -
# ### with K fold cross validation with 10 folds
model = tree.DecisionTreeClassifier()
accuracy = cross_val_score(model, X, y, scoring='accuracy', cv = 10)
print(accuracy)
#get the mean of each fold
print("Accuracy of Model with Cross Validation is:",accuracy.mean() * 100)
# # Random Forest
# import library
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(random_state=1)
model.fit(X_train, y_train)
# make prediction
predictions = model.predict(X_test)
#accuracy
score = model.score(X_test, y_test)
print(score*100)
# +
cm = confusion_matrix(y_test, model.predict(X_test))
fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(cm)
ax.grid(False)
ax.xaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.yaxis.set(ticks=(0, 1), ticklabels=('Actual 0s', 'Actual 1s'))
ax.set_ylim(1.5, -0.5)
for i in range(2):
for j in range(2):
ax.text(j, i, cm[i, j], ha='center', va='center', color='red')
plt.show()
# -
# ### with K fold cross validation with 10 folds
model = RandomForestClassifier(random_state=42, max_depth=10, n_estimators=10)
accuracy = cross_val_score(model, X, y, scoring='accuracy', cv = 10)
print(accuracy)
#get the mean of each fold
print("Accuracy of Model with Cross Validation is:",accuracy.mean() * 100)
# # Naive Bayes
# import library
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X_train, y_train)
# make prediction
predictions = model.predict(X_test)
#accuracy
score = model.score(X_test, y_test)
print(score*100)
# +
cm = confusion_matrix(y_test, model.predict(X_test))
fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(cm)
ax.grid(False)
ax.xaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.yaxis.set(ticks=(0, 1), ticklabels=('Actual 0s', 'Actual 1s'))
ax.set_ylim(1.5, -0.5)
for i in range(2):
for j in range(2):
ax.text(j, i, cm[i, j], ha='center', va='center', color='red')
plt.show()
# -
# ### with K fold cross validation with 10 folds
model = GaussianNB()
accuracy = cross_val_score(model, X, y, scoring='accuracy', cv = 10)
print(accuracy)
#get the mean of each fold
print("Accuracy of Model with Cross Validation is:",accuracy.mean() * 100)
# # Support Vector Machine
# import library
from sklearn import svm
model = svm.SVC()
model.fit(X_train, y_train)
# make prediction
predictions = model.predict(X_test)
#accuracy
score = model.score(X_test, y_test)
print(score*100)
# +
cm = confusion_matrix(y_test, model.predict(X_test))
fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(cm)
ax.grid(False)
ax.xaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.yaxis.set(ticks=(0, 1), ticklabels=('Actual 0s', 'Actual 1s'))
ax.set_ylim(1.5, -0.5)
for i in range(2):
for j in range(2):
ax.text(j, i, cm[i, j], ha='center', va='center', color='red')
plt.show()
# -
# ### with K fold cross validation with 10 folds
model = svm.SVC()
accuracy = cross_val_score(model, X, y, scoring='accuracy', cv = 10)
print(accuracy)
#get the mean of each fold
print("Accuracy of Model with Cross Validation is:",accuracy.mean() * 100)
| LoanPrediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from kafka import KafkaConsumer
import matplotlib.pyplot as plt
import json
import threading
consumer = KafkaConsumer('bigDataMeetingOutput',
group_id='bigDataMeetingOutput',
bootstrap_servers=['localhost:9092'],
)
print("consumer started ...")
x = {}
def plot():
global x
for message in consumer:
x[json.loads((message.value).decode("utf-8"))["country"]] = json.loads((message.value).decode("utf-8"))["country_count"]
plot_thread = threading.Thread(target=plot)
plot_thread.start()
try:
    # plot the Big Data Meetup counts per country collected so far by the consumer thread
    fig = plt.figure(figsize = (12, 6))
    x = dict(sorted(x.items(), key=lambda item: item[1], reverse=True))
    plt.bar([*x.keys()][:], [*x.values()][:], width = 0.5)
    plt.xlabel("Country")
    plt.ylabel("No. of Big Data Meetup events")
    plt.title("Big Data Meetup events scheduled in each country")
    plt.show()
except:
    print(f"At least 10 data points are needed, but only {len(x)} are available so far")
| Source Code/bigDataMeetup/bigDataMeetupOutput.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Additional training functions
# [`train`](/train.html#train) provides a number of extension methods that are added to [`Learner`](/basic_train.html#Learner) (see below for a list and details), along with three simple callbacks:
#
# - [`ShowGraph`](/train.html#ShowGraph)
# - [`GradientClipping`](/train.html#GradientClipping)
# - [`BnFreeze`](/train.html#BnFreeze)
# + hide_input=true
from fastai.gen_doc.nbdoc import *
from fastai.train import *
from fastai.vision import *
from fastai import *
# -
# ## [`Learner`](/basic_train.html#Learner) extension methods
# These methods are automatically added to all [`Learner`](/basic_train.html#Learner) objects created after importing this module. They provide convenient access to a number of callbacks, without requiring them to be manually created.
# + hide_input=true
show_doc(fit_one_cycle)
# -
# Fit a model with 1cycle training. See [`OneCycleScheduler`](/callbacks.one_cycle.html#OneCycleScheduler) for details.
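# For example, with a `learn` object like the ones created in the examples below, a typical call might look like this (a sketch rather than an executed cell):
#
# ```python
# learn = create_cnn(data, models.resnet18, metrics=accuracy)
# learn.fit_one_cycle(3, max_lr=1e-3)
# ```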
# + hide_input=true
show_doc(lr_find)
# -
# See [`LRFinder`](/callbacks.lr_finder.html#LRFinder) for details.
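# A typical usage sketch (again assuming a `learn` object as in the examples below) runs the finder and then plots the recorded losses:
#
# ```python
# learn.lr_find()
# learn.recorder.plot()
# ```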
# + hide_input=true
show_doc(to_fp16)
# -
# See [`MixedPrecision`](/callbacks.fp16.html#MixedPrecision) for details.
# + hide_input=true
show_doc(mixup)
# -
# See [`MixUpCallback`](/callbacks.mixup.html#MixUpCallback) for more details.
# A last extension method comes from the module tta.
# + hide_input=true
show_doc(Learner.TTA, full_name='TTA')
# -
# Applies Test Time Augmentation to `learn` on the dataset `ds_type`. We take the average of our regular predictions (with a weight `beta`) with the average of predictions obtained through augmented versions of the training set (with a weight `1-beta`). The transforms decided for the training set are applied with a few changes: `scale` controls the scale for zoom (which isn't random), and the cropping isn't random but we make sure to get the four corners of the image. Flipping isn't random but applied once on each of those corner images (so that makes 8 augmented versions total).
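# For instance, predictions with TTA on the validation set could be obtained like this (a sketch; `learn` stands for a trained `Learner` such as the ones created below):
#
# ```python
# preds, targets = learn.TTA(beta=0.4, scale=1.35, ds_type=DatasetType.Valid)
# accuracy(preds, targets)
# ```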
# We'll show examples below using our MNIST sample.
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
# + hide_input=true
show_doc(ShowGraph)
# -
# ```python
# learn = create_cnn(data, models.resnet18, metrics=accuracy, callback_fns=ShowGraph)
# learn.fit(3)
# ```
# 
# + hide_input=true
show_doc(ShowGraph.on_epoch_end, doc_string=False)
# -
# If we have `last_metrics`, plot them in `self.pbar`. Set the size of the graph with `n_epochs`.
# + hide_input=true
show_doc(GradientClipping)
# -
# Clips gradient at a maximum absolute value of `clip` during training. For instance:
learn = create_cnn(data, models.resnet18, metrics=accuracy,
callback_fns=partial(GradientClipping, clip=0.1))
learn.fit(1)
# + hide_input=true
show_doc(GradientClipping.on_backward_end, doc_string=False)
# -
# Clip the gradients after they are computed but before the optimizer step.
# + hide_input=true
show_doc(BnFreeze)
# -
# For batchnorm layers where `requires_grad==False`, you generally don't want to update their moving average statistics, in order to avoid the model's statistics getting out of sync with its pre-trained weights. You can add this callback to automate this freezing of statistics (internally, it calls `eval` on these layers).
learn = create_cnn(data, models.resnet18, metrics=accuracy, callback_fns=BnFreeze)
learn.fit(1)
# + hide_input=true
show_doc(BnFreeze.on_epoch_begin, doc_string=False)
# -
# Put the batchnorm layers back in `eval` mode after the model has been set to [`train`](/train.html#train).
# ## Undocumented Methods - Methods moved below this line will intentionally be hidden
# + hide_input=true
show_doc(one_cycle_scheduler)
| docs_src/train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import libraries here; add more as necessary
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -
# load the 2019 survey results and take a first look at the data
df2019 = pd.read_csv('./2019survey_results_public.csv', header = 0)
df2019.head()
df2019.describe()
df2019.hist()
sns.heatmap(df2019.corr(), annot = True, fmt = '.2f')
df2019_rows, df2019_cols = df2019.shape
df2019_col_names = df2019.columns
print(df2019_col_names)
# +
# load the survey schema (question texts); it is used further below to print the full questions
schema2019 = pd.read_csv('./2019survey_results_schema.csv', header = 0)
schema2019.head()
# -
print(df2019.shape)
# + jupyter={"outputs_hidden": true}
for col in df2019.columns:
print(df2019[col].name, df2019[col].unique())
print('-----------------------------------------------------')
# -
def assess_missing_col(df):
df_rows, df_cols = df.shape
missing_num = pd.Series(df.isnull().sum(), name = 'Number of Missing')
#all columns
missing_per = pd.Series(missing_num/(df_rows)*100, name = '% NaN Missing')
#only columns with missing data
missing_data = pd.Series(missing_num[missing_num > 0]/df_rows*100, name = '% NaN Missing')
missing_data.sort_values(inplace = True)
print(missing_data)
plt.hist(missing_data, bins = 50)
plt.xlabel('Nan % in a column (%)')
plt.ylabel('Counts')
#plt.title('Histogram of missing value counts for each column')
plt.grid(True)
plt.minorticks_on()
plt.grid(b=True, which='minor', alpha=0.2)
plt.show()
return missing_data, plt.show()
missing_data, plot = assess_missing_col(df2019)
#Investigate missing data using different thresholds of %NaN missing.
def investigate_nan_threshold(df, interval, start):
'''
This function finds how many columns have more than a certain percentage
of data missing.
    INPUTS:
    df - the DataFrame to analyze
    start - the initial threshold percentage of data missing to be analyzed
    interval - the amount of increase in the analysis threshold
                if the previous threshold has at least 1 column remaining
OUTPUTS:
Prints the names of the columns that have more than the threshold % of
data missing as well as the current threshold.
'''
n = start
df_rows, df_cols = df.shape
missing_list = [1]
while len(missing_list) > 0:
missing_list = [col for col in df.columns if (df[col].isnull().sum()/df_rows)*100 > n]
if len(missing_list) > 0:
print('There are {} columns with more than {}% of data missing.'.format(len(missing_list), n))
print(missing_list)
print('--------------------------------------')
n = n+interval
else:
break
# + active=""
# investigate_nan_threshold(df2019, 10, 5)
# -
#Visualize all columns.
missing_data.plot(kind='barh', figsize = (7,15))
plt.xlabel('Nan % in a column (%)')
plt.ylabel('Feature')
#plt.title('Bar graph of missing value counts')
plt.grid(True)
plt.show()
#function for dropping all columns above a certain % threshold and
#returns it as a new df called df_dropped
def drop_missing_cols(df, threshold):
most_missing_cols = list(df.columns[df.isnull().mean()*100 > threshold])
df_dropped = df.copy()
for col in most_missing_cols:
df_dropped.drop(col, axis = 1, inplace = True)
return df_dropped
df2019_dropped = drop_missing_cols(df2019, 40)
df2019_dropped.head()
#currently useless
df2019_dropped['JobSat'].isnull().mean()
df2019['ConvertedComp'].hist(bins = 50, figsize = (10, 6))
# +
df2019_convertedcomp = df2019.groupby(['ConvertedComp']).mean()
#ConvertedComp 55823 non-null float64
#WorkWeekHrs
print(df2019_convertedcomp)
# -
# note: .mean() only aggregates the numerical columns
df2019.groupby(['Hobbyist']).mean()['JobSat']
df2019.groupby(['Hobbyist']).mean()['YearsCode'].sort_values()
#only works for simple, mutually exclusive categories
def evaluate_col(df, col, plot_type):
col_num = df[df[col].isnull() == 0].shape[0]
col_vals = pd.Series(df[col].value_counts())
print(col_vals)
(col_vals/col_num*100).plot(kind = plot_type)
evaluate_col(df2019, 'UndergradMajor', 'bar')
def eval_complex_col(df, col, plot_type):
col_num = df[df[col].isnull() == 0].shape[0]
col_df = df[col].value_counts().reset_index()
col_df.rename(columns={'index': col, col:'count'}, inplace = True)
col_series = pd.Series(col_df[col].unique()).dropna()
clean_list = col_series.str.split(pat = ';').tolist()
flat_list = []
for sublist in clean_list:
for item in sublist:
flat_list.append(item)
clean_series = pd.DataFrame(flat_list)
col_vals = clean_series[0].unique()
cat_count = clean_series[0].value_counts()
print('Unique Categories: ', col_vals)
print(cat_count)
(cat_count/col_num*100).plot(kind = plot_type, figsize = (7,10))
plt.xlabel('Proportion (%)')
plt.ylabel(col)
plt.grid(True)
plt.show()
eval_complex_col(df2019, 'LanguageWorkedWith', 'bar')
eval_complex_col(df2019, 'PlatformWorkedWith', 'bar')
eval_complex_col(df2019, 'LastInt', 'bar')
# eval_complex_col(df2019, '', 'bar')  # TODO: fill in a column name before running this call
evaluate_col(df2019, 'SocialMedia', 'bar')
evaluate_col(df2019, 'EdLevel', 'bar')
evaluate_col(df2019, 'Gender', 'bar')
evaluate_col(df2019, 'CareerSat', 'bar')
evaluate_col(df2019, 'JobSat', 'bar')
evaluate_col(df2019, 'Ethnicity', 'pie')
evaluate_col(df2019, 'WorkWeekHrs', 'hist')
eval_complex_col(df2019, 'WebFrameWorkedWith', 'bar')
eval_complex_col(df2019, 'MiscTechWorkedWith', 'bar')
evaluate_col(df2019, 'Student', 'pie')
eval_complex_col(df2019, 'DevType', 'bar')
# # Questions
# Is educational level related to salary?
# What are the biggest factors relating to salary?
#
# What languages, platforms, etc. are people using?
# What languages are people likely to learn together?
#
#
#
# # Variables of interest:
# ConvertedComp - annual compensation
# WorkWeekHrs - hours/week worked
# LanguageWorkedWith
# DatabaseWorkedWith
# PlatformWorkedWith
# WebFrameWorkedWith
# MiscTechWorkedWith
# DevEnviron
# OpSys
#
# LastInt - "In your most recent successful job interview (resulting in a job offer), you were asked to... (check all that apply)"
#
# JobSat
# CareerSat
# YearsCodePro - How many years have you coded professionally (as a part of your work)?
# DevType
# OrgSize
# EduOther
# UndergradMajor
# EdLevel
# Country
# Age
# Gender
# Ethnicity
#
for x in schema2019['QuestionText']:
print(x)
# Randomized respondent ID number (not in order of survey response time)
# Which of the following options best describes you today? Here, by "developer" we mean "someone who writes code."
# Do you code as a hobby?
# How often do you contribute to open source?
# How do you feel about the quality of open source software (OSS)?
#
#
# Which of the following best describes your current employment status?
# In which country do you currently reside?
#
#
# Are you currently enrolled in a formal, degree-granting college or university program?
# Which of the following best describes the highest level of formal education that you’ve completed?
#
#
# What was your main or most important field of study?
# Which of the following types of non-degree education have you used or participated in? Please select all that apply.
# Approximately how many people are employed by the company or organization you work for?
# Which of the following describe you? Please select all that apply.
# Including any education, how many years have you been coding?
#
# At what age did you write your first line of code or program? (E.g., webpage, Hello World, Scratch project)
# How many years have you coded professionally (as a part of your work)?
#
#
# Overall, how satisfied are you with your career thus far?
# How satisfied are you with your current job? (If you work multiple jobs, answer for the one you spend the most hours on.)
#
#
# How confident are you that your manager knows what they’re doing?
# Do you believe that you need to be a manager to make more money?
# Do you want to become a manager yourself in the future?
#
#
# Which of the following best describes your current job-seeking status?
# When was the last time that you took a job with a new employer?
# In your most recent successful job interview (resulting in a job offer), you were asked to... (check all that apply)
# Have you ever been asked to solve FizzBuzz in an interview?
# Imagine that you are deciding between two job offers with the same compensation, benefits, and location. Of the following factors, which 3 are MOST important to you?
# Think back to the last time you updated your resumé, CV, or an online profile on a job site. What is the PRIMARY reason that you did so?
#
#
# Which currency do you use day-to-day? If your answer is complicated, please pick the one you're most comfortable estimating in.
# Which currency do you use day-to-day? If your answer is complicated, please pick the one you're most comfortable estimating in.
#
# What is your current total compensation (salary, bonuses, and perks, before taxes and deductions), in `CurrencySymbol`? Please enter a whole number in the box below, without any punctuation. If you are paid hourly, please estimate an equivalent weekly, monthly, or yearly salary. If you prefer not to answer, please leave the box empty.
# Is that compensation weekly, monthly, or yearly?
#
# Salary converted to annual USD salaries using the exchange rate on 2019-02-01, assuming 12 working months and 50 working weeks.
#
#
# On average, how many hours per week do you work?
# How structured or planned is your work?
#
#
# Of these options, what are your greatest challenges to productivity as a developer? Select up to 3:
# How often do you work remotely?
# Where would you prefer to work?
# For the specific work you do, and the years of experience you have, how do you rate your own level of competence?
# Do you review code as part of your work?
# On average, how many hours per week do you spend on code review?
#
#
#
# Does your company regularly employ unit tests in the development of their products?
# How does your company make decisions about purchasing new technology (cloud, AI, IoT, databases)?
# What level of influence do you, personally, have over new technology purchases at your organization?
#
#
# Which of the following programming, scripting, and markup languages have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the language and want to continue to do so, please check both boxes in that row.)
# Which of the following programming, scripting, and markup languages have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the language and want to continue to do so, please check both boxes in that row.)
# Which of the following database environments have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the database and want to continue to do so, please check both boxes in that row.)
# Which of the following database environments have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the database and want to continue to do so, please check both boxes in that row.)
# Which of the following platforms have you done extensive development work for over the past year? (If you both developed for the platform and want to continue to do so, please check both boxes in that row.)
# Which of the following platforms have you done extensive development work for over the past year? (If you both developed for the platform and want to continue to do so, please check both boxes in that row.)
# Which of the following web frameworks have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the framework and want to continue to do so, please check both boxes in that row.)
# Which of the following web frameworks have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the framework and want to continue to do so, please check both boxes in that row.)
# Which of the following other frameworks, libraries, and tools have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the technology and want to continue to do so, please check both boxes in that row.)
# Which of the following other frameworks, libraries, and tools have you done extensive development work in over the past year, and which do you want to work in over the next year? (If you both worked with the technology and want to continue to do so, please check both boxes in that row.)
# Which development environment(s) do you use regularly? Please check all that apply.
# What is the primary operating system in which you work?
#
#
# How do you use containers (Docker, Open Container Initiative (OCI), etc.)?
# How is your organization thinking about or implementing blockchain technology?
# Blockchain / cryptocurrency technology is primarily:
#
#
# Do you think people born today will have a better life than their parents?
# Are you the "IT support person" for your family?
#
#
# Have you tried turning it off and on again?
# What social media site do you use the most?
# Do you prefer online chat or IRL conversations?
# What do you call it?
#
#
# To the best of your memory, when did you first visit Stack Overflow?
# How frequently would you say you visit Stack Overflow?
# I visit Stack Overflow to... (check all that apply)
# On average, how many times a week do you find (and use) an answer on Stack Overflow?
# Think back to the last time you solved a coding problem using Stack Overflow, as well as the last time you solved a problem using a different resource. Which was faster?
# About how much time did you save? If you're not sure, please use your best estimate.
#
# Do you have a Stack Overflow account?
# How frequently would you say you participate in Q&A on Stack Overflow? By participate we mean ask, answer, vote for, or comment on questions.
# Have you ever used or visited Stack Overflow Jobs?
# Have you ever used Stack Overflow for Enterprise or Stack Overflow for Teams?
# Do you consider yourself a member of the Stack Overflow community?
# Compared to last year, how welcome do you feel on Stack Overflow?
# Would you like to see any of the following on Stack Overflow? Check all that apply.
#
#
# What is your age (in years)? If you prefer not to answer, you may leave this question blank.
# Which of the following do you currently identify as? Please select all that apply. If you prefer not to answer, you may leave this question blank.
# Do you identify as transgender?
# Which of the following do you currently identify as? Please select all that apply. If you prefer not to answer, you may leave this question blank.
# Which of the following do you identify as? Please check all that apply. If you prefer not to answer, you may leave this question blank.
# Do you have any dependents (e.g., children, elders, or others) that you care for?
#
| .ipynb_checkpoints/SO_survey CRISP-DM-Copy1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["header"]
# <table width="100%">
# <tr style="border-bottom:solid 2pt #009EE3">
# <td style="text-align:left" width="10%">
# <a href="sampling_rate_and_aliasing.dwipynb" download><img src="../../images/icons/download.png"></a>
# </td>
# <td style="text-align:left" width="10%">
# <a href="https://mybinder.org/v2/gh/biosignalsnotebooks/biosignalsnotebooks/master?filepath=header_footer%2Fbiosignalsnotebooks_environment%2Fcategories%2FRecord%2Fsampling_rate_and_aliasing.dwipynb" target="_blank"><img src="../../images/icons/program.png" title="Be creative and test your solutions !"></a>
# </td>
# <td></td>
# <td style="text-align:left" width="5%">
# <a href="../MainFiles/biosignalsnotebooks.ipynb"><img src="../../images/icons/home.png"></a>
# </td>
# <td style="text-align:left" width="5%">
# <a href="../MainFiles/contacts.ipynb"><img src="../../images/icons/contacts.png"></a>
# </td>
# <td style="text-align:left" width="5%">
# <a href="https://github.com/biosignalsnotebooks/biosignalsnotebooks" target="_blank"><img src="../../images/icons/github.png"></a>
# </td>
# <td style="border-left:solid 2pt #009EE3" width="15%">
# <img src="../../images/ost_logo.png">
# </td>
# </tr>
# </table>
# + [markdown] tags=["intro_info_title"]
# <link rel="stylesheet" href="../../styles/theme_style.css">
# <!--link rel="stylesheet" href="../../styles/header_style.css"-->
# <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
#
# <table width="100%">
# <tr>
# <td id="image_td" width="15%" class="header_image_color_2"><div id="image_img"
# class="header_image_2"></div></td>
# <td class="header_text">Problems of low sampling rate (aliasing)</td>
# </tr>
# </table>
# + [markdown] tags=["intro_info_tags"]
# <div id="flex-container">
# <div id="diff_level" class="flex-item">
# <strong>Difficulty Level:</strong> <span class="fa fa-star checked"></span>
# <span class="fa fa-star"></span>
# <span class="fa fa-star"></span>
# <span class="fa fa-star"></span>
# <span class="fa fa-star"></span>
# </div>
# <div id="tag" class="flex-item-tag">
# <span id="tag_list">
# <table id="tag_list_table">
# <tr>
# <td class="shield_left">Tags</td>
# <td class="shield_right" id="tags">record☁sampling rate☁problems</td>
# </tr>
# </table>
# </span>
# <!-- [OR] Visit https://img.shields.io in order to create a tag badge-->
# </div>
# </div>
# + [markdown] tags=["test"]
# All data needs to be acquired before researchers can start their processing stage.
#
# The success of the processing stage is deeply dependent on the quality of acquisition. For example, if the chosen sampling rate <a href="https://www.webopedia.com/TERM/S/sampling_rate.html" target="_blank"><img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> is not adequate for collecting a specific electrophysiological signal, we may have an aliasing <a href="https://en.wikipedia.org/wiki/Aliasing" target="_blank"><img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> problem, which makes it impossible to extract knowledge.
#
# According to the Nyquist Theorem <a href="https://en.wikipedia.org/wiki/Nyquist%2DShannon_sampling_theorem" target="_blank"><img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>, to ensure that each signal (EMG, ECG, EDA...) is acquired correctly (avoiding aliasing), the sampling rate should be at least double the maximum frequency component present in the signal; this threshold is known as the "Nyquist Rate" <a href="https://en.wikipedia.org/wiki/Nyquist_rate" target="_blank"><img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>.
#
# This maximum frequency corresponds to the last power spectrum component (after application of the Fourier Transform) with relevant information.
#
# In the following steps it will be demonstrated how the sampling rate choice affects the signal morphology.
#
# -
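# + [markdown] tags=[]
# A short simulation makes this effect easy to see before working with real acquisitions. The sketch below (an illustration only, using a simulated sine wave and matplotlib instead of the <i>Plux</i> acquisition files of the next steps) samples a 50 Hz sine wave at 1000 Hz and at 60 Hz; since 60 Hz is below the 100 Hz Nyquist Rate of this signal, the second set of samples describes a much slower, aliased oscillation.
# +
# Aliasing demonstration on a simulated 50 Hz sine wave
from numpy import arange, sin, pi
import matplotlib.pyplot as plt

signal_freq = 50  # [Hz] frequency of the simulated signal
duration = 0.2  # [s] duration of the simulated acquisition

for sample_rate in [1000, 60]:  # adequate sampling rate vs. sampling rate below the Nyquist Rate
    time = arange(0, duration, 1 / sample_rate)
    plt.plot(time, sin(2 * pi * signal_freq * time), marker="o", label=str(sample_rate) + " Hz")

plt.xlabel("Time (s)")
plt.ylabel("Amplitude")
plt.legend()
plt.show()
# -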
# <hr>
# + [markdown] tags=[]
# <p class="steps">0 - The available sampling rates of <i>Plux</i> acquisition systems lie between 10 and 4000 Hz, according to the firmware version</p>
# <img src="../../images/acquire/sampling_freqs.png">
# + [markdown] tags=[]
# In the following images we can see the effect of sampling rate choice. ECG acquisitions at 10, 100 and 1000 Hz have been done.
# + [markdown] tags=[]
# <p class="steps">1 - Evolution of the ECG acquisition according to the chosen sampling rate (<i>Available values on Plux devices</i>)</p>
# *As demonstrated in the following figure, for this range of sampling rate values, the differences in the signal morphology can be easily observed.*
# * For the acquisition done at 10 Hz, we can't observe the ECG complexes or the periodic behavior of the cardiac signal.
# * With an acquisition at 100 Hz, the aliasing problem is solved and all ECG structures are easily seen.
# * Finally, with 1000 Hz, the differences in relation to the 100 Hz acquisition are harder to see, but, for example, the R peak is more "impulsive-like".
# + tags=["hide_in"]
# OpenSignals Tools own package for loading and plotting the acquired data
import biosignalsnotebooks as bsnb
import biosignalsnotebooks.signal_samples as bsnb_ss
# Scientific package/function for interpolation purposes
from numpy import linspace
# Load of data
# [10 Hz]
data_10_hz = bsnb_ss.load_signal("ecg_20_sec_10_Hz")
# [100 Hz]
data_100_hz = bsnb_ss.load_signal("ecg_20_sec_100_Hz")
# [1000 Hz]
data_1000_hz = bsnb_ss.load_signal("ecg_20_sec_1000_Hz")
# The used device and channel is the same for the three acquisitions
mac_address = list(data_10_hz.keys())[0]
channel = list(data_10_hz[mac_address].keys())[0]
# Dictionary where the acquired data from the three acquisitions will be stored together
data_dict = {"10": {"data": data_10_hz[mac_address][channel]}, "100": {"data": data_100_hz[mac_address][channel]},
"1000": {"data": data_1000_hz[mac_address][channel]}}
# ========================== Generation of time axis in accordance with the sampling rate ======================================
# sample_rate in [10, 100, 1000] - Some of the available sample frequencies at Plux acquisition systems
for sample_rate in [1000, 100, 10]:
sample_rate_str = str(sample_rate)
nbr_samples = len(data_dict[sample_rate_str]["data"])
data_dict[sample_rate_str]["time"] = linspace(0, nbr_samples / sample_rate, nbr_samples)
bsnb.plot_sample_rate_compare(data_dict)
# -
# <i>For accessing the code instructions responsible for generating the previous figure,</i> <strong><span class="color2">biosignalsnotebooks</span></strong> <i>users can navigate through the invoked function by clicking on the "Download" button located in Notebook header or by exploring the Python implementation on our Python package <a href="https://pypi.org/project/biosignalsnotebooks/" target="_blank"><img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a></i>
# + [markdown] tags=[]
# Each electrophysiological signal has a characteristic Nyquist Rate, based on the highest informational component (the upper limit of signals pass-band).
#
# These reference values may be found in <a href="http://www.biosignalsplux.com/en/products/sensors" target="_blank">Sensors Page <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> and in the particular case of ECG the bandwidth is presented in <a href="http://www.biosignalsplux.com/en/ecg-electrocardiogram" target="_blank">ECG Sensor Page <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> (as demonstrated in the following screenshot).
# + [markdown] tags=[]
# <img src="../../images/acquire/signal_bandwidth.gif">
# + [markdown] tags=[]
# The ECG Nyquist rate will therefore be near 200 Hz, i.e. twice the upper limit of the signal bandwidth (100 Hz).
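# + [markdown] tags=[]
# *A minimal sketch (not from the original notebook), assuming an illustrative list of device sampling rates: the Nyquist rate follows directly from the 100 Hz upper limit of the ECG pass-band, and only rates at or above it avoid aliasing.*
# + tags=["hide_in"]
ecg_bandwidth_hz = 100  # upper limit of the ECG sensor pass-band (Hz)
nyquist_rate_hz = 2 * ecg_bandwidth_hz  # minimum sampling rate that avoids aliasing
available_rates_hz = [10, 100, 1000, 4000]  # illustrative subset of Plux sampling rates
adequate_rates = [rate for rate in available_rates_hz if rate >= nyquist_rate_hz]
print("Nyquist rate:", nyquist_rate_hz, "Hz | adequate device rates:", adequate_rates)
# -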
# + [markdown] tags=["footer"]
# <hr>
# <table width="100%">
# <tr>
# <td style="border-right:solid 3px #009EE3" width="20%">
# <img src="../../images/ost_logo.png">
# </td>
# <td width="40%" style="text-align:left">
# <a href="../MainFiles/aux_files/biosignalsnotebooks_presentation.pdf" target="_blank">☌ Project Presentation</a>
# <br>
# <a href="https://github.com/biosignalsnotebooks/biosignalsnotebooks" target="_blank">☌ GitHub Repository</a>
# <br>
# <a href="https://pypi.org/project/biosignalsnotebooks/" target="_blank">☌ How to install biosignalsnotebooks Python package ?</a>
# <br>
# <a href="../MainFiles/signal_samples.ipynb">☌ Signal Library</a>
# </td>
# <td width="40%" style="text-align:left">
# <a href="../MainFiles/biosignalsnotebooks.ipynb">☌ Notebook Categories</a>
# <br>
# <a href="../MainFiles/by_diff.ipynb">☌ Notebooks by Difficulty</a>
# <br>
# <a href="../MainFiles/by_signal_type.ipynb">☌ Notebooks by Signal Type</a>
# <br>
# <a href="../MainFiles/by_tag.ipynb">☌ Notebooks by Tag</a>
# </td>
# </tr>
# </table>
# + tags=["hide_both"]
from biosignalsnotebooks.__notebook_support__ import css_style_apply
css_style_apply()
# + tags=["hide_both"] language="html"
# <script>
# // AUTORUN ALL CELLS ON NOTEBOOK-LOAD!
# require(
# ['base/js/namespace', 'jquery'],
# function(jupyter, $) {
# $(jupyter.events).on("kernel_ready.Kernel", function () {
# console.log("Auto-running all cells-below...");
# jupyter.actions.call('jupyter-notebook:run-all-cells-below');
# jupyter.actions.call('jupyter-notebook:save-notebook');
# });
# }
# );
# </script>
| notebookToHtml/biosignalsnotebooks_html/Categories/Record/sampling_rate_and_aliasing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + dc={"key": "4"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 1. Bitcoin and Cryptocurrencies: Full dataset, filtering, and reproducibility
# <p>Since the <a href="https://newfronttest.bitcoin.com/bitcoin.pdf">launch of Bitcoin in 2008</a>, hundreds of similar projects based on the blockchain technology have emerged. We call these cryptocurrencies (also coins or cryptos in the Internet slang). Some are extremely valuable nowadays, and others may have the potential to become extremely valuable in the future<sup>1</sup>. In fact, on the 6th of December of 2017, Bitcoin had a <a href="https://en.wikipedia.org/wiki/Market_capitalization">market capitalization</a> above $200 billion. </p>
# <p><center>
# <img src="https://assets.datacamp.com/production/project_82/img/bitcoint_market_cap_2017.png" style="width:500px"> <br>
# <em>The astonishing increase of Bitcoin market capitalization in 2017.</em></center></p>
# <p>*<sup>1</sup> <strong>WARNING</strong>: The cryptocurrency market is exceptionally volatile<sup>2</sup> and any money you put in might disappear into thin air. Cryptocurrencies mentioned here <strong>might be scams</strong> similar to <a href="https://en.wikipedia.org/wiki/Ponzi_scheme">Ponzi Schemes</a> or have many other issues (overvaluation, technical, etc.). <strong>Please do not mistake this for investment advice</strong>. *</p>
# <p><em><sup>2</sup> <strong>Update on March 2020</strong>: Well, it turned out to be volatile indeed :D</em></p>
# <p>That said, let's get to business. We will start with a CSV we conveniently downloaded on the 6th of December of 2017 using the coinmarketcap API (NOTE: The public API went private in 2020 and is no longer available) named <code>datasets/coinmarketcap_06122017.csv</code>. </p>
# + dc={"key": "4"} tags=["sample_code"]
# Importing pandas
import pandas as pd
# Importing matplotlib and setting aesthetics for plotting later.
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
plt.style.use('fivethirtyeight')
# Reading datasets/coinmarketcap_06122017.csv into pandas
df = pd.read_csv("datasets/coinmarketcap_06122017.csv")
# Selecting the 'id' and the 'market_cap_usd' columns
market_cap_raw = df[['id','market_cap_usd']]
# -
df
market_cap_raw.count()
df2021 = pd.read_csv("bitcoin.csv")
df2021
# + dc={"key": "11"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 2. Discard the cryptocurrencies without a market capitalization
# <p>Why do the <code>count()</code> for <code>id</code> and <code>market_cap_usd</code> differ above? It is because some cryptocurrencies listed in coinmarketcap.com have no known market capitalization; this is represented by <code>NaN</code> in the data, and <code>NaN</code>s are not counted by <code>count()</code>. These cryptocurrencies are of little interest to us in this analysis, so they are safe to remove.</p>
# + dc={"key": "11"} tags=["sample_code"]
# Filtering out rows without a market capitalization
cap = market_cap_raw.dropna()
# -
cap.count()
cap
# + dc={"key": "18"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 3. How big is Bitcoin compared with the rest of the cryptocurrencies?
# <p>At the time of writing, Bitcoin is under serious competition from other projects, but it is still dominant in market capitalization. Let's plot the market capitalization for the top 10 coins as a barplot to better visualize this.</p>
# + dc={"key": "18"} tags=["sample_code"]
#Declaring these now for later use in the plots
TOP_CAP_TITLE = 'Top 10 market capitalization'
TOP_CAP_YLABEL = '% of total cap'
# Selecting the first 10 rows and setting the index
cap10 = cap.head(10).set_index(cap.id[:10])
# Calculating market_cap_perc
cap10 = cap10.assign(market_cap_perc =
lambda x: (x.market_cap_usd / cap.market_cap_usd.sum()) * 100)
# Plotting the barplot with the title defined above
ax = cap10.plot.bar(x = 'id', y = 'market_cap_perc', title = TOP_CAP_TITLE)
# Annotating the y axis with the label defined above
ax.set_ylabel(TOP_CAP_YLABEL)
# + dc={"key": "25"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 4. Making the plot easier to read and more informative
# <p>While the plot above is informative enough, it can be improved. Bitcoin is too big, and the other coins are hard to distinguish because of this. Instead of the percentage, let's use a log<sub>10</sub> scale of the "raw" capitalization. Plus, let's use color to group similar coins and make the plot more informative<sup>1</sup>. </p>
# <p>For the colors rationale: bitcoin-cash and bitcoin-gold are forks of the bitcoin <a href="https://en.wikipedia.org/wiki/Blockchain">blockchain</a><sup>2</sup>. Ethereum and Cardano both offer Turing Complete <a href="https://en.wikipedia.org/wiki/Smart_contract">smart contracts</a>. Iota and Ripple are not minable. Dash, Litecoin, and Monero get their own color.</p>
# <p><sup>1</sup> <em>This coloring is a simplification. There are more differences and similarities that are not being represented here.</em></p>
# <p><sup>2</sup> <em>The bitcoin forks are actually <strong>very</strong> different, but it is out of scope to talk about them here. Please see the warning above and do your own research.</em></p>
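# *(This cell is not part of the original project.)* Below is a minimal sketch of the plot described above: a log-scale bar plot of the raw market capitalization for the top 10 coins, with per-bar colors grouping similar projects. The COLORS list is an illustrative assumption; the cell reuses the cap10 DataFrame and TOP_CAP_TITLE defined in task 3.
# +
# Illustrative colors grouping similar projects (bitcoin forks, smart-contract platforms, etc.)
COLORS = ['orange', 'green', 'orange', 'cyan', 'cyan', 'blue', 'silver', 'orange', 'red', 'green']
# Bar plot of the raw capitalization on a log10 y-axis
ax = plt.subplot()
ax.bar(cap10.id, cap10.market_cap_usd, color=COLORS)
ax.set_yscale('log')
ax.set_title(TOP_CAP_TITLE)
ax.set_ylabel('USD')
ax.set_xlabel('')
# -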
# + dc={"key": "32"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 5. What is going on?! Volatility in cryptocurrencies
# <p>The cryptocurrencies market has been spectacularly volatile since the first exchange opened. This notebook didn't start with a big, bold warning for nothing. Let's explore this volatility a bit more! We will begin by selecting and plotting the 24 hours and 7 days percentage change, which we already have available.</p>
# + dc={"key": "32"} tags=["sample_code"]
# Selecting the id, percent_change_24h and percent_change_7d columns
volatility = df[['id', 'percent_change_24h', 'percent_change_7d']]
# Setting the index to 'id' and dropping all NaN rows
volatility = volatility.set_index('id').dropna()
# Sorting the DataFrame by percent_change_24h in ascending order
volatility = volatility.sort_values('percent_change_24h', ascending = True)
# Checking the first few rows
volatility.head()
# + dc={"key": "39"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 6. Well, we can already see that things are *a bit* crazy
# <p>It seems you can lose a lot of money quickly on cryptocurrencies. Let's plot the top 10 biggest gainers and top 10 losers in market capitalization.</p>
# + dc={"key": "39"} tags=["sample_code"]
#Defining a function with 2 parameters, the series to plot and the title
def top10_subplot(volatility_series, title):
# Making the subplot and the figure for two side by side plots
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 6))
# Plotting with pandas the barchart for the top 10 losers
ax = (volatility_series[:10].plot.bar(color = 'darkred', ax = axes[0]))
# Setting the figure's main title to the text passed as parameter
fig.suptitle(title)
# Setting the ylabel to '% change'
ax.set_ylabel('% change')
# Same as above, but for the top 10 winners
ax = (volatility_series[-10:].plot.bar(color = 'darkblue', ax = axes[1]))
# Returning this for good practice, might use later
return fig, ax
DTITLE = "24 hours top losers and winners"
# Calling the function above with the 24 hours period series and title DTITLE
fig, ax = top10_subplot(volatility.percent_change_24h, DTITLE)
# + dc={"key": "46"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 7. Ok, those are... interesting. Let's check the weekly Series too.
# <p>800% daily increase?! Why are we doing this tutorial and not buying random coins?<sup>1</sup></p>
# <p>After calming down, let's reuse the function defined above to see what is going on weekly instead of daily.</p>
# <p><em><sup>1</sup> Please take a moment to understand the implications of the red plots on how much value some cryptocurrencies lose in such short periods of time</em></p>
# + dc={"key": "46"} tags=["sample_code"]
# Sorting in ascending order
volatility7d = volatility.sort_values('percent_change_7d', ascending = True)
WTITLE = "Weekly top losers and winners"
# Calling the top10_subplot function
fig, ax = top10_subplot(volatility7d.percent_change_7d, WTITLE)
# + dc={"key": "53"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 8. How small is small?
# <p>The names of the cryptocurrencies above are quite unknown, and there is a considerable fluctuation between the 1 and 7 days percentage changes. As with stocks, and many other financial products, the smaller the capitalization, the bigger the risk and reward. Smaller cryptocurrencies are less stable projects in general, and therefore even riskier investments than the bigger ones<sup>1</sup>. Let's classify our dataset based on Investopedia's capitalization <a href="https://www.investopedia.com/video/play/large-cap/">definitions</a> for company stocks. </p>
# <p><sup>1</sup> <em>Cryptocurrencies are a new asset class, so they are not directly comparable to stocks. Furthermore, there are no limits set in stone for what a "small" or "large" stock is. Finally, some investors argue that bitcoin is similar to gold, this would make them more comparable to a <a href="https://www.investopedia.com/terms/c/commodity.asp">commodity</a> instead.</em></p>
# + dc={"key": "53"} tags=["sample_code"]
# Selecting everything bigger than 10 billion
largecaps = cap.query('market_cap_usd > 10000000000')
# Printing out largecaps
largecaps
# + dc={"key": "60"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 9. Most coins are tiny
# <p>Note that many coins are not comparable to large companies in market cap, so let's deviate from the original Investopedia definition by merging categories.</p>
# <p><em>This is all for now. Thanks for completing this project!</em></p>
# + dc={"key": "60"} tags=["sample_code"]
# Making a nice function for counting different marketcaps from the
# "cap" DataFrame. Returns an int.
# INSTRUCTORS NOTE: Since you made it to the end, consider it a gift :D
def capcount(query_string):
return cap.query(query_string).count().id
# Labels for the plot
LABELS = ["biggish", "micro", "nano"]
# Using capcount count the biggish cryptos
biggish = capcount('market_cap_usd > 300000000')
# Same as above for micro ...
micro = capcount('market_cap_usd > 50000000 & market_cap_usd < 300000000')
# ... and for nano
nano = capcount('market_cap_usd < 50000000')
# Making a list with the 3 counts
values = (biggish, micro, nano)
# Plotting them with matplotlib
plt.bar(range(len(values)), values, tick_label = LABELS)
# -
| Exploring the Bitcoin Cryptocurrency- Market -codecademy/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# This notebook shows a sample workflow for running hydrology simulations using the GSSHA rectangularly gridded simulator, supported by a suite of primarily open-source Python libraries. The workflow consists of:
#
# 1. Selecting parameters using widgets in a Jupyter notebook to control the model to simulate, including a watershed shape file.
# 2. Visualizing the watershed shape in a geographic context (projected into a suitable coordinate system and overlaid on map tiles from a web tile server).
# 3. If necessary, editing that watershed shape by hand and creating a new shape file with the edited result.
# 4. Selecting parameters to control the simulation, potentially overriding some selected earlier for the model creation (e.g. if running numerous conditions as a parameter sweep).
# 5. Visualizing and reviewing the inputs to the simulation.
# 6. Running the underlying simulation, collecting data on flood depth at each time point as well as the overall maximum flood depth per grid cell.
# 7. Visualizing the flood depth over time and the maximum flood depth.
# 8. Analyzing the simulation speed to help shape expectations for computational requirements for future runs.
#
# Each of these steps is configured directly in this notebook, and can thus easily be scripted or iterated as needed. The set of parameters, and precisely how they are configured, is still being improved, and will likely be made to match users' needs in this domain more closely. This workflow relies on fast raster regridding added to [Datashader](http://datashader.org/user_guide/5_Rasters.html) and exposed via [HoloViews](http://holoviews.org) as part of the EarthSim project.
#
# The underlying environment needed to run this workflow is set up as described in the [README](https://github.com/ContinuumIO/EarthSim/blob/master/README.md); though already functional, it will need to be greatly simplified to be more usable and maintainable in practice. The workflow currently relies on downloading data from external servers that can be slow to access from some parts of the internet, so you may see widely varying runtime speeds, especially the first time it is run.
# +
from datetime import datetime, timedelta
import os
import glob
import shutil
import param
import panel as pn
import numpy as np
import xarray as xr
import geoviews as gv
import holoviews as hv
import quest
import earthsim.gssha as esgssha
import earthsim.gssha.model as models
import cartopy.crs as ccrs
from panel.param import JSONInit
from earthsim.gssha import download_data, get_file_from_quest
from holoviews.streams import PolyEdit, BoxEdit, PointDraw, CDSStream
from holoviews.operation.datashader import regrid, shade
from earthsim.io import save_shapefile, open_gssha, get_ccrs
regrid.aggregator = 'max'
hv.extension('bokeh')
# %output holomap='scrubber' fps=2
# -
shutil.rmtree('./vicksburg_south',ignore_errors=True)
# ## Configure model parameters
# +
model_creator = esgssha.CreateGSSHAModel(name='Vicksburg South Model Creator',
mask_shapefile='../data/vicksburg_watershed/watershed_boundary.shp',
grid_cell_size=90)
pn.panel(model_creator.param, initializer=JSONInit())
# -
# ## Draw bounds to compute watershed
# Allows drawing a bounding box and adding points to serve as input to compute a watershed:
# %%opts Polygons [width=900 height=500] (fill_alpha=0 line_color='black')
# %%opts Points (size=10 color='red')
tiles = gv.WMTS('http://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png',
crs=ccrs.PlateCarree(), extents=(-91, 32.2, -90.8, 32.4))
box_poly = hv.Polygons([])
points = hv.Points([])
box_stream = BoxEdit(source=box_poly)
point_stream = PointDraw(source=points)
tiles * box_poly * points
# +
if box_stream.element:
element = gv.operation.project(box_stream.element, projection=ccrs.PlateCarree())
xs, ys = element.array().T
bounds = (xs[0], ys[1], xs[2], ys[0])
print("BOUNDS", bounds)
if point_stream.element:
projected = gv.operation.project(point_stream.element, projection=ccrs.PlateCarree())
print("COORDINATE:", projected.iloc[0]['x'][0], projected.iloc[0]['y'][0])
# -
# ## Inspect and edit shapefile
#
# The plot below allows editing the shapefile using a set of tools. The controls for editing are as follows:
#
# * Double-clicking the polygon displays the vertices
# * After double-clicking the point tool is selected and vertices can be dragged around
# * Tapping on a vertex selects it; tapping in a new location while a single vertex is selected inserts a new vertex
# * Multiple points can be selected by holding shift and then tapping or using the box_select tool
# * Once multiple vertices are selected they can be deleted by selecting the point editing tool and pressing ``backspace``
# %%opts Shape [width=900 height=500 tools=['box_select']] (alpha=0.5)
mask_shape = gv.Shape.from_shapefile(model_creator.mask_shapefile)
tiles = gv.WMTS('http://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png')
vertex_stream = PolyEdit(source=mask_shape)
tiles * mask_shape
# If any edits were made to the polygon in the plot above we save the ``watershed_boundary.shp`` back out and redisplay it to confirm our edits were applied correctly:
# %%opts Shape [width=600 height=400] (alpha=0.5)
if vertex_stream.data:
edited_shape_fname = './vicksburg_watershed_edited/watershed_boundary.shp'
dir_name = os.path.dirname(edited_shape_fname)
if not os.path.isdir(dir_name): os.makedirs(dir_name)
save_shapefile(vertex_stream.data, edited_shape_fname, model_creator.mask_shapefile)
model_creator.mask_shapefile = edited_shape_fname
mask_shape = gv.Shape.from_shapefile(edited_shape_fname)
mask_shape = mask_shape.opts() # Clear options
mask_shape
# ## Configure simulation parameters
# +
sim = esgssha.Simulation(name='Vicksburg South Simulation', simulation_duration=60*60,
rain_duration=30*60, model_creator=model_creator)
pn.panel(sim.param, initializer=JSONInit())
# -
# ## Create the model
#
# Note that the above code demonstrates how to collect user input, but it has not yet been connected to the remaining workflow, which uses code-based specification for the parameters.
if sim.model_creator.project_name not in quest.api.get_collections():
quest.api.new_collection(sim.model_creator.project_name)
pn.panel(sim.model_creator.param, initializer=JSONInit())
# +
# temporary workaround until workflow cleanup/parameterization is done
if sim.model_creator.project_name == 'test_philippines_small':
sim.model_creator.roughness = models.GriddedRoughnessTable(
land_use_grid=get_file_from_quest(sim.model_creator.project_name, sim.land_use_service, 'landuse', sim.model_creator.mask_shapefile),
land_use_to_roughness_table='./philippines_small/land_cover_glcf_modis.txt')
else:
sim.model_creator.roughness = models.GriddedRoughnessID(
land_use_grid=get_file_from_quest(sim.model_creator.project_name, sim.land_use_service, 'landuse', sim.model_creator.mask_shapefile),
land_use_grid_id=sim.land_use_grid_id)
sim.model_creator.elevation_grid_path = get_file_from_quest(sim.model_creator.project_name, sim.elevation_service, 'elevation', sim.model_creator.mask_shapefile)
# -
model = sim.model_creator()
# +
# add card for max depth
model.project_manager.setCard('FLOOD_GRID',
'{0}.fgd'.format(sim.model_creator.project_name),
add_quotes=True)
# Add time-based depth grids to simulation
"""
See: http://www.gsshawiki.com/Project_File:Output_Files_%E2%80%93_Required
Filename or folder to output MAP_TYPE maps of overland flow depth (m)
every MAP_FREQ minutes. If MAP_TYPE=0, then [value] is a folder name
and output files are called "value\depth.####.asc" **
"""
model.project_manager.setCard('DEPTH', '.', add_quotes=True)
model.project_manager.setCard('MAP_FREQ', '1')
# add event for simulation (optional)
"""
model.set_event(simulation_start=sim.simulation_start,
simulation_duration=timedelta(seconds=sim.simulation_duration),
rain_intensity=sim.rain_intensity,
rain_duration=timedelta(seconds=sim.rain_duration))
"""
# write to disk
model.write()
# -
# ## Review model inputs
# ### Load inputs to the simulation
# +
name = sim.model_creator.project_name
CRS = get_ccrs(os.path.join(name, name+'_prj.pro'))
roughness_arr = open_gssha(os.path.join(name,'roughness.idx'))
msk_arr = open_gssha(os.path.join(name, name+'.msk'))
ele_arr = open_gssha(os.path.join(name, name+'.ele'))
roughness = gv.Image(roughness_arr, crs=CRS, label='roughness.idx')
mask = gv.Image(msk_arr, crs=CRS, label='vicksburg_south.msk')
ele = gv.Image(ele_arr, crs=CRS, label='vicksburg_south.ele')
# -
# #### Shapefile vs. Mask
tiles * regrid(mask) * mask_shape
# #### Elevation
tiles * regrid(ele) * mask_shape
# #### Roughness
tiles * regrid(roughness) * mask_shape
# # Run Simulation
from gsshapy.modeling import GSSHAFramework
# +
# TODO: how does the info here relate to that set earlier?
# TODO: understand comment below
# assuming notebook is run from examples folder
project_path = os.path.join(sim.model_creator.project_base_directory, sim.model_creator.project_name)
gr = GSSHAFramework("gssha",
project_path,
"{0}.prj".format(sim.model_creator.project_name),
gssha_simulation_start=sim.simulation_start,
gssha_simulation_duration=timedelta(seconds=sim.simulation_duration),
# load_simulation_datetime=True, # use this if already set datetime params in project file
)
# http://www.gsshawiki.com/Model_Construction:Defining_a_uniform_precipitation_event
gr.event_manager.add_uniform_precip_event(sim.rain_intensity,
timedelta(seconds=sim.rain_duration))
gssha_event_directory = gr.run()
# -
# # Visualizing the outputs
# ### Load and visualize depths over time
# +
depth_nc = os.path.join(gssha_event_directory, 'depths.nc')
if not os.path.isfile(depth_nc):
# Load depth data files
depth_map = hv.HoloMap(kdims=['Minute'])
for fname in glob.glob(os.path.join(gssha_event_directory, 'depth.*.asc')):
depth_arr = open_gssha(fname)
minute = int(fname.split('.')[-2])
# NOTE: Due to precision issues not all empty cells match the NaN value properly, fix later
depth_arr.data[depth_arr.data==depth_arr.data[0,0]] = np.NaN
depth_map[minute] = hv.Image(depth_arr)
# Convert data to an xarray and save as NetCDF
arrays = []
for minute, img in depth_map.items():
ds = hv.Dataset(img)
arr = ds.data.z.assign_coords(minute=minute)
arrays.append(arr)
depths = xr.concat(arrays, 'minute')
depths.to_netcdf(depth_nc)
else:
depths = xr.open_dataset(depth_nc)
depth_ds = hv.Dataset(depths)
depth_ds.data
# -
# Now that we have a Dataset of depths we can convert it to a series of Images.
# %%opts Image [width=600 height=400 logz=True xaxis=None yaxis=None] (cmap='viridis') Histogram {+framewise}
regrid(depth_ds.to(hv.Image, ['x', 'y'])).redim.range(z=(0, 0.04)).hist(bin_range=(0, 0.04))
# We can also lay out the plots over time to allow for easier comparison.
# %%opts Image [width=300 height=300 logz=True xaxis=None yaxis=None] (cmap='viridis')
regrid(depth_ds.select(minute=range(10, 70, 10)).to(hv.Image, ['x', 'y']).redim.range(z=(0, 0.04))).layout().cols(3)
# ### Flood Grid Depth
#
# (Maximum flood depth over the course of the simulation)
# %%opts Image [width=600 height=400] (cmap='viridis')
fgd_arr = open_gssha(os.path.join(gssha_event_directory,'{0}.fgd'.format(sim.model_creator.project_name)))
fgd = gv.Image(fgd_arr, crs=CRS, label='vicksburg_south.fgd').redim.range(z=(0, 0.04))
regrid(fgd, streams=[hv.streams.RangeXY]).redim.range(z=(0, 0.04))
# ### Analyzing the simulation speed
# %%opts Spikes [width=600]
times = np.array([os.path.getmtime(f) for f in glob.glob(os.path.join(gssha_event_directory, 'depth*.asc'))] )
minutes = (times-times[0])/60
hv.Spikes(minutes, kdims=['Real Time (minutes)'], label='Time elapsed for each minute of simulation time') +\
hv.Curve(np.diff(minutes), kdims=['Simulation Time (min)'], vdims=[('runtime', 'Runtime per minute simulation time')]).redim.range(runtime=(0, None))
# Here, if the "spikes" are regularly spaced, simulation time scales regularly with real time, and you should be able to read out the approximate time to expect per unit of simulation time.
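# A rough summary of the timing data above (not part of the original workflow): the mean spacing between successive depth-map timestamps approximates the wall-clock cost per simulated minute.
mean_runtime = np.diff(np.sort(minutes)).mean()
print("Approximate real minutes per simulated minute: {:.2f}".format(mean_runtime))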
| examples/topics/GSSHA_Workflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
#
# _You are currently looking at **version 1.2** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
# # Assignment 2 - Pandas Introduction
# All questions are weighted the same in this assignment.
# ## Part 1
# The following code loads the olympics dataset (olympics.csv), which was derived from the Wikipedia entry on [All Time Olympic Games Medals](https://en.wikipedia.org/wiki/All-time_Olympic_Games_medal_table), and does some basic data cleaning.
#
# The columns are organized as # of Summer games, Summer medals, # of Winter games, Winter medals, total # of games, total # of medals. Use this dataset to answer the questions below.
# + nbgrader={"grade": false, "grade_id": "1", "locked": false, "solution": false} umich_question="prolog-000"
import pandas as pd
df = pd.read_csv('olympics.csv', index_col=0, skiprows=1)
for col in df.columns:
if col[:2]=='01':
df.rename(columns={col:'Gold'+col[4:]}, inplace=True)
if col[:2]=='02':
df.rename(columns={col:'Silver'+col[4:]}, inplace=True)
if col[:2]=='03':
df.rename(columns={col:'Bronze'+col[4:]}, inplace=True)
if col[:1]=='№':
df.rename(columns={col:'#'+col[1:]}, inplace=True)
names_ids = df.index.str.split('\s\(') # split the index by '('
df.index = names_ids.str[0] # the [0] element is the country name (new index)
df['ID'] = names_ids.str[1].str[:3] # the [1] element is the abbreviation or ID (take first 3 characters from that)
df = df.drop('Totals')
df.head()
# -
# ### Question 0 (Example)
#
# What is the first country in df?
#
# *This function should return a Series.*
# + umich_question="000"
# You should write your whole answer within the function provided. The autograder will call
# this function and compare the return value against the correct solution value
def answer_zero():
# This function returns the row for Afghanistan, which is a Series object. The assignment
# question description will tell you the general format the autograder is expecting
return df.iloc[0]
# You can examine what your function returns by calling it in the cell. If you have questions
# about the assignment formats, check out the discussion forums for any FAQs
answer_zero()
# -
# ### Question 1
# Which country has won the most gold medals in summer games?
#
# *This function should return a single string value.*
# + nbgrader={"grade": false, "locked": false, "solution": false} umich_part_id="001" umich_partlist_id="001"
def answer_one():
    return df.loc[df['Gold'].idxmax(), :].name
answer_one()
# -
# ### Question 2
# Which country had the biggest difference between their summer and winter gold medal counts?
#
# *This function should return a single string value.*
# + umich_part_id="002" umich_partlist_id="001"
def answer_two():
    df['gold_diff'] = df['Gold'] - df['Gold.1']
    return df.loc[df['gold_diff'].idxmax(), :].name
answer_two()
# -
# ### Question 3
# Which country has the biggest difference between their summer gold medal counts and winter gold medal counts relative to their total gold medal count?
#
# $$\frac{Summer~Gold - Winter~Gold}{Total~Gold}$$
#
# Only include countries that have won at least 1 gold in both summer and winter.
#
# *This function should return a single string value.*
# + umich_part_id="003" umich_partlist_id="001"
def answer_three():
    df_gold = df[(df['Gold'] >= 1) & (df['Gold.1'] >= 1)].copy()
    df_gold['rel_gold'] = (df_gold['Gold'] - df_gold['Gold.1']) / df_gold['Gold.2']
    return df_gold.loc[df_gold['rel_gold'].idxmax()].name
answer_three()
# -
# ### Question 4
# Write a function that creates a Series called "Points" which is a weighted value where each gold medal (`Gold.2`) counts for 3 points, silver medals (`Silver.2`) for 2 points, and bronze medals (`Bronze.2`) for 1 point. The function should return only the column (a Series object) which you created, with the country names as indices.
#
# *This function should return a Series named `Points` of length 146*
# + umich_part_id="004" umich_partlist_id="001"
def answer_four():
df['Points'] = df['Gold.2']*3 + df['Silver.2']*2 + df['Bronze.2']*1
return df['Points']
answer_four()
# -
# ## Part 2
# For the next set of questions, we will be using census data from the [United States Census Bureau](http://www.census.gov). Counties are political and geographic subdivisions of states in the United States. This dataset contains population data for counties and states in the US from 2010 to 2015. [See this document](https://www2.census.gov/programs-surveys/popest/technical-documentation/file-layouts/2010-2015/co-est2015-alldata.pdf) for a description of the variable names.
#
# The census dataset (census.csv) should be loaded as census_df. Answer questions using this as appropriate.
#
# ### Question 5
# Which state has the most counties in it? (hint: consider the sumlevel key carefully! You'll need this for future questions too...)
#
# *This function should return a single string value.*
# + umich_question="prolog-005"
census_df = pd.read_csv('census.csv')
census_df.head()
# + umich_part_id="005" umich_partlist_id="002"
def answer_five():
census_df['county_count'] = census_df.groupby('STATE')['COUNTY'].transform('count')
census_df_st = census_df[['STATE', 'STNAME', 'county_count']].drop_duplicates('STATE')
    return census_df_st.loc[census_df_st['county_count'].idxmax()]['STNAME']
answer_five()
# -
# ### Question 6
# **Only looking at the three most populous counties for each state**, what are the three most populous states (in order of highest population to lowest population)? Use `CENSUS2010POP`.
#
# *This function should return a list of string values.*
# + umich_part_id="006" umich_partlist_id="002"
def answer_six():
census_df_pop = census_df.query('SUMLEV == 50').groupby('STATE').apply(lambda x: x.sort_values('CENSUS2010POP', ascending=False)).reset_index(drop=True)
census_df_pop_top3 = census_df_pop[['STATE', 'STNAME', 'CENSUS2010POP']].groupby('STATE').head(3)
census_df_pop_top3_total = census_df_pop_top3.groupby('STNAME')['CENSUS2010POP'].sum()
census_df_pop_top3_total.sort_values(ascending=False, inplace=True)
return list(census_df_pop_top3_total.head(3).index)
answer_six()
# -
# ### Question 7
# Which county has had the largest absolute change in population within the period 2010-2015? (Hint: population values are stored in columns POPESTIMATE2010 through POPESTIMATE2015, you need to consider all six columns.)
#
# e.g. If County Population in the 5 year period is 100, 120, 80, 105, 100, 130, then its largest change in the period would be |130-80| = 50.
#
# *This function should return a single string value.*
# + umich_part_id="007" umich_partlist_id="002"
def answer_seven():
    census_df_cnty = census_df.query('SUMLEV == 50').copy()
    pop_cols = ['POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012',
                'POPESTIMATE2013', 'POPESTIMATE2014', 'POPESTIMATE2015']
    census_df_cnty['cnty_pop_diff'] = census_df_cnty[pop_cols].max(axis=1) - census_df_cnty[pop_cols].min(axis=1)
    return census_df_cnty.loc[census_df_cnty['cnty_pop_diff'].idxmax()]['CTYNAME']
answer_seven()
# -
# ### Question 8
# In this datafile, the United States is broken up into four regions using the "REGION" column.
#
# Create a query that finds the counties that belong to regions 1 or 2, whose name starts with 'Washington', and whose POPESTIMATE2015 was greater than their POPESTIMATE2014.
#
# *This function should return a 5x2 DataFrame with the columns = ['STNAME', 'CTYNAME'] and the same index ID as the census_df (sorted ascending by index).*
# + umich_part_id="008" umich_partlist_id="002"
def answer_eight():
census_df_fltrd = census_df.query('REGION in [1, 2] and POPESTIMATE2015 > POPESTIMATE2014')
census_df_fltrd_wash = census_df_fltrd[census_df_fltrd.CTYNAME.str.startswith("Washington")]
return census_df_fltrd_wash[['STNAME', 'CTYNAME']]
answer_eight()
# -
| Assignment+2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 521153S, Deep Learning Final Project: Mini Image classification Competition with CNN
# ## Outline
# ### In this assignment, you will learn:
# * Combine all you learned from the previous assignments.
# * Build your own CNN as you like with Pytorch, train and validate it on a given dataset.
# * Test your CNN model on our server.
#
# ### Tasks (<span style="color:orange">40 points</span>)
# We want to keep the task as clean and simple as possible.
# 1. You are given a dataset containing 45,000 grayscale 32x32 images. We have also set up a private testing dataset and uploaded it to our server; you can only use this private testing data to test your model. Instructions for evaluating your model are given along with the project in Moodle.
# 2. Before evaluating your model on the server, you have to train your model based on the given 45,000 images. Specifically, in these images, there are nine classes, and each class has 5000 images. The testing data on the server has 9000 images, and each class has 1000 images.
# 3. To get a good CNN model, basically, there are some rules for you to follow which would be considered when grading your report:
# * Make the best out of the given images. This means you have to split them into a training and a validation set, train your model on the training set, and validate the trained model on the validation set. If the accuracy on the validation set is not good, adjust your CNN structure or some hyperparameters, for example batch size, learning rate, momentum on SGD, lambda in weight decay, etc. Then retrain your model until the validation accuracy is good enough, because the testing data will yield an accuracy similar to the validation set.
# * It also means that you can do some augmentation on the training set. This includes randomly flipping, cropping a small window at a random location within the image (refer to assignment 4), adding some noise, resizing-and-cropping, etc. All of these make the training process more robust.
# * From the CNN structure perspective, you also need to design your CNN model by yourself. Well-known network architectures you can use include [ResNet](https://arxiv.org/abs/1512.03385), [Inception](https://arxiv.org/abs/1512.00567), [VGGNet](https://arxiv.org/abs/1409.1556), [DenseNet](https://arxiv.org/abs/1608.06993), [MobileNet](https://arxiv.org/abs/1801.04381), [ShuffleNet](https://arxiv.org/abs/1807.11164), [ResNeXt](https://arxiv.org/abs/1611.05431) etc.
# 4. Similar to real-life applications, your model will be tested with unknown data. In this project, after training and validating the model, you need to test it on our hold-on testing dataset. We will provide you with a submission server and a leaderboard. The instructions would be given alongside the project in Moodle.
# 5. Please give a pdf report (also your source code, e.g., this Jupyter notebook file), documenting the whole model training process and also the evaluated accuracy on the server. Tensorboard visualization is also necessary for your report to visualize your network structure, accuracy, and losses, etc. as done in assignment 4.
# 6. You need to return the pdf report as well as your trained model (a checkpoint file) with your source code file to moodle. We will run your model on the server and compare the results with the one written in the reports.
#
# ### Grading
# You can get 40 points in total.
# * You will get <span style="color:orange;font-weight:bold">20 points</span> if your model achieves more than 82.5% testing accuracy.
# * You will get <span style="color:orange;font-weight:bold">20 points</span> if your report is clear and well-organized.
#
# ### Files you have to submit
# please submit a .zip file containing:
# 1. a pdf report;
# 2. source code files (jupyter notebook or common python files);
# 3. a checkpoint file (which saves your trained model).
#
# ### Group members
# Maximum 2 members.
#
#
# ### Environment
# Python 3, Numpy, matplotlib, torch, torchvision...
#
# ### Dataset
# Please follow the code below to download the 45,000 images and corresponding labels. <br>
# We have already split them into training and validation sets; please refer to assignments 3 and 4 to create your DataLoader, with your data augmentation methods. Good luck.
# #### Download the given dataset
# +
# import necessary packages
import os, time
import torch
import requests, zipfile, sys
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from utils import download_given_data, get_preds_figure
import torchvision
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms, utils
import random, matplotlib
import pandas as pd
from torch.utils.tensorboard import SummaryWriter
from torchvision.models.resnet import BasicBlock
download_given_data('./')
print("GPU_available={}".format(torch.cuda.is_available()))
# -
# **Create a dataset class for our Mini ImageNet dataset**
class MiniImageNetDataset(Dataset):
def __init__(self, csv_file, transform=None):
# Read the csv file
self.frame = pd.read_csv(csv_file, header=None)
self.transform = transform
self.label_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship']
def __getitem__(self, idx):
image_name = self.frame.iloc[idx, 0]
image = Image.open(image_name)
label = self.frame.iloc[idx, 1]
if self.transform:
image = self.transform(image)
sample = {'image': image, 'label': label}
return sample
def __len__(self):
return len(self.frame)
# ### Dataset checking ###
# +
train_set = MiniImageNetDataset(csv_file='./given_data/train.csv', transform=None)
num_to_show = 5
idx = np.random.choice(range(len(train_set)), num_to_show, replace=False)
fig = plt.figure(figsize=(16, 8))
for i in range(len(idx)):
image, label = train_set[idx[i]]['image'], train_set[idx[i]]['label']
label_name = train_set.label_names[label]
ax = plt.subplot(1, num_to_show, i + 1)
plt.tight_layout()
ax.set_title('class #{}'.format(label_name))
ax.axis('off')
plt.imshow(np.asarray(image), cmap=matplotlib.cm.binary)
plt.show()
# Print the number of total images in the training set
print('total number of images in the training set: {}'.format(len(train_set)))
# Print the number of images per class in the training set
class_num_train = np.zeros(9, dtype=np.int32)
for x in train_set:
    class_num_train[x['label']] += 1
for i in range(9):
    print('number of images for class {}: {}'.format(train_set.label_names[i], class_num_train[i]))
# +
valid_set = MiniImageNetDataset(csv_file='./given_data/val.csv', transform=None)
num_to_show = 5
idx = np.random.choice(range(len(valid_set)), num_to_show, replace=False)
fig = plt.figure(figsize=(16, 8))
for i in range(len(idx)):
image, label = valid_set[idx[i]]['image'], valid_set[idx[i]]['label']
label_name = valid_set.label_names[label]
# Show a few example images from the validation set
ax = plt.subplot(1, num_to_show, i + 1)
plt.tight_layout()
ax.set_title('class #{}'.format(label_name))
ax.axis('off')
plt.imshow(np.asarray(image), cmap=matplotlib.cm.binary)
plt.show()
# Print the total number of images in the validation set
print('total number of images in the validation set: {}'.format(len(valid_set)))
# Print the number of images per class in the validation set
class_num_val = np.zeros(9, dtype=np.int32)
for x in valid_set:
    class_num_val[x['label']] += 1
for i in range(9):
    print('number of images for class {}: {}'.format(valid_set.label_names[i], class_num_val[i]))
# -
# ### Build our custom data augmentation
# We can define a custom data-augmentation transform, named RandomWindowDrop. With a 50/50 chance it either cuts out a square window at a random location inside the image or horizontally flips the image.
class RandomWindowDrop(object):
def __init__(self, window_size):
assert isinstance(window_size, (int, tuple))
if isinstance(window_size, int):
self.window_size = (window_size, window_size)
else:
assert len(window_size) == 2
self.window_size = window_size
# Define a horizontalFlip
self.trans = transforms.RandomHorizontalFlip(p=1.0)
def __call__(self, sample):
image = sample
# Perform either RandomWindowDrop or RandomHorizontalFlip
if random.random() < 0.5:
            # PIL's Image.size is (width, height); pixel access is indexed as [x, y]
            w, h = image.size
            imagePixels = image.load()
            top = np.random.randint(self.window_size[0], h - self.window_size[0])
            left = np.random.randint(self.window_size[1], w - self.window_size[1])
            # Zero out a square window with its top-left corner at (left, top)
            for x in range(left, left + self.window_size[1], 1):
                for y in range(top, top + self.window_size[0], 1):
                    imagePixels[x, y] = 0
else:
image = self.trans(image)
return image
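# **Preview the custom augmentation**
# *(A minimal sketch, not part of the original pipeline.)* The cell below composes RandomWindowDrop with standard torchvision transforms and inspects one augmented training sample; the window size of 6 matches the value used later in getTrainingData.
# +
preview_transform = transforms.Compose([
    RandomWindowDrop(6),    # custom augmentation defined above
    transforms.ToTensor(),  # PIL image -> float tensor in [0, 1]
])
preview_set = MiniImageNetDataset(csv_file='./given_data/train.csv', transform=preview_transform)
preview_sample = preview_set[0]
print('augmented image shape: {}, label: {}'.format(preview_sample['image'].shape, preview_sample['label']))
# -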
# **Load the training/validation data to Dataloader**
# +
def getTrainingData(csv_file='./given_data/train.csv', batch_size=64, num_workers=0):
__normalize_stats = {'mean': [0.5], 'std': [0.5]}
# transforms.Compose create a list of transformations
transformed_training = MiniImageNetDataset(csv_file=csv_file,
transform=transforms.Compose([
RandomWindowDrop(6),
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize(__normalize_stats['mean'],
__normalize_stats['std'])
]))
dataloader_training = DataLoader(transformed_training, batch_size, shuffle=True, num_workers=num_workers)
return dataloader_training
def getEvalData(csv_file='./given_data/val.csv', batch_size=64, num_workers=0):
__normalize_stats = {'mean': [0.5], 'std': [0.5]}
transformed_eval = MiniImageNetDataset(csv_file=csv_file,
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize(__normalize_stats['mean'],
__normalize_stats['std'])
]))
    # No need to shuffle the data for evaluation
    dataloader_eval = DataLoader(transformed_eval, batch_size, shuffle=False, num_workers=num_workers)
return dataloader_eval
# +
train_loader = getTrainingData(csv_file='./given_data/train.csv', batch_size=64, num_workers=0)
# batch_size = 64
valid_loader = getEvalData(csv_file='./given_data/val.csv', batch_size=64, num_workers=0)
# -
# ### Define the Network Architecture in Pytorch ###
# +
## ------ ResNet18 -------- ##
class ResNet18_MiniImageNet(torchvision.models.ResNet):
def __init__(self, num_classes=9):
super(ResNet18_MiniImageNet, self).__init__(BasicBlock, [2, 2, 2, 2], num_classes=num_classes)
# Override the "conv1" layer from the resnet-18 model
self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
def forward(self, x):
return super(ResNet18_MiniImageNet, self).forward(x)
## Set ResNet18 Model
Ourmodel = 'ResNet18'
model = ResNet18_MiniImageNet(num_classes= 9).float()
## ------ ResNet50 -------- ##
# To fit within 8 GB of GPU memory, set the batch size to 32
## Set ResNet50 model
# Ourmodel = 'ResNet50'
# model = torchvision.models.resnet50(pretrained=False)
# model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
# model.fc = torch.nn.Sequential(
# torch.nn.Linear(
# in_features=2048,
# out_features=9
# )
# # torch.nn.Sigmoid()
# )
## ------ VGG 16 -------- ##
# -
# ### Check the Model Architecture ###
# +
writer = SummaryWriter('tb_graphs/training')
val_writer = SummaryWriter('tb_graphs/validation')
# Print out the layers of our model
print(model)
# Get some random training images
_iter = iter(train_loader)
samples = next(_iter)
images = samples['image']
# Create grid of images
img_grid = torchvision.utils.make_grid(images)
# Show images
plt.imshow(img_grid.mean(dim=0).cpu().numpy(), cmap="Greys")
# Write to tensorboard
writer.add_image('train_images', img_grid)
# Write model graph to tensorboard
writer.add_graph(model, images)
# -
# ### Set Parameteres ###
# +
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#####Change model to cuda
if torch.cuda.is_available():
model = model.cuda()
# +
# evaluation function
def eval(net, data_loader):
net.eval()
correct = 0.0
num_images = 0.0
running_loss = 0.0
loss_function = torch.nn.CrossEntropyLoss()
for i, sample in enumerate(data_loader):
#
images, labels = sample['image'], sample['label']
images, labels = images.to(device),labels.to(device)
outs = net(images)
_, preds = outs.max(1)
correct += preds.eq(labels).sum()
running_loss += loss_function(outs, labels).item()
num_images += len(labels)
acc = correct.float() / num_images
loss = running_loss / len(data_loader)
return acc, loss
# training function
def train(net, train_loader, valid_loader):
    # Build SGD optimizer with learning rate=0.01, momentum=0.9, weight decay=1e-10
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-10)
loss_function = torch.nn.CrossEntropyLoss()
# Log training process to tensorboard every 100 iterations
log_every = 100
    # Train for 50 epochs
    epochs = 50
    for epoch in range(epochs):
        # Reduce the learning rate from 0.01 to 0.005 after epoch 30
        if epoch > 30:
            optimizer = optim.SGD(net.parameters(), lr=0.005, momentum=0.9, weight_decay=1e-10)
start_t = time.time()
net.train()
running_loss = 0.0
running_acc = 0.0
for i, sample in enumerate(train_loader):
images, labels = sample['image'], sample['label']
images, labels = images.to(device),labels.to(device)
# TODO: fill these blanks
outs = net(images)
loss = loss_function(outs, labels)
_, preds = outs.max(1)
correct = preds.eq(labels).sum()
running_acc += correct.float() / len(labels)
# clear grads, back-propagation, backward propogation, update parameters
# clear grads
optimizer.zero_grad()
# backward propogation
loss.backward()
# update parameters
optimizer.step()
net.eval()
running_loss += loss.item()
if i % log_every == 99:
print('[Epoch/iter]: [{}/{}], loss: {:.05f}, accuracy: {:.05f}'.format(epoch, i+1,
running_loss / log_every, running_acc / log_every))
log_index = epoch * len(train_loader) + i
# Log the training loss and accuracy
# Example of using .add_scalar()
# Note tag='Loss'
writer.add_scalar('Loss', running_loss / log_every, log_index) # tag='Loss'
                # Log the training accuracy using writer (tag='Accuracy')
                writer.add_scalar('Accuracy', running_acc / log_every, log_index)
# Log predictions
# Example of using .add_figure()
writer.add_figure('predictions', get_preds_figure(net, images, labels), log_index)
running_loss = 0.0
running_acc = 0.0
# Running the validation
acc_eval, loss_eval = eval(net, valid_loader)
print('Elapsed time: {:.02f} seconds, end of epoch: {}, lr: {}, val_loss: {:.05f}, val_acc: {:.05f}'.format(
time.time()-start_t, epoch, optimizer.param_groups[0]['lr'], loss_eval, acc_eval))
# Log the validation loss and accuracy
# Using val_writer will log the values to the validation writer
val_log_index = epoch * len(valid_loader) + i
# Note tag='Loss'
val_writer.add_scalar('Loss', loss_eval, val_log_index) # tag='Loss'
        # Log the validation accuracy using val_writer (tag='Accuracy')
        val_writer.add_scalar('Accuracy', acc_eval, val_log_index)
return net
# +
train(model, train_loader, valid_loader)
# -
model.load_state_dict(torch.load('./BestModelResNet18.pth'))
# +
### Just for check valid set result
acc_eval, loss_eval = eval(model, valid_loader)
print('val_loss: {:.05f}, val_acc: {:.05f}'.format(loss_eval, acc_eval))
# +
# In this part, we save the weights for each model variant.
if Ourmodel == 'ResNet18':
# Save the weight of current model to disk
PATH = './ResNet18.pth'
torch.save(model.state_dict(), PATH)
# Load the model weights
model.load_state_dict(torch.load('./ResNet18.pth'))
if Ourmodel == 'ResNet50':
# Save the weight of current model to disk
PATH = './ResNet50.pth'
torch.save(model.state_dict(), PATH)
# Load the model weights
model.load_state_dict(torch.load('./ResNet50.pth'))
# -
# Define the best model name and record it temporarily
PATH = './BestModel***.pth'
torch.save(model.state_dict(), PATH)
# **Record for Train and Valid Set Accuracy**
# * ResNet50: 96% and 80% ---- 0.01 lr, 1e-4 weight decay and 32 batch size
# * ResNet18: 96% and 82% ---- 0.01 lr, 1e-4 weight decay and 128 batch size
# * ResNet18: 92% and 78% ---- 0.005 lr, 1e-4 weight decay and 32 batch size
# * ResNet18: 98% and 84.2% ---- 0.006 lr and lr decay, 1e-10 weight decay and 64 batch size
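# **Keeping the best checkpoint**
# *(Sketch, not from the original code.)* A simple way to obtain the "best model" file referenced above is to evaluate on the validation set after each epoch and save the weights only when the accuracy improves; this helper reuses the eval() function defined earlier, and the path name is illustrative.
def save_if_best(net, valid_loader, best_acc, path='./BestModelCandidate.pth'):
    # Evaluate on the validation set and save the weights only if the accuracy improved
    acc_eval, _ = eval(net, valid_loader)
    if acc_eval > best_acc:
        torch.save(net.state_dict(), path)
        best_acc = acc_eval
    return best_acc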
# ### Run test set on our model
def getTestData(csv_file='./group_002_test.csv', batch_size=64, num_workers=0):
__normalize_stats = {'mean': [0.5], 'std': [0.5]}
transformed_eval = MiniImageNetDataset(csv_file=csv_file,
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize(__normalize_stats['mean'],
__normalize_stats['std'])
]))
# DataLoader shuffle=False
dataloader_eval = DataLoader(transformed_eval, batch_size, shuffle=False, num_workers=num_workers)
return dataloader_eval
test_loader = getTestData(csv_file='./group_002_test.csv', batch_size=64, num_workers=0)
# **Import Model**
model.load_state_dict(torch.load('./BestModelResNet18.pth'))
# **Define a function that writes the test-set predictions to a text file**
def getPretest(net, data_loader):
net.eval()
# Create new file
f= open("test28_12.txt","w+")
for i, sample in enumerate(data_loader):
#Define image for cuda
images, labels = sample['image'], sample['label']
batch_size = len(images)
images, labels = images.to(device),labels.to(device)
outs = net(images)
_, preds = outs.max(1)
# Write down txt
for k in range(batch_size):
f.write(str(preds[k].item()) + '\n')
getPretest(model, test_loader)
| DeepLearning_project/Project-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="a--rFzYazTo2"
# # Introduction to Brain Segmentation with Keras
#
# # ***MAIN 2018 Educational Course***
#
# ## <NAME>
#
# ## McGill University
#
# ## **Contact**: email: [<EMAIL>](mailto:<EMAIL>) , Twitter: [@tffunck](https://twitter.com/tffunck)
#
# + [markdown] id="BEzld-ZDKjwq" colab_type="text"
# # Load Code & Data
# + [markdown] id="_YbNsbqxa-KL" colab_type="text"
# ## Pull minc_keras from github
# + colab_type="code" id="OPVCbLjaj0An" outputId="d8dd150e-a3bf-4c8d-e5d4-cb90b9d50de7" colab={"base_uri": "https://localhost:8080/", "height": 136}
#Download repository
# !git clone https://github.com/tfunck/minc_keras
#Switch dir
def set_base_dir():
import os
os.chdir('/content/minc_keras')
set_base_dir()
# + [markdown] id="iGVV41VsbD7U" colab_type="text"
# ## Download and unzip data
# + id="ewA4QpL94SZY" colab_type="code" outputId="b6e04e65-a535-4dee-a3a8-1d9e2352def5" colab={"base_uri": "https://localhost:8080/", "height": 258}
#Download and unzip data
# !tar -jxvf data/output.tar.bz2 &> /dev/null
# !mv output mri
# !wget https://amnesia.cbrain.mcgill.ca/deeplearning/sorteo.tar.bz2 --no-check-certificate
# !mkdir -p pet
# !tar -jxvf sorteo.tar.bz2 -C pet &> /dev/null
# + [markdown] id="tU5GWfhobSZQ" colab_type="text"
# ## Initialize T1 MRI data
# + id="IswkR7t-s4Hc" colab_type="code" outputId="8231de79-46c8-43c2-ff9b-0d10122946d7" colab={"base_uri": "https://localhost:8080/", "height": 255}
set_base_dir()
from utils import *
import numpy as np
from minc_keras import *
setup_dirs('mri_results')
### Load data from brain images and save them into .npy. Sort them into train/validate/test splits
[images_mri, data_mri] = prepare_data('mri/', 'mri_results/data', 'mri_results/report', input_str='_T1w_anat_rsl', label_str='variant-seg', clobber=False)
### 1) Load data
Y_validate_mri=np.load(data_mri["validate_y_fn"]+'.npy')
nlabels_mri=len(np.unique(Y_validate_mri))
X_train_mri=np.load(data_mri["train_x_fn"]+'.npy')
Y_train_mri=np.load(data_mri["train_y_fn"]+'.npy')
X_validate_mri=np.load(data_mri["validate_x_fn"]+'.npy')
X_test_mri=np.load(data_mri["test_x_fn"]+'.npy')
Y_test_mri=np.load(data_mri["test_y_fn"]+'.npy')
Y_test_mri=to_categorical(Y_test_mri)
Y_train_mri = to_categorical(Y_train_mri, num_classes=nlabels_mri)
Y_validate_mri = to_categorical(Y_validate_mri, num_classes=nlabels_mri)
# + [markdown] id="_eOBMRDabKgD" colab_type="text"
# ## Initialize PET data
# + colab_type="code" id="SqBWn-PmzTpO" colab={}
set_base_dir()
import minc_keras
from utils import *
import numpy as np
from minc_keras import *
setup_dirs('pet_results')
### Load data from brain images and save them into .npy. Sort them into train/validate/test splits
[images_pet, data_pet] = prepare_data('pet/','pet_results/data','pet_results/report',input_str='_pet.mnc', label_str='brainmask', pad_base=3,ratios=[0.7,0.15], clobber=True)
### 1) Load data
Y_validate_pet=np.load(data_pet["validate_y_fn"]+'.npy')
nlabels_pet=len(np.unique(Y_validate_pet))
X_train_pet=np.load(data_pet["train_x_fn"]+'.npy')
Y_train_pet=np.load(data_pet["train_y_fn"]+'.npy')
X_validate_pet=np.load(data_pet["validate_x_fn"]+'.npy')
X_test_pet=np.load(data_pet["test_x_fn"]+'.npy')
Y_test_pet=np.load(data_pet["test_y_fn"]+'.npy')
Y_test_pet=to_categorical(Y_test_pet)
Y_train_pet = to_categorical(Y_train_pet, num_classes=nlabels_pet)
Y_validate_pet = to_categorical(Y_validate_pet, num_classes=nlabels_pet)
# + [markdown] colab_type="text" id="F5dHDJJBzTo4"
# # T1 Segmentation
#
# * 261 MRIs (skull-stripped)
#
# * [1000 Functional Connectomes](http://fcon_1000.projects.nitrc.org/fcpClassic/FcpTable.html)
#
# * GM/WM segmentation produced with FSL-5.0-fast
#
# >>>>> __Input__
#
# 
#
# >>>>> __Label__
#
# 
#
#
#
#
#
#
# + [markdown] colab_type="text" id="ZB0ZXe4DzTpF"
# ## Training a simple model
# + colab_type="code" id="vDqzkarSzTpF" colab={}
import keras
from keras.layers.convolutional import Conv2D
from keras.layers import Input
from custom_loss import *
from keras.utils import to_categorical
from keras.activations import relu
from keras.layers.core import Dropout
from keras.callbacks import History, ModelCheckpoint
model_name="mri_1.hdf5"
IN = Input(shape=(data_mri["image_dim"][1], data_mri["image_dim"][2],1))
CONV1 = Conv2D(16, kernel_size=[3,3], activation="relu",padding='same')(IN)
CONV2 = Conv2D(16, kernel_size=[3,3], activation="relu",padding='same')(CONV1)
CONV3 = Conv2D(16, kernel_size=[3,3], activation="relu",padding='same')(CONV2)
OUT = Conv2D(nlabels_mri, kernel_size=[1,1], activation='softmax', padding='same')(CONV3)
model = keras.models.Model(inputs=[IN], outputs=OUT)
print(model.summary())
#set compiler
ada = keras.optimizers.Adam(0.0001)
#compile the model
model.compile(loss = 'binary_crossentropy', optimizer=ada,metrics=['acc'] )
#fit model
history = model.fit([X_train_mri],Y_train_mri, validation_data=([X_validate_mri], Y_validate_mri), epochs = 3)
#save model
model.save(model_name)
test_score = model.evaluate(X_test_mri, Y_test_mri)
print("Test :", test_score)
# + id="ygnxmSu8hqt0" colab_type="code" colab={}
###Create predictions for model
# !git pull
from predict import predict
predict('mri_1.hdf5', 'mri_results/predict/test', 'mri_results/data', 'mri_results/report/images.csv', 'categorical_crossentropy', images_to_predict='1', category="test", verbose=True)
# + id="wg1scPa4uQFX" colab_type="code" colab={}
#If using Google Chrome, can download directly through browser
from google.colab import files
from glob import glob
for fn in glob('mri_results/predict/test/*.png') :
print(fn)
files.download(fn)
#import matplotlib.pyplot as plt
#plt.imshow(plt.imread('mri_results/predict/test//sub-75922_task-01_ses-01_T1w_anat_rsl_predict_0.png'))
# + [markdown] id="ZW3RdNcUpVU-" colab_type="text"
# ## Adding drop-out
# + id="DRozzICApVVC" colab_type="code" colab={}
import keras
from keras.layers.convolutional import Conv2D
from keras.layers import Input
from custom_loss import *
from keras.utils import to_categorical
from keras.activations import relu
from keras.layers.core import Dropout
from keras.callbacks import History, ModelCheckpoint
model_name="mri_2.hdf5"
IN = Input(shape=(data_mri["image_dim"][1], data_mri["image_dim"][2],1))
CONV1 = Conv2D(16, kernel_size=[3,3], activation="relu",padding='same')(IN)
DROPOUT1 = Dropout(0.2)(CONV1)
CONV2 = Conv2D(16, kernel_size=[3,3], activation="relu",padding='same')(DROPOUT1)
DROPOUT2 = Dropout(0.2)(CONV2)
CONV3 = Conv2D(16, kernel_size=[3,3], activation="relu",padding='same')(DROPOUT2)
DROPOUT3 = Dropout(0.2)(CONV3)
OUT = Conv2D(nlabels_mri, kernel_size=[1,1], activation='softmax', padding='same')(DROPOUT3)
model = keras.models.Model(inputs=[IN], outputs=OUT)
print(model.summary())
#set optimizer
ada = keras.optimizers.Adam(0.0001)
#compile the model
model.compile(loss = 'binary_crossentropy', optimizer=ada,metrics=['acc'] )
#fit model
history = model.fit([X_train_mri],Y_train_mri, validation_data=([X_validate_mri], Y_validate_mri), epochs = 3)
#save model
model.save(model_name)
test_score = model.evaluate(X_test_mri, Y_test_mri)
print("Test :", test_score)
# + [markdown] id="2Ztw_OrtpVVL" colab_type="text"
# ## Adding dilations
# + colab_type="code" id="9qhOKYbtEsT1" colab={}
import keras
from keras.layers.convolutional import Conv2D
from keras.layers import Input
from custom_loss import *
from keras.utils import to_categorical
from keras.activations import relu
from keras.layers.core import Dropout
from keras.callbacks import History, ModelCheckpoint
model_name="mri_3.hdf5"
IN = Input(shape=(data_mri["image_dim"][1], data_mri["image_dim"][2],1))
CONV1 = Conv2D(16, kernel_size=[3,3], dilation_rate=[2,2], activation="relu",padding='same')(IN)
CONV2 = Conv2D(16, kernel_size=[3,3], dilation_rate=[2,2], activation="relu",padding='same')(CONV1)
CONV3 = Conv2D(16, kernel_size=[3,3], dilation_rate=[2,2], activation="relu",padding='same')(CONV2)
OUT = Conv2D(nlabels_mri, kernel_size=[1,1], activation='softmax', padding='same')(CONV3)
model = keras.models.Model(inputs=[IN], outputs=OUT)
print(model.summary())
#set optimizer
ada = keras.optimizers.Adam(0.0001)
#compile the model
model.compile(loss = 'binary_crossentropy', optimizer=ada,metrics=['acc'] )
#fit model
history = model.fit([X_train_mri],Y_train_mri, validation_data=([X_validate_mri], Y_validate_mri), epochs = 3)
#save model
model.save(model_name)
test_score = model.evaluate(X_test_mri, Y_test_mri)
print("Test :", test_score)
# + [markdown] id="feOljZP6pVVV" colab_type="text"
# ## __Exercises__
#
# ### 1. Modify the drop-out, dilation_rate, and number of kernels for one of the above templates.
#
# ### 2. Build a CNN with 5 convolutional layers, using 5x5 kernels and 8, 8, 16, 16, and 32 filters in the successive layers
#
# ### 3. Train a CNN that has better than 0.95 test accuracy. How high can you get the accuracy without overfitting?
# + [markdown] id="UDRgIXTjKiUY" colab_type="text"
# ## Solutions
# + [markdown] id="E5UVKxKkJza6" colab_type="text"
# ### 1
# Modify as you like! No wrong answers so long as it runs.
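# For illustration, one possible modification is sketched below (not a prescribed answer: the filter counts, drop-out rate and dilation rate are arbitrary choices, the helper names `model_ex1`/`history_ex1` are new, and the `data_mri`, `nlabels_mri`, `X_*_mri` and `Y_*_mri` objects defined above are assumed to be in memory).
# +
import keras
from keras.layers.convolutional import Conv2D
from keras.layers import Input
from keras.layers.core import Dropout
IN = Input(shape=(data_mri["image_dim"][1], data_mri["image_dim"][2], 1))
CONV1 = Conv2D(32, kernel_size=[3,3], dilation_rate=[2,2], activation="relu", padding='same')(IN)
DROPOUT1 = Dropout(0.3)(CONV1)
CONV2 = Conv2D(32, kernel_size=[3,3], dilation_rate=[2,2], activation="relu", padding='same')(DROPOUT1)
DROPOUT2 = Dropout(0.3)(CONV2)
OUT = Conv2D(nlabels_mri, kernel_size=[1,1], activation='softmax', padding='same')(DROPOUT2)
model_ex1 = keras.models.Model(inputs=[IN], outputs=OUT)
model_ex1.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(0.0001), metrics=['acc'])
history_ex1 = model_ex1.fit([X_train_mri], Y_train_mri, validation_data=([X_validate_mri], Y_validate_mri), epochs=3)
print("Test :", model_ex1.evaluate(X_test_mri, Y_test_mri))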
# + [markdown] id="4ttNoEvRJU_-" colab_type="text"
# ### 2.
# + colab_type="code" id="5Fo61_R0GYoq" colab={}
import keras
from keras.layers.convolutional import Conv2D
from keras.layers import Input
from custom_loss import *
from keras.utils import to_categorical
from keras.activations import relu
from keras.layers.core import Dropout
model_name="mri_4.hdf5"
IN = Input(shape=(data_mri["image_dim"][1], data_mri["image_dim"][2],1))
CONV1 = Conv2D(8, kernel_size=[5,5], activation="relu",padding='same')(IN)
CONV2 = Conv2D(8, kernel_size=[5,5], activation="relu",padding='same')(CONV1)
CONV3 = Conv2D(16, kernel_size=[5,5], activation="relu",padding='same')(CONV2)
CONV4 = Conv2D(16, kernel_size=[5,5], activation="relu",padding='same')(CONV3)
CONV5 = Conv2D(32, kernel_size=[5,5], activation="relu",padding='same')(CONV4)
OUT = Conv2D(nlabels_mri, kernel_size=[1,1], activation='softmax', padding='same')(CONV5)
model = keras.models.Model(inputs=[IN], outputs=OUT)
print(model.summary())
#set optimizer
ada = keras.optimizers.Adam(0.0001)
#compile the model
model.compile(loss = 'categorical_crossentropy', optimizer=ada,metrics=['acc'] )
#fit model
history = model.fit([X_train_mri],Y_train_mri, validation_data=([X_validate_mri], Y_validate_mri), epochs = 3)
#save model
model.save(model_name)
test_score = model.evaluate(X_test_mri, Y_test_mri)
print("Test :", test_score)
# + [markdown] id="dltIipwsOhpQ" colab_type="text"
# ### 3.
# + [markdown] id="Cc3StrTcDxV_" colab_type="text"
# Run the previous model for 10 epochs to exceed 0.950 test accuracy.
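# For example (a short sketch that simply reuses the `model` built in solution 2 and raises the `epochs` argument):
# +
history = model.fit([X_train_mri], Y_train_mri,
                    validation_data=([X_validate_mri], Y_validate_mri),
                    epochs=10)
print("Test :", model.evaluate(X_test_mri, Y_test_mri))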
# + [markdown] colab_type="text" id="D8VmKHg4zTpH"
# #U-Net
#
# >>> __Input: Raclopride PET__
#
# 
#
# >>> __Input: FDOPA PET__
#
# 
#
# >>> __Input: FDG PET__
#
# 
#
# >>> __Input: Label__
#
# 
# + [markdown] colab_type="text" id="rGkJHbkPzTpQ"
# ## Building a U-NET in Keras
#
# 
#
# Ronneberger, Fischer, and Brox. 2015."U-net: Convolutional networks for biomedical image segmentation." International Conference on Medical image computing and computer-assisted intervention. https://arxiv.org/abs/1505.04597
# + colab_type="code" id="0SDJ5RvOzTpQ" colab={}
import keras
from keras.layers.convolutional import Conv2D
from keras.layers import Input
from custom_loss import *
from keras.utils import to_categorical
from keras.activations import relu
from keras.layers.core import Dropout
from keras.layers import Input, Add, Multiply, Dense, BatchNormalization
from keras.layers import LeakyReLU, MaxPooling2D, Conv2DTranspose, Concatenate, ZeroPadding2D, UpSampling2D, Convolution2D
from prepare_data import pad
### Warning : if you change the number of times you downsample with max_pool,
### then you need to rerun prepare_data() with pad_base=<number of downsample nodes>
model_name="pet_1.hdf5"
### 1) Define architecture of neural network
IN = Input(shape=(data_pet['image_dim'][1],data_pet['image_dim'][2] ,1))
BN1 = BatchNormalization()(IN)
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(BN1)
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
up6 = UpSampling2D(size=(2, 2))(conv4)
#up6 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv6)
conc6 = Concatenate(axis=3)([up6, conv3])
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conc6)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
up7 = UpSampling2D(size=(2, 2))(conv7)
#up7 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv7)
conc7 = Concatenate(axis=3)([up7, conv2])
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conc7) #(up8)
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
up8 = UpSampling2D(size=(2, 2))(conv8)
#up8 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv8)
conc8 = Concatenate(axis=3)([up8, conv1])
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conc8)
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
conv10 = Convolution2D(nlabels_pet, 1, 1, activation='softmax')(conv9)
model = keras.models.Model(input=[IN], output=conv10)
print(model.summary())
#set optimizer
ada = keras.optimizers.Adam(0.0001)
#compile the model
model.compile(loss = 'categorical_crossentropy', optimizer=ada,metrics=['acc'] )
#fit model
history = model.fit([X_train_pet],Y_train_pet, validation_data=([X_validate_pet], Y_validate_pet), epochs = 3)
#save model
model.save(model_name)
test_score = model.evaluate(X_test_pet, Y_test_pet)
print("Test :", test_score)
# + id="ntjdPpO3cvmA" colab_type="code" colab={}
###Create predictions for model
from predict import *
predict(model_name, 'pet_results/predict/test', 'pet_results/data', 'pet_results/report/images.csv', 'categorical_crossentropy', images_to_predict='all', category="test", verbose=True)
# + colab_type="code" id="uPeNV6NWzTpu" colab={}
#If using Google Chrome, can download directly through browser
#from google.colab import files
#from glob import glob
#for fn in glob('pet_results/predict/test/*.png') : files.download(fn)
#import matplotlib.pyplot as plt
#plt.imshow(plt.imread('pet_results/predict/test//sub-D13_ses-01_task-01_acq-rcl_dwn-smpl_pet_predict_2.png'))
# + [markdown] id="HPBrekZ2CrtV" colab_type="text"
# ##Exercises
#
# 1. Modify the template above so that it has fewer than 120,000 parameters.
#
# 2. Use a transpose convolution to perform upsampling steps in the U-Net template. What happens to the number of parameters? How does accuracy change relative to this?
#
# 3. Add another level of downsampling and up-sampling to the U-Net template. Remember to re-run the configuration cell with <pad_base=4> and <clobber=True> in order to pad the input images and labels appropriately relative to the number of times you use max pooling to downsample the images.
#
# 4. Run a U-Net architecture on the GM-WM segmentation task from part 1. How does the performance improvement compare to the increased number of parameters?
#
# 5. Using whatever techniques you like (downsampling, upsampling, dilations, drop-out, etc.), create the best architecture possible with the least possible number of parameters.
# + [markdown] id="HNM4QTBrWWnb" colab_type="text"
# ## Solutions
#
# + [markdown] id="NmCLa-y_EgLe" colab_type="text"
# ### 1.
# + id="_FXISR5VTer4" colab_type="code" colab={}
import keras
from keras.layers.convolutional import Conv2D
from keras.layers import Input
from custom_loss import *
from keras.utils import to_categorical
from keras.activations import relu
from keras.layers.core import Dropout
from keras.layers import Input, Add, Multiply, Dense, BatchNormalization
from keras.layers import LeakyReLU, MaxPooling2D, Conv2DTranspose, Concatenate, ZeroPadding2D, UpSampling2D, Convolution2D
from prepare_data import pad
### Warning : if you change the number of times you downsample with max_pool,
### then you need to rerun prepare_data() with pad_base=<number of downsample nodes>
model_name="mri_2.hdf5"
### 1) Define architecture of neural network
IN = Input(shape=(data_pet['image_dim'][1],data_pet['image_dim'][2] ,1))
BN1 = BatchNormalization()(IN)
conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(BN1)
conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)
up6 = UpSampling2D(size=(2, 2))(conv4)
#up6 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv6)
conc6 = Concatenate(axis=3)([up6, conv3])
conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conc6)
conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv7)
up7 = UpSampling2D(size=(2, 2))(conv7)
#up7 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv7)
conc7 = Concatenate(axis=3)([up7, conv2])
conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conc7) #(up8)
conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv8)
up8 = UpSampling2D(size=(2, 2))(conv8)
#up8 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv8)
conc8 = Concatenate(axis=3)([up8, conv1])
conv9 = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(conc8)
conv9 = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(conv9)
conv10 = Convolution2D(nlabels_pet, 1, 1, activation='softmax')(conv9)
model = keras.models.Model(input=[IN], output=conv10)
print(model.summary())
#set optimizer
ada = keras.optimizers.Adam(0.0001)
#compile the model
model.compile(loss = 'categorical_crossentropy', optimizer=ada,metrics=['acc'] )
#fit model
history = model.fit([X_train_pet],Y_train_pet, validation_data=([X_validate_pet], Y_validate_pet), epochs = 3)
#save model
model.save(model_name)
test_score = model.evaluate(X_test_pet, Y_test_pet)
print("Test :", test_score)
# + [markdown] id="jcGPyuVsWGmn" colab_type="text"
# ### 2.
# + id="GXtx5qDzVNci" colab_type="code" colab={}
import keras
from keras.layers.convolutional import Conv2D
from keras.layers import Input
from custom_loss import *
from keras.utils import to_categorical
from keras.activations import relu
from keras.layers.core import Dropout
from keras.layers import Input, Add, Multiply, Dense, BatchNormalization
from keras.layers import LeakyReLU, MaxPooling2D, Conv2DTranspose, Concatenate, ZeroPadding2D, Convolution2D
from prepare_data import pad
### Warning : if you change the number of times you downsample with max_pool,
### then you need to rerun prepare_data() with pad_base=<number of downsample nodes>
model_name="mri_3.hdf5"
### 1) Define architecture of neural network
IN = Input(shape=(data_pet['image_dim'][1],data_pet['image_dim'][2] ,1))
BN1 = BatchNormalization()(IN)
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(BN1)
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
up6 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv4)
conc6 = Concatenate(axis=3)([up6, conv3])
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conc6)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
up7 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv7)
conc7 = Concatenate(axis=3)([up7, conv2])
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conc7) #(up8)
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
up8 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv8)
conc8 = Concatenate(axis=3)([up8, conv1])
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conc8)
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
conv10 = Convolution2D(nlabels_pet, 1, 1, activation='softmax')(conv9)
model = keras.models.Model(input=[IN], output=conv10)
print(model.summary())
#set optimizer
ada = keras.optimizers.Adam(0.0001)
#compile the model
model.compile(loss = 'categorical_crossentropy', optimizer=ada,metrics=['acc'] )
#fit model
history = model.fit([X_train_pet],Y_train_pet, validation_data=([X_validate_pet], Y_validate_pet), epochs = 3)
#save model
model.save(model_name)
test_score = model.evaluate(X_test_pet, Y_test_pet)
print("Test :", test_score)
# + [markdown] id="Z23en20QVGpC" colab_type="text"
#
# + [markdown] id="J743HAKqWKJ9" colab_type="text"
# ### 3.
# + id="_JVNMLDzWL6b" colab_type="code" colab={}
import keras
from keras.layers.convolutional import Conv2D
from keras.layers import Input
from custom_loss import *
from keras.utils import to_categorical
from keras.activations import relu
from keras.layers.core import Dropout
from keras.layers import Input, Add, Multiply, Dense, BatchNormalization
from keras.layers import LeakyReLU, MaxPooling2D, Conv2DTranspose, Concatenate, ZeroPadding2D, UpSampling2D, Convolution2D
from prepare_data import pad
set_base_dir()
import minc_keras
from utils import *
import numpy as np
from minc_keras import *
setup_dirs('pet_pad_4_results')
### Load data from brain images and save them into .npy. Sort them into train/validate/test splits
[images_pet_pad_4, data_pet_pad_4]=prepare_data('pet/','pet_pad_4_results/data','pet_pad_4_results/report',\
input_str='_pet.mnc',label_str='brainmask',pad_base=4,ratios=[0.7,0.15],clobber=True)
### 1) Load data
Y_validate_pet_pad_4=np.load(data_pet_pad_4["validate_y_fn"]+'.npy')
nlabels_pet_pad_4=len(np.unique(Y_validate_pet_pad_4))
X_train_pet_pad_4=np.load(data_pet_pad_4["train_x_fn"]+'.npy')
Y_train_pet_pad_4=np.load(data_pet_pad_4["train_y_fn"]+'.npy')
X_validate_pet_pad_4=np.load(data_pet_pad_4["validate_x_fn"]+'.npy')
X_test_pet_pad_4=np.load(data_pet_pad_4["test_x_fn"]+'.npy')
Y_test_pet_pad_4=np.load(data_pet_pad_4["test_y_fn"]+'.npy')
Y_test_pet_pad_4=to_categorical(Y_test_pet_pad_4)
Y_train_pet_pad_4 = to_categorical(Y_train_pet_pad_4, num_classes=nlabels_pet_pad_4)
Y_validate_pet_pad_4 = to_categorical(Y_validate_pet_pad_4, num_classes=nlabels_pet_pad_4)
### Warning : if you change the number of times you downsample with max_pool,
### then you need to rerun prepare_data() with pad_base=<number of downsample nodes>
model_name="mri_4.hdf5"
### 1) Define architecture of neural network
IN = Input(shape=(data_pet_pad_4['image_dim'][1],data_pet_pad_4['image_dim'][2] ,1))
BN1 = BatchNormalization()(IN)
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(BN1)
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
up5 = UpSampling2D(size=(2, 2))(conv5)
#up6 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv6)
conc5 = Concatenate(axis=3)([up5, conv4])
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conc5)
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
up6 = UpSampling2D(size=(2, 2))(conv6)
#up6 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv6)
conc6 = Concatenate(axis=3)([up6, conv3])
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conc6)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
up7 = UpSampling2D(size=(2, 2))(conv7)
#up7 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv7)
conc7 = Concatenate(axis=3)([up7, conv2])
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conc7) #(up8)
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
up8 = UpSampling2D(size=(2, 2))(conv8)
#up8 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv8)
conc8 = Concatenate(axis=3)([up8, conv1])
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conc8)
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
conv10 = Convolution2D(nlabels_pet_pad_4, 1, 1, activation='softmax')(conv9)
model = keras.models.Model(input=[IN], output=conv10)
print(model.summary())
#set optimizer
ada = keras.optimizers.Adam(0.0001)
#compile the model
model.compile(loss = 'categorical_crossentropy', optimizer=ada,metrics=['acc'] )
#fit model
history = model.fit([X_train_pet_pad_4],Y_train_pet_pad_4, validation_data=([X_validate_pet_pad_4], Y_validate_pet_pad_4), epochs = 3)
#save model
model.save(model_name)
test_score = model.evaluate(X_test_pet_pad_4, Y_test_pet_pad_4)
print("Test :", test_score)
# + [markdown] id="j6jBPK8qEn26" colab_type="text"
# ### 4.
# + colab_type="code" id="lzd3DkZFzTpw" colab={}
import keras
from keras.layers.convolutional import Conv2D
from keras.layers import Input
from custom_loss import *
from keras.utils import to_categorical
from keras.activations import relu
from keras.layers.core import Dropout
from keras.layers import Input, Add, Multiply, Dense, BatchNormalization
from keras.layers import LeakyReLU, MaxPooling2D, Conv2DTranspose, Concatenate, ZeroPadding2D, UpSampling2D, Convolution2D
from prepare_data import pad
### Load data from brain images and save them into .npy. Sort them into train/validate/test splits
setup_dirs('mri_pad_4_results')
[images_mri_pad_4, data_mri_pad_4] = prepare_data('mri','mri_pad_4_results/data', 'mri_pad_4_results/report', input_str='_T1w_anat_rsl.mnc',\
label_str='variant-seg',images_fn='mri_unet.csv',pad_base=4, clobber=True)
### 1) Load data
Y_validate_mri_pad_4=np.load(data_mri_pad_4["validate_y_fn"]+'.npy')
nlabels_mri_pad_4=len(np.unique(Y_validate_mri_pad_4))
X_train_mri_pad_4=np.load(data_mri_pad_4["train_x_fn"]+'.npy')
Y_train_mri_pad_4=np.load(data_mri_pad_4["train_y_fn"]+'.npy')
X_validate_mri_pad_4=np.load(data_mri_pad_4["validate_x_fn"]+'.npy')
X_test_mri_pad_4=np.load(data_mri_pad_4["test_x_fn"]+'.npy')
Y_test_mri_pad_4=np.load(data_mri_pad_4["test_y_fn"]+'.npy')
Y_test_mri_pad_4=to_categorical(Y_test_mri_pad_4)
Y_train_mri_pad_4 = to_categorical(Y_train_mri_pad_4, num_classes=nlabels_mri_pad_4)
Y_validate_mri_pad_4 = to_categorical(Y_validate_mri_pad_4, num_classes=nlabels_mri_pad_4)
### Warning : if you change the number of times you downsample with max_pool,
### then you need to rerun prepare_data() with pad_base=<number of downsample nodes>
model_name="mri_unet.hdf5"
### 1) Define architecture of neural network
IN = Input(shape=(data_mri_pad_4['image_dim'][1],data_mri_pad_4['image_dim'][2] ,1))
BN1 = BatchNormalization()(IN)
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(BN1)
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
up5 = UpSampling2D(size=(2, 2))(conv5)
#up6 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv6)
conc5 = Concatenate(axis=3)([up5, conv4])
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conc5)
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
up6 = UpSampling2D(size=(2, 2))(conv6)
#up6 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv6)
conc6 = Concatenate(axis=3)([up6, conv3])
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conc6)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
up7 = UpSampling2D(size=(2, 2))(conv7)
#up7 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv7)
conc7 = Concatenate(axis=3)([up7, conv2])
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conc7) #(up8)
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
up8 = UpSampling2D(size=(2, 2))(conv8)
#up8 = Conv2DTranspose( filters=512, kernel_size=(3,3), strides=(2, 2), padding='same')(conv8)
conc8 = Concatenate(axis=3)([up8, conv1])
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conc8)
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
conv10 = Convolution2D(nlabels_mri_pad_4, 1, 1, activation='softmax')(conv9)
model = keras.models.Model(input=[IN], output=conv10)
print(model.summary())
#set optimizer
ada = keras.optimizers.Adam(0.0001)
#compile the model
model.compile(loss = 'categorical_crossentropy', optimizer=ada,metrics=['acc'] )
#fit model
history = model.fit([X_train_mri_pad_4],Y_train_mri_pad_4, validation_data=([X_validate_mri_pad_4], Y_validate_mri_pad_4), epochs = 3)
#save model
model.save(model_name)
test_score = model.evaluate(X_test_mri_pad_4, Y_test_mri_pad_4)
print("Test :", test_score)
# + id="XlDzUb8nLIzc" colab_type="code" colab={}
###Create predictions for model
from predict import *
# NOTE: the original call passed an undefined `test_dir`; the output directory below is an assumed value chosen to be consistent with the directories used above
predict('mri_unet.hdf5', 'mri_pad_4_results/predict/test', 'mri_results/data', 'mri_results/report/mri_unet.csv', 'categorical_crossentropy', images_to_predict='all', category="test", verbose=True)
# + id="8SgbItuJdQPd" colab_type="code" colab={}
from google.colab import files
from glob import glob
for fn in glob('pet_results/predict/test/*.png') :
print(fn)
files.download(fn)
# + [markdown] id="v9SQSNiSIz-7" colab_type="text"
# ### 5.
#
# Modify the code from solution 3 to improve the network for PET segmentation.
#
# Modify the answer to question 4 to improve MRI segmentation.
# + id="1fGv2-swLH0i" colab_type="code" colab={}
| main2018.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
'''
Load packages for working with data
'''
import numpy as np
import pandas as pd
'''
Load packages for plotting graphs
'''
import matplotlib.pyplot as plt
import seaborn as sns
'''
Load packages for model deployment and evaluation
'''
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, \
QuadraticDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
import sklearn.linear_model as skl_lm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split, \
KFold, \
cross_val_score
from sklearn.metrics import auc, \
confusion_matrix, \
classification_report, \
roc_curve, \
roc_auc_score, \
precision_recall_curve, \
average_precision_score, \
accuracy_score, \
balanced_accuracy_score, \
precision_score, \
recall_score
'''
Suppress warnings
'''
import warnings
warnings.filterwarnings('ignore')
'''
Load StandardScaler for standardization
'''
from sklearn.preprocessing import StandardScaler
# +
'''
Load data with appropriate column names
'''
german_credit = pd.read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/statlog/german/german.data", \
delimiter=' ', header=None)
german_credit.columns = ["chk_acct", "duration", "credit_his", "purpose",
"amount", "saving_acct", "present_emp", "installment_rate", "sex", "other_debtor",
"present_resid", "property", "age", "other_install", "housing", "n_credits",
"job", "n_people", "telephone", "foreign", "response"]
german_credit.response = german_credit.response - 1
# -
'''
dummification
'''
german_credit_dummies = pd.get_dummies(german_credit, drop_first = True)
'''
Separate features and label from the original german dataset
'''
X = german_credit_dummies.drop(columns=['response'])
y = german_credit_dummies['response']
# y = y.values - 1
print('The number of observation is: {}'.format(len(y)))
print('The number of positive class is: {}'.format(sum(y)))
print('The number of negative class is: {}'.format(sum(y == 0)))
'''
train and test datasets
'''
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42, shuffle=True)
print('The number of observation in training set is: {}'.format(len(y_train)))
print('The number of positive class in training set is: {}'.format(sum(y_train)))
print('The number of negative class in training set is: {}'.format(sum(y_train == 0)))
def roc_curve_plot(fpr, tpr):
'''
Plot ROC curve
Parameters:
fpr: float
tpr: float
Returns:
plot: ROC curve graph
'''
x = np.linspace(0,1,100)
plt.figure(figsize = (10,6))
plt.plot(fpr, tpr)
plt.plot(x,x,".", markersize = 1.6)
plt.title("ROC Curve")
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.show()
# ## 1. Apply LDA on the training set. Draw the ROC curve and calculate the AUC
# +
# sc = StandardScaler()
# X_train = sc.fit_transform(X_train)
# X_test = sc.transform(X_test)
# -
'''
Create the model and fit on training dataset
'''
lda = LinearDiscriminantAnalysis(tol = 0.0000001).fit(X_train,y_train)
'''
Measures which will be used for confusion-matrix and ROC-curve calculation
'''
pred = lda.predict(X_test)
pred_prob = lda.predict_proba(X_test)
'''
Obtain confusion_matrix
'''
tn, fp, fn, tp = confusion_matrix(y_true = y_test, y_pred = pred, labels = np.array([0,1])).ravel()
print(tn, fp, fn, tp)
'''
Calculate auc(Area Under the Curve) for positive class
'''
fpr, tpr, thresholds = roc_curve(y_true = y_test, y_score = pred_prob[:,1], pos_label = 1)
auc_lda = auc(fpr,tpr)
print(auc_lda)
roc_curve_plot(fpr=fpr, tpr=tpr)
# ## 2. Apply QDA on the training set. Draw the ROC curve and calculate the AUC.
'''
Create the model and fit on training dataset
'''
qda = QuadraticDiscriminantAnalysis(tol = 0.0000001).fit(X_train,y_train)
'''
Measures which will be used for confusion-matrix and ROC-curve calculation
'''
pred = qda.predict(X_test)
pred_prob = qda.predict_proba(X_test)
'''
Obtain confusion_matrix
'''
tn, fp, fn, tp = confusion_matrix(y_true = y_test, y_pred = pred, labels = np.array([0,1])).ravel()
print(tn, fp, fn, tp)
'''
Calculate auc(Area Under the Curve) for positive class
'''
fpr, tpr, thresholds = roc_curve(y_true = y_test, y_score = pred_prob[:,1], pos_label = 1)
auc_qda = auc(fpr,tpr)
print(auc_qda)
roc_curve_plot(fpr=fpr, tpr=tpr)
# ## 3. Apply Naïve Bayes on the training set. Draw the ROC curve and calculate the AUC
'''
Create the model and fit on training dataset
'''
gnb = GaussianNB().fit(X_train,y_train)
'''
Measures which will be used for confusion-matrix and ROC-curve calculation
'''
pred = gnb.predict(X_test)
pred_prob = gnb.predict_proba(X_test)
'''
Obtain confusion_matrix
'''
tn, fp, fn, tp = confusion_matrix(y_true = y_test, y_pred = pred, labels = np.array([0,1])).ravel()
print(tn, fp, fn, tp)
'''
Calculate auc(Area Under the Curve) for positive class
'''
fpr, tpr, thresholds = roc_curve(y_true = y_test, y_score = pred_prob[:,1], pos_label = 1)
auc_gnb = auc(fpr,tpr)
print(auc_gnb)
roc_curve_plot(fpr=fpr, tpr=tpr)
# ## 4. Apply Logistic Regression on the training set. Draw the ROC curve and calculate the AUC
'''
Create the model and fit on training dataset
'''
lr = skl_lm.LogisticRegression(max_iter=1000,tol=0.000001).fit(X_train,y_train)
'''
Measures which will be used for confusion-matrix and ROC-curve calculation
'''
pred = lr.predict(X_test)
pred_prob = lr.predict_proba(X_test)
'''
Obtain confusion_matrix
'''
tn, fp, fn, tp = confusion_matrix(y_true = y_test, y_pred = pred, labels = np.array([0,1])).ravel()
print(tn, fp, fn, tp)
'''
Calculate auc(Area Under the Curve) for positive class
'''
fpr, tpr, thresholds = roc_curve(y_true = y_test, y_score = pred_prob[:,1], pos_label = 1)
auc_lr = auc(fpr,tpr)
print(auc_lr)
roc_curve_plot(fpr=fpr, tpr=tpr)
# ## 5. Apply k-NN on the training set and find the optimal value of the parameter k by 10-fold cross-validation. For the optimal model, draw the ROC curve and calculate the AUC
# +
'''
Find the optimal value of the parameter k for knn
'''
K = np.arange(1,25)
scores = []
for k in K:
knn = KNeighborsClassifier(n_neighbors=k)
kfold = KFold(n_splits=10)
score = cross_val_score(knn, X_train, y_train, cv = kfold, scoring = "accuracy")
scores.append(score)
# +
'''
Plot accuracy over various k to find the optimal one.
'''
plt.figure(figsize = (10,6))
plt.plot(K, np.mean(np.array(scores), axis = 1))
plt.title('Accuracy score over k on training set')
plt.xlabel('k')
plt.ylabel('accuracy')
plt.show()
# +
'''
Fit the best model
'''
optimal_k = np.argmax(np.mean(scores, axis=1)) + 1
optimal_knn = KNeighborsClassifier(n_neighbors = optimal_k)
optimal_knn.fit(X_train,y_train)
# -
'''
Measures which will be used for confusion-matrix and ROC-curve calculation
'''
pred = optimal_knn.predict(X_test)
pred_prob = optimal_knn.predict_proba(X_test)
'''
Obtain confusion_matrix
'''
tn, fp, fn, tp = confusion_matrix(y_true = y_test, y_pred = pred, labels = np.array([0,1])).ravel()
print(tn, fp, fn, tp)
'''
Calculate auc(Area Under the Curve) for positive class
'''
fpr, tpr, thresholds = roc_curve(y_true = y_test, y_score = pred_prob[:,1], pos_label = 1)
auc_knn = auc(fpr,tpr)
print(auc_knn)
roc_curve_plot(fpr=fpr, tpr=tpr)
# ## 6. Compare AUC measures of different models. Find the best model.
'''
Print the all obtained auc's
'''
print('auc of lda model is: {}'.format(auc_lda))
print('auc of qda model is: {}'.format(auc_qda))
print('auc of gnb model is: {}'.format(auc_gnb))
print('auc of lr model is: {}'.format(auc_lr))
print('auc of knn model is: {}'.format(auc_knn))
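'''
A compact way to pick the winner programmatically (a small sketch using the AUC values computed
above; auc_scores and best_model_name are new helper names)
'''
auc_scores = {'lda': auc_lda, 'qda': auc_qda, 'gnb': auc_gnb, 'lr': auc_lr, 'knn': auc_knn}
best_model_name = max(auc_scores, key=auc_scores.get)
print('Best model by AUC: {} ({:.3f})'.format(best_model_name, auc_scores[best_model_name]))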
# ## 7. For the best model, calculate the test Accuracy, Balanced Accuracy, Sensitivity and Precision of the positive class
'''
As we can see from the AUC values printed above, the best model is logistic regression, so I will calculate the appropriate metrics for that model.
Measures which will be used for confusion-matrix and ROC-curve calculation
'''
pred = lr.predict(X_test)
pred_prob = lr.predict_proba(X_test)
'''
Obtain confusion_matrix
'''
tn, fp, fn, tp = confusion_matrix(y_true = y_test, y_pred = pred, labels = np.array([0,1])).ravel()
'''
False positive rate and True positive rate for positive class
'''
fpr, tpr, thresholds = roc_curve(y_true = y_test, y_score = pred_prob[:,1], pos_label = 1)
# +
'''
Classification report on test values
'''
print(classification_report(y_test, pred, digits = 3))
# +
'''
Calculation of metrics using standard functions
'''
print('Accuracy: {}'.format(accuracy_score(y_test,pred)))
print('Balanced accuracy: {}'.format(balanced_accuracy_score(y_test, pred)))
print('Precision: {}'.format(precision_score(y_test, pred)))
print('Recall: {}'.format(recall_score(y_test, pred)))
# -
'''
Calculation of metrics from scratch
'''
def accuracy(predicted, actual):
'''
Calculate accuracy of the model from scratch
Parameters:
predicted (list): predicted response
actual (list): actual values of response
Return:
float: accuracy
'''
if isinstance(actual, pd.Series):
actual = actual.values.tolist()
if isinstance(predicted, pd.Series):
predicted = predicted.values.tolist()
k = 0
for i in range(len(predicted)):
if predicted[i] == actual[i]:
k = k + 1
accuracy = k/len(predicted)
return accuracy
# +
'''
Calculation of metrics from scratch
'''
print('Accuracy: {}'.format(accuracy(pred, y_test)))
print('Balanced accuracy: {}'.format(balanced_accuracy_score(y_test, pred)))
print('Precision: {}'.format(tp/(tp + fp)))
print('Recall: {}'.format(tp/(tp + fn)))
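'''
For completeness, balanced accuracy can also be derived from scratch using the confusion-matrix
counts obtained above (a small sketch; tn, fp, fn, tp are assumed to still hold the
logistic-regression values)
'''
sensitivity = tp / (tp + fn)  # true positive rate
specificity = tn / (tn + fp)  # true negative rate
print('Balanced accuracy (from scratch): {}'.format((sensitivity + specificity) / 2))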
| Homework/Part 1/Homework_5/levon.khachatryan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Context Manager aka the Measurement Object
#
# This notebook shows some ways of performing different measurements using
# QCoDeS parameters and the new DataSet accessed via a context manager. Here, it is assumed that the reader has some degree of familiarity with fundamental objects and methods of QCoDeS.
#
# Let us start with necessary imports:
# +
# %matplotlib notebook
import numpy.random as rd
import matplotlib.pyplot as plt
from functools import partial
import numpy as np
from time import sleep, monotonic
import qcodes as qc
from qcodes import Station, load_or_create_experiment, \
initialise_database, Measurement, load_by_run_spec, load_by_guid
from qcodes.tests.instrument_mocks import DummyInstrument
from qcodes.dataset.plotting import plot_dataset
qc.logger.start_all_logging()
# -
# In what follows, we shall define some utility functions as well as declare our dummy instruments. We then add these instruments to a ``Station`` object.
# +
# a generator to simulate a physical signal, in this case an exponentially
# decaying signal
def exponential_decay(a: float, b: float):
"""
Yields a*exp(-b*x) where x is put in
"""
x = 0
while True:
x = yield
yield a*np.exp(-b*x) + 0.02*a*np.random.randn()
# +
# preparatory mocking of physical setup
dac = DummyInstrument('dac', gates=['ch1', 'ch2'])
dmm = DummyInstrument('dmm', gates=['v1', 'v2'])
station = qc.Station(dmm, dac)
# +
# and then a bit of "wiring" to make the dmm "measure"
# the exponential decay
ed = exponential_decay(5, 0.2)
next(ed)
def customgetter(dac):
val = ed.send(dac.ch1())
next(ed)
return val
dmm.v1.get = partial(customgetter, dac)
# +
# now make some silly set-up and tear-down actions
def veryfirst():
print('Starting the measurement')
def numbertwo(inst1, inst2):
print('Doing stuff with the following two instruments: {}, {}'.format(inst1, inst2))
def thelast():
print('End of experiment')
# -
# **Database and experiments may be missing**
#
# If this is the first time you create a dataset, the underlying database file has
# most likely not been created. The following cell creates the database file. Please
# refer to documentation on [`The Experiment Container`](The-Experiment-Container.ipynb) for details.
#
# Furthermore, datasets are associated to an experiment. By default the run
# is appended to the latest existing experiments. If no experiment has been created,
# we must create one. We do that by calling the `load_or_create_experiment` function.
#
# Here we explicitly pass the loaded or created experiment to the `Measurement` object to ensure that we are always
# using the `dataset_context_manager` `Experiment` created within this tutorial.
initialise_database()
exp = load_or_create_experiment(experiment_name='dataset_context_manager',
sample_name="no sample")
# +
# And then run an experiment
meas = Measurement(exp=exp)
meas.register_parameter(dac.ch1) # register the first independent parameter
meas.register_parameter(dmm.v1, setpoints=(dac.ch1,))  # now register the dependent one
meas.add_before_run(veryfirst, ()) # add a set-up action
meas.add_before_run(numbertwo, (dmm, dac)) # add another set-up action
meas.add_after_run(thelast, ()) # add a tear-down action
meas.write_period = 2
with meas.run() as datasaver:
for set_v in np.linspace(0, 25, 10):
dac.ch1.set(set_v)
get_v = dmm.v1.get()
datasaver.add_result((dac.ch1, set_v),
(dmm.v1, get_v))
dataset = datasaver.dataset # convenient to have for plotting
# -
ax, cbax = plot_dataset(dataset)
# ### Exporting data
# QCoDeS ``DataSet`` implements a number of methods for accessing the data of a given dataset. Here we will concentrate on the two most user friendly methods.
# The method `get_parameter_data` returns the data as a dictionary of ``numpy`` arrays. The dictionary is indexed by the measured (dependent) parameter in the outermost level and the names of the dependent and independent parameters in the innermost level. The first parameter in the innermost level is always the dependent parameter.
datasaver.dataset.get_parameter_data()
# By default `get_parameter_data` returns all data stored in the dataset. The data that is specific to one or more measured parameters can be returned by passing the parameter name(s) or by using `ParamSpec` object:
datasaver.dataset.get_parameter_data('dmm_v1')
# You can also simply fetch the data for one or more of the independent (setpoint) parameters
datasaver.dataset.get_parameter_data('dac_ch1')
# The data can also be exported as one or more [Pandas](https://pandas.pydata.org/) DataFrames. The DataFrames are returned as a dictionary from measured parameters to DataFrames.
datasaver.dataset.get_data_as_pandas_dataframe()['dmm_v1']
# For more details about using Pandas and XArray see [Working With Pandas and XArray](./Working-With-Pandas-and-XArray.ipynb)
# ## Reloading datasets
# To load existing datasets QCoDeS provides several functions. The most useful and generic function is called `load_by_run_spec`.
# This function takes one or more pieces of information about a dataset. If that information identifies the dataset uniquely, the dataset is loaded; otherwise, information about all matching datasets is printed so that you can supply more details and identify the dataset uniquely.
# Here, we will load a dataset based on the `captured_run_id` printed on the plot above.
datasaver.dataset.captured_run_id
loaded_ds = load_by_run_spec(captured_run_id=datasaver.dataset.captured_run_id)
loaded_ds.the_same_dataset_as(datasaver.dataset)
# As long as you are working within one database file the dataset should be uniquely identified by `captured_run_id`. However, once you mix several datasets from different database files this is likely not unique. See the following section and [Extracting runs from one DB file to another](Extracting-runs-from-one-DB-file-to-another.ipynb) for more information on how to handle this.
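# If needed, `load_by_run_spec` also accepts additional specifications, such as the experiment and sample names, to disambiguate the run (a small sketch; the names below are the ones used when creating the experiment in this notebook):
loaded_ds = load_by_run_spec(captured_run_id=datasaver.dataset.captured_run_id,
                             experiment_name='dataset_context_manager',
                             sample_name='no sample')
loaded_ds.the_same_dataset_as(datasaver.dataset)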
# ### GUID
# Internally each dataset is referred to by a Globally Unique Identifier (GUID) that ensures that the dataset is uniquely identified even when mixing datasets from several databases with potentially identical captured_run_id, experiment and sample names.
# A dataset can always be reloaded from the GUID if known.
print(f"Dataset GUID is: {datasaver.dataset.guid}")
loaded_ds = load_by_guid(datasaver.dataset.guid)
loaded_ds.the_same_dataset_as(datasaver.dataset)
# ## The power of the new construct
#
# This new form is so free that we may easily do things that were impossible with the old Loop construct
# +
# from the above plot, we decide that a voltage below
# 1 V is uninteresting, so we stop the sweep at that point
# thus, we do not know in advance how many points we'll measure
with meas.run() as datasaver:
for set_v in np.linspace(0, 25, 100):
dac.ch1.set(set_v)
get_v = dmm.v1.get()
datasaver.add_result((dac.ch1, set_v),
(dmm.v1, get_v))
if get_v < 1:
break
dataset = datasaver.dataset # convenient to have for plotting
# -
ax, cbax = plot_dataset(dataset)
# +
# Or we might want to simply get as many points as possible in 10 s
# randomly sampling the region between 5 V and 10 V (for the setpoint axis)
from time import monotonic, sleep
with meas.run() as datasaver:
t_start = monotonic()
while monotonic() - t_start < 10:
set_v = 10/2*(np.random.rand() + 1)
dac.ch1.set(set_v)
# some sleep to not get too many points (or to let the system settle)
sleep(0.1)
get_v = dmm.v1.get()
datasaver.add_result((dac.ch1, set_v),
(dmm.v1, get_v))
dataset = datasaver.dataset # convenient to have for plotting
# -
axes, cbax = plot_dataset(dataset)
# we slightly tweak the plot to better visualise the highly non-standard axis spacing
axes[0].lines[0].set_marker('o')
axes[0].lines[0].set_markerfacecolor((0.6, 0.6, 0.9))
axes[0].lines[0].set_markeredgecolor((0.4, 0.6, 0.9))
axes[0].lines[0].set_color((0.8, 0.8, 0.8))
# ## Some 2D examples
# +
# For the 2D, we'll need a new batch of parameters, notably one with two
# other parameters as setpoints. We therefore define a new Measurement
# with new parameters
meas = Measurement(exp=exp)
meas.register_parameter(dac.ch1) # register the first independent parameter
meas.register_parameter(dac.ch2) # register the second independent parameter
meas.register_parameter(dmm.v1, setpoints=(dac.ch1, dac.ch2))  # now register the dependent one
# -
# and we'll make a 2D gaussian to sample from/measure
def gauss_model(x0: float, y0: float, sigma: float, noise: float=0.0005):
"""
Returns a generator sampling a gaussian. The gaussian is
normalised such that its maximal value is simply 1
"""
while True:
(x, y) = yield
model = np.exp(-((x0-x)**2+(y0-y)**2)/2/sigma**2)*np.exp(2*sigma**2)
        sample_noise = np.random.randn()*noise
        yield model + sample_noise
# +
# and finally wire up the dmm v1 to "measure" the gaussian
gauss = gauss_model(0.1, 0.2, 0.25)
next(gauss)
def measure_gauss(dac):
val = gauss.send((dac.ch1.get(), dac.ch2.get()))
next(gauss)
return val
dmm.v1.get = partial(measure_gauss, dac)
# +
# run a 2D sweep
with meas.run() as datasaver:
for v1 in np.linspace(-1, 1, 200):
for v2 in np.linspace(-1, 1, 200):
dac.ch1(v1)
dac.ch2(v2)
val = dmm.v1.get()
datasaver.add_result((dac.ch1, v1),
(dac.ch2, v2),
(dmm.v1, val))
dataset = datasaver.dataset # convenient to have for plotting
# -
# When exporting a two- or higher-dimensional dataset as a Pandas DataFrame, a [MultiIndex](https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html) is used to index the measured parameter based on all of its dependencies
datasaver.dataset.get_data_as_pandas_dataframe()['dmm_v1'][0:10]
# If your data is on a regular grid it may make sense to view the data as an [XArray](https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html) Dataset. The Pandas DataFrame can be directly exported to a XArray Dataset.
datasaver.dataset.get_data_as_pandas_dataframe()['dmm_v1'].to_xarray()
# Note, however, that XArray is only suited for data that is on a rectangular grid with few or no missing values.
ax, cbax = plot_dataset(dataset)
# +
# Looking at the above picture, we may decide to sample more finely in the central
# region
with meas.run() as datasaver:
v1points = np.concatenate((np.linspace(-1, -0.5, 5),
np.linspace(-0.51, 0.5, 200),
np.linspace(0.51, 1, 5)))
v2points = np.concatenate((np.linspace(-1, -0.25, 5),
np.linspace(-0.26, 0.5, 200),
np.linspace(0.51, 1, 5)))
for v1 in v1points:
for v2 in v2points:
dac.ch1(v1)
dac.ch2(v2)
val = dmm.v1.get()
datasaver.add_result((dac.ch1, v1),
(dac.ch2, v2),
(dmm.v1, val))
dataset = datasaver.dataset # convenient to have for plotting
# -
ax, cbax = plot_dataset(dataset)
# +
# or even perform an adaptive sweep... ooohh...
#
# This example is a not-very-clever toy model,
# but it nicely shows a semi-realistic measurement that the old qc.Loop
# could not handle
v1_points = np.linspace(-1, 1, 250)
v2_points = np.linspace(1, -1, 250)
threshold = 0.25
with meas.run() as datasaver:
# Do normal sweeping until the peak is detected
for v2ind, v2 in enumerate(v2_points):
for v1ind, v1 in enumerate(v1_points):
dac.ch1(v1)
dac.ch2(v2)
val = dmm.v1.get()
datasaver.add_result((dac.ch1, v1),
(dac.ch2, v2),
(dmm.v1, val))
if val > threshold:
break
else:
continue
break
print(v1ind, v2ind, val)
print('-'*10)
# now be more clever, meandering back and forth over the peak
doneyet = False
rowdone = False
v1_step = 1
while not doneyet:
v2 = v2_points[v2ind]
v1 = v1_points[v1ind+v1_step-1]
dac.ch1(v1)
dac.ch2(v2)
val = dmm.v1.get()
datasaver.add_result((dac.ch1, v1),
(dac.ch2, v2),
(dmm.v1, val))
if val < threshold:
if rowdone:
doneyet = True
v2ind += 1
v1_step *= -1
rowdone = True
else:
v1ind += v1_step
rowdone = False
dataset = datasaver.dataset # convenient to have for plotting
# -
ax, cbax = plot_dataset(dataset)
# ## Random sampling
# We may also choose to sample completely randomly across the phase space
# +
gauss = gauss_model(0.1, 0.2, 0.25)
next(gauss)
def measure_gauss(x, y):
val = gauss.send((x, y))
next(gauss)
return val
# +
v1_points = np.linspace(-1, 1, 250)
v2_points = np.linspace(1, -1, 250)
threshold = 0.25
npoints = 5000
with meas.run() as datasaver:
for i in range(npoints):
x = 2*(np.random.rand()-.5)
y = 2*(np.random.rand()-.5)
z = measure_gauss(x,y)
datasaver.add_result((dac.ch1, x),
(dac.ch2, y),
(dmm.v1, z))
dataset = datasaver.dataset # convenient to have for plotting
# -
ax, cbax = plot_dataset(dataset)
datasaver.dataset.get_data_as_pandas_dataframe()['dmm_v1'][0:10]
# Unlike the data measured above, which lies on a grid, here all the measured data points have a unique combination of the two setpoint parameters. When exporting to XArray, NaNs will therefore replace all the missing combinations of `dac_ch1` and `dac_ch2`, and the data is unlikely to be useful in this format.
# +
#df_sliced = datasaver.dataset.get_data_as_pandas_dataframe()['dmm_v1'].sort_index()[0:10]
#df_sliced.index = df_sliced.index.remove_unused_levels()
#df_sliced.to_xarray()
# -
# ## Optimiser
# An example to show that this approach is flexible enough to be used with completely unstructured data, such as the output of a downhill simplex optimization. The downhill simplex is somewhat more sensitive to noise, so it is important that 'fatol' is set to match the expected noise.
from scipy.optimize import minimize
# +
noise = 0.0005
gauss = gauss_model(0.1, 0.2, 0.25, noise=noise)
next(gauss)
def measure_gauss(x, y):
val = gauss.send((x, y))
next(gauss)
return val
# -
x0 = [np.random.rand(), np.random.rand()]
with meas.run() as datasaver:
def mycallback(xk):
datasaver.add_result((dac.ch1, xk[0]),
(dac.ch2, xk[1]),
(dmm.v1, measure_gauss(xk[0], xk[1])))
res = minimize(lambda x: -measure_gauss(*x), x0, method='Nelder-Mead', tol=1e-10,
callback=mycallback, options={'fatol': noise})
dataset = datasaver.dataset # convenient to have for plotting
res
ax, cbax = plot_dataset(dataset)
# ## Subscriptions
#
# The ``Measurement`` object can also handle subscriptions to the dataset. Subscriptions are, under the hood, triggers in the underlying SQLite database. Therefore, the subscribers are only called when data is written to the database (which happens every `write_period`).
#
# When making a subscription, two things must be supplied: a function and a mutable state object. The function **MUST** have a call signature of `f(result_list, length, state, **kwargs)`, where ``result_list`` is a list of tuples of parameter values inserted in the dataset, ``length`` is an integer (the step number of the run), and ``state`` is the mutable state object. The function does not need to actually use these arguments, but the call signature must match this.
#
# Let us consider two generic examples:
# ### Subscription example 1: simple printing
# +
def print_which_step(results_list, length, state):
"""
This subscriber does not use results_list nor state; it simply
prints how many results we have added to the database
"""
print(f'The run now holds {length} rows')
meas = Measurement(exp=exp)
meas.register_parameter(dac.ch1)
meas.register_parameter(dmm.v1, setpoints=(dac.ch1,))
meas.write_period = 1 # We write to the database every 1 second
meas.add_subscriber(print_which_step, state=[])
with meas.run() as datasaver:
for n in range(10):
datasaver.add_result((dac.ch1, n), (dmm.v1, n**2))
print(f'Added points to measurement, step {n}.')
sleep(0.5)
# -
# ### Subscription example 2: using the state
#
# We add two subscribers now.
# +
def get_list_of_first_param(results_list, length, state):
"""
Modify the state (a list) to hold all the values for
the first parameter
"""
param_vals = [parvals[0] for parvals in results_list]
state += param_vals
meas = Measurement(exp=exp)
meas.register_parameter(dac.ch1)
meas.register_parameter(dmm.v1, setpoints=(dac.ch1,))
meas.write_period = 1 # We write to the database every 1 second
first_param_list = []
meas.add_subscriber(print_which_step, state=[])
meas.add_subscriber(get_list_of_first_param, state=first_param_list)
with meas.run() as datasaver:
for n in range(10):
datasaver.add_result((dac.ch1, n), (dmm.v1, n**2))
print(f'Added points to measurement, step {n}.')
print(f'First parameter value list: {first_param_list}')
sleep(0.5)
# -
# ## QCoDeS Array and MultiParameter
# The ``Measurement`` object supports automatic handling of ``Array`` and ``MultiParameters``. When registering these parameters
# the individual components are unpacked and added to the dataset as if they were separate parameters. Let's consider a ``MultiParameter`` with array components as the most general case.
#
# First lets use a dummy instrument that produces data as ``Array`` and ``MultiParameters``.
from qcodes.tests.instrument_mocks import DummyChannelInstrument
mydummy = DummyChannelInstrument('MyDummy')
# This instrument produces two ``Array``s with the names, shapes and setpoints given below.
mydummy.A.dummy_2d_multi_parameter.names
mydummy.A.dummy_2d_multi_parameter.shapes
mydummy.A.dummy_2d_multi_parameter.setpoint_names
# +
meas = Measurement(exp=exp)
meas.register_parameter(mydummy.A.dummy_2d_multi_parameter)
meas.parameters
# -
# When adding the MultiParameter to the measurement we can see that we add each of the individual components as a
# separate parameter.
with meas.run() as datasaver:
datasaver.add_result((mydummy.A.dummy_2d_multi_parameter, mydummy.A.dummy_2d_multi_parameter()))
# And when adding the result of a ``MultiParameter`` it is automatically unpacked into its components.
plot_dataset(datasaver.dataset)
datasaver.dataset.get_parameter_data('that')
datasaver.dataset.get_data_as_pandas_dataframe()['that']
datasaver.dataset.get_data_as_pandas_dataframe()['that'].to_xarray()
| docs/examples/DataSet/Dataset Context Manager.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="ZOma4jpKeYm6" outputId="80d419d6-26eb-4b8c-fe84-993187bd2152"
from google.colab import drive
drive.mount('/content/drive')
# + id="MHeMvFpkR-Oe"
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import RMSprop
# + id="egZwLd-iJDED"
import glob
import cv2
import numpy as np
with tf.device('/device:GPU:0'):
IMG_DIR='/content/drive/MyDrive/PIAIC/flower-recognition/daisy'
IMG_DIR1='/content/drive/MyDrive/PIAIC/flower-recognition/dandelion'
IMG_DIR2='/content/drive/MyDrive/PIAIC/flower-recognition/rose'
IMG_DIR3='/content/drive/MyDrive/PIAIC/flower-recognition/sunflower'
IMG_DIR4='/content/drive/MyDrive/PIAIC/flower-recognition/tulip'
def read_images(directory):
for img in glob.glob(directory+"/*.jpg"):
image = cv2.imread(img)
resized_img = cv2.resize(image/255.0 , (150 , 150))
yield resized_img
resized_imgs0 = np.array(list(read_images(IMG_DIR)))
resized_imgs1 = np.array(list(read_images(IMG_DIR1)))
resized_imgs2 = np.array(list(read_images(IMG_DIR2)))
resized_imgs3 = np.array(list(read_images(IMG_DIR3)))
resized_imgs4 = np.array(list(read_images(IMG_DIR4)))
# + id="coTWYSuim7aH"
label0 = np.zeros((resized_imgs0.shape[0],1)) #0
label1 = np.ones((resized_imgs1.shape[0],1)) #1
label2 = np.ones((resized_imgs2.shape[0],1))*2 #2
label3 = np.ones((resized_imgs3.shape[0],1))*3 #3
label4 = np.ones((resized_imgs4.shape[0],1))*4 #4
# + id="kfxqQEKJm7ho"
data = np.concatenate((resized_imgs0,resized_imgs1,resized_imgs2,resized_imgs3,resized_imgs4))
# + id="v1rIpkgunCsg"
data= data.reshape(data.shape[0], data.shape[1]*data.shape[2]*data.shape[3])
# + id="v46rF7dJm7l0"
labels = np.concatenate((label0,label1,label2,label3,label4))
# + id="SBw851DUqx1d"
del resized_imgs0
del resized_imgs1
del resized_imgs2
del resized_imgs3
del resized_imgs4
del label0
del label1
del label2
del label3
del label4
# + id="GdRMJtr-m7ob"
import pandas as pd
data = pd.DataFrame(data)
data['labels'] = labels.astype('float32')
# + id="YIxDtQiWnVLg"
from sklearn.model_selection import train_test_split
train_data, test_data, train_labels, test_labels=train_test_split(data.iloc[0:,:-1], data['labels'], test_size=0.3, random_state=42, stratify = labels)
# + id="ySi80YLhm7rY"
train_data = np.array(train_data).reshape(len(train_data),150,150,3)
# test_data.reshape(150,150,3)
# + id="u2Q2bSk0i8SS"
test_data = np.array(test_data).reshape(len(test_data),150,150,3)
# + id="RVYQi0yowy5d"
train_labels = np.array(train_labels).astype('float32')
test_labels = np.array(test_labels).astype('float32')
# + id="xwFlpTZXm7tw"
# from keras.utils import to_categorical
# train_labels = to_categorical(train_labels)
# test_labels = to_categorical(test_labels)
# + id="mMFN2MN4nKEI"
data_gen = tf.keras.preprocessing.image.ImageDataGenerator(
# featurewise_center=True,
# samplewise_center=True,
# featurewise_std_normalization=True,
# samplewise_std_normalization=True,
# zca_whitening=False,
zca_epsilon=1e-06,
rotation_range=0,
width_shift_range=0.0,
height_shift_range=0.0,
brightness_range=None,
shear_range=0.3,
zoom_range=0.2,
channel_shift_range=0.3,
fill_mode="nearest",
cval=0.0,
horizontal_flip=True,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0,
dtype=None,
)
# + id="IJxEir7CoyCj"
data_gen.fit(train_data)
# + id="ZIn7Xpbat0aV"
data_gen.fit(test_data)
# + id="n0kNKV8DatIC"
# Model with Conv2D and MaxPooling layers
model = tf.keras.models.Sequential([
# Note the input shape is the desired size of the image 150x150 with 3 bytes color
# This is the first convolution
tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(150, 150, 3,)),
tf.keras.layers.MaxPooling2D(2, 2),
# The second convolution
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The third convolution
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The fourth convolution
tf.keras.layers.Conv2D(16, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# Flatten the results to feed into a DNN
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
# 512 neuron hidden layer
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(5, activation='softmax')
])
# + id="2QFL7tA4D0Sa"
model.compile(optimizer=RMSprop(learning_rate= 0.0001),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# + id="kBJbtQp1fhfx"
# %tensorflow_version 2.x
import tensorflow as tf
# + id="mwG7_qctxUBl" colab={"base_uri": "https://localhost:8080/"} outputId="392b159b-f788-4de0-8b3d-fd078d00f9b9"
with tf.device('/device:GPU:0'):
result = model.fit(
x= train_data,
y= train_labels,
batch_size = 30,
steps_per_epoch=30,
epochs=100,
validation_split=0.3,
validation_steps=10,
validation_batch_size=10)
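# Note that the ``ImageDataGenerator`` configured earlier (``data_gen``) is fitted but never passed to
# ``model.fit``, so the run above trains on the raw arrays without augmentation. A minimal sketch of how
# the generator could be wired in (an illustration only, kept to a single epoch; not part of the run above):
# +
with tf.device('/device:GPU:0'):
    augmented_result = model.fit(
        data_gen.flow(train_data, train_labels, batch_size=30),  # stream augmented batches
        epochs=1)  # single epoch, illustration only
# -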
# + id="FnuSBCoRwkjd"
# + id="kAMVOUBUhcj9" colab={"base_uri": "https://localhost:8080/"} outputId="32d9d399-d50a-4ee8-980e-291d565692cf"
model.evaluate(test_data,test_labels)
# + id="Ufw4FqsmU-g2" colab={"base_uri": "https://localhost:8080/"} outputId="eb9d95f9-c0e5-4525-9478-645ef65be0de"
x = result.history.keys()
x
# + id="dOKPsS3HWwUn" colab={"base_uri": "https://localhost:8080/", "height": 573} outputId="fb601771-3b95-4989-c588-cb1c64a20246"
# Visualize training history
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
import numpy
plt.plot(result.history['accuracy'])
plt.plot(result.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(result.history['loss'])
plt.plot(result.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="u7p4aMxliqvO"
y_pred1 = model.predict(test_data)
# + id="-qhQ6JA_i0YM"
y_pred1.shape
# + id="TRKNoHsBNvdR"
# list1 = []
# for i in range(0,len(validation_generator)):
# for j in range(0,val_batch):
# try:
# list1.append(validation_generator[i][1][j].tolist())
# except:
# break
# + id="P9u0xdNxfeHl" colab={"base_uri": "https://localhost:8080/"} outputId="59ee497f-3145-4d40-dab6-eb813fdf76fd"
np.argmax(y_pred1, axis=1)[0:30]
# + colab={"base_uri": "https://localhost:8080/"} id="zFzNqz3IvhSG" outputId="71758497-7dce-486a-b889-9a258f4fa4fe"
np.array(test_labels[0:30]).astype('int32')
# + id="gq1gUHQh_rJ5"
plt.figure(figsize=(20, 8))
for i in range(10):
    idx = i * (len(train_data) // 10)  # spread the sample indices across the training set
    plt.subplot(2, 5, i + 1)           # 2x5 grid for the 10 sample images
    plt.imshow(train_data[idx])
    plt.axis("off")
    plt.title(train_labels[idx])
plt.show()
| Deep learning assignment/working_flowerclassification_28_with_Augmentation_L.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### New to Plotly?
# Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
# <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
# <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
# #### Version Check
# Plotly's python package is updated frequently. Run `pip install plotly --upgrade` to use the latest version.
import plotly
plotly.__version__
# ### Basic Heatmap
# +
import plotly.plotly as py
import plotly.graph_objs as go
trace = go.Heatmap(z=[[1, 20, 30],
[20, 1, 60],
[30, 60, 1]])
data=[trace]
py.iplot(data, filename='basic-heatmap')
# -
# ### Heatmap with Categorical Axis Labels
# +
import plotly.plotly as py
import plotly.graph_objs as go
trace = go.Heatmap(z=[[1, 20, 30, 50, 1], [20, 1, 60, 80, 30], [30, 60, 1, -10, 20]],
x=['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'],
y=['Morning', 'Afternoon', 'Evening'])
data=[trace]
py.iplot(data, filename='labelled-heatmap')
# -
# ### Heatmap with Unequal Block Sizes
#
# +
import numpy as np
import plotly.plotly as py
def spiral(th):
a = 1.120529
b = 0.306349
r = a*np.exp(-b*th)
return (r*np.cos(th), r*np.sin(th))
nspiral = 2 # number of spiral loops
th = np.linspace(-np.pi/13,2*np.pi*nspiral,1000); # angle
(x,y) = spiral(th)
# shift the spiral north so that it is centered
yshift = (1.6 - (max(y)-min(y)))/2
s = dict(x= -x+x[0], y= y-y[0]+yshift,
line =dict(color='white',width=3))
# Build the rectangles as a heatmap
# specify the edges of the heatmap squares
phi = ( 1+np.sqrt(5) )/2.
xe = [0, 1, 1+(1/(phi**4)), 1+(1/(phi**3)), phi]
ye = [0, 1/(phi**3),1/phi**3+1/phi**4,1/(phi**2),1]
z = [ [13,3,3,5],
[13,2,1,5],
[13,10,11,12],
[13,8,8,8]
]
hm = dict(x = np.sort(xe),
y = np.sort(ye)+yshift,
z = z,
type = 'heatmap',
colorscale = 'Viridis')
axis_template = dict(range = [0,1.6], autorange = False,
showgrid = False, zeroline = False,
linecolor = 'black', showticklabels = False,
ticks = '' )
layout = dict( margin = dict(t=200,r=200,b=200,l=200),
xaxis = axis_template,
yaxis = axis_template,
showlegend = False,
width = 700, height = 700,
autosize = False )
figure = dict(data=[s, hm],layout=layout)
py.iplot(figure, filename='golden spiral', height=750)
# -
# ### Heatmap with Datetime Axis
# +
import datetime
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
programmers = ['Alex','Nicole','Sara','Etienne','Chelsea','Jody','Marianne']
base = datetime.datetime.today()
date_list = [base - datetime.timedelta(days=x) for x in range(0, 180)]
z = []
for prgmr in programmers:
new_row = []
for date in date_list:
new_row.append( np.random.poisson() )
z.append(list(new_row))
data = [
go.Heatmap(
z=z,
x=date_list,
y=programmers,
colorscale='Viridis',
)
]
layout = go.Layout(
title='GitHub commits per day',
xaxis = dict(ticks='', nticks=36),
yaxis = dict(ticks='' )
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='datetime-heatmap')
# -
# ### Dash Example
from IPython.display import IFrame
IFrame(src= "https://dash-simple-apps.plotly.host/dash-heatmapplot/", width="120%", height="650px", frameBorder="0")
# Find the dash app source code [here](https://github.com/plotly/simple-example-chart-apps/tree/master/heatmap)
# #### Reference
# See https://plot.ly/python/reference/#heatmap for more information and chart attribute options!
#
# +
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
# ! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
'heatmaps.ipynb', ' python/heatmaps/', 'Heatmaps | plotly',
'How to make Heatmaps in Python with Plotly.',
title = 'Python Heatmaps | plotly',
name = 'Heatmaps',
has_thumbnail='true', thumbnail='thumbnail/heatmap.jpg',
language='python', page_type='example_index',
display_as='scientific',order=3,
ipynb= '~notebook_demo/33', redirect_from='python/heatmap/')
| _posts/python/scientific/heatmap/heatmaps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from pandas_profiling import ProfileReport
# ### Import Crime Data
# Downloaded to the /data directory as 'chicago30dayData22sep2020.csv' from the
#
# Excel CSV export on the [Chicago Data Portal](https://data.cityofchicago.org/Public-Safety/Crimes-Last-30-days/fjjk-7e4n)
raw_file = '../data/raw/chicago30dayData22sep2020.csv'
crime_data = pd.read_csv(raw_file)
crime_data
# 7 columns with one or more null values
crime_data.info()
# Dropping the NA values reduces the row count from 73388 to 73160
crime_data_dropna = crime_data.dropna()
crime_data_dropna.info()
crime_data_dropna.describe()
# ### <font color="red">NOTE: when reloading the data, uncomment and run the line below once (assigning the result, since set_index is not in-place), then comment it out again</font>
#crime_data_dropna = crime_data_dropna.set_index("ID")  # run this once with newly loaded data
crime_data_dropna
crime_data_dropna['Primary Type'].value_counts()
crime_data_dropna['Description'].value_counts()
crime_data_dropna['Description'].describe()
crime_data_dropna['Description'].unique()
crime_data_dropna['Block'].value_counts()
# ### Pandas Profiling Summaries
crime_data_profile = ProfileReport(crime_data_dropna, title='Pandas Profiling Report')
crime_data_profile.to_widgets()
crime_data_profile.to_file('../data/processed/crime_data_profile.html') # '../data/raw/chicago30dayData22sep2020.csv'
| notebooks/Springboard Capstone 2 Data Wrangling 30 day download.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from datetime import datetime
from sqlalchemy import create_engine
df = pd.read_excel("amostra_analise_au5.xlsx")
df["cnpj"].iloc[0]
df["cnpj"] = df["cnpj"].astype(str)
df["cnpj"] = df.apply(lambda x : "00" + x["cnpj"] if len(x["cnpj"])==12 else
("0" + x["cnpj"] if len(x["cnpj"])==13 else x["cnpj"]), axis=1)
df.head()
lista_cnpj = df["cnpj"].tolist()
lista_cnpj.__len__()
l = ["05389492000172", "09526806000192", "22941478000159", "05126521000103"]
lista_cnpj = lista_cnpj + l
lista_cnpj.__len__()
# +
from pricing.service.scoring.lscore import LScoring
from pricing.utils import formata_cnpj
import pandas as pd
import numpy as np
from datetime import datetime
from dateutil.relativedelta import relativedelta
from sqlalchemy import create_engine
class CriteriosElegibilidade(object):
def __init__(self, cnpj, produto):
self.cnpj = cnpj
self.produto = produto
self.elegibilidade_dividas=1.5
self.elegibilidade_transacoes = 12
self.dados = None
self.flag_faturamento = None
self.fat_medio = None
self.flag_transacoes = None
self.flag_cheques = None
self.flag_dividas = None
self.data_consulta = None
self.scoring = None
self.prop_boleto = None
def get_dados(self):
if self.produto in ["tomatico", "padrao"]:
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys.cm<EMAIL>7sv.sa-east-1.rds.amazonaws.com:23306/credito-digital")
con = engine.connect()
else:
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/varejo")
con = engine.connect()
query_wirecard = "select cnpj, data, valor, numero_transacoes from fluxo_wirecard where cnpj='{}'".format(self.cnpj)
query_pv = "select cpf_cnpj as cnpj, data, valor, valor_boleto, numero_transacoes from fluxo_pv where cpf_cnpj='{}'".format(formata_cnpj(self.cnpj))
query_tomatico = "select cnpj, dataFluxo as data, valorFluxo as valor from tb_Fluxo where cnpj='{}'".format(self.cnpj)
query_justa = "select cnpj, data, valor, numero_transacoes from fluxo_justa where cnpj='{}'".format(self.cnpj)
query_au5 = "select cnpj, data, valor, numero_transacoes from fluxo_au5 where cnpj='{}'".format(self.cnpj)
query_kred = "select cnpj, data, valor from fluxo_kred where cnpj='{}'".format(self.cnpj)
dict_query = {"tomatico" : query_tomatico,
"padrao" : query_tomatico,
"wirecard" : query_wirecard,
"moip" : query_wirecard,
"pagueveloz" : query_pv,
"justa" : query_justa,
"au5" : query_au5,
"kred" : query_kred
}
query = dict_query.get(self.produto)
df = pd.read_sql(query, con)
con.close()
df = df.groupby("data").sum().reset_index()
try:
df["data"] = df.apply(lambda x : x["data"].date(), axis=1)
except:
pass
self.dados = df
return
def mensaliza(self, df):
df.index = pd.to_datetime(df.data)
if self.produto=='pagueveloz':
df = df.resample('MS').sum()[["valor", "valor_boleto"]].reset_index()
else:
df = df.resample('MS').sum().reset_index()
return df
def check_faturamento(self):
if self.produto == 'pagueveloz':
df = self.dados[["data", "valor", "valor_boleto"]]
else:
df = self.dados[["data", "valor"]]
df = self.mensaliza(df)
df6 = df.sort_values("data", ascending=False).iloc[:6, :]
df6["data"] = df6.apply(lambda x : x["data"].date(), axis=1)
flag_faturamento = int((len(df6)==6) and (0 not in df6["valor"].tolist()) and (df6["data"].max()==datetime.now().date().replace(day=1) - relativedelta(months=1)))
self.flag_faturamento = flag_faturamento
self.fat_medio = df.sort_values("data", ascending=False).iloc[:12, :]["valor"].mean()
if self.produto == 'pagueveloz':
db = df.sort_values("data", ascending=False).iloc[:12, :]
db["prop"] = db["valor_boleto"].sum()/db["valor"].sum()
self.prop_boleto = db["prop"].iloc[0]
return
def check_transacoes(self):
if self.produto != 'tomatico':
try:
df = self.dados[["data", "numero_transacoes"]]
df.index = pd.to_datetime(df.data)
df.resample('MS').sum().reset_index()
df = df.iloc[:12, :]
media_transacoes = df["numero_transacoes"].mean()
flag_transacoes = int(media_transacoes > self.elegibilidade_transacoes)
self.flag_transacoes = flag_transacoes
except:
self.flag_transacoes = 1
return
def get_dividas(self):
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/varejo")
con = engine.connect()
query = "select * from consultas_idwall_operacoes where cnpj_cpf='{}'".format(self.cnpj)
df = pd.read_sql(query, con)
con.close()
if df.empty:
return df
df = df[df['data_ref']==df['data_ref'].max()]
lista_consultas = df['numero_consulta'].unique().tolist()
df = df[(df['data_ref']==df['data_ref'].max()) & (df['numero_consulta']==lista_consultas[0])]
return df
def check_cheques(self):
dfdiv = self.get_dividas()
if dfdiv.empty:
flag_cheques = 1
data_consulta = None
else:
flag_cheques = int('cheques' not in dfdiv["tipo"].tolist())
data_consulta = dfdiv["data_ref"].max()
self.flag_cheques = flag_cheques
self.data_consulta = data_consulta
return
def check_dividas(self):
dfdiv = self.get_dividas()
if dfdiv.empty:
self.flag_dividas = 1
self.data_consulta = None
else:
df = dfdiv[dfdiv['tipo']!="cheques"]
if df.empty:
self.flag_dividas = 1
self.data_consulta = dfdiv["data_ref"].iloc[0]
else:
total_dividas = df["valor"].sum()
fat_medio = self.fat_medio
prop = total_dividas/fat_medio
flag_dividas = int(prop <=self.elegibilidade_dividas)
self.flag_dividas = flag_dividas
self.data_consulta = df["data_ref"].iloc[0]
return
def analisa(self):
self.get_dados()
self.check_faturamento()
# self.check_transacoes()
self.check_cheques()
self.check_dividas()
return
# -
from tqdm import tqdm_notebook
el = lista_cnpj[0]
el = '05389492000172'
pa = CriteriosElegibilidade(cnpj=el, produto='au5')
pa.get_dados()
pa.dados
pa = CriteriosElegibilidade(cnpj=el, produto='au5')
pa.analisa()
_df = pd.DataFrame()
_df["cnpj"] = [el]
_df["flag_faturamento"] = [pa.flag_faturamento]
# _df["flag_transacoes"] = [pa.flag_transacoes]
_df["flag_cheques"] = [pa.flag_cheques]
_df["flag_dividas"] = [pa.flag_dividas]
_df["historico"] = [len(pa.dados)]
# _df["prop_boleto"] = [pa.prop_boleto]
_df["data_consulta"] = [pa.data_consulta]
_df
lista_cnpj.__len__()
resp = []
err = []
for el in tqdm_notebook(lista_cnpj):
try:
pa = CriteriosElegibilidade(cnpj=el, produto='au5')
pa.analisa()
_df = pd.DataFrame()
_df["cnpj"] = [el]
_df["flag_faturamento"] = [pa.flag_faturamento]
# _df["flag_transacoes"] = [pa.flag_transacoes]
_df["flag_cheques"] = [pa.flag_cheques]
_df["flag_dividas"] = [pa.flag_dividas]
# _df["historico"] = [len(pa.dados)]
# _df["prop_boleto"] = [pa.prop_boleto]
_df["data_consulta"] = [pa.data_consulta]
resp.append(_df)
except:
print('ERROR')
err.append(el)
resp.__len__()
dfamostra = pd.concat(resp)
dfamostra.shape
dfamostra["flag_aprovacao"] = dfamostra["flag_faturamento"]*dfamostra["flag_cheques"]*dfamostra["flag_dividas"]
dfamostra.groupby("flag_aprovacao").count()
dfamostra["data_atualizacao"] = datetime.now().date()
dfamostra["produto"] = "au5"
dfamostra["flag_transacoes"] = None
dfamostra['flag_proposta'] = 1
dfamostra
dfamostra["data_consulta"] = dfamostra.apply(lambda x : x["data_consulta"].date(), axis=1)
dfamostra.head()
l
dfamostra[dfamostra["cnpj"].isin(l)]
dfp1 = pd.read_excel("pre_analise_au5_geral_20190711.xlsx", sheet_name='base1')
dfp2 = pd.read_excel("pre_analise_au5_geral_20190711.xlsx", sheet_name='base2')
dfp = pd.concat([dfp1, dfp2])
dfp.head()
dfp["cnpj"].iloc[0]
dfp["cnpj"] = dfp['cnpj'].astype(str)
dfp['cnpj'] = dfp.apply(lambda x : "00" + x["cnpj"] if len(x["cnpj"])==12 else
("0" + x["cnpj"] if len(x["cnpj"])==13 else x["cnpj"]), axis=1)
dfamostra.merge(dfp[['cnpj', 'proposta_max']], left_on='cnpj', right_on='cnpj', how='left').to_excel("analise_completa_amostra_20190726.xlsx")
err.__len__()
err
formata_cnpj('06057209000178')
err = [el.replace(',', '').replace('\\', '') for el in err]
err
err2 = []
for el in err:
try:
pa = CriteriosElegibilidade(cnpj=el, produto='pagueveloz')
pa.analisa()
_df = pd.DataFrame()
_df["cnpj"] = [el.replace('.', '').replace('-', '').replace('/', '')]
_df["flag_faturamento"] = [pa.flag_faturamento]
_df["flag_transacoes"] = [pa.flag_transacoes]
_df["flag_cheques"] = [pa.flag_cheques]
_df["flag_dividas"] = [pa.flag_dividas]
_df["historico"] = [len(pa.dados)]
_df['prop_boleto'] = [pa.prop_boleto]
_df["data_consulta"] = [pa.data_consulta]
resp.append(_df)
except:
err2.append(el)
err2
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/varejo")
con = engine.connect()
for el in err2:
con.execute("update fluxo_pv set flag_aprovacao=0 where cpf_cnpj='{}'".format(el))
con.close()
engine = create_engine("mysql+pymysql://capmaster:#<EMAIL>pot123#<EMAIL>:23306/varejo")
con = engine.connect()
for el in err2:
con.execute("update fluxo_pv set flag_aprovacao=0 where cpf_cnpj='{}'".format(el))
con.close()
final = pd.concat(resp)
final.shape
final.head()
final.groupby("flag_faturamento").count()
final["data_consulta"] = final.apply(lambda x : x["data_consulta"].date() if not x["data_consulta"] is None else x["data_consulta"], axis=1)
final["flag_consulta"] = final.apply(lambda x : int(x["data_consulta"] is None), axis=1)
final[(final["flag_consulta"]==1)]["flag_faturamento"].unique().tolist()
final[(final['flag_consulta']==1) & (final["flag_faturamento"]==1)].to_excel('atualizar_divida_pv.xlsx')
final[(final['flag_consulta']==1) & (final["flag_faturamento"]==1)]
final[final["flag_consulta"]==1]["flag_faturamento"].unique().tolist()
final[(final["flag_consulta"]==1) & (final["flag_faturamento"]==1)]["cnpj"].unique().tolist()
final.head()
final['flag_transacoes'].unique().tolist()
final["flag_cheques"].unique().tolist()
final.groupby("flag_transacoes").count()
final["flag_aprovacao"] = final["flag_faturamento"]*final["flag_transacoes"]*final["flag_cheques"]*final["flag_dividas"]
final["flag_aprovacao"] = final["flag_faturamento"]
final.groupby("flag_aprovacao").count()
final.shape
final.head()
final[final['flag_transacoes']==0].shape
final["doc"] = final.apply(lambda x : formata_cnpj(x["cnpj"]), axis=1)
final.head()
# +
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@capt<EMAIL>:23306/varejo")
con = engine.connect()
for el in final["cnpj"].unique().tolist():
dt = final[final["cnpj"]==el]
flag = dt["flag_aprovacao"].iloc[0]
con.execute("update fluxo_au5 set flag_aprovacao={} where cnpj='{}'".format(flag, el))
con.close()
# -
# +
engine = create_engine("mysql+pymysql://capmaster:#jackpot123#@captalysdev.cmrbiv<EMAIL>7sv.sa-east-1.rds.amazonaws.com:23306/varejo")
con = engine.connect()
for el in final["cnpj"].unique().tolist():
dt = final[final["cnpj"]==el]
flag = dt["flag_aprovacao"].iloc[0]
con.execute("update fluxo_au5 set flag_aprovacao={} where cnpj='{}'".format(flag, el))
con.close()
# -
lista_pricing = final[final["flag_aprovacao"]==1]["cnpj"].tolist()
lista_pricing.__len__()
lista_pricing.__len__()
from tqdm import tqdm_notebook
import requests
# +
url = "https://api.pricing.captalys.io/models/PricingPadrao"
header = {"X-Consumer-Custom-Id": "padrao"}
fr = []
err = []
for el in tqdm_notebook(lista_pricing):
try:
pa = CriteriosElegibilidade(cnpj=el, produto='au5')
pa.get_dados()
dff = pa.dados
dff['adquirentes'] = "bin"
fat_medio = dff.groupby(["data"]).sum()["valor"].mean()
dff.dropna(inplace=True)
fluxos = dict()
for adq in dff['adquirentes'].unique().tolist():
dt = dff[dff['adquirentes']==adq]
dt['data'] = dt.apply(lambda x : str(x['data'].day) + "-" +str(x['data'].month)+"-" + str(x['data'].year), axis=1)
fluxos[adq] = dt[['data', 'valor']].to_dict("records")
body = {
"fluxos" : fluxos,
"cnpj" : formata_cnpj(el),
"cnae" : "4744-0",
"volume_escolhido" : 0.5*fat_medio
}
req = requests.post(url, headers=header, json=body)
js = req.json()
if len(js) == 0:
vol_max = 0
else:
vol_max = js.get("valor_maximo")
fr.append(pd.DataFrame({"cnpj" : [el], "vol_max" : [vol_max]}))
except:
print("error")
err.append(el)
# -
err.__len__()
dfp = pd.concat(fr)
dfp.shape
dfp.sort_values('vol_max').head()
dfp.head()
# +
url = "https://api.pricing.captalys.io/models/PricingPadrao"
header = {"X-Consumer-Custom-Id": "pague-veloz"}
err2 = []
for el in tqdm_notebook(err):
try:
pa = CriteriosElegibilidade(cnpj=el, produto='pagueveloz')
pa.get_dados()
dff = pa.dados
dff['adquirentes'] = "pagueveloz"
fat_medio = dff.groupby(["data"]).sum()["valor"].mean()
dff.dropna(inplace=True)
fluxos = dict()
for adq in dff['adquirentes'].unique().tolist():
dt = dff[dff['adquirentes']==adq]
dt['data'] = dt.apply(lambda x : str(x['data'].day) + "-" +str(x['data'].month)+"-" + str(x['data'].year), axis=1)
fluxos[adq] = dt[['data', 'valor']].to_dict("records")
body = {
"fluxos" : fluxos,
"cnpj" : formata_cnpj(el),
"cnae" : "4744-0",
"volume_escolhido" : 0.5*fat_medio
}
req = requests.post(url, headers=header, json=body)
js = req.json()
if len(js) == 0:
vol_max = 0
else:
vol_max = js.get("valor_maximo")
fr.append(pd.DataFrame({"cnpj" : [el], "vol_max" : [vol_max]}))
except:
print("error")
err2.append(el)
# -
err2
dfp.shape
dfp.head()
dfp = pd.concat(fr)
dfp.sort_values("vol_max")
lista_extra = ['13766218000101', '14178863000168', '27045717000106']
# +
url = "https://api.pricing.captalys.io/models/PricingPadrao"
header = {"X-Consumer-Custom-Id": "padrao"}
fr2 = []
for el in tqdm_notebook(lista_extra):
print(el)
pa = CriteriosElegibilidade(cnpj=el, produto='au5')
pa.get_dados()
dff = pa.dados
dff['adquirentes'] = "bin"
fat_medio = dff.groupby(["data"]).sum()["valor"].mean()
dff.dropna(inplace=True)
fluxos = dict()
for adq in dff['adquirentes'].unique().tolist():
dt = dff[dff['adquirentes']==adq]
dt['data'] = dt.apply(lambda x : str(x['data'].day) + "-" +str(x['data'].month)+"-" + str(x['data'].year), axis=1)
fluxos[adq] = dt[['data', 'valor']].to_dict("records")
body = {
"fluxos" : fluxos,
"cnpj" : formata_cnpj(el),
"cnae" : "4744-0",
"volume_escolhido" : 0.5*fat_medio
}
req = requests.post(url, headers=header, json=body)
js = req.json()
if len(js) == 0:
vol_max = 0
else:
vol_max = js.get("valor_maximo")
fr2.append(pd.DataFrame({"cnpj" : [el], "vol_max" : [vol_max]}))
# -
dfp2 = pd.concat(fr2)
dfp = dfp.dropna()
df_pricing = pd.concat([dfp, dfp2])
df_pricing.shape
df_pricing = dfp.dropna()
reprovados_pricing = df_pricing[df_pricing["vol_max"] ==0]
reprovados_pricing.head()
df_pricing = dfp.copy()
df_pricing.head()
sem_proposta = df_pricing[df_pricing["vol_max"]==0]["cnpj"].tolist()
sem_proposta.__len__()
final["flag_proposta"] = final.apply(lambda x : int(x["cnpj"] not in sem_proposta), axis=1)
final.head()
final["data_atualizacao"] = datetime.now().date()
final["flag_aprovacao"] = final["flag_proposta"]*final["flag_faturamento"]
final["produto"] = "au5_2"
final.drop(columns=['doc'], axis=1, inplace=True)
final.drop(columns=['flag_cnpj'], axis=1, inplace=True)
final.drop_duplicates(inplace=True)
final["flag_dividas"] = None
final["flag_cheques"] = None
final["flag_transacoes"] = None
final["data_consulta"] = None
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@<EMAIL>:23306/varejo")
con = engine.connect()
dfamostra.to_sql("pre_analise", schema="varejo", con=con, if_exists='append', index=False)
con.close()
1432*15
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@capt<EMAIL>.sa-east-1.rds.amazonaws.com:23306/varejo")
con = engine.connect()
for el in sem_proposta:
con.execute("update fluxo_au5 set flag_aprovacao=0 where cnpj='{}'".format(el))
con.close()
engine = create_engine("mysql+pymysql://capmaster:#jackpot123#@<EMAIL>:23306/varejo")
con = engine.connect()
for el in sem_proposta:
con.execute("update fluxo_au5 set flag_aprovacao=0 where cnpj='{}'".format(el))
con.close()
sem_proposta.__len__()
df_aprov = df_pricing[df_pricing["vol_max"]>0]
df_aprov.shape
df_aprov.shape
df_aprov.shape
df_aprov.columns = ['cnpj', 'proposta_max']
df_aprov["produto"] = "au5_2"
df_aprov["data_posicao"] = datetime.now().date()
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@<EMAIL>:23306/varejo")
con = engine.connect()
df_aprov.to_sql("propostas_mensais", schema="varejo", con=con, if_exists="append", index=False)
con.close()
df2 = pd.read_excel("pre_analise_au5_20190625 (1).xlsx")
df2.columns = ["cnpj", "proposta_max"]
df2["produto"] = "au5_1"
df2["data_posicao"] = datetime(2019, 6, 25).date()
df2.drop(columns=["data_atualizacao"], inplace=True)
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@<EMAIL>:23306/varejo")
con = engine.connect()
df2.to_sql("propostas_mensais", schema="varejo", con=con, if_exists="append", index=False)
con.close()
df_aprov.to_excel("pre_analise_au5_2_20190711.xlsx")
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#<EMAIL>:23306/varejo")
con = engine.connect()
df = pd.read_sql("select distinct cnpj from fluxo_au5", con)
con.close()
df.head()
engine = create_engine("mysql+pymysql://capMaster:#<EMAIL>#<EMAIL>:23306/creditoDigital")
con = engine.connect()
dfcip = pd.read_sql("select cnpj, DtHrSit, bandeira, trava, statusConsulta, dataConsulta from consulta_cip where cnpj in {}".format(tuple(df["cnpj"].tolist())), con)
con.close()
dfcip = dfcip[dfcip["cnpj"].isin(df["cnpj"].tolist())]
dfcip = dfcip[dfcip["dataConsulta"]>datetime(2019, 6, 1).date()]
dfcip["dataConsulta"].unique().tolist()
dfcip["cnpj"].unique().tolist().__len__()
dfcip.to_excel("consulta_cip_au5_20190710.xlsx")
df_aprov["vol_max"].sum()
lista_reprov = dfp[dfp["vol_max"]==0]["cnpj"].unique().tolist()
lista_reprov.__len__()
rep = lista_reprov + sem_proposta
rep.__len__()
rep_total = final[final['flag_aprovacao']==0]["cnpj"].tolist() + rep
rep_total.__len__()
df_aprov.shape
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#<EMAIL>:23306/apiPricing")
con = engine.connect()
dfacomp = pd.read_sql("select cnpj from acompanhamento where produto='CREDITOVELOZ' and status!='QUITADA'", con)
con.close()
_rep = df_aprov[df_aprov["cnpj"].isin(dfacomp["cnpj"].tolist())]["cnpj"].tolist()
df_aprov = df_aprov[~df_aprov["cnpj"].isin(dfacomp["cnpj"].tolist())]
df_aprov.shape
df_aprov.head()
dfacomp.shape
df_aprov.to_excel("pre_analise_aprovados_pv_201907.xlsx")
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/varejo")
con = engine.connect()
dfrep = pd.read_sql("select distinct cpf_cnpj from fluxo_pv where flag_aprovacao=0 and flag_cnpj=1", con)
con.close()
dfrep["cnpj"] = dfrep.apply(lambda x : x["cpf_cnpj"].replace(".", "").replace("-", "").replace("/", ""), axis=1)
dfrep.to_excel("reprovados_201907.xlsx")
df_reprov = pd.DataFrame()
df_reprov["cnpj"] = rep_total + _rep
df_reprov.to_excel("pre_analise_reprovados_pv_201906.xlsx")
df_reprov
dfop = pd.read_excel("analise_justa_final.xlsx")
dfop["cnpj"] = dfop["cnpj"].astype(str)
dfop["cnpj"] = dfop.apply(lambda x : "0" + x["cnpj"] if len(x["cnpj"])==13 else
("00" + x["cnpj"] if len(x['cnpj'])==12 else x["cnpj"]), axis=1)
lista_op = dfop["cnpj"].unique().tolist()
df_aprov.shape
df_aprov = df_aprov[~df_aprov["cnpj"].isin(lista_op)]
df_aprov.head()
df_aprov.shape
dftrava = pd.read_excel("consulta_trava_justa_20190612.xlsx")
dftrava["cnpj"].iloc[0]
dftrava["cnpj"] = dftrava["cnpj"].astype(str)
dftrava[dftrava.index==84]["cnpj"].iloc[0]
dftrava["cnpj"] = dftrava.apply(lambda x : "00" + x["cnpj"] if len(x["cnpj"])==12 else
("0" + x["cnpj"] if len(x["cnpj"])==13 else x["cnpj"]), axis=1)
dftrava[dftrava["cnpj"]=='04943517000175']
fr = []
for el in dftrava["cnpj"].unique().tolist():
dt = dftrava[dftrava['cnpj']==el]
lista_trava = dt['trava'].tolist()
flag_trava = 'S' if 'S' in lista_trava else 'N'
fr.append(pd.DataFrame({'cnpj' : [el], 'flag_trava' : [flag_trava]}))
resp = pd.concat(fr)
df_aprov.shape
ret = df_aprov.merge(resp, left_on='cnpj', right_on='cnpj', how='left')
ret[ret["cnpj"].isin(["30589990000106", "30589922000106", "30589922000147"])]
ret.to_excel("pre_analise_justa_201906.xlsx")
final["flag_pricing"] = final.apply(lambda x : 0 if x["cnpj"] in sem_proposta else 1, axis=1)
final[final["flag_pricing"]==0]
final
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/apiPricing")
con = engine.connect()
dfacomp = pd.read_sql("select cnpj, produto from acompanhamento where produto='WIRECARD' and status!='QUITADA'", con)
con.close()
df_aprov.shape
df_aprov = df_aprov[~df_aprov["cnpj"].isin(dfacomp["cnpj"].tolist())]
df_aprov.shape
df_aprov.to_excel("pre_analise_wirecard_201906.xlsx")
df_aprov
from watchdog.rotinas import analise_pendencias
| Modelagem/pre_analysis/pre_analise_au5_amostra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import xarray
infile = r'C:\Users\xavier.mouy\Documents\PhD\Projects\Dectector\DFO_RCA_run\RCA_in_April_July2019_1342218252\summary\hourly_summary.nc'
ds = xarray.open_dataset(infile)
ds
| tests/old_tests/aTest_Xarray2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !vw -h | head -n10
# +
import pandas as pd
import numpy as np
import scipy.sparse
import sklearn.feature_extraction
import matplotlib.pylab as plt
# %matplotlib inline
from tqdm import tqdm
import platform
pd.set_option("display.max_rows", 10)
pd.set_option('display.max_columns', 1100)
import os
import warnings
from sklearn import preprocessing
# %pylab inline
warnings.filterwarnings('ignore')
# -
# # Load and transform data
# +
main_data = np.load('df/main_data.npy').tolist()
values_data = np.load('df/values_data.npy').tolist()
order_data = np.load('df/order_data.npy').tolist()
main_df = pd.DataFrame(main_data)
main_df
# +
important_values_keys_set = {
'Accept',
'Accept-Charset',
'Accept-Encoding'
}
important_orders_keys_set = {
'Upgrade-Insecure-Requests',
'Accept',
'If-Modified-Since',
'Host',
'Connection',
'User-Agent',
'From',
'Accept-Encoding'
}
orders_vectorizer = sklearn.feature_extraction.DictVectorizer(sparse=True, dtype=float)
values_vectorizer = sklearn.feature_extraction.DictVectorizer(sparse=True, dtype=float)
labels = main_df.ua_string.value_counts().index.tolist()
labels.append('NaN')
from lib.parsers.logParser import LogParser
l_parser = LogParser(log_folder='Logs/')
l_parser.reassign_orders_values(order_data, values_data)
full_sparce_dummy = l_parser.prepare_data(orders_vectorizer, values_vectorizer, important_orders_keys_set, important_values_keys_set, fit_dict=True)
#lb = preprocessing.LabelBinarizer(sparse_output=True)
lb = preprocessing.LabelEncoder()
lb.fit(labels)
y = lb.transform(main_df.ua_string.fillna('NaN'))
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(full_sparce_dummy, y, test_size=0.33, random_state=42)
# -
y_train
# +
train_filename = 'vw/headers.train.vw'
test_filename = 'vw/headers.test.vw'
#y_tr = y_train.reset_index(drop=True)
ftr = open(train_filename, "w")
for row in tqdm(range(X_train.shape[0])):
s = str(y_train[row]) + " | "
for i, value in enumerate(X_train.getrow(row).toarray()[0]):
s += str(i) + ":" + str(value) + " "
print(s, file=ftr)
ftr.close()
# +
#y_ts = y_test.reset_index(drop=True)
fts = open(test_filename, "w")
for row in tqdm(range(X_test.shape[0])):
s = str(y_test[row]) + " | "
for i, value in enumerate(X_test.getrow(row).toarray()[0]):
s += str(i) + ":" + str(value) + " "
print(s, file=fts)
fts.close()
# -
# !head -n 5 vw/headers.train.vw | cut -c 1-50
# ## Train VW
#
# #### Which loss function should I use?
#
# If the problem is a binary classification (i.e. labels are -1 and +1) your choices should be Logistic or Hinge loss (although Squared loss may work as well). If you want VW to report the 0-1 loss instead of the logistic/hinge loss, add --binary. Example: spam vs non-spam, odds of click vs no-click.
# For binary classification where you need to know the posterior probabilities, use --loss_function logistic --link logistic.
#
# If the problem is a regression problem, meaning the target label you're trying to predict is a real value -- you should be using Squared or Quantile loss.
#
# Example: revenue, height, weight. If you're trying to minimize the mean error, use squared-loss. See: http://en.wikipedia.org/wiki/Least_squares.
#
# If OTOH you're trying to predict rank/order and you don't mind the mean error to increase as long as you get the relative order correct, you need to minimize the error vs the median (or any other quantile), in this case, you should use quantile-loss. See: http://en.wikipedia.org/wiki/Quantile_regression
#
# **Important!!!!**
# It would be great to use --loss_function logistic --link logistic, since that would give probabilistic predictions. Unfortunately, we are predicting a large number of classes rather than a binary target (a toy sketch of the binary setup follows below).
#
# An option would be to add user agents or browsers as the classification target, but that is almost the same as our second model, and speeding it up with VW would be rather pointless.
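# As an aside, a minimal sketch of the probabilistic binary setup mentioned above (illustrative only; the
# header dataset in this notebook is multiclass, so this toy file is separate from it). Labels must be -1/+1
# for logistic loss, and ``--link logistic`` turns the raw predictions into probabilities:
# +
# Write a two-row toy dataset, train with logistic loss, then print probability predictions.
# !printf "1 | a:1.0 b:0.5\n-1 | a:0.1 b:2.0\n" > vw/binary_toy.vw
# !vw -d vw/binary_toy.vw --loss_function logistic --link logistic -f vw/binary_toy_model.vw --quiet
# !vw -d vw/binary_toy.vw -i vw/binary_toy_model.vw -t -p /dev/stdout --quiet
# -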
# +
# %%time
# !vw -d vw/headers.train.vw -f vw/model.vw --loss_function quantile --passes 100 -c -k 2> vw/train.log
# -
# ## Very fast
# !head -n20 vw/train.log
# +
# %%time
# !vw -d vw/headers.test.vw -i vw/model.vw -t -p vw/output.csv --quiet
# !head -n3 vw/output.csv
# -
y_hat = pd.read_csv('vw/output.csv', header=None)
print(len(y_test))
y_hat[0]
y_hat['Real'] = y_test
y_hat.columns = ['Predicted', 'Real']
y_hat = y_hat.round({'Predicted': 0})
y_hat['Success'] = y_hat.Predicted == y_hat.Real
y_hat
y_hat['Success'].value_counts()
# ### As we can see, linear models are not very effective for multi-class header prediction
#
# I think VW is not very useful in this case (see the aside below on its native multiclass mode).
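# One caveat, as an aside (not tested here): the runs above treat the class index as a regression target
# with quantile loss. VW also has a native one-against-all multiclass mode, ``--oaa``, which expects labels
# in 1..k, so the training file has to be rewritten with the encoded labels shifted by +1. A sketch,
# mirroring the file-writing loop earlier in this notebook:
# +
# Hypothetical follow-up: write a 1-based-label copy of the training data and train with --oaa.
n_classes = len(lb.classes_)
with open('vw/headers.train.oaa.vw', 'w') as f_oaa:
    for row in range(X_train.shape[0]):
        s = str(y_train[row] + 1) + " | "
        for i, value in enumerate(X_train.getrow(row).toarray()[0]):
            s += str(i) + ":" + str(value) + " "
        print(s, file=f_oaa)
# !vw -d vw/headers.train.oaa.vw --oaa {n_classes} --loss_function logistic --passes 10 -c -k -f vw/model_oaa.vw --quiet
# -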
| 13-VW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os.path as osp
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
from torch_geometric.nn import GCNConv, GNNExplainer, ARMAConv
from torch.nn import Sequential, Linear
dataset = 'Cora'
path = osp.join(osp.dirname(osp.realpath('__file__')), '..', 'data', 'Planetoid')
dataset = Planetoid(path, dataset, transform=T.NormalizeFeatures())
data = dataset[0]
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.lin = Sequential(Linear(10, 10))
self.conv1 = ARMAConv(dataset.num_features, 16, 2)
self.conv2 = ARMAConv(16, dataset.num_classes, 2)
def forward(self, x, edge_index):
x = F.relu(self.conv1(x, edge_index))
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index)
return F.log_softmax(x, dim=1)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net().to(device)
data = data.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
x, edge_index = data.x, data.edge_index
for epoch in range(1, 201):
model.train()
optimizer.zero_grad()
log_logits = model(x, edge_index)
loss = F.nll_loss(log_logits[data.train_mask], data.y[data.train_mask])
loss.backward()
optimizer.step()
explainer = GNNExplainer(model, epochs=200)
node_idx = 10
node_feat_mask, edge_mask = explainer.explain_node(node_idx, x, edge_index)
ax, G = explainer.visualize_subgraph(node_idx, edge_index, edge_mask, y=data.y)
# -
| notebooks/pyg_explain/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
import os
sys.path.append(os.environ['GOTMWORK_ROOT']+'/tools', )
from gotmanalysis import *
np.seterr(all='raise')
# %matplotlib inline
casename = 'JRA55-do_Global_dampV5d'
forcing_reg_type = 'BG12'
tmname = 'KPP-CVMix'
update_data = False
plot_figure = True
apply_mask = True
# check forcing_reg_type
fr_list = ['BG12', 'LF17']
if forcing_reg_type not in fr_list:
print('Forcing regime {} not supported. Stop.'.format(forcing_reg_type))
# check time tag
month_labels = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
timetag_list = ['20090101-20090131',
'20090201-20090228',
'20090301-20090331',
'20090401-20090430',
'20090501-20090531',
'20080601-20080630',
'20080701-20080731',
'20080801-20080831',
'20080901-20080930',
'20081001-20081031',
'20081101-20081130',
'20081201-20081231']
# paths
fig_root = os.environ['GOTMFIG_ROOT']+'/'+casename
# read data
mon_gmobj = []
for j in np.arange(12):
timetag = timetag_list[j]
s1data_root = os.environ['GOTMRUN_ROOT']+'/'+casename+'/VR1m_DT600s_'+timetag
s2data_root = os.environ['GOTMFIG_ROOT']+'/data/'+casename+'/VR1m_DT600s_'+timetag
os.makedirs(s2data_root, exist_ok=True)
os.makedirs(fig_root, exist_ok=True)
# get forcing regime
basepath = s1data_root+'/'+tmname
s2data_name = s2data_root+'/data_forcing_regime_'+forcing_reg_type+'_'+tmname+'.npz'
mask_name = s2data_root+'/mask_'+tmname+'.npz'
if update_data or not os.path.isfile(s2data_name):
# update data
print('Updating data...')
loclist = sorted(os.listdir(basepath))
pathlist = [basepath+'/'+x+'/gotm_out_s1.nc' for x in loclist]
godmobj = GOTMOutputDataMap(pathlist)
forcing_regime = np.zeros(godmobj.ncase)
for i in np.arange(godmobj.ncase):
if np.mod(i, 100) == 0:
print('{:6.2f} %'.format(i/godmobj.ncase*100.0))
tmp = GOTMOutputData(godmobj._paths[i], init_time_location=False)
if forcing_reg_type == 'BG12':
forcing_regime[i] = tmp.diag_forcing_regime_BG12()
elif forcing_reg_type == 'LF17':
forcing_regime[i] = tmp.diag_forcing_regime_LF17()
gmobj = GOTMMap(data=forcing_regime, lon=godmobj.lon, lat=godmobj.lat, name='forcing_regime')
if apply_mask:
# read mask
gmobj_mask = GOTMMap().load(mask_name)
# apply mask
gmobj.masked(gmobj_mask)
# save data
gmobj.save(s2data_name)
else:
# read data
gmobj = GOTMMap().load(s2data_name)
if apply_mask:
# read mask
gmobj_mask = GOTMMap().load(mask_name)
# apply mask
gmobj.masked(gmobj_mask)
# update data
gmobj.save(s2data_name)
# append to list
mon_gmobj.append(gmobj)
# plot figure
if plot_figure:
# create figure
nrow = 6
fig_width = 12
fig_height = 13
# plot figure
height_ratios = [1]*nrow
height_ratios.append(0.15)
width_ratios = [1, 1, 0.05]
f, axarr = plt.subplots(nrow, 2, sharex='col')
f.set_size_inches(fig_width, fig_height)
# plot
for j in np.arange(12):
m = j//2
n = j%2
mlabel = month_labels[j]
im0 = plot_forcing_regime_map(mon_gmobj[j], axis=axarr[m,n], add_colorbar=False)
# label
axarr[m,n].text(40, 55, mlabel, fontsize=14, color='k', fontweight='bold', va='top',
bbox=dict(boxstyle="square",ec='k',fc='w'))
# reduce margin
plt.tight_layout()
# colorbar
cb_ticks = [1, 2, 3, 4, 5, 6, 7, 8]
cb_ticks_labels = ['S', 'L', 'C', 'SL', 'SC', 'LC', 'SLC', 'NA']
plt.subplots_adjust(right=0.95)
cax0 = plt.axes([0.85, 0.15, 0.1, 0.7])
cax0.set_visible(False)
cb0 = plt.colorbar(im0, ax=cax0, ticks=cb_ticks, aspect=35)
cb0.ax.set_yticklabels(cb_ticks_labels)
# save figure
figname = fig_root+'/fig_forcing_regime_all_'+forcing_reg_type+'.png'
plt.savefig(figname, dpi = 300)
| visualization/jupyter_notebook/plot_map_forcing_regime_all.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Webscraping 40k Hindi songs
# We'll be scraping http://giitaayan.com/
# ### Phase 2
# In Phase 2, we will scrape the song lyrics from all the song pages
from selenium import webdriver
import pandas as pd
import csv
import time
Chrome = webdriver.Chrome
chromedriver = './chromedriver'
browser = Chrome(chromedriver)
table_headers = ['Song', 'Film', 'Year', 'Music Director', 'Lyricist', 'Singers']
with open(r'hindi_lyrics_phase2_part2.csv', 'w') as file:
writer = csv.writer(file)
writer.writerow(table_headers)
df = pd.read_csv('hindi_lyrics_phase1_part2.csv', index_col='Unnamed: 0')
df.head()
# +
# %%time
for i in range (len(df)):
song_row = list(df.iloc[i])
song_url = song_row[0]
time.sleep(1)
try:
browser.get(song_url)
browser.find_element_by_id('langName').click()
song_row[0] = browser.find_element_by_id('ConvertedText').text
with open(r'hindi_lyrics_phase2_part2.csv', 'a') as file:
writer = csv.writer(file)
writer.writerow(song_row)
print(f'Writing {i+1} of {len(df)}')
except Exception as e:
print(e)
# -
| Scraping_giitaayan/Parallel Scraping/2/HindiWebscraping40k_Phase2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python385jvsc74a57bd098b0a9b7b4eaaa670588a142fd0a9b87eaafe866f1db4228be72b4211d12040f
# ---
# +
import importlib
import copy
from tabulate import tabulate
import os
import sys
import random
import mysklearn.myutils
importlib.reload(mysklearn.myutils)
import mysklearn.myutils as myutils
import mysklearn.mypytable
importlib.reload(mysklearn.mypytable)
from mysklearn.mypytable import MyPyTable
import mysklearn.myclassifiers
importlib.reload(mysklearn.myclassifiers)
from mysklearn.myclassifiers import MyKNeighborsClassifier, MySimpleLinearRegressor, MyNaiveBayesClassifier, MyDecisionTreeClassifier, MyRandomForestClassifier
import mysklearn.myevaluation
importlib.reload(mysklearn.myevaluation)
import mysklearn.myevaluation as myevaluation
# +
wildfire_fname = os.path.join("data","clean_fire_data.csv")
wildfire_table = MyPyTable()
wildfire_table.load_from_file(wildfire_fname)
fire_date = wildfire_table.get_column("date")
county = wildfire_table.get_column("county")
acres = wildfire_table.get_column("acres")
cause = wildfire_table.get_column("cause")
lat = wildfire_table.get_column("lat")
lng = wildfire_table.get_column("lon")
binlat = wildfire_table.get_column("binlat")
binlon = wildfire_table.get_column("binlon")
binacres = wildfire_table.get_column("binacres")
wildfire_X = [[fire_date[i], county[i], cause[i], binlat[i], binlon[i]] for i in range(len(fire_date))]
wildfire_y = [x for x in binacres]
# -
print(len(wildfire_X))
print(set(fire_date))
print(set(county))
print(set(cause))
print(set(binlat))
print(set(binlon))
print(set(binacres))
# ## Finding the best tree
#
# The code below tries to find a better 'best tree' than the one found previously.
# So far, the best tree we have found is
#
# F: 1 N: 5 M: 4 Accuracy: 0.433
# + tags=[]
X_train, X_test, y_train, y_test = myevaluation.train_test_split(copy.deepcopy(wildfire_X), copy.deepcopy(wildfire_y), test_size=1000)
best_trees = []
max_accuracy = .433
for i in range(20):
F = random.randint(1,5)
N = random.randint(1,30)
M = random.randint(1,N)
rf = MyRandomForestClassifier()
    rf.fit(X_train, y_train, F=F, N=N, M=M)  # fit on the training split with the sampled hyperparameters
predictions = []
for i, x in enumerate(X_test):
prediction = rf.predict([x])
# print(prediction, y_test[i])
# print(prediction)
predictions.append(int(prediction[0] == y_test[i]))
if sum(predictions)/len(predictions) > max_accuracy:
print("F:", F, "N:", N, "M:", M, "Accuracy:", sum(predictions)/len(predictions))
f = open("best_tree.txt", "w")
f.write(str(rf.trees))
f.close()
max_accuracy = sum(predictions)/len(predictions)
best_trees = rf.trees
print("done")
# -
# ## Testing of best tree
#
# Next, we test the best tree on some random samples.
# +
import ast
if best_trees == []:
with open("best_tree.txt", "r") as data:
best_trees = ast.literal_eval(data.read())
# +
test_size = 1000
X_train, X_test, y_train, y_test = myevaluation.train_test_split(copy.deepcopy(wildfire_X), copy.deepcopy(wildfire_y), test_size=test_size, shuffle=True)
rf = MyRandomForestClassifier()
rf.trees = copy.deepcopy(best_trees)
predictions = []
for i, x in enumerate(X_test):
sys.stdout.write("\r" + str(i) + "/" + str(len(X_test) -1) + " ")
sys.stdout.flush()
prediction = rf.predict([x])
predictions.append(prediction[0])
print()
acc = round(sum([int(x==y) for x,y in zip(predictions, y_test)])/len(predictions), 2)
print("Random Forest: accuracy = " + str(acc) + " error rate = " + str(1-acc))
# -
headers = ["acres", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0", "7.0", "8.0", "9.0", "total", "recognition %"]
mat = myevaluation.confusion_matrix(predictions, y_test, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
myutils.build_confusion_matrix(mat)
print("Random Forests Results")
print(tabulate(mat, headers))
# ## KNN
# The code below tests instances of the dataset using the kNN classifier.
# +
test_size = 1000
X_train, X_test, y_train, y_test = myevaluation.train_test_split(copy.deepcopy(wildfire_X), copy.deepcopy(wildfire_y), test_size=test_size, shuffle=True)
nb = MyKNeighborsClassifier()
nb.fit(X_train, y_train)
predictions = []
for i, x in enumerate(X_test):
sys.stdout.write("\r" + str(i) + "/" + str(len(X_test) -1) + " ")
sys.stdout.flush()
prediction = nb.predict([x])
predictions.append(prediction[0])
print()
acc = round(sum([int(x==y) for x,y in zip(predictions, y_test)])/len(predictions), 2)
print("KNN: accuracy = " + str(acc) + " error rate = " + str(1-acc))
# -
headers = ["acres", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0", "7.0", "8.0", "9.0", "total", "recognition %"]
mat = myevaluation.confusion_matrix(predictions, y_test, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
myutils.build_confusion_matrix(mat)
print("KNN Results")
print(tabulate(mat, headers))
# ## Naive Bayes
#
# The code below tests the accuracy on the dataset using the Naive Bayes classifier.
# +
test_size = 1000
X_train, X_test, y_train, y_test = myevaluation.train_test_split(copy.deepcopy(wildfire_X), copy.deepcopy(wildfire_y), test_size=test_size, shuffle=True)
nb = MyNaiveBayesClassifier()
nb.fit(X_train, y_train)
predictions = []
for i, x in enumerate(X_test):
sys.stdout.write("\r" + str(i) + "/" + str(len(X_test) -1) + " ")
sys.stdout.flush()
prediction = nb.predict([x])
predictions.append(prediction[0])
print()
acc = round(sum([int(x==y) for x,y in zip(predictions, y_test)])/len(predictions), 2)
print("Naive Bayes: accuracy = " + str(acc) + " error rate = " + str(1-acc))
# -
headers = ["acres", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0", "7.0", "8.0", "9.0", "total", "recognition %"]
mat = myevaluation.confusion_matrix(predictions, y_test, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
myutils.build_confusion_matrix(mat)
print("KNN Results")
print(tabulate(mat, headers))
| classifiers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7 (XPython)
# language: python
# name: xpython
# ---
# ### Entity Linking with spaCy
import rdflib
from rdflib import Graph, Literal, RDF, URIRef
from rdflib.namespace import FOAF , XSD, Namespace
import pandas as pd
import spacy
from spacy.kb import KnowledgeBase
import os
import csv
import re
import random
nlp = spacy.load("en_core_web_lg")
# ! python -m spacy validate
# ### Skywalker is ambiguous
starwars_text = 'Skywalker, also known as <NAME>, is a fictional character in the Star Wars franchise'
starwars_text
doc = nlp(starwars_text)
for ent in doc.ents:
print(f"Named Entity '{ent.text}' with label '{ent.label_}'")
# ### Get names related to 'Skywalker'
graph = rdflib.Graph()
graph.parse('./data/starwars.ttl', format='turtle')
for ent in doc.ents:
print(ent)
query_str = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?s ?o
WHERE {
?s rdfs:label ?o.
FILTER regex(?o, "%s")
}
""" % ent
res = graph.query(query_str)
print(list(res))
print()
query_str = """
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX voc: <https://swapi.co/vocabulary/>
PREFIX xml: <http://www.w3.org/XML/1998/namespace>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
SELECT ?s ?o
WHERE {
?s rdfs:label ?o.
FILTER regex(?o, "Skywalker")
}
"""
res = graph.query(query_str)
list(res)
# Throw out candidates that do not have a description
query_str = """
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX voc: <https://swapi.co/vocabulary/>
PREFIX xml: <http://www.w3.org/XML/1998/namespace>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
SELECT ?s ?p ?o
WHERE {
?s <https://swapi.co/vocabulary/desc> ?o.
FILTER (?s=<https://swapi.co/resource/human/1>)
}
"""
res = graph.query(query_str)
list(res)
entities = pd.read_csv('./entity_linking_data/entities.txt', header=None, delimiter='|')
entities
# +
names = {}
descriptions = {}
for i, row in entities.iterrows():
qid = row[0]
name = row[1]
desc = row[2]
names[qid] = name
descriptions[qid] = desc
for qid in names.keys():
print(f"qid={qid}, name={names[qid]}, desc={descriptions[qid]}")
# -
# ### Create spaCy KB
kb = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=300)
for qid, desc in descriptions.items():
desc_doc = nlp(desc)
desc_enc = desc_doc.vector
print(desc_enc)
kb.add_entity(entity=qid, entity_vector=desc_enc, freq=342)
len(kb)
for qid, name in names.items():
print(qid, name)
kb.add_alias(alias=name, entities=[qid], probabilities=[1])
qids = names.keys()
probs = [1/len(qids) for qid in qids]
kb.add_alias(alias="Skywalker", entities=qids, probabilities=probs)
print(f"Entities in the KB: {kb.get_entity_strings()}")
print(f"Aliases in the KB: {kb.get_alias_strings()}")
print(f"Candidates for 'Shmi Skywalker': {[c.entity_ for c in kb.get_candidates('Shmi Skywalker')]}")
print(f"Candidates for '<NAME>walker': {[c.entity_ for c in kb.get_candidates('<NAME>walker')]}")
print(f"Candidates for '<NAME>': {[c.entity_ for c in kb.get_candidates('<NAME>walker')]}")
print(f"Candidates for 'Skywalker': {[c.entity_ for c in kb.get_candidates('Skywalker')]}")
print(f"Candidates for 'R2-D2': {[c.entity_ for c in kb.get_candidates('R2-D2')]}")
label_to_name = {
'Human11': '<NAME>',
'Human1': '<NAME>',
'Human43': '<NAME>'
}
# ### Save KB
output_dir = './entity_linking_data'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
kb.dump(output_dir + "/my_kb")
nlp.to_disk(output_dir + "/my_nlp")
# ### Train EntityLinker: Create Training/Testing Datasets
# Creating training data by scraping sentences with 'Skywalker' from internet
# > https://starwars.fandom.com/wiki/Shmi_Skywalker_Lars
# +
corpus = []
labels = []
with open ('./entity_linking_data/skywalker_input_data.txt', 'r') as f:
for row in csv.reader(f,delimiter=','):
text, label = row[0].strip(), row[1].strip()
corpus.append(text)
labels.append(label)
print(corpus[0], labels[0])
print(corpus[-1], labels[-1])
# -
# > Training data must be representative of ambiguous entities after running NER
doc = nlp(corpus[2])
print(doc.ents[0].start_char, doc.ents[0].end_char)
for idx, i in enumerate(corpus):
    doc = nlp(corpus[idx])
    print(idx+1, doc.ents)
# +
dataset = []
for index, text in enumerate(corpus):
offset = re.search(r'Skywalker', text).span()
dataset.append((text, {"links": {offset: {labels[index]:1.0}}}))
print(dataset[2])
# +
gold_ids = []
for text, annot in dataset:
for span, links_dict in annot["links"].items():
for link, value in links_dict.items():
if value:
gold_ids.append(link)
from collections import Counter
print(Counter(gold_ids))
# +
train_dataset = []
test_dataset = []
for QID in qids:
indices = [i for i, j in enumerate(gold_ids) if j == QID]
train_dataset.extend(dataset[index] for index in indices[0:8]) # first 8 in training
test_dataset.extend(dataset[index] for index in indices[8:10]) # last 2 in test
random.shuffle(train_dataset)
random.shuffle(test_dataset)
# -
TRAIN_DOCS = []
for text, annotation in train_dataset:
doc = nlp(text)
TRAIN_DOCS.append((doc, annotation))
TRAIN_DOCS[:5]
# ### Create Entity_Linker pipeline
entity_linker = nlp.create_pipe("entity_linker", config={"incl_prior": False})
entity_linker.set_kb(kb)
nlp.add_pipe(entity_linker, last=True)
# +
from spacy.util import minibatch, compounding
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "entity_linker"]
with nlp.disable_pipes(*other_pipes): # train only the entity_linker
optimizer = nlp.begin_training()
for itn in range(500): # 500 iterations takes about a minute to train
random.shuffle(TRAIN_DOCS)
batches = minibatch(TRAIN_DOCS, size=compounding(4.0, 32.0, 1.001)) # increasing batch sizes
losses = {}
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(
texts,
annotations,
drop=0.5, # prevent overfitting
losses=losses,
sgd=optimizer,
)
if itn % 50 == 0:
print(itn, "Losses", losses) # print the training loss
print(itn, "Losses", losses)
# -
other_pipes
# ### Test the EL Model
text = 'Skywalker, also known as <NAME>, is a fictional character in the Star Wars franchise'
doc = nlp(text)
for ent in doc.ents:
print(ent.text, ent.label_, ent.kb_id_, str(label_to_name.get(ent.kb_id_)))
# > Darth Vader links to None because that alias was never added to the knowledge base or seen during entity-linker training; only the Skywalker entities were.
# ### Test on Training Data
correct = 0
for text, true_annot in train_dataset:
print(text)
print(f"Gold annotation: {true_annot}")
doc = nlp(text)
for ent in doc.ents:
if ent.text == "Skywalker":
print(f"Prediction: {ent.text}, {ent.label_}, {ent.kb_id_}, {str(label_to_name.get(ent.kb_id_))}")
if ent.kb_id_ == list(true_annot['links'][list(true_annot['links'].keys())[0]].keys())[0]:
correct += 1
print()
print('Correct: {} out of {}, Accuracy: {}'.format(correct, len(train_dataset), correct/len(train_dataset)))
correct = 0
for text, true_annot in test_dataset:
print(text)
print(f"Gold annotation: {true_annot}")
doc = nlp(text)
for ent in doc.ents:
if ent.text == "Skywalker":
print(f"Prediction: {ent.text}, {ent.label_}, {ent.kb_id_}, {str(label_to_name.get(ent.kb_id_))}")
if ent.kb_id_ == list(true_annot['links'][list(true_annot['links'].keys())[0]].keys())[0]:
correct += 1
print()
print('Correct: {} out of {}, Accuracy: {}'.format(correct, len(test_dataset), correct/len(test_dataset)))
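# > The train and test evaluation loops above share the same logic; a small helper like the sketch below (assuming the same `nlp` object and dataset format) could factor it out:
# +
def evaluate_linker(nlp, dataset):
    """Count how often the predicted KB id for 'Skywalker' matches the gold link."""
    correct = 0
    for text, true_annot in dataset:
        # the gold QID is the single key of the inner links dict
        gold_qid = next(iter(next(iter(true_annot["links"].values()))))
        doc = nlp(text)
        for ent in doc.ents:
            if ent.text == "Skywalker" and ent.kb_id_ == gold_qid:
                correct += 1
    return correct, len(dataset), correct / len(dataset)

print(evaluate_linker(nlp, test_dataset))
# -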
# ### Save Model
output_dir = './entity_linking_data'
nlp.to_disk(output_dir + "/trained_el")
# ### Load and Predict
output_dir = './entity_linking_data'
nlp = spacy.load(output_dir + "/trained_el")
text = 'Skywalker, also known as <NAME>, is a fictional character in the Star Wars franchise'
doc = nlp(text)
for ent in doc.ents:
print(ent.text, ent.label_, ent.kb_id_, str(label_to_name.get(ent.kb_id_)))
print(str(1) + ',' + str(2))
print(*[1,2,3], sep=',')
# ### Entity Linking Use Case
text = '<NAME>, also known by his birth name <NAME>, is a fictional character in the Star Wars franchise.123 Vader appears in the original film trilogy as a pivotal antagonist whose actions drive the plot, while his past as <NAME> and the story of his corruption are central to the narrative of the prequel trilogy., The character was created by <NAME> and has been portrayed by numerous actors. His appearances span the first six Star Wars films, as well as Rogue One, and his character is heavily referenced in Star Wars: The Force Awakens. He is also an important character in the Star Wars expanded universe of television series, video games, novels, literature and comic books. Originally a Jedi prophesied to bring balance to the Force, he falls to the dark side of the Force and serves the evil Galactic Empire at the right hand of his Sith master, Emperor Palpatine (also known as <NAME>).4 He is also the father of <NAME> and <NAME>, secret husband of <NAME> and grandfather of <NAME>., <NAME> has become one of the most iconic villains in popular culture, and has been listed among the greatest villains and fictional characters ever.56 The American Film Institute listed him as the third greatest movie villain in cinema history on 100 Years... 100 Heroes and Villains, behind Hannibal Lecter and Norman Bates.7 However, other critics consider him a tragic hero, citing his original motivations for the greater good before his fall to the dark side.'
text
doc = nlp(text)
for ent in doc.ents:
print(ent.text, ent.label_, ent.kb_id_, str(label_to_name.get(ent.kb_id_)))
# > Regex is a bottleneck
for ent in doc.ents:
print(ent)
query_str = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?s ?o
WHERE {
?s rdfs:label ?o.
FILTER regex(?o, "%s")
}
""" % ent
res = graph.query(query_str)
print(list(res))
print()
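# > One way to avoid the regex scan (a sketch, assuming the same rdflib `graph`) is to filter on exact, case-insensitive label equality instead:
# +
for ent in doc.ents:
    query_str = """
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    SELECT ?s ?o
    WHERE {
        ?s rdfs:label ?o.
        FILTER (LCASE(STR(?o)) = LCASE("%s"))
    }
    """ % ent
    res = graph.query(query_str)
    print(list(res))
    print()
# -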
| notebooks/Entity_Linking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="SRxqMakh3PRY" colab_type="text"
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="47ieDn-jNLYd" colab_type="text"
# # NASBench-101
#
# This colab accompanies [**NAS-Bench-101: Towards Reproducible Neural Architecture Search**](https://arxiv.org/abs/1902.09635) and the rest of the code at https://github.com/google-research/nasbench.
#
# In this colab, we demonstrate how to use the dataset for simple benchmarking and analysis. The publicly available and free hosted colab instances are sufficient to run this colab.
# + [markdown] id="lBNMsBUS3SAq" colab_type="text"
# ## Load NASBench library and dataset
# + id="vl1oLYux3FhJ" colab_type="code" outputId="260484df-3a86-48ef-88a4-f8e441d524bc" colab={"base_uri": "https://localhost:8080/", "height": 853}
# This code was written in TF 1.12 but should be supported all the way through
# TF 1.15. Untested in TF 2.0+.
# %tensorflow_version 1.x
# Download the raw data (only 108 epoch data points, for full dataset,
# uncomment the second line for nasbench_full.tfrecord).
# !curl -O https://storage.googleapis.com/nasbench/nasbench_only108.tfrecord
# # !curl -O https://storage.googleapis.com/nasbench/nasbench_full.tfrecord
# Clone and install the code and dependencies.
# !git clone https://github.com/google-research/nasbench
# !pip install ./nasbench
# Initialize the NASBench object which parses the raw data into memory (this
# should only be run once as it takes up to a few minutes).
from nasbench import api
# Use nasbench_full.tfrecord for full dataset (run download command above).
nasbench = api.NASBench('nasbench_only108.tfrecord')
# + id="oFhFRmck7NzM" colab_type="code" colab={}
# Standard imports
import copy
import numpy as np
import matplotlib.pyplot as plt
import random
# Useful constants
INPUT = 'input'
OUTPUT = 'output'
CONV3X3 = 'conv3x3-bn-relu'
CONV1X1 = 'conv1x1-bn-relu'
MAXPOOL3X3 = 'maxpool3x3'
NUM_VERTICES = 7
MAX_EDGES = 9
EDGE_SPOTS = NUM_VERTICES * (NUM_VERTICES - 1) / 2 # Upper triangular matrix
OP_SPOTS = NUM_VERTICES - 2 # Input/output vertices are fixed
ALLOWED_OPS = [CONV3X3, CONV1X1, MAXPOOL3X3]
ALLOWED_EDGES = [0, 1] # Binary adjacency matrix
# + [markdown] id="llC2AebQOWq9" colab_type="text"
# ## Basic usage
# + id="kZvm6i0VGP_M" colab_type="code" outputId="2999b6a6-9f1d-4361-8be9-ee54afdb82be" colab={"base_uri": "https://localhost:8080/", "height": 258}
# Query an Inception-like cell from the dataset.
cell = api.ModelSpec(
matrix=[[0, 1, 1, 1, 0, 1, 0], # input layer
[0, 0, 0, 0, 0, 0, 1], # 1x1 conv
[0, 0, 0, 0, 0, 0, 1], # 3x3 conv
[0, 0, 0, 0, 1, 0, 0], # 5x5 conv (replaced by two 3x3's)
[0, 0, 0, 0, 0, 0, 1], # 5x5 conv (replaced by two 3x3's)
[0, 0, 0, 0, 0, 0, 1], # 3x3 max-pool
[0, 0, 0, 0, 0, 0, 0]], # output layer
# Operations at the vertices of the module, matches order of matrix.
ops=[INPUT, CONV1X1, CONV3X3, CONV3X3, CONV3X3, MAXPOOL3X3, OUTPUT])
# Querying multiple times may yield different results. Each cell is evaluated 3
# times at each epoch budget and querying will sample one randomly.
data = nasbench.query(cell)
for k, v in data.items():
print('%s: %s' % (k, str(v)))
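# Since query() samples one of the three repeated evaluations at random, averaging a few
# queries gives a more stable estimate (a quick sketch using only the query() call shown above;
# note that each call also advances the time/epoch budget counters).
n_samples = 10
accs = [nasbench.query(cell)['validation_accuracy'] for _ in range(n_samples)]
print('mean validation accuracy over %d queries: %f' % (n_samples, np.mean(accs)))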
# + [markdown] id="uXnVdG32Oe19" colab_type="text"
# ## Example search experiment (random vs. evolution)
# + id="Xtl_Aqr7OeOF" colab_type="code" colab={}
def random_spec():
"""Returns a random valid spec."""
while True:
matrix = np.random.choice(ALLOWED_EDGES, size=(NUM_VERTICES, NUM_VERTICES))
matrix = np.triu(matrix, 1)
ops = np.random.choice(ALLOWED_OPS, size=(NUM_VERTICES)).tolist()
ops[0] = INPUT
ops[-1] = OUTPUT
spec = api.ModelSpec(matrix=matrix, ops=ops)
if nasbench.is_valid(spec):
return spec
def mutate_spec(old_spec, mutation_rate=1.0):
"""Computes a valid mutated spec from the old_spec."""
while True:
new_matrix = copy.deepcopy(old_spec.original_matrix)
new_ops = copy.deepcopy(old_spec.original_ops)
# In expectation, V edges flipped (note that most end up being pruned).
edge_mutation_prob = mutation_rate / NUM_VERTICES
for src in range(0, NUM_VERTICES - 1):
for dst in range(src + 1, NUM_VERTICES):
if random.random() < edge_mutation_prob:
new_matrix[src, dst] = 1 - new_matrix[src, dst]
# In expectation, one op is resampled.
op_mutation_prob = mutation_rate / OP_SPOTS
for ind in range(1, NUM_VERTICES - 1):
if random.random() < op_mutation_prob:
available = [o for o in nasbench.config['available_ops'] if o != new_ops[ind]]
new_ops[ind] = random.choice(available)
new_spec = api.ModelSpec(new_matrix, new_ops)
if nasbench.is_valid(new_spec):
return new_spec
def random_combination(iterable, sample_size):
"""Random selection from itertools.combinations(iterable, r)."""
pool = tuple(iterable)
n = len(pool)
indices = sorted(random.sample(range(n), sample_size))
return tuple(pool[i] for i in indices)
def run_random_search(max_time_budget=5e6):
"""Run a single roll-out of random search to a fixed time budget."""
nasbench.reset_budget_counters()
times, best_valids, best_tests = [0.0], [0.0], [0.0]
while True:
spec = random_spec()
data = nasbench.query(spec)
# It's important to select models only based on validation accuracy, test
# accuracy is used only for comparing different search trajectories.
if data['validation_accuracy'] > best_valids[-1]:
best_valids.append(data['validation_accuracy'])
best_tests.append(data['test_accuracy'])
else:
best_valids.append(best_valids[-1])
best_tests.append(best_tests[-1])
time_spent, _ = nasbench.get_budget_counters()
times.append(time_spent)
if time_spent > max_time_budget:
# Break the first time we exceed the budget.
break
return times, best_valids, best_tests
def run_evolution_search(max_time_budget=5e6,
population_size=50,
tournament_size=10,
mutation_rate=1.0):
"""Run a single roll-out of regularized evolution to a fixed time budget."""
nasbench.reset_budget_counters()
times, best_valids, best_tests = [0.0], [0.0], [0.0]
population = [] # (validation, spec) tuples
# For the first population_size individuals, seed the population with randomly
# generated cells.
for _ in range(population_size):
spec = random_spec()
data = nasbench.query(spec)
time_spent, _ = nasbench.get_budget_counters()
times.append(time_spent)
population.append((data['validation_accuracy'], spec))
if data['validation_accuracy'] > best_valids[-1]:
best_valids.append(data['validation_accuracy'])
best_tests.append(data['test_accuracy'])
else:
best_valids.append(best_valids[-1])
best_tests.append(best_tests[-1])
if time_spent > max_time_budget:
break
# After the population is seeded, proceed with evolving the population.
while True:
sample = random_combination(population, tournament_size)
best_spec = sorted(sample, key=lambda i:i[0])[-1][1]
new_spec = mutate_spec(best_spec, mutation_rate)
data = nasbench.query(new_spec)
time_spent, _ = nasbench.get_budget_counters()
times.append(time_spent)
# In regularized evolution, we kill the oldest individual in the population.
population.append((data['validation_accuracy'], new_spec))
population.pop(0)
if data['validation_accuracy'] > best_valids[-1]:
best_valids.append(data['validation_accuracy'])
best_tests.append(data['test_accuracy'])
else:
best_valids.append(best_valids[-1])
best_tests.append(best_tests[-1])
if time_spent > max_time_budget:
break
return times, best_valids, best_tests
# + id="HMfF2zXxpQNA" colab_type="code" outputId="edff2ca4-8a55-4537-838a-dedc7a9f360b" colab={"base_uri": "https://localhost:8080/", "height": 187}
# Run random search and evolution search 10 times each. This should take a few
# minutes to run. Note that each run would have taken days of compute to
# actually train and evaluate if the dataset were not precomputed.
random_data = []
evolution_data = []
for repeat in range(10):
print('Running repeat %d' % (repeat + 1))
times, best_valid, best_test = run_random_search()
random_data.append((times, best_valid, best_test))
times, best_valid, best_test = run_evolution_search()
evolution_data.append((times, best_valid, best_test))
# + id="2d-yRmuhkz35" colab_type="code" outputId="2ae12897-074f-4705-db94-6be1aa4208dc" colab={"base_uri": "https://localhost:8080/", "height": 367}
plt.figure(figsize=(20, 5))
plt.subplot(1, 3, 1)
for times, best_valid, best_test in random_data:
plt.plot(times, best_valid, label='valid', color='red', alpha=0.5)
plt.plot(times, best_test, label='test', color='blue', alpha=0.5)
plt.ylabel('accuracy')
plt.xlabel('time spent (seconds)')
plt.ylim(0.92, 0.96)
plt.grid()
plt.title('Random search trajectories (red=validation, blue=test)')
plt.subplot(1, 3, 2)
for times, best_valid, best_test in evolution_data:
plt.plot(times, best_valid, label='valid', color='red', alpha=0.5)
plt.plot(times, best_test, label='test', color='blue', alpha=0.5)
plt.ylabel('accuracy')
plt.xlabel('time spent (seconds)')
plt.ylim(0.92, 0.96)
plt.grid()
plt.title('Evolution search trajectories (red=validation, blue=test)')
# + id="F9lB_qL2oz9M" colab_type="code" outputId="2a9d25c8-3adb-456a-ae2d-3229a7ed8bfe" colab={"base_uri": "https://localhost:8080/", "height": 283}
# Compare the mean test accuracy along with error bars.
def plot_data(data, color, label, gran=10000, max_budget=5000000):
"""Computes the mean and IQR fixed time steps."""
xs = range(0, max_budget+1, gran)
mean = [0.0]
per25 = [0.0]
per75 = [0.0]
repeats = len(data)
pointers = [1 for _ in range(repeats)]
cur = gran
while cur < max_budget+1:
all_vals = []
for repeat in range(repeats):
while (pointers[repeat] < len(data[repeat][0]) and
data[repeat][0][pointers[repeat]] < cur):
pointers[repeat] += 1
prev_time = data[repeat][0][pointers[repeat]-1]
prev_test = data[repeat][2][pointers[repeat]-1]
next_time = data[repeat][0][pointers[repeat]]
next_test = data[repeat][2][pointers[repeat]]
assert prev_time < cur and next_time >= cur
# Linearly interpolate the test between the two surrounding points
cur_val = ((cur - prev_time) / (next_time - prev_time)) * (next_test - prev_test) + prev_test
all_vals.append(cur_val)
all_vals = sorted(all_vals)
mean.append(sum(all_vals) / float(len(all_vals)))
per25.append(all_vals[int(0.25 * repeats)])
per75.append(all_vals[int(0.75 * repeats)])
cur += gran
plt.plot(xs, mean, color=color, label=label, linewidth=2)
plt.fill_between(xs, per25, per75, alpha=0.1, linewidth=0, facecolor=color)
plot_data(random_data, 'red', 'random')
plot_data(evolution_data, 'blue', 'evolution')
plt.legend(loc='lower right')
plt.ylim(0.92, 0.95)
plt.xlabel('total training time spent (seconds)')
plt.ylabel('accuracy')
plt.grid()
# + [markdown] id="DPnRtp0zXUDh" colab_type="text"
# ## More information
#
# For more information on using the dataset, see the API documentation at https://github.com/google-research/nasbench/blob/master/nasbench/api.py.
#
| NASBench.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gmihaila/character-mining/blob/developer/doc/json_tsv.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="6oaNu3HJtTTo"
# # Parse transcripts to **.tsv** file
#
# ## Parse all transcripts for a more seamless experience
#
# Using the **.json** files from each season, create a master file that contains all transcripts in an easier-to-work-with format.
#
# The notebook will create **friends_transcripts.tsv**, which contains all seasons and episodes.
#
# This is a sample of the **.tsv** file:
#
# <br>
#
# |season_id|episode_id|scene_id|utterance_id|speaker|tokens|transcript|
# |:-|:-|:-|:-|:-|:-|:-|
# |0| s01| e01| c01| u001| <NAME>| [[There, 's, nothing, to, tell, !], [He, 's, j...| There's nothing to tell! He's just some guy I ...|
# |1| s01| e01| c01| u002| <NAME>| [[C'mon, ,, you, 're, going, out, with, the, g...| C'mon, you're going out with the guy! There's ...|
# + [markdown] id="vF3UbENTtz2_"
# # Imports
# + id="gjr_J342tOPq"
import requests
import json
import pandas as pd
from tqdm.notebook import tqdm
# + [markdown] id="OdnKsoFbt1Wd"
# # Helper Functions
# + id="J9pRq4Szz5eD" outputId="23f792b1-d7b8-4daf-8d3f-32fddb787ee9" colab={"base_uri": "https://localhost:8080/", "height": 283, "referenced_widgets": ["15dd8cf9a1e74381b0ef14a957ca7619", "ce76a25c3ba64da89f56142162e672ba", "f6c8f8b2642449ecb8301ee67af73e0c", "1924d24a50e3415b9d9e7573a6df986e", "<KEY>", "<KEY>", "2e2ea5bc2838434791009676bdee45c4", "d98ab96a83454a998d2c33935ea2fd44"]}
# define data type
friends_data = dict(season_id=[],
episode_id=[],
scene_id=[],
utterance_id=[],
speaker=[],
tokens=[],
transcript=[]
)
# loop through each season
print('Loading seasons...')
for season_index in tqdm(range(1, 11)):
season_index = '0%d'%season_index if season_index <10 else str(season_index)
# url of json file
json_url = 'https://raw.githubusercontent.com/emorynlp/character-mining/master/json/friends_season_%s.json'%season_index
# get request from url
request = requests.get(json_url)
    # read season from json file
season = json.loads(request.text)
# get season id
season_id = season['season_id']
# read each episode
for episode in season['episodes']:
episode_id = episode['episode_id']
# read each scene
for scene in episode['scenes']:
scene_id = scene['scene_id']
# read each utterance
for utterance in scene['utterances']:
utterance_id = utterance['utterance_id']
speaker = utterance['speakers'][0] if utterance['speakers'] else 'unknown'
friends_data['season_id'].append(season_id)
friends_data['episode_id'].append(episode_id.split('_')[-1])
friends_data['scene_id'].append(scene_id.split('_')[-1])
friends_data['utterance_id'].append(utterance_id.split('_')[-1])
friends_data['speaker'].append(speaker)
friends_data['tokens'].append(utterance['tokens'])
friends_data['transcript'].append(utterance['transcript'])
# save dictionary to data frame
friends_df = pd.DataFrame(friends_data)
# save data frame to .tsv
friends_df.to_csv('friends_transcripts.tsv', sep='\t', index=False)
print('File saved in `friends_transcripts.tsv` !')
# show sample
friends_df.head()
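# If you want to work with the saved file later, it can be read back with pandas
# (a minimal sketch; sep='\t' matches how the file was written above -- note that the
# `tokens` column comes back as plain strings rather than lists):
reloaded_df = pd.read_csv('friends_transcripts.tsv', sep='\t')
reloaded_df.head()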
| doc/json_tsv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 15: Off to analyzing text
# Way to go! You have already learned a lot of essential components of the Python language. Being able to deal with data structures, import packages, build your own functions and operate with files is not only essential for most tasks in Python, but also a prerequisite for text analysis. We have applied some common preprocessing steps like casefolding/lowercasing, punctuation removal, and stemming/lemmatization. Did you know that there are some very useful NLP packages and modules that do some of these steps? One that is often used in text analysis is the Python package **NLTK (the Natural Language Toolkit)**.
#
# ### At the end of this chapter, you will be able to:
# * have an idea of the NLP tasks that constitute an NLP pipeline
# * use the functions of the NLTK module to manipulate the content of files for NLP purposes (e.g. sentence splitting, tokenization, POS-tagging, and lemmatization);
# * do nesting of multiple for-loops or files
#
# ### More NLP software for Python:
# * [NLTK](http://www.nltk.org/)
# * [SpaCy](https://spacy.io/)
# * [Stanford CoreNLP](https://stanfordnlp.github.io/CoreNLP/index.html)
# * [About Python NLP libraries](https://elitedatascience.com/python-nlp-libraries)
#
#
# If you have **questions** about this chapter, please contact us **(<EMAIL>)**.
# # 1 A short intro to text processing
# There are many aspects of text we can (try to) analyze. Commonly used analyses conducted in Natural Language Processing (**NLP**) are for instance:
#
# * determining the part of speech of words in a text (verb, noun, etc.)
# * analyzing the syntactic relations between words and phrases in a sentence (i.e., syntactic parsing)
# * analyzing which entities (people, organizations, locations) are mentioned in a text
#
# ...and many more. Each of these aspects is addressed within its own **NLP task**.
#
# **The NLP pipeline**
#
# Usually, these tasks are carried out sequentially because they depend on each other. For instance, we need to first tokenize the text (split it into words) in order to be able to assign part-of-speech tags to each word. This sequence is often called an **NLP pipeline**. For example, a general pipeline could consist of the components shown below (taken from [here](https://www.slideshare.net/YuriyGuts/natural-language-processing-nlp)). You can see the NLP pipeline of the NewsReader project [here](http://www.newsreader-project.eu/files/2014/02/SystemArchitecture.png) (you can ignore the middle part of the picture and focus on the blue and green boxes in the outer row).
#
# <img src='images/nlp-pipeline.jpg'>
#
# In this chapter we will look into four simple NLP modules that are nevertheless very common in NLP: **tokenization, sentence splitting**, **lemmatization** and **POS tagging**.
#
# There are also more advanced processing modules out there - feel free to do some research yourself :-)
# # 2 The NLTK package
# NLTK (Natural Language Processing Toolkit) is a module we can use for most fundamental aspects of natural language processing. There are many more advanced approaches out there, but it is a good way of getting started.
#
# Here we will show you how to use it for tokenization, sentence splitting, POS tagging, and lemmatization. These steps are necessary processing steps for most NLP tasks.
#
# We will first give you an overview of all tasks and then delve into each of them in more detail.
#
# Before we can use NLTK for the first time, we have to make sure it is downloaded and installed on our computer (some of you may have already done this).
#
# To install NLTK, please try to run the following two cells. If this does not work, please try and follow the [documentation](http://www.nltk.org/install.html). If you don't manage to get this to work, please ask for help.
# + language="bash"
# pip install nltk
# -
# Once you have downloaded the NLTK 'book' collection, you do not need to run the download again; the next time you use NLTK, it is sufficient to import it.
# +
# downloading nltk
import nltk
nltk.download('book')
# -
# Now that we have installed and downloaded NLTK, let's look at an example of a simple NLP pipeline. In the following cell, you can observe how we tokenize raw text into tokens and sentences, perform part-of-speech tagging and lemmatize some of the tokens. Don't worry about the details just yet - we will go through them step by step.
# +
text = "This example sentence is used for illustrating some basic NLP tasks. Language is awesome!"
# Tokenization
tokens = nltk.word_tokenize(text)
# Sentence splitting
sentences = nltk.sent_tokenize(text)
# POS tagging
tagged_tokens = nltk.pos_tag(tokens)
# Lemmatization
lmtzr = nltk.stem.wordnet.WordNetLemmatizer()
lemma=lmtzr.lemmatize(tokens[4], 'v')
# Printing all information
print(tokens)
print(sentences)
print(tagged_tokens)
print(lemma)
# -
# ## 2.1 Tokenization and sentence splitting with NLTK
# ### 2.1.1 `word_tokenize()`
# Now, let's try tokenizing our Charlie story! First, we will open and read the file again and assign the file contents to the variable `content`. Then, we can call the `word_tokenize()` function from the `nltk` module as follows:
# +
with open("../Data/Charlie/charlie.txt") as infile:
content = infile.read()
tokens = nltk.word_tokenize(content)
print(type(tokens), len(tokens))
print(tokens)
# -
# As you can see, we now have a list of all words in the text. The punctuation marks are also in the list, but as separate tokens.
# ### 2.1.2 `sent_tokenize()`
# Another thing that NLTK can do for you is to split a text into sentences by using the `sent_tokenize()` function. We use it on the entire text (as a string):
# +
with open("../Data/Charlie/charlie.txt") as infile:
content = infile.read()
sentences = nltk.sent_tokenize(content)
print(type(sentences), len(sentences))
print(sentences)
# -
# We can now do all sorts of cool things with these lists. For example, we can search for all words that have certain letters in them and add them to a list. Let's say we want to find all present participles in the text. We know that present participles end with *-ing*, so we can do something like this:
# +
# Open and read in file as a string, assign it to the variable `content`
with open("../Data/Charlie/charlie.txt") as infile:
content = infile.read()
# Split up entire text into tokens using word_tokenize():
tokens = nltk.word_tokenize(content)
# create an empty list to collect all words having the present participle -ing:
present_participles = []
# looking through all tokens
for token in tokens:
    # checking if a token ends with the present participle -ing
if token.endswith("ing"):
# if the condition is met, add it to the list we created above (present_participles)
present_participles.append(token)
# Print the list to inspect it
print(present_participles)
# -
# This looks good! We now have a list of words like *boiling*, *sizzling*, etc. However, we can see that there is one word in the list that actually is not a present participle (*ceiling*). Of course, also other words can end with *-ing*. So if we want to find all present participles, we have to come up with a smarter solution.
# ## 2.2. Part-of-speech (POS) tagging
# Once again, NLTK comes to the rescue. Using the function `pos_tag()`, we can label each word in the text with its part of speech.
#
# To do pos-tagging, you first need to tokenize the text. We have already done this above, but we will repeat the steps here, so you get a sense of what an NLP pipeline may look like.
# ### 2.2.1 `pos_tag()`
# To see how `pos_tag()` can be used, we can (as always) look at the documentation by using the `help()` function. As we can see, `pos_tag()` takes a tokenized text as input and returns a list of tuples in which the first element corresponds to the token and the second to the assigned pos-tag.
# As always, we can start by reading the documentation:
help(nltk.pos_tag)
# +
# Open and read in file as a string, assign it to the variable `content`
with open("../Data/Charlie/charlie.txt") as infile:
content = infile.read()
# Split up entire text into tokens using word_tokenize():
tokens = nltk.word_tokenize(content)
# Apply pos tagging to the tokenized text
tagged_tokens = nltk.pos_tag(tokens)
# Inspect pos tags
print(tagged_tokens)
# -
# ### 2.2.2 Working with POS tags
# As we saw above, `pos_tag()` returns a list of tuples: The first element is the token, the second element indicates the part of speech (POS) of the token.
#
# This POS tagger uses the POS tag set of the Penn Treebank Project, which can be found [here](https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html). For example, all tags starting with a V are used for verbs.
#
# We can now use this, for example, to identify all the verbs in a text:
#
# +
# Open and read in file as a string, assign it to the variable `content`
with open("../Data/Charlie/charlie.txt") as infile:
content = infile.read()
# Apply tokenization and POS tagging
tokens = nltk.word_tokenize(content)
tagged_tokens = nltk.pos_tag(tokens)
# List of verb tags (i.e. tags we are interested in)
verb_tags = ["VBD", "VBG", "VBN", "VBP", "VBZ"]
# Create an empty list to collect all verbs:
verbs = []
# Iterating over all tagged tokens
for token, tag in tagged_tokens:
# Checking if the tag is any of the verb tags
if tag in verb_tags:
# if the condition is met, add it to the list we created above
verbs.append(token)
# Print the list to inspect it
print(verbs)
# -
# ## 2.3. Lemmatization
# We can also use NLTK to lemmatize words.
#
# The lemma of a word is the form of the word which is usually used in dictionary entries. This is useful for many NLP tasks, as it generalizes better than the exact form in which a word appears in the text. To a computer, `cat` and `cats` are two completely different tokens, even though we know they are both forms of the same lemma.
#
#
# ### 2.3.1 The WordNet lemmatizer
# We will use the WordNetLemmatizer for this using the `lemmatize()` function. In the code below, we loop through the list of verbs, lemmatize each of the verbs, and add them to a new list called `verb_lemmas`. Again, we show all the processing steps (consider the comments in the code below):
# +
#################################################################################
#### Process text as explained above ###
with open("../Data/Charlie/charlie.txt") as infile:
content = infile.read()
tokens = nltk.word_tokenize(content)
tagged_tokens = nltk.pos_tag(tokens)
verb_tags = ["VBD", "VBG", "VBN", "VBP", "VBZ"]
verbs = []
for token, tag in tagged_tokens:
if tag in verb_tags:
verbs.append(token)
print(verbs)
#############################################################################
#### Use the list of verbs collected above to lemmatize all the verbs ###
# Instatiate a lemmatizer object
lmtzr = nltk.stem.wordnet.WordNetLemmatizer()
# Create list to collect all the verb lemmas:
verb_lemmas = []
for participle in verbs:
# For this lemmatizer, we need to indicate the POS of the word (in this case, v = verb)
lemma = lmtzr.lemmatize(participle, "v")
verb_lemmas.append(lemma)
print(verb_lemmas)
# -
# **Note about the wordnet lemmatizer:**
#
# We need to specify a POS tag to the WordNet lemmatizer, in a WordNet format ("n" for noun, "v" for verb, "a" for adjective). If we do not indicate the Part-of-Speech tag, the WordNet lemmatizer thinks it is a noun (this is the default value for its part-of-speech). See the examples below:
test_nouns = ('building', 'applications', 'leafs')
for n in test_nouns:
print(f"Noun in conjugated form: {n}")
    default_lemma=lmtzr.lemmatize(n) # default lemmatization, without specifying POS, n is interpreted as a noun!
print(f"Default lemmatization: {default_lemma}")
verb_lemma=lmtzr.lemmatize(n, 'v')
print(f"Lemmatization as a verb: {verb_lemma}")
noun_lemma=lmtzr.lemmatize(n, 'n')
print(f"Lemmatization as a noun: {noun_lemma}")
print()
test_verbs=('grew', 'standing', 'plays')
for v in test_verbs:
print(f"Verb in conjugated form: {v}")
    default_lemma=lmtzr.lemmatize(v) # default lemmatization, without specifying POS, v is interpreted as a noun!
print(f"Default lemmatization: {default_lemma}")
verb_lemma=lmtzr.lemmatize(v, 'v')
print(f"Lemmatization as a verb: {verb_lemma}")
noun_lemma=lmtzr.lemmatize(v, 'n')
print(f"Lemmatization as a noun: {noun_lemma}")
print()
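# A common trick (the sketch below is just a small helper, not part of NLTK itself) is to map the Penn Treebank tags returned by `pos_tag()` to WordNet POS labels, so that tagged tokens can be fed straight into the lemmatizer:
# +
def penn_to_wordnet(tag):
    """Map a Penn Treebank tag to a WordNet POS label ('n', 'v', 'a' or 'r')."""
    if tag.startswith("V"):
        return "v"
    elif tag.startswith("J"):
        return "a"
    elif tag.startswith("R"):
        return "r"
    else:
        return "n"  # default to noun, like the lemmatizer itself

for token, tag in nltk.pos_tag(nltk.word_tokenize("The children were playing happily")):
    print(token, penn_to_wordnet(tag), lmtzr.lemmatize(token.lower(), penn_to_wordnet(tag)))
# -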
# # 3 Nesting
# So far, we typically used a single for-loop, or we were opening a single file at a time. In Python (and most programming languages), one can **nest** multiple loops or files in one another. For instance, we can use one (outer) for-loop to iterate through files, and then for each file iterate through all its sentences (internal for-loop). As we have learned above, `glob` is a convenient way of creating a list of files.
#
# You might think: can we stretch this to more levels? Iterate through files, then iterate through the sentences in these files, then iterate through each word in these sentences, then iterate through each letter in these words, etc. This is possible. Python (and most programming languages) allows you to nest (in theory) as many loops as you want. Keep in mind that nesting too deeply will eventually cause computational problems, but this also depends on the size of your data.
#
# For the tasks we are treating here, a couple of levels of nesting are fine.
#
# In the code below, we want to get an idea of the number and length of the sentences in the texts stored in the `../Data/dreams` directory. We do this by creating two for loops: We iterate over all the files in the directory (loop 1), apply sentence tokenization and iterate over all the sentences in the file (loop 2).
#
# Look at the code and comments below to figure out what is going on:
# +
import glob
### Loop 1 ####
# Loop1: iterate over all the files in the dreams directory
for filename in glob.glob("../Data/dreams/*.txt"):
# read in the file and assign the content to a variable
with open(filename, "r") as infile:
content = infile.read()
# split the content into sentences
sentences = nltk.sent_tokenize(content)
# Print the number of sentences in the file
print(f"INFO: File {filename} has {len(sentences)} sentences")
# For each file, assign a number to each sentence. Start with 0:
counter=0
#### Loop 2 ####
# Loop 2: loop over all the sentences in a file:
for sentence in sentences:
# add 1 to the counter
counter+=1
# tokenize the sentence
tokens=nltk.word_tokenize(sentence)
# print the number of tokens per sentence
print(f"Sentence {counter} has {len(tokens)} tokens")
# print an empty line after each file (this belongs to loop 1)
print()
# -
# # 4 Putting it all together
# In this section, we will use what we have learned above to write a small NLP program. We will go through all the steps and show how they can be put together. In the last chapters, we have already learned how to write functions. We will make use of this skill here.
#
# Our goal is to collect all the nouns from Vickie's dream reports.
#
# Before we write actual code, it is always good to consider which steps we need to carry out to reach the goal.
#
# Important steps to remember:
#
# * create a list of all the files we want to process
# * open and read the files
# * tokenize the texts
# * perform pos-tagging
# * collect all the tokens analyzed as nouns
#
# Remember, we first needed to import `nltk` to use it.
# ## 4.1 Writing a processing function for a single file
#
# Since we want to carry out the same task for each of the files, it is very useful (and good practice!) to write a single function which can do the processing. The following function reads the specified file and returns the tokens with their POS tags:
# +
import nltk
def tag_tokens_file(filepath):
"""Read the contents of the file found at the location specified in
FILEPATH and return a list of its tokens with their POS tags."""
with open(filepath, "r") as infile:
content = infile.read()
tokens = nltk.word_tokenize(content)
tagged_tokens = nltk.pos_tag(tokens)
return tagged_tokens
# -
# Now, instead of having to open a file, read the contents and close the file, we can just call the function `tag_tokens_file` to do this. We can test it on a single file:
filename = "../Data/dreams/vickie1.txt"
tagged_tokens = tag_tokens_file(filename)
print(tagged_tokens)
# ## 4.2 Iterating over all the files and applying the processing function
# We can also do this for each of the files in the `../Data/dreams` directory by using a for-loop:
# +
import glob
# Iterate over the `.txt` files in the directory and perform POS tagging on each of them
for filename in glob.glob("../Data/dreams/*.txt"):
tagged_tokens = tag_tokens_file(filename)
print(filename, "\n", tagged_tokens, "\n")
# -
# ## 4.3 Collecting all the nouns
# Now, we extend this code a bit so that we don't print all POS-tagged tokens of each file, but we get all (proper) nouns from the texts and add them to a list called `nouns_in_dreams`. Then, we print the set of nouns:
# +
# Create a list that will contain all nouns
nouns_in_dreams = []
# Iterate over the `.txt` files in the directory and perform POS tagging on each of them
for filename in glob.glob("../Data/dreams/*.txt"):
tagged_tokens = tag_tokens_file(filename)
# Get all (proper) nouns in the text ("NN" and "NNP") and add them to the list
for token, pos in tagged_tokens:
if pos in ["NN", "NNP"]:
nouns_in_dreams.append(token)
# Print the set of nouns in all dreams
print(set(nouns_in_dreams))
# -
# Now we have an idea of what Vickie dreams about!
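#
# A natural follow-up (a short sketch using the `nouns_in_dreams` list built above) is to count which nouns occur most often across the dream reports:
# +
from collections import Counter

noun_counts = Counter(nouns_in_dreams)
# print the 10 most frequent nouns in Vickie's dreams
for noun, count in noun_counts.most_common(10):
    print(noun, count)
# -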
#
# # Exercises
# **Exercise 1:**
#
# Try to collect all the present participles in the text stored in `../Data/Charlie/charlie.txt` using the NLTK tokenizer and POS-tagger.
# +
# your code here
# -
# You should get the following list:
# `['boiling', 'bubbling', 'hissing', 'sizzling', 'clanking', 'running', 'hopping', 'knowing', 'rubbing', 'cackling', 'going']`
# we can test our code using the assert statement (don't worry about this now,
# but if you want to use it, you can probably figure out how it works yourself :-)
# If our code is correct, we should get a compliment :-)
assert len(present_participles) == 11 and type(present_participles[0]) == str
print("Well done!")
# **Exercise 2:**
#
# The resulting list `verb_lemmas` above contains a lot of duplicates. Do you remember how you can get rid of these duplicates? Create a set in which each verb occurs only once and name it `unique_verbs`. Then print it.
# +
## the list is stored under the variable 'verb_lemmas'
# your code here
# -
# Test your code here! If your code is correct, you should get a compliment :-)
assert len(unique_verbs) == 28
print("Well done!")
# **Exercise 3:**
#
# Now use a for-loop to count the number of times that each of these verb lemmas occurs in the text! For each verb in the list you just created, get the count of this verb in `charlie.txt` using the `count()` method. Create a dictionary that contains the lemmas of the verbs as keys, and the counts of these verbs as values. Refer to the notebook about Topic 1 if you forgot how to use the `count()` method or how to create dictionary entries!
#
# Tip: you don't need to read in the file again, you can just use the list called verb_lemmas.
# +
verb_counts = {}
# Finish this for-loop
for verb in unique_verbs:
# your code here
print(verb_counts)
# -
# Test your code here! If your code is correct, you should get a compliment :-)
assert len(verb_counts) == 28 and verb_counts["bubble"] == 1 and verb_counts["be"] == 9
print("Well done!")
# **Exercise 4:**
#
# Write your counts to a file called `charlie_verb_counts.txt` and write it to `../Data/Charlie/charlie_verb_counts.txt` in the following format:
#
# verb, count
#
# verb, count
#
# ...
#
# Don't forget to use newline characters at the end of each line.
| Chapters/Chapter 15 - Off to analyzing text.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R 4.1.2
# language: R
# name: ir41
# ---
# # Average Monthly Temperatures, 1970-2004
#
# **Date:** 2021-12-02
#
# **Reference:**
library(TTR)
options(
jupyter.plot_mimetypes = "image/svg+xml",
repr.plot.width = 7,
repr.plot.height = 5
)
# ## Summary
#
# The aim of this notebook was to show how to decompose seasonal time series data using **R** so the trend, seasonal and irregular components can be estimated.
# Data on the average monthly temperatures in central England from January 1970 to December 2004 was plotted.
# The series was decomposed using the `decompose` function from `R.stats` and the seasonal factors displayed as a `matrix`.
# A seasonally adjusted series was calculated by subtracting the seasonal factors from the original series.
# The seasonally adjusted series was used to plot an estimate of the trend component by taking a simple moving average.
# The irregular component was estimated by subtracting the estimate of the trend and seasonal components from the original time series.
# ## Get the data
#
# Data on the average monthly temperatures in central England January 1970 to December 2004 is shown below.
monthlytemps <- read.csv("..\\..\\data\\moderntemps.csv")
head(monthlytemps)
modtemps <- monthlytemps$temperature
# ## Plot the time series
ts_modtemps <- ts(modtemps, start = c(1970, 1), frequency = 12)
plot.ts(ts_modtemps, xlab = "year", ylab = "temperature")
# The time series is highly seasonal with little evidence of a trend.
# There appears to be a constant level of approximately 10$^{\circ}$C.
# ## Decompose the data
#
# Use the `decompose` function from `R.stats` to return estimates of the trend, seasonal, and irregular components of the time series.
decomp_ts <- decompose(ts_modtemps)
# ## Seasonal factors
#
# Calculate the seasonal factors of the decomposed time series.
# Cast the `seasonal` time series object held in `decomp_ts` to a `vector`, slice the new vector to isolate a single period, and then cast the sliced vector to a named `matrix`.
sf <- as.vector(decomp_ts$seasonal)
(matrix(sf[1:12], dimnames = list(month.abb, c("factors"))))
# _Add a comment_
# ## Plot the components
#
# Plot the trend, seasonal, and irregular components in a single graphic.
plot(decomp_ts, xlab = "year")
# Plot the individual components of the decomposition by accessing the variables held in the `tsdecomp`.
# This will generally make the components easier to understand.
plot(decomp_ts$trend, xlab = "year", ylab = "temperature (Celsius)")
title(main = "Trend component")
plot(decomp_ts$seasonal, xlab = "year", ylab = "temperature (Celsius)")
title(main = "Seasonal component")
plot(decomp_ts$random, xlab = "year", ylab = "temperature (Celsius)")
title(main = "Irregular component")
# _Add comment on trend, seasonal, and irregular components._
#
# _Which component dominates the series?_
# ## Seasonal adjusted plot
#
# Plot the seasonally adjusted series by subtracting the seasonal factors from the original series.
adjusted_ts <- ts_modtemps - decomp_ts$seasonal
plot(adjusted_ts, xlab = "year", ylab = "temperature (Celsius)")
title(main = "Seasonally adjusted series")
# This new seasonally adjusted series only contains the trend and irregular components, so it can be treated as if it is non-seasonal data.
# Estimate the trend component by taking a simple moving average of order 35.
sma35_adjusted_ts <- SMA(adjusted_ts, n = 35)
plot.ts(sma35_adjusted_ts, xlab = "year", ylab = "temperature (Celsius)")
title(main = "Trend component (ma35)")
# Note that this is a different estimate of the trend component to what is contained in `decomp_ts`, as it uses a different order for the simple moving average.
| jupyter/2_time_series/2_03_ljk_decompose_seasonal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Shared Library with GCC
#
# When your program is linked against a shared library, only a small table is created in the executable. Before the executable starts running, **the operating system loads the machine code needed for the external functions** - a process known as **dynamic linking.**
#
#
# * Dynamic linking makes executable files smaller and saves disk space, because `one` copy of a **library** can be **shared** between `multiple` programs.
#
#
# * Furthermore, most operating systems allows one copy of a shared library in memory to be used by all running programs, thus, saving memory.
#
#
# * The shared library codes can be upgraded without the need to recompile your program.
#
#
# A **shared library** has file extension of
#
# * **`.so`** (shared objects) in `Linux(Unixes)`
#
#
# * **`.dll`** (dynamic link library) in `Windows`.
#
# ## 1: Building the shared library
#
# The shared library we will build consist of a single source file: `SumArray.c/h`
#
# We will compile the C file with `Position Independent Code (PIC)` into a shared library.
#
# GCC assumes that all libraries
#
# * `start` with `lib`
#
# * `end` with `.dll`(windows) or `.so`(Linux),
#
# so, we should name the shared library begin with `lib prefix` and the `.so/.dll` extensions.
#
# * libSumArray.dll(Windows)
#
# * libSumArray.so(Linux)
#
#
# #### Under Windows
# !gcc -c -O3 -Wall -fPIC -o ./demo/bin/SumArray.o ./demo/src/SumArray.c
# !gcc -shared -o ./demo/bin/libSumArray.dll ./demo/bin/SumArray.o
# !dir .\demo\bin\libSumArray.*
# #### under Linux
# !gcc -c -O3 -Wall -fPIC -o ./demo/obj/SumArray.o ./demo/src/SumArray.c
# !gcc -shared -o ./demo/bin/libSumArray.so ./demo/obj/SumArray.o
# !ls ./demo/bin/libSumArray.*
#
# * `-c`: compile into object file with default name: SumArray.o.
#
# By default, the object file has the same name as the source file with extension of ".o"
#
#
# * `-O3`: Optimize yet more.
#
#    turns on all optimizations specified by -O2 and also turns on the -finline-functions, -fweb, -frename-registers and -funswitch-loops options.
#
#
# * `-Wall`: prints "all" compiler's warning message.
#
# This option should always be used, in order to generate better code.
#
#
# * **`-fPIC`**: stands for `Position Independent Code`
#
# the generated machine code is `not dependent` on being located at a `specific address` in order to `work`.
#
# Position-independent code can be `executed` at `any memory address`
#
#
# * **-shared:** creating a shared library
#
# +
# %%file ./demo/makefile-SumArray-dll
CC=gcc
CFLAGS=-O3 -Wall -fPIC
SRCDIR= ./demo/src/
OBJDIR= ./demo/obj/
BINDIR= ./demo/bin/
all: libdll
libdll: obj
$(CC) -shared -o $(BINDIR)libSumArray.dll $(OBJDIR)SumArray.o
del .\demo\obj\SumArray.o
obj: ./demo/src/SumArray.c
$(CC) -c $(CFLAGS) -o $(OBJDIR)SumArray.o $(SRCDIR)SumArray.c
clean:
	del .\demo\bin\libSumArray.dll
# -
# !make -f ./demo/makefile-SumArray-dll
# !dir .\demo\bin\libSum*.dll
# #### Under Linux
# +
# %%file ./code/makefile-SumArray-so
CC=gcc
CFLAGS=-O3 -Wall -fPIC
SRCDIR= ./demo/src/
OBJDIR= ./demo/obj/
BINDIR= ./demo/bin/
all: libdll
libdll: obj
	$(CC) -shared -o $(BINDIR)libSumArray.so $(OBJDIR)SumArray.o
rm -f ./demo/obj/SumArray.o
obj: ./demo/src/SumArray.c
$(CC) -c $(CFLAGS) -o $(OBJDIR)SumArray.o $(SRCDIR)SumArray.c
clean:
	rm -f ./demo/bin/libSumArray.so
# -
# !make -f ./code/makefile-SumArray-so
# !ls ./demo/bin/libSum*.so
# ## 2 Building a client executable
# ### Header Files and Libraries
#
# * `Header File`: When compiling the program, the **compiler** needs the **header** files to compile the source codes;
#
# * `libraries`: the **linker** needs the **libraries** to resolve external references from other object files or libraries.
#
# The `compiler` and `linker` will not find the `headers/libraries` unless you set **the appropriate options**
#
# * **1 Searching for Header Files**
#
# **`-Idir`:** The include-paths are specified via **-Idir** option (`uppercase` 'I' followed by the directory path or environment variable **CPATH**).
#
#
# * **2 Searching for libraries Files**
#
# **`-Ldir`**: The library-path is specified via **-Ldir** option (`uppercase` 'L' followed by the directory path(or environment variable **LIBRARY_PATH**).
#
#
# * **3 Linking the library**
#
# **`-llibname`**: Link with the library name **without** the `lib` prefix and the `.so/.dll` extensions.
#
# Windows
# ```bash
# -I./demo/src/ -L./demo/bin/ -lSumArray
# ```
# Linux
# ```bash
# -I./demo/src/ -L./demo/bin/ -lSumArray -Wl,-rpath=./demo/bin/
# ```
#
# * **`-Wl,option`**
#
# Pass option as an option to the **linker**. If option contains `commas`, it is split into multiple options at the commas. You can use this syntax to pass an argument to the option. For example, -Wl,-Map,output.map passes -Map output.map to the linker. When using the GNU linker, you can also get the same effect with `-Wl,-Map=output.map'.
#
# * **`-rpath=dir`**
#
# **Add a directory to the runtime library search path**. This is used when linking an ELF executable with shared objects. All -rpath arguments are concatenated and passed to the runtime linker, which uses them to locate shared objects at runtime. The -rpath option is also used when locating shared objects which are needed by shared objects explicitly included in the link;
#
#
# ---
# The following source code `"mainSum.c"` demonstrates calling the DLL's functions:
#
# **NOTE:** mainSum.c is the same code in multi-source example
# +
# %%file ./demo/src/mainSum.c
#include <stdio.h>
#include "SumArray.h"
int main() {
int a1[] = {8, 4, 5, 3, 2};
printf("sum is %d\n", sum(a1, 5)); // sum is 22
return 0;
}
# -
# #### Windows
# !gcc -c -o ./demo/obj/mainSum.o ./demo/src/mainSum.c
# !gcc -o ./demo/bin/mainSum ./demo/obj/mainSum.o -I./demo/src/ -L./demo/bin/ -lSumArray
# !.\demo\bin\mainSum
# #### Linux
# !gcc -c -o ./demo/obj/mainSum.o ./demo/src/mainSum.c
# !gcc -o ./demo/bin/mainSum ./demo/obj/mainSum.o -I./demo/src/ -L./demo/bin/ -lSumArray -Wl,-rpath=./demo/bin/
# !ldd ./demo/bin/mainSum
# !./demo/bin/mainSum
# #### Under Windows
# +
# %%file ./demo/makefile-call-dll
SRCDIR= ./demo/src/
OBJDIR= ./demo/obj/
BINDIR= ./demo/bin/
all: mainexe
clean:
del .\demo\bin\mainSum.exe
mainexe: sumobj $(SRCDIR)SumArray.h
gcc -o $(BINDIR)mainSum.exe $(OBJDIR)mainSum.o -I$(SRCDIR) -L$(BINDIR) -lSumArray
del .\demo\obj\mainSum.o
sumobj: $(SRCDIR)mainSum.c
gcc -c -o $(OBJDIR)mainSum.o $(SRCDIR)mainSum.c
# -
# !make -f ./demo/makefile-call-dll
# !.\demo\bin\mainSum
# #### Under Linux
# +
# %%file ./demo/makefile-call-so
SRCDIR= ./demo/src/
OBJDIR= ./demo/obj/
BINDIR= ./demo/bin/
all: main
clean:
rm -f ./demo/bin/mainSum.exe
main: sumobj $(SRCDIR)SumArray.h
	gcc -o $(BINDIR)mainSum.exe $(OBJDIR)mainSum.o -I$(SRCDIR) -L$(BINDIR) -lSumArray -Wl,-rpath=./demo/bin/
rm -f ./demo/obj/mainSum.o
sumobj: $(SRCDIR)mainSum.c
gcc -c -o $(OBJDIR)mainSum.o $(SRCDIR)mainSum.c
# -
# !make -f ./demo/makefile-call-so
# !./demo/bin/mainSum
# ## 3 Building a `shared library` with `multi-source` files
#
# The shared library we will build consist of a multi-source files
#
# * funs.c/h
#
# * SumArray.c/h
# +
# %%file ./demo/src/funs.h
#ifndef FUNS_H
#define FUNS_H
double dprod(double *x, int n);
int factorial(int n);
#endif
# +
# %%file ./demo/src/funs.c
#include "funs.h"
// x[0]*x[1]*...*x[n-1]
double dprod(double *x, int n)
{
double y = 1.0;
for (int i = 0; i < n; i++)
{
y *= x[i];
}
return y;
}
// The factorial of a positive integer n, denoted by n!, is the product of all positive integers less than or equal to n.
// For example,5!=5*4*3*2*1=120
// The value of 0! is 1
int factorial(int n)
{
if (n == 0 ) {
return 1;
}
else
{
return n * factorial(n - 1);
}
}
# -
# #### Building `funs.c` and `SumArray.c` into libmultifuns.dll
# !gcc -c -O3 -Wall -fPIC -o ./demo/obj/funs.o ./demo/src/funs.c
# !gcc -c -O3 -Wall -fPIC -o ./demo/obj/SumArray.o ./demo/src/SumArray.c
# !gcc -shared -o ./demo/bin/libmultifuns.dll ./demo/obj/funs.o ./demo/obj/SumArray.o
# !dir .\demo\bin\libmulti*.dll
# #### Building with makefile
#
# +
# %%file ./demo/makefile-libmultifun
CC=gcc
CFLAGS=-O3 -Wall -fPIC
SRCDIR= ./demo/src/
OBJDIR= ./demo/obj/
BINDIR= ./demo/bin/
all: libmultifuns.dll
libmultifuns.dll: multifunsobj
$(CC) -shared -o $(BINDIR)libmultifuns.dll $(OBJDIR)funs.o $(OBJDIR)SumArray.o
del .\demo\obj\funs.o .\demo\obj\SumArray.o
multifunsobj: $(SRCDIR)funs.c $(SRCDIR)SumArray.c
$(CC) -c $(CFLAGS) -o $(OBJDIR)SumArray.o $(SRCDIR)SumArray.c
$(CC) -c $(CFLAGS) -o $(OBJDIR)funs.o $(SRCDIR)funs.c
clean:
del .\demo\bin\libmultifuns.dll
# -
# !make -f ./demo/makefile-libmultifun
# The result is a compiled shared library **`libmultifuns.dll`**
# ##### makefile-libmultifun - more vars
# +
# %%file ./code/makefile-libmultifun
CC=gcc
CFLAGS=-O3 -Wall -fPIC
SRCDIR= ./demo/src/
OBJDIR= ./demo/obj/
BINDIR= ./demo/bin/
INC = -I$(SRCDIR)
SRCS= $(SRCDIR)funs.c \
$(SRCDIR)SumArray.c
all: libmultifuns.dll
libmultifuns.dll: multifunsobj
$(CC) -shared -o $(BINDIR)libmultifuns.dll funs.o SumArray.o
del funs.o SumArray.o
multifunsobj:
$(CC) -c $(CFLAGS) $(INC) $(SRCS)
clean:
del .\demo\bin\libmultifuns.dll
# -
# !make -f ./code/makefile-libmultifun
# ##### Building a client executable
#
# The following source code `"mainMultifuns.c"` demonstrates calling the DLL's functions:
#
# +
# %%file ./demo/src/mainMultifuns.c
#include <stdio.h>
#include "SumArray.h"
#include "funs.h"
int main() {
int a1[] = {8, 4, 5, 3, 2};
printf("sum is %d\n", sum(a1, 5)); // sum is 22
double a2[] = {8.0, 4.0, 5.0, 3.0, 2.0};
printf("dprod is %f\n", dprod(a2, 5)); // dprod is 960
int n =5;
printf("the factorial of %d is %d\n",n,factorial(n)); // 5!=120
return 0;
}
# -
# !gcc -c -o ./demo/obj/mainMultifuns.o ./demo/src/mainMultifuns.c
# !gcc -o ./demo/bin/mainMultifuns ./demo/obj/mainMultifuns.o -I./demo/src/ -L./demo/bin/ -lmultifuns
# !.\demo\bin\mainMultifuns
# ## Reference
#
# * GCC (GNU compilers) http://gcc.gnu.org
#
# * GCC Manual http://gcc.gnu.org/onlinedocs
#
# * An Introduction to GCC http://www.network-theory.co.uk/docs/gccintro/index.html.
#
# * GCC and Make:Compiling, Linking and Building C/C++ Applications http://www3.ntu.edu.sg/home/ehchua/programming/cpp/gcc_make.html
#
# * MinGW-W64 (GCC) Compiler Suite: http://www.mingw-w64.org/doku.php
#
#
# * C/C++ for VS Code https://code.visualstudio.com/docs/languages/cpp
#
# * C/C++ Preprocessor Directives http://www.cplusplus.com/doc/tutorial/preprocessor/
#
#
# * What is a DLL and How Do I Create or Use One? http://www.mingw.org/wiki/DLL
#
#
| notebook/Unit8-2-GCC_DLL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tf_macos
# language: python
# name: tf_macos
# ---
# ***The content below is a TensorFlow port of the tutorial [Deep Learning for NLP with PyTorch](https://pytorch.org/tutorials/beginner/deep_learning_nlp_tutorial.html).***
# + [markdown] toc=true
# <h1>Word Embedding - Encoding Lexical Semantics<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#N-gram-Language-modeling" data-toc-modified-id="N-gram-Language-modeling-1"><span class="toc-item-num">1 </span>N-gram Language modeling</a></span></li><li><span><a href="#Exercise:-Continuous-Bag-of-words" data-toc-modified-id="Exercise:-Continuous-Bag-of-words-2"><span class="toc-item-num">2 </span>Exercise: Continuous Bag-of-words</a></span></li></ul></div>
# -
# ## N-gram Language modeling
# Word embeddings are a representation of the *semantics* of a word, efficiently encoding semantic information that might be relevant to the task at hand. You can embed other things too: part of speech tags, parse trees, anything! The idea of feature embeddings is central to the field.
import nltk
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from nltk.corpus import gutenberg
test_sentence = [w.lower() for w in gutenberg.words()]
# bow
vocab = set(test_sentence)
word_to_ix = {word.lower(): i for i, word in enumerate(vocab)}
VOCAB_SIZE = len(vocab)
CONTEXT_SIZE = 2
EMBEDDING_DIM = 10
trigrams = [(test_sentence[i:i+CONTEXT_SIZE], test_sentence[i+2]) \
for i in range(len(test_sentence)-CONTEXT_SIZE)]
trigrams[:3]
# * transform into `tf.data.Dataset`
# +
x_train, y_train = list(zip(*trigrams))
# convert to index
x_train = np.array(list(map(lambda ngram: [word_to_ix[w.lower()] for w in ngram], x_train)), dtype=int)
y_train = np.array(list(map(lambda w: word_to_ix[w.lower()], y_train)), dtype=int)
trainset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(512)
# -
# create model
# + active=""
# # language modeler
# model = tf.keras.Sequential(
# [
# tf.keras.layers.Embedding(VOCAB_SIZE, EMBEDDING_DIM),
# tf.keras.layers.Reshape((1, CONTEXT_SIZE*EMBEDDING_DIM)),
# tf.keras.layers.Dense(128),
# tf.keras.layers.ReLU(),
# tf.keras.layers.Dense(VOCAB_SIZE)
# ]
# )
# +
# language modeler
class NGramLanguageModel(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, context_size):
super(NGramLanguageModel, self).__init__()
## sample sizes
#self.vocab_size = vocab_size
#self.embedding_dim = embedding_dim
#self.context_size = context_size
# layers
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.reshape = tf.keras.layers.Reshape((1, context_size * embedding_dim))
self.linear1 = tf.keras.layers.Dense(128)
self.relu1 = tf.keras.layers.ReLU()
self.linear2 = tf.keras.layers.Dense(vocab_size)
def call(self, x):
x = self.embedding(x)
x = self.reshape(x)
x = self.relu1(self.linear1(x))
x = self.linear2(x) # this is "logit". If we softmax it, it becomes "probability distribution"
return x
model = NGramLanguageModel(VOCAB_SIZE, EMBEDDING_DIM, CONTEXT_SIZE)
# -
# train the model
criterion = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.05, momentum=0.9)
model.compile(optimizer=optimizer, loss=criterion, metrics=["accuracy"])
# ```python
# history = model.fit(trainset, epochs=2, verbose=1)
# ```
# save model
# ```python
# model.save('./tf-3gram-embedding')
# ```
# load model
# ```python
# model = tf.keras.models.load_model("./tf-3gram-embedding")
# ```
# results
plt.plot(history.history["loss"])
embedding = model.embedding
# +
from collections import Counter
n = 150
top_n, _ = (["would", "should", "could", "may", "shall",
"he", "she", "his", "my", "their"], None) #list(zip(*Counter(test_sentence).most_common(n)))
test_raw = [word.lower() for word in top_n]
test_words = [word_to_ix[word.lower()] for word in top_n]
# -
embed_xy = embedding(np.array(test_words))[:, :2].numpy()
embed_x, embed_y = list(zip(*embed_xy))
plt.figure(figsize=(6,6))
plt.title("Word Embedding")
plt.scatter(embed_x, embed_y, alpha=.3)
plt.axhline([0], ls=":", c="grey")
plt.axvline([0], ls=":", c="grey")
for xy, word in zip(embed_xy, test_raw):
plt.annotate(word.lower(), xy, fontsize=14)
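# Beyond a 2-D scatter plot, another quick check (a sketch assuming the trained `embedding` layer and `word_to_ix` above) is to look at the cosine-nearest neighbours of a word in the full embedding space:
# +
def nearest_words(query, k=5):
    """Return the k words whose embeddings are most cosine-similar to `query`."""
    vectors = embedding(np.arange(VOCAB_SIZE)).numpy()
    vectors = vectors / np.linalg.norm(vectors, axis=1, keepdims=True)
    scores = vectors @ vectors[word_to_ix[query]]
    best = np.argsort(-scores)[1:k + 1]  # skip the query word itself
    ix_to_word = {i: w for w, i in word_to_ix.items()}
    return [(ix_to_word[i], float(scores[i])) for i in best]

print(nearest_words("she"))
# -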
# ## Exercise: Continuous Bag-of-words
# Given a target word $w_i$ and an $N$ context window on each side, $w_{i−1},\cdots,w_{i−N}$ and $w_{i+1},\cdots,w_{i+N}$, referring to all context words collectively as C, CBOW tries to minimize
#
# $$
# -\log p(w_i | C) = -\log \text{Softmax}(A(\sum_{w \in C} q_w) + b)
# $$
#
# where $q_w$ is the embedding for word $w$.
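#
# A minimal sketch of how this objective maps to code (hedged: `q` is assumed to be an embedding table, `linear` a single affine layer, and `context_ids`/`target_id` integer word indices; the actual exercise solution follows below):
#
# ```python
# hidden = q(context_ids).sum(dim=0)                   # sum of context embeddings, shape (embedding_dim,)
# logits = linear(hidden)                              # A @ hidden + b, shape (vocab_size,)
# loss = -torch.log_softmax(logits, dim=-1)[target_id]
# ```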
CONTEXT_SIZE = 2 # 2 words to the left, 2 to the right
EMBEDDING_DIM = 2
raw_text = """We are about to study the idea of a computational process.
Computational processes are abstract beings that inhabit computers.
As they evolve, processes manipulate other abstract things called data.
The evolution of a process is directed by a pattern of rules
called a program. People create programs to direct processes. In effect,
we conjure the spirits of the computer with our spells.""".split()
# +
# By deriving a set from `raw_text`, we deduplicate the array
vocab = set(raw_text)
vocab_size = len(vocab)
word_to_ix = {word.lower(): i for i, word in enumerate(vocab)}
data = []
for i in range(2, len(raw_text) - 2):
context = [raw_text[i - 2], raw_text[i - 1],
raw_text[i + 1], raw_text[i + 2]]
target = raw_text[i]
data.append((context, target))
data[:5]
# -
# Define the model (note: this exercise uses PyTorch rather than TensorFlow, so import it here)
import torch
from torch import nn, optim
import torch.nn.functional as F
class CBOW(nn.Module):
def __init__(self):
super(CBOW, self).__init__()
self.embedding = nn.Embedding(vocab_size, EMBEDDING_DIM)
self.linear1 = nn.Linear(2*CONTEXT_SIZE*EMBEDDING_DIM, 128)
self.linear2 = nn.Linear(128, vocab_size)
def forward(self, x):
x = self.embedding(x)
x = x.view(1, -1)
x = F.relu(self.linear1(x))
x = F.log_softmax(self.linear2(x), dim=1)
return x
# Train the model
model = CBOW()
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.005, momentum=0.9)
# +
def make_context_vector(context, word_to_ix):
idxs = [word_to_ix[w.lower()] for w in context]
return torch.tensor(idxs, dtype=torch.long)
make_context_vector(data[0][0], word_to_ix) # example
# +
N_EPOCH = 200
loss_history = []
for i in range(1, N_EPOCH+1):
loss_epoch = 0.
for context, target in data:
context_idx = make_context_vector(context, word_to_ix)
target_idx = torch.tensor([word_to_ix[target.lower()]], dtype=torch.long)
model.zero_grad()
out = model(context_idx)
loss = criterion(out, target_idx)
loss.backward()
optimizer.step()
loss_epoch += loss.item()
loss_history.append(loss_epoch)
if i % 20 == 0: print(f"[{i}] loss: {loss_epoch: .2f}")
# -
# Results
plt.plot(loss_history)
embedding = model.embedding
test_words = [word_to_ix[word.lower()] for word in raw_text[:50]]
with torch.no_grad():
embed_xy = embedding(torch.tensor(test_words))[:, :2].detach().numpy()
embed_x, embed_y = list(zip(*embed_xy))
plt.figure(figsize=(6,6))
plt.title("Word Embedding")
plt.scatter(embed_x, embed_y, marker=".", alpha=.7)
plt.axhline([0], ls=":", c="grey")
plt.axvline([0], ls=":", c="grey")
for xy, word in zip(embed_xy, raw_text[:50]):
plt.annotate(word.lower(), xy)
| NLP_with_TensorFlow/1_basics_(tutorial)/1-2_word embedding - encoding lexical semantics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project II - SCC0250
# ## <NAME> - 10724239
# ## <NAME> - 9361073
#
# #### Instructions:
# - wasd: move around the scene
# - mouse: look around
# - space: kick the soccer ball
# ### Window settings, shader code, and program linking:
# +
import glfw
from OpenGL.GL import *
import OpenGL.GL.shaders
import numpy as np
import glm
import math
import time
from PIL import Image
glfw.init()
glfw.window_hint(glfw.VISIBLE, glfw.FALSE);
altura = 1080
largura = 1920
window = glfw.create_window(largura, altura, "Projeto 2", None, None)
glfw.make_context_current(window)
vertex_code = """
attribute vec3 position;
attribute vec2 texture_coord;
varying vec2 out_texture;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main(){
gl_Position = projection * view * model * vec4(position,1.0);
out_texture = vec2(texture_coord);
}
"""
fragment_code = """
uniform vec4 color;
varying vec2 out_texture;
uniform sampler2D samplerTexture;
void main(){
vec4 texture = texture2D(samplerTexture, out_texture);
gl_FragColor = texture;
}
"""
# Request a program and shader slots from GPU
program = glCreateProgram()
vertex = glCreateShader(GL_VERTEX_SHADER)
fragment = glCreateShader(GL_FRAGMENT_SHADER)
# Set shaders source
glShaderSource(vertex, vertex_code)
glShaderSource(fragment, fragment_code)
# Compile shaders
glCompileShader(vertex)
if not glGetShaderiv(vertex, GL_COMPILE_STATUS):
error = glGetShaderInfoLog(vertex).decode()
print(error)
raise RuntimeError("Erro de compilacao do Vertex Shader")
glCompileShader(fragment)
if not glGetShaderiv(fragment, GL_COMPILE_STATUS):
error = glGetShaderInfoLog(fragment).decode()
print(error)
raise RuntimeError("Erro de compilacao do Fragment Shader")
# Attach shader objects to the program
glAttachShader(program, vertex)
glAttachShader(program, fragment)
# Build program
glLinkProgram(program)
if not glGetProgramiv(program, GL_LINK_STATUS):
print(glGetProgramInfoLog(program))
raise RuntimeError('Linking error')
# Make program the default program
glUseProgram(program)
# -
# # Load wavefront (.obj):
def load_model_from_file(filename):
"""Loads a Wavefront OBJ file. """
objects = {}
vertices = []
texture_coords = []
faces = []
material = None
# open the obj file for reading
for line in open(filename, "r"): ## para cada linha do arquivo .obj
if line.startswith('#'): continue ## ignora comentarios
values = line.split() # quebra a linha por espaço
if not values: continue
### retrieving vertices
if values[0] == 'v':
vertices.append(values[1:4])
### retrieving texture coordinates
elif values[0] == 'vt':
texture_coords.append(values[1:3])
### retrieving faces
elif values[0] in ('usemtl', 'usemat'):
material = values[1]
elif values[0] == 'f':
face = []
face_texture = []
for v in values[1:]:
w = v.split('/')
face.append(int(w[0]))
if len(w) >= 2 and len(w[1]) > 0:
face_texture.append(int(w[1]))
else:
face_texture.append(0)
faces.append((face, face_texture, material))
model = {}
model['vertices'] = vertices
model['texture'] = texture_coords
model['faces'] = faces
return model
# # Load texture
# +
glEnable(GL_TEXTURE_2D)
qtd_texturas = 10
textures = glGenTextures(qtd_texturas)
def load_texture_from_file(texture_id, img_textura):
glBindTexture(GL_TEXTURE_2D, texture_id)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
img = Image.open(img_textura)
print(img_textura,img.mode)
img_width = img.size[0]
img_height = img.size[1]
#image_data = img.tobytes("raw", "RGB", 0, -1)
image_data = img.convert("RGBA").tobytes("raw", "RGBA",0,-1)
#image_data = np.array(list(img.getdata()), np.uint8)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, img_width, img_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image_data)
# -
# ### The list below stores all the vertices loaded from the files
vertices_list = []
textures_coord_list = []
# ## Model, View and Projection matrices
# +
def model(angle, r_x, r_y, r_z, t_x, t_y, t_z, s_x, s_y, s_z):
angle = math.radians(angle)
matrix_transform = glm.mat4(1.0) # instanciando uma matriz identidade
# aplicando translacao
matrix_transform = glm.translate(matrix_transform, glm.vec3(t_x, t_y, t_z))
# aplicando rotacao
matrix_transform = glm.rotate(matrix_transform, angle, glm.vec3(r_x, r_y, r_z))
# aplicando escala
matrix_transform = glm.scale(matrix_transform, glm.vec3(s_x, s_y, s_z))
matrix_transform = np.array(matrix_transform).T # pegando a transposta da matriz (glm trabalha com ela invertida)
return matrix_transform
def view():
global cameraPos, cameraFront, cameraUp
mat_view = glm.lookAt(cameraPos, cameraPos + cameraFront, cameraUp);
mat_view = np.array(mat_view)
return mat_view
def projection():
global altura, largura
# perspective parameters: fovy, aspect, near, far
mat_projection = glm.perspective(glm.radians(45.0), largura/altura, 0.1, 1000.0)
mat_projection = np.array(mat_projection)
return mat_projection
# -
# # Dynamic function to draw each object:
def draw_object(obj, angle=90.0, r_x = 0.0, r_y = 1.0, r_z = 0.0, t_x=0.0, t_y=0.0, t_z=0.0, s_x = 1.0, s_y = 1.0, s_z = 1.0, custom_mat_model = []):
mat_model = custom_mat_model if len(custom_mat_model) > 0 else model(angle, r_x, r_y, r_z, t_x, t_y, t_z, s_x, s_y, s_z)
loc_model = glGetUniformLocation(program, "model")
glUniformMatrix4fv(loc_model, 1, GL_TRUE, mat_model)
# set the texture id of the model
glBindTexture(GL_TEXTURE_2D, obj['textid'])
# draw the model
glDrawArrays(GL_TRIANGLES, obj['initial_vertice'], obj['obj_size']) ## renderizando
# +
# import glfw
# from OpenGL.GL import *
# import OpenGL.GL.shaders
# import numpy as np
# import glm
# import math
# from PIL import Image
# len([])
# -
# ### Let's load each model and define functions to draw them
textid = 0
def load2project(pathname, objname, textname):
global vertices_list, textures_coord_list, textid
modelo = load_model_from_file(pathname + '/' + objname + '.obj')
print('Processando modelo '+ objname +'.obj. Vertice inicial:',len(vertices_list))
res = dict()
res['initial_vertice'] = len(vertices_list)
pos = {
'max_x': float('-inf'),
'max_y': float('-inf'),
'max_z': float('-inf'),
'min_x': float('inf'),
'min_y': float('inf'),
'min_z': float('inf'),
'center_x': 0.0,
'center_y': 0.0,
'center_z': 0.0
}
#print(pos)
for face in modelo['faces']:
for vertice_id in face[0]:
v = modelo['vertices'][vertice_id-1]
if float(v[0]) > float(pos['max_x']): pos['max_x'] = v[0]
if float(v[0]) < float(pos['min_x']): pos['min_x'] = v[0]
if float(v[1]) > float(pos['max_y']): pos['max_y'] = v[1]
if float(v[1]) < float(pos['min_y']): pos['min_y'] = v[1]
if float(v[2]) > float(pos['max_z']): pos['max_z'] = v[2]
if float(v[2]) < float(pos['min_z']): pos['min_z'] = v[2]
vertices_list.append( v )
for texture_id in face[1]:
textures_coord_list.append( modelo['texture'][texture_id-1] )
print('Processando modelo '+ objname +'.obj. Vertice final:',len(vertices_list))
res['obj_size'] = len(vertices_list) - res['initial_vertice']
### inserting the model's texture coordinates into the texture vector
### loading the corresponding texture and setting an id (buffer): use one id per texture!
pos['center_x'] = (float(pos['max_x']) + float(pos['min_x'])) / 2.0
pos['center_y'] = (float(pos['max_y']) + float(pos['min_y'])) / 2.0
pos['center_z'] = (float(pos['max_z']) + float(pos['min_z'])) / 2.0
res['pos'] = pos
load_texture_from_file(textid, pathname + '/' + textname)
print("Textura do modelo " + objname + ' carregada com o id:', textid)
res['textid'] = textid
textid = textid + 1
print(pos)
return res
# list just to keep track of the names of the draw functions
draw_functions = []
# Terrain
terreno = load2project('planograma', 'grama', 'dirt.jpg')
def draw_terreno():
draw_object(terreno)
draw_functions.append('draw_terreno')
# Entrance cobblestones
cobble = load2project('planocobble', 'cobble', 'cobble.jpg')
def draw_cobble():
draw_object(cobble)
draw_functions.append('draw_cobble')
rua = load2project('planorua', 'road', 'road.jpg')
def draw_rua():
draw_object(rua)
draw_functions.append('draw_rua')
ceu = load2project('ceu', 'ceu', 'ceu.jpg')
def draw_ceu():
draw_object(ceu)
draw_functions.append('draw_ceu')
cama = load2project('cama', 'cama', 'cama.jpg')
def draw_cama():
draw_object(cama)
draw_functions.append('draw_cama')
# +
# Discoball
print('Carregando Modelos da Bola de Disco')
discoball_cylinder = load2project('discoball', 'cilinder', 'discoball.jpg')
discoball_sphere = load2project('discoball', 'sphere', 'discoball.jpg')
def draw_disco_cylinder():
draw_object(discoball_cylinder)
def draw_disco_ball():
draw_object(discoball_sphere)
draw_functions.append('draw_disco_cylinder')
draw_functions.append('draw_disco_ball')
# +
# House
print('Carregando Modelos da Casa')
casa = load2project('casa', 'casa', 'casa.png')
casa_chao = load2project('casa', 'chao', 'chao.jpg')
def draw_casa():
draw_object(casa)
draw_object(casa_chao)
draw_functions.append('draw_casa')
# +
bola = load2project('ball', 'ball', 'ball.jpg')
def draw_bola():
draw_object(bola)
draw_functions.append('draw_bola')
# +
geladeira = load2project('fridge', 'fridge', 'fridge.jpg')
def draw_geladeira():
draw_object(geladeira)
draw_functions.append('draw_geladeira')
# +
arco = load2project('arch', 'arch', 'arch.jpg')
def draw_arco():
draw_object(arco)
draw_functions.append('draw_arco')
# +
# Tree
print('Carregando Modelos da Árvore')
arvore_folhas = load2project('tree', 'leaves', 'leaves.jpg')
arvore_tronco = load2project('tree', 'wood', 'wood.jpg')
def draw_arvore():
draw_object(arvore_folhas)
draw_object(arvore_tronco)
draw_functions.append('draw_arvore')
# +
sofa = load2project('sofa', 'sofa', 'sofa.jpg')
def draw_sofa():
draw_object(sofa)
draw_functions.append('draw_sofa')
# +
tv = load2project('tv', 'tv', 'tv.png')
def draw_tv():
draw_object(tv)
draw_functions.append('draw_tv')
# -
# Request a buffer slot from GPU
buffer = glGenBuffers(2)
# +
vertices = np.zeros(len(vertices_list), [("position", np.float32, 3)])
vertices['position'] = vertices_list
# Upload data
glBindBuffer(GL_ARRAY_BUFFER, buffer[0])
glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
stride = vertices.strides[0]
offset = ctypes.c_void_p(0)
loc_vertices = glGetAttribLocation(program, "position")
glEnableVertexAttribArray(loc_vertices)
glVertexAttribPointer(loc_vertices, 3, GL_FLOAT, False, stride, offset)
# +
textures = np.zeros(len(textures_coord_list), [("position", np.float32, 2)]) # duas coordenadas
textures['position'] = textures_coord_list
# Upload data
glBindBuffer(GL_ARRAY_BUFFER, buffer[1])
glBufferData(GL_ARRAY_BUFFER, textures.nbytes, textures, GL_STATIC_DRAW)
stride = textures.strides[0]
offset = ctypes.c_void_p(0)
loc_texture_coord = glGetAttribLocation(program, "texture_coord")
glEnableVertexAttribArray(loc_texture_coord)
glVertexAttribPointer(loc_texture_coord, 2, GL_FLOAT, False, stride, offset)
# +
# Draw
# def draw_grass():
# draw_object(terreno)
# -
# # Load <NAME>:
# +
# def draw_cbble():
# angle = 90.0;
# r_x = 0.0; r_y = 1.0; r_z = 0.0;
# t_x=0.0;t_y=0.0;t_z=0.0;
# s_x = 1.0; s_y = 1.0; s_z = 1.0;
# mat_model = model(angle, r_x, r_y, r_z, t_x, t_y, t_z, s_x, s_y, s_z)
# loc_model = glGetUniformLocation(program, "model")
# glUniformMatrix4fv(loc_model, 1, GL_TRUE, mat_model)
# #define id da textura do modelo
# glBindTexture(GL_TEXTURE_2D, cobble['textid'])
# # desenha o modelo
# glDrawArrays(GL_TRIANGLES, cobble['initial_vertice'], cobble['obj_size']) ## renderizando
# -
# # <NAME>:
# +
# def draw_road():
# angle = 90.0;
# r_x = 0.0; r_y = 1.0; r_z = 0.0;
# t_x=0.0;t_y=0.0;t_z=0.0;
# s_x = 1.0; s_y = 1.0; s_z = 1.0;
# mat_model = model(angle, r_x, r_y, r_z, t_x, t_y, t_z, s_x, s_y, s_z)
# loc_model = glGetUniformLocation(program, "model")
# glUniformMatrix4fv(loc_model, 1, GL_TRUE, mat_model)
# #define id da textura do modelo
# glBindTexture(GL_TEXTURE_2D, rua['textid'])
# # desenha o modelo
# glDrawArrays(GL_TRIANGLES, rua['initial_vertice'], rua['obj_size']) ## renderizando
# -
# # Load Sky:
# +
# def draw_sky():
# angle = 90.0;
# r_x = 0.0; r_y = 0.0; r_z = 1.0;
# t_x = 0.0; t_y = -10.1; t_z = 0.0;
# s_x = 5.0; s_y = 5.0; s_z = 5.0;
# mat_model = model(angle, r_x, r_y, r_z, t_x, t_y, t_z, s_x, s_y, s_z)
# loc_model = glGetUniformLocation(program, "model")
# glUniformMatrix4fv(loc_model, 1, GL_TRUE, mat_model)
# #define id da textura do modelo
# glBindTexture(GL_TEXTURE_2D, ceu['textid'])
# # desenha o modelo
# glDrawArrays(GL_TRIANGLES, ceu['initial_vertice'], ceu['obj_size']) ## renderizando
# -
# # Load House:
# +
# def draw_house():
# angle = 90.0;
# r_x = 0.0; r_y = 1.0; r_z = 0.0;
# t_x = 0.0; t_y = 0.1; t_z = 0.0;
# s_x = 1.0; s_y = 1.0; s_z = 1.0;
# mat_model = model(angle, r_x, r_y, r_z, t_x, t_y, t_z, s_x, s_y, s_z)
# loc_model = glGetUniformLocation(program, "model")
# glUniformMatrix4fv(loc_model, 1, GL_TRUE, mat_model)
# #define id da textura do modelo
# glDisable(GL_TEXTURE_2D)
# glBindTexture(GL_TEXTURE_2D, casa['textid'])
# # desenha o modelo
# glDrawArrays(GL_TRIANGLES, casa['initial_vertice'], casa['obj_size']) ## renderizando
# -
# ### Events to modify the camera position.
#
# * The A, S, D and W keys are used for movement in three-dimensional space
# * The mouse position is used to "aim" the camera
# +
cameraPos = glm.vec3(50.0, 10.0, 1.0);
cameraFront = glm.vec3(0.0, 0.0, -1.0);
cameraUp = glm.vec3(0.0, 1.0, 0.0);
polygonal_mode = False
ballinfo = dict()
ballinfo['moving'] = False
ballinfo['acceleration'] = 0
ballinfo['speed'] = 0
ballinfo['direction'] = 1
ballinfo['pos'] = 0
ballinfo['rotation'] = 0
def key_event(window,key,scancode,action,mods):
global cameraPos, cameraFront, cameraUp, polygonal_mode, ballinfo
cameraSpeed = 0.8
if key == 87 and (action==1 or action==2): # tecla W
cameraPos += cameraSpeed * cameraFront
if key == 83 and (action==1 or action==2): # tecla S
cameraPos -= cameraSpeed * cameraFront
if key == 65 and (action==1 or action==2): # tecla A
cameraPos -= glm.normalize(glm.cross(cameraFront, cameraUp)) * cameraSpeed
if key == 68 and (action==1 or action==2): # tecla D
cameraPos += glm.normalize(glm.cross(cameraFront, cameraUp)) * cameraSpeed
if key == 80 and action==1 and polygonal_mode==True:
polygonal_mode=False
else:
if key == 80 and action==1 and polygonal_mode==False:
polygonal_mode=True
if key == 32 and action == 1 and ballinfo['moving'] == False:
ballinfo['moving'] = True
ballinfo['acceleration'] = 1
ballinfo['speed'] = 0.1
ballinfo['direction'] *= -1
firstMouse = True
yaw = -90.0
pitch = 0.0
lastX = largura/2
lastY = altura/2
def mouse_event(window, xpos, ypos):
global firstMouse, cameraFront, yaw, pitch, lastX, lastY
if firstMouse:
lastX = xpos
lastY = ypos
firstMouse = False
xoffset = xpos - lastX
yoffset = lastY - ypos
lastX = xpos
lastY = ypos
sensitivity = 0.3
xoffset *= sensitivity
yoffset *= sensitivity
yaw += xoffset;
pitch += yoffset;
if pitch >= 90.0: pitch = 90.0
if pitch <= -90.0: pitch = -90.0
front = glm.vec3()
front.x = math.cos(glm.radians(yaw)) * math.cos(glm.radians(pitch))
front.y = math.sin(glm.radians(pitch))
front.z = math.sin(glm.radians(yaw)) * math.cos(glm.radians(pitch))
cameraFront = glm.normalize(front)
glfw.set_key_callback(window,key_event)
glfw.set_cursor_pos_callback(window, mouse_event)
# for f in draw_functions:
# print(' ' + f + '()')
# -
# ### At this point, we show the window!
glfw.show_window(window)
glfw.set_cursor_pos(window, lastX, lastY)
# ### Main window loop.
# While the window is not closed, this loop keeps running. This is where we handle the interactions with OpenGL.
# +
glEnable(GL_DEPTH_TEST) ### importante para 3D
t_x = 0
rotacao_inc = 0
while not glfw.window_should_close(window):
glfw.poll_events()
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glClearColor(1.0, 1.0, 1.0, 1.0)
if polygonal_mode==True:
glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)
if polygonal_mode==False:
glPolygonMode(GL_FRONT_AND_BACK,GL_FILL)
### CAMERA SETTINGS SO IT DOES NOT LEAVE THE SCENE
# CHECK THAT IT DOES NOT LEAVE THE SKY SPHERE
if((cameraPos[0]/200)**2+(cameraPos[1]/200)**2+(cameraPos[2]/200)**2 > 1):
i = np.argmax(abs(cameraPos)) # acha o índice com maior valor absoluto da câmera
if (cameraPos[i]) > 0:
cameraPos[i] -= 5.0
else:
cameraPos[i] += 5.0
# CHECK THAT IT DOES NOT GO BELOW THE GROUND
if(cameraPos[1] < 1.0):
cameraPos[1] = 1.0
#draw_object(obj, angle, r_x, r_y, r_z, t_x, t_y, t_z, s_x, s_y, s_z, custom_mat_model):
draw_terreno()
draw_cobble()
draw_rua()
draw_ceu()
draw_cama()
draw_disco_cylinder()
#draw_disco_ball()
draw_casa()
#draw_bola()
draw_geladeira()
draw_arco()
draw_arvore()
draw_sofa()
draw_tv()
### SOCCER BALL SETTINGS
# TRANSLATION SETTINGS
if (ballinfo['speed'] == 0 or ballinfo['acceleration'] < 0):
ballinfo['moving'] = False
if ballinfo['moving']:
ballinfo['pos'] += ballinfo['speed'] * ballinfo['direction'] * ballinfo['acceleration']
ballinfo['acceleration'] -= 0.001
# ROTATION SETTINGS WITH A CUSTOM MATRIX
if (ballinfo['moving']):
ballinfo['rotation'] += ballinfo['speed'] * 18 * ballinfo['acceleration'] * ballinfo['direction']
angle = math.radians(ballinfo['rotation'])
matrix_transform_soccerball = glm.mat4(1.0) # instanciando uma matriz identidade
# aplicando translacao
matrix_transform_soccerball = glm.translate(
matrix_transform_soccerball,
glm.vec3(
bola['pos']['center_x'] + ballinfo['pos'] - 5.0 ,
bola['pos']['center_y'] ,
bola['pos']['center_z'] - 53.0))
# aplicando rotacao
matrix_transform_soccerball = glm.rotate(matrix_transform_soccerball, angle, glm.vec3(0, 0, -1))
# aplicando translacao
matrix_transform_soccerball = glm.translate(
matrix_transform_soccerball,
glm.vec3(
-bola['pos']['center_x'],
-bola['pos']['center_y'],
-bola['pos']['center_z']))
# aplicando escala
matrix_transform_soccerball = glm.scale(matrix_transform_soccerball, glm.vec3(1, 1, 1))
matrix_transform_soccerball = np.array(matrix_transform_soccerball).T # pegando a transposta da matriz (glm trabalha com ela invertida)
draw_object(bola, t_x = ballinfo['pos'], custom_mat_model = matrix_transform_soccerball)
### DRAWING THE DISCOBALL SPHERE WITH A CUSTOM TRANSFORMATION MATRIX
angle = math.radians(rotacao_inc)
matrix_transform_discoball = glm.mat4(1.0) # instanciando uma matriz identidade
# aplicando translacao
matrix_transform_discoball = glm.translate(
matrix_transform_discoball,
glm.vec3(
discoball_sphere['pos']['center_x'] - 8.5,
discoball_sphere['pos']['center_y'] + 0.5,
discoball_sphere['pos']['center_z'] + 3.5))
# aplicando rotacao
matrix_transform_discoball = glm.rotate(matrix_transform_discoball, angle, glm.vec3(0, 1, 0))
# aplicando translacao
matrix_transform_discoball = glm.translate(
matrix_transform_discoball,
glm.vec3(
-discoball_sphere['pos']['center_x'],
-discoball_sphere['pos']['center_y'],
-discoball_sphere['pos']['center_z']))
# aplicando escala
matrix_transform_discoball = glm.scale(matrix_transform_discoball, glm.vec3(1, 1, 1))
matrix_transform_discoball = np.array(matrix_transform_discoball).T # pegando a transposta da matriz (glm trabalha com ela invertida)
draw_object(discoball_sphere, custom_mat_model = matrix_transform_discoball)
rotacao_inc += 0.1
mat_view = view()
loc_view = glGetUniformLocation(program, "view")
glUniformMatrix4fv(loc_view, 1, GL_FALSE, mat_view)
mat_projection = projection()
loc_projection = glGetUniformLocation(program, "projection")
glUniformMatrix4fv(loc_projection, 1, GL_FALSE, mat_projection)
glfw.swap_buffers(window)
# time.sleep(0.1)
glfw.terminate()
# -
| projeto cg/projeto.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
### A1 ###
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpp
from sklearn import svm
import tensorflow as tf
import random
import palettable
# +
### A2 - 1 ###
# generate xdata
x = np.linspace(-5, 5, num=100, endpoint=True)
# define quadratic function
def y(x):
return x**2
# plot function with given xdata
plt.plot(x, y(x))
plt.xlabel("x")
plt.ylabel("f(x)")
# +
### A2 - 2 ###
# initialize arrays for x and y data
samples = 100
s = 0
xdata = np.zeros(samples)
ydata = np.zeros(samples)
# fill arrays with data drawn from Gaussians
while s < samples:
xdata[s] = random.gauss(1, 0.4)
ydata[s] = random.gauss(0.5, 0.4)
s += 1
# plot
plt.figure()
plt.scatter(xdata, ydata)
ax = plt.gca()
color = palettable.colorbrewer.sequential.OrRd_7_r.mpl_colors
col = 0
plt.scatter([1], [0.5], marker="x", s = 100, color = color[col]) # plot mean
sigmafactors = [.25, .5, .75, 1, 1.5, 2]
sigmax = 0.4
sigmay = 0.4
for s in sigmafactors: # contour lines (ellipses)
label = "$%g\sigma$" % s
e = mpp.Ellipse((1,0.5), 2*s*sigmax, 2*s*sigmay, fill=False, label=label, color = color[col])
ax.add_patch(e)
col += 1
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
# +
### A3 - 1 ###
# set interval
xstart = -5
xend = 8
fit_xdata = np.linspace(xstart, xend, 1000) # for plotting fits
# define function
def f(x):
return 7.2 - 3.1*x + 4.3* x**3
# generate noisy data
def generate_noisydata(xstart, xend, bins, f):
# initialize x and y arrays
xdata = np.linspace(xstart, xend, num=bins+1)
ydata = np.zeros(bins+1)
# modify ydata by adding Gaussian random variables
ind = 0
for x in xdata:
ydata[ind] = f(x) + random.gauss(0,150)
ind += 1
return xdata, ydata
# generate noisy data for 100 bins
bins = 100
xdata, ydata = generate_noisydata(xstart, xend, bins, f)
# +
### A3 - 2 ###
# define polynomial degrees which shall be fitted
polys = [1, 2, 3, 5, 10]
def fitpolys(polys, xstart, xend, xdata, ydata, fit_xdata, bins, fig_ind=1):
''' xdata, ydata: data to be fitted
xstart, xend: specifies the boundaries of the fitting interval
fit_xdata: xdata for fit
bins: number of data points in specified array
fig_ind: figure index for mpl's figure '''
plt.figure(fig_ind)
fitcoeffs = []
# generate fit coefficients for all specified polynomial degrees
# using numpy's polyfit
ind = 0
for p in polys:
fitcoeffs.append( np.polyfit(xdata, ydata, deg=p) )
ind += 1
# initialize overall fit data arrays and generate fit data:
# take coefficient array for every polynomial
fit_ydatas = []
ind = 0
for coeff in fitcoeffs:
# initialize ydata array for current polynomial
poly_degree = 0
fit_ydata = np.zeros(len(fit_xdata))
coeff = coeff[::-1] # start with coeff from lowest polynomial
# create ydata for the current polynomial:
# the fit is given by sum[ c_i * x^(p_i) ]
for c in coeff:
fit_ydata += c * (fit_xdata ** poly_degree)
poly_degree += 1
# append generated data to overall fit array and plot
fit_ydatas.append(fit_ydata)
label = "%d" % polys[ind]
plt.plot(fit_xdata, fit_ydata, label=label)
ind += 1
# plot data points
plt.scatter(xdata, ydata)
plt.legend(title="Polyn. Deg.")
plt.xlabel("x")
plt.ylabel("f(x)")
fitpolys(polys, xstart, xend, xdata, ydata, fit_xdata, bins)
# +
### A3 - 3 ###
### Reduced data sets
bins_arr = [100, 50, 20, 10]
fig_ind = 0
for bins in bins_arr:
# generate current reduced data set
xdata, ydata = generate_noisydata(xstart, xend, bins, f)
# fit the polynomial and plot
fitpolys(polys, xstart, xend, xdata, ydata, fit_xdata, bins, fig_ind = fig_ind)
fig_ind += 1
# -
# A3 - 3 / Reduced data sets
#
# Observations:
# - For large sample sizes (100 bins), polynomial degrees larger than 3 result in roughly the same fit, meaning that the original polynomial degree of 3 may be retrieved.
# - For small sample sizes (extreme case: 10 bins), we can observe over-fitting for the high polynomial degree of 10. This over-fitting does not happen with a larger number of bins.
# - As a result, we see that the number of data points should be much larger than the number of fitting parameters to obtain solid fitting results.
#
# +
### A3 - 3 ###
### Modified intervals
bins = 100
intervals = [(-2,5), (-3,6), (-4,7), (-5,8), (-6,9)]
# plot for various intervals
fig_ind = 0
for (xstart, xend) in intervals:
# adapt fit_xdata to interval
fit_xdata = np.linspace(xstart, xend, 1000) # for plotting fits
# generate current reduced data set
xdata, ydata = generate_noisydata(xstart, xend, bins, f)
# fit the polynomial and plot
fitpolys(polys, xstart, xend, xdata, ydata, fit_xdata, bins, fig_ind = fig_ind)
fig_ind += 1
# -
# A3 - 3 / Modified Intervals
#
# Observations:
# - The larger the interval, the more closely higher-degree polynomial fits resemble the ideal fit of a degree-3 polynomial. There is enough data to characterize the function as a degree-3 polynomial.
# - The smaller the interval, the fewer features of the actual function are captured and deviations matter more. This shows up as stronger variations in the higher-degree polynomials.
# - Lower-degree polynomials may fit certain intervals of the function better (such as the roughly linear regions of the degree-3 polynomial), but they predict the overall function poorly.
# - To conclude, a broad fitting interval is crucial for determining the correct fitting function.
#
| 00/MarioGaimann_getting_started.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <a id="top"></a>Intersecting model grids with shapes
#
# _Note: This feature requires the shapely and descartes packages (which are not a FloPy dependency) so must be installed by the user._
#
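# A minimal installation sketch (assuming a pip-based environment; the exact command may differ for your setup):
#
# ```python
# # run once in a notebook cell or a shell
# # !pip install shapely descartes
# ```
#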
# This notebook shows the grid intersection functionality in flopy. The intersection methods are available through the GridIntersect object. A flopy modelgrid is passed to instantiate the object. Then the modelgrid can be intersected with Points, LineStrings and Polygons through the different intersection methods.
#
# There are three intersection modes:
# - the first (default mode) builds an STR-tree for fast spatial queries before calculating intersections, thereby reducing the number of grid cells it has to process. This method works for structured and vertex grids.
# - the second method does not construct the STR-tree, and loops through all gridcells to determine the intersection between the grid and the shape. This method also works for structured and vertex grids.
# - the third method only works for structured grids and uses information from the structured grid to limit the search space for intersections.
#
# This notebook showcases the functionality of the GridIntersect class.
#
#
# ### Table of Contents
# - [GridIntersect Class](#gridclass)
# - [Rectangular regular grid](#rectgrid)
# - [Polygon with regular grid](#rectgrid.1)
# - [MultiLineString with regular grid](#rectgrid.2)
# - [MultiPoint with regular grid](#rectgrid.3)
# - [Vertex grid](#trigrid)
# - [Polygon with triangular grid](#trigrid.1)
# - [MultiLineString with triangular grid](#trigrid.2)
# - [MultiPoint with triangular grid](#trigrid.3)
# Import some stuff
# +
import sys
import os
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.insert(1, "../..")
# run installed version of flopy or add local path
try:
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
import shapely
from shapely.geometry import Polygon, Point, LineString, MultiLineString, MultiPoint, MultiPolygon
from shapely.strtree import STRtree
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
# -
# ## <a id="gridclass"></a>[GridIntersect Class](#top)
#
# The GridIntersect class is constructed by passing a flopy modelgrid object to the constructor. There are options users can select to change how the intersection is calculated.
#
# - `method`: either `"vertex"` (default) or `"structured"`. If `"structured"` is passed, the intersections are performed using structured methods. These methods use information about the regular grid to limit the search space for intersection calculations.
# - `rtree`: either `True` (default) or `False`, only read when `method="vertex"`. When True, an STR-tree is built, which allows for fast spatial queries. Building the STR-tree does take some time however. Setting the option to False avoids building the STR-tree but requires the intersection calculation to essentially loop through all grid cells.
#
# In general the default option is robust (it always works) and fast and is therefore recommended in most situations. If you are working with a structured grid, then the `method="structured"` can speed up intersection operations (especially for points and linestrings) with the added advantage of not having to build an STR-tree. In some cases with vertex grids, it might not be worth your time building the STR-tree, in which case it can be avoided by passing `rtree=False`.
#
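# As a rough sketch of the three ways the object can be constructed (assuming `sgr` is the structured modelgrid defined later in this notebook):
#
# ```python
# ix_default = GridIntersect(sgr)                                  # vertex method with an STR-tree (default)
# ix_no_tree = GridIntersect(sgr, method="vertex", rtree=False)    # vertex method, no STR-tree
# ix_struct = GridIntersect(sgr, method="structured")              # structured-grid shortcuts (structured grids only)
# ```
#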
# The important methods in the GridIntersect object are:
# - `intersects()`: returns cellids for gridcells that intersect a shape
# - `intersect()`: for intersecting the modelgrid with point, linestrings, and polygon geometries (intersect can accept shapely geometry objects, flopy geometry object, shapefile.Shape objects, and geojson objects)
# - `plot_point()`: for plotting point intersection results
# - `plot_linestring()`: for plotting linestring intersection results
# - `plot_polygon()`: for plotting polygon intersection results
#
# In the following sections examples of intersections are shown for structured and vertex grids for different types of shapes (Polygon, LineString and Point).
# ## <a id="rectgrid"></a>[Rectangular regular grid](#top)
delc = 10*np.ones(10, dtype=float)
delr = 10*np.ones(10, dtype=float)
xoff = 0.
yoff = 0.
angrot = 0.
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None, xoff=xoff, yoff=yoff, angrot=angrot)
sgr.plot()
# ### <a id="rectgrid.1"></a>[Polygon with regular grid](#top)
# Polygon to intersect with:
p = Polygon(shell=[(15, 15), (20, 50), (35, 80.), (80, 50),
(80, 40), (40, 5), (15, 12)],
holes=[[(25, 25), (25, 45), (45, 45), (45, 25)]])
# Create the GridIntersect class for our modelgrid. The keyword arguments are shown below, but since these are the default options, they do not necessarily need to be passed.
ix = GridIntersect(sgr, method="vertex", rtree=True)
# Do the intersect operation for a polygon
# %timeit ix.intersect(p)
result = ix.intersect(p)
# The results are returned as a numpy.recarray containing several fields based on the intersection performed. An explanation of the data in each of the possible fields is given below:
# - **cellids**: contains the cell ids of the intersected grid cells
# - **vertices**: contains the vertices of the intersected shape
# - **areas**: contains the area of the polygon in that grid cell (only for polygons)
# - **lengths**: contains the length of the linestring in that grid cell (only for linestrings)
# - **ixshapes**: contains the shapely object representing the intersected shape (useful for plotting the result)
#
# Looking at the first few entries of the results of the polygon intersection (convert to pandas.DataFrame for prettier formatting)
result[:5]
# pd.DataFrame(result) # for prettier formatting
# The cellids can be easily obtained
result.cellids
# Or the areas
result.areas
# If a user is only interested in which cells the shape intersects with (and not the areas or the actual shape of the intersected object), there is also the `intersects()` method. This method works for all types of shapely geometries.
ix.intersects(p)
# The results of an intersection can be visualized with the plotting methods in the `GridIntersect` object:
# - `plot_polygon`
# - `plot_linestring`
# - `plot_point`
# +
# create a figure and plot the grid
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
# the intersection object contains some helpful plotting commands
ix.plot_polygon(result, ax=ax)
# add black x at cell centers
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0],
"kx", label="centroids of intersected gridcells")
# add legend
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
# -
# Alternatively, the intersection can be calculated using special methods optimized for structured grids. Access these methods by instantiating the GridIntersect class with the `method="structured"` keyword argument.
ixs = GridIntersect(sgr, method="structured")
# The methods are optimized for structured grids, but for certain types of polygons there is no benefit (as can be seen in this example).
# %timeit ixs.intersect(p)
# The result is the same as before:
result2 = ixs.intersect(p)
result2[:5]
# ### <a id="rectgrid.2"></a>[Polyline with regular grid](#top)
# MultiLineString to intersect with:
ls1 = LineString([(95, 105), (30, 50)])
ls2 = LineString([(30, 50), (90, 22)])
ls3 = LineString([(90, 22), (0, 0)])
mls = MultiLineString(lines=[ls1, ls2, ls3])
# %timeit ix.intersect(mls)
result = ix.intersect(mls)
# Plot the result
# +
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_linestring(result, ax=ax, cmap="viridis")
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
# -
# Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
ixs = GridIntersect(sgr, method="structured")
# %timeit ixs.intersect(mls)
# ### [MultiPoint with regular grid](#top)<a id="rectgrid.3"></a>
#
# MultiPoint to intersect with
mp = MultiPoint(points=[Point(50.0, 0.0), Point(45., 45.),
Point(10., 10.), Point(150., 100.)])
# %timeit ix.intersect(mp)
result = ix.intersect(mp)
result
# +
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_point(result, ax=ax, s=50)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
# -
# Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
ixs = GridIntersect(sgr, method="structured")
# %timeit ixs.intersect(mp)
ixs.intersect(mp)
# ## <a id="trigrid"></a>[Vertex Grid](#top)
cell2d = [[0, 83.33333333333333, 66.66666666666667, 3, 4, 2, 7],
[1, 16.666666666666668, 33.333333333333336, 3, 4, 0, 5],
[2, 33.333333333333336, 83.33333333333333, 3, 1, 8, 4],
[3, 16.666666666666668, 66.66666666666667, 3, 5, 1, 4],
[4, 33.333333333333336, 16.666666666666668, 3, 6, 0, 4],
[5, 66.66666666666667, 16.666666666666668, 3, 4, 3, 6],
[6, 83.33333333333333, 33.333333333333336, 3, 7, 3, 4],
[7, 66.66666666666667, 83.33333333333333, 3, 8, 2, 4]]
vertices = [[0, 0.0, 0.0],
[1, 0.0, 100.0],
[2, 100.0, 100.0],
[3, 100.0, 0.0],
[4, 50.0, 50.0],
[5, 0.0, 50.0],
[6, 50.0, 0.0],
[7, 100.0, 50.0],
[8, 50.0, 100.0]]
tgr = fgrid.VertexGrid(vertices, cell2d)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(modelgrid=tgr)
pmv.plot_grid(ax=ax)
# ### <a id="trigrid.1"></a>[Polygon with triangular grid](#top)
ix2 = GridIntersect(tgr)
result = ix2.intersect(p)
# +
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
# -
# ### <a id="trigrid.2"></a>[LineString with triangular grid](#top)
result = ix2.intersect(mls)
# +
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_linestring(result, ax=ax, lw=3)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
# -
# ### <a id="trigrid.3"></a>[MultiPoint with triangular grid](#top)
result = ix2.intersect(mp)
result
# +
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_point(result, ax=ax)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid],
"kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
| examples/Notebooks/flopy3_grid_intersection_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: myenv
# language: python
# name: python3
# ---
import csv
import json
import os
import pandas as pd
somef_dataset_dir = '../data/somef_dataset/papers_with_code'
somef_data ={}
for file in os.listdir(somef_dataset_dir):
if file[-5:] != '.json':
continue
file_data = json.load(open(os.path.join(somef_dataset_dir, file)))
if not file_data:
continue
repo = file_data['codeRepository']['excerpt']
text = '"'
if 'description' in file_data:
for i in file_data['description']:
if i['technique'] == "Header extraction":
text += i['excerpt'].replace('\n', ' ').replace(',', ' ')
text += ' '
"""if 'installation' in file_data:
for i in file_data['installation']:
text += i['excerpt'].replace('\n', ' ').replace(',', ' ')
text += ' '
if 'usage' in file_data:
for i in file_data['usage']:
if i['technique'] == "Header extraction":
text += i['excerpt'].replace('\n', ' ').replace(',', ' ')
text += ' '"""
text += '"'
somef_data[repo] = text
# +
reader = csv.DictReader(open('../data/abstracts.csv'), delimiter=';')
keys = ['Text', 'Label', 'Repo']
data = { key: [] for key in keys}
i = 0
for row in reader:
if row['Repo'] in somef_data and somef_data[row['Repo']] != '""':
data['Text'].append(somef_data[row['Repo']])
data['Label'].append(row['Label'])
data['Repo'].append(row['Repo'])
i+=1
# -
df = pd.DataFrame(data)
df.drop_duplicates(inplace=True)
df.to_csv("../data/somef_data_description.csv", sep=';', index=False)
| documentation/src/generate_dataset_from_somef_json.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NUMPY
#
# Numpy is an important Python library for working with multi-dimensional arrays, and it provides a large collection of high-level mathematical operations on arrays and matrices.
#
# #### Why do we use Numpy?
# - Python lists are slow and not very compact.
# - Python itself is slower than C and C++.
# - Numpy stores elements of the same type in a compact form.
# - Numpy is mostly written in C, so it is comparatively faster.
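#
# As a quick, minimal illustration of the speed difference (a sketch; the timings are indicative only and will vary by machine):

# +
# compare a plain Python loop with the equivalent vectorized NumPy operation
import time
import numpy as np

n = 1_000_000
py_list = list(range(n))
np_arr = np.arange(n)

start = time.time()
py_squares = [x * x for x in py_list]  # element-by-element Python loop
py_time = time.time() - start

start = time.time()
np_squares = np_arr * np_arr  # single vectorized operation implemented in C
np_time = time.time() - start

print("python loop: %.4f s, numpy vectorized: %.4f s" % (py_time, np_time))
# -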
# +
# Taking multi-dimensional array as the input
print(" Enter the Number Of Rows and Columns ")
a = [int(i) for i in input().strip().split(" ")]
n = a[0]
m = a[1]
print("Row = ",n)
print("Column = ",m)
print()
print("Enter the Values ")
b = [int(i) for i in input().strip().split(" ")]
out = [[b[i*m +j] for j in range(m)]for i in range(n)]
out
# +
#importing Modules
import numpy as np
# +
l = [1,2,3,4,5]
print("l =",l)
print("Numpy array= ",np.array(l))
l1 = [1,2,3.14] # All elements will be converted into float
print("Float numpy =", np.array(l1))
l3 = [1,2,3,'str']
print("Numpy array having a string Value =", np.array(l3)) #All elements will be converted into strings
l4 = list(l3)
l4.extend(l1)
l4.extend(l)
print("The List l4 = ", l4)
print("Float numpy =", np.array(l4)) #Top = 1.String 2. Float 3. Integer
# +
a = np.zeros(10) #ShortCut to print 10 zeros
print(a)
b = np.ones(5) #ShortCut to print 10 ones
print(b)
print(a.dtype)
print(b.dtype)
# -
a = np.zeros(10,int)
b = np.ones(5,str)
print(a.dtype)
print(b.dtype)
#2-D List Using Numpy
print("2-D List")
a = np.zeros((2,3))
print(a)
print()
print("3-d List")
b = np.zeros((4,3,2),int)
print(b)
# +
b = np.zeros(10,int)
print(b)
b[2] = 1
print(b)
b[3] = '4'
print(b)
#b[5] = 's' #ValueError: invalid literal for int() with base 10: 'str'
# -
a = np.zeros((4,3,2),int)
print(a)
print()
print(a[2])
print()
print(a[2][1])
print()
print(a[2][1][0])
# +
#Slicing
a = np.zeros(15)
print(a[2 : 6])
print(a.shape) # a.shape gives the shape (dimensions) of the array
b = np.ones((3,4),int) # .shape returns a tuple
print(b.shape)
print(b)
a = np.array([[1,2,3],[4,5,6]])
print()
print(a)
print()
print(a[ : ,0])
print()
b = np.array([[1,2,3],[4,5,6],[11,12,13],[14,15,16]])
print(b)
print(b[ : ,1])
print()
print(b[0:2,0:2])
print()
print(b[0:3,1:3])
print()
c = np.ones((5,5),int)
#c = np.array[[1,2,3,],[4,5,6]] #TypeError: 'builtin_function_or_method' object is not subscriptable
#print(c.shape)
# -
b = np.array([[1,2,3,4,5],[6,7,8,9,0],[0,9,8,7,6],[5,4,3,2,1]])
print(b)
print()
print(b[1:3,1:4])
print()
print(b[ : , :2])
print(np.arange(10))
print(np.arange(100))
a = np.zeros(5)
print(a.shape)
b = np.zeros((2,3),int)
print(b.shape)
a = np.array([0, 1, 2, 3])
a.shape
# some numpy operations
a = np.array([1,2,3,4])
b = np.array([3,4,5,6])
print(a+2)
print(a + b)
print(a *2)
print(a * b)
print(a**b)
print(a ** 2)
c = np.array([3,4,5,6,1,2,3,4])
print(c.sum())
print(c.mean())
a = np.array([i*i for i in range(10)])
print(a)
print()
n = 5
m = 2
# printing the multi-dimensional array where n represents rows and m represents columns
out = [[a[i*m + j]for j in range(m)]for i in range(n)]
c = np.array(out)
print(c)
import math
x = np.array([ int(i) for i in input().strip().split(" ")])
print(x)
x_mean = x.mean()
print()
ans = ((x - x_mean) **2).sum()
print(ans)
# +
# printing the dot product
a1 = np.array([[1,2],[3,4]])
a2 = np.array([[10,20],[30,40]])
print(a1.dot(a2))
print(a1.dot(a1))
print(a2.dot(a1))
print(a2.dot(a2))
# -
a1 = np.mat([[1,2,3],[4,5,6]])
a2 = np.mat([[1,2],[4,5],[1,3]])
print(a1.shape,a2.shape)
print(a1 * a2)
print(a1)
print(a2)
a = np.matrix('1 2 4; 3 4 5')
a.shape
a = np.sum([[0, 1], [0, 5],[20,15]])
print(a)
b = np.sum([[0, 1], [0, 5]], axis=1)
print(b)
| algorithms/ml/numpy/Numpy operations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="R8C06s763Mie"
# <div align="center">
# <h1><img width="30" src="https://madewithml.com/static/images/rounded_logo.png"> <a href="https://madewithml.com/">Made With ML</a></h1>
# Applied ML · MLOps · Production
# <br>
# Join 20K+ developers in learning how to responsibly <a href="https://madewithml.com/about/">deliver value</a> with applied ML.
# </div>
#
# <br>
#
# <div align="center">
# <a target="_blank" href="https://newsletter.madewithml.com"><img src="https://img.shields.io/badge/Subscribe-20K-brightgreen"></a>
# <a target="_blank" href="https://github.com/GokuMohandas/madewithml"><img src="https://img.shields.io/github/stars/GokuMohandas/madewithml.svg?style=social&label=Star"></a>
# <a target="_blank" href="https://www.linkedin.com/in/goku"><img src="https://img.shields.io/badge/style--5eba00.svg?label=LinkedIn&logo=linkedin&style=social"></a>
# <a target="_blank" href="https://twitter.com/GokuMohandas"><img src="https://img.shields.io/twitter/follow/GokuMohandas.svg?label=Follow&style=social"></a>
# <p>🔥 Among the <a href="https://github.com/topics/deep-learning" target="_blank">top ML</a> repositories on GitHub</p>
# </div>
#
# <br>
# <hr>
# + [markdown] id="ckRIiGksZUnw"
# # Data Quality
# In this lesson we'll introduce and illustrate the fundamental concept of data quality.
# + [markdown] id="K-BaYyztanFh"
# <div align="left">
# <a target="_blank" href="https://madewithml.com/courses/ml-foundations/data-quality/"><img src="https://img.shields.io/badge/📖 Read-blog post-9cf"></a>
# <a href="https://github.com/GokuMohandas/madewithml/blob/main/notebooks/09_Data_Quality.ipynb" role="button"><img src="https://img.shields.io/static/v1?label=&message=View%20On%20GitHub&color=586069&logo=github&labelColor=2f363d"></a>
# <a href="https://colab.research.google.com/github/GokuMohandas/madewithml/blob/main/notebooks/09_Data_Quality.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
# </div>
# + [markdown] id="qAE9BjMH8x4q"
# In a nutshell, a machine learning model consumes input data and produces predictions. The quality of the predictions directly corresponds to the quality of data you train the model with; **garbage in, garbage out**. Check out this [article](https://venturebeat.com/2018/06/30/understanding-the-practical-applications-of-business-ai/) on where it makes sense to use AI and how to properly apply it.
# + [markdown] id="iLYQQyDzFx5c"
# We're going to go through all the concepts with concrete code examples and some synthesized data to train our models on. The task is to determine whether a tumor will be benign (harmless) or malignant (harmful) based on leukocyte (white blood cells) count and blood pressure. This is a synthetic dataset that we created and has no clinical relevance.
# + [markdown] id="FwJBLYj7HTie"
# # Set up
# + id="5o1UIQh6HUih"
import numpy as np
import random
# + id="pe7s9nLDHUlN"
SEED = 1234
# + id="V4ax1hLXHUob"
# Set seed for reproducibility
np.random.seed(SEED)
random.seed(SEED)
# + [markdown] id="LaHVAEnzKbKl"
# # Full dataset
# + [markdown] id="1e1RSFdnKfN2"
# We'll first train a model with the entire dataset. Later we'll remove a subset of the dataset and see the effect it has on our model.
# + [markdown] id="rgbY9WkklG6T"
# ## Data
# + [markdown] id="FLH7kzZl8wnf"
# ### Load data
# + id="5wDazzQdaoy2"
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import scatter_matrix
# + id="y6LNWmoidh8q" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1608144475885, "user_tz": 420, "elapsed": 1212, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="9b242558-9b42-40c7-8171-1bf813f8ba47"
# Load data
url = "https://raw.githubusercontent.com/GokuMohandas/madewithml/main/datasets/tumors.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle
df.head()
# + id="4yUmtoznqc9r" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608144475886, "user_tz": 420, "elapsed": 1202, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="e0379de3-a50d-4156-cb23-bf5219948b78"
# Define X and y
X = df[['leukocyte_count', 'blood_pressure']].values
y = df['tumor_class'].values
print ("X: ", np.shape(X))
print ("y: ", np.shape(y))
# + id="nXFUmnfte6z6" colab={"base_uri": "https://localhost:8080/", "height": 279} executionInfo={"status": "ok", "timestamp": 1608144476295, "user_tz": 420, "elapsed": 1600, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="791e034b-f6d7-430a-8a53-ef67a8e1594c"
# Plot data
colors = {'benign': 'red', 'malignant': 'blue'}
plt.scatter(X[:, 0], X[:, 1], c=[colors[_y] for _y in y], s=25, edgecolors='k')
plt.xlabel('leukocyte count')
plt.ylabel('blood pressure')
plt.legend(['malignant ', 'benign'], loc="upper right")
plt.show()
# + [markdown] id="yVD-8qgGwz5l"
# We want to choose features that carry strong predictive signal for our task. If you want to improve performance, you need to continuously do feature engineering by collecting and adding new signals. You may run into a new feature that is highly correlated with your existing features, but it may still possess some unique signal that boosts your predictive performance.
# + id="kXXO2ZlGr8Kp" colab={"base_uri": "https://localhost:8080/", "height": 429} executionInfo={"status": "ok", "timestamp": 1608144476533, "user_tz": 420, "elapsed": 1826, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="9f41cedb-d50c-4259-86bb-c22a60e03f85"
# Correlation matrix
scatter_matrix(df, figsize=(5, 5));
df.corr()
# + [markdown] id="IcA13EL9MKq0"
# ### Split data
# + id="v7vmMREZMK6w"
import collections
from sklearn.model_selection import train_test_split
# + id="7ZrZrhNvD5q9"
TRAIN_SIZE = 0.70
VAL_SIZE = 0.15
TEST_SIZE = 0.15
# + id="NklqFClX7p9H"
def train_val_test_split(X, y, train_size):
"""Split dataset into data splits."""
X_train, X_, y_train, y_ = train_test_split(X, y, train_size=train_size, stratify=y)  # use the function argument rather than the global
X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
return X_train, X_val, X_test, y_train, y_val, y_test
# + colab={"base_uri": "https://localhost:8080/"} id="Ych4dgkLhRV3" executionInfo={"status": "ok", "timestamp": 1608144477465, "user_tz": 420, "elapsed": 2730, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="fc857b44-701a-48e5-c551-86c7d970f00c"
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
# + [markdown] id="_OH53BpMi-e_"
# ### Label encoding
# + id="oA7kC6GFjD8c"
from sklearn.preprocessing import LabelEncoder
# + id="wLC4GRM2jEAt"
# Output vectorizer
label_encoder = LabelEncoder()
# + id="LMR1_Du9jEEf" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608144477467, "user_tz": 420, "elapsed": 2710, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="06b1f30a-f966-434a-9fc9-7b9c6a8483a4"
# Fit on train data
label_encoder = label_encoder.fit(y_train)
classes = list(label_encoder.classes_)
print (f"classes: {classes}")
# + id="McIId6GajHty" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608144477467, "user_tz": 420, "elapsed": 2701, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="2b9d54c2-c641-4768-cddf-8c149999a947"
# Convert labels to tokens
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.transform(y_train)
y_val = label_encoder.transform(y_val)
y_test = label_encoder.transform(y_test)
print (f"y_train[0]: {y_train[0]}")
# + id="O7NWNJQxMM-t" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608144477468, "user_tz": 420, "elapsed": 2692, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="edfa1fac-7988-484d-9855-f5dac3db88f3"
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
# + [markdown] id="rprTIVQIjLq6"
# ### Standardize data
# + id="Vk-dk3X0jLxR"
from sklearn.preprocessing import StandardScaler
# + id="GDTGSoERjL0B"
# Standardize the data (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
# + id="7CcSCO4bjL2n"
# Apply scaler on training and test data (don't standardize outputs for classification)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# + id="hzQ5s5eqjPMy" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608144477470, "user_tz": 420, "elapsed": 2663, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="5939484b-9072-47e3-a7bf-bc3187496f60"
# Check (means should be ~0 and std should be ~1)
print (f"X_test[0]: mean: {np.mean(X_test[:, 0], axis=0):.1f}, std: {np.std(X_test[:, 0], axis=0):.1f}")
print (f"X_test[1]: mean: {np.mean(X_test[:, 1], axis=0):.1f}, std: {np.std(X_test[:, 1], axis=0):.1f}")
# + [markdown] id="YZwkrokjlL81"
# ## Modeling
# + id="OSmsF8b8A_r-"
import torch
# + id="nF_-_GQTHMuL" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608144480176, "user_tz": 420, "elapsed": 5352, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="dd57efdb-23f9-4b9e-a092-3dcc6295634e"
# Set seed for reproducibility
torch.manual_seed(SEED)
# + [markdown] id="owLnzReJJdpj"
# ### Model
# + id="F6PCHuW0rBco"
from torch import nn
import torch.nn.functional as F
# + id="6NiIKIzKHRMa"
INPUT_DIM = 2 # X is 2-dimensional
HIDDEN_DIM = 100
NUM_CLASSES = 2
# + id="pfDgN3F-GCKP"
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in))  # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# + id="qaYgpFapGCRg" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608144480178, "user_tz": 420, "elapsed": 5325, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="978dfb40-b192-4c6e-c065-8e37155ce9ef"
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
# + [markdown] id="fkSY4lj5Ibe0"
# ### Training
# + id="bvAvgInrsXtp"
from torch.optim import Adam
# + id="_uzT5WdgrPcV"
LEARNING_RATE = 1e-3
NUM_EPOCHS = 5
BATCH_SIZE = 32
# + id="w5zvcjQ8GCHF"
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# + id="q5t1LqwFrMej"
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# + id="CA3un62trOZi"
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# + id="tEfrIX0tGCBU"
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# + id="NXyUnTDlGB_f" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608144480857, "user_tz": 420, "elapsed": 5955, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="20bb8dfa-2067-4b68-d804-1298650e917d"
# Training
for epoch in range(NUM_EPOCHS*10):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%10==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
# + [markdown] id="Gp-2SuGlJCd8"
# ### Evaluation
# + id="ZIq3_RBwgwzT"
import json
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_fscore_support
# + id="rfJfD4Wvgw2W"
def get_performance(y_true, y_pred, classes):
"""Per-class performance metrics."""
# Performance
performance = {"overall": {}, "class": {}}
# Overall performance
metrics = precision_recall_fscore_support(y_true, y_pred, average="weighted")
performance["overall"]["precision"] = metrics[0]
performance["overall"]["recall"] = metrics[1]
performance["overall"]["f1"] = metrics[2]
performance["overall"]["num_samples"] = np.float64(len(y_true))
# Per-class performance
metrics = precision_recall_fscore_support(y_true, y_pred, average=None)
for i in range(len(classes)):
performance["class"][classes[i]] = {
"precision": metrics[0][i],
"recall": metrics[1][i],
"f1": metrics[2][i],
"num_samples": np.float64(metrics[3][i]),
}
return performance
# + id="5MM9gNg6I4eG"
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# + id="5sRAXVwPOSBs" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608144480860, "user_tz": 420, "elapsed": 5929, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="89b9f7e9-c397-493a-a8bc-59050a2a5a93"
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# + [markdown] id="ViwfNFOYRDkm"
# We're going to plot a point that we know belongs to the malignant tumor class. Our well-trained model should accurately predict that it is indeed a malignant tumor!
# + id="q9qZ6Fijgvfd"
def plot_multiclass_decision_boundary(model, X, y):
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))
cmap = plt.cm.Spectral
X_test = torch.from_numpy(np.c_[xx.ravel(), yy.ravel()]).float()
y_pred = model(X_test, apply_softmax=True)
_, y_pred = y_pred.max(dim=1)
y_pred = y_pred.reshape(xx.shape)
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# + id="bzFb90SJOmI2" colab={"base_uri": "https://localhost:8080/", "height": 336} executionInfo={"status": "ok", "timestamp": 1608144481128, "user_tz": 420, "elapsed": 6180, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="5966696c-a66c-4a51-a4a7-7616d3fd21bf"
# Visualize the decision boundary
plt.figure(figsize=(8,5))
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
# Sample point near the decision boundary
mean_leukocyte_count, mean_blood_pressure = X_scaler.transform(
[[np.mean(df.leukocyte_count), np.mean(df.blood_pressure)]])[0]
plt.scatter(mean_leukocyte_count+0.05, mean_blood_pressure-0.05, s=200,
c='b', edgecolor='w', linewidth=2)
# Annotate
plt.annotate('true: malignant,\npred: malignant',
color='white',
xy=(mean_leukocyte_count, mean_blood_pressure),
xytext=(0.4, 0.65),
textcoords='figure fraction',
fontsize=16,
arrowprops=dict(facecolor='white', shrink=0.1))
plt.show()
# + [markdown] id="o231eJaQPi5E"
# Great! We achieved strong performance on both our train and test data splits. We're going to use this dataset to show the importance of data quality.
# + [markdown] id="pZ3rnGH8PtBu"
# # Reduced dataset
# + [markdown] id="ONRP3WQgR3zc"
# Let's remove some training data near the decision boundary and see how robust the model is now.
# + [markdown] id="IoiDkq7ClP2Q"
# ## Data
# + [markdown] id="3E8MUvGCK0zW"
# ### Load data
# + id="sU69PjH3Z4bm" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1608144481132, "user_tz": 420, "elapsed": 6174, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="9db28cbc-f7ea-4369-9e25-c684075b1710"
# Raw reduced data
url = "https://raw.githubusercontent.com/GokuMohandas/madewithml/main/datasets/tumors_reduced.csv"
df_reduced = pd.read_csv(url, header=0) # load
df_reduced = df_reduced.sample(frac=1).reset_index(drop=True) # shuffle
df_reduced.head()
# + id="pWh45mgZqqaO" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608144481136, "user_tz": 420, "elapsed": 6167, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="8a6b2779-4b23-43e8-ca59-640254f89fce"
# Define X and y
X = df_reduced[['leukocyte_count', 'blood_pressure']].values
y = df_reduced['tumor_class'].values
print ("X: ", np.shape(X))
print ("y: ", np.shape(y))
# + id="1OwgEJSsZ4g5" colab={"base_uri": "https://localhost:8080/", "height": 279} executionInfo={"status": "ok", "timestamp": 1608144481480, "user_tz": 420, "elapsed": 6501, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="5d8d7b98-625b-40d8-987d-0275811ceb29"
# Plot data
colors = {'benign': 'red', 'malignant': 'blue'}
plt.scatter(X[:, 0], X[:, 1], c=[colors[_y] for _y in y], s=25, edgecolors='k')
plt.xlabel('leukocyte count')
plt.ylabel('blood pressure')
plt.legend(['malignant ', 'benign'], loc="upper right")
plt.show()
# + [markdown] id="zxjsnyNtK4EO"
# ### Split data
# + colab={"base_uri": "https://localhost:8080/"} id="nUoL8WFkhU5J" executionInfo={"status": "ok", "timestamp": 1608144481480, "user_tz": 420, "elapsed": 6490, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="d63d44b3-acc1-4e24-deda-298b8fb9c1a8"
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
# + [markdown] id="tTNpd_IsLO6V"
# ### Label encoding
# + id="GSAMj9h6lLBo"
# Encode class labels
label_encoder = LabelEncoder()
label_encoder = label_encoder.fit(y_train)
num_classes = len(label_encoder.classes_)
y_train = label_encoder.transform(y_train)
y_val = label_encoder.transform(y_val)
y_test = label_encoder.transform(y_test)
# + id="w-Wvh03BMf09" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608144481481, "user_tz": 420, "elapsed": 6472, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="6c46da51-d8b3-4ce0-f8cd-2706623064b3"
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
# + [markdown] id="7Vk9k9dnLSDg"
# ### Standardize data
# + id="-8FjM7u8llPT"
# Standardize inputs using training data
X_scaler = StandardScaler().fit(X_train)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# + [markdown] id="F_EdY0FLlXTc"
# ## Modeling
# + [markdown] id="90RuBAKFLj0X"
# ### Model
# + id="-IZ4YOKtSCRk"
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
# + [markdown] id="J_2ydOvTL39a"
# ### Training
# + id="bW7Tn7H_4RAp"
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# + id="EKo1sfHsLq2B"
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# + id="7NBWLKDISDj8"
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# + id="-CAhJIhEs5va" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1608144481484, "user_tz": 420, "elapsed": 6438, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="284bff93-9945-4329-c594-8ace041660c8"
# Training
for epoch in range(NUM_EPOCHS*10):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%10==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
# + [markdown] id="7CnVl8OzMFBL"
# ### Evaluation
# + id="uGWbZlhUSFOz"
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# + colab={"base_uri": "https://localhost:8080/"} id="Iyw8bjgrhl3Q" executionInfo={"status": "ok", "timestamp": 1608144481486, "user_tz": 420, "elapsed": 6424, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="12389fd4-a2a5-49a2-d11c-598c9b6fd487"
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# + id="DmTCz8OnSFRn" colab={"base_uri": "https://localhost:8080/", "height": 336} executionInfo={"status": "ok", "timestamp": 1608144481748, "user_tz": 420, "elapsed": 6676, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjMIOf3R_zwS_zZx4ZyPMtQe0lOkGpPOEUEKWpM7g=s64", "userId": "00378334517810298963"}} outputId="83f11dfb-5b27-4310-a49a-80ad5f626e65"
# Visualize the decision boundary
plt.figure(figsize=(8,5))
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
# Sample point near the decision boundary (same point as before)
plt.scatter(mean_leukocyte_count+0.05, mean_blood_pressure-0.05, s=200,
c='b', edgecolor='w', linewidth=2)
# Annotate
plt.annotate('true: malignant,\npred: benign',
color='white',
xy=(mean_leukocyte_count, mean_blood_pressure),
xytext=(0.45, 0.60),
textcoords='figure fraction',
fontsize=16,
arrowprops=dict(facecolor='white', shrink=0.1))
plt.show()
# + [markdown] id="kdP98xnlbvVn"
# This is a fragile but highly realistic scenario. Based on our reduced synthetic dataset, we trained a model that generalizes really well on the test data. But when we ask for a prediction for the same point tested earlier (which we know is malignant), the prediction is now a benign tumor. We would have completely missed the tumor. To mitigate this, we can:
# 1. Get more data around the space we are concerned about
# 2. Consume predictions with caution when they are close to the decision boundary (see the sketch below)
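# + [markdown]
# A minimal sketch of point 2 above (not part of the original notebook): flag test predictions whose softmax confidence is low, i.e. points that sit close to the decision boundary. The 0.75 cutoff is an arbitrary illustrative choice.
# +
confidence, predicted_class = y_prob.max(dim=1)
uncertain = confidence < 0.75  # hypothetical threshold; tune for your own risk tolerance
print (f"low-confidence predictions: {uncertain.sum().item()} of {len(confidence)}")
# -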
# + [markdown] id="yWzAC39adTwk"
# # Takeaway
# + [markdown] id="8q3CiF_xF5rY"
# Models are not crystal balls. So before any machine learning, it's important that we really look at our data and ask ourselves whether it is truly representative of the task we want to solve. The model itself may fit and generalize well on your data, but if the data is of poor quality to begin with, the model cannot be trusted.
# + [markdown] id="cR45QpjQdY6N"
# Once you are confident that your data is of good quality, you can finally start thinking about modeling. The type of model you choose depends on many factors, including the task, type of data, complexity required, etc.
#
# So once you figure out what type of model your task needs, start with simple models and then slowly add complexity. You don’t want to start with neural networks right away because that may not be the right model for your data and task. Striking this balance in model complexity is one of the key responsibilities of a data scientist. **simple models → complex models**
#
#
| notebooks/09_Data_Quality.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import sklearn.datasets as datasets
import torch
from torch import nn
from torch import optim
from nflows.flows import MaskedAutoregressiveFlow
# -
x, y = datasets.make_moons(128, noise=.1)
plt.scatter(x[:, 0], x[:, 1]);
flow = MaskedAutoregressiveFlow(features = 2, hidden_features = 4)
optimizer = optim.Adam(flow.parameters())
num_iter = 5000
for i in range(num_iter):
x, y = datasets.make_moons(128, noise=.1)
x = torch.tensor(x, dtype=torch.float32)
optimizer.zero_grad()
loss = -flow.log_prob(inputs=x).mean()
loss.backward()
optimizer.step()
if (i + 1) % 500 == 0:
        xline = torch.linspace(-1.5, 2.5, 100)
        yline = torch.linspace(-.75, 1.25, 100)
xgrid, ygrid = torch.meshgrid(xline, yline)
xyinput = torch.cat([xgrid.reshape(-1, 1), ygrid.reshape(-1, 1)], dim=1)
with torch.no_grad():
zgrid = flow.log_prob(xyinput).exp().reshape(100, 100)
plt.contourf(xgrid.numpy(), ygrid.numpy(), zgrid.numpy())
plt.title('iteration {}'.format(i + 1))
plt.show()
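# +
# Optional check (a sketch; assumes nflows' `Flow.sample` API is available on the trained
# flow): draw samples and compare them visually with the two-moons data plotted above.
with torch.no_grad():
    samples = flow.sample(1000)
plt.scatter(samples[:, 0], samples[:, 1], s=5)
plt.title('samples from the trained flow')
plt.show()
# -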
| examples/Example-MAF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.append('./model/model')
sys.path.append('./model/utils')
# +
from keras.models import load_model
import os
import scipy.io.wavfile as wavfile
import numpy as np
#import utils
import tensorflow as tf
# -
from model.utils import utils
def get_data_name(line, people, database, face_emb):
parts = line.split() # get each name of file for one testset
mix_str = parts[0]
name_list = mix_str.replace('.npy','')
name_list = name_list.replace('mix-','',1)
names = name_list.split('-')
single_idxs = []
for i in range(people):
single_idxs.append(names[i])
file_path = database + mix_str
mix = np.load(file_path)
face_embs = np.zeros((1,75,1,1792,people))
for i in range(people):
face_embs[0,:,:,:,i] = np.load(face_emb+single_idxs[i]+"_face_emb.npy")
return mix,single_idxs,face_embs
# +
#parameters
people = 2
num_gpu=1
#path
model_path = './saved_AV_models/AVmodel-2p-009-0.50991.h5'
result_path = './predict/'
os.makedirs(result_path,exist_ok=True)
# +
database = './data/AV_model_database/mix/'
face_emb_path = './model/face_embedding/face1022_emb/'
print('Initialing Parameters......')
#loading data
print('Loading data ......')
test_file = []
with open('./data/AVdataset_val.txt','r') as f:
test_file = f.readlines()
# -
from model.model.loss import audio_discriminate_loss2 as audio_loss
loss = audio_loss(gamma=0.1, beta=0.2, people_num=people)
#model = load_model('model/multi_task/try.h5', custom_objects={'loss': audio_loss})
av_model = load_model(model_path,custom_objects={'tf':tf,'loss_func': loss})
for line in test_file[:10]:
    mix, single_idxs, face_embed = get_data_name(line, people, database, face_emb_path)
    mix_ex = np.expand_dims(mix, axis=0)
    cRMs = av_model.predict([mix_ex, face_embed])
    cRMs = cRMs[0]
    prefix = ''
    for idx in single_idxs:
        prefix += idx + '-'
    # one complex ratio mask (cRM) per speaker: the last axis indexes the speakers
    for i in range(people):
        cRM = cRMs[:, :, :, i]
        assert cRM.shape == (298, 257, 2)
        F = utils.fast_icRM(mix, cRM)
        print(F.shape)
        T = utils.fast_istft(F, power=False)
        filename = result_path + str(single_idxs[i]) + '.wav'
        wavfile.write(filename, 16000, T)
import numpy as np
a = np.array([1,2,3])
a
b = np.array([5,6,7])
c = a*b
c
| Serving.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Calculating the Risk of a Security
# *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).*
# Load the data for Microsoft (‘MSFT’) for the period ‘2000-1-1’ until today. <br />
# Assess the daily and the annual risk of ‘MSFT’. Repeat the exercise for Apple for the same timeframe.
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
data = pd.read_csv('D:/Python/MSFT_AAPL_2000_2017.csv', index_col='Date')
returns = np.log(data / data.shift(1))
returns
# ### MSFT
# Daily risk:
returns['MSFT'].std()
# Annual risk:
returns['MSFT'].std() * 250 ** 0.5
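# The annualization uses the square-root-of-time rule: assuming roughly independent daily log returns, variances add over the ~250 trading days in a year, so annual std = daily std * 250 ** 0.5. As a quick sanity check (a sketch, not part of the original exercise), the same number can be obtained from the variance:
(returns['MSFT'].var() * 250) ** 0.5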
# ### Apple
# Daily risk:
returns['AAPL'].std()
# Annual risk:
returns['AAPL'].std() * 250 ** 0.5
# ******
# Store the volatilities of the two stocks in an array called "vols".
vols = returns[['MSFT', 'AAPL']].std() * 250 ** 0.5
vols
# How are the two risk values different?
| 23 - Python for Finance/3_Measuring Investment Risk/2_Calculating a Security's Risk in Python (5:55)/Calculating the Risk of a Security - Solution_CSV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Introduction Basic statistics in R
#
# Here, we will work with random numbers, distributions and sampling
#
# ## Installation of libraries and necessary software
#
# Install the necessary libraries (only needed once) by executing (shift-enter) the following cell:
#
install.packages("MASS", repos='http://cran.us.r-project.org')
install.packages("cluster", repos='http://cran.us.r-project.org')
# ## Loading data and libraries
# This requires that the installation above have been finished without error
library("MASS")
library("cluster")
# ### Exercise 1
# _Probabilities_
# - Read the description of ```dnorm()```: ```help(dnorm)```
# - Plot the density (```dnorm()```) and the cumulative (```pnorm()```) probability distribution of a normal distribution with mean 2.5 and standard deviation 1.5.
# - Read the probability of having a number between 0.5 and 4 from the cumulative distribution. Verify this number with its calculation ```pnorm(4, 2.5, 1.5) - pnorm(0.5, 2.5, 1.5)```
# - Repeat the same for the intervals (-1, 2) and (1, 2)
#
# _Frequencies_
# - The relative number of observations per unit interval around $x=2$ (between 1.5 and 2.5) is given by ```dnorm(x=2, 2.5, 1.5)```. Hence
# - In a sample of 100 the expected number of observations per unit interval in the immediate vicinity of $x=2$ is 25.16
# - In a sample of 1000 the expected number of observations per unit interval in the immediate vicinity of $x=2$ is 251.6
# - The expected number of values from a sample of 1000, between 1.9 and 2.1, is approximately $0.2 \cdot 251.6 = 50.32$, or, more precisely,
# ```pnorm(2.1, 2.5, 1.5) - pnorm(1.9, 2.5, 1.5)```
#
# - Repeat the calculation for the intervals (-1,2) and (1,2).
#
# +
x <- seq(-5,10,0.01)
density <- dnorm(x, mean=2.5, sd=1.5)
cumulative <- pnorm(x, mean=2.5, sd=1.5)
## plot the functions:
# This code is related to a question below and the sample with 1000 observations above
plot(x, 1000*dnorm(x, mean=2.5, sd=1.5), type="l",ylab="frequency")
interval <- seq(1.5,2.5,0.01)
polygon(c(1.5,interval,2.5), c(0,1000*dnorm(interval, 2.5,1.5),0), col = "#FF000055")
polygon(c(1.5,2.5,2.5,1.5), 1000*c(dnorm(2, 2.5,1.5),dnorm(2, 2.5,1.5),0,0), col = "#00FF0055")
points(2,1000*dnorm(2,2.5,1.5),pch=15,col=2)
text(2,1000*dnorm(2,2.5,1.5),pch=15,col=2,labels =1000*dnorm(2,2.5,1.5), pos=1)
# -
# ##### Question I: <u>What are the 3 different arguments of these functions? How are they related to the Gaussian function?</u>
#
# _Answer_
#
# ##### Question II: <u>What is the difference between the first argument of ```dnorm``` and ```rnorm```?</u>
#
# _Answer_
#
# ##### Question III: <u>How would you estimate the probability of having a number between 0.5 and 4 from the density distribution?</u>
#
# _Answer_
#
# ##### Question IV: <u>What is the probability to obtain the number 2?</u>
#
# _Answer_
#
# ##### Question V: <u>What is the difference between probability and frequency?</u>
#
# _Answer_
#
# ##### Question VI: <u>How would you calculate the area of the rectangle and the area under the curve in the figure given above?</u>
#
# _Answer_
#
# ### Exercise 2
# We now check the behavior of the t-distribution, which is an integral part of the t-test, and of the exponential distribution.
# - Plot the density and cumulative probability distribution (```dt()``` and ```pt()``` with argument ```df=3```) for a t-distribution with 3 degrees of freedom. Plot the normal distribution over it with ```lines()```.
# - Plot the density and cumulative probability distribution for an exponential distribution (```dexp()```) with a rate parameter equal to 1 (the default). Repeat with a rate parameter equal to 2. What happens when you make the plot on a logarithmic (y-axis) and on a double-logarithmic scale?
#
# +
x <- seq(-5,5,0.01)
# density function
dens_t <- dt(x, df=3)
dens_exp <- dexp(x, rate = 1)
# continue ...
# -
# ##### Question I: <u>What happens with the t-distribution of high degrees of freedom?</u>
#
# _Answer_
#
# ##### Question II: <u>Which is a good visual way to check whether data is exponentially distributed?</u>
#
# _Answer_
#
# ### Exercise 3
# Use the function ```rnorm()``` to draw a random sample of 25 values from a normal distribution with a mean of 0 and a standard deviation equal to 1.0. Use a histogram, with ```probability=TRUE``` to display the values. Overlay the histogram with: (a) an estimated density curve; (b) the theoretical density curve for a normal distribution with mean 0 and standard deviation equal to 1.0. Repeat with samples of 100, 500 and 1000 values, showing the different displays in different panels on the same graphics page (```par(mfrow=...)```)
#
rand <- rnorm(25)
hist(rand, probability = TRUE,ylim=c(0,0.5), border="#FFFFFF", col="#333333")
lines(density(rand))
x <- seq(-5,5,0.01)
lines(x, dnorm(x), col=2)
# ##### Question I: <u>What are the black and the red lines?</u>
#
# _Answer_
#
# ##### Question II: <u>What improves when you increase the number of values?</u>
#
# _Answer_
#
# ##### Question III: <u>What does ```#333333``` mean?</u>
#
# _Answer_
# ### Exercise 4
# Data with a distribution close to lognormal are common. Size measurements of biological organisms often have this character. As an example, consider the measurements of body weight (```body```) in the data frame ```Animals``` (```MASS``` package). Begin by drawing a histogram of the untransformed values, and overlay a density curve. Then
#
# - Draw an estimated density curve for the logarithms of the values.
# - Determine the mean and standard deviation of ```log(Animals$body)```. Overlay the estimated density with the theoretical density for a normal distribution with the mean and standard deviation just obtained.
#
#
# +
# Add you code here:
# -
# ##### Question I: <u>Does the distribution look like a normal distribution after transformation to a logarithmic scale??</u>
#
# _Answer_
# ### Exercise 5
# The following script plots an estimated density curve for a random sample of 50 values from a normal distribution:
#
# - Plot estimated density curves (```plot(density(...))```) for random samples containing 50 values
# - the normal distribution
# - the uniform distribution (```runif(50)```)
# - the $t$-distribution with 3 degrees of freedom.
# - Overlay the three plots and use different colors.
# - Repeat the same but now taking random samples of 500 and 5000 values
#
#
# +
# Add your code here:
# -
# ##### Question I: <u>Why is the estimated density curve of the uniformly distributed values much higher?</u>
#
# _Answer_
# ### Exercise 6
# There are two ways to make the estimated density smoother:
#
# - One is to increase the number of samples
# - The other one is to increase the bandwidth. For example
# ```
# plot(density(rnorm(50), bw=0.2), type="l")
# plot(density(rnorm(50), bw=0.6), type="l")
# ```
#
# Repeat each of these with bandwidths of 0.15, with default choice of bandwidth, and with the bandwidth set to 0.75
# Add your code here:
# ### Exercise 7
# Density estimation has the issue that it depends strongly on the bandwidth and the choice of kernel, which can make it unreliable for judging normality. A much better tool is the quantile-quantile plot, which uses an output similar to cumulative probability distributions. Try the following script and assess how the plot characterizes normally distributed data.
# - See how the plot deviates when comparing the normal distribution with random variables from other distributions.
# - Increase the number of data points
# - Substitute the ```rnorm()``` function by random variables from other distributions (e.g. ```rexp()``` and ```rlnorm()```)
#
qqnorm(rnorm(10))
qqnorm(rnorm(15))
qqnorm(rnorm(200))
# ##### Question I: <u>How does the ```qqnorm()``` function show that the data is normally distributed?</u>
#
# _Answer_
#
# ##### Question II: <u>Which is the limiting function when increasing the number of values to infinity?</u>
#
# _Answer_
#
# ##### Question III: <u>How do the other tested distributions show their difference to a normal distribution when using the ```qqnorm()``` function?</u>
#
# _Answer_
#
# ### Exercise 8
# Take the data sets ```lh``` and ```Animals``` and check for normality using ```qqnorm```. Do the same on their logarithmic values. Additionally, use ```boxplot()``` to get an idea about how the boxplot of a normal distribution looks.
#
library(MASS)
data("Animals")
# add your code here
# ##### Question I: <u>Which data set is (approximately) normally distributed?</u>
#
# _Answer_
#
# ##### Question II: <u>Which data set is (approximately) log-normally distributed?</u>
#
# _Answer_
# ### Exercise 9
# Here, we will calculate the limit distribution of the mean of random variables. Note that the mean corresponds to the sum divided by the number of variables, and therefore the central limit theorem applies.
#
# First take a random sample from the normal distribution, and plot the estimated density function
#
# Then, take the repeated samples of size 4, calculate the mean for each such sample, and plot the density function for the distribution of means:
# Additionally, use ```qqnorm()``` to estimate normality.
#
# Repeat this code, using different sample numbers (e.g. 9 and 25) and numbers of averages larger than 100.
#
y <- rnorm(100)
plot(density(y), type="l",ylim=c(0,1))
av <- numeric(100)
for (i in 1:100) {
av[i] <- mean(rnorm(4))
}
lines(density(av), col=2)
# ##### Question I: <u>Why is the red distribution more narrow than the black one?</u>
#
# _Answer_
#
# ##### Question II: <u>What happens when increasing the number of samples?</u>
#
# _Answer_
#
# ##### Question III: <u>What happens when increasing the number of averages?</u>
#
# _Answer_
# ### Exercise 10
# In Exercise 9, we calculated the mean of normally distributed variables. But the central limit theorem applies to almost any distribution. Show this by calculating the mean distribution of $n$ uniformly distributed variables (```runif(n)```), log-normally distributed ones (```rlnorm(n)```) and exponentially distributed ones (```rexp(n,rate=1)```) by changing the script in Exercise 9 accordingly.
#
# +
# add your code here:
# -
# ##### Question I: <u>How much do you need to increase the number of samples and averages to reach a decent normal distribution (give the numbers for each type of distribution separately)?</u>
#
# _Answer_
# ### Exercise 11
# It is also possible to take random samples, usually with replacement, from a vector of values, i.e. from an empirical distribution. This is the bootstrap concept. Again, it may be of interest to study the sampling distributions of means of different sizes. Consider the distribution of heights of female Adelaide University students, in the data frame ```survey``` (_MASS_ package). The script below takes 1000 bootstrap samples of size 4, calculating the mean for each such sample.
#
# Repeat the procedure, taking samples of size 9 and 16. In each case use a density plot and ```qqnorm()``` to display the (empirical) sampling distribution.
#
library(MASS)
y <- na.omit(survey[survey$Sex == "Female", "Height"])
av <- numeric(1000)
for (i in 1:1000)
av[i] <- mean(sample(y, 4, replace=T))
# ##### Question I: <u>What do you observe?</u>
#
# _Answer_
#
# ##### Question II: <u>Give a reason why you observe this change:</u>
#
# _Answer_
# ### Exercise 12
# Generate random numbers from a normal distribution with a sequential dependence.
#
# Try to understand the definition of y. The autocorrelation function (```acf()``` in R) calculates the dependence within a series (see also http://en.wikipedia.org/wiki/Autocorrelation). Apply this function to both data sets and check whether there is a consistent pattern for the correlated data set. Vary the number of data points and repeat the experiment several times to get a feeling for how an autocorrelation function can look.
#
i <- 51  # number of data points (same default as in Exercise 13)
y1 <- rnorm(i)
y <- y1[-1] + y1[-i]
acf(y1)
acf(y)
# ##### Question I: <u>What is the main difference when introducing the above correlation?</u>
#
# _Answer_
# ### Exercise 13
# See below the function that calculates the correlated data set of Exercise 12. The input of the function is the number of data points with a default value of 51.
#
# Create a ```for``` loop that calculates the sum and the mean of the correlated data set 1000 times. Check whether the sum and the mean are normally distributed.
#
# +
par(mfrow=c(2,1))
corrdat <- function(i=51) {
y1 <- rnorm(i)
y <- y1[-1] + y1[-i]
return(y)
}
means <- sums <- numeric(1000)
for (j in 1:1000) {
    # add your code here:
}
# -
# ##### Question I: <u>What do the results suggest?</u>
#
# _Answer_
# ### Exercise 14
# Take the artificial count data for e.g. the number of tumors in 7 rats suffering from a certain type of cancer.
#
# Enter the data and compute mean and variance. In order to check whether a Poisson model would be appropriate, draw seven random values from the corresponding Poisson distribution (```lambda=78.3```). Take their mean and variance and compare them to the artificial data.
# Calculate the distribution of mean and variance, plot their histograms and check whether mean and variance from the artificial data are within the main core of the distributions.
#
dat <- c(87, 53, 72, 90, 78, 85, 83)
rdat <- rpois(7, lambda=78.3)
# to go on from here
# ##### Question I: <u>How well does the Poisson model fit the data?</u>
#
# _Answer_
| E_Biostatistics/Playground/Basic statistics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from PIL import Image, ImageFont, ImageDraw, ImageEnhance, ImageTransform, ImageFilter
import random

def quad(img):
    # random offset (in pixels) applied to each corner of the source quadrilateral
    fact = 100
    mn = [random.randint(0, fact) for i in range(8)]
    w, h = img.size
    img = img.filter(ImageFilter.GaussianBlur(radius=1))
    # QUAD maps the four perturbed corners (top-left, bottom-left, bottom-right, top-right)
    # of the source image onto the output rectangle
    img = img.transform(size=img.size,
                        data=(mn[0], mn[1], mn[2], h - mn[3], w - mn[4], h - mn[5], w - mn[6], mn[7]),
                        method=Image.QUAD)
    return img

img = Image.open('./eng/que20.png')
# -
import os

# apply the random quad distortion to every image in ./eng and save a copy with a 'new' suffix
for i in os.listdir('./eng'):
    name = './eng/' + i
    img = Image.open(name)
    img = quad(img)
    img.save(name[:-4] + 'new.png')
| stitching.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # A2C (train)
# [Deep Reinforcement Learning with Policy Gradients and Actor-Critic Methods](https://www.datahubbs.com/policy-gradients-and-advantage-actor-critic/)
# [Two-Headed A2C Network in PyTorch](https://www.datahubbs.com/two-headed-a2c-network-in-pytorch/)
# ## 1. Setup
# +
import config
import gym
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import sys
from collections import deque
from a2c import A2C
from preprocessing import *
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
print(f'sys.version: {sys.version}')
print(f'skimage.version: {skimage.__version__}')
print(f'tf.version: {tf.__version__}')
# -
# ## 2. Look at our env
env = gym.make(config.GAME_NAME)
state = env.reset()
print(f'Game: {config.GAME_NAME}')
print('A state (in env, not in our dqn) is represented by a frame')
print(type(state), state.dtype)
print(state.shape)
print(f'Num_Actions: {env.action_space.n}')
action_meanings = env.unwrapped.get_action_meanings()
for i in range(env.action_space.n):
print(f'{i}: {action_meanings[i]}')
# __Look how the rgb frame is preprocessed__
# +
env.reset()
rgb_frame, _, _, _ = env.step(1)
preprocessed_frame = preprocess_frame(rgb_frame)
print(f'initial: {rgb_frame.dtype}')
print(f'preprocessed_frame: {preprocessed_frame.dtype}'
f', [{preprocessed_frame.min()}, {preprocessed_frame.max()}]')
plt.figure()
plt.subplot(1, 2, 1)
plt.title(f'initial: {rgb_frame.shape}')
plt.axis('off')
plt.imshow(rgb_frame)
plt.subplot(1, 2, 2)
plt.title(f'preprocessed: {preprocessed_frame.shape}')
plt.axis('off')
plt.imshow(preprocessed_frame, cmap='gray')
plt.show()
# -
# ## 3. A2C
# ### CNN architecture
# 1. Conv2d(filters=32, kernel_size=8, strides=4, padding='valid', activation=ReLU)
# 2. Conv2d(filters=64, kernel_size=4, strides=2, padding='valid', activation=ReLU)
# 3. Conv2d(filters=64, kernel_size=3, strides=1, padding='valid', activation=ReLU) -> Flatten()
# 4. FC(512, activation=ReLU)
# - Actor: FC(NUM_ACTIONS, activation=Linear) + Softmax()
# - Critic: FC(1, activation=Linear)
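# A hedged sketch of this two-headed layout (illustrative only, not the actual `A2C` class from `a2c.py`, which is built with TF1 placeholders): a shared convolutional trunk feeds both the actor head (softmax over actions) and the critic head (scalar state value). The 84x84 frame size, the stack size of 4 and the `tf.keras` API are assumptions of the sketch; the layer sizes follow the list above.
# +
def build_a2c_sketch(frame_h=84, frame_w=84, stack_size=4, num_actions=4):
    # shared convolutional trunk
    inputs = tf.keras.Input(shape=(frame_h, frame_w, stack_size))
    x = tf.keras.layers.Conv2D(32, 8, strides=4, activation='relu')(inputs)
    x = tf.keras.layers.Conv2D(64, 4, strides=2, activation='relu')(x)
    x = tf.keras.layers.Conv2D(64, 3, strides=1, activation='relu')(x)
    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dense(512, activation='relu')(x)
    # actor head: action probabilities; critic head: state-value estimate
    actor = tf.keras.layers.Dense(num_actions, activation='softmax', name='actor')(x)
    critic = tf.keras.layers.Dense(1, name='critic')(x)
    return tf.keras.Model(inputs, [actor, critic])
# -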
# ### Instantiate A2C net
# +
tf.reset_default_graph()
a2c = A2C(action_size=env.action_space.n)
# -
# ## 4. Training
# ### A2C (training algorithm)
def a2c_training(a2c, env):
# to save some ckpts
saver = tf.train.Saver(max_to_keep=50)
with tf.Session() as sess:
# init tf variables
sess.run(tf.global_variables_initializer())
# some vars for train_stats
step = 0
iters_per_episode = []
reward_per_episode = []
actor_loss_per_iter = []
critic_loss_per_iter = []
for episode in range(1, config.NUM_EPISODES + 1):
episode_step = 0
done = False
stacked_frames = None
episode_rewards = []
# new episode
state = env.reset()
stacked_frames_hwc, stacked_frames = stack_frames(stacked_frames, state,
is_new_episode=True)
while not done:
episode_step += 1
step += 1
# current state
current_state = stacked_frames_hwc[:,:,-config.STACK_SIZE:]
# get action probs from the actor
action_probs = sess.run(a2c.predicted_probs,
feed_dict={
a2c.input_ph: current_state[np.newaxis]
})
# sample an action using action_probs
action = np.random.choice(env.action_space.n, p=action_probs.squeeze())
# take an action
next_state, reward, done, _ = env.step(action)
episode_rewards.append(reward)
# next_state
stacked_frames_hwc, stacked_frames = stack_frames(stacked_frames, next_state,
is_new_episode=False)
next_state = stacked_frames_hwc[:,:,-config.STACK_SIZE:]
# prepare target for TD error
td_target = None
if done:
td_target = reward
else:
next_state_value = sess.run(a2c.predicted_value,
feed_dict={
a2c.input_ph: next_state[np.newaxis]
})
td_target = reward + config.GAMMA * next_state_value.squeeze()
# optimization
optim_t = sess.run([a2c.actor_loss,
a2c.critic_loss,
a2c.actor_optimization_step,
a2c.critic_optimization_step],
feed_dict={
a2c.input_ph: current_state[np.newaxis],
a2c.action_ph: action,
a2c.target_ph: td_target
})
actor_loss, critic_loss = optim_t[:2]
actor_loss_per_iter.append(actor_loss.squeeze())
critic_loss_per_iter.append(critic_loss.squeeze())
if done:
# the end of episode
total_reward = np.sum(episode_rewards)
reward_per_episode.append(total_reward)
iters_per_episode.append(episode_step)
print(f'Episode: {episode}, '
f'Total reward: {total_reward}, '
f'Episode steps: {episode_step}\n'
f'Total training steps: {step}')
if (episode in [1, config.NUM_EPISODES]) or (episode % config.SAVE_EVERY == 0):
saver.save(sess, f'{config.SAVE_PATH}/model_episode_{episode}.ckpt')
print('Model saved')
return iters_per_episode, reward_per_episode, actor_loss_per_iter, critic_loss_per_iter
# %%time
# !rm -rf $config.SAVE_PATH
train_stats = a2c_training(a2c, env)
# #### Training stats
iters_per_episode, reward_per_episode = train_stats[:2]
actor_loss_per_iter, critic_loss_per_iter = train_stats[2:]
# #### Some plots
# +
episode_range = np.arange(len(iters_per_episode)) + 1
iter_range = np.arange(len(actor_loss_per_iter)) + 1
steps_to_skip = 1
plt.figure(figsize=(18, 10))
plt.subplot(2, 2, 1)
plt.title('Iterations per episode')
plt.plot(episode_range, iters_per_episode)
plt.ylabel('iterations')
plt.xlabel('episode')
plt.subplot(2, 2, 2)
plt.title('Reward per episode')
plt.plot(episode_range, reward_per_episode)
plt.ylabel('reward')
plt.xlabel('episode')
plt.subplot(2, 2, 3)
plt.title('Actor loss')
plt.plot(iter_range[::steps_to_skip] // steps_to_skip,
actor_loss_per_iter[::steps_to_skip])
plt.ylabel('actor loss')
plt.xlabel(f'step // {steps_to_skip}')
plt.subplot(2, 2, 4)
plt.title('Critic loss')
plt.plot(iter_range[::steps_to_skip] // steps_to_skip,
critic_loss_per_iter[::steps_to_skip])
plt.ylabel('critic loss')
plt.xlabel(f'step // {steps_to_skip}')
plt.show()
| 2018-2019/assignment 9 (A2C)/A2C_train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Table Visualization
#
# This section demonstrates visualization of tabular data using the [Styler][styler]
# class. For information on visualization with charting please see [Chart Visualization][viz]. This document is written as a Jupyter Notebook, and can be viewed or downloaded [here][download].
#
# [styler]: ../reference/api/pandas.io.formats.style.Styler.rst
# [viz]: visualization.rst
# [download]: https://nbviewer.ipython.org/github/pandas-dev/pandas/blob/master/doc/source/user_guide/style.ipynb
# ## Styler Object and HTML
#
# Styling should be performed after the data in a DataFrame has been processed. The [Styler][styler] creates an HTML `<table>` and leverages CSS styling language to manipulate many parameters including colors, fonts, borders, background, etc. See [here][w3schools] for more information on styling HTML tables. This allows a lot of flexibility out of the box, and even enables web developers to integrate DataFrames into their existing user interface designs.
#
# The `DataFrame.style` attribute is a property that returns a [Styler][styler] object. It has a `_repr_html_` method defined on it so they are rendered automatically in Jupyter Notebook.
#
# [styler]: ../reference/api/pandas.io.formats.style.Styler.rst
# [w3schools]: https://www.w3schools.com/html/html_tables.asp
# + nbsphinx="hidden"
import matplotlib.pyplot
# We have this here to trigger matplotlib's font cache stuff.
# This cell is hidden from the output
# +
import pandas as pd
import numpy as np
import matplotlib as mpl
df = pd.DataFrame([[38.0, 2.0, 18.0, 22.0, 21, np.nan],[19, 439, 6, 452, 226,232]],
index=pd.Index(['Tumour (Positive)', 'Non-Tumour (Negative)'], name='Actual Label:'),
columns=pd.MultiIndex.from_product([['Decision Tree', 'Regression', 'Random'],['Tumour', 'Non-Tumour']], names=['Model:', 'Predicted:']))
df.style
# -
# The above output looks very similar to the standard DataFrame HTML representation. But the HTML here has already attached some CSS classes to each cell, even if we haven't yet created any styles. We can view these by calling the [.to_html()][tohtml] method, which returns the raw HTML as a string, which is useful for further processing or adding to a file - read on in [More about CSS and HTML](#More-About-CSS-and-HTML). Below we will show how we can use these to format the DataFrame to be more communicative. For example, here is how we can build `s`:
#
# [tohtml]: ../reference/api/pandas.io.formats.style.Styler.to_html.rst
# + nbsphinx="hidden"
# Hidden cell to just create the below example: code is covered throughout the guide.
s = df.style\
.hide_columns([('Random', 'Tumour'), ('Random', 'Non-Tumour')])\
.format('{:.0f}')\
.set_table_styles([{
'selector': '',
'props': 'border-collapse: separate;'
},{
'selector': 'caption',
'props': 'caption-side: bottom; font-size:1.3em;'
},{
'selector': '.index_name',
'props': 'font-style: italic; color: darkgrey; font-weight:normal;'
},{
'selector': 'th:not(.index_name)',
'props': 'background-color: #000066; color: white;'
},{
'selector': 'th.col_heading',
'props': 'text-align: center;'
},{
'selector': 'th.col_heading.level0',
'props': 'font-size: 1.5em;'
},{
'selector': 'th.col2',
'props': 'border-left: 1px solid white;'
},{
'selector': '.col2',
'props': 'border-left: 1px solid #000066;'
},{
'selector': 'td',
'props': 'text-align: center; font-weight:bold;'
},{
'selector': '.true',
'props': 'background-color: #e6ffe6;'
},{
'selector': '.false',
'props': 'background-color: #ffe6e6;'
},{
'selector': '.border-red',
'props': 'border: 2px dashed red;'
},{
'selector': '.border-green',
'props': 'border: 2px dashed green;'
},{
'selector': 'td:hover',
'props': 'background-color: #ffffb3;'
}])\
.set_td_classes(pd.DataFrame([['true border-green', 'false', 'true', 'false border-red', '', ''],
['false', 'true', 'false', 'true', '', '']],
index=df.index, columns=df.columns))\
.set_caption("Confusion matrix for multiple cancer prediction models.")\
.set_tooltips(pd.DataFrame([['This model has a very strong true positive rate', '', '', "This model's total number of false negatives is too high", '', ''],
['', '', '', '', '', '']],
index=df.index, columns=df.columns),
css_class='pd-tt', props=
'visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;'
'background-color: white; color: #000066; font-size: 0.8em;'
'transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;')
# -
s
# ## Formatting the Display
#
# ### Formatting Values
#
# Before adding styles it is useful to show that the [Styler][styler] can distinguish the *display* value from the *actual* value, in both data values and index or column headers. To control the display value, the text is printed in each cell as a string, and we can use the [.format()][formatfunc] and [.format_index()][formatfuncindex] methods to manipulate this according to a [format spec string][format] or a callable that takes a single value and returns a string. It is possible to define this for the whole table, or index, or for individual columns, or MultiIndex levels.
#
# Additionally, the format function has a **precision** argument to specifically help formatting floats, as well as **decimal** and **thousands** separators to support other locales, an **na_rep** argument to display missing data, and an **escape** argument to help displaying safe-HTML or safe-LaTeX. The default formatter is configured to adopt pandas' regular `display.precision` option, controllable using `with pd.option_context('display.precision', 2):`
#
# [styler]: ../reference/api/pandas.io.formats.style.Styler.rst
# [format]: https://docs.python.org/3/library/string.html#format-specification-mini-language
# [formatfunc]: ../reference/api/pandas.io.formats.style.Styler.format.rst
# [formatfuncindex]: ../reference/api/pandas.io.formats.style.Styler.format_index.rst
df.style.format(precision=0, na_rep='MISSING', thousands=" ",
formatter={('Decision Tree', 'Tumour'): "{:.2f}",
('Regression', 'Non-Tumour'): lambda x: "$ {:,.1f}".format(x*-1e6)
})
# Using Styler to manipulate the display is a useful feature because maintaining the indexing and data values for other purposes gives greater control. You do not have to overwrite your DataFrame to display it how you like. Here is an example of using the formatting functions whilst still relying on the underlying data for indexing and calculations.
# +
weather_df = pd.DataFrame(np.random.rand(10,2)*5,
index=pd.date_range(start="2021-01-01", periods=10),
columns=["Tokyo", "Beijing"])
def rain_condition(v):
if v < 1.75:
return "Dry"
elif v < 2.75:
return "Rain"
return "Heavy Rain"
def make_pretty(styler):
styler.set_caption("Weather Conditions")
styler.format(rain_condition)
styler.format_index(lambda v: v.strftime("%A"))
styler.background_gradient(axis=None, vmin=1, vmax=5, cmap="YlGnBu")
return styler
weather_df
# -
weather_df.loc["2021-01-04":"2021-01-08"].style.pipe(make_pretty)
# ### Hiding Data
#
# The index and column headers can be completely hidden, as well subselecting rows or columns that one wishes to exclude. Both these options are performed using the same methods.
#
# The index can be hidden from rendering by calling [.hide_index()][hideidx] without any arguments, which might be useful if your index is integer based. Similarly column headers can be hidden by calling [.hide_columns()][hidecols] without any arguments.
#
# Specific rows or columns can be hidden from rendering by calling the same [.hide_index()][hideidx] or [.hide_columns()][hidecols] methods and passing in a row/column label, a list-like or a slice of row/column labels for the ``subset`` argument.
#
# Hiding does not change the integer arrangement of CSS classes, e.g. hiding the first two columns of a DataFrame means the column class indexing will start at `col2`, since `col0` and `col1` are simply ignored.
#
# We can update our `Styler` object from before to hide some data and format the values.
#
# [hideidx]: ../reference/api/pandas.io.formats.style.Styler.hide_index.rst
# [hidecols]: ../reference/api/pandas.io.formats.style.Styler.hide_columns.rst
s = df.style.format('{:.0f}').hide_columns([('Random', 'Tumour'), ('Random', 'Non-Tumour')])
s
# + nbsphinx="hidden"
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_hide')
# -
# ## Methods to Add Styles
#
# There are **3 primary methods of adding custom CSS styles** to [Styler][styler]:
#
# - Using [.set_table_styles()][table] to control broader areas of the table with specified internal CSS. Although table styles allow the flexibility to add CSS selectors and properties controlling all individual parts of the table, they are unwieldy for individual cell specifications. Also, note that table styles cannot be exported to Excel.
# - Using [.set_td_classes()][td_class] to directly link either external CSS classes to your data cells or link the internal CSS classes created by [.set_table_styles()][table]. See [here](#Setting-Classes-and-Linking-to-External-CSS). These cannot be used on column header rows or indexes, and also won't export to Excel.
# - Using the [.apply()][apply] and [.applymap()][applymap] functions to add direct internal CSS to specific data cells. See [here](#Styler-Functions). As of v1.4.0 there are also methods that work directly on column header rows or indexes; [.apply_index()][applyindex] and [.applymap_index()][applymapindex]. Note that only these methods add styles that will export to Excel. These methods work in a similar way to [DataFrame.apply()][dfapply] and [DataFrame.applymap()][dfapplymap].
#
# [table]: ../reference/api/pandas.io.formats.style.Styler.set_table_styles.rst
# [styler]: ../reference/api/pandas.io.formats.style.Styler.rst
# [td_class]: ../reference/api/pandas.io.formats.style.Styler.set_td_classes.rst
# [apply]: ../reference/api/pandas.io.formats.style.Styler.apply.rst
# [applymap]: ../reference/api/pandas.io.formats.style.Styler.applymap.rst
# [applyindex]: ../reference/api/pandas.io.formats.style.Styler.apply_index.rst
# [applymapindex]: ../reference/api/pandas.io.formats.style.Styler.applymap_index.rst
# [dfapply]: ../reference/api/pandas.DataFrame.apply.rst
# [dfapplymap]: ../reference/api/pandas.DataFrame.applymap.rst
# ## Table Styles
#
# Table styles are flexible enough to control all individual parts of the table, including column headers and indexes.
# However, they can be unwieldy to type for individual data cells or for any kind of conditional formatting, so we recommend that table styles are used for broad styling, such as entire rows or columns at a time.
#
# Table styles are also used to control features which can apply to the whole table at once such as creating a generic hover functionality. The `:hover` pseudo-selector, as well as other pseudo-selectors, can only be used this way.
#
# To replicate the normal format of CSS selectors and properties (attribute value pairs), e.g.
#
# ```
# tr:hover {
# background-color: #ffff99;
# }
# ```
#
# the necessary format to pass styles to [.set_table_styles()][table] is as a list of dicts, each with a CSS-selector tag and CSS-properties. Properties can either be a list of 2-tuples, or a regular CSS-string, for example:
#
# [table]: ../reference/api/pandas.io.formats.style.Styler.set_table_styles.rst
cell_hover = { # for row hover use <tr> instead of <td>
'selector': 'td:hover',
'props': [('background-color', '#ffffb3')]
}
index_names = {
'selector': '.index_name',
'props': 'font-style: italic; color: darkgrey; font-weight:normal;'
}
headers = {
'selector': 'th:not(.index_name)',
'props': 'background-color: #000066; color: white;'
}
s.set_table_styles([cell_hover, index_names, headers])
# + nbsphinx="hidden"
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_tab_styles1')
# -
# Next we just add a couple more styling artifacts targeting specific parts of the table. Be careful here, since we are *chaining methods* we need to explicitly instruct the method **not to** ``overwrite`` the existing styles.
s.set_table_styles([
{'selector': 'th.col_heading', 'props': 'text-align: center;'},
{'selector': 'th.col_heading.level0', 'props': 'font-size: 1.5em;'},
{'selector': 'td', 'props': 'text-align: center; font-weight: bold;'},
], overwrite=False)
# + nbsphinx="hidden"
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_tab_styles2')
# -
# As a convenience method (*since version 1.2.0*) we can also pass a **dict** to [.set_table_styles()][table] which contains row or column keys. Behind the scenes Styler just indexes the keys and adds relevant `.col<m>` or `.row<n>` classes as necessary to the given CSS selectors.
#
# [table]: ../reference/api/pandas.io.formats.style.Styler.set_table_styles.rst
s.set_table_styles({
('Regression', 'Tumour'): [{'selector': 'th', 'props': 'border-left: 1px solid white'},
{'selector': 'td', 'props': 'border-left: 1px solid #000066'}]
}, overwrite=False, axis=0)
# + nbsphinx="hidden"
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('xyz01')
# -
# ## Setting Classes and Linking to External CSS
#
# If you have designed a website then it is likely you will already have an external CSS file that controls the styling of table and cell objects within it. You may want to use these native files rather than duplicate all the CSS in python (and duplicate any maintenance work).
#
# ### Table Attributes
#
# It is very easy to add a `class` to the main `<table>` using [.set_table_attributes()][tableatt]. This method can also attach inline styles - read more in [CSS Hierarchies](#CSS-Hierarchies).
#
# [tableatt]: ../reference/api/pandas.io.formats.style.Styler.set_table_attributes.rst
out = s.set_table_attributes('class="my-table-cls"').to_html()
print(out[out.find('<table'):][:109])
# ### Data Cell CSS Classes
#
# *New in version 1.2.0*
#
# The [.set_td_classes()][tdclass] method accepts a DataFrame with matching indices and columns to the underlying [Styler][styler]'s DataFrame. That DataFrame will contain strings as css-classes to add to individual data cells: the `<td>` elements of the `<table>`. Rather than use external CSS we will create our classes internally and add them to table style. We will save adding the borders until the [section on tooltips](#Tooltips).
#
# [tdclass]: ../reference/api/pandas.io.formats.style.Styler.set_td_classes.rst
# [styler]: ../reference/api/pandas.io.formats.style.Styler.rst
s.set_table_styles([ # create internal CSS classes
{'selector': '.true', 'props': 'background-color: #e6ffe6;'},
{'selector': '.false', 'props': 'background-color: #ffe6e6;'},
], overwrite=False)
cell_color = pd.DataFrame([['true ', 'false ', 'true ', 'false '],
['false ', 'true ', 'false ', 'true ']],
index=df.index,
columns=df.columns[:4])
s.set_td_classes(cell_color)
# + nbsphinx="hidden"
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_classes')
# -
# ## Styler Functions
#
# ### Acting on Data
#
# We use the following methods to pass your style functions. Both of those methods take a function (and some other keyword arguments) and apply it to the DataFrame in a certain way, rendering CSS styles.
#
# - [.applymap()][applymap] (elementwise): accepts a function that takes a single value and returns a string with the CSS attribute-value pair.
# - [.apply()][apply] (column-/row-/table-wise): accepts a function that takes a Series or DataFrame and returns a Series, DataFrame, or numpy array with an identical shape where each element is a string with a CSS attribute-value pair. This method passes each column or row of your DataFrame one-at-a-time or the entire table at once, depending on the `axis` keyword argument. For columnwise use `axis=0`, rowwise use `axis=1`, and for the entire table at once use `axis=None`.
#
# This method is powerful for applying multiple, complex logic to data cells. We create a new DataFrame to demonstrate this.
#
# [apply]: ../reference/api/pandas.io.formats.style.Styler.apply.rst
# [applymap]: ../reference/api/pandas.io.formats.style.Styler.applymap.rst
np.random.seed(0)
df2 = pd.DataFrame(np.random.randn(10,4), columns=['A','B','C','D'])
df2.style
# For example we can build a function that colors text if it is negative, and chain this with a function that partially fades cells of negligible value. Since this looks at each element in turn we use ``applymap``.
def style_negative(v, props=''):
return props if v < 0 else None
s2 = df2.style.applymap(style_negative, props='color:red;')\
.applymap(lambda v: 'opacity: 20%;' if (v < 0.3) and (v > -0.3) else None)
s2
# + nbsphinx="hidden"
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s2.set_uuid('after_applymap')
# -
# We can also build a function that highlights the maximum value across rows, cols, and the DataFrame all at once. In this case we use ``apply``. Below we highlight the maximum in a column.
def highlight_max(s, props=''):
return np.where(s == np.nanmax(s.values), props, '')
s2.apply(highlight_max, props='color:white;background-color:darkblue', axis=0)
# + nbsphinx="hidden"
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s2.set_uuid('after_apply')
# -
# We can use the same function across the different axes, highlighting here the DataFrame maximum in purple, and row maximums in pink.
s2.apply(highlight_max, props='color:white;background-color:pink;', axis=1)\
.apply(highlight_max, props='color:white;background-color:purple', axis=None)
# + nbsphinx="hidden"
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s2.set_uuid('after_apply_again')
# -
# This last example shows how some styles have been overwritten by others. In general the most recent style applied is active but you can read more in the [section on CSS hierarchies](#CSS-Hierarchies). You can also apply these styles to more granular parts of the DataFrame - read more in section on [subset slicing](#Finer-Control-with-Slicing).
#
# It is possible to replicate some of this functionality using just classes but it can be more cumbersome. See [item 3) of Optimization](#Optimization)
#
# <div class="alert alert-info">
#
# *Debugging Tip*: If you're having trouble writing your style function, try just passing it into ``DataFrame.apply``. Internally, ``Styler.apply`` uses ``DataFrame.apply`` so the result should be the same, and with ``DataFrame.apply`` you will be able to inspect the CSS string output of your intended function in each cell.
#
# </div>
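# For example, a quick check along these lines (reusing the `highlight_max` function and `df2` defined above) returns the raw CSS strings rather than a rendered table, so you can see exactly what each cell would receive:
df2.apply(highlight_max, props='color:red;', axis=0)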
# ### Acting on the Index and Column Headers
#
# A similar application is achieved for headers by using:
#
# - [.applymap_index()][applymapindex] (elementwise): accepts a function that takes a single value and returns a string with the CSS attribute-value pair.
# - [.apply_index()][applyindex] (level-wise): accepts a function that takes a Series and returns a Series, or numpy array with an identical shape where each element is a string with a CSS attribute-value pair. This method passes each level of your Index one-at-a-time. To style the index use `axis=0` and to style the column headers use `axis=1`.
#
# You can select a `level` of a `MultiIndex` but currently no similar `subset` application is available for these methods.
#
# [applyindex]: ../reference/api/pandas.io.formats.style.Styler.apply_index.rst
# [applymapindex]: ../reference/api/pandas.io.formats.style.Styler.applymap_index.rst
s2.applymap_index(lambda v: "color:pink;" if v>4 else "color:darkblue;", axis=0)
s2.apply_index(lambda s: np.where(s.isin(["A", "B"]), "color:pink;", "color:darkblue;"), axis=1)
# ## Tooltips and Captions
#
# Table captions can be added with the [.set_caption()][caption] method. You can use table styles to control the CSS relevant to the caption.
#
# [caption]: ../reference/api/pandas.io.formats.style.Styler.set_caption.rst
s.set_caption("Confusion matrix for multiple cancer prediction models.")\
.set_table_styles([{
'selector': 'caption',
'props': 'caption-side: bottom; font-size:1.25em;'
}], overwrite=False)
# + nbsphinx="hidden"
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_caption')
# -
# Adding tooltips (*since version 1.3.0*) can be done using the [.set_tooltips()][tooltips] method in the same way you can add CSS classes to data cells by providing a string based DataFrame with intersecting indices and columns. You don't have to specify a `css_class` name or any css `props` for the tooltips, since there are standard defaults, but the option is there if you want more visual control.
#
# [tooltips]: ../reference/api/pandas.io.formats.style.Styler.set_tooltips.rst
tt = pd.DataFrame([['This model has a very strong true positive rate',
"This model's total number of false negatives is too high"]],
index=['Tumour (Positive)'], columns=df.columns[[0,3]])
s.set_tooltips(tt, props='visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;'
'background-color: white; color: #000066; font-size: 0.8em;'
'transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;')
# + nbsphinx="hidden"
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_tooltips')
# -
# The only thing left to do for our table is to add the highlighting borders to draw the audience attention to the tooltips. We will create internal CSS classes as before using table styles. **Setting classes always overwrites** so we need to make sure we add the previous classes.
s.set_table_styles([ # create internal CSS classes
{'selector': '.border-red', 'props': 'border: 2px dashed red;'},
{'selector': '.border-green', 'props': 'border: 2px dashed green;'},
], overwrite=False)
cell_border = pd.DataFrame([['border-green ', ' ', ' ', 'border-red '],
[' ', ' ', ' ', ' ']],
index=df.index,
columns=df.columns[:4])
s.set_td_classes(cell_color + cell_border)
# + nbsphinx="hidden"
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_borders')
# -
# ## Finer Control with Slicing
#
# The examples we have shown so far for the `Styler.apply` and `Styler.applymap` functions have not demonstrated the use of the ``subset`` argument. This is a useful argument which permits a lot of flexibility: it allows you to apply styles to specific rows or columns, without having to code that logic into your `style` function.
#
# The value passed to `subset` behaves similarly to slicing a DataFrame:
#
# - A scalar is treated as a column label
# - A list (or Series or NumPy array) is treated as multiple column labels
# - A tuple is treated as `(row_indexer, column_indexer)`
#
# Consider using `pd.IndexSlice` to construct the tuple for the last one. We will create a MultiIndexed DataFrame to demonstrate the functionality.
df3 = pd.DataFrame(np.random.randn(4,4),
pd.MultiIndex.from_product([['A', 'B'], ['r1', 'r2']]),
columns=['c1','c2','c3','c4'])
df3
# We will use subset to highlight the maximum in the third and fourth columns with red text. We will highlight the subset sliced region in yellow.
slice_ = ['c3', 'c4']
df3.style.apply(highlight_max, props='color:red;', axis=0, subset=slice_)\
.set_properties(**{'background-color': '#ffffb3'}, subset=slice_)
# If combined with the ``IndexSlice`` as suggested then it can index across both dimensions with greater flexibility.
idx = pd.IndexSlice
slice_ = idx[idx[:,'r1'], idx['c2':'c4']]
df3.style.apply(highlight_max, props='color:red;', axis=0, subset=slice_)\
.set_properties(**{'background-color': '#ffffb3'}, subset=slice_)
# This also provides the flexibility to sub-select rows when used with `axis=1`.
slice_ = idx[idx[:,'r2'], :]
df3.style.apply(highlight_max, props='color:red;', axis=1, subset=slice_)\
.set_properties(**{'background-color': '#ffffb3'}, subset=slice_)
# There is also scope to provide **conditional filtering**.
#
# Suppose we want to highlight the maximum across columns 2 and 4 only in the case that the sum of columns 1 and 3 is less than -2.0 *(essentially excluding rows* `(:,'r2')`*)*.
slice_ = idx[idx[(df3['c1'] + df3['c3']) < -2.0], ['c2', 'c4']]
df3.style.apply(highlight_max, props='color:red;', axis=1, subset=slice_)\
.set_properties(**{'background-color': '#ffffb3'}, subset=slice_)
# Only label-based slicing is supported right now, not positional, and not callables.
#
# If your style function uses a `subset` or `axis` keyword argument, consider wrapping your function in a `functools.partial`, partialing out that keyword.
#
# ```python
# my_func2 = functools.partial(my_func, subset=42)
# ```
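# As a fuller, hypothetical sketch (the `highlight_over` function and its threshold keyword are invented purely for illustration, reusing `df3` from above):
#
# ```python
# import functools
#
# def highlight_over(data, axis=0.0):  # 'axis' is a threshold here, clashing with Styler.apply's keyword
#     return np.where(data > axis, 'color:red;', '')
#
# df3.style.apply(functools.partial(highlight_over, axis=1.0), axis=None)
# ```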
# ## Optimization
#
# Generally, for smaller tables and most cases, the rendered HTML does not need to be optimized, and we don't really recommend it. There are two cases where it is worth considering:
#
# - If you are rendering and styling a very large HTML table, certain browsers have performance issues.
# - If you are using ``Styler`` to dynamically create part of online user interfaces and want to improve network performance.
#
# Here we recommend the following steps to implement:
# ### 1. Remove UUID and cell_ids
#
# Ignore the `uuid` and set `cell_ids` to `False`. This will prevent unnecessary HTML.
# <div class="alert alert-warning">
#
# <font color=red>This is sub-optimal:</font>
#
# </div>
df4 = pd.DataFrame([[1,2],[3,4]])
s4 = df4.style
# <div class="alert alert-info">
#
# <font color=green>This is better:</font>
#
# </div>
from pandas.io.formats.style import Styler
s4 = Styler(df4, uuid_len=0, cell_ids=False)
# ### 2. Use table styles
#
# Use table styles where possible (e.g. for all cells or rows or columns at a time) since the CSS is nearly always more efficient than other formats.
# <div class="alert alert-warning">
#
# <font color=red>This is sub-optimal:</font>
#
# </div>
props = 'font-family: "Times New Roman", Times, serif; color: #e83e8c; font-size:1.3em;'
df4.style.applymap(lambda x: props, subset=[1])
# <div class="alert alert-info">
#
# <font color=green>This is better:</font>
#
# </div>
df4.style.set_table_styles([{'selector': 'td.col1', 'props': props}])
# ### 3. Set classes instead of using Styler functions
#
# For large DataFrames where the same style is applied to many cells it can be more efficient to declare the styles as classes and then apply those classes to data cells, rather than directly applying styles to cells. It is, however, probably still easier to use the Styler function api when you are not concerned about optimization.
# <div class="alert alert-warning">
#
# <font color=red>This is sub-optimal:</font>
#
# </div>
df2.style.apply(highlight_max, props='color:white;background-color:darkblue;', axis=0)\
.apply(highlight_max, props='color:white;background-color:pink;', axis=1)\
.apply(highlight_max, props='color:white;background-color:purple', axis=None)
# <div class="alert alert-info">
#
# <font color=green>This is better:</font>
#
# </div>
build = lambda x: pd.DataFrame(x, index=df2.index, columns=df2.columns)
cls1 = build(df2.apply(highlight_max, props='cls-1 ', axis=0))
cls2 = build(df2.apply(highlight_max, props='cls-2 ', axis=1, result_type='expand').values)
cls3 = build(highlight_max(df2, props='cls-3 '))
df2.style.set_table_styles([
{'selector': '.cls-1', 'props': 'color:white;background-color:darkblue;'},
{'selector': '.cls-2', 'props': 'color:white;background-color:pink;'},
{'selector': '.cls-3', 'props': 'color:white;background-color:purple;'}
]).set_td_classes(cls1 + cls2 + cls3)
# ### 4. Don't use tooltips
#
# Tooltips require `cell_ids` to work and they generate extra HTML elements for *every* data cell.
# ### 5. If every byte counts use string replacement
#
# You can remove unnecessary HTML, or shorten the default class names by replacing the default css dict. You can read a little more about CSS [below](#More-About-CSS-and-HTML).
my_css = {
"row_heading": "",
"col_heading": "",
"index_name": "",
"col": "c",
"row": "r",
"col_trim": "",
"row_trim": "",
"level": "l",
"data": "",
"blank": "",
}
html = Styler(df4, uuid_len=0, cell_ids=False)
html.set_table_styles([{'selector': 'td', 'props': props},
{'selector': '.c1', 'props': 'color:green;'},
{'selector': '.l0', 'props': 'color:blue;'}],
css_class_names=my_css)
print(html.to_html())
html
# ## Builtin Styles
# Some styling functions are common enough that we've "built them in" to the `Styler`, so you don't have to write them and apply them yourself. The current list of such functions is:
#
# - [.highlight_null][nullfunc]: for use with identifying missing data.
# - [.highlight_min][minfunc] and [.highlight_max][maxfunc]: for use with identifying extremities in data.
# - [.highlight_between][betweenfunc] and [.highlight_quantile][quantilefunc]: for use with identifying classes within data.
# - [.background_gradient][bgfunc]: a flexible method for highlighting cells based on their, or other, values on a numeric scale.
# - [.text_gradient][textfunc]: similar method for highlighting text based on their, or other, values on a numeric scale.
# - [.bar][barfunc]: to display mini-charts within cell backgrounds.
#
# The individual documentation on each function often gives more examples of their arguments.
#
# [nullfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_null.rst
# [minfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_min.rst
# [maxfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_max.rst
# [betweenfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_between.rst
# [quantilefunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_quantile.rst
# [bgfunc]: ../reference/api/pandas.io.formats.style.Styler.background_gradient.rst
# [textfunc]: ../reference/api/pandas.io.formats.style.Styler.text_gradient.rst
# [barfunc]: ../reference/api/pandas.io.formats.style.Styler.bar.rst
# ### Highlight Null
df2.iloc[0,2] = np.nan
df2.iloc[4,3] = np.nan
df2.loc[:4].style.highlight_null(null_color='yellow')
# ### Highlight Min or Max
df2.loc[:4].style.highlight_max(axis=1, props='color:white; font-weight:bold; background-color:darkblue;')
# ### Highlight Between
# This method accepts ranges as float, or NumPy arrays or Series provided the indexes match.
left = pd.Series([1.0, 0.0, 1.0], index=["A", "B", "D"])
df2.loc[:4].style.highlight_between(left=left, right=1.5, axis=1, props='color:white; background-color:purple;')
# ### Highlight Quantile
# Useful for detecting the highest or lowest percentile values
df2.loc[:4].style.highlight_quantile(q_left=0.85, axis=None, color='yellow')
# ### Background Gradient and Text Gradient
# You can create "heatmaps" with the `background_gradient` and `text_gradient` methods. These require matplotlib, and we'll use [Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) to get a nice colormap.
# +
import seaborn as sns
cm = sns.light_palette("green", as_cmap=True)
df2.style.background_gradient(cmap=cm)
# -
df2.style.text_gradient(cmap=cm)
# [.background_gradient][bgfunc] and [.text_gradient][textfunc] have a number of keyword arguments to customise the gradients and colors. See the documentation.
#
# [bgfunc]: ../reference/api/pandas.io.formats.style.Styler.background_gradient.rst
# [textfunc]: ../reference/api/pandas.io.formats.style.Styler.text_gradient.rst
# ### Set properties
#
# Use `Styler.set_properties` when the style doesn't actually depend on the values. This is just a simple wrapper for `.applymap` where the function returns the same properties for all cells.
df2.loc[:4].style.set_properties(**{'background-color': 'black',
'color': 'lawngreen',
'border-color': 'white'})
# ### Bar charts
# You can include "bar charts" in your DataFrame.
df2.style.bar(subset=['A', 'B'], color='#d65f5f')
# Additional keyword arguments give more control on centering and positioning, and you can pass a list of `[color_negative, color_positive]` to highlight lower and higher values or a matplotlib colormap.
#
# To showcase an example here's how you can change the above with the new `align` option, combined with setting `vmin` and `vmax` limits, the `width` of the figure, and underlying css `props` of cells, leaving space to display the text and the bars. We also use `text_gradient` to color the text the same as the bars using a matplotlib colormap (although in this case the visualization is probably better without this additional effect).
df2.style.format('{:.3f}', na_rep="")\
.bar(align=0, vmin=-2.5, vmax=2.5, cmap="bwr", height=50,
width=60, props="width: 120px; border-right: 1px solid black;")\
.text_gradient(cmap="bwr", vmin=-2.5, vmax=2.5)
# The following example aims to highlight the behavior of the new `align` options:
# + nbsphinx="hidden"
# Hide the construction of the display chart from the user
import pandas as pd
from IPython.display import HTML
# Test series
test1 = pd.Series([-100,-60,-30,-20], name='All Negative')
test2 = pd.Series([-10,-5,0,90], name='Both Pos and Neg')
test3 = pd.Series([10,20,50,100], name='All Positive')
test4 = pd.Series([100, 103, 101, 102], name='Large Positive')
head = """
<table>
<thead>
<th>Align</th>
<th>All Negative</th>
<th>Both Neg and Pos</th>
<th>All Positive</th>
<th>Large Positive</th>
</thead>
    <tbody>
"""
aligns = ['left', 'right', 'zero', 'mid', 'mean', 99]
for align in aligns:
row = "<tr><th>{}</th>".format(align)
for series in [test1,test2,test3, test4]:
s = series.copy()
s.name=''
row += "<td>{}</td>".format(s.to_frame().style.hide_index().bar(align=align,
color=['#d65f5f', '#5fba7d'],
width=100).to_html()) #testn['width']
row += '</tr>'
head += row
head+= """
</tbody>
</table>"""
# -
HTML(head)
# ## Sharing styles
# Say you have a lovely style built up for a DataFrame, and now you want to apply the same style to a second DataFrame. Export the style with `df1.style.export`, and import it on the second DataFrame with `df2.style.use`
style1 = df2.style\
.applymap(style_negative, props='color:red;')\
.applymap(lambda v: 'opacity: 20%;' if (v < 0.3) and (v > -0.3) else None)\
.set_table_styles([{"selector": "th", "props": "color: blue;"}])\
.hide_index()
style1
style2 = df3.style
style2.use(style1.export())
style2
# Notice that you're able to share the styles even though they're data aware. The styles are re-evaluated on the new DataFrame they've been `use`d upon.
# ## Limitations
#
# - DataFrame only (use `Series.to_frame().style`)
# - The index and columns must be unique
# - No large repr, and construction performance isn't great; although we have some [HTML optimizations](#Optimization)
# - You can only style the *values*, not the index or columns (except with `table_styles` above)
# - You can only apply styles, you can't insert new HTML entities
#
# Some of these might be addressed in the future.
# ## Other Fun and Useful Stuff
#
# Here are a few interesting examples.
# ### Widgets
#
# `Styler` interacts pretty well with widgets. If you're viewing this online instead of running the notebook yourself, you're missing out on interactively adjusting the color palette.
from ipywidgets import widgets
@widgets.interact
def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0., 99.9), l=(0., 99.9)):
return df2.style.background_gradient(
cmap=sns.palettes.diverging_palette(h_neg=h_neg, h_pos=h_pos, s=s, l=l,
as_cmap=True)
)
# ### Magnify
def magnify():
return [dict(selector="th",
props=[("font-size", "4pt")]),
dict(selector="td",
props=[('padding', "0em 0em")]),
dict(selector="th:hover",
props=[("font-size", "12pt")]),
dict(selector="tr:hover td:hover",
props=[('max-width', '200px'),
('font-size', '12pt')])
]
# +
np.random.seed(25)
cmap = sns.diverging_palette(5, 250, as_cmap=True)
bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()
bigdf.style.background_gradient(cmap, axis=1)\
.set_properties(**{'max-width': '80px', 'font-size': '1pt'})\
.set_caption("Hover to magnify")\
.format(precision=2)\
.set_table_styles(magnify())
# -
# ### Sticky Headers
#
# If you display a large matrix or DataFrame in a notebook, but you want to always see the column and row headers you can use the [.set_sticky][sticky] method which manipulates the table styles CSS.
#
# [sticky]: ../reference/api/pandas.io.formats.style.Styler.set_sticky.rst
bigdf = pd.DataFrame(np.random.randn(16, 100))
bigdf.style.set_sticky(axis="index")
# It is also possible to stick MultiIndexes and even only specific levels.
bigdf.index = pd.MultiIndex.from_product([["A","B"],[0,1],[0,1,2,3]])
bigdf.style.set_sticky(axis="index", pixel_size=18, levels=[1,2])
# ### HTML Escaping
#
# Suppose you have to display HTML within HTML; that can be a bit of a pain when the renderer can't distinguish between the two. You can use the `escape` formatting option to handle this, and even use it within a formatter that contains HTML itself.
df4 = pd.DataFrame([['<div></div>', '"&other"', '<span></span>']])
df4.style
df4.style.format(escape="html")
df4.style.format('<a href="https://pandas.pydata.org" target="_blank">{}</a>', escape="html")
# ## Export to Excel
#
# Some support (*since version 0.20.0*) is available for exporting styled `DataFrames` to Excel worksheets using the `OpenPyXL` or `XlsxWriter` engines. CSS2.2 properties handled include:
#
# - `background-color`
# - `color`
# - `font-family`
# - `font-style`
# - `font-weight`
# - `text-align`
# - `text-decoration`
# - `vertical-align`
# - `white-space: nowrap`
#
#
# - Currently broken: `border-style`, `border-width`, `border-color` and their {`top`, `right`, `bottom`, `left` variants}
#
#
# - Only CSS2 named colors and hex colors of the form `#rgb` or `#rrggbb` are currently supported.
# - The following pseudo CSS properties are also available to set excel specific style properties:
# - `number-format`
#
# Table level styles, and data cell CSS-classes are not included in the export to Excel: individual cells must have their properties mapped by the `Styler.apply` and/or `Styler.applymap` methods.
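# For instance, the `number-format` pseudo property can be mapped onto cells like any other style; a minimal, hypothetical sketch (the file name is arbitrary):
#
# ```python
# df2.style.applymap(lambda v: 'number-format: 0.00;')\
#     .to_excel('styled_numbers.xlsx', engine='openpyxl')
# ```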
df2.style.\
applymap(style_negative, props='color:red;').\
highlight_max(axis=0).\
to_excel('styled.xlsx', engine='openpyxl')
# A screenshot of the output:
#
# 
#
# ## Export to LaTeX
#
# There is support (*since version 1.3.0*) to export `Styler` to LaTeX. The documentation for the [.to_latex][latex] method gives further detail and numerous examples.
#
# [latex]: ../reference/api/pandas.io.formats.style.Styler.to_latex.rst
# ## More About CSS and HTML
#
# The Cascading Style Sheet (CSS) language, which is designed to influence how a browser renders HTML elements, has its own peculiarities. It never reports errors: it just silently ignores them and doesn't render your objects how you intend, which can sometimes be frustrating. Here is a very brief primer on how ``Styler`` creates HTML and interacts with CSS, with advice on common pitfalls to avoid.
# ### CSS Classes and Ids
#
# The precise structure of the CSS `class` attached to each cell is as follows.
#
# - Cells with Index and Column names include `index_name` and `level<k>` where `k` is its level in a MultiIndex
# - Index label cells include
# + `row_heading`
# + `level<k>` where `k` is the level in a MultiIndex
# + `row<m>` where `m` is the numeric position of the row
# - Column label cells include
# + `col_heading`
# + `level<k>` where `k` is the level in a MultiIndex
# + `col<n>` where `n` is the numeric position of the column
# - Data cells include
# + `data`
# + `row<m>`, where `m` is the numeric position of the cell.
# + `col<n>`, where `n` is the numeric position of the cell.
# - Blank cells include `blank`
# - Trimmed cells include `col_trim` or `row_trim`
#
# The structure of the `id` is `T_uuid_level<k>_row<m>_col<n>` where `level<k>` is used only on headings, and headings will only have either `row<m>` or `col<n>` whichever is needed. By default we've also prepended each row/column identifier with a UUID unique to each DataFrame so that the style from one doesn't collide with the styling from another within the same notebook or page. You can read more about the use of UUIDs in [Optimization](#Optimization).
#
# We can see example of the HTML by calling the [.to_html()][tohtml] method.
#
# [tohtml]: ../reference/api/pandas.io.formats.style.Styler.to_html.rst
print(pd.DataFrame([[1,2],[3,4]], index=['i1', 'i2'], columns=['c1', 'c2']).style.to_html())
# ### CSS Hierarchies
#
# The examples have shown that when CSS styles overlap, the one that comes last in the HTML render takes precedence. So the following yield different results:
df4 = pd.DataFrame([['text']])
df4.style.applymap(lambda x: 'color:green;')\
.applymap(lambda x: 'color:red;')
df4.style.applymap(lambda x: 'color:red;')\
.applymap(lambda x: 'color:green;')
# This is only true for CSS rules that are equivalent in hierarchy, or importance. You can read more about [CSS specificity here](https://www.w3schools.com/css/css_specificity.asp) but for our purposes it suffices to summarize the key points:
#
# A CSS importance score for each HTML element is derived by starting at zero and adding:
#
# - 1000 for an inline style attribute
# - 100 for each ID
# - 10 for each attribute, class or pseudo-class
# - 1 for each element name or pseudo-element
#
# Let's use this to describe the action of the following configurations:
df4.style.set_uuid('a_')\
.set_table_styles([{'selector': 'td', 'props': 'color:red;'}])\
.applymap(lambda x: 'color:green;')
# This text is red because the generated selector `#T_a_ td` is worth 101 (ID plus element), whereas `#T_a_row0_col0` is only worth 100 (ID), so is considered inferior even though in the HTML it comes after the previous.
df4.style.set_uuid('b_')\
.set_table_styles([{'selector': 'td', 'props': 'color:red;'},
{'selector': '.cls-1', 'props': 'color:blue;'}])\
.applymap(lambda x: 'color:green;')\
.set_td_classes(pd.DataFrame([['cls-1']]))
# In the above case the text is blue because the selector `#T_b_ .cls-1` is worth 110 (ID plus class), which takes precedence.
df4.style.set_uuid('c_')\
.set_table_styles([{'selector': 'td', 'props': 'color:red;'},
{'selector': '.cls-1', 'props': 'color:blue;'},
{'selector': 'td.data', 'props': 'color:yellow;'}])\
.applymap(lambda x: 'color:green;')\
.set_td_classes(pd.DataFrame([['cls-1']]))
# Now that we have created another table style, the selector `#T_c_ td.data` (ID plus element plus class) gets bumped up to 111.
#
# If your style fails to be applied, and it's really frustrating, try the `!important` trump card.
df4.style.set_uuid('d_')\
.set_table_styles([{'selector': 'td', 'props': 'color:red;'},
{'selector': '.cls-1', 'props': 'color:blue;'},
{'selector': 'td.data', 'props': 'color:yellow;'}])\
.applymap(lambda x: 'color:green !important;')\
.set_td_classes(pd.DataFrame([['cls-1']]))
# Finally got that green text after all!
# ## Extensibility
#
# The core of pandas is, and will remain, its "high-performance, easy-to-use data structures".
# With that in mind, we hope that `DataFrame.style` accomplishes two goals
#
# - Provide an API that is pleasing to use interactively and is "good enough" for many tasks
# - Provide the foundations for dedicated libraries to build on
#
# If you build a great library on top of this, let us know and we'll [link](https://pandas.pydata.org/pandas-docs/stable/ecosystem.html) to it.
#
# ### Subclassing
#
# If the default template doesn't quite suit your needs, you can subclass Styler and extend or override the template.
# We'll show an example of extending the default template to insert a custom header before each table.
from jinja2 import Environment, ChoiceLoader, FileSystemLoader
from IPython.display import HTML
from pandas.io.formats.style import Styler
# We'll use the following template:
with open("templates/myhtml.tpl") as f:
print(f.read())
# Now that we've created a template, we need to set up a subclass of ``Styler`` that
# knows about it.
class MyStyler(Styler):
env = Environment(
loader=ChoiceLoader([
FileSystemLoader("templates"), # contains ours
Styler.loader, # the default
])
)
template_html_table = env.get_template("myhtml.tpl")
# Notice that we include the original loader in our environment's loader.
# That's because we extend the original template, so the Jinja environment needs
# to be able to find it.
#
# Now we can use that custom styler. Its `__init__` takes a DataFrame.
MyStyler(df3)
# Our custom template accepts a `table_title` keyword. We can provide the value in the `.to_html` method.
HTML(MyStyler(df3).to_html(table_title="Extending Example"))
# For convenience, we provide the `Styler.from_custom_template` method that does the same as the custom subclass.
EasyStyler = Styler.from_custom_template("templates", "myhtml.tpl")
HTML(EasyStyler(df3).to_html(table_title="Another Title"))
# #### Template Structure
#
# Here's the template structure for both the style generation template and the table generation template:
# Style template:
# + nbsphinx="hidden"
with open("templates/html_style_structure.html") as f:
style_structure = f.read()
# -
HTML(style_structure)
# Table template:
# + nbsphinx="hidden"
with open("templates/html_table_structure.html") as f:
table_structure = f.read()
# -
HTML(table_structure)
# See the template in the [GitHub repo](https://github.com/pandas-dev/pandas) for more details.
# + nbsphinx="hidden"
# # Hack to get the same style in the notebook as the
# # main site. This is hidden in the docs.
# from IPython.display import HTML
# with open("themes/nature_with_gtoc/static/nature.css_t") as f:
# css = f.read()
# HTML('<style>{}</style>'.format(css))
| doc/source/user_guide/style.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# # Transfer learning sample
# * Train model
# * Build Image
# * Create IoT Edge deployment JSON
# * Deploy Model
# 
# For prod
# !source activate py36 && pip install azureml-core azureml-contrib-iot azure-mgmt-containerregistry azure-cli
# !source activate py36 && az extension add --name azure-cli-iot-ext
import os
print(os.__file__)
# +
# Check core SDK version number
import azureml.core as azcore
print("SDK version:", azcore.VERSION)
# -
# ### Create a Workspace
# #### Change this cell from markdown to code and run this if you need to create a workspace
# #### Update the values for your workspace below
# ws=Workspace.create(subscription_id="replace-with-subscription-id",
# resource_group="your-resource-group",
# name="your-workspace-name",
# location="eastus2")
#
# ws.write_config()
# +
#Initialize Workspace
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
# -
# ### Create Experiment
# Experiment is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments.
# +
experiment_name = 'soda_cans'
from azureml.core import Experiment
exp = Experiment(workspace = ws, name = experiment_name)
# -
# ## Get data
# ### Option 1: Upload data files into datastore
# Every workspace comes with a default datastore (and you can register more) which is backed by the Azure blob storage account associated with the workspace. We can use it to transfer data from local to the cloud, and access it from the compute target.
# get the default datastore
ds = ws.get_default_datastore()
print(ds.name, ds.datastore_type, ds.account_name, ds.container_name)
data_path = experiment_name + '_training_data'
ds.upload(src_dir='data/soda_cans', target_path=data_path, overwrite=True)
# ### Option 2: Use existing datastore in Azure blob storage
from azureml.core.datastore import Datastore
ds = Datastore.register_azure_blob_container(workspace=ws,
datastore_name='xxx',
container_name='xxx',
account_name='xxxx',
account_key='xxx',
create_if_not_exists=False)
data_path = "soda_cans_training_data" # This is the path to the folder in the blob container. Set this to None to get all the contents.
print(ds.name, ds.datastore_type, ds.account_name, ds.container_name)
# ### Configure for using ACI
# Linux-based ACI is available in West US, East US, West Europe, North Europe, West US 2, Southeast Asia, Australia East, East US 2, and Central US regions. See details [here](https://docs.microsoft.com/en-us/azure/container-instances/container-instances-quotas#region-availability)
from azureml.core.runconfig import DataReferenceConfiguration
dr = DataReferenceConfiguration(datastore_name=ds.name,
path_on_datastore=data_path,
mode='download', # download files from datastore to compute target
overwrite=True)
# Set the system to build a conda environment based on the run configuration. Once the environment is built, and if you don't change your dependencies, it will be reused in subsequent runs.
# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# choose a name for your cluster
cluster_name = "cpucluster1"
try:
compute_target = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing compute target.')
except ComputeTargetException:
print('Creating a new compute target...')
compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_D3', max_nodes=2)
# create the cluster
compute_target = ComputeTarget.create(ws, cluster_name, compute_config)
compute_target.wait_for_completion(show_output=True)
# Use the 'status' property to get a detailed status for the current AmlCompute.
print(compute_target.status.serialize())
# +
from azureml.core.runconfig import RunConfiguration, DEFAULT_CPU_IMAGE
from azureml.core.conda_dependencies import CondaDependencies
# create a new runconfig object
run_config = RunConfiguration(framework = "python")
# Set compute target
run_config.target = compute_target.name
# set the data reference of the run configuration
run_config.data_references = {ds.name: dr}
# enable Docker
run_config.environment.docker.enabled = True
# set Docker base image to the default CPU-based image
run_config.environment.docker.base_image = DEFAULT_CPU_IMAGE
# use conda_dependencies.yml to create a conda environment in the Docker image for execution
run_config.environment.python.user_managed_dependencies = False
# auto-prepare the Docker image when used for execution (if it is not already prepared)
run_config.auto_prepare_environment = True
# specify CondaDependencies obj
run_config.environment.python.conda_dependencies = CondaDependencies.create(conda_packages=['tensorflow==1.8.0'])
# -
# ### Submit the Experiment
# Submit script to run in the Docker image in the remote VM. If you run this for the first time, the system will download the base image, layer in packages specified in the conda_dependencies.yml file on top of the base image, create a container and then execute the script in the container.
# +
from azureml.core import Run
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory = './02-mobilenet-transfer-learning-scripts', script = 'retrain.py', run_config = run_config,
# pass the datastore reference as a parameter to the training script
arguments=['--image_dir', str(ds.as_download()),
'--architecture', 'mobilenet_1.0_224',
'--output_graph', 'outputs/retrained_graph.pb',
'--output_labels', 'outputs/output_labels.txt',
'--model_download_url', 'https://raw.githubusercontent.com/rakelkar/models/master/model_output/',
'--model_file_name', 'imagenet_2_frozen.pb'
])
run = exp.submit(config=src)
# -
run
run.wait_for_completion(show_output=True)
# ## Get the trained model
trained_model_path = "models/mobilenet-orig"
# Download the retrained model and the labels locally
run.download_file(name = 'outputs/retrained_graph.pb', output_file_path = trained_model_path)
run.download_file(name = 'outputs/output_labels.txt', output_file_path = trained_model_path)
# ### VAM config file
# This step uses the trained model from your local folder in the Notebooks shell.
# There are three files in this folder: (i) the __model_name.pb__ file, (ii) the __labels_names.txt__ file, and (iii) the __va-snpe-engine-library_config.json__ file.
#
# This va-snpe-engine-library_config file is used by the camera when loading the model into the inference engine.
#
# __key fields are:__
# 1. Engine: This is the network used by the model
# * 0: MobileNet
# * 1: MobileNet-SSD
# * 2: SqueezeNet
# 2. NetworkIO:
# * 0: CPU (default)
# * 1: DSP
# 3. Runtime: this is the HW option to use for inferencing
# * 0: CPU
# * 1: DSP
# * 2: GPU
# 4. ConfThreshold: This is the threshold for when the bounding boxes or inferencing results are shown on screen.
vam_config_file = trained_model_path + "/va-snpe-engine-library_config.json"
# %%writefile $vam_config_file
{
"Engine":0,
"NetworkIO":1,
"ScaleWidth":224,
"ScaleHeight":224,
"PixelNorm":127.5,
"BlueMean":104,
"GreenMean":117,
"RedMean":123,
"TargetFPS":30,
"ConfThreshold":0.0,
"DLC_NAME":"model.dlc",
"LABELS_NAME":"output_labels.txt",
"InputLayers":"input:0",
"OutputLayers":["final_result"],
"ResultLayers":["final_result:0"],
"Runtime":1
}
# +
from azureml.core.model import Model
model = Model.register(model_path = trained_model_path,
model_name = "soda_cans",
tags = {"data": "Imagenet", "model": "object_detection", "type": "imagenet"},
description = "Retrained soda cans based on MobileNet",
workspace = ws)
# +
from azureml.contrib.iot.model_converters import SnpeConverter
# submit a compile request
compile_request = SnpeConverter.convert_tf_model(
ws,
source_model=model,
input_node="input",
input_dims="1,224,224,3",
outputs_nodes = ["final_result"],
allow_unconsumed_nodes = True)
print(compile_request._operation_id)
# -
# wait for the request to complete
compile_request.wait_for_completion(show_output=True)
# Get converted model
converted_model = compile_request.result
print(converted_model.name, converted_model.url, converted_model.version, converted_model.id, converted_model.created_time)
from azureml.core.image import Image
from azureml.contrib.iot import IotContainerImage
print ('We will create an image for you now ...')
image_config = IotContainerImage.image_configuration(
architecture="arm32v7",
execution_script="main.py",
dependencies=["camera.py","iot.py","ipcprovider.py","utility.py", "frame_iterators.py"],
docker_file="Dockerfile",
tags = ["mobilenet"],
description = "MobileNet model retrained soda cans")
image = Image.create(name = "mobilenetsoda",
# this is the model object
models = [converted_model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
# #### Setup the details for the IoT Hub.
# You can use the configuration from an existing IoT Hub if you have one setup.
# +
# Parameter list
# Pick a name for what you want to call the module you deploy to the camera
module_name = "visionsample"
# Resource group in Azure
resource_group_name= ws.resource_group
iot_rg="vaidk_"+resource_group_name # or use the existing RG
# Azure region where your services will be provisioned
iot_location="eastus2" # or use the existing location
# Azure IoT Hub name
iot_hub_name="iothub-"+ ws.get_details()["name"] # or use the name of an existing IoT Hub
# Pick a name for your camera
iot_device_id="vadik_"+ ws.get_details()["name"] # or use the existing device ID from IoT Hub configuration
# Pick a name for the deployment configuration
iot_deployment_id="demovaidk" #
# -
# Getting your container details
container_reg = ws.get_details()["containerRegistry"]
reg_name=container_reg.split("/")[-1]
container_url = "\"" + image.image_location + "\","
subscription_id = ws.subscription_id
print('{}'.format(image.image_location))
print('{}'.format(reg_name))
print('{}'.format(subscription_id))
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.mgmt import containerregistry
client = ContainerRegistryManagementClient(ws._auth,subscription_id)
result= client.registries.list_credentials(resource_group_name, reg_name, custom_headers=None, raw=False)
username = result.username
password = result.passwords[0].value
# ### Deployment file
# This is the deployment.json file that you will use to deploy your model. Please see the other sample notebooks on using this file to deploy the new model you created.
file = open('./deployment-template.json')
contents = file.read()
contents = contents.replace('__MODULE_NAME', module_name)
contents = contents.replace('__REGISTRY_NAME', reg_name)
contents = contents.replace('__REGISTRY_USER_NAME', username)
contents = contents.replace('__REGISTRY_PASSWORD', password)
contents = contents.replace('__REGISTRY_IMAGE_LOCATION', image.image_location)
with open('./deployment.json', 'wt', encoding='utf-8') as output_file:
output_file.write(contents)
# ### Option 1: Push this updated deployment manifest to an existing IoT Hub
# !az login
# !az account set --subscription subscription_id
print("Pushing deployment to IoT Edge device")
print ("Set the deployment")
# !az iot edge set-modules --device-id $iot_device_id --hub-name $iot_hub_name --content deployment.json
# ### Option 2: Create a new Azure IoT Hub
# __SKIP__ if you have already pushed to an existing IoT Hub
# +
print ( 'We will create your HUB now')
with open ('setsub','w+') as command1:
    command1.write('az account set --subscription ' + subscription_id)
# !sh setsub
with open ('create','w+') as command2:
regcommand="\n echo Installing Extension ... \naz extension add --name azure-cli-iot-ext \n"+ "\n echo CREATING RG "+iot_rg+"... \naz group create --name "+ iot_rg +" --location "+ iot_location+ "\n" +"\n echo CREATING HUB "+iot_hub_name+"... \naz iot hub create --name "+ iot_hub_name + " --resource-group "+ iot_rg +" --sku S1"
command2.write(regcommand +"\n echo CREATING DEVICE ID "+iot_device_id+"... \n az iot hub device-identity create --device-id "+ iot_device_id + " --hub-name " + iot_hub_name +" --edge-enabled")
# !sh create
with open ('deploy','w+')as command3:
createcommand="\n echo DEPLOYING "+iot_deployment_id+" ... \naz iot edge deployment create --deployment-id \"" + iot_deployment_id + "\" --content \"deployment.json\" --hub-name \"" + iot_hub_name +"\" --target-condition \"deviceId='"+iot_device_id+"'\" --priority 1"
command3.write(createcommand)
# !sh deploy
with open ('details','w+')as command4:
get_string="\n echo THIS IS YOUR CONNECTION STRING ... \naz iot hub device-identity show-connection-string --device-id \"" + iot_device_id + "\" --hub-name \"" + iot_hub_name+"\""
command4.write(get_string)
print("COPY THIS CONNECTION STRING FOR YOUR DEVICE")
# !sh details
# -
# %%writefile ./setsub
az account set --subscription
iot_sub=subscription_id
# %store iot_sub >> setsub
# !sh setsub
print ('{}'.format(iot_sub))
#RG and location to create hub
# iot_rg="vaidk_"+resource_group_name
# iot_location=ws.get_details()["location"]
#temp to delete
# iot_location="eastus2"
# iot_hub_name="iothub-"+ ws.get_details()["name"]
# iot_device_id="vadik_"+ ws.get_details()["name"]
# iot_deployment_id="demovaidk"
print('{}'.format(iot_hub_name))
# %%writefile ./create
#Command to create hub and device
# Adding initialization steps
regcommand="\n echo Installing Extension ... \naz extension add --name azure-cli-iot-ext \n"+ "\n echo CREATING RG "+iot_rg+"... \naz group create --name "+ iot_rg +" --location "+ iot_location+ "\n" +"\n echo CREATING HUB "+iot_hub_name+"... \naz iot hub create --name "+ iot_hub_name + " --resource-group "+ iot_rg +" --sku S1"
#print('{}'.format(regcommand))
# %store regcommand >> create
# #### Create identity for your device
#Adding Device ID
create_device="\n echo CREATING DEVICE ID "+iot_device_id+"... \n az iot hub device-identity create --device-id "+ iot_device_id + " --hub-name " + iot_hub_name +" --edge-enabled"
#print('{}'.format(create_device))
# %store create_device >> create
#Create command and configure device
# !sh create
# #### Create Deployment
# %%writefile ./deploy
#Command to create hub and device
#Add deployment command
deploy_device="\n echo DEPLOYING "+iot_deployment_id+" ... \naz iot edge deployment create --deployment-id \"" + iot_deployment_id + "\" --content \"deployment.json\" --hub-name \"" + iot_hub_name +"\" --target-condition \"deviceId='"+iot_device_id+"'\" --priority 1"
#print('{}'.format(deploy_device))
# %store deploy_device >> deploy
#run deployment to stage all work for when the model is ready
# !sh deploy
# #### Use this connection string on your camera to initialize it
# %%writefile ./showdetails
#Command to create hub and device
#Add deployment command
get_string="\n echo THIS IS YOUR CONNECTION STRING ... \naz iot hub device-identity show-connection-string --device-id \"" + iot_device_id + "\" --hub-name \"" + iot_hub_name+"\""
#print('{}'.format(get_string))
# %store get_string >> showdetails
# !sh showdetails
# !az account set --subscription 5f08d643-1910-4a38-a7c7-84a39d4f42e0
# !az iot hub show --name hub-peabody
| machine-learning-notebooks/.ipynb_checkpoints/02-mobilenet-transfer-learning-Mod-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Observations and Insights
# - The genders of the mice were evenly distributed between female and male mice.
# - Capomulin and Ramicane clinical trials revealed the best overall outcomes towards the reduction of avg tumor volume.
# - In general, linear regression shows that the heavier the mouse was, the greater the likelihood that its tumor volume was larger than the average tumor volume.
# - The correlation coefficient indicates a positive (negative) linear relationship via a firm linear rule (http://www.dmstat1.com/res/TheCorrelationCoefficientDefined.html).
# ## Dependencies and starter code
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
# Study data files
mouse_metadata = "data/Mouse_metadata.csv"
study_results = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata)
study_results = pd.read_csv(study_results)
# Combine the data into a single dataset
pym_merge_data = pd.merge(mouse_metadata, study_results, on=["Mouse ID"])
pym_merge_data.head()
# -
# Remove duplicate mice by ID number that shows up for Mouse ID and Timepoint. (tutor assisted)
duplicate_mouse_ids = pym_merge_data.loc[pym_merge_data.duplicated(subset=['Mouse ID', 'Timepoint']),'Mouse ID'].unique()
duplicate_mouse_ids
# Merged data with duplicates removed (tutor assisted)
pym_merge_data = pym_merge_data[pym_merge_data['Mouse ID'].isin(duplicate_mouse_ids)==False]
pym_merge_data
# ## Summary statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
pym_mean_data = pym_merge_data.groupby(["Drug Regimen"]).mean()["Tumor Volume (mm3)"]
pym_median_data = pym_merge_data.groupby(["Drug Regimen"]).median()["Tumor Volume (mm3)"]
pym_variance_data = pym_merge_data.groupby(["Drug Regimen"]).var()["Tumor Volume (mm3)"]
pym_std_data = pym_merge_data.groupby(["Drug Regimen"]).std()["Tumor Volume (mm3)"]
pym_SEM_data = pym_merge_data.groupby(["Drug Regimen"]).sem()["Tumor Volume (mm3)"]
summary_stats_table = pd.DataFrame({"Mean Tumor Volume": pym_mean_data,
"Median Tumor Volume": pym_median_data,
"Variance Tumor Volume": pym_variance_data,
"Standard Deviation Volume": pym_std_data,
"SEM Volume": pym_SEM_data})
summary_stats_table
# -
# ## Bar plots
# +
# Generate a bar plot showing number of data points for each treatment regimen using pandas
pym_merge_data2 = pym_merge_data.loc[(pym_merge_data['Timepoint']==45)]
# break down the data points using value_counts function
data_points = pym_merge_data['Drug Regimen'].value_counts()
data_points
# -
# after displaying the counts this one is easy using panda default
data_points.plot(kind='bar')
plt.xlabel("Drug Regimen")
plt.ylabel("Number of Data Points")
plt.tight_layout()
plt.show()
# +
# Generate a bar plot showing number of data points for each treatment regimen using pyplot
x_axis = np.arange(len(data_points))
tick_locations = [value for value in x_axis]
plt.xticks(rotation=90)
# Jack (TA) helped with the plt.bar function below
plt.bar(data_points.index.values, data_points)
plt.ylabel('Number of Data Points')
plt.xlabel('Drug Regimen')
plt.tight_layout()
plt.show()
# -
# ## Pie plots
# +
# generate a pie plot showing the distribution of female versus male mice using pandas
# create a groupby variable to count the mice by gender, ID
mice_gender_count = mouse_metadata.groupby("Sex")["Mouse ID"].count()
# reset the index
mice_gender_count = mice_gender_count.reset_index(name="Gender")
# create pie plot using pandas, add %, change the angle and labels/title
mice_gender_pieplot = mice_gender_count.plot.pie(y="Gender", title= "Distribution of female versus male mice",
labels= mice_gender_count["Sex"], startangle= 180, autopct= '%1.1f%%')
plt.axis("equal")
plt.tight_layout()
plt.show()
# +
# generate a pie plot showing the distribution of female versus male mice using pyplot
# use groupby variable above to create pyplot, create labels, legend, title
plt.pie(mice_gender_count["Gender"], labels= mice_gender_count['Sex'], startangle= 180, autopct= '%1.1f%%')
plt.axis('equal')
plt.legend(loc= 'upper right')
plt.title("Distribution of female versus male mice")
plt.ylabel('Gender')
plt.tight_layout()
plt.show()
# -
# ## Quartiles, outliers and boxplots
# tutor helped me create a new function to perform ordered calculations & print value statements
def iqr(drugdf):
# run the calculations
quartiles = drugdf.quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
# print f string statements and outputs for above calculations
print(f"The lower quartile of tumor volume is: {lowerq}")
print(f"The upper quartile of tumor volume is: {upperq}")
print(f"The interquartile range of tumor volume is: {iqr}")
print(f"The the median of tumor volume is: {quartiles[0.5]} ")
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
print(f"Values below {lower_bound} could be outliers.")
print(f"Values above {upper_bound} could be outliers.")
# +
# Calculate the final tumor volume of each mouse across four of the most promising treatment regimens. Calculate the IQR and quantitatively determine if there are any potential outliers.
# create a variable to hold key regimens
four_regimens = ['Capomulin', 'Ramicane', 'Infubinol', 'Ceftamin']
# locate these specific regimens within the index/ array
most_promising = pym_merge_data.loc[(pym_merge_data['Drug Regimen'].isin(four_regimens))]
# tutor assisted with below in order to write the new function above
pym_merge_data2 = pym_merge_data.groupby(['Mouse ID']).max()
pym_merge_data2 = pym_merge_data2.reset_index()
pym_merge_data2 = pym_merge_data2[['Mouse ID', 'Timepoint']].merge(pym_merge_data, on=['Mouse ID', 'Timepoint'], how="left")
capomulin = pym_merge_data2.loc[pym_merge_data2["Drug Regimen"] == "Capomulin"]['Tumor Volume (mm3)']
ramicane = pym_merge_data2.loc[pym_merge_data2["Drug Regimen"] == "Ramicane"]['Tumor Volume (mm3)']
infubinol = pym_merge_data2.loc[pym_merge_data2["Drug Regimen"] == "Infubinol"]['Tumor Volume (mm3)']
ceftamin = pym_merge_data2.loc[pym_merge_data2["Drug Regimen"] == "Ceftamin"]['Tumor Volume (mm3)']
iqr(capomulin)
iqr(ramicane)
iqr(infubinol)
iqr(ceftamin)
# +
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# plot one box per regimen using the final tumor volume series computed above
fig1, ax1 = plt.subplots()
green_diamond = dict(markerfacecolor='g', marker='D')
ax1.boxplot([capomulin, ramicane, infubinol, ceftamin], labels=four_regimens, flierprops=green_diamond)
ax1.yaxis.grid(True)
ax1.set_title('Final Tumor Volume of Each Mouse')
ax1.set_ylabel('Tumor Volume (mm3)')
plt.tight_layout()
plt.show()
# -
# ## Line and scatter plots
# +
# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin
capomulin = pym_merge_data.loc[pym_merge_data['Drug Regimen'] == "Capomulin"]
mouse_x401 = capomulin.loc[capomulin['Mouse ID']== 'x401']
### need to fix timepoint to '45'
plt.plot(mouse_x401['Timepoint'], mouse_x401['Tumor Volume (mm3)'])
plt.xlabel('Timepoint')
plt.ylabel('Tumor Volume (mm3)')
plt.title('Capomulin treatment Mouse x401')
plt.show()
# +
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
# locate 'Capomulin' for each row in Drug Regimen column
capomulin_regimen = pym_merge_data.loc[(pym_merge_data['Drug Regimen'] == 'Capomulin')]
# use groupby to focus on each mouse ID, their weight and calculated avg tumor volume
capomulin_regimen = capomulin_regimen.groupby(['Mouse ID', 'Weight (g)'])['Tumor Volume (mm3)'].mean()
# reset the index so weight and average tumor volume become regular columns again
capomulin_regimen = capomulin_regimen.reset_index()
# plot the scatter plot
plt.scatter(capomulin_regimen['Weight (g)'], capomulin_regimen['Tumor Volume (mm3)'])
plt.xlabel("Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.title('Tumor Volume vs Weight')
plt.show()
# +
# Calculate the correlation coefficient and linear regression model between mouse weight and average tumor volume for the Capomulin treatment.
weight = capomulin_regimen['Weight (g)']
tumor_volume = capomulin_regimen['Tumor Volume (mm3)']
print(f"The correlation coefficient for the Capomulin regimen is {round(st.pearsonr(weight, tumor_volume)[0],2)}")
capomulin_regimen.corr(method= 'pearson')
# +
# Plot the linear regression model on top of the previous scatter plot.
# Resource: 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html'
# calculate linear regression for slope, intercept, rvalue, pvalue, stderr
(slope, intercept, rvalue, pvalue, stderr) = st.linregress(weight, tumor_volume)
print("slope: %f intercept: %f" % (slope, intercept))
print("R-squared: %f" % rvalue**2)
# Resource: '../Lessons/05-Matplotlib/3/Activities/08-Ins_Fits_and_Regression/Solved/regression.ipynb'
regress_values = weight * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(weight, tumor_volume)
plt.plot(weight, regress_values, "r-")
plt.annotate(line_eq, (6,10), fontsize=15, color="red")
plt.xlabel('Weight (g)')
plt.ylabel('Tumor Volume (mm3)')
plt.title('Tumor Volume vs Weight')
plt.show()
# +
# NEED TO FIX TIMEPOINT TO '45'. Calculations are not correct
| pymaceuticals_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Prior distributions
#
# One important aspect of Bayesian inference has not yet been discussed in this tutorial: [prior distributions](https://en.wikipedia.org/wiki/Prior_probability). In Bayesian statistics, one has to provide probability (density) values for every possible parameter value *before* taking into account the data at hand. This prior distribution thus reflects all *prior* knowledge of the system that is to be investigated. In the case that no prior knowledge is available, a *non-informative* prior in the form of the so-called [Jeffreys prior](https://en.wikipedia.org/wiki/Jeffreys_prior) allows one to minimize the effect of the prior on the results. The next two sub-sections discuss how one can set custom prior distributions for the parameters of the observation model and for hyper-parameters in a hyper-study or change-point study.
# +
# %matplotlib inline
import matplotlib.pyplot as plt # plotting
import seaborn as sns # nicer plots
sns.set_style('whitegrid') # plot styling
import numpy as np
import bayesloop as bl
# prepare study for coal mining data
S = bl.Study()
S.loadExampleData()
# -
# ## Parameter prior
#
# *bayesloop* employs a forward-backward algorithm that is based on [Hidden Markov models](http://www.cs.sjsu.edu/~stamp/RUA/HMM.pdf). This inference algorithm iteratively produces a parameter distribution for each time step, but it has to start these iterations from a specified probability distribution - the parameter prior. All built-in observation models already have a predefined prior, stored in the attribute `prior`. Here, the prior distribution is stored as a Python function that takes as many arguments as there are parameters in the observation model. The prior distributions can be looked up directly within `observationModels.py`. For the `Poisson` model discussed in this tutorial, the default prior distribution is defined in a method called `jeffreys` as
# ```
# def jeffreys(x):
# return np.sqrt(1. / x)
# ```
# corresponding to the non-informative Jeffreys prior, $p(\lambda) \propto 1/\sqrt{\lambda}$. This type of prior can also be determined automatically for arbitrary user-defined observation models, see [here](customobservationmodels.html#Sympy.stats-random-variables).
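# As a quick, hypothetical check (assuming the attribute is exposed on the model instance as described above), one could inspect the default prior directly:
# ```
# M = bl.om.Poisson('accident_rate', bl.oint(0, 6, 1000))
# print M.prior
# ```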
#
# ### Prior functions and arrays
#
# To change the predefined prior of a given observation model, one can add the keyword argument `prior` when defining an observation model. There are different ways of defining a parameter prior in *bayesloop*: If `prior=None` is set, *bayesloop* will assign equal probability to all parameter values, resulting in a uniform prior distribution within the specified parameter boundaries. One can also directly supply a Numpy array with prior probability (density) values. The shape of the array must match the shape of the parameter grid! Another way to define a custom prior is to provide a function that takes exactly as many arguments as there are parameters in the defined observation model. *bayesloop* will then evaluate the function for all parameter values and assign the corresponding probability values.
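# For example, the following is a minimal sketch of three equivalent ways to specify a flat prior on the 1000-point parameter grid used below:
# ```
# S.set(bl.om.Poisson('accident_rate', bl.oint(0, 6, 1000), prior=None))            # uniform within the parameter bounds
# S.set(bl.om.Poisson('accident_rate', bl.oint(0, 6, 1000), prior=np.ones(1000)))   # array matching the parameter grid
# S.set(bl.om.Poisson('accident_rate', bl.oint(0, 6, 1000), prior=lambda x: 1.))    # function of the parameter
# ```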
#
# <div style="background-color: #e7f2fa; border-left: 5px solid #6ab0de; padding: 0.5em; margin-top: 1em; margin-bottom: 1em">
# **Note:** In all of the cases described above, *bayesloop* will re-normalize the provided prior values, so they do not need to be passed in a normalized form. Below, we describe the possibility of using probability distributions from the SymPy stats module as prior distributions, which are not re-normalized by *bayesloop*.
# </div>
#
# Next, we illustrate the difference between the Jeffreys prior and a flat, uniform prior with a very simple inference example: We fit the coal mining example data set using the `Poisson` observation model and further assume the rate parameter to be static:
# +
# we assume a static rate parameter for simplicity
S.set(bl.tm.Static())
print('Fit with built-in Jeffreys prior:')
S.set(bl.om.Poisson('accident_rate', bl.oint(0, 6, 1000)))
S.fit()
jeffreys_mean = S.getParameterMeanValues('accident_rate')[0]
print('-----\n')
print('Fit with custom flat prior:')
S.set(bl.om.Poisson('accident_rate', bl.oint(0, 6, 1000),
prior=lambda x: 1.))
# alternatives: prior=None, prior=np.ones(1000)
S.fit()
flat_mean = S.getParameterMeanValues('accident_rate')[0]
# -
# First note that the model evidence indeed slightly changes due to the different choices of the parameter prior. Second, one may notice that the posterior mean value of the flat-prior-fit does not exactly match the arithmetic mean of the data. This small deviation shows that a flat/uniform prior is not completely non-informative for a Poisson model! The fit using the Jeffreys prior, however, succeeds in reproducing the *frequentist* estimate, i.e. the arithmetic mean:
print('arithmetic mean = {}'.format(np.mean(S.rawData)))
print('flat-prior mean = {}'.format(flat_mean))
print('Jeffreys prior mean = {}'.format(jeffreys_mean))
# ### SymPy prior
#
# The second option is based on the [SymPy](http://www.sympy.org/en/index.html) module that introduces symbolic mathematics to Python. Its sub-module [sympy.stats](http://docs.sympy.org/dev/modules/stats.html) covers a wide range of discrete and continuous random variables. The keyword argument `prior` also accepts a list of `sympy.stats` random variables, one for each parameter (if there is only one parameter, the list can be omitted). The multiplicative joint probability density of these random variables is then used as the prior distribution. The following example defines an exponential prior for the `Poisson` model, favoring small values of the rate parameter:
import sympy.stats
S.set(bl.om.Poisson('accident_rate', bl.oint(0, 6, 1000),
prior=sympy.stats.Exponential('expon', 1)))
S.fit()
# Note that one needs to assign a name to each `sympy.stats` variable. In this case, the output of *bayesloop* shows the mathematical formula that defines the prior. This is possible because of the symbolic representation of the prior by `SymPy`.
#
# <div style="background-color: #e7f2fa; border-left: 5px solid #6ab0de; padding: 0.5em; margin-top: 1em; margin-bottom: 1em">
# **Note:** The support interval of a prior distribution defined via SymPy can deviate from the parameter interval specified in *bayesloop*. In the example above, we specified the parameter interval ]0, 6[, while the exponential prior has the support ]0, $\infty$[. SymPy priors are not re-normalized with respect to the specified parameter interval. Be aware that the resulting model evidence value will only be correct if no parameter values outside of the parameter boundaries gain significant probability values. In most cases, one can simply check whether the parameter distribution has sufficiently *fallen off* at the parameter boundaries.
# </div>
#
# ## Hyper-parameter priors
#
# As shown before, [hyper-studies](hyperstudy.html) and [change-point studies](changepointstudy.html) can be used to determine the full distribution of hyper-parameters (the parameters of the transition model). As for the time-varying parameters of the observation model, one might have prior knowledge about the values of certain hyper-parameters that can be included into the study to refine the resulting distribution of these hyper-parameters. Hyper-parameter priors can be defined just as regular priors, either by an arbitrary function or by a list of `sympy.stats` random variables.
#
# In a first example, we return to the simple change-point model of the coal-mining data set and perform two fits of the change-point: first, we specify no hyper-prior for the time step of our change-point, assuming equal probability for each year in our data set. Second, we define a Normal distribution around the year 1920 with a (rather unrealistic) standard deviation of 5 years as the hyper-prior using a SymPy random variable. For both fits, we plot the change-point distribution to show the differences induced by the different priors:
# +
print('Fit with flat hyper-prior:')
S = bl.ChangepointStudy()
S.loadExampleData()
L = bl.om.Poisson('accident_rate', bl.oint(0, 6, 1000))
T = bl.tm.ChangePoint('tChange', 'all')
S.set(L, T)
S.fit()
plt.figure(figsize=(8,4))
S.plot('tChange', facecolor='g', alpha=0.7)
plt.xlim([1870, 1930])
plt.show()
print('-----\n')
print('Fit with custom normal prior:')
T = bl.tm.ChangePoint('tChange', 'all', prior=sympy.stats.Normal('norm', 1920, 5))
S.set(T)
S.fit()
plt.figure(figsize=(8,4))
S.plot('tChange', facecolor='g', alpha=0.7)
plt.xlim([1870, 1930]);
# -
# Since we used a quite narrow prior (containing a lot of information) in the second case, the resulting distribution is strongly shifted towards the prior. The following example revisits the two-break-point model from [here](changepointstudy.html#Analyzing-structural-breaks-in-time-series-models), which models a linear decrease with a varying slope as a hyper-parameter. Here, we define a Gaussian prior for the slope hyper-parameter, centered around the value -0.2 with a standard deviation of 0.4, via a lambda-function. For simplification, we set the break-points to fixed years.
# +
S = bl.HyperStudy()
S.loadExampleData()
L = bl.om.Poisson('accident_rate', bl.oint(0, 6, 1000))
T = bl.tm.SerialTransitionModel(bl.tm.Static(),
bl.tm.BreakPoint('t_1', 1880),
bl.tm.Deterministic(lambda t, slope=np.linspace(-2.0, 0.0, 30): t*slope,
target='accident_rate',
prior=lambda slope: np.exp(-0.5*((slope + 0.2)/(2*0.4))**2)/0.4),
bl.tm.BreakPoint('t_2', 1900),
bl.tm.Static()
)
S.set(L, T)
S.fit()
# -
# Finally, note that you can mix SymPy- and function-based hyper-priors for nested transition models.
| docs/source/tutorials/priordistributions.ipynb |
# ---
# layout : jupyter
# title : datetime object 관련
# category : Code Snippet
# tags : python
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
| _ipynb/2021-03-12-datetime.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dataset of CAT usage across time
#
# The phab task for measuring CAT usage asks for a dashboard. We might as well populate that dashboard with historic data. To do so, we grab a dataset of all CAT revisions (using the edit tags) and compute data points weekly on Mondays from the first edit through the current time. This dataset will then be augmented with weekly updates from the dashboard code.
# +
import os
import json
import datetime as dt
import tabulate
import numpy as np
import pandas as pd
from wmfdata import hive, mariadb
# +
## We store one long dataset, where each weekly snapshot has four rows
dataset_filename = 'datasets/weekly-CAT-measurements.tsv'
# -
# ## Dataset
#
# Query:
# +
## NOTE: there are some anonymous CAT edits, so we coalesce actor_user with 0 to make
## it easy to filter those out later.
cat_revisions_query = '''
SELECT cat_edits.rev_id, actor_user, rev_timestamp, rev_page,
IF(mobile_edits.rev_id IS NOT NULL, 1, 0) AS mobile_edit,
IF(android_edits.rev_id IS NOT NULL, 1, 0) AS android_edit
FROM (
SELECT rv.rev_id, coalesce(ac.actor_user, 0) AS actor_user, rev_timestamp, rev_page
FROM revision rv
INNER JOIN change_tag ct
ON rev_id = ct_rev_id
INNER JOIN change_tag_def ctd
ON ct_tag_id = ctd_id
INNER JOIN revision_actor_temp rat
ON rv.rev_id = rat.revactor_rev
INNER JOIN actor ac
ON rat.revactor_actor = ac.actor_id
WHERE ctd_name IN ("computer-aided-tagging", "computer-aided-tagging-manual")
) AS cat_edits
LEFT JOIN (
SELECT ct_rev_id AS rev_id
FROM change_tag
JOIN change_tag_def
ON ct_tag_id = ctd_id
WHERE ctd_name = "mobile edit"
) AS mobile_edits
ON cat_edits.rev_id = mobile_edits.rev_id
LEFT JOIN (
SELECT ct_rev_id AS rev_id
FROM change_tag
JOIN change_tag_def
ON ct_tag_id = ctd_id
WHERE ctd_name = "android app edit"
) AS android_edits
ON cat_edits.rev_id = android_edits.rev_id
'''
# -
# Grab data:
cat_revisions = mariadb.run(cat_revisions_query, 'commonswiki')
cat_revisions.head()
cat_revisions.loc[cat_revisions['actor_user'] == 0].tail()
cat_revisions.loc[cat_revisions['actor_user'] == 0].count()
100 * cat_revisions.loc[cat_revisions['actor_user'] == 0].count() / len(cat_revisions)
cat_revisions['rev_ts'] = pd.to_datetime(cat_revisions['rev_timestamp'], format='%Y%m%d%H%M%S')
cat_revisions['actor_user'] = cat_revisions['actor_user'].astype(int)
# We remove all anonymous edits. Currently we do not know what causes them, and they are few.
cat_revisions = cat_revisions.loc[cat_revisions['actor_user'] > 0]
# # Mobile edits
#
# Out of curiosity, how many mobile edits do we have?
len(cat_revisions.loc[cat_revisions['mobile_edit'] == 1])
# ## Identify first date in the dataset
first_edit = cat_revisions['rev_ts'].min()
# Identify the first Monday on or after that first date.
first_monday = first_edit.date() + dt.timedelta(days = (0 - first_edit.weekday()) % 7)
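# As a quick illustration of the snap-to-Monday arithmetic above (the example date below is hypothetical):
example_date = dt.date(2019, 9, 4)  # a Wednesday
print(example_date + dt.timedelta(days = (0 - example_date.weekday()) % 7))  # 2019-09-09, the following Monday; a Monday maps to itself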
# ## Iterate and calculate
#
# Iterate weekly starting from `first_monday` until we're past today's date.
today = dt.date.today()
def count_cat_contribs(df):
'''
For the slice `df` of a larger DataFrame with revisions, count the number
of files and contributors, overall and split by desktop, mobile, and Android.
Returns a `pandas.DataFrame` with four rows and three columns: platform,
number of contributors, number of files.
'''
platforms = ['all', 'desktop', 'mobile', 'android']
n_contributors = [df['actor_user'].nunique()]
n_files = [df['rev_page'].nunique()]
## Desktop edits are non-mobile edits (and Android edits are mobile edits by default)
n_contributors.append(
df.loc[df['mobile_edit'] == 0]['actor_user'].nunique()
)
n_files.append(
df.loc[df['mobile_edit'] == 0]['rev_page'].nunique()
)
## Mobile edits are non-Android edits
n_contributors.append(
df.loc[(df['mobile_edit'] == 1) & (df['android_edit'] == 0)]['actor_user'].nunique()
)
n_files.append(
df.loc[(df['mobile_edit'] == 1) & (df['android_edit'] == 0)]['rev_page'].nunique()
)
## Android edits are Android edits
n_contributors.append(
df.loc[df['android_edit'] == 1]['actor_user'].nunique()
)
n_files.append(
df.loc[df['android_edit'] == 1]['rev_page'].nunique()
)
return(pd.DataFrame({
'platform' : platforms,
'n_contributors' : n_contributors,
'n_files' : n_files
}))
# +
cur_date = first_monday
results = list()
while cur_date < today:
cur_ts = dt.datetime.combine(cur_date, dt.time(0, 0, 0))
summary_df = count_cat_contribs(cat_revisions.loc[cat_revisions['rev_ts'] < cur_ts])
summary_df['snapshot_timestamp'] = cur_ts
results.append(summary_df)
## advance one week
cur_date += dt.timedelta(days = 7)
# -
cat_summary = pd.concat(results)
cat_summary
# ## Write out datasets
#
# We add the `snapshot_method` column and set it to `summary` for data added using our "summary statistics" approach. For fresh data gathered weekly, it will be `live` instead, as it'll reflect the status of the live replica at that point in time. This allows us to separate between data points that are based on estimates of available data ("summary", where edits to deleted files aren't counted) and those that reflect a snapshot of the database when the measurements were gathered ("live").
cat_summary['snapshot_method'] = 'summary'
# +
output_columns = ['snapshot_timestamp', 'snapshot_method', 'platform', 'n_contributors', 'n_files']
cat_summary.to_csv(dataset_filename, columns = output_columns,
header = True, index = False, sep = '\t')
| CAT-usage-historic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/projects/theory/RNN_working_memory.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> <a href="https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/projects/theory/RNN_working_memory.ipynb" target="_parent"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open in Kaggle"/></a>
# -
import numpy as np
import math
import matplotlib.pyplot as plt
# +
# Set some properties of the model. We'll store these in a dict so they're
# easier to pass around or save.
model = {}
# properties of the recurrent pool:
model['N'] = 1000 # number of neurons
model['g'] = 0.95 # gain of synaptic weights in pool
model['sp'] = 0.25 # fraction of weights that are nonzero
model['tau'] = 20 # neural membrane time constant in ms
model['dt'] = 0.1 # simulation timestep in ms
model['nonlin'] = lambda x: np.tanh(x) # firing rate nonlinearity for pool units
# properties of the input layer:
# a note: we're going to encode the "value" of the input by the identity of the
# active input layer units. We'll use one-hot encoding: for each input step
# during simulation, one unit will be activated with "firing rate" 1, and the
# rest will be set to firing rate 0 (adjust gIn to change the scaling of input
# to the recurrent pool.)
# Note 1: This is just one way of setting up input- are there other approaches
# that would improve memory capacity?
# Note 2: Burn-in time is especially important if your model has g>1, in which
# case neurons will be spontaneously active.
model['nIn'] = 20 # size of the input layer
model['gIn'] = 10.0 # gain of the input weights
model['spIn'] = 0.05 # sparsity of input->pool connectivity
model['burnIn'] = 10 # time before input starts
model['durIn'] = 1 # time for which an input is active in ms
model['ISI'] = 0 # time between inputs in ms
model['nonlinIn'] = lambda x: x # best to keep the input linear
# +
# Create the synaptic weight matrix.
# Normalizing weights by sqrt(N*sparsity) keeps the eigenvalue spectrum
# invariant to the size of the population N.
randMat = np.random.normal(0, 1, size=(model['N'], model['N']))
spMat = np.random.uniform(0, 1, size=(model['N'], model['N'])) <= model['sp']
model['J'] = np.multiply(randMat, spMat) * model['g'] / math.sqrt(model['N'] * model['sp'])
# Create the input weight matrix.
randMatIn = np.random.normal(0, 1, size=(model['N'], model['nIn']))
spMatIn = np.random.uniform(0, 1, size=(model['N'], model['nIn'])) <= model['spIn']
model['Jin'] = np.multiply(randMatIn, spMatIn) * model['gIn'] / math.sqrt(model['nIn'] * model['spIn'])
# +
# Define a couple helper functions for simulation.
def step(firing_rates, input_layer, model):
# The simulation function. We use Euler's method to simulate the evolution of
# model neuron firing rates given the input_layer firing rates.
timestep = math.exp(-model['dt']/model['tau'])
vIn = np.matmul(model['J'], firing_rates) \
+ np.matmul(model['Jin'], model['nonlinIn'](input_layer))
updated_rates = model['nonlin'](vIn + (firing_rates - vIn) * timestep)
return updated_rates
def make_input(sequence_length, model):
# Generates a sequence of inputs according to the parameters in model. Returns
# the sequence both as a one-hot encoding and as a sequence of integer values.
input_stream = [0] * int(model['burnIn']/model['dt'])
for i in range(sequence_length):
val = np.random.randint(0, model['nIn']) + 1
for t in range(int(model['ISI']/model['dt'])):
input_stream.append(0.0)
for t in range(int(model['durIn']/model['dt'])):
input_stream.append(val)
input_stream = np.array(input_stream)
onehot = np.zeros((model['nIn'] + 1, input_stream.size))
onehot[input_stream, np.arange(input_stream.size)] = 1.0
onehot = onehot[1:, :]
return onehot, input_stream
# +
# Look at an example input stream.
onehot, stream = make_input(50, model)
fig, ax = plt.subplots(1, 2, figsize=(12, 5))
omit = int(model['burnIn']/model['dt']) # don't plot the burn-in period
ax[0].plot(np.arange(len(stream) - omit) * model['dt'], stream[omit:])
ax[0].set_xlabel('time (ms)')
ax[0].set_ylabel('input value')
ax[1].imshow(onehot[:, omit:], aspect='auto')
ax[1].set_xlabel('time (ms)')
ax[1].set_ylabel('input one-hot encoding')
fig.show()
# +
# Take a look at the eigenvalue spectrum of J.
w, v = np.linalg.eig(model['J'])
fig, ax = plt.subplots(1, 2, figsize=(12, 5))
showCount = 50 # portion of J to actually show (for readability)
h = ax[0].imshow(model['J'][:showCount,:showCount])
ax[0].set_title('Sample from weight matrix J')
ax[0].set_xlabel('presynaptic neuron')
ax[0].set_ylabel('postsynaptic neuron')
plt.colorbar(h, ax=ax[0])
ax[1].plot(np.real(w),np.imag(w),'.')
ax[1].plot(np.sin(np.linspace(0,2*math.pi,100)),
np.cos(np.linspace(0,2*math.pi,100))) # circle with radius 1
ax[1].set_title('Eigenvalue spectrum of J')
ax[1].set_xlabel('real component')
ax[1].set_ylabel('imaginary component')
fig.show()
# +
# Simulate the model activity.
# generate the input to the model
onehot, input_stream = make_input(10, model)
# initialize the firing rates randomly
firing_rates = np.zeros((model['N'], len(input_stream)))
firing_rates[:, 0] = np.random.uniform(0, 0.1, size=(model['N']))
for t in range(len(input_stream)-1):
firing_rates[:,t+1] = step(firing_rates[:,t], onehot[:,t], model)
fig, ax = plt.subplots(2, 1, figsize=(8, 12))
simulation_time = np.arange(len(input_stream))*model['dt'] - model['burnIn']
ax[0].plot(simulation_time, input_stream)
ax[0].set_xlabel('Time (ms)')
ax[0].set_ylabel('Input value')
extents = [simulation_time[0],simulation_time[-1], 0, model['N']]
ax[1].imshow(firing_rates, aspect='auto', extent=extents)
ax[1].set_xlabel('Time (ms)')
ax[1].set_ylabel('Neurons')
fig.show()
# -
# Now: can you decode the model's input history from its firing rates?
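# One possible starting point (a minimal sketch, assuming scikit-learn is available): simulate a longer input sequence and train a linear readout to recover the *currently presented* input value from the instantaneous firing rates. Decoding inputs further back in time would then probe the network's memory.
# +
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
# simulate a longer sequence so the decoder has enough samples (sketch, not part of the original template)
onehot_d, stream_d = make_input(200, model)
rates_d = np.zeros((model['N'], len(stream_d)))
rates_d[:, 0] = np.random.uniform(0, 0.1, size=(model['N']))
for t in range(len(stream_d) - 1):
    rates_d[:, t + 1] = step(rates_d[:, t], onehot_d[:, t], model)
burn = int(model['burnIn'] / model['dt'])
X = rates_d[:, burn:].T    # samples x neurons
y = stream_d[burn:]        # input value presented at each time step
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
decoder = LogisticRegression(max_iter=1000)
decoder.fit(X_train, y_train)
print('decoding accuracy (current input):', decoder.score(X_test, y_test))
# -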
| projects/theory/RNN_working_memory.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Configurations for Colab
# +
import sys
IN_COLAB = "google.colab" in sys.modules
if IN_COLAB:
# !apt install python-opengl
# !apt install ffmpeg
# !apt install xvfb
# !pip install pyvirtualdisplay
# !pip install gym[all]
from pyvirtualdisplay import Display
# Start virtual display
dis = Display(visible=0, size=(400, 400))
dis.start()
# -
# # 07. N-Step Learning
#
# [Richard S. Sutton, "Learning to predict by the methods of temporal differences." Machine learning, 3(1):9–44, 1988.](http://incompleteideas.net/papers/sutton-88-with-erratum.pdf)
#
# Q-learning accumulates a single reward and then uses the greedy action at the next step to bootstrap. Alternatively, forward-view multi-step targets can be used (Sutton 1988). The truncated n-step return
# from a given state $S_t$ is defined as
#
# $$
# R^{(n)}_t = \sum_{k=0}^{n-1} \gamma_t^{(k)} R_{t+k+1}.
# $$
#
# A multi-step variant of DQN is then defined by minimizing the alternative loss,
#
# $$
# (R^{(n)}_t + \gamma^{(n)}_t \max_{a'} q_{\theta}^{-}
# (S_{t+n}, a')
# - q_{\theta}(S_t, A_t))^2.
# $$
#
# Multi-step targets with suitably tuned $n$ often lead to faster learning (Sutton and Barto 1998).
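# As a quick worked example of the truncated return above (with purely illustrative numbers):
# +
rewards = [1.0, 0.0, 2.0]   # rewards observed over the next three steps
gamma = 0.9
# truncated 3-step return: R = r_1 + gamma*r_2 + gamma^2*r_3
n_step_return = sum((gamma ** k) * r for k, r in enumerate(rewards))
print(n_step_return)        # 1.0 + 0.0 + 0.81*2.0 = 2.62
# -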
# +
import os
from collections import deque
from typing import Deque, Dict, List, Tuple
import gym
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from IPython.display import clear_output
# -
# ## Replay buffer for N-step learning
#
# There are a few changes in the replay buffer for N-step learning. First, we use a `deque` to store the most recent n-step transitions.
#
# ```python
# self.n_step_buffer = deque(maxlen=n_step)
# ```
#
# You can see that it doesn't actually store a transition in the buffer unless `n_step_buffer` is full.
#
# ```
# # in store method
# if len(self.n_step_buffer) < self.n_step:
# return ()
# ```
#
# When the length of `n_step_buffer` becomes equal to N, it eventually stores the N-step transition, which is calculated by the `_get_n_step_info` method.
#
# (Please see *01.dqn.ipynb* for a detailed description of the basic replay buffer.)
class ReplayBuffer:
"""A simple numpy replay buffer."""
def __init__(
self,
obs_dim: int,
size: int,
batch_size: int = 32,
n_step: int = 3,
gamma: float = 0.99,
):
self.obs_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.next_obs_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.acts_buf = np.zeros([size], dtype=np.float32)
self.rews_buf = np.zeros([size], dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.max_size, self.batch_size = size, batch_size
self.ptr, self.size, = 0, 0
# for N-step Learning
self.n_step_buffer = deque(maxlen=n_step)
self.n_step = n_step
self.gamma = gamma
def store(
self,
obs: np.ndarray,
act: np.ndarray,
rew: float,
next_obs: np.ndarray,
done: bool
) -> Tuple[np.ndarray, np.ndarray, float, np.ndarray, bool]:
transition = (obs, act, rew, next_obs, done)
self.n_step_buffer.append(transition)
# single step transition is not ready
if len(self.n_step_buffer) < self.n_step:
return ()
# make a n-step transition
rew, next_obs, done = self._get_n_step_info(
self.n_step_buffer, self.gamma
)
obs, act = self.n_step_buffer[0][:2]
self.obs_buf[self.ptr] = obs
self.next_obs_buf[self.ptr] = next_obs
self.acts_buf[self.ptr] = act
self.rews_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
return self.n_step_buffer[0]
def sample_batch(self) -> Dict[str, np.ndarray]:
indices = np.random.choice(
self.size, size=self.batch_size, replace=False
)
return dict(
obs=self.obs_buf[indices],
next_obs=self.next_obs_buf[indices],
acts=self.acts_buf[indices],
rews=self.rews_buf[indices],
done=self.done_buf[indices],
# for N-step Learning
indices=indices,
)
def sample_batch_from_idxs(
self, indices: np.ndarray
) -> Dict[str, np.ndarray]:
# for N-step Learning
return dict(
obs=self.obs_buf[indices],
next_obs=self.next_obs_buf[indices],
acts=self.acts_buf[indices],
rews=self.rews_buf[indices],
done=self.done_buf[indices],
)
def _get_n_step_info(
self, n_step_buffer: Deque, gamma: float
) -> Tuple[np.int64, np.ndarray, bool]:
"""Return n step rew, next_obs, and done."""
# info of the last transition
rew, next_obs, done = n_step_buffer[-1][-3:]
for transition in reversed(list(n_step_buffer)[:-1]):
r, n_o, d = transition[-3:]
rew = r + gamma * rew * (1 - d)
next_obs, done = (n_o, d) if d else (next_obs, done)
return rew, next_obs, done
def __len__(self) -> int:
return self.size
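# Before moving on, a quick sanity check (an illustrative sketch added here, not from the original tutorial): store a few dummy transitions with constant reward 1 and verify that the buffer records the discounted 3-step return $1 + 0.9 + 0.9^2 = 2.71$.
# +
buf = ReplayBuffer(obs_dim=4, size=10, batch_size=2, n_step=3, gamma=0.9)
for i in range(4):
    obs = np.full(4, float(i), dtype=np.float32)
    next_obs = np.full(4, float(i + 1), dtype=np.float32)
    buf.store(obs, 0, 1.0, next_obs, False)
# the first two calls only fill the deque; the last two each store one n-step transition
print(len(buf), buf.rews_buf[:2])
# -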
# ## Network
#
# We are going to use a simple network architecture with three fully connected layers and two non-linearity functions (ReLU).
class Network(nn.Module):
def __init__(self, in_dim: int, out_dim: int):
"""Initialization."""
super(Network, self).__init__()
self.layers = nn.Sequential(
nn.Linear(in_dim, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, out_dim)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward method implementation."""
return self.layers(x)
# ## DQN Agent + N-step learning Agent
#
# Here is a summary of DQNAgent class.
#
# | Method | Note |
# | --- | --- |
# |select_action | select an action from the input state. |
# |step | take an action and return the response of the env. |
# |compute_dqn_loss | return dqn loss. |
# |update_model | update the model by gradient descent. |
# |target_hard_update| hard update from the local model to the target model.|
# |train | train the agent during num_frames. |
# |test | test the agent (1 episode). |
# |plot | plot the training progresses. |
#
# We use two buffers: `memory` and `memory_n` for 1-step transitions and n-step transitions respectively. It guarantees that any paired 1-step and n-step transitions have the same indices (See `step` method for more details). Due to the reason, we can sample pairs of transitions from the two buffers once we have indices for samples.
#
# ```python
# def update_model(self) -> torch.Tensor:
# ...
# samples = self.memory.sample_batch()
# indices = samples["indices"]
# ...
#
# # N-step Learning loss
# if self.use_n_step:
# samples = self.memory_n.sample_batch_from_idxs(indices)
# ...
# ```
#
# One thing to note is that we are going to combine the 1-step loss and the n-step loss so as to control the high-variance / high-bias trade-off.
#
# (Search for the comments marked *N-step Learning* to see the differences from DQN.)
class DQNAgent:
"""DQN Agent interacting with environment.
Attribute:
env (gym.Env): openAI Gym environment
memory (ReplayBuffer): replay memory to store transitions
batch_size (int): batch size for sampling
epsilon (float): parameter for epsilon greedy policy
epsilon_decay (float): step size to decrease epsilon
max_epsilon (float): max value of epsilon
min_epsilon (float): min value of epsilon
target_update (int): period for target model's hard update
gamma (float): discount factor
dqn (Network): model to train and select actions
dqn_target (Network): target model to update
optimizer (torch.optim): optimizer for training dqn
transition (list): transition information including
state, action, reward, next_state, done
use_n_step (bool): whether to use n_step memory
n_step (int): step number to calculate n-step td error
memory_n (ReplayBuffer): n-step replay buffer
"""
def __init__(
self,
env: gym.Env,
memory_size: int,
batch_size: int,
target_update: int,
epsilon_decay: float,
max_epsilon: float = 1.0,
min_epsilon: float = 0.1,
gamma: float = 0.99,
# N-step Learning
n_step: int = 3,
):
"""Initialization.
Args:
env (gym.Env): openAI Gym environment
memory_size (int): length of memory
batch_size (int): batch size for sampling
target_update (int): period for target model's hard update
epsilon_decay (float): step size to decrease epsilon
lr (float): learning rate
max_epsilon (float): max value of epsilon
min_epsilon (float): min value of epsilon
gamma (float): discount factor
n_step (int): step number to calculate n-step td error
"""
obs_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
self.env = env
self.batch_size = batch_size
self.epsilon = max_epsilon
self.epsilon_decay = epsilon_decay
self.max_epsilon = max_epsilon
self.min_epsilon = min_epsilon
self.target_update = target_update
self.gamma = gamma
# memory for 1-step Learning
self.memory = ReplayBuffer(
obs_dim, memory_size, batch_size, n_step=1
)
# memory for N-step Learning
self.use_n_step = True if n_step > 1 else False
if self.use_n_step:
self.n_step = n_step
self.memory_n = ReplayBuffer(
obs_dim, memory_size, batch_size, n_step=n_step, gamma=gamma
)
# device: cpu / gpu
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
)
print(self.device)
# networks: dqn, dqn_target
self.dqn = Network(obs_dim, action_dim).to(self.device)
self.dqn_target = Network(obs_dim, action_dim).to(self.device)
self.dqn_target.load_state_dict(self.dqn.state_dict())
self.dqn_target.eval()
# optimizer
self.optimizer = optim.Adam(self.dqn.parameters())
# transition to store in memory
self.transition = list()
# mode: train / test
self.is_test = False
def select_action(self, state: np.ndarray) -> np.ndarray:
"""Select an action from the input state."""
# epsilon greedy policy
if self.epsilon > np.random.random():
selected_action = self.env.action_space.sample()
else:
selected_action = self.dqn(
torch.FloatTensor(state).to(self.device)
).argmax()
selected_action = selected_action.detach().cpu().numpy()
if not self.is_test:
self.transition = [state, selected_action]
return selected_action
def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:
"""Take an action and return the response of the env."""
next_state, reward, done, _ = self.env.step(action)
if not self.is_test:
self.transition += [reward, next_state, done]
# N-step transition
if self.use_n_step:
one_step_transition = self.memory_n.store(*self.transition)
# 1-step transition
else:
one_step_transition = self.transition
# add a single step transition
if one_step_transition:
self.memory.store(*one_step_transition)
return next_state, reward, done
def update_model(self) -> torch.Tensor:
"""Update the model by gradient descent."""
samples = self.memory.sample_batch()
indices = samples["indices"]
loss = self._compute_dqn_loss(samples, self.gamma)
# N-step Learning loss
        # we combine the 1-step loss and the n-step loss so as to
        # control the high-variance / high-bias trade-off
if self.use_n_step:
samples = self.memory_n.sample_batch_from_idxs(indices)
gamma = self.gamma ** self.n_step
n_loss = self._compute_dqn_loss(samples, gamma)
loss += n_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss.item()
def train(self, num_frames: int, plotting_interval: int = 200):
"""Train the agent."""
self.is_test = False
state = self.env.reset()
update_cnt = 0
epsilons = []
losses = []
scores = []
score = 0
for frame_idx in range(1, num_frames + 1):
action = self.select_action(state)
next_state, reward, done = self.step(action)
state = next_state
score += reward
# if episode ends
if done:
state = self.env.reset()
scores.append(score)
score = 0
# if training is ready
if len(self.memory) >= self.batch_size:
loss = self.update_model()
losses.append(loss)
update_cnt += 1
# linearly decrease epsilon
self.epsilon = max(
self.min_epsilon, self.epsilon - (
self.max_epsilon - self.min_epsilon
) * self.epsilon_decay
)
epsilons.append(self.epsilon)
# if hard update is needed
if update_cnt % self.target_update == 0:
self._target_hard_update()
# plotting
if frame_idx % plotting_interval == 0:
self._plot(frame_idx, scores, losses, epsilons)
self.env.close()
def test(self) -> List[np.ndarray]:
"""Test the agent."""
self.is_test = True
state = self.env.reset()
done = False
score = 0
frames = []
while not done:
frames.append(self.env.render(mode="rgb_array"))
action = self.select_action(state)
next_state, reward, done = self.step(action)
state = next_state
score += reward
print("score: ", score)
self.env.close()
return frames
def _compute_dqn_loss(
self,
samples: Dict[str, np.ndarray],
gamma: float
) -> torch.Tensor:
"""Return dqn loss."""
device = self.device # for shortening the following lines
state = torch.FloatTensor(samples["obs"]).to(device)
next_state = torch.FloatTensor(samples["next_obs"]).to(device)
action = torch.LongTensor(samples["acts"].reshape(-1, 1)).to(device)
reward = torch.FloatTensor(samples["rews"].reshape(-1, 1)).to(device)
done = torch.FloatTensor(samples["done"].reshape(-1, 1)).to(device)
# G_t = r + gamma * v(s_{t+1}) if state != Terminal
# = r otherwise
curr_q_value = self.dqn(state).gather(1, action)
next_q_value = self.dqn_target(next_state).max(
dim=1, keepdim=True
)[0].detach()
mask = 1 - done
target = (reward + gamma * next_q_value * mask).to(self.device)
# calculate dqn loss
loss = F.smooth_l1_loss(curr_q_value, target)
return loss
def _target_hard_update(self):
"""Hard update: target <- local."""
self.dqn_target.load_state_dict(self.dqn.state_dict())
def _plot(
self,
frame_idx: int,
scores: List[float],
losses: List[float],
epsilons: List[float],
):
"""Plot the training progresses."""
clear_output(True)
plt.figure(figsize=(20, 5))
plt.subplot(131)
plt.title('frame %s. score: %s' % (frame_idx, np.mean(scores[-10:])))
plt.plot(scores)
plt.subplot(132)
plt.title('loss')
plt.plot(losses)
plt.subplot(133)
plt.title('epsilons')
plt.plot(epsilons)
plt.show()
# ## Environment
#
# You can see the [code](https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py) and [configurations](https://github.com/openai/gym/blob/master/gym/envs/__init__.py#L53) of CartPole-v0 from OpenAI's repository.
# environment
env_id = "CartPole-v0"
env = gym.make(env_id)
if IN_COLAB:
env = gym.wrappers.Monitor(env, "videos", force=True)
# ## Set random seed
# +
seed = 777
def seed_torch(seed):
torch.manual_seed(seed)
if torch.backends.cudnn.enabled:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
np.random.seed(seed)
seed_torch(seed)
env.seed(seed)
# -
# ## Initialize
# +
# parameters
num_frames = 20000
memory_size = 1000
batch_size = 32
target_update = 100
epsilon_decay = 1 / 2000
# train
agent = DQNAgent(env, memory_size, batch_size, target_update, epsilon_decay)
# -
# ## Train
agent.train(num_frames)
# ## Test
#
# Run the trained agent (1 episode).
frames = agent.test()
# ## Render
# +
if IN_COLAB: # for colab
import base64
import glob
import io
import os
from IPython.display import HTML, display
def ipython_show_video(path: str) -> None:
"""Show a video at `path` within IPython Notebook."""
if not os.path.isfile(path):
raise NameError("Cannot access: {}".format(path))
video = io.open(path, "r+b").read()
encoded = base64.b64encode(video)
display(HTML(
data="""
<video alt="test" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4"/>
</video>
""".format(encoded.decode("ascii"))
))
list_of_files = glob.glob("videos/*.mp4")
latest_file = max(list_of_files, key=os.path.getctime)
print(latest_file)
ipython_show_video(latest_file)
else: # for jupyter
from matplotlib import animation
from JSAnimation.IPython_display import display_animation
from IPython.display import display
def display_frames_as_gif(frames: List[np.ndarray]) -> None:
"""Displays a list of frames as a gif, with controls."""
patch = plt.imshow(frames[0])
plt.axis('off')
def animate(i):
patch.set_data(frames[i])
anim = animation.FuncAnimation(
plt.gcf(), animate, frames = len(frames), interval=50
)
display(display_animation(anim, default_mode='loop'))
# display
display_frames_as_gif(frames)
| 07.n_step_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
# %matplotlib inline
# +
columns = [
"duration", "protocol_type", "service", "flag", "src_bytes", "dst_bytes",
"land", "wrong_fragment", "urgent", "hot", "num_failed_logins",
"logged_in", "num_compromised", "root_shell", "su_attempted", "num_root",
"num_file_creations", "num_shells", "num_access_files",
"num_outbound_cmds", "is_host_login", "is_guest_login", "count",
"srv_count", "serror_rate", "srv_serror_rate", "rerror_rate",
"srv_rerror_rate", "same_srv_rate", "diff_srv_rate", "srv_diff_host_rate",
"dst_host_count", "dst_host_srv_count", "dst_host_same_srv_rate",
"dst_host_diff_srv_rate", "dst_host_same_src_port_rate",
"dst_host_srv_diff_host_rate", "dst_host_serror_rate",
"dst_host_srv_serror_rate", "dst_host_rerror_rate",
"dst_host_srv_rerror_rate", "label"
]
df = pd.read_csv("./datasets/kddcup.data.corrected",
sep=",",
names=columns,
index_col=None)
# -
df.shape
df = df[df["service"] == "http"]
df = df.drop("service", axis=1)
columns.remove("service")
df.shape
df["label"].value_counts()
df.head(5)
for col in df.columns:
if df[col].dtype == "object":
encoded = LabelEncoder()
encoded.fit(df[col])
df[col] = encoded.transform(df[col])
df.head(5)
# +
for f in range(0, 3):
df = df.iloc[np.random.permutation(len(df))]
df2 = df[:500000]
labels = df2["label"]
df_validate = df[500000:]
x_train, x_test, y_train, y_test = train_test_split(df2,
labels,
test_size=0.2,
random_state=42)
x_val, y_val = df_validate, df_validate["label"]
# -
print("Shapes:\nx_train:%s\ny_train:%s\n" % (x_train.shape, y_train.shape))
print("x_test:%s\ny_test:%s\n" % (x_test.shape, y_test.shape))
print("x_val:%s\ny_val:%s\n" % (x_val.shape, y_val.shape))
isolation_forest = IsolationForest(n_estimators=100,
max_samples=256,
contamination=0.1,
random_state=42)
isolation_forest.fit(x_train)
anomaly_scores = isolation_forest.decision_function(x_val)
plt.figure(figsize=(15, 10))
plt.hist(anomaly_scores, bins=100)
plt.xlabel('Average Path Lengths', fontsize=14)
plt.ylabel('Number of Data Points', fontsize=14)
plt.show()
# +
from sklearn.metrics import roc_auc_score
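# decision_function returns larger scores for inlier-looking points, so values above
# the -0.19 cutoff (which appears to be read off the histogram above) are treated as
# normal traffic and compared against the "normal." labels via ROC AUC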
anomalies = anomaly_scores > -0.19
matches = y_val == list(encoded.classes_).index("normal.")
auc = roc_auc_score(anomalies, matches)
print("AUC: {:.2%}".format(auc))
# -
anomaly_scores_test = isolation_forest.decision_function(x_test)
plt.figure(figsize=(15, 10))
plt.hist(anomaly_scores_test, bins=100)
plt.xlabel('Average Path Lengths', fontsize=14)
plt.ylabel('Number of Data Points', fontsize=14)
plt.show()
anomalies_test = anomaly_scores_test > -0.19
matches = y_test == list(encoded.classes_).index("normal.")
auc = roc_auc_score(anomalies_test, matches)
print("AUC: {:.2%}".format(auc))
print(y_test)
| 01_Isolation Forest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Music Machine Learning - Probability basics
#
# ### Author: <NAME> (<EMAIL>)
#
# In this course we will cover
# 1. An explanation on [simple probability](#probability) concepts
# 2. A review of the axioms and definitions of probability [theory](#theory)
# 3. A discussion on [conditional probability](#conditional) and independence
# <a id="probability"> </a>
#
# ## Understanding probability
#
# The field of probability aims to model random or uncertain events. We usually refer to random events when we cannot predict the outcome of a phenomenon with absolute certainty. Perhaps the simplest and most iconic case of randomness that everyone has encountered is _flipping a coin_. In that case, we are uncertain of the result, and might see randomness as a way of expressing this uncertainty.
#
# Hence, a random variable $X$ denotes a quantity that is uncertain, such as the result of an experiment (flipping a coin) or the measurement of an uncertain property (tomorrow's weather). The variable might take different values on each occasion, but some values may occur more often than others; if we can observe a sufficiently large set of _samples_ $\{\mathbf{x}_{i}\}_{i=1}^{N}$, we can start reasoning about these random events from a different perspective. If the coin is fair, and we do not have access to any external information, we expect the _probability_ of the result being heads to be 1 time out of 2, or equivalently 50\% of the time. Here, the use of a percentage points to one interpretation of probability, as the _relative frequency_ of events. By repeatedly flipping the coin and observing the results, we can count the occurrence of each outcome.
#
# To understand these concepts graphically, we will rely on `numpy` and `matplotlib`
import random
import matplotlib.pyplot as plt
import numpy as np
# ## Definitions
# Let's start with some basic definitions and principles.
# - An ***experiment*** or ***trial*** is an action with an uncertain outcome, such as tossing a coin.
# - A ***sample space*** is the set of all possible outcomes of an experiment. In a coin toss, there's a set of two possible outcomes (*heads* and *tails*).
# - A ***sample point*** is a single possible outcome - for example, *heads*.
# - An ***event*** is a specific outcome of a single instance of an experiment - for example, tossing a coin and getting *tails*.
# - ***Probability*** is a value between 0 and 1 that indicates the likelihood of a particular event, with 0 meaning that the event is impossible, and 1 meaning that the event is inevitable. In general terms, it's calculated like this:
#
# $$
# \begin{equation}
# \text{probability of an event} = \frac{\text{Number of sample points that produce the event}}{\text{Total number of sample points in the sample space}}
# \end{equation}
# $$
#
# For example, the probability of getting *heads* when tossing a coin is <sup>1</sup>/<sub>2</sub> - there is only one side of the coin that is designated *heads*, and there are two possible outcomes in the sample space (*heads* and *tails*). So the probability of getting *heads* in a single coin toss is 0.5 (or 50% when expressed as a percentage).
vals = ['heads', 'tails']
trials = 5
for t in range(trials):
# Get a random 0 or 1
toss = random.randint(0,1)
# Print the result of our toss
print('Coin is tossed on ' + vals[toss])
# If we run the previous code several times, we will always get different (uncertain) results, showing the underlying randomness. However, if we start simulating (_sampling_) this _experiment_ a certain number of times, we can start to look at the _distribution_ of _events_.
plt.figure(figsize=(25,4))
# loop through 5,10,50,100,1000,10000 trials
for t_id, trials in enumerate([5,10,50,100,1000,10000]):
# Keep track
heads_or_tails = [0, 0]
for t in range(trials):
# Get a random 0 or 1
toss = random.randint(0,1)
# Increment the list element corresponding to the toss result
heads_or_tails[toss] = heads_or_tails[toss] + 1
# Show a pie chart of the results
plt.subplot(1, 6, t_id+1)
plt.pie(heads_or_tails, labels=['heads', 'tails'])
plt.legend()
plt.show()
# ### A more complicated example
#
# Let us look at another slightly more complicated example, where we have two dice, hoping to get a particular number. The dice throw itself is an *experiment*, as you will not know the outcome until the dice have been thrown. In this case, the *sample space* of all possible outcomes is every combination of two dice - 36 *sample points*:
# <table style='font-size:36px;'>
# <tr><td>⚀+⚀</td><td>⚀+⚁</td><td>⚀+⚂</td><td>⚀+⚃</td><td>⚀+⚄</td><td>⚀+⚅</td></tr>
# <tr><td>⚁+⚀</td><td>⚁+⚁</td><td>⚁+⚂</td><td>⚁+⚃</td><td>⚁+⚄</td><td>⚁+⚅</td></tr>
# <tr><td>⚂+⚀</td><td>⚂+⚁</td><td>⚂+⚂</td><td>⚂+⚃</td><td>⚂+⚄</td><td>⚂+⚅</td></tr>
# <tr><td>⚃+⚀</td><td>⚃+⚁</td><td>⚃+⚂</td><td>⚃+⚃</td><td>⚃+⚄</td><td>⚃+⚅</td></tr>
# <tr><td>⚄+⚀</td><td>⚄+⚁</td><td>⚄+⚂</td><td>⚄+⚃</td><td>⚄+⚄</td><td>⚄+⚅</td></tr>
# <tr><td>⚅+⚀</td><td>⚅+⚁</td><td>⚅+⚂</td><td>⚅+⚃</td><td>⚅+⚄</td><td>⚅+⚅</td></tr>
# </table>
#
# The *event* you want to happen is throwing a 7. There are 6 *sample points* that could produce this event:
#
# <table style='font-size:36px;'>
# <tr><td>⚀+⚅</td><td>⚁+⚄</td><td>⚂+⚃</td><td>⚃+⚂</td><td>⚄+⚁</td><td>⚅+⚀</td></tr>
# </table>
#
# The *probability* of throwing a 7 is therefore <sup>6</sup>/<sub>36</sub> which can be simplified to <sup>1</sup>/<sub>6</sub> or approximately 0.167 (16.7%).
#
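# We can check this value with a quick simulation (a small sketch added here): roll two dice many times and count how often the total is 7.
# +
sims = 100000
sevens = 0
for _ in range(sims):
    total = random.randint(1, 6) + random.randint(1, 6)
    sevens += int(total == 7)
print('Estimated p(7) = %.3f (theory: %.3f)' % (sevens / sims, 1/6))
# -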
# <a id="theory"> </a>
# ## Probability theory
#
# Probability theory defines the mathematical framework that allows us to describe and analyze random phenomena, regardless of the interpretation of probability that we prefer. To rely on this framework, we start by assuming a few axioms of probability. We postulate a set of possible events $\Omega$ called the *sample space* (in our coin toss example, we would have $\Omega = \{Heads, Tails\}$). We can assign a _probability measure_ $p(\omega)$ to an event $\omega\in\Omega$, which gives a value between 0 (very unlikely) and 1 (very probable) that shows how likely the event is. In our previous example, we could write the probability of throwing a 7 as
#
# $$
# p(\omega = 7) = 0.167
# $$
#
# where our sample space is defined as $\Omega = \{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12\}$
#
# ### Axioms of probability
# The foundation of probability theory is defined in these axioms, on which we can develop tools and techniques to calculate probabilities of different events.
#
# - For any event $\omega\in\Omega$, $0\leq p(\omega) \leq 1$.
# - The probability of the sample space $\Omega$ is $p(\Omega)=1$.
# - For a set of *disjoint* events $\omega_1,\cdots,\omega_n$, we have $p(\bigcup_{i=1}^{n} \omega_i)=\sum_{i=1}^{n} p(\omega_{i})$
#
# The first axiom is quite straightforward and states that probability is measured in the interval $[0,1]$. The second axiom states that the probability of the whole sample space is equal to one, as it contains all possible outcomes of our random experiment. For instance, in our coin toss, the outcome will always be either $Heads$ or $Tails$, and therefore will always be contained in $\Omega$. Finally, the third axiom states that for disjoint events, the probability of one or the other happening is the sum of their individual probabilities.
#
# ### Complement of an event
# The *complement* of an event is the set of *sample points* that do ***not*** result in the event. Following our previous example, suppose that we want to know the probability of **not** throwing a 7. The *complement* of the event (throwing a 7) is all of the possible outcomes that **do not** result in throwing a 7. If we were to count all these events, we would find that there are 30 sample points in the complement, so the probability of the complement is <sup>30</sup>/<sub>36</sub> which is <sup>5</sup>/<sub>6</sub> or 0.833.
#
# However, we can also note that the probability of an event and the probability of its complement ***always add up to 1***. This can be written as
#
# $$
# p(\bar{\omega}) = 1 - p(\omega)
# $$
#
# This fact can be useful in some cases. For example, suppose you throw two dice and want to know the probability of throwing more than 4. You *could* count all of the outcomes that would produce this result, but there are a lot of them. It might be easier to identify the ones that *do not* produce this result (in other words, the complement).
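# As a small illustrative check (added as a sketch), we can count the complement by enumerating all 36 outcomes and then use $p(\bar{\omega}) = 1 - p(\omega)$:
# +
at_most_4 = 0
for d1 in range(1, 7):
    for d2 in range(1, 7):
        at_most_4 += int(d1 + d2 <= 4)
print('p(total > 4) =', 1 - at_most_4 / 36)  # 30/36 = 0.833
# -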
# <a id="conditional"> </a>
# ## Conditional Probability and dependence
# Events can be:
# - *Independent* (events that are not affected by other events)
# - *Dependent* (events that are conditional on other events)
# - *Mutually Exclusive* (events that can't occur together)
#
# ### Independent Events
# Imagine you toss a coin. The sample space contains two possible outcomes: heads or tails. The probability of getting *heads* is <sup>1</sup>/<sub>2</sub>, and the probability of getting *tails* is also <sup>1</sup>/<sub>2</sub>. Imagine we toss a coin and get *heads*. Now, we toss the coin again and get *heads* again. If we were to toss the coin a third time, what is the probability that we get *heads*?
#
# Although you might be tempted to think that a *tail* is overdue, the fact is that each coin toss is an independent event. The outcome of the first coin toss does not affect the second coin toss (or the third, or any number of other coin tosses). For each independent coin toss, the probability of getting *heads* (or *tails*) remains <sup>1</sup>/<sub>2</sub>, or 50%.
# ### Combining Independent Events
# Now, we would like to know the probability of getting three *heads* in a row. Since the probability of a heads on each independent toss is <sup>1</sup>/<sub>2</sub>, you might be tempted to think that the same probability applies to getting three *heads* in a row. But in reality, we need to understand that "getting three *heads* in a row" is a different random event, which is the _combination of three independent events_. To combine independent events like this, we need to multiply the probability of each event. As we know that $p(heads)$ = <sup>1</sup>/<sub>2</sub>, we have
#
# $p(heads, heads, heads)$ = <sup>1</sup>/<sub>2</sub> x <sup>1</sup>/<sub>2</sub> x <sup>1</sup>/<sub>2</sub>
#
# So the probability of tossing three *heads* in a row is 0.5 x 0.5 x 0.5, which is 0.125 (or 12.5%). You can simulate this behavior with the following code.
import random
# Count the number of 3xHeads results
h3 = 0
# loop through 10000 trials
trials = 10000
for t in range(trials):
# Flip three coins
result = ['H' if random.randint(0,1) == 1 else 'T',
'H' if random.randint(0,1) == 1 else 'T',
'H' if random.randint(0,1) == 1 else 'T']
# If it's three heads, add it to the count
h3 = h3 + int(result == ['H','H','H'])
# What proportion of trials produced 3x heads
print ("Proportion of 3 heads : %.2f%%" % ((h3/trials)*100))
# The output shows the percentage of times a trial resulted in three heads (which should be somewhere close to 12.5%).
#
# #### Combined Event Probability Notation
# When calculating the probability of combined events, we assign a letter such as **A** or **B** to each event, and we use the *intersection* (**∩**) symbol to indicate that we want the combined probability of multiple events. So we could assign the letters **A**, **B**, and **C** to each independent coin toss in our sequence of three tosses, and express the combined probability like this:
#
# \begin{equation}p(A \cap B \cap C) = p(A) \times p(B) \times p(C) \end{equation}
#
# Imagine you have created a new game that mixes coin-tossing and die-rolling. The objective of the game is to roll a die and get *6*, and toss a coin and get *heads*. On each turn of the game, a player rolls the die and tosses the coin. There are two independent events required to win: a die-roll of *6* (which we'll call event **A**), and a coin-toss of *heads* (which we'll call event **B**). Our formula for combined independent events is:
#
# \begin{equation}p(A \cap B) = p(A) \times p(B) \end{equation}
#
# The probablilty of rolling a *6* on a fair die is <sup>1</sup>/<sub>6</sub> or 0.167; and the probability of tossing a coin and getting *heads* is <sup>1</sup>/<sub>2</sub> or 0.5:
#
# \begin{equation}p(A \cap B) = 0.167 \times 0.5 = 0.083 \end{equation}
#
# So on each turn, there's an 8.3% chance to win the game.
#
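# A quick simulation of this game (a small sketch added here) confirms the combined-probability calculation:
# +
sims = 100000
wins = 0
for _ in range(sims):
    die = random.randint(1, 6)
    coin = random.randint(0, 1)  # 1 represents heads
    wins += int(die == 6 and coin == 1)
print('Estimated win probability: %.1f%%' % (100 * wins / sims))
# -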
# ### Dependent events
#
# In some cases, random events might be dependent. For instance, in the case of a die, we can have the event $a=$"*getting an even number*" and $b=$"*rolling a 6*". If we *observe* event $a$, it will modify the probability of event $b$. This is given by the _conditional probability_ $p(b\vert a)$. We can also compute the full joint probability of both events as
#
# $$
# p(a,b)=p(a\vert b)p(b)
# $$
#
# Note that joint probabilities are symmetrical, which means that $p(a,b)=p(b,a)$ and also
# $$
# p(a,b) = p(a\vert b)p(b) = p(b\vert a)p(a) = p(b,a)
# $$
#
# In the case of our dice rolling, we have that $p(b=6 \vert a=even)=1/3$. Interestingly, this leads to
# $$
# p(b = 6, a = even) = p(b=6 \vert a=even)p(a=even) = 1/3 * 1/2 = 1/6
# $$
#
# which is our original probability of getting a 6. To summarize our discussion on dependence, you can note that _independent events_ are **not** conditioned on each other, which means that $p(a \vert b) = p(a)$ and also $p(a, b) = p(a)p(b)$.
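# As a final check (a small sketch added here), we can estimate the conditional probability $p(b=6 \vert a=even)$ by simulating die rolls and keeping only the even outcomes:
# +
sims = 100000
evens = 0
sixes_given_even = 0
for _ in range(sims):
    roll = random.randint(1, 6)
    if roll % 2 == 0:
        evens += 1
        sixes_given_even += int(roll == 6)
print('Estimated p(6 | even) = %.3f (theory: %.3f)' % (sixes_given_even / evens, 1/3))
# -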
| 04a_probabilities.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RoyMillamis/CPEN-21A-CPE-1-2/blob/main/Control_Structure.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Qz5QV-wJH6fk"
# ## If Statement
# + colab={"base_uri": "https://localhost:8080/"} id="Q9E_rWmVH-67" outputId="0de7dc0a-bc92-4fff-fbe7-9622583df9e7"
a=12
b=100
if b>a:
print("b is greater than a")
# + [markdown] id="iVYPGvZEIYDr"
# ## Elif Statement
#
# + colab={"base_uri": "https://localhost:8080/"} id="aU5R3lZdIcYq" outputId="5e46a24c-7daf-4748-fabf-bad45e4c2efd"
a=120
b=100
if b>a:
print("b is greater than a")
elif a>b:
print("a is greater than b")
# + [markdown] id="ErDZZy-yJGQ7"
# ## Else Statement
# + colab={"base_uri": "https://localhost:8080/"} id="WFjXxBTiJIzs" outputId="322afc4a-d662-4243-c15b-8dc2ca4f653b"
a=100
b=100
if b>a:
print("b is greater than a")
elif a>b:
print("a is greater than b")
else:
print("a is equal to b")
# + [markdown] id="7aEhoOoCJzFr"
# ## Short Hand If Statement
# + colab={"base_uri": "https://localhost:8080/"} id="mbYzyS8bJ2z6" outputId="293a62ad-2832-4491-bfd2-eb440a31d612"
a=45
b=30
if a>b:print("a is greater than b")
# + id="q8cHGpZwKDEU"
# + [markdown] id="eW09dXHvJ_wU"
# ## Short Hand Else Statement
# + colab={"base_uri": "https://localhost:8080/"} id="If4CDp7PKFLc" outputId="9fc97cc0-ec70-4163-a4cc-d300e483c8b5"
a=45
b=30
print("a is greater than b")if a>b else print("b is greater than a")
# + [markdown] id="uluoiZPvLD79"
# ## And Logic Condition
# + colab={"base_uri": "https://localhost:8080/"} id="sfwbkEJOLJpC" outputId="4974b2e1-5230-414e-8d86-9f8075d7b888"
x=7
if x>5 and x>6:
print("Both conditions are True")
# + [markdown] id="HKRMve4tLdh0"
# ## Or Logic Condition
# + colab={"base_uri": "https://localhost:8080/"} id="26BjEIPMLgD0" outputId="38ab7bfc-1d20-4e68-a7fa-fa2434341c6f"
x=21
if x>18 or x<15:
print("True")
else:
print("False")
# + colab={"base_uri": "https://localhost:8080/"} id="IO6ujpkPMDc8" outputId="73f45f10-3518-4db7-98ab-41d197bca748"
if x>10:
print("Above 10")
if x>20:
print("Above 20")
else:
print("below 20")
else:
print("x is less than 10")
# + [markdown] id="TWqBV86jM-FE"
# Example 1
# + colab={"base_uri": "https://localhost:8080/"} id="Ig5WvtSONwIb" outputId="3396219c-2469-4f1a-91cb-7985c41f1618"
age=19
if age>=18:
print("You are qualified to vote")
else:
print("you are not qualified to vote")
# + colab={"base_uri": "https://localhost:8080/"} id="cYfZeSxWM_6M" outputId="e8a52381-f82e-4910-a8b2-2e4883f0261c"
age=int(input("Enter your age:"))
if age>=18:
print("You are qualified to vote")
else:
print("you are not qualified to vote")
# + colab={"base_uri": "https://localhost:8080/"} id="wpKV4zxaNzOT" outputId="cec74e43-0ff1-409f-e503-1ea117310a4a"
age=17
if age>=18:
print("You are qualified to vote")
else:
print("you are not qualified to vote")
# + colab={"base_uri": "https://localhost:8080/"} id="NoG26X1FNTJt" outputId="ffd1ab98-aef1-4450-ad90-43ce0c070270"
age=int(input("Enter your age:"))
if age>=18:
print("You are qualified to vote")
else:
print("you are not qualified to vote")
# + [markdown] id="bdhBuyM1Nohz"
# Example 2
# + colab={"base_uri": "https://localhost:8080/"} id="8ndmkiyWNp2b" outputId="b75be20f-5bee-425b-852e-8c1d3037925a"
number= float(input("Enter the number:"))
if number == 0:
print("Zero")
elif number>0:
print("Positive")
# + colab={"base_uri": "https://localhost:8080/"} id="xGKe6cY6THi1" outputId="039eaf0f-c19c-4836-d4fa-66ac517b4c8c"
number= float(input("Enter the number:"))
if number == 0:
print("Zero")
elif number>0:
print("Positive")
# + colab={"base_uri": "https://localhost:8080/"} id="eb0sQmSBOj10" outputId="be5a0635-5df7-46ff-eee3-fcbfbd27e0c3"
number= float(input("Enter the number:"))
if number == 0:
print("Zero")
elif number>0:
print("Positive")
else:
print("Negative")
# + [markdown] id="te3MTDb4Qn3T"
# # Example 3 (ACTIVITY)
# + colab={"base_uri": "https://localhost:8080/"} id="gu17xagLQp7z" outputId="5a0d6cdb-d9c3-47ba-b013-dfdaec9a339c"
grade= float(input("Enter the grade:"))
if grade>75 or grade==75:
print("Passed")
elif grade==74 or grade> 74:
print("Remedial")
else:
if grade<74:
print("Failed")
# + colab={"base_uri": "https://localhost:8080/"} id="-GuAldvJk7Wz" outputId="0e8f84b4-acb6-42ba-dfab-672a45a92044"
grade= float(input("Enter the grade:"))
if grade>75 or grade==75:
print("Passed")
elif grade==74 or grade> 74:
print("Remedial")
else:
if grade<74:
print("Failed")
# + colab={"base_uri": "https://localhost:8080/"} id="RFKSkckblFRz" outputId="0305718c-0f4e-4f74-d6a0-ad1b120991d3"
grade= float(input("Enter the grade:"))
if grade>75 or grade==75:
print("Passed")
elif grade==74 or grade> 74:
print("Remedial")
else:
if grade<74:
print("Failed")
| Control_Structure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NumPy
# +
import numpy as np
# Create a rank-1 numpy array
un_array=np.array([7,77,777])
print(type(un_array))
# +
# Print the shape of the array
print(type(un_array.shape))
print(un_array.shape)
print(un_array)
# +
# Access the elements
print(un_array[0])
print(un_array[1])
print(un_array[2])
# -
# # Multidimensional Array
multi =np.array([[12,2,32],[53,43,76],[52,79,75]])
print("La forma de este array es: ",multi.shape)
print(multi[2,0])
print("Para imprimir todo es ")
print(multi)
# $ I^{n \times n} =
# \left( \begin{array}{cccc}
# 11 & 12 & 13 \\
# 22 & 23 & 24 \\
# 33 & 34 & 35 \\
# \end{array} \right) $
# # Different ways to create nd arrays
#
#Crear array lleno de ceros
ej1=np.zeros([1080,1920,3])
print(ej1)
# +
#Creamos un array lleno de un valor determinado(floats por ejemplo)
ej2=np.full((2,2), 9.1)
print(ej2)
# -
#Creando un array 2*2 condiagonal en "unos" y el resto ceros
ej3=np.eye(7,7)
print(ej3)
# +
#Creando un array de "unos"
ej4 = np.ones((3,3))
print(ej4)
# +
#Creamos un aray de flotantes aleatorios entre 0 y 1
ej5 = np.random.random((5,5))
print(ej5)
# -
# # Index arrays
#
# Sometimes it is useful to prepare a set of indices in one array, and then use them to operate on another.
# +
#Creamos un nuevo array
array_a = np.array([[11,12,13],[21,22,23],[31,32,33],[41,42,43]])
print(array_a)
# +
#Creamos 2 arrays con numeros enteros que usaremoos como indices
cols=np.array([0,1,2,0])
rows=np.arange(4)
print("Elegimos estos indices para filas -> ",rows)
print("Elegimos estos indices para columnas -> ",cols)
# -
#Imprimimos los inices tomando uno de cada array, de a pares,
for row,col in zip(rows,cols):
print("(",row,",",col,")")
# +
#Entonces ahora para seleccionar un elemento de cada fila, hacemos de esta manera
print("Los valores contenidos en los indices son: ",array_a[rows,cols])
# -
#Vamos a operar con los valores seleccionados
print(array_a)
array_a[rows,cols] +=1000
print(array_a)
# # Boolean indexing
#
# Boolean indexing to change elements
# +
#Creamos un array de 3x2
array_b = np.array([[11,12],[21,22],[31,32]])
print(array_b)
print("----------------")
print(array_b.shape)
# -
#Crear un filtro
filtro =(array_b>15)
filtro
# +
#Aplicamos el filtro a nuestro array
filtrado = array_b[filtro]
print(filtrado)
# +
#No es necesario crear el filtro
print(array_b[(array_b %2==0)])
# -
array_b[array_b%2==0]+=1000
print(array_b)
# # Slicing arrays
# +
un_array = np.array([[11,12,13,14,15,16],[21,22,23,24,25,26],[31,32,33,34,35,36],[41,42,43,44,45,46]])
print(un_array)
# +
#Creamos un slice de 2x2 a partir de "un_array"
un_slice=un_array[:2,1:3]
print(un_slice)
# -
print("Slice: ",un_slice[0,0])
un_slice[0,0] += 100
print(un_slice)
print(un_array)
# ### *Very important*
#
# What if we needed a new, independent array built from a slice?
nuevo_array= np.array(un_slice)
print(nuevo_array)
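# Quick added check (not in the original notebook): because `nuevo_array` was built with `np.array`, it is an independent copy, so modifying it does not touch `un_array`, unlike the slice view shown above.

# +
nuevo_array[0, 0] = -999
print(nuevo_array)
print(un_array)   # un_array keeps its values: the copy is independent
# -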
# +
#Puedo seleccionar una sola fila, entonces el slice sera unidimensional.
row_rankl= un_array[1,:]
print(row_rankl,row_rankl.shape)
# -
# # Data types of numpy arrays
ej1=np.array([11,12])
print(ej1.dtype)
ej2=np.array([11.0,12.0])
print(ej2.dtype)
ej3=np.array([11,21], dtype=np.float64)
print(ej3.dtype)
#Se truncan los datos
ej4=np.array([11.4,21.8], dtype=np.int64)
print(ej4.dtype)
print()
print(ej4)
# # Arithmetic operations
# +
x=np.array([[111,112],[121,122]], dtype=np.int64)
y=np.array([[211.1,212.1],[221.1,222.1]], dtype=np.float64)
print(x)
print()
print(y)
# -
#Restar
print(x-y)
print()
print(np.subtract(x,y))
#Multiplicar
print(x*y)
print()
print(np.multiply(x,y))
#Division
print(x/y)
print()
print(np.divide(x,y))
#Raiz Cuadrada
print(np.sqrt(x))
# # Basic statistical operations
arr = 10 *np.random.randn(2,5)
print(arr)
# +
#Calcular la media (promedio) de un array
print(arr.mean())
# -
#Calcular promedio, fila por fila
# eje 0 = columnas
#eje 1 = filas
print(arr.mean(axis=1))
#Calcular promedio, columna por columna
# eje 0 = columnas
#eje 1 = filas
print(arr.mean(axis=0))
#Creamos un array de 10 elementos aleatorios
desordenado = np.random.randn(10)
print(desordenado)
# +
#Creamos una copia
ordenado = np.array(desordenado)
#lo ordenamos
ordenado.sort()
print(ordenado)
# -
#buscando elementos unicos
array = np.array([1,2,1,4,2,1,4,2])
print(np.unique(array))
# +
#Operaciones de conjunto
s1 = np.array(['escritorio','silla','mesa'])
s2 = np.array(['escritorio','silla','lampara'])
print(s1)
print(s2)
# -
#Interseccion
print(np.intersect1d(s1,s2))
#Union en id
print(np.union1d(s1,s2))
#elementos de s1 que no estan en s2
print(np.setdiff1d(s1,s2))
#que elementos de s1 estan en s2
print(np.in1d(s1,s2))
# # Some frequent operations
#sumamos todos los elementos de un array
ex1 = np.array([[11,12],[21,22]])
print(np.sum(ex1))
#Sumamos solo una columna
print(np.sum(ex1, axis=0))
#Sumamos solo una fila
print(np.sum(ex1, axis=1))
# ### Reshaping an array
arr = np.arange(20)
print(arr)
arr.shape
#Cambiamos su forma
reshped_arr = arr.reshape(4,5)
print(reshped_arr)
#Trasnponer o transposicionar un array
ex1 = np.array([[11,12],[21,22]])
print(ex1)
ex1.T
#Where
mat = np.random.rand(4,4)
mat
np.where(mat>0.5,1000,-1)
arr_bools= np.array([0,0,0,0,0,True])
arr_bools.any()
arr_bools= np.array([1,1,True])
arr_bools.all()
# ### Joining data sets
# +
K = np.random.randint(low=2,high=50,size=(2,2))
print(K)
print()
M = np.random.randint(low=2,high=50,size=(2,2))
print(M)
# -
#Unimos "ampliando" verticalmente
np.vstack((K,M))
#Unimos "ampliando" horizontalmente
np.hstack((K,M))
#Podemos unir concatenando
np.concatenate((K,M), axis=0)
np.concatenate((K,M), axis=1)
# # Broadcasting
base = np.zeros((4,4))
#Creamos un array unidimensional
row = np.array([1,0,2,7])
print(row)
print(row.shape)
y = base+row
print(y)
col = np.array([[0,1,2,3]])
print(col)
col = col.T
print(col)
y = base+col
print(y)
col=np.array([[0,1,2]])
col = col.T
print(col)
# Note: this addition raises a ValueError, since a (3,1) array cannot broadcast against the (4,4) base
y = base+col
print(y)
arr_x=np.array([4])
print(arr_x)
print("___________________")
print(base+arr_x)
# # Speed test
# +
from numpy import arange
from timeit import Timer
size = 1000000
timeits=1000
nd_array = arange(size)
print("Tipo de dato -> ",type(nd_array))
print("Forma -> ",nd_array.shape)
# +
#con la funcion timer podemos medir, el tiempo que lleva realizar una operacion o funcion
timer_numpy=Timer("nd_array.sum()","from __main__ import nd_array")
print("Time que toma Numpy nd_array: %f segundos" % (timer_numpy.timeit(timeits)))
# -
a_list = list(range(size))
print(type(a_list))
print("Forma -> ", len(a_list))
timer_list = Timer("sum(a_list)","from __main__ import a_list")
print("Time que toma la lista: %f segundos" % (timer_list.timeit(timeits)))
# ## Saving an array to disk / loading an array from disk
x=np.array([23.23,24.24])
np.save('array_importante',x)
recuperado = np.load('array_importante.npy')
print("El array recuperado es: ",recuperado)
| NumPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] raw_mimetype="text/latex"
# (c) <NAME> 2019. Thanks to Universidad EAFIT for support. This material is part of the course Introduction to Finite Element Analysis
# -
# # SOLUTION: Application: Visualization in a Full Domain
# ## By <NAME>
# ### Class activity
# Consider the theory of elasticity solution for a cantilever beam of height $h = 2c$, length $2\ell$ and moment of inertia $I$, subjected to a uniformly distributed load of intensity $q$ (Timoshenko and Goodier, 2011):
#
# $$\sigma_{xx}=-\frac q{2I}\left(x^2y-\frac23y^3\right)$$
#
# $$\sigma_{yy}=-\frac q{2I}\left(\frac13y^3-c^2y+\frac23c^3\right)$$
#
# $$\tau_{xy}=-\frac q{2I}\left(c^2-y^2\right)x$$
#
#
#
# <center><img src="img/beam.png" alt="beam" style="width:600px"></center>
#
#
# * Identify the problem domain and use the free three-dimensional mesh generator [Gmsh](http://gmsh.info/) to create a finite element mesh of this domain.
#
# * Use the Python module [meshio](https://github.com/nschloe/meshio) to read the mesh from the **.msh** created file and convert it into the appropriate text files for nodes and elements.
#
# * Use the interpolation and visualization subroutines discussed in the notebook to create an independent script (or notebook) and visualize the principal stresses over the computational domain.
# We will use all the plotting subroutines defined in the original Notebook. The gmsh model is available in the files folder under the name beam.msh
# %matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.tri import Triangulation, CubicTriInterpolator
import numpy as np
import sympy as sym
import meshio
import solidspy.preprocesor as msh
def plot_SFIELD(UC, nodes, elements, Ngra, plt_type="contourf", levels=12,
savefigs=False, title="Solution:" ):
"""Plots a user defined scalar field using a triangulation.
Parameters
----------
UC : ndarray (float)
Array with the nodal solution.
"""
tri = mesh2tri(nodes, elements)
tri_plot(tri, UC , Ngra , title=r'$U_{var}$',
figtitle=title + "User variable",
levels=levels, plt_type=plt_type, savefigs=savefigs,
filename="uservar.pdf")
def mesh2tri(nodes, elements):
"""Generates a matplotlib.tri.Triangulation object from the mesh
Parameters
----------
nodes : ndarray (float)
Array with number and nodes coordinates:
`number coordX coordY BCX BCY`
elements : ndarray (int)
Array with the node number for the nodes that correspond to each
element.
Returns
-------
tri : Triangulation
An unstructured triangular grid consisting of npoints points
and ntri triangles.
"""
x = nodes[:, 1]
y = nodes[:, 2]
triangs = []
for el in elements:
if el[1]==1:
triangs.append(el[[3, 4, 5]])
triangs.append(el[[5, 6, 3]])
if el[1]==2:
triangs.append(el[[3, 6, 8]])
triangs.append(el[[6, 7, 8]])
triangs.append(el[[6, 4, 7]])
triangs.append(el[[7, 5, 8]])
if el[1]==3:
triangs.append(el[3:])
tri = Triangulation(x, y, np.array(triangs))
#
return tri
def tri_plot(tri, field, Ngra , title="", figtitle="", levels=12, savefigs=False,
plt_type="contourf" , filename="solution_plot.pdf" ):
plt.figure(Ngra)
if plt_type=="pcolor":
disp_plot = plt.tripcolor
elif plt_type=="contourf":
disp_plot = plt.tricontourf
plt.figure(figtitle)
disp_plot(tri, field, levels)
plt.title(title)
plt.colorbar(orientation='vertical')
plt.axis("image")
plt.grid()
# #### Mesh reading and stress function.
#
# The following script uses the functions **node_writer()** and **ele_writer()** defined in the preprocessing module from SolidsPy to convert the gmsh file (beam.msh) into nodal and element input files.
def script_mesh(mesh):
points = mesh.points
cells = mesh.cells
point_data = mesh.point_data
cell_data = mesh.cell_data
field_data = mesh.field_data
nodes_array = msh.node_writer(points, point_data)
nf, els_array = msh.ele_writer(cells, cell_data, "triangle", 100, 3, 0, 0)
np.savetxt("files/Beles.txt", els_array, fmt="%d")
np.savetxt("files/Bnodes.txt", nodes_array, fmt=("%d", "%.4f", "%.4f", "%d", "%d"))
return
def principal_stress_field(x, y, q , c):
I = 2/3*c**3
# Stress field
S_xx = -(q/2/I)*(y*x**2-2/3*y**3.)
S_yy = -(q/2/I)*(1/3*y**3-y*c**2+2/3*c**3)
    T_xy = -(q/2/I)*(c**2 - y**2)*x
#Principal stresses
sig_c = (S_xx+S_yy)/2
Rsq = ((S_xx-S_yy)/2)**2 + T_xy**2
R = np.sqrt(Rsq)
sig_p = sig_c + R
sig_m = sig_c - R
return sig_p , sig_m , R
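# As an added sanity check (not part of the original activity), we can verify symbolically that the stress field defined above satisfies the plane-stress equilibrium equations with zero body forces; the `sympy` import at the top of the notebook is enough for this.

# Added check: both expressions below should simplify to 0
xs, ys, qs, cs = sym.symbols('x y q c', positive=True)
Is = sym.Rational(2, 3)*cs**3
Sxx = -(qs/(2*Is))*(xs**2*ys - sym.Rational(2, 3)*ys**3)
Syy = -(qs/(2*Is))*(sym.Rational(1, 3)*ys**3 - cs**2*ys + sym.Rational(2, 3)*cs**3)
Txy = -(qs/(2*Is))*(cs**2 - ys**2)*xs
print(sym.simplify(sym.diff(Sxx, xs) + sym.diff(Txy, ys)))   # d(sigma_xx)/dx + d(tau_xy)/dy
print(sym.simplify(sym.diff(Txy, xs) + sym.diff(Syy, ys)))   # d(tau_xy)/dx + d(sigma_yy)/dy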
# #### Main code
#
# * Reads the model.
#
# * Extracts nodal coordinates
#
# * Evaluates the solution, populating the solution array **SOL[]**, and plots it.
mesh = meshio.read("files/beam.msh")
script_mesh(mesh)
nodes = np.loadtxt('files/'+'Bnodes.txt')
elements = np.loadtxt('files/'+'Beles.txt')
nn =len(nodes[:,0])
coords=np.zeros([nn,2])
coords[:,0]=nodes[:,1]
coords[:,1]=nodes[:,2]
SOL_p = np.zeros([nn])
SOL_m = np.zeros([nn])
SOL_r = np.zeros([nn])
q = 1.0
c = 1.0
for i in range(0,nn):
x = coords[i,0]
y = coords[i,1]
Sig_p , Sig_m , r = principal_stress_field(x, y, q , c)
SOL_p[i] = Sig_p
SOL_m[i] = Sig_m
SOL_r[i] = r
plot_SFIELD(SOL_p , nodes , elements, 0 , plt_type ="contourf", levels = 12 )
plot_SFIELD(SOL_m , nodes , elements, 1 , plt_type ="contourf", levels = 12 )
# ### References
#
# * <NAME>, <NAME> (2018). SolidsPy: 2D-Finite Element Analysis with Python, <https://github.com/AppliedMechanics-EAFIT/SolidsPy>.
#
# * <NAME>., and <NAME>. (1976). Theory of Elasticity. International Student Edition. McGraw-Hill International.
plot_SFIELD(SOL_r , nodes , elements, 2 , plt_type ="contourf", levels = 12 )
from IPython.core.display import HTML
def css_styling():
styles = open('./nb_style.css', 'r').read()
return HTML(styles)
css_styling()
| solutions/SOL_04_lagrange_full.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('base')
# language: python
# name: python3
# ---
var1 = 10
type(var1)
print(var1)
10+15
a = 10
b = 20
type(a+b)
a * b
b / a
c = b / a
type(int(c))
11//5
11%5
7**3
# +
# hey there! ;)
# -
pow(5,2)
abs(-5)
10.5*2
import math
math.sqrt(64)
str1 = "<NAME>"
str1
str1 = """
Barev
gegheckuhi
"""
print(str1)
str1.upper()
str1.lower()
str1.replace("Barev", "Privet")
str1.find('gegheckuhi')
# +
w = 'econometrics'
len(w)
x = 'erik'
len(x)
# -
w[1]
x[0]
x[1]
x[-2]
x[-1]
x[1:3]
w[4:8:4]
print(w)
w[4:8]
w[3:7]
w[3:7:2]
w[3:7:4]
z = 7
z > 10
z == 7
z == 6
z >= 7
print (round(6.78))
print (round(7.24649, 2))
print (isinstance(5, int)) #int - whole number (integer)
print (isinstance(7.5, int))
print (isinstance(2.2, float)) #float - non-integer (floating-point) number
print (isinstance(1.7, (int,float,str))) #str - text string
pow(2,4)
help(pow)
print(pow(3,3,2)) # three-argument pow is modular exponentiation: (3**3) % 2 = 1
print('jan')
print("hello")
print("i don't")
print('i don\'t' )
'He' + "l"*2 + 'o'
'hey <NAME> ' * 3
| lesson1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.5
# language: python
# name: python3.5
# ---
# # Kepler Data
# ## The Data
# SIMBAD info: http://simbad.u-strasbg.fr/simbad/sim-id?Ident=KIC7198959
#
# Lightcurve data from: https://archive.stsci.edu/kepler/publiclightcurves.html
# +
# # !curl -O https://archive.stsci.edu/pub/kepler/lightcurves/0071/007198959/kplr007198959-2009259160929_llc.fits
# -
from astropy.io import fits
hdulist = fits.open('kplr007198959-2009259160929_llc.fits')
hdulist.info()
hdulist[1].header
from astropy.table import Table
data = Table(hdulist[1].data)
data
df = data.to_pandas()[['TIME', 'SAP_FLUX', 'SAP_FLUX_ERR']]
df.shape
df = df.dropna()
df.shape
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from astropy.timeseries import LombScargle
plt.style.use('seaborn-whitegrid')
# -
# ## Data and Window
from astropy.timeseries import LombScargle
ls = LombScargle(df['TIME'], 1, center_data=False, fit_mean=False)
freqW, powerW = ls.autopower(minimum_frequency=0,
maximum_frequency=200)
# Find the maximum near 2 hours
f, p = ls.autopower(minimum_frequency=1.95*24,
maximum_frequency=2.05*24,
samples_per_peak=100)
f_ny = f[np.argmax(p)]
t_sorted = np.sort(df['TIME'])
p_ny = 24 * 60 * 60 / f_ny
delta_t = (t_sorted[1:] - t_sorted[:-1]) * 24 * 60 * 60
# +
ls = LombScargle(df['TIME'], df['SAP_FLUX'], df['SAP_FLUX_ERR'])
freq, power = ls.autopower(minimum_frequency=0,
maximum_frequency=200)
fmax = freq[np.argmax(power)] / 24
# +
fig, ax = plt.subplots(2, 2, figsize=(12, 5))
fig.suptitle('Kepler object ID 007198959', size=14)
fig.subplots_adjust(hspace=0.35, wspace=0.15, left=0.07, right=0.97)
# upper left
ax[0, 0].plot(df['TIME'], df['SAP_FLUX'] / 1E6, 'ok', markersize=2, rasterized=True)
ax[0, 0].set(ylabel='SAP flux ($10^6 e^-/s$)',
title='Observed light curve',
xlim=(168, 260))
# bottom left
left, bottom, width, height = ax[1, 0].get_position().bounds
ax[1, 0].set_position([left, bottom + 0.15, width, height-0.15])
inset = fig.add_axes([left, bottom, width, 0.1])
ax[1, 0].plot(t_sorted[:-1], delta_t / 60, 'ok', markersize=2, rasterized=True)
ax[1, 0].axhline(p_ny / 60, color='gray', linestyle='--')
ax[1, 0].set(xlim=ax[0, 0].get_xlim(),
ylim=(10, 10000),
yscale='log',
ylabel='$\Delta t$ (min)',
title='Time between observations')
ax[1, 0].xaxis.set_major_formatter(plt.NullFormatter())
inset.plot(t_sorted[:-1], 1000 * (delta_t - p_ny), 'ok', markersize=2, rasterized=True)
inset.axhline(0, color='gray', linestyle='--')
inset.set(xlim=ax[0, 0].get_xlim(),
ylim=(-100, 100),
xlabel='Observation time (days)',
ylabel='$\Delta t - p_{W}$ (ms)')
inset.yaxis.set_major_locator(plt.MaxNLocator(3));
# Upper right
ax[0, 1].plot(freqW / 24, powerW, '-k', rasterized=True);
ax[0, 1].set(xlim=(0, 6.5),
ylim=(-0.1, 1.1),
ylabel='Lomb-Scargle power',
title='Window Power Spectrum');
ax[0, 1].annotate('', (0, 0.6), (f_ny / 24, 0.6),
arrowprops=dict(arrowstyle='<->', color='gray'));
ax[0, 1].text(f_ny / 48, 0.6, r'$({0:.1f}\ {{\rm minutes}})^{{-1}}$'.format(24 * 60 / f_ny),
size=12, ha='center', va='bottom');
# Lower right
ax[1, 1].plot(freq / 24, power, '-k', rasterized=True)
ax[1, 1].fill_between([0.5 * f_ny / 24, 1.5 * f_ny / 24], -0.05, 1,
color='gray', alpha=0.3)
ax[1, 1].text(2.25, 0.95, r"(Aliased Region)", size=14, color='gray', ha='right', va='top')
ax[1, 1].text(fmax, 0.85, r"$f_{{max}}=({0:.2f}\ {{\rm hours}})^{{-1}}$".format(1 / fmax),
size=12)
ax[1, 1].set(xlim=(0, 2.3),
ylim=(-0.05, 1.0),
xlabel='frequency (hours$^{-1}$)',
ylabel='Lomb-Scargle power',
title='Light Curve Power Spectrum');
fig.savefig('fig16_kepler_data.pdf')
# -
# ## Size of required grid
# +
n_o = 5
T = df['TIME'].max() - df['TIME'].min()
delta_f = 1. / n_o / T
print("f_ny =", f_ny)
print("T =", T)
print("n_grid =", f_ny / delta_f)
| figures/Kepler.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.10 64-bit (''score-diff'': conda)'
# name: python3810jvsc74a57bd00dd71e44a40e3bff9e2fa73414ec8f9a1ed4bbd004313799300ef7fd50845337
# ---
# +
import lib.NotationLinear as nlin
import lib.score_comparison_lib as scl
import lib.score_visualization as sv
import importlib
import music21 as m21
from pathlib import Path
# -
score1_path = Path("tests/test_scores/chord_score_3a.mei")
score1 = m21.converter.parse(str(score1_path))
score2_path = Path("tests/test_scores/chord_score_3b.mei")
score2 = m21.converter.parse(str(score2_path))
# build ScoreTrees
score_lin1 = nlin.Score(score1)
score_lin2 = nlin.Score(score2)
score1.show()
score2.show()
op_list, cost=scl.complete_scorelin_diff(score_lin1,score_lin2)
sv.annotate_differences(score1,score2, op_list)
score1.show()
score2.show()
sv.show_differences(score1,score2)
| experiments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="YpGFMiKfiTuk" colab_type="text"
# # PyTorch Metric Learning
# See the documentation [here](https://kevinmusgrave.github.io/pytorch-metric-learning/)
# + [markdown] id="Ix6axXiRiaal" colab_type="text"
# ## Install the packages
# + id="yhkMjfZFVyxM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 208} outputId="d428362b-61fe-4ef4-a319-6db89138c161"
# !pip install pytorch-metric-learning
# !git clone https://github.com/akamaster/pytorch_resnet_cifar10
# + [markdown] id="arrmHAWJiffp" colab_type="text"
# ## Import the packages
# + id="u-TsIJjkVmwt" colab_type="code" colab={}
# %matplotlib inline
from pytorch_resnet_cifar10 import resnet # pretrained models from https://github.com/akamaster/pytorch_resnet_cifar10
from pytorch_metric_learning.utils.inference import MatchFinder, InferenceModel
from pytorch_metric_learning.utils import common_functions as c_f
from torchvision import datasets, transforms
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import matplotlib.pyplot as plt
import numpy as np
# + [markdown] id="TAqGqYrnihQc" colab_type="text"
# ## Create helper functions
# + id="WdUzoef_WtCa" colab_type="code" colab={}
def print_decision(is_match):
if is_match:
print("Same class")
else:
print("Different class")
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
inv_normalize = transforms.Normalize(
mean= [-m/s for m, s in zip(mean, std)],
std= [1/s for s in std]
)
def imshow(img, figsize=(8, 4)):
img = inv_normalize(img)
npimg = img.numpy()
plt.figure(figsize = figsize)
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
# + [markdown] id="sZl6T8MjiuOD" colab_type="text"
# ## Create the dataset and load the trained model
# + id="3rDNNVjjito4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="c4ad272e-764e-45b3-f198-8abb8cf1bfd6"
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)])
dataset = datasets.CIFAR10(root="CIFAR10_Dataset", train=False, transform=transform, download=True)
labels_to_indices = c_f.get_labels_to_indices(dataset.targets)
model = torch.nn.DataParallel(resnet.resnet20())
checkpoint = torch.load("pytorch_resnet_cifar10/pretrained_models/resnet20-12fca82f.th")
model.load_state_dict(checkpoint['state_dict'])
model.linear = c_f.Identity()
model.to(torch.device("cuda"))
print("done model loading")
# + [markdown] id="b176nhrwimmf" colab_type="text"
# ## Create the InferenceModel wrapper
# + id="0m_bCb8-h-WV" colab_type="code" colab={}
match_finder = MatchFinder(mode="sim", threshold=0.7)
inference_model = InferenceModel(model, match_finder=match_finder)
# cars and frogs
classA, classB = labels_to_indices[1], labels_to_indices[6]
# + [markdown] id="jjZLaY5qi1uv" colab_type="text"
# ## Compare two images of the same class
# + id="R42W1mwlXjBt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="3b8d2ced-ef6d-4cc6-bcb6-ec423b6a439f"
# compare two images of the same class
(x, _), (y, _) = dataset[classA[0]], dataset[classA[1]]
imshow(torchvision.utils.make_grid(torch.stack([x,y], dim=0)))
decision = inference_model.is_match(x.unsqueeze(0), y.unsqueeze(0))
print_decision(decision)
# + [markdown] id="ikC8rGD9i4lG" colab_type="text"
# ## Compare two images of different classes
# + id="0UIj-AnSZVq8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="c3741465-1f42-474b-f952-f409dbec192f"
# compare two images of a different class
(x, _), (y, _) = dataset[classA[0]], dataset[classB[0]]
imshow(torchvision.utils.make_grid(torch.stack([x,y], dim=0)))
decision = inference_model.is_match(x.unsqueeze(0), y.unsqueeze(0))
print_decision(decision)
# + [markdown] id="AiHZXR6ki77J" colab_type="text"
# ## Compare multiple pairs of images
# + id="tFXrTUigcVJn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 578} outputId="dac0aad2-d91c-4379-a32b-e4fa5e231113"
# compare multiple pairs of images
x = torch.zeros(20, 3, 32, 32)
y = torch.zeros(20, 3, 32, 32)
for i in range(0, 20, 2):
x[i] = dataset[classA[i]][0]
x[i+1] = dataset[classB[i]][0]
y[i] = dataset[classA[i+20]][0]
y[i+1] = dataset[classB[i+20]][0]
imshow(torchvision.utils.make_grid(torch.cat((x,y), dim=0), nrow=20), figsize=(30, 3))
decision = inference_model.is_match(x, y)
for d in decision:
print_decision(d)
print("accuracy = {}".format(np.sum(decision)/len(x)))
# + [markdown] id="VvqLS2cci9vT" colab_type="text"
# ## Compare all pairs within a batch
# + id="xd1MnZDgdJEt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="8d260a19-901a-4c66-db44-c4a60624d0f9"
# compare all pairs within a batch
match_matrix = inference_model.get_matches(x)
assert match_matrix[0,0] # the 0th image should match with itself
imshow(torchvision.utils.make_grid(torch.stack((x[3],x[4]), dim=0)))
print_decision(match_matrix[3,4]) # does the 3rd image match the 4th image?
# + [markdown] id="w0Uvn5DCjBLy" colab_type="text"
# ## Compare all pairs between queries and references
# + id="mxHIlREegSYw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="03123259-aede-4a52-a3cb-e1fca0064813"
# compare all pairs between queries and references
match_matrix = inference_model.get_matches(x, y)
imshow(torchvision.utils.make_grid(torch.stack((x[6],y[6]), dim=0)))
print_decision(match_matrix[6, 6]) # does the 6th query match the 6th reference?
# + [markdown] id="djkNbKsrjEC5" colab_type="text"
# # Get results in tuple form
# + id="9VvFa4kKhHM6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="e9ec9243-e959-4559-ac5e-623eeb1e39ac"
# make a new model with high threshold
match_finder = MatchFinder(mode="sim", threshold=0.95)
inference_model = InferenceModel(model, match_finder=match_finder)
# get all matches in tuple form
match_tuples = inference_model.get_matches(x, y, return_tuples=True)
print("MATCHING IMAGE PAIRS")
for i,j in match_tuples:
print(i,j)
imshow(torchvision.utils.make_grid(torch.stack((x[i],y[j]), dim=0)))
| examples/notebooks/Inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example Notebook
# +
import math
import unittest
# import gtsam
import gtsam
from gtsam.utils.test_case import GtsamTestCase
# import local package
import example.module as mod
# +
class TestGTSamFunctionality(GtsamTestCase):
"""testing GTSAM functionality."""
def test_range(self):
"""Test range method."""
l2 = gtsam.Point3(1, 1, 0)
x1 = gtsam.Pose3()
# establish range is indeed sqrt2
self.assertEqual(math.sqrt(2.0), x1.range(point=l2))
class TestLocalModule(GtsamTestCase):
"""testing GTSAM functionality through local module."""
def test_meaning_of_everything(self):
"""Test simple function in module."""
self.assertEqual(mod.meaning_of_everything(), 42)
def test_create_special_2d_pose(self):
"""Test GTSAM function in module."""
actual = mod.create_special_2d_pose()
self.assertIsInstance(actual, gtsam.Pose2)
self.gtsamAssertEquals(actual, gtsam.Pose2(1,2,3), 1e-7)
if __name__ == '__main__':
unittest.main(argv=['first-arg-is-ignored'], exit=False)
# -
| Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Making multipanel plots with matplotlib
#
# #### First, we import numpy and matplotlib as usual
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Then we define an array of angles, and their sines and cosines using numpy. This time we will use linspace.
# +
x = np.linspace(0,2*np.pi,100)
print(x[-1],2*np.pi)
y = np.sin(x)
z = np.cos(x)
w = np.sin(4*x)
v= np.cos(4*x)
# +
f, axarr=plt.subplots(1,2) #f is the figure, axarr the axes array: subplots(1 row, 2 columns)
axarr[0].plot(x,y)
axarr[0].set_xlabel('x')
axarr[0].set_ylabel('sin(x)')
axarr[0].set_title(r'$\sin(x)$')

axarr[1].plot(x,z)
axarr[1].set_xlabel('x')
axarr[1].set_ylabel('cos(x)')
axarr[1].set_title(r'$\cos(x)$')

f.subplots_adjust(wspace=0.7) #wspace: horizontal space between subplots (hspace adjusts the vertical spacing)
axarr[0].set_aspect('equal') #make the ratio of the tick units equal
axarr[1].set_aspect(np.pi) #make a square by setting the aspect to be the ratio of the tick unit range
# +
fig = plt.figure(figsize=(8,8))
plt.plot(x,y, label=r'$y=\sin(x)$')
plt.plot(x,z, label=r'$y=\cos(x)$')
plt.plot(x,w, label=r'$y=\sin(4x)$')
plt.plot(x,v, label=r'$y=\cos(4x)$')
plt.xlabel(r'$x$')
plt.ylabel(r'$y(x)$')
plt.xlim(0,2*np.pi)
plt.ylim(-1.2,1.2)
plt.legend(loc=1,framealpha=.5)
plt.gca().set_aspect(np.pi/1.2)
# -
| test_multipanel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Optimal Portfolio Selection II
#
# <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/thumb/6/6e/Separation_theorem_of_MPT.svg/2000px-Separation_theorem_of_MPT.svg.png" width="400px" height="400px" />
#
# So far we have that:
# - The LAC (capital allocation line) describes the possible risk-return choices between a risk-free asset and a risky asset.
# - Its slope equals the Sharpe ratio of the risky asset.
# - The optimal capital allocation for any investor is the point where the investor's indifference curve is tangent to the LAC.
#
# For all of the above, we assumed that we already had the optimal (risky) portfolio.
#
# In the previous class we learned how to find this optimal portfolio when the set of risky assets consisted of only two assets:
#
# $$w_{1,EMV}=\frac{(E[r_1]-r_f)\sigma_2^2-(E[r_2]-r_f)\sigma_{12}}{(E[r_2]-r_f)\sigma_1^2+(E[r_1]-r_f)\sigma_2^2-((E[r_1]-r_f)+(E[r_2]-r_f))\sigma_{12}}.$$
#
# - However, the complexity of the problem grows considerably with the number of variables, and the analytical solution stops being viable once we note that a well-diversified portfolio holds roughly 50-60 assets.
# - In those cases, the problem is solved with numerical routines that carry out the optimization for us, because they are a viable solution that scales to more variables.
#
#
# **Objectives:**
# - What is the optimal portfolio of risky assets when we have more than two assets?
# - How do we build the minimum-variance frontier when we have more than two assets?
#
# *Reference:*
# - Lecture notes of the course "Portfolio Selection and Risk Management", Rice University, available on Coursera.
# ___
# ## 1. Maximizing the Sharpe ratio
#
# ### What happens if we have more than two risky assets?
#
# It is actually very similar to what we had with two assets.
# - For two assets, building the minimum-variance frontier is trivial: all possible combinations.
# - With more than two assets, recall the definition: the minimum-variance frontier is the locus of the portfolios that provide the minimum risk for a given level of return.
#
# <font color=blue> See on the board.</font>
# Analytically:
# - $n$ assets,
# - characterized by $(\sigma_i,E[r_i])$,
# - each with weight $w_i$, where $i=1,2,\dots,n$.
#
# Then, we look for the weights such that
#
# \begin{align}
# \min_{w_1,\dots,w_n} & \quad \sum_{i=1}^{n}w_i^2\sigma_i^2+\sum_{i=1}^{n}\sum_{j=1,j\neq i}^{n}w_iw_j\sigma_{ij}\\
# \text{s.t.} & \quad \sum_{i=1}^{n}w_i=1, w_i\geq0\\
# & \quad \sum_{i=1}^{n}w_iE[r_i]=\bar{\mu},
# \end{align}
#
# where $\bar{\mu}$ is a target level of return.
#
# **Obviously, we would have to solve this problem for many target return levels.**
#
# - <font color=blue> Explain the relation to the graph.</font>
#
# - <font color=green> Recall class 10.</font>
# The above can be written in vector form as:
# \begin{align}
# \min_{\boldsymbol{w}} & \quad \boldsymbol{w}^T\Sigma\boldsymbol{w}\\
# \text{s.t.} & \quad \boldsymbol{1}^T\boldsymbol{w}=1, \boldsymbol{w}\geq0\\
# & \quad E[\boldsymbol{r}^T]\boldsymbol{w}=\bar{\mu},
# \end{align}
#
# where:
# - $\boldsymbol{w}=\left[w_1,\dots,w_n\right]^T$ is the vector of weights,
# - $\boldsymbol{1}=\left[1,\dots,1\right]^T$ is a vector of ones,
# - $E[\boldsymbol{r}]=\left[E[r_1],\dots,E[r_n]\right]^T$ is the vector of expected returns, and
# - $\Sigma=\left[\begin{array}{cccc}\sigma_{1}^2 & \sigma_{12} & \dots & \sigma_{1n} \\
# \sigma_{21} & \sigma_{2}^2 & \dots & \sigma_{2n} \\
# \vdots & \vdots & \ddots & \vdots \\
# \sigma_{n1} & \sigma_{n2} & \dots & \sigma_{n}^2\end{array}\right]$ is the variance-covariance matrix.
#
# **This last form is the one we commonly use when programming, since it is efficient and scales to problems with N variables.**
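# A minimal sketch (added here, not part of the original notes) of how the vector form above maps onto `scipy.optimize.minimize`; the three-asset inputs `Eind_demo`, `Sigma_demo` and the target `mu_bar` are made-up numbers used purely for illustration.

# +
import numpy as np
from scipy.optimize import minimize

# Illustrative (made-up) expected returns and covariance matrix for 3 assets
Eind_demo = np.array([0.10, 0.14, 0.12])
Sigma_demo = np.array([[0.04, 0.01, 0.00],
                       [0.01, 0.09, 0.02],
                       [0.00, 0.02, 0.06]])
mu_bar = 0.12   # target expected return

def port_var(w, Sigma):
    # portfolio variance w' Sigma w
    return w.T.dot(Sigma).dot(w)

n = len(Eind_demo)
w0 = np.ones(n) / n                    # initial guess: equal weights
bnds = ((0, 1),) * n                   # no short positions
cons = ({'type': 'eq', 'fun': lambda w: w.sum() - 1},                # weights sum to one
        {'type': 'eq', 'fun': lambda w: Eind_demo.dot(w) - mu_bar})  # hit the target return

min_var = minimize(port_var, w0, args=(Sigma_demo,), bounds=bnds, constraints=cons)
print(min_var.x, np.sqrt(min_var.fun))  # optimal weights and the resulting volatility
# -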
# ### So, for how many target return levels would we have to solve the above problem in order to plot the minimum-variance frontier?
# - Note that the problem can become very heavy as we increase the number of assets in our portfolio...
# - A rather complex task.
# ### It turns out that, in reality, we only need to know two portfolios that lie on the *minimum-variance frontier*.
# - If we manage to find two portfolios on the frontier, then we can in turn generate all possible combinations of these two portfolios to trace the minimum-variance frontier.
# - See the two-asset case.
# ### Which portfolios should we use?
# So far, we have studied in depth how to find two very important portfolios that in fact lie on the minimum-variance frontier:
# 1. The EMV portfolio: maximum Sharpe ratio.
# 2. The minimum-variance portfolio: basically the same problem as above, without the target-return constraint.
#
# Then, take all possible combinations of these two portfolios using the two-asset formulas for means and variances (a small sketch follows below):
# - w: weight on the EMV portfolio,
# - 1-w: weight on the minimum-variance portfolio.
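# A short added sketch (with made-up numbers) of the two-fund idea just described: once the EMV and minimum-variance portfolios are known, each convex combination follows the usual two-asset formulas $E[r_p]=wE[r_{EMV}]+(1-w)E[r_{MV}]$ and $\sigma_p^2=w^2\sigma_{EMV}^2+(1-w)^2\sigma_{MV}^2+2w(1-w)\sigma_{EMV,MV}$.

# +
import numpy as np
import pandas as pd

# Illustrative (made-up) summary statistics for the two frontier portfolios
E_emv, s_emv = 0.15, 0.20           # expected return and volatility of the EMV portfolio
E_mv, s_mv = 0.11, 0.14             # expected return and volatility of the min-variance portfolio
cov_emv_mv = 0.9 * s_emv * s_mv     # assumed covariance between the two portfolios

w = np.linspace(0, 1, 101)          # weight on the EMV portfolio
frontier_demo = pd.DataFrame({'w': w})
frontier_demo['E[r]'] = w * E_emv + (1 - w) * E_mv
frontier_demo['sigma'] = np.sqrt(w**2 * s_emv**2 + (1 - w)**2 * s_mv**2
                                 + 2 * w * (1 - w) * cov_emv_mv)
frontier_demo.head()
# -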
# ## 2. Illustrative example.
#
# We return to the example of stock markets in the $G5$ member countries: US, UK, France, Germany and Japan (the data columns keep their Spanish labels: 'EU', 'RU', 'Francia', 'Alemania', 'Japon').
# Importamos pandas y numpy
import pandas as pd
import numpy as np
# +
# Resumen en base anual de rendimientos esperados y volatilidades
annual_ret_summ = pd.DataFrame(columns=['EU', 'RU', 'Francia', 'Alemania', 'Japon'], index=['Media', 'Volatilidad'])
annual_ret_summ.loc['Media'] = np.array([0.1355, 0.1589, 0.1519, 0.1435, 0.1497])
annual_ret_summ.loc['Volatilidad'] = np.array([0.1535, 0.2430, 0.2324, 0.2038, 0.2298])
annual_ret_summ.round(4)
# -
# Matriz de correlación
corr = pd.DataFrame(data= np.array([[1.0000, 0.5003, 0.4398, 0.3681, 0.2663],
[0.5003, 1.0000, 0.5420, 0.4265, 0.3581],
[0.4398, 0.5420, 1.0000, 0.6032, 0.3923],
[0.3681, 0.4265, 0.6032, 1.0000, 0.3663],
[0.2663, 0.3581, 0.3923, 0.3663, 1.0000]]),
columns=annual_ret_summ.columns, index=annual_ret_summ.columns)
corr.round(4)
# Tasa libre de riesgo
rf = 0.05
# This time, we will assume that all the stock markets and the risk-free asset are available to us.
# #### 1. Build the minimum-variance frontier
# ##### 1.1. Find the minimum-variance portfolio
# Importamos funcion minimize del modulo optimize de scipy
# +
## Construcción de parámetros
# 1. Sigma: matriz de varianza-covarianza Sigma = S.dot(corr).dot(S)
# 2. Eind: rendimientos esperados activos individuales
# -
# Función objetivo
# +
# Número de activos
# Dato inicial
# Cotas de las variables
# Restricciones
# -
# Portafolio de mínima varianza
# Pesos, rendimiento y riesgo del portafolio de mínima varianza
# ##### 1.2. Find the EMV (maximum Sharpe ratio) portfolio
# Función objetivo
# +
# Número de activos
# Dato inicial
# Cotas de las variables
# Restricciones
# -
# Portafolio EMV
# Pesos, rendimiento y riesgo del portafolio EMV
# ##### 1.3. Build the minimum-variance frontier
# We must also find the covariance (or correlation) between these two portfolios:
# Covarianza entre los portafolios
# Correlación entre los portafolios
# Vector de w
# DataFrame de portafolios:
# 1. Índice: i
# 2. Columnas 1-2: w, 1-w
# 3. Columnas 3-4: E[r], sigma
# 4. Columna 5: Sharpe ratio
# Importar librerías de gráficos
# +
# Gráfica de dispersión de puntos coloreando
# de acuerdo a SR, los activos individuales
# y los portafolios hallados
# Frontera
# Activos ind
# Port. óptimos
# Etiquetas de los ejes
# Leyenda
# -
# **From the above, all that remains is to build the LAC (capital allocation line) and choose the capital allocation according to the investor's preferences (risk aversion).**
# ___
# ## 3. Final remarks
#
# ### 3.1. Additional constraints
#
# Investors may have additional constraints:
# 1. Restrictions on short positions.
# 2. They may require a minimum return.
# 3. Socially responsible investing: they forgo investments in businesses or countries considered ethically or politically undesirable.
#
# All of the above can be included as constraints in the optimization problem, and can be accommodated at the cost of a lower Sharpe ratio.
# ### 3.2. Criticisms of mean-variance optimization
# 1. Only means and variances matter: recall that the variance underestimates risk in some cases.
# 2. Mean-variance preferences treat gains and losses symmetrically: the dissatisfaction from a loss is greater than the satisfaction from a gain of the same size (loss aversion).
# 3. Risk aversion is constant: the attitude toward risk can change, for example with the state of the economy.
# 4. Short horizon (a single period).
# 5. Garbage in - garbage out: mean-variance optimization is extremely sensitive to its inputs: the estimates of expected returns and variances.
# ___
# # Announcements
#
# ## 1. Quiz next class (covering classes 12, 13, and 14).
# ## 2. Review the "Tarea 6" (Homework 6) file.
# ## 3. [Interesting note](http://yetanothermathprogrammingconsultant.blogspot.com/2016/08/portfolio-optimization-maximize-sharpe.html)
# <script>
# $(document).ready(function(){
# $('div.prompt').hide();
# $('div.back-to-top').hide();
# $('nav#menubar').hide();
# $('.breadcrumb').hide();
# $('.hidden-print').hide();
# });
# </script>
#
# <footer id="attribution" style="float:right; color:#808080; background:#fff;">
# Created with Jupyter by <NAME>.
# </footer>
| Modulo3/Clase14_SeleccionOptimaPortII.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import h5py
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# %load_ext autoreload
# %autoreload 2
np.random.seed(1)
# -
def zero_pad(X, pad):
    # Zero-pad the height and width dimensions of a batch of images shaped (m, n_H, n_W, n_C)
    X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode = 'constant', constant_values = (0, 0))
    return X_pad
# +
np.random.seed(1)
x = np.random.randn(4, 3, 3, 2)
x_pad = zero_pad(x, 2)
print ("x.shape =\n", x.shape)
print ("x_pad.shape =\n", x_pad.shape)
print ("x[1,1] =\n", x[1,1])
print ("x_pad[1,1] =\n", x_pad[1,1])
fig, axarr = plt.subplots(1, 2)
axarr[0].set_title('x')
axarr[0].imshow(x[0,:,:,0])
axarr[1].set_title('x_pad')
axarr[1].imshow(x_pad[0,:,:,0])
# -
def conv_single_step(a_slice_prev, W, b):
    # Apply one filter to a single slice of the previous activations:
    # elementwise product, sum over all entries, then add the (scalar) bias
    s = np.multiply(a_slice_prev, W)
    Z = np.sum(s)
    Z = Z + float(b)
    return Z
def conv_forward(A_prev, W, b, hparameters):
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
(f, f, n_C_prev, n_C) = W.shape
stride = hparameters['stride']
pad = hparameters['pad']
n_H = int((n_H_prev + 2 * pad - f) / stride) + 1
n_W = int((n_W_prev + 2 * pad - f) / stride) + 1
Z = np.zeros((m, n_H, n_W, n_C))
A_prev_pad = zero_pad(A_prev, pad)
for i in range(m):
a_prev_pad = A_prev_pad[i, :, :, :]
for h in range(n_H):
vert_start = h * stride
vert_end = vert_start + f
for w in range(n_W):
horiz_start = w * stride
horiz_end = horiz_start + f
for c in range(n_C):
a_slice_prev = a_prev_pad[vert_start : vert_end, horiz_start : horiz_end, :]
weights = W[:, :, :, c]
biases = b[:, :, :, c]
Z[i, h, w, c] = conv_single_step(a_slice_prev, weights, biases)
cache = (A_prev, W, b, hparameters)
return Z, cache
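# Added usage check (not part of the original notebook, illustrative shapes only):
# run conv_forward on random data and confirm the output height/width follow
# floor((n_prev + 2*pad - f)/stride) + 1.
np.random.seed(1)
A_prev_chk = np.random.randn(2, 5, 7, 4)
W_chk = np.random.randn(3, 3, 4, 8)
b_chk = np.random.randn(1, 1, 1, 8)
Z_chk, _ = conv_forward(A_prev_chk, W_chk, b_chk, {"pad": 1, "stride": 2})
print("Z_chk.shape =", Z_chk.shape)   # expected (2, 3, 4, 8)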
def pool_forward(A_prev, hparameters, mode = "max"):
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
f = hparameters["f"]
stride = hparameters["stride"]
n_H = int(1 + (n_H_prev - f) / stride)
n_W = int(1 + (n_W_prev - f) / stride)
n_C = n_C_prev
A = np.zeros((m, n_H, n_W, n_C))
for i in range(m):
for h in range(n_H):
vert_start = h * stride
vert_end = vert_start + f
for w in range(n_W):
horiz_start = w * stride
horiz_end = horiz_start + f
for c in range (n_C):
a_prev_slice = A_prev[i, vert_start : vert_end, horiz_start : horiz_end, c]
if mode == "max":
A[i, h, w, c] = np.max(a_prev_slice)
elif mode == "average":
A[i, h, w, c] = np.mean(a_prev_slice)
cache = (A_prev, hparameters)
return A, cache
def conv_backward(dZ, cache):
(A_prev, W, b, hparameters) = cache
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
(f, f, n_C_prev, n_C) = W.shape
stride = hparameters["stride"]
pad = hparameters["pad"]
(m, n_H, n_W, n_C) = dZ.shape
dA_prev = np.zeros((m, n_H_prev, n_W_prev, n_C_prev))
dW = np.zeros((f, f, n_C_prev, n_C))
db = np.zeros((1, 1, 1, n_C))
A_prev_pad = zero_pad(A_prev, pad)
dA_prev_pad = zero_pad(dA_prev, pad)
for i in range(m):
a_prev_pad = A_prev_pad[i]
da_prev_pad = dA_prev_pad[i]
for h in range(n_H):
for w in range(n_W):
for c in range(n_C):
vert_start = h * stride
vert_end = vert_start + f
horiz_start = w * stride
horiz_end = horiz_start + f
a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]
dW[:,:,:,c] += a_slice * dZ[i, h, w, c]
db[:,:,:,c] += dZ[i, h, w, c]
dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad, :]
return dA_prev, dW, db
def create_mask_from_window(x):
mask = x == np.max(x)
return mask
def distribute_value(dz, shape):
(n_H, n_W) = shape
average = dz / (n_H * n_W)
a = np.ones(shape) * average
return a
def pool_backward(dA, cache, mode = "max"):
(A_prev, hparameters) = cache
stride = hparameters['stride']
f = hparameters['f']
m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape
m, n_H, n_W, n_C = dA.shape
dA_prev = np.zeros_like(A_prev)
for i in range(m):
a_prev = A_prev[i]
for h in range(n_H):
for w in range(n_W):
for c in range(n_C):
vert_start = h * stride
vert_end = vert_start + f
horiz_start = w * stride
horiz_end = horiz_start + f
if mode == "max":
a_prev_slice = a_prev[vert_start: vert_end, horiz_start: horiz_end, c]
mask = create_mask_from_window(a_prev_slice)
dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += mask * dA[i, h, w, c]
elif mode == "average":
da = dA[i, h, w, c]
shape = (f, f)
dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += distribute_value(da, shape)
return dA_prev
# +
np.random.seed(1)
A_prev = np.random.randn(5, 5, 3, 2)
hparameters = {"stride" : 1, "f": 2}
A, cache = pool_forward(A_prev, hparameters)
dA = np.random.randn(5, 4, 2, 2)
dA_prev = pool_backward(dA, cache, mode = "max")
print("mode = max")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
print()
dA_prev = pool_backward(dA, cache, mode = "average")
print("mode = average")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
| Basic ConvNet/Basic ConvNet Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Import required modules
# +
import sys
sys.path.append('../')
import matplotlib.pyplot as plt
from porousmedialab.batch import Batch
import numpy as np
import seaborn as sns
# %matplotlib inline
# -
# ### Initiate the model
bl = Batch(tend = 1, dt = 0.01)
# ### Create your solution
# +
# Acids
bl.add_species(name='H2CO3', init_conc=0)
bl.add_species(name='HCO3', init_conc=0)
bl.add_species(name='CO3', init_conc=0)
bl.add_acid(species=['H2CO3', 'HCO3', 'CO3'], pKa=[3.6, 10.32])
bl.add_species(name='H2SO4', init_conc=0)
bl.add_species(name='HSO4', init_conc=0)
bl.add_species(name='SO4', init_conc=0)
bl.add_acid(species=['H2SO4', 'HSO4', 'SO4'], pKa=[-10, 1.99])
# Minerals
bl.add_species(name='CaCO3', init_conc=0)
bl.add_species(name='CaSO4', init_conc=0)
# dissolved Ca
bl.add_species(name='Ca', init_conc=0)
bl.add_ion(name='Ca', charge=2)
# -
# ### Specify rate constants
bl.constants['Ks_CaCO3'] = 3.3e-9
bl.constants['Ks_CaSO4'] = 10**-4.58
bl.constants['k_pre'] = 1e-4
bl.constants['k_dis'] = 1e-3
# ### Specify rates
bl.rates['R_pre_CaCO3'] = 'k_pre * (Ca*CO3/Ks_CaCO3-1)'
bl.rates['R_pre_CaSO4'] = 'k_pre * (Ca*SO4/Ks_CaSO4-1)'
bl.rates['R_dis_CaCO3'] = 'k_dis * CaCO3 * (1 - Ca*CO3/Ks_CaCO3)'
bl.rates['R_dis_CaSO4'] = 'k_dis * CaSO4 * (1 - Ca*SO4/Ks_CaSO4)'
# ### ODE
bl.dcdt['CaCO3'] = 'R_pre_CaCO3 - R_dis_CaCO3'
bl.dcdt['Ca'] = '-R_pre_CaCO3 + R_dis_CaCO3 - R_pre_CaSO4 + R_dis_CaSO4+1.4e-2'
bl.dcdt['CO3'] = '-R_pre_CaCO3 + R_dis_CaCO3+0.7e-2'
bl.dcdt['CaSO4'] = 'R_pre_CaSO4 - R_dis_CaSO4'
bl.dcdt['SO4'] = '-R_pre_CaSO4 + R_dis_CaSO4+0.7e-2'
bl.solve()
bl.plot_profiles()
ax = plt.subplot(111)
ax.plot(bl.time, np.log10(bl.SO4['concentration'][0]*bl.Ca['concentration'][0]/bl.constants['Ks_CaSO4']), label='CaSO4', lw=3)
ax.plot(bl.time, np.log10(bl.CO3['concentration'][0]*bl.Ca['concentration'][0]/bl.constants['Ks_CaCO3']), label='CaCO3', lw=3)
ax.axhline(0, c='k')
ax.set_ylabel('Saturation index')
ax.set_xlabel('Time')
ax.grid(lw=0.2)
ax.legend(frameon=1)
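# The quantity plotted above is the saturation index, $SI = \log_{10}(\mathrm{IAP}/K_s)$, e.g. $\log_{10}([\mathrm{Ca}][\mathrm{SO_4}]/K_{s,\mathrm{CaSO_4}})$ for gypsum: $SI > 0$ means the solution is supersaturated and the mineral tends to precipitate, while $SI < 0$ means it tends to dissolve, which is how the rate expressions above switch between precipitation and dissolution.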
# +
# bl.reconstruct_rates()
# bl.plot_rates()
# -
| examples/Batch - Calcite-Gypsum Evaporation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
os.environ['SPARK_HOME'] = '/home/envmodules/lib/spark-2.2.0-bin-hadoop2.7/'
import findspark
findspark.init()
from pyspark.ml.classification import RandomForestClassifier, LogisticRegression
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator
from pyspark.ml.feature import StringIndexer
from pyspark.ml.feature import VectorAssembler
from pyspark.mllib.evaluation import MulticlassMetrics
from pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit
from pyspark.sql import SparkSession
from pyspark.sql.functions import isnan, when, count, col
import pandas as pd
spark = SparkSession.builder.appName('733').getOrCreate()
# Using the integrated file to start working on
integrated_df = spark.read.parquet('/user/vcs/annual_integrated_dataset_with_labels_ibes_fix_v2.parquet').cache()
def find_performance_metrics(res, model_used):
res = res.withColumn('correct', res.label == res.prediction)
    num_rows = res.count()
    accuracy = res.filter(res.label == res.prediction).count() / num_rows
# positive class (misstatements)
true_positives_df = res.filter(res.prediction == 1.0).filter(res.label == 1.0)
ground_truth_positives_df = res.filter(res.label == 1.0)
misstatement_recall = true_positives_df.count() / ground_truth_positives_df.count()
new_all_predicted_positive_df = res.filter(res.prediction == 1.0)
misstatement_precision = true_positives_df.count() / new_all_predicted_positive_df.count()
# negative class (non misstatements)
true_negative_df = res.filter(res.prediction == 0.0).filter(res.label == 0.0)
ground_truth_negative_df = res.filter(res.label == 0.0)
non_misstatement_recall = true_negative_df.count() / ground_truth_negative_df.count()
new_all_predicted_negative_df = res.filter(res.prediction == 0.0)
non_misstatement_precision = true_negative_df.count() / new_all_predicted_negative_df.count()
d = {'model_used': model_used, 'accuracy': accuracy, \
'misstatement_precision': misstatement_precision, \
'misstatement_recall': misstatement_recall}
df = pd.DataFrame(data=d, index=[0])
file_name = "performance_metrics" + "".join(model_used.split()) + ".csv"
df.to_csv(file_name, encoding='utf-8')
print("Using {}".format(model_used))
print('accuracy is {}'.format(accuracy))
print('misstatement_precision is {}, misstatement recall is {}'.format(misstatement_precision, misstatement_recall))
print('non_misstatement_precision is {}, non_misstatement recall is {}'.format(non_misstatement_precision,
non_misstatement_recall))
# Downsampling:
misstated_df = integrated_df.filter(integrated_df.label == 1.0)
misstated_count = misstated_df.count()
non_misstated_df = integrated_df.filter(integrated_df.label == 0.0).limit(misstated_count)
integrated_df = misstated_df.union(non_misstated_df).cache()
# Using nullcounts to filter columns to keep
nullcounts = integrated_df.select([count(when(isnan(c) | col(c).isNull(), c)).alias(c) for c in integrated_df.columns])
nc = list(nullcounts.first())
# Services-packaged software category selection (from EDA)
services_prepacked_software = integrated_df # .filter(integrated_df.sic == '7372')
print('Total records in integrated file: ', integrated_df.count())
print('Number of records in Services-packaged software industrial category: ', services_prepacked_software.count())
# Reusing preprocessing steps implemented by Vincent
# filling nulls and nones with zeroes.
some_dict = {}
for x in services_prepacked_software.columns:
some_dict[x] = 0
nwdf = services_prepacked_software.fillna(some_dict)
good_columns = []
for i in range(0, len(nc)):
if nc[i] == 0:
good_columns.append(i)
great_columns = [nwdf.columns[i] for i in good_columns]
great_columns.append('rea')
nwdf = nwdf.fillna(some_dict)
# dropping all string columns
non_string_columns = [k for (k, v) in nwdf.dtypes if v != 'string']
nwdf_no_strings = nwdf.select(*non_string_columns)
feature_columns = [item for item in nwdf_no_strings.columns if item not in ['rea', 'features', 'label', 'rea_label']]
assembler = VectorAssembler(inputCols=feature_columns, outputCol="features")
final_df = assembler.transform(nwdf_no_strings)
final_final_df = final_df.drop(*feature_columns).cache()
# String indexing not required
stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
si_model = stringIndexer.fit(final_final_df)
td = si_model.transform(final_final_df)
# Evaluators
evaluator = MulticlassClassificationEvaluator(metricName='accuracy')
eval = BinaryClassificationEvaluator()
# RandomForest classifier
rf = RandomForestClassifier(numTrees=100, maxDepth=16, labelCol="indexed", seed=42)
model = rf.fit(td)
result = model.transform(final_final_df)
print('Accuracy on training data: ', evaluator.evaluate(result))
# Train test split for model evaluation
train, test = final_final_df.randomSplit([0.7, 0.3], seed=12345)
train.cache()
test.cache()
# -
final_final_df.count()
# +
# ---------------
# Random Forest:
# ---------------
rf = RandomForestClassifier(numTrees=100, maxDepth=16, labelCol="label", seed=42)
print('Training RandomForest model on training set. \n Model parameters: {}'.format(rf._paramMap))
trained_model = rf.fit(train)
res = trained_model.transform(test)
metrics = MulticlassMetrics(res.select(['label', 'prediction']).rdd)
print('Accuracy on test set: ', evaluator.evaluate(res))
print('Area under ROC curve: ', eval.evaluate(res))
find_performance_metrics(res, "random forest")
# +
# ---------------
# Logistic regression:
# ---------------
print('Training LogisticRegression model on training set.')
logistic = LogisticRegression(regParam=0.1, labelCol="label") # , thresholds = [0.2, 0.5])
trained_model = logistic.fit(train)
res = trained_model.transform(test)
metrics = MulticlassMetrics(res.select(['label', 'prediction']).rdd)
print('Accuracy on test set: ', evaluator.evaluate(res))
print('Area under ROC curve: ', eval.evaluate(res))
find_performance_metrics(res, "logistic regression")
# Extract the summary from the returned LogisticRegressionModel instance trained
# in the earlier example
trainingSummary = trained_model.summary
# Obtain the objective per iteration
objectiveHistory = trainingSummary.objectiveHistory
print("objectiveHistory:")
for objective in objectiveHistory:
print(objective)
# Obtain the receiver-operating characteristic as a dataframe and areaUnderROC.
trainingSummary.roc.show()
print("areaUnderROC: " + str(trainingSummary.areaUnderROC))
# Set the model threshold to maximize F-Measure
fMeasure = trainingSummary.fMeasureByThreshold
maxFMeasure = fMeasure.groupBy().max('F-Measure').select('max(F-Measure)').head()
bestThreshold = fMeasure.where(fMeasure['F-Measure'] == maxFMeasure['max(F-Measure)']).select('threshold').head()[
'threshold']
logistic.setThreshold(bestThreshold)
print('best threshold is:' + str(bestThreshold))
print("For Logistic regression:")
trained_model = logistic.fit(train)
res = trained_model.transform(test)
metrics = MulticlassMetrics(res.select(['label', 'prediction']).rdd)
print('Accuracy on test set: ', evaluator.evaluate(res))
print('Area under ROC curve: ', eval.evaluate(res))
# find_performance_metrics(res, "logistic regression")
find_performance_metrics(res, "logistic regression with best threshold")
df = pd.DataFrame(
{'lr_coeff': trained_model.coefficients,
'feature_column': feature_columns,
})
df['abs_lr_coeff'] = df['lr_coeff'].abs()
df = df = df.sort_values('abs_lr_coeff', ascending=False).reset_index()
print(df.head())
# +
# ------------------------------------------------------------
# Code for making use of validation set for parameter tuning
train, test = final_final_df.randomSplit([0.9, 0.1], seed=12345)
lr = LogisticRegression()
# We use a ParamGridBuilder to construct a grid of parameters to search over.
# TrainValidationSplit will try all combinations of values and determine best model using
# the evaluator.
paramGrid = ParamGridBuilder()\
.addGrid(lr.regParam, [0.2, 0.15, 0.1, 0.01]) \
.addGrid(lr.threshold, [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0])\
.build()
# A TrainValidationSplit requires an Estimator, a set of Estimator ParamMaps, and an Evaluator.
tvs = TrainValidationSplit(estimator=lr,
estimatorParamMaps=paramGrid,
evaluator=BinaryClassificationEvaluator(),
# 80% of the data will be used for training, 20% for validation.
trainRatio=0.8)
# Run TrainValidationSplit, and choose the best set of parameters.
model = tvs.fit(train)
# Make predictions on test data. model is the model with combination of parameters
# that performed best.
res = model.transform(test)
find_performance_metrics(res, 'logistic_with_validation')
# +
# ---------------------------------------------------------
# For RandomForest
rf = RandomForestClassifier()
paramGrid = ParamGridBuilder()\
.addGrid(rf.numTrees, [50, 100, 150, 200]) \
.addGrid(rf.maxDepth, [4, 8, 12, 16, 18, 20])\
.build()
tvs = TrainValidationSplit(estimator=rf,
estimatorParamMaps=paramGrid,
evaluator=BinaryClassificationEvaluator(),
# 80% of the data will be used for training, 20% for validation.
trainRatio=0.8)
model = tvs.fit(train)
res = model.transform(test)
find_performance_metrics(res, 'rf_with_validation')
# -
| machine_learning/rf_and_logistic_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
x=np.arange(0,10)
y=np.arange(11,21)
x
y
a=np.arange(40,50)
b=np.arange(50,60)
# ### Scatter plot
plt.scatter(x,y,c='g')
plt.xlabel('x axis')
plt.ylabel('y axis')
plt.title('Graph in 2D')
plt.savefig("fig.png")
y=x*x
plt.plot(x,y,'r*-',linestyle='dashed',linewidth=2,markersize=12)
# +
#creating subplot
plt.subplot(2,2,1)
plt.plot(x,y,'r--')
plt.subplot(2,2,2)
plt.plot(x,y,'g*-')
plt.subplot(2,2,3)
plt.plot(x,y,'bo')
plt.subplot(2,2,4)
plt.plot(x,y,'r')
# -
x=np.arange(1,11)
y=3*x+5
plt.plot(x,y)
np.pi
x=np.arange(0,4*np.pi,0.1)
y=np.sin(x)
plt.title("sine wave form")
plt.plot(x,y)
# +
x=np.arange(0,5*np.pi,0.1)
y_sin=np.sin(x)
y_cos=np.cos(x)
plt.subplot(2,2,1)
plt.plot(x,y_sin,'r--')
plt.title('sine wave')
plt.subplot(2,2,2)
plt.plot(x,y_cos,'g--')
plt.title('cos wave')
# -
# ### Bar Plot
# +
x=[2,8,10]
y=[11,16,9]
x2=[3,9,11]
y2=[6,15,7]
plt.bar(x,y)
plt.bar(x2,y2,color='g')
# -
# ### Histogram
a=np.array([22,87,5,43,56,73,55,54,11,20,51,5,79,31,27])
plt.hist(a,bins=20)
plt.show()
# ### Box Plot
data=[np.random.normal(0,std,100) for std in range(1,4)]
plt.boxplot(data,vert=True,patch_artist=True)
plt.show()
data
# ### Pie Chart
# +
labels='Python','C++','Ruby','Java'
sizes=[215,130,245,210]
colors=['gold','yellowgreen','lightcoral','lightskyblue']
explode=(0.1,0,0,0)
plt.pie(sizes,explode=explode,labels=labels,colors=colors,
autopct="%1.1f%%",shadow=True)
plt.axis('equal')
plt.show()
# -
| Machine Learning/ML using Python/MatplotLib .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:face-recognition]
# language: python
# name: conda-env-face-recognition-py
# ---
# The goal of this notebook is to choose the number of PCA components for the feature extraction part. We want the number to provide the best possible accuracy without complicating the model too much.
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from src.models.knn_model import create_knn_model
from src.features.build_features import create_pca_model
train_data_filepath = os.path.join('..', 'data', 'raw', 'face_data_train.csv')
train_labels_filepath = os.path.join('..', 'data', 'raw', 'labels_train.csv')
test_data_filepath = os.path.join('..', 'data', 'raw', 'face_data_test.csv')
test_labels_filepath = os.path.join('..', 'data', 'raw', 'labels_test.csv')
train_data = pd.read_csv(train_data_filepath)
train_labels = pd.read_csv(train_labels_filepath)
test_data = pd.read_csv(test_data_filepath)
test_labels = pd.read_csv(test_labels_filepath)
scores = list()
for eigenface_count in range(1, train_data.shape[1] + 1):
    pca = create_pca_model(eigenface_count, train_data)
    current_train_data = pca.transform(train_data)
    current_test_data = pca.transform(test_data)
    model = create_knn_model(1, current_train_data, train_labels.values.ravel())
    current_score = accuracy_score(model.predict(current_test_data), test_labels)
    scores.append(current_score)
max(scores) # best possible accuracy
scores.index(max(scores)) + 1 # minimal number of components required to achieve the best accuracy
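# A small visual check (my addition, using the matplotlib/seaborn imports above): plot the
# test accuracy against the number of PCA components to see where the curve flattens out.
# +
sns.set_style('whitegrid')
plt.figure(figsize=(8, 4))
plt.plot(range(1, len(scores) + 1), scores)
plt.xlabel('Number of PCA components (eigenfaces)')
plt.ylabel('Test accuracy')
plt.title('kNN accuracy vs. number of components')
plt.show()
# -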
| notebooks/03-szymanskir-knn-model-feature-tweaking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from Data_Processing import DataProcessing
from sklearn.model_selection import train_test_split
import joblib
from sklearn.ensemble import RandomForestRegressor
from datetime import datetime
from sklearn.metrics import mean_squared_error
import warnings
#warnings.filterwarnings('ignore')
# -
pop = pd.read_csv('../Data/population.csv')
train = pd.read_csv('../Data/train.csv')
test = pd.read_csv('../Data/test.csv')
X, y, test = DataProcessing(train, test, pop)
y = y.ravel()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# ## Random Forest Regressor
# +
model = RandomForestRegressor()
model.fit(X_train, y_train)
# +
y_true = y_test
y_pred = model.predict(X_test)
mean_squared_error(y_true, y_pred)
# -
joblib.dump(model, '../Models/RandomForest.h5')
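# A quick follow-up sketch (my addition): inspect which input features the fitted forest
# relies on most. Feature names are not available here, so column indices are printed instead.
# +
importances = model.feature_importances_
for idx in np.argsort(importances)[::-1][:10]:
    print("feature", idx, ":", round(float(importances[idx]), 4))
# -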
| Notebooks/Random Forest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Today's concepts
# Applying the Linear Regression algorithm to:
# 1. predict an employee's salary from their experience
# 2. predict the revenue of Reliance Industries from the number of Jio subscribers
# ### Linear Regression
#
# Linear regression is an example of a linear model.
#
# A linear model predicts the target value as a weighted sum of the input variables.
#
# y = mx + c
#
# where
# 1. y is the target -----> salary
# 2. m is the slope of the line
# 3. x is the input/feature -----> experience
# 4. c is a constant (the intercept)
# #### 1. Prediction of an employee's salary from experience
# **1. Get the data**
import pandas as pd
df = pd.read_csv('Salary_Data.csv')
df.head()
# **How many rows and columns are available in df?**
df.shape
# **What are the column names?**
df.columns
df
# **2.Pre-processing**
# **Are there any missing values?**
df.isna().sum()
# **Are there any invalid values?**
df.info()
# **Which type of problem is it?**
# The target is the Salary column.
#
#
# Since the target column is continuous, it is a regression problem.
# **When do we apply Linear Regression?**
#
# When there is a linear relation between the feature and the target:
# 1. Positive linear relation
# if the feature/input value increases, the target also increases
# examples
# Experience vs Salary
# Temperature increase vs Sales of ice cream
# Price of a house vs Land area
#
# 2. Negative linear relation
# if the feature and the target are inversely proportional
# examples
# Production decreases vs Price increases
# GDP decreases vs Unemployment increases
#
# 3. No relation
import matplotlib.pyplot as plt
# **simple plot**
# +
plt.figure()
plt.scatter([1,2,3,5], [7,8,9,10] ,c='red' )
plt.show()
# -
plt.figure()
plt.scatter(df['YearsExperience'],df['Salary'],c='green')
plt.xlabel('Experience of employee')
plt.ylabel('Salary of employee')
plt.title('Experience Vs Salary of employee')
plt.show()
# Separate the features and the target and save them in variables.
# By convention, developers use X (upper case) for the features
# and y (lower case) for the target.
X = df[['YearsExperience']]
y = df['Salary']
# **3. Train model**
# **We have to identify the algorithm**
#
#
# Applying Linear Regression algorithm
from sklearn.linear_model import LinearRegression
# To train the model we again follow 2 steps:
# a. create an object for the algorithm
model = LinearRegression()
# b. train the model with the features and the target
#
#
# model.fit(features,target)
model.fit(X,y)
# **Test the model**
# **How much salary can an employee expect with 6 years of experience?**
# model.predict([[feature1,feature2,.....]])
model.predict([[6]])
# **How much salary can employees expect with 6 and 11 years of experience?**
model.predict([[6] , [11] ])
# **Performance check**
#
#
# model.score(features, target) * 100  (for regression this is the R² score)
print("R² score of the model is ", model.score(X, y) * 100)
# **Improve the model**
#
# In coming sessions we will learn improvement techniques.
# #### 2. Prediction of the revenue of Reliance Industries from the number of Jio subscribers
# The target is the revenue
#
# The feature is the number of Jio subscribers
# **1. Get the data**
import pandas as pd
df = pd.read_csv('reliance.csv')
df
df.shape
df.columns
# **2.Pre-processing**
df.isna().sum()
df.info()
import matplotlib.pyplot as plt
# +
plt.figure()
plt.scatter(df['JioSubscribers'],df['Revenue'])
plt.xlabel('Jio Subscribers')
plt.ylabel('Revenue')
plt.title('Jio Subscribers Vs Revenue')
plt.show()
# -
X = df[['JioSubscribers']]
y = df['Revenue']
# **Train the model**
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X,y)
# **Test the model**
# How much revenue do we expect with 400 Jio subscribers?
model.predict([[400]])
print('score',model.score(X,y)*100)
# **metrics**
from sklearn.metrics import r2_score
# r2_score(y_actual,y_predicted)*100
y_predict = model.predict(X)
r2_score(y, y_predict )* 100
# y = mx + c
#
#
# **coefficient**
model.coef_
# **intercept**
model.intercept_
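# As a quick check (my addition): the fitted line y = m*x + c should reproduce what
# model.predict returns, with m = model.coef_[0] and c = model.intercept_.
# +
m = model.coef_[0]
c = model.intercept_
print(m * 400 + c, model.predict([[400]])[0])
# -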
# **Visualization**
# +
plt.figure()
plt.scatter(df['JioSubscribers'],df['Revenue'],c='blue',label='true values')
plt.plot(df['JioSubscribers'],model.predict(X),c='red',label='predicted line')
plt.xlabel('Jio subscribers')
plt.ylabel('Revenue')
plt.title('Jio subscribers vs Revenue')
plt.legend()
plt.show()
# -
# Task
# **Prediction of a person's brain weight from head size**
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/AP-State-Skill-Development-Corporation/Datasets/master/Regression/headbrain.csv')
df.shape
df.columns
df = df[['Head Size(cm^3)', 'Brain Weight(grams)']]
df.shape
# **Pre-processing**
df.isna().sum()
df.info()
plt.figure()
plt.scatter(df['Head Size(cm^3)'],df['Brain Weight(grams)'],c='green')
plt.xlabel('Head Size')
plt.ylabel('Brain weight')
plt.show()
X = df[['Head Size(cm^3)']]
y = df['Brain Weight(grams)']
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X,y)
print('R² score', model.score(X,y)*100)
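# For example (my addition), we can now predict the brain weight for a hypothetical
# head size of 4000 cm^3:
model.predict([[4000]])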
| Day-2/02_Day2_27Oct2020.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/FairozaAmira/basic_algorithms_a/blob/master/Lecture07.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="9K7vXB1GTx8N" colab_type="text"
# # Solutions to the in-class exercises of Lecture 7
#
# ## Sets
#
# 1. Create a variable `List_A` and store `[1,1,2,5,5,5,8,8,9]` in it. Create a variable `List_B` and store `[1,2,2,3,3,3,6,10]` in it.
# + id="uTXr_Rv-Tblv" colab_type="code" outputId="60a3ecb8-9e1d-4ef5-aee1-e32101c3ae56" colab={"base_uri": "https://localhost:8080/", "height": 50}
List_A = [1,1,2,5,5,5,8,8,9]
print(List_A)
List_B = [1,2,2,3,3,3,6,10]
print(List_B)
# + [markdown] id="73zV7A-JWHjo" colab_type="text"
# 2. Create sets from `List_A` and `List_B`.
# + id="KDwQbKrnWPUg" colab_type="code" outputId="dcd1698e-3ede-4a53-face-96e3e2b4360f" colab={"base_uri": "https://localhost:8080/", "height": 50}
List_A_set = set(List_A)
print(List_A_set)
List_B_set = set(List_B)
print(List_B_set)
# + [markdown] id="sNjAgdP1Wcyt" colab_type="text"
# 3. Find the union of the two sets.
# + id="_xzRcFvbWgNk" colab_type="code" outputId="7f65e079-a048-4ae2-a968-b6a563820ec0" colab={"base_uri": "https://localhost:8080/", "height": 33}
List_A_set | List_B_set
# + id="eADXQ7HiWlgT" colab_type="code" outputId="ae6abcd6-f965-4585-9c31-ec0e65a9a4b4" colab={"base_uri": "https://localhost:8080/", "height": 33}
List_A_set.union(List_B_set)
# + [markdown] id="fi1CCwHJWkC8" colab_type="text"
# 4. Find the intersection of the two sets.
# + id="CouecuaZWtmV" colab_type="code" outputId="ff991e6b-558b-4941-ab89-6a53b4c4711b" colab={"base_uri": "https://localhost:8080/", "height": 33}
List_A_set & List_B_set
# + id="XkWsogYhW2hz" colab_type="code" outputId="970144b6-9fcb-4414-fbc1-f1d1b610bda8" colab={"base_uri": "https://localhost:8080/", "height": 33}
List_A_set.intersection(List_B_set)
# + [markdown] id="plIjuYA9W8fO" colab_type="text"
# 5. Find the elements that are in `List_A` but not in `List_B`.
# + id="UK9mSOITXIJ8" colab_type="code" outputId="18e9bde9-8240-4fe5-890f-3ae34e8fdec1" colab={"base_uri": "https://localhost:8080/", "height": 33}
List_A_set - List_B_set
# + id="Wle56fR5XMSh" colab_type="code" outputId="27a45097-021d-4642-91fe-616a77c8ecf9" colab={"base_uri": "https://localhost:8080/", "height": 33}
List_A_set.difference(List_B_set)
# + [markdown] id="U8ce39bcXREv" colab_type="text"
# 6. Find the elements that are in either `List_A` or `List_B` (but not in both).
# + id="6p3QHYKcXbCo" colab_type="code" outputId="65bc823e-452e-42f6-a94d-bfdca8d3e484" colab={"base_uri": "https://localhost:8080/", "height": 33}
List_A_set ^ List_B_set
# + id="EqYMlTMVXfdp" colab_type="code" outputId="4e7be2a3-e5aa-45f3-8354-4036a61da3e6" colab={"base_uri": "https://localhost:8080/", "height": 33}
List_A_set.symmetric_difference(List_B_set)
# + [markdown] id="-7DvSJutYCJn" colab_type="text"
# ## Dictionaries (dict)
#
# 1. Create a dictionary that maps the Japanese hiragana number words from 1 to 10 to their values, in random order. Example: `いち: 1`
#
# + id="JTUXDDM_YdCT" colab_type="code" outputId="aa34b1f5-710d-42eb-e9b6-f81550ae3bc7" colab={"base_uri": "https://localhost:8080/", "height": 53}
numbers = {'いち': 1, 'さん': 3, 'しち': 7, 'に': 2, 'よん': 4, 'はち': 8, 'じゅう': 10, 'ご': 5, 'ろく': 6, 'きゅう': 9}
print(numbers)
# + [markdown] id="DCcGJQfQYorw" colab_type="text"
# 2. What is the second key of `numbers`?
# + id="O3aFcjtzZGwE" colab_type="code" outputId="c3fea37c-eccf-40c0-a8f2-82c16df36100" colab={"base_uri": "https://localhost:8080/", "height": 181}
print(list(numbers)[1])  # dicts preserve insertion order in Python 3.7+, so this is the second key
# + [markdown] id="eOmRVJcqZ9GC" colab_type="text"
# 3. Find the number of unique keys in `numbers`.
# + id="gozBTNsvaFe6" colab_type="code" outputId="a9478369-b466-46d1-d0e5-a4f95caf5dd1" colab={"base_uri": "https://localhost:8080/", "height": 33}
num_keys = len(numbers)
print(num_keys)
# + [markdown] id="l9wropPNaJ_Z" colab_type="text"
# 4. Check whether `十` is among the keys of `numbers`.
# + id="TcWjBtObZGrw" colab_type="code" outputId="504da9d1-ed8d-4585-8a9d-eef23a71095d" colab={"base_uri": "https://localhost:8080/", "height": 33}
contains_ten_k = numbers.get('十')
print (contains_ten_k)
# + id="b1rRDQGdashR" colab_type="code" outputId="50494eb0-7821-4ced-d390-e7b7d8ab431a" colab={"base_uri": "https://localhost:8080/", "height": 33}
contains_ten_k = "十" in numbers
print(contains_ten_k)
# + [markdown] id="gZGdEwajaoo7" colab_type="text"
# 5. Add `じゅういち: 11` as a new key.
# + id="QPExQ2T-a-ht" colab_type="code" outputId="a2dc27a5-4286-4734-c5f1-b38a69a3522f" colab={"base_uri": "https://localhost:8080/", "height": 53}
numbers['じゅういち'] = 11
print(numbers)
# + [markdown] id="EgFxcvwRbElG" colab_type="text"
# 6. Sort the keys of `numbers`.
# + id="nPcJJQIqbPXF" colab_type="code" outputId="9ac518c5-7426-4465-8db2-826a95fbc56a" colab={"base_uri": "https://localhost:8080/", "height": 53}
sorted_keys = sorted(numbers.keys())
print(sorted_keys)
# + [markdown] id="r-ewNKnpbWwR" colab_type="text"
# 7. Find the first element of the sorted keys.
# + id="-YbTYbp0bcpB" colab_type="code" outputId="b4e7b31c-72e2-4a41-f7e4-014c3d2065ad" colab={"base_uri": "https://localhost:8080/", "height": 33}
print(sorted_keys[0])
# + [markdown] id="py5R95kabzYb" colab_type="text"
# 8. Find the last element of the sorted keys.
# + id="1TdRvGHTb60l" colab_type="code" outputId="394693b9-17b6-411a-a004-fb5a4c865338" colab={"base_uri": "https://localhost:8080/", "height": 33}
print(sorted_keys[-1])
# + [markdown] id="mNl_cYWtb-YS" colab_type="text"
# 9. Create a new list of the values of `numbers`, sorted.
# + id="5bTodJN9cKtN" colab_type="code" outputId="48904e97-1ac6-4229-a453-5471fc037686" colab={"base_uri": "https://localhost:8080/", "height": 33}
sorted_values = sorted(numbers.values())
print(sorted_values)
# + [markdown] id="QmkPYMmkcZ3r" colab_type="text"
# 10. Find the first element of the sorted value list.
# + id="Uqn40GQjchXl" colab_type="code" outputId="29179296-2ceb-41d0-9830-fe16911dc044" colab={"base_uri": "https://localhost:8080/", "height": 33}
print(sorted_values[0])
# + [markdown] id="XkIqisEvckDH" colab_type="text"
# 11. Access the value of `きゅう`.
# + id="BH_4KgL8cqGN" colab_type="code" outputId="07963c25-b898-4b66-fd78-e1700f84c470" colab={"base_uri": "https://localhost:8080/", "height": 33}
numbers["きゅう"]
# + [markdown] id="vyR-KngbdmoO" colab_type="text"
# ## Conditional statements
#
# 1. Write a conditional statement based on the flowchart below.
# 
#
# + id="mKd3GCPud-BO" colab_type="code" outputId="bd554eb2-b5da-41cf-f2ed-f9c3eb353302" colab={"base_uri": "https://localhost:8080/", "height": 67}
print("please insert any numbers")
x = input()
x = int(x)
if x == 0:
    print(x, "is zero")
elif x > 0:
    print(x, "is positive")
elif x < 0:
    print(x, "is negative")
else:
    print(x, "is None")
# + [markdown] id="KkrA9BOnyJc0" colab_type="text"
# 2. **Calculating the price including tax**
#
# The consumption tax differs by country, so calculate the price of the item being sold with the consumption tax included.
#
# Japan - 10% <br/>
# Malaysia - 6% <br/>
# UK - 14% <br/>
# Europe - 20% <br/>
# USA - 5%
#
# Display the tax-included price rounded to two decimal places. Assume the unit price of the item is in USD. Use `input()` to get the values from the user.
#
# Required variables:<br/>
# `raw_price_of_good`<br/>
# `country`
#
#
#
# + id="h97hO4s40tUe" colab_type="code" outputId="1177d6c8-fe79-4418-ff9f-01f1161dbc73" colab={"base_uri": "https://localhost:8080/", "height": 100}
print("Please put country name (Japan, Malaysia, UK, Europe, USA):")
country = input()
print("Please put the raw price of each good:")
raw_price_of_good = input()
raw_price_of_good = int(raw_price_of_good)
if country == 'Japan':
    tax_amount = 0.10  # the problem statement specifies 10% for Japan
    total_cost = raw_price_of_good * (1+tax_amount)
    total_cost = round(total_cost, 2)
    result = "You need to pay {} because you are from {}".format(total_cost, country)
elif country == 'Malaysia':
    tax_amount = 0.06
    total_cost = raw_price_of_good * (1+tax_amount)
    total_cost = round(total_cost, 2)
    result = "You need to pay {} because you are from {}".format(total_cost, country)
elif country == 'UK':
    tax_amount = 0.14
    total_cost = raw_price_of_good * (1+tax_amount)
    total_cost = round(total_cost, 2)
    result = "You need to pay {} because you are from {}".format(total_cost, country)
elif country == 'USA':
    tax_amount = 0.05
    total_cost = raw_price_of_good * (1+tax_amount)
    total_cost = round(total_cost, 2)
    result = "You need to pay {} because you are from {}".format(total_cost, country)
else:
    tax_amount = 0.20
    total_cost = raw_price_of_good * (1+tax_amount)
    total_cost = round(total_cost, 2)
    result = "You need to pay {} because you are from {}".format(total_cost, country)
print(result)
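# -
# A more compact alternative sketch (my addition): keep the country-specific tax rates in
# a dictionary instead of repeating the same branch for every country. It reuses the
# `country` and `raw_price_of_good` values entered above; unknown countries fall back to 20%.
# +
tax_rates = {'Japan': 0.10, 'Malaysia': 0.06, 'UK': 0.14, 'Europe': 0.20, 'USA': 0.05}
tax_amount = tax_rates.get(country, 0.20)
total_cost = round(raw_price_of_good * (1 + tax_amount), 2)
print("You need to pay {} because you are from {}".format(total_cost, country))
# -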
| Lecture07.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to keras
#
# > Keras is a high-level neural networks API, written in Python and capable of running on top of either [TensorFlow](https://www.tensorflow.org),[CNTK](https://github.com/Microsoft/cntk) or [Theano](https://github.com/Theano/Theano). It was developed with a focus on enabling fast experimentation.
# > _Being able to go from idea to result with the least possible delay is key to doing good research._
#
# There are two ways to build Keras models: _sequential_ and _functional_.
#
# The [Sequential API](https://keras.io/getting-started/sequential-model-guide/) is a linear stack of layers. Although restricted to single-input, single-output stacks, it fits most use cases.
# It is limited in that it does not allow you to create models that share layers or have multiple inputs or outputs.
#
# Alternatively, the [Functional API](https://keras.io/getting-started/functional-api-guide/) provides a more flexible way for defining models, allowing to connect layers to any other layer. This results in the definition of complex models, such as multi-output models, directed acyclic graphs, or models with shared layers.
#
# The package offers many recipes to follow and many [examples](https://github.com/fchollet/keras/tree/master/examples) for problems in NLP and image classification.
#
# The example below was originally published at [Mike Irvine's Blog](https://sempwn.github.io/blog/2017/03/24/keras_basic_intro)
# ## Toy dataset
#
# We start by generating data using the [scikit learn library](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html).
# %pylab inline
from sklearn.datasets import make_moons
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split
X, Y = make_moons(noise=0.2, random_state=0, n_samples=1000)
X = scale(X)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5)
X.ndim
fig, ax = plt.subplots()
ax.scatter(X[Y==0, 0], X[Y==0, 1], label='Class 0')
ax.scatter(X[Y==1, 0], X[Y==1, 1], color='r', label='Class 1')
ax.legend()
ax.set(xlabel='X', ylabel='Y', title='Toy binary classification data set');
# Our data has a binary class (0 or 1) with two input dimensions ($x$ and $y$) and is visualised above. In order
# to classify the points correctly, the neural network will need to separate out the zig-zag boundary
# where the two classes meet.
# ## Introduction to the Sequential API
# Now we import the keras library as well as the `Sequential` model class, which forms the basic skeleton for our neural network. We'll only consider one type of layer, the densely connected layer, where all the neurons in a layer are connected to all the neurons in the previous layer.
#
# * Models can be instantiated using the `Sequential()` class.
# * Neural networks are built up from the bottom layer to the top using the `add()` method.
import tensorflow
print(tensorflow.__version__)
# +
# tf1 vs. tf2
# tf1: import keras, tf.Session.run, tf.placeholder, tf.get_variable
# tf2: import tensorflow.keras as keras or use tensorflow.keras since tensorflow was imported already above
# if you need to migrate: https://www.tensorflow.org/guide/migrate
# -
tensorflow.keras.__version__
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow import random
random.set_seed(3) # for reproducible results
# ## Defining the model
#
# We'll create a very simple multi-layer perceptron with one hidden layer.
#
# This is done in keras by first defining a `Sequential` class object. Layers are then added from the initial layer
# that receives the data, so we need to specify the number of input dimensions using the keyword `input_dim`. We also define the activation of this layer to be a rectified linear unit `relu`.
#
# Finally a densely connected layer is added with one output and a `sigmoid` activation corresponding to the binary class.
# +
# Create sequential multi-layer perceptron
model = Sequential()
# The model expects rows of data with 2 input variables (one per column of X).
model.add(Dense(32, input_dim=X.shape[1], activation='relu'))
# The output layer has one node and uses the sigmoid activation function.
model.add(Dense(1, activation='sigmoid'))
# -
# ## Compiling the model
#
# Before training a model, we need to configure the learning process, that is, compiling the model.
# The `compile` method uses the efficient numerical libraries under the covers (the so-called backend) such as Theano or TensorFlow. The backend automatically chooses the best way to represent the network for training and making predictions to run on your hardware, such as CPU or GPU or even distributed.
#
# It expects the following parameters:
# + [optimizer](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers), the optimization algorithm such as `RMSprop` or `adagrad`
# + [loss function](https://www.tensorflow.org/api_docs/python/tf/keras/losses), the objective that the model will try to minimize such as `categorical_crossentropy` or `MSE`
# + any additional [metrics](https://www.tensorflow.org/api_docs/python/tf/keras/metrics) we want to consider. For any classification problem we want to set this to `metrics=['accuracy']`
model.compile(optimizer=tensorflow.keras.optimizers.Adadelta(),
loss=tensorflow.keras.losses.BinaryCrossentropy(),
metrics=['accuracy'])
# ## Investigating the model
#
# Let's see what we've constructed layer by layer:
#
# + **Confirm layer order**. It is easy to add layers in the wrong order with the sequential API or to connect them together incorrectly with the functional API. The graph plot can help you confirm that the model is connected the way you intended.
# + **Confirm the output shape of each layer**. It is common to have problems when defining the shape of input data for complex networks like convolutional and recurrent neural networks. The summary and plot can help you confirm the input shape to the network is as you intended.
# + **Confirm parameters**. Some network configurations can use far fewer parameters, such as the use of a TimeDistributed wrapped Dense layer in an Encoder-Decoder recurrent neural network. Reviewing the summary can help spot cases of using far more parameters than expected.
model.summary()
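# Optionally (my addition, not in the original notebook): render the graph plot mentioned
# above with `plot_model`. This requires the `pydot` and `graphviz` packages to be installed.
from tensorflow.keras.utils import plot_model
plot_model(model, show_shapes=True, to_file='model.png')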
# ## Fitting the model
#
# We have defined our model and compiled it ready for efficient computation.
# Now it is time to execute the model on some data. Keras models are trained on Numpy arrays of input data and labels.
#
#
# The training process will run for a fixed number of iterations through the rows in the dataset, which we must specify using the __epochs__ argument.
#
# We must also set the number of dataset rows that are considered before the model weights are updated within each epoch, called the batch size and set using the __batch_size__ argument.
#
# Training occurs over epochs and each epoch is split into batches:
# + Epoch: One pass through all of the rows in the training dataset.
# + Batch: One or more samples considered by the model within an epoch before weights are updated.
#
#
# For more on that, check out this post on [Machine Learning Mastery Blog](https://machinelearningmastery.com/difference-between-a-batch-and-an-epoch/)
model.fit(X_train, Y_train, batch_size=32, epochs=200,
verbose=0, validation_data=(X_test, Y_test))
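# A small sketch (my addition): `fit()` returns a History object (also stored as
# `model.history`) whose `.history` dict holds the per-epoch loss and metric curves,
# which is handy for spotting under- or over-fitting.
# +
hist = model.history.history
fig, ax = plt.subplots()
ax.plot(hist['loss'], label='train loss')
ax.plot(hist['val_loss'], label='validation loss')
ax.set(xlabel='Epoch', ylabel='Binary cross-entropy')
ax.legend();
# -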
# ## Evaluating the model
#
# Evaluating on the training data would only tell us how well we have modeled the data the network has already seen (train accuracy), but not how it might perform on new data.
#
# Here we instead call the evaluate() function on the held-out test set, passing it input and output pairs that were not used for training.
#
# This will generate a prediction for each input and output pair and collect scores, including the average loss and any metrics you have configured, such as accuracy.
score = model.evaluate(X_test, Y_test, verbose=0)
for i, s in enumerate(score, 0):
    print(model.metrics_names[i], s)
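# As a complementary check (my addition): turn the predicted probabilities on the test set
# into hard 0/1 labels and compute the accuracy by hand; it should match the metric above.
# +
probs = model.predict(X_test, verbose=0)
preds = (probs > 0.5).astype(int).ravel()
print('manual test accuracy:', (preds == Y_test).mean())
# -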
# ## Plotting the model predictions across the grid
# We can create a grid of $(x,y)$ values and then predict the class probability on each of these values using our fitted model. We'll then plot the original data with the underlying probabilities to see what the classification looks like and how it compares to the data.
#
# More details about how to make predictions for classification and regression models can be found at the [Machine Learning Mastery Blog](https://machinelearningmastery.com/how-to-make-classification-and-regression-predictions-for-deep-learning-models-in-keras/)
# +
grid = np.mgrid[-3:3:100j,-3:3:100j]
grid_2d = grid.reshape(2, -1).T
X, Y = grid
prediction_probs = model.predict(grid_2d, batch_size=32, verbose=0)  # predict returns the sigmoid probabilities directly
fig, ax = plt.subplots(figsize=(10, 6))
contour = ax.contourf(X, Y, prediction_probs.reshape(100, 100))
ax.scatter(X_test[Y_test==0, 0], X_test[Y_test==0, 1])
ax.scatter(X_test[Y_test==1, 0], X_test[Y_test==1, 1], color='r')
cbar = plt.colorbar(contour, ax=ax)
# -
# As we can see in the graph above, the model is not able to fully capture the zig-zag pattern to fully separate the classes.
# We could potentially experiment with adding more layers or an additional input ($xy$, for instance), as in the sketch below.
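# Below is a hedged sketch of that kind of experiment (my addition, untuned and for
# illustration only): the same Sequential API, just with an extra hidden layer.
# +
deeper = Sequential([
    Dense(32, input_dim=X_train.shape[1], activation='relu'),
    Dense(32, activation='relu'),
    Dense(1, activation='sigmoid'),
])
deeper.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
deeper.fit(X_train, Y_train, batch_size=32, epochs=200, verbose=0,
           validation_data=(X_test, Y_test))
print(deeper.evaluate(X_test, Y_test, verbose=0))
# -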
# # Conclusion
#
# This was a trivial example of the use of keras on some test data. The real power comes when we start to consider [convolutional](https://en.wikipedia.org/wiki/Convolutional_neural_network) or [recurrent](https://en.wikipedia.org/wiki/Long_short-term_memory) neural networks.
#
# More on this on the next notebooks.
| 00-keras-intro-sequential-api.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
# +
x=[2,6]
y=[4,9]
x1=[8,3]
y1=[4,7]
plt.xlabel("time")
plt.ylabel("students")
plt.plot(x,y,label="ml learning")
plt.plot(x1,y1,label="cloud learning")
plt.legend()
plt.grid(color='g')
plt.show()
# -
x=[200,60,34,90]
y=["suku","priyanshi","big bro","lil bro"]
plt.xlabel("time")
plt.ylabel("speed")
plt.bar(y,x,label="coder",color='y')
plt.legend()
plt.grid(color='y')
plt.show()
# +
x=[2,6,5,6,7,78,9]
y=[4,9,23,6,9,6,5]
x1=[2,9,5,6,2,38,8]
y1=[5,8,24,5,8,2,5]
plt.xlabel("time")
plt.ylabel("speed")
plt.scatter(x,y,label="ml learning",marker='x',s=120,c='r')
plt.scatter(x1,y1,label="hadoop",s=150)
plt.legend()
plt.grid(color='g')
plt.show()
# -
| pythonmatplot(1).ipynb |