code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
'''
This script takes two DNA sequences as input
and returns the name of the restriction enzymes able to cut BOTH sequences exactly N times.
It is possible to specify how many times a restriction enzymes is supposed to cut each sequence.
Version : 1.0
Requirements : Python >= 2.7 ; biopython
Last edit : February 1 2018
author : <EMAIL>
'''
##Play this cell only to install biopython
# !pip install biopython
# A __future__ import must be the first statement after the module docstring;
# placing it after other imports (as before) is a SyntaxError.
from __future__ import print_function  # in case of Python 2.7; no-op on Python 3
from Bio import Restriction
from Bio.Seq import Seq
# +
##Insert your "Plasmid 1" sequence
p1 = '''
caggtggcacttttcggggaaatgtgcgcggaacccctatttgtttatttttctaaatacattcaaatatgtatccgctc
atgagacaataaccctgataaatgcttcaataatattgaaaaaggaagagtatgagtattcaacatttccgtgtcgccct
tattcccttttttgcggcattttgccttcctgtttttgctcacccagaaacgctggtgaaagtaaaagatgctgaagatc
agttgggtgcacgagtgggttacatcgaactggatctcaacagcggtaagatccttgagagttttcgccccgaagaacgt
tttccaatgatgagcacttttaaagttctgctatgtggcgcggtattatcccgtattgacgccgggcaagagcaactcgg
tcgccgcatacactattctcagaatgacttggttgagtactcaccagtcacagaaaagcatcttacggatggcatgacag
taagagaattatgcagtgctgccataaccatgagtgataacactgcggccaacttacttctgacaacgatcggaggaccg
aaggagctaaccgcttttttgcacaacatgggggatcatgtaactcgccttgatcgttgggaaccggagctgaatgaagc
cataccaaacgacgagcgtgacaccacgatgcctgtagcaatggcaacaacgttgcgcaaactattaactggcgaactac
ttactctagcttcccggcaacaattaatagactggatggaggcggataaagttgcaggaccacttctgcgctcggccctt
ccggctggctggtttattgctgataaatctggagccggtgagcgtgggtctcgcggtatcattgcagcactggggccaga
tggtaagccctcccgtatcgtagttatctacacgacggggagtcaggcaactatggatgaacgaaatagacagatcgctg
agataggtgcctcactgattaagcattggtaactgtcagaccaagtttactcatatatactttagattgatttaaaactt
catttttaatttaaaaggatctaggtgaagatcctttttgataatctcatgaccaaaatcccttaacgtgagttttcgtt
ccactgagcgtcagaccccgtagaaaagatcaaaggatcttcttgagatcctttttttctgcgcgtaatctgctgcttgc
aaacaaaaaaaccaccgctaccagcggtggtttgtttgccggatcaagagctaccaactctttttccgaaggtaactggc
ttcagcagagcgcagataccaaatactgtccttctagtgtagccgtagttaggccaccacttcaagaactctgtagcacc
gcctacatacctcgctctgctaatcctgttaccagtggctgctgccagtggcgataagtcgtgtcttaccgggttggact
caagacgatagttaccggataaggcgcagcggtcgggctgaacggggggttcgtgcacacagcccagcttggagcgaacg
acctacaccgaactgagatacctacagcgtgagctatgagaaagcgccacgcttcccgaagggagaaaggcggacaggta
tccggtaagcggcagggtcggaacaggagagcgcacgagggagcttccagggggaaacgcctggtatctttatagtcctg
tcgggtttcgccacctctgacttgagcgtcgatttttgtgatgctcgtcaggggggcggagcctatggaaaaacgccagc
aacgcggcctttttacggttcctggccttttgctggccttttgctcacatgttctttcctgcgttatcccctgattctgt
ggataaccgtattaccgcctttgagtgagctgataccgctcgccgcagccgaacgaccgagcgcagcgagtcagtgagcg
aggaagcggaagagcgcccaatacgcaaaccgcctctccccgcgcgttggccgattcattaatgcagctggcacgacagg
tttcccgactggaaagcgggcagtgagcgcaacgcaattaatgtgagttagctcactcattaggcaccccaggctttaca
ctttatgcttccggctcgtatgttgtgtggaattgtgagcggataacaatttcacacaggaaacagctatgaccatgatt
acgccaagcgcgcaattaaccctcactaaagggaacaaaagctggagctgcaagcttaatgtagtcttatgcaatactct
tgtagtcttgcaacatggtaacgatgagttagcaacatgccttacaaggagagaaaaagcaccgtgcatgccgattggtg
gaagtaaggtggtacgatcgtgccttattaggaaggcaacagacgggtctgacatggattggacgaaccactgaattgcc
gcattgcagagatattgtatttaagtgcctagctcgatacaataaacgggtctctctggttagaccagatctgagcctgg
gagctctctggctaactagggaacccactgcttaagcctcaataaagcttgccttgagtgcttcaagtagtgtgtgcccg
tctgttgtgtgactctggtaactagagatccctcagacccttttagtcagtgtggaaaatctctagcagtggcgcccgaa
cagggacctgaaagcgaaagggaaaccagagctctctcgacgcaggactcggcttgctgaagcgcgcacggcaagaggcg
aggggcggcgactggtgagtacgccaaaaattttgactagcggaggctagaaggagagagatgggtgcgagagcgtcagt
attaagcgggggagaattagatcgcgatgggaaaaaattcggttaaggccagggggaaagaaaaaatataaattaaaaca
tatagtatgggcaagcagggagctagaacgattcgcagttaatcctggcctgttagaaacatcagaaggctgtagacaaa
tactgggacagctacaaccatcccttcagacaggatcagaagaacttagatcattatataatacagtagcaaccctctat
tgtgtgcatcaaaggatagagataaaagacaccaaggaagctttagacaagatagaggaagagcaaaacaaaagtaagac
caccgcacagcaagcggccgctgatcttcagacctggaggaggagatatgagggacaattggagaagtgaattatataaa
tataaagtagtaaaaattgaaccattaggagtagcacccaccaaggcaaagagaagagtggtgcagagagaaaaaagagc
agtgggaataggagctttgttccttgggttcttgggagcagcaggaagcactatgggcgcagcctcaatgacgctgacgg
tacaggccagacaattattgtctggtatagtgcagcagcagaacaatttgctgagggctattgaggcgcaacagcatctg
ttgcaactcacagtctggggcatcaagcagctccaggcaagaatcctggctgtggaaagatacctaaaggatcaacagct
cctggggatttggggttgctctggaaaactcatttgcaccactgctgtgccttggaatgctagttggagtaataaatctc
tggaacagattggaatcacacgacctggatggagtgggacagagaaattaacaattacacaagcttaatacactccttaa
ttgaagaatcgcaaaaccagcaagaaaagaatgaacaagaattattggaattagataaatgggcaagtttgtggaattgg
tttaacataacaaattggctgtggtatataaaattattcataatgatagtaggaggcttggtaggtttaagaatagtttt
tgctgtactttctatagtgaatagagttaggcagggatattcaccattatcgtttcagacccacctcccaaccccgaggg
gacccgacaggcccgaaggaatagaagaagaaggtggagagagagacagagacagatccattcgattagtgaacggatct
cgacggttaacttttaaaagaaaaggggggattggggggtacagtgcaggggaaagaatagtagacataatagcaacaga
catacaaactaaagaattacaaaaacaaattacaaaaattcaaaattttatcgataagcttgggagttccgcgttacata
acttacggtaaatggcccgcctggctgaccgcccaacgacccccgcccattgacgtcaataatgacgtatgttcccatag
taacgccaatagggactttccattgacgtcaatgggtggagtatttacggtaaactgcccacttggcagtacatcaagtg
tatcatatgccaagtacgccccctattgacgtcaatgacggtaaatggcccgcctggcattatgcccagtacatgacctt
atgggactttcctacttggcagtacatctacgtattagtcatcgctattaccatggtgatgcggttttggcagtacatca
atgggcgtggatagcggtttgactcacggggatttccaagtctccaccccattgacgtcaatgggagtttgttttggcac
caaaatcaacgggactttccaaaatgtcgtaacaactccgccccattgacgcaaatgggcggtaggcgtgtacggtggga
ggtctatataagcagagctcgtttagtgaaccgtcagatcgcctggagacgccatccacgctgttttgacctccatagaa
gacaccgactctagAGCCACCATGTACCCATACGATGTTCCAGATTACGCTGTGCTTGTGGGCACGCAGGATGTACCCAT
ACGATGTTCCAGATTACGCTGTGCTTGTGGGCACGCAGGATGCCATCAGCGCTGCGAATCCCCGGGTTATCGACGACAGC
AGAGCCCGCAAGCTCTCCACAGATCTGAAGCGGTGCACCTACTATGAGACGTGCGCGACCTACGGGCTCAATGTGGAGCG
TGTCTTCCAGGACGTGGCCCAGAAGGTAGTGGCCTTGCGAAAGAAGCAGCAACTGGCCATCGGGCCCTGCAAGTCACTGC
CCAACTCGCCCAGCCACTCGGCCGTGTCCGCCGCCTCCATCCCGGCCGTGCACATCAACCAGGCCACGAATGGCGGCGGC
AGCGCCTTCAGCGACTACTCGTCCTCAGTCCCCTCCACCCCCAGCATCAGCCAGCGGGAGCTGCGCATCGAGACCATCGC
TGCCTCCTCCACCCCCACACCCATCCGAAAGCAGTCCAAGCGGCGCTCCAACATCTTCACGGACTTGATTAGAGACCAAG
GATTTCGTGGTGATGGAGGATCAACCACAGGTTTGTCTGCTACCCCCCCTGCCTCATTACCTGGCTCACTAACTAACGTG
AAAGCCTTACAGAAATCTCCAGGACCTCAGCGAGAAAGGAAGTCATCTTCATCCTCAGAAGACAGGAATCGAATGAAAAC
ACTTGGTAGACGGGACTCGAGTGATGATTGGGAGATTCCTGATGGGCAGATTACAGTGGGACAAAGAATTGGATCTGGAT
CATTTGGAACAGTCTACAAGGGAAAGTGGCATGGTGATGTGGCAGTGAAAATGTTGAATGTGACAGCACCTACACCTCAG
CAGTTACAAGCCTTCAAAAATGAAGTAGGAGTACTCAGGAAAACACGACATGTGAATATCCTACTCTTCATGGGCTATTC
CACAAAGCCACAACTGGCTATTGTTACCCAGTGGTGTGAGGGCTCCAGCTTGTATCACCATCTCCATATCATTGAGACCA
AATTTGAGATGATCAAACTTATAGATATTGCACGACAGACTGCACAGGGCATGGATTACTTACACGCCAAGTCAATCATC
CACAGAGACCTCAAGAGTAATAATATATTTCTTCATGAAGACCTCACAGTAAAAATAGGTGATTTTGGTCTAGCTACAGT
GAAATCTCGATGGAGTGGGTCCCATCAGTTTGAACAGTTGTCTGGATCCATTTTGTGGATGGCACCAGAAGTCATCAGAA
TGCAAGATAAAAATCCATACAGCTTTCAGTCAGATGTATATGCATTTGGGATTGTTCTGTATGAATTGATGACTGGACAG
TTACCTTATTCAAACATCAACAACAGGGACCAGATAATTTTTATGGTGGGACGAGGATACCTGTCTCCAGATCTCAGTAA
GGTACGGAGTAACTGTCCAAAAGCCATGAAGAGATTAATGGCAGAGTGCCTCAAAAAGAAAAGAGATGAGAGACCACTCT
TTCCCCAAATTCTCGCCTCTATTGAGCTGCTGGCCCGCTCATTGCCAAAAATTCACCGCAGTGCATCAGAACCCTCCTTG
AATCGGGCTGGTTTCCAAACAGAGGATTTTAGTCTATATGCTTGTGCTTCTCCAAAAACACCCATCCAGGCAGGGGGATA
TGGTGCGTTTCCTGTCCACTGAGtcgacaatcaacctctggattacaaaatttgtgaaagattgactggtattcttaact
atgttgctccttttacgctatgtggatacgctgctttaatgcctttgtatcatgctattgcttcccgtatggctttcatt
ttctcctccttgtataaatcctggttgctgtctctttatgaggagttgtggcccgttgtcaggcaacgtggcgtggtgtg
cactgtgtttgctgacgcaacccccactggttggggcattgccaccacctgtcagctcctttccgggactttcgctttcc
ccctccctattgccacggcggaactcatcgccgcctgccttgcccgctgctggacaggggctcggctgttgggcactgac
aattccgtggtgttgtcggggaagctgacgtcctttccatggctgctcgcctgtgttgccacctggattctgcgcgggac
gtccttctgctacgtcccttcggccctcaatccagcggaccttccttcccgcggcctgctgccggctctgcggcctcttc
cgcgtcttcgccttcgccctcagacgagtcggatctccctttgggccgcctccccgcctgGAATTCTACCGGGTAGGGGA
GGCGCTTTTCCCAAGGCAGTCTGGAGCATGCGCTTTAGCAGCCCCGCTGGGCACTTGGCGCTACACAAGTGGCCTCTGGC
CTCGCACACATTCCACATCCACCGGTAGGCGCCAACCGGCTCCGTTCTTTGGTGGCCCCTTCGCGCCACCTTCTACTCCT
CCCCTAGTCAGGAAGTTCCCCCCCGCCCCGCAGCTCGCGTCGTGCAGGACGTGACAAATGGAAGTAGCACGTCTCACTAG
TCTCGTGCAGATGGACAGCACCGCTGAGCAATGGAAGCGGGTAGGCCTTTGGGGCAGCGGCCAATAGCAGCTTTGCTCCT
TCGCTTTCTGGGCTCAGAGGCTGGGAAGGGGTGGGTCCGGGGGCGGGCTCAGGGGCGGGCTCAGGGGCGGGGCGGGCGCC
CGAAGGTCCTCCGGAGGCCCGGCATTCTGCACGCTTCAAAAGCGCACGTCTGCCGCGCTGTTCTCCTCTTCCTCATCTCC
GGGCCTTTCGACCTGCAGCCCAAGCTTACCATGACCGAGTACAAGCCCACGGTGCGCCTCGCCACCCGCGACGACGTCCC
CAGGGCCGTACGCACCCTCGCCGCCGCGTTCGCCGACTACCCCGCCACGCGCCACACCGTCGATCCGGACCGCCACATCG
AGCGGGTCACCGAGCTGCAAGAACTCTTCCTCACGCGCGTCGGGCTCGACATCGGCAAGGTGTGGGTCGCGGACGACGGC
GCCGCGGTGGCGGTCTGGACCACGCCGGAGAGCGTCGAAGCGGGGGCGGTGTTCGCCGAGATCGGCCCGCGCATGGCCGA
GTTGAGCGGTTCCCGGCTGGCCGCGCAGCAACAGATGGAAGGCCTCCTGGCGCCGCACCGGCCCAAGGAGCCCGCGTGGT
TCCTGGCCACCGTCGGCGTCTCGCCCGACCACCAGGGCAAGGGTCTGGGCAGCGCCGTCGTGCTCCCCGGAGTGGAGGCG
GCCGAGCGCGCCGGGGTGCCCGCCTTCCTGGAGACCTCCGCGCCCCGCAACCTCCCCTTCTACGAGCGGCTCGGCTTCAC
CGTCACCGCCGACGTCGAGGTGCCCGAAGGACCGCGCACCTGGTGCATGACCCGCAAGCCCGGTGCCTGACGCCCGCCCC
ACGACCCGCAGCGCCCGACCGAAAGGAGCGCACGACCCCATGCATCTCGAGGGCCCGGTACctttaagaccaatgactta
caaggcagctgtagatcttagccactttttaaaagaaaaggggggactggaagggctagctcactcccaacgaagacaag
atctgctttttgcttgtactgggtctctctggttagaccagatctgagcctgggagctctctggctgcctagggaaccca
ctgcttaagcctcaataaagcttgccttgagtgcttcaagtagtgtgtgcccgtctgttgtgtgactctggtaactagag
atccctcagacccttttagtcagtgtggaaaatctctagcagtagtagttcatgtcatcttattattcagtatttataac
ttgcaaagaaatgaatatcagagagtgagaggaacttgtttattgcagcttataatggttacaaataaagcaatagcatc
acaaatttcacaaataaagcatttttttcactgcattctagttgtggtttgtccaaactcatcaatgtatcttatcatgt
ctggctctagctatcccgcccctaactccgcccagttccgcccattctccgccccatggctgactaattttttttattta
tgcagaggccgaggccgcctcggcctctgagctattccagaagtagtgaggaggcttttttggaggcctaggcttttgcg
tcgagacgtacccaattcgccctatagtgagtcgtattacgcgcgctcactggccgtcgttttacaacgtcgtgactggg
aaaaccctggcgttacccaacttaatcgccttgcagcacatccccctttcgccagctggcgtaatagcgaagaggcccgc
accgatcgcccttcccaacagttgcgcagcctgaatggcgaatggcgcgacgcgccctgtagcggcgcattaagcgcggc
gggtgtggtggttacgcgcagcgtgaccgctacacttgccagcgccctagcgcccgctcctttcgctttcttcccttcct
ttctcgccacgttcgccggctttccccgtcaagctctaaatcgggggctccctttagggttccgatttagtgctttacgg
cacctcgaccccaaaaaacttgattagggtgatggttcacgtagtgggccatcgccctgatagacggtttttcgcccttt
gacgttggagtccacgttctttaatagtggactcttgttccaaactggaacaacactcaaccctatctcggtctattctt
ttgatttataagggattttgccgatttcggcctattggttaaaaaatgagctgatttaacaaaaatttaacgcgaatttt
aacaaaatattaacgtttacaatttcc'''
# +
##Insert your "Plasmid 2" sequence
p2 = '''
caggtggcacttttcggggaaatgtgcgcggaacccctatttgtttatttttctaaatacattcaaatatgtatccgctc
atgagacaataaccctgataaatgcttcaataatattgaaaaaggaagagtatgagtattcaacatttccgtgtcgccct
tattcccttttttgcggcattttgccttcctgtttttgctcacccagaaacgctggtgaaagtaaaagatgctgaagatc
agttgggtgcacgagtgggttacatcgaactggatctcaacagcggtaagatccttgagagttttcgccccgaagaacgt
tttccaatgatgagcacttttaaagttctgctatgtggcgcggtattatcccgtattgacgccgggcaagagcaactcgg
tcgccgcatacactattctcagaatgacttggttgagtactcaccagtcacagaaaagcatcttacggatggcatgacag
taagagaattatgcagtgctgccataaccatgagtgataacactgcggccaacttacttctgacaacgatcggaggaccg
aaggagctaaccgcttttttgcacaacatgggggatcatgtaactcgccttgatcgttgggaaccggagctgaatgaagc
cataccaaacgacgagcgtgacaccacgatgcctgtagcaatggcaacaacgttgcgcaaactattaactggcgaactac
ttactctagcttcccggcaacaattaatagactggatggaggcggataaagttgcaggaccacttctgcgctcggccctt
ccggctggctggtttattgctgataaatctggagccggtgagcgtgggtctcgcggtatcattgcagcactggggccaga
tggtaagccctcccgtatcgtagttatctacacgacggggagtcaggcaactatggatgaacgaaatagacagatcgctg
agataggtgcctcactgattaagcattggtaactgtcagaccaagtttactcatatatactttagattgatttaaaactt
catttttaatttaaaaggatctaggtgaagatcctttttgataatctcatgaccaaaatcccttaacgtgagttttcgtt
ccactgagcgtcagaccccgtagaaaagatcaaaggatcttcttgagatcctttttttctgcgcgtaatctgctgcttgc
aaacaaaaaaaccaccgctaccagcggtggtttgtttgccggatcaagagctaccaactctttttccgaaggtaactggc
ttcagcagagcgcagataccaaatactgtccttctagtgtagccgtagttaggccaccacttcaagaactctgtagcacc
gcctacatacctcgctctgctaatcctgttaccagtggctgctgccagtggcgataagtcgtgtcttaccgggttggact
caagacgatagttaccggataaggcgcagcggtcgggctgaacggggggttcgtgcacacagcccagcttggagcgaacg
acctacaccgaactgagatacctacagcgtgagctatgagaaagcgccacgcttcccgaagggagaaaggcggacaggta
tccggtaagcggcagggtcggaacaggagagcgcacgagggagcttccagggggaaacgcctggtatctttatagtcctg
tcgggtttcgccacctctgacttgagcgtcgatttttgtgatgctcgtcaggggggcggagcctatggaaaaacgccagc
aacgcggcctttttacggttcctggccttttgctggccttttgctcacatgttctttcctgcgttatcccctgattctgt
ggataaccgtattaccgcctttgagtgagctgataccgctcgccgcagccgaacgaccgagcgcagcgagtcagtgagcg
aggaagcggaagagcgcccaatacgcaaaccgcctctccccgcgcgttggccgattcattaatgcagctggcacgacagg
tttcccgactggaaagcgggcagtgagcgcaacgcaattaatgtgagttagctcactcattaggcaccccaggctttaca
ctttatgcttccggctcgtatgttgtgtggaattgtgagcggataacaatttcacacaggaaacagctatgaccatgatt
acgccaagcgcgcaattaaccctcactaaagggaacaaaagctggagctgcaagcttaatgtagtcttatgcaatactct
tgtagtcttgcaacatggtaacgatgagttagcaacatgccttacaaggagagaaaaagcaccgtgcatgccgattggtg
gaagtaaggtggtacgatcgtgccttattaggaaggcaacagacgggtctgacatggattggacgaaccactgaattgcc
gcattgcagagatattgtatttaagtgcctagctcgatacaataaacgggtctctctggttagaccagatctgagcctgg
gagctctctggctaactagggaacccactgcttaagcctcaataaagcttgccttgagtgcttcaagtagtgtgtgcccg
tctgttgtgtgactctggtaactagagatccctcagacccttttagtcagtgtggaaaatctctagcagtggcgcccgaa
cagggacctgaaagcgaaagggaaaccagagctctctcgacgcaggactcggcttgctgaagcgcgcacggcaagaggcg
aggggcggcgactggtgagtacgccaaaaattttgactagcggaggctagaaggagagagatgggtgcgagagcgtcagt
attaagcgggggagaattagatcgcgatgggaaaaaattcggttaaggccagggggaaagaaaaaatataaattaaaaca
tatagtatgggcaagcagggagctagaacgattcgcagttaatcctggcctgttagaaacatcagaaggctgtagacaaa
tactgggacagctacaaccatcccttcagacaggatcagaagaacttagatcattatataatacagtagcaaccctctat
tgtgtgcatcaaaggatagagataaaagacaccaaggaagctttagacaagatagaggaagagcaaaacaaaagtaagac
caccgcacagcaagcggccgctgatcttcagacctggaggaggagatatgagggacaattggagaagtgaattatataaa
tataaagtagtaaaaattgaaccattaggagtagcacccaccaaggcaaagagaagagtggtgcagagagaaaaaagagc
agtgggaataggagctttgttccttgggttcttgggagcagcaggaagcactatgggcgcagcctcaatgacgctgacgg
tacaggccagacaattattgtctggtatagtgcagcagcagaacaatttgctgagggctattgaggcgcaacagcatctg
ttgcaactcacagtctggggcatcaagcagctccaggcaagaatcctggctgtggaaagatacctaaaggatcaacagct
cctggggatttggggttgctctggaaaactcatttgcaccactgctgtgccttggaatgctagttggagtaataaatctc
tggaacagattggaatcacacgacctggatggagtgggacagagaaattaacaattacacaagcttaatacactccttaa
ttgaagaatcgcaaaaccagcaagaaaagaatgaacaagaattattggaattagataaatgggcaagtttgtggaattgg
tttaacataacaaattggctgtggtatataaaattattcataatgatagtaggaggcttggtaggtttaagaatagtttt
tgctgtactttctatagtgaatagagttaggcagggatattcaccattatcgtttcagacccacctcccaaccccgaggg
gacccgacaggcccgaaggaatagaagaagaaggtggagagagagacagagacagatccattcgattagtgaacggatct
cgacggttaacttttaaaagaaaaggggggattggggggtacagtgcaggggaaagaatagtagacataatagcaacaga
catacaaactaaagaattacaaaaacaaattacaaaaattcaaaattttatcgataagcttgggagttccgcgttacata
acttacggtaaatggcccgcctggctgaccgcccaacgacccccgcccattgacgtcaataatgacgtatgttcccatag
taacgccaatagggactttccattgacgtcaatgggtggagtatttacggtaaactgcccacttggcagtacatcaagtg
tatcatatgccaagtacgccccctattgacgtcaatgacggtaaatggcccgcctggcattatgcccagtacatgacctt
atgggactttcctacttggcagtacatctacgtattagtcatcgctattaccatggtgatgcggttttggcagtacatca
atgggcgtggatagcggtttgactcacggggatttccaagtctccaccccattgacgtcaatgggagtttgttttggcac
caaaatcaacgggactttccaaaatgtcgtaacaactccgccccattgacgcaaatgggcggtaggcgtgtacggtggga
ggtctatataagcagagctcgtttagtgaaccgtcagatcgcctggagacgccatccacgctgttttgacctccatagaa
gacaccgactctagaggatccaccggtcgccaccatggtgagcaagggcgaggagctgttcaccggggtggtgcccatcc
tggtcgagctggacggcgacgtaaacggccacaagttcagcgtgtccggcgagggcgagggcgatgccacctacggcaag
ctgaccctgaagttcatctgcaccaccggcaagctgcccgtgccctggcccaccctcgtgaccaccctgacctacggcgt
gcagtgcttcagccgctaccccgaccacatgaagcagcacgacttcttcaagtccgccatgcccgaaggctacgtccagg
agcgcaccatcttcttcaaggacgacggcaactacaagacccgcgccgaggtgaagttcgagggcgacaccctggtgaac
cgcatcgagctgaagggcatcgacttcaaggaggacggcaacatcctggggcacaagctggagtacaactacaacagcca
caacgtctatatcatggccgacaagcagaagaacggcatcaaggtgaacttcaagatccgccacaacatcgaggacggca
gcgtgcagctcgccgaccactaccagcagaacacccccatcggcgacggccccgtgctgctgcccgacaaccactacctg
agcacccagtccgccctgagcaaagaccccaacgagaagcgcgatcacatggtcctgctggagttcgtgaccgccgccgg
gatcactctcggcatggacgagctgtacaagtaaagcggccgcgtcgacaatcaacctctggattacaaaatttgtgaaa
gattgactggtattcttaactatgttgctccttttacgctatgtggatacgctgctttaatgcctttgtatcatgctatt
gcttcccgtatggctttcattttctcctccttgtataaatcctggttgctgtctctttatgaggagttgtggcccgttgt
caggcaacgtggcgtggtgtgcactgtgtttgctgacgcaacccccactggttggggcattgccaccacctgtcagctcc
tttccgggactttcgctttccccctccctattgccacggcggaactcatcgccgcctgccttgcccgctgctggacaggg
gctcggctgttgggcactgacaattccgtggtgttgtcggggaagctgacgtcctttccatggctgctcgcctgtgttgc
cacctggattctgcgcgggacgtccttctgctacgtcccttcggccctcaatccagcggaccttccttcccgcggcctgc
tgccggctctgcggcctcttccgcgtcttcgccttcgccctcagacgagtcggatctccctttgggccgcctccccgcct
gGAATTCTACCGGGTAGGGGAGGCGCTTTTCCCAAGGCAGTCTGGAGCATGCGCTTTAGCAGCCCCGCTGGGCACTTGGC
GCTACACAAGTGGCCTCTGGCCTCGCACACATTCCACATCCACCGGTAGGCGCCAACCGGCTCCGTTCTTTGGTGGCCCC
TTCGCGCCACCTTCTACTCCTCCCCTAGTCAGGAAGTTCCCCCCCGCCCCGCAGCTCGCGTCGTGCAGGACGTGACAAAT
GGAAGTAGCACGTCTCACTAGTCTCGTGCAGATGGACAGCACCGCTGAGCAATGGAAGCGGGTAGGCCTTTGGGGCAGCG
GCCAATAGCAGCTTTGCTCCTTCGCTTTCTGGGCTCAGAGGCTGGGAAGGGGTGGGTCCGGGGGCGGGCTCAGGGGCGGG
CTCAGGGGCGGGGCGGGCGCCCGAAGGTCCTCCGGAGGCCCGGCATTCTGCACGCTTCAAAAGCGCACGTCTGCCGCGCT
GTTCTCCTCTTCCTCATCTCCGGGCCTTTCGACCTGCAGCCCAAGCTTACCATGACCGAGTACAAGCCCACGGTGCGCCT
CGCCACCCGCGACGACGTCCCCAGGGCCGTACGCACCCTCGCCGCCGCGTTCGCCGACTACCCCGCCACGCGCCACACCG
TCGATCCGGACCGCCACATCGAGCGGGTCACCGAGCTGCAAGAACTCTTCCTCACGCGCGTCGGGCTCGACATCGGCAAG
GTGTGGGTCGCGGACGACGGCGCCGCGGTGGCGGTCTGGACCACGCCGGAGAGCGTCGAAGCGGGGGCGGTGTTCGCCGA
GATCGGCCCGCGCATGGCCGAGTTGAGCGGTTCCCGGCTGGCCGCGCAGCAACAGATGGAAGGCCTCCTGGCGCCGCACC
GGCCCAAGGAGCCCGCGTGGTTCCTGGCCACCGTCGGCGTCTCGCCCGACCACCAGGGCAAGGGTCTGGGCAGCGCCGTC
GTGCTCCCCGGAGTGGAGGCGGCCGAGCGCGCCGGGGTGCCCGCCTTCCTGGAGACCTCCGCGCCCCGCAACCTCCCCTT
CTACGAGCGGCTCGGCTTCACCGTCACCGCCGACGTCGAGGTGCCCGAAGGACCGCGCACCTGGTGCATGACCCGCAAGC
CCGGTGCCTGACGCCCGCCCCACGACCCGCAGCGCCCGACCGAAAGGAGCGCACGACCCCATGCATCTCGAGGGCCCGGT
ACctttaagaccaatgacttacaaggcagctgtagatcttagccactttttaaaagaaaaggggggactggaagggctag
ctcactcccaacgaagacaagatctgctttttgcttgtactgggtctctctggttagaccagatctgagcctgggagctc
tctggctgcctagggaacccactgcttaagcctcaataaagcttgccttgagtgcttcaagtagtgtgtgcccgtctgtt
gtgtgactctggtaactagagatccctcagacccttttagtcagtgtggaaaatctctagcagtagtagttcatgtcatc
ttattattcagtatttataacttgcaaagaaatgaatatcagagagtgagaggaacttgtttattgcagcttataatggt
tacaaataaagcaatagcatcacaaatttcacaaataaagcatttttttcactgcattctagttgtggtttgtccaaact
catcaatgtatcttatcatgtctggctctagctatcccgcccctaactccgcccagttccgcccattctccgccccatgg
ctgactaattttttttatttatgcagaggccgaggccgcctcggcctctgagctattccagaagtagtgaggaggctttt
ttggaggcctaggcttttgcgtcgagacgtacccaattcgccctatagtgagtcgtattacgcgcgctcactggccgtcg
ttttacaacgtcgtgactgggaaaaccctggcgttacccaacttaatcgccttgcagcacatccccctttcgccagctgg
cgtaatagcgaagaggcccgcaccgatcgcccttcccaacagttgcgcagcctgaatggcgaatggcgcgacgcgccctg
tagcggcgcattaagcgcggcgggtgtggtggttacgcgcagcgtgaccgctacacttgccagcgccctagcgcccgctc
ctttcgctttcttcccttcctttctcgccacgttcgccggctttccccgtcaagctctaaatcgggggctccctttaggg
ttccgatttagtgctttacggcacctcgaccccaaaaaacttgattagggtgatggttcacgtagtgggccatcgccctg
atagacggtttttcgccctttgacgttggagtccacgttctttaatagtggactcttgttccaaactggaacaacactca
accctatctcggtctattcttttgatttataagggattttgccgatttcggcctattggttaaaaaatgagctgatttaa
caaaaatttaacgcgaattttaacaaaatattaacgtttacaatttcc
'''
# +
## List all common restriction enzymes that cut both plasmids exactly N times
N = 1  # how many cuts?
for e in Restriction.CommOnly:  # scan only commonly-available enzymes
    # Bug fix: the second condition previously re-tested p1, so p2 was never
    # checked and any enzyme cutting p1 exactly N times was reported.
    if len(e.search(Seq(p1))) == N and len(e.search(Seq(p2))) == N:
        print(e)
# +
## Find common RE in many sequences
from collections import Counter
def parse_fasta(text):
    """Parse FASTA-formatted text into a dict of {sequence_id: sequence}.

    The id is the full header line without the leading '>' (whitespace
    stripped); sequence lines are concatenated with whitespace stripped.
    Fixes over the previous version: empty input no longer yields
    {False: ''}, and a record whose sequence is empty no longer swallows
    the following record's header.
    """
    records = {}
    _id = None
    seq = ''
    for line in text.split('\n'):
        line = line.strip()
        if not line:
            continue
        if line.startswith('>'):
            # flush the previous record before starting a new one
            if _id is not None:
                records[_id] = seq
            _id = line[1:].strip()
            seq = ''
        else:
            seq += line
    if _id is not None:  # flush the final record, if any
        records[_id] = seq
    return records
text = \
'''
>BB200_1 (len:243 mean_flex:13.0 max_flex:14.9 entropy:1.99 GC%:44.86)
GGGCATGCACAGATGTACACGGCGGCGCAAGATGATGTGCCGAACCTGACATGGCATCGACTGGTATGGATCAATACTGATGCGATATCGATACCGGATAAATCATATATGCATAATATCACATTATATTAATTATAATACATCGGCGTACATATACACGTACGCATCATTTCACTATCTATCGGTACTATACGTAGTGCCGGTCTGTTGGCCGGGCGACATAGATGCTGCATGACATAGCCC
>BB200_2 (len:244 mean_flex:13.15 max_flex:14.69 entropy:1.96 GC%:38.52)
GGGCATGCACAGATGTACACGTGACGCAACGATGATGTTAGCTATTTGTTCAATGACAAATCTGGTATGATCAATACCGATGCGATATTGATATCTGATAACTCATATATGTAGAATATCACATTATATTTATTATAATACATCGTCGAACATATACACAATGCATCTTATCTATACGTATCGGGATAGCGTTGGCATAGCACTGGATGGCATGACCCTCATTAGATGCTGCATGACATAGCCC
>BB200_3 (len:244 mean_flex:13.06 max_flex:14.9 entropy:1.96 GC%:39.75)
GGGCATGCACAGATGTACACGAGACCGCAAGATGATGTTCATTCTTGAACATGAGATCGGATGGGTATGGATCAATACCGATGCGATATGATAACTGATAAATCATATATCTATAATATCACATTATATTAATTATAATACAGGATCGTTACATGCATACACAATGTATACTATACGTATTCGGTAGTTAGTGTACGGTCGGAATGGAGGTGGTGGCGGTGATAGATGCTGCATGACATAGCCC
>BB200_4 (len:243 mean_flex:13.29 max_flex:14.44 entropy:1.93 GC%:34.57)
GGGCATGCACAGATGTACACGAATCCCGAAGATGTTGTCCATTCATTGAATATGAGATCTCATGGTATGATCAATATCGGATGCGATATTGATACTGATAAATCATATATGCATAATCTCACATTATATTTATTATAATAAATCATCGTAGATATACACAATGTGAATTGTATACAATGGATAGTATAACTATCCAATTTCTTTGAGCATTGGCCTTGGTGTAGATGCTGCATGACATAGCCC
>BB200_5 (len:243 mean_flex:13.37 max_flex:14.52 entropy:1.94 GC%:35.8)
GGGCATGCACAGATGTACACGAATCCGTGAGATGACTATCTTATTTGTGACATTCATCGATCTGGATATGATCAATACCATGCGATATTGATTACTGATAAATCATATATGTAGAATATCACATTATATTAATTATAATAAATCGTCGTACATATACATCCACAATTAGCTATGTATACTATCTATAGAGATGGTGCATCATCGTACTCCACCATTCCCACTAGATGCTGCATGACATAGCCC
'''
sequences = parse_fasta(text)
# +
N = 1  # how many cuts?
# Count, per enzyme, in how many of the sequences it cuts exactly N times.
c = Counter()
for seq_id, sequence in sequences.items():
    for cutter in Restriction.CommOnly:
        if len(cutter.search(Seq(sequence))) == N:
            c.update({str(cutter)})
# Report every common enzyme that cut ALL sequences exactly N times.
for e in Restriction.CommOnly:
    enzyme_name = str(e)
    if enzyme_name in c and c[enzyme_name] == len(sequences):
        print(enzyme_name, '\t', e.site)
# -
e.site
| Find_RE_cutting_2_plasmids.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.9 64-bit
# language: python
# name: python3
# ---
# Read the ciphertext and the reference corpus once; context managers close
# the files deterministically (open(...).readlines() leaked the handles),
# and read() is equivalent to "".join(readlines()).
with open("input.txt") as f:
    cipher = f.read()
with open("shakespeare.txt") as f:
    shakespeare = f.read()
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# +
from string import whitespace
def ngrams(text):
    """Compute relative letter/bigram/trigram/quadgram frequencies of `text`.

    Non-alphabetic characters are skipped entirely; letters are upper-cased.
    Returns four lists of (ngram, frequency) pairs, each sorted by
    decreasing frequency.  Only complete n-grams are counted: the previous
    version emitted 1- to 3-letter "bigrams"/"trigrams"/"quadgrams" for the
    first characters of the text, polluting the tables and their totals
    (downstream code had to filter with len(s) >= 2).
    """
    letters, bigrams, trigrams, quadgrams = {}, {}, {}, {}
    a, b, c, d = "", "", "", ""
    for char in text:
        char = char.upper()
        if char in whitespace or char not in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
            continue
        a, b, c = b, c, d
        d = char
        letters[d] = letters.get(d, 0) + 1
        if c:  # only count once a full 2-gram is available
            bigrams[c + d] = bigrams.get(c + d, 0) + 1
        if b:  # only count once a full 3-gram is available
            trigrams[b + c + d] = trigrams.get(b + c + d, 0) + 1
        if a:  # only count once a full 4-gram is available
            quadgrams[a + b + c + d] = quadgrams.get(a + b + c + d, 0) + 1
    # Normalize each table to relative frequencies (empty tables are left as-is).
    for table in (letters, bigrams, trigrams, quadgrams):
        total = sum(table.values())
        for k in table:
            table[k] /= total
    letters = sorted(letters.items(), key=lambda x: x[1], reverse=True)
    bigrams = sorted(bigrams.items(), key=lambda x: x[1], reverse=True)
    trigrams = sorted(trigrams.items(), key=lambda x: x[1], reverse=True)
    quadgrams = sorted(quadgrams.items(), key=lambda x: x[1], reverse=True)
    return letters, bigrams, trigrams, quadgrams
# -
SHAKESPEARE_NGRAMS = ngrams(shakespeare)
CIPHER_NGRAMS = ngrams(cipher)
[(a, round(100 * n, 1)) for (a, n) in SHAKESPEARE_NGRAMS[0]]
[(a, round(100 * n, 1)) for (a, n) in CIPHER_NGRAMS[0]]
for (s, p) in SHAKESPEARE_NGRAMS[2]:
if s[0] == "E" and s[2] == "E" :
print(s, round(p * 100, 1))
# +
for (cs, cp) in CIPHER_NGRAMS[1]:
if len(cs) >= 2 and cs[0] == cs[1]:
print(cs, cp)
print("---------------------")
for (ss, sp) in SHAKESPEARE_NGRAMS[1]:
if len(ss) >= 2 and ss[0] == ss[1]:
print(ss, sp)
| hw1/assignment/ngrams.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tweepy
consumer_key = '...'
consumer_secret = '...'
access_token = '...'
access_token_secret = '...'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
#Comprobar que funciona la api
status = api.get_status('937349434668498944')
print(status.user.id)
user = api.get_user(status.user.id).screen_name
api.get_user('Humbert18960727').created_at
# +
import time
import tweepy
import pandas as pd
import matplotlib.pyplot as plt
def get_tweet_status(tweet_id):
    '''
    Return the tweepy Status object holding all the data of a tweet.

    On any TweepError (e.g. deleted tweet or suspended account) the string
    'SuspendedAccount' is returned instead of a Status object, so callers
    must compare against that sentinel before using the result.
    '''
    try:
        return api.get_status(tweet_id)
    except tweepy.TweepError as e:
        return 'SuspendedAccount'
def get_user_by_id(id):
    '''
    Return the tweepy User object for the given user id.
    Note: unlike the other helpers here, API errors are NOT caught.
    '''
    return api.get_user(id)
def get_screen_name(id):
    """Return the screen name for a user id, or 'SuspendedAccount' when the
    lookup fails with a TweepError (e.g. suspended/deleted account)."""
    try:
        return get_user_by_id(id).screen_name
    except tweepy.TweepError:
        return 'SuspendedAccount'
def get_screen_name_from_tweet(tweet_id):
    """Return the screen name of a tweet's author, or 'SuspendedAccount'
    when any of the lookups fails with a TweepError."""
    try:
        author_id = api.get_status(tweet_id).user.id
        return get_user_by_id(author_id).screen_name
    except tweepy.TweepError:
        return 'SuspendedAccount'
def get_followers_page(screen_name):
    '''
    Return the ids of all followers of a user (api.followers_ids pages);
    `screen_name` is the Twitter handle without the '@'
    e.g. @NelsonMandela -> NelsonMandela
    '''
    ids = []
    for page in tweepy.Cursor(api.followers_ids, screen_name=screen_name).pages():
        ids.extend(page)
        #time.sleep(60)
    return ids
def get_followers_limited(screen_name, total):
    """Return up to `total` follower ids of a user.

    `screen_name` is the Twitter handle without the '@'
    (e.g. @NelsonMandela -> NelsonMandela).
    """
    follower_cursor = tweepy.Cursor(api.followers_ids, screen_name=screen_name)
    return [follower_id for follower_id in follower_cursor.items(total)]
def get_followers_from_tweet_id(tweet_id, total = 0):
    """Given a tweet id, return the follower ids of the tweet's author.

    With total == 0 (the default) every follower id is fetched; otherwise
    only `total` ids are retrieved.
    """
    author_id = get_tweet_status(tweet_id).user.id
    handle = get_user_by_id(author_id).screen_name
    if total == 0:
        return get_followers_page(handle)
    return get_followers_limited(handle, total)
def get_tweets(num_noticias):
    '''
    Given the number of news items whose tweet ids we want, return
    [titles, list_of_tweet_id_lists] built with pandas from the
    politifact_fake dataset (tweet ids for one item are tab-separated).
    '''
    df = pd.read_csv('./data/politifact_fake.csv')
    # This can be swapped for another dataset (e.g. ./data/politifact_real.csv),
    # but for the moment we work with this one
    df['tweet_ids'][0].split('\t')  # NOTE(review): result discarded -- looks like leftover exploration
    titulo = []
    tweets = []
    i = 0
    for tweet in df['tweet_ids']:
        if i < num_noticias:
            array_tweets = str(tweet).split('\t')
            # Could be changed to require at least 5 or 10 tweets, for testing
            if(len(array_tweets) > 1 and len(array_tweets) < 2000):  # only add items with some tweet; a single element means NaN
                tweets.append(array_tweets)
                titulo.append(df['title'][i])
            i = i+1
    noticias = [titulo, tweets]
    return noticias
# -
# # Grafo de propagación de noticias
# Este grafo sirve para ver la difusión de la noticia, es decir, ver las veces que ha sido retweeteado, para ello lo separamos en niveles:
# nivel 0: la noticia como origen.
#
# nivel 1: las personas que han tweeado sobre la noticia del nivel 1.
#
# nivel 2: aquellos que han hecho RT del tweet del nivel 1.
#
# nivel 3: todos que han hecho RT del RT del nivel 2.
#
# Así sucesivamente hasta que nadie haga más RT, ahí es cuando se habrá terminado la difusión.
# +
# Usado solo para sacar dos noticis
#Sacamos el nivel 0 = la noticia
df = pd.read_csv('./data/politifact_fake.csv',delimiter=",")
df4 = df.tail(2).copy()
#id_noticia = df['id'][0]
#id_noticia = df.loc[0:3,'id']
#list_nivel1 = df.loc[0,'tweet_ids'].split('\t')
#politic_news = get_tweets(4)
titulo = []
tweets = []
noticias= []
i = 0
for tweet in df4['tweet_ids']:
array_tweets = str(tweet).split('\t')
if(len(array_tweets) > 1 and len(array_tweets) < 2000): #Solo si tiene algun tweet lo añadimos, 1 elemento es NaN
tweets.append(array_tweets)
titulo.append(df['title'][i])
i = i+1
noticias = [titulo, tweets]
# +
# ids de noticisa convertidos en una lista
ids = []
for i in range (0,4):
ids.append(noticias[1][1][i])
# -
nodos
def get_ids_fromStatuses(statuses):
    """Given a list of tweet status objects, return the list of their ids
    (the tweets that retweeted the original tweet)."""
    return [status.id for status in statuses]
def get_nodo(list_id):
    '''
    Given a list of tweet ids, return a DataFrame with one row per id:
    'padre' is the tweet id and 'hijos' the list of ids of its retweets.
    '''
    nodos = pd.DataFrame(columns = ['padre', 'hijos'])
    for i in range (len(list_id)):
        t = api.retweets(list_id[i])
        # NOTE(review): only get_tweet_status returns the 'SuspendedAccount'
        # sentinel; api.retweets raises on failure instead -- confirm intent.
        if t != 'SuspendedAccount':
            l_rts = get_ids_fromStatuses(t)
            nodos = nodos.append({"padre": list_id[i], "hijos": l_rts}, ignore_index=True)
    return nodos
def get_propagacion_graph(ids0): # trial version: only for a list of ids converted beforehand
    '''
    Build a two-level propagation DataFrame: one row per root tweet id in
    `ids0`, plus one row per retweet of each of those tweets.
    '''
    grafo = pd.DataFrame(columns = ['padre', 'hijos'])
    grafo = grafo.append(get_nodo(ids0), ignore_index=True)
    grafo_aux = pd.DataFrame(columns = ['padre', 'hijos'])
    for i in range (grafo["padre"].count()):
        grafo_aux = grafo_aux.append(get_nodo(grafo["hijos"][i]),ignore_index=True)
    grafo = grafo.append(grafo_aux,ignore_index=True)
    return grafo
def get_propagacionGraph():
    '''
    Build the propagation graph of one news item from the module-level list
    `ids` taken from the politicalfact.csv file.
    To build graphs for several news items at once, pass each item's id list
    as an argument and iterate from an outer function that calls this one as
    many times as needed; alternatively, read the csv inside this function
    and iterate over every news item here.
    '''
    grafo = pd.DataFrame(columns = ['padre', 'hijos'])
    grafo = grafo.append(get_nodo(ids), ignore_index=True)
    etapa = get_next_level(grafo)
    #grafo = grafo.append(etapa,ignore_index=True)
    return etapa[0]
def get_next_level(df_ph):
    '''
    Expand one level of the propagation graph below the given one. Returns a
    pair of DataFrames: [0] the resulting graph, [1] the level just expanded.
    Works recursively.
    '''
    grafo_aux = pd.DataFrame(columns = ['padre', 'hijos'])
    grafo_aut = pd.DataFrame(columns = ['padre', 'hijos'])
    grafo_aut = df_ph.copy()
    for i in range (grafo_aut["padre"].count()):
        if bool(grafo_aut.iloc[:,1][i]) == True: # only call get_nodo when the child list is non-empty, otherwise continue
            grafo_aux = grafo_aux.append(get_nodo(grafo_aut["hijos"][i]),ignore_index=True)
    if grafo_aux["hijos"].count() > 0: # recurse only while the new level is non-empty
        # NOTE(review): the recursive call's return value is discarded, so
        # levels deeper than the next one never reach the returned graph --
        # confirm whether the result was meant to be appended.
        get_next_level(grafo_aux)
    grafo_aut = grafo_aut.append(grafo_aux,ignore_index=True)
    return [grafo_aut,grafo_aux]
p = get_propagacionGraph()
p
| FakeNews/PropagacionGraph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pandas Exercises
#
# **Source of the Data: https://www.kaggle.com/chirin/africa-economic-banking-and-systemic-crisis-data/downloads/africa-economic-banking-and-systemic-crisis-data.zip/1**
#
#
# This dataset is a derivative of Reinhart et. al's Global Financial Stability dataset which can be found online at: https://www.hbs.edu/behavioral-finance-and-financial-stability/data/Pages/global.aspx
#
# The dataset will be valuable to those who seek to understand the dynamics of financial stability within the African context.
#
# Content
# The dataset specifically focuses on the Banking, Debt, Financial, Inflation and Systemic Crises that occurred, from 1860 to 2014, in 13 African countries, including: Algeria, Angola, Central African Republic, Ivory Coast, Egypt, Kenya, Mauritius, Morocco, Nigeria, South Africa, Tunisia, Zambia and Zimbabwe.
#
#
#
# * caseA number which denotes a specific country
# * cc3A three letter country code
# * country The name of the country
# * year The year of the observation
# * systemic_crisis"0" means that no systemic crisis occurred in the year and "1" means that a systemic crisis occurred in the year.
# * exch_usd The exchange rate of the country vis-a-vis the USD
# * domestic_debt_in_default "0" means that no sovereign domestic debt default occurred in the year and "1" means that a sovereign domestic debt default occurred in the year
# * sovereign_external_debt_default "0" means that no sovereign external debt default occurred in the year and "1" means that a sovereign external debt default occurred in the year
# * gdp_weighted_default The total debt in default vis-a-vis the GDP
# * inflation_annual_cpi The annual CPI Inflation rate
# * independence "0" means "no independence" and "1" means "independence"
# * currency_crises "0" means that no currency crisis occurred in the year and "1" means that a currency crisis occurred in the year
# * inflation_crises "0" means that no inflation crisis occurred in the year and "1" means that an inflation crisis occurred in the year
# * banking_crisis"no_crisis" means that no banking crisis occurred in the year and "crisis" means that a banking crisis occurred in the year
# **TASK: Import pandas**
# CODE HERE
import pandas as pd
# **TASK: Read in the african_econ_crises.csv file that is located under the 01-Crash-Course-Pandas folder. Pay close attention to where the .csv file is located! Please don't post to the QA forums if you can't figure this one out, instead, run our solutions notebook directly to see how its done.**
# CODE HERE
df = pd.read_csv("african_econ_crises.csv")
# **TASK: Display the first 5 rows of the data set**
# CODE HERE
# Display the first 5 rows, as the task asks (head() defaults to 5;
# the original `df.head(3)` showed only 3).
df.head()
# +
# Do not run this
# -
# **TASK: How many countries are represented in this data set?**
# CODE HERE
df["country"].nunique()
# +
# Do not run this
# -
# **TASK: What are the countries represented in this data set?**
# CODE HERE
df["country"].unique()
# +
# Do not run this
# -
# **TASK: What country had this highest annual CPI Inflation rate? What was the inflation rate?**
# CODE HERE
df[df["inflation_annual_cpi"] == df["inflation_annual_cpi"].max()]
# +
# Do not run this
# -
# **TASK: In what year did Kenya have its first System Crisis?**
# CODE HERE
# Sort ascending so the EARLIEST (first) systemic-crisis year appears first;
# the original used ascending=False, which shows the latest year instead.
df[(df["country"] == "Kenya") & (df["systemic_crisis"]==1)].sort_values("year", ascending=True)
# +
# Do not run this
# -
# **TASK: How many yearly systemic crises have occurred per country?**
# +
# CODE HERE
# -
# **TASK: How many years did Zimbabwe have a sovereign external debt default occur?**
# +
# CODE HERE
# -
# **TASK: In what year did Algeria have its highest exchange rate?**
# +
# CODE HERE
# -
| 0 - Visualization-and-analysis/2. Pandas/07-Pandas-Exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Block layer latencies
# Blklayer results from reading to the drp nvme-sssd. Each ssd was formatted with zfs.
# The data are collected: blktrace->blkparse->btt
from os.path import join
import pandas as pd
import matplotlib.pyplot as plt
data = {
"ffb02_write": "20180309_ffb02/nvme0n1_write_q2c.csv", "ffb02_write_proper": "20180309_ffb02/nvme0n1_write_proper_q2c.csv",
"ffb03_read": "nvme0n1_write_q2c.csv"
}
# +
basepath = "/reg/data/ana01/temp/xrd/wilko/drp"
#basepath = "/home/wilko/psdm/data/drp"
# latency data for driver to completion
#dq = pd.read_csv(join(basepath, "2018-03-08-nvme1n1_d2c_latency.csv"), names=["ts","lat"])
# latency data for queue to completion
#dq = pd.read_csv(join(basepath, "2018-03-08-nvme1n1_q2c_latency.csv"), names=["ts","lat"])
#dq = pd.read_csv(join(basepath, "20180309_ffb03/nvme2n1_write.q2c_lantency_q2c.csv"), names=["ts","lat"])
#dq = pd.read_csv(join(basepath, "20180309_ffb03/nvme2n1_read_q2c.csv"), names=["ts","lat"])
dq = pd.read_csv(join(basepath, data["ffb02_write_proper"]), names=["ts","lat"])
dq.lat *= 1000. #latency in ms
# +
plt.style.use('seaborn')
fig = plt.figure(figsize=(15,5))
fig.add_subplot(121)
cut = (dq.lat < 10000.)
dq.loc[cut].lat.hist(bins=90)
plt.xlabel("latency [ms]")
plt.ylabel("count")
plt.title("queue->completion latency")
# Select the three peaks and plot how the entries
# are distributed in time (evenly, it looks like)
fig.add_subplot(122)
cut = (dq.lat < 0.1)
cut1 = (dq.lat > 0.1) & (dq.lat < 20.0)
cut2 = (dq.lat > 20.0)
q = dq.loc[cut].ts
q1 = dq.loc[cut1].ts
q2 = dq.loc[cut2].ts
q1.values
plt.hist((q.values,q1.values,q2.values)) # stacked=True)
plt.xlabel("time [s]")
plt.ylabel("count (normalized)")
plt.title("time distribution for the three peaks")
plt.tight_layout()
plt.show()
# -
dq.loc[dq.lat<110.1].describe()
| datasets/nvmeio/blk_lat_oss10/blklat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Ricapitolazione Lez 4
# - scansione di una stringa con il while o con il for
# - for: i in elenco, indicizza su un elenco di oggetti, non tipato
# - for su elementi di una stringa vs for su indici di una stringa
# - funzione range
# - ricorsione
# - disegni con asterischi
# - liste
# ## Soluzione Esercizi
# #### strig.format()
print('ciao sono {} e vivo a {}'.format('Marco','Trieste'))
print('ciao sono {} ed ho {} anni'.format('Luigi',25))
# #### Esercizio (Soluzione): insertionSort
# Passi dell'insertionSort:
#
# **input:** [29, 3, 71, 34, 8, 55, 91, 5, 27]
# 1 -> [**3**, 29, 71, 34, 8, 55, 91, 5, 27]
# 2 -> [**3, 29**, 71, 34, 8, 55, 91, 5, 27]
# 3 -> [**3, 29, 34**, 71, 8, 55, 91, 5, 27]
# 4 -> [**3, 8, 29, 34, 71**, 55, 91, 5, 27]
# 5 -> [**3, 8, 29, 34, 55, 71**, 91, 5, 27]
# 6 -> [**3, 8, 29, 34, 55, 71, 91**, 5, 27]
# 7 -> [**3, 5, 8, 29, 34, 55, 71, 91**, 27]
# 8 -> [**3, 5, 8, 27, 29, 34, 55, 71, 91**]
# **output:** [3, 5, 8, 27, 29, 34, 55, 71, 91]
# +
def insertionSort(lista):
    """Sort *lista* in place with insertion sort and return it.

    Returning the list (instead of the implicit None) is backward compatible
    for in-place callers and makes expressions such as
    ``insertionSort(list(a1)) == insertionSort(list(a2))`` (used in the
    anagram check below) compare the sorted contents instead of
    ``None == None``, which is always True.
    """
    for indice in range(1,len(lista)):
        valoreCorrente = lista[indice]
        posizione = indice
        # Shift every larger element one slot to the right, then drop the
        # current value into the hole that remains.
        while posizione>0 and lista[posizione-1]>valoreCorrente:
            lista[posizione]=lista[posizione-1]
            posizione = posizione-1
        lista[posizione]=valoreCorrente
        ##print('{} -> {}'.format(indice,lista)) #prints the algorithm's steps
    return lista
miaLista = [29,3,71,34,8,55,91,5,27]
print(miaLista)
insertionSort(miaLista)
print(miaLista)
# -
# #### Esercizio (Soluzione): basi diverse
# +
#### Esercizio
#Scrivere un programma che converte un numero in una base da 2 a 10 ad un numero in base 10
base=int(input("inserisci la base (2..10): "))
s=input("inserisci il numero in base "+str(base)+": ")
valore=0
polinomio=''
for i in range(len(s)):
valore=valore+int(s[i])*base**(len(s)-(i+1))
polinomio = polinomio+'+'+s[i]+'*'+str(base)+'^'+str((len(s)-(i+1)))
print("{} (base {}), in base 10 vale {} = {}".format(s,base,polinomio,valore))
# +
#f(0)=1
#f(1)=1
#f(n)=f(n-1)+f(n-2)
def fibonacci(n):
    """Return the n-th Fibonacci number, with f(0) = f(1) = 1."""
    # Base cases collapse into a single guard.
    if n <= 1:
        return 1
    # Recurrence: f(n) = f(n-1) + f(n-2).
    return fibonacci(n - 2) + fibonacci(n - 1)
fibonacci(7)
# +
def fattoriale(n):
    """Return n! computed recursively (0! = 1)."""
    if n == 0:
        # Base-case trace kept from the lesson (a visible side effect).
        print('fattoriale(0)=1')
        return 1
    ##print('fattoriale({})={}*fattoriale({})'.format(n,n,n-1)) #prints the iterations
    return fattoriale(n - 1) * n
fattoriale(10)
# -
# # Lez 5
# ## Liste
#
# Torniamo alle liste.
#
# La lista è una sequenza ordinata di elementi eterogenei e **mutabili**!!!
#
#le liste sono mutabili
lista = ['a',5,'b']
print('lista='+str(lista))
lista[0] = 'f'
print('lista='+str(lista))
#le stringe sono immutabili!!
parola = "ciao"
parola[0]='3'
#le liste possono contenere elementi di tipo diverso.
lista = ["ciao", 2.0, 5, [10, 20]]
#posso generare la lista vuota
lista = []
lista= list()
#posso costruire una lista partendo range.
list(range(5))
list(range(1,5))
list(range(2,10,2))
list(range(10,0,-2))
#posso costruire una lista partendo da una stringa.
list("ciao")
#posso costruire una lista partendo da una stringa e applicando la funzione split.
s1 = 'ciao come stai?'
s2 = s1.split(' ') # equivalemente s1.split(" ")
print(s2)
s1 = 'anshd#sdsda#asddd#dfdf#3233#ss?'
s2 = s1.split('#') # equivalemente s1.split(" ")
print(s2)
# +
#Osservazione: in Python virgolette ' ' e doppie-virgolette " " per riferirsi a stringhe sono intercambiabili.
#Sopra infatti abbiamo usato a volte '', altre volte "".
# -
# ### Accedere a un elemento: **x[ indice ]**
# (tutto quello che diremo fino agli Esercizi vale anche per le stringhe)
x = [23,3,2,65]
print('il primo elemento vale:'+str(x[0])) # <-- Python e molti altri linguaggi cominciano a contare da zero.
print('l\'ultimo elemento vale:'+str(x[-1]))
# ### Slicing: x[init : end : step]
x = [23,3,2,65]
x[1:3] #seleziona gli elementi di indice 1 e 3 (escluso).
x[2:] #dall'elemento di indice 2 fino alla fine.
x[:3] #dall'inizio fino all'elemento di indice 3 escluso.
y = x + [6,7,8,9,10] #concatenazione
print(y)
y[1:6:2] #dall'elemento di indice 1 fino all'elemento di indice 6 (escluso), con step 2 .
y[::2] #dall'inizio alla fine, con step 2.
y[::-1] #dall'inizio alla fine, con step -1 (quindi parte dalla fine e va verso l'inizio con step 1 )
y[::-3] #dall'inizio alla fine, con step -1 (quindi parte dalla fine e va verso l'inizio con step 3 )
# ## Esercizi (insieme)
parola = 'anna'
isPalindromo = (parola[::-1]==parola)
print('"{}" è un palindromo? {} '.format(parola,isPalindromo))
#palindromo
frase = 'i topi non avevano nipoti'
frase = frase.replace(" ","") #eliminiamo gli spazi bianchi
isPalindromo = (frase[::-1]==frase)
print('"{}" è un palindromo? {} '.format(frase,isPalindromo))
#ordinamento tra char
'a'<'b'
# + active=""
# ord
# Signature: ord(c, /)
# Docstring: Return the Unicode code point for a one-character string.
# Type: builtin_function_or_method
# -
ord('a')
ord('b')
#anagramma
a1 = 'no more stars'
a2 = 'astronomers'
a1 = a1.replace(" ","") #eliminiamo gli spazi bianchi
print('insertionSort: {}'.format(insertionSort(list(a1))==insertionSort(list(a2))))
print('sorted function : {}'.format(sorted(a1)==sorted(a2)))
# + active=""
# sorted?
# Signature: sorted(iterable, /, *, key=None, reverse=False)
# Docstring:
# Return a new list containing all items from the iterable in ascending order.
#
# A custom key function can be supplied to customize the sort order, and the
# reverse flag can be set to request the result in descending order.
# Type: builtin_function_or_method
# -
# ### Lunghezza di una lista
# Per sapere la lunghezza di una lista uso _len()_. Importante quando si deve ciclare sugli elementi di una lista!!!
# + active=""
# len?
# Signature: len(obj, /)
# Docstring: Return the number of items in a container.
# Type: builtin_function_or_method
# +
i=0
while i < len(x):
print(x[i])
i = i + 1
# -
# L'operatore **in** serve per vedere se un elemento è in una lista e per ciclare sulla lista con il for
x
1 in x
for i in x:
print(i)
#classico ciclo for
for i in range(len(x)):
print(x[i])
#range e intervalli
for i in range(0,10,2):
print(i)
# Operazioni sulle liste
[1,2]+[4,5] # concatenazione
[1,2]*3 # ripetizione concatenata
x= [1,2,3,4,5,6,7,8]
print('prima: '+str(x))
x[1:3]=[] #elimino gli elementi dall'indice 1 all'indice 3 (escluso)
print('dopo: '+str(x))
x= [1,2,3,4,5,6,7,8]
print('prima: '+str(x))
x[1:3]=['ciccia','casa','pluto'] #sostituisco gli elementi dall'indice 1 all'indice 3 (escluso) con altri elementi.
print('dopo: '+str(x))
x= [1,2,3,4,5,6,7,8]
print('x='+str(x))
print('cancello il primo elemento')
del x[0]
print('x='+str(x))
# ### Attenzione all'operatore di assegnazione ('=') quando lavorate con oggetti mutabili.
list1 = ['carlo','magno']
print('prima: list1='+str(list1))
list2=list1 # list2 ed list1 puntano alla stesso oggetto in memoria.
list2[0]='alessandro' #tutte le modifiche che faccio su list2 le sto facendo anche su list1!!!!
print('dopo: list1='+str(list1))
# <img src="fig/assegnazione.png" width="400">
# Demo: plain assignment copies the *reference*, so both names share one list.
list1 = ['a',7,'ciao']
list2 = list1  # fixed: was `list2=l1` — `l1` is undefined here (NameError)
list2[1] = 123  # mutating through list2 is visible through list1 too
print('list1:'+str(list1))
# ### Copiare una lista: list.copy()
# <img src="fig/copy.png" width="400">
list1 = ['a',7,'pippo']
list2=list1.copy() # oppure list2=list1[:]
list2[0]='b'
print('list1[0]='+str(list1[0]))
print('list2[0]='+str(list2[0]))
# When the copy is made...
list3 = ['11',7,23]
list4 = list3.copy()
# ...list4 points to a different object than list3...
print('list3 e list4 puntano allo stesso oggetto? '+str( id(list4)==id(list3)))
# ...but the slots list3[0..2] and list4[0..2] still point to the very same
# element objects — beware when some of those elements are mutable,
# e.g. nested lists (see the nested-lists section!).
for i in range(len(list3)):  # fixed: was `len(l3)` — `l3` is undefined (NameError)
    print('list3[{}] e list4[{}] puntano allo stesso oggetto? {}'.format(i,i,id(list3[i])==id(list4[i])))
# ## Attenzione alla copia di liste annidate.
# <img src="fig/annidate.png" width="450">
#occhio alle liste annidate!!!
list1 = ['a','b',['c','d']]
list2= list1.copy()
print('prima: list1: '+str(list1))
print('prima: list2: '+str(list2))
#list1 e list2 puntano ad oggetti diversi.
print('list1 e list2 puntano ad oggetti diversi? '+str(id(list1)!=id(list2)))
#tuttavia visto che list1[2] e list2[2] puntano a un oggetto mutabile se io lo modifico tramite list2...
list2[2][0]='q'
print('assegno "q" a list2[2][0]')
#...la modifica la vedrò anche su list1
print('dopo: list1: '+str(list1))
print('dopo: list2: '+str(list2))
# ### per copiare liste annidate posso usare il deepcopy()
# <img src="fig/deepcopy.png" width="450">
# +
import copy
list1 = ['a','b',['c','d']]
list2= copy.deepcopy(list1)
print('prima: list1: '+str(list1))
print('prima: list2: '+str(list2))
#list1 e list2 puntano ad oggetti diversi.
print('list1 e list2 puntano ad oggetti diversi? '+str(id(list1)!=id(list2)))
list2[2][0]='k'
print('assegno "k" a list1[2][0]')
#visto che ho usato il deepcopy la modifica la vedrò solo su list2
print('dopo: list1: '+str(list1))
print('dopo: list2: '+str(list2))
# -
# Inspect the source list after the deepcopy demo.
# Fixed: the cell read `l1`, a name that was never defined (NameError);
# the list created above is called list1.
list1
# ## Metodi delle liste
# + active=""
# !!!!! I metodi delle liste modificano l'oggetto di partenza, i metodi delle stringhe non modificano la stringa stessa !!!!
# -
l3=list()
l3
l4 = l3.append('casa') ### modifica l3 ma non restituisce nessun valore
l3
l3.append(9) # ho modificato la lista
l3
l2
l2=l2*2 # ho creato una nuova lista
l2
l2[0]='ciao'
l2
l2[2][0]='giuseppe'
l2
l4=['a','b','c']
l4
ele1= l4.pop()
ele1
l4
ele2 = l4.pop(0)
l4
l5 = [1,2,3]
l6=[4,5,6]
l5=l5+l6
l5
l5 = [1,2,3]
l6=[4,5,6]
l5.extend(l6)
l5
l5.extend(l6)
l5
x=[3,4,67,88,900,2,1,124,5]
x.sort()
x
# ## Esercizio (insieme): trova errore
# +
import random
# random.randint?
# -
#generare un numero intero randomico tra 0 e 9 (incluso)
random.randint(0,9)
for i in range(30):
print(str(random.randint(0,9))+' ',end="")
#scrivere la funzione randomNumber che accetta come input un numero intero (n) tra 0 e 9 e genera una stringa di numeri
#random che devono contenere n solo in ultima posizione. Se n non è in [0,9] la funzione deve ritornare 'fail'.
#Esempio:
#randomNumber(3) può generare 124573, oppure 14587413 ma non deve generare 13245783 (perchè c'è un 3
#in posizione 2), 124574 (perchè non c'è il 3) etc.
#randomNumber(57) ritorna 'fail' perchè 57 non è in [0,9]
import random
def randomNumber(n):
s = str(n)
condition = s in '0123456789' # ERRORE: controlla se esiste una sottostringa di '0123456789' uguale a s,
if not condition: # quindi anche 12 23 45 sono in '0123456789' ....
res= 'fail'
else:
exit = 'p'
res = ''
while exit!= s:
exit=str(random.randint(0,9))
res =res+exit
return res
# +
# randomNumber(12) entra in un ciclo infinito
# -
import random
def randomNumberCorrect(n):
s = str(n)
condition = s in list('0123456789') # ERRORE: controlla se esiste una sottostringa di '0123456789' uguale a s,
if not condition: # quindi anche 12 23 45 sono in '0123456789' ....
res= 'fail'
else:
exit = 'p'
res = ''
while exit!= s:
exit=str(random.randint(0,9))
res =res+exit
return res
randomNumberCorrect(8)
# ## Esercizio (a casa)
# <img src="fig/esercizio.png" width="500">
| Lez05/Lez5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="tQ6YfzmpfQ1C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 340} outputId="19bf6125-323c-4464-8758-f4d9f85222e2"
# !pip install transformers
# + id="z_l2fgLbe4Z_" colab_type="code" colab={}
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
import torch
src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."""
]
model_name = 'google/pegasus-xsum'
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = PegasusTokenizer.from_pretrained(model_name)
model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
batch = tokenizer.prepare_seq2seq_batch(src_text, truncation=True, padding='longest').to(torch_device)
translated = model.generate(**batch)
# + id="9xaLQJikfynV" colab_type="code" colab={}
tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
assert tgt_text[0] == "California's largest electricity provider has turned off power to hundreds of thousands of customers."
# + id="BZXlbE2xh5ea" colab_type="code" colab={}
| text-summarisation/PEGASUS_implementation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# ## *DISCLAIMER*
# <p style="font-size:16px; color:#117d30;">
# By accessing this code, you acknowledge the code is made available for presentation and demonstration purposes only and that the code: (1) is not subject to SOC 1 and SOC 2 compliance audits; (2) is not designed or intended to be a substitute for the professional advice, diagnosis, treatment, or judgment of a certified financial services professional; (3) is not designed, intended or made available as a medical device; and (4) is not designed or intended to be a substitute for professional medical advice, diagnosis, treatment or judgement. Do not use this code to replace, substitute, or provide professional financial advice or judgment, or to replace, substitute or provide medical advice, diagnosis, treatment or judgement. You are solely responsible for ensuring the regulatory, legal, and/or contractual compliance of any use of the code, including obtaining any authorizations or consents, and any solution you choose to build that incorporates this code in whole or in part.
# </p>
# ## Please don't run / don't click "Run all" the notebook:
# At the time of writing of this document, the current core limit is 200 cores per workspace and depending upon number of concurrent users, you may end up with core capacity being exceeded or maximum number of parallel jobs being exceeded error.
# ## Fetch Marketing Campaigns data into DataFrame and Calculate Revenue Variance
# %%pyspark
data_path = spark.read.load('abfss://marketingdb-staging@#DATA_LAKE_NAME#.dfs.core.windows.net/CampaignAnalytics.csv', format='csv',header=True)
data_path.show(10)
# ## Load into Pandas and Perform Cleansing Operations
#
# +
# %%pyspark
from pyspark.sql.functions import *
from pyspark.sql.types import *
import numpy as np
pd_df = data_path.select("*").toPandas()
'''Cleansing Operations:
1. Columns Revenue, Revenue_Target: Remove '$' symbol and convert datatype to float
2. Columns Revenue, Revenue_Target: Replace null values with 0
3. Columns Region, Country, Product_Category, Campaign_Name: Convert columns to Camel Case
'''
# Raw strings for the regexes: '[\$,]' in a plain literal contains an invalid
# escape sequence (SyntaxWarning on recent Pythons). The pattern strips the
# dollar sign and thousands separators before the float conversion.
pd_df['Revenue'] = pd_df['Revenue'].replace(r'[\$,]', '', regex=True).astype(float)
pd_df['Revenue_Target'] = pd_df['Revenue_Target'].replace(r'[\$,]', '', regex=True).astype(float)
# Assign the result instead of Series.fillna(..., inplace=True): an in-place
# fill on a selected column stops updating the parent frame under pandas
# copy-on-write semantics.
pd_df['Revenue'] = pd_df['Revenue'].fillna(0)
pd_df['Revenue_Target'] = pd_df['Revenue_Target'].fillna(0)
# Normalise the text columns to Title Case.
pd_df['Region'] = pd_df.Region.str.title()
pd_df['Country'] = pd_df.Country.str.title()
pd_df['Product_Category'] = pd_df.Product_Category.str.title()
pd_df['Campaign_Name'] = pd_df.Campaign_Name.str.title()
# -
# ## Data Transformation - Calculate Revenue Variance
#
# +
#Create new column
pd_df['Revenue_Variance'] = pd_df['Revenue_Target'] - pd_df['Revenue']
print(pd_df[1:5])
# -
# ## Move data to Azure Data Lake Gen2
#
# +
# %%pyspark
df = spark.createDataFrame(pd_df)
df.show(5)
(df
.coalesce(1)
.write
.mode("overwrite")
.option("header", "true")
.format("com.databricks.spark.csv")
.save('abfss://marketingdb-staging@#DATA_LAKE_NAME#.dfs.core.windows.net/Campaignsdata'))
| Manufacturing/automation/artifacts/notebooks/4 MFG Campaign Analytics DataPrep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="tMXsZ8LkP0s6" outputId="95ec90b6-2ef0-4994-e128-626a51bc68cd"
#DataFlair Guide for Binary Search
#RECURSIVE FUNCTION CALL BASED APPROACH
#Function to search element in list
def binary_search(start, end, int_list, target):
    """Recursive binary search over the sorted slice int_list[start..end].

    Returns the 1-based position of *target*, or -1 when it is absent.
    """
    # Empty search range -> the element is not present.
    if start > end:
        return -1
    mid = (start + end) // 2
    if int_list[mid] == target:
        return mid + 1  # report a 1-based position
    if target < int_list[mid]:
        # Target can only be in the lower half.
        return binary_search(start, mid - 1, int_list, target)
    # Otherwise it can only be in the upper half.
    return binary_search(mid + 1, end, int_list, target)
#Read length of list from user
length = int(input("Enter length of list: "))
int_list = []
#Read elements of list
for i in range(length):
element = int(input("Enter element: "))
int_list.append(element)
#Sort the list
int_list=sorted(int_list)
print(int_list)
#Read target element to be found
target = int(input("Enter target element: "))
position = binary_search(0,length-1,int_list,target)
if position == -1:
print('Element not in list')
else:
print("Element found at position: "+ str(position))
# + colab={"base_uri": "https://localhost:8080/"} id="-uCtitJDRVMw" outputId="f2ac39bd-38cc-44b4-9a1a-b32f5815f911"
#DataFlair Guide for Binary Search
#ITERATIVE APPROACH

# Read how many values the user will supply.
length = int(input("Enter length of list: "))

# Collect the values one by one.
int_list = []
for _ in range(length):
    int_list.append(int(input("Enter element: ")))

# Binary search requires sorted input.
int_list = sorted(int_list)
print(int_list)

# Read target element to be found.
target = int(input("Enter target element: "))

# Classic iterative binary search over the closed range [start, end].
start, end = 0, length - 1
position = -1
while start <= end:
    mid = (start + end) // 2
    if int_list[mid] == target:
        position = mid
        break
    if target < int_list[mid]:
        end = mid - 1    # target can only be in the lower half
    else:
        start = mid + 1  # target can only be in the upper half

if position == -1:
    print('Element not in list')
else:
    print("Element found at position: "+ str(position+1))
# + id="_r7zNUfdUjW7"
1
| binary-search-python-code/Binary_Search_Algorithm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="uQ6wc2HE0pke"
# # **Lab: Neural Networks**
#
#
# + [markdown] id="tQgxLRrvjiJb"
# ## Exercise 1: Regression with Pytorch
#
# In this exercise, we will build a Neural Networks with Pytorch for predicting pollution level. We will be working on the Beijing Pollution dataset:
# https://code.datasciencedojo.com/datasciencedojo/datasets/tree/master/Beijing%20PM2.5
#
# The steps are:
# 1. Setup Repository
# 2. Load and Explore Dataset
# 3. Prepare Data
# 4. Baseline Model
# 5. Define Architecture
# 6. Create Data Loader
# 7. Train Model
# 8. Assess Performance
# 9. Push Changes
#
# + [markdown] id="_NCwQQFkU3v5"
# ### 2. Load and Explore Dataset
# + [markdown] id="lupPwT3qWqyq"
# **[2.1]** Download the dataset into the `data/raw` folder:https://code.datasciencedojo.com/datasciencedojo/datasets/raw/master/Beijing%20PM2.5/PRSA_data_2010.1.1-2014.12.31.csv
# + id="iqVIqdjbWqyq" outputId="28601707-563e-48d3-a364-35f5124576e9"
# !wget -P ../data/raw https://code.datasciencedojo.com/datasciencedojo/datasets/raw/master/Beijing%20PM2.5/PRSA_data_2010.1.1-2014.12.31.csv
# + [markdown] id="4cZojBrtWRz0"
# **[2.2]** Launch the magic commands for auto-relaoding external modules
# + id="0p7MGcUSWSAr"
#Solution
# %load_ext autoreload
# %autoreload 2
# + [markdown] id="JAIO_Y5Z9_Ay"
# **[2.3]** Import the pandas and numpy packages
# + id="2VRE9JYD9_Kk"
#Solution
import pandas as pd
import numpy as np
# + [markdown] id="R-Zy6Oq8pkuB"
# **[2.4]** Load the data in a dataframe called `df`
#
# + id="Q1iETWjDftMg"
#Solution:
df = pd.read_csv('../data/raw/PRSA_data_2010.1.1-2014.12.31.csv')
# + [markdown] id="CLyMcoNCsx2k"
# **[2.5]** Display the first 5 rows of df
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="xvnbhiPhs0ZP" outputId="e53ecd49-2f83-4391-8d44-6a02fdcc13df"
# Solution
df.head()
# + [markdown] id="gQgeYjQDs12m"
# **[2.6]** Display the dimensions (shape) of df
# + colab={"base_uri": "https://localhost:8080/"} id="Dg_89DlAs1_w" outputId="7b5cdeac-b62b-4a49-dd1f-db9dbb336fd0"
# Solution
df.shape
# + [markdown] id="xyle1PCws7B0"
# **[2.7]** Display the summary (info) of df
# + colab={"base_uri": "https://localhost:8080/"} id="l1msvlh7s7Lt" outputId="7e607270-809a-4587-ac90-5e168d8f4ee8"
# Solution
df.info()
# + [markdown] id="eWLgqm2YtAgP"
# **[2.8]** Display the descriptive statistics of df
#
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="FQLSaoXltAp-" outputId="6fb0ec68-3920-451b-a5e6-20b708a8f3d7"
# Solution
df.describe()
# + [markdown] id="miQ6SiKlscLx"
# ### 3. Prepare Data
# + [markdown] id="NtuF1V6ctwn-"
# **[3.1]** Create a copy of `df` and save it into a variable called `df_cleaned`
# + id="HrXR7NCLtwxB"
# Solution
df_cleaned = df.copy()
# + [markdown] id="SWnL2dh5W2Th"
# **[3.2]** Remove the column `No` as it is an identifier for rows
# + id="TdbDsjpLW2ev"
# Solution
df_cleaned.drop('No', axis=1, inplace=True)
# + [markdown] id="nsKJOOyZXGOw"
# **[3.3]** Remove the missing values from the target variable `pm2.5`
# + id="AhJBqXnbXGY-"
# Solution
df_cleaned.dropna(inplace=True)
# + [markdown] id="6K9x8C7dXbis"
# **[3.4]** Reset the indexes of the dataframe
# + id="83iRE5u5XbtD"
# Solution
df_cleaned.reset_index(drop=True, inplace=True)
# + [markdown] id="m1RrTCBRDybQ"
# **[3.5]** Import `StandardScaler` and `OneHotEncoder` from `sklearn.preprocessing`
# + id="aezRs9S3Dyl0"
# Solution
from sklearn.preprocessing import StandardScaler, OneHotEncoder
# + [markdown] id="5pKaCxasERAt"
# **[3.6]** Create a list called `num_cols` that contains `year`, `DEWP`, `TEMP`, `PRES`, `Iws`, `Is`, `Ir`
# + id="yDwCmIXvERJO"
# Solution
num_cols = ['year', 'DEWP', 'TEMP', 'PRES', 'Iws', 'Is', 'Ir']
# + [markdown] id="-l0Hkri1FVrv"
# **[3.7]** Instantiate a `StandardScaler` and called it `sc`
# + id="x_yCjMqgFV1u"
# Solution
sc = StandardScaler()
# + [markdown] id="w2v1vLMWTofp"
# **[3.8]** Fit and transform the numeric feature of `df_cleaned` and replace the data into it
# + id="dfUpbRYSTopS"
# Solution
df_cleaned[num_cols] = sc.fit_transform(df_cleaned[num_cols])
# + [markdown] id="lt8YYcHuTsDs"
# **[3.9]** Create a list called `cat_cols` that contains `month`, `day`, `hour`, `cbwd`
# + id="z6isEtGBTsNz"
# Solution
cat_cols = ['month', 'day', 'hour', 'cbwd']
# + [markdown] id="_sWy8I5FYVhU"
# **[3.10]** Instantiate a `OneHotEncoder` and called it `ohe`
# + id="OyWdvAEwYVrW"
# Solution
ohe = OneHotEncoder(sparse=False)
# + [markdown] id="guu_QM_KN8Aw"
# **[3.11]** Perform One-Hot encoding on `cat_cols` and save them into a dataframe called `X_cat`
# + id="Gv7PFYRONv0o"
# Solution
X_cat = pd.DataFrame(ohe.fit_transform(df_cleaned[cat_cols]))
# + [markdown] id="Uo-a_TQpYyOA"
# **[3.12]** Extract the feature names from `ohe` and replace the names of the columns of the `X_cat`
# + id="L5RmoZq1YyiQ"
# Solution
X_cat.columns = ohe.get_feature_names(cat_cols)
# + [markdown] id="HjseyrsDYzdO"
# **[3.13]** Drop the original columns of `cat_cols` from `df_cleaned`
# + id="ysPhEhxfYztM"
# Solution
df_cleaned.drop(cat_cols, axis=1, inplace=True)
# + [markdown] id="8UjZDvdZN7N6"
# **[3.14]** Concatenate `df_cleaned` with `X_cat` and save the result to a variable called `X`
# + id="qsTYYnsSN7Wl"
# Solution
X = pd.concat([df_cleaned, X_cat], axis=1)
# + [markdown] id="Cx4JAuiuzaIe"
# **[3.15]** Import `split_sets_by_time` and `save_sets` from `src.data.sets`
# + id="OOefCMTLzaRQ"
# Solution
from src.data.sets import split_sets_by_time, save_sets
# + [markdown] id="X7DCMLV6TBjr"
# **[3.16]** Split the data into training and testing sets with 80-20 ratio
# + id="ICjW0_-4TBvu"
# Solution
X_train, y_train, X_val, y_val, X_test, y_test = split_sets_by_time(X, target_col='pm2.5', test_ratio=0.2)
# + [markdown] id="Bvx44G2_Wqy4"
# **[3.17]** Create the following folder: `data/processed/beijing_pollution`
# + id="Jm3cSvCSWqy5"
# !mkdir ../data/processed/beijing_pollution
# + [markdown] id="J--rD8iNRH8B"
# **[3.18]** Save the sets in the `data/processed/beijing_pollution` folder
# + id="_wO5x1RlRIE6"
save_sets(X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, X_test=X_test, y_test=y_test, path='../data/processed/beijing_pollution/')
# + [markdown] id="N8MNBrC4Zgz6"
# ### 4. Baseline Model
# + [markdown] id="3Lt1lE8lJ9zS"
# **[4.1]** Import `NullModel` from `src.models.null`
# + id="2xufkK8VJ99s"
# Solution
from src.models.null import NullModel
# + [markdown] id="sJNZfvA4dJ9X"
# **[4.2]** Instantiate a `NullModel` and call `.fit_predict()` on the training target to extract your predictions into a variable called `y_base`
# + id="fev4FWAYdU1G"
# Solution:
baseline_model = NullModel()
y_base = baseline_model.fit_predict(y_train)
# + [markdown] id="kRLIZeci7cfW"
# **[4.3]** Import `print_reg_perf` from `src.models.performance`
# + id="Z1Jt8WX57cqn"
# Solution:
from src.models.performance import print_reg_perf
# + [markdown] id="zlv1ny8Jg10r"
# **[4.4]** Print the regression metrics for this baseline model
# + id="n4DoenMJg2AC" outputId="aecd3131-a35a-4370-baf4-36d5309d146e"
# Solution:
print_reg_perf(y_base, y_train, set_name='Training')
# + [markdown] id="JUEbyrm2ZzhL"
# ### 5. Define Architecture
# + [markdown] id="faMubeDzZzuX"
# **[5.1]** Import `torch`, `torch.nn` as `nn` and `torch.nn.functional` as `F`
# + id="vBSoR7LTZz3-"
# Solution:
import torch
import torch.nn as nn
import torch.nn.functional as F
# + [markdown] id="HKBNkwgmgVPQ"
# **[5.2]** Create in `src/models/pytorch.py` a class called `PytorchRegression` that inherits from `nn.Module` with:
# - `num_features` as input parameter
# - attributes:
# - `layer_1`: fully-connected layer with 128 neurons
# - `layer_out`: fully-connected layer with 1 neurons
# - methods:
# - `forward()` with `inputs` as input parameter, perform ReLU and DropOut on the fully-connected layer followed by the output layer
# + [markdown] id="siffJESGfZDt"
# # Solution:
# class PytorchRegression(nn.Module):
# def __init__(self, num_features):
# super(PytorchRegression, self).__init__()
#
# self.layer_1 = nn.Linear(num_features, 128)
# self.layer_out = nn.Linear(128, 1)
#
# def forward(self, x):
# x = F.dropout(F.relu(self.layer_1(x)))
# x = self.layer_out(x)
# return (x)
# + [markdown] id="ORcJNo4ygaRa"
# **[5.3]** Instantiate `PytorchRegression` with the correct number of input feature and save it into a variable called `model`
# + id="QkXUqKMcgaZN"
# Solution:
from src.models.pytorch import PytorchRegression
model = PytorchRegression(X_train.shape[1])
# + [markdown] id="rBxCIn10ZDAF"
# **[5.4]** Create in `src/models/pytorch.py` a function called `get_device()` with:
# - Logics: check if cuda is available and return `cuda:0` if that is the case `cpu` otherwise
# - Output: device to be used by Pytorch
# + id="c6oNYKJEZDM4"
# Solution:
def get_device():
    """Return the torch device to run on: 'cuda:0' when a GPU is available, else 'cpu'."""
    name = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    return torch.device(name)
# + [markdown] id="CR22BA8dZnKz"
# **[5.5]** Set `model` to use the device available
# + id="nmw1xla2ZnYT" outputId="cf03f71b-e351-4c65-85de-333497000da7"
# Solution:
from src.models.pytorch import get_device
device = get_device()
model.to(device)
# + [markdown] id="ekaUYYjqgfcF"
# **[5.6]** Print the architecture of `model`
# + id="h8Jmfhk0MQ0i" outputId="e57e6a4a-9378-4a8b-bb4b-36c2c3dd3d41"
# Solution:
print(model)
# + [markdown] id="-IbQs_iqinhq"
# ### 6. Create Data Loader
# + [markdown] id="IRx25yhYjDVB"
# **[6.1]** Import `Dataset` and `DataLoader` from `torch.utils.data`
# + id="fFshbWLVjDiU"
# Solution:
from torch.utils.data import Dataset, DataLoader
# + [markdown] id="fEueJp-0iy2T"
# **[6.2]** Create in `src/models/pytorch.py` a class called `PytorchDataset` that inherits from `torch.utils.data.Dataset` with:
# - `X` and `y` as input parameters
# - attributes:
# - `X_tensor`: X converted to Pytorch tensor
# - `y_tensor`: y converted to Pytorch tensor
# - methods:
# - `__getitem__(index)`
# Return features and target for a given index
# - `__len__`
# Return the number of observations
# - `to_tensor(data)`
# Convert Pandas Series to Pytorch tensor
# + id="EqQR9E5hiy-T"
# Placeholder for student's code (multiple lines of Python code)
# Task: Create a class called PytorchDataset
# + id="SoP-YD7EizA_"
# Solution:
class PytorchDataset(Dataset):
    """Pytorch dataset wrapping a feature matrix and a target vector.

    Attributes
    ----------
    X_tensor : torch.Tensor
        Features converted to a Pytorch tensor.
    y_tensor : torch.Tensor
        Target converted to a Pytorch tensor.

    Methods
    -------
    __getitem__(index)
        Return features and target for a given index.
    __len__
        Return the number of observations.
    to_tensor(data)
        Convert a Pandas Series (or array-like) to a Pytorch tensor.
    """
    def __init__(self, X, y):
        # Convert both inputs once, up front.
        self.X_tensor = self.to_tensor(X)
        self.y_tensor = self.to_tensor(y)

    def __getitem__(self, index):
        # (features, target) pair for one observation.
        return self.X_tensor[index], self.y_tensor[index]

    def __len__(self):
        # Number of observations in the dataset.
        return self.X_tensor.shape[0]

    def to_tensor(self, data):
        # Route through numpy so pandas objects and plain lists are handled uniformly.
        return torch.Tensor(np.array(data))
# + [markdown] id="55TZNjVLjzxP"
# **[6.3]** Import this class from `src/models/pytorch` and convert all sets to PytorchDataset
# + id="ls8gm3JIjz7A"
# Solution:
from src.models.pytorch import PytorchDataset
train_dataset = PytorchDataset(X=X_train, y=y_train)
val_dataset = PytorchDataset(X=X_val, y=y_val)
test_dataset = PytorchDataset(X=X_test, y=y_test)
# + [markdown] id="XjypmrX2sojQ"
# **[6.4]** Import DataLoader from `torch.utils.data`
# + id="F0NlzTRAsouj"
# Solution:
from torch.utils.data import DataLoader
# + [markdown] id="ppK2EFnZs8mb"
# ### 7. Train Model
# + [markdown] id="JE8fVHin92-6"
# **[7.1]** Instantiate a `nn.MSELoss()` and save it into a variable called `criterion`
# + id="oludTfN193I-"
# Solution:
criterion = nn.MSELoss()
# + [markdown] id="4sxqDnYJ_CxY"
# **[7.2]** Instantiate a `torch.optim.Adam()` optimizer with the model's parameters and 0.001 as learning rate and save it into a variable called `optimizer`
# + id="GUxr7KPY_C-y"
# Solution:
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# + [markdown] id="d7L45Xp3aJJc"
# **[7.3]** Create a function called `train_regression()` that will perform forward and back propagation and calculate loss and RMSE scores
# + id="bPf8514oaJTt"
def train_regression(train_data, model, criterion, optimizer, batch_size, device, scheduler=None, collate_fn=None):
    """Train a Pytorch regression model for one pass over the data.

    Parameters
    ----------
    train_data : torch.utils.data.Dataset
        Pytorch dataset
    model : torch.nn.Module
        Pytorch model
    criterion : function
        Loss function
    optimizer : torch.optim
        Optimizer
    batch_size : int
        Number of observations per batch
    device : str
        Name of the device used for the model
    scheduler : torch.optim.lr_scheduler
        Pytorch scheduler used for updating learning rate
    collate_fn : function
        Function defining required pre-processing steps

    Returns
    -------
    float
        Loss score
    float
        RMSE score
    """
    # Training mode: enables dropout / batch-norm updates
    model.train()
    running_loss = 0

    # Shuffle each epoch so batches differ between passes
    loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)

    for features, targets in loader:
        # Clear gradients accumulated from the previous batch
        optimizer.zero_grad()
        # Move the batch to the training device
        features, targets = features.to(device), targets.to(device)
        # Forward pass
        predictions = model(features)
        # Batch loss and running total
        batch_loss = criterion(predictions, targets)
        running_loss += batch_loss.item()
        # Backward pass and weight update
        batch_loss.backward()
        optimizer.step()
        # Optionally adjust the learning rate
        if scheduler:
            scheduler.step()

    mean_loss = running_loss / len(train_data)
    return mean_loss, np.sqrt(mean_loss)
# + [markdown] id="_nXfK0H-_1aM"
# **[7.4]** Create a function called `test_regression()` that will perform forward and calculate loss and RMSE scores
# + id="HjsdSLle_1mb"
def test_regression(test_data, model, criterion, batch_size, device, collate_fn=None):
    """Calculate performance of a Pytorch regression model.

    Parameters
    ----------
    test_data : torch.utils.data.Dataset
        Pytorch dataset
    model : torch.nn.Module
        Pytorch model
    criterion : function
        Loss function
    batch_size : int
        Number of observations per batch
    device : str
        Name of the device used for the model
    collate_fn : function
        Function defining required pre-processing steps

    Returns
    -------
    float
        Loss score
    float
        RMSE score
    """
    # Evaluation mode: freezes dropout / batch-norm behaviour
    model.eval()
    running_loss = 0

    # No shuffling needed for evaluation
    loader = DataLoader(test_data, batch_size=batch_size, collate_fn=collate_fn)

    for features, targets in loader:
        features, targets = features.to(device), targets.to(device)
        # Disable autograd: no gradients needed for scoring
        with torch.no_grad():
            predictions = model(features)
            batch_loss = criterion(predictions, targets)
            running_loss += batch_loss.item()

    mean_loss = running_loss / len(test_data)
    return mean_loss, np.sqrt(mean_loss)
# + [markdown] id="R7OmK12vAD1O"
# **[7.5]** Create 2 variables called `N_EPOCHS` and `BATCH_SIZE` that will take respectively 5 and 32 as values
# + id="O7IyO0ugAD9d"
# Solution:
N_EPOCHS = 5
BATCH_SIZE = 32
# + [markdown] id="tLMMkp9XAGMf"
# **[7.6]** Create a for loop that will iterate through the specified number of epochs and will train the model with the training set and assess the performance on the validation set and print their scores
# + id="DJTG4hvIddpv" outputId="242f89a5-f1e4-42ad-e49c-809505326d68"
# Solution:
from src.models.pytorch import train_regression, test_regression
for epoch in range(N_EPOCHS):
    # One full pass over the training data, updating the model weights
    train_loss, train_rmse = train_regression(train_dataset, model=model, criterion=criterion, optimizer=optimizer, batch_size=BATCH_SIZE, device=device)
    # Score on the validation set (no weight updates)
    valid_loss, valid_rmse = test_regression(val_dataset, model=model, criterion=criterion, batch_size=BATCH_SIZE, device=device)
    print(f'Epoch: {epoch}')
    print(f'\t(train)\tLoss: {train_loss:.4f}\t|\tRMSE: {train_rmse:.1f}')
    print(f'\t(valid)\tLoss: {valid_loss:.4f}\t|\tRMSE: {valid_rmse:.1f}')
# + [markdown] id="P2W8S3ewcyKl"
# **[7.7]** Save the model into the `models` folder
# + id="gPfmTNxocyXC"
# Solution
torch.save(model, "../models/pytorch_reg_pm2_5.pt")
# + [markdown] id="GyacNiLhz7BF"
# ### 8. Assess Performance
# + [markdown] id="d3-lk01m0F1S"
# **[8.1]** Assess the model performance on the testing set and print its scores
# + id="IRzOY6wK0FJ9" outputId="c7da0826-a933-493d-fa0c-4e3be0d7cffa"
test_loss, test_rmse = test_regression(test_dataset, model=model, criterion=criterion, batch_size=BATCH_SIZE, device=device)
print(f'\tLoss: {test_loss:.4f}\t|\tRMSE: {test_rmse:.1f}')
# + [markdown] id="1yX0Ocg4hcZM"
# ### 9. Push changes
# + [markdown] id="3guOKU9gjrmp"
# **[9.1]** Add you changes to git staging area
# + id="lKuRNeqAj0ym"
# Placeholder for student's code (1 command line)
# Task: Add you changes to git staging area
# + id="axcj-jS0jruy"
# Solution:
git add .
# + [markdown] id="6nUK2dp_j67X"
# **[9.2]** Create the snapshot of your repository and add a description
# + id="1-M-aS-Ij7EE"
# Placeholder for student's code (1 command line)
# Task: Create the snapshot of your repository and add a description
# + id="zovhzXRxj7Il"
# Solution:
git commit -m "pytorch regression"
# + [markdown] id="Y9FciIQZj7nX"
# **[9.3]** Push your snapshot to Github
# + id="IR7i6D5hj7uO"
# Placeholder for student's code (1 command line)
# Task: Push your snapshot to Github
# + id="WaVAgJ4Aj7wi"
# Solution:
git push
# + [markdown] id="d7a6bwMniAs1"
# **[9.4]** Check out to the master branch
# + id="eM9v_33XiA1I"
# Placeholder for student's code (1 command line)
# Task: Check out to the master branch
# + id="d6-AI0x7iA4M"
# Solution:
git checkout master
# + [markdown] id="v98Ka9kNiBLw"
# **[9.5]** Pull the latest updates
# + id="yNZb1PyEjIOP"
# Placeholder for student's code (1 command line)
# Task: Pull the latest updates
# + id="5TJAEYxPjIRS"
# Solution:
git pull
# + [markdown] id="pGd3Xdx-jJDk"
# **[9.6]** Check out to the `pytorch_reg` branch
# + id="aEYg8wauiBUb"
# Placeholder for student's code (1 command line)
# Task: Merge the branch pytorch_reg
# + id="YNZunyVsiBXd"
# Solution:
git checkout pytorch_reg
# + [markdown] id="hjB_56bGdqpS"
# **[9.7]** Merge the `master` branch and push your changes
# + id="q25rjnxTdqx5"
# Placeholder for student's code (2 command lines)
# Task: Merge the master branch and push your changes
# + id="NTbrlcNzdq03"
# Solution:
git merge master
git push
# + [markdown] id="8B98cSvWkB-x"
# **[9.8]** Go to Github and merge the branch after reviewing the code and fixing any conflict
# + [markdown] id="EkQx18jYiEvQ"
# **[9.9]** Stop the Docker container
# + id="OlimZMQsiE7w"
# Placeholder for student's code (1 command line)
# Task: Stop the Docker container
# + id="ovwOAbC5iE-T"
# Solution:
docker stop adv_dsi_lab_5
| notebooks/1_pytorch_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # t-SNE 範例
# * 觀察S曲線使用 t-SNE 不同 perplexity 的流形還原效果
# +
# 載入套件
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
from time import time
# +
# Model and plotting parameters
n_samples = 300
n_components = 2
(fig, subplots) = plt.subplots(2, 5, figsize=(15, 6))
perplexities = [4, 6, 9, 14, 21, 30, 45, 66, 100]
# Generate the S-curve data points.
# NOTE: `datasets.samples_generator` was removed in scikit-learn 0.24;
# `make_s_curve` is exposed directly on `sklearn.datasets`.
X, color = datasets.make_s_curve(n_samples, random_state=0)
# Plot the original data (x vs z axes) in the first subplot slot
ax = subplots[0][0]
ax.set_title("Original")
ax.scatter(X[:, 0], X[:, 2], c=color)
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
for i, perplexity in enumerate(perplexities):
    # Slot 0 of the 2x5 grid holds the original plot, so result i goes to slot i+1.
    ax = subplots[(i + 1) // 5][(i + 1) % 5]

    start = time()
    tsne = manifold.TSNE(n_components=n_components, perplexity=perplexity)
    trans_X = tsne.fit_transform(X)
    elapsed = time() - start
    print('perplexity={} in {:.2f} sec'.format(perplexity, elapsed))

    ax.set_title('Perplexity: {}'.format(perplexity))
    ax.scatter(trans_X[:, 0], trans_X[:, 1], c=color)
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    ax.axis('tight')
plt.show()
# -
# # 作業
# * 將原始資料集換為S型圖, 觀察不同 perplexity 下的流形還原結果
# 繪製不同 perplexity 下的 t-SNE 分群圖 (請注意! 語法也要對應調整)
| Day_062_HW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#preamble
# %reset
# + language="html"
# <!--Script block to left align Markdown Tables-->
# <style>
# table {margin-left: 0 !important;}
# </style>
# -
# # ENGR 1330 Computational Thinking with Data Science
# Last GitHub Commit Date: 16 Mar 2021
#
# ## Lesson 16 : Hypothesis Testing (Continued)
#
# A procedure to systematically decide if two data collections are similar or substantially different
#
# <!---->
#
# ## Objectives
# - To apply fundamental concepts involved in probability estimation modeling and descriptive statistics;
# - Use of non-standard external module
# - Use of graphics to visualize hypothesis tests
# -
#
#
# ---
#
# ## Computational Thinking Concepts
# The CT concepts include:
#
# - Abstraction => Represent data behavior with a statistic
# - Pattern Recognition => Compare patterns in (our) data models to make a decision
# ---
#
# # Hypothesis Testing (Continued)
#
# Lets actually examine the textbook:
# [https://www.inferentialthinking.com/chapters/11/Testing_Hypotheses.html](https://www.inferentialthinking.com/chapters/11/Testing_Hypotheses.html)
#
# You know the URL that no-one reads, perhaps because there is a "secret" module you need to install, without instructions of how! Welcome to the world of obfuscation!
# To get access to the datascience library in the textbook you use pip. On my server I did:
#
# sudo -H /opt/conda/envs/python/bin/python -m pip install datascience
#
import datascience
#import numpy as np
#import matplotlib
#import matplotlib.pyplot as plt
# ## Mendel's Pea Flowers
#
# <NAME> (1822-1884) was an Austrian monk who is widely recognized as the founder of the modern field of genetics. Mendel performed careful and large-scale experiments on plants to come up with fundamental laws of genetics.
#
# Many of his experiments were on varieties of pea plants. He formulated sets of assumptions about each variety; these were his models. He then tested the validity of his models by growing the plants and gathering data.
#
# Let's analyze the data from one such experiment to see if Mendel's model was good.
#
# In a particular variety, each plant has either purple flowers or white. The color in each plant is unaffected by the colors in other plants. Mendel hypothesized that the plants should bear purple or white flowers at random, in the ratio 3:1.
# ### Mendel's Model
#
# For every plant, there is a 75% chance that it will have purple flowers, and a 25% chance that the flowers will be white, regardless of the colors in all the other plants.
# Approach to Assessment
#
# To go about assessing Mendel's model, we can simulate plants under the assumptions of the model and see what it predicts. Then we will be able to compare the predictions with the data that Mendel recorded.
# The Statistic
#
# Our goal is to see whether or not Mendel's model is good. We need to simulate a statistic that will help us make this decision.
#
# If the model is good, the percent of purple-flowering plants in the sample should be close to 75%. If the model is not good, the percent purple-flowering will be away from 75%. It may be higher, or lower; the direction doesn't matter.
#
# The key for us is the distance between 75% and the percent of purple-flowering plants in the sample. Big distances are evidence that the model isn't good.
#
# Our statistic, therefore, is the distance between the sample percent and 75%:
#
# $$∣\text{sample percent of purple}-\text{flowering plants}|−75\%$$
#
# ### Predicting the Statistic Under the Model
#
# To see how big the distance would be if Mendel's model were true, we can use sample_proportions to simulate the distance under the assumptions of the model.
#
# First, we have to figure out how many times to sample. To do this, remember that we are going to compare our simulation with Mendel's plants. So we should simulate the same number of plants that he had.
#
# Mendel grew a lot of plants. There were 929 plants of the variety corresponding to this model. So we have to sample 929 times.
# Generating One Value of the Statistic
#
# The steps in the calculation:
#
# Sample 929 times at random from the distribution specified by the model and find the sample proportion in the purple-flowering category.
# Multiply the proportion by 100 to get a percent.
# Subtract 75 and take the absolute value of the difference.
#
# That's the statistic: the distance between the sample percent and 75.
#
# We will start by defining a function that takes a proportion and returns the absolute difference between the corresponding percent and 75.
#
def distance_from_75(p):
    """Return the absolute distance, in percentage points, between 100*p and 75."""
    diff = 100 * p - 75
    return diff if diff >= 0 else -diff
# To simulate one value of the distance between the sample percent of purple-flowering plants and 75%, under the assumptions of Mendel's model, we have to first simulate the proportion of purple-flowering plants among 929 plants under the assumption of the model, and then calculate the discrepancy from 75%.
model_proportions = [0.75, 0.25]
proportion_purple_in_sample = datascience.sample_proportions(929, model_proportions).item(0)
distance_from_75(proportion_purple_in_sample)
#proportion_purple_in_sample
# That's one simulated value of the distance between the sample percent of purple-flowering plants and 75% as predicted by Mendel's model.
# ### Running the Simulation
#
# To get a sense of how variable the distance could be, we have to simulate it many more times.
#
# We will generate 10,000 values of the distance. As before, we will first use the code we developed above to define a function that returns one simulated value Mendel's hypothesis.
#
def one_simulated_distance():
    """Simulate one value of the test statistic under Mendel's 3:1 model (929 plants)."""
    sampled_proportion = datascience.sample_proportions(929, model_proportions).item(0)
    return distance_from_75(sampled_proportion)
# Next, we will use a for loop to create 10,000 such simulated distances.
# +
# `np` is used below but the notebook's `import numpy as np` (earlier cell) is
# commented out, which would raise NameError here -- import it explicitly.
import numpy as np

distances = datascience.make_array()
repetitions = 10000
# Simulate the statistic `repetitions` times, appending each simulated distance
for i in np.arange(repetitions):
    distances = np.append(distances, one_simulated_distance())
len(distances)
#distances[999]
# -
# ### The Prediction
#
# The empirical histogram of the simulated values shows the distribution of the distance as predicted by Mendel's model.
#
# +
# Implementation of matplotlib function
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(10**7)
mu = 121
sigma = 21
x = mu + sigma * np.random.randn(1000)
num_bins = 100
n, bins, patches = plt.hist(x, num_bins,
density = 1,
color ='green',
alpha = 0.7)
y = ((1 / (np.sqrt(2 * np.pi) * sigma)) *
np.exp(-0.5 * (1 / sigma * (bins - mu))**2))
plt.plot(bins, y, '--', color ='black')
plt.xlabel('X-Axis')
plt.ylabel('Y-Axis')
plt.title('matplotlib.pyplot.hist() function Example\n\n',
fontweight ="bold")
plt.show()
# -
plt.hist(distances,10)
plt.show()
datascience.Table().with_column(
'Distance between Sample % and 75%', distances
).hist()
#
#
# Look on the horizontal axis to see the typical values of the distance, as predicted by the model. They are rather small. For example, a high proportion of the distances are in the range 0 to 1, meaning that for a high proportion of the samples, the percent of purple-flowering plants is within 1% of 75%, that is, the sample percent is in the range 74% to 76%.
#
# ### Comparing the Prediction and the Data
#
# To assess the model, we have to compare this prediction with the data. Mendel recorded the number of purple and white flowering plants. Among the 929 plants that he grew, 705 were purple flowering. That's just about 75.89%.
#
100*round(705 / 929, 4)
# So the observed value of our statistic – the distance between Mendel's sample percent and 75 – is about 0.89:
observed_statistic = distance_from_75(705/929)
round(observed_statistic, 2)
#
#
# Just by eye, locate roughly where 0.89 is on the horizontal axis of the histogram. You will see that it is clearly in the heart of the distribution predicted by Mendel's model.
#
# The cell below redraws the histogram with the observed value plotted on the horizontal axis.
#
datascience.Table().with_column(
'Distance between Sample % and 75%', distances
).hist()
matplotlib.pyplot.scatter(observed_statistic, 0.00, color='red', s=66);
# The observed statistic is like a typical distance predicted by the model. By this measure, the data are consistent with the histogram that we generated under the assumptions of Mendel's model. This is evidence in favor of the model.
# Hypothesis tests are conducted in all fields in which theory can be compared to observation.
#
# ## Elements of a Statistical Test
# The four essential elements are:
#
# 1. null hypothesis, $H_0$
# 2. alternative hypothesis, $H_a$
# 3. a test statistic
# 4. a rejection region
#
# ###
#
# Suppose we wish to test a hypothesis concerning a parameter $\theta$ based on a random sample $Y_1,Y_2, \dots ,Y_n$ from which we compute an estimate named $\hat\theta$. Assume for this lesson that the estimator has an approximately normical distribution with mean $\theta$ and variance $\sigma_{\hat\theta}^2$
#
# Suppose $\theta_0$ is a particular (target) value of $\theta$, we wish to test $H_0:\theta = \theta_0$ versus $H_a:\theta > \theta_0$.
#
# The figure below depicts sampling distributions of $\hat\theta$ for different values of $\theta$, including one that happens to center on $\theta_0$
#
# 
#
# If $\hat\theta$ is close to $\theta_0$, it is reasonable to accept $H_0$.
# However if $\theta > \theta_0$, it is more likely that $\hat\theta$ is going to be large.
#
# Thus large values of $\hat\theta$ (larger than $\theta_0$) favor the rejection of $H_0:\theta = \theta_0$ and the acceptance of $H_a:\theta > \theta_0$. Using the 4 elements as a guide, we can concisely state :
#
# |item|value|
# |:---|:---|
# |$H_0$|$\theta = \theta_0$|
# |$H_a$|$\theta > \theta_0$|
# |Test Statistic|$\hat\theta$|
# |Rejection Region|$RR = \hat\theta > k$ for some choice of $k$|
#
# The actual value of $RR$ is determined by setting the type-I-error prbability $\alpha$ and choosing $k$ accordingly, such as in the figure below.
#
# 
#
# If $H_0$ is true, $\hat\theta$ having an approximately normal distribution with mean $\theta_0$ and variance $\sigma_{\hat\theta}^2$, if we desire a type-I-error probability of $\alpha$ then
#
# $$k = \theta_0 + z_{\alpha} \sigma_{\hat\theta}$$
#
# is the choice for $k$ where $P(Z > z_{\alpha}) = \alpha $ when $Z$ is a standard normal variate (our firend the Z-score).
#
# Our $RR$ can be expressed as:
#
# $$ RR = \hat\theta : \frac{\hat\theta - \theta_0}{\sigma_{\hat\theta}} > z_{\alpha}$$
#
# Thus our test is:
#
# |item|value|
# |:---|:------|
# |$H_0$|$\theta = \theta_0$|
# |$H_a$|$\theta> \theta_0$|
# |Test Statistic|$Z=\frac{\hat\theta-\theta_0}{\sigma_{\hat\theta}}$ |
# |Rejection Region|$z>z_{\alpha}$ for some value $\alpha$|
#
# Recall we choose $\alpha$
# ### Example
#
# A machine in a factory must be stopped and maintained when its defects exceed 10% on the daily production runs. A random sample of 100 items from one day of production contains 15 defective items and the shop foreman claims the machine must be maintained/repaired. Does the sample evidence support the foreman's assertion?
#
# Lets apply our process.
#
# $Y$ denotes the number of defectives. $Y$ will be a binomial variate (with value TRUE/FALSE or 0/1) with a probability of one outcome of $p$ and the other $1-p$. Kind of like an oddly weighted coin. Lets say the defect outcome is the value $p$. We will assume 100 is large enough so that we can approximate the binomial with a Z-score statistic.
#
# |item|value|
# |:---|:---|
# |$H_0$|$p = 0.10$|
# |$H_a$|$p > 0.10$|
# |Test Statistic|$$Z=\frac{\hat p-p_0}{\sqrt{\frac{p_0(1-p_0)}{n}}}$$ |
# |Rejection Region|$z>z_{\alpha}$ for some value $\alpha$|
#
# Lets choose $\alpha = 0.01$ what is the value for $z_{\alpha}$?
# +
import math
def normdist(x, mu, sigma):
    """Cumulative distribution function of a Normal(mu, sigma) evaluated at x."""
    # CDF via the error function: Phi(z) = (1 + erf(z / sqrt(2))) / 2
    z = (x - mu) / (math.sqrt(2.0) * sigma)
    return (1.0 + math.erf(z)) / 2.0
sample_count = 100   # number of items sampled from the day's production
defect_count = 15    # number of defective items observed
phat = defect_count/sample_count   # sample proportion of defectives
pzero = 0.10         # hypothesized defect rate under H0
# Z statistic for a one-sample proportion test (normal approximation to the binomial)
z_test = (phat - pzero) / math.sqrt((pzero*(1.0-pzero))/(sample_count))
# NOTE(review): 1.646 is approximately z for alpha = 0.05, while the text above
# chose alpha = 0.01 (z about 2.325, left in the trailing comment) -- confirm
# which alpha is intended.
z_alpha = 1.646 #2.325 #here we find by trial and error
quantile = 1-normdist(z_alpha,0,1)   # realized type-I-error probability for this cutoff
print(quantile)
# Reject H0 when the statistic falls in the rejection region z > z_alpha
if z_test > z_alpha:
    print("Reject Ho","Test Statistic = ",round(z_test,3), "Rejection Value = ",round(z_alpha,3)," Type-I-error probability = ",round(quantile,6))
else:
    print("Do Not Reject Ho","Test Statistic = ",round(z_test,3), "Rejection Value = ",round(z_alpha,3)," Type-I-error probability = ",round(quantile,6))
# or we can compute p-value
pvalue = 1-normdist(z_test,0,1)   # smallest alpha at which H0 would be rejected
print("P-value at rejection = ",round(pvalue,6))
# -
# Now lets examine the actual defect distribution by simulation
# +
import random
import matplotlib.pyplot as plt
def sample_parts(how_many, proportion):
    """Simulate `how_many` parts: 0 marks a defective part (probability `proportion`), 1 a good one."""
    drawn = []
    for _ in range(0, how_many):
        u = random.uniform(0, 1)
        # Defective with probability `proportion`
        drawn.append(0 if u <= proportion else 1)
    return drawn
#machine_parts = sample_parts(100,0.10)
#plt.hist(machine_parts, density = False)
#plt.show()
# simulate sampling then computing the defect rate
# Each iteration draws a sample of size j and records its observed defect rate,
# illustrating how the sampling distribution tightens as sample size grows.
defect_rate = []
for j in range(1,100): #change from 10 to 50,70,90,100,1000,2000,3000
    machine_parts = sample_parts(j,0.10)
    sample_count = len(machine_parts)
    # defects are encoded as 0s, so defect count = n - (number of 1s)
    defect_count = sample_count - sum(machine_parts)
    defect_rate.append(defect_count/sample_count)
plt.hist(defect_rate,bins=9,density = True)
#matplotlib.pyplot.scatter(0.15, 0.00, color='red', s=66);
plt.show()
# -
# ### A/B Testing
#
# In modern data analytics, deciding whether two numerical samples come from the same underlying distribution is called A/B testing. The name refers to the labels of the two samples, A and B.
#
# We have already addressed this kind of comparison, and will do more in the laboratory, in fact we went further and even postulated some kinds of distribution data models that explain the data.
| 1-Lessons/Lesson16/PsuedoLesson/Lesson16-Dev.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# `ApJdataFrames` 007: Patten2006
# ---
# `Title`: Spitzer IRAC Photometry of M, L, and T Dwarfs
# `Authors`: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
#
# Data is from this paper:
#
# %pylab inline
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
# The tables define the value and error as a string:
# `val (err)`
# which is a pain in the ass because now I have to parse the strings, which always takes much longer than it should because data wrangling is hard sometimes.
#
# I define a function that takes a column name and a data frame and strips the output.
def strip_parentheses(col, df):
    """Split a "value (error)" string column into separate value and error columns.

    Parameters
    ----------
    col : str
        Name of the column to split (expected format: "<base> (error)...").
    df : pandas.DataFrame
        Frame holding the column; modified in place.

    Returns
    -------
    pandas.DataFrame
        The same frame, with `col` replaced by `<base>` and `<base>_e` columns.
    """
    # Drop closing parentheses, then split on the opening one:
    # "val (err)" -> ["val ", "err"]
    cleaned = df[col].str.replace(")", "")
    pieces = cleaned.str.split(pat="(").apply(pd.Series)
    # Column name is "<base> <suffix>"; keep the part before the first space
    base, _suffix = col.split(" ")
    df[base] = pieces[0].copy()
    df[base + "_e"] = pieces[1].copy()
    del df[col]
    return df
# ## Table 1 - Basic data on sources
# +
names = ["Name","R.A. (J2000.0)","Decl. (J2000.0)","Spectral Type","SpectralType Ref.","Parallax (error)(arcsec)",
"Parallax Ref.","J (error)","H (error)","Ks (error)","JHKRef.","PhotSys"]
tbl1 = pd.read_csv("http://iopscience.iop.org/0004-637X/651/1/502/fulltext/64991.tb1.txt",
sep='\t', names=names, na_values='\ldots')
# -
# Columns holding "value (error)" strings that need splitting
cols_to_fix = [col for col in tbl1.columns.values if "(error)" in col]
for col in cols_to_fix:
    # print() form runs under both Python 2 and 3 (bare `print col` is Python-2 only)
    print(col)
    tbl1 = strip_parentheses(col, tbl1)
tbl1.head()
# ## Table 3- IRAC photometry
# +
names = ["Name","Spectral Type","[3.6] (error)","n1","[4.5] (error)","n2",
"[5.8] (error)","n3","[8.0] (error)","n4","[3.6]-[4.5]","[4.5]-[5.8]","[5.8]-[8.0]","Notes"]
tbl3 = pd.read_csv("http://iopscience.iop.org/0004-637X/651/1/502/fulltext/64991.tb3.txt",
sep='\t', names=names, na_values='\ldots')
# -
cols_to_fix = [col for col in tbl3.columns.values if "(error)" in col]
cols_to_fix
for col in cols_to_fix:
    # print() form runs under both Python 2 and 3 (bare `print col` is Python-2 only)
    print(col)
    tbl3 = strip_parentheses(col, tbl3)
tbl3.head()
pd.options.display.max_columns = 50
del tbl3["Spectral Type"] #This is repeated
patten2006 = pd.merge(tbl1, tbl3, how="outer", on="Name")
patten2006.head()
# Convert spectral type to number
import gully_custom
patten2006["SpT_num"], _1, _2, _3= gully_custom.specTypePlus(patten2006["Spectral Type"])
# Make a plot of mid-IR colors as a function of spectral type.
sns.set_context("notebook", font_scale=1.5)
# +
for color in ["[3.6]-[4.5]", "[4.5]-[5.8]", "[5.8]-[8.0]"]:
plt.plot(patten2006["SpT_num"], patten2006[color], '.', label=color)
plt.xlabel(r'Spectral Type (M0 = 0)')
plt.ylabel(r'$[3.6]-[4.5]$')
plt.title("IRAC colors as a function of spectral type")
plt.legend(loc='best')
# -
# ## Save the cleaned data.
patten2006.to_csv('../data/Patten2006/patten2006.csv', index=False)
| notebooks/Patten2006.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="IHgmxWG_7lnE"
# # Домашняя работа 5. Бустинг
#
# *Мягкий дедлайн: 17 декабря, 21:00*
#
# *Жесткий дедлайн: 19 декабря, 21:00*
#
# -
# Максимальная оценка 5 баллов :)
# + id="GOqjUI6igeLc"
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston # sorry(not sorry)
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
# -
from scipy.optimize import minimize
# + [markdown] id="-tKaz0okgeLh"
# ### Задание 1. Градиентный бустинг своими руками (2 балла)
#
# Вам нужно реализовать упрощенный вариант градиентного бутсинга для задачи регресси.
#
#
# **Напоминание, как это работает:**
#
# Обозначим текущую композицию на $N-1$ шаге за $a_{N - 1}(x_i)$. Базовый алгоритм $b_N(x_i)$ обучается на ответах $-\frac{\partial L(y_i, z)}{\partial z}\Bigl|_{z = a_{N - 1}(x_i)}$, где $L(y_i, z)$ — значение функции потерь на объекте при правильном ответе $y_i$ и предсказании $z$. Композиция на следующем шаге получается так:
#
# $$
# a_N(x_i) = a_{N-1}(x_i) + \nu\gamma_Nb_N(x_i)
# $$
#
# Здесь $\nu \in [0, 1]$ — темп обучения (гиперпараметр), $\gamma_N$ — оптимальный вес, настраиваемый на каждом шаге алгоритма в ходе решения оптимизационной задачи:
#
# $$
# \gamma_N = \mathrm{arg}\min_\gamma \frac{1}{\ell}\sum\limits_{i=1}^{\ell}L\left(y_i, a_{N - 1}(x_i) + \gamma b_N(x_i)\right)
# $$
#
#
# Заметьте, что в формуле выше нет $\nu$. Этот гиперпараметр используется для сокращения длины шага, оптимального при составлении композиции $a_N$. Идея отклонения от оптимума должна быть вам уже знакома как способ борьбы с переобучением, когда мы специально форсим модель работать чуть хуже, чем могла бы, на текущем шаге, чтобы сохранить обобщающую способность и не подогнаться под тренировочную выборку (или под шум).
#
# С потерей в 0.5 балла можете принять $\gamma_N = 1$ для каждого $N$. На полный балл необходимо реализовать нахождение оптимального $\gamma_N$ на каждом шаге.
#
# В качестве функции потерь $L$ возьмите MSE.
# -
# В качестве базовой модели можете использовать `DecisionTreeRegressor` из `sklearn`.
# Для решения оптимизационной задачки можно воспользоваться алгоритмами из любых библиотек, например, `scipy.optimize`, или найти оптимум перебором по сетке из некоторого разумного диапазона.
#
# Можно дописывать свои функции, если необходимо.
# + id="ZB5Yt-LKgeLi"
class GradientBoosting:
    """Simplified gradient boosting for regression with an MSE loss.

    The ensemble is built additively: each base learner is fitted to the
    anti-gradient of the MSE loss (the scaled residuals), and its contribution
    is shrunk by `learning_rate` times an optimal step size `gamma`.
    """

    def __init__(
        self,
        base_model_class: object = DecisionTreeRegressor,
        base_model_params: dict = {'max_depth': None},
        n_estimators: int = 10,
        learning_rate: float = 0.1
    ):
        """
        Args:
            base_model_class: Class of the base learner.
            base_model_params: Hyperparameters of the base learner.
            n_estimators: Number of boosting stages.
            learning_rate: Value used to shrink contribution of each base learner to the model.
        """
        self.base_model_class = base_model_class
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate
        self.base_model_params = base_model_params
        # Optimal gamma per stage; gammas[0] pairs with the zero baseline.
        self.gammas = []
        # Fitted base learners; models[0] is a zero-prediction placeholder.
        self.models = []
        # Training MSE recorded every second iteration.
        self.errors = []

    def find_optimal_gamma(self,
                           y: np.array,
                           old_predictions: np.array,
                           new_predictions: np.array) -> float:
        """Find the step size minimizing the squared error of the updated ensemble.

        Args:
            y: Target variable.
            old_predictions: Prediction of the additive model at the previous stage.
            new_predictions: Prediction of the base learner at the current stage.
        Returns:
            Optimal value for gamma.
        """
        optimal_gamma = minimize(
            lambda gamma: np.sum((y - old_predictions - gamma * new_predictions) ** 2),
            x0=1,
        ).x[0]
        self.gammas.append(optimal_gamma)
        # Return the value as documented (previously this method returned None).
        return optimal_gamma

    def _fit_base_model(self, X: np.ndarray, y: np.array):
        """Train one base learner.

        Args:
            X: Feature matrix.
            y: Target variable (here: the anti-gradient / residuals).
        Returns:
            Fitted base learner.
        """
        # Use the configured class (previously DecisionTreeRegressor was
        # hard-coded, silently ignoring `base_model_class`).
        base_model = self.base_model_class()
        base_model.set_params(**self.base_model_params)
        base_model.fit(X, y)
        self.models.append(base_model)
        return base_model

    def fit(self, X: np.ndarray, y: np.array):
        """Train boosting ("sum" of base learners).

        Args:
            X: Feature matrix.
            y: Target variable.
        Returns:
            Fitted boosting.
        """
        for i in range(self.n_estimators):
            if len(self.models) == 0:
                # Stage 0: a constant-zero baseline with unit weight.
                self.models.append(np.zeros(X.shape[0]))
                self.gammas.append(1)
            else:
                # Compute current predictions once (previously computed twice).
                old_predictions = self.predict(X)
                # Anti-gradient of MSE w.r.t. the current predictions.
                s = 2 * (y - old_predictions) / len(X)
                model = self._fit_base_model(X, s)
                self.find_optimal_gamma(y, old_predictions, model.predict(X))
            if i % 2 == 0:
                self.errors.append(mean_squared_error(y, self.predict(X)))
        return self

    def predict(self, X: np.ndarray):
        """Make prediction of fitted boosting.

        Args:
            X: Feature matrix.
        Returns:
            Prediction of fitted boosting (None if `fit` was never called).
        """
        if len(self.models) > 0:
            y_predicted = np.zeros(X.shape[0])
            # models[0] is the zero baseline, so real learners start at index 1;
            # gammas[k] pairs with models[k], hence the i + 1 offset (fixes an
            # off-by-one that applied each gamma to the wrong learner).
            for i, base_model in enumerate(self.models[1:]):
                y_predicted += self.learning_rate * self.gammas[i + 1] * base_model.predict(X)
            return y_predicted
# -
# Проверьте вашу реализацию на бостонском датасете. Подберите оптимальные гиперпараметры, чтобы победить RandomForestRegressor (не меняйте параметры сида).
boston = load_boston()
X = boston.data
y = boston.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=13)
# +
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(max_features=4, n_estimators=640, random_state=19052019)
rf.fit(X_train, y_train)
mean_squared_error(y_test, rf.predict(X_test))
# -
# Попробуем запустить `Gradient Boosting` с дефолтными параметрами. Зафиксируем случайный seed для воспроизводимости результатов.
# +
# %%time
gb = GradientBoosting(base_model_params={"random_state" : 42})
gb.fit(X_train, y_train)
print(f"MSE of this model is {mean_squared_error(y_test, gb.predict(X_test))}")
# -
# Видно, что разница очень велика. Попробуем подобрать по сетке гиперпараметры:
# %%time
params = {
"n_estimators" : np.arange(10, 60, 2),
"learning_rate" : np.arange(0.1, 0.25, 0.025),
"max_depth" : np.arange(1, 7),
"min_samples_leaf" : np.arange(2, 5),
"max_features" : np.arange(1, 5)
}
keys = list(params.keys())
combinations = np.array(np.meshgrid(*list(params.values()))).T.reshape(-1,5)
best_MSE = None
best_params = None
# Exhaustive search over every hyperparameter combination; the first two
# columns of a combination are boosting-level parameters, the remaining
# ones configure the base tree.
# NOTE(review): the best model is selected on the TEST split, which leaks
# test information into model selection — a validation split would be cleaner.
for combo in combinations:
    base_model_params = dict(zip(keys[2:], combo[2:].astype(int)))
    base_model_params["random_state"] = 42
    gb = GradientBoosting(n_estimators=int(combo[0]), learning_rate=combo[1], base_model_params=base_model_params)
    gb.fit(X_train, y_train)
    mse = mean_squared_error(y_test, gb.predict(X_test))
    # Single condition replaces the former duplicated None-check + comparison.
    if best_MSE is None or mse < best_MSE:
        best_MSE = mse
        best_params = combo
# Параметры и скор лучшей модели:
print("Parameters of the best model:")
for i, key in enumerate(keys):
print(f"\t{key} : {best_params[i]}")
print(f"MSE of the best model is {best_MSE}")
# В итоге, я подобрал гиперпараметры, которые победили RandomForest, хотя и кажется, что здесь randomforest лучше справляется. При переборе гиперпараметров можно было еще сделать early stopping, но я слишком поздно это понял и мне лень еще раз запускать это.
# best model once again
gb = GradientBoosting(n_estimators=58, learning_rate=0.15, base_model_params={'max_depth':6, "min_samples_leaf":4, "max_features":4, "random_state":42})
gb.fit(X_train, y_train)
mean_squared_error(y_test, gb.predict(X_test))
from matplotlib import pyplot as plt
plt.plot(np.arange(len(gb.errors))*2, gb.errors)
plt.xlabel("n_iteration")
plt.ylabel('MSE on train')
plt.title("Best model train")
# ### Задание 2. Сравнение подходов (1.5 балла)
# Скачайте данные о выдаче кредитов. Это данные с kaggle, целевая переменная `y` показывает, вернуло ли кредит физическое лицо.
# !wget -O 'bank_data.csv' -q 'https://www.dropbox.com/s/uy27mctxo0gbuof/bank_data.csv?dl=0'
df = pd.read_csv('bank_data.csv')
df.sample(5)
# +
y = df.y
df = df.drop(columns=["y"])
X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.25, shuffle=True, random_state=13)
categorical_features = [1,2,3,4,5,6,7,8,9,14]
numeric_features = list(set(np.arange(19)) - set(categorical_features))
# -
X_train.iloc[:, categorical_features]
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
preprocessor = ColumnTransformer(transformers=[
("num", StandardScaler(), numeric_features),
("cat", OneHotEncoder(handle_unknown = 'ignore'), categorical_features)])
# Решите задачу предсказания возвращения кредита методами, перечисленными ниже:
#
# - Случайный лес
# - Бэггинг на деревьях (поставьте для базовых деревьев min_samples_leaf=1)
# - Бэггинг, у которого базовой моделью является бустинг с большим числом деревьев (> 100)
# - Бэггинг на логистических регрессиях
#
# Используйте логистическую регрессию, случайный лес, `GradientBoostingClassifier` и `BaggingClassifier` из `sklearn`.
#
# 1) Какая из моделей имеет лучшее качество? С чем это связано?
#
# 2) Какая из моделей сильнее всего переобучается?
#
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.metrics import roc_auc_score
np.random.seed(42)
# +
# %%time
pipeline = Pipeline([("preprocessor", preprocessor), ('rfc', RandomForestClassifier())])
pipeline.fit(X_train, y_train)
# Bug fix: the first line reports the TRAIN score but was labelled "on test".
print(f"ROC-AUC score on train is {roc_auc_score(y_train, pipeline.predict_proba(X_train)[:, 1])}")
print(f"ROC-AUC score on test is {roc_auc_score(y_test, pipeline.predict_proba(X_test)[:, 1])}")
# +
# %%time
pipeline = Pipeline([("preprocessor", preprocessor), ('bc_tree', BaggingClassifier(DecisionTreeClassifier()))])
pipeline.fit(X_train, y_train)
# Bug fix: the first line reports the TRAIN score but was labelled "on test".
print(f"ROC-AUC score on train is {roc_auc_score(y_train, pipeline.predict_proba(X_train)[:, 1])}")
print(f"ROC-AUC score on test is {roc_auc_score(y_test, pipeline.predict_proba(X_test)[:, 1])}")
# +
# %%time
pipeline = Pipeline([("preprocessor", preprocessor), ('bc_boosting', BaggingClassifier(GradientBoostingClassifier(n_estimators=200)))])
pipeline.fit(X_train, y_train)
# Bug fix: the first line reports the TRAIN score but was labelled "on test".
print(f"ROC-AUC score on train is {roc_auc_score(y_train, pipeline.predict_proba(X_train)[:, 1])}")
print(f"ROC-AUC score on test is {roc_auc_score(y_test, pipeline.predict_proba(X_test)[:, 1])}")
# +
# %%time
pipeline = Pipeline([("preprocessor", preprocessor), ('bc_logreg', BaggingClassifier(LogisticRegression(max_iter=200)))])
pipeline.fit(X_train, y_train)
# Bug fix: the first line reports the TRAIN score but was labelled "on test".
print(f"ROC-AUC score on train is {roc_auc_score(y_train, pipeline.predict_proba(X_train)[:, 1])}")
print(f"ROC-AUC score on test is {roc_auc_score(y_test, pipeline.predict_proba(X_test)[:, 1])}")
# -
# 1) У меня получилось, что лучше всего работает бэггинг на градиентном бустинге. Я думаю, что это связано с тем, что градиентный бустинг сам по себе очень сильный алгоритм, а бэггинг только улучшает его, делая более стабильным и уменьшая дисперсию ошибки. Также он не так сильно переобучился как random forest. Кстати, второй по качеству алгоритм - RandomForest, который тоже довольно сильная модель, так как это ансамбль.
# 2) Сильнее всего переобучается random forest
# ### Задание 3. Современные бустинги (1.5 балла)
#
# Сравните на этих данных любую из трёх популярных имплементаций градиентного бустинга (xgboost, lightgbm, catboost). Подберите основные гиперпараметры (число деревьев, длина шага, глубина дерева/число листьев). Получилось ли круче, чем с моделями выше?
# Я буду использовать `catboost`.
from catboost import CatBoostClassifier
# +
# %%time
# base model
CatBoost_model = CatBoostClassifier(
iterations=100,
task_type="GPU",
eval_metric="AUC"
)
CatBoost_model.fit(
X_train, y_train,
cat_features=categorical_features,
verbose=0
)
# Bug fix: the first line reports the TRAIN score but was labelled "on test".
print(f"ROC-AUC score on train is {roc_auc_score(y_train, CatBoost_model.predict_proba(X_train)[:, 1])}")
print(f"ROC-AUC score on test is {roc_auc_score(y_test, CatBoost_model.predict_proba(X_test)[:, 1])}")
# +
CatBoost_model = CatBoostClassifier(
task_type="GPU",
eval_metric="AUC",
cat_features=categorical_features,
)
grid = {'learning_rate': [0.03, 0.1],
'depth': [4, 6, 10],
'l2_leaf_reg': [1, 4, 9],
'iterations': [100,200]
}
grid_search_result = CatBoost_model.grid_search(grid,
X=X_train,
y=y_train,
verbose=0,
plot=True)
# -
# Я не уверен, как будет выглядеть grid search ячейкой выше (у меня выводится очень информативный дэшборд), поэтому прикрепляю еще график с этого дэшборда.
# +
import requests
from PIL import Image
import matplotlib.pyplot as plt
from io import BytesIO
# %matplotlib inline
url = "https://i.ibb.co/WpjPh3q/hw05-grid-search-catboost.png"
response = requests.get(url)
img_before = np.asarray(Image.open(BytesIO(response.content)))
plt.imshow(img_before)
plt.axis("off")
plt.title("CatBoost Grid Search (AUC-ROC score for each combination)");
# -
# Вот такие параметры я отобрал на gridsearch
grid_search_result["params"]
# %%time
CatBoost_model = CatBoostClassifier(
iterations=200,
learning_rate=0.1,
depth=10,
l2_leaf_reg=9,
task_type="GPU",
eval_metric="AUC"
)
CatBoost_model.fit(
X_train, y_train,
cat_features=categorical_features,
verbose = 0
)
# Bug fix: the first line reports the TRAIN score but was labelled "on test".
print(f"ROC-AUC score on train is {roc_auc_score(y_train, CatBoost_model.predict_proba(X_train)[:, 1])}")
print(f"ROC-AUC score on test is {roc_auc_score(y_test, CatBoost_model.predict_proba(X_test)[:, 1])}")
# В итоге, `catboost` оказался немного (на десятитысячные доли) лучше, чем бэггинг на градиентном бустинге.
# ### Бонус (0.1 балла)
#
# Прикрепите сюда что-нибудь для новогоднего настроения 👒
# +
# сюда
url = "https://i0.wp.com/neptune.ai/wp-content/uploads/machine-learning-christmas-cartoon.jpg?resize=500%2C358&ssl=1"
response = requests.get(url)
img_before = np.asarray(Image.open(BytesIO(response.content)))
plt.imshow(img_before);
# -
# ###### Всех с наступающим
| homeworks/hw05_boosting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# from Nancy
#
# This notebook generates forcing files for the 2D domain.
#
# Plan: use 3D boundary files, average across the mouth of Juan de Fuca and set uniformly across domain.
# +
import netCDF4 as nc
import numpy as np
import matplotlib.pyplot as plt
import os
#from salishsea_tools import tidetools,nc_tools
# %matplotlib inline
# -
# # Load 3D T+S
#
# +
f = nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/open_boundaries/west/SalishSea2_Masson_DC.nc')
T = f.variables['votemper'][:]
S = f.variables['vosaline'][:]
print( S.shape)
depth = f.variables['deptht'][:]
times = f.variables['time_counter'][:]
# -
# There are 52 weeks, 40 depth levels, 87 points across mouth, and 10 points into domain.
# # Average across mouth
# +
ntime=52; ndepth=40; nrim = 10
width_3d=87
Tmean = np.zeros((ntime,ndepth,nrim))
Smean = np.zeros((ntime,ndepth,nrim))
for i in np.arange(nrim):
ind = i*width_3d
Tmean[:,:,i] = np.nanmean(T[:,:,0,ind:ind+width_3d], axis=2)
Smean[:,:,i] = np.nanmean(S[:,:,0,ind:ind+width_3d], axis=2)
plt.pcolormesh(Smean[:,:,0].T)
plt.axis([0,ntime,ndepth,0])
plt.colorbar()
# -
plt.pcolormesh(Tmean[:,:,0].T)
plt.axis([0,ntime,ndepth,0])
plt.colorbar()
plt.pcolormesh(Smean[0,:,:])
plt.axis([0,nrim,ndepth,0])
plt.colorbar()
plt.pcolormesh(Tmean[0,:,:])
plt.axis([0,nrim,ndepth,0])
plt.colorbar()
# Looks reasonable.
# #Copy across Y
# +
# Replicate the rim-averaged temperature profile uniformly across the
# y direction of the 2D domain.
Ny=10 #number of y points; NOTE(review): the original comment said "only 8 because of masked edges" but the value is 10 -- confirm which is intended
T_y=np.tile(Tmean,Ny)  # tile along the last axis -> (ntime, ndepth, nrim*Ny)
T_y=T_y.reshape(ntime,ndepth,Ny,nrim)  # split into (time, depth, y, rim); every y slice equals Tmean
T_y.shape
plt.pcolormesh(T_y[0,:,:,0])  # sanity plot: first week, rim point 0
plt.axis([0,Ny,ndepth,0])
plt.colorbar()
# +
S_y=np.tile(Smean,Ny)
S_y=S_y.reshape(ntime,ndepth,Ny,nrim)
S_y.shape
plt.pcolormesh(S_y[0,:,:,3])
plt.axis([0,Ny,ndepth,0])
plt.colorbar()
print (S_y.min())
# -
# #Untile
# Need to order the data from closest to the edge to furthest from the edge
T_untile = np.zeros((ntime,ndepth,1,Ny*nrim))
S_untile = np.zeros((ntime,ndepth,1,Ny*nrim))
for i in np.arange(nrim):
ind = i*Ny
T_untile[:,:,0,ind:ind+Ny] = T_y[:,:,:,i]
S_untile[:,:,0,ind:ind+Ny] = S_y[:,:,:,i]
plt.pcolormesh(T_untile[0,:,0,:])
plt.axis([0,Ny*nrim,ndepth,0])
plt.colorbar()
plt.pcolormesh(S_untile[0,:,0,:])
plt.axis([0,Ny*nrim,ndepth,0])
plt.colorbar()
# #Save to netcdf
# +
nemo = nc.Dataset('/ocean/eolson/MEOPAR/NEMO-3.6-inputs/boundary_conditions/TS_OBC.nc', 'w', zlib=True)
#start and end points
length_rim =nrim
lengthi=Ny*length_rim
#time and depth
depth_levels =ndepth
# dataset attributes
#nc_tools.init_dataset_attrs(
# nemo,
# title='Temperature and Salinty Boundary Conditions 2D domain',
# notebook_name='Generate T+S Forcing - NEMO3.6',
# nc_filepath='/data/nsoontie/MEOPAR/2Ddomain/TS_OBC.nc',
# comment='based on average values across mouth of JdF and 3D weekly climatology')
# dimensions
nemo.createDimension('xb', lengthi)
nemo.createDimension('yb', 1)
nemo.createDimension('time_counter', None)
nemo.createDimension('deptht', depth_levels)
# variables
# deptht
deptht = nemo.createVariable('deptht', 'float32', ('deptht',))
deptht.long_name = 'Vertical T Levels'
deptht.units = 'm'
deptht.positive = 'down'
deptht.valid_range = np.array((4., 428.))
deptht[:]=depth
# time_counter
time_counter = nemo.createVariable('time_counter', 'float32', ('time_counter'))
time_counter.long_name = 'Time axis'
time_counter.axis = 'T'
time_counter.units = 'weeks since beginning of year'
time_counter[:]=times
# votemper
votemper = nemo.createVariable('votemper', 'float32',
('time_counter','deptht','yb','xb'))
votemper.units = 'degC'
votemper.long_name = 'Temperature'
votemper.grid = 'SalishSea2D'
votemper[:]=T_untile
# vosaline
vosaline = nemo.createVariable('vosaline', 'float32',
('time_counter','deptht','yb','xb'))
vosaline.units = 1
vosaline.long_name = 'Practical Salinity'
vosaline.grid = 'SalishSea2D'
vosaline[:]=S_untile
# nbidta, ndjdta, ndrdta
nbidta = nemo.createVariable('nbidta', 'int32' , ('yb','xb'))
nbidta.long_name = 'i grid position'
nbidta.units = 1
nbjdta = nemo.createVariable('nbjdta', 'int32' , ('yb','xb'))
nbjdta.long_name = 'j grid position'
nbjdta.units = 1
nbrdta = nemo.createVariable('nbrdta', 'int32' , ('yb','xb'))
nbrdta.long_name = 'position from boundary'
nbrdta.units = 1
for ir in range(length_rim):
nbidta[0,ir*Ny:(ir+1)*Ny] = ir
nbjdta[0,ir*Ny:(ir+1)*Ny] = range(Ny)
nbrdta[0,ir*Ny:(ir+1)*Ny] = ir
nemo.close()
# -
| Elise/modelInput/GenerateTSForcing-NEMO36.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # MIST101 Practical 3: Introduction to RNN (Part 1)
# + [markdown] deletable=true editable=true
# Welcome to the third tutorial of MIST101. The goal of this tutorial is to show how to use Tensorflow to train and evaluate simple recurrent neural networks for sequential data prediction and MNIST.
# + [markdown] deletable=true editable=true
# ## Price Prediction
# + deletable=true editable=true
import random
import numpy as np
import math
import matplotlib.pyplot as plt
import tensorflow as tf
# %matplotlib inline
# Generate price data
def gen_sequence(length):
    """Generate a synthetic price series of the given length.

    The series is a sine wave around $50 with amplitude 15, sampled at a
    random starting phase, plus uniform noise in [0, 1). Each element is a
    single-item list so the result stacks into shape (length, 1).
    """
    start = int(random.random() * 100.0)
    return [[50 + 15 * (math.sin(day * 0.6)) + random.random() * 1.0]
            for day in range(start, start + length)]
def get_data(batch_size, step_size):
    """Build a batch of (input, target) price sequences.

    For each batch element a series of step_size + 1 prices is generated;
    the input is the first step_size prices and the target is the same
    series shifted forward by one step (next-price prediction).
    """
    inputs, labels = [], []
    for _ in range(batch_size):
        series = gen_sequence(step_size + 1)
        inputs.append(series[:-1])
        labels.append(series[1:])
    return np.array(inputs), np.array(labels)
# + [markdown] deletable=true editable=true
# ## Data Visualization
# + deletable=true editable=true
length = 50
sample_data_input, sample_data_label = get_data(1, length) # get one batch of data with length of 50
days = np.linspace(1, length, length)
plt.plot(days, sample_data_input[0,:,0]) # plot the data
plt.xlabel("Day")
plt.ylabel("Price ($)")
# -
# ## Construct RNN Model
#
# To construct the RNN model, we first need to construct the base model and the inputs
# + deletable=true editable=true
input_size = 1
output_size = 1
hidden_state_size = 10
batch_size = 100
step_size = 50 # back-propagate through t steps
# We initialize our placeholder across the sequence.
input_placeholder = tf.placeholder(tf.float32, shape = (None, step_size, input_size)) # We can put "None" to make the size of the tensor flexible (batch size is variable)
label_placeholder = tf.placeholder(tf.float32, shape = (None, step_size, output_size))
# Define the weights/biases used in our RNN model
weight_in = tf.Variable(tf.truncated_normal(shape = [input_size, hidden_state_size], stddev = 0.01))
weight_hidden_in = tf.Variable(tf.truncated_normal(shape = [hidden_state_size, hidden_state_size], stddev = 0.01))
bias_hidden = tf.Variable(tf.zeros(shape = [hidden_state_size]))
weight_out = tf.Variable(tf.truncated_normal(shape = [hidden_state_size, output_size], stddev = 0.01))
bias_out = tf.Variable(tf.zeros(shape = [output_size]))
# -
# We can build the model which performs a linear transformation and a nonlinear transformation to the inputs:
# $$h_t = relu(W_{in}x_t + W_{hidden\_in}h_{t-1} + b_{hidden})$$
# $$y_t = W_{out}h_t + b_{out}$$
# where $h_t$ is the hidden state feedback and $x_t$, $y_t$ are the inputs and outputs of the model respectively. The function below takes in two input tensors ($x_t$ and $h_{t-1}$) and returns two output tensors ($y_t$ and $h_t$)
def model(input, hidden_in):
    """One RNN step: h_t = relu(W_in x_t + W_hh h_{t-1} + b_h), y_t = W_out h_t + b_out.

    Takes the current input and previous hidden state, returns the output
    and the new hidden state (both as TF tensors).
    """
    pre_activation = tf.matmul(input, weight_in) + tf.matmul(hidden_in, weight_hidden_in) + bias_hidden
    hidden_out = tf.nn.relu(pre_activation)
    output = tf.matmul(hidden_out, weight_out) + bias_out
    return output, hidden_out
# After constructing the model function, we can unfold our RNN model by various steps as following. Then, we can define our loss function and training step as follows
# +
initial_state = state = tf.zeros([batch_size, hidden_state_size])
losses = []
for i in range(step_size):
output, state = model(input_placeholder[:, i, :], state) # Unfolding the RNN model by feeding the output state tensor from previous step.
losses.append(tf.reduce_mean(tf.squared_difference(output, label_placeholder[:, i, :]))) # Mean square error as loss function
loss = tf.add_n(losses) / step_size # Average the loss across sequence
train_step = tf.train.AdamOptimizer(0.01).minimize(loss) # Define training step
# -
# Now, we can train our model!
# + deletable=true editable=true
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
for step in range(5000):
batch_inputs, batch_labels = get_data(batch_size, step_size) # Get a mini batch of images and labels
_, loss_value = sess.run([train_step, loss], feed_dict = {input_placeholder : batch_inputs, # Perform a training step
label_placeholder : batch_labels})
# Display
if step % 100 == 0:
print('Step %d: loss = %.2f' % (step, loss_value))
# + [markdown] deletable=true editable=true
# ## Model Visualization
#
# To see how well our trained model actually performs, we use our model to generate 50 price predictions after giving 50 previous prices.
# + deletable=true editable=true
initial_steps = 50
prediction_steps = 50
initial_cond = get_data(1, prediction_steps + initial_steps)[0] # Get the sequencial data
state = np.zeros([1, hidden_state_size]) # Initialize the RNN hidden state
model_input = tf.placeholder(tf.float32, shape = (None, input_size)) # Define the placeholders for generation
model_state_input = tf.placeholder(tf.float32, shape = (None, hidden_state_size))
model_output, model_state_output = model(model_input, model_state_input) # Define output tensors
record_given = []
record_pred = []
for i in range(initial_steps): # Feed the first 50 data points into the model for processing
_, state = sess.run([model_output, model_state_output], feed_dict = {model_input: initial_cond[:,i], model_state_input : state})
record_given.append(initial_cond[0][i][0])
pre_output = initial_cond[:,initial_steps-1]
for i in range(prediction_steps): # Predict the following 50 data points
pre_output, state = sess.run([model_output, model_state_output], feed_dict = {model_input: pre_output, model_state_input : state})
record_pred.append(pre_output[0])
record_given.append(initial_cond[0][i+initial_steps][0])
# Plot the prediction vs. actual prices
t_given = np.linspace(1, prediction_steps + initial_steps, prediction_steps + initial_steps)
t_pred = np.linspace(initial_steps + 1, initial_steps + prediction_steps, prediction_steps)
plt.plot(t_given, record_given, t_pred, record_pred)
plt.xlabel("Days")
plt.ylabel("Price ($)")
| TensorFlow/Tutorial/Tensorflow_RNN (Part 1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import gym
import gym_anytrading
import quantstats as qs
from stable_baselines import A2C
from stable_baselines import ACKTR
from stable_baselines.common.vec_env import DummyVecEnv
import matplotlib.pyplot as plt
# +
df = gym_anytrading.datasets.STOCKS_GOOGL.copy()
window_size = 10
start_index = window_size
end_index = len(df)
env_maker = lambda: gym.make(
'stocks-v0',
df = df,
window_size = window_size,
frame_bound = (start_index, end_index)
)
env = DummyVecEnv([env_maker])
# -
policy_kwargs = dict(net_arch=[64, 'lstm', dict(vf=[128, 128, 128], pi=[64, 64])])
model = ACKTR('MlpLstmPolicy', env, verbose=1, policy_kwargs=policy_kwargs)
model.learn(total_timesteps=16000)
| basline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# %matplotlib inline
import seaborn as sns
import re
data = pd.read_csv("./dataset/final_training.csv") ## path
data.head()
data.drop(["Unnamed: 0"],axis=1,inplace=True)
data.describe()
# #### 1 for toxic 0 for normal
#
toxic = data[data["Label"]==1]
normal = data[data["Label"]==0]
len(toxic),len(normal)
print("Percentage of normal comments = {}".format(len(normal)/len(data)*100))
print("Percentage of toxic comments = {}".format(len(toxic)/len(data)*100))
sns.set()
sns.countplot(data["Label"])
plt.show()
data['char_length'] = data['Review'].apply(lambda x: len(str(x)))
sns.set()
data['char_length'].hist()
plt.show()
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(data['Review'], data['Label'], test_size=0.2, random_state=42)
# -
print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)
# +
import pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
vect = TfidfVectorizer(max_features = 10000, stop_words='english')
#vect = TfidfVectorizer(stop_words='english')
print(vect)
pickle.dump(vect, open("./saved_models/vectorizer.pkl", "wb"))
# -
# +
# %%time
X_train_dtm = vect.fit_transform(X_train.apply(lambda x: np.str_(x)))
X_train_dtm
# -
pickle.dump(vect, open("./saved_models/vectorizer.pkl", "wb"))
vect = pickle.load(open("./saved_models/vectorizer.pkl", 'rb'))
X_train_dtm.shape
# +
# %%time
X_test_dtm = vect.transform(X_test.apply(lambda x: np.str_(x)))
X_test_dtm
# -
X_test_dtm.shape
# +
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
logreg = LogisticRegression(C=1, max_iter = 2000)
import pickle
# train the model using X_train_dtm & y_train
logreg.fit(X_train_dtm, y_train)
filename = "./saved_models/lr_model.pkl"
pickle.dump(logreg, open(filename, 'wb'))
# compute the training accuracy
y_pred_train = logreg.predict(X_train_dtm)
print('Training accuracy is {}'.format(accuracy_score(y_train, y_pred_train)))
# compute the predicted probabilities for X_test_dtm
y_pred_test = logreg.predict(X_test_dtm)
print('Test accuracy is {}'.format(accuracy_score(y_test,y_pred_test)))
print(confusion_matrix(y_test,y_pred_test))
# -
# ### Testing trained model
import pickle
model = pickle.load(open("./saved_models/lr_model.pkl", 'rb'))
vect = pickle.load(open("./saved_models/vectorizer.pkl", 'rb'))
sen = ["<NAME>...wanna suck"]
sen_trans = vect.transform(sen)
p = model.predict(sen_trans)[0]
validity = ["allowed","toxic"]
print(validity[p])
sen = ["Hey girl...you are beautiful"]
sen_trans = vect.transform(sen)
p = model.predict(sen_trans)[0]
validity = ["allowed","toxic"]
print(validity[p])
sen = ["Hey girl...you are nice but I think you like anal"]
sen_trans = vect.transform(sen)
p = model.predict(sen_trans)[0]
validity = ["allowed","toxic"]
print(validity[p])
sen = ["When you want to show your parents a video, and Big Black cock porn from last night is still open"]
sen_trans = vect.transform(sen)
p = model.predict(sen_trans)[0]
validity = ["allowed","toxic"]
print(validity[p])
sen = ["you get what you fuckin deserve"]
sen_trans = vect.transform(sen)
p = model.predict(sen_trans)[0]
validity = ["allowed","toxic"]
print(validity[p])
sen = ["When you accidently message your teacher 'FRIENDS is my favrite show..You are a fuckin Hoe'"]
sen_trans = vect.transform(sen)
p = model.predict(sen_trans)[0]
validity = ["allowed","toxic"]
print(validity[p])
# ! pip install -q pandas-ml
# %matplotlib inline
from pandas_ml import ConfusionMatrix
# Bug fix: `y_true`/`y_pred` were undefined here — use the held-out labels
# and predictions computed above. Renamed the result to avoid shadowing
# sklearn's `confusion_matrix` imported earlier in this notebook.
cm = ConfusionMatrix(y_test, y_pred_test)
cm.plot(normalized=True)
| hatespeech/Model_Training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
l_simple = [{'name': 'Alice', 'age': 25},
{'name': 'Bob'}]
print(pd.DataFrame(l_simple))
print(pd.json_normalize(l_simple))
l_nested = [{'name': 'Alice', 'age': 25, 'id': {'x': 2, 'y': 8}},
{'name': 'Bob', 'id': {'x': 10, 'y': 4}}]
print(pd.DataFrame(l_nested))
print(pd.json_normalize(l_nested))
print(pd.json_normalize(l_nested, sep='_'))
l_complex = [{'label': 'X',
'info' : {'n': 'nx', 'm': 'mx'},
'data': [{'a': 1, 'b': 2},
{'a': 3, 'b': 4}]},
{'label': 'Y',
'info' : {'n': 'ny', 'm': 'my'},
'data': [{'a': 10, 'b': 20},
{'a': 30, 'b': 40}]}]
print(pd.json_normalize(l_complex))
print(pd.json_normalize(l_complex, record_path='data'))
print(pd.json_normalize(l_complex, record_path='data', record_prefix='data_'))
print(pd.json_normalize(l_complex, record_path='data',
meta='label'))
print(pd.json_normalize(l_complex, record_path='data',
meta='label', meta_prefix='meta_'))
print(pd.json_normalize(l_complex, record_path='data',
meta='info'))
print(pd.json_normalize(l_complex, record_path='data',
meta=[['info', 'n'], ['info', 'm']]))
print(pd.json_normalize(l_complex, record_path='data',
meta=[['info', 'n'], ['info', 'm']],
sep='_'))
print(pd.json_normalize(l_complex, record_path='data',
meta=['label', ['info', 'n'], ['info', 'm']],
sep='_'))
print(pd.json_normalize(l_complex, record_path='data',
meta=[['info', 'n']]))
# +
# print(pd.json_normalize(l_complex, record_path='data',
# meta=['info', 'n']))
# KeyError: "Try running with errors='ignore' as key 'n' is not always present"
| notebook/pandas_json_normalize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-2:429704687514:environment/datascience
# ---
# + [markdown] papermill={"duration": 0.103182, "end_time": "2020-04-07T23:19:40.242023", "exception": false, "start_time": "2020-04-07T23:19:40.138841", "status": "completed"} tags=[]
# 
#
# **COVID19 - Análise do Surto Brasileiro**
# + [markdown] papermill={"duration": 0.087521, "end_time": "2020-04-07T23:19:40.420994", "exception": false, "start_time": "2020-04-07T23:19:40.333473", "status": "completed"} tags=[]
# # Modelo Epidemiológico Clássico: SEIR
#
# Neste documento investigamos a dinâmica epidemiológica da COVID19 empregando um modelo cĺássico conhecido como SEIR e algumas variações imediatas. Uma vez definido o modelo, empregamos dados históricos para estimar seus parâmetros para o caso brasileiro. Essas estimativas são usadas então para gerar dois conjuntos de dados simulados: sem e com medidas de miticação. Esses dois resultados são salvos em arquivos CSV para uso posterior em outras análises.
#
# Esta implementação serve como base para investigações mais profundas, e pode ser customizada e usado de diversos modos por eventuais interessados. É possível, por exemplo, experimentar com medidas de mitigação diferentes.
#
#
# + [markdown] papermill={"duration": 0.079732, "end_time": "2020-04-07T23:19:40.699874", "exception": false, "start_time": "2020-04-07T23:19:40.620142", "status": "completed"} tags=[]
# **ADVERTÊNCIA:** os modelos e números aqui apresentados não são afirmações formais sobre o progresso da doença, mas apenas exercícios que demonstram técnicas de modelagem e cenários de aplicação.
# + [markdown] papermill={"duration": 0.182106, "end_time": "2020-04-07T23:19:40.979293", "exception": false, "start_time": "2020-04-07T23:19:40.797187", "status": "completed"} tags=[]
# ## Preliminares
# Bibliotecas, configurações e parâmetros.
# + papermill={"duration": 1.283213, "end_time": "2020-04-07T23:19:42.360106", "exception": false, "start_time": "2020-04-07T23:19:41.076893", "status": "completed"} tags=[]
import os
import math
import hyperopt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [16, 8]  # wide default figure size for the epidemic charts
import seaborn as sns
sns.set()
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()  # let matplotlib plot pandas datetime indexes directly
# + papermill={"duration": 0.100428, "end_time": "2020-04-07T23:19:42.554508", "exception": false, "start_time": "2020-04-07T23:19:42.454080", "status": "completed"} tags=[]
plt.style.use('fivethirtyeight')
# -
print(os.getcwd())
# NOTE(review): hard-coded absolute path — only valid inside the original container; confirm.
os.chdir('/root/covid19-analytics/results/notebooks')
os.getcwd()
# + [markdown] papermill={"duration": 0.090041, "end_time": "2020-04-07T23:19:42.728140", "exception": false, "start_time": "2020-04-07T23:19:42.638099", "status": "completed"} tags=[]
# ## Parâmetros das Simulações
#
# Empregaremos simulações para efetivamente executar os modelos que serão desenvolvidos abaixo. Para tanto, é necessário definir-se alguns parâmetros gerais.
# + [markdown] papermill={"duration": 0.080422, "end_time": "2020-04-07T23:19:42.893922", "exception": false, "start_time": "2020-04-07T23:19:42.813500", "status": "completed"} tags=[]
# Data de início da epidemia.
# + papermill={"duration": 0.096501, "end_time": "2020-04-07T23:19:43.125483", "exception": false, "start_time": "2020-04-07T23:19:43.028982", "status": "completed"} tags=[]
# Epidemic start date (may be overwritten later when infer_parameters_from_data is True).
epidemic_start_date = pd.Timestamp(2020, 3, 12)
# + [markdown] papermill={"duration": 0.088613, "end_time": "2020-04-07T23:19:43.301364", "exception": false, "start_time": "2020-04-07T23:19:43.212751", "status": "completed"} tags=[]
# Quantidade de dias.
# + papermill={"duration": 0.090183, "end_time": "2020-04-07T23:19:43.473819", "exception": false, "start_time": "2020-04-07T23:19:43.383636", "status": "completed"} tags=[]
# Simulation horizon, in days.
epidemic_duration_in_days = 365
# + [markdown] papermill={"duration": 0.088318, "end_time": "2020-04-07T23:19:43.669144", "exception": false, "start_time": "2020-04-07T23:19:43.580826", "status": "completed"} tags=[]
# Tamanho da população.
# + papermill={"duration": 0.091091, "end_time": "2020-04-07T23:19:43.843947", "exception": false, "start_time": "2020-04-07T23:19:43.752856", "status": "completed"} tags=[]
# Total population size (200 million; may be overwritten from data later).
population_size = 200 * 1e6 #18000000
# + [markdown] papermill={"duration": 0.094646, "end_time": "2020-04-07T23:19:44.025263", "exception": false, "start_time": "2020-04-07T23:19:43.930617", "status": "completed"} tags=[]
# Infectados inicialmente.
# + papermill={"duration": 0.095689, "end_time": "2020-04-07T23:19:44.210189", "exception": false, "start_time": "2020-04-07T23:19:44.114500", "status": "completed"} tags=[]
# Number of infected individuals at the start of the simulation.
initially_infected = 52
# + [markdown] papermill={"duration": 0.087723, "end_time": "2020-04-07T23:19:44.438067", "exception": false, "start_time": "2020-04-07T23:19:44.350344", "status": "completed"} tags=[]
# Onde está a pasta base que contém os dados?
# + papermill={"duration": 0.139643, "end_time": "2020-04-07T23:19:44.661402", "exception": false, "start_time": "2020-04-07T23:19:44.521759", "status": "completed"} tags=[]
# Base folder containing the input data files.
data_folder = '../../data/'
# + [markdown] papermill={"duration": 0.083028, "end_time": "2020-04-07T23:19:44.827601", "exception": false, "start_time": "2020-04-07T23:19:44.744573", "status": "completed"} tags=[]
# Qual coluna dos dados históricos reais devemos usar para ajustar os parâmetros dos modelos?
# + papermill={"duration": 0.123473, "end_time": "2020-04-07T23:19:45.045817", "exception": false, "start_time": "2020-04-07T23:19:44.922344", "status": "completed"} tags=[]
# Column of the observed historical data used when fitting the model parameters.
data_column_to_fit = 'total_cases_ESTIMATED'
# + [markdown] papermill={"duration": 0.086721, "end_time": "2020-04-07T23:19:45.234542", "exception": false, "start_time": "2020-04-07T23:19:45.147821", "status": "completed"} tags=[]
# Quando empregarmos dados reais para calibrar os parâmetros do modelo, qual país devemos usar?
# + papermill={"duration": 0.0941, "end_time": "2020-04-07T23:19:45.423248", "exception": false, "start_time": "2020-04-07T23:19:45.329148", "status": "completed"} tags=[]
# Country whose observed data calibrates the model (papermill may override this below).
target_location = 'Brazil'
# + [markdown] papermill={"duration": 0.091708, "end_time": "2020-04-07T23:19:45.602763", "exception": false, "start_time": "2020-04-07T23:19:45.511055", "status": "completed"} tags=[]
# Devemos usar os dados disponíveis para modificar algum dos parâmetros acima? Em caso positivo, dados demográficos e epidêmicos serão substituídos.
# + papermill={"duration": 0.089891, "end_time": "2020-04-07T23:19:45.781402", "exception": false, "start_time": "2020-04-07T23:19:45.691511", "status": "completed"} tags=[]
# When True, demographic/epidemic parameters above get replaced with values read from the data.
infer_parameters_from_data = True
# + [markdown] papermill={"duration": 0.092878, "end_time": "2020-04-07T23:19:45.981546", "exception": false, "start_time": "2020-04-07T23:19:45.888668", "status": "completed"} tags=[]
# Onde queremos armazenar resultados do modelo para uso posterior.
# + papermill={"duration": 0.09571, "end_time": "2020-04-07T23:19:46.173722", "exception": false, "start_time": "2020-04-07T23:19:46.078012", "status": "completed"} tags=[]
# Folder where result files (e.g. saved charts) are written.
results_folder = '../'
# + [markdown] papermill={"duration": 0.089244, "end_time": "2020-04-07T23:19:46.349097", "exception": false, "start_time": "2020-04-07T23:19:46.259853", "status": "completed"} tags=[]
# Algum sufixo adicional para marcar o nome dos resultados salvos?
# + papermill={"duration": 0.117372, "end_time": "2020-04-07T23:19:46.563124", "exception": false, "start_time": "2020-04-07T23:19:46.445752", "status": "completed"} tags=[]
# Optional suffix appended to the names of saved result files.
results_suffix = ''
# + [markdown] papermill={"duration": 0.198098, "end_time": "2020-04-07T23:19:46.860993", "exception": false, "start_time": "2020-04-07T23:19:46.662895", "status": "completed"} tags=[]
# ### Parâmetros Injetados Externamente
# + [markdown] papermill={"duration": 0.149925, "end_time": "2020-04-07T23:19:47.190643", "exception": false, "start_time": "2020-04-07T23:19:47.040718", "status": "completed"} tags=[]
# Este *notebook* pode também ser executado com a biblioteca [papermill](https://papermill.readthedocs.io/en/latest/index.html), de modo a customizar parâmetros sem a necessidade de editá-los diretamente aqui. Tais parâmetros customizados serão colocados na célula abaixo quando existirem, e assim tomarão o lugar dos valores dados acima.
# + papermill={"duration": 0.114098, "end_time": "2020-04-07T23:19:47.414511", "exception": false, "start_time": "2020-04-07T23:19:47.300413", "status": "completed"} tags=["parameters"]
# custom papermill parameters
# + papermill={"duration": 0.094748, "end_time": "2020-04-07T23:19:47.604982", "exception": false, "start_time": "2020-04-07T23:19:47.510234", "status": "completed"} tags=["injected-parameters"]
# Parameters
# Injected by papermill at execution time; these override the defaults defined above.
target_location = "Italy"
infer_parameters_from_data = True
results_suffix = ".italy"
# + [markdown] papermill={"duration": 0.121243, "end_time": "2020-04-07T23:19:47.824036", "exception": false, "start_time": "2020-04-07T23:19:47.702793", "status": "completed"} tags=[]
# ## Funções Auxiliares
#
# E uma função para auxiliar no desenho de gráficos. Opcionalmente, se um nome de arquivo for especificado, a função irá salvar a imagem resultante.
# + papermill={"duration": 0.122472, "end_time": "2020-04-07T23:19:48.037725", "exception": false, "start_time": "2020-04-07T23:19:47.915253", "status": "completed"} tags=[]
def plot_simulation_output(df_simulated_data, zoom_on=None, zoom_length=60, file_path_base=results_folder, file_name=None):
    """Plot the simulated SEIR curves: overview, E/I detail and an optional zoom.

    Parameters
    ----------
    df_simulated_data : DataFrame with at least the S, E, I, R, E+I, E+I+R columns.
    zoom_on : optional date-like start of a zoomed-in window.
    zoom_length : length of the zoom window, in days.
    file_path_base : folder prefix used when saving images.
    file_name : when given, each chart is also saved as a PNG file.
    """
    # keep only the compartment columns (drops the *_delta diagnostics, if present)
    df_simulated_data = df_simulated_data[['S', 'E', 'I', 'R', 'E+I', 'E+I+R']]
    def aux_file_path(x):
        # e.g. <base>seir_<name>_<country>_<chart><suffix>.png
        return file_path_base + 'seir_' + file_name + f'_{target_location.lower()}_{x}' + results_suffix + '.png'
    # 1) overview with all compartments
    ax = sns.lineplot(data=df_simulated_data)
    ax.set_title('Visão Geral da Epidemia')
    if file_name is not None:
        ax.figure.savefig(aux_file_path('overview'), format='png')
    plt.figure()
    # 2) exposed/infected detail
    ax = sns.lineplot(data=df_simulated_data[['E', 'I', 'E+I', 'E+I+R']])
    ax.set_title('Apenas Expostos e Infectados')
    if file_name is not None:
        ax.figure.savefig(aux_file_path('EI'), format='png')
    plt.figure()
    # 3) optional zoom over a shorter window
    if zoom_on is not None:
        zoom_end = (pd.Timestamp(zoom_on) + pd.DateOffset(days=zoom_length)).date()
        ax = sns.lineplot(data=df_simulated_data[['E', 'I', 'E+I', 'E+I+R']][zoom_on:zoom_end], markers=True)
        # BUGFIX: set the title BEFORE saving, so the saved PNG includes it
        # (the two charts above already save in this order).
        ax.set_title(f'Zoom (de {zoom_on} a {zoom_end})')
        if file_name is not None:
            ax.figure.savefig(aux_file_path('EI-zoom'), format='png')
        plt.figure()
# + [markdown] papermill={"duration": 0.096052, "end_time": "2020-04-07T23:19:48.224622", "exception": false, "start_time": "2020-04-07T23:19:48.128570", "status": "completed"} tags=[]
# ## Leitura e Preparação de Dados para Calibragem de Modelos
#
# Mais adiante no *notebook*, empregaremos dados históricos observados para calibrar os parâmetros da simulação. Vamos empregar dados do [Our World in Data](https://ourworldindata.org/coronavirus-source-data ). Convém assim tornar esses dados disponíveis. Isso inclui não apenas dados sobre a epidemia em si, mas também algumas informações demográficas adicionais.
# + papermill={"duration": 0.140007, "end_time": "2020-04-07T23:19:48.467922", "exception": false, "start_time": "2020-04-07T23:19:48.327915", "status": "completed"} tags=[]
# Load the Our World in Data epidemic time series and the per-location metadata.
df_epidemy_data = pd.read_csv(f'{data_folder}ourworldindata.org/coronavirus-source-data/full_data.csv', parse_dates=['date'])
df_locations_data = pd.read_csv(f'{data_folder}ourworldindata.org/coronavirus-source-data/locations.csv')
# enrich epidemy data with additional demographic information
df_epidemy_data = df_epidemy_data.merge(df_locations_data, on='location')
df_epidemy_data.head(2)
# + [markdown] papermill={"duration": 0.085932, "end_time": "2020-04-07T23:19:48.652916", "exception": false, "start_time": "2020-04-07T23:19:48.566984", "status": "completed"} tags=[]
# Selecionemos apenas dados sobre o país de interesse.
# + papermill={"duration": 0.105907, "end_time": "2020-04-07T23:19:48.845126", "exception": false, "start_time": "2020-04-07T23:19:48.739219", "status": "completed"} tags=[]
# Keep only the rows of the target country, indexed by date.
df_data_target = df_epidemy_data[df_epidemy_data['location']==target_location].copy().set_index(['date']).drop('location', axis=1)
# + [markdown] papermill={"duration": 0.114938, "end_time": "2020-04-07T23:19:49.055010", "exception": false, "start_time": "2020-04-07T23:19:48.940072", "status": "completed"} tags=[]
# Antes de prosseguir, vamos enriquecer esses dados com algumas suposições.
# + papermill={"duration": 0.63015, "end_time": "2020-04-07T23:19:49.782829", "exception": false, "start_time": "2020-04-07T23:19:49.152679", "status": "completed"} tags=[]
# Estimate total cases from deaths assuming a 5% fatality rate with a 7-day lag
# (NOTE(review): both constants are modeling assumptions, not observed facts).
df_data_target['total_cases_ESTIMATED'] = (df_data_target['total_deaths'] / 0.05).shift(-7)
# Cruder alternative estimate: 10x the confirmed case count.
df_data_target['total_cases_ESTIMATED_2'] = 10 * df_data_target['total_cases']
sns.lineplot(data=df_data_target[['total_cases', 'total_deaths']])
df_data_target.tail()
# + [markdown] papermill={"duration": 0.092683, "end_time": "2020-04-07T23:19:49.995882", "exception": false, "start_time": "2020-04-07T23:19:49.903199", "status": "completed"} tags=[]
# ### Substituição de Parâmetros
#
# Se requisitado, vamos substituir alguns dos parâmetros por valores presentes nos dados.
# + papermill={"duration": 0.100373, "end_time": "2020-04-07T23:19:50.201484", "exception": false, "start_time": "2020-04-07T23:19:50.101111", "status": "completed"} tags=[]
# Exploratory peek: first date with at least 50 confirmed cases.
df_data_target[df_data_target['total_cases'] >= 50]['total_cases'].index[0]
# + papermill={"duration": 0.102424, "end_time": "2020-04-07T23:19:50.397032", "exception": false, "start_time": "2020-04-07T23:19:50.294608", "status": "completed"} tags=[]
if infer_parameters_from_data:
    # Start the epidemic at the first date with >= 50 confirmed cases, and take
    # population size and initial infections from that row of the observed data.
    epidemic_start_date = df_data_target[df_data_target['total_cases'] >= 50]['total_cases'].index[0]
    first_date_row = df_data_target.loc[epidemic_start_date]
    population_size = first_date_row['population']
    initially_infected = first_date_row['total_cases']
    print(f'NEW VALUES: epidemic_start_date={epidemic_start_date}, population_size={population_size}, initially_infected={initially_infected}')
# + [markdown] papermill={"duration": 0.098012, "end_time": "2020-04-07T23:19:50.586777", "exception": false, "start_time": "2020-04-07T23:19:50.488765", "status": "completed"} tags=[]
# Filtremos os dados a partir da data de interesse.
# + papermill={"duration": 0.096643, "end_time": "2020-04-07T23:19:50.776999", "exception": false, "start_time": "2020-04-07T23:19:50.680356", "status": "completed"} tags=[]
# Discard observations earlier than the (possibly inferred) epidemic start date.
df_data_target = df_data_target[epidemic_start_date:]
# + [markdown] papermill={"duration": 0.087296, "end_time": "2020-04-07T23:19:50.979784", "exception": false, "start_time": "2020-04-07T23:19:50.892488", "status": "completed"} tags=[]
# ## Modelo SEIR
# + [markdown] papermill={"duration": 0.099288, "end_time": "2020-04-07T23:19:51.170513", "exception": false, "start_time": "2020-04-07T23:19:51.071225", "status": "completed"} tags=[]
# O [modelo SEIR](https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology) é um dos modos clássicos de se representar a dinâmica de epidemias. Trata-se de um chamado "modelo de compartimento", no qual temos alguns compartimentos principais:
#
# - S(t): número de indivíduos suscetíveis em função do tempo.
# - E(t): número de expostos, mas ainda não transmissores, em função do tempo.
# - I(t): número de indivíduos infecciosos em função do tempo.
# - R(t): número de indivíduos que se recuperam em função do tempo.
#
# São também necessários dois outros parâmetros, que definem taxas de transição entre compartimentos:
#
# - beta: valor entre 0.0 e 1.0 que define a taxa de S para E (i.e., taxa de exposição)
# - alpha: valor entre 0.0 e 1.0 que define a taxa de E para I (i.e., taxa de contaminação)
# - gamma: valor entre 0.0 e 1.0 que define a taxa de I para R (i.e., taxa de recuperação)
#
# Tradicionalmente, esse modelo é tratado via equações diferenciais. Aqui, adotaremos uma implementação via simulações estocásticas, o que nos permitirá posteriormente realizar modificações variadas que talvez não sejam tratáveis analiticamente.
# + [markdown] papermill={"duration": 0.113027, "end_time": "2020-04-07T23:19:51.437265", "exception": false, "start_time": "2020-04-07T23:19:51.324238", "status": "completed"} tags=[]
# ### Funções Auxiliares
#
# Antes de prosseguir, vamos definir algumas funções auxiliares que garantirão que os valores acrescidos ou subtraídos nos diversos compartimentos não ultrapassem valores limites.
# + papermill={"duration": 0.098694, "end_time": "2020-04-07T23:19:51.625291", "exception": false, "start_time": "2020-04-07T23:19:51.526597", "status": "completed"} tags=[]
def aux_enforce_max_addition(Q, delta):
    """Cap an inflow: never transfer more than the Q available in the source compartment."""
    return Q if Q - delta <= 0 else delta
def aux_enforce_max_removal(Q, delta):
    """Cap an outflow: never remove more than the Q present in the compartment."""
    return -Q if Q + delta <= 0 else delta
# + [markdown] papermill={"duration": 0.097311, "end_time": "2020-04-07T23:19:51.814910", "exception": false, "start_time": "2020-04-07T23:19:51.717599", "status": "completed"} tags=[]
# Ao longo de uma epidemia, é razoável supor que autoridades sanitárias e, de fato, a população como um todo, adotarão medidas que possam reduzir o contágio e melhorar as chances de recuperação. Isso pode ser modelado com parâmetros dinâmicos, supondo-se intervenções em momentos precisos do tempo. Assim, os parâmetros do modelo aqui proposto podem ser tanto escalares quanto dicionários que mapeiam instantes a valores naqueles instantes. Para que a simulação possa usar transparentemente qualquer caso, definimos a seguir uma função que extrai o valor do parâmetro conforme o caso.
# + papermill={"duration": 0.108227, "end_time": "2020-04-07T23:19:52.015244", "exception": false, "start_time": "2020-04-07T23:19:51.907017", "status": "completed"} tags=[]
def param_at(param_var, t):
    """Value of a model parameter at step t: dicts are time-indexed, anything else is constant."""
    if not isinstance(param_var, dict):
        return param_var
    return param_var[t]
# + [markdown] papermill={"duration": 0.097199, "end_time": "2020-04-07T23:19:52.205758", "exception": false, "start_time": "2020-04-07T23:19:52.108559", "status": "completed"} tags=[]
# Convém também ter uma função para montar as sequências de parâmetros (veremos um exemplo mais adiante para entender seu uso).
# + papermill={"duration": 0.107703, "end_time": "2020-04-07T23:19:52.422713", "exception": false, "start_time": "2020-04-07T23:19:52.315010", "status": "completed"} tags=[]
def dynamic_parameter(regimens, min_length):
    """Build a {time_step: value} mapping from a list of (value, duration) regimens.

    Each (value, duration) pair assigns `value` to the next `duration`
    consecutive time steps. If the regimens cover fewer than `min_length`
    steps, the remainder up to `min_length` is padded with the last specified
    value, so every t in range(min_length) is mapped.

    Returns the mapping, suitable for use with param_at().
    """
    i = 0
    params = {}
    last_value = None
    min_length = int(min_length)
    for value, length in regimens:
        length = int(length)
        params.update({t: value for t in range(i, i + length)})
        i += length
        last_value = value
    # If we do not have enough data, fill the remaining entries with the last
    # specified value. BUGFIX: pad only up to min_length — the original used
    # range(i, i + min_length), creating i superfluous entries past the horizon.
    if i < min_length:
        params.update({t: last_value for t in range(i, min_length)})
    return params
# + [markdown] papermill={"duration": 0.095745, "end_time": "2020-04-07T23:19:52.610471", "exception": false, "start_time": "2020-04-07T23:19:52.514726", "status": "completed"} tags=[]
# ### Funções dos Compartimentos
#
# Podemos agora definir a dinâmica dos diversos compartimentos contemplados no modelo.
# + papermill={"duration": 0.104888, "end_time": "2020-04-07T23:19:52.809374", "exception": false, "start_time": "2020-04-07T23:19:52.704486", "status": "completed"} tags=[]
def s(t, S, E, I, R, alpha, beta, gamma):
    """Change in the susceptible compartment S at step t (non-positive)."""
    # contacts between infectious and susceptible individuals drain S
    outflow = -(param_at(beta, t) * I * S) / population_size
    return aux_enforce_max_removal(S, outflow)
# + papermill={"duration": 0.100521, "end_time": "2020-04-07T23:19:53.007591", "exception": false, "start_time": "2020-04-07T23:19:52.907070", "status": "completed"} tags=[]
def e(t, S, E, I, R, alpha, beta, gamma):
    """Change in the exposed compartment E at step t."""
    # inflow from S, capped so it never exceeds the susceptible pool
    inflow = aux_enforce_max_addition(S, (param_at(beta, t) * I * S) / population_size)
    # net change: inflow minus progression to I (param_at on a scalar is a no-op)
    return aux_enforce_max_removal(E, param_at(inflow, t) - param_at(alpha, t) * E)
# + papermill={"duration": 0.100146, "end_time": "2020-04-07T23:19:53.201984", "exception": false, "start_time": "2020-04-07T23:19:53.101838", "status": "completed"} tags=[]
def i(t, S, E, I, R, alpha, beta, gamma):
    """Change in the infectious compartment I at step t."""
    # inflow from E, capped by the current size of E
    inflow = aux_enforce_max_addition(E, param_at(alpha, t) * E)
    # net change: inflow minus recoveries (param_at on a scalar is a no-op)
    return aux_enforce_max_removal(I, param_at(inflow, t) - param_at(gamma, t) * I)
# + papermill={"duration": 0.104935, "end_time": "2020-04-07T23:19:53.409392", "exception": false, "start_time": "2020-04-07T23:19:53.304457", "status": "completed"} tags=[]
def r(t, S, E, I, R, alpha, beta, gamma):
    """Change in the recovered compartment R at step t (non-negative)."""
    # recoveries flow in from I, capped by the current size of I
    return aux_enforce_max_addition(I, param_at(gamma, t) * I)
# + [markdown] papermill={"duration": 0.141316, "end_time": "2020-04-07T23:19:53.640220", "exception": false, "start_time": "2020-04-07T23:19:53.498904", "status": "completed"} tags=[]
# Também convém poder calcular a variação dos compartimentos E e I para posterior análise.
# + papermill={"duration": 0.098587, "end_time": "2020-04-07T23:19:53.831997", "exception": false, "start_time": "2020-04-07T23:19:53.733410", "status": "completed"} tags=[]
# how many new patients will be added to E
def e_delta(t, S, E, I, R, alpha, beta, gamma):
    """Gross inflow S -> E at step t (diagnostic; mirrors the inflow term of e())."""
    return aux_enforce_max_addition(S, (param_at(beta, t) * I * S) / population_size)
# + papermill={"duration": 0.106899, "end_time": "2020-04-07T23:19:54.036125", "exception": false, "start_time": "2020-04-07T23:19:53.929226", "status": "completed"} tags=[]
# how many new patients will be added to I
def i_delta(t, S, E, I, R, alpha, beta, gamma):
    """Gross inflow E -> I at step t (diagnostic; mirrors the inflow term of i())."""
    return aux_enforce_max_addition(E, param_at(alpha, t) * E)
# + papermill={"duration": 0.094825, "end_time": "2020-04-07T23:19:54.226559", "exception": false, "start_time": "2020-04-07T23:19:54.131734", "status": "completed"} tags=[]
# how many new patients will be added to R
def r_delta(t, S, E, I, R, alpha, beta, gamma):
    """Gross inflow I -> R at step t (diagnostic; mirrors r())."""
    return aux_enforce_max_addition(I, param_at(gamma, t) * I)
# + [markdown] papermill={"duration": 0.133606, "end_time": "2020-04-07T23:19:54.450871", "exception": false, "start_time": "2020-04-07T23:19:54.317265", "status": "completed"} tags=[]
# ### Simulação
#
# A simulação então pode ser definida do seguinte modo.
# + papermill={"duration": 0.114302, "end_time": "2020-04-07T23:19:54.657191", "exception": false, "start_time": "2020-04-07T23:19:54.542889", "status": "completed"} tags=[]
def simulate(S, E, I, R, alpha, beta, gamma, epidemic_start_date, epidemic_duration_in_days,
             s_func, e_func, i_func, r_func, e_delta_func, i_delta_func, r_delta_func):
    """Run the discrete-time SEIR simulation over the whole epidemic horizon.

    S, E, I, R are the initial compartment sizes; alpha/beta/gamma may be
    scalars or {t: value} dicts (resolved via param_at inside the *_func
    callbacks). The *_func arguments supply the per-step compartment updates
    and the gross-inflow diagnostics.

    Returns a DataFrame indexed by calendar day with columns
    S, E, E_delta, I, I_delta, R, R_delta plus the derived E+I and E+I+R.
    """
    generated_data = [] # initial data
    # changes start at 0
    E_delta = 0
    I_delta = 0
    R_delta = 0
    for t in range(0, epidemic_duration_in_days):
        # The row for day t is recorded before computing this step's transitions,
        # so the *_delta values stored at t are the flows computed at step t-1.
        generated_data.append((S, E, E_delta, I, I_delta, R, R_delta))
        # main model components
        S_next = S + s_func(t, S, E, I, R, alpha, beta, gamma)
        E_next = E + e_func(t, S, E, I, R, alpha, beta, gamma)
        I_next = I + i_func(t, S, E, I, R, alpha, beta, gamma)
        R_next = R + r_func(t, S, E, I, R, alpha, beta, gamma)
        # added information for later analyses (uses the pre-update state)
        I_delta = i_delta_func(t, S, E, I, R, alpha, beta, gamma)
        E_delta = e_delta_func(t, S, E, I, R, alpha, beta, gamma)
        R_delta = r_delta_func(t, S, E, I, R, alpha, beta, gamma)
        # lockstep updates
        S = S_next
        E = E_next
        I = I_next
        R = R_next
        # sanity check: compartments must always add up to the module-level population size
        assert math.isclose(S + E + I + R, population_size, rel_tol=1e-9, abs_tol=0.0), "Population size must not change."
    df = pd.DataFrame(generated_data,
                      columns=['S', 'E', 'E_delta', 'I', 'I_delta', 'R', 'R_delta'],
                      index=pd.date_range(start=epidemic_start_date, periods=epidemic_duration_in_days, freq='D'))
    df['E+I'] = df['E'] + df['I']
    df['E+I+R'] = df['E'] + df['I'] + df['R']
    return df
# + [markdown] papermill={"duration": 0.092421, "end_time": "2020-04-07T23:19:54.845563", "exception": false, "start_time": "2020-04-07T23:19:54.753142", "status": "completed"} tags=[]
# Podemos agora simular situações de interesse definindo os parâmetros do modelo.
# + papermill={"duration": 0.103655, "end_time": "2020-04-07T23:19:55.044460", "exception": false, "start_time": "2020-04-07T23:19:54.940805", "status": "completed"} tags=[]
# Hand-picked SEIR rates for an illustrative run (calibrated values come later).
alpha = 0.9 # E to I rate
beta = 0.8 # S to E rate
gamma = 0.3 # I to R rate
# + papermill={"duration": 2.475277, "end_time": "2020-04-07T23:19:57.642770", "exception": false, "start_time": "2020-04-07T23:19:55.167493", "status": "completed"} tags=[]
# Run the SEIR simulation with the hand-picked rates; everyone but the
# initially infected starts susceptible, and the infected start in E.
df_simulation_data = simulate(S=population_size - initially_infected,
                              E =initially_infected,
                              I=0,
                              R=0,
                              alpha=alpha, beta=beta, gamma=gamma,
                              epidemic_start_date=epidemic_start_date,
                              epidemic_duration_in_days=epidemic_duration_in_days,
                              s_func=s,
                              e_func=e,
                              i_func=i,
                              r_func=r,
                              e_delta_func=e_delta,
                              i_delta_func=i_delta,
                              r_delta_func=r_delta)
plot_simulation_output(df_simulation_data, zoom_on='2020-04')
df_simulation_data.head()
# + [markdown] papermill={"duration": 0.11374, "end_time": "2020-04-07T23:19:57.881149", "exception": false, "start_time": "2020-04-07T23:19:57.767409", "status": "completed"} tags=[]
# Como isso se compara visualmente com os dados reais observados?
# + papermill={"duration": 0.742014, "end_time": "2020-04-07T23:19:58.763192", "exception": false, "start_time": "2020-04-07T23:19:58.021178", "status": "completed"} tags=[]
# Compare the simulated curves with the observed data over the last ~20 days.
comparison_date = pd.Timestamp.today() - pd.DateOffset(days=1)
df_simulation_vs_real = pd.concat([df_simulation_data, df_data_target], axis=1)
sns.lineplot(data=df_simulation_vs_real[['E+I+R', 'E+I', 'total_cases', 'total_cases_ESTIMATED']]\
             [comparison_date - pd.DateOffset(days=20):comparison_date],
             markers=True)
# + [markdown] papermill={"duration": 0.112422, "end_time": "2020-04-07T23:19:58.986884", "exception": false, "start_time": "2020-04-07T23:19:58.874462", "status": "completed"} tags=[]
# Vejamos um exemplo agora de parâmetro dinâmico. Vamos definir o parâmetro *beta* como 0.3 nos primeiros 30 dias, 0.1 nos 60 dias seguintes e 0.05 daí em diante.
# + papermill={"duration": 0.123037, "end_time": "2020-04-07T23:19:59.245391", "exception": false, "start_time": "2020-04-07T23:19:59.122354", "status": "completed"} tags=[]
# Time-varying contact rate: 0.3 for the first 30 days, 0.1 for the next 60,
# then 0.05 for the remainder of the horizon (a mitigation scenario).
beta = dynamic_parameter([(0.3, 30),
                          (0.1, 60),
                          (0.05, epidemic_duration_in_days)], min_length=epidemic_duration_in_days)
# + papermill={"duration": 1.864725, "end_time": "2020-04-07T23:20:01.248576", "exception": false, "start_time": "2020-04-07T23:19:59.383851", "status": "completed"} tags=[]
# Re-run the simulation, now with the time-varying beta defined above.
df_simulation_data = simulate(S=population_size - initially_infected,
                              E =initially_infected,
                              I=0,
                              R=0,
                              alpha=alpha, beta=beta, gamma=gamma,
                              epidemic_start_date=epidemic_start_date,
                              epidemic_duration_in_days=epidemic_duration_in_days,
                              s_func=s,
                              e_func=e,
                              i_func=i,
                              r_func=r,
                              e_delta_func=e_delta,
                              i_delta_func=i_delta,
                              r_delta_func=r_delta)
plot_simulation_output(df_simulation_data)
df_simulation_data.head()
# + [markdown] papermill={"duration": 0.199218, "end_time": "2020-04-07T23:20:01.566588", "exception": false, "start_time": "2020-04-07T23:20:01.367370", "status": "completed"} tags=[]
# ## Calibragem de Parâmetros
#
# Podemos agora confrontar os modelos acima com dados reais observados e, assim, encontrar os parâmetros que melhor aproximam a realidade.
# + [markdown] papermill={"duration": 0.128856, "end_time": "2020-04-07T23:20:01.813877", "exception": false, "start_time": "2020-04-07T23:20:01.685021", "status": "completed"} tags=[]
# Faremos a busca de parâmetros empregando a biblioteca [hyperopt](https://github.com/hyperopt/hyperopt). Para tanto, precisamos definir uma função objetivo a ser otimizada. Aqui, essa função executa uma simulação com os parâmetros (*alpha*, *beta* e *gamma*) sugeridos, obtém a curva de infecções resultante e calcula o erro absoluto médio com relação às datas para as quais temos as observações reais.
# + papermill={"duration": 0.133101, "end_time": "2020-04-07T23:20:02.061838", "exception": false, "start_time": "2020-04-07T23:20:01.928737", "status": "completed"} tags=[]
def objective_for_simulation(args):
    """Objective for hyperopt: mean absolute error between simulated and observed totals.

    `args` holds either a 'constant' entry (alpha, beta, gamma, t_min, t_max)
    or a 'dynamic' entry (per-regimen parameter dicts, t_min, t_max).
    Lower is better; hyperopt minimizes this value.
    """
    ##################################################################
    # What kind of parameters are we optimizing? Constant or dynamic?
    ##################################################################
    if 'constant' in args:
        alpha, beta, gamma, t_min, t_max = args['constant']
    elif 'dynamic' in args:
        regimen_specs, t_min, t_max = args['dynamic']  # TODO
        alpha_spec, beta_spec, gamma_spec = [], [], []
        for spec in regimen_specs:
            duration = int(spec['duration'])
            alpha_spec.append((spec['alpha'], duration))
            beta_spec.append((spec['beta'], duration))
            gamma_spec.append((spec['gamma'], duration))
        alpha = dynamic_parameter(alpha_spec, min_length=epidemic_duration_in_days)
        beta = dynamic_parameter(beta_spec, min_length=epidemic_duration_in_days)
        gamma = dynamic_parameter(gamma_spec, min_length=epidemic_duration_in_days)
    ###########
    # Simulate
    ###########
    df_sim = simulate(S=population_size - initially_infected,
                      E=initially_infected,
                      I=0,
                      R=0,
                      alpha=alpha, beta=beta, gamma=gamma,
                      epidemic_start_date=epidemic_start_date,
                      epidemic_duration_in_days=epidemic_duration_in_days,
                      s_func=s, e_func=e, i_func=i, r_func=r,
                      e_delta_func=e_delta, i_delta_func=i_delta, r_delta_func=r_delta)
    # keep only the simulated dates that have real observations
    df_sim = df_sim[df_sim.index.isin(df_data_target.index.values)]
    ###################
    # Calculate error
    ###################
    residuals = df_data_target[data_column_to_fit] - (df_sim['I'] + df_sim['E'] + df_sim['R'])
    # we can limit the temporal reach of the estimation if desired
    if t_min is not None and t_max is not None:
        residuals = residuals.iloc[t_min:t_max]
    # mean absolute error over the dates that actually have observed values
    return np.mean(abs(residuals.dropna().values))
# + [markdown] papermill={"duration": 0.262372, "end_time": "2020-04-07T23:20:02.447454", "exception": false, "start_time": "2020-04-07T23:20:02.185082", "status": "completed"} tags=[]
# ### Com Parâmetros Constantes
#
# Suponhamos que os parâmetros do modelo sejam constantes, ou seja, que não haja mudanças de comportamento ou políticas ao longo da epidemia.
# + [markdown] papermill={"duration": 0.138375, "end_time": "2020-04-07T23:20:02.834100", "exception": false, "start_time": "2020-04-07T23:20:02.695725", "status": "completed"} tags=[]
# Definimos então o espaço de busca dos parâmetros e efetivamente executamos a busca.
# + papermill={"duration": 7.734989, "end_time": "2020-04-07T23:20:10.699381", "exception": false, "start_time": "2020-04-07T23:20:02.964392", "status": "completed"} tags=[]
# Search space: alpha nearly pinned at ~1.0, beta and gamma free in [0, 1];
# t_min=0 with t_max=None means the error uses the full observed period.
#space = {'constant': (hyperopt.hp.uniform('alpha', 0.0, 1.0), hyperopt.hp.uniform('beta', 0.0, 1.0), hyperopt.hp.uniform('gamma', 0.0, 1.0), 0, None)}
space = {'constant':(hyperopt.hp.uniform('alpha', 0.99, 1.0), hyperopt.hp.uniform('beta', 0.0, 1.0), hyperopt.hp.uniform('gamma', 0.0, 1.0), 0, None)}
trials = hyperopt.Trials()
# TPE-based minimization of the simulation MAE, 300 evaluations.
best = hyperopt.fmin(objective_for_simulation, space, algo=hyperopt.tpe.suggest, max_evals=300, trials=trials)
print("Best parameters found:", best)
# + papermill={"duration": 0.167747, "end_time": "2020-04-07T23:20:11.054578", "exception": false, "start_time": "2020-04-07T23:20:10.886831", "status": "completed"} tags=[]
#trials.losses()
#pd.Series(trials.losses()).plot(title='Loss during optimization')
# + [markdown] papermill={"duration": 0.141075, "end_time": "2020-04-07T23:20:11.407415", "exception": false, "start_time": "2020-04-07T23:20:11.266340", "status": "completed"} tags=[]
# Podemos então executar o modelo com os parâmetros encontrados e examinar as curvas de progressão da epidemia.
# + papermill={"duration": 3.246097, "end_time": "2020-04-07T23:20:14.792636", "exception": false, "start_time": "2020-04-07T23:20:11.546539", "status": "completed"} tags=[]
# Unpack the best constant parameters found by the search above.
alpha = best['alpha']
beta = best['beta']
gamma = best['gamma']
# Re-run the SEIR simulation with the fitted parameters; everyone except the
# initially infected starts in the susceptible compartment.
df_simulation_data = simulate(S=population_size - initially_infected,
                              E=initially_infected,
                              I=0,
                              R=0,
                              alpha=alpha, beta=beta, gamma=gamma,
                              epidemic_start_date=epidemic_start_date,
                              epidemic_duration_in_days=epidemic_duration_in_days,
                              s_func=s,
                              e_func=e,
                              i_func=i,
                              r_func=r,
                              e_delta_func=e_delta,
                              i_delta_func=i_delta,
                              r_delta_func=r_delta)
plot_simulation_output(df_simulation_data, zoom_on='2020-03', file_name='constant')
df_simulation_data['2020-03-16':'2020-05-01'].head()
# + [markdown] papermill={"duration": 0.14513, "end_time": "2020-04-07T23:20:15.094776", "exception": false, "start_time": "2020-04-07T23:20:14.949646", "status": "completed"} tags=[]
# Como isso se compara visualmente com os dados reais observados?
# + papermill={"duration": 0.185519, "end_time": "2020-04-07T23:20:15.452711", "exception": false, "start_time": "2020-04-07T23:20:15.267192", "status": "completed"} tags=[]
df_data_target.tail()
# + papermill={"duration": 0.724333, "end_time": "2020-04-07T23:20:16.333063", "exception": false, "start_time": "2020-04-07T23:20:15.608730", "status": "completed"} tags=[]
# Align simulation output with the observed data and plot the 30 days leading
# up to the chosen comparison date.
comparison_date = pd.Timestamp(2020, 4, 15) #pd.Timestamp.today() - pd.DateOffset(days=7)
df_simulation_vs_real = pd.concat([df_simulation_data, df_data_target], axis=1)
sns.lineplot(data=df_simulation_vs_real[['E+I+R', 'E+I', 'total_cases', 'total_cases_ESTIMATED']]\
                 [comparison_date - pd.DateOffset(days=30):comparison_date],
             markers=True)
# + [markdown] papermill={"duration": 0.154795, "end_time": "2020-04-07T23:20:16.651378", "exception": false, "start_time": "2020-04-07T23:20:16.496583", "status": "completed"} tags=[]
# Let's save the results.
# + papermill={"duration": 0.238537, "end_time": "2020-04-07T23:20:17.049658", "exception": false, "start_time": "2020-04-07T23:20:16.811121", "status": "completed"} tags=[]
# Persist the constant-parameter simulation output.
df_simulation_data.to_csv(results_folder + f'seir_model_output.{target_location.lower()}.csv')
# + [markdown] papermill={"duration": 0.15563, "end_time": "2020-04-07T23:20:17.359627", "exception": false, "start_time": "2020-04-07T23:20:17.203997", "status": "completed"} tags=[]
# ### Com Parâmetros Dinâmicos
#
# Vamos experimentar agora introduzindo a possibilidade de aprender medidas de mitigação após o início do surto. Ou seja, experimentaremos com dois grupos de parâmetros: aqueles *antes* das medidas, e aqueles *depois* das medidas. Esses dois conjuntos de parâmetros serão estimados a partir dos dados fornecidos.
# + papermill={"duration": 101.288622, "end_time": "2020-04-07T23:21:58.799031", "exception": false, "start_time": "2020-04-07T23:20:17.510409", "status": "completed"} tags=[]
# Dynamic search space: three sequential parameter phases, each with its own
# (alpha, beta, gamma) and a 'duration' (in days) before the next phase starts.
# The trailing (0, 60) presumably bounds the fit window — TODO confirm against
# objective_for_simulation.
# NOTE(review): beta's lower bound is 0.0 in phase 1 but 0.2 in phases 2 and 3 —
# verify this asymmetry is intentional.
space = \
    {'dynamic':\
        ([{'alpha': hyperopt.hp.uniform('alpha_1', 0.99, 1.0), 'beta': hyperopt.hp.uniform('beta_1', 0.0, 1.0),
           'gamma': hyperopt.hp.uniform('gamma_1', 0.0, 1.0),
           'duration': hyperopt.hp.uniform('duration_1', 1.0, 30.0)},
          {'alpha': hyperopt.hp.uniform('alpha_2', 0.99, 1.0), 'beta': hyperopt.hp.uniform('beta_2', 0.2, 1.0),
           'gamma': hyperopt.hp.uniform('gamma_2', 0.0, 1.0),
           'duration': hyperopt.hp.uniform('duration_2', 1.0, 30.0)},
          {'alpha': hyperopt.hp.uniform('alpha_3', 0.99, 1.0), 'beta': hyperopt.hp.uniform('beta_3', 0.2, 1.0),
           'gamma': hyperopt.hp.uniform('gamma_3', 0.0, 1.0),
           'duration': hyperopt.hp.uniform('duration_3', 1.0, 30.0)}],
         0, 60)
    }
# Larger budget than the constant case: the space has 12 dimensions instead of 3.
trials = hyperopt.Trials()
best = hyperopt.fmin(objective_for_simulation, space, algo=hyperopt.tpe.suggest, max_evals=5000, trials=trials)
print("Best parameters found:", best)
# + papermill={"duration": 0.385894, "end_time": "2020-04-07T23:21:59.549057", "exception": false, "start_time": "2020-04-07T23:21:59.163163", "status": "completed"} tags=[]
def _best_schedule(name):
    """Assemble a three-phase parameter schedule from the optimizer result.

    Each phase pairs the fitted value ``<name>_i`` with its fitted length
    ``duration_i``; the schedule is padded out to the full epidemic length.
    """
    phases = [(best[f'{name}_{i}'], best[f'duration_{i}']) for i in (1, 2, 3)]
    return dynamic_parameter(phases, min_length=epidemic_duration_in_days)

alpha = _best_schedule('alpha')
beta = _best_schedule('beta')
gamma = _best_schedule('gamma')
# + [markdown] papermill={"duration": 0.368993, "end_time": "2020-04-07T23:22:00.324695", "exception": false, "start_time": "2020-04-07T23:21:59.955702", "status": "completed"} tags=[]
# Comparando as versões com e sem mitigação, temos o seguinte.
# + papermill={"duration": 3.156567, "end_time": "2020-04-07T23:22:03.886557", "exception": false, "start_time": "2020-04-07T23:22:00.729990", "status": "completed"} tags=[]
# Re-run the simulation with the fitted time-varying (mitigation) parameters.
df_simulation_mitigation_data = simulate(S=population_size - initially_infected,
                                         E=initially_infected,
                                         I=0,
                                         R=0,
                                         alpha=alpha, beta=beta, gamma=gamma,
                                         epidemic_start_date=epidemic_start_date,
                                         epidemic_duration_in_days=epidemic_duration_in_days,
                                         s_func=s,
                                         e_func=e,
                                         i_func=i,
                                         r_func=r,
                                         e_delta_func=e_delta,
                                         i_delta_func=i_delta,
                                         r_delta_func=r_delta)
plot_simulation_output(df_simulation_mitigation_data, zoom_on='2020-03-15', zoom_length=20, file_name='dynamic')
# + [markdown] papermill={"duration": 0.369326, "end_time": "2020-04-07T23:22:04.678598", "exception": false, "start_time": "2020-04-07T23:22:04.309272", "status": "completed"} tags=[]
# Como essa versão com mitigação se compara visualmente com os dados reais observados?
# + papermill={"duration": 1.12002, "end_time": "2020-04-07T23:22:06.163245", "exception": false, "start_time": "2020-04-07T23:22:05.043225", "status": "completed"} tags=[]
comparison_date = pd.Timestamp(2020, 4, 10)
# Overlay the mitigated simulation with the observed series for the 50 days
# before the comparison date.
df_simulation_mitigated_vs_real = pd.concat([df_simulation_mitigation_data, df_data_target], axis=1)
ax = sns.lineplot(data=df_simulation_mitigated_vs_real[['E+I','E+I+R', 'total_cases', 'total_cases_ESTIMATED']]\
                      [comparison_date - pd.DateOffset(days=50):comparison_date],
                  markers=True)
ax.set_title(f'Simulação (E+I) frente aos dados históriocos. Linha vertical=estimativa da primeira intervenção.')
# First intervention: `duration_1` days after the epidemic start.
intervention_date_1 = epidemic_start_date + pd.DateOffset(days=int(best['duration_1']))
plt.axvline(intervention_date_1, color='red')
# Second intervention: a further `duration_2` days after the first one.
intervention_date_2 = epidemic_start_date + pd.DateOffset(days=int(best['duration_1']))+ pd.DateOffset(days=int(best['duration_2']))
plt.axvline(intervention_date_2, color='red')
ax.figure.savefig(results_folder + 'seir_dynamic_fit_zoom' + f'_{target_location.lower()}' + results_suffix + '.png',
                  format='png')
# + [markdown] papermill={"duration": 0.366277, "end_time": "2020-04-07T23:22:06.977957", "exception": false, "start_time": "2020-04-07T23:22:06.611680", "status": "completed"} tags=[]
# Finalmente, comparemos os casos com e sem mitigação.
# + papermill={"duration": 1.056678, "end_time": "2020-04-07T23:22:08.394670", "exception": false, "start_time": "2020-04-07T23:22:07.337992", "status": "completed"} tags=[]
# Side-by-side comparison of the unmitigated vs. mitigated E+I curves.
ax = sns.lineplot(data=pd.concat([df_simulation_data['E+I'].rename('Sem mitigação'),
                                  df_simulation_mitigation_data['E+I'].rename('Com mitigação')], axis=1)['2020-03':'2020-07'])
ax.figure.savefig(results_folder + 'seir_constant_vs_dynamic' + f'_{target_location.lower()}' + results_suffix + '.png',
                  format='png')
# + [markdown] papermill={"duration": 0.377395, "end_time": "2020-04-07T23:22:09.204785", "exception": false, "start_time": "2020-04-07T23:22:08.827390", "status": "completed"} tags=[]
# Let's save the results.
# + papermill={"duration": 0.433703, "end_time": "2020-04-07T23:22:10.024877", "exception": false, "start_time": "2020-04-07T23:22:09.591174", "status": "completed"} tags=[]
df_simulation_mitigation_data.to_csv(results_folder + f'seir_dynamic_model_output.{target_location.lower()}.csv')
# + papermill={"duration": 0.376601, "end_time": "2020-04-07T23:22:10.852703", "exception": false, "start_time": "2020-04-07T23:22:10.476102", "status": "completed"} tags=[]
| results/notebooks/epidemic_model_seir.italy.EDITED.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import gym
import gym_pendrogone
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Imports specifically so we can render outputs in Jupyter.
from JSAnimation.IPython_display import display_animation
from matplotlib import animation
from IPython.display import display
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
def display_frames_as_gif(frames):
    """Render a list of RGB frames as an inline looping animation with controls."""
    image = plt.imshow(frames[0])
    plt.axis('off')

    def _draw(step):
        # FuncAnimation callback: swap in the frame for this step.
        image.set_data(frames[step])

    clip = animation.FuncAnimation(plt.gcf(), _draw, frames=len(frames), interval=25)
    display(display_animation(clip, default_mode='loop'))
# +
env = gym.make('Pendrogone-v0')
print(env.observation_space.shape)
print(env.action_space.shape)
# Per-rotor thrust; presumably hover thrust = mass * g / 2 for a 0.55 kg
# drone — TODO confirm against the Pendrogone env definition.
action = 0.55 * 9.81 / 2
# +
# Apply the same constant thrust to both rotors for 50 steps and record frames.
a = (action, action)
frames = []
obs = env.reset()
for _ in range(50):
    obs, r, done, _ = env.step(a)
#     print(obs)
    frames.append(env.render(mode = 'rgb_array'))
display_frames_as_gif(frames)
# -
| test_pendrogone.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as pyplot
# + [markdown] slideshow={"slide_type": "slide"}
# # The Data Set
#
# * This Session is based on Chapter 3 of ["Hands-On Machine Learning with Scikit-Learn and TensorFlow"](http://shop.oreilly.com/product/0636920052289.do)
#
# * We will use the MNIST dataset
#
#
#
# + slideshow={"slide_type": "slide"}
# Load MNIST (features + target in one table); first column is the row index.
data = pd.read_csv("data/mnist.tsv", index_col=[0], sep="\t")
data.head()
# + slideshow={"slide_type": "slide"}
# Split into pixel features X and label vector y (last column is 'target').
X, y = data.iloc[:, 0:-1], data['target']
print(X.shape)
print(y.shape)
# + [markdown] slideshow={"slide_type": "slide"}
# The data we loaded contains both training and testing chunks
#
# - The training is contained in the first 60k instances
#
# - The testing is contained in the remaining 10k instances
#
# + slideshow={"slide_type": "slide"}
# Peek at the first pixel values of one instance.
X.iloc[36000].head(20)
# + slideshow={"slide_type": "slide"}
some_digit = X.iloc[36000]
y[36000]
# + slideshow={"slide_type": "slide"}
# First 60k rows form the training set, the remaining 10k the test set.
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
# + [markdown] slideshow={"slide_type": "slide"}
# - But the data is ordered by class (all 0s first, then all 1s, etc...)
# - we need to shuffle it
# + slideshow={"slide_type": "slide"}
# Shuffle the training set so the classes are mixed.
shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train.iloc[shuffle_index], y_train.iloc[shuffle_index]
# + [markdown] slideshow={"slide_type": "slide"}
# # Binary classifier
# -
y_train
# + slideshow={"slide_type": "slide"}
# Binary targets: "is this digit a 5?"
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Classifying the Digits Data
#
# * We are going to train a simple K-Nearest Neighbors Classifier
#
# * We will only focus on the digit 5
# + slideshow={"slide_type": "slide"}
from sklearn.neighbors import KNeighborsClassifier
# Binary "is it a 5?" classifier with k=25 neighbors.
knn_cls = KNeighborsClassifier(n_neighbors=25)
knn_cls.fit(X_train, y_train_5)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Testing on a Single Value
#
# * We test it on the digit (some_digit), which we know represents 5
# + slideshow={"slide_type": "slide"}
X.iloc[31986]
# + slideshow={"slide_type": "slide"}
# predict() expects a 2-D input, hence the double brackets below.
knn_cls.predict(X.iloc[[31986]])
# + slideshow={"slide_type": "slide"}
# What is the difference between both?
# X.iloc[31986]   -> a Series (one row, 1-D)
# X.iloc[[31986]] -> a DataFrame (one-row table, 2-D)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Predicting the Accuracy
#
#
# * We use cross-validation to predict the accuracy
#
#
# * Number of correct predictions of the digit 5
# * Not that the RMSE is not appropriate here
#
# -
# Naive evaluation: predict on the same data used for training (optimistic).
y_train_pred = knn_cls.predict(X_train)
y_train_pred
y_train_5
len(X_train)
# Fraction of matching predictions == training accuracy.
sum(y_train_pred == y_train_5)/len(X_train)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Are the Results Good?
#
#
# * The score are pretty good, but that does not mean that our classifier does well
#
#
# * There are only 5421 instance that have 5 as a label
#
#
# * If we predict all values to be non-5, our accuracy will be $ (60000 - 5421.0) / 60000 = 0.90965$
#
#
# * This is not a good indicator of accuracy with this data set
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Confusion Matrix
#
# A much better way to evaluate the data is using a confusion matrix
# - Number of times class A is classified as class B
#
# -
# Number of actual 5s in the training labels.
sum(y_train_5)
# + slideshow={"slide_type": "slide"}
from sklearn.metrics import confusion_matrix
# Rows: actual class; columns: predicted class.
confusion_matrix(y_train_5, y_train_pred)
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="images/confusion_matrix.png" alt="drawing" style="width:600px;"/>
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### The Precision
#
# * Note that for a perfect predictor, the off-diagonal values will be 0
#
# * An important metric that can be extracted from the confusion matrix is the Precision
# $$
# \text{Precision} = \frac{TP}{TP+FP}
# $$
#
# * The % of correct predictions of the positive class
# * Among all the instances we predicted as 5, how many were correct?
#
# + slideshow={"slide_type": "slide"}
from sklearn.metrics import precision_score, recall_score
# Precision: of everything predicted as 5, the fraction that truly is 5 (TP / (TP + FP)).
precision_score(y_train_5, y_train_pred)
# + [markdown] slideshow={"slide_type": "slide"}
# ### The Recall
#
#
# * Another important metric that can be extracted from the confusion matrix is the Recall
# $$
# \text{Recall} = \frac{TP}{TP+FN}
# $$
#
#
# * The recall and precision are typically used together
#
#
# * The % of instances of the positive class we are able to identify
#
# * Among all the instances of the class 5 in our data, how many were correctly identified?
#
#
# + slideshow={"slide_type": "slide"}
# Recall: fraction of actual 5s the classifier catches (TP / (TP + FN)).
recall_score(y_train_5, y_train_pred)
# + [markdown] slideshow={"slide_type": "slide"}
# ### The F1 Score
#
# - The F$_1$ score is a convenient metric that combines both Precision and Recall
# $$
# F_1 = 2\times\frac{Precision\times Recall}{Precision + Recall}
# $$
# - It's a harmonic mean of the Precision and Recall
#
# + slideshow={"slide_type": "slide"}
from sklearn.metrics import f1_score
# F1: harmonic mean of precision and recall.
f1_score(y_train_5, y_train_pred)
# + [markdown] slideshow={"slide_type": "slide"}
# ### The Precision/Recall Tradeoff
#
#
# <img src="images/precision_recall.png" alt="drawing" style="width:600px;"/>
#
#
# * Scikit has a useful function `precision_recall_curve` to compute how the precisions and recall change with the threshold
# -
# Per-instance probability estimates; column 1 is P(is-a-5).
y_train_probs = knn_cls.predict_proba(X_train)
# + slideshow={"slide_type": "slide"}
from sklearn.metrics import precision_recall_curve
precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_train_probs[:,1])
# precision and recall don't have the same dim as threshold:
# a trailing 1 and 0 are appended to precision and recall respectively.
# + slideshow={"slide_type": "slide"}
# Precision and recall as functions of the decision threshold.
# The last precision/recall entries are dropped to match thresholds' length.
pyplot.figure(figsize=(8, 6))
pyplot.plot(thresholds, precisions[:-1], "b--", label="Precision", linewidth=2)
pyplot.plot(thresholds, recalls[:-1], "g-", label="Recall", linewidth=2)
pyplot.xlabel("Threshold", fontsize=16)
pyplot.legend(loc="upper left", fontsize=16)
pyplot.ylim([0.6, 1])
pyplot.xlim([0, 1])
# + slideshow={"slide_type": "slide"}
# Precision plotted directly against recall (the PR curve).
pyplot.figure(figsize=(8, 6))
pyplot.plot(recalls, precisions, "b-", linewidth=2)
pyplot.xlabel("Recall", fontsize=16)
pyplot.ylabel("Precision", fontsize=16)
_ = pyplot.axis([0, 1.1, 0.6, 1.1])
# + [markdown] slideshow={"slide_type": "slide"}
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Finding the Best Threshold
#
#
# * You can see that precision start to drop sharply around Recall of ~0.8
#
# * You could choose a threshold around that value
#
#
# * Remember that "there is no free lunch"
# * This is a rather well behaved example
| morea/ML_intro/resources/.ipynb_checkpoints/Evaluating_classification-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8 (polygence)
# language: python
# name: polygence
# ---
# # CNN using Tensorflow Keras on MRI Image Data - failed attempt (memory out)
# ## Data Use Agreements
# The data used for this project were provided in part by OASIS and ADNI.
#
# OASIS-3: Principal Investigators: <NAME>, <NAME>, <NAME>; NIH P50 AG00561, P30 NS09857781, P01 AG026276, P01 AG003991, R01 AG043434, UL1 TR000448, R01 EB009352. AV-45 doses were provided by Avid Radiopharmaceuticals, a wholly owned subsidiary of Eli Lilly.
#
# Data collection for this project was done through the Alzheimer's Disease Neuroimaging Initiative (ADNI) (National Institutes of Health Grant U01 AG024904) and DOD ADNI (Department of Defense award number W81XWH-12-2-0012). ADNI is funded by the National Institute on Aging, the National Institute of Biomedical Imaging and Bioengineering, and through generous contributions from the following: AbbVie, Alzheimer’s Association; Alzheimer’s Drug Discovery Foundation; Araclon Biotech; BioClinica, Inc.; Biogen; Bristol-Myers Squibb Company; CereSpir, Inc.; Cogstate; Eisai Inc.; Elan Pharmaceuticals, Inc.; Eli Lilly and Company; EuroImmun; F. Hoffmann-La Roche Ltd and its affiliated company Genentech, Inc.; Fujirebio; GE Healthcare; IXICO Ltd.; <NAME> Immunotherapy Research & Development, LLC.; Johnson & Johnson Pharmaceutical Research & Development LLC.; Lumosity; Lundbeck; Merck & Co., Inc.; Meso Scale Diagnostics, LLC.; NeuroRx Research; Neurotrack Technologies; Novartis Pharmaceuticals Corporation; Pfizer Inc.; Piramal Imaging; Servier; Takeda Pharmaceutical Company; and Transition Therapeutics. The Canadian Institutes of Health Research is providing funds to support ADNI clinical sites in Canada. Private sector contributions are facilitated by the Foundation for the National Institutes of Health (www.fnih.org). The grantee organization is the Northern California Institute for Research and Education, and the study is coordinated by the Alzheimer’s Therapeutic Research Institute at the University of Southern California. ADNI data are disseminated by the Laboratory for Neuro Imaging at the University of Southern California.
# ## Setup
# ### General Imports
# +
import nibabel.freesurfer.mghformat as mgh
from tqdm.notebook import tqdm
import os, sys
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', None)
# -
# ### Set up and test Tensorflow
import tensorflow as tf
# Report the versions of the main ML stack and GPU availability.
print(f"Tensor Flow Version: {tf.__version__}")
print(f"Keras Version: {tf.keras.__version__}")
print()
print(f"Python {sys.version}")
print(f"Pandas {pd.__version__}")
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
# +
# Log which device each op is placed on, then run a tiny matmul as a smoke test.
tf.debugging.set_log_device_placement(True)
# Create some tensors
a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
c = tf.matmul(a, b)
print(c)
# -
# ## Load data
# ### CSV
rootdir = '/home/jack/Code/GitHub/Polygence/'
df = pd.read_csv(rootdir + 'Data/OASIS/csv_files/oasis_3.csv')
df.head()
df = df.dropna(axis=1, how='all') # Drop any empty columns
df = df.dropna(axis=0, how='any') # Drop any rows with empty values
df = df.rename(columns={'id':'FS ID', 'dx1':'Diagnosis'}) # Rename columns
df = df.drop_duplicates(subset='Subject', keep='first') # Keep only the first visit
df = df.reset_index(drop=True) # Reset the index
# Re-derive the diagnosis from the clinical dementia rating: CDR < 0.5 -> control,
# everything else -> dementia (rows with missing CDR were dropped above).
df.loc[df['cdr'] < 0.5, 'Diagnosis'] = 'control'
df.loc[~(df['cdr'] < 0.5), 'Diagnosis'] = 'dementia'
# df['Diagnosis'].replace(['control','dementia'], [-1,1], inplace=True)
df = df.drop(df.columns.difference(['Subject', 'MR ID', 'FS ID', 'Diagnosis']), axis=1) # Drop all columns unless needed
print(df.shape)
df.head()
# Class balance of the resulting labels.
sns.countplot(x='Diagnosis', data=df)
# ### MRI
# Since all the files are already transformed via the freesurfer, I don't think we'll need to do any major preprocessing like cropping, flipping, or rotating.
# ```
# main_directory/
# control/
# mr_id_001/
# brain_image.mgz
# brain_image_transformed.mgz
# talairach.xfm
# mr_id_002/
# brain_image.mgz
# brain_image_transformed.mgz
# talairach.xfm
# dementia/
# mr_id_003/
# brain_image.mgz
# brain_image_transformed.mgz
# talairach.xfm
# mr_id_004/
# brain_image.mgz
# brain_image_transformed.mgz
# talairach.xfm
# ```
# +
def read_mri_scan(path):
    """Load an MRI volume (.mgz) from *path* as a float32 array."""
    scan = mgh.load(path)
    data = scan.get_fdata().astype("float32")
    # Guard against scans the preprocessing script failed to resample.
    if data.shape == (256, 256, 256):
        return data
    raise Exception(f"Image shape is not standard. You need to do resizing on {path}.")
def normalize(volume):
    """Clip an MRI volume to [0, 255] and rescale it to [0, 1].

    Fixes two issues with the previous version: the locals no longer shadow
    the ``min``/``max`` builtins, and the caller's array is no longer mutated
    in place by the clipping step.

    Parameters
    ----------
    volume : np.ndarray
        Raw voxel intensities.

    Returns
    -------
    np.ndarray
        New float32 array with values scaled into [0, 1].
    """
    lo, hi = 0.0, 255.0
    # np.clip returns a new array, so the input volume is left untouched.
    clipped = np.clip(volume, lo, hi)
    return ((clipped - lo) / (hi - lo)).astype("float32")
def process_mri_scan(path):
    """Load one MRI scan from *path* and return its normalized volume."""
    return normalize(read_mri_scan(path))
# -
# #### Example
# Smoke-test the pipeline on a single known scan.
example_path = '/home/jack/Code/GitHub/Polygence/Data/OASIS/mri_data/control/OAS30001_MR_d3132/OAS30001_Freesurfer53_d3132_brain_transformed.mgz'
volume = process_mri_scan(example_path)
print(volume.max(), volume.min(), volume.dtype, volume.shape)
# +
data_dir = os.path.join(rootdir, 'Data/OASIS/mri_data/')
control_scan_paths = []
dementia_scan_paths = []
# Top-level folders are the class labels ('control' / 'dementia').
labels = os.listdir(data_dir)
for label in labels:
    label_dir = os.path.join(data_dir, label)
    ids = os.listdir(label_dir)
    for id in tqdm(ids, desc=label):
        mr_dir = os.path.join(label_dir, id)
        # Pick the freesurfer-transformed scan inside each MR session folder.
        img_file = [file for file in os.listdir(mr_dir) if "transformed" in file]
        img_path = os.path.join(mr_dir, img_file[0])
        if label == 'control':
            control_scan_paths.append(img_path)
        else:
            dementia_scan_paths.append(img_path)
# -
print(len(control_scan_paths), len(dementia_scan_paths))
print(control_scan_paths[0])
print(dementia_scan_paths[0])
# ## Create the train and validation datasets
# Read in the control scans
control_scans = np.array([process_mri_scan(path) for path in tqdm(control_scan_paths)])
# Read in the dementia scans
dementia_scans = np.array([process_mri_scan(path) for path in tqdm(dementia_scan_paths)])
# Shuffle the data. Shuffling each class independently is safe here: the label
# arrays created below are constant within each class.
np.random.shuffle(control_scans)
np.random.shuffle(dementia_scans)
# Create the label arrays
# assign 1 for dementia patients, and 0 for control
control_labels = np.full(shape=len(control_scans), fill_value=0)
dementia_labels = np.full(shape=len(dementia_scans), fill_value=1)
| CNN/tf_cnn_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# Load the insider-threat activity tables (device, email, HTTP, logon) plus
# the employee roster.
device_info = pd.read_csv('device_info.csv')
email_info = pd.read_csv('email_info.csv')
employee_info = pd.read_csv('employee_info.csv')
http_info = pd.read_csv('http_info.csv')
logon_info = pd.read_csv('logon_info.csv')
employee_info.head()
# Filter Emails Sent Within the Company (keep only external recipients).
# NOTE(review): assumes every 'to' address contains exactly one '@' — confirm
# against the raw data.
fil_dtaa = email_info['to'].apply(lambda t: t.split('@')[1] != 'dtaa.com')
df_fil_dtaa = email_info[fil_dtaa]
# Filter Group Emails (multiple recipients are ';'-separated)
fil_group = df_fil_dtaa['to'].apply(lambda t: ';' not in t)
df_fil_group = df_fil_dtaa[fil_group]
# Filter Non-Defense Contract Company Emails (drop common consumer/ISP domains)
email_to_rm = ['comcast.net','aol.com','gmail.com','yahoo.com','cox.net','hotmail.com','verizon.net',
               'juno.com','netzero.com','msn.com','charter.net','earthlink.net','sbcglobal.net','bellsouth.net',
               'optonline.net','hp.com']
fil_common_email = df_fil_group['to'].apply(lambda t: t.split('@')[1] not in email_to_rm)
df_fil_common_email = df_fil_group[fil_common_email]
# Filter All Emails with No Attachments
fil_no_attach = df_fil_common_email['attachments'].apply(lambda t: t != 0)
df_pure = df_fil_common_email[fil_no_attach]
# Suspect Company Emails (remaining external destination domains)
print(df_pure['to'].apply(lambda t: t.split('@')[1]).unique())
# Create DataFrames With Only Raytheon, Boeing, Harris, Northropgrumman, and Lockheed Emails
df_ray = df_pure[df_pure['to'].apply(lambda t: t.split('@')[1] == 'raytheon.com')]
df_boeing = df_pure[df_pure['to'].apply(lambda t: t.split('@')[1] == 'boeing.com')]
df_harris = df_pure[df_pure['to'].apply(lambda t: t.split('@')[1] == 'harris.com')]
df_north = df_pure[df_pure['to'].apply(lambda t: t.split('@')[1] == 'northropgrumman.com')]
df_lock = df_pure[df_pure['to'].apply(lambda t: t.split('@')[1] == 'lockheed.com')]
def _sender_counts(df_company):
    """Build an (email, counts_from) table of messages per DTAA sender.

    Replaces four hand-copied blocks of the same groupby boilerplate.
    """
    counts = df_company.groupby('from').size()
    return pd.DataFrame({'email': list(counts.index), 'counts_from': list(counts)})

df_ray_cnts = _sender_counts(df_ray)
df_ray_cnts.sort_values('counts_from',ascending=False).head(10)
x_ray = list(df_ray_cnts.email.unique())
y_ray = list(df_ray_cnts.counts_from)
#plot_ray = plt.figure(figsize=(20,5))
# Bug fix: plt.subplots returns a (figure, axes) pair; the old code bound the
# whole tuple to a variable named `ax`. Unpack it properly.
fig, ax = plt.subplots(1,1,figsize=(20,5))
plt.plot(x_ray,y_ray)
plt.yticks(size=15)
# Bug fix: a second bare xticks() call used to reset the labels and silently
# drop the rotation applied earlier; set positions, labels, rotation and size
# in a single call instead.
plt.xticks(range(len(list(df_ray_cnts.email))), list(df_ray_cnts.email), rotation=90, size=15)
plt.title('Number of Emails Sent to Raytheon Email Address From DTAA Email Addresses',size=20)
plt.xlabel('DTAA Email Addresses',size=15)
plt.ylabel('Number Emails Sent',size=15)
plt.show()
df_boeing_cnts = _sender_counts(df_boeing)
df_boeing_cnts.sort_values('counts_from',ascending=False).head(10)
df_harris_cnts = _sender_counts(df_harris)
df_harris_cnts.sort_values('counts_from',ascending=False).head(10)
df_north_cnts = _sender_counts(df_north)
df_north_cnts.sort_values('counts_from',ascending=False).head(10)
df_lock
| Primary_Notebook_Part3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import random
import ipywidgets as widgets
from sidepanel import SidePanel
import regulus
from regulus.core import UNIT_RANGE
from ipyregulus import BaseTreeView, TreeView
# -
gauss = regulus.load('gauss4')
# ### Associate a random color id to a point
def cmap_id(context, id):
    """Assign the point a pseudo-random color id drawn uniformly from [0, 1]."""
    color = random.uniform(0, 1)
    return color
gauss.add_attr(cmap_id)
# ### Assign color based on max/min in the partition
def max_color(tree, node):
    """Color of the partition taken from the point at its maximum."""
    max_idx = node.data.minmax_idx[1]
    return tree['cmap_id'][max_idx]
gauss.tree.add_attr(max_color, range=UNIT_RANGE)
def min_color(tree, node):
    """Color of the partition taken from the point at its minimum."""
    min_idx = node.data.minmax_idx[0]
    return tree['cmap_id'][min_idx]
gauss.tree.add_attr(min_color, range=UNIT_RANGE)
# ### View the tree
# Build the tree view, dock it in a side panel, and expose both color attrs.
v = TreeView(gauss)
sp = SidePanel(v)
v.add_attr('max_color')
v.add_attr('min_color')
# ### restricted version
def cmap_id(context, id):
    """Map negative ids into [0, 0.5] and non-negative ids into [0.5, 1]."""
    lo, hi = (0, 0.5) if id < 0 else (0.5, 1)
    return random.uniform(lo, hi)
gauss.add_attr(cmap_id)
def min_color(tree, node):
    """Color of the partition taken from the point at its minimum."""
    low_point = node.data.minmax_idx[0]
    return tree['cmap_id'][low_point]
gauss.tree.add_attr(min_color, range=UNIT_RANGE)
# Rebuild the view so it picks up the restricted color attributes.
v = TreeView(gauss)
v.add_attr('max_color')
v.add_attr('min_color')
v
| examples/8-minmax color.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# packages and libraries
import matplotlib.pyplot as plt, tkinter as tk, tkinter.filedialog
from os.path import normpath,split
from os import getenv
from apsmodule import APS
# Default APS data directory lives under the user's OneDrive.
onedrive=getenv('OneDrive')
APSdir=normpath(onedrive+'\\Data\\APS')
# clean/create filenames
filenames=[]
#%% choose files via a native file dialog (root window is hidden)
root=tk.Tk()
root.withdraw()
filenames+=tkinter.filedialog.askopenfilenames(parent=root,initialdir=APSdir, title='Please select APS files',filetypes=[('DAT','.DAT')])
#%% load files into data
plt.close('all')
data=[]
data+=APS.import_from_files(filenames,sqrt=False,trunc=-8)
#%% analyze data
plt.close('all')
for i in data:
    i.analyze(0)
#%% overlay all the data in one figure
fig=plt.figure('APS overlay')
for i in data: i.plot()
# +
#%% Saving APS and APS fit and HOMO with error (next to the input files)
location=split(filenames[0])[0]
APS.save_aps_csv(data,location)
APS.save_aps_fit_csv(data,location)
APS.save_homo_error_csv(data,location)
# +
#%% smoothing DOS (Savitzky-Golay-style window 7, order 3 — see APS.DOSsmooth)
_=[i.DOSsmooth(7,3,plot=True) for i in data]
# +
#%% overlay all the DOS
plt.figure('DOS')
for i in data: i.DOSplot()
# -
#%% Saving DOS into csv
location=split(filenames[0])[0]
APS.save_DOS_csv(data,location)
| .ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pandas as pd
# Held-out feature table; used directly for prediction in run_model().
test = pd.read_csv('test.csv')
train = pd.read_csv('train.csv')
test.head()
train.head()
# Features: height/weight/age; target: the 'male' flag flattened to 1-D.
x_train = train[['height','weight','age']]
y_train = np.ravel(train[['male']])
def run_model(model, regressor = 0):
    """Fit *model* on the global training data and predict on the test set.

    NOTE(review): the ``regressor`` flag is currently unused; it is kept only
    for interface compatibility with existing callers.
    """
    fitted = model.fit(x_train, y_train)
    return fitted.predict(test)
# Fit three tree-based models on identical features and compare predictions.
clf_classifier = tree.DecisionTreeClassifier()
clf_regressor = tree.DecisionTreeRegressor()
clf_rnd_forest = RandomForestClassifier(n_estimators=10)
prediction_classifier = run_model(clf_classifier)
prediction_regressor = run_model(clf_regressor)
prediction_rnd_forest = run_model(clf_rnd_forest)
results = pd.read_csv('results.csv')
results.head()
results['classifier']=prediction_classifier
results['regressor'] = prediction_regressor
results['rnd_forest'] = prediction_rnd_forest
results.head()
# NOTE(review): Pearson correlation against the true labels is used here as an
# agreement score, not classification accuracy in the usual sense.
accuracy_classifier = results['male'].corr(results['classifier'])
accuracy_regressor = results['male'].corr(results['regressor'])
accuracy_rnd_forest = results['male'].corr(results['rnd_forest'])
print(accuracy_classifier)
print(accuracy_regressor)
print(accuracy_rnd_forest)
| .ipynb_checkpoints/gender_predictor-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Siren Exploration
#
# This is a colab to explore properties of the Siren MLP, proposed in our work [Implicit Neural Activations with Periodic Activation Functions](https://vsitzmann.github.io/siren).
#
#
# We will first implement a streamlined version of Siren for fast experimentation. This lacks the code to easily do baseline comparisons - please refer to the main code for that - but will greatly simplify the code!
#
# **Make sure that you have enabled the GPU under Edit -> Notebook Settings!**
#
# We will then reproduce the following results from the paper:
# * [Fitting an image](#section_1)
# * [Fitting an audio signal](#section_2)
# * [Solving Poisson's equation](#section_3)
# * [Initialization scheme & distribution of activations](#activations)
# * [Distribution of activations is shift-invariant](#shift_invariance)
#
# We will also explore Siren's [behavior outside of the training range](#out_of_range).
#
# Let's go! First, some imports, and a function to quickly generate coordinate grids.
# +
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
import os
from PIL import Image
from torchvision.transforms import Resize, Compose, ToTensor, Normalize
import numpy as np
import skimage
import matplotlib.pyplot as plt
import time
def get_mgrid(sidelen, dim=2):
    '''Return a (sidelen**dim, dim) tensor of coordinates covering [-1, 1]^dim.
    sidelen: number of samples per axis
    dim: number of spatial dimensions'''
    axis = torch.linspace(-1, 1, steps=sidelen)
    grid = torch.stack(torch.meshgrid(*([axis] * dim)), dim=-1)
    return grid.reshape(-1, dim)
# -
# Now, we code up the sine layer, which will be the basic building block of SIREN. This is a much more concise implementation than the one in the main code, as here, we aren't concerned with the baseline comparisons.
# +
class SineLayer(nn.Module):
    """Linear layer followed by a sin(omega_0 * x) nonlinearity.

    See paper Sec. 3.2, final paragraph, and supplement Sec. 1.5 for a
    discussion of omega_0.  With is_first=True, omega_0 is a frequency factor
    that multiplies the activations before the nonlinearity; different signals
    may need a different first-layer omega_0 (a hyperparameter).  With
    is_first=False the weights are divided by omega_0 so the activation
    magnitude stays constant while gradients to the weight matrix are boosted
    (see supplement Sec. 1.5).
    """

    def __init__(self, in_features, out_features, bias=True,
                 is_first=False, omega_0=30):
        super().__init__()
        self.omega_0 = omega_0
        self.is_first = is_first
        self.in_features = in_features

        self.linear = nn.Linear(in_features, out_features, bias=bias)
        self.init_weights()

    def init_weights(self):
        # Uniform init in [-bound, bound]; the bound depends on layer position.
        if self.is_first:
            bound = 1 / self.in_features
        else:
            bound = np.sqrt(6 / self.in_features) / self.omega_0
        with torch.no_grad():
            self.linear.weight.uniform_(-bound, bound)

    def forward(self, input):
        pre_activation = self.omega_0 * self.linear(input)
        return torch.sin(pre_activation)

    def forward_with_intermediate(self, input):
        # Also return the pre-activation, for visualizing its distribution.
        intermediate = self.omega_0 * self.linear(input)
        return torch.sin(intermediate), intermediate
class Siren(nn.Module):
    """MLP of SineLayers, optionally ending in a specially-initialized linear layer.

    in_features / out_features: input and output dimensionality.
    hidden_features / hidden_layers: width and depth of the hidden part.
    outermost_linear: if True, the last layer is a plain nn.Linear whose
        weights follow the same SIREN init scheme as the hidden layers.
    first_omega_0 / hidden_omega_0: frequency factors for the first and the
        remaining sine layers (see SineLayer).
    """
    def __init__(self, in_features, hidden_features, hidden_layers, out_features, outermost_linear=False,
                 first_omega_0=30, hidden_omega_0=30.):
        super().__init__()

        self.net = []
        self.net.append(SineLayer(in_features, hidden_features,
                                  is_first=True, omega_0=first_omega_0))

        for i in range(hidden_layers):
            self.net.append(SineLayer(hidden_features, hidden_features,
                                      is_first=False, omega_0=hidden_omega_0))

        if outermost_linear:
            final_linear = nn.Linear(hidden_features, out_features)
            # Match the hidden-layer SIREN init (supplement Sec. 1.5).
            with torch.no_grad():
                final_linear.weight.uniform_(-np.sqrt(6 / hidden_features) / hidden_omega_0,
                                             np.sqrt(6 / hidden_features) / hidden_omega_0)
            self.net.append(final_linear)
        else:
            self.net.append(SineLayer(hidden_features, out_features,
                                      is_first=False, omega_0=hidden_omega_0))

        self.net = nn.Sequential(*self.net)

    def forward(self, coords):
        # Detach and re-enable grad so derivatives can be taken w.r.t. the
        # input coordinates themselves (needed for gradient/Laplacian losses).
        coords = coords.clone().detach().requires_grad_(True)
        output = self.net(coords)
        return output, coords

    def forward_with_activations(self, coords, retain_grad=False):
        '''Returns not only model output, but also intermediate activations.
        Only used for visualizing activations later!'''
        # Imported locally: in the original notebook OrderedDict was only
        # imported in a much later cell, so calling this method before that
        # cell ran raised a NameError.
        from collections import OrderedDict

        activations = OrderedDict()

        activation_count = 0
        x = coords.clone().detach().requires_grad_(True)
        activations['input'] = x
        for i, layer in enumerate(self.net):
            if isinstance(layer, SineLayer):
                x, intermed = layer.forward_with_intermediate(x)

                if retain_grad:
                    x.retain_grad()
                    intermed.retain_grad()

                # Sine layers record both their pre-activation ...
                activations['_'.join((str(layer.__class__), "%d" % activation_count))] = intermed
                activation_count += 1
            else:
                x = layer(x)

                if retain_grad:
                    x.retain_grad()

            # ... and (like every layer) their output.
            activations['_'.join((str(layer.__class__), "%d" % activation_count))] = x
            activation_count += 1

        return activations
# -
# And finally, differential operators that allow us to leverage autograd to compute gradients, the laplacian, etc.
# +
def laplace(y, x):
    """Laplacian of y w.r.t. x, i.e. the divergence of the gradient."""
    return divergence(gradient(y, x), x)
def divergence(y, x):
    """Divergence of vector field y w.r.t. x: sum_i d y_i / d x_i."""
    div = 0.
    for component in range(y.shape[-1]):
        partial = torch.autograd.grad(
            y[..., component], x, torch.ones_like(y[..., component]),
            create_graph=True)[0]
        div = div + partial[..., component:component + 1]
    return div
def gradient(y, x, grad_outputs=None):
    """Gradient of y w.r.t. x, keeping the graph for higher-order derivatives."""
    weights = torch.ones_like(y) if grad_outputs is None else grad_outputs
    return torch.autograd.grad(y, [x], grad_outputs=weights, create_graph=True)[0]
# -
# # Experiments
#
# For the image fitting and poisson experiments, we'll use the classic cameraman image.
def get_cameraman_tensor(sidelength):
    """Return skimage's cameraman test image as a normalized tensor,
    resized to `sidelength` (Normalize maps [0, 1] pixels to [-1, 1])."""
    pipeline = Compose([
        Resize(sidelength),
        ToTensor(),
        Normalize(torch.Tensor([0.5]), torch.Tensor([0.5]))
    ])
    return pipeline(Image.fromarray(skimage.data.camera()))
# <a id='section_1'></a>
# ## Fitting an image
#
# First, let's simply fit that image!
#
# We seek to parameterize a greyscale image $f(x)$ with pixel coordinates $x$ with a SIREN $\Phi(x)$.
#
# That is we seek the function $\Phi$ such that:
# $\mathcal{L}=\int_{\Omega} \lVert \Phi(\mathbf{x}) - f(\mathbf{x}) \rVert\mathrm{d}\mathbf{x}$
# is minimized, in which $\Omega$ is the domain of the image.
#
# We write a little dataset that does nothing except calculating per-pixel coordinates:
class ImageFitting(Dataset):
    """Single-item dataset pairing per-pixel coordinates with greyscale values."""
    def __init__(self, sidelength):
        super().__init__()
        # Coordinates are independent of the image content.
        self.coords = get_mgrid(sidelength, 2)
        img = get_cameraman_tensor(sidelength)
        self.pixels = img.permute(1, 2, 0).view(-1, 1)

    def __len__(self):
        return 1

    def __getitem__(self, idx):
        if idx > 0:
            raise IndexError
        return self.coords, self.pixels
# + [markdown] pycharm={"name": "#%% md\n"}
# Let's instantiate the dataset and our Siren. As pixel coordinates are 2D, the siren has 2 input features, and since the image is grayscale, it has one output channel.
# +
# Single-item dataset: each "batch" is the whole 256x256 image.
cameraman = ImageFitting(256)
dataloader = DataLoader(cameraman, batch_size=1, pin_memory=True, num_workers=0)
# 2 input features (x, y pixel coordinates), 1 output channel (greyscale).
img_siren = Siren(in_features=2, out_features=1, hidden_features=256,
                  hidden_layers=3, outermost_linear=True)
img_siren.cuda()
# -
# We now fit Siren in a simple training loop. Within only hundreds of iterations, the image and its gradients are approximated well.
# +
total_steps = 500 # Since the whole image is our dataset, this just means 500 gradient descent steps.
steps_til_summary = 10
optim = torch.optim.Adam(lr=1e-4, params=img_siren.parameters())
# Fetch the single (coords, pixels) batch once; it never changes.
model_input, ground_truth = next(iter(dataloader))
model_input, ground_truth = model_input.cuda(), ground_truth.cuda()
for step in range(total_steps):
    model_output, coords = img_siren(model_input)
    # Plain per-pixel MSE against the ground-truth image.
    loss = ((model_output - ground_truth)**2).mean()
    if not step % steps_til_summary:
        print("Step %d, Total loss %0.6f" % (step, loss))
        # Visualize the fit alongside its gradient norm and Laplacian.
        img_grad = gradient(model_output, coords)
        img_laplacian = laplace(model_output, coords)
        fig, axes = plt.subplots(1,3, figsize=(18,6))
        axes[0].imshow(model_output.cpu().view(256,256).detach().numpy())
        axes[1].imshow(img_grad.norm(dim=-1).cpu().view(256,256).detach().numpy())
        axes[2].imshow(img_laplacian.cpu().view(256,256).detach().numpy())
        plt.show()
    optim.zero_grad()
    loss.backward()
    optim.step()
# -
# <a id='out_of_range'></a>
# ## Case study: Siren periodicity & out-of-range behavior
#
# It is known that the sum of two periodic signals is itself periodic with a period that is equal to the least common multiple of the periods of the two summands, if and only if the two periods are rational multiples of each other. If the ratio of the two periods is irrational, then their sum will *not* be periodic itself.
#
# Due to the floating-point representation in neural network libraries, this case cannot occur in practice, and all functions parameterized by Siren indeed have to be periodic.
#
# Yet, the period of the resulting function may in practice be several orders of magnitudes larger than the period of each Siren neuron!
#
# Let's test this with two sines.
# Demo: the sum of two sines with rational vs. (pseudo-)irrational period ratio.
with torch.no_grad():
    coords = get_mgrid(2**10, 1) * 5 * np.pi
    sin_1 = torch.sin(coords)
    sin_2 = torch.sin(coords * 2)
    # Renamed from `sum`, which shadowed the Python builtin of the same name.
    superposition = sin_1 + sin_2

    fig, ax = plt.subplots(figsize=(16,2))
    ax.plot(coords, superposition)
    ax.plot(coords, sin_1)
    ax.plot(coords, sin_2)
    plt.title("Rational multiple")
    plt.show()

    # Period ratio pi: irrational on paper, but float-representable in practice.
    sin_1 = torch.sin(coords)
    sin_2 = torch.sin(coords * np.pi)
    superposition = sin_1 + sin_2

    fig, ax = plt.subplots(figsize=(16,2))
    ax.plot(coords, superposition)
    ax.plot(coords, sin_1)
    ax.plot(coords, sin_2)
    plt.title("Pseudo-irrational multiple")
    plt.show()
# Though the second plot looks periodic, closer inspection shows that the period of the blue line is indeed larger than the range we're sampling here.
#
# Let's take a look at what the Siren we just trained looks like outside its training domain!
# + pycharm={"name": "#%%\n"}
with torch.no_grad():
    # Sample a coordinate range 50x larger than the (-1, 1) training domain.
    out_of_range_coords = get_mgrid(1024, 2) * 50
    model_out, _ = img_siren(out_of_range_coords.cuda())

    fig, ax = plt.subplots(figsize=(16,16))
    ax.imshow(model_out.cpu().view(1024,1024).numpy())
    plt.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# Though there is some self-similarity, the signal is not repeated on this range of (-50, 50).
# -
# ## Fitting an audio signal
# <a id='section_2'></a>
#
# Here, we'll use Siren to parameterize an audio signal - i.e., we seek to parameterize an audio waverform $f(t)$ at time points $t$ by a SIREN $\Phi$.
#
# That is we seek the function $\Phi$ such that: $\mathcal{L}=\int_\Omega \lVert \Phi(t) - f(t) \rVert \mathrm{d}t$ is minimized, in which $\Omega$ is the domain of the waveform.
#
# For the audio, we'll use the bach sonata:
# +
import scipy.io.wavfile as wavfile
import io
from IPython.display import Audio
# Fetch the ground-truth audio on first run. The `# !wget` line is a Jupyter
# shell magic escaped by jupytext; `pass` keeps this representation valid as
# plain Python, where the magic line is only a comment (previously the `if`
# had an empty body and was a SyntaxError outside the notebook).
if not os.path.exists('gt_bach.wav'):
    # !wget https://vsitzmann.github.io/siren/img/audio/gt_bach.wav
    pass
# + [markdown] pycharm={"name": "#%% md\n"}
# Let's build a little dataset that computes coordinates for audio files:
# -
class AudioFile(torch.utils.data.Dataset):
    """Single-item dataset mapping time coordinates to a waveform's amplitudes."""
    def __init__(self, filename):
        self.rate, self.data = wavfile.read(filename)
        self.data = self.data.astype(np.float32)
        self.timepoints = get_mgrid(len(self.data), 1)

    def get_num_samples(self):
        return self.timepoints.shape[0]

    def __len__(self):
        return 1

    def __getitem__(self, idx):
        # Peak-normalize the waveform and shape it as (num_samples, 1).
        peak = np.max(np.abs(self.data))
        amplitude = torch.Tensor(self.data / peak).view(-1, 1)
        return self.timepoints, amplitude
# + [markdown] pycharm={"name": "#%% md\n"}
# Let's instantiate the Siren. As this audio signal has a much higher spatial frequency on the range of -1 to 1, we increase the $\omega_0$ in the first layer of siren.
# +
# Single-item dataset holding the full waveform and its time coordinates.
bach_audio = AudioFile('gt_bach.wav')
dataloader = DataLoader(bach_audio, shuffle=True, batch_size=1, pin_memory=True, num_workers=0)
# Note that we increase the frequency of the first layer to match the higher frequencies of the
# audio signal. Equivalently, we could also increase the range of the input coordinates.
audio_siren = Siren(in_features=1, out_features=1, hidden_features=256,
                    hidden_layers=3, first_omega_0=3000, outermost_linear=True)
audio_siren.cuda()
# -
# Let's have a quick listen to ground truth:
# +
# Sample rate is needed for playback; the dataloader already yields the data.
rate, _ = wavfile.read('gt_bach.wav')
model_input, ground_truth = next(iter(dataloader))
Audio(ground_truth.squeeze().numpy(),rate=rate)
# -
# We now fit the Siren to this signal.
# +
total_steps = 1000
steps_til_summary = 100
optim = torch.optim.Adam(lr=1e-4, params=audio_siren.parameters())
# The dataset yields a single (timepoints, amplitude) pair; fetch it once.
model_input, ground_truth = next(iter(dataloader))
model_input, ground_truth = model_input.cuda(), ground_truth.cuda()
for step in range(total_steps):
    model_output, coords = audio_siren(model_input)
    loss = F.mse_loss(model_output, ground_truth)
    if not step % steps_til_summary:
        print("Step %d, Total loss %0.6f" % (step, loss))
        # Side-by-side plot: fitted waveform (left) vs. ground truth (right).
        fig, axes = plt.subplots(1,2)
        axes[0].plot(coords.squeeze().detach().cpu().numpy(),model_output.squeeze().detach().cpu().numpy())
        axes[1].plot(coords.squeeze().detach().cpu().numpy(),ground_truth.squeeze().detach().cpu().numpy())
        plt.show()
    optim.zero_grad()
    loss.backward()
    optim.step()
# + pycharm={"name": "#%%\n"}
# Play back the fitted waveform for comparison with the ground truth above.
final_model_output, coords = audio_siren(model_input)
Audio(final_model_output.cpu().detach().squeeze().numpy(),rate=rate)
# + [markdown] pycharm={"name": "#%% md\n"}
# As we can see, within few iterations, Siren has approximated the audio signal very well!
# -
# <a id='section_3'></a>
# ## Solving Poisson's equation
#
# Now, let's make it a bit harder. Let's say we want to reconstruct an image but we only have access to its gradients!
#
# That is, we now seek the function $\Phi$ such that:
# $\mathcal{L}=\int_{\Omega} \lVert \nabla\Phi(\mathbf{x}) - \nabla f(\mathbf{x}) \rVert\mathrm{d}\mathbf{x}$
# is minimized, in which $\Omega$ is the domain of the image.
# + pycharm={"name": "#%%\n"}
import scipy.ndimage
class PoissonEqn(Dataset):
    """Single-item dataset exposing the cameraman image together with its
    Sobel gradients and Laplacian, for supervision through derivatives."""
    def __init__(self, sidelength):
        super().__init__()
        img = get_cameraman_tensor(sidelength)

        # Compute gradient and laplacian
        img_np = img.numpy()
        grads_x = torch.from_numpy(scipy.ndimage.sobel(img_np, axis=1).squeeze(0)[..., None])
        grads_y = torch.from_numpy(scipy.ndimage.sobel(img_np, axis=2).squeeze(0)[..., None])
        self.grads = torch.stack((grads_x, grads_y), dim=-1).view(-1, 2)

        self.laplace = torch.from_numpy(scipy.ndimage.laplace(img_np).squeeze(0)[..., None])

        self.pixels = img.permute(1, 2, 0).view(-1, 1)
        self.coords = get_mgrid(sidelength, 2)

    def __len__(self):
        return 1

    def __getitem__(self, idx):
        return self.coords, {'pixels': self.pixels, 'grads': self.grads, 'laplace': self.laplace}
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Instantiate SIREN model
# + pycharm={"name": "#%%\n"}
# 128x128 version of the image; the dataset supplies gradients and Laplacian.
cameraman_poisson = PoissonEqn(128)
dataloader = DataLoader(cameraman_poisson, batch_size=1, pin_memory=True, num_workers=0)
poisson_siren = Siren(in_features=2, out_features=1, hidden_features=256,
                      hidden_layers=3, outermost_linear=True)
poisson_siren.cuda()
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Define the loss function
# + pycharm={"name": "#%%\n"}
def gradients_mse(model_output, coords, gt_gradients):
    """Mean squared error between the model's spatial gradients and
    ground-truth gradients (summed over the gradient components)."""
    predicted = gradient(model_output, coords)
    return torch.mean((predicted - gt_gradients).pow(2).sum(-1))
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Train the model
# + pycharm={"is_executing": true, "name": "#%%\n"}
total_steps = 1000
steps_til_summary = 10
optim = torch.optim.Adam(lr=1e-4, params=poisson_siren.parameters())
model_input, gt = next(iter(dataloader))
# Move the ground-truth dict (pixels / grads / laplace) and input to the GPU.
gt = {key: value.cuda() for key, value in gt.items()}
model_input = model_input.cuda()
for step in range(total_steps):
    start_time = time.time()
    model_output, coords = poisson_siren(model_input)
    # Supervise only through the image gradients, never the pixels themselves.
    train_loss = gradients_mse(model_output, coords, gt['grads'])
    if not step % steps_til_summary:
        print("Step %d, Total loss %0.6f, iteration time %0.6f" % (step, train_loss, time.time() - start_time))
        img_grad = gradient(model_output, coords)
        img_laplacian = laplace(model_output, coords)
        fig, axes = plt.subplots(1, 3, figsize=(18, 6))
        axes[0].imshow(model_output.cpu().view(128,128).detach().numpy())
        axes[1].imshow(img_grad.cpu().norm(dim=-1).view(128,128).detach().numpy())
        axes[2].imshow(img_laplacian.cpu().view(128,128).detach().numpy())
        plt.show()
    optim.zero_grad()
    train_loss.backward()
    optim.step()
# + [markdown] pycharm={"name": "#%% md\n"}
# <a id='activations'></a>
# ## Initialization scheme & distribution of activations
#
# We now reproduce the empirical result on the distribution of activations, and will thereafter show empirically that the distribution of activations is shift-invariant as well!
# + pycharm={"name": "#%%\n"}
from collections import OrderedDict
import matplotlib
import numpy.fft as fft
import scipy.stats as stats
def eformat(f, prec, exp_digits):
    """Format `f` in scientific notation with `prec` mantissa decimals and a
    fixed number of exponent digits."""
    mantissa, exp = ("%.*e" % (prec, f)).split('e')
    # add 1 to digits as 1 is taken by sign +/-
    return "%se%+0*d" % (mantissa, exp_digits + 1, int(exp))
def format_x_ticks(x, pos):
    """Matplotlib FuncFormatter callback: compact scientific x-tick labels."""
    return eformat(x, 0, 1)
def format_y_ticks(x, pos):
    """Matplotlib FuncFormatter callback: compact scientific y-tick labels."""
    return eformat(x, 0, 1)
def get_spectrum(activations):
    """One-sided FFT magnitude of the channel-summed activations,
    clipped to the first 100 frequency bins."""
    n = activations.shape[0]
    max_freq = 100
    signal = activations.numpy().astype(np.double).sum(axis=-1)
    magnitude = np.abs(fft.fft(signal, axis=0)[:n // 2])
    freq = fft.fftfreq(n, 2. / n)[:n // 2]
    return freq[:max_freq], magnitude[:max_freq]
def plot_all_activations_and_grads(activations):
    """Plot, for each entry of `activations` (as produced by
    Siren.forward_with_activations with retain_grad=True), one row of four
    panels: activation histogram, activation spectrum, gradient histogram
    and gradient spectrum.  Rows with idx > 1 additionally overlay a
    reference density: arcsine pdf for even idx, standard normal for odd idx.
    """
    num_cols = 4
    num_rows = len(activations)
    fig_width = 5.5
    fig_height = num_rows/num_cols*fig_width
    fig_height = 9  # NOTE(review): overrides the computed height just above
    fontsize = 5
    fig, axs = plt.subplots(num_rows, num_cols, gridspec_kw={'hspace': 0.3, 'wspace': 0.2},
                            figsize=(fig_width, fig_height), dpi=300)
    axs[0][0].set_title("Activation Distribution", fontsize=7, fontfamily='serif', pad=5.)
    axs[0][1].set_title("Activation Spectrum", fontsize=7, fontfamily='serif', pad=5.)
    axs[0][2].set_title("Gradient Distribution", fontsize=7, fontfamily='serif', pad=5.)
    axs[0][3].set_title("Gradient Spectrum", fontsize=7, fontfamily='serif', pad=5.)
    x_formatter = matplotlib.ticker.FuncFormatter(format_x_ticks)
    y_formatter = matplotlib.ticker.FuncFormatter(format_y_ticks)
    spec_rows = []  # NOTE(review): never used afterwards
    for idx, (key, value) in enumerate(activations.items()):
        grad_value = value.grad.cpu().detach().squeeze(0)
        flat_grad = grad_value.view(-1)
        axs[idx][2].hist(flat_grad, bins=256, density=True)
        value = value.cpu().detach().squeeze(0) # (1, num_points, 256)
        n = value.shape[0]
        flat_value = value.view(-1)
        axs[idx][0].hist(flat_value, bins=256, density=True)
        if idx>1:
            # Overlay the expected density: arcsine for even rows, standard
            # normal for odd rows.
            if not (idx)%2:
                x = np.linspace(-1, 1., 500)
                axs[idx][0].plot(x, stats.arcsine.pdf(x, -1, 2),
                                 linestyle=':', markersize=0.4, zorder=2)
            else:
                mu = 0
                variance = 1
                sigma = np.sqrt(variance)
                x = np.linspace(mu - 3*sigma, mu + 3*sigma, 500)
                axs[idx][0].plot(x, stats.norm.pdf(x, mu, sigma),
                                 linestyle=':', markersize=0.4, zorder=2)
        activ_freq, activ_spec = get_spectrum(value)
        axs[idx][1].plot(activ_freq, activ_spec)
        grad_freq, grad_spec = get_spectrum(grad_value)
        axs[idx][-1].plot(grad_freq, grad_spec)
        for ax in axs[idx]:
            ax.tick_params(axis='both', which='major', direction='in',
                           labelsize=fontsize, pad=1., zorder=10)
            ax.tick_params(axis='x', labelrotation=0, pad=1.5, zorder=10)
            ax.xaxis.set_major_formatter(x_formatter)
            ax.yaxis.set_major_formatter(y_formatter)
# + pycharm={"name": "#%%\n"}
# A deep, wide Siren on a dense 1-D input grid, for inspecting activation stats.
model = Siren(in_features=1, hidden_features=2048,
              hidden_layers=10, out_features=1, outermost_linear=True)
input_signal = torch.linspace(-1, 1, 65536//4).view(1, 65536//4, 1)
activations = model.forward_with_activations(input_signal, retain_grad=True)
# The last entry of the activations dict is the model output.
output = activations[next(reversed(activations))]
# Compute gradients. Because we have retain_grad=True on
# activations, each activation stores its own gradient!
output.mean().backward()
plot_all_activations_and_grads(activations)
# -
# Note how the activations of Siren always alternate between a standard normal distribution with standard deviation one, and an arcsine distribution. If you have a beefy computer, you can put this to the extreme and increase the number of layers - this property holds even for more than 50 layers!
# <a id='shift_invariance'></a>
# ## Distribution of activations is shift-invariant
#
# One of the key properties of the periodic sine nonlinearity is that it affords a degree of shift-invariance. Consider the first layer of a Siren: You can convince yourself that this layer can easily learn to map two different coordinates to *the same set of activations*. This means that whatever layers come afterwards will apply the same function to these two sets of coordinates.
#
# Moreover, the distribution of activations is similarly shift-invariant. Let's shift our input signal by 1000 and re-compute the activations:
# +
# Same analysis, but with the input grid shifted far outside (-1, 1).
input_signal = torch.linspace(-1, 1, 65536//4).view(1, 65536//4, 1) + 1000
activations = model.forward_with_activations(input_signal, retain_grad=True)
output = activations[next(reversed(activations))]
# Compute gradients. Because we have retain_grad=True on
# activations, each activation stores its own gradient!
output.mean().backward()
plot_all_activations_and_grads(activations)
# -
# As we can see, the distributions of activations didn't change at all - they are perfectly invariant to the shift.
| explore_siren.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .ps1
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: .NET (PowerShell)
# language: PowerShell
# name: .net-powershell
# ---
# # T1205 - Traffic Signaling
# Adversaries may use traffic signaling to hide open ports or other malicious functionality used for persistence or command and control. Traffic signaling involves the use of a magic value or sequence that must be sent to a system to trigger a special response, such as opening a closed port or executing a malicious task. This may take the form of sending a series of packets with certain characteristics before a port will be opened that the adversary can use for command and control. Usually this series of packets consists of attempted connections to a predefined sequence of closed ports (i.e. [Port Knocking](https://attack.mitre.org/techniques/T1205/001)), but can involve unusual flags, specific strings, or other unique characteristics. After the sequence is completed, opening a port may be accomplished by the host-based firewall, but could also be implemented by custom software.
#
# Adversaries may also communicate with an already open port, but the service listening on that port will only respond to commands or trigger other malicious functionality if passed the appropriate magic value(s).
#
# The observation of the signal packets to trigger the communication can be conducted through different methods. One means, originally implemented by Cd00r (Citation: Hartrell cd00r 2002), is to use the libpcap libraries to sniff for the packets in question. Another method leverages raw sockets, which enables the malware to use ports that are already open for use by other programs.
# ## Atomic Tests:
# Currently, no tests are available for this technique.
# ## Detection
# Record network packets sent to and from the system, looking for extraneous packets that do not belong to established flows.
# ## Shield Active Defense
# ### Network Monitoring
# Monitor network traffic in order to detect adversary activity.
#
# Network monitoring involves capturing network activity data, including capturing of server, firewall, and other relevant logs. A defender can then review them or send them to a centralized collection location for further analysis.
# #### Opportunity
# There is an opportunity to monitor network traffic for different protocols, anomalous traffic patterns, transfer of data, etc. to determine the presence of an adversary.
# #### Use Case
# The defender can implement network monitoring for and alert on anomalous traffic patterns, large or unexpected data transfers, and other activity that may reveal the presence of an adversary.
# #### Procedures
# Capture network logs for internet-facing devices and send those logs to a central collection location.
# Capture all network device (router, switches, proxy, etc.) logs on a decoy network and send those logs to a central collection location.
| playbook/tactics/persistence/T1205.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Customizing the queries
#
# As some of the examples in the basic **Pyrosm** tutorial shows, it is possible to customize the OSM parsing using a specific `custom_filter` parameter. This parameter is currently available for all methods except `get_network()`.
# The `custom_filter` can be highly useful if you want to parse only certain type of OpenStreetMap elements from the PBF, such as "residential" buildings or specific shops such as "book" -shops.
#
# In addition, Pyrosm allows the user to customize which attributes are parsed from the OSM elements into GeoDataFrame columns when parsing the data using the `extra_tags` -parameter. There is a specific set of default attributes that are always parsed from the OSM elements, but as OpenStreetMap is highly flexible in terms of what information can be associated with the data, this parameter makes it easy to parse some of the more "exotic" tags from the OSM.
#
# **Contents:**
#
# - [How to modify the parsing process with customized filters?](#Constructing-a-custom-filter)
# - [Advanced filtering](#Advanced-filtering)
#
# ## Constructing a custom filter
#
# Before diving into documentation about how to construct a custom filter, it is good to understand a bit how OpenStreetMap data is constructed. OpenStreetMap represents:
#
# >_"physical features on the ground (e.g., roads or buildings) using tags attached to its basic data structures (its nodes, ways, and relations). Each tag describes a geographic attribute of the feature being shown by that specific node, way or relation"_ ([OSM Wiki, 2020](https://wiki.openstreetmap.org/wiki/Map_Features)).
#
# Pyrosm uses these tags to filter OSM data elements according specific predefined criteria which makes it possible to parse e.g. buildings or roads from the data. Passing your own `custom_filter` can be used to modify this process.
#
# There are certain rules that comes to constructing the `custom_filter`. The filter should always be a **Python dictionary** where the `key` should be a string and the `value` should be a list of OSM tag-values matching the criteria defined by the user. The `key` should correspond to the key in OpenStreetMap tags (e.g. "building") and the value-list should correspond the OSM values that are associated with the keys. You can see a long list of possible OSM keys and associated values from [OSM Map Features](https://wiki.openstreetmap.org/wiki/Map_Features) wiki page.
#
# As an example, a filter can look something like the one below which would parse all residential and retail buildings from the data:
#
# - `{"building": ["residential", "retail"]}`
#
# This `custom_filter` can be used with `get_buildings()` or `get_osm_by_custom_criteria()` -function. With any other function, this filter does not have any effect on the results, as `"building"` tag is only associated with physical features representing buildings. Hence, if you would use this filter e.g. when parsing roads with `get_network()`, it wouldn't do anything because none of the roads contain information about buildings (*shouldn't at least*).
#
# Let's test:
# +
from pyrosm import OSM, get_data
# Get test data
fp = get_data("test_pbf")
# Initialize the reader
osm = OSM(fp)
# Read buildings with custom filter: keep only elements whose "building"
# tag value is "residential" or "retail".
my_filter = {"building": ["residential", "retail"]}
buildings = osm.get_buildings(custom_filter=my_filter)
# Plot, coloring by building type and listing the kept types in the title.
title = "Filtered buildings: " + ", ".join(buildings["building"].unique())
ax = buildings.plot(column="building", cmap="RdBu", legend=True)
ax.set_title(title);
# -
# As we can see, as a result the data now only includes buildings that have `residential` or `retail` as a value for the key "building".
# ### Different kind of filters
#
# In some cases, such as when parsing Points of Interest (POI) from the PBF, it might be useful to e.g. parse all OSM features that are [shops](https://wiki.openstreetmap.org/wiki/Key:shop). If you want to parse all kind of shops (including all), it is possible to add `True` as a value in the `custom_filter`, such as in the case `A` below.
#
# Example filters:
#
# - A: `custom_filter={"shop": True}`
# - B: `custom_filter={"shop": True, "tourism": True, "amenity": True, "leisure": True}`
# - C: `custom_filter={"shop": ["alcohol"], "tourism": True, "amenity": ["restaurant", "bar"], "leisure": ["dance"]}`
#
# All of the filters above produce slightly different results. The filter `A` would return all shops, `B` would return a broad selection of POIs including all data that relates to shops, tourism, amenities or leisure.
# Filter `C` is very specific filter that might be used by someone in a party mood and being interested in knowing the shops selling alcohol, restaurants and bars, everything related to tourism and leisure activities related to dancing.
#
# Let's test:
#
# #### Filter A
# +
from pyrosm import OSM, get_data
# Get test data
fp = get_data("helsinki_pbf")
# Initialize the reader
osm = OSM(fp)
# Read POIs with custom filter A: `True` keeps every "shop" tag value.
my_filter = {"shop": True}
pois = osm.get_pois(custom_filter=my_filter)
# Plot
ax = pois.plot(column="shop", legend=True, markersize=1, figsize=(14,6), legend_kwds=dict(loc='upper left', ncol=4, bbox_to_anchor=(1, 1)))
# -
# #### Filter B
# +
from pyrosm import OSM, get_data
# Get test data
fp = get_data("helsinki_pbf")
# Initialize the reader
osm = OSM(fp)
# Read POIs with custom filter B: keep every element tagged with any of the
# four keys, whatever the tag value.
my_filter={"shop": True, "tourism": True, "amenity": True, "leisure": True}
pois = osm.get_pois(custom_filter=my_filter)
# Merge poi type information into a single column
for tag in ("shop", "amenity", "leisure", "tourism"):
    pois[tag] = pois[tag].fillna(' ')
pois["poi_type"] = pois["amenity"] + pois["shop"] + pois["leisure"] + pois["tourism"]
# Plot
ax = pois.plot(column="poi_type", legend=True, markersize=1, figsize=(14,8), legend_kwds=dict(loc='upper left', ncol=6, bbox_to_anchor=(1, 1)))
# -
# #### Filter C
# +
from pyrosm import OSM, get_data
# Get test data
fp = get_data("helsinki_pbf")
# Initialize the reader
osm = OSM(fp)
# Read POIs with custom filter C: specific value lists for shop/amenity/leisure,
# plus everything tagged with "tourism".
my_filter={"shop": ["alcohol"], "tourism": True, "amenity": ["restaurant", "bar"], "leisure": ["dance"]}
pois = osm.get_pois(custom_filter=my_filter)
# Merge poi type information into a single column
for tag in ("shop", "amenity", "leisure", "tourism"):
    pois[tag] = pois[tag].fillna(' ')
pois["poi_type"] = pois["amenity"] + pois["shop"] + pois["leisure"] + pois["tourism"]
# Plot
ax = pois.plot(column="poi_type", legend=True, markersize=4, figsize=(14,8), legend_kwds=dict(loc='upper left', ncol=2, bbox_to_anchor=(1, 1)))
# -
# As we can see from these examples. Using the `custom_filter` is an efficient way to customize what data is extracted from the OpenStreetMap data.
# ## Advanced filtering
# If the above methods do not meet your needs, pyrosm provides a method `get_data_by_custom_criteria()` to fully customize what kind of data will be parsed from the OSM PBF, and how the filtering is conducted. The method provides possibility to specify what kind of OSM elements are parsed (nodes, ways, relations, or any combination of these) and it also provides possibility to determine whether the specified filter should be used to `"keep`" the data or `"exclude"` the data from OSM.
#
# Let's start by looking at the help:
# +
from pyrosm import OSM, get_data
# Initialize the reader with the Helsinki test dataset and show the
# documentation of the fully generic parsing method
osm = OSM(get_data("helsinki_pbf"))
help(osm.get_data_by_custom_criteria)
# -
# As we can see, the function contains more parameters than any of the other functions.
#
# The first two parameters `custom_filter` and `osm_keys_to_keep` can be used to filter the data on a OSM tag level.
# Pyrosm implements a data filtering system that works on **two levels**.
#
# 1. `osm_keys_to_keep` -parameter can be used to specify which kind of OSM elements should be considered as "valid" records for further filtering (i.e. a first level of filtering). For instance, by specifying `osm_keys_to_keep="highway"` tells the filtering algorithm to only consider OSM elements representing roads for further filtering. You can also pass multiple keys to this parameter inside a list, such as `osm_keys_to_keep=["amenity", "shop"]`, which would pass all OSM elements containing "amenity" and "shop" tag-keys for further consideration in the second level of filtering.
# 2. `custom_filter` -parameter specifies the second level of filtering that can be used to specify more specifically what kind of OSM elements are accepted for the final GeoDataFrame, such as `{"amenity": ["restaurant", "bar"]}`. See more details [above](#Constructing-a-custom-filter).
#
#
# **Remarks**
#
# Notice that `osm_keys_to_keep` is an optional parameter, and by default the keys are parsed directly from the `custom_filter` dictionary (the keys of it). However, there are cases when it is useful to specify the `osm_keys_to_keep` yourself. For example, if you are interested to parse schools from the data you could use `custom_filter={"amenity": ["school"]}`. By default, this would parse all amenities that have a tag `"school"`. **However**, if you would be interested to find only **buildings that are tagged as schools** you could use a combination of the two filters:
#
# - `osm_keys_to_keep="building"`
# - `custom_filter={"amenity": ["school"]}`
#
# The `osm_keys_to_keep` -parameter takes care that only such OSM elements that have a tag `"building"` are considered for further filtering, and then the `custom_filter` takes care that from buildings only such rows that have been tagged as `"school"` will be accepted to the final result.
#
# - Let's try this out:
# +
from pyrosm import OSM, get_data
# Initialize the reader with the Helsinki region test dataset
osm = OSM(get_data("helsinki_region_pbf"))
# Second-level filter: keep only elements tagged as schools
custom_filter = {"amenity": ["school"]}
# First-level filter: only consider elements that carry a "building" tag
osm_keys_to_keep = ["building"]
# Parse the buildings that are tagged as schools
schools_that_are_buildings = osm.get_data_by_custom_criteria(
    osm_keys_to_keep=osm_keys_to_keep,
    custom_filter=custom_filter,
)
print("Number of schools that have been tagged as buildings:", len(schools_that_are_buildings))
# ============
# Comparison
# ============
# For comparison, parse all schools without requiring them to be buildings,
# i.e. without using the 'osm_keys_to_keep' parameter at all
all_schools = osm.get_data_by_custom_criteria(custom_filter=custom_filter)
print("Number of schools altogether:", len(all_schools))
# -
# As the results show, there are 72 buildings tagged as schools in the data. This is far fewer than the number of all schools existing in the data. Following this principle, it is possible to make highly customized queries.
#
#
# ### Controlling which OSM element types are returned
#
# It is also possible to determine what kind of OSM elements are returned to the final GeoDataFrame. By default the `get_data_by_custom_criteria()` returns all elements, i.e. **nodes**, **ways** and **relations**.
# Let's continue from the previous example and assume that you would be interested in finding all schools that are Polygons. There are different ways to filter such data (e.g. utilizing the `geom_type` attribute of a GeoDataFrame), however, one way that this can be accomplished is to **filter out** such OSM elements that are **nodes** (i.e. points).
#
# You can easily control the type OSM elements that will be returned by using parameters:
#
# - `keep_nodes`
# - `keep_ways`
# - `keep_relations`
#
# By default all of these parameters are set to `True`. However, if you for example specify `keep_nodes=False`, pyrosm will return only ways and relations but skip nodes.
#
# Let's test this by filtering the schools that are nodes:
# +
# Continuing from the previous example:
# parse all schools, this time excluding plain nodes (point elements)
custom_filter = {"amenity": ["school"]}
schools_that_are_not_nodes = osm.get_data_by_custom_criteria(
    custom_filter=custom_filter,
    keep_nodes=False,  # drop node elements from the result
)
print("Number of schools that are not nodes:", len(schools_that_are_not_nodes))
# -
# Now we have only 421 schools left (from 512).
#
# Let's take a look of the geometry types:
# +
# Record each row's geometry type and list the distinct types present
geometry_types = schools_that_are_not_nodes.geometry.geom_type
schools_that_are_not_nodes["geom_type"] = geometry_types
schools_that_are_not_nodes["geom_type"].unique()
# -
# Great! Now we only have Polygons and MultiPolygons in the data.
#
# <div class="alert alert-info">
#
# Note: The `way` OSM element does not necessarily mean that the geometries will be Polygons. They can also be LineStrings (depending on what kind of OSM data is parsed). Hence, if you need to parse OSM data based on geometry type, it is safer to use the `GeoDataFrame.geometry.geom_type` function (as above) and select the rows using [Pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html).
#
# </div>
# ### `keep` vs `exclude` data with custom filters
# Pyrosm `get_data_by_custom_criteria()` makes it possible to also **filter out** records based on certain criteria. With parameter `filter_type` you specify whether the filters should be used as a criteria for keeping the records or excluding them.
#
# One example of when using `filter_type="exclude"` can be useful, is for example when filtering specific roads from the OSM data. In fact, the `get_network()` function works exactly in such a way.
#
# As an example of the excluding filter, let's create a custom filter that parses all the cycling roads from OSM in a similar manner as is done by get_network("cycling"):
# +
from pyrosm import OSM, get_data
# First level of filtering: only consider OSM elements carrying a
# "highway" tag (i.e. roads)
osm_keys_to_keep = "highway"
# Second level of filtering: tags that should NOT appear on a cyclable road
custom_filter = {
    # Areas are not parsed for networks by default
    "area": ['yes'],
    # OSM "highway" elements with these tags cannot be cycled
    "highway": ['footway', 'steps', 'corridor', 'elevator', 'escalator', 'motor', 'proposed',
                'construction', 'abandoned', 'platform', 'raceway', 'motorway', 'motorway_link'],
    # Roads that explicitly forbid cycling
    "bicycle": ['no'],
    # Private roads are not included either
    "service": ['private'],
}
# EXCLUDE every row matching the criteria above instead of keeping them
filter_type = "exclude"
# Run the query and plot the resulting cycling network
osm = OSM(get_data("test_pbf"))
cycling = osm.get_data_by_custom_criteria(
    custom_filter=custom_filter,
    osm_keys_to_keep=osm_keys_to_keep,
    filter_type=filter_type,
)
cycling.plot()
# -
# Now we have filtered all the roads that can be cycled. This corresponds to the one produced by default with `get_network("cycling")`:
# The built-in cycling filter of get_network() should produce the same roads
cycling2 = osm.get_network("cycling")
cycling2.plot()
| docs/custom_filter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
from sqlalchemy import create_engine
# NOTE(review): this path is never used below and appears to be missing a
# leading "/" — confirm whether it can be removed or should be passed somewhere
database_path = "Users/medinam/Desktop/Clone/SQL-Challenge"
# +
# !pip install psycopg2
# Connect to the local PostgreSQL database
# (replace 'username'/'password' with real credentials before running)
engine = create_engine('postgresql://username:password@localhost:5432/SQL-Homework')
conn = engine.connect()
# -
# Load all salaries and plot a histogram of the salary distribution
data = pd.read_sql("SELECT salary FROM total_employee", conn)
data
ax = data.plot.hist(bins=20, alpha=0.5)
# Load salary together with title and compute the average salary per title
data = pd.read_sql("SELECT salary, title FROM total_employee", conn)
data
data.groupby(['title']).mean()
salaries_title = data.groupby(['title']).mean()
salaries_title.plot(kind="bar", figsize=(6,4), color="b", legend=False)
plt.title("Salaries by Title")
# tight_layout() must run BEFORE show(); the original called it afterwards,
# which has no effect on the already-rendered figure
plt.tight_layout()
plt.show()
| EmployeeSQLBONUS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/semishen/ML100Days/blob/master/Day_030_HW.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="G5hLJoCKqhYK" colab_type="text"
# # 作業 : (Kaggle)鐵達尼生存預測
# https://www.kaggle.com/c/titanic
# + [markdown] id="sc_u76j1qhYL" colab_type="text"
# # [作業目標]
# - 試著調整特徵篩選的門檻值, 觀察會有什麼影響效果
# + [markdown] id="x3nAcUnGqhYL" colab_type="text"
# # [作業重點]
# - 調整相關係數過濾法的篩選門檻, 看看篩選結果的影響 (In[5]~In[8], Out[5]~Out[8])
# - 調整L1 嵌入法篩選門檻, 看看篩選結果的影響 (In[9]~In[11], Out[9]~Out[11])
# + id="oZWBYgwFqhYM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 222} outputId="e26be55f-8cb5-413b-d917-3ada71b23b00"
# Everything needed before feature engineering (same as the previous example)
import pandas as pd
import numpy as np
import copy
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
# Load the Titanic training data; keep the target and drop the identifier
df = pd.read_csv('titanic_train.csv')
train_Y = df['Survived']
df = df.drop(columns=['PassengerId'])
print(df.shape)
df.head()
# + id="Zt3LiK6MqhYQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 325} outputId="453a001d-43f7-445a-d86b-0c4c288774c3"
# Compute the correlation matrix of the whole DataFrame and draw it as a heatmap
import seaborn as sns
import matplotlib.pyplot as plt
corr = df.corr()
sns.heatmap(corr)
plt.show()
# + id="_EA09QgDqhYS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="fe8f9c43-b3f5-4cbb-d157-a91ee2eca68c"
# 'Survived' is intentionally kept here — the correlation-based selection
# below needs it; drop it only when building the training matrix.
# df = df.drop(['Survived'], axis=1)
# Keep only the numeric (int64 / float64) columns, stored in num_features
num_features = df.select_dtypes(include=['float64', 'int64']).columns.values
num_features
# Reduce the frame to the numeric columns and fill missing values with -1
num_df = df.loc[:, num_features].fillna(-1)
MMEncoder = MinMaxScaler()
num_df.head()
# + [markdown] id="nqoV9oydqhYU" colab_type="text"
# # 作業1
# ### Q1: 鐵達尼生存率預測中,試著變更兩種以上的相關係數門檻值,觀察預測能力是否提升?
# ### A1: 相關係數區間從 -0.1 ~ 0.1 調到 -0.05 ~ 0.05,特徵從 'Pclass', 'Fare' 增加成 'Pclass', 'Age', 'Parch', 'Fare',分數提升了 0.025 左右。
# + id="ahLFFF2ZqhYV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="52292bd6-a1a4-4762-c4b8-8c383f5d1d21"
# Baseline: logistic regression on all numeric features
estimator = LogisticRegression()
train_X = MMEncoder.fit_transform(num_df)
cross_val_score(estimator, train_X, train_Y, cv=5).mean()
# + id="i7kmSlV3qhYX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="978cfc89-bddc-430d-b711-197708b243d3"
# Feature set 1: features whose correlation with 'Survived'
# is stronger than +/-0.1
high_list = list(corr.index[corr['Survived'].abs() > 0.1])
high_list.pop(0)  # the first entry is 'Survived' itself — remove it
print(high_list)
# + id="tmpAV6VGqhYZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="13b95b89-db53-422b-88b9-20e983e62c38"
# Feature set 1 + logistic regression
train_X = MMEncoder.fit_transform(num_df[high_list])
cross_val_score(estimator, train_X, train_Y, cv=5).mean()
# + id="vhadoTniqhYc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="1a69dcdf-c1e4-4003-8f03-a26a48b9e20b"
# Feature set 2: features whose correlation with 'Survived'
# is stronger than +/-0.05
high_list = list(corr.index[corr['Survived'].abs() > 0.05])
high_list.pop(0)  # drop 'Survived' itself
print(high_list)
# + id="smfj44cRqhYe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="ff284c33-2285-4bd0-f196-bcad76abf416"
# Feature set 2 + logistic regression
train_X = MMEncoder.fit_transform(num_df[high_list])
cross_val_score(estimator, train_X, train_Y, cv=5).mean()
# + [markdown] id="VIJCJvztqhYg" colab_type="text"
# # 作業2
# ### Q2: 續上題,使用 L1 Embedding 做特徵選擇(自訂門檻),觀察預測能力是否提升?
# ### A2: 若 alpha = 0.005,篩選出了 'Pclass', 'Age', 'Parch' 三個特徵,模型預測能力降了一些。
# + id="Srth5g1LqhYg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="504d7682-5ea9-4bbd-b5fd-7b878492c40d"
from sklearn.linear_model import Lasso
# Fit an L1-regularized (Lasso) model and inspect its coefficients;
# features with a zero coefficient are dropped by the L1 penalty
train_X = MMEncoder.fit_transform(num_df)
L1_Reg = Lasso(alpha=0.005)
L1_Reg.fit(train_X, train_Y)
L1_Reg.coef_
# + id="ApW7VjvsqhYi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="9c9da0b2-08dd-44df-86ec-d28b9de867d0"
from itertools import compress
# Keep the features whose Lasso coefficient is non-zero
L1_mask = list(np.abs(L1_Reg.coef_) > 0)
L1_list = list(compress(list(num_df), L1_mask))
L1_list
# + id="uPCbKVijqhYk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="3142c1e0-6cbf-4fce-d991-af5481d185a6"
# L1-embedding features + logistic regression
# (the original comment said "linear regression", but `estimator`
# is the LogisticRegression instance defined earlier)
train_X = MMEncoder.fit_transform(num_df[L1_list])
cross_val_score(estimator, train_X, train_Y, cv=5).mean()
# + id="Aw-l-lGxqhYm" colab_type="code" colab={}
| Day_030_HW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/opentrainingcamp/python/blob/main/Notebook/Data/DS01_Pandas_pour_datascience.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="c-olZTHWjPyk"
# ### Avant propos: _Concernant les données utilisés dans cette série de sessions dans les notebooks_
# _Les données utilisés dans cette série de tutoriels sont récupérés du site [fivethirtyeight.com](https://data.fivethirtyeight.com/). Ces données sont issues et publiés en licence Creative [Commons Attribution 4.0 International License](https://creativecommons.org/licenses/by/4.0/)._
#
# _J'utiliserais certaines données pour créer des tutoriels et récupérer des données réalistes afin de nous permettre de raconter une histoires et réaliser des visualisations._
#
# _Les données sont publiés dans un github publique je les au récupéré dans un fork que vous pouvez récupéré ici [opentrainingcamp/data](https://github.com/opentrainingcamp/data)_
# + [markdown] id="C6pgE_9I2b00"
# # Pourquoi Python et pandas ?
#
# Une des tâches du datascientiste commence par la récupération des données pertinentes, puis éventuellement leur stockage.
#
#
# Dans ce parcours d'apprentissage complémentaire, vous vous familiariserez avec Pandas et découvrirez les tenants et les aboutissants de la façon dont vous pouvez l'utiliser pour récupérer et analyser des données avec Python et ensuite les archiver.
#
# Pandas change la donne pour la science des données et l'analyse, en particulier si vous êtes arrivé à Python parce que vous recherchiez quelque chose de plus puissant qu'Excel et VBA. Pandas utilise des structures de données rapides, flexibles et expressives conçues pour rendre le travail avec des données relationnelles ou étiquetées (NoSql) à la fois facile et intuitif.
#
# Nous utiliserons Pandas comme pivot depuis et vers les BD Nosql: mongodeb, cassandra et elasticserch
#
#
# + [markdown] id="f6ERb6j35zy-"
# SEANCE DS01: Vous disposez d'un vaste ensemble de données riche en informations intéressantes, mais vous ne savez pas par où commencer à l'explorer? Vous souhaitez générer des statistiques à partir de celui-ci, mais elles ne sont pas si faciles à extraire? Ce sont précisément les cas d'utilisation où Pandas et Python peuvent vous aider! Avec ces outils, vous serez en mesure de découper un vaste ensemble de données en parties gérables et de tirer un aperçu de ces informations.
# + [markdown] id="0mXUNOMy4TUA"
# Dans cette premières séance "Pandas", vous apprendrez à:
#
# * Calculez des métriques sur vos données
# * Effectuer des requêtes et des agrégations de base
# * Découvrez et gérez les données incorrectes, les incohérences et les valeurs manquantes
# * Visualisez vos données avec des tracés
# * Vous découvrirez également les différences entre les principales structures de données utilisées par Pandas et Python.
#
# Pour suivre, vous pouvez obtenir tous les exemples de code de ce didacticiel sur le lien ci-dessous:
#
# [Dans github](https://github.com/lipug/datascience)
# + [markdown] id="92I8ePts94dF"
# # Un utilitaire pour lire des urls (c'est du Python!)
# + colab={"base_uri": "https://localhost:8080/"} id="qLj9vnws2VCG" outputId="f404eb35-4363-468f-a234-e1e83841d220"
import requests
# Fetch the small helper module and save it next to the notebook
download_url = "https://github.com/lipug/datascience/raw/main/packages/lire.py"
target_path = "lire.py"
response = requests.get(download_url)
response.raise_for_status()  # stop immediately if the download failed
with open(target_path, "wb") as out_file:
    out_file.write(response.content)
print("C'est fait....")
# + [markdown] id="0P-n8F6L-imf"
# .... explications ...
# + [markdown] id="HUXK1-XOK4ee"
# # Montrons un avant goût des capacités des Pandas DataFrame.
# + [markdown] id="nLkz3T6smkGw"
# _Voici le dictionnaire de données que nous utiliserons [Classement ELO historique de la NBA](https://github.com/opentrainingcamp/data/tree/master/nba-elo)_
# + id="eGz7PUlF_ufe"
# URL of the historical NBA Elo ratings dataset (CSV)
l = ['https://github.com/opentrainingcamp/data/raw/master/nba-elo/nbaallelo.csv']
# + id="KwXh-OKT_yud"
import lire
# + colab={"base_uri": "https://localhost:8080/"} id="MjrsTDAc_2ht" outputId="d15e5203-58dd-4a5a-fcf8-48e074baafbf"
# Download the CSV next to the notebook using the helper fetched above
lire.read_from_url(l[0])
# + colab={"base_uri": "https://localhost:8080/"} id="v4V-GzvMAREm" outputId="bb7944e2-5cac-412f-a553-31c8137ac073"
import pandas as pd
# Load the local copy into a DataFrame
nba = pd.read_csv("nbaallelo.csv")
type(nba)
# + colab={"base_uri": "https://localhost:8080/"} id="3XGXMxocLJCl" outputId="04200055-1307-40d6-e8d0-306ec65cf11f"
# or directly from the URL
nba_f_url = pd.read_csv('https://github.com/opentrainingcamp/data/raw/master/nba-elo/nbaallelo.csv')
type(nba_f_url)
# + id="32ahvaaoLVxP" colab={"base_uri": "https://localhost:8080/", "height": 488} outputId="fda9b344-a958-49c5-f091-d2b3524968e2"
# Did we read the same thing? (element-wise comparison of the two frames)
nba == nba_f_url
# + [markdown] id="NIVFLilQ-lTa"
# On a suivi la convention d'importation de Pandas en Python avec l'alias pd. Ensuite, vous utilisez `.read_csv()` pour lire l'ensemble de données et le stocker en tant qu'objet DataFrame dans la variable nba.
# + [markdown] id="wv7nBPIBBRQV"
# Vous pouvez voir combien de données nba contient:
# + colab={"base_uri": "https://localhost:8080/"} id="OFufMNJTBXRj" outputId="4fff3ac0-df71-4d5c-c8ef-03373b1dc3d2"
# Row count, and the (rows, columns) dimensionality of the dataset
print(len(nba),nba.shape)
# + [markdown] id="5ihhq2x-BqfW"
# Vous utilisez la fonction intégrée Python `len()` pour déterminer le nombre de lignes. Vous utilisez également l'attribut `.shape du DataFrame` pour voir sa dimensionnalité. Le résultat est un tuple contenant le nombre de lignes et de colonnes.
#
# Vous savez maintenant qu'il y a 126 314 lignes et 23 colonnes dans votre ensemble de données. Mais comment être sûr que l'ensemble de données contient vraiment des statistiques de basket-ball? Vous pouvez jeter un œil aux cinq premières lignes avec `.head()`:
# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="ZNB4h5rwB2T7" outputId="a8605246-f15b-4597-e338-1d26e7d0706c"
# Peek at the first five rows
nba.head()
# + [markdown] id="HogotCf4DHJT"
# Bien qu'il soit pratique de voir toutes les colonnes, vous n'aurez probablement pas besoin de six décimales! Changez-le en deux:
# + id="hr36V-3XDGNF"
# Display floats with only two decimal places
pd.set_option("display.precision", 2)
# + id="oi9Dq68Pi--I" colab={"base_uri": "https://localhost:8080/"} outputId="c33e7bb2-ff11-4dfc-ee28-120853411f89"
help(pd.set_option)
# + id="m31yMHFzc1Rh"
# + [markdown] id="-4JatnhaDK2V"
# Pour vérifier que vous avez correctement modifié les options, vous pouvez exécuter à nouveau .head () ou afficher les cinq dernières lignes avec .tail () à la place:
# + colab={"base_uri": "https://localhost:8080/", "height": 299} id="WlUUuBjUDPJX" outputId="366946ed-8320-45ce-91c0-246e0359b9eb"
# Show the last five rows (also verifies the new display precision)
nba.tail()
# + [markdown] id="qghM5AMPDnMj"
# Vous pouvez découvrir d'autres possibilités de .head () et .tail () avec un petit exercice. Pouvez-vous imprimer les trois dernières lignes de votre DataFrame? Développez le bloc de code ci-dessous pour voir la solution:
# + id="BxQsQsxsDmiQ" colab={"base_uri": "https://localhost:8080/", "height": 174} outputId="119e5776-000b-432a-dc31-6b38a798054b"
# Solution: the exercise asks for the LAST three rows, so use tail(3)
# (the previous `nba.head(1)` returned the first row instead)
nba.tail(3)
# + [markdown] id="DI_MfPnLD5h9"
# Semblable à la bibliothèque standard Python, les fonctions de Pandas sont également fournies avec plusieurs paramètres facultatifs. Chaque fois que vous tombez sur un exemple qui semble pertinent mais qui est légèrement différent de votre cas d'utilisation, consultez la [documentation officielle](https://pandas.pydata.org/pandas-docs/stable/). Il y a de bonnes chances que vous trouviez une solution en modifiant certains paramètres facultatifs!
# + [markdown] id="PqGdBJ7GEGnr"
# # Apprendre à connaître vos données
#
# Vous avez importé un fichier CSV avec la bibliothèque Pandas Python et jeté un premier coup d'œil au contenu de votre ensemble de données. Jusqu'à présent, vous n'avez vu que la taille de votre ensemble de données et ses premières et dernières lignes. Vous devez apprendre à examiner vos données de manière plus systématique.
# + [markdown] id="G7_kE_qME68i"
# ## Affichage des types de données
# La première étape pour connaître vos données consiste à découvrir les différents types de données qu'elles contiennent. Bien que vous puissiez mettre n'importe quoi dans une liste, les colonnes d'un DataFrame contiennent des valeurs d'un type de données spécifique. Lorsque vous comparez les structures de données Pandas et Python, vous verrez que ce comportement rend les Pandas beaucoup plus rapides!
#
# Vous pouvez afficher toutes les colonnes et leurs types de données avec .info ():
# + colab={"base_uri": "https://localhost:8080/"} id="1AzZzk9lFGIL" outputId="6a7a56a8-2733-4b04-d153-092aa7122de6"
# Column names, non-null counts and dtype of every column
nba.info()
# + [markdown] id="bFY_SbGbFzHU"
# Pour plus d'informations, consultez [le guide de démarrage officiel](https://pandas.pydata.org/pandas-docs/stable/getting_started/).
# + [markdown] id="nXOFoxg4Go2v"
# ## Affichage des statistiques de base
# Maintenant que vous avez vu quels types de données se trouvent dans votre ensemble de données, il est temps d'avoir un aperçu des valeurs que contient chaque colonne. Vous pouvez le faire avec `.describe()`:
# + colab={"base_uri": "https://localhost:8080/", "height": 364} id="XOYvfrUaGsfd" outputId="297f49a7-6289-4574-e708-c00aa6842da8"
# Summary statistics (count, mean, std, quartiles) of the numeric columns
nba.describe()
# + [markdown] id="4VZ7MaaRHBbc"
# .describe () analyse uniquement les colonnes numériques par défaut, mais vous pouvez fournir d'autres types de données si vous utilisez le paramètre include:
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 175} id="UDHA_OusHG10" outputId="fcaf1094-d372-4d85-f627-6ae342608695"
import numpy as np
# Describe only the object (string) columns: count, unique, top, freq
nba.describe(include=np.object_)
# + [markdown] id="vQLJKUVWHiU2"
# Jetez un œil aux colonnes team_id et fran_id. Votre ensemble de données contient 104 ID d'équipe différents, mais seulement 53 ID de franchise différents. De plus, l'ID d'équipe le plus fréquent est BOS, mais l'ID de franchise le plus fréquent est Lakers. Comment est-ce possible? Vous devrez explorer un peu plus votre ensemble de données pour répondre à cette question.
# + [markdown] id="SvqIOwK1IOUg"
# ## Explorer votre ensemble de données
# L'analyse exploratoire des données peut vous aider à répondre à des questions sur votre ensemble de données. Par exemple, vous pouvez examiner la fréquence à laquelle des valeurs spécifiques apparaissent dans une colonne:
# + colab={"base_uri": "https://localhost:8080/"} id="NuX3rErcmK1C" outputId="d29f45a8-b556-41f0-f46d-7daf120a126c"
# The raw team_id column (one row per game appearance)
nba["team_id"]
# + colab={"base_uri": "https://localhost:8080/"} id="8_a8N1PoIXQU" outputId="c74b212c-f678-4dd5-e4db-e9844accebc6"
# Number of games played per team id
nba["team_id"].value_counts()
# + id="hXwda165Ie7o" colab={"base_uri": "https://localhost:8080/"} outputId="b9216be2-60bd-446b-9728-962c691cb5d5"
# Number of games played per franchise id
nba["fran_id"].value_counts()
# + [markdown] id="nbxRito8I05m"
# Il semble qu'une équipe nommée "Lakers" ait joué 6024 matchs, mais seuls 5078 d'entre eux ont été joués par les Lakers de Los Angeles (LAL). Découvrez qui est l'autre équipe "Lakers":
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="XtxQOF1Km2y2" outputId="e255e69b-942d-4209-aa31-71b45b5b299e"
# Equivalent SQL: select team_id, notes from nba where fran_id = 'Lakers'
nba.loc[nba["fran_id"] == "Lakers", ["team_id", "notes"]]
# + colab={"base_uri": "https://localhost:8080/"} id="16XzZEXGJDnX" outputId="97341cf4-ccf8-4cdf-d5ec-115c506cc018"
# How many games each team id of the "Lakers" franchise played
nba.loc[nba["fran_id"] == "Lakers", "team_id"].value_counts()
# + [markdown] id="H_f6UiDyJXcf"
# En effet, les Lakers de Minneapolis («MNL») ont disputé 946 matchs. Vous pouvez même savoir quand ils ont joué à ces jeux:
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="8oGOYUQ4JYmd" outputId="df15fdad-f829-44df-f329-0e4c2f629e0b"
# First game date of the Minneapolis Lakers
nba.loc[nba["team_id"] == "MNL", "date_game"].min()
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="7r4rOALAJcA5" outputId="360d479f-f380-415d-f852-5e2b3ac24c69"
# Last game date of the Minneapolis Lakers
nba.loc[nba["team_id"] == "MNL", "date_game"].max()
# + colab={"base_uri": "https://localhost:8080/"} id="x0xX9azLJj5Q" outputId="5c6f64f6-3d13-4e36-85eb-6306d4875b6f"
# Both extremes at once via aggregation
nba.loc[nba["team_id"] == "MNL", "date_game"].agg(("min", "max"))
# + colab={"base_uri": "https://localhost:8080/"} id="pY06wi-RKNkQ" outputId="d16e721e-8f97-4e63-b8a1-be0320cad4d1"
# The same date span for the Boston Celtics
nba.loc[nba["team_id"] == "BOS", "date_game"].agg(("min", "max"))
# + [markdown] id="Z3CpweMXKI0s"
# On dirait que les Lakers de Minneapolis ont joué entre les années 1949 et 1959. Cela explique pourquoi vous pourriez ne pas reconnaître cette équipe! Surtout si on est Français ;)
#
# Vous avez également découvert pourquoi l'équipe des Boston Celtics «BOS» a joué le plus de matchs de l'ensemble de données. Analysons également un peu leur histoire. Découvrez combien de points les Celtics de Boston ont marqués au cours de tous les matchs contenus dans cet ensemble de données. Développez le bloc de code ci-dessous pour la solution:
# + colab={"base_uri": "https://localhost:8080/"} id="SMaF_je9Ka5E" outputId="880ffb3d-c0ac-4f8a-806f-9cd59017076d"
# Points scored by the Boston Celtics across all games: total plus distribution stats
nba.loc[nba["team_id"] == "BOS", "pts"].agg(("sum", "min", "max", "mean", "std"))
| Notebook/Data/DS01_Pandas_pour_datascience.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: welly
# language: python
# name: welly
# ---
# ## Plotting
#
# Some preliminaries...
# +
import numpy as np
import matplotlib.pyplot as plt
import welly
# Show which version of welly this notebook was run with
welly.__version__
# -
# ## Load a well and add deviation and a striplog
#
# Use the `from_las()` method to load a well by passing a filename as a `str`.
#
# This is really just a wrapper for `lasio` but instantiates a `Header`, `Curve`s, etc.
from welly import Well
# Load the well from a LAS file and list the curves it contains
w = Well.from_las('data/P-130_out.LAS')
w.data.keys()
w.data['GR'].plot()
# Load a deviation survey (CSV, skipping the header row); only the first three
# columns are used — presumably MD/inclination/azimuth, confirm against the file.
dev = np.loadtxt('data/P-130_deviation_survey.csv', delimiter=',', skiprows=1)
w.location.add_deviation(dev[:, :3], td=2618.3)
# Convert a measured depth of 2000 to true vertical depth
w.location.md2tvd(2000)
# Add a striplog digitized from an image covering the 25-2618 interval.
from striplog import Legend, Striplog
legend = Legend.builtin('NSDOE')
strip = Striplog.from_image('data/P-130_25_2618.png', 25, 2618, legend=legend)
strip.plot(aspect=2)
w.data['strip'] = strip
# ## Basic plot
#
# We want to use a legend so we get the striplog to look right:
# 'MD' appears twice so a depth track frames both sides of the plot
tracks = ['MD', 'strip', 'GR', 'RHOB', ['DT', 'DTS'], 'M2R9', 'MD']
w.plot(tracks=tracks, legend=legend)
# The legend doesn't have entries for the curves, so they are grey.
#
# Let's add some.
# Display styles (colour, line width/style, x-limits, x-scale) per curve mnemonic
curve_legend_csv = """colour,lw,ls,xlim,xscale,curve mnemonic
#ff0000,1.0,-,"0,200",linear,GR
blue,1.0,-,,linear,RHOB
#00ff00,1.0,--,,linear,DT
#ffff00,1.0,--,,linear,DTS
black,1.0,,,log,M2R9
"""
curve_legend = Legend.from_csv(text=curve_legend_csv)
# Combine the striplog legend with the curve styles
complete_legend = legend + curve_legend
complete_legend[-6:]
# Look up the display decor assigned to the GR curve
curve_legend.get_decor(w.data['GR'])
w.data['GR'].plot(legend=curve_legend)
# Re-plot the tracks with curve styling, restricted to the 700-1200 interval
w.plot(tracks=tracks, legend=complete_legend, extents=(700, 1200))
# ## 2D log plot
#
# The ordinary log plot:
w.data['GR'].plot()
# The same curve as a 2D colour-mapped image, optionally with the curve overlaid
w.data['GR'].plot_2d(cmap='viridis')
w.data['GR'].plot_2d(curve=True, cmap='viridis', lw=0.5, ec='r')
| tutorial/03_Plotting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Get Game Company Information Using Instaloader
# We are looking for the pokemon company info on the instagram and finally chose 3 of them as our investigation objects: pokemon, pokemongoapp, detectivepikachu.
# Import all the libraries needed
# +
import json
import oauth2 as oauth
import twitter
import urllib
import bs4
import numpy as np
import pandas as pd
import tweepy
import instaloader
import csv
# NOTE(review): several of these imports (json, oauth2, twitter, urllib, bs4,
# tweepy, csv) are not used in this notebook — confirm and prune if possible.
# Anonymous Instaloader session and the profile object for the 'pokemon' account
L=instaloader.Instaloader()
profile=instaloader.Profile.from_username(L.context,'pokemon')
# Import the necessary libraries, such as instaloader and tweepy.
# -
# Get all the post attributes we need, such as the hashtags.
# Collect metadata for the 200 most recent posts of the 'pokemon' profile
posts = profile.get_posts()
name = []
id = []
likes = []
comments = []
caption = []
hashtag = []
mediaid = []
i = 0
for post in posts:
    if i >= 200:
        # Stop iterating once 200 posts are collected; the original code kept
        # fetching every remaining post over the network without using it.
        break
    name.append(post.owner_username)
    id.append(post.owner_id)
    likes.append(post.likes)
    comments.append(post.comments)
    caption.append(post.caption)
    hashtag.append(post.caption_hashtags)
    mediaid.append(post.mediaid)
    i += 1
# Get the 200 most recent Instagram posts from the 'pokemon' account.
# Assemble the collected lists into a DataFrame for the 'pokemon' account
user_info = {
    'name': name, 'id': id, 'likes': likes, 'comments': comments,
    'caption': caption, 'hashtag': hashtag, 'mediaid': mediaid,
}
df1 = pd.DataFrame(user_info)
df1
# Persist the raw data so it can be reloaded and normalized below
df1.to_csv("pokemon.csv")
# Export it to csv file named pokemon.csv.
# Reload the file saved above. The original code read "instagram.csv", a file
# this notebook never writes — the data was saved as "pokemon.csv".
df1 = pd.read_csv("pokemon.csv", usecols=['name','id','likes','comments','caption','mediaid','hashtag'])
# Normalize: split each row holding several hashtags into one row per hashtag
df1 = df1.drop('hashtag',axis=1).join(df1['hashtag'].str.split(',',expand = True).stack().reset_index(level=1, drop=True).rename('hashtag'))
df1
# Same collection procedure for the 'pokemongoapp' profile
L = instaloader.Instaloader()
profile = instaloader.Profile.from_username(L.context, 'pokemongoapp')
posts = profile.get_posts()
name = []
id = []
likes = []
comments = []
caption = []
hashtag = []
mediaid = []
i = 0
for post in posts:
    if i >= 200:
        break  # stop fetching once 200 posts are collected
    name.append(post.owner_username)
    id.append(post.owner_id)
    likes.append(post.likes)
    comments.append(post.comments)
    caption.append(post.caption)
    hashtag.append(post.caption_hashtags)
    mediaid.append(post.mediaid)
    i += 1
# Build, save, reload and normalize the 'pokemongoapp' table
user_info = {'name': name, 'id': id, 'likes': likes, 'comments': comments,
             'caption': caption, 'hashtag': hashtag, 'mediaid': mediaid}
df2 = pd.DataFrame(user_info)
df2
df2.to_csv("pokemongoapp.csv")
df2 = pd.read_csv("pokemongoapp.csv", usecols=['name','id','likes','comments','caption','mediaid','hashtag'])
# Normalize the hashtag column. NOTE: the original joined df1['hashtag'] here
# (a copy/paste slip), attaching the pokemon account's hashtags to this frame;
# it must use df2's own hashtag column.
df2 = df2.drop('hashtag',axis=1).join(df2['hashtag'].str.split(',',expand = True).stack().reset_index(level=1, drop=True).rename('hashtag'))
df2
# Same collection procedure for the 'detectivepikachumovie' profile
L = instaloader.Instaloader()
profile = instaloader.Profile.from_username(L.context, 'detectivepikachumovie')
posts = profile.get_posts()
name = []
id = []
likes = []
comments = []
caption = []
hashtag = []
mediaid = []
i = 0
for post in posts:
    if i >= 200:
        break  # stop fetching once 200 posts are collected
    name.append(post.owner_username)
    id.append(post.owner_id)
    likes.append(post.likes)
    comments.append(post.comments)
    caption.append(post.caption)
    hashtag.append(post.caption_hashtags)
    mediaid.append(post.mediaid)
    i += 1
# Build, save, reload and normalize the 'detectivepikachumovie' table
user_info = {'name': name, 'id': id, 'likes': likes, 'comments': comments,
             'caption': caption, 'hashtag': hashtag, 'mediaid': mediaid}
df3 = pd.DataFrame(user_info)
df3
df3.to_csv("detectivepikachu.csv")
df3 = pd.read_csv("detectivepikachu.csv", usecols=['name','id','likes','comments','caption','mediaid','hashtag'])
# Normalize the hashtag column. NOTE: the original joined df1['hashtag'] here
# (a copy/paste slip); it must use df3's own hashtag column.
df3 = df3.drop('hashtag',axis=1).join(df3['hashtag'].str.split(',',expand = True).stack().reset_index(level=1, drop=True).rename('hashtag'))
df3
# Label every row with the game it belongs to
df1['Game_name'] = 'POKEMON'
df2['Game_name'] = 'POKEMONGO'
df3['Game_name'] = 'DETECTIVE PIKACHU'
# Stack the three per-game tables into a single raw table
df_gamecompany_raw = pd.concat([df1, df2, df3])
df_gamecompany_raw
# The 'name' and 'id' columns are no longer needed downstream
df_gamecompany_raw = df_gamecompany_raw.drop(columns=['name', 'id'])
df_gamecompany_raw
import pandas as pd
# Reload the consolidated game-company sheet produced earlier.
GameCompany = pd.read_csv(r'GameCompanyPost_Final.csv')
GameCompany
GameCompany['Role']='GAMEDEVELOPER'
# Add a new column named Role and label every post as GAMEDEVELOPER.
GameCompany
GameCompanyFinal = GameCompany.drop(['hashtag','mediaid','caption','comments','likes'],axis=1)
# Drop all the columns which are not needed.
GameCompanyFinal
GameCompanyFinal.to_csv("GameCompanyFinal.csv",index=False)
# Export it as csv named GameCompanyFinal.csv and delete the index column.
| project/code/GameCompany_Instagram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="_o516wlPWe7T"
#mounting drive
from google.colab import drive
drive.mount('/content/drive')
# + id="d30ka84vWkTn" colab={"base_uri": "https://localhost:8080/"} outputId="ad627419-3241-47cf-e69e-2aa9ba7b4c24"
#importing required lib
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
nltk.download('punkt')
nltk.download('wordnet')
import numpy as np
import random
import string
import warnings
warnings.simplefilter("ignore")
# + id="JpkozIXBW2vR" colab={"base_uri": "https://localhost:8080/"} outputId="6f8b4011-c397-431d-ce6b-98515d8e86c6"
# Load the breast-cancer FAQ corpus that the TF-IDF fallback answers from.
text_data = open('/content/drive/MyDrive/breast_cancer/breast_cancer.txt','r',errors = 'ignore')
#text_data = open('/content/breast_cancer.txt','r',errors = 'ignore')
raw_data = text_data.read()
raw_data = raw_data.lower()# converts to lowercase
nltk.download('punkt')  # NOTE: already downloaded above — redundant but harmless
nltk.download('wordnet')
sent_tokens = nltk.sent_tokenize(raw_data)# converts to list of sentences
word_tokens = nltk.word_tokenize(raw_data)# converts to list of words
# + id="5VXWmsoHYPmy"
# WordNet-based lemmatizer used to normalise tokens before TF-IDF.
lemmer = nltk.stem.WordNetLemmatizer()
#WordNet is a semantically-oriented dictionary of English included in NLTK.

def LemTokens(tokens):
    """Lemmatize every token in *tokens* and return the resulting list."""
    lemmatized = []
    for tok in tokens:
        lemmatized.append(lemmer.lemmatize(tok))
    return lemmatized

# Translation table mapping every punctuation character to None (delete it).
remove_punct_dict = {ord(ch): None for ch in string.punctuation}

def LemNormalize(text):
    """Lower-case *text*, strip punctuation, tokenize and lemmatize it."""
    cleaned = text.lower().translate(remove_punct_dict)
    return LemTokens(nltk.word_tokenize(cleaned))
# + id="GAWNiKSGYrLf"
# Canned small-talk: if any word of the user's sentence is a known greeting,
# answer with a random greeting of our own.
GREETING_INPUTS = ("hello", "hi", "greetings", "sup", "what's up","hey",)
GREETING_RESPONSES = ["hi", "hey", "*nods*", "hi there", "hello", "I am glad! You are talking to me"]

def greeting(sentence):
    """Return a random greeting if *sentence* contains one, else None."""
    if any(word.lower() in GREETING_INPUTS for word in sentence.split()):
        return random.choice(GREETING_RESPONSES)
# + id="0rXz5_utbD3F"
# Trigger phrases for the "what is breast cancer" answer.
# BUG FIX: the original ("what is breast cancer") was a plain string (no
# trailing comma), so the membership test below did *substring* matching and
# fired on almost any word (e.g. "a", "is", "what"). It is now a real 1-tuple.
BREAST_CANCER_INPUT = ("what is breast cancer",)
BREAST_CANCER_RESPONSE = ("Breast cancer is cancer that forms in the cells of the breasts. After skin cancer, breast cancer is the most common cancer diagnosed in women in the United States. Breast cancer can occur in both men and women, but it's far more common in women.")
def breastcancer(sentence):
    """Return the breast-cancer definition when *sentence* asks for it, else None."""
    # Match the whole question first (a multi-word phrase can never equal a
    # single word), then fall back to word-level matching.
    if sentence.lower().strip() in BREAST_CANCER_INPUT:
        return (BREAST_CANCER_RESPONSE)
    for word in sentence.split():
        if word.lower() in BREAST_CANCER_INPUT:
            return (BREAST_CANCER_RESPONSE)
# + id="nhzzIGpd0quO"
SYMPTOMS_INPUT = ("symptoms","tell me symptoms of breast cancer","what are the symptoms","the symptoms are?",'what are symptoms')
SYMPTOMS_RESPONSE = ("New lump in the breast or underarm,Thickening or swelling of part of the breast,Irritation or dimpling of breast skin,Redness or flaky skin in the nipple area or the breast,Pulling in of the nipple or pain in the nipple area,Nipple discharge other than breast milk, including blood,Any change in the size or the shape of the breast,Pain in any area of the breast")
def symptoms(sentence):
    """Return the symptoms answer when *sentence* asks about symptoms, else None.

    FIX: also match the whole lower-cased sentence against the trigger
    phrases; the original only compared single words, so the multi-word
    phrases in SYMPTOMS_INPUT could never match.
    """
    if sentence.lower().strip() in SYMPTOMS_INPUT:
        return (SYMPTOMS_RESPONSE)
    for word in sentence.split():
        if word.lower() in SYMPTOMS_INPUT:
            return (SYMPTOMS_RESPONSE)
# + id="7dtW2qw4161l"
RISK_INPUT = ("what are the risk factors","risk factors","risk factors for breast cancer are?")
RISK_RESPONSE = ("Not being physically active,Being overweight or obese after menopause,Taking hormones,Reproductive history,Drinking alcohol,Beginning your period at a younger age,Beginning menopause at an older age,Beginning menopause at an older age,Postmenopausal hormone therapy,")
def risk(sentence):
    """Return the risk-factors answer when *sentence* asks about them, else None.

    FIX: every entry of RISK_INPUT is multi-word, so the original
    word-by-word equality test could never fire; match the whole
    lower-cased sentence as well.
    """
    if sentence.lower().strip() in RISK_INPUT:
        return (RISK_RESPONSE)
    for word in sentence.split():
        if word.lower() in RISK_INPUT:
            return (RISK_RESPONSE)
# + id="Zg-q5mBJ207M"
REDUCE_RISK_INPUT = ('how to reduce risk','how to reduce risk of breast cancer','reduce risk','reduce the risk of breast cancer')
REDUCE_RISK_RESPONSE = ('Keep a healthy weight,Exercise regularly,Don’t drink alcohol, or limit alcoholic drinks,Breastfeed your children, if possible,Follow a healthy eating pattern,Think carefully about using hormone replacement therapy (HRT)')
def risk_reduce(sentence):
    """Return the risk-reduction answer when *sentence* asks for it, else None.

    FIX: every entry of REDUCE_RISK_INPUT is multi-word, so the original
    word-by-word equality test could never fire; match the whole
    lower-cased sentence as well.
    """
    if sentence.lower().strip() in REDUCE_RISK_INPUT:
        return (REDUCE_RISK_RESPONSE)
    for word in sentence.split():
        if word.lower() in REDUCE_RISK_INPUT:
            return (REDUCE_RISK_RESPONSE)
# + id="BEYyKZFT394c"
DIAGNOSE_INPUT = ('how to diagnose','how it is diagnosed?','how breast cancer is diagnosed','diagnose breast cancer')
DIAGNOSE_RESPONSE = ('Breast ultrasound,Diagnostic mammogram,Magnetic resonance imaging (MRI),Biopsy')
def diagnose(sentence):
    """Return the diagnosis answer when *sentence* asks about diagnosis, else None.

    FIX: every entry of DIAGNOSE_INPUT is multi-word, so the original
    word-by-word equality test could never fire; match the whole
    lower-cased sentence as well.
    """
    if sentence.lower().strip() in DIAGNOSE_INPUT:
        return (DIAGNOSE_RESPONSE)
    for word in sentence.split():
        if word.lower() in DIAGNOSE_INPUT:
            return (DIAGNOSE_RESPONSE)
# + id="Z5fossbN4zsr"
TREATMENT_INPUT = ('treatment','what are the treatments','treatments for breast cancer are?')
TREATMENT_RESPONSE = ('Surgery,Chemotherapy,Hormonal therapy,Biological therapy,Radiation therapy')
def treatments(sentence):
    """Return the treatments answer when *sentence* asks about them, else None.

    FIX: also match the whole lower-cased sentence so the multi-word
    trigger phrases (not just the single word 'treatment') can fire.
    """
    if sentence.lower().strip() in TREATMENT_INPUT:
        return (TREATMENT_RESPONSE)
    for word in sentence.split():
        if word.lower() in TREATMENT_INPUT:
            return (TREATMENT_RESPONSE)
# + id="Yi5jyG5B5idA"
KINDS_INPUT = ("types of breast cancer",'what are the types of breast cancer')
KINDS_RESPONSE =("Invasive ductal carcinoma,Invasive lobular carcinoma")
def kinds(sentence):
    """Return the types-of-breast-cancer answer when asked, else None.

    FIX: both entries of KINDS_INPUT are multi-word, so the original
    word-by-word equality test could never fire; match the whole
    lower-cased sentence as well.
    """
    if sentence.lower().strip() in KINDS_INPUT:
        return (KINDS_RESPONSE)
    for word in sentence.split():
        if word.lower() in KINDS_INPUT:
            return (KINDS_RESPONSE)
# + id="YtULq5vN6Brg"
WEBSITES_INPUT = ('websites i should visit','websites i should checkout','websites i should know about')
WEBSITES_RESPONSE = ('www.cancer.gov , www.cancer.org , www.breastfeeding.org , www.nationalbreastcancer.org , www.bcrf.org')
def websites(sentence):
    """Return the reference-websites answer when asked, else None.

    FIX: every entry of WEBSITES_INPUT is multi-word, so the original
    word-by-word equality test could never fire; match the whole
    lower-cased sentence as well.
    """
    if sentence.lower().strip() in WEBSITES_INPUT:
        return (WEBSITES_RESPONSE)
    for word in sentence.split():
        if word.lower() in WEBSITES_INPUT:
            return (WEBSITES_RESPONSE)
# + id="WN4tncaS8sjK"
# FIX: the original tuple only contained the misspelling "contibutors", so
# the correctly spelled word never matched.  The correct spelling is added
# (the misspelled entry is kept for backward compatibility).
CONTRIBUTORS_INPUT = ("contibutors","contributors","who are the contributors")
CONTRIBUTORS_RESPONSE = ("Mahi,Kushagra,Shruti,Riya,Vaibhavi")
def contributors(sentence):
    """Return the contributors list when asked, else None."""
    # Whole-sentence match lets the multi-word phrase fire; word-level
    # matching covers the single-word triggers.
    if sentence.lower().strip() in CONTRIBUTORS_INPUT:
        return (CONTRIBUTORS_RESPONSE)
    for word in sentence.split():
        if word.lower() in CONTRIBUTORS_INPUT:
            return (CONTRIBUTORS_RESPONSE)
# + id="X4ELmd80bMcJ"
INSPIRATIONAL_INPUTS = ("inspire me","im feeling low","im sad")
INSPIRATIONAL_RESPONSES = ("Don't lose hope.When the sun goes down, the stars come out.","The only way to make sense of change is to plunge into it flow with it...and join the dance.","Cancer changes your life, often for the better. You learn what's important, you learn to prioritize, and you learn not to waste your time. You tell people you love them.","You are loved You are wonderfully made. You are beautiful. You are a masterpiece. God has a great plan for you.","The wish for healing has always been half of health.","Faith is daring to go beyond what the eyes can see.","Just when the caterpillar thought the world was overit became a butterfly.","Faith is daring to go beyond what the eyes can see.","Tears are the sweat of champions","Hope will never be silent.","Cancer is WAY too serious to be taken seriously all the time.")
def inspirational(sentence):
    """Return a random inspirational quote when asked, else None.

    FIX: every entry of INSPIRATIONAL_INPUTS is multi-word, so the original
    word-by-word equality test could never fire; match the whole
    lower-cased sentence as well.
    """
    if sentence.lower().strip() in INSPIRATIONAL_INPUTS:
        return random.choice(INSPIRATIONAL_RESPONSES)
    for word in sentence.split():
        if word.lower() in INSPIRATIONAL_INPUTS:
            return random.choice(INSPIRATIONAL_RESPONSES)
# + id="XlYI_Jn9YwAp"
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# + id="XG8OzaQ8Y9sN"
def response(user_response):
    """Answer *user_response* by TF-IDF retrieval over the global corpus.

    Assumes the caller has already appended *user_response* as the last
    element of the global ``sent_tokens`` list, so it is the final row of
    the TF-IDF matrix.
    """
    vectorizer = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')
    tfidf = vectorizer.fit_transform(sent_tokens)
    # Cosine similarity of the user's sentence against every corpus sentence.
    vals = cosine_similarity(tfidf[-1], tfidf)
    # Index of the most similar sentence other than the query itself
    # (the query is trivially most similar to itself).
    idx = vals.argsort()[0][-2]
    flat = vals.flatten()
    flat.sort()
    req_tfidf = flat[-2]
    if req_tfidf == 0:
        # Nothing in the corpus overlaps with the query at all.
        return "I am sorry! I don't understand you"
    return sent_tokens[idx]
# + id="EarNvh8NZA7T" colab={"base_uri": "https://localhost:8080/"} outputId="c57c2077-4400-467c-c07c-b4b10432a764"
# Main chat loop: dispatch the user's input to the first keyword handler
# that produces a reply, otherwise fall back to TF-IDF retrieval over the
# corpus.  Type "bye" (or "thanks"/"thank you") to leave.
Continue = True
# FIX: the original banner said "queries about Chatbots" — a copy-paste
# leftover; this bot answers breast-cancer questions.
print("Meera: My name is Meera. I will answer your queries about breast cancer. If you want to exit, type Bye!")
# Handlers are tried in this order.  The original 12-level if/else ladder
# also checked risk_reduce twice; the duplicate (dead) check is removed.
handlers = [greeting, breastcancer, symptoms, risk, risk_reduce,
            diagnose, treatments, kinds, websites, contributors,
            inspirational]
while(Continue == True):
    user_response = input()
    user_response = user_response.lower()
    if(user_response != 'bye'):
        if(user_response == 'thanks' or user_response == 'thank you'):
            Continue = False
            print("Meera: You are welcome..")
        else:
            for handler in handlers:
                reply = handler(user_response)
                if reply is not None:
                    print("Meera: " + reply)
                    break
            else:
                # No keyword handler matched: answer from the corpus.  The
                # sentence is temporarily appended so the vectorizer can
                # compare it against every corpus sentence, then removed.
                sent_tokens.append(user_response)
                word_tokens = word_tokens + nltk.word_tokenize(user_response)
                final_words = list(set(word_tokens))
                print("Meera: ", end="")
                print(response(user_response))
                sent_tokens.remove(user_response)
    else:
        Continue = False
        print("Meera: Bye! take care..")
# + id="qXpBh__iA5ag"
| src/Sakhi/chatbot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# TensorFlow: 静态计算图
# -------------------------
#
# A fully-connected ReLU network with one hidden layer and no biases, trained to
# predict y from x by minimizing squared Euclidean distance.
#
# This implementation uses basic TensorFlow operations to set up a computational
# graph, then executes the graph many times to actually train the network.
#
# One of the main differences between TensorFlow and PyTorch is that TensorFlow
# uses static computational graphs while PyTorch uses dynamic computational
# graphs.
#
# In TensorFlow we first set up the computational graph, then execute the same
# graph many times.
#
#
# +
# NOTE: this example uses the TensorFlow 1.x static-graph API
# (tf.placeholder / tf.Session); under TensorFlow 2.x it must be run via
# tf.compat.v1 with eager execution disabled.
import tensorflow as tf
import numpy as np

# First we set up the computational graph:

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create placeholders for the input and target data; these will be filled
# with real data when we execute the graph.
x = tf.placeholder(tf.float32, shape=(None, D_in))
y = tf.placeholder(tf.float32, shape=(None, D_out))

# Create Variables for the weights and initialize them with random data.
# A TensorFlow Variable persists its value across executions of the graph.
w1 = tf.Variable(tf.random_normal((D_in, H)))
w2 = tf.Variable(tf.random_normal((H, D_out)))

# Forward pass: Compute the predicted y using operations on TensorFlow Tensors.
# Note that this code does not actually perform any numeric operations; it
# merely sets up the computational graph that we will later execute.
h = tf.matmul(x, w1)
h_relu = tf.maximum(h, tf.zeros(1))  # ReLU: elementwise max(h, 0)
y_pred = tf.matmul(h_relu, w2)

# Compute loss using operations on TensorFlow Tensors
loss = tf.reduce_sum((y - y_pred) ** 2.0)

# Compute gradient of the loss with respect to w1 and w2.
grad_w1, grad_w2 = tf.gradients(loss, [w1, w2])

# Update the weights using gradient descent. To actually update the weights
# we need to evaluate new_w1 and new_w2 when executing the graph. Note that
# in TensorFlow the act of updating the value of the weights is part of
# the computational graph; in PyTorch this happens outside the computational
# graph.
learning_rate = 1e-6
new_w1 = w1.assign(w1 - learning_rate * grad_w1)
new_w2 = w2.assign(w2 - learning_rate * grad_w2)

# Now we have built our computational graph, so we enter a TensorFlow session to
# actually execute the graph.
with tf.Session() as sess:
    # Run the graph once to initialize the Variables w1 and w2.
    sess.run(tf.global_variables_initializer())

    # Create numpy arrays holding the actual data for the inputs x and targets
    # y
    x_value = np.random.randn(N, D_in)
    y_value = np.random.randn(N, D_out)
    for _ in range(500):
        # Execute the graph many times. Each time it executes we want to bind
        # x_value to x and y_value to y, specified with the feed_dict argument.
        # Each time we execute the graph we want to compute the values for loss,
        # new_w1, and new_w2; the values of these Tensors are returned as numpy
        # arrays.
        loss_value, _, _ = sess.run([loss, new_w1, new_w2],
                                    feed_dict={x: x_value, y: y_value})
        print(loss_value)
| build/_downloads/f79471dee79dd5af2d4525fc64a34cfc/tf_two_layer_net.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Titanic
#
# Goal: predict survival on the Titanic
# It's a basic learning competition on the [ML platform Kaggle](https://www.kaggle.com/c/titanic), a simple introduction to machine learning concepts, specifically binary classification (survived / not survived).
# Here we are looking into how to apply Logistic Regression to the Titanic dataset.
# # 1. Collect and understand the data
#
# The data can be downloaded directly [from Kaggle](https://www.kaggle.com/c/titanic/data)
import pandas as pd
# get titanic training file as a DataFrame
# get titanic training file as a DataFrame
titanic = pd.read_csv("../datasets/titanic_train.csv")
titanic.shape
# preview the data
titanic.head()
# Variable Description
# ---
# Survived: Survived (1) or died (0); this is the target variable
# Pclass: Passenger's class (1st, 2nd or 3rd class)
# Name: Passenger's name
# Sex: Passenger's sex
# Age: Passenger's age
# SibSp: Number of siblings/spouses aboard
# Parch: Number of parents/children aboard
# Ticket: Ticket number
# Fare: Fare
# Cabin: Cabin
# Embarked: Port of embarkation
# Summary statistics for the numeric columns.
titanic.describe()
# Not all features are numeric:
titanic.info()
# # 2. Process the Data
# Categorical variables need to be transformed to numeric variables
# ### Transform the embarkment port
# There are three ports: C = Cherbourg, Q = Queenstown, S = Southampton
# One-hot encode the port of embarkation (C/Q/S) into three binary columns.
ports = pd.get_dummies(titanic.Embarked , prefix='Embarked')
ports.head()
# Now the feature Embarked (a category) has been transformed into 3 binary features, e.g. Embarked_C = 0 not embarked in Cherbourg, 1 = embarked in Cherbourg.
# Finally, the 3 new binary features substitute the original one in the data frame:
titanic = titanic.join(ports)
titanic.drop(['Embarked'], axis=1, inplace=True) # then drop the original column
# ### Transform the gender feature
# This is easier, being already a binary classification (male or female).
# This was 1912.
titanic.Sex = titanic.Sex.map({'male':0, 'female':1})
# ## Extract the target variable
y = titanic.Survived.copy() # copy “y” column values out
X = titanic.drop(['Survived'], axis=1) # then, drop y column
# ### Drop not so important features
# For the first model, we ignore some categorical features which will not add too much of a signal.
# Drop high-cardinality / identifier columns not used by the first model.
X.drop(['Cabin'], axis=1, inplace=True)
X.drop(['Ticket'], axis=1, inplace=True)
X.drop(['Name'], axis=1, inplace=True)
X.drop(['PassengerId'], axis=1, inplace=True)
X.info()
# All features are now numeric, ready for regression.
# But we have still a couple of processing to do.
# ## Check if there are any missing values
X.isnull().values.any()
# +
#X[pd.isnull(X).any(axis=1)]
# -
# True, there are missing values in the data (NaN) and a quick look at the data reveals that they are all in the Age feature.
# One possibility could be to remove the feature, another one is to fill the missing value with a fixed number or the average age.
X.Age.fillna(X.Age.mean(), inplace=True) # replace NaN with average age
X.isnull().values.any()
# Now all missing values have been removed.
# The logistic regression would otherwise not work with missing values.
# ## Split the dataset into training and validation
#
# The **training** set will be used to build the machine learning models. The model will be based on the features like passengers’ gender and class but also on the known survived flag.
#
# The **validation** set should be used to see how well the model performs on unseen data. For each passenger in the test set, I use the model trained to predict whether or not they survived the sinking of the Titanic, then will be compared with the actual survival flag.
from sklearn.model_selection import train_test_split
# 80% go into the training set, 20% into the validation set
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=7)
# # 3. Modelling
# ## Get a baseline
# A baseline is always useful to see if the model trained behaves significantly better than an easy to obtain baseline, such as a random guess or a simple heuristic like all and only female passengers survived. In this case, after quickly looking at the training dataset - where the survival outcome is present - I am going to use the following:
#
def simple_heuristic(titanicDF):
    """Baseline survival prediction with no learned model.

    A passenger is predicted to survive (1) when she is female, or when he
    is a first-class male under 18; everyone else is predicted to perish (0).
    Returns a list with one 0/1 prediction per row of *titanicDF*.
    """
    predictions = []
    for _, passenger in titanicDF.iterrows():
        is_female = passenger['Sex'] == 1
        is_first_class_minor = passenger['Age'] < 18 and passenger['Pclass'] == 1
        predictions.append(1 if (is_female or is_first_class_minor) else 0)
    return predictions
# Let's see how this simple algorithm will behave on the validation dataset and we will keep that number as our baseline:
simplePredictions = simple_heuristic(X_valid)
# Elementwise list-vs-Series comparison yields a boolean Series; summing it
# counts the correct predictions.
correct = sum(simplePredictions == y_valid)
print ("Baseline: ", correct/len(y_valid))
# Baseline: a simple algorithm predicts correctly 73% of validation cases.
# Now let's see if the model can do better.
# ## Logistic Regression
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train, y_train)
# # 4. Evaluate the model
# Mean accuracy on the training set, then on the held-out validation set.
model.score(X_train, y_train)
model.score(X_valid, y_valid)
# Two things:
# - the score on the training set is much better than on the validation set, an indication that could be overfitting and not being a general model, e.g. for all ship sinks.
# - the score on the validation set is better than the baseline, so it adds some value at a minimal cost (the logistic regression is not computationally expensive, at least not for smaller datasets).
# An advantage of logistic regression (e.g. against a neural network) is that it's easily interpreatble. It can be written as a math formula:
model.intercept_ # the fitted intercept
model.coef_ # the fitted coefficients
# Which means that the formula is:
#
# $$ \boldsymbol P(survive) = \frac{1}{1+e^{-logit}} $$
#
# where the logit is:
#
# $$ logit = \boldsymbol{\beta_{0} + \beta_{1}\cdot x_{1} + ... + \beta_{n}\cdot x_{n}}$$
#
# where $\beta_{0}$ is the model intercept and the other beta parameters are the model coefficients from above, each multiplied for the related feature:
#
# $$ logit = \boldsymbol{1.4224 - 0.9319 * Pclass + ... + 0.2228 * Embarked_S}$$
# # 5. Iterate on the model
# The model could be improved, for example transforming the excluded features above or creating new ones (e.g. I could extract titles from the names which could be another indication of the socio-economic status).
# A heat map of correlation may give us an understanding of which variables are important
titanic.corr()
| 01-Regression/LogisticRegressionSKL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Note Taking Tips (WIP)
# > Sharing my journey on what motivated me to start writing things down.
#
# - toc: true
# - hide: true
# - badges: true
# - comments: true
# - categories: [jupyter]
# - image: images/notes.jpg
# # About
# 
#
# # Other important creation tips
#
# Note taking
# - Morning dump
# - save your brain from stress
# - Roam Research
# - Readwise
| _notebooks/2021-01-01-Note-Taking-Heaven.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Fast brain decoding with random sampling and random projections #
#
# **<NAME>**, <NAME> and <NAME>
#
# [PARIETAL TEAM](https://team.inria.fr/parietal/), INRIA, CEA, University Paris-Saclay
#
# **Presented on:** the 6th International workshop on Pattern Recognition in Neuroimaging(PRNI) 2016. Trento, Italy
#
# [link to the paper](https://hal.inria.fr/hal-01313814/document)
# %matplotlib inline
import numpy as np
import time
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
from nilearn.input_data import NiftiMasker
# ## Testing on Haxby 2001, discriminating between faces and places ##
# +
# Fetching haxby dataset
from nilearn import datasets
data_files = datasets.fetch_haxby(n_subjects=1)
# Masker: smooth, standardize and flatten the 4D fMRI volumes to (n_samples, n_voxels).
masker = NiftiMasker(smoothing_fwhm=4, standardize=True, mask_strategy='epi',
                     memory='cache', memory_level=1)
# NOTE(review): np.recfromcsv is deprecated and removed in NumPy 2.0;
# pd.read_csv or np.genfromtxt(..., names=True) is the modern replacement.
labels = np.recfromcsv(data_files.session_target[0], delimiter=" ")
# Restrict to face and house conditions
target = labels['labels']
condition_mask = np.logical_or(target == b"face", target == b"house")
# Split data into train and test samples, using the chunks
condition_mask_train = np.logical_and(condition_mask, labels['chunks'] <= 6)
condition_mask_test = np.logical_and(condition_mask, labels['chunks'] > 6)
X_masked = masker.fit_transform(data_files['func'][0])
X_train = X_masked[condition_mask_train]
X_test = X_masked[condition_mask_test]
y_train = target[condition_mask_train]
y_test = target[condition_mask_test]
# Binarize the string labels to -1/+1 for the classifier.
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer(pos_label=1, neg_label=-1)
y_train = lb.fit_transform(y_train).ravel()
y_test = lb.transform(y_test).ravel()
# -
# ## Prediction using the whole brain (non-reduced) ##
# +
# Fit model on train data and predict on test data
from sklearn.linear_model import LogisticRegressionCV
clf = LogisticRegressionCV(Cs=10, penalty='l2')
ti = time.time()
clf.fit(X_train, y_train)
to_raw = time.time() - ti  # training time on the full (non-reduced) data
y_pred = clf.predict(X_test)
accuracy = (y_pred == y_test).mean() * 100.
# Project the fitted weight vector back into brain space for plotting.
raw_coef = masker.inverse_transform(clf.coef_)
print("classification accuracy : %g%%, time %.4fs" % (accuracy, to_raw))
# -
# ## Prediction on reduced data: adding Nystrom method ##
# +
from sklearn.kernel_approximation import Nystroem
class LinearNistroem(Nystroem):
    """Nystroem feature map restricted to the linear kernel, extended with
    an (approximate) inverse mapping back to the original voxel space.

    Parameters
    -----------
    n_components: int, the number of components; should be at most n_samples
    random_state: int, the random seed (optional)
    """
    def __init__(self, n_components=100, random_state=None):
        super(LinearNistroem, self).__init__(
            n_components=n_components, kernel='linear',
            random_state=random_state)

    def fit_transform(self, X, y=None):
        """Fit the sampled basis on X and return X projected onto it."""
        self.fit(X)
        return self.transform(X)

    def inverse_transform(self, X):
        # Map reduced coefficients back to the original feature space via
        # the fitted normalization matrix and the sampled components
        # (attributes set by Nystroem.fit).
        return X.dot(self.normalization_).dot(self.components_)
# +
# Reduce the data to 80 Nystroem components, then refit the same classifier.
nystroem = LinearNistroem(n_components=80)
X_train_nys = nystroem.fit_transform(X_train)
X_test_nys = nystroem.transform(X_test)
ti = time.time()
clf.fit(X_train_nys, y_train)
to_nys = time.time() - ti  # training time on the reduced data
y_pred = clf.predict(X_test_nys)
accuracy = (y_pred == y_test).mean() * 100.
# Bring the weights back to voxel space: invert the Nystroem map, then un-mask.
nys_coef = masker.inverse_transform(nystroem.inverse_transform(clf.coef_))
print("classification accuracy : %g%%, time %.4fs" % (accuracy, to_nys))
# -
# ## Correlation between non-reduced and Nystrom ##
# +
from nilearn.plotting import plot_stat_map
bg_img = data_files['anat'][0]
# Plot both weight maps on the subject's anatomy for visual comparison.
plot_stat_map(raw_coef, display_mode='yz', bg_img=bg_img, title=r'$non-reduced$', cut_coords=(-34, -16))
plot_stat_map(nys_coef, display_mode='yz', bg_img=bg_img, title=r'$Nystr\"om$', cut_coords=(-34, -16))
# Voxel-wise Pearson correlation between the two weight maps.
from scipy.stats import pearsonr
raw_masked = masker.transform(raw_coef).squeeze()
nys_masked = masker.transform(nys_coef).squeeze()
correlation = pearsonr(raw_masked, nys_masked)[0]
print("correlation %.4f" % correlation)
# -
# **Summary:**
# * Result is consistent: High correlation of the maps
# * Reducing the computation time: High speedup
| example/fast_decoding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
from skimage.filters import median
from skimage.exposure import equalize_adapthist
# Load the TIFF stack (one 2D slice per index).
path=r'C:\Users\zelmouaffek\Desktop\Prétraitement\train-volume_2.tif'
I=io.imread(path)
# # Median-filter denoising
J=median(I[50]) # To apply the filter to the whole stack, run J=median(I) instead
fig=plt.figure(figsize=(20, 20))
plt.subplot(1,2,1)
plt.imshow(I[50],cmap='gray')
plt.title('Image bruitée')
plt.subplot(1,2,2)
plt.imshow(J,cmap='gray')
plt.title("Image Filtré")
# # Contrast enhancement by adaptive histogram equalization (CLAHE)
J=equalize_adapthist(I[10]) # To apply it to the whole stack, run J=equalize_adapthist(J) instead
fig=plt.figure(figsize=(20, 20))
plt.subplot(1,2,1)
plt.imshow(I[10],cmap='gray')
plt.title('Image peu contrastée')
plt.subplot(1,2,2)
plt.imshow(J,cmap='gray')
plt.title("Image trés contrastée")
| Preprocessing/Denoise+Contrast.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%capture
# !pip install wikidataintegrator
# !pip install bibtexparser
import bs4
import taxon
import gui_widgets
from wikidataintegrator import wdi_core
import bibtexparser
import requests
import pandas as pd
import json
import ipywidgets as widgets
from IPython.display import IFrame, clear_output, HTML, Image
from ipywidgets import interact, interactive, fixed, interact_manual, Layout
import math
# +
def fetch_missing_wikipedia_articles(url):
    """Return iNaturalist observations of binomial taxa that appear to lack
    an English Wikipedia article (double-checked against Wikidata).

    Each result is a dict with the observation id, taxon id and taxon name.
    """
    observations = json.loads(requests.get(url).text)
    candidates = []
    for obs in observations["results"]:
        taxon_info = obs["taxon"]
        # Keep only binomial names (genus + species, i.e. exactly two words)
        # whose taxon has no Wikipedia article linked on iNaturalist.
        if len(taxon_info["name"].split(" ")) == 2 and taxon_info["wikipedia_url"] is None:
            candidates.append({
                "inat_obs_id": obs["id"],
                "inat_taxon_id": taxon_info["id"],
                "taxon_name": taxon_info["name"],
            })
    # De-duplicate the names (order preserved), then confirm via Wikidata.
    unique_names = []
    for cand in candidates:
        if cand["taxon_name"] not in unique_names:
            unique_names.append(cand["taxon_name"])
    confirmed = verify_wikidata(unique_names)
    return [cand for cand in candidates if cand["taxon_name"] in confirmed]
def verify_wikidata(taxon_names):
    """Return the subset of *taxon_names* whose Wikidata items have no
    English Wikipedia article (queried via SPARQL in chunks of 50).

    A name is kept when its aggregated enwiki article count is 0.
    """
    progress = widgets.IntProgress(
        value=1,
        min=0,
        # NOTE(review): this max is a float (len/50); `//` may be intended
        # to match the 50-item chunking below — confirm.
        max=len(taxon_names)/50,
        description='Wikidata:',
        bar_style='',  # 'success', 'info', 'warning', 'danger' or ''
        style={'bar_color': 'blue'},
        orientation='horizontal')
    display(progress)
    verified = []
    i = 1
    for chunks in [taxon_names[i:i + 50] for i in range(0, len(taxon_names), 50)]:
        # Count, per taxon name, the matching Wikidata items and the enwiki
        # articles reachable directly or through a basionym (P566) link.
        query = """
        SELECT DISTINCT ?taxon_name (COUNT(?item) AS ?item_count) (COUNT(?article) AS ?article_count) WHERE {{
        VALUES ?taxon_name {{{names}}}
        {{?item wdt:P225 ?taxon_name .}}
        UNION
        {{?item wdt:P225 ?taxon_name .
        ?article schema:about ?item ;
        schema:isPartOf <https://en.wikipedia.org/> .}}
        UNION
        {{?basionym wdt:P566 ?item ;
        wdt:P225 ?taxon_name .
        ?article schema:about ?item ;
        schema:isPartOf <https://en.wikipedia.org/> .}}
        UNION
        {{?basionym wdt:P566 ?item .
        ?item wdt:P225 ?taxon_name .
        ?article schema:about ?basionym ;
        schema:isPartOf <https://en.wikipedia.org/> .}}
        }} GROUP BY ?taxon_name
        """.format(names=" ".join('"{0}"'.format(w) for w in chunks))
        url = "https://query.wikidata.org/sparql?format=json&query="+query
        progress.value = i
        i += 1
        try:
            results = json.loads(requests.get(url).text)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.  Skip only ordinary failures
            # (network errors, invalid JSON) and move on to the next chunk.
            continue
        for result in results["results"]["bindings"]:
            if result["article_count"]["value"] == '0':
                verified.append(result["taxon_name"]["value"])
    return verified
def render_results(photos, url):
    """Fetch the remaining result pages for *url*, group photos by taxon,
    keep taxa without an English Wikipedia article (per verify_wikidata) and
    return a widget tree with a stub-creation button, labels and a photo
    gallery per taxon.
    """
    progress = widgets.IntProgress(
        value=1,
        min=0,
        max=math.ceil(photos["total_results"]/200)+1,
        description='iNaturalist:',
        bar_style='',  # 'success', 'info', 'warning', 'danger' or ''
        style={'bar_color': 'green'},
        orientation='horizontal')
    display(progress)
    # BUG FIX: the caller already passed in page 1 of the results; the
    # original loop started again at page 1, appending those observations
    # (and their photos) a second time.  Start at page 2 instead.
    for page in range(2, math.ceil(photos["total_results"]/200)+1):
        nextpageresult = json.loads(requests.get(url+"&page="+str(page)).text)
        progress.value = page+1
        for obs in nextpageresult["results"]:
            photos["results"].append(obs)
    # Group photo URLs by taxon id.
    table = dict()
    for result in photos["results"]:
        if result["taxon"]["id"] not in table.keys():
            table[result["taxon"]["id"]] = dict()
            table[result["taxon"]["id"]]["taxon_name"] = result["taxon"]["name"]
        for photo in result["observation_photos"]:
            if "photos" not in table[result["taxon"]["id"]].keys():
                table[result["taxon"]["id"]]["photos"] = []
            table[result["taxon"]["id"]]["photos"].append(photo["photo"]["url"])
    # Keep only taxa that really lack an enwiki article according to Wikidata.
    # (Loop variable renamed from `taxon` so it no longer shadows the
    # imported `taxon` module.)
    to_verify = []
    for taxon_id in table.keys():
        to_verify.append(table[taxon_id]['taxon_name'])
    verified = verify_wikidata(to_verify)
    result_rows = []
    for taxon_id in table.keys():
        if table[taxon_id]["taxon_name"] in verified:
            result_row = []
            stub_button = widgets.Button(
                description='WP stub for '+str(table[taxon_id]["taxon_name"]),
                disabled=False,
                button_style='',  # 'success', 'info', 'warning', 'danger' or ''
                tooltip='Click me',
                icon='check'  # (FontAwesome names without the `fa-` prefix)
            )
            stub_button.taxon_id = str(taxon_id)
            stub_button.on_click(get_data)
            result_row.append(stub_button)
            result_row.append(widgets.Label(value="id: {taxon_id}".format(taxon_id=str(taxon_id))))
            result_row.append(widgets.Label(value="name: {taxon_name}".format(taxon_name=str(table[taxon_id]["taxon_name"]))))
            photo_urls = []
            for photo in table[taxon_id]["photos"]:
                photo_urls.append(photo)
            result_row.append(widgets.HTML(gallery(photo_urls)))
            result_rows.append(widgets.VBox(result_row))
    return widgets.VBox(result_rows)
def fetch_by_user(username, license):
    """Render missing-article results for one user's research-grade observations."""
    url = ("https://api.inaturalist.org/v1/observations?photo_license={lic}"
           "&quality_grade=research&per_page=200&user_id={user}").format(lic=license, user=username)
    payload = json.loads(requests.get(url).text)
    return display(render_results(payload, url))
def fetch_by_taxon(taxon_id, license):
    """Render missing-article results for one taxon's research-grade observations."""
    url = ("https://api.inaturalist.org/v1/observations?photo_license=" + license
           + "&taxon_id=" + str(taxon_id)
           + "&quality_grade=research&per_page=200&subview=grid")
    payload = json.loads(requests.get(url).text)
    return display(render_results(payload, url))
def fetch_by_project(project_id, license):
    """Render missing-article results for one project's research-grade observations."""
    url = ("https://api.inaturalist.org/v1/observations?photo_license=" + license
           + "&project_id=" + str(project_id)
           + "&quality_grade=research&per_page=200&subview=grid")
    payload = json.loads(requests.get(url).text)
    return display(render_results(payload, url))
def fetch_by_country(country_code, license):
    """Render missing-article results for one place's research-grade observations."""
    url = ("https://api.inaturalist.org/v1/observations?photo_license=" + license
           + "&place_id=" + str(country_code)
           + "&quality_grade=research&per_page=200&subview=grid")
    payload = json.loads(requests.get(url).text)
    return display(render_results(payload, url))
def search_by_taxon(taxon_str, rank, license):
    """Autocomplete a taxon name at the given rank, then show its observations.

    Uses the first autocomplete hit; raises IndexError if nothing matches.
    """
    url = f"https://api.inaturalist.org/v1/taxa/autocomplete?q={taxon_str}&rank={rank}"
    results = json.loads(requests.get(url).text)
    top_hit = results["results"][0]
    display(fetch_by_taxon(top_hit["id"], license))
def search_species_place(place, license):
    """Autocomplete a place name, then show observations for the first match."""
    url = f"https://api.inaturalist.org/v1/places/autocomplete?q={place}"
    results = json.loads(requests.get(url).text)
    top_hit = results["results"][0]
    display(fetch_by_country(top_hit["id"], license))
def _src_from_data(data):
    """Return a base64 `data:` URI for raw image bytes.

    Falls through and returns None when the repr bundle contains no
    image/* entry.
    """
    bundles = Image(data=data)._repr_mimebundle_()
    for bundle in bundles:
        for mime, encoded in bundle.items():
            if mime.startswith('image/'):
                return f'data:{mime};base64,{encoded}'
def gallery(images, row_height='auto'):
    """Shows a set of images in a gallery that flexes with the width of the notebook.

    Parameters
    ----------
    images: list of str or bytes
        URLs or bytes of images to display
    row_height: str
        CSS height value to assign to all images. Set to 'auto' by default to show images
        with their native dimensions. Set to a value like '250px' to make all rows
        in the gallery equal height.

    Returns
    -------
    str
        An HTML fragment: a flexbox <div> containing one <figure> per image.
    """
    figures = []
    for image in images:
        # Raw bytes are inlined as a base64 data URI; strings are used
        # verbatim as URLs. (A dead, never-used `caption` local was removed.)
        src = _src_from_data(image) if isinstance(image, bytes) else image
        figures.append(f'''
            <figure style="margin: 5px !important;">
              <img src="{src}" style="height: {row_height}">
            </figure>
        ''')
    return f'''
        <div style="display: flex; flex-flow: row wrap; text-align: center;">
        {''.join(figures)}
        </div>
    '''
# +
# Top-level tab container: one Output pane per external data source.
tab1 = widgets.Output()
tab2 = widgets.Output()
tab3 = widgets.Output()
tab4 = widgets.Output()
tab5 = widgets.Output()
tab6 = widgets.Output()
tab = widgets.Tab(children=[tab1,tab2, tab3, tab4, tab5, tab6])
# iNaturalistTab = IFrame(src='https://www.inaturalist.org/home', width=1000, height=600)
tab.set_title(0, 'iNaturalist')
tab.set_title(1, 'GBIF')
tab.set_title(2, '(cc0, cc-by, cc-by-sa) iNaturalist images')
tab.set_title(3, 'BHL')
tab.set_title(4, 'Commons')
tab.set_title(5, 'Wikipedia')
# Start the iNaturalist tab with a clean slate.
with tab1:
    clear_output()
def paste_commons(commons_file_name):
    """Print a Wikipedia stub for the currently loaded taxon into the Wikipedia tab.

    Relies on the module-level `data` object populated by get_data(); returns
    the Commons file name unchanged so the interact widget keeps showing it.
    """
    wiki_title = data.inaturalist_data[0]["name"].replace(" ", "_")
    with tab6:
        print("https://en.wikipedia.org/wiki/" + wiki_title)
        print("=========================")
        print(data.create_wikipedia_stub(infobox_image=commons_file_name))
    return commons_file_name
def get_data(b):
    """Click handler for the per-taxon stub buttons.

    Reads the taxon id stashed on the clicked button (b.taxon_id), loads
    external data into the module-level `data` object, fills the GBIF /
    licensed-images / BHL / Commons tabs, and returns an HTML widget
    summarizing the taxon itself.
    """
    global data
    data = taxon.external_data(inaturalist_id=b.taxon_id)
    # Summary card: default photo plus basic taxonomy.
    html = "<table><tr><td><img src='"+data.inaturalist_data[0]['default_photo']['medium_url']+"'><br>"+data.inaturalist_data[0]['default_photo']['attribution']+"</td>"
    html += "<td>"
    html += "stub-type: "+data.inaturalist_data[0]["iconic_taxon_name"]
    html += "<br>iNaturalist taxon id: "+ str(data.inaturalist_data[0]["id"])
    html += "<br>name: "+data.inaturalist_data[0]["name"]
    # BUG FIX: the key was misspelled "preferrd_common_name", so the common
    # name line was never rendered; check the same key that is read below.
    if "preferred_common_name" in data.inaturalist_data[0].keys():
        html += "<br>common name: "+data.inaturalist_data[0]["preferred_common_name"]
    html += "<br>rank: "+data.inaturalist_data[0]["rank"]
    html += "<br>parent id: "+str(data.inaturalist_parent_data[0]["id"])
    html += "<br>parent name: "+data.inaturalist_parent_data[0]["name"]
    html += "<br>parent rank: "+data.inaturalist_parent_data[0]["rank"]
    html += "</td></tr></table>"
    output_widget = widgets.HTML(value=html)
    # GBIF tab: dump every key/value pair of the GBIF record as a table.
    with tab2:
        clear_output()
        html2 = "<table>"
        for key in data.gbif_data.keys():
            html2 += "<tr><td>{}</td><td>{}</td></tr>".format(key, data.gbif_data[key])
        html2 += "</table>"
        gbif_output = widgets.HTML(value=html2)
        display(gbif_output)
    # Licensed-images tab: research-grade photos reusable on Wikipedia,
    # laid out five per table row.
    with tab3:
        clear_output()
        url = "https://api.inaturalist.org/v1/observations?photo_license=cc0,cc-by,cc-by-sa&quality_grade=research&taxon_id="+b.taxon_id
        photos = json.loads(requests.get(url).text)
        i = 0
        html = "<h1>images in iNaturalist with a license allowing reuse in Wikipedia (cc0, cc-by, cc-by-sa)<table><tr>"
        for result in photos["results"]:
            for photo in result["observation_photos"]:
                i += 1
                html += "<td><img src='"+photo['photo']['url'].replace("square", "medium")+"'></td>"
                if i % 5 == 0:
                    html += "</tr><tr>"
        html += "</tr></table>"
        display(HTML(html))
    # BHL tab: collect the union of keys across all references so the
    # DataFrame has a stable column set, then fill it row by row.
    with tab4:
        clear_output()
        bhlurl = "https://www.biodiversitylibrary.org/name/"+data.inaturalist_data[0]["name"].replace(" ", "_")
        print("source: ", bhlurl)
        fields = []
        for entry in data.bhl_references:
            for key in entry.keys():
                if key not in fields:
                    fields.append(key)
        df = pd.DataFrame(columns= fields)
        for i in range(len(data.bhl_references)):
            row = dict()
            for key in fields:
                if key not in data.bhl_references[i].keys():
                    row[key]=None
                else:
                    row[key]=data.bhl_references[i][key]
            df.loc[i] = row
        display(df)
    # Commons tab: look up a Commons sitelink via SPARQL; offer a
    # category-creation link when none exists yet.
    with tab5:
        clear_output()
        commons_query = """
SELECT * WHERE {{?commons schema:about <{taxon}> ;
schema:isPartOf <https://commons.wikimedia.org/> .
}}""".format(taxon = data.wikidata["main_rank"].loc[0]["taxon"])
        commons_query_result = wdi_core.WDItemEngine.execute_sparql_query(commons_query, as_dataframe=True)
        if len(commons_query_result) == 0:
            html5 = "<a href = 'https://commons.wikimedia.org/w/index.php?title=Category:"+data.inaturalist_data[0]["name"].replace(" ", "_")+"&action=edit'>create commons category</a><br>"
            html5 += "[[Category:"+data.inaturalist_data[0]["name"].replace(" ", "|")+"]]"
        else:
            html5 = "<a href = 'https://commons.wikimedia.org/wiki/Category:"+data.inaturalist_data[0]["name"].replace(" ", "_")+"' target='_new'>"+data.inaturalist_data[0]["name"].replace(" ", "_")+"</a><br>"
        commons_output = widgets.HTML(value=html5)
        # BUG FIX: the customized manual-interact (with the renamed button)
        # was built but the plain interact_manual was called, so the custom
        # label never appeared; use the configured variant.
        my_interact_manual = interact_manual.options(manual_name="find missing Wikipedia article")
        data.selected_commons = my_interact_manual(paste_commons, commons_file_name="")
        display(commons_output)
    return output_widget
# Sub-tabs inside the iNaturalist tab: four different search entry points.
tab1tab1 = widgets.Output()
tab1tab2 = widgets.Output()
tab1tab3 = widgets.Output()
tab1tab4 = widgets.Output()
tab1tab = widgets.Tab(children=[tab1tab1,tab1tab2,tab1tab3,tab1tab4])
tab1tab.set_title(0, 'search by taxon')
tab1tab.set_title(1, 'search by user')
tab1tab.set_title(2, 'search by country')
tab1tab.set_title(3, 'search by project')
# Each sub-tab gets an interact_manual form wired to the matching fetcher;
# the license dropdown defaults to the Wikipedia-compatible license set.
with tab1tab1:
    interact_manual(search_by_taxon, taxon_str='', rank=["genus", "family", "order"], license=["cc0,cc-by,cc-by-sa", "cc0", "cc-by", "cc-by-sa"])
with tab1tab2:
    interact_manual(fetch_by_user, username='', license=["cc0,cc-by,cc-by-sa", "cc0", "cc-by", "cc-by-sa"])
with tab1tab3:
    interact_manual(search_species_place, place='', license=["cc0,cc-by,cc-by-sa", "cc0", "cc-by", "cc-by-sa"])
with tab1tab4:
    interact_manual(fetch_by_project, project_id='', license=["cc0,cc-by,cc-by-sa", "cc0", "cc-by", "cc-by-sa"])
display(tab1tab)
# Module-level holder filled by get_data() when a stub button is clicked.
data = None
#taxon_window = gui_widgets.interact_manual(get_data, taxon_id="")
display(tab)
# -
| stub_maker (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JimKing100/DS-Unit-2-Kaggle-Challenge/blob/master/Kaggle_Challenge_Sprint_Study_Guide.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="ASwd-Y2Y6V5k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="fbc0e314-d5b9-484a-ef36-f03b03fe12b7"
import os, sys
# True when running inside Google Colab (module injected by the Colab runtime).
in_colab = 'google.colab' in sys.modules
# Pull files from Github repo
os.chdir('/content')
# !git init .
# !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
# !git pull origin master
# Install required python packages
# !pip install -r requirements.txt
# Change into directory for module
os.chdir('module3')
# + [markdown] id="ipz9KCMo13_3" colab_type="text"
# ### Baselines for Classification - Example 1
#
# + id="poF9UIiU0k5B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="24bef2ae-4255-4058-c22a-46f281380368"
import pandas as pd
# UCI blood-transfusion dataset; the assert guards against an upstream change.
donors = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/blood-transfusion/transfusion.data')
assert donors.shape == (748,5)
# Replace the verbose original headers with snake_case names.
donors = donors.rename(columns={
    'Recency (months)': 'months_since_last_donation',
    'Frequency (times)': 'number_of_donations',
    'Monetary (c.c. blood)': 'total_volume_donated',
    'Time (months)': 'months_since_first_donation',
    'whether he/she donated blood in March 2007': 'made_donation_in_march_2007'
})
donors.head()
# + id="YvcgqcE02Fxj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="f4402d71-f763-4141-c874-4ce013bd87c0"
# Per-column missing-value counts (sanity check before modeling).
donors.isnull().sum()
# + id="yraidZKq2IDy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="5f8462d2-0520-420f-c7d3-bba5aac75372"
# Majority-class baseline: always guessing the majority class is correct
# ~76% of the time on this target.
y_train = donors['made_donation_in_march_2007']
y_train.value_counts(normalize=True)
# + id="iwxTEkrW2TMD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5fe5d898-7276-4aac-ed18-e2d8629c31e1"
# Use the first value which happens to be the majority class
majority_class = y_train.mode()[0]
# Predict that constant for every row.
y_pred = [majority_class] * len(y_train)
print(len(y_pred))
# + id="vLpfJI0H2i7-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2c8071fd-224a-49f6-8762-925f1f144ffe"
# Accuracy of majority class baseline = frequency of the majority class
from sklearn.metrics import accuracy_score
accuracy_score(y_train, y_pred)
# + [markdown] id="X6mxWvtA59_x" colab_type="text"
# ### Split Data - Time Based
# + id="vbWjfVqI6FMe" colab_type="code" colab={}
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
import numpy as np
import pandas as pd
# Read New York City apartment rental listing data
df = pd.read_csv('../data/renthop-nyc.csv')
assert df.shape == (49352, 34)
# Remove the most extreme 1% prices,
# the most extreme .1% latitudes, &
# the most extreme .1% longitudes
df = df[(df['price'] >= np.percentile(df['price'], 0.5)) &
        (df['price'] <= np.percentile(df['price'], 99.5)) &
        (df['latitude'] >= np.percentile(df['latitude'], 0.05)) &
        (df['latitude'] < np.percentile(df['latitude'], 99.95)) &
        (df['longitude'] >= np.percentile(df['longitude'], 0.05)) &
        (df['longitude'] <= np.percentile(df['longitude'], 99.95))]
# Do train/test split
# Use data from April & May 2016 to train
# Use data from June 2016 to test
df['created'] = pd.to_datetime(df['created'], infer_datetime_format=True)
cutoff = pd.to_datetime('2016-06-01')
train = df[df.created < cutoff]
test = df[df.created >= cutoff]
# Wrangle train & test sets in the same way
def engineer_features(df):
    """Add engineered listing features and drop the raw 'created' column.

    Works on a copy, so the caller's frame is never mutated. Adds boolean,
    count, and date-derived columns used by the downstream models.
    """
    out = df.copy()  # avoid SettingWithCopyWarning / caller mutation
    # Normalize the description and derive presence/length features.
    desc = out['description'].str.strip().fillna('')
    out['description'] = desc
    out['has_description'] = desc != ''
    out['description_length'] = desc.str.len()
    # Count how many amenity flags each listing has.
    perk_cols = ['elevator', 'cats_allowed', 'hardwood_floors', 'dogs_allowed',
                 'doorman', 'dishwasher', 'no_fee', 'laundry_in_building',
                 'fitness_center', 'pre-war', 'laundry_in_unit', 'roof_deck',
                 'outdoor_space', 'dining_room', 'high_speed_internet', 'balcony',
                 'swimming_pool', 'new_construction', 'exclusive', 'terrace',
                 'loft', 'garden_patio', 'common_outdoor_space',
                 'wheelchair_access']
    out['perk_count'] = out[perk_cols].sum(axis=1)
    # Pet policy combinations.
    cats = out['cats_allowed'] == 1
    dogs = out['dogs_allowed'] == 1
    out['cats_or_dogs'] = cats | dogs
    out['cats_and_dogs'] = cats & dogs
    # Total room count and days elapsed since the start of 2016.
    out['rooms'] = out['bedrooms'] + out['bathrooms']
    out['days'] = (out['created'] - pd.to_datetime('2016-01-01')).dt.days
    return out.drop(columns='created')
# Apply identical feature engineering to both splits.
train = engineer_features(train)
test = engineer_features(test)
# + [markdown] id="v1k2TJL_7vAy" colab_type="text"
# ### Validate the Model
# + id="Qaps9dr_JCip" colab_type="code" colab={}
# + [markdown] id="Bjlixdje74QH" colab_type="text"
# ### Train/Validate/Test Split - Create train, val from train
# + id="NRYyrq9q73KK" colab_type="code" colab={}
#from sklearn.model_selection import train_test_split
# Merge train_features.csv & train_labels.csv
#train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
# pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
#test = pd.read_csv('../data/tanzania/test_features.csv')
#sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# Split train into train & val
#train, val = train_test_split(train, train_size=0.80, test_size=0.20,
# stratify=train['status_group'], random_state=42)
# + [markdown] id="mUQQbw3N7_AX" colab_type="text"
# ### Cross-Validation - Linear Model SelectKBest
# + id="v4_jRBj78Eq1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="d6bc9edc-146e-41c2-d0e9-10763df8afdd"
import category_encoders as ce
import numpy as np
from sklearn.feature_selection import f_regression, SelectKBest
from sklearn.impute import SimpleImputer
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
target = 'price'
# Drop free-text / high-cardinality columns before one-hot encoding.
high_cardinality = ['display_address', 'street_address', 'description']
features = train.columns.drop([target] + high_cardinality)
X_train = train[features]
y_train = train[target]
# Linear baseline: one-hot -> impute -> scale -> keep the 20 best features -> Ridge.
pipeline = make_pipeline(
    ce.OneHotEncoder(use_cat_names=True),
    SimpleImputer(strategy='mean'),
    StandardScaler(),
    SelectKBest(f_regression, k=20),
    Ridge(alpha=1.0)
)
k = 3
# sklearn's neg_mean_absolute_error is negated, hence the minus signs below.
scores = cross_val_score(pipeline, X_train, y_train, cv=k,
                         scoring='neg_mean_absolute_error')
print(f'MAE for {k} folds:', -scores)
print('Mean of Scores: ', -scores.mean())
print('Standard Deviation of Scores: ', scores.std())
# NOTE(review): this is the coefficient of variation (std/mean), despite the label.
print('Absolute Scores:', abs(scores.std()/scores.mean()))
# + [markdown] id="WP-HFJQn-GOo" colab_type="text"
# ### Cross-Validation - RandomForest
#
# + id="neNQFbgG-MxV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="d9fd802c-3d01-4919-e8f5-3ca55c38b893"
from sklearn.ensemble import RandomForestRegressor
# Tree model can use the high-cardinality columns via target encoding.
features = train.columns.drop(target)
X_train = train[features]
y_train = train[target]
pipeline = make_pipeline(
    ce.TargetEncoder(min_samples_leaf=1, smoothing=1),
    SimpleImputer(strategy='median'),
    RandomForestRegressor(n_estimators=100, n_jobs=-1, random_state=42)
)
k = 3
scores = cross_val_score(pipeline, X_train, y_train, cv=k,
                         scoring='neg_mean_absolute_error')
print(f'MAE for {k} folds:', -scores)
print('Mean of Scores: ', -scores.mean())
print('Standard Deviation of Scores: ', scores.std())
# NOTE(review): coefficient of variation (std/mean), despite the label.
print('Absolute Scores:', abs(scores.std()/scores.mean()))
# + [markdown] id="3qHtb13l8HdP" colab_type="text"
# ### Use Pipeline to Encode Categoricals and Fit A RandomForest
# + id="hj_LufYk8W9U" colab_type="code" colab={}
# Encode categoricals and fit a RandomForest in one pipeline (defined here,
# fit in the next cell).
features = train.columns.drop(target)
X_train = train[features]
y_train = train[target]
pipeline = make_pipeline(
    ce.TargetEncoder(),
    SimpleImputer(),
    RandomForestRegressor(random_state=42)
)
# + [markdown] id="Fdb_XYi78ZDS" colab_type="text"
# ### Get Model's Validation Accuracy and Test Accuracy
# + id="ViX0zpzk8ifR" colab_type="code" colab={}
pipeline.fit(X_train, y_train)
# NOTE(review): X_val / y_val are never defined in this notebook (the
# train/val split cell above is fully commented out), so this line raises
# NameError unless that split is restored first.
print ('Validation Accuracy', pipeline.score(X_val, y_val))
# + [markdown] id="tXnlxWPD8jIk" colab_type="text"
# ### Confusion Matrix for Binary Classification
#
# + id="t2KwZXWA8uaK" colab_type="code" colab={}
| Kaggle_Challenge_Sprint_Study_Guide.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import random
import copy
import logging
import sys
# from run_tests_201204 import *
import os
import sys
import importlib
from collections import defaultdict
sys.path.insert(0, '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc')
from tools_pattern import get_eucledean_dist
import compress_pickle
import my_plot
from my_plot import MyPlotData, my_box_plot
import seaborn as sns
# Identifiers for this plotting script and the batch that produced the data.
script_n = 'plot_210626_gt_grc_pcts'
data_script = 'batch_210623_dim_gt_grc_pct'
db_path = '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/dimensionality_sim2/' \
    f'{data_script}/'
scaled_noise = 0
core_noise = 0
# Network size used in the simulations (encoded in the result filenames).
n_mfs = 497
n_grcs = 1847
pattern_type = 'binary'
# Load one pickled result set per (model, GrC-subsample-scale) combination.
db = {}
for model in ['observed', 'local_random', 'global_random']:
    for scale in [1.0, .5]:
        model_name = f'{model}_{scale}'
        db[model_name] = compress_pickle.load(
            db_path + \
            f'{data_script}_{model}_{pattern_type}_{n_grcs}_{n_mfs}_'
            f'scale_{scale}_'
            f'0.3_512_40.gz')
# +
# Observed connectome vs a 50% GrC subsample: normalized dimensionality
# (1 / GrC population correlation, scaled by 72) as a function of noise.
name_map = {
    'observed_1.0': "Observed",
    'observed_0.5': "50% subsampled",
}
palette = {}  # unused here; seaborn default colors apply

mpd = MyPlotData()
for model_name in ('observed_1.0', 'observed_0.5'):
    trials = db[model_name][0]  # first tuple element holds the per-trial dicts
    for trial in trials:
        for noise, res in trial.items():
            inv_corr_dim = 1 / res['grc_pop_corr']
            mpd.add_data_point(
                model=name_map[model_name],
                noise=noise * 100,
                grc_dim=res['grc_dim'],
                grc_dim_norm=res['grc_dim'] / 79,
                grc_dim2=inv_corr_dim,
                grc_dim2_norm=inv_corr_dim / 72,
                grc_by_mf_dim=res['grc_dim'] / res['mf_dim'],
                num_grcs=res['num_grcs'],
                num_mfs=res['num_mfs'],
                voi=res['voi'],
                grc_pop_corr=res['grc_pop_corr'],
                binary_similarity=res['binary_similarity'],
                hamming_distance=res['hamming_distance'],
                normalized_mse=res['normalized_mse'],
            )

def custom_legend_fn(plt):
    # Bottom-right, frameless legend.
    plt.legend(loc='lower right', frameon=False, fontsize=13)

importlib.reload(my_plot); my_plot.my_relplot(
    mpd,
    x='noise',
    y='grc_dim2_norm',
    hue='model',
    context='paper',
    linewidth=2,
    width=4,
    height=3,
    y_axis_label='Norm. Dim. ($x$)',
    x_axis_label='GrC (%)',
    save_filename=f'{script_n}_fig.svg',
    show=True,
    custom_legend_fn=custom_legend_fn,
)
# +
# Observed connectome vs a locally-random wiring model, raw grc_dim
# normalized by 81; several intermediate noise levels are excluded.
name_map = {
    'observed_1.0': "Observed",
    'local_random_1.0': "Random",
}
palette = {}  # unused here; seaborn default colors apply

mpd = MyPlotData()
for model_name in ('local_random_1.0', 'observed_1.0'):
    trials = db[model_name][0]  # first tuple element holds the per-trial dicts
    for trial in trials:
        for noise, res in trial.items():
            if noise in [.25, .35, .40, .5, .70]:
                continue  # thin out the x axis
            inv_corr_dim = 1 / res['grc_pop_corr']
            mpd.add_data_point(
                model=name_map[model_name],
                noise=noise * 100,
                grc_dim=res['grc_dim'],
                grc_dim_norm=res['grc_dim'] / 81,
                grc_dim2=inv_corr_dim,
                grc_dim2_norm=inv_corr_dim / 72,
                grc_by_mf_dim=res['grc_dim'] / res['mf_dim'],
                num_grcs=res['num_grcs'],
                num_mfs=res['num_mfs'],
                voi=res['voi'],
                grc_pop_corr=res['grc_pop_corr'],
                binary_similarity=res['binary_similarity'],
                hamming_distance=res['hamming_distance'],
                normalized_mse=res['normalized_mse'],
            )

def custom_legend_fn(plt):
    # Bottom-right, frameless legend.
    plt.legend(loc='lower right', frameon=False, fontsize=13)

importlib.reload(my_plot); my_plot.my_relplot(
    mpd,
    x='noise',
    y='grc_dim_norm',
    hue='model',
    context='paper',
    linewidth=2,
    width=4,
    height=3,
    ylim=[.5, None],
    y_axis_label='Norm. Dim. ($x$)',
    x_axis_label='GrC (%)',
    show=True,
    custom_legend_fn=custom_legend_fn,
)
# +
# Reconstructed connectome only, plotted against the *removed* GrC fraction
# (inv_noise); grc_dim normalized by 79.75; saved as the "decrease" figure.
name_map = {
    'observed_1.0': "Reconstructed",
    'local_random_1.0': "Random",
}
palette = {
    'Reconstructed': 'black'
}

mpd = MyPlotData()
for model_name in ('observed_1.0',):
    trials = db[model_name][0]  # first tuple element holds the per-trial dicts
    for trial in trials:
        for noise, res in trial.items():
            if noise in [.25, .35, .40, .5, .70]:
                continue  # thin out the x axis
            inv_corr_dim = 1 / res['grc_pop_corr']
            mpd.add_data_point(
                model=name_map[model_name],
                noise=noise * 100,
                inv_noise=(1 - noise) * 100,
                grc_dim=res['grc_dim'],
                grc_dim_norm=res['grc_dim'] / 81,
                grc_dim_norm2=res['grc_dim'] / 79.75,
                grc_dim2=inv_corr_dim,
                grc_dim2_norm=inv_corr_dim / 72,
                grc_by_mf_dim=res['grc_dim'] / res['mf_dim'],
                num_grcs=res['num_grcs'],
                num_mfs=res['num_mfs'],
                voi=res['voi'],
                grc_pop_corr=res['grc_pop_corr'],
                binary_similarity=res['binary_similarity'],
                hamming_distance=res['hamming_distance'],
                normalized_mse=res['normalized_mse'],
            )

def custom_legend_fn(plt):
    # Frameless legend anchored just inside the lower-right corner.
    plt.legend(loc='lower right', frameon=False, fontsize=13, bbox_to_anchor=(.9, 0))

importlib.reload(my_plot); my_plot.my_relplot(
    mpd,
    x='inv_noise',
    y='grc_dim_norm2',
    hue='model',
    context='paper',
    palette=palette,
    linewidth=2,
    width=4,
    height=3,
    ylim=[.55, None],
    xlim=[None, 100],
    y_axis_label='Normalized\nDimensionality ($x$)',
    x_axis_label='Removed GrCs (%)',
    save_filename=f'{script_n}_fig_decrease.svg',
    show=True,
    custom_legend_fn=custom_legend_fn,
)
# -
# Peek at the raw records behind the last plot.
mpd.data
# +
# Same data as the first figure (observed vs 50% subsampled) but rendered
# at 'talk' scale with a larger canvas and a clamped y axis.
name_map = {
    'observed_1.0': "Observed",
    'observed_0.5': "50% subsampled",
}
palette = {}  # unused here; seaborn default colors apply

mpd = MyPlotData()
for model_name in ('observed_1.0', 'observed_0.5'):
    trials = db[model_name][0]  # first tuple element holds the per-trial dicts
    for trial in trials:
        for noise, res in trial.items():
            inv_corr_dim = 1 / res['grc_pop_corr']
            mpd.add_data_point(
                model=name_map[model_name],
                noise=noise * 100,
                grc_dim=res['grc_dim'],
                grc_dim_norm=res['grc_dim'] / 79,
                grc_dim2=inv_corr_dim,
                grc_dim2_norm=inv_corr_dim / 72,
                grc_by_mf_dim=res['grc_dim'] / res['mf_dim'],
                num_grcs=res['num_grcs'],
                num_mfs=res['num_mfs'],
                voi=res['voi'],
                grc_pop_corr=res['grc_pop_corr'],
                binary_similarity=res['binary_similarity'],
                hamming_distance=res['hamming_distance'],
                normalized_mse=res['normalized_mse'],
            )

def custom_legend_fn(plt):
    # Bottom-right, frameless legend.
    plt.legend(loc='lower right', frameon=False, fontsize=13)

importlib.reload(my_plot); my_plot.my_relplot(
    mpd,
    x='noise',
    y='grc_dim2_norm',
    hue='model',
    context='talk',
    linewidth=2,
    width=6,
    height=4,
    ylim=[.5, None],
    y_axis_label='Norm. Dim. ($x$)',
    x_axis_label='GrC (%)',
    save_filename=f'{script_n}_fig_plus.svg',
    show=True,
    custom_legend_fn=custom_legend_fn,
)
# +
# Observed connectome alone (no comparison series); saved as "_observed".
name_map = {
    'observed_1.0': "Observed",
    'observed_0.5': "50% subsampled",
}
palette = {}  # unused here; seaborn default colors apply

mpd = MyPlotData()
for model_name in ('observed_1.0',):
    trials = db[model_name][0]  # first tuple element holds the per-trial dicts
    for trial in trials:
        for noise, res in trial.items():
            inv_corr_dim = 1 / res['grc_pop_corr']
            mpd.add_data_point(
                model=name_map[model_name],
                noise=noise * 100,
                grc_dim=res['grc_dim'],
                grc_dim_norm=res['grc_dim'] / 79,
                grc_dim2=inv_corr_dim,
                grc_dim2_norm=inv_corr_dim / 72,
                grc_by_mf_dim=res['grc_dim'] / res['mf_dim'],
                num_grcs=res['num_grcs'],
                num_mfs=res['num_mfs'],
                voi=res['voi'],
                grc_pop_corr=res['grc_pop_corr'],
                binary_similarity=res['binary_similarity'],
                hamming_distance=res['hamming_distance'],
                normalized_mse=res['normalized_mse'],
            )

def custom_legend_fn(plt):
    # Bottom-right, frameless legend.
    plt.legend(loc='lower right', frameon=False, fontsize=13)

importlib.reload(my_plot); my_plot.my_relplot(
    mpd,
    x='noise',
    y='grc_dim2_norm',
    hue='model',
    context='paper',
    linewidth=2,
    width=4,
    height=3,
    y_axis_label='Norm. Dim. ($x$)',
    x_axis_label='GrC (%)',
    save_filename=f'{script_n}_observed.svg',
    show=True,
    custom_legend_fn=custom_legend_fn,
)
# +
# Local-random wiring model at full vs 50% GrC scale. The series are keyed
# by the raw model_name strings (name_map/palette here are kept for parity
# with the sibling cells but not applied to the plot).
name_map = {
    'scaleup4': "Observed",
    'global_random': "Global Random",
    'random': "Global Random",
    'local_random': "Local Random",
}
palette = {
    name_map['scaleup4']: sns.color_palette()[0],
    name_map['global_random']: sns.color_palette()[1],
    name_map['random']: sns.color_palette()[1],
    name_map['local_random']: sns.color_palette()[2],
}

mpd = MyPlotData()
for model_name in ('local_random_1.0', 'local_random_0.5'):
    trials = db[model_name][0]  # first tuple element holds the per-trial dicts
    for trial in trials:
        for noise, res in trial.items():
            inv_corr_dim = 1 / res['grc_pop_corr']
            mpd.add_data_point(
                model=model_name,
                noise=noise * 100,
                grc_dim=res['grc_dim'],
                grc_dim_norm=res['grc_dim'] / 79,
                grc_dim2=inv_corr_dim,
                grc_dim2_norm=inv_corr_dim / 72,
                grc_by_mf_dim=res['grc_dim'] / res['mf_dim'],
                num_grcs=res['num_grcs'],
                num_mfs=res['num_mfs'],
                voi=res['voi'],
                grc_pop_corr=res['grc_pop_corr'],
                binary_similarity=res['binary_similarity'],
                hamming_distance=res['hamming_distance'],
                normalized_mse=res['normalized_mse'],
            )

importlib.reload(my_plot); my_plot.my_relplot(
    mpd,
    x='noise',
    y='grc_dim2_norm',
    hue='model',
    context='paper',
    linewidth=1,
    width=5.5,
    y_axis_label='Norm. Dim. ($x$)',
    x_axis_label='GrC (%)',
    save_filename=f'{script_n}_local.svg',
    show=True,
)
# +
# Global-random wiring model at full vs 50% GrC scale; same layout as the
# local-random cell above, saved as "_global".
name_map = {
    'scaleup4': "Observed",
    'global_random': "Global Random",
    'random': "Global Random",
    'local_random': "Local Random",
}
palette = {
    name_map['scaleup4']: sns.color_palette()[0],
    name_map['global_random']: sns.color_palette()[1],
    name_map['random']: sns.color_palette()[1],
    name_map['local_random']: sns.color_palette()[2],
}

mpd = MyPlotData()
for model_name in ('global_random_1.0', 'global_random_0.5'):
    trials = db[model_name][0]  # first tuple element holds the per-trial dicts
    for trial in trials:
        for noise, res in trial.items():
            inv_corr_dim = 1 / res['grc_pop_corr']
            mpd.add_data_point(
                model=model_name,
                noise=noise * 100,
                grc_dim=res['grc_dim'],
                grc_dim_norm=res['grc_dim'] / 79,
                grc_dim2=inv_corr_dim,
                grc_dim2_norm=inv_corr_dim / 72,
                grc_by_mf_dim=res['grc_dim'] / res['mf_dim'],
                num_grcs=res['num_grcs'],
                num_mfs=res['num_mfs'],
                voi=res['voi'],
                grc_pop_corr=res['grc_pop_corr'],
                binary_similarity=res['binary_similarity'],
                hamming_distance=res['hamming_distance'],
                normalized_mse=res['normalized_mse'],
            )

importlib.reload(my_plot); my_plot.my_relplot(
    mpd,
    x='noise',
    y='grc_dim2_norm',
    hue='model',
    context='paper',
    linewidth=1,
    width=5.5,
    y_axis_label='Norm. Dim. ($x$)',
    x_axis_label='GrC (%)',
    save_filename=f'{script_n}_global.svg',
    show=True,
)
| analysis/dimensionalty_sim/plot_210626_gt_grc_pcts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Exercise 25: Write to a File
# 1. Use the write function from the file descriptor object
# +
data_dict = {"India": "Delhi", "France": "Paris", "UK": "London",
             "USA": "Washington"}
# Write one "capital" line per pair; the context manager closes the file.
with open("data_temporary_files.txt", "w") as fd:
    for country, capital in data_dict.items():
        fd.write(f"The capital of {country} is {capital}\n")
# -
# 2. Read the file using
# Echo the file back; print() adds its own newline, so output is double-spaced.
with open("data_temporary_files.txt", "r") as reader:
    for record in reader:
        print(record)
# 3. Use the print function to write to a file
data_dict_2 = {"China": "Beijing", "Japan": "Tokyo"}
# Append mode; print(..., file=fd) supplies the trailing newline for us.
with open("data_temporary_files.txt", "a") as fd:
    for country, capital in data_dict_2.items():
        print(f"The capital of {country} is {capital}", file=fd)
# 4. Read the file
# Re-read after the append; print() double-spaces the lines as before.
with open("data_temporary_files.txt", "r") as reader:
    for record in reader:
        print(record)
| Chapter02/.ipynb_checkpoints/Exercise 25-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (msw-bmbf_da)
# language: python
# name: msw-bmbf_da
# ---
# +
import datetime
import os # OS operations (read/write files/folders)
import pandas as pd # operate with dataframes
from tqdm.notebook import tqdm # mother of progressbars for Python
# +
# Root folder holding the raw per-participant event files.
PATH = "../data/raw/EventData"
# progress bar customized format
B_FORMAT = """📄 {n_fmt} of {total_fmt} {desc} processed: {bar}
{percentage:3.0f}% ⏱️{elapsed} ⏳{remaining} ⚙️{rate_fmt}{postfix}"""
# -
# Load the participants table, index it by participant id, and rename the
# 'created' column to 'date' in a single method chain.
part_raw = (
    pd.read_csv("../participants_raw.csv")
    .set_index("id")
    .rename(columns={"created": "date"})
)
part_raw
# +
# get all event filenames
evs = os.listdir(PATH)
print(len(evs))
# NOTE(review): the loop below "removed half of the files" because it
# mutates `evs` while iterating over it — each remove() shifts the list,
# so the iterator skips the element after every removal. A safe filter is:
#     evs = [f for f in evs if not f.startswith(".") and f.endswith(".raw")]
# for f in evs:
# # filter hidden/config files and folders
# if f.startswith(".") or not f.endswith(".raw"):
# evs.remove(f) # remove hidden/config file
# len(evs)
# +
# Match each raw event file to a participant and recover its recording
# date (file mtime) and experimental condition (parsed from the filename).
# Assumed filename shape: "<prefix>-<condition>-<uid>.raw" — TODO confirm.
uids = part_raw.index.tolist()
# progress bar over the event files
files_pbar = tqdm(
    evs,
    total=len(evs),
    desc="🧾 participants",
    dynamic_ncols=True,
    bar_format=B_FORMAT,
)
part_raw["condition"] = ""
for f in files_pbar:
    # uid is the third dash-separated token, minus the file extension
    uid = f.split("-")[2].split(".")[0]
    # NOTE(review): assumes the index values and the parsed uid share the
    # same type (both strings) — a numeric index would never match here.
    if uid in uids:
        # parse uid and condition from filename
        cur = f.split(".raw")[0]  # remove file extension
        cur = cur.split("-")  # split filename by "-"
        condition = cur[-2]
        date = os.path.getmtime(f"{PATH}/{f}")  # modification timestamp (not creation)
        date = datetime.datetime.fromtimestamp(date)  # translate to dt
        date = date.strftime("%Y-%m-%d %H:%M")  # arrange it
        # store it
        part_raw.loc[uid, ["date", "condition"]] = [date, condition]
part_raw
# +
# set date column as standard datetime format
part_raw.date = pd.to_datetime(part_raw.date)
# ensure participants ordering by date
part_raw = part_raw.sort_values(by="date")
part_raw
# -
# NOTE(review): this overwrites the very file read at the top of the
# notebook, so the fix is destructive — keep a backup before re-running.
part_raw.to_csv("../participants_raw.csv")
| checks/fix_dates.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="0Een5e77huOx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2c6764cc-324c-4404-f39c-6084efdec592"
import keras, os
import numpy as np
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
from tensorflow.python.util import deprecation
from tensorflow.keras import backend
from tensorflow.keras.applications.resnet50 import preprocess_input
# deprecation._PRINT_DEPRECATION_WARNINGS = False
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from tensorflow.keras.models import load_model
print("import done")
# + id="s-d1sSuKh6qy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 884} outputId="24f7bf40-4caa-4cf4-8e20-8542dfc71867"
def vgg_16(weights = 'imagenet', include_top = True):
model = Sequential()
model.add(Conv2D(input_shape = (224, 224, 3), filters = 64, kernel_size = (3,3), padding = "same", activation = "relu"))
model.add(Conv2D(filters = 64, kernel_size = (3,3), padding = "same", activation = "relu"))
model.add(MaxPool2D(pool_size = (2,2), strides = (2,2)))
model.add(Conv2D(filters = 128, kernel_size = (3,3), padding = "same", activation = "relu"))
model.add(Conv2D(filters = 128, kernel_size = (3,3), padding = "same", activation = "relu"))
model.add(MaxPool2D(pool_size = (2,2), strides = (2,2)))
model.add(Conv2D(filters = 256, kernel_size = (3,3), padding = "same", activation = "relu"))
model.add(Conv2D(filters = 256, kernel_size = (3,3), padding = "same", activation = "relu"))
model.add(Conv2D(filters = 256, kernel_size = (3,3), padding = "same", activation = "relu"))
model.add(MaxPool2D(pool_size = (2,2), strides = (2,2)))
model.add(Conv2D(filters = 512, kernel_size = (3,3), padding = "same", activation = "relu"))
model.add(Conv2D(filters = 512, kernel_size = (3,3), padding = "same", activation = "relu"))
model.add(Conv2D(filters = 512, kernel_size = (3,3), padding = "same", activation = "relu"))
model.add(MaxPool2D(pool_size = (2,2), strides = (2,2)))
model.add(Conv2D(filters = 512, kernel_size = (3,3), padding = "same", activation = "relu"))
model.add(Conv2D(filters = 512, kernel_size = (3,3), padding = "same", activation = "relu"))
model.add(Conv2D(filters = 512, kernel_size = (3,3), padding = "same", activation = "relu"))
model.add(MaxPool2D(pool_size = (2,2), strides = (2,2)))
model.add(Flatten())
model.add(Dense(units = 4096, activation = "relu"))
model.add(Dense(units = 4096, activation = "relu"))
model.add(Dense(units = 2, activation = "softmax"))
opt = Adam(lr = 0.001)
model.compile(optimizer = opt, loss= keras.losses.categorical_crossentropy, metrics = ['accuracy'])
# model.summary()
return model
print(vgg_16().summary())
# + id="cg-kJs10iMk7" colab_type="code" colab={}
# NOTE(review): this model instance is discarded — a fresh one is built
# again in the training cell below, so this cell is likely redundant.
vgg_16_model = vgg_16()
# + id="GMTFJPBXiRJS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="a4d42b87-de30-4a45-87c2-111ee7055e40"
# Download the cats-vs-dogs sample dataset.
# !wget --no-check-certificate \
#   https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \
#   -O cats_and_dogs_filtered.zip
# + id="Fcniy44foYfW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="3c2da416-3461-46bb-f414-4f7a8c03e70b"
# ! unzip cats_and_dogs_filtered.zip
# + id="3-fHa5A8obx2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="b14905ea-a2c1-4a76-a49a-fb552ebccc5f"
# Generators stream 224x224 images from the extracted train/validation folders.
trdata = ImageDataGenerator()
traindata = trdata.flow_from_directory(directory="cats_and_dogs_filtered/train",target_size=(224,224))
tsdata = ImageDataGenerator()
testdata = tsdata.flow_from_directory(directory="cats_and_dogs_filtered/validation", target_size=(224,224))
# + id="xio6gRCtoqSm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="8493a5cb-3fa6-4250-fda9-001cf1eb6943"
vgg_16_model = vgg_16()
# Save a full-model checkpoint after every epoch (save_best_only=False).
checkpoint = tf.keras.callbacks.ModelCheckpoint("vgg16_1.hdf5",
                                                monitor="val_loss",
                                                verbose = 1,
                                                save_best_only = False,
                                                save_weights_only = False,
                                                mode= "auto",
                                                save_freq= "epoch",
                                                options=None)
# Stop early if validation accuracy has not improved for 40 epochs.
early = EarlyStopping(monitor='val_accuracy',
                      min_delta=0,
                      patience=40,
                      verbose=1,
                      mode='auto')
# NOTE(review): steps_per_epoch=2 / validation_steps=1 only touch a few
# batches per epoch — presumably a quick smoke run; confirm before a real
# training session.
vgg_16_model.fit(traindata,
                 steps_per_epoch = 2,
                 epochs = 100,
                 validation_data = testdata,
                 validation_steps = 1,
                 callbacks = [checkpoint,early])
# NOTE(review): this writes to Google Drive, but drive.mount() only runs
# in the NEXT cell — on a fresh runtime the mount must happen first.
vgg_16_model.save_weights("/content/drive/My Drive/img_dataset/vgg16_1.hdf5")
# + id="slPa5riQ5J6B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="701c149c-a4ef-47a8-d1c4-4affb24485db"
from google.colab import drive
drive.mount('/content/drive')
# + id="9uwenYsq8LML" colab_type="code" colab={}
# Scratch cell for single-image inference (kept disabled).
# img = image.load_img('/content/drive/My Drive/img_dataset/rot.jpg',target_size=(224,224))
# img = np.asarray(img)
# plt.imshow(img)
# img = np.expand_dims(img, axis=0)
| vgg_16_colab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: learn-env
# language: python
# name: learn-env
# ---
# # PBW, HEALTH YEAH
# ### Hackathon for Maternity Hack4Equity
#
# * So I'm thinking this would be a great place to jot down our ideas. If you go to the Projects tab, there's a pretty cool to do list we can look through.
# ### Our Main Category: Improving Individual Healthcare
#
# * A healthy pregnancy begins before conception and extends for a full year after the baby is born. How might we innovate to empower Black women in the state of Georgia to gain the knowledge, access and agency to thrive before conception, during pregnancy, birth and post-partum?
| README.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Data-Mining-the-"Big-Two"-SAP-FI-Tables" data-toc-modified-id="Data-Mining-the-"Big-Two"-SAP-FI-Tables-1"><span class="toc-item-num">1 </span>Data Mining the "Big Two" SAP FI Tables</a></span><ul class="toc-item"><li><span><a href="#Executive-Summary" data-toc-modified-id="Executive-Summary-1.1"><span class="toc-item-num">1.1 </span>Executive Summary</a></span></li><li><span><a href="#Scope" data-toc-modified-id="Scope-1.2"><span class="toc-item-num">1.2 </span>Scope</a></span><ul class="toc-item"><li><span><a href="#Data" data-toc-modified-id="Data-1.2.1"><span class="toc-item-num">1.2.1 </span>Data</a></span><ul class="toc-item"><li><span><a href="#Loading-the-data" data-toc-modified-id="Loading-the-data-1.2.1.1"><span class="toc-item-num">1.2.1.1 </span>Loading the data</a></span></li><li><span><a href="#Transforming-the-data" data-toc-modified-id="Transforming-the-data-1.2.1.2"><span class="toc-item-num">1.2.1.2 </span>Transforming the data</a></span></li></ul></li><li><span><a href="#Methodology" data-toc-modified-id="Methodology-1.2.2"><span class="toc-item-num">1.2.2 </span>Methodology</a></span></li><li><span><a href="#Analysis" data-toc-modified-id="Analysis-1.2.3"><span class="toc-item-num">1.2.3 </span>Analysis</a></span><ul class="toc-item"><li><span><a href="#Vendor-Analysis" data-toc-modified-id="Vendor-Analysis-1.2.3.1"><span class="toc-item-num">1.2.3.1 </span>Vendor Analysis</a></span></li><li><span><a href="#User-Analysis" data-toc-modified-id="User-Analysis-1.2.3.2"><span class="toc-item-num">1.2.3.2 </span>User Analysis</a></span></li></ul></li></ul></li><li><span><a href="#Conclusion" data-toc-modified-id="Conclusion-1.3"><span class="toc-item-num">1.3 </span>Conclusion</a></span></li><li><span><a href="#Disclaimer" data-toc-modified-id="Disclaimer-1.4"><span class="toc-item-num">1.4 </span>Disclaimer</a></span></li><li><span><a href="#Appendix" 
data-toc-modified-id="Appendix-1.5"><span class="toc-item-num">1.5 </span>Appendix</a></span></li></ul></li></ul></div>
# -
# # Data Mining the "Big Two" SAP FI Tables
# ## Executive Summary
# Using a forensic accounting technique on SAP data, there is evidence that almost 50 vendor payments worth nearly **$4 million** are irregular and should be investigated further.
# ***
#
# ## Scope
# I am tasked with the preliminary data analysis on two SAP Financial Accounting transaction tables. Specifically I will apply a fraud detection technique to screen the dataset and determine if any further investigation will be required.
# ### Data
# The data consists of 2 CSV files with sample SAP outputs for 2017 from the BSEG and BKPF tables. Data was sourced from <NAME>. The BSEG table records the transaction line items and the BKPF table records the corresponding headers. Here are the high level ETL steps required to prepare our data:
#
# 1. Importing the necessary libraries
# 2. Loading into the dataframe
# 3. Wrangling and cleansing the data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# #### Loading the data
# Load the raw SAP extracts: BSEG (line items) and BKPF (document headers).
bseg = pd.read_csv('BSEG.csv', low_memory = False)
bkpf = pd.read_csv('BKPF.csv', low_memory = False)
bseg.head()
# An unnamed index column is read into the data frame, so I reimport the CSV using the 1st column as the index.
# +
bseg = pd.read_csv('BSEG.csv', low_memory = False, index_col = 0)
bseg.head()
# -
bseg.info()
bseg.columns.values
# 336 columns. Which are actually useful? With some additional research and sleuthing, I was able to narrow down the columns in this table to just 5 in particular. However, before dropping the remainder, I wanted to skim the summary statistics.
bseg.describe()
bseg.dtypes
# MANDT, BELNR, GJAHR, BUZEI, and BSCHL are accounted for. I will need to validate that BUKRS, WRBTR, and LIFNR also exists.
bseg['BUKRS'].describe()
bseg['WRBTR'].describe()
# The amount field is a string with comma thousands separators and a decimal point. Strip the commas, then let the int cast below truncate the decimal part, keeping only the integer's significant digits.
bseg['WRBTR'] = bseg['WRBTR'].replace({',': ''}, regex = True)
bseg['WRBTR'] = pd.to_numeric(bseg['WRBTR'], errors = 'coerce').astype(int)
# Re-cast to string so leading digits can be sliced out positionally later.
bseg['WRBTR'] = bseg['WRBTR'].apply(str)
bseg['WRBTR'].sample(10, random_state = 101)
bseg['LIFNR'].describe()
# Keep only the analysis-relevant columns.
bseg = bseg[['MANDT', 'BUKRS', 'BELNR', 'GJAHR', 'BUZEI', 'BSCHL', 'WRBTR', 'LIFNR']]
bseg.sample(10, random_state = 101)
bseg_dupe = bseg[bseg.duplicated()]
print("Number of duplicate rows: ", bseg_dupe.shape)
# There are no duplicate rows, otherwise the .drop_duplicates() function can be used.
print(bseg.isnull().sum())
# LIFNR has a significant amount of null values. I will keep the field but clearly further work should be done to understand the reasoning behind this. Should null vendors even be allowed into transactions?
# Inspect the rows with a missing vendor number (LIFNR).
# .copy() makes bsegNull an independent frame, so the WRBTR conversion
# below no longer writes into a view of `bseg` (which raised pandas'
# SettingWithCopyWarning and relied on undefined chained-assignment
# behavior).
bsegNull = bseg[bseg['LIFNR'].isnull()].copy()
bsegNull.head()
bsegNull['WRBTR'] = pd.to_numeric(bsegNull['WRBTR'], errors = 'coerce')
bsegNull['WRBTR'].value_counts()
bseg['LIFNR'].value_counts()
# I replace the null values with an arbitrary 999999 value to include this data into the later calculations.
# NOTE(review): '999999' is a string sentinel while the remaining LIFNR
# values may be numeric — confirm it cannot collide with, or type-mismatch
# against, genuine vendor ids downstream.
bseg['LIFNR'].fillna('999999', inplace = True)
print(bseg.isnull().sum())
# #### Transforming the data
# BSCHL is the posting key which is our field of interest as it indicates whether the entry is debit or credit and to what type of account it is executed on (e.g. GL, customer, vendor, assets, material). More importantly, I will extract digits from our WRBTR field which represents the amount and will enable the analysis.
# +
# Keep only positive amounts with at least four digits (>= $1,000).
# .copy() prevents pandas' SettingWithCopyWarning when the three digit
# columns are added to this filtered subset below.
bseg_stg = bseg[(bseg['WRBTR'].str.len() > 3) & (bseg['WRBTR'].str.contains('-') == False)].copy()
#bseg_stg = bseg[(bseg['WRBTR'] > 1000)]
# Extract the first three significant digits positionally for the Benford tests.
bseg_stg['Digit 1'] = bseg_stg['WRBTR'].astype(str).str[0:1].astype(int)
bseg_stg['Digit 2'] = bseg_stg['WRBTR'].astype(str).str[1:2].astype(int)
bseg_stg['Digit 3'] = bseg_stg['WRBTR'].astype(str).str[2:3].astype(int)
bseg_stg.sample(10, random_state = 101)
# -
# For data processing, I have filtered for only amounts that are at least $1,000 while retaining the amount field as a string to extract the digits.
bkpf.head()
# +
bkpf = pd.read_csv('BKPF.csv', low_memory = False, index_col = 0)
bkpf.head()
# -
bkpf.columns.values
# 113 columns. Which are actually useful? With some additional research and sleuthing, I was able to narrow down the columns in this table to just 5 in particular. However, before dropping the remainder, I wanted to skim the summary statistics.
bkpf.describe()
bkpf.dtypes
# MANDT, BUKRS, BELNR, and GJAHR are accounted for. I will need to validate that BLART and USNAM also exists.
bkpf['USNAM'].describe()
# Keep only the analysis-relevant header columns.
bkpf = bkpf[['MANDT', 'BUKRS', 'BELNR', 'GJAHR', 'BLART', 'USNAM']]
bkpf.sample(10, random_state = 101)
# BLART is the document type which is our key field of interest.
bkpf_dupe = bkpf[bkpf.duplicated()]
print("Number of duplicate rows: ", bkpf_dupe.shape)
# Again, there are no duplicate rows.
print(bkpf.isnull().sum())
# There are no null values.
# +
# BSCHL 25 = outgoing vendor payment line items; BLART 'KZ' = vendor payment documents.
bseg25 = bseg_stg[bseg_stg['BSCHL'] == 25]
bkpfKZ = bkpf[bkpf['BLART'] == 'KZ']
bseg25.shape, bkpfKZ.shape
# -
# The measure of interest will be vendor transactions that are debit entries (i.e. outgoing vendor payments), so I left join using the Accounting Document Number in order to retain all 342 transaction line items.
# NOTE(review): joining on BELNR alone assumes document numbers are unique
# across company code / fiscal year in this sample — confirm, otherwise the
# merge can fan out rows.
merged = pd.merge(bseg25, bkpfKZ, on = 'BELNR', how = 'left')
merged
# ### Methodology
# Now that the data has been wrangled and cleansed, I will apply Benford's law to examine the transaction data. Simply stated, Benford's law is used to analyze the validity and authenticity of financial records. In naturally occurring distributions, the leading significant digit will follow a right-skewed distribution. In other words, the digits have non-equal chances of being the leading digit. As a limitation, Benford's law should not be used for datasets with constraints such as minimums, maximums, and numbering conventions or structures. This includes phone numbers, hourly wage rates, and heights for instance.
import math
# Benford's law: the probability that the leading digit is d (1..9) is
# P(d) = log10(1 + 1/d). Use math.log10 directly instead of the
# log(x)/log(10) ratio — clearer and slightly more accurate.
benford = [math.log10(1 + 1 / d) for d in range(1, 10)]
d = {
    'n': range(1, 10),  # leading digit 1..9
    'b': benford,       # expected Benford proportion for that digit
}
benford_predict = pd.DataFrame(d)
benford
# The output dictates the proportion of leading digits that is expected from a Benford distribution. That is, the digit 1 should appear 30% of the time as the first digit in the dataset as opposed to 5% for the digit 9.
# ### Analysis
# In order to apply the Benford analysis, I will convert the vendor values into a relative count and display the vendor count as a percentage.
# +
# Count line items per leading digit, then normalize to proportions for
# comparison against the Benford expectation.
vendor = merged.groupby(['Digit 1'])['Digit 1'].count().reset_index(name='Vendor Count')
# NOTE(review): `vendor` already has one row per digit, so this second
# groupby-sum is a no-op re-aggregation kept for the rename side effect.
vendor_Total = vendor.groupby(['Digit 1'])['Vendor Count'].sum().rename('Percentage')
vendor_Total
vendor_Percentage = vendor_Total / vendor_Total.sum()
vendor_Percentage
pd.DataFrame(vendor_Percentage)
# -
# Our exploratory data analysis will be to plot the distributions against each other.
# +
sns.set_style('whitegrid')
sns.set_palette('Paired')
fig, (ax1) = plt.subplots(nrows = 1, figsize = (12, 6))
# Hollow bars: theoretical Benford distribution; points: observed shares.
sns.barplot(x = 'n', y = 'b', data = benford_predict, color = 'black', facecolor=(1, 1, 1, 0), edgecolor = '0.2', ax = ax1)
sns.pointplot(x = 'Digit 1', y = 'Percentage', data = vendor_Percentage.reset_index(), ax = ax1, )
# -
# After plotting this against the Benford distribution, it appears visually that the digit 1 is a severe deviation and 9 may also be a deviation.
#
# Let's take a look at the vendors in particular.
# #### Vendor Analysis
# Restrict to leading-digit-1 rows and count line items per vendor/amount.
vendor1 = merged.groupby(['Digit 1', 'LIFNR', 'WRBTR'])['LIFNR'].count().reset_index(name='Vendor Count')
vendor1 = vendor1[vendor1['Digit 1'] == 1]
vendor1
# In order to apply the Benford analysis, I will convert the WRBTR amount into a numeric value to sum and display the vendors with the highest amounts. Recall that this sample space only includes transactions where the leading digit is 1.
vendor1.dtypes
vendor1['WRBTR'] = pd.to_numeric(vendor1['WRBTR'], errors = 'coerce')
vendor_Total = vendor1.groupby(['Digit 1', 'LIFNR'])['WRBTR'].sum().rename('Amount')
vendor_Total
# Share of each vendor within the digit-1 total.
vendor1_Percentage = vendor_Total / vendor_Total.groupby(level = 0).sum()
pd.DataFrame(vendor1_Percentage)
# Vendor 125135 is responsible for over 60% of the transaction amounts (leading with the digit 1) worth $31 million, so any further investigations may be best suited to begin there.
# NOTE(review): the narrative names vendor 125135 here but 125134 below — confirm which id is correct.
# +
vendorLIFNR = merged.groupby(['Digit 1', 'LIFNR'])['Digit 1'].count().reset_index(name='Vendor Count')
vendorLIFNR_Total = vendorLIFNR.groupby(['Digit 1', 'LIFNR'])['Vendor Count'].sum().rename('Percentage')
vendorLIFNR_Total
# Per-digit vendor shares (normalized within each leading digit).
vendorLIFNR_Percentage = vendorLIFNR_Total / vendorLIFNR_Total.groupby(level = 0).sum()
pd.DataFrame(vendorLIFNR_Percentage)
# +
fig, (ax1) = plt.subplots(nrows = 1, figsize = (12, 6))
sns.barplot(x = 'Digit 1', y = 'Percentage', data = vendorLIFNR_Percentage.reset_index(), ax = ax1, hue = 'LIFNR')
# -
# After examining the distribution by vendor account, 125134 stands out as it comprises the majority (at over 60%) of the transactions with leading digits of 8 and 9.
#
# Next, I will run through the user data.
# #### User Analysis
# Same breakdown per posting user (USNAM), again restricted to leading digit 1.
user1 = merged.groupby(['Digit 1', 'USNAM', 'WRBTR'])['USNAM'].count().reset_index(name='User Count')
user1 = user1[user1['Digit 1'] == 1]
user1.head()
user1.dtypes
user1['WRBTR'] = pd.to_numeric(user1['WRBTR'], errors = 'coerce')
user1_Total = user1.groupby(['Digit 1', 'USNAM'])['WRBTR'].sum().rename('Amount')
user1_Total
user_Percentage = user1_Total / user1_Total.sum()
user_Percentage
pd.DataFrame(user_Percentage)
# GBI-005 and GBI-026 are responsible for over 90% of the transaction amounts (leading with the digit 1) worth over $40 million, so any further investigations may be best suited to begin there.
# +
userUSNAM = merged.groupby(['Digit 1', 'USNAM'])['Digit 1'].count().reset_index(name='User Count')
userUSNAM_Total = userUSNAM.groupby(['Digit 1', 'USNAM'])['User Count'].sum().rename('Percentage')
userUSNAM_Total
userUSNAM_Percentage = userUSNAM_Total / userUSNAM_Total.groupby(level = 0).sum()
pd.DataFrame(userUSNAM_Percentage)
# +
fig, (ax1, ax2) = plt.subplots(nrows = 1, ncols = 2, figsize = (12, 6))
sns.barplot(x = 'Digit 1', y = 'Percentage', data = userUSNAM_Percentage.reset_index(), ax = ax1, hue = 'USNAM')
#sns.barplot(x = 'Digit 1', y = 'Percentage', data = vendorLIFNR_Percentage.reset_index(), ax = ax2, hue = 'LIFNR')
#sns.pointplot(x = 'Digit 1', y = 'Percentage', data = userUSNAM_Percentage.reset_index(), ax = ax1, hue = 'USNAM')
sns.pointplot(x = 'Digit 1', y = 'Percentage', data = vendorLIFNR_Percentage.reset_index(), ax = ax2, hue = 'LIFNR', join = False)
# -
# After examining the distribution by user name, GBIFAC-01 stands out as it comprises the majority (at over 70%) of the transactions with leading digits of 8 and 9. GBI-026 visually stands out as leading the transactions with leading digits of 3 through 7, but recall that our preliminary Benford distribution did not highlight any significant deviations.
#
# Let's examine the GBIFAC-01 transactions using the 125134 account.
# NOTE(review): LIFNR nulls were filled with the STRING '999999' earlier, yet it is compared to the integer 125134 here — confirm the column's dtype, since an all-string column would make these filters match nothing.
len(merged[(merged['LIFNR'] == 125134) & (merged['USNAM'] == 'GBIFAC-01')])
merged[(merged['LIFNR'] == 125134) & (merged['USNAM'] == 'GBIFAC-01')]['WRBTR'].astype('float').sum()
# There are 46 transactions worth nearly $4 million to examine.
merged[(merged['LIFNR'] == 125134) & (merged['USNAM'] == 'GBIFAC-01')]
# ***
#
# ## Conclusion
# The conclusion is that there is evidence of an anomaly within the 2017 SAP BSEG and BKPF tables. Specifically, the GBIFAC-01 and 125134 transactions should be given additional scrutiny. For additional analysis, the following tests can be conducted:
# 1. The second digit test
# 2. The leading two digits test
# 3. The leading three digits test
# 4. The trailing two digits test
#
# Lastly, with deeper knowledge of the SAP tables, supplementary data analysis could be performed for instance on incoming vendor payments.
# ***
#
# ## Disclaimer
# 1. No liability for any errors or omissions
#
# The information contained in this report has been provided for information purposes only. This information does not constitute legal, professional or commercial advice. While every care has been taken to ensure that the content is useful and accurate, there are no guarantees, undertakings or warranties in this regard, and any legal liability or responsibility for the content or the accuracy of the information so provided, or, for any loss or damage caused arising directly or indirectly in connection with reliance on the use of such information is forfeited. Any errors or omissions brought to the attention will be corrected as soon as possible.
#
# The information in this website may contain technical inaccuracies and typographical errors. The information in this website may be updated from time to time and may at times be out of date. There is no responsibility for keeping the information in this website up to date or any liability whatsoever for any failure to do so.
#
# 2. Material on this website does not constitute legal and/or professional advice
#
# Any views, opinions and guidance set out in this report are provided for information purposes only, and do not purport to be legal and/or professional advice or a definitive interpretation of any law. Anyone contemplating action in respect of matters set out in this report should obtain advice from a suitably qualified professional adviser based on their unique requirements.
# ***
#
# ## Appendix
# https://www.leanx.eu/en/sap/table/bseg.html
#
# https://www.leanx.eu/en/sap/table/bkpf.html
#
# http://sap-ficoexpert.blogspot.com/2011/12/posting-keys.html
#
# https://it.toolbox.com/question/fi-document-types-101201
| projects/forensic-accounting/sap-forensic-accounting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
# Scalar "embedding table": one value per id.
embeddings_0d = tf.constant([17, 22, 35, 51])
# One-hot style table: one 4-vector per id.
embeddings_4d = tf.constant([[1, 0, 0, 0],
                             [0, 1, 0, 0],
                             [0, 0, 1, 0],
                             [0, 0, 0, 1]])
# Table of 2x2 matrices: one matrix per id.
embeddings_2x2d = tf.constant([[[1, 0], [0, 0]],
                               [[0, 1], [0, 0]],
                               [[0, 0], [1, 0]],
                               [[0, 0], [0, 1]]])
# Row indices to gather, in this order.
ids = tf.constant([1, 0, 2])
# +
# embedding_lookup gathers rows of each table along axis 0, one per id,
# regardless of the per-row shape (scalar, vector, or matrix).
lookup_0d = tf.nn.embedding_lookup(embeddings_0d, ids)
print(lookup_0d)
lookup_4d = tf.nn.embedding_lookup(embeddings_4d, ids)
print(lookup_4d)
lookup_2x2d = tf.nn.embedding_lookup(embeddings_2x2d, ids)
print(lookup_2x2d)
# -
| TFv2/ch18/Listing 18.05 - 18.08.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from typing import List
class Solution:
    """LeetCode 1720 — recover an array from its prefix-XOR encoding."""

    def decode(self, encoded: List[int], first: int) -> List[int]:
        """Return the original array given encoded[i] = arr[i] ^ arr[i+1].

        Each element is the running XOR of `first` with the encoded
        values seen so far.
        """
        decoded = [first]
        for value in encoded:
            decoded.append(decoded[-1] ^ value)
        return decoded
# -
# Sanity check against the LeetCode example (expected [4, 2, 0, 7, 4]).
solution = Solution()
solution.decode(encoded = [6,2,7,3], first = 4)
# Scratch cell: XOR of equal values is 0.
1 ^ 1
| Bit Manipulation/0112/1720. Decode XORed Array.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Importing, cleaning and exporting flight data
#
# - Importing flight data from csv file obtained from https://www.flightradar24.com/data/airports/fra/statistics
# - Contains number of scheduled and tracked flights from **18.05.2020 to 16.06.2020**
# - We assume that the scheduled flights are similar to the number of flights that actually flew for the same time in 2019
# - We assume that the decrease in tracked flights is due to Covid-19
# +
# Importing modules for data extraction and display.
import pandas as pd
# +
# Importing frankfurt_scheduled_vs_tracked.csv file into notebook.
# NOTE: the raw headers carry leading whitespace; they are normalized by
# rename_cols below.
data_path = '../data/frankfurt_scheduled_vs_tracked.csv'
df = pd.read_csv(data_path)
df
# +
# Checking data information and data types.
df.info()
# +
# Converting column "DateTime" values to datetime dtype and renaming columns to get rid of whitespace.
def rename_cols(df):
    """Replace the raw CSV headers with clean snake_case names.

    Mutates *df* in place and returns the same frame for chaining.
    """
    clean_names = ['date', 'scheduled_flights', 'tracked_flights']
    df.columns = clean_names
    return df
# -
# ## Split data into flights_2019 (scheduled) and flights_2020 (tracked)
#
# - Don't forget to change the year of flights_2019
# +
# Creating flights_2019 and changing year in 'date' column.
# Used original df column 'DateTime' values, because they are still strings
def get_2019(df):
    """Return the scheduled-flight series relabeled as 2019 data.

    Scheduled 2020 flights are used as a proxy for 2019 traffic, so the
    year in the (string) date column is rewritten from 2020 to 2019.

    Bug fix: the column subset is now an explicit .copy() — assigning into
    the slice returned by df[[...]] triggered pandas' SettingWithCopyWarning
    and relied on undefined chained-assignment behavior.
    """
    df_2019 = df[['date', 'scheduled_flights']].copy()
    df_2019['date'] = df_2019['date'].str.replace('2020', '2019')
    return df_2019
# +
# Creating dataframe for flights_2020
def get_2020(df):
    """Return just the date and tracked-flight columns (2020 actuals)."""
    return df[['date', 'tracked_flights']]
# +
# Renaming 'DateTime' to 'date', because there is no time included.
def change_to_datetime(df):
    """Return a copy of *df* with the 'date' column cast to datetime64[ns].

    Bug fix: the dtype spec now carries the '[ns]' unit — a bare
    'datetime64' is rejected by pandas >= 2.0 ("Casting to unit-less
    dtype 'datetime64' is not supported").
    """
    df = df.astype({'date': 'datetime64[ns]'})
    return df
# +
# Putting everything into a function
def split_flight_data(csv_path):
    """Load the raw CSV and split it into 2019 (scheduled) and 2020 (tracked) frames.

    Bug fix: the original called change_to_datetime(df1) / (df2) but
    discarded the return values — astype returns a NEW frame, so the
    'date' columns were never actually converted. The results are now
    assigned before being returned.
    """
    df = pd.read_csv(csv_path)
    df = rename_cols(df)
    df1 = change_to_datetime(get_2019(df))
    df2 = change_to_datetime(get_2020(df))
    return df1, df2
# Build both views from the raw CSV.
flights_2019, flights_2020 = split_flight_data(data_path)
# -
flights_2019
flights_2020
# +
# export 2 clean dataframes to separate json files
flights_2019.to_json('../data/clean_frankfurt_flight_2019.json')
flights_2020.to_json('../data/clean_frankfurt_flight_2020.json')
| modules/import_clean_export_flightdata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Bug fix: `requests` was used below but never imported anywhere in this
# notebook, so the loop raised NameError.
import requests

# Sample payloads for the locally served iris model; each record carries
# the four measurements the endpoint expects.
sample_request_inputs = [
    {"sepal length": 6.3, "sepal width": 3.3, "petal length": 6.0, "petal width": 2.5},
    {"sepal length": 5.1, "sepal width": 3.5, "petal length": 1.4, "petal width": 0.2},
    {"sepal length": 6.4, "sepal width": 3.2, "petal length": 4.5, "petal width": 1.5},
]

# Query the service once per sample and print the raw response body.
# NOTE(review): GET with a JSON body is unusual for inference endpoints —
# confirm the /regressor route really reads the body on GET (vs. POST).
for input_request in sample_request_inputs:
    response = requests.get("http://localhost:8000/regressor",
                            json=input_request)
    print(response.text)
| solutions/ex_10_solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from DoDCAE import dodcae
import matplotlib.pyplot as plt
# Pull Q4-2021 award data from the DoDCAE helper.
Q4 = dodcae(start_date="2021-10-01", end_date="2021-12-31")
# Total monetary value per organization, largest first.
top_10_org = Q4.sort_values('Monetary', ascending=False)[['Organizations','Monetary']]
top_10_org = top_10_org.groupby(by = 'Organizations', as_index=False).agg({'Monetary':sum})
top_10_org = top_10_org.sort_values('Monetary', ascending=False)[['Organizations','Monetary']]
# Total monetary value per location, largest first.
top_10_loc = Q4.sort_values('Location', ascending=False)[['Location','Monetary']]
top_10_loc = top_10_loc.groupby(by = 'Location', as_index=False).agg({'Monetary':sum})
top_10_loc = top_10_loc.sort_values('Monetary', ascending=False)[['Location','Monetary']]
# display() is the IPython rich-display helper (notebook-only); renders the
# top-10 tables plus a bar chart of the largest individual awards.
display(Q4.sort_values('Monetary', ascending=False).head(10), Q4.sort_values('Monetary', ascending=False)[['Organizations','Monetary']].head(10).plot.bar(style='dict', ylabel='Monetary', fontsize=10,
        figsize=(8,8), x='Organizations', y='Monetary'), top_10_org.head(10), top_10_loc.head(10))
| examples/DoDCAE Notebook Sample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''thesis'': conda)'
# name: python3
# ---
# #### loading the libraries
import os
import sys
import pyvista as pv
import trimesh as tm
import numpy as np
import topogenesis as tg
import pickle as pk
sys.path.append(os.path.realpath('..\..')) # no idea how or why this is not working without adding this to the path TODO: learn about path etc.
from notebooks.resources import RES as res
import pygmo as pg
# #### loading the meshes and vectors
# +
# load base lattice CSV file
lattice_path = os.path.relpath('../../data/macrovoxels.csv')
macro_lattice = tg.lattice_from_csv(lattice_path)
# load environment mesh (urban context)
environment_path = os.path.relpath("../../data/movedcontext.obj")
environment_mesh = tm.load(environment_path)
# Load the pickled sun data. Fix: the original pk.load(open(...)) calls
# never closed their file handles; 'with' blocks close them deterministically.
# solar vectors
with open("../../data/sunvectors.pk", "rb") as fh:
    vectors = pk.load(fh)
# global horizontal radiation per vector (PV potential)
with open("../../data/ghrval.pk", "rb") as fh:
    intensity_pv = pk.load(fh)
# direct normal illuminance per vector (daylight potential)
with open("../../data/dnival.pk", "rb") as fh:
    intensity_dl = pk.load(fh)
# -
# #### defining target FSI, number of variables
goal_FSI = 3  # target Floor Space Index for the massing
num_var = macro_lattice.flatten().shape[0]  # one decision variable per voxel
# #### creating the class for running the optimization
class pygmo_optimize:
    """pygmo user-defined problem (UDP) for the voxel-massing search.

    The decision vector holds one value in [0, 1] per macro-voxel. Four
    objectives are evaluated through the helpers in notebooks.resources
    (``res``); pygmo minimizes, so maximization objectives are negated.
    """

    def __init__(self, dim, reflattice, crit1_values, crit2_values, sunvectors, environment, targetFSI):
        # Problem size (number of voxels) plus all evaluation inputs.
        self.dim = dim
        self.reflattice = reflattice
        self.crit1_values = crit1_values
        self.crit2_values = crit2_values
        self.sunvectors = sunvectors
        self.environment = environment
        self.targetFSI = targetFSI

    def fitness(self, x):
        """Evaluate the four objectives for decision vector *x*."""
        # PV potential: global horizontal radiation on roofs (maximize -> negate).
        pv_potential = res.crit_1_PV(x, self.reflattice, self.sunvectors, self.crit1_values, self.environment)[0]
        # Daylight potential: direct normal illuminance on facades (maximize -> negate).
        daylight = res.crit_2_DL(x, self.reflattice, self.sunvectors, self.crit2_values, self.environment)[0]
        # Relative compactness (minimized as-is).
        compactness = res.crit_3_RC(x, self.reflattice)
        # Floor Space Index deviation from target (maximize -> negate).
        fsi_score = res.crit_4_FSI(x, self.reflattice, self.targetFSI)
        return [-pv_potential, -daylight, compactness, -fsi_score]

    def get_nobj(self):
        """Number of objectives."""
        return 4

    def get_bounds(self):
        """Box bounds: every decision variable lies in [0, 1]."""
        lower = np.zeros(self.dim)
        upper = np.ones(self.dim)
        return (lower, upper)

    def get_name(self):
        """Problem name reported by pygmo."""
        return "Test function MAX"
# create User Defined Problem
# Wrap the UDP defined above into a pygmo problem object.
prob = pg.problem(pygmo_optimize(
    dim= num_var,
    reflattice= macro_lattice,
    crit1_values= intensity_pv,
    crit2_values= intensity_dl,
    sunvectors= vectors,
    environment= environment_mesh,
    targetFSI= goal_FSI
))
# +
# create population
pop = pg.population(prob, size=10)  # 10 random candidate designs
# select algorithm --> ihs nsga2 maco for integers
# maco seems slow, unreliable
# nspso non dominated sorting particle swarm optimization
# ihs seems fastest but not always yields results that make sense
# nsga2 is most consistent with results, average speed
# TODO: continuous in stead of discrete/integer
algo = pg.algorithm(pg.nspso(gen=100))  # NSPSO, 100 generations
# run optimization
pop = algo.evolve(pop)
# +
class my_isl:
    """Minimal user-defined island (UDI) for pygmo."""

    def run_evolve(self, algo, pop):
        """Evolve `pop` with `algo`; return both, as the UDI contract requires."""
        return algo, algo.evolve(pop)

    def get_name(self):
        """Human-readable island name shown by pygmo."""
        return "It's my island!"
# Single island running the custom UDI with NSPSO (100 generations), population 10.
isl = pg.island(algo = pg.nspso(100), prob = prob, udi = my_isl(), size=10)
# +
def _evolve_func(algo, pop): # doctest : +SKIP
new_pop = algo.evolve(pop)
return algo, new_pop
class mp_island(object): # doctest : +SKIP
    """Island that evolves its population in a separate process pool.

    NOTE(review): this is the mp_island sketch from the pygmo docs; the
    class attributes it relies on (`init_pool`, `_pool_lock`, `_pool`)
    are never defined in this notebook, so instantiating it here would
    raise AttributeError.  Kept for reference only -- confirm intent.
    """
    def __init__(self):
        # Init the process pool, if necessary.
        mp_island.init_pool()
    def run_evolve(self, algo, pop):
        # Ship the evolution to the pool; the local `res` is an AsyncResult
        # (it shadows the `res` module alias imported at the top of the notebook).
        with mp_island._pool_lock:
            res = mp_island._pool.apply_async(_evolve_func, (algo, pop))
        return res.get()
# +
# Run 5 islands in parallel on the same problem/algorithm.
archi = pg.archipelago(n = 5, algo = pg.nspso(100), prob = prob, pop_size = 10, udi = my_isl())
archi.evolve()
# -
print(archi)
import matplotlib.pyplot as plt
# NOTE(review): this 3x3 axes grid is created but never drawn into below.
fig, axes = plt.subplots(nrows=3, ncols=3, sharex='col', sharey='row', figsize=(15,15))
# +
# extract results
# fits: objective values per individual; vecs: decision vectors.
fits, vecs = pop.get_f(), pop.get_x()
# extract and print non-dominated fronts
ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(fits)
ax = pg.plot_non_dominated_fronts(fits,comp=[2,3], marker='x') # plotting the non dominated fronts #TODO: what exactly does this mean in this context
# +
# Persist fitness values and decision vectors for later analysis.
# NOTE(review): these open() handles are never closed; prefer `with open(...)`.
fits_path = os.path.relpath("../../data/fits_NSPSO_10x100_all.obj")
pk.dump(fits, open(fits_path, "wb"))
vecs_path = os.path.relpath("../../data/vecs_NSPSO_10x100_all.obj")
pk.dump(vecs, open(vecs_path, "wb"))
# -
best = pg.sort_population_mo(points = fits)[0] # the best solutions (by population)
print("The best configuration is: \n", np.around(vecs[best]).astype(int), "\n It's fitness is: ", fits[best][0].astype(int), fits[best][1].astype(int), fits[best][2], fits[best][3], "\n This is population #", best)
np.count_nonzero(np.around(vecs[best]))
# Re-evaluate every criterion for the best decision vector.
# NOTE(review): crit_2_DL is called with intensity_pv here, but the UDP above
# used intensity_dl for the daylight criterion -- confirm this is intentional.
crit1, voxcrit1 = res.crit_1_PV(vecs[best], macro_lattice, vectors, intensity_pv, environment_mesh)
crit2, voxcrit2 = res.crit_2_DL(vecs[best], macro_lattice, vectors, intensity_pv, environment_mesh)
crit3 = res.crit_3_RC(vecs[best], macro_lattice)
crit4 = res.crit_4_FSI(vecs[best], macro_lattice, goal_FSI)
configuration = res.reshape_and_store_to_lattice(np.around(vecs[best]), macro_lattice)
# +
# Build roof and facade meshes of the winning configuration for display.
meshesroof, _, _ = res.construct_horizontal_mesh(configuration, configuration.unit)
roofmesh = tm.util.concatenate(meshesroof)
meshesfacade, _, _ = res.construct_vertical_mesh(configuration, configuration.unit)
facademesh = tm.util.concatenate(meshesfacade)
# +
# visualize configuration
p = pv.Plotter(notebook=True)
# fast visualization of the lattice
configuration.fast_vis(p,opacity=0.1)
p.add_mesh(environment_mesh)
# Color roofs by PV potential and facades by daylight potential.
p.add_mesh(roofmesh, cmap='fire', scalars=np.repeat(voxcrit1,2))
p.add_mesh(facademesh, cmap='fire', scalars=np.repeat(voxcrit2,2))
# plotting
p.show(use_ipyvtk=True)
# -
| notebooks/Toy_problem/Tp5_optimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="zX4Kg8DUTKWO" colab_type="code" colab={}
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + colab_type="code" id="dn-6c02VmqiN" colab={}
# In this exercise you will train a CNN on the FULL Cats-v-dogs dataset
# This will require you to do a lot of data preprocessing because
# the dataset isn't split into training and validation for you
# This code block has all the required inputs
import os
import zipfile
import random
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from shutil import copyfile
# + colab_type="code" id="3sd9dQWa23aj" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="e9c15ae1-be4b-4db3-b12c-faf510b38fa0"
# This code block downloads the full Cats-v-Dogs dataset and stores it as
# cats-and-dogs.zip. It then unzips it to /tmp
# which will create a tmp/PetImages directory containing subdirectories
# called 'Cat' and 'Dog' (that's how the original researchers structured it)
# If the URL doesn't work,
# . visit https://www.microsoft.com/en-us/download/confirmation.aspx?id=54765
# And right click on the 'Download Manually' link to get a new URL
# !wget --no-check-certificate \
# "https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip" \
# -O "/tmp/cats-and-dogs.zip"
local_zip = '/tmp/cats-and-dogs.zip'
# Use a context manager so the archive is closed even if extractall fails
# (the original opened and closed the ZipFile manually).
with zipfile.ZipFile(local_zip, 'r') as zip_ref:
    zip_ref.extractall('/tmp')
# + colab_type="code" id="gi3yD62a6X3S" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="44a74f43-36ed-4d90-e96b-40ba206188f3"
# Sanity check: count the extracted images per class.
print(len(os.listdir('/tmp/PetImages/Cat/')))
print(len(os.listdir('/tmp/PetImages/Dog/')))
# Expected Output:
# 12501
# 12501
# + colab_type="code" id="F-QkLjxpmyK2" colab={}
# Use os.mkdir to create your directories
# You will need a directory for cats-v-dogs, and subdirectories for training
# and testing. These in turn will need subdirectories for 'cats' and 'dogs'
try:
    # Build the whole cats-v-dogs tree in one pass: os.makedirs creates the
    # missing parent directories, so listing only the leaf dirs is enough.
    # (Replaces seven repetitive os.mkdir calls whose path variables were
    # never used elsewhere in the notebook.)
    base_path = '/tmp/cats-v-dogs'
    for subdir in ('training/cats', 'training/dogs',
                   'testing/cats', 'testing/dogs'):
        os.makedirs(os.path.join(base_path, subdir))
except OSError:
    # Directories already exist (or /tmp is unwritable) -- continue,
    # matching the original notebook's best-effort behaviour.
    pass
# + colab_type="code" id="zvSODo0f9LaU" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="6bdc20d0-c799-4de9-f23c-7b720fd22936"
# Write a python function called split_data which takes
# a SOURCE directory containing the files
# a TRAINING directory that a portion of the files will be copied to
# a TESTING directory that a portion of the files will be copied to
# a SPLIT SIZE to determine the portion
# The files should also be randomized, so that the training set is a random
# X% of the files, and the test set is the remaining files
# SO, for example, if SOURCE is PetImages/Cat, and SPLIT SIZE is .9
# Then 90% of the images in PetImages/Cat will be copied to the TRAINING dir
# and 10% of the images will be copied to the TESTING dir
# Also -- All images should be checked, and if they have a zero file length,
# they will not be copied over
#
# os.listdir(DIRECTORY) gives you a listing of the contents of that directory
# os.path.getsize(PATH) gives you the size of the file
# copyfile(source, destination) copies a file from source to destination
# random.sample(list, len(list)) shuffles a list
def split_data(SOURCE, TRAINING, TESTING, SPLIT_SIZE):
    """Randomly split the files in SOURCE between TRAINING and TESTING.

    A fraction SPLIT_SIZE of the non-empty files is copied into TRAINING
    and the remainder into TESTING.  Zero-length files are reported and
    skipped.

    SOURCE, TRAINING, TESTING -- directory paths ending with '/'
                                 (bare filenames are concatenated onto them).
    SPLIT_SIZE -- fraction in [0, 1] of files that go to TRAINING.
    """
    # Keep only files with a non-zero size.
    image_dataset = []
    for unitData in os.listdir(SOURCE):
        data = SOURCE + unitData
        if (os.path.getsize(data) > 0):
            image_dataset.append(unitData)
        else:
            print('Skipped ' + unitData)
            print('Invalid file size! i.e Zero length.')
    # Shuffle once, then slice into the two partitions.
    train_data_length = int(len(image_dataset) * SPLIT_SIZE)
    shuffled_set = random.sample(image_dataset, len(image_dataset))
    train_set = shuffled_set[:train_data_length]
    # BUG FIX: the original used shuffled_set[-test_data_length:], which
    # returns the WHOLE list when the test partition is empty (-0 slice).
    test_set = shuffled_set[train_data_length:]
    for data in train_set:
        copyfile(SOURCE + data, TRAINING + data)
    # BUG FIX: the original copied temp_train_data (the last *training*
    # file) into every testing destination instead of the test file itself.
    for data in test_set:
        copyfile(SOURCE + data, TESTING + data)
# Source and destination directories for the split.  Note the trailing '/' --
# split_data concatenates these strings with bare filenames.
CAT_SOURCE_DIR = "/tmp/PetImages/Cat/"
TRAINING_CATS_DIR = "/tmp/cats-v-dogs/training/cats/"
TESTING_CATS_DIR = "/tmp/cats-v-dogs/testing/cats/"
DOG_SOURCE_DIR = "/tmp/PetImages/Dog/"
TRAINING_DOGS_DIR = "/tmp/cats-v-dogs/training/dogs/"
TESTING_DOGS_DIR = "/tmp/cats-v-dogs/testing/dogs/"
split_size = .9  # 90% training / 10% testing
split_data(CAT_SOURCE_DIR, TRAINING_CATS_DIR, TESTING_CATS_DIR, split_size)
split_data(DOG_SOURCE_DIR, TRAINING_DOGS_DIR, TESTING_DOGS_DIR, split_size)
# Expected output
# 666.jpg is zero length, so ignoring
# 11702.jpg is zero length, so ignoring
# + colab_type="code" id="luthalB76ufC" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="d167fb6f-eab1-4a98-eb8e-76d290652ab6"
print(len(os.listdir('/tmp/cats-v-dogs/training/cats/')))
print(len(os.listdir('/tmp/cats-v-dogs/training/dogs/')))
print(len(os.listdir('/tmp/cats-v-dogs/testing/cats/')))
print(len(os.listdir('/tmp/cats-v-dogs/testing/dogs/')))
# Expected output:
# 11250
# 11250
# 1250
# 1250
# + colab_type="code" id="-BQrav4anTmj" colab={}
# DEFINE A KERAS MODEL TO CLASSIFY CATS V DOGS
# USE AT LEAST 3 CONVOLUTION LAYERS
# Three conv/max-pool stages followed by a dense classifier head.
model = tf.keras.models.Sequential([
    # YOUR CODE HERE
    # Note the input shape is the desired size of the image 150x150 with 3 bytes color
    tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # Flatten the results to feed into a DNN
    tf.keras.layers.Flatten(),
    # 512 neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    # Only 1 output neuron. It will contain a value from 0-1 where 0 for 1 class ('cats') and 1 for the other ('dogs')
    tf.keras.layers.Dense(1, activation='sigmoid')
])
# NOTE(review): `lr` is deprecated in TF 2.x optimizers -- prefer
# RMSprop(learning_rate=0.001) on newer TensorFlow versions.
model.compile(optimizer=RMSprop(lr=0.001), loss='binary_crossentropy', metrics=['accuracy'])
# + colab_type="code" id="mlNjoJ5D61N6" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="e3e44ed0-ef74-4a64-bdd4-5a8654ad47d8"
TRAINING_DIR = "/tmp/cats-v-dogs/training"
# Only rescaling to [0, 1]; no augmentation for this exercise.
train_datagen = ImageDataGenerator(rescale=1.0/255)
# NOTE: YOU MUST USE A BATCH SIZE OF 10 (batch_size=10) FOR THE
# TRAIN GENERATOR.
train_generator = train_datagen.flow_from_directory(TRAINING_DIR,
                                                    batch_size=10,
                                                    class_mode='binary',
                                                    target_size=(150, 150))
VALIDATION_DIR = "/tmp/cats-v-dogs/testing"
validation_datagen = ImageDataGenerator(rescale=1.0/255)
# NOTE: YOU MUST USE A BATCH SIZE OF 10 (batch_size=10) FOR THE
# VALIDATION GENERATOR.
# Fixed: the original line read `validation_generator = validation_generator = ...`
# (a confusing duplicated assignment).
validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR,
                                                              batch_size=10,
                                                              class_mode='binary',
                                                              target_size=(150, 150))
# Expected Output:
# Found 22498 images belonging to 2 classes.
# Found 2500 images belonging to 2 classes.
# + colab_type="code" id="KyS4n53w7DxC" colab={"base_uri": "https://localhost:8080/", "height": 547} outputId="5c0ccbbe-4f56-4f46-a6c4-6426968c7c84"
# Train for 15 epochs, validating on the held-out 10% split each epoch.
history = model.fit(train_generator,
                    epochs=15,
                    verbose=1,
                    validation_data=validation_generator)
# The expectation here is that the model will train, and that accuracy will be > 95% on both training and validation
# i.e. acc:A1 and val_acc:A2 will be visible, and both A1 and A2 will be > .9
# + colab_type="code" id="MWZrJN4-65RC" colab={"base_uri": "https://localhost:8080/", "height": 562} outputId="205b550c-7f23-42b7-dddf-73ae9b978504"
# PLOT LOSS AND ACCURACY
# %matplotlib inline
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
#-----------------------------------------------------------
# Retrieve a list of list results on training and test data
# sets for each training epoch
#-----------------------------------------------------------
acc=history.history['accuracy']
val_acc=history.history['val_accuracy']
loss=history.history['loss']
val_loss=history.history['val_loss']
epochs=range(len(acc)) # Get number of epochs
#------------------------------------------------
# Plot training and validation accuracy per epoch
#------------------------------------------------
# Fixed: the descriptive strings were passed positionally, where matplotlib
# treats them as extra data/format arguments rather than labels.  Pass them
# as label= and render them with a legend.
plt.plot(epochs, acc, 'r', label="Training Accuracy")
plt.plot(epochs, val_acc, 'b', label="Validation Accuracy")
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
#------------------------------------------------
# Plot training and validation loss per epoch
#------------------------------------------------
plt.plot(epochs, loss, 'r', label="Training Loss")
plt.plot(epochs, val_loss, 'b', label="Validation Loss")
plt.title('Training and validation loss')
plt.legend()
# Desired output. Charts with training and validation metrics. No crash :)
# + colab_type="code" id="LqL6FYUrtXpf" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "Ly8gQ29weXJpZ2h0IDIwMTcgR29vZ2xlIExMQwovLwovLyBMaWNlbnNlZCB1bmRlciB0aGUgQXBhY2hlIExpY2Vuc2UsIFZlcnNpb24gMi4wICh0aGUgIkxpY2Vuc2UiKTsKLy8geW91IG1heSBub3QgdXNlIHRoaXMgZmlsZSBleGNlcHQgaW4gY29tcGxpYW5jZSB3aXRoIHRoZSBMaWNlbnNlLgovLyBZb3UgbWF5IG9idGFpbiBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKLy8KLy8gICAgICBodHRwOi8vd3d3LmFwYWNoZS5vcmcvbGljZW5zZXMvTElDRU5TRS0yLjAKLy8KLy8gVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9yIGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZQovLyBkaXN0cmlidXRlZCB1bmRlciB0aGUgTGljZW5zZSBpcyBkaXN0cmlidXRlZCBvbiBhbiAiQVMgSVMiIEJBU0lTLAovLyBXSVRIT1VUIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4KLy8gU2VlIHRoZSBMaWNlbnNlIGZvciB0aGUgc3BlY2lmaWMgbGFuZ3VhZ2UgZ292ZXJuaW5nIHBlcm1pc3Npb25zIGFuZAovLyBsaW1pdGF0aW9ucyB1bmRlciB0aGUgTGljZW5zZS4KCi8qKgogKiBAZmlsZW92ZXJ2aWV3IEhlbHBlcnMgZm9yIGdvb2dsZS5jb2xhYiBQeXRob24gbW9kdWxlLgogKi8KKGZ1bmN0aW9uKHNjb3BlKSB7CmZ1bmN0aW9uIHNwYW4odGV4dCwgc3R5bGVBdHRyaWJ1dGVzID0ge30pIHsKICBjb25zdCBlbGVtZW50ID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnc3BhbicpOwogIGVsZW1lbnQudGV4dENvbnRlbnQgPSB0ZXh0OwogIGZvciAoY29uc3Qga2V5IG9mIE9iamVjdC5rZXlzKHN0eWxlQXR0cmlidXRlcykpIHsKICAgIGVsZW1lbnQuc3R5bGVba2V5XSA9IHN0eWxlQXR0cmlidXRlc1trZXldOwogIH0KICByZXR1cm4gZWxlbWVudDsKfQoKLy8gTWF4IG51bWJlciBvZiBieXRlcyB3aGljaCB3aWxsIGJlIHVwbG9hZGVkIGF0IGEgdGltZS4KY29uc3QgTUFYX1BBWUxPQURfU0laRSA9IDEwMCAqIDEwMjQ7Ci8vIE1heCBhbW91bnQgb2YgdGltZSB0byBibG9jayB3YWl0aW5nIGZvciB0aGUgdXNlci4KY29uc3QgRklMRV9DSEFOR0VfVElNRU9VVF9NUyA9IDMwICogMTAwMDsKCmZ1bmN0aW9uIF91cGxvYWRGaWxlcyhpbnB1dElkLCBvdXRwdXRJZCkgewogIGNvbnN0IHN0ZXBzID0gdXBsb2FkRmlsZXNTdGVwKGlucHV0SWQsIG91dHB1dElkKTsKICBjb25zdCBvdXRwdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQob3V0cHV0SWQpOwogIC8vIENhY2hlIHN0ZXBzIG9uIHRoZSBvdXRwdXRFbGVtZW50IHRvIG1ha2UgaXQgYXZhaWxhYmxlIGZvciB0aGUgbmV4dCBjYWxsCiAgLy8gdG8gdXBsb2FkRmlsZXNDb250aW51ZSBmcm9tIFB5dGhvbi4KICBvdXRwdXRFbGVtZW50LnN0Z
XBzID0gc3RlcHM7CgogIHJldHVybiBfdXBsb2FkRmlsZXNDb250aW51ZShvdXRwdXRJZCk7Cn0KCi8vIFRoaXMgaXMgcm91Z2hseSBhbiBhc3luYyBnZW5lcmF0b3IgKG5vdCBzdXBwb3J0ZWQgaW4gdGhlIGJyb3dzZXIgeWV0KSwKLy8gd2hlcmUgdGhlcmUgYXJlIG11bHRpcGxlIGFzeW5jaHJvbm91cyBzdGVwcyBhbmQgdGhlIFB5dGhvbiBzaWRlIGlzIGdvaW5nCi8vIHRvIHBvbGwgZm9yIGNvbXBsZXRpb24gb2YgZWFjaCBzdGVwLgovLyBUaGlzIHVzZXMgYSBQcm9taXNlIHRvIGJsb2NrIHRoZSBweXRob24gc2lkZSBvbiBjb21wbGV0aW9uIG9mIGVhY2ggc3RlcCwKLy8gdGhlbiBwYXNzZXMgdGhlIHJlc3VsdCBvZiB0aGUgcHJldmlvdXMgc3RlcCBhcyB0aGUgaW5wdXQgdG8gdGhlIG5leHQgc3RlcC4KZnVuY3Rpb24gX3VwbG9hZEZpbGVzQ29udGludWUob3V0cHV0SWQpIHsKICBjb25zdCBvdXRwdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQob3V0cHV0SWQpOwogIGNvbnN0IHN0ZXBzID0gb3V0cHV0RWxlbWVudC5zdGVwczsKCiAgY29uc3QgbmV4dCA9IHN0ZXBzLm5leHQob3V0cHV0RWxlbWVudC5sYXN0UHJvbWlzZVZhbHVlKTsKICByZXR1cm4gUHJvbWlzZS5yZXNvbHZlKG5leHQudmFsdWUucHJvbWlzZSkudGhlbigodmFsdWUpID0+IHsKICAgIC8vIENhY2hlIHRoZSBsYXN0IHByb21pc2UgdmFsdWUgdG8gbWFrZSBpdCBhdmFpbGFibGUgdG8gdGhlIG5leHQKICAgIC8vIHN0ZXAgb2YgdGhlIGdlbmVyYXRvci4KICAgIG91dHB1dEVsZW1lbnQubGFzdFByb21pc2VWYWx1ZSA9IHZhbHVlOwogICAgcmV0dXJuIG5leHQudmFsdWUucmVzcG9uc2U7CiAgfSk7Cn0KCi8qKgogKiBHZW5lcmF0b3IgZnVuY3Rpb24gd2hpY2ggaXMgY2FsbGVkIGJldHdlZW4gZWFjaCBhc3luYyBzdGVwIG9mIHRoZSB1cGxvYWQKICogcHJvY2Vzcy4KICogQHBhcmFtIHtzdHJpbmd9IGlucHV0SWQgRWxlbWVudCBJRCBvZiB0aGUgaW5wdXQgZmlsZSBwaWNrZXIgZWxlbWVudC4KICogQHBhcmFtIHtzdHJpbmd9IG91dHB1dElkIEVsZW1lbnQgSUQgb2YgdGhlIG91dHB1dCBkaXNwbGF5LgogKiBAcmV0dXJuIHshSXRlcmFibGU8IU9iamVjdD59IEl0ZXJhYmxlIG9mIG5leHQgc3RlcHMuCiAqLwpmdW5jdGlvbiogdXBsb2FkRmlsZXNTdGVwKGlucHV0SWQsIG91dHB1dElkKSB7CiAgY29uc3QgaW5wdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQoaW5wdXRJZCk7CiAgaW5wdXRFbGVtZW50LmRpc2FibGVkID0gZmFsc2U7CgogIGNvbnN0IG91dHB1dEVsZW1lbnQgPSBkb2N1bWVudC5nZXRFbGVtZW50QnlJZChvdXRwdXRJZCk7CiAgb3V0cHV0RWxlbWVudC5pbm5lckhUTUwgPSAnJzsKCiAgY29uc3QgcGlja2VkUHJvbWlzZSA9IG5ldyBQcm9taXNlKChyZXNvbHZlKSA9PiB7CiAgICBpbnB1dEVsZW1lbnQuYWRkRXZlbnRMaXN0ZW5lcignY2hhbmdlJywgKGUpID0+IHsKICAgICAgcmVzb2x2ZShlLnRhcmdldC5maWxlcyk7CiAgI
CB9KTsKICB9KTsKCiAgY29uc3QgY2FuY2VsID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnYnV0dG9uJyk7CiAgaW5wdXRFbGVtZW50LnBhcmVudEVsZW1lbnQuYXBwZW5kQ2hpbGQoY2FuY2VsKTsKICBjYW5jZWwudGV4dENvbnRlbnQgPSAnQ2FuY2VsIHVwbG9hZCc7CiAgY29uc3QgY2FuY2VsUHJvbWlzZSA9IG5ldyBQcm9taXNlKChyZXNvbHZlKSA9PiB7CiAgICBjYW5jZWwub25jbGljayA9ICgpID0+IHsKICAgICAgcmVzb2x2ZShudWxsKTsKICAgIH07CiAgfSk7CgogIC8vIENhbmNlbCB1cGxvYWQgaWYgdXNlciBoYXNuJ3QgcGlja2VkIGFueXRoaW5nIGluIHRpbWVvdXQuCiAgY29uc3QgdGltZW91dFByb21pc2UgPSBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gewogICAgc2V0VGltZW91dCgoKSA9PiB7CiAgICAgIHJlc29sdmUobnVsbCk7CiAgICB9LCBGSUxFX0NIQU5HRV9USU1FT1VUX01TKTsKICB9KTsKCiAgLy8gV2FpdCBmb3IgdGhlIHVzZXIgdG8gcGljayB0aGUgZmlsZXMuCiAgY29uc3QgZmlsZXMgPSB5aWVsZCB7CiAgICBwcm9taXNlOiBQcm9taXNlLnJhY2UoW3BpY2tlZFByb21pc2UsIHRpbWVvdXRQcm9taXNlLCBjYW5jZWxQcm9taXNlXSksCiAgICByZXNwb25zZTogewogICAgICBhY3Rpb246ICdzdGFydGluZycsCiAgICB9CiAgfTsKCiAgaWYgKCFmaWxlcykgewogICAgcmV0dXJuIHsKICAgICAgcmVzcG9uc2U6IHsKICAgICAgICBhY3Rpb246ICdjb21wbGV0ZScsCiAgICAgIH0KICAgIH07CiAgfQoKICBjYW5jZWwucmVtb3ZlKCk7CgogIC8vIERpc2FibGUgdGhlIGlucHV0IGVsZW1lbnQgc2luY2UgZnVydGhlciBwaWNrcyBhcmUgbm90IGFsbG93ZWQuCiAgaW5wdXRFbGVtZW50LmRpc2FibGVkID0gdHJ1ZTsKCiAgZm9yIChjb25zdCBmaWxlIG9mIGZpbGVzKSB7CiAgICBjb25zdCBsaSA9IGRvY3VtZW50LmNyZWF0ZUVsZW1lbnQoJ2xpJyk7CiAgICBsaS5hcHBlbmQoc3BhbihmaWxlLm5hbWUsIHtmb250V2VpZ2h0OiAnYm9sZCd9KSk7CiAgICBsaS5hcHBlbmQoc3BhbigKICAgICAgICBgKCR7ZmlsZS50eXBlIHx8ICduL2EnfSkgLSAke2ZpbGUuc2l6ZX0gYnl0ZXMsIGAgKwogICAgICAgIGBsYXN0IG1vZGlmaWVkOiAkewogICAgICAgICAgICBmaWxlLmxhc3RNb2RpZmllZERhdGUgPyBmaWxlLmxhc3RNb2RpZmllZERhdGUudG9Mb2NhbGVEYXRlU3RyaW5nKCkgOgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAnbi9hJ30gLSBgKSk7CiAgICBjb25zdCBwZXJjZW50ID0gc3BhbignMCUgZG9uZScpOwogICAgbGkuYXBwZW5kQ2hpbGQocGVyY2VudCk7CgogICAgb3V0cHV0RWxlbWVudC5hcHBlbmRDaGlsZChsaSk7CgogICAgY29uc3QgZmlsZURhdGFQcm9taXNlID0gbmV3IFByb21pc2UoKHJlc29sdmUpID0+IHsKICAgICAgY29uc3QgcmVhZGVyID0gbmV3IEZpbGVSZWFkZXIoKTsKICAgICAgcmVhZGVyLm9ubG9hZCA9IChlKSA9PiB7CiAgICAgICAgcmVzb2x2ZShlLnRhcmdldC5yZXN1b
HQpOwogICAgICB9OwogICAgICByZWFkZXIucmVhZEFzQXJyYXlCdWZmZXIoZmlsZSk7CiAgICB9KTsKICAgIC8vIFdhaXQgZm9yIHRoZSBkYXRhIHRvIGJlIHJlYWR5LgogICAgbGV0IGZpbGVEYXRhID0geWllbGQgewogICAgICBwcm9taXNlOiBmaWxlRGF0YVByb21pc2UsCiAgICAgIHJlc3BvbnNlOiB7CiAgICAgICAgYWN0aW9uOiAnY29udGludWUnLAogICAgICB9CiAgICB9OwoKICAgIC8vIFVzZSBhIGNodW5rZWQgc2VuZGluZyB0byBhdm9pZCBtZXNzYWdlIHNpemUgbGltaXRzLiBTZWUgYi82MjExNTY2MC4KICAgIGxldCBwb3NpdGlvbiA9IDA7CiAgICB3aGlsZSAocG9zaXRpb24gPCBmaWxlRGF0YS5ieXRlTGVuZ3RoKSB7CiAgICAgIGNvbnN0IGxlbmd0aCA9IE1hdGgubWluKGZpbGVEYXRhLmJ5dGVMZW5ndGggLSBwb3NpdGlvbiwgTUFYX1BBWUxPQURfU0laRSk7CiAgICAgIGNvbnN0IGNodW5rID0gbmV3IFVpbnQ4QXJyYXkoZmlsZURhdGEsIHBvc2l0aW9uLCBsZW5ndGgpOwogICAgICBwb3NpdGlvbiArPSBsZW5ndGg7CgogICAgICBjb25zdCBiYXNlNjQgPSBidG9hKFN0cmluZy5mcm9tQ2hhckNvZGUuYXBwbHkobnVsbCwgY2h1bmspKTsKICAgICAgeWllbGQgewogICAgICAgIHJlc3BvbnNlOiB7CiAgICAgICAgICBhY3Rpb246ICdhcHBlbmQnLAogICAgICAgICAgZmlsZTogZmlsZS5uYW1lLAogICAgICAgICAgZGF0YTogYmFzZTY0LAogICAgICAgIH0sCiAgICAgIH07CiAgICAgIHBlcmNlbnQudGV4dENvbnRlbnQgPQogICAgICAgICAgYCR7TWF0aC5yb3VuZCgocG9zaXRpb24gLyBmaWxlRGF0YS5ieXRlTGVuZ3RoKSAqIDEwMCl9JSBkb25lYDsKICAgIH0KICB9CgogIC8vIEFsbCBkb25lLgogIHlpZWxkIHsKICAgIHJlc3BvbnNlOiB7CiAgICAgIGFjdGlvbjogJ2NvbXBsZXRlJywKICAgIH0KICB9Owp9CgpzY29wZS5nb29nbGUgPSBzY29wZS5nb29nbGUgfHwge307CnNjb3BlLmdvb2dsZS5jb2xhYiA9IHNjb3BlLmdvb2dsZS5jb2xhYiB8fCB7fTsKc2NvcGUuZ29vZ2xlLmNvbGFiLl9maWxlcyA9IHsKICBfdXBsb2FkRmlsZXMsCiAgX3VwbG9hZEZpbGVzQ29udGludWUsCn07Cn0pKHNlbGYpOwo=", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 57} outputId="1187e63e-e78f-4791-c078-57c6d7ce1eb6"
# Here's a codeblock just for fun. You should be able to upload an image here
# and have it classified without crashing
import numpy as np
from google.colab import files
from keras.preprocessing import image
# Let the user upload one or more images and classify each with the model.
uploaded = files.upload()
for fn in uploaded.keys():
    # predicting images
    path = '/content/' + fn
    img = image.load_img(path, target_size=((150, 150)))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)  # add the batch dimension: (1, 150, 150, 3)
    images = np.vstack([x])
    classes = model.predict(images, batch_size=10)
    print(classes[0])
    # Sigmoid output: > 0.5 means class 1 ('dogs'), otherwise 'cats'.
    # NOTE(review): pixels are not rescaled by 1/255 here, unlike the training
    # generators -- predictions may be unreliable.  Confirm against training.
    if classes[0]>0.5:
        print(fn + " is a dog")
    else:
        print(fn + " is a cat")
# + [markdown] id="WDipX4noKw-b" colab_type="text"
# ### Visualizing Intermediate Representations
#
# To get a feel for what kind of features our convnet has learned, one fun thing to do is to visualize how an input gets transformed as it goes through the convnet.
#
# Let's pick a random cat or dog image from the training set, and then generate a figure where each row is the output of a layer, and each image in the row is a specific filter in that output feature map. Rerun this cell to generate intermediate representations for a variety of training images.
# + id="ePzVCO-5K7UJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 231} outputId="b52aa7de-8b8f-4789-84de-ac0c28627438"
import numpy as np
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img
# Let's define a new Model that will take an image as input, and will output
# intermediate representations for all layers in the previous model after
# the first.
successive_outputs = [layer.output for layer in model.layers[1:]]
#visualization_model = Model(img_input, successive_outputs)
visualization_model = tf.keras.models.Model(inputs = model.input, outputs = successive_outputs)
# Let's prepare a random input image of a cat or dog from the training set.
# Fixed: the original referenced train_cats_dir / train_cat_fnames etc.,
# which are never defined in this notebook (NameError).  Build the file
# lists from the training directories created earlier instead.
cat_img_files = [os.path.join(TRAINING_CATS_DIR, f) for f in os.listdir(TRAINING_CATS_DIR)]
dog_img_files = [os.path.join(TRAINING_DOGS_DIR, f) for f in os.listdir(TRAINING_DOGS_DIR)]
img_path = random.choice(cat_img_files + dog_img_files)
img = load_img(img_path, target_size=(150, 150))  # this is a PIL image
x = img_to_array(img)              # Numpy array with shape (150, 150, 3)
x = x.reshape((1,) + x.shape)      # Numpy array with shape (1, 150, 150, 3)
# Rescale by 1/255
x /= 255.0
# Let's run our image through our network, thus obtaining all
# intermediate representations for this image.
successive_feature_maps = visualization_model.predict(x)
# These are the names of the layers, so we can have them as part of our plot.
# Fixed: the visualization model skips the first layer, so the names must
# skip it too -- otherwise every plot title is off by one layer.
layer_names = [layer.name for layer in model.layers[1:]]
# -----------------------------------------------------------------------
# Now let's display our representations
# -----------------------------------------------------------------------
for layer_name, feature_map in zip(layer_names, successive_feature_maps):
    if len(feature_map.shape) == 4:
        #-------------------------------------------
        # Just do this for the conv / maxpool layers, not the fully-connected layers
        #-------------------------------------------
        n_features = feature_map.shape[-1]  # number of features in the feature map
        size = feature_map.shape[1]         # feature map shape (1, size, size, n_features)
        # We will tile our images in this matrix
        display_grid = np.zeros((size, size * n_features))
        #-------------------------------------------------
        # Postprocess the feature to be visually palatable
        #-------------------------------------------------
        for i in range(n_features):
            x = feature_map[0, :, :, i]
            # Normalize to mean 0 / unit std, then map into the 0-255 byte range.
            # NOTE(review): x.std() can be 0 for a dead filter, which would
            # divide by zero here -- confirm whether that can occur.
            x -= x.mean()
            x /= x.std()
            x *= 64
            x += 128
            x = np.clip(x, 0, 255).astype('uint8')
            display_grid[:, i * size : (i + 1) * size] = x  # Tile each filter into a horizontal grid
        #-----------------
        # Display the grid
        #-----------------
        scale = 20. / n_features
        plt.figure(figsize=(scale * n_features, scale))
        plt.title(layer_name)
        plt.grid(False)
        plt.imshow(display_grid, aspect='auto', cmap='viridis')
# + id="tlWsCrBDRaxO" colab_type="code" colab={}
| Cats_vs_Dogs_CNN_KAGGLE_Competiton.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introducción a ecuaciones diferenciales
#
#
# <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/3/39/GodfreyKneller-IsaacNewton-1689.jpg" width="300px" height="100px" />
#
# > Las primeras ecuaciones diferenciales se encuentran históricamente con la invención del cálculo por Newton y Leibniz. En el Capítulo 2 de su trabajo "Methodus fluxionum et Serierum Infinitarum", (Newton, 1671), Newton describe ecuaciones del tipo
#
# $$\frac{dy(x)}{dx}=f(x,y(x)).$$
#
# **Referencia**
# - https://en.wikipedia.org/wiki/Differential_equation
# - https://www.mathsisfun.com/calculus/differential-equations.html
# ## 0. Nociones básicas
#
# ### Definición
# Una ecuación diferencial es una ecuación que involucra una función y una o más de sus derivadas.
#
# Por ejemplo
#
# <img style="float: left; margin: 0px 0px 15px 15px;" src="https://www.mathsisfun.com/calculus/images/diff-eq-1.svg" width="300px" height="100px" />
# una ecuación de la función $y(x)$ y su derivada $\frac{dy(x)}{dx}$.
# Pero bueno, cuando tenemos una **ecuación**, ¿qué hacemos con ella?
# ### Solución
#
# Decimos que hemos resuelto la ecuación diferencial si descubrimos la función $y(x)$ (o conjunto de funciones $y(x)$).
#
# **Ejemplo** Estudiar la ecuación diferencial:
#
# $$\frac{\text{d}x}{\text{d}t}=a x(t), \quad a \in \mathbb{R}.$$
# ___
# Cuando una ecuación puede ser resuelta, hay varios trucos para intentar resolverla. En muchos casos, no es posible o es muy difícil encontrar la solución analítica. Por eso, en el curso examinaremos la forma de encontrar solución numérica.
# ## 1. ¿Por qué son útiles las ecuaciones diferenciales?
#
# Antes qué nada, conceptualmente, **¿qué significa la derivada $\frac{dx}{dt}$?**
# Nuestro mundo, y particularmente los fenómenos que estudiamos en ingeniería, es cambiante (evoluciona) en el tiempo. De modo que las descripciones (modelos) de como cambian las cosas en el tiempo terminan como una ecuación diferencial.
# ### Ejemplos.
# **1. Biología (crecimiento poblacional de conejos)**
#
# Mientras más conejos tengamos, más bebés conejo obtendremos (los conejos tienen una grandiosa habilidad de reproducción). Luego, los bebés conejo crecen y tienen bebés a la vez. La población crece muy muy rápido.
#
# Partes importantes:
#
# - Población en el tiempo $t$: $N(t)$.
# - Tasa de crecimiento: $r$.
# - Tasa de cambio de la población: $\frac{dN}{dt}$.
#
# Imaginemos algunos valores:
#
# - La población actual (en el tiempo $t=0$) es $N(0)=1000$ conejos.
# - La tasa de crecimiento es de $0.01$ conejos por semana por cada conejo actualmente.
#
# Entonces la tasa de cambio de la población $\left.\frac{dN}{dt}\right|_{t=0}=0.01\times 1000$.
#
# Sin embargo, esto sólo es cierto en el tiempo específico $t=0$, y esto no significa que la población crece de manera constante.
#
# Recordemos que: mientras más conejos, más conejos nuevos se obtienen.
#
# De manera que es mejor decir que la tasa de cambio (en cualquier instante de tiempo $t$) es la tasa de crecimiento $r$ veces la población $N(t)$ en ese instante:
#
# $$\frac{dN}{dt}=rN,$$
#
# y eso es una ecuación diferencial, porque es una ecuación de la función $N(t)$ y su derivada.
#
# **El poder de las matemáticas... con esa simple expresión decimos que "la tasa de cambio de la población en el tiempo equivale a la tasa de crecimiento veces la población".**
# ### <font color=green>Las ecuaciones diferenciales pueden describir cómo cambia la población, cómo se dispersa el calor, cómo un material radioactivo se desintegra y mucho más. Son una forma natural de describir cambios o movimiento en el universo.</font>
#
# ### ¿Qué hacemos con la ecuación diferencial?
#
# En principio, las ecuaciones diferenciales son magníficas para expresar (modelar) muchos fenómenos. Sin embargo, son difíciles de usar tal cual están.
#
# De manera que intentamos **resolverlas** encontrando la(s) función(es) que satisfagan la ecuación, es decir, quitando la derivada, de manera que podamos hacer cálculos, gráficas, predecir, y todo lo demás.
# **2. Finanzas (interés continuamente compuesto)**
#
# El valor del dinero cambia en el tiempo. Esto se expresa por medio de tasas de interés. Normalmente, el interés se puede calcular en tiempo fijados como años, meses, etcétera, y esto se añade al capital inicial y se reinvierte.
#
# Esto se llama interés compuesto.
#
# Pero cuando se compone continuamente (en todo tiempo), entonces a cada instante, el interés se añade proporcionalmente a la inversión (o préstamo).
#
# Mientras más inversión (o préstamo) más interés gana.
#
# Usando $t$ para el tiempo, $r$ para la tasa de interés y $V(t)$ para el valor en el instante $t$ de la inversión, podemos expresar la siguiente ecuación:
#
# $$\frac{dV}{dt}=rV.$$
#
# Notar que es la misma ecuación que tenemos para los conejos, solo con diferentes letras. Entonces, las matemáticas muestran que esos dos fenómenos se comportan de la misma manera.
#
# Ya dijimos que como ecuación, es difícil usar esta información. Pero como ya vimos, se puede resolver (por separación de variables) y la solución es:
#
# $$V(t) = P e^{rt},$$
#
# donde $P$ es el principal (capital inicial).
#
# De forma que un préstamo continuamente compuesto de $1,000 por dos años y una tasa de interés del 10% se vuelve:
#
# $$V = 1000 × e^{2\times0.1}$$
# $$V = 1000 × 1.22140...$$
# $$V = $1,221.40$$
# **3. Mecánica Clásica**
#
# Un resorte tiene una masa amarrada:
#
# - la masa es jalada hacia la derecha,
# - cuando el resorte se estira, su tensión se incrementa,
# - la masa se detiene,
# - la tensión del resorte la jala de nuevo hacia arriba,
# - luego baja, luego sube, luego baja, etcétera...
#
# Descripción (en el tablero)...
# **Ejercicio:** Simular el sistema masa-resorte obtenido con un valor de $m=0.5\, kg$, una constante de Hook $k = 0.8$ y condiciones iniciales $x(0)=0.2\,m$, $v(0)=0\,m/s$ y en un intervalo de tiempo de $t=[0,30]\, s$
# importar librerías
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as ode
# +
#Librería para integrar numéricamente solve_ivp
# ode.solve_ivp?
# -
# Define the differential equation (right-hand side for solve_ivp)
def masa_resorte(t,y):
    """Right-hand side of the mass-spring ODE system.

    t -- time (unused: the system is autonomous)
    y -- state vector [position x, velocity v]

    Returns [dx/dt, dv/dt] for m = 0.5 kg and Hooke constant k = 0.8.
    """
    # Fixed physical parameters of the oscillator.
    m = 0.5
    k = 0.8
    position, velocity = y[0], y[1]
    # x' = v ;  v' = -(k/m) x   (Newton's second law with Hooke's force)
    return [velocity, -(k/m)*position]
# +
# Initial condition: x(0) = 0.2 m, v(0) = 0 m/s
y0 = [0.2,0]
# Integration time span: t in [0, 30] seconds
t1 = (0,30)
# Numerical solution (RK45 by default); max_step=0.01 keeps the curve smooth
sol1 = ode.solve_ivp(masa_resorte,t1,y0, max_step=.01)
sol1
# -
# Extract the time vector (t) and the solution array (y)
t = sol1.t
y = sol1.y.T  # transposed: rows are time points, column 0 = position, column 1 = velocity
# +
# Plots
plt.figure(figsize=(15,8))
plt.title('Sistema Masa-Resorte', fontsize=15)
plt.plot(t, y[:,0], 'k', lw =3, label='Posición del sistema masa resorte')
plt.plot(t, y[:,1], 'r', lw=3, label='Velocidad del sistema masa resorte')
plt.xlabel('Tiempo [s]',fontsize=15)
plt.grid()
plt.legend(loc='best')
plt.show()
# -
# <script>
# $(document).ready(function(){
# $('div.prompt').hide();
# $('div.back-to-top').hide();
# $('nav#menubar').hide();
# $('.breadcrumb').hide();
# $('.hidden-print').hide();
# });
# </script>
#
# <footer id="attribution" style="float:right; color:#808080; background:#fff;">
# Created with Jupyter by <NAME>
# </footer>
| Módulo 3/Clase16_IntroED.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
def distanceBetweenBusStops(distance, start, destination):
    """Return the shortest distance between two stops on a circular bus route.

    Args:
        distance: distance[i] is the distance between stop i and stop i+1
            (the last entry wraps back to stop 0).
        start, destination: stop indices; order does not matter.

    Returns:
        The smaller of the clockwise and counter-clockwise distances.
    """
    # BUG FIX: the previous version decided when to stop the backward walk by
    # comparing *values* of the distance array (breaks whenever two segments
    # have equal length), left a debug print in place, and looped forever when
    # start > destination. The route is symmetric, so normalize the indices
    # and use simple prefix sums instead.
    if start > destination:
        start, destination = destination, start
    forward = sum(distance[start:destination])
    # Going the other way around is "everything else" on the loop.
    backward = sum(distance) - forward
    return min(forward, backward)
print(distanceBetweenBusStops([7,10,1,12,11,14,5,0], 0, 4))
# -
| Anjani/Leetcode/Array/Distance Between Bus Stops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#hide
from your_lib.core import *
# # Jetson Nano Haiku bot
#
# > Haiku bot for Jetson Nano, using GPT 2
# This file will become your README and also the index of your documentation.
#
# - Currently using GPT2 pytorch engine (medium size)
# - Using syllapy to count syllables (https://github.com/mholtzscher/syllapy.git)
# - Using gpt-2-Pytorch to generate text (https://github.com/graykode/gpt-2-Pytorch.git)
#
# Todo:
#
# - Implement https://github.com/huggingface/transformers
# - Implement Uber / PPLM
# - Refine syllable count
# ## Install
# Requirements:
# using python 3.6.9
# torch (using NVIDIA Jetson version for Python 3.6)
# GPT2-to-pytorch (in this project copied into folder named GPT2)
# numpy
#
# pip install:
# syllapy
# tqdm
# regex
#
# From PPLM:
# nltk
# colorama
# transformers (sentencepiece does not work - build from source per https://www.kaggle.com/sunhwan/google-sentencepiece, omitting conda instructions as working in virtualenv in jetson nano)
# torchtext
#
# `pip install your_project_name`
# ## How to use
# Fill me in please! Don't forget code examples:
# Placeholder example cell (nbdev README template); evaluates to 2.
1+1
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from qkids.DatabasePool import get_schedule_connection
from qkids.Models import Lesson
from qkids import Week
import matplotlib.pyplot as plt
import pandas as pd
def get_finish_lessons_weekly(week):
    """Load finished (non-internal, attended) lessons for the given week.

    Args:
        week: Week object; week.slots_sqlstring is an SQL list of begin_at
            time slots (built internally, not from user input).

    Returns:
        DataFrame with the summed student_count per lesson_id and the
        chapter each lesson belongs to.
    """
    # BUG FIX: the original backslash string continuation accidentally placed
    # a literal '#' right before the %s placeholder ("begin_at in \ # %s").
    # MySQL treats '#' as a comment start, silently truncating the rest of
    # the WHERE clause. Rebuilt with adjacent string literals, no '#'.
    sql = (
        "select r.id, schedule_id, room_type_id, course_id, lesson_id, begin_at, "
        "student_count from schedules s join rooms r on s.id = r.schedule_id "
        "and r.is_internal = 0 "
        "and student_count> 0 and klass_id is null and status = 1 where begin_at in "
        "%s and room_type_id in (2,5,7) " % week.slots_sqlstring
    )
    df = pd.read_sql(sql, get_schedule_connection(), index_col='id')
    lesson = Lesson()
    # Total students per lesson, then map each lesson id to its chapter.
    df2 = df.groupby('lesson_id')['student_count'].sum()
    df3 = map(lesson.get_chapter_by_lesson, list(df2.index))
    df = pd.DataFrame(df2)
    df['chapter'] = list(df3)
    return df
def get_predict_schedule_file(week):
    """Load the predicted schedule pickle for *week*.

    Args:
        week: object whose str() names the output subdirectory.

    Returns:
        The unpickled object (a DataFrame, per the sibling plotting code).
        BUG FIX: the original read the file but never returned it, so every
        caller received None.
    """
    df = pd.read_pickle('output/{}/farforschedule.pkl'.format(week))
    return df
def plot_contrast_week(week):
    """Return (predicted, actual) student counts per chapter for *week*.

    Args:
        week: Week object identifying both the DB query and the pickle dir.

    Returns:
        Tuple (predict, actual) of per-chapter student-count Series.
    """
    actual_df = get_finish_lessons_weekly(week)
    predicted_df = pd.read_pickle('/home/coffee/QkidsAutomation/output/{}/farforschedule.pkl'.format(week))
    # Pickle index level 2 holds the chapter; aggregate both sources to it.
    predict = predicted_df.groupby(level=2).sum()
    actual = actual_df.groupby('chapter')['student_count'].sum()
    return predict, actual
# Compare predicted vs. actual per-chapter student counts for the last six
# weeks, one subplot per week, walking backwards one week per iteration.
week = Week().get_previous_week()
plt.figure(1,figsize=(14, 25))
#begin 201903
for i in range(6):
    ax = plt.subplot(6, 1, i + 1)
    predict, actual = plot_contrast_week(week)
    ax.scatter(predict.index, predict, label='predict')
    ax.scatter(actual.index, actual, label='actual')
    # Label the subplot with the week being plotted
    ax.text(70, 25000, week, verticalalignment="top")
    ax.legend()
    week = week.get_previous_week()
plt.show()
# #### chapter_lesson
# Distribution of per-lesson student counts within the first ten chapters,
# for the week two weeks back.
week = Week().get_previous_week()
week = week.get_previous_week()
print(week)
df = get_finish_lessons_weekly(week)
plt.figure(0,figsize=(14, 25))
i = 1
for c, data in df.loc[df['chapter'].isin([1,2,3,4,5,6,7,8,9,10])].groupby('chapter'):
    ax = plt.subplot(5, 2, i)
    # Chapter number as in-plot label
    ax.text(4, 0.8, c,)
    ax.scatter(range(data.index.size), data.student_count, )
    i += 1
| jiuqu/lesson_chapter_distribute_weekly.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear SVM Model for model analysis
# + id="NLC-Q5ccjpnX"
import pandas as pd
import numpy as np
import re
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from wordcloud import WordCloud
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.snowball import SnowballStemmer
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.metrics import f1_score,precision_score,recall_score
# + colab={"base_uri": "https://localhost:8080/"} id="ybzJm07bpaEt" outputId="859fe492-b1bc-4fb7-b573-1734040e5e4a"
# %%time
## sample_500k is a sample from main dataset
# Load the preprocessed 100k-question sample (question text + tags).
preprocess_data = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Stack overflow Tag /preprocessed_3title_100k.csv")
preprocess_data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 306} id="hKB-EBeFrDvn" outputId="109168dc-7df3-4547-c888-10e629580560"
preprocess_data.head()
# + id="maFUOrv8piay"
def text_splitter(text):
    """Whitespace tokenizer used by the Count/Tfidf vectorizers."""
    tokens = text.split()
    return tokens
# + id="TO5BUgLMpiYU"
# binary='true' will give a binary vectorizer
tag_vectorizer = CountVectorizer(tokenizer = text_splitter, binary=True)
multi_label_y = tag_vectorizer.fit_transform(preprocess_data['tags'].values.astype(str))
# + id="X0OwaY-FpiVT"
# make sum column wise
tag_column_sum = multi_label_y.sum(axis=0).tolist()[0]
# + id="RW0287DLpiN1"
# To select n number of top tags
def select_top_tags(n):
    """Return the label matrix restricted to the n most frequent tags.

    Relies on the module-level tag_column_sum (per-tag question counts)
    and multi_label_y (binary question x tag matrix).
    """
    # Column indices ordered from most to least frequent tag,
    # e.g. top 10: [3711, 15246, 22934, 15324, 1054, 15713, 3720, 24481, 14905, 1897]
    ranked = sorted(range(len(tag_column_sum)),
                    key=lambda col: tag_column_sum[col],
                    reverse=True)
    # Keep only the n most frequent tag columns -> shape (n_questions, n)
    return multi_label_y[:, ranked[:n]]
#
def questions_covered_fn(n):
    """Number of questions left with zero tags when keeping only the top n tags."""
    top_n = select_top_tags(n)
    # Tags per question: row-wise sum of the binary label matrix.
    tags_per_question = top_n.sum(axis=1)
    # Count questions whose row sum is zero, i.e. covered by no kept tag.
    # (The original comment said "non-zero values", which was misleading.)
    return np.count_nonzero(tags_per_question == 0)
# Sweep the number of kept tags from 500 upwards in steps of 100 and record
# the percentage of questions covered (having at least one kept tag).
# Starting at 500 because the top 500 tags are considered indispensable.
questions_covered=[]
total_tags=multi_label_y.shape[1]
total_qs=preprocess_data.shape[0]
for i in range(500, total_tags, 100):
    questions_covered.append(np.round(((total_qs-questions_covered_fn(i))/total_qs)*100,3))
# + colab={"base_uri": "https://localhost:8080/"} id="WzcxKHI7p06r" outputId="51665d11-964a-4566-c99d-10590c3212ff"
# Restrict the label matrix to the 500 most frequent tags.
multi_label_n_y = select_top_tags(500)
# NOTE(review): coverage is printed for 5500 tags while only 500 were
# selected above — confirm which count is intended.
print("number of questions that are not covered :", questions_covered_fn(5500),"out of ", total_qs)
# + colab={"base_uri": "https://localhost:8080/"} id="__bsrlppp02Q" outputId="5f1ce5df-36b2-4b99-df5c-ca415dd6b67b"
print("Number of tags in sample :", multi_label_y.shape[1])
print("number of tags taken :", multi_label_n_y.shape[1],"-->",round((multi_label_n_y.shape[1]/multi_label_y.shape[1]),3)*100,"%")
# + id="p9hMtVnkp0zu"
# Chronological 80/20 train/test split (rows are not shuffled).
total_size=preprocess_data.shape[0]
train_size=int(0.80*total_size)
x_train=preprocess_data.head(train_size)
x_test=preprocess_data.tail(total_size - train_size)
y_train = multi_label_n_y[0:train_size,:]
y_test = multi_label_n_y[train_size:total_size,:]
# + colab={"base_uri": "https://localhost:8080/"} id="Prtbt2v-p0w3" outputId="8a0fc490-078b-45de-def1-0583b931552b"
# %%time
# TF-IDF features: up to 200000 features, unigrams through trigrams.
vectorizer = TfidfVectorizer(min_df=0.00009, max_features=200000, smooth_idf=True, norm="l2", tokenizer = text_splitter, sublinear_tf=False, ngram_range=(1,3))
# Fit on train questions only; transform test with the same vocabulary.
x_train_multi_label = vectorizer.fit_transform(x_train['question'])
x_test_multi_label = vectorizer.transform(x_test['question'])
# + colab={"base_uri": "https://localhost:8080/"} id="QDB26LyJp8nJ" outputId="60d3b8fb-7c6b-40a2-d66a-6253866f673c"
# Sanity-check data shapes after featurization
print("Dimensions of train data X:",x_train_multi_label.shape, "Y :",y_train.shape)
print("Dimensions of test data X:",x_test_multi_label.shape,"Y:",y_test.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="ksopJMi6p8k0" outputId="5be07b63-50fb-4895-d22d-7bdc649d2e90"
# Persist the fitted vectorizer so inference can reuse the same vocabulary.
from joblib import dump
dump(vectorizer, '/content/drive/MyDrive/Colab Notebooks/Stack overflow Tag /stackoverflow_tfidf_vectorizer_liner_svm__4grams_100k.pkl')
# + id="dtwgLamfp8iC"
# Linear SVM (hinge loss) trained via SGD, one binary classifier per tag.
classifier = OneVsRestClassifier(SGDClassifier(loss='hinge', alpha=0.00001, penalty='l1'), n_jobs=-1)
# + colab={"base_uri": "https://localhost:8080/"} id="s_fN2p0Rp8fT" outputId="1c01d8f3-8f05-4a71-868f-842cbf42dd21"
import time
start = time.time()
classifier.fit(x_train_multi_label, y_train)
print("Time it takes to run this :",(time.time()-start)/60,"minutes")
# + colab={"base_uri": "https://localhost:8080/"} id="5_u8AboZqO0T" outputId="be2dd57d-cf0a-4670-e83c-5db1f42b57dd"
dump(classifier, '/content/drive/MyDrive/Colab Notebooks/Stack overflow Tag /stackoverflow_model_liner_svm_4grams_100k.pkl')
# + colab={"base_uri": "https://localhost:8080/"} id="eHYCcw8wqTkM" outputId="1dabf712-ad6d-47e8-9dae-c763d3611cb1"
# Evaluate multi-label performance; micro-F1 is the headline metric here.
predictions = classifier.predict(x_test_multi_label)
print("accuracy :",metrics.accuracy_score(y_test,predictions))
print("macro f1 score :",metrics.f1_score(y_test, predictions, average = 'macro'))
print("micro f1 scoore :",metrics.f1_score(y_test, predictions, average = 'micro'))
print("hamming loss :",metrics.hamming_loss(y_test,predictions))
# + id="LfE3NvR5qZeY"
# Per-tag precision/recall/F1 exported as CSV for later analysis.
report = metrics.classification_report(y_test, predictions, output_dict=True)
report_df = pd.DataFrame(report).transpose()
report_df.to_csv("/content/report_liner_svm_100k.csv")
| Models and Tags Analysis notebooks/Linear svm model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Handcrafting-Ethereum-vocabulary" data-toc-modified-id="Handcrafting-Ethereum-vocabulary-1"><span class="toc-item-num">1 </span>Handcrafting Ethereum vocabulary</a></span><ul class="toc-item"><li><span><a href="#Proof-of-Concept" data-toc-modified-id="Proof-of-Concept-1.1"><span class="toc-item-num">1.1 </span>Proof of Concept</a></span></li><li><span><a href="#Formatting-for-JSON" data-toc-modified-id="Formatting-for-JSON-1.2"><span class="toc-item-num">1.2 </span>Formatting for JSON</a></span></li></ul></li></ul></div>
# +
import pandas as pd
import spacy
from namedentities import named_entities # HTML escaping
from gensim.utils import simple_preprocess
from gensim.models import Phrases
from gensim.models.phrases import Phraser
# -
# Download the model with 'python -m spacy download en_core_web_lg --user'
# Note: this is a 800MB model
# 'en_core_web_sm', 29MB can be used as alternative
# see https://spacy.io/models/en#section-en_core_web_lg
nlp = spacy.load('en_core_web_lg')
df = pd.read_excel('interviews_20180710.xls')
df
# +
df.columns = [
'name', # Name
'smart_contract', # How do you handle smart contract verif & security?
'bounties', # Other bounties
'who_what', # Who are you and what are you working on?
'tooling', # What are the tools/libraries/frameworks you use?
'frustrations', # What are your biggest frustrations?
'testing', # How do you handle testing?
'missing_tools', # What tools don’t exist at the moment?
'domain_questions', # Other domain specific questions?
'hardest_part', # What was the hardest part to develop with Ethereum?
'excited_about', # What are you most excited about in the short term?
'easier_expected', # Was anything easier than expected?
'people_talk_to', # Who do you think we should talk to?
'best_resources', # What are the best educational resources?
'questions_to_ask' # Are there any other questions we should be asking?
]
df.fillna('', inplace = True)
# -
# For some reason Python or Excel adds an invisible unicode \ufeff character.
df['name'] = df['name'].str.replace(u'\ufeff', '')
# # Handcrafting Ethereum vocabulary
eth_keywords = {
# Who are you and what are you working on?
'0x',
'decentralized_exchange',
'dex',
'decentralization',
'smart_contract',
'solidity',
'truffle',
'1protocol',
'proof_of_stake',
'proof_stake',
'pos',
'mining_pool',
'staking_pool',
'signature',
'token',
'raiden',
'api',
'client',
'consensys',
'token_curated_registries',
'tcr',
'adchain',
'onboarding',
'test',
'ambisafe',
'scalable',
'platform',
'devops',
'javascript',
'js',
'tx',
'transactions',
'parity',
'deployment',
'gas',
'aragon',
'aragon_os',
'organization',
'network',
'permission',
'governance',
'acl',
'access',
'dapp',
'backend',
'protocol',
'stack',
'trustless',
'metamask',
'augur',
'pantera_capital',
'bounty',
'bounties_network',
'infura',
'ethereum',
'ipfs',
'geth',
'evm',
'aion',
'shyft',
'web3',
'merkle_tree',
'audit',
'zeppelin',
'ares',
'governx',
'on-chain',
'off-chain',
'ico',
'developer',
'pm',
'community',
'content',
'course',
'resource',
'mentor',
'blockchain',
'context',
'history',
'mindset',
'ganache',
'remix',
'react',
'react_native',
'oraclize',
'lll',
'vyper',
'eip',
'ethereum_improvement_proposal',
'security',
'cryptokitties',
'sharding',
'dapphub',
'compiler',
'formal_verification',
'formal_proof',
'safe',
'engineer',
'tooling',
'erc20',
'fungible',
'nft_token',
'ipns',
'auction',
'ethalarm',
'browser',
'crypto',
'cryptographic',
'independent',
'verifier',
'investor',
'decentraland',
'open',
'dharma',
'library',
'wrapper',
'mist',
'open-source',
'colony',
'status_open_bounties',
'gitcoin',
'status',
'ens',
'ui',
'static_analysis',
'documentation',
'ethmoji',
'openzeppelin',
'ethwaterloo',
'ethdenver',
'ethvigil',
'rest',
'integration',
'email',
'slack',
'vm',
'design',
'testrpc',
'trezor',
'rpc',
'ewasm',
'research',
'plasma',
'snarks',
'starks',
'zksnarks',
'zkstarks',
'truebit',
'ethereum_wallet',
'web3.js',
'ERC725',
'network',
'p2p',
'analysis',
'hack',
'regulatory',
'giveth',
'pledge',
'chat',
'discovery',
'liquid',
'dappnode',
'makerdao',
'identity',
'gnosis',
'ddex',
'wallet',
'hellogold',
'horizon_blockchain',
'games',
'fair',
'cross-chain',
'finance',
'swap',
'contract',
'solidify',
'knowledge',
'kauri',
'tutorial',
'dispute_resolution',
'arbitration',
'dao',
'incentive',
'decision',
'production',
'payment',
'micro_payment',
'ethereumjs',
'kyokan',
'counterfactual',
'state_channel',
'ethglobal',
'database',
'leroy',
'steemit',
'video',
'infrastructure',
'ethdeploy',
'cryptozombies',
'ethfiddle',
'digital_asset',
'crowdsale',
'modular.network',
'ethpm',
'eal',
'ethereum_address_lookup',
'chrome extension',
'phishing',
'bot',
'myetherwallet',
'mew',
'mycrypto',
'bridge',
'layer_2',
'sidechain',
'ethereum_permission_client',
'wasm',
'kovan',
'trading',
'token_contract',
'consensus',
'algorithm',
'contract',
'polymath',
'ethereum_alarm_clock',
'dagger',
'multisig',
'polychain',
'trinity',
'py-evm',
'web3.py',
'usability',
'quickblocks',
'embark',
'samsara',
'raffle',
'escrow',
'auction',
'collateral',
'portfolio',
'slockit',
'iot',
'sourccred',
'filecoin',
'spankchain',
'spank',
'adtoken',
'dao',
'betting',
'zocrates',
'sweetbridge',
'the_graph',
'json_rpc',
'jsonrpc',
'graphql',
'voting_system',
'trustory',
'tokenomics',
'cryptoeconomic',
'coinbase',
'ujo_music',
'virtuepoker',
'monax',
'maker',
'vulcanizedb',
'etl',
'sql',
'tendermint',
'lightning'
'federated_exchange',
'week_in_ethereum',
'week_ethereum'
'newsletter',
'r/ethereum',
'moderator',
'ERC721',
'xlnt',
'gnarly',
'redis',
'eip0',
'ethereum magicians',
'coworking',
'crypto_nyc',
'odin',
'identity',
'uport',
# What are the tools/libaries/framework you use
'typedoc',
'web3js',
'test-rpc',
'eth.js',
'solc',
'minimetoken',
'visual_studio_code',
'docker',
'travis',
'github',
'rinkeby',
'redux',
'ethereumjs-blockstream',
'keythereum',
'sublime',
'emacs',
'vim',
'bignumber',
'blockies',
'mm',
'mythril',
'mithril',
'jetbrains',
'jquery',
'evmlab',
'solhint',
'ethgasreporter',
'mocha',
'oyente',
'manticore',
'rattle',
'antlr',
'vs_code',
'typescript',
'nodejs',
'etherscan',
'web3py',
'django',
'bootstrap',
'evm_lab',
'ethers.js',
'google_cloud',
'eth.js',
'intellij',
'spank',
'zeroclient',
'postgresql',
'ethers.cli',
'ethers.build',
'swarm',
'eth-keyring-controller',
'ethjs-utils',
'openostbase',
'parityjs',
'jupiter_notebook',
'jupyter',
'0x.js'
'kubernetes',
'github',
'webpack',
'brave',
'libp2p',
'devp2p',
'atom',
'apm',
'aragon_pakage_manager',
'pprof',
'wyvern',
'bignum',
'numeral',
'chai',
'solcoverage',
# How do you handle testing?
'surya',
# How do you handle smart contract verification and security?
'professional',
'argus',
'ci',
'human_thinking',
'linting_rule',
'linting',
'linter',
're-entrancy',
'reentrancy',
'vulnerability',
'community_auditing',
'bounty_program',
'invariant',
'unit_test',
'unit_testing',
'code_review',
'quantstamp',
'crappy_process',
'spec',
'edge_case',
'corner_case',
'intentional_break',
'crowdfunding',
'defensive_code',
'risk',
'mistake',
'isolate',
'don_hold_eth',
'escape_hatch',
'library',
'compiler',
'tester',
'tooling',
'error',
'concept',
'economics',
'funcitonal_test',
'fuzzing',
'ethers.build',
'yuet',
'tdd',
'internal_audit',
'checklist',
'integration_test',
'consensys_best_practices',
'subreddit',
'external_audit',
'bug_bounty',
'security_analysis',
'overflow',
'assumption',
# Other bounties?
'state_watching',
'nft',
'openssl',
'orderwatcher',
'consensus-building',
'vocal',
'good_idea',
'wrong_abstraction',
'tokenized_equity',
'neufund',
'eurotoken',
'C++_dev',
'payment',
'desktop_wallet',
'web_page',
'open-source_explorer',
'decentralized',
'erc223',
'watcher',
'solidity_debugger',
'trusted_relay_chain',
'side_chain',
'sidechain',
'deposit',
'lock',
'escrow',
'talented_people',
'web_developer',
'software_engineer',
'recruiting',
'gas_profiling',
'gas_cost_estimation',
'stack',
'limit',
'slot',
'iulia',
'technical_writer',
'improve_documentation',
'good_ide',
'omkara',
'smart_contract_security',
'solidity_contract',
'incentive_structure',
'reviewer',
'maintainer',
'bug-bounty',
'dev_grant',
'ethprize',
'scaffolding_tool',
'block_explorer',
'scaling',
'proof-of-authority',
'security_team',
'hire_people',
'hiring_people', # should be catched by lemmatizer
'full_time',
'ethereum_query_language',
'websocket',
'deploy',
'heroku',
'deployment_tool',
'manipulate_strings',
'educate_people',
'stay_safe',
'tutorial',
'write-up',
'how_things_work',
'resource',
'phishing',
'airdrop',
'private_key',
'onboard',
'video',
'infographic',
'computational_analysis',
'cost',
'wasm',
'black-box_test',
'ide',
'dependency',
'code_coverage',
'etherscan_verification',
'tracing_api',
'common_format',
'tracing_rpc_call',
'quickblocks',
'swarm_integration',
'coverage_tool',
'ledger_support',
'unification',
'testnet',
'type',
'safety',
'compiled_language',
'use-case',
'explain',
'data_model',
'interactive_demo',
'upgradeability',
'gas_cost',
'scheduler',
'compiler',
'token_curation_market',
# What tools don’t exist at the moment?
'prettier',
'breakpoint',
'log',
'slack',
'estimate_gas',
'software_development',
'issue',
'test_suite',
'better_querying',
'query',
'good_wiki',
'stack_visualizer',
'evmlab',
'execute',
'trace',
'step_through',
'parity_bridge',
'interoperability',
'standardization',
'bridge',
'data_analytics',
'reconciliation',
'analytics',
'data_implicit',
'decentralized_logging',
'automated_updatable_smart_contract',
'ethmix',
'syntax_highlighting',
'event',
'lightweight_user_interface',
'subscribe',
'blockchain_state',
'bamboo',
'vyper',
'serpent',
'rollback_transaction',
'roll_back_transaction',
'versioning',
'type_system',
'simulation_tool',
'open_source',
'storage_platform',
'solidity_interpreter',
'fraud_detection',
'proxy_account',
'interactive_debugger',
'stress-test',
'cryptoeconomics',
'game_theory',
'drag_and_drop',
'builder',
'user_experience',
'quick_validation'
}
# ## Proof of Concept
# +
stop_words = nlp.Defaults.stop_words
def vocab_extraction(txts):
    """Extract per-response sets of known Ethereum keywords.

    Args:
        txts: pandas Series of free-text interview answers.

    Returns:
        list of sets, one per row of *txts* (same length and order), each
        containing the eth_keywords found in that answer.
    """
    # Tokenize each answer and drop stopwords.
    cleaned = []
    for idx, txt in txts.iteritems():
        cleaned.append(
            [word for word in simple_preprocess(txt, deacc = True) if word not in stop_words]
        )
    # Learn bigrams/trigrams over the corpus, then re-encode each answer.
    bigrams = Phraser(Phrases(cleaned, min_count=1, threshold=1))
    trigrams = Phraser(Phrases(bigrams[cleaned], threshold=1))
    # Now create the bag of words with the new trigrams
    cleaned = [trigrams[bigrams[txt]] for txt in cleaned]
    # Lemmatization - TODO improve
    lemmatized = []
    for txt in cleaned:
        if txt == []:
            # BUG FIX: the original did `lemmatized += set()`, which extends
            # the list by zero elements (iterating an empty set), silently
            # dropping empty rows and misaligning the output with the input
            # Series. Append an empty set instead.
            lemmatized.append(set())
        else:
            doc = nlp(" ".join(txt))
            # token.lemma_ has a tendency to replace 'ethereumjs' by 'ethereumj'
            lemmatized.append(set(
                token.lemma_ for token in doc
                if token.lemma_ in eth_keywords or token.lower_ in eth_keywords
            ))
    return lemmatized
# -
vocab_extraction(df['who_what'])
vocab_extraction(df['tooling'])
vocab_extraction(df['testing'])
# ## Formatting for JSON
def to_escaped_html(txt):
    """Wrap each line of *txt* in <p> tags, HTML-escaping via named entities."""
    out = []
    for line in txt.splitlines():
        out.append(f"<p>{named_entities(line)}</p>")
    return ''.join(out)
def genTransformer(field, question):
    """Build a processor that replaces each cell of df[field] with a dict
    {question, escaped-HTML answer, extracted keyword set}.

    Args:
        field: column name to transform in place.
        question: the original survey question for that column.

    Returns:
        A function resultProc(df) that mutates df[field] row by row.
    """
    def resultProc(df):
        # Transform the serie into a list of list of words,
        # removing stopwords at the same time.
        cleaned = []
        for idx, txt in df[field].iteritems():
            # Remove stopwords
            cleaned += [[word for word in simple_preprocess(txt, deacc = True) if word not in stop_words]]
        # Build bigrams and trigrams
        bigrams = Phraser(Phrases(cleaned, min_count=1, threshold=1))
        trigrams = Phraser(Phrases(bigrams[cleaned], threshold=1))
        # Now create the bag of words with the new trigrams
        cleaned = [trigrams[bigrams[txt]] for txt in cleaned]
        # Lemmatization - TODO improve
        # NOTE(review): df[field][idx] chained indexing can raise pandas
        # SettingWithCopyWarning; df.at[idx, field] would be safer — confirm.
        # Also note the empty-row handling here differs from the sibling
        # vocab_extraction (which mishandles empty rows).
        for idx, txt in enumerate(cleaned):
            if txt == []:
                df[field][idx] = {
                    'question': question,
                    'answer': to_escaped_html(df[field][idx]),
                    'keywords': set()
                }
            else:
                doc = nlp(" ".join(txt))
                df[field][idx] = {
                    'question': question,
                    'answer': to_escaped_html(df[field][idx]),
                    'keywords': set(token.lemma_ for token in doc if (
                        token.lemma_ in eth_keywords or token.lower_ in eth_keywords
                    ))
                }
    return resultProc
testing_proc = genTransformer('testing', 'How do you handle testing?')
testing_proc(df)
df['testing'][0]
who_what_proc = genTransformer('who_what', 'Who are you and what are you working on?')
tooling_proc = genTransformer('tooling', 'What are the tools/libraries/frameworks you use?')
smart_contract_proc = genTransformer('smart_contract', 'How do you handle smart contract verif & security?')
other_bounties_proc = genTransformer('bounties', 'Other bounties?')
missing_tools_proc = genTransformer('missing_tools', 'What tools don’t exist at the moment?')
who_what_proc(df)
tooling_proc(df)
smart_contract_proc(df)
other_bounties_proc(df)
missing_tools_proc(df)
df['who_what'][0]
df[['name', 'who_what', 'tooling', 'testing', 'smart_contract', 'bounties', 'missing_tools']].to_json('interviews.json', orient = 'index')
| 201807_ETHPrize/ETHPrize - semisupervised approach.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Inaugural Project
# Imports and set magics:
# +
from types import SimpleNamespace
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
# %load_ext autoreload
# %autoreload 2
import inauguralproject
# -
# # Question 1
# To answer the first question, we start out by setting the parameters to the known values. Then we proceed to define the utility function, the premium function, the expected utility function when the agent is insured and lastly the agent's optimal insurance coverage.
# +
# Setting the parameters:
y = 1
p = 0.2
theta = -2
N = 100
# Defining utility:
def u(z, theta) -> float:
    """CRRA utility of assets.

    Args:
        z (float): asset level
        theta (float): curvature parameter (theta != -1)

    Returns:
        float: z**(1 + theta) / (1 + theta)
    """
    exponent = 1 + theta
    return (z**exponent) / exponent
# Defining the premium:
def pi(p, q):
    """Actuarially fair premium for coverage q at loss probability p.

    Args:
        p (float): probability of monetary loss
        q (float): coverage amount

    Returns:
        float: the premium p * q
    """
    premium = p * q
    return premium
# Defining expected utility if insured
def V(q, x, y, p) -> float:
    """Expected utility of an agent insured with coverage q at premium pi(p, q).

    Args:
        p (float): probability of monetary loss
        q (float): coverage amount
        x (float): monetary loss
        y (float): assets

    Returns:
        float: probability-weighted utility over the loss / no-loss states.
    """
    premium = pi(p, q)
    utility_loss = u(y - x + q - premium, theta)
    utility_no_loss = u(y - premium, theta)
    return p * utility_loss + (1 - p) * utility_no_loss
# Defining optimal insurance coverage:
def q_star(x, y, p):
    """Coverage q on [0, x] maximizing the insured agent's expected utility V.

    Args:
        p (float): probability of monetary loss
        x (float): monetary loss
        y (float): assets

    Returns:
        float: the optimal coverage amount.
    """
    # Maximize V by minimizing its negative over the feasible coverage range.
    negative_V = lambda q: -V(q, x, y, p)
    res = optimize.minimize_scalar(negative_V, bounds = (0, x), method = 'bounded')
    return res.x
# -
# Now we create a grid for the x's, where we want the range to be between 0.01 and 0.9, and an empty grid for the q's. Then we loop over all the xs to calculate the optimal insurance coverage, q*, for each level of monetary loss, x.
# +
# ii) Grid for xs and qs
xs = np.linspace(0.01, 0.9, N)
qs = np.zeros(N)
# iii) For each x calculate q*
for i, x in enumerate(xs):
qs[i] = q_star(xs[i], y, p)
# -
# Lastly, we plot the x's and the q's and see that the graph suggests that a higher monetary loss x entails a higher optimal coverage amount q.
# +
# iv) Plot the xs and qs
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(xs, qs, color = 'green', label = 'Optimal q* per x')
ax.set_xlabel('$x$: monetary loss')
ax.set_ylabel('$q$: coverage amount')
ax.legend(loc='upper left');
# -
# # Question 2
# The goal is to find the set of acceptable contracts from the agent's point of view.
# We let the monetary loss be set to x = 0.6 and create a function for the expected value
# of not having insurance. We construct a new grid of the q's and an empty one for the pi's.
# +
# i) Setting the parameters:
x = 0.6
# Defining expected value if no insurance
def V_null(p:float) -> float:
    """Expected utility of the uninsured agent.

    Uses the module-level assets y, loss x and curvature theta.

    Args:
        p (float): probability of monetary loss

    Returns:
        float: p-weighted utility over the loss / no-loss states.
    """
    utility_loss = u(y - x, theta)
    utility_no_loss = u(y, theta)
    return p * utility_loss + (1 - p) * utility_no_loss
# Constructing a new grid of qs as well as an empty grid for the pi's
qs_new = np.linspace(0.01, 0.6, N)
pi_new = np.zeros(N)
# -
# We make a new function for expected utility when having an insurance,
# since pi is now a variable that has to be solved for.
# We create another objective function in order to optimize pi, with the constraint that all insurance contracts will yield an expected value at least as good as the case of not having an insurance.
# +
#ii)
def V_pi(pi, q, x, y, p) -> float:
    """Expected utility of an insured agent for an arbitrary premium pi.

    Unlike V, the premium is a free variable rather than the fair p*q.

    Args:
        pi (float): insurance premium
        q (float): coverage amount
        p (float): probability of monetary loss
        x (float): monetary loss
        y (float): assets

    Returns:
        float: probability-weighted expected utility.
    """
    utility_loss = u(y - x + q - pi, theta)
    utility_no_loss = u(y - pi, theta)
    return p * utility_loss + (1 - p) * utility_no_loss
# Defining optimal premium policy and optimizing pi:
def pi_star(q, x, y, p, V_null):
    """Premium pi making the agent indifferent between insuring or not.

    Solves V_pi(pi, q, x, y, p) == V_null for pi on [0, q], i.e. the highest
    premium the agent would still accept for coverage q.

    Args:
        p (float): probability of monetary loss
        q (float): coverage amount
        x (float): monetary loss
        y (float): assets
        V_null (float): expected utility when not insured

    Returns:
        float: the root pi.
    """
    # CLEANUP: the original defined `obj` twice — an inner `def obj`
    # immediately shadowed by an identical lambda. Keep a single objective.
    obj = lambda pi: V_pi(pi, q, x, y, p) - V_null
    res = optimize.root_scalar(obj, bracket=[0, q], method='brentq')
    return res.root
for i, q in enumerate(qs_new):
pi_new[i] = pi_star(qs_new[i], x, y, p, V_null(p))
# -
# Lastly, we plot the results including both the optimal pi* per q, the premium policy found in question 1, as well as the set of acceptable contracts.
#iii)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(qs_new, pi_new, color = 'red', label = 'Optimal pi* per q')
ax.plot(qs_new, p*qs_new, color = 'green', label = 'Premium policy')
ax.fill_between(qs_new, p*qs_new, pi_new, color = 'lightblue', label = 'Set of acceptable contracts')
ax.set_xlabel('$q$: coverage amount')
ax.set_ylabel('$pi$: premium')
ax.grid(True)
ax.legend(loc='upper left');
# # Question 3
# In this question we define a Monte Carlo function in order to find the preferred insurance policy from the agent's point of view. The loss, x, is drawn from a beta distribution. Thus, by the Law of Large Numbers, we can approximate the true integral, representing the agent's value, with a finite sample:
# $$
# \mathbb{E}[\mathbf{V}(\gamma, \pi)] \approx \frac{1}{N}\sum_{i=1}^{N} \mathbf{V}(\gamma_i, \pi_i)
# $$
# In the first part, we set the parameters and define the new expected utility. Then we create a Monte Carlo function that, by the Law of Large Numbers, computes the agent's value.
# +
# i)
# Setting the parameters:
alpha = 2
beta = 7
N = 10000
# Defining the new expected utility
def V_beta(gamma, pi):
    """Utility of final wealth under coverage ratio gamma and premium pi.

    Args:
        pi (float): insurance premium
        gamma (float): coverage ratio

    Returns:
        u(y - (1 - gamma)*x - pi, theta)

    NOTE(review): x, y and theta are read from module globals; x here is the
    scalar loss set earlier (0.6), not a beta draw. Confirm that this, and
    how monte_carlo below calls this function, matches the intended
    E[u(y - (1-gamma)x - pi)] with x ~ Beta(alpha, beta).
    """
    return u(y - (1 - gamma)*x - pi, theta)
# Defining the monte carlo function:
def monte_carlo(y, p, N, gamma, pi):
    """Monte Carlo estimate of expected utility under policy (gamma, pi).

    The loss x is drawn from Beta(alpha, beta); by the Law of Large Numbers
    the sample mean of u(y - (1-gamma)*x - pi) approximates E[V(gamma, pi)].

    Args:
        y (float): assets
        p (float): probability of monetary loss (unused: the loss
            distribution is fully described by the beta draw)
        N (int): number of draws (at least 10 000 per the exercise)
        gamma (float): coverage ratio
        pi (float): insurance premium

    Returns:
        float: estimated expected utility.
    """
    draws = np.random.beta(alpha, beta, N)
    # BUG FIX: the original called V_beta(draws, pi), feeding the random
    # losses in through the *coverage ratio* argument (while V_beta used the
    # global scalar x as the loss) and ignored this function's own gamma
    # parameter entirely. Compute the utility of each draw directly.
    return np.mean(u(y - (1 - gamma) * draws - pi, theta))
# -
# We then call the monte carlo fucntion for different values of gamma and pi.
# We define the two cases as policy1 and policy2.
# +
# ii)
policy1 = monte_carlo(y, p, N, gamma = 0.9, pi = 0.2)
policy2 = monte_carlo(y, p, N, gamma = 0.45, pi = 0.1)
print(f'Expected utility of policy 1 is: {policy1:0.3f}')
print(f'Expected utility of policy 2 is: {policy2:0.3f}')
# -
# We can see that both policies yields negative utility. However, the insurance policy where the coverage ratio gamma is 0.45 and the premium pi is 0.1 is preferable since this will yield the smallest loss in utility for the agent.
# # Question 4
# In this question, we want to consider the optimal policy from the insurance company's point of view. We set the coverage ratio, gamma, equal to 0.95.
# Given this parameter, the insurance company wants to maximize its profits
# and we will try to find the optimal premium, pi.
# +
#i)
gamma = 0.95
def pi_star2(p, x, pi, gamma):
    """ Solve for the break-even (actuarially fair) insurance premium given
    that a customer wants a coverage ratio gamma of 0.95.

    The insurer's expected profit per contract is
    ``premium - p * gamma * x`` (premium income minus the expected payout).
    The premium that drives this profit to zero is found with a root finder,
    so the analytic answer is simply ``p * gamma * x``.

    Bugs fixed relative to the original version:
    - the objective was defined twice (an inner ``def`` immediately shadowed
      by a lambda) and did not depend on the premium at all, so the root
      finder had nothing to solve;
    - the result was read from ``res.root``, but ``scipy.optimize.root``
      returns the solution in ``res.x``.

    Args:
        p (float): probability of monetary loss
        x (float): size of the monetary loss
        pi (float): initial guess for the premium
        gamma (float): coverage ratio (0.95 in this question)

    Returns:
        float: the profit-neutral premium from the insurance company's
        point of view.
    """
    def obj(premium):
        # Expected profit: premium received minus expected claim payout.
        return premium - p * (gamma * x)

    res = optimize.root(obj, pi, method = 'broyden1')
    return float(res.x)
#qs_1 = np.linspace(0.01, 0.9, N)
#pi_1 = np.zeros(N)
#for i, q in enumerate(qs_1):
# pi_1[i] = pi_star2(p, qs_1[i], pi, gamma)
# -
# We have not been able to get a value out of our last optimization above. Help would be very much appreciated! :-)
| inauguralproject/inauguralproject.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''base'': conda)'
# language: python
# name: python3
# ---
# Task 1:
# Return true if a string is a palindrome.
#
# A palindrome is a word that is spelled the same forward and backward.
#
# An empty string is a palindrome.
def solution(s):
    """Return True if string s reads the same forwards and backwards.

    An empty string counts as a palindrome.
    """
    return s == s[::-1]
# Task 2:
# Return a number with its digits reversed.
#
# The return value should be an integer.
def solution(n):
    """Return non-negative integer n with its digits reversed (e.g. 120 -> 21)."""
    reversed_digits = 0
    while n > 0:
        n, digit = divmod(n, 10)
        reversed_digits = reversed_digits * 10 + digit
    return reversed_digits
# Task 3:
# This function accepts an array. It should return true if the array elements read the same forward and backward, i.e. if the array is a palindrome.
#
# An empty array is a palindrome.
def solution(a):
    """Return True if the list reads the same forward and backward.

    An empty array counts as a palindrome.
    """
    return a == a[::-1]
# Task 4:
# Given an input array a determine the length of the leading "run" of numbers. The run is how many numbers at the front of the array are the same.
#
# If the array is empty, return 0.
def solution(a):
    """Length of the leading run: how many copies of a[0] start the array.

    Returns 0 for an empty array.
    """
    if not a:
        return 0
    run_length = 1
    # walk adjacent pairs from the front; stop at the first mismatch
    for previous, current in zip(a, a[1:]):
        if previous != current:
            break
        run_length += 1
    return run_length
# Task 5:
# Return a slice of array a starting from index start and ending before index end.
#
# If end is less than or equal to start, return an empty array.
#
# Note that start and end could be beyond the length of the array. They should be clamped between 0 and the length of the array minus 1.
def solution(a, start, end):
    """Return the elements of `a` from index `start` up to (not including) `end`.

    Returns an empty list when end <= start. start is clamped up to 0 and
    end is clamped down to len(a) before the elements are collected.
    """
    if end <= start:
        return []
    start = max(start, 0)
    if end > len(a) - 1:
        end = len(a)
    return [a[i] for i in range(start, end)]
# Task 6:
# Examine an array and determine the length of the longest run of elements. That is, the most of a particular element seen in a row.
def solution(a):
    """Length of the longest run of equal consecutive elements (0 for empty)."""
    if not a:
        return 0
    longest = current = 1
    # compare each element with its predecessor; extend or reset the run
    for previous, value in zip(a, a[1:]):
        current = current + 1 if previous == value else 1
        longest = max(longest, current)
    return longest
# Task 7:
# Run-length encoding algorithm (RLE) works by taking the occurrence of each repeating character and outputting that number along with a single character of the repeating sequence.
#
# You need to implement an algorithm that applies the RLE to a given string.
def solution(inputString):
    """Run-length encode a string: each maximal run of a repeated character
    becomes "<count><char>", e.g. "aaabbc" -> "3a2b1c".

    Fixes relative to the original version:
    - an empty input no longer raises IndexError (it returns "");
    - leftover debug print() calls were removed;
    - output pieces are collected in a list and joined once instead of
      repeated string concatenation.
    """
    if not inputString:
        return ''
    pieces = []
    run_char = inputString[0]
    current_run = 0
    for c in inputString:
        if run_char != c:
            # the run ended: emit it and start counting the new character
            pieces.append(f"{current_run}{run_char}")
            run_char = c
            current_run = 1
        else:
            current_run += 1
    # emit the final run
    pieces.append(f"{current_run}{run_char}")
    return ''.join(pieces)
| Sprint2/S2Module_2_GCA_Practice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inversions
#
# In this notebook, we use a synthetic example to explore aspects of inversion including:
# - assigning uncertainties to the the data
# - adjusting the regularization parameters on the smallness and smoothness terms
# - adjusting the value of the trade-off parameter beta
# ## Step 0: Imports and load survey info
#
# These steps are the same as in the previous notebook.
# +
# core python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm, SymLogNorm, Normalize
import matplotlib.gridspec as gridspec
import ipywidgets
# tools in the simPEG Ecosystem
import discretize # for creating computational meshes
# linear solvers
try:
from pymatsolver import Pardiso as Solver # this is a fast linear solver
except ImportError:
from SimPEG import SolverLU as Solver # this will be slower
# SimPEG inversion machinery
from SimPEG import (
Data, maps,
data_misfit, regularization, optimization, inverse_problem,
inversion, directives
)
# DC resistivity and IP modules
from SimPEG.electromagnetics import resistivity as dc
# -
# set the font size in the plots
from matplotlib import rcParams
rcParams["font.size"] = 14
try:
import warnings
warnings.filterwarnings('ignore')
except:
pass
# ## Step 1: Load DC Survey
#
# This is the same as in the previous notebook, but included so we can re-visit any of the steps as necessary.
# +
line = "46800E"
dc_data_file = f"./century/{line}/{line[:-1]}POT.OBS"
# -
def read_dcip_data(filename, verbose=True):
    """
    Read in a .OBS file from the Century data set into a python dictionary.
    The format is the old UBC-GIF DCIP format.

    Parameters
    ----------
    filename : str
        Path to the file to be parsed
    verbose : bool
        Print some things?

    Returns
    -------
    dict
        A dictionary with the locations of
        - a_locations: the positive source electrode locations (numpy array)
        - b_locations: the negative source electrode locations (numpy array)
        - m_locations: the receiver locations (list of numpy arrays)
        - n_locations: the receiver locations (list of numpy arrays)
        - observed_data: observed data (list of numpy arrays)
        - standard_deviations: assigned standard deviations (list of numpy arrays)
        - n_sources: number of sources (int)
    """
    # read in the text file as a numpy array of strings (each row is an entry).
    # Fix: use the builtin `str` -- the deprecated `np.str` alias was removed
    # in NumPy 1.24 and raises an AttributeError there.
    contents = np.genfromtxt(filename, delimiter=' \n', dtype=str)

    # the second line has the number of sources, current, and data type (voltages if 1)
    n_sources = int(contents[1].split()[0])

    if verbose:
        print(f"number of sources: {n_sources}")

    # initialize storage for the electrode locations and data
    a_locations = np.zeros(n_sources)
    b_locations = np.zeros(n_sources)
    m_locations = []
    n_locations = []
    observed_data = []
    standard_deviations = []

    # index to track where we have read in content
    content_index = 1

    # loop over sources
    for i in range(n_sources):
        # start by reading in the source info
        content_index = content_index + 1  # read the next line
        a_location, b_location, nrx = contents[content_index].split()  # this is a string

        # convert the strings to a float for locations and an int for the number of receivers
        a_locations[i] = float(a_location)
        b_locations[i] = float(b_location)
        nrx = int(nrx)

        if verbose:
            print(f"Source {i}: A-loc: {a_location}, B-loc: {b_location}, N receivers: {nrx}")

        # initialize space for receiver locations, observed data associated with this source
        m_locations_i, n_locations_i = np.zeros(nrx), np.zeros(nrx)
        observed_data_i, standard_deviations_i = np.zeros(nrx), np.zeros(nrx)

        # read in the receiver info
        for j in range(nrx):
            content_index = content_index + 1  # read the next line
            m_location, n_location, datum, std = contents[content_index].split()

            # convert the locations and data to floats, and store them
            m_locations_i[j] = float(m_location)
            n_locations_i[j] = float(n_location)
            observed_data_i[j] = float(datum)
            standard_deviations_i[j] = float(std)

        # append the receiver info to the lists
        m_locations.append(m_locations_i)
        n_locations.append(n_locations_i)
        observed_data.append(observed_data_i)
        standard_deviations.append(standard_deviations_i)

    return {
        "a_locations": a_locations,
        "b_locations": b_locations,
        "m_locations": m_locations,
        "n_locations": n_locations,
        "observed_data": observed_data,
        "standard_deviations": standard_deviations,
        "n_sources": n_sources,
    }
dc_data_dict = read_dcip_data(dc_data_file, verbose=False)
# +
# initialize an empty list for the source objects
source_list = []

# center the survey and work in local coordinates:
# x_local is the midpoint between the westernmost A electrode and the
# easternmost N electrode
x_local = 0.5*(np.min(dc_data_dict["a_locations"]) + np.max(np.hstack(dc_data_dict["n_locations"])))

for i in range(dc_data_dict["n_sources"]):
    # receiver electrode locations in 2D (x shifted to local coordinates,
    # z = 0 puts all electrodes on the surface)
    m_locs = np.vstack([
        dc_data_dict["m_locations"][i] - x_local,
        np.zeros_like(dc_data_dict["m_locations"][i])
    ]).T
    n_locs = np.vstack([
        dc_data_dict["n_locations"][i] - x_local,
        np.zeros_like(dc_data_dict["n_locations"][i])
    ]).T

    # construct the receiver object
    receivers = dc.receivers.Dipole(locations_m=m_locs, locations_n=n_locs, storeProjections=False)

    # construct the source (A and B electrodes in local coordinates)
    source = dc.sources.Dipole(
        location_a=np.r_[dc_data_dict["a_locations"][i] - x_local, 0.],
        location_b=np.r_[dc_data_dict["b_locations"][i] - x_local, 0.],
        receiver_list=[receivers]
    )

    # append the new source to the source list
    source_list.append(source)
# -
survey = dc.Survey(source_list=source_list)
# ## Step 2: Build a Mesh
#
# Similar to the previous notebook, we use a simple function to design our mesh.
def build_mesh(
    survey=survey,  # NOTE: binds the module-level survey at definition time
    n_cells_per_spacing_x=4,
    n_cells_per_spacing_z=4,
    n_core_extra_x=4,
    n_core_extra_z=4,
    core_domain_z_ratio=1/3.,
    padding_factor=1.3,
    n_pad_x=10,
    n_pad_z=10,
):
    """
    A function for designing a Tensor Mesh based on DC survey parameters

    Parameters
    ----------
    survey: dc.Survey
        A DC (or IP) survey object
    n_cells_per_spacing_[x, z]: int
        Number of [x, z]-cells per the minimum electrode spacing
    n_core_extra_[x, z]: int
        Number of extra cells with the same size as the core domain beyond the survey extent
    core_domain_z_ratio: float
        Factor that multiplies the maximum AB, MN separation to define the core mesh extent
    padding_factor: float
        Factor by which we expand the mesh cells in the padding region
    n_pad_[x, z]: int
        Number of padding cells in the x, z directions

    Returns
    -------
    tuple
        (mesh, core_domain_x, core_domain_z): the discretize.TensorMesh and
        the x- and z-extents (numpy arrays) of the finely discretized core.
    """
    # the smallest AB electrode separation controls the finest cell size
    min_electrode_spacing = np.min(np.abs(survey.locations_a[:, 0] - survey.locations_b[:, 0]))
    dx = min_electrode_spacing / n_cells_per_spacing_x
    dz = min_electrode_spacing / n_cells_per_spacing_z

    # define the x core domain from the survey footprint
    core_domain_x = np.r_[
        survey.electrode_locations[:, 0].min(),
        survey.electrode_locations[:, 0].max()
    ]

    # find the z core domain:
    # find the maximum spacing between source and receiver midpoints, which
    # controls the depth of investigation of the survey
    mid_ab = (survey.locations_a + survey.locations_b)/2
    mid_mn = (survey.locations_m + survey.locations_n)/2
    separation_ab_mn = np.abs(mid_ab - mid_mn)
    max_separation = separation_ab_mn.max()
    core_domain_z = np.r_[-core_domain_z_ratio * max_separation, 0.]

    # add extra cells beyond the core domain
    # NOTE(review): np.ceil returns a float array here; discretize appears to
    # accept that as a cell count -- confirm it is not silently truncated.
    n_core_x = np.ceil(np.diff(core_domain_x)/dx) + n_core_extra_x*2 # on each side
    n_core_z = np.ceil(np.diff(core_domain_z)/dz) + n_core_extra_z # just below

    # define the tensors in each dimension: (size, count, expansion) triples
    # pad outwards on both x sides, but only below in z (air is not meshed)
    hx = [(dx, n_pad_x, -padding_factor), (dx, n_core_x), (dx, n_pad_x, padding_factor)]
    hz = [(dz, n_pad_z, -padding_factor), (dz, n_core_z)]

    # "CN": centered in x, top of the mesh at z = 0
    mesh = discretize.TensorMesh([hx, hz], x0="CN")

    return mesh, core_domain_x, core_domain_z
mesh, core_domain_x, core_domain_z = build_mesh(survey)
mesh.plotGrid()
# ## Step 3: Design our True Model
#
# Here, we will focus in on a synthetic example so we know the true solution. Again, we will use a model containing 2 blocks in a halfspace.
# ### 3.1 Define the model geometry and physical properties
#
# Here, we define a model with one conductive block and one resistive block in a halfspace.
# +
# define the resistivities
rho_background = 100
rho_resistive_block = 1000
rho_conductive_block = 10
# define the geometry of each block
xlim_resistive_block = np.r_[-500, -250]
zlim_resistive_block = np.r_[-150, -75]
xlim_conductive_block = np.r_[250, 500]
zlim_conductive_block = np.r_[-150, -75]
# +
# start from a homogeneous background and overwrite the two block regions
rho = rho_background * np.ones(mesh.nC)

# resistive block: boolean mask of the cell centers inside the block extents
inds_resistive_block = (
    (mesh.gridCC[:, 0] >= xlim_resistive_block.min()) & (mesh.gridCC[:, 0] <= xlim_resistive_block.max()) &
    (mesh.gridCC[:, 1] >= zlim_resistive_block.min()) & (mesh.gridCC[:, 1] <= zlim_resistive_block.max())
)
rho[inds_resistive_block] = rho_resistive_block

# conductive block: same construction for the conductive target
inds_conductive_block = (
    (mesh.gridCC[:, 0] >= xlim_conductive_block.min()) & (mesh.gridCC[:, 0] <= xlim_conductive_block.max()) &
    (mesh.gridCC[:, 1] >= zlim_conductive_block.min()) & (mesh.gridCC[:, 1] <= zlim_conductive_block.max())
)
rho[inds_conductive_block] = rho_conductive_block
# -
fig, ax = plt.subplots(1, 1, figsize=(12, 4))
out = mesh.plotImage(np.log10(rho), ax=ax, pcolorOpts={"cmap":"Spectral"})
plt.colorbar(out[0], ax=ax, label="log$_{10} \\rho$", orientation="horizontal", fraction=0.05, pad=0.25)
ax.set_xlim(core_domain_x)
ax.set_ylim(core_domain_z + np.r_[-100, 0])
ax.set_aspect(1.5)
# ### 3.2 Define the true model
#
# Since we will be working with log-resistivities in the inversion, our true model is defined as the log of the resistivity.
model_true = np.log(rho)
# ## Step 4: Set up and run a forward simulation
#
# These will be the "observed data" in the inversion. We add noise to the result (a default of 5%). If you would like to adjust the noise level, you can change the `relative_error`, you can pass `add_noise` to the `make_synthetic_data` function.
#
# As in the first notebook, we will invert for log-resistivity, so we use an `ExpMap` in the forward simulation. Again, we use the `storeJ` option to store the sensitivity matrix so that it does not need to be recomputed.
# +
mapping = maps.ExpMap(mesh)
# Generate 2.5D DC problem
simulation_dc = dc.Simulation2DNodal(
mesh, rhoMap=mapping, solver=Solver, survey=survey, storeJ=True
)
# +
# %%time
# compute our "observed data"
synthetic_data = simulation_dc.make_synthetic_data(model_true, add_noise=True)
# +
# plot pseudosections
fig, ax = plt.subplots(1, 1, figsize=(12, 4))
# plot a psuedosection of the data
dc.utils.plot_pseudosection(
synthetic_data, data_type="apparent resistivity",
plot_type="contourf", data_location=True, ax=ax,
cbar_opts={"pad":0.25}
)
ax.set_xlim(core_domain_x)
ax.set_aspect(1.5) # some vertical exxageration
ax.set_xlabel("Northing (m)")
plt.tight_layout()
# -
# ---
# # Recall: inversion as optimization
#
# We forumlate the inverse problem as an optimization problem consisting of a data misfit and a regularization
#
# $$
# \min_{\mathbf{m}} \Phi(\mathbf{m}) = \Phi_d(\mathbf{m}) + \beta\Phi_m(\mathbf{m}) \\ s.t. ~ \Phi_d \leq \Phi_d^* \quad \mathbf{m}_i^{\rm L} \leq \mathbf{m}_ \leq \mathbf{m}_i^{\rm U}
# $$
#
# where:
# - $\mathbf{m}$ is our inversion model - a vector containing the set of parameters that we invert for
#
#
# - $\Phi_d$ is the data misfit
# $$
# \Phi_d(\mathbf{m}) = \frac{1}{2}\|\mathbf{W_d} (\mathcal{F}(\mathbf{m}) - \mathbf{d}^{\text{obs}})\|^2
# $$
#
#
# - $\Phi_m$ is the regularization
# $$
# \Phi_m(\mathbf{m}) = \frac{1}{2}\big(\alpha_s\|\mathbf{W_s} (\mathbf{m} - \mathbf{m}_{\text{ref}})\|^2 + \alpha_x\|\mathbf{W_x} (\mathbf{m})\|^2 + \alpha_z\|\mathbf{W_z} (\mathbf{m})\|^2 \big)
# $$
#
#
# - $\beta$ is a trade-off parameter that weights the relative importance of the data misfit and regularization terms
#
#
# - $\Phi_d^*$ is our target misfit, which is typically set to $N/2$ where $N$ is the number of data (Parker, 1994) (or also see [Oldenburg & Li (2005)](https://www.researchgate.net/profile/Douglas_Oldenburg/publication/238708196_5_Inversion_for_Applied_Geophysics_A_Tutorial/links/004635282572529927000000.pdf))
# ---
#
# ## Step 5: assign standard deviations to the data
#
# The standard deviations are an estimate of the level of noise on your data. These are used to construct the weights in the $\mathbf{W_d}$ matrix of the data misfit.
#
# It is common to define the standard deviation in terms of a `relative_error` and a `noise_floor`.
#
# $$ \text{standard_deviation} = \text{relative_error}\times|d^{obs}| + \text{noise_floor}$$
#
# For DC resistivity, it is common to choose a `relative_error` between 0.02-0.1 (2% - 10% error). The `noise_floor` value defines threshold for data below which we consider those values to be close to zero. It is important to set a non-zero `noise_floor` when we can have zero-crossings in our data (e.g. both positive and negative values - which we do in DC!). The `noise_floor` ensures that we don't try to fit near-zero values to very high accuracy.
fig, ax = plt.subplots(1, 1)
ax.hist(np.log10(np.abs(synthetic_data.dobs)), 30)
ax.set_xlabel("$log_{10}(|d^{obs}|)$")
relative_error = 0.05 # 5% error
noise_floor = 1e-4
fig, ax = plt.subplots(1, 1)
ax.hist(np.log10(np.abs(synthetic_data.dobs)), 30)
ax.set_xlabel("$log_{10}(|d^{obs}|)$")
ax.axvline(np.log10(noise_floor), linestyle="dashed", color="C1")
# ### 5.1 Assign uncertainties to our data object
#
# In SimPEG, the `data` object is responsible for keeping track of the survey geometry, observed data and uncertainty values. The `standard_deviation` property is what is used to construct the $\mathbf{W_d}$ matrix in the data misfit
#
# ```
# W_d = diag(1 / data.standard_deviation)
# ```
synthetic_data.relative_error = relative_error
synthetic_data.noise_floor = noise_floor
# sanity check: the Data object combines relative error and floor exactly as
# standard_deviation = relative_error * |dobs| + noise_floor
assert(np.allclose(
    relative_error * np.abs(synthetic_data.dobs) + noise_floor,
    synthetic_data.standard_deviation
))
# Plot the data sorted by amplitude and associated uncertainties.
# +
inds_sort = np.argsort(np.abs(synthetic_data.dobs))
sorted_data = np.abs(synthetic_data.dobs[inds_sort])
sorted_std = synthetic_data.standard_deviation[inds_sort]
fig, ax = plt.subplots(2, 1, figsize=(12, 8))
x = np.arange(0, len(sorted_data))
ax[0].plot(x, sorted_data, '.k')
ax[1].semilogy(x, sorted_data, '.k')
ax[0].set_title("sorted data and associated uncertainties")
for a in ax:
a.fill_between(x, sorted_data - sorted_std, sorted_data + sorted_std, alpha=0.25)
a.grid(alpha=0.5)
a.set_ylabel("observed data (V)")
plt.tight_layout()
# -
# ## Step 6: Assembling the inversion
#
# Here, we are going to set up functions for constructing and running the inversion so that we can easily adjust the parameters used in the inversion.
#
# We start be defining a function that will construct our `inverse_problem` object which requires the `data_misfit`, `regularization`, and `optimization` be constructed.
def create_inverse_problem(
    relative_error=relative_error, noise_floor=noise_floor,
    alpha_s=1e-3, alpha_x=1, alpha_z=1, mref=np.log(rho_background),
    maxIter=20, maxIterCG=20,
):
    """
    A function that constructs a data misfit, regularization, and optimization and
    assembles them into an inverse_problem object.

    Parameters
    ----------
    relative_error: float
        relative error we assign to the data (used to construct the standard_deviation)
    noise_floor: float
        noise floor that we use to construct the standard deviations of the data
    alpha_s: float
        weight for the smallness component of the regularization
    alpha_[x, z]: float
        weight for the [x, z]-smoothness term in the regularization
    mref: float
        reference model value used in the smallness term of the regularization
    maxIter: int
        maximum number of iterations in the inversion
    maxIterCG: int
        maximum number of Conjugate Gradient iterations used to compute a step
        in the Inexact Gauss Newton optimization

    Returns
    -------
    inverse_problem.BaseInvProblem
        The assembled SimPEG inverse problem.
    """
    # set the uncertainties and define the data misfit
    # (this mutates the module-level synthetic_data object in place)
    synthetic_data.relative_error = relative_error
    synthetic_data.noise_floor = noise_floor
    dmisfit = data_misfit.L2DataMisfit(data=synthetic_data, simulation=simulation_dc)

    # regularization
    # NOTE: the mesh is 2D, whose second axis is "y" in discretize naming, so
    # the vertical smoothness weight alpha_z is passed as alpha_y here.
    reg = regularization.Tikhonov(
        mesh, alpha_s=alpha_s, alpha_x=alpha_x, alpha_y=alpha_z, mref=mref*np.ones(mesh.nC)
    )

    # optimization
    opt = optimization.InexactGaussNewton(maxIter=maxIter, maxIterCG=maxIterCG)
    opt.remember("xc")  # keep the model at every iteration for later inspection

    # return the inverse problem
    return inverse_problem.BaseInvProblem(dmisfit, reg, opt)
# Next, we define a simple function for assembling the inversion. There are very similar functions in SimPEG already that write these outputs to a file (see for example `directives.SaveOutputEveryIteration`). In part, I show this here to give you a sense of how directives work in the inversion so if you are interested in writing your own, then you can use this as a template.
#
# The `initialize` method is called at the very beginning of the inversion. In the `BetaEstimate_ByEig`, which we used in notebook 1, this is where we initialize an estimate of beta.
#
# The `endIter` method is called at the end of every iteration. For example in the `BetaSchedule` directive, this is where we update the value of beta.
#
# In this directive, we will create a simple dictionary that saves some outputs of interest during the inversion.
class SaveInversionProgress(directives.InversionDirective):
    """
    A custom directive to save items of interest during the course of an inversion.

    After the inversion finishes, ``self.inversion_results`` holds lists (one
    entry per iteration) of beta, phi_d, phi_m and its components, the
    predicted data and the model.
    """

    def initialize(self):
        """
        This is called when we first start running an inversion
        """
        # initialize an empty dictionary for storing results
        self.inversion_results = {
            "iteration":[],
            "beta":[],
            "phi_d":[],
            "phi_m":[],
            "phi_m_small":[],
            "phi_m_smooth_x":[],
            "phi_m_smooth_z":[],
            "dpred":[],
            "model":[]
        }

    def endIter(self):
        """
        This is run at the end of every iteration. So here, we just append
        the new values to our dictionary
        """
        # Save the data
        self.inversion_results["iteration"].append(self.opt.iter)
        self.inversion_results["beta"].append(self.invProb.beta)
        self.inversion_results["phi_d"].append(self.invProb.phi_d)
        self.inversion_results["phi_m"].append(self.invProb.phi_m)
        self.inversion_results["dpred"].append(self.invProb.dpred)
        self.inversion_results["model"].append(self.invProb.model)

        # grab the components of the regularization and evaluate them here
        # the regularization has a list of objective functions
        # objfcts = [smallness, smoothness_x, smoothness_z]
        # and the multipliers contain the alpha values
        # multipliers = [alpha_s, alpha_x, alpha_z]
        # NOTE(review): self.reg.objfcts[0] assumes the regularization is a
        # combo objective whose first entry is the Tikhonov term -- confirm.
        reg = self.reg.objfcts[0]
        phi_s = reg.objfcts[0](self.invProb.model) * reg.multipliers[0]
        phi_x = reg.objfcts[1](self.invProb.model) * reg.multipliers[1]
        phi_z = reg.objfcts[2](self.invProb.model) * reg.multipliers[2]
        self.inversion_results["phi_m_small"].append(phi_s)
        self.inversion_results["phi_m_smooth_x"].append(phi_x)
        self.inversion_results["phi_m_smooth_z"].append(phi_z)
# There are three directives that we use in this inversion
# - `BetaEstimate_ByEig`: sets an initial value for $\beta$ by estimating the largest eigenvalue of $\Phi_d$ and of $\Phi_m$ and then taking their ratio. The value is then scaled by the `beta0_ratio` value, so for example if you wanted the regularization to be ~10 times more important than the data misfit, then we would set `beta0_ratio=10`
#
#
# - `BetaSchedule`: this reduces the value of beta during the course of the inversion. Particularly for non-linear problems, it can be advantageous to start off by seeking a smooth model which fits the regularization and gradually decreasing the influence of the regularization to increase the influence of the data.
#
#
# - `TargetMisfit`: this directive checks at the end of each iteration whether we have reached the target misfit. If we have, the inversion terminates; if not, it continues. Optionally, a `chifact` can be set to stop the inversion when `phi_d <= chifact * phi_d_star`; by default, `chifact=1`.
def create_inversion(
    inv_prob, beta0_ratio=1e2, cool_beta=True,
    beta_cooling_factor=2, beta_cooling_rate=1,
    use_target=True, chi_factor=1,
):
    """
    A method for creating a SimPEG inversion with directives for estimating and
    updating beta as well as choosing a stopping criteria

    Parameters
    ----------
    inv_prob: inverse_problem
        An inverse problem object that describes the data misfit, regularization and optimization
    beta0_ratio: float
        a scalar that multiplies ratio of the estimated largest eigenvalue of phi_d and phi_m
    cool_beta: bool
        True: use a beta cooling schedule. False: use a fixed beta
    beta_cooling_factor: float
        reduce beta by a factor of 1/beta_cooling factor
    beta_cooling_rate: int
        number of iterations we keep beta fixed for
        (we cool beta by the cooling factor every `beta_cooling_rate` iterations)
    use_target: bool
        True: stop when the target misfit is reached. False: run until maxIter
    chi_factor: float
        Stop the inversion when phi_d <= chifact * phi_d_star. By default (`chifact=1`)

    Returns
    -------
    tuple
        (inversion, target_directive, save_directive): the assembled
        BaseInversion, the TargetMisfit directive and the
        SaveInversionProgress directive for post-inversion analysis.
    """
    # set up our directives
    beta_est = directives.BetaEstimate_ByEig(beta0_ratio=beta0_ratio)
    target = directives.TargetMisfit(chifact=chi_factor)
    save = SaveInversionProgress()

    directives_list = [beta_est, save]
    # the target directive is only added when we actually want to stop early
    if use_target is True:
        directives_list.append(target)

    if cool_beta is True:
        beta_schedule = directives.BetaSchedule(coolingFactor=beta_cooling_factor, coolingRate=beta_cooling_rate)
        directives_list.append(beta_schedule)

    return inversion.BaseInversion(inv_prob, directiveList=directives_list), target, save
# ### 6.1 Create and run the inversion
# use the background resistivity for the starting and reference models
rho0 = rho_background

# +
inv_prob = create_inverse_problem(
    relative_error=0.05, noise_floor=1e-4,
    alpha_s=1e-3, alpha_x=1, alpha_z=1, mref=np.log(rho0),
    maxIter=20, maxIterCG=20,
)

inv, target_misfit, inversion_log = create_inversion(
    inv_prob, beta0_ratio=1e2, cool_beta=True,
    beta_cooling_factor=2, beta_cooling_rate=1,
    use_target=False, chi_factor=1
)

# target misfit phi_d* = N/2, where N is the number of data (Parker, 1994)
phi_d_star = survey.nD / 2
target = target_misfit.chifact * phi_d_star
# -

# starting model: homogeneous background (in log-resistivity)
m0 = np.log(rho0) * np.ones(mesh.nC)

model_recovered = inv.run(m0)

inversion_results = inversion_log.inversion_results
# +
# inversion_results_app = ipywidgets.interact(
# plot_results_at_iteration,
# iteration = ipywidgets.IntSlider(min=0, max=inversion_results["iteration"][-1]-1, value=0)
# )
# inversion_results_app
# -
# ### 6.2 analyze the results - data misfit
def plot_normalized_misfit(iteration=None, observed_data=synthetic_data, ax=None):
    """
    Plot a pseudosection of the normalized data misfit,
    (dpred - dobs) / standard_deviation, for a given iteration.

    Parameters
    ----------
    iteration : int or None
        Index into the saved inversion results; None (default) uses the
        final iteration.
    observed_data : SimPEG Data
        Data object holding dobs and the standard deviations
        (defaults to the module-level synthetic_data).
    ax : matplotlib axes or None
        Axes to draw into; a new figure is created when None.

    Returns
    -------
    matplotlib axes
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1)

    if iteration is None:
        dpred = inversion_results["dpred"][-1]
    else:
        dpred = inversion_results["dpred"][iteration]

    # values around +/-1 mean we fit to within one standard deviation
    normalized_misfit = (dpred - observed_data.dobs)/observed_data.standard_deviation

    out = dc.utils.plot_pseudosection(
        observed_data, dobs=normalized_misfit, data_type="misfit",
        plot_type="contourf", data_location=True, ax=ax,
        cbar_opts={"pad":0.25}
    )
    ax.set_title("normalized misfit")
    ax.set_yticklabels([])
    ax.set_aspect(1.5)  # some vertical exaggeration
    ax.set_xlabel("Northing (m)")
    ax.set_ylabel("n-spacing")

    # the colorbar is the last axes added to the current figure
    cb_axes = plt.gcf().get_axes()[-1]
    cb_axes.set_xlabel('normalized misfit')
    return ax
# plot misfit
fig, ax = plt.subplots(1, 1, figsize=(12, 4))
plot_normalized_misfit(ax=ax)
# ### 6.3 analyze the results - how did we get here?
# +
def plot_beta(inversion_results, x="iteration", ax=None):
    """
    Plot the trade-off parameter beta, either against the iteration number
    (x="iteration") or against beta itself (x="beta").
    """
    if ax is None:
        _, ax = plt.subplots(1, 1)
    if x not in ("iteration", "beta"):
        raise Exception("beta should be plotted as a function of iteration or beta")
    betas = inversion_results["beta"]
    ax.plot(inversion_results[x], betas, "-oC0", label="$\\beta$")
    ax.set_ylabel("$\\beta$")
    ax.legend(loc=2)
    return ax
def plot_misfit_and_regularization(inversion_results, x="iteration", ax=None):
    """
    Plot the data misfit (left axis) and regularization (right twin axis)
    against iteration number or beta.

    Parameters
    ----------
    inversion_results : dict
        Output dictionary saved by the SaveInversionProgress directive.
    x : str
        "iteration" or "beta" -- the quantity on the horizontal axis.
    ax : matplotlib axes or None
        Axes to draw into; a new figure is created when None.

    Returns
    -------
    matplotlib axes
        The left (data misfit) axes.
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    if x not in ["iteration", "beta"]:
        raise Exception("misfit and regularization should be plotted as a function of iteration or beta")
    ax.plot(inversion_results[x], inversion_results["phi_d"], "-oC1", label="$\Phi_d$")
    ax.axhline(phi_d_star, linestyle="--", color="k", label="$\Phi_d^*$")
    # NOTE(review): this is drawn at the same height as the line above even
    # though it is labelled chi*Phi_d^*; presumably it should be drawn at
    # chifact * phi_d_star (the module-level `target`) -- confirm.
    ax.axhline(phi_d_star, linestyle="-.", color="k", label="$\chi\Phi_d^*$")
    ax.set_ylabel("$\Phi_d$")

    # regularization on a twin axis since its scale differs from phi_d
    ax_phim = ax.twinx()
    ax_phim.plot(inversion_results[x], inversion_results["phi_m"], "-oC2", label="$\Phi_m$")
    ax_phim.set_ylabel("$\Phi_m$")

    ax.set_xlabel(x)
    ax.legend(loc=2)
    ax_phim.legend(loc=1)
    return ax
def plot_regularization_components(inversion_results, x="iteration", ax=None):
    """
    Plot the smallness and the x/z smoothness terms of the regularization
    against iteration number or beta.
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    if x not in ["iteration", "beta"]:
        raise Exception("regularization components should be plotted as a function of iteration or beta")
    # (key in the results dict, line format, legend label)
    components = [
        ("phi_m_small", "-oC3", "$\\alpha_s\Phi_s$"),
        ("phi_m_smooth_x", "-oC4", "$\\alpha_x\Phi_x$"),
        ("phi_m_smooth_z", "-oC5", "$\\alpha_z\Phi_z$"),
    ]
    for key, fmt, label in components:
        ax.plot(inversion_results[x], inversion_results[key], fmt, label=label)
    ax.set_ylabel("$\Phi_m$")
    ax.set_xlabel(x)
    ax.legend(loc=2)
    return ax
def plot_tikhonov_curve(inversion_results, ax=None):
    """
    Plot the Tikhonov curve: Phi_d as a function of Phi_m.

    Fixes relative to the original version:
    - removed a stray ``ax.set_xlabel(x)`` -- ``x`` is not a parameter of
      this function, so the call depended on a leftover global (NameError
      otherwise) and was immediately overwritten anyway;
    - removed the duplicated axis-label calls.
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    ax.plot(inversion_results["phi_m"], inversion_results["phi_d"], "-oC6", label="$\Phi_d$")
    ax.axhline(phi_d_star, linestyle="--", color="k", label="$\Phi_d^*$")
    # NOTE(review): drawn at the same height as the line above; presumably it
    # should be at chifact * phi_d_star (the module-level `target`) -- confirm.
    ax.axhline(phi_d_star, linestyle="-.", color="k", label="$\chi\Phi_d^*$")
    ax.set_xlabel("$\Phi_m$")
    ax.set_ylabel("$\Phi_d$")
    ax.legend(loc=1)
    return ax
# +
fig, ax = plt.subplots(3, 1, figsize=(10, 8), sharex=True)
plot_beta(inversion_results, ax=ax[0])
plot_misfit_and_regularization(inversion_results, ax=ax[1])
plot_regularization_components(inversion_results, ax=ax[2])
ax[2].set_xlabel("iteration")
# -
plot_tikhonov_curve(inversion_results)
# ### 6.3 recovered model
def plot_model(
    model=model_recovered, ax=None, clim=np.exp(np.r_[np.min(model_true), np.max(model_true)]),
    colorbar=True, zlim=np.r_[-400, 0]
):
    """
    Plot a log-resistivity model on the simulation mesh.

    The model vector is mapped through the exponential `mapping` and shown on
    a logarithmic color scale; the axes are cropped to the core region.
    """
    if ax is None:
        _, ax = plt.subplots(1, 1)

    image = mesh.plotImage(
        mapping * model, pcolorOpts={'norm': LogNorm(), 'cmap': 'Spectral'}, ax=ax,
        clim=clim
    )
    # crop to the core region and exaggerate vertically for readability
    ax.set_xlim(core_domain_x)
    ax.set_ylim(zlim)
    ax.set_ylabel('Elevation (m)')
    ax.set_xlabel('Easting (m)')
    ax.set_aspect(1.5)

    if colorbar is True:
        cbar = plt.colorbar(image[0], fraction=0.05, orientation='horizontal', ax=ax, pad=0.25)
        cbar.set_label("Resistivity ($\Omega$m)")
    return ax
# ### 6.4 Explore the results as a function of iteration
def plot_results_at_iteration(iteration=0):
    """
    Summary plot for one inversion iteration: the Tikhonov curve with the
    current iterate marked (left), the normalized misfit pseudosection
    (top right) and the recovered model (bottom right).

    Parameters
    ----------
    iteration : int
        Index into the saved inversion results (0-based).
    """
    fig = plt.figure(figsize=(12, 8))
    spec = gridspec.GridSpec(ncols=6, nrows=2, figure=fig)
    ax_tikhonov = fig.add_subplot(spec[:, :2])
    ax_misfit = fig.add_subplot(spec[0, 2:])
    ax_model = fig.add_subplot(spec[1, 2:])

    plot_tikhonov_curve(inversion_results, ax=ax_tikhonov)
    # mark the selected iteration on the Tikhonov curve
    ax_tikhonov.plot(
        inversion_results["phi_m"][iteration], inversion_results["phi_d"][iteration],
        'ks', ms=10
    )
    ax_tikhonov.set_title(f"iteration {iteration}")
    plot_normalized_misfit(iteration=iteration, ax=ax_misfit)
    plot_model(inversion_results["model"][iteration], ax=ax_model, )

    # make the tikhonov plot square
    ax_tikhonov.set_aspect(1./ax_tikhonov.get_data_ratio())
    plt.tight_layout()
inversion_results_app = ipywidgets.interact(
plot_results_at_iteration,
iteration = ipywidgets.IntSlider(min=0, max=inversion_results["iteration"][-1]-1, value=0)
)
inversion_results_app
# ### 6.5 Compare with the True model
fig, ax = plt.subplots(2, 1, figsize=(10, 8))
clim = np.exp(np.r_[model_true.min(), model_true.max()])
plot_model(model_recovered, clim=clim, ax=ax[0])
plot_model(model_true, clim=clim, ax=ax[1])
# ## Homework ✏️
#
# What happens if you introduce a layer above the blocks?
# - Define a layer from z=-50 to z=-100.
# - Start with a resistive layer.
# - How does this change our ability to resolve the blocks?
# - Now try with a conductive layer.
# - Get creative! Build a model that interests you
#
# Send us your images and discussion on slack!
| 3-inversions-dc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.2 64-bit
# name: python38264bita77b78139ed74c0680b374d0446532ec
# ---
import urllib
import urllib.parse  # `import urllib` alone does not guarantee the parse submodule is loaded

import pandas as pd
# +
# Columns of interest in the Posts dump.
# NOTE(review): `fields` is defined but never passed to read_csv below —
# dead variable, kept as-is.
fields=['Id','PostTypeId','ParentId','Tags']
# Load the posts dump and keep only tag-wiki post types (4 and 5); each tag
# contributes two such rows, hence the len(df)/2 in the print below.
df = pd.read_csv('data/gaming/tsv/Posts_text.tsv', delimiter='\t', dtype={'Id': 'string'})
df = df[df.PostTypeId.isin([4,5])]
print(f'Tag wiki posts {int(len(df)/2)}')
# -
# Percentage of missing values per column.
df.isna().sum()/(len(df))*100
# df.head()
# Treat missing bodies as empty strings rather than NaN.
df.Body.fillna('',inplace=True)
df.isna().sum()/(len(df))*100
# Bodies are stored URL-encoded; decode them back to plain text.
df.Body = df.Body.apply(urllib.parse.unquote)
df.Body
(df.PostTypeId == 5).sum()
# The Tags table links each tag to its excerpt post and wiki post by id.
dfTags = pd.read_csv('data/gaming/csv/Tags.csv', dtype={'TagName': 'string', 'ExcerptPostId': 'string', 'WikiPostId':'string'})
dfTags.rename(columns={'Id': 'TagId'}, inplace=True)
dfTags.head()
dfTags.dtypes
# Sanity-check the two-step join on the 'minecraft' tags only
# (the merged result is displayed, not assigned).
df_minecraft = dfTags[dfTags.TagName.str.contains('minecraft')]
df_minecraft.merge(
    df[['Id', 'Body' ]],
    how='left',
    left_on='ExcerptPostId',
    right_on='Id',
    suffixes=('_tags','_excerpt')) \
    .drop(columns=['Id']) \
    .rename(columns={'Body':'TagExcerpt'}) \
    .merge(
    df[['Id', 'Body' ]],
    how='left',
    left_on='WikiPostId',
    right_on='Id',
    suffixes=('_tags','_wiki')) \
    .drop(columns=['Id']) \
    .rename(columns={'Body':'TagDescription'})
df.dtypes
# Full join: attach excerpt body and wiki body to every tag.
dfTagsFull = dfTags.merge(
    df[['Id', 'Body' ]],
    how='left',
    left_on='ExcerptPostId',
    right_on='Id',
    suffixes=('_tags','_excerpt')) \
    .drop(columns=['Id']) \
    .rename(columns={'Body':'TagExcerpt'}) \
    .merge(
    df[['Id', 'Body' ]],
    how='left',
    left_on='WikiPostId',
    right_on='Id',
    suffixes=('_tags','_wiki')) \
    .drop(columns=['Id']) \
    .rename(columns={'Body':'TagDescription'})
dfTagsFull[dfTagsFull.TagName.str.contains('minecraft')]
# Re-encode the bodies for CSV export; `x == x` is False only for NaN, so
# missing bodies become ''. ("Qoute" misspelling is kept on purpose: it is
# the persisted output column name.)
dfTagsFull['TagExcerpt_Qoute'] = dfTagsFull['TagExcerpt'].apply(
    lambda x: urllib.parse.quote(x) if x == x else '')
dfTagsFull['TagDescription_Qoute'] = dfTagsFull['TagDescription'].apply(
    lambda x: urllib.parse.quote(x) if x == x else '')
dfTagsFull.columns
cols = ['TagId', 'TagName',
        'Count', 'ExcerptPostId', 'WikiPostId',
        'TagDescription_Qoute',
        'TagExcerpt_Qoute']
dfTagsFull[cols].to_csv('data/outputs/tags-with-wiki.csv', index=False)
| 04.04-tag-wiki.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# # Define multiple classes
# + tags=[] language="sh"
#
# rm -f files/nested_values.yml
# + tags=[]
from dataclasses import dataclass
from datafiles import datafile
@dataclass
class Nested:
    # Two boolean flags. Note the demo below instantiates them with ints
    # (1, 0) — truthy/falsy, but not strict bools.
    alpha: bool
    beta: bool
# `@datafile` binds instances of Sample to the given YAML file: attribute
# changes — including changes on the nested dataclass — are synced to disk.
@datafile('files/nested_dataclass.yml')
class Sample:
    foo: int
    bar: Nested  # nested dataclass, serialized as a YAML mapping
# + [markdown] tags=[]
# # Initialize a root instance
# + tags=[]
# Creating the instance writes files/nested_dataclass.yml (shown by the
# `cat` cell below).
sample = Sample(42, Nested(1, 0))
# + tags=[] language="sh"
#
# cat files/nested_dataclass.yml
# + [markdown] tags=[]
# # Modify the nested object
# + tags=[]
# Mutating an attribute of the nested dataclass triggers a re-sync to disk.
sample.bar.beta = 1
# + [markdown] tags=[]
# # View modified file contents
# + tags=[] language="sh"
#
# cat files/nested_dataclass.yml
| notebooks/nested_dataclass.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import hvplot.pandas
import numpy as np
import matplotlib.dates as dates
import warnings
warnings.filterwarnings('ignore')
import datetime
def dateparse(date_string):
    """Parse an 'MM/DD/YYYY HH:MM:SS' timestamp string into a datetime.

    Used as the `date_parser` callback for pandas.read_csv below.
    """
    timestamp_format = '%m/%d/%Y %H:%M:%S'
    return datetime.datetime.strptime(date_string, timestamp_format)
# ### South 2013-2015
# Load the drift-corrected, detided, low-pass-filtered bottom-pressure
# record for the South station (2013-2015 deployment), indexed by timestamp.
bpr_file = '/home/jovyan/data/bpr/Axial_Deformation/nemo2013-2015-BPR-South-1-15sec-driftcorr-detided-lpf.txt'
df_nemoS13 = pd.read_csv(bpr_file, parse_dates=True, date_parser=dateparse, index_col='Date',
                         dtype = {'Date': object,'Depth': np.float64,
                                  'Temp': np.float64, 'SpotlDetidedDepth': np.float64,
                                  'LPFDetidedDepth': np.float64})
df_nemoS13.head()
# NOTE(review): 'RawDep' is not among the dtype-declared columns above, and
# 'Date' is the index rather than a column — confirm both names resolve for
# hvplot before relying on this figure.
df_nemoS13.hvplot.scatter(x='Date', y= 'RawDep', datashade=True, flip_yaxis=True)
| notebooks/bpr_south.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/GoogleCloudPlatform/training-data-analyst/blob/master/courses/fast-and-lean-data-science/02_Dataset_playground.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="89B27-TGiDNB" colab_type="text"
# ## Imports
# + id="U6rgexPXmY37" colab_type="code" colab={}
import os, math
import numpy as np
from matplotlib import pyplot as plt
os.environ['GCS_READ_CACHE_MAX_SIZE_MB'] = '1' # Little wrinkle: this hack is needed to work around a Colab/GCS cache OOM bug
import tensorflow as tf
tf.enable_eager_execution()
# + [markdown] id="MSDlLsAZh_se" colab_type="text"
# ## Colab auth
# + id="5gAaIXGJmRuU" colab_type="code" colab={}
from google.colab import auth
auth.authenticate_user()
# Little wrinkle: without auth, Colab will be extremely slow in accessing data from a GCS bucket, even public
# + id="MPkvHdAYNt9J" colab_type="code" cellView="form" colab={}
#@title "display utilities [RUN ME]"
def display_9_images_from_dataset(dataset):
    """Render the first nine (image, label, one_hot_label) items in a 3x3 grid."""
    plt.figure(figsize=(13, 13))
    for index, (image, label, one_hot_label) in enumerate(dataset):
        plt.subplot(3, 3, index + 1)  # panels 1..9 of a 3x3 grid
        plt.axis('off')
        plt.imshow(image.numpy().astype(np.uint8))
        caption = label.numpy().decode("utf-8") + ' ' + str(one_hot_label.numpy())
        plt.title(caption, fontsize=16)
        if index == 8:  # stop once nine panels have been drawn
            break
    plt.tight_layout()
    plt.subplots_adjust(wspace=0.1, hspace=0.1)
    plt.show()
# + [markdown] id="w9S3uKC_iXY5" colab_type="text"
# ## Configuration
# + id="d8K6hL_kiWve" colab_type="code" colab={}
# Flower images live in a public GCS bucket, organized one folder per class.
GCS_PATTERN = 'gs://flowers-public/*/*.jpg'
CLASSES = [b'daisy', b'dandelion', b'roses', b'sunflowers', b'tulips'] # flower labels (folder names in the data)
# + [markdown] id="kvPXiovhi3ZZ" colab_type="text"
# ## Read images and labels [WORK REQUIRED]
# 1. Use `fileset=`[`tf.data.Dataset.list_files`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#list_files) to scan the data folder
# 1. Iterate through the dataset of filenames: `for filename in fileset:...` .
# * Does it work ?
# * No! But Python iteration though a Dataset works in eager mode. Enable eager mode in the first cell, restart the runtime and try again.
# * tip: to limit the size of the dataset for display, you can use [`Dataset.take()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#take). Like this: `for data in dataset.take(10): ....`
# * It works but why are Tensors returned ? Get proper values by applying .numpy() to the tensors.
# 1. Use [`tf.data.Dataset.map`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map) to decode the JPEG files. You will find useful TF code snippets below.
# * Iterate on the image dataset. You can use .numpy().shape to only see the data sizes.
# * Are all images of the same size ?
# 1. Now create a training dataset: you have images but you also need labels:
# * the labels (flower names) are the directory names. You will find useful TF code snippets below for parsing them.
# * If you do "`return image, label`" in the decoding function. You will have a Dataset of pairs (image, label).
# * The function `decode_jpeg_and_label` in the snippets below adds a third value: the one-hot encoded label. It will be useful for training.
# 1. Look at the flowers with the `display_9_images_from_dataset` function. It expects the Dataset to have `(image, label, one_hot_label)` elements.
# 1. Code for iterating on a dataset in non-eager mode is also provided in the snippets below. Have a look, it is a bit more complex...
# + id="nwsZ8X59mu24" colab_type="code" outputId="bdec34e4-c17e-4d75-c445-a831bdc4ace8" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Count files matching the pattern (tf.gfile.Glob understands gs:// paths).
nb_images = len(tf.gfile.Glob(GCS_PATTERN))
print("Pattern matches {} images.".format(nb_images))
#
# YOUR CODE GOES HERE
#
#display_9_images_from_dataset(dataset)
# + [markdown] id="ZX6Vg0YZwRCP" colab_type="text"
# ## Useful code snippets
# + [markdown] id="FszNm593wnky" colab_type="text"
# ### Decode a JPEG in Tensorflow
# + id="pmPz2WM2wTbS" colab_type="code" colab={}
def decode_jpeg(filename):
    """Read *filename* (local or gs://) and decode its contents as a JPEG tensor."""
    raw_bytes = tf.read_file(filename)
    decoded = tf.image.decode_jpeg(raw_bytes)
    return decoded
# + [markdown] id="Dax9B6W7wuxt" colab_type="text"
# ### Decode a JPEG and extract folder name in TF
# + id="kPhQl3BlxB7D" colab_type="code" colab={}
def decode_jpeg_and_label(filename):
    """Decode one JPEG and derive its label from the parent folder name.

    Returns (image, label, one_hot_label); the one-hot vector is a uint8
    tensor aligned with CLASSES.
    """
    raw_bytes = tf.read_file(filename)
    image = tf.image.decode_jpeg(raw_bytes)
    # The class name is the second-to-last path component: .../<class>/<file>.jpg
    path_parts = tf.strings.split(tf.expand_dims(filename, axis=-1), sep='/')
    label = path_parts.values[-2]
    # Repeat the label once per class, then compare element-wise with CLASSES.
    repeated_label = tf.tile(tf.expand_dims(label, axis=-1), [len(CLASSES)])
    one_hot_label = tf.cast(tf.math.equal(repeated_label, CLASSES), tf.uint8)
    return image, label, one_hot_label
# + [markdown] id="fG9ctQ84wWHo" colab_type="text"
# ### Read from dataset in non-eager mode
# + id="lkegOfvWMrxb" colab_type="code" colab={}
# In graph (non-eager) mode a Dataset must be drained through an iterator
# inside a Session until OutOfRangeError signals the end of the data.
assert not tf.executing_eagerly(), "This cell will only work in non-eager mode"
# NOTE(review): `dataset` is expected to be defined by the exercise code above.
next_data_item = dataset.make_one_shot_iterator().get_next()
with tf.Session() as ses:
    while True:
        try:
            image, label, one_hot_label = ses.run(next_data_item)
            # ses.run returns numpy data
            print(image.shape, label, one_hot_label)
        except tf.errors.OutOfRangeError:
            print("the end")
            break;
| courses/fast-and-lean-data-science/02_Dataset_playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import division, print_function, absolute_import
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import numpy
import PIL
from PIL import Image
np.random.seed(1337) # for reproducibility
from math import sqrt
import random
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input, Lambda
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers import Flatten
from keras.optimizers import RMSprop
from keras import backend as K
from keras.layers import Concatenate, Dense, LSTM, Input, concatenate
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# +
import scipy.io
# Load the Pavia Centre hyperspectral cube and its ground-truth class map.
mat = scipy.io.loadmat('/home/aniruddha/deep-learning-projects/Siamese_Networks/Dataset/PaviaCentre.mat')
arr = mat['pavia']
arr = np.array(arr)
print(arr.shape)
import scipy.io
mat = scipy.io.loadmat('/home/aniruddha/deep-learning-projects/Siamese_Networks/Dataset/PaviaCentre_gt.mat')
arr1 = mat['pavia_gt']
arr1 = np.array(arr1)
print(arr1.shape)
# Flatten the cube into one per-pixel entry in `a`, with `label` holding the
# matching ground-truth class for each pixel.
# NOTE(review): assumes `arr` is (rows, cols, bands) and `arr1` is
# (rows, cols) — confirm against the printed shapes.
a=[]
label=[]
k=0
for i in range(0,arr1.shape[0]):
    for j in range(0,arr1[i].shape[0]):
        a.append(arr[i][j])
        label.append(arr1[i][j])
a=np.array(a)
label=np.array(label)
# Map the 7 Pavia Centre classes of interest to contiguous labels 0-6.
# Pixels whose ground-truth class is not in the map are dropped.
# Replaces the original chain of seven independent `if`s plus a redundant
# 7-way membership re-test with a single dict lookup per pixel —
# identical output, one comparison instead of up to fourteen.
_PAVIA_C_CLASS_MAP = {2: 0, 3: 1, 4: 2, 5: 3, 7: 4, 8: 5, 9: 6}
X_train = []
y_train = []
for pixel, cls in zip(a, label):
    mapped = _PAVIA_C_CLASS_MAP.get(int(cls))
    if mapped is not None:
        y_train.append(mapped)
        X_train.append(pixel)
X_train = np.array(X_train)
y_train = np.array(y_train)
print(X_train.shape)
print(y_train.shape)
# +
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train, random_state = 0)
from sklearn.preprocessing import StandardScaler
X_train = StandardScaler().fit_transform(X_train)
from sklearn.decomposition import PCA
pca = PCA(n_components=64)
X_train = pca.fit_transform(X_train)
print(X_train.shape)
# +
import scipy.io
# Load the Pavia University hyperspectral cube and its ground-truth map.
mat = scipy.io.loadmat('/home/aniruddha/deep-learning-projects/Siamese_Networks/Dataset/PaviaU.mat')
arr = mat['paviaU']
arr = np.array(arr)
import scipy.io
mat = scipy.io.loadmat('/home/aniruddha/deep-learning-projects/Siamese_Networks/Dataset/PaviaU_gt.mat')
arr1 = mat['paviaU_gt']
arr1 = np.array(arr1)
print(arr1.shape)
# Flatten to per-pixel spectra (`a`) and ground-truth classes (`label`),
# mirroring the Pavia Centre flattening above.
a=[]
label=[]
k=0
for i in range(0,arr1.shape[0]):
    for j in range(0,arr1[i].shape[0]):
        a.append(arr[i][j])
        label.append(arr1[i][j])
a=np.array(a)
label=np.array(label)
print(a.shape)
print(label.shape)
# Map the 7 PaviaU classes of interest to contiguous labels 0-6, ordered to
# correspond to the Pavia Centre classes used above (cross-dataset transfer).
# Replaces the original chain of seven independent `if`s plus a redundant
# membership re-test with a single dict lookup per pixel — identical output.
_PAVIA_U_CLASS_MAP = {4: 0, 1: 1, 8: 2, 7: 3, 9: 4, 2: 5, 6: 6}
X_train1 = []
y_train1 = []
for pixel, cls in zip(a, label):
    mapped = _PAVIA_U_CLASS_MAP.get(int(cls))
    if mapped is not None:
        y_train1.append(mapped)
        X_train1.append(pixel)
X_train1 = np.array(X_train1)
y_train1 = np.array(y_train1)
from sklearn.utils import shuffle
# Same pipeline as for Pavia Centre: shuffle, standardize, PCA to 64 dims.
X_train1, y_train1 = shuffle(X_train1, y_train1, random_state = 0)
from sklearn.preprocessing import StandardScaler
X_train1 = StandardScaler().fit_transform(X_train1)
from sklearn.decomposition import PCA
pca = PCA(n_components=64)
X_train1 = pca.fit_transform(X_train1)
print(X_train1.shape)
# -
print(X_train.max())
print(X_train1.max())
# Cast to float32 and rescale before feeding the network.
# NOTE(review): 100 is an ad-hoc normalization constant — confirm from the
# printed maxima that it keeps the PCA scores in a reasonable range.
X_train=X_train.astype('float32')
X_train1=X_train1.astype('float32')
X_train=X_train/100
X_train1=X_train1/100
# +
# Hold out the tail of the Pavia Centre pixels as the test set.
# Training below uses PaviaU (X_train1); Pavia Centre (X_test) is the
# cross-dataset evaluation set — the first 50k Centre pixels are unused.
X_test=X_train[50000:72933,:]
y_test=y_train[50000:72933]
X_train=X_train[0:50000,:]
y_train=y_train[0:50000]
print(X_train.shape)
print(X_train1.shape)
print(X_test.shape)
# +
# Optimizer / bookkeeping hyperparameters.
# NOTE(review): num_steps, total_numbers, display_step and examples_to_show
# are never used later in this notebook.
learning_rate = 0.01
num_steps = 20
batch_size = 20
total_numbers = 291
display_step = 1000
examples_to_show = 10
# Network Parameters
num_hidden_1 = 32 # 1st layer num features
num_hidden_2 = 16 # 2nd layer num features (the latent dim)
num_input = 64
num_classes = 7
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Weight matrices initialized uniformly in ±4*sqrt(6/(fan_in+fan_out))
# (a scaled Glorot-style range, suited to sigmoid activations).
weights = {
    'encoder_h1': tf.Variable(tf.random_uniform([num_input, num_hidden_1], minval=-4*np.sqrt(6.0/(num_input + num_hidden_1)), maxval=4*np.sqrt(6.0/(num_input + num_hidden_1)))),
    'encoder_h2': tf.Variable(tf.random_uniform([num_hidden_1, num_hidden_2], minval=-4*np.sqrt(6.0/(num_hidden_1 + num_hidden_2)), maxval=4*np.sqrt(6.0/(num_hidden_1 + num_hidden_2)))),
    'decoder_h1': tf.Variable(tf.random_uniform([num_hidden_2, num_hidden_1], minval=-4*np.sqrt(6.0/(num_hidden_1 + num_hidden_2)), maxval=4*np.sqrt(6.0/(num_hidden_1 + num_hidden_2)))),
    'decoder_h2': tf.Variable(tf.random_uniform([num_hidden_1, num_input], minval=-4*np.sqrt(6.0/(num_input + num_hidden_1)), maxval=4*np.sqrt(6.0/(num_input + num_hidden_1)))),
    'classifier1_h': tf.Variable(tf.random_uniform([num_hidden_2, 10], minval=-4*np.sqrt(6.0/(10 + num_hidden_2)), maxval=4*np.sqrt(6.0/(10 + num_hidden_2)))),
    'classifier_h': tf.Variable(tf.random_uniform([10, num_classes], minval=-4*np.sqrt(6.0/(10 + num_classes)), maxval=4*np.sqrt(6.0/(10 + num_classes)))),
}
biases = {
    'encoder_b1': tf.Variable(tf.truncated_normal([num_hidden_1])/sqrt(num_hidden_1)),
    'encoder_b2': tf.Variable(tf.truncated_normal([num_hidden_2])/sqrt(num_hidden_2)),
    'decoder_b1': tf.Variable(tf.truncated_normal([num_hidden_1])/sqrt(num_hidden_1)),
    # NOTE(review): this bias has size num_input but is scaled by
    # sqrt(num_hidden_2) — looks like a copy-paste inconsistency; harmless
    # but worth confirming against the other entries.
    'decoder_b2': tf.Variable(tf.truncated_normal([num_input])/sqrt(num_hidden_2)),
    'classifier1_b': tf.Variable(tf.truncated_normal([10])/sqrt(10)),
    'classifier_b': tf.Variable(tf.truncated_normal([num_classes])/sqrt(num_classes)),
}
# +
# Building the encoder
def encoder(x):
    """Map input spectra to the latent code through two sigmoid layers."""
    hidden = tf.nn.sigmoid(
        tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
    latent = tf.nn.sigmoid(
        tf.add(tf.matmul(hidden, weights['encoder_h2']), biases['encoder_b2']))
    return latent
# Building the decoder
def decoder(x):
    """Reconstruct the input spectra from the latent code (mirror of encoder)."""
    hidden = tf.nn.sigmoid(
        tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
    reconstruction = tf.nn.sigmoid(
        tf.add(tf.matmul(hidden, weights['decoder_h2']), biases['decoder_b2']))
    return reconstruction
# Construct model: the encoder output feeds both the decoder
# (reconstruction branch) and a small classifier head.
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
classify1 = tf.nn.sigmoid(tf.add(tf.matmul(encoder_op, weights['classifier1_h']), biases['classifier1_b']))
label_pred = tf.nn.softmax(tf.add(tf.matmul(classify1, weights['classifier_h']), biases['classifier_b']))
# Clip softmax outputs away from 0 and 1 so the log terms below stay finite.
y_clipped = tf.clip_by_value(label_pred, 1e-10, 0.9999999)
# Targets (Labels) are the input data.
y_true = X
label_true = Y
# Joint objective: reconstruction MSE plus classification cross-entropy.
loss_autoencoder = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
cross_entropy_loss = -tf.reduce_mean(tf.reduce_sum(label_true * tf.log(y_clipped)
                                                   + (1 - label_true) * tf.log(1 - y_clipped), axis=1))
loss_total = loss_autoencoder+cross_entropy_loss
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss_total)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# -
from keras.utils import np_utils
# One-hot encode the labels. Test labels come from Pavia Centre (y_test),
# training labels from PaviaU (y_train1) — the cross-dataset transfer is
# intentional (train on PaviaU, evaluate on Pavia Centre).
y_test11 = np_utils.to_categorical(y_test)
y_train11 = np_utils.to_categorical(y_train1)
print(y_train11.shape)
print(y_test11.shape)
# define an accuracy assessment operation
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(label_pred, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# +
# Start Training
# Start a new TF session
sess = tf.Session()

# Run the initializer
sess.run(init)

batch_size = 64
num_batch = 614

# Training loop over mini-batches of the PaviaU training set.
for i in range(0, 400):
    k = 0
    avg_cost = 0
    # BUG FIX: the original wrote `for j in (0, num_batch)`, which iterates
    # a 2-element tuple — only 2 of the 614 batches were ever used per
    # epoch. `range(num_batch)` visits every batch.
    for j in range(num_batch):
        batch_x = X_train1[k:k + batch_size, :]
        batch_y = y_train11[k:k + batch_size, :]
        k += batch_size
        # Run optimization op (backprop) and cost op (to get loss value)
        _, l = sess.run([optimizer, loss_total], feed_dict={X: batch_x, Y: batch_y})
        avg_cost += l / num_batch
    print("Epoch:", (i + 1), "cost =", "{:.8f}".format(avg_cost))
    print("Epoch:", (i + 1), "accuracy =", "{:.8f}".format(sess.run(accuracy, feed_dict={X: X_train1, Y: y_train11})))
# +
# on 200 epoch
# Evaluate classification accuracy on the held-out Pavia Centre test split.
print(sess.run([accuracy], feed_dict={X: X_test, Y: y_test11}))
# +
# on 400 epoch
print(sess.run([accuracy], feed_dict={X: X_test, Y: y_test11}))
# -
| Autoencoder_Train_PaviaU_Test_PaviaC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (ICTK)
# language: python
# name: ictk
# ---
# # Iris Model
# ***
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ### 1. Load data into the python environment
# ***
# Load the iris dataset from the local Excel workbook.
data = pd.read_excel('./iris.xls')
data.head()
data.Classification.value_counts()
# From the above result it can be concluded that the above problem statement belongs to **"Multi-Class Classification Problem"**
# ### 2. Pre-processing
# ***
# shape
data.shape
# info
data.info()
# There are 4 feature columns in the iris dataset; the predictor column 'Classification' belongs to object type. The rest are float
# check for null values
data.isna().sum()
# There are null values present in dataset
# Plot a histogram per feature column (all columns except the target).
for column in data.columns[:-1]:
    data[column].plot.hist()
    plt.show()
# From the histogram it is clear that columns in dataset are almost normally distributed, so null values can be replaced with mean
data.describe().T
# Filling missing values with the column mean, rounded to one decimal.
# NOTE(review): fillna(..., inplace=True) on the selection data[column] is
# chained-assignment; newer pandas may warn — `data[column] = ...` is safer.
for column in data.columns[:-1]:
    data[column].fillna(round(data[column].mean(), 1), inplace= True)
data.isna().sum()
# All the null values have been removed
data.describe().T
# Filling with mean did not introduce observable change in dataset
# Label Encode 'Classification' Column
from sklearn.preprocessing import LabelEncoder
data['Classification'] = LabelEncoder().fit_transform(data['Classification'])
data.Classification.value_counts()
# ### 3. Model Building
# ***
# Function to check model performances
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score, confusion_matrix
def check_model_metrices(y_test, y_pred):
    """Print accuracy, micro-averaged precision/recall/F1, and the confusion matrix."""
    labelled_scores = [
        ('Model Accuracy = ', accuracy_score(y_test, y_pred)),
        ('Model Precision = ', precision_score(y_test, y_pred, average='micro')),
        ('Model Recall = ', recall_score(y_test, y_pred, average='micro')),
        ('Model F1 Score = ', f1_score(y_test, y_pred, average='micro')),
    ]
    for caption, score in labelled_scores:
        print(caption, score)
    print('Confusion Matrix = \n', confusion_matrix(y_test, y_pred))
# feature selection
X = data[data.columns[:-1]] # features
y = data['Classification'] # target
print(f'Feature shape: {X.shape}')
# Splitting to training and testing sets (70/30, fixed seed for reproducibility)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
print(f'X_train shape: {X_train.shape}')
print(f'X_test shape: {X_test.shape}')
# #### Model : Random Forest
# ***
# Fit a default-parameter random forest and report test-set metrics.
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
rf_pred = rf.predict(X_test)
check_model_metrices(y_test, rf_pred)
import pickle

# Persist the trained classifier for the web app. A context manager
# guarantees the file handle is closed (and the pickle flushed) even if
# dump() raises — the original open/close pair leaked the handle on error.
with open('./iris-model.pkl', 'wb') as pickle_file:
    pickle.dump(rf, pickle_file)
| projects/IrisPredictor-WebApp/iris-model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Time Series Analysis
#
# Forecasting time series, that contains information on sequential events, can be carried out by fitting the data to a probability distribution. This enables us to formulate past and future trends as the results of a parametric equation.
#
# In the ideal scenario, if we knew the probability function for the quantities we aim to forecast, then for a given initial value we could find the values of all future states. However, in reality, such as when predicting a stock value, we do not know all the factors influencing the stock, and therefore cannot formulate a probability function that governs the relationship between all these factors.
#
# Therefore, we use a numeric approximation by discretizing time into small intervals and computing the evolution of values based on available information, such as trends within the past evolution of states, and some other boundary conditions, such as news media articles, strike price of an option, etc.
#
# 
#
# Deep learning approaches used in predicting such trends in stock prices uses trends in multiple correlated variables, such as price trends of similar stocks, market sentiment etc., without any assumption on their probability distribution. Deep learning techniques leverage short and long term memory of micro/macro cycles observed in the past by training the model to learn what matters and what doesn't.
#
# In the original **Recurrent Neural Network (RNN)** design, a delay unit is used, which memorizes the state at a given time step and re-injects it into the deep layers at the next time step, recurrently over a chosen period of time (called lags). The algorithm learns to infer a sequence of future values (called horizon) based on a given lag by learning over multiple pairs of lag-horizon taken across the available timeline. This is referred to as sequence to sequence learning.
#
# 
#
# The recurrence equation can make the network very deep because it replicates the entire hidden network at every step and leads to vanishing memory (i.e. the influence of the deepest layers vanishes due to repeated multiplications of small number derivatives needed to reach these layers during the process of gradient optimization). More advanced gated systems such as **Gated Recurrent Units (GRU)** help with the vanishing memory issue, by re-injecting prior time-step learning into more recent time-step learning to benefit from both long term and short term memory.
#
# In this workshop, you'll create a dynamic network based on Gated Recurrent Units for target and covariates. As target, you'll use closing price of a chosen stock, with some other metrices such as opening price, and maximum and minimum prices of the same stock, as well as few other stocks, whose movement show some similarity (based on the clustering analysis you did in the previous module).
#
# As described in the readme section of the workshop repository, you'll use code written in separate [training](../container/rnn/train) and [prediction](../container/rnn/predictor.py) files.
# # Stock Price Prediction of a single stock, using custom RNN based model
#
# The training and prediciton algorithm used for this exercise is pre-written for you, using TensorFlow with Keras binding, and is available under the container folder of the repository you cloned through notebook configuration. The code to be used for [training](../container/rnn/train) and [prediction](../container/rnn/predictor.py) is already available, and within the scope of this exercise, you'll simply package the available code within a container image, and use SageMaker's high level Estimator API for training the model and hosting the trained model behind an HTTP endpoint.
#
# The following diagram provides an overview of the container architecture, to be used in this scenario.
# 
#
# The container model used here is derived from the [blog post](https://aws.amazon.com/blogs/machine-learning/train-and-host-scikit-learn-models-in-amazon-sagemaker-by-building-a-scikit-docker-container/) describing how to train and host Scikit-Learn models on SageMaker. With the only difference being that you'll be using Keras with TensorFlow instead of Scikit-Learn, the container architecture remains the same.
#
# This also serves to demonstrate that SageMaker capabilities can be extended to virtually any algorithm you use, using any popular Machine Learning libraries, with ease.
# +
import os
import boto3
import pandas as pd
import sagemaker
from sagemaker.estimator import Estimator
from sagemaker.predictor import RealTimePredictor
from sagemaker.predictor import csv_serializer
from ipywidgets import interact_manual, SelectionSlider
from IPython.display import display
import custom_rnn_util as util
# -
# ## Hyperparameters
#
# The training code is written, so that it is customizable via several hyperparamaters. These are passed to the `Estimator` simply as a JSON variable. You can investigate the training code and see that the code is written to assume default values for all parameters, if not specified.
#
# Following is an overview of few, that you can change if you like, before executing training job:
#
# - interval : Use only values `D`(for day) or `H`(for hours). During data preparation we had raw data resampled at various interval levels and saved in S3 buckets. This parameter would tell you which to use. Using smaller interval, such as minute will cause the training to take much longer, whereas with larger interval, number of observed samples will be too low to extract any meaningful patterns.
# - lag, horizon : These specifies how far back the model reaches out while generating prediction, and how far forward it can forecast.
# - target_stock : You can use any stock symbol out of the ones that are available in the training data file.
# - covariate_stocks : This allows you to provide a list of stocks, preferably use some stocks that you found clustered within the same group during the clustering analysis you did in previous module.
# - target_column : You can choose any metrics available within training data, by default, the algorithm works to use `EndPrice` as target variable
# - covariate_columns : Optionally you can specify some additional series that have correlation with the main time series, such as opening price, and maximum and minimum price during past intervals.
#
# **In a real world stock prediction example, you'd ideally want to use covariates based on some broader metrices, such as news analysis, options market related to underlying securities and so on.** Keep in mind though analyzing what supporting metrices to use to increase model accuracy is out of scope of this workshop. As such, the model you'll train here would have limited value in actually predicting the stock values in real world.
# +
#Define parameters
# Resampling interval of the training data: daily or hourly.
interval = 'D' #Use D or H
# NOTE(review): assert is stripped under `python -O`; an explicit raise
# would be more robust for input validation.
assert interval == 'D' or interval == 'H'
# lag = how far back the model looks; horizon = how far it forecasts.
if interval == 'D':
    lag = 10 # Use 10 for D, 80 for H
    horizon = 5 #Use 5 for D, 40 for H
    dateformat = '%Y-%m-%d'
elif interval == 'H':
    lag = 80 # Use 10 for D, 80 for H
    horizon = 40 #Use 5 for D, 40 for H
    dateformat = '%Y-%m-%d %H:%M:%S'
# Target series and supporting covariate series (comma-separated symbols
# and column names understood by the training script).
target_stock = "BMW"
covariate_stocks = "CON, DAI, PAH3, VOW3"
target_column = "EndPrice"
covariate_columns = "StartPrice, MinPrice, MaxPrice"
num_epochs = 1000
percent_train = 85.0
num_units = 256
batch_size = 4096
dropout_ratio = 0.1
# SageMaker passes hyperparameters as strings, hence the str() conversions.
hyperparameters = {
    "interval": interval,
    "lag": str(lag),
    "horizon": str(horizon),
    "num_epochs": str(num_epochs),
    "batch_size": str(batch_size),
    "percent_train": str(percent_train),
    "num_units": str(num_units),
    "target_stock": target_stock,
    "covariate_stocks": covariate_stocks,
    "target_column": target_column,
    "covariate_columns": covariate_columns,
    "dropout_ratio": str(dropout_ratio)
}
# -
# ## Training Data
#
# During data preparation steps, you uploaded the resampled data to your S3 bucket, attached to your SageMaker session, under an appropriate prefix, depending on resampling interval. Here you refer to the data in the corresponding location, based on the interval you choose now, before submitting the training job.
# +
#Define training data location
# S3 locations for the resampled input data (written during data prep)
# and for the training job's model artifacts.
artifactname = 'dbg-custom-rnn'
base_job_name = "{}-{}-{}".format(artifactname, interval, target_stock)
role = sagemaker.get_execution_role()
session = sagemaker.Session()
s3_bucket = session.default_bucket()
s3_data_key = 'dbg-stockdata/source'
data_location = "s3://{}/{}/{}/resampled_stockdata.csv".format(s3_bucket, s3_data_key, interval)
output_location = "s3://{}/{}/{}/output".format(s3_bucket, artifactname, interval)
s3 = boto3.client('s3')
# -
# ## ECR Repository
#
# With the code ready at hand, proceed to create a repository on Amazon ECR.
#
# We provide an utility [script](../container/build_and_push.sh) that:
# - Identifies the AWS region you are using
# - Creates a repository with if it doesn't exist already
# - Run Docker locally to create the container image
# - Retrieves ECR Login command using the credential you are using with SageMaker
# - Pushes the image, as latest, to the ECR repository
# Define model artifact name and image
# Build the fully-qualified ECR image URI for the caller's account/region,
# then build and push the container via the helper script.
account = session.boto_session.client('sts').get_caller_identity()['Account']
region = session.boto_session.region_name
image = '{}.dkr.ecr.{}.amazonaws.com/{}:latest'.format(account, region, artifactname)
os.chdir("../container")
# !sh build_and_push.sh $artifactname
# Once the repository is created, you can check your [ECR Console](https://console.aws.amazon.com/ecs/home#/repositories) to verify that the repository named `dbg-custom-rnn` have been created.
# ## Model Training
#
# To train a model in [Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html), you create a training job. In this workshop, you'll do so using SageMaker's high level [Estimator API](https://sagemaker.readthedocs.io/en/latest/estimators.html).
#
# Estimator is a generic interface that allows you to train using any supplied algorithm, which in this case is the algorithm you just packaged and published to your Amazon ECR repository.
#
# Following are the necessary inputs while submitting a training job:
# - Uniquely identifiable job name
# - Amazon ECR registry path where the training code is stored
# - URL of the Amazon S3 bucket where you have the training data stored
# - URL of the S3 bucket where you want to store the output of the job (upon training completion SageMaker archives whatever files your code stores under the path `/opt/ml/model` within the container, and makes those available as a tar-file named `model.tar.gz` at the specified location on S3)
# +
# %%time
# Instantiate estimator with container image of artifact and backend EC2 instance(s)
rnn = Estimator(image,
                role, 1, 'ml.c5.18xlarge',
                output_path=output_location,
                base_job_name = base_job_name,
                sagemaker_session=session)
rnn.set_hyperparameters(**hyperparameters)
# Train the model (blocks until the SageMaker training job finishes).
rnn.fit(data_location)
# Record the job name and the S3 key of the resulting model archive.
estimator_job = rnn.latest_training_job.job_name
model_archive = "{}/{}/output/{}/output/model.tar.gz".format(artifactname,interval,estimator_job)
print("Estimator created at completion of training job {}".format(estimator_job))
# -
# You can observe in the [training](../container/rnn/train) code, that the loss obtained after each epoch is stored along with the trained model, in the output location.
#
# The following utility function, as implemented in the [custom_rnn_util](./custom_rnn_util.py) Python file, extracts the loss history file and plots the loss. You should verify that the loss is progressively lower as training progresses, thus validating that gradient descent converges over the epochs and the loss is minimized.
# Plot the per-epoch training loss stored alongside the model artifact.
util.plot_loss(s3, s3_bucket, model_archive, "loss_history.csv")
# ## Model Deployment
#
# Deploying a model using Amazon SageMaker hosting services is a three-step process:
# - Creating a model : tell SageMaker where it can find model components
# - Creating configuration for an HTTPS endpoint: specify name of one or more model variants and the number and size of compute instances to use in serving prediction requests
# - Creating an HTTPS endpoint: launch required ML compute instances and deploy the model onto those instances
#
# While you can execute all of these steps from within your AWS Console, using the high level `Estimator` API provides a quick way to execute all of these steps at once.
# %%time
# Create an endpoint on a web server
# (blocks until the endpoint status becomes "In Service").
predictor = rnn.deploy(1, 'ml.m4.xlarge', serializer=csv_serializer)
# While the model is being deployed, you can verify that Model and Endpoint configuration is created, and that the Endpoint is being provisioned, from [SageMaker console](https://console.aws.amazon.com/sagemaker/home#/endpoints).
#
# Once the deployment is complete, the status of the endpoint would change to `In Service`, at which point you are ready to use the endpoint to generate predictions.
# ## Forecasting and Plotting
#
# Upon completion of deployment, your predictor will have an handle to the endpoint you just provisioned.
#
# If using this notebook at any later time, or with a previously deployed endpoint, you can simply obtain a handle to the the endpoint, by specifying the right estimator job name, and instantiating a `RealTimePredictor` with the job name and content type.
# +
#Use a job name from previous training,
#if you need to connect to a pre-deployed endpoint.
#When running in one session, predictor is already initialized when 'deploy' method returns.
estimator_job = 'dbg-custom-rnn-D-BMW-2018-11-19-23-11-02-036'
predictor = RealTimePredictor(estimator_job, content_type = "text/csv")
model_archive = "{}/{}/output/{}/output/model.tar.gz".format(artifactname,interval,estimator_job)
# -
# Within the training code, we split the test data into few samples of size spanning the combined lag and horizon interval, and stored the CSV files for the whole test set, as well as the split test set, in model output location. These files are therefore available within the model archive.
#
# First you use utility methods, as implemented in [custom_rnn_util](./custom_rnn_util.py) Python file to extract and load these files. The functions also save uncompressed CSV files in the output location on S3, alongside the model archive.
# +
filepaths = util.extract_matching_csv_files_from_s3_tarfile(s3, s3_bucket, model_archive, "test[0-9]+.csv", model_archive[:model_archive.find("/model")], 0)
testfilename = "testdata.csv"
testdata = util.load_csv_from_s3_tarfile(s3, s3_bucket, model_archive, testfilename, model_archive[:model_archive.find("/model")], 0)
# -
# If you follow the prediction code at [predictor.py](../container/rnn/predictor.py), you'll notice that the `transform` method, that gets invoked when a request is sent, is written to handle either an S3 location containing the CSV file for the data to be used for forecasting, or serialized CSV data directly.
#
# While sending serialized CSV as request payload is convenient, for very large models requiring data spanning a long interval, it might increase the latency and network traffic. As opposed to that, if you have your data already stored in S3, the ability to just specify the data location on S3 helps in containing network traffic and thus provides faster turnaround.
#
# As a first test, you invoke prediction by simply passing the extracted S3 location of test sample files to the predictor. The utility function also plots the forecasted data, alongside the observed data.
#
util.plot_sample_predictions(predictor, filepaths, target_stock, target_column, lag)
# As a second test, to verify the ability to generate predictions by passing serialized CSV data, you use the following utility function, with a specified increment, that generates a series of predictions at the incremental level specified, and plots similar graphs.
util.plot_sample_test_performance(predictor, testdata, target_stock, covariate_stocks, target_column, lag, horizon, horizon)
# As you can observe in the above plots, the predictor does a good job in predicting on certain dates, not so good on others.
#
# You can also use another utility function, as follows, to plot the forecasts slightly differently, superimposing all the predictions upon an end to end plot of test data.
util.plot_overall_test_performance(predictor, testdata, target_stock, covariate_stocks, target_column, lag, horizon, horizon)
# If you chose to use data sampled at daily intervals, you should see that the forecasts closely followed the observed values in slightly more than 50% of cases. This indicates that our rudimentary model does a better job than at least a random guess, in predicting stock price movements.
dateoptions = util.get_date_range(testdata, target_stock, dateformat, lag, horizon, interval)
style = {'description_width': 'initial'}
@interact_manual(
    forecast_date = SelectionSlider(options=dateoptions,style=style)
)
def plot_interact(forecast_date):
    """Interactive widget handler: generate and plot a forecast starting at the selected date.

    If the chosen date is not present in the test data index, print a message instead.
    """
    try:
        # Locate the row position of the chosen date in the test data index.
        forecast_date_index = list(testdata.index).index(forecast_date.strftime(dateformat))
        print("Generating forecast for {} onwards".format(forecast_date.strftime(dateformat)))
        util.predict_and_plot(predictor, testdata, forecast_date_index, target_stock, covariate_stocks, target_column, lag, horizon)
    except ValueError:
        # list.index raises ValueError when the formatted date is absent from the index.
        print("Data for {} doesn't exist".format(forecast_date.strftime(dateformat)))
# ## Some Final Thoughts
#
# 1. In our custom RNN based code, we used a particular stock's price as main time series, and optionally used some others as exogenous time series. As you might have noticed, for example if you trained your model using `BMW` as main series, and `Chrysler`, `Volkswagen`, `Continental` and `Porsche` as covariates, you cannot use the same model to generate predictions for any of those other stocks. In order to forecast using this approach, you'll have to train one model each for every stock you want to forecast.
#
# 1. Our choice of using opening and maximum and minimum price may not be a good choice in actually forecasting stock price movement, simply because the correlation observed in these metrics may just be an effect of some external factor, and the metrics among themselves might not have any causality.
#
# 1. We used clustering in an attempt to find stocks whose movement might affect each other. Following the same argument however, there might be no causality, instead the stocks move together due to some external factor influencing all such stocks in a similar fashion, because they are somehow related, either due to being in same industry, or otherwise.
#
#
# Feel free to repeat these experiments using other data sources, in conjunction with the DBG data set, such as options market data, news headlines and such.
#
# You can share your findings by emailing the author of the workshop at [<EMAIL>](mailto:<EMAIL>)
#
# ## Delete EndPoint
#
# Keep in mind that a running EndPoint has the chosen compute instances running, regardless of whether or not requests are being sent.
#
# Therefore, in order to contain cost overrun, you should always delete the unused EndPoints. This doesn't delete the trained model artefacts, and at any point, the EndPoint can be reprovisioned, simply by using the corresponding training job name.
predictor.delete_endpoint()
| notebooks/dbg-custom-rnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Python programming.
#
# ### [<NAME>](http://www.imperial.ac.uk/people/g.gorman), [<NAME>](http://christianjacobs.uk/)
#
# ### Updated for MPECDT by [<NAME>](http://www.imperial.ac.uk/people/david.ham)
# # Lecture 1: Computing with formulas
#
# ## Learning objectives:
#
# * Execute a Python statement from within IPython.
# * Learn what a program variable is and how to express a mathematical expression in code.
# * Print program outputs.
# * Access mathematical functions from a Python module.
# ## Programming a mathematical formula
#
# Here is a formula for the position of a ball in vertical motion, starting at ground level (i.e. $y=0$) at time $t=0$:
# $$ y(t) = v_0t- \frac{1}{2}gt^2 $$
#
# where:
#
# * $y$ is the height (position) as a function of time $t$
# * $v_0$ is the initial velocity (at $t=0$)
# * $g$ is the acceleration due to gravity
#
# The computational task is: given $v_0$, $g$ and $t$, compute the value $y$.
#
# **How do we program this task?** A program is a sequence of instructions given to the computer. However, while a programming language is much **simpler** than a natural language, it is more **pedantic**. Programs must have correct syntax, i.e., correct use of the computer language grammar rules, and no misprints.
#
# So let's execute a Python statement based on this example. Evaluate $y(t) = v_0t- \frac{1}{2}gt^2$ for $v_0=5$, $g=9.81$ and $t=0.6$. If you were doing this on paper you would probably write something like this: $$ y = 5\cdot 0.6 - {1\over2}\cdot 9.81 \cdot 0.6^2.$$ Happily, writing this in Python is very similar:
print(5*0.6 - 0.5*9.81*0.6**2)
# Go ahead and mess with the code above to see what happens when you change values and rerun. To see what I mean about programming being pedantic, see what happens if you replace `**` with `^`:
print(5*0.6 - 0.5*9.81*0.6**2)
# or `write` rather than `print`:
write 5*0.6 - 0.5*9.81*0.6**2
# While a human might still understand these statements, they do not mean anything to the Python interpreter. Rather than throwing your hands up in the air whenever you get an error message like the above (you are going to see many during the course of these lectures!!!) train yourself to read the message patiently to get an idea what it is complaining about and re-read your code from the perspective of the pedantic Python interpreter.
#
# Error messages can look bewildering (frustrating etc.) at first, but it gets much **easier with practise**.
# ## Storing numbers in variables
# From mathematics you are already familiar with variables (e.g. $v_0=5,\quad g=9.81,\quad t=0.6,\quad y = v_0t -{1\over2}gt^2$) and you already know how important they are for working out complicated problems. Similarly, you can use variables in a program to make it easier to read and understand.
v0 = 5
g = 9.81
t = 0.6
y = v0*t - 0.5*g*t**2
print(y)
a=2
print(type(a))
a=2.5
print(type(a))
# This program spans several lines of text and uses variables, otherwise the program performs the same calculations and gives the same output as the previous program.
#
# In mathematics we usually use one letter for a variable, resorting to using the Greek alphabet and other characters for more clarity. The main reason for this is to avoid becoming exhausted from writing when working out long expressions or derivations. However, when programming you should use more descriptive names for variable names. This might not seem like an important consideration for the trivial example here but it becomes increasingly important as the program gets more complicated and if someone else has to read your code. **Good variable names make a program easier to understand!**
#
# Permitted variable names include:
#
# * One-letter symbols.
# * Words or abbreviation of words.
# * Variable names can contain a-z, A-Z, underscore ("'_'") and digits 0-9, **but** the name cannot start with a digit.
# * In Python 3, variable names can also include letters from other alphabets, such as α or π.
#
# Variable names are case-sensitive (i.e. "'a'" is different from "'A'"). Let's rewrite the previous example using more descriptive variable names:
initial_velocity = 5
g = 9.81
TIME = 0.6
VerticalPositionOfBall = initial_velocity*TIME - 0.5*g*TIME**2
print(VerticalPositionOfBall)
math
from math import pi as π
radius = 2
area = π * radius ** 2
print(area)
# Certain words are **reserved** in Python and **cannot be used as variable names**. These are: *and, as, assert, break, class, continue, def, del, elif, else, except, exec, finally, for, from, global, if, import, in, is, lambda, not, or, pass, print, raise, return, try, with, while,* and *yield*.
# ## Adding comments to code
#
# Not everything written in a computer program is intended for execution. In Python anything on a line after the '#' character is ignored and is known as a **comment**. You can write whatever you want in a comment. Comments are intended to be used to explain what a snippet of code is intended for. It might for example explain the objective or provide a reference to the data or algorithm used. This is both useful for you when you have to understand your code at some later stage, and indeed for whoever has to read and understand your code later.
# Program for computing the height of a ball in vertical motion.
v0 = 5 # Set initial velocity in m/s.
g = 9.81 # Set acceleration due to gravity in m/s^2.
t = 0.6 # Time at which we want to know the height of the ball in seconds.
y = v0*t - 0.5*g*t**2 # Calculate the vertical position
print(y)
# ## <span style="color:blue">Exercise: Convert from meters to Imperial length units</span>
# Make a program where you set a length given in meters and then compute and write out the corresponding length measured in inches, in feet, in yards, and in miles. Use the fact that one inch is 2.54 cm, one foot is 12 inches, one yard is 3 feet, and one British mile is 1760 yards. As a verification, a length of 640 meters corresponds to 25196.85 inches, 2099.74 feet, 699.91 yards, or 0.3977 miles.
# ## Formatting numbers as strings
# Often we want to print out results using a combination of text and numbers, e.g. "'At t=0.6 s, y is 1.23 m'". Particularly when printing out floating point numbers we should **never** quote numbers to a higher accuracy than they were measured. Python provides a *printf formatting* syntax exactly for this purpose. We can see in the following example that the *slot* `%g` was used to express the floating point number with the minimum number of significant figures, and the *slot* `%.2f` specified that only two digits are printed out after the decimal point.
print("At t=%gs, y is %.2fm." % (t, y))
# Notice in this example how the values in the tuple `(t, y)` are inserted into the *slots*.
#
# Sometimes we want a multi-line output. This is achieved using a triple quotation (*i.e.* `"""`):
print("""At t=%f s, a ball with
initial velocity v0=%.3E m/s
is located at the height %.2f m.
""" % (t, v0, y))
# ## <span style="color:blue">Exercise: Compute the air resistance on a football</span>
# The drag force, due to air resistance, on an object can be expressed as
# $$F_d = \frac{1}{2}C_D\rho AV^2$$
# where $\rho$ is the density of the air, $V$ is the velocity of the object, $A$ is the cross-sectional area (normal to the velocity direction), and $C_D$ is the drag coefficient, which depends heavily on the shape of the object and the roughness of the surface.</br></br>
# The gravity force on an object with mass $m$ is $F_g = mg$, where $g = 9.81ms^{−2}$.</br></br>
# Write a program that computes the drag force and the gravity force on an object. Write out the forces with one decimal in units of Newton ($N = kgm/s^2$). Also print the ratio of the drag force and the gravity force. Define $C_D$, $\rho$, $A$, $V$, $m$, $g$, $F_d$, and $F_g$ as variables, and put a comment with the corresponding unit.</br></br>
# As a computational example, you can initialize all variables with values relevant for a football kick. The density of air is $\rho = 1.2 kg m^{−3}$. For any ball, we have obviously that $A = \pi a^2$, where $a$ is the radius of the ball, which can be taken as $11cm$ for a football. The mass of the ball is $0.43kg$. $C_D$ can be taken as $0.2$.</br></br>
# Use the program to calculate the forces on the ball for a hard kick, $V = 120km/h$ and for a soft kick, $V = 10km/h$ (it is easy to make the mistake of mixing inconsistent units, so make sure you compute with V expressed in m/s). Make sure you use the *printf* formatting style introduced above.
from math import pi as π
... your code here.
# ## How are arithmetic expressions evaluated?
# Consider the random mathematical expression, ${5\over9} + 2a^4/2$, implemented in Python as `5.0/9 + 2*a**4/2`.
#
# The rules for evaluating the expression are the same as in mathematics: proceed term by term (additions/subtractions) from the left, compute powers first, then multiplication and division. Therefore in this example the order of evaluation will be:
#
# 1. `r1 = 5.0/9`
# 2. `r2 = a**4`
# 3. `r3 = 2*r2`
# 4. `r4 = r3/2`
# 5. `r5 = r1 + r4`
#
# Use parenthesis to override these default rules. Indeed, many programmers use parenthesis for greater clarity.
# ## <span style="color:blue">Exercise: Compute the growth of money in a bank</span>
# Let *p* be a bank's interest rate in percent per year. An initial amount *A* has then grown to $$A\left(1+\frac{p}{100}\right)^n$$ after *n* years. Write a program for computing how much money 1000 euros have grown to after three years with a 5% interest rate.
# ## Standard mathematical functions
# What if we need to compute $\sin x$, $\cos x$, $\ln x$, etc. in a program? Such functions are available in Python's *math module*. In fact there is a vast universe of functionality for Python available in modules. We just *import* in whatever we need for the task at hand.
#
# In this example we compute $\sqrt{2}$ using the *sqrt* function in the *math* module:
import math
r = math.sqrt(2)
print(r)
# or:
from math import sqrt
r = sqrt(2)
print(r)
# or:
from math import * # import everything in math
r = sqrt(2)
print(r)
# Another example:
from math import sin, cos, log
x = 1.2
print(sin(x)*cos(x) + 4*log(x)) # log is ln (base e)
# ## <span style="color:blue">Exercise: Evaluate a Gaussian function</span>
# The bell-shaped Gaussian function,
# $$f(x)=\frac{1}{\sqrt{2\pi}s}\exp\left(-\frac{1}{2} \left(\frac{x-m}{s}\right)^2\right)$$
# is one of the most widely used functions in science and technology. The parameters $m$ and $s$ are real numbers, where $s$ must be greater than zero. Write a program for evaluating this function when $m = 0$, $s = 2$, and $x = 1$. Verify the program's result by comparing with hand calculations on a calculator.
dir(math)
| notebook/.ipynb_checkpoints/Python-1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table> <tr>
# <td style="background-color:#ffffff;">
# <a href="http://qworld.lu.lv" target="_blank"><img src="../images/qworld.jpg" width="25%" align="left"> </a></td>
# <td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
# prepared by <a href="http://abu.lu.lv" target="_blank"><NAME></a> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>)
# </td>
# </tr></table>
# <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\dot}[2]{ #1 \cdot #2} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# <h2> <font color="blue"> Solutions for </font> Basics of Python: Loops </h2>
# <a id="task1"></a>
# <h3> Task 1 </h3>
#
# Calculate the value of summation $ 3+6+9+\cdots+51 $, and then print the result.
#
# The result should be 459.
# <h3>Solution</h3>
# +
total1 = 0
total2 = 0
for i in range(3,52,3):
total1 = total1 + i
total2 += i # shorter form
print("The summation is",total1)
print("The summation is",total2)
# -
# <a id="task2"></a>
# <h3> Task 2 </h3>
#
# $ 3^k $ means $ 3 \cdot 3 \cdot \cdots \cdot 3 $ ($ k $ times) for $ k \geq 2 $.
#
# Moreover, $ 3^0 $ is 1 and $ 3^1 = 3 $.
#
# Calculate the value of summation $ 3^0 + 3^1 + 3^2 + \cdots + 3^8 $, and then print the result.
#
# The result should be 9841.
# <h3>Solution</h3>
# +
T = 0
current_number = 1
for i in range(9):
T = T + current_number
print("3 to",i,"is",current_number)
current_number = 3 * current_number
print("summation is",T)
# +
# Python has also exponent operator: **
# we can also directly use it
T = 0
for i in range(9):
print("3 to",i,"is",3**i)
T = T + 3 ** i
print("summation is",T)
# -
# <a id="task3"></a>
# <h3> Task 3 </h3>
#
# Consider the summation $ T(n) = 1 + \dfrac{1}{2} + \dfrac{1}{4}+ \dfrac{1}{8} + \cdots + \dfrac{1}{2^n} $ for some natural number $ n $.
#
# Remark that $ T(0) = \dfrac{1}{2^0} = \dfrac{1}{1} = 1 $.
#
# This summation can be arbitrarily close to $2$.
#
# Let's find the minimum value of $ n $ such that the closeness of $ T(n) $ to $2$ is less than $ 0.01 $.
#
# In other words, let's find the minimum value of $n$ such that $ T(n) > 1.99 $.
#
# The operator for "less than or equal to" in Python is "$ < = $".
# <h3> Solution</h3>
# +
T = 0
n = 2 # this value iteratively will be first halved and then added to the summation T
how_many_terms = 0
while T<=1.99:
n = n/2 # half the value of n
print("n = ",n)
T = T + n # update the value of T
how_many_terms = how_many_terms + 1
print("T = ",T)
print("how many terms in the summation:",how_many_terms)
# +
# our result says that there should be 8 terms in our summation
# let's calculate the summations of the first seven and eight terms, and verify our results
T7 = 0
n = 2 # this value iteratively will be first halved and then added to the summation
for i in range(7):
n = n/2
print("n =",n)
T7 = T7 + n
print("the summation of the first seven terms is",T7)
# +
T8 = 0
n = 2 # this value iteratively will be first halved and then added to the summation
for i in range(8):
n = n/2
print("n =",n)
T8 = T8 + n
print("the summation of the first eight terms is",T8)
print("(the summation of the first seven terms is",T7,")")
# -
# <a id="task4"></a>
# <h3> Task 4 </h3>
#
# Randomly pick number(s) between 0 and 9 until hitting 3, and then print the number of attempt(s).
#
# We can use <i>randrange</i> function from <i>random</i> module for randomly picking a number in a given range.
# <h3> Solution</h3>
# +
from random import randrange
r = 0
attempt = 0
while r != 3: # the loop iterates as long as r is not equal to 3
r = randrange(10) # randomly pick a number
attempt = attempt + 1 # increase the number of attempts by 1
print (attempt,"->",r) # print the number of attempt and the randomly picked number
print("total number of attempt(s) is",attempt)
# -
# <a id="task5"></a>
# <h3> Task 5 </h3>
#
# This task is challenging.
#
# It is designed for the usage of double nested loops: one loop is inside of the other loop.
#
# In the fourth task above, the expected number of attempt(s) to hit number 3 is 10.
#
# Let's do a series of experiments by using your solution for Task 4.
#
# Experiment 1: Execute your code 20 times, and then calculate the average attempts.
#
# Experiment 2: Execute your code 200 times, and then calculate the average attempts.
#
# Experiment 3: Execute your code 2000 times, and then calculate the average attempts.
#
# Experiment 4: Execute your code 20000 times, and then calculate the average attempts.
#
# Experiment 5: Execute your code 200000 times, and then calculate the average attempts.
#
# <i>Your experimental average should get closer to 10 when the number of executions is increased.</i>
#
# Remark that all five experiments can be automatically done by using triple loops.
# <h3> Solution</h3>
# +
# be aware of single and double indentions
number_of_execution = 2000 # change this with 200, 2000, 20000, 200000 and reexecute this cell
total_attempts = 0
from random import randrange
for i in range(number_of_execution): # the outer loop iterates number_of_execution times
r = 0
attempt = 0
while r != 3: # the while-loop iterates as long as r is not equal to 3
r = randrange(10) # randomly pick a number
attempt = attempt + 1 # increase the number of attempts by 1
# I am out of scope of while-loop
total_attempts = total_attempts + attempt # update the total number of attempts
# I am out of scope of for-loop
print(number_of_execution,"->",total_attempts/number_of_execution)
# +
# let's use triple nested loops
for number_of_execution in [20,200,2000,20000,200000]: # we will use the same code by indenting all lines by one more level
total_attempts = 0
for i in range(number_of_execution): # the middle loop iterates number_of_execution times
r = 0
attempt = 0
while r != 3: # the while-loop iterates as long as r is not equal to 3
r = randrange(10) # randomly pick a number
attempt = attempt + 1 # increase the number of attempts by 1
# I am out of scope of while-loop
total_attempts = total_attempts + attempt # update the total number of attempts
# I am out of scope of for-loop
print(number_of_execution,"->",total_attempts/number_of_execution)
# you can include 2 million to the list, but you should WAIT for a while to see the result
# can your computer compete with exponential growth?
# if you think "yes", please try 20 million, 200 million, and so on
# -
| python/Python12_Basics_Loops_Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 第8章 推定問題と直交射影
import numpy as np
import matplotlib.pyplot as plt
# #### 乱数シードの指定(テキスト掲載用)
# 拙著に掲載した標本を再現するため,シードを明示しておく.本来は必要ないので,コメントアウトしてよい.
np.random.seed(321)
# ## 8.1 最小二乗法
# +
def SampleMean(Π):
    """Return the sample mean of Π, or False when Π is empty (mean undefined)."""
    N = len(Π)
    if N == 0:
        return False  # cannot compute the mean of an empty sample
    # Reuse N instead of calling len(Π) a second time.
    return sum(Π)/N
def SampleVariance(Π):
    """Return the (biased) sample variance: the mean of squared deviations from the sample mean."""
    m=SampleMean(Π) # sample mean
    return SampleMean( (Π-m*np.ones_like(Π))**2 )
def Cov(X,Y):
    """Return the sample covariance of X and Y, computed as E[XY] - E[X]E[Y]."""
    mx = SampleMean(X)
    my = SampleMean(Y)
    return SampleMean(X*Y)-mx*my
# -
# ### サンプルデータの生成
means = [0,1] # 期待値ベクトル
covmat = [[1,0.5],[0.5,0.3]] # 分散共分散行列
# サンプルデータ(2次元正規分布)
sample_data = np.random.multivariate_normal(means, covmat, 500)
plt.figure(figsize=(4,3))
plt.plot(sample_data[:,0],sample_data[:,1],"k.",markersize=2)
plt.xlabel('x',fontsize=12)
plt.ylabel('y',fontsize=12)
plt.xlim([-3.5,3.5]); plt.ylim([-1,3.1]);
plt.tight_layout()
plt.savefig('figs/Ch08-1.eps')
# #### サンプルデータの平均,分散,相関係数
mx = SampleMean(sample_data[:,0])
my = SampleMean(sample_data[:,1])
sx = np.sqrt(SampleVariance(sample_data[:,0]))
sy = np.sqrt(SampleVariance(sample_data[:,1]))
ρ= Cov(sample_data[:,0],sample_data[:,1])/(sx*sy)
# ### 条件付き期待値の理論計算
xx = np.linspace(-3.5,3.5,100)
yy = ρ*(sy/sx)*(xx-mx)+my
plt.figure(figsize=(4,3))
plt.plot(xx,yy,"k-")
plt.plot(sample_data[:,0],sample_data[:,1],"k.",markersize=2)
plt.xlim([-3.5,3.5]); plt.ylim([-1,3.1]);
plt.xlabel('x',fontsize=12)
plt.ylabel('y',fontsize=12)
plt.tight_layout()
plt.savefig('figs/Ch08-2.eps')
# ## 8.4 確率変数の推定
# ## 8.4.3 条件付き期待値との関係
# ### サンプルデータの生成(標本数を増やした)
# +
means = [0,1] #期待値ベクトル
covmat = [[1,0.5],[0.5,0.3]] #分散共分散行列
sample_huge = np.random.multivariate_normal(means, covmat, 1000000)
mx = SampleMean(sample_huge[:,0])
my = SampleMean(sample_huge[:,1])
sx = np.sqrt(SampleVariance(sample_huge[:,0]))
sy = np.sqrt(SampleVariance(sample_huge[:,1]))
ρ= Cov(sample_huge[:,0],sample_huge[:,1])/(sx*sy)
# -
# ### 条件付き期待値の理論計算
xx = np.linspace(-6,6,50)
yy = ρ*(sy/sx)*(xx-mx)+my
# ### 条件付き期待値(数値計算)
# +
# x軸上にとる狭い区間の幅
eps = 0.01*(sample_huge[:,0].max() - sample_huge[:,0].min())
# 区間内のyの平均
def conditional_mean( data, x_val, eps ):
    """Numerically approximate the conditional expectation E[Y | X ≈ x_val].

    Averages the y values of all samples whose x coordinate lies within
    eps of x_val. data is a 2-column array: column 0 is x, column 1 is y.
    """
    x_data = data[:,0]
    y_data = data[:,1]
    indexes, = np.where( np.abs(x_data - x_val) < eps ) # indexes of points whose x falls inside the interval
    extracted = y_data[indexes]
    return SampleMean(extracted)
# -
# #### 計算結果
xx_num = np.linspace(sample_huge[:,0].min(),sample_huge[:,0].max(),50)
yy_num = [conditional_mean(sample_huge, x, eps) for x in xx_num]
plt.figure(figsize=(5,2.5))
plt.plot(xx_num,yy_num, label="Numerical", markersize=5, color="w", marker="o", markeredgecolor="k" )
plt.plot(xx,yy,"k-", label="Theoretical", alpha=0.5)
plt.xlim([-6,6])
plt.xlabel('$X$',fontsize=12)
plt.ylabel('$\hat Y = E[Y|X]$',fontsize=12)
plt.legend(); plt.grid()
plt.tight_layout()
plt.savefig('figs/Ch08-CondMean.eps')
# 実線は理論式(算法8.3)$$\hat Y = \rho_{XY}\frac{\sigma_Y}{\sigma_X}(X - m_X) + m_Y$$のグラフ,○印は数値計算による近似値を表す.
# 周辺部は点が疎らなため,数値計算(○印)の精度が落ちている.
| Jupyter/Ch08.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Benchmark results reporting
# ## Setup
# ### Prerequirements
# This notebook requires a kernel running Python 3.5+.
# You can skip this section if the kernel is already configured.
# + run_control={"marked": false}
# !pip install numpy
# !pip install pandas
# !pip install matplotlib
# !pip install seaborn
# #!pip install jupyter_contrib_nbextensions
# #!jupyter contrib nbextension install --user
# #!jupyter nbextension enable python-markdown/main
# #!pip install jupyter_nbextensions_configurator
# #!jupyter nbextensions_configurator enable --user
# -
# ### Imports and selection of the results directory
# + run_control={"marked": false}
from IPython import display as idisplay
import functools as ft
import os
import pandas as pd
import numpy as np
import matplotlib as mp
import scipy as sp
import seaborn as sb
import warnings
warnings.filterwarnings('ignore')
# + active=""
# #disabling this cell
# results_dir = "./reports"
# print("current working dir: {}".format(os.getcwd()))
# try:
# os.chdir(results_dir)
# except:
# pass
# os.getcwd()
# -
# ## Results
# ##### Parameters
# + run_control={"marked": false}
nfolds = 10
ff = '%.6g'
colormap = 'tab10'
# colormap = 'Set2'
# colormap = 'Dark2'
renamings = dict(
constantpredictor_enc='constantpredictor'
)
excluded_frameworks = ['oboe']
binary_score_label = 'AUC'
multiclass_score_label = 'logloss'
# impute_missing_with = 'constantpredictor'
impute_missing_with = 'randomforest'
zero_one_refs = ('constantpredictor', 'tunedrandomforest')
all_results_files = {
'old': [
"results_valid_ref.csv", "results_valid.csv",
"results_small-2c1h_ref.csv", "results_small-2c1h.csv",
"results_medium-4c1h_ref.csv", "results_medium-4c1h.csv",
"results_medium-4c4h_ref.csv", "results_medium-4c4h.csv",
],
'1h': [
"results_small-8c1h_ref.csv", "results_small-8c1h.csv",
"results_medium-8c1h_ref.csv", "results_medium-8c1h.csv",
],
'4h': [
"results_small-8c4h_ref.csv", "results_small-8c4h.csv",
"results_medium-8c4h_ref.csv", "results_medium-8c4h.csv",
"results_large-8c4h_ref.csv", "results_large-8c4h.csv",
],
'8h': [
"results_large-8c8h_ref.csv", "results_large-8c8h.csv",
]
}
results_group = '4h'
results_files = all_results_files[results_group]
# -
# #### Loading results, formatting and adding columns
# - `result` is the raw result metric computed from predictions at the end the benchmark.
# For classification problems, it is usually `auc` for binomial classification and `logloss` for multinomial classification.
# - `score` ensures a standard comparison between tasks: **higher is always better**.
# - `norm_score` is a normalization of `score` on a `[0, 1]` scale, with `{{zero_one_refs[0]}}` score as `0` and `{{zero_one_refs[1]}}` score as `1`.
# - `imp_result` and `imp_score` for imputed results/scores. Given a task and a framework:
# - if **all folds results/scores are missing**, then no imputation occurs, and the result is `nan` for each fold.
# - if **only some folds results/scores are missing**, then the missing result is imputed by the `{{impute_missing_with}}` result for this fold.
# + run_control={"marked": false}
def load_results(files=results_files):
    """Read every results CSV in *files* and stack them into one DataFrame with a fresh index."""
    frames = (pd.read_csv(file) for file in files)
    return pd.concat(frames, ignore_index=True)
def create_file(*path_tokens):
    """Ensure the file at the joined path exists and return its real path.

    Missing parent directories are created, and the file itself is touched
    if absent. Existing files are left untouched.
    """
    path = os.path.realpath(os.path.join(*path_tokens))
    if not os.path.exists(path):
        dirname, basename = os.path.split(path)
        if dirname:
            # exist_ok=True makes a separate existence pre-check redundant
            os.makedirs(dirname, exist_ok=True)
        if basename:
            open(path, 'a').close()  # touch without truncating
    return path
def display(fr, pretty=True, float_format=ff):
    """Show a Series/DataFrame in the notebook: HTML when pretty, plain print otherwise.

    A Series is promoted to a one-column frame first; all rows are shown
    (pandas' default truncation is disabled for the duration of the call).
    """
    with pd.option_context(
        'display.max_rows', len(fr),
        'display.float_format', lambda f: float_format % f
    ):
        if type(fr) is pd.Series:
            fr = fr.to_frame()
        if pretty and type(fr) is pd.DataFrame:
            # NOTE(review): the Styler returned below is discarded, so the
            # vertical-align property is never applied to the rendered HTML —
            # confirm whether it was meant to feed the to_html call.
            fr.style.set_properties(**{'vertical-align':'top'})
            idisplay.display(idisplay.HTML(fr.to_html()))
        else:
            print(fr)
def build_classification_type_map(results_df):
    """Map each task name to 'binary' or 'multiclass'.

    A task counts as binary iff the constantpredictor baseline reported an
    AUC for fold 0 (AUC is only computed for binomial problems).
    """
    cp = results_df.loc[(results_df.framework == 'constantpredictor') & (results_df.fold == 0)]
    # dict comprehension replaces the previous apply/set_index dance and also
    # returns {} gracefully when the selection is empty
    return {row.task: 'binary' if pd.notna(row.auc) else 'multiclass'
            for _, row in cp.iterrows()}
def classification_type(row, type_map):
    """Look up the classification type ('binary'/'multiclass') for the row's task; None if unknown."""
    task = row.task
    return type_map[task] if task in type_map else None
def impute_result(row, results_df, res_col='result', ref_framework=impute_missing_with):
    """Return the row's res_col value, imputed from ref_framework when missing.

    No imputation happens when every fold of this (task, framework) pair is
    missing — the whole series is then considered failed.
    """
    value = row[res_col]
    if pd.notna(value):
        return value
    same_pair = results_df.loc[(results_df.task == row.task)
                               & (results_df.framework == row.framework)]
    # all folds failed or missing: leave the result missing
    if pd.isna(same_pair[res_col]).all():
        return np.nan
    # impute with the reference framework's value for the same task and fold
    ref_rows = results_df.loc[(results_df.framework == ref_framework)
                              & (results_df.task == row.task)
                              & (results_df.fold == row.fold)]
    return ref_rows[res_col].item()
def imputed(row):
    """True when the raw result is missing but an imputed replacement exists."""
    has_raw = pd.notna(row.result)
    return (not has_raw) and pd.notna(row.imp_result)
def score(row, res_col='result'):
    """Higher-is-better score: metrics equal to auc/acc pass through, others (e.g. logloss) are negated."""
    value = row[res_col]
    if value in (row.auc, row.acc):
        return value
    return -value
def norm_score(row, results_df, score_col='score', zero_one_refs=zero_one_refs):
    """Rescale the row's score so the two reference frameworks map to 0 and 1."""
    def ref_score(framework):
        # exactly one matching row is expected; .item() enforces that
        return (results_df.loc[(results_df.framework == framework)
                               & (results_df.task == row.task)
                               & (results_df.fold == row.fold)][score_col]
                .item())
    zero, one = (ref_score(ref) for ref in zero_one_refs)
    return (row[score_col] - zero) / (one - zero)
def sorted_ints(arr):
    """Drop NaNs from arr and return the remaining values as a sorted list of ints."""
    # sorted() accepts any iterable: the intermediate list() was redundant
    return sorted(map(int, arr[~np.isnan(arr)]))
# Load and normalize the raw results: canonical framework names, lowercase
# task/framework identifiers, integer folds.
all_results = load_results().replace(renamings)
all_results = all_results.loc[~all_results.framework.isin(excluded_frameworks)]
all_results.task = all_results.task.str.lower()
all_results.framework = all_results.framework.str.lower()
all_results.fold = all_results.fold.apply(int)
all_frameworks = all_results.framework.unique()
all_frameworks.sort()
all_tasks = all_results.task.unique()
all_tasks.sort()
all_folds = all_results.fold.unique()
class_type_map = build_classification_type_map(all_results)
# Each (task, fold, framework) triple must appear at most once: duplicates
# would corrupt every aggregation below, so show them and fail fast.
all_done = all_results.set_index(['task', 'fold', 'framework'])
if not all_done.index.is_unique:
    print("Duplicate entries:")
    display(all_done[all_done.index.duplicated(keep=False)].sort_values(by=all_done.index.names),
            pretty=False)
assert all_done.index.is_unique
# Triples never recorded at all (run crashed before writing any row).
# NOTE(review): `nfolds` is defined earlier in the file — presumably the
# benchmark fold count; confirm.
all_missing = pd.DataFrame([(task, fold, framework, 'missing')
                            for task in all_tasks
                            for fold in range(nfolds)
                            for framework in all_frameworks
                            if (task, fold, framework) not in all_done.index],
                           columns=[*all_done.index.names, 'info'])\
                .set_index(all_done.index.names)
assert all_missing.index.is_unique
# Rows recorded with an error message in the 'info' column.
all_failed = all_results.loc[pd.notna(all_results['info'])]\
                        .set_index(all_done.index.names)
assert all_failed.index.is_unique
# extending the data frame
# NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
# pd.concat is the forward-compatible replacement.
all_results = all_results.append(all_missing.reset_index())
all_results['type'] = [classification_type(row, class_type_map) for _, row in all_results.iterrows()]
all_results['score'] = [score(row) for _, row in all_results.iterrows()]
all_results['imp_result'] = [impute_result(row, all_results) for _, row in all_results.iterrows()]
all_results['imp_score'] = [impute_result(row, all_results, 'score') for _, row in all_results.iterrows()]
all_results['norm_score'] = [norm_score(row, all_results, 'imp_score') for _, row in all_results.iterrows()]
all_results.to_csv(create_file("tables", results_group, "all_results.csv"),
                   index=False,
                   float_format=ff)
# -
# ### Tasks list
# + run_control={"marked": false}
# each task maps to a single OpenML id (constant across folds): keep the first
tasks = (all_results.groupby(['task', 'type'])['id']
                    .unique()
                    .map(lambda ids: ids[0]))
display(tasks)
# -
# ### Completed tasks/folds
# + run_control={"marked": false}
# folds with a recorded row (successful or failed), per (task, framework)
done = (all_done.reset_index()
        .groupby(['task', 'framework'])['fold']
        .unique())
display(done, pretty=False)
# -
# ### Missing or crashed/aborted tasks/folds
# + run_control={"marked": false}
# Legacy computation kept for reference: derived missing folds from the
# results frame instead of the precomputed all_missing index.
# not_done = pd.DataFrame([(task, framework) for task in all_tasks
#                          for framework in all_frameworks
#                          if (task, framework) not in done.index],
#                         columns=['task', 'framework'])
# missing = all_results.append(not_done)\
#                      .groupby(['task', 'framework'])['fold']\
#                      .unique()\
#                      .map(sorted_ints)\
#                      .map(lambda arr: sorted(list(set(range(0, nfolds)) - set(arr))))\
#                      .where(lambda values: values.map(lambda arr: len(arr) > 0))\
#                      .dropna()
# folds never recorded at all, per (task, framework)
missing = (all_missing.reset_index()
           .groupby(['task', 'framework'])['fold']
           .unique())
display(missing, pretty=False)
# -
# ### Failing tasks/folds
# + run_control={"marked": false}
# Legacy computation kept for reference.
# failed = all_results.where(np.isnan(all_results.result))\
#                     .groupby(['task', 'framework'])['fold']\
#                     .unique()\
#                     .map(sorted_ints)
# folds that ran but reported an error, per (task, framework)
failed = (all_failed.reset_index()
          .groupby(['task', 'framework'])['fold']
          .unique())
display(failed, pretty=False)
# -
# ### Results anomalies
# +
def list_outliers(col, results=all_results, z_threshold=3):
    """Rows of `col` whose fold value deviates more than z_threshold std-devs from the fold mean."""
    per_fold = results.pivot_table(index=['type', 'task', 'framework'], columns='fold', values=col)
    centered = per_fold.sub(per_fold.mean(axis=1), axis=0)
    z_score = centered.div(per_fold.std(axis=1), axis=0).abs()
    # keep only (task, framework) rows with at least one outlying fold
    return z_score.where(z_score > z_threshold).dropna(axis=0, how='all')
# flag fold results deviating more than 2.5 std-devs from their row mean
display(list_outliers('result',
                      z_threshold=2.5,
#                      results=all_results.loc[all_results.framework=='h2oautoml']
                     ))
# -
# ## Data reports
# ### Results summary
# Averaging using arithmetic mean over fold `result` or `score`.
# In following summaries, if not mentioned otherwise, the means are computed over imputed results/scores.
# Given a task and a framework:
# - if **all folds results/scores are missing**, then no imputation occurred, and the mean result is `nan`.
# - if **only some folds results/scores are missing**, then the amount of imputed results that contributed to the mean are displayed between parenthesis.
# + run_control={"marked": false}
def add_imputed_mark(values, imp, val_type=float, val_format=None):
    """Format each value, appending its imputation count in parentheses when non-zero."""
    default_formats = {'float': "{:,.6g}{}", 'int': "{0:d}{}", 'str': "{}{}"}
    if val_format is None:
        val_format = lambda *args: default_formats[val_type.__name__].format(*args)
    def mark(value, count):
        suffix = " ({:.0g})".format(count) if count else ''
        return val_format(value, suffix)
    return values.astype(object).combine(imp, mark)
def render_summary(col, results=all_results, show_imputations=True, filename=None, float_format=ff):
    """Show (and optionally save) the mean of `col` as a (type, task) x framework table.

    When show_imputations is set, the number of imputed folds contributing to
    each mean is appended in parentheses.
    """
    res_group = results.groupby(['type', 'task', 'framework'])
    df = res_group[col].mean().unstack()
    if show_imputations:
        # list-based column selection: tuple selection on a GroupBy was
        # deprecated in pandas 0.25 and later removed
        imputed_df = (res_group[['result', 'imp_result']]
                      .apply(lambda df: sum(imputed(row) for _, row in df.iterrows()))
                      .unstack())
        df = df.combine(imputed_df, ft.partial(add_imputed_mark,
                                               val_format=lambda *v: (float_format+"%s") % tuple(v)))
    display(df, float_format=float_format)
    if filename is not None:
        df.to_csv(create_file("tables", results_group, filename), float_format=float_format)
summary_results = all_results
# -
# #### Number of models trained
#
# When available, displays the average amount of models trained by the framework for each dataset.
#
# This amount should be interpreted differently for each framework.
# For example, with *RandomForest*, this amount corresponds to the number of trees.
# + run_control={"marked": false}
# mean number of models trained per framework/task (meaning is framework-specific)
render_summary('models',
               results=summary_results,
               filename="models_summary.csv",
               float_format="%.f")
# -
# #### Results mean
# + run_control={"marked": false}
# raw results only: missing folds stay missing (no imputation)
render_summary('result',
               results=summary_results)
# + run_control={"marked": false}
# same means, but computed over imputed results; persisted for the report tables
render_summary('imp_result',
               results=summary_results,
               filename="result_summary.csv")
# -
# #### Score mean
# + run_control={"marked": false}
render_summary('imp_score',
               results=summary_results,
               filename="score_summary.csv")
# + run_control={"marked": false}
render_summary('norm_score',
               results=summary_results,
               filename="norm_score_summary.csv")
# -
# ### Tasks leaderboard
# + run_control={"marked": false}
def rank(scores):
    """Dense-rank `scores` descending: best score gets 1, ties share a rank, NaN stays NaN.

    Equivalent to the previous hand-rolled loop over unique sorted values,
    but vectorized via pandas' built-in dense ranking.
    """
    return scores.rank(method='dense', ascending=False)
def render_leaderboard(col, results=all_results, aggregate=False, show_imputations=False, filename=None):
    """Show (and optionally save) framework ranks, per task (aggregate) or per fold.

    Ranks are dense (1 = best) over the mean of `col` per task when aggregate,
    otherwise over each individual fold value.
    """
    res_group = results.groupby(['type', 'task', 'framework'])
    df = (res_group[col].mean().unstack() if aggregate
          else results.pivot_table(index=['type', 'task', 'fold'], columns='framework', values=col))
    df = (df.apply(rank, axis=1, result_type='broadcast')
          .astype(object))
    if show_imputations:
        # list-based column selection: tuple selection on a GroupBy was
        # deprecated in pandas 0.25 and later removed
        imputed_df = (res_group[['result', 'imp_result']]
                      .apply(lambda df: sum(imputed(row) for _, row in df.iterrows()))
                      .unstack())
        df = df.combine(imputed_df, add_imputed_mark)
    display(df)
    if filename is not None:
        df.to_csv(create_file("tables", results_group, filename), float_format='%.f')
leaderboard_results = all_results.loc[~all_results.framework.isin(['constantpredictor', 'randomforest'])]
# + run_control={"marked": false}
# rank frameworks on their mean imputed score per task
render_leaderboard('imp_score',
                   results=leaderboard_results,
                   aggregate=True,
                   show_imputations=True,
                   filename="tasks_leaderboard.csv")
# -
# ### Folds leaderboard
# + run_control={"marked": false}
# per-fold ranking over all frameworks; trailing ';' suppresses notebook echo
render_leaderboard('score', filename="folds_leaderboard.csv");
# -
# ## Visualizations
# + run_control={"marked": false}
def savefig(fig, path):
    """Save the figure to path, cropping surrounding whitespace (tight bounding box)."""
    fig.savefig(path, bbox_inches='tight')
def task_labels(index):
    """Task names from a (type, task) index, truncated to 16 chars with a trailing ellipsis."""
    max_length = 16
    def shorten(name):
        if len(name) <= max_length:
            return name
        return name[:max_length - 1] + u'…'
    return index.droplevel('type').map(shorten).values
def set_labels(axes,
               title=None,
               xlabel=None, ylabel=None,
               x_labels=None, y_labels=None,
               legend_title=None):
    """Apply consistent title/label/tick/legend typography to a matplotlib Axes."""
    axes.set_title(title, fontsize='xx-large')
    axes.set_xlabel(xlabel, fontsize='x-large')
    axes.set_ylabel(ylabel, fontsize='x-large')
    axes.tick_params(labelsize='x-large')
    if x_labels is not None:
        axes.set_xticklabels(x_labels)
    if y_labels is not None:
        axes.set_yticklabels(y_labels)
    legend = axes.get_legend()
    if legend is not None:
        # keep the legend's existing title unless an override is given
        legend_title = legend_title or legend.get_title().get_text()
        legend.set_title(legend_title, prop=dict(size='x-large'))
        for text in legend.get_texts():
            text.set_fontsize('x-large')
def set_scales(axes, xscale=None, yscale=None):
    """Set axis scales: a plain string ('log', ...) or a (name, options-dict) tuple per axis."""
    for setter, scale in ((axes.set_xscale, xscale), (axes.set_yscale, yscale)):
        if isinstance(scale, str):
            setter(scale)
        elif isinstance(scale, tuple):
            setter(scale[0], **scale[1])
# -
# ### Heatmaps
# + run_control={"marked": false}
def draw_heatmap(df,
                 x_labels=True, y_labels=True,
                 title=None, xlabel=None, ylabel=None,
                 **kwargs):
    """Render df as an annotated seaborn heatmap; extra kwargs go to sb.heatmap.

    Figure height scales with the number of rows so labels stay readable.
    """
    with sb.axes_style('white'), sb.plotting_context('paper'):
#        print(sb.axes_style())
#        print(sb.plotting_context())
        axes = sb.heatmap(df, xticklabels=x_labels, yticklabels=y_labels,
                          annot=True, cmap='RdYlGn', robust=True,
                          **kwargs)
        axes.tick_params(axis='y', labelrotation=0)
        set_labels(axes, title=title, xlabel=xlabel, ylabel=ylabel)
        fig = axes.get_figure()
        fig.set_size_inches(10, df.shape[0]/2)  # half an inch per row
        fig.set_dpi(120)
        return fig
def draw_score_heatmap(col, results=all_results, type_filter='all', filename=None, **kwargs):
    """Heatmap of mean `col` per (task, framework), optionally filtered by problem type and saved."""
    df = (results.groupby(['type', 'task', 'framework'])[col]
                 .mean()
                 .unstack())
    # get_loc on the first index level yields a row selector for that type
    df = (df if type_filter == 'all'
          else df[df.index.get_loc(type_filter)])
    fig = draw_heatmap(df,
                       y_labels=task_labels(df.index),
#                       xlabel="Framework", ylabel="Task",
                       **kwargs)
    if filename is not None:
        savefig(fig, create_file("graphics", results_group, filename))
    return fig
# heatmap_results = all_results.loc[~all_results.framework.isin(['constantpredictor', 'randomforest'])]
heatmap_results = all_results.loc[~all_results.framework.isin(['constantpredictor'])]
# + run_control={"marked": false}
# center=0.5 puts the diverging colormap's midpoint at random-guess AUC
draw_score_heatmap('imp_score',
                   results=heatmap_results,
                   type_filter='binary',
                   title=f"Scores ({binary_score_label}) on {results_group} binary classification problems",
                   filename="binary_score_heat.png",
                   center=0.5);
# + run_control={"marked": false}
draw_score_heatmap('imp_score',
                   results=heatmap_results,
                   type_filter='multiclass',
                   title=f"Scores ({multiclass_score_label}) on {results_group} multi-class classification problems",
                   filename="multiclass_score_heat.png",
                   center=0);
# + run_control={"marked": true}
draw_score_heatmap('norm_score',
                   results=heatmap_results,
                   type_filter='binary',
                   title=f"Normalized scores on {results_group} binary classification problems",
                   filename="binary_norm_score_heat.png",
                   center=0);
# + run_control={"marked": false}
draw_score_heatmap('norm_score',
                   results=heatmap_results,
                   type_filter='multiclass',
                   title=f"Normalized scores on {results_group} multi-class classification problems",
                   filename="multiclass_norm_score_heat.png",
                   center=0);
# -
# ### Linear plots
# + run_control={"marked": false}
def draw_parallel_coord(df, class_column,
                        x_labels=True, yscale='linear',
                        title=None, xlabel=None, ylabel=None,
                        legend_loc='best', legend_title=None, colormap=colormap):
    """Parallel-coordinates plot of df, one line per row, colored by class_column."""
    with sb.axes_style('ticks', rc={'grid.linestyle': 'dotted'}), sb.plotting_context('paper'):
#        print(sb.axes_style())
        parallel_fig = mp.pyplot.figure(dpi=120, figsize=(10, df.shape[0]))
        # select the first colors from the colormap to ensure we use the same colors as in the stripplot later
        colors = mp.cm.get_cmap(colormap).colors[:len(df[class_column].unique())]
        axes = pd.plotting.parallel_coordinates(df,
                                                class_column=class_column,
                                                colors=colors,
                                                axvlines=False,
                                                )
        axes.tick_params(axis='x', labelrotation=90)
        set_scales(axes, yscale=yscale)
        handles, labels = axes.get_legend_handles_labels()
        axes.legend(handles, labels, loc=legend_loc, title=legend_title)
        set_labels(axes, title=title, xlabel=xlabel, ylabel=ylabel, x_labels=x_labels)
        return parallel_fig
def draw_score_parallel_coord(col, results=all_results, type_filter='all',
                              ylabel=None, filename=None, **kwargs):
    """Parallel-coordinates view of mean `col` per task, one line per framework."""
    res_group = results.groupby(['type', 'task', 'framework'])
    df = res_group[col].mean().unstack(['type', 'task'])
    df = df if type_filter == 'all' \
        else df.iloc[:, df.columns.get_loc(type_filter)]
    # framework moves from the index to a plain column, as parallel_coordinates expects
    df.reset_index(inplace=True)
    fig = draw_parallel_coord(df,
                              'framework',
                              x_labels=task_labels(df.columns.drop('framework')),
#                              xlabel="Task",
                              ylabel=ylabel or "Score",
                              legend_title="Framework",
                              **kwargs)
    if filename is not None:
        savefig(fig, create_file("graphics", results_group, filename))
    return fig
# parallel_coord_results = all_results.loc[~all_results.framework.isin(['randomforest'])]
parallel_coord_results = all_results
# + run_control={"marked": false}
# NOTE(review): "ccord" in the output filenames looks like a typo for "coord",
# kept as-is since downstream tooling may reference these exact names.
draw_score_parallel_coord('imp_score',
                          results=parallel_coord_results,
                          type_filter='binary',
                          title=f"Scores ({binary_score_label}) on {results_group} binary classification problems",
                          ylabel=binary_score_label,
                          legend_loc='lower left',
                          filename="binary_score_parallel_ccord.png");
# + run_control={"marked": false}
# symlog keeps small loglosses readable while compressing large outliers
draw_score_parallel_coord('imp_score',
                          results=parallel_coord_results,
                          type_filter='multiclass',
                          title=f"Scores ({multiclass_score_label}) on {results_group} multi-class classification problems",
                          ylabel=multiclass_score_label,
                          yscale=('symlog', dict(linthreshy=0.5)),
                          legend_loc='lower left',
                          filename="multiclass_score_parallel_ccord.png");
# + run_control={"marked": false}
draw_score_parallel_coord('norm_score',
                          results=parallel_coord_results,
                          type_filter='binary',
                          title=f"Normalized scores on {results_group} binary classification problems",
                          filename="binary_norm_score_parallel_ccord.png");
# + run_control={"marked": false}
draw_score_parallel_coord('norm_score',
                          results=parallel_coord_results,
                          type_filter='multiclass',
                          title=f"Normalized scores on {results_group} multi-class classification problems",
                          filename="multiclass_norm_score_parallel_ccord.png",
                          yscale='symlog',
                          );
# -
# ### Scatterplots
# + run_control={"marked": false}
def draw_stripplot(df, x, y, hue,
                   xscale='linear', xbound=None,
                   xlabel=None, ylabel=None, y_labels=None, title=None,
                   legend_title=None, legend_loc='best', colormap=colormap):
    """Stripplot of individual observations with overlaid per-group mean markers (diamonds)."""
    with sb.axes_style('whitegrid', rc={'grid.linestyle': 'dotted'}), sb.plotting_context('paper'):
#        print(sb.axes_style())
        # Initialize the figure
        strip_fig, axes = mp.pyplot.subplots(dpi=120, figsize=(10, len(df.index.unique())))
        set_scales(axes, xscale=xscale)
        if xbound is not None:
            axes.set_autoscalex_on(False)
            axes.set_xbound(*xbound)
#            axes.invert_xaxis()
        sb.despine(bottom=True, left=True)
        # Show each observation with a scatterplot
        sb.stripplot(x=x, y=y, hue=hue,
                     data=df, dodge=True, jitter=True, palette=colormap,
                     alpha=.25, zorder=1)
        # Show the conditional means
        sb.pointplot(x=x, y=y, hue=hue,
                     data=df, dodge=.5, join=False, palette=colormap,
                     markers='d', scale=.75, ci=None)
        # Improve the legend: stripplot and pointplot each add one entry per
        # hue level, so keep only the second half (the pointplot markers)
        handles, labels = axes.get_legend_handles_labels()
        dist = int(len(labels)/2)
        axes.legend(handles[dist:], labels[dist:], title=legend_title or hue,
                    handletextpad=0, columnspacing=1,
                    loc=legend_loc, ncol=1, frameon=True)
        set_labels(axes, title=title, xlabel=xlabel, ylabel=ylabel, y_labels=y_labels)
        return strip_fig
def draw_score_stripplot(col, results=all_results, type_filter='all', filename=None, **kwargs):
    """Stripplot of `col` per task (y axis) across frameworks (hue), optionally saved to graphics/."""
    scatterplot_df = results.set_index(['type', 'task']).sort_index()
    df = scatterplot_df if type_filter == 'all' \
        else scatterplot_df[scatterplot_df.index.get_loc(type_filter)]
    fig = draw_stripplot(
        df,
        x=col,
        y=df.index,
        hue='framework',
#        ylabel='Task',
        y_labels=task_labels(df.index.unique()),
        legend_title="Framework",
        **kwargs
    )
    if filename is not None:
        savefig(fig, create_file("graphics", results_group, filename))
    return fig
# scatterplot_results = (all_results.loc[~all_results.framework.isin(['randomforest'])]
#                        .sort_values(by=['framework'])) # sorting for colors consistency
scatterplot_results = all_results.sort_values(by=['framework']) # sorting for colors consistency
# + run_control={"marked": false}
# per-fold imputed results, binary tasks
draw_score_stripplot('imp_result',
                     results=scatterplot_results,
                     type_filter='binary',
                     title=f"Scores on {results_group} binary classification problems",
                     xlabel=binary_score_label,
                     filename="binary_results_stripplot.png");
# + run_control={"marked": false}
draw_score_stripplot('imp_result',
                     results=scatterplot_results,
                     type_filter='multiclass',
#                     xbound=(0,10),
                     xscale=('symlog', dict(linthreshx=0.5)),
                     title=f"Scores on {results_group} multi-class classification problems",
                     xlabel=multiclass_score_label,
                     filename="multiclass_results_stripplot.png");
# + run_control={"marked": false}
# normalized scores clipped to a fixed window so outliers don't flatten the plot
draw_score_stripplot('norm_score',
                     results=scatterplot_results,
                     type_filter='binary',
                     xbound=(-0.2, 2),
                     xscale='linear',
                     title=f"Normalized scores on {results_group} binary classification problems",
                     filename="binary_norm_score_stripplot.png");
# + run_control={"marked": false}
draw_score_stripplot('norm_score',
                     results=scatterplot_results,
                     type_filter='multiclass',
                     xbound=(-0.2, 2.5),
                     xscale='linear',
                     title=f"Normalized scores on {results_group} multi-class classification problems",
                     filename="multiclass_norm_score_stripplot.png");
# -
# ## Playground
# + run_control={"marked": false}
# ad-hoc inspection: tunedrandomforest rows for the jungle-chess task;
# trailing ';' suppresses notebook output
all_results.loc[(all_results.task.str.contains('jungle'))&(all_results.framework=='tunedrandomforest')];
# + run_control={"marked": false}
# folds completed by the three main AutoML frameworks
done.iloc[done.index.get_level_values('framework').isin(['autosklearn', 'h2oautoml', 'tpot'])]\
    .apply(sorted_ints);
# + run_control={"marked": false}
# distinct error messages per failed (task, fold, framework)
failures = all_failed.groupby(['task', 'fold', 'framework'])['info']\
                     .unique()
#display(failures)
# + run_control={"marked": false}
| reports/reports.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--BOOK_INFORMATION-->
# <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png">
# *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*
#
# *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*
# <!--NAVIGATION-->
# < [Help and Documentation in IPython](01.01-Help-And-Documentation.ipynb) | [Contents](Index.ipynb) | [IPython Magic Commands](01.03-Magic-Commands.ipynb) >
# # Keyboard Shortcuts in the IPython Shell
# If you spend any amount of time on the computer, you've probably found a use for keyboard shortcuts in your workflow.
# Most familiar perhaps are the Cmd-C and Cmd-V (or Ctrl-C and Ctrl-V) for copying and pasting in a wide variety of programs and systems.
# Power-users tend to go even further: popular text editors like Emacs, Vim, and others provide users an incredible range of operations through intricate combinations of keystrokes.
#
# The IPython shell doesn't go this far, but does provide a number of keyboard shortcuts for fast navigation while typing commands.
# These shortcuts are not in fact provided by IPython itself, but through its dependency on the GNU Readline library: as such, some of the following shortcuts may differ depending on your system configuration.
# Also, while some of these shortcuts do work in the browser-based notebook, this section is primarily about shortcuts in the IPython shell.
#
# Once you get accustomed to these, they can be very useful for quickly performing certain commands without moving your hands from the "home" keyboard position.
# If you're an Emacs user or if you have experience with Linux-style shells, the following will be very familiar.
# We'll group these shortcuts into a few categories: *navigation shortcuts*, *text entry shortcuts*, *command history shortcuts*, and *miscellaneous shortcuts*.
# ## Navigation shortcuts
#
# While the use of the left and right arrow keys to move backward and forward in the line is quite obvious, there are other options that don't require moving your hands from the "home" keyboard position:
#
# | Keystroke | Action |
# |-----------------------------------|--------------------------------------------|
# | ``Ctrl-a`` | Move cursor to the beginning of the line |
# | ``Ctrl-e`` | Move cursor to the end of the line |
# | ``Ctrl-b`` or the left arrow key | Move cursor back one character |
# | ``Ctrl-f`` or the right arrow key | Move cursor forward one character |
# ## Text Entry Shortcuts
#
# While everyone is familiar with using the Backspace key to delete the previous character, reaching for the key often requires some minor finger gymnastics, and it only deletes a single character at a time.
# In IPython there are several shortcuts for removing some portion of the text you're typing.
# The most immediately useful of these are the commands to delete entire lines of text.
# You'll know these have become second-nature if you find yourself using a combination of Ctrl-b and Ctrl-d instead of reaching for Backspace to delete the previous character!
#
# | Keystroke | Action |
# |-------------------------------|--------------------------------------------------|
# | Backspace key | Delete previous character in line |
# | ``Ctrl-d`` | Delete next character in line |
# | ``Ctrl-k`` | Cut text from cursor to end of line |
# | ``Ctrl-u`` | Cut text from beginning of line to cursor |
# | ``Ctrl-y`` | Yank (i.e. paste) text that was previously cut |
# | ``Ctrl-t`` | Transpose (i.e., switch) previous two characters |
# ## Command History Shortcuts
#
# Perhaps the most impactful shortcuts discussed here are the ones IPython provides for navigating the command history.
# This command history goes beyond your current IPython session: your entire command history is stored in a SQLite database in your IPython profile directory.
# The most straightforward way to access these is with the up and down arrow keys to step through the history, but other options exist as well:
#
# | Keystroke | Action |
# |-------------------------------------|--------------------------------------------|
# | ``Ctrl-p`` (or the up arrow key) | Access previous command in history |
# | ``Ctrl-n`` (or the down arrow key) | Access next command in history |
# | ``Ctrl-r`` | Reverse-search through command history |
# The reverse-search can be particularly useful.
# Recall that in the previous section we defined a function called ``square``.
# Let's reverse-search our Python history from a new IPython shell and find this definition again.
# When you press Ctrl-r in the IPython terminal, you'll see the following prompt:
#
# ```ipython
# In [1]:
# (reverse-i-search)`':
# ```
#
# If you start typing characters at this prompt, IPython will auto-fill the most recent command, if any, that matches those characters:
#
# ```ipython
# In [1]:
# (reverse-i-search)`sqa': square??
# ```
#
# At any point, you can add more characters to refine the search, or press Ctrl-r again to search further for another command that matches the query. If you followed along in the previous section, pressing Ctrl-r twice more gives:
#
# ```ipython
# In [1]:
# (reverse-i-search)`sqa': def square(a):
# """Return the square of a"""
# return a ** 2
# ```
#
# Once you have found the command you're looking for, press Return and the search will end.
# We can then use the retrieved command, and carry on with our session:
#
# ```ipython
# In [1]: def square(a):
# """Return the square of a"""
# return a ** 2
#
# In [2]: square(2)
# Out[2]: 4
# ```
#
# Note that Ctrl-p/Ctrl-n or the up/down arrow keys can also be used to search through history, but only by matching characters at the beginning of the line.
# That is, if you type **``def``** and then press Ctrl-p, it would find the most recent command (if any) in your history that begins with the characters ``def``.
# ## Miscellaneous Shortcuts
#
# Finally, there are a few miscellaneous shortcuts that don't fit into any of the preceding categories, but are nevertheless useful to know:
#
# | Keystroke | Action |
# |-------------------------------|--------------------------------------------|
# | ``Ctrl-l`` | Clear terminal screen |
# | ``Ctrl-c`` | Interrupt current Python command |
# | ``Ctrl-d`` | Exit IPython session |
#
# The Ctrl-c in particular can be useful when you inadvertently start a very long-running job.
# While some of the shortcuts discussed here may seem a bit tedious at first, they quickly become automatic with practice.
# Once you develop that muscle memory, I suspect you will even find yourself wishing they were available in other contexts.
# <!--NAVIGATION-->
# < [Help and Documentation in IPython](01.01-Help-And-Documentation.ipynb) | [Contents](Index.ipynb) | [IPython Magic Commands](01.03-Magic-Commands.ipynb) >
| PythonDataScienceHandbook/notebooks/01.02-Shell-Keyboard-Shortcuts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:python_sas]
# language: python
# name: conda-env-python_sas-py
# ---
# # Problem Definition
# Source: [Kaggle - Flight Delay Prediction](https://www.kaggle.com/divyansh22/flight-delay-prediction)
# <img src="misc/kaggle_flight_delay_prediction.png" width=90%/>
from swat import CAS
import numpy as np
import seaborn as sn
# Connect to the CAS server.
# NOTE(review): the password literal is a placeholder — inject real
# credentials via configuration/environment, never commit them.
s = CAS(hostname='localhost', port=5570, username='sas', password='<PASSWORD>')
s.setsessopt(messagelevel='NONE')  # silence CAS action log messages
# ## Read local data
# +
# Columns to load from the CSV; each entry is annotated with its meaning.
column_names = ['OP_CARRIER', # Code assigned by IATA and commonly used to identify a carrier.
                'OP_CARRIER_FL_NUM', # Flight number
                'ORIGIN', # Origin
                'DEST', # Destination
                'DAY_OF_MONTH', # Day of month
                'DAY_OF_WEEK', # Day of week
                'DEP_TIME', # Departure Time
                'ARR_TIME', # Arrival Time
                'DISTANCE', # Flight Distance
                'CANCELLED', # Cancel Indicator (Binary)
                'DIVERTED', # DIVERTED Indicator (Binary)
                'ARR_DEL15', # Delay Indicator (Binary)
                'YEAR'] # Year (will be used for partitioning)
# Read the ID/flag columns as strings so the binary indicators are treated
# as categorical values rather than numbers.
data_types = {'OP_CARRIER_FL_NUM': str,
              'ORIGIN': str,
              'DEST': str,
              'CANCELLED': str,
              'DIVERTED': str,
              'ARR_DEL15': str}
# Upload the local CSV into an in-memory CAS table named 'flight_data';
# the trailing [column_names] fixes the column order of the returned table.
flight_data = s.read_csv(filepath_or_buffer = 'data/flights_january_2019.csv',
                         usecols = column_names,
                         dtype = data_types,
                         casout = dict(name='flight_data', replace=True))[column_names]
# -
# Inspect the loaded CAS table: table metadata, column types, and a preview.
flight_data.tableinfo()
flight_data.columninfo()
flight_data.head()
# +
# Split columns into numeric and categorical lists for later summaries,
# correlations, and modelling.
numeric_variables = flight_data.select_dtypes('numeric').columns.to_list()
numeric_variables.remove('YEAR') # Remove partition variable
categorical_variables = flight_data.select_dtypes('character').columns.to_list()
print("\nNumeric Variables:\n{}\n".format(numeric_variables))
print("\nCategorical Variables:\n{}".format(categorical_variables))
# -
# ## Simple Statistics & Plotting
s.help()
# +
# s.simple?
# -
# ### Simple descriptive statistics
# Descriptive statistics (min/max/mean/std, ...) for the numeric columns.
flight_data.simple.summary(inputs=numeric_variables)
# ### Frequency distribution for variables
# Simple frequency for OP_CARRIER
frequency_by_carrier = flight_data.simple.freq(inputs='OP_CARRIER')['Frequency']
frequency_by_carrier
frequency_by_carrier.plot.bar(x='CharVar', y='Frequency', figsize=(12,5), rot=0)
# Crosstab for OP_CARRIER -> Which airlines have the most delays?
flight_data_crosstab = flight_data.simple.crosstab(row='OP_CARRIER', aggregator='sum', includeMissing=False, col='ARR_DEL15')['Crosstab']
# NOTE: the German labels 'Pünktlich'/'Verspätet' mean "on time"/"delayed".
flight_data_crosstab.rename(columns={'Col1':'Pünktlich', 'Col2':'Verspätet'}, inplace=True)
flight_data_crosstab.plot.bar(x='OP_CARRIER', y=['Pünktlich','Verspätet'], stacked=True, figsize=(15,5))
# ### Correlation of numerical variables
# Pearson correlation computed server-side by the CAS 'simple' action set.
pearson_correlation = flight_data.simple.correlation(inputs=numeric_variables)
pearson_correlation
# SAS mirrors a lot of the Pandas-functionality
pearson_correlation = flight_data[numeric_variables].corr()
# Visualize with open source libraries
sn.set(font_scale=1.2)
sn.heatmap(pearson_correlation)
# ### SAS mirrors a lot of the pandas API functionality
type(flight_data) # flight_data is a CAS-table
# Pandas: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.describe.html
# SAS: https://sassoftware.github.io/python-swat/generated/swat.cas.table.CASTable.describe.html
flight_data.describe(include='all')
# Pandas: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sample.html
# SAS: https://sassoftware.github.io/python-swat/generated/swat.cas.table.CASTable.sample.html
flight_data.sample(n=3).head()
# ## Modelling
# #### Data Preparation
# 1. We remove missing values
# 2. We load data from 2020
# 3. We combine data from 2019 and 2020
# 4. We train our models on data from 2019 and evaluate its performance on data from 2020
# Remove missing values and create partition variable
# Drop rows with missing values from the 2019 training data.
flight_data_cleaned = flight_data.dropna()
print('\nDropped {} rows containing missing values from 2019 data.\n'.format(len(flight_data)-len(flight_data_cleaned)))
flight_data_cleaned.head()
# Load and clean test data
flight_data_test = s.read_csv('data/flights_january_2020.csv',
                              usecols=column_names,
                              dtype=data_types,
                              casout=dict(name='flight_data_test', replace=True))[column_names]
flight_data_test_cleaned = flight_data_test.dropna()
print('\nDropped {} rows containing missing values from 2020 data.\n'.format(len(flight_data_test)-len(flight_data_test_cleaned)))
flight_data_test_cleaned.head()
# Combine data from 2019 and 2020
# (the YEAR column later partitions this combined table into train/validate).
flight_data_cleaned_2019_2020 = flight_data_cleaned.append(flight_data_test_cleaned)
flight_data_cleaned_2019_2020.head()
# ### Logistic Regression
# https://documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.5&docsetId=casactstat&docsetTarget=cas-regression-logistic.htm&locale=en#PYTHON.cas-regression-logistic-partfit
# Load the CAS 'regression' action set providing logistic regression.
s.loadactionset('regression')
# +
# Model inputs: interval variables (DEP_TIME, DISTANCE) plus nominals.
inputs = ['DAY_OF_MONTH',
          'DAY_OF_WEEK',
          'DEP_TIME',
          'DISTANCE',
          'OP_CARRIER']
# Variables to treat as nominal (categorical), including the target.
nominals = ['DAY_OF_MONTH',
            'DAY_OF_WEEK',
            'OP_CARRIER',
            'ARR_DEL15']
target = 'ARR_DEL15'
# -
# Fit a stepwise logistic regression; train on 2019 rows, validate on 2020
# rows, as selected by the YEAR partition variable.
s.regression.logistic(
    table = flight_data_cleaned_2019_2020, # input table
    inputs = inputs, # input variables
    nominals = nominals, # nominal variables
    target = target, # target variable
    selection = dict(method='STEPWISE'), # Variable selection
    partbyvar = dict(name='YEAR', train='2019', validate='2020'), # partition by YEAR variable
    partfit = True, # fit-statistics for partitioned data
    display = dict(names=['ModelInfo','FitStatistics']), # display ModelInfo and Fit statistics
    store = dict(name='logreg_model', replace=True) # table to store regression model
)
# Score the cleaned 2019 table with the stored model and keep all columns.
s.regression.logisticscore(
    restore = 'logreg_model',
    table = flight_data_cleaned,
    casout = dict(name='logreg_scored', replace=True),
    copyVars = 'ALL'
)
s.CASTable('logreg_scored').head(5)
# ### Gradient Boosting
# https://documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.5&docsetId=casanpg&docsetTarget=cas-decisiontree-gbtreetrain.htm&locale=en#SAS.cas-decisiontree-gbtreetrain-nominals
# Load the CAS 'decisiontree' action set (trees, forests, gradient boosting).
s.loadactionset('decisiontree')
s.help(actionset='decisiontree')
# Train a small gradient-boosted tree model on 2019 data, validating
# against the 2020 table.
s.decisiontree.gbtreetrain(
    table = flight_data_cleaned, # input table
    inputs = inputs, # input variables
    nominals = nominals, # nominal variables
    target = target, # target variable
    validtable = flight_data_test_cleaned, # validation table
    ntree = 10, # Number of trees
    maxlevel = 3, # Maximum tree depth
    casOut = dict(name='gbtree_model', replace=True) # table to store boosting model
)
# Score the 2020 data with the trained boosting model.
s.decisiontree.gbtreescore(
    modeltable = 'gbtree_model',
    table = flight_data_test_cleaned,
    casOut = dict(name='gbtree_scored', replace=True),
    copyVars = list(flight_data_test_cleaned.columns)
)
s.CASTable('gbtree_scored').head(5)
# ### Auto ML
# Automated machine learning with the Data Science Pilot action set:
# it explores transformations, screens/selects features, and trains
# several model types, ranking the resulting pipelines.
import pandas as pd  # bug fix: pd.set_option below requires pandas, which was never imported

s.loadactionset(actionset="dataSciencePilot")
flight_data_cleaned.columns
flight_data
# +
inputs = ['DAY_OF_MONTH',
          'DAY_OF_WEEK',
          'DEP_TIME',
          'DISTANCE',
          'OP_CARRIER']
nominals = ['DAY_OF_MONTH',
            'DAY_OF_WEEK',
            'OP_CARRIER',
            'ARR_DEL15']
# -
flight_data[inputs+[target]].head()
# Run AutoML on the selected columns; MCE (misclassification error) is the
# objective, and the best 8 pipelines are kept (2-fold cross-validation).
s.dataSciencePilot.dsAutoMl(
    table = flight_data[inputs+[target]],#{"name":'flight_data', "vars":inputs+[target]},
    target = "ARR_DEL15",
    explorationPolicy = {},
    screenPolicy = {},
    selectionPolicy = {},
    transformationPolicy = {"missing":True, "skewness":True, "kurtosis":True, "Outlier":True},
    modelTypes = ["decisionTree","forest","gradboost","logistic"],
    objective = "MCE",
    sampleSize = 8,
    topKPipelines = 8,
    kFolds = 2,
    transformationOut = {"name" : "TRANSFORMATION_OUT", "replace" : True},
    featureOut = {"name" : "FEATURE_OUT", "replace" : True},
    pipelineOut = {"name" : "PIPELINE_OUT", "replace" : True},
    saveState = {"name" : "ASTORE_OUT", "replace" : True}
)
# Display table of created pipelines
s.fetch(table = {"name": "PIPELINE_OUT"})
# Display table of created feature transformations
# (max_colwidth=0 means "no truncation" so long transformation labels show).
pd.set_option('display.max_colwidth', 0)
s.fetch(table = {"name": "FEATURE_OUT"}).Fetch[['FeatureId','InputVar1','Label','IsGenerated']]
| swat/airport_statistics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python3.6
# ---
# # GenePattern Notebook Tutorial
#
# The GenePattern Notebook Environment provides a variety of features for both basic and advanced users. This tutorial will familiarize you with some of its most important features.
#
# <div class="alert alert-info">
# <b>All instructions for you to follow will appear in a blue panel like this one.</b>
# </div>
# ## GenePattern Notebook Introduction Video
#
# Below is a brief video introduction to the GenePattern Notebook Environment. This video introduces many of the basic concepts and features provided by the tool. If you would prefer a more "hands on" introduction, scroll down and follow the subsequent interactive tutorial.
# <div class="alert alert-info"><strong><p>To view the video, click the Play button in the middle of the video cell.</p><p>If the video is not visible, highlight the cell below and press the Run Cell ( <i class="fa-step-forward fa"></i> ) button in the toolbar to see the video.</strong></p></div>
#
# +
# Embed the introductory YouTube video directly in the notebook output.
from IPython.display import IFrame
IFrame('https://www.youtube.com/embed/8npzyGLpUHU', width="854", height="480")
# -
# ## Basic Features
#
# These are the most commonly used features in the GenePattern Notebook environment. They also form the building blocks for most advanced use cases.
# ### Notebook Cells
#
# All notebooks consist of some number of cells. These cells may interleave text, images, tables, code or interactive widgets. Try clicking different sections of this notebook and will you notice the different cells as they are selected.
# #### Inserting cells
#
# New cells can be inserted by clicking the Insert Cell button ( <i class="fa-plus fa"></i> ) on the toolbar or by using the Insert menu at the top of the screen.
# <div class="alert alert-info">
# <b>1. Select this cell by clicking to the left of the text.<br>
# 2. Click the Insert Cell button ( <i class="fa-plus fa"></i> ) to add a new cell below this one.</b>
# </div>
# #### Executing cells
#
# Cells can be executed by clicking the Run Cell button ( <i class="fa-step-forward fa"></i> ) on the toolbar or using the menu at the top of the screen to select *Cell > Run Cells*. Depending on the type of cell, when a cell executes it will run any code contained in the cell or render any HTML/markdown as formatted text.
# <div class="alert alert-info">
# <b>
# 1. Select this cell by clicking to the left of the text.<br>
# 2. Create a new cell below this one and type or paste <code> 3 + 7 </code> into the input box. <br>
# 3. Click the Run Cell button or type Shift+Enter to execute the cell.</b>
# </div>
# #### Changing cell type
#
# Every cell has a type. Cell types include code cells, markdown cells and GenePattern cells. Code cells contain code that can be executed. Markdown cells contain either markdown or HTML that is rendered when the cell is executed. GenePattern cells contain interactive widgets that allow you to access GenePattern's many analyses.
#
# To change cell type, select a cell and then use the dropdown menu on the toolbar above. Alternatively, you can use the menu at the top of the page to select *Cell > Cell Type*.
# ### GenePattern Cells
#
# The GenePattern Notebook environment provides a number of graphical widgets that make performing analyses easy, even for non-programming users. These widgets take the form of GenePattern Cells that allow a user to prepare analyses, launch jobs and visualize results.
#
# To insert a GenePattern Cell, insert a new cell, then select the cell and then change the cell type to GenePattern either by using the Cell > Cell Type > GenePattern menu or by going to the dropdown menu in the notebook toolbar and selecting GenePattern from the list of options.
# <div class="alert alert-info">
# <b>1. Insert a new cell.<br/>
# 2. Change the cell type to GenePattern. <br/>
# 3. You should now have a new cell that looks exactly like the one shown below.</b>
# </div>
# <b>Below is an example GenePattern authentication cell. The cell you have just created above should look identical.</b>
# + genepattern={"name": "Login", "server": "https://cloud.genepattern.org/gp", "type": "auth"}
# Requires GenePattern Notebook: pip install genepattern-notebook
import gp
import genepattern
# Username and password removed for security reasons.
# Render the GenePattern login widget for the public cloud server; the two
# empty strings are the (removed) username and password.
genepattern.display(genepattern.session.register("https://cloud.genepattern.org/gp", "", ""))
# -
# ### Authentication Cells
#
# The first GenePattern cell that you have encountered is an Authentication Cell. This cell allows a user to sign into a GenePattern server. Doing this allows GenePattern to keep a user's results private, and to remember a user's settings.
#
# Authentication cells look like a login form with the additional option of selecting which GenePattern server to sign into. If the user has already authenticated, such as when using the GenePattern Notebook Repository, the user will instead be prompted to either sign in as the current user or to cancel and sign in as a different user.
# <div class="alert alert-info">
# <b>1. Sign into the authentication cell above.<br/>
# </b>
# </div>
# ### Analysis Cells
#
# After a user has signed in using an Authentication Cell, the next task is usually to insert an Analysis Cell to perform an analysis. To do this click the <em><i class="fa-th fa"></i> Tools</em> button in the notebook toolbar. This will open a dialog box with a list of available GenePattern analyses. Search or browse through the list and then click one of the options to insert an Analysis Cell of that type.
#
# Every Analysis Cell has a number of parameters, which can be used to upload data and to select other options for the analysis. Once you fill in these parameters, click Run to submit them as a job on the GenePattern server.
#
# Once the Run button has been clicked, all selected files will upload and then a Job Cell will be inserted below to indicate the status of the job in GenePattern’s queue (see Job Cells below).
# <b>Below is an example GenePattern analysis cell for a module that does some basic formatting. While this analysis isn't particularly biologically interesting, it nevertheless serves as a concise example of the analysis cell interface.</b>
#
# <b>If you are instead seeing an error message stating "You must be authenticated...," you are likely not yet logged into the GenePattern server. You will first need to scroll up and sign into the GenePattern authentication cell before you can view the available analyses.</b>
# + genepattern={"param_values": {"input.filename": null, "job.cpuCount": null, "job.memory": null, "job.queue": null, "job.walltime": null, "output.file": null}, "type": "task"}
# Build a ConvertLineEndings analysis cell: look up the module by its LSID
# on the first authenticated session, set default parameter values, and
# render the interactive analysis widget.
convertlineendings_task = gp.GPTask(genepattern.get_session(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00002')
convertlineendings_job_spec = convertlineendings_task.make_job_spec()
convertlineendings_job_spec.set_parameter("input.filename", "")
convertlineendings_job_spec.set_parameter("output.file", "<input.filename_basename>.cvt.<input.filename_extension>")
genepattern.display(convertlineendings_task)
# -
# <div class="alert alert-info" style="font-weight: bold;">
# <ol>
# <li>Highlight this cell and then click the <i><i class="fa-th fa"></i> Tools</i> button in the toolbar. This should open a dialog with a list of the available GenePattern analyses.</li>
# <li>Scroll through the list or use the search box in the upper right of the dialog to find the <i>PreprocessDataset</i> module. This module is used to preprocess data ahead of further analysis.</li>
# <li>Click the listing for <i>PreprocessDataset</i>. This should insert a new analysis cell below, representing the preprocessing analysis.</li>
# </ol>
# </div>
# ### Job Cells
#
# The third and final kind of GenePattern cell are Job Cells. These cells represent an analysis job that has been submitted to GenePattern queue for processing. Jobs progress through the states of Pending, Running and finally either to Completed or Error.
#
# Once a job has completed or experienced an error, the cell will update to display a list of outputs. These outputs are files which can be displayed in the browser, downloaded or sent as input to another GenePattern analysis. Outputs are indicated by the <i class="fa fa-info-circle" style="color: rgb(128, 128, 128);"></i> icon.
#
# To see a list of all output file options, simply click that output in the list and a menu will open, displaying the available options.
#
# If this analysis includes visualization, the visualization will load and appear inside the Job Cell as well.
# <b>Below is a screenshot of a GenePattern job cell. We have not embedded one directly because GenePattern jobs are private to the user running the analysis.</b>
#
# <img src="http://genepattern-notebook.org/wp-content/uploads/2017/02/content_screen_shot_2015-08-24_at_10_33_20.png" />
# <div class="alert alert-info" style="font-weight: bold;">
# <ol>
# <li>We are going to run the ProprocessDataset analysis inserted earlier. To do this we need to provide the analysis with a data file on which to perform processing. A suitable file is linked below. This file contains gene expression data comparing ALL samples with AML samples.
# <a style="display: block; padding: 10px;" href="https://datasets.genepattern.org/data/all_aml/all_aml_test.gct">https://datasets.genepattern.org/data/all_aml/all_aml_test.gct</a>
# </li>
# <li>Copy the URL for this file, then scroll back up to the <i>ProprocessDataset</i> cell and paste the URL into the <i>input filename</i> parameter. Alternatively, you could download this file and then use the <i>Upload File</i> button next to the <i>input filename</i> parameter</li>
# <li>The default values of the other input settings should be sufficient for this file. If you want more information about them you can read the documentation by going to the Gear menu ( <span class="fa fa-cog"></span> ) on the cell and selecting <i>Documentation</i>.</li>
# <li>Click the <i>Run</i> button to begin the analysis. This will create a new GenePattern job cell.</li>
# </ol>
# </div>
# ### Markdown Cells
#
# Markdown cells are another cell type available in the GenePattern Notebook environment. They allow a notebook author to take notes, document methods or embed images in a notebook document.
#
# To insert a markdown cell, first insert a new cell, either through the Insert > Insert Cell Below menu or by clicking the <i class="fa-plus fa"></i> button in the notebook toolbar. Once a new cell has been inserted, you can select the cell and then change the cell type to Markdown either by using the Cell > Cell Type > Markdown menu or by going to the dropdown menu in the notebook toolbar and selecting Markdown from the list of options.
#
# Markdown cells allow the user to format text using either <a href="https://developer.mozilla.org/en-US/docs/Web/HTML" target="_blank">HTML</a> or the <a href="https://daringfireball.net/projects/markdown/syntax" target="_blank">Markdown</a> syntax. Additionally, we provide a "What You See is What You Get" rich text editor (see the feature below).
# <div class="alert alert-info" style="font-weight: bold;">
# <ol>
# <li>Insert a new cell below</li>
# <li>Use the dropdown menu in the toolbar to change the cell type to <i>Markdown</i>.</li>
# </ol>
# </div>
# ### Rich Text Editor
# The GenePattern Rich Text Editor allows a user to format notes and documentation in a notebook in much the same way that one might use Microsoft Word or Libre Office — without the need to write a single HTML tag or line of markdown.
#
# To use the Rich Text Editor, first insert a markdown cell (see the instructions above). Once a cell has been changed to the markdown type, two buttons should appear to the left of the cell. The <i class="fa fa-file-code-o"></i> button opens the Rich Text Editor and the <i class="fa-step-forward fa"></i> button finalizes the cell and renders the text.
#
# Click the <i class="fa fa-file-code-o"></i> button and the editor will appear. A toolbar will show above the cell, allowing the user to format text, insert headers or add links. Style the text as desired, and when finished, click the <i class="fa-step-forward fa"></i> button to finish editing.
# <div class="alert alert-info" style="font-weight: bold;">
# <ol>
# <li>Highlight the markdown cell you created above and click the <i class="fa fa-file-code-o"></i> button on the left side of the cell to activate the Rich Text Editor. If you don't see the button, make sure the cell is in editing mode by double-clicking the cell.</li>
# <li>Edit the text of the cell and then click the <i class="fa-step-forward fa"></i> button to display the rendered text.</li>
# </ol>
# </div>
# ## Publishing & Sharing Features
#
# When using the <a href="https://notebook.genepattern.org" target="_blank">GenePattern Notebook Repository</a>, users have the ability to publicly share their notebooks and to run notebooks shared by others.
# ### Browsing Public Notebooks
#
# * To view the available notebooks, click the "Public Notebooks" tab and browse through the list.
# * To obtain a copy and run the shared notebook, click the notebook in the list and then select "Get a Copy" on the confirmation dialog that pops up. This will make a copy of the chosen notebook in your current directory, accessible by clicking on the Files tab.
# * To run the notebook, click the "Click here if you would like to open this notebook" link in the resulting dialog, or go back to the "Files" tab and run the notebook as normal from there.
# <div class="alert alert-info" style="font-weight: bold;">
# <ol>
# <li>To browse public notebooks you will need to be logged into the GenePattern Notebook Repository and on the index page displaying your list of notebook files. To view this page <a href="https://notebook.genepattern.org" target="_blank">click here</a> and log in if required.</li>
# <li>On that page click the "Public Notebooks" tab found near the top. This will display the list of available notebooks. Mouse over the name of a notebook to read a description.</li>
# <li>Find a notebook in the list you want to run and then click it. This will open a dialog with the full description. Click the "Get a Copy" button to get a copy of the selected notebook.</li>
# <li>Click "OK" to close the dialog, then click on the "Files" tab at the top of the page. This will bring you back to your list of notebooks. You should see that the selected notebook is now in the list. You can click on it to run it from here, just as you would any other notebook.</li>
# </ol>
# </div>
# ### Publicly Sharing Notebooks
# * To publicly share a notebook, click the checkbox next to that notebook and then click the "Publish" button on the toolbar above.
# * This will open a dialog with a short form, prompting you to enter the author's name, the quality and a brief description. Fill out this information and then click the "Publish" button on the dialog.
# * Your notebook should now be available to the public and should appear on the "Public Notebooks" tab. The published version of the notebook is checkpoint of the notebook at the time it was published. Any changes you make to the notebook in the future are not copied over to the published version unless you explicitly choose to update the notebook in the public repository.
#
# <img src="http://genepattern-notebook.org/wp-content/uploads/2017/02/content_share-notebook.jpg" style="border: black solid 1px;" />
# ### Updating or Removing a Public Notebook
# * You can update or remove a notebook that you have made public by first clicking the checkbox next to the notebook and then clicking the "Publish" button.
# * In the dialog that comes up you will have the option to edit any of the notebook's metadata, such as author name, description, etc.
# * To update or remove the notebook, click the "Update" or "Unpublish" buttons, respectively.
# ## Programmatic Features
#
# In addition to the basic and publishing features intended for use by both non-programming and programming users alike, the GenePattern Notebook environment also provides a variety of features intended primarily for use by coders.
# ### Code Cells
#
# Code cells allow a user to type in and execute Python code. They are the default type of cell. Simply enter the Python code you want to run and then click the <i class="fa-step-forward fa"></i> (run cell) button in the notebook toolbar.
#
# More information on code cells and their various options is available in the <a href="https://jupyter.readthedocs.io/en/latest/">Jupyter documentation</a>.
# ### GenePattern Python Library
#
# A library is available for using Python to interact with available GenePattern's service. This allows a user to programmatically prepare analyses, launch jobs and retrieve results.
#
# A complete tutorial on using the GenePattern Python Library is available as a public notebook in the GenePattern Notebook Repository. Additional information is available in our <a href="http://software.broadinstitute.org/cancer/software/genepattern/programmers-guide#_Using_GenePattern_from_Python" target="_blank">documentation</a>.
# ### Send to Code
#
# The GenePattern Python Library seamlessly integrates with GenePattern cells. Code examples of how to reference GenePattern jobs or GenePattern result files are available in GenePattern Job Cells by clicking a job result and selecting “Send to Code” in the menu.
#
# <img src="http://genepattern-notebook.org/wp-content/uploads/2017/02/content_screen-shot-2015-10-15-at-13_50.jpg" />
# ### Send to Dataframe
#
# The GenePattern Python Library also provides functionality for common GenePattern file formats, allowing them seamlessly integrate with <a href="http://pandas.pydata.org/">Pandas</a>, a popular Python data analysis library.
#
# Both the <a href="http://software.broadinstitute.org/cancer/software/genepattern/file-formats-guide" target="_blank">GCT and ODF file formats</a> are easily loaded as Pandas Dataframes. Code examples of how to load these files are available in GenePattern Job Cells by clicking a GCT or ODF job result and selecting “Send to Dataframe” in the menu.
# ### Python Variable Input
#
# As part of the seamless integration between Python and GenePattern, Python variables may be directly used as input in GenePattern Analysis Cells. To use a Python variable as input, first define the variable in a code cell, then in a GenePattern Analysis cell, enter the variable name surrounded by two sets of curly brackets. For example, using the variable <code> foo </code> would appear as <code> {{ foo }} </code>.
#
# When the Run button is clicked to launch the analysis job, the notebook will first obtain the value of the variable used as input and then send that to the GenePattern server. As the GenePattern services expect a string, non-string variables will be cast as strings when they are evaluated.
| GenePattern Notebook Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Block-view demo setup: load an image for per-block downsampling below.
from skimage import color, io
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from skimage.util import view_as_blocks
# Bug fix: the original line fused `import skimage` onto the end of the
# statement (`test=skimage.io.imread(...)import skimage`), a syntax error.
# `io` is already imported from skimage above, so use it directly.
test = io.imread('./Downloads/demin.jpg')
plt.imshow(test)
# Convert to grayscale so each pixel is a single intensity value.
test=color.rgb2gray(test)
plt.imshow(test, cmap='gray')
test.shape
# Partition the image into non-overlapping 4x3 blocks.
# NOTE(review): view_as_blocks requires each image dimension to be an exact
# multiple of the block shape — confirm the image size is divisible by (4, 3).
block_shape=(4,3)
test_blocks=view_as_blocks(test,block_shape)
test_blocks.shape
# Flatten each block into a vector so per-block statistics can be taken.
flattend_blocks=test_blocks.reshape(test_blocks.shape[0],test_blocks.shape[1],-1)
# Downsample by replacing each block with its mean intensity...
mean_blocks=np.mean(flattend_blocks,axis=2)
plt.imshow(mean_blocks,interpolation='nearest',cmap='gray')
# ...then again with the per-block median (reusing the same variable name).
mean_blocks=np.median(flattend_blocks,axis=2)
plt.imshow(mean_blocks,interpolation='nearest',cmap='gray')
| BlockViews.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .js
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Arc
// language: javascript
// name: arc
// ---
// %conf streaming=true
// ## Markdown cell
// A markdown cell to ensure the conversion doesn't break.
{
"type": "RateExtract",
"name": "create a streaming source",
"environments": [
"production",
"test"
],
"outputView": "stream0"
}
// %sql outputView=abc_def environments=production,test persist=true sqlParams=inputView=${INPUTVIEW_ARGUMENT}
SELECT *
FROM ${inputView}
// + active=""
// a raw NBConvert cell
// -
// %configplugin
{
"type": "ai.tripl.arc.plugins.TestDynamicConfigurationPlugin",
"environments": ["test"],
"key": "testValue"
}
// %lifecycleplugin
{
"type": "ai.tripl.arc.plugins.TestLifecyclePlugin",
"name": "test",
"environments": ["test"],
"outputViewBefore": "before",
"outputViewAfter": "after",
"value": "testValue"
}
// %arc numRows=10
{
"type": "RateExtract",
"name": "create a streaming second source",
"environments": [
"production",
"test"
],
"outputView": "stream2"
}
// %sqlvalidate environments=production,test sqlParams=message=${INPUTVIEW_ARGUMENT}
SELECT
TRUE AS valid
,"${message}" AS message
// %log environments=production,test sqlParams=message=${INPUTVIEW_ARGUMENT}
SELECT
"${message}" AS message
| src/test/resources/conf/inlinesql.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# Out-of-core sentiment analysis: imports and English stop-word list.
import numpy as np
import re
from nltk.corpus import stopwords
# The `stop` is defined as earlier in this chapter
# Added it here for convenience, so that this section
# can be run as standalone without executing prior code
# in the directory
# NOTE(review): requires the NLTK stopwords corpus to be installed
# (nltk.download('stopwords')) — confirm it is available locally.
stop = stopwords.words('english')
def tokenizer(text, stop_words=None):
    """Clean raw review text and split it into tokens.

    Strips HTML tags, extracts emoticons (e.g. ':)', ';-(') so they
    survive punctuation removal, lower-cases the text, replaces all
    non-word characters with spaces, re-appends the emoticons (with any
    '-' nose removed), and finally drops stop words.

    Parameters
    ----------
    text : str
        Raw document text (may contain HTML markup).
    stop_words : collection of str, optional
        Words to filter out. Defaults to the module-level ``stop`` list
        (NLTK English stop words), preserving the original behavior.

    Returns
    -------
    list of str
        The cleaned, tokenized document.
    """
    if stop_words is None:
        stop_words = stop
    # Raw strings fix the invalid escape sequences ('\)', '\(', '\W')
    # that warn under modern Python.
    text = re.sub(r'<[^>]*>', '', text)
    emoticons = re.findall(r'(?::|;|=)(?:-)?(?:\)|\(|D|P)', text.lower())
    text = re.sub(r'[\W]+', ' ', text.lower()) + \
        ' '.join(emoticons).replace('-', '')
    return [w for w in text.split() if w not in stop_words]
def stream_docs(path):
    """Yield (text, label) pairs one at a time from a sentiment CSV.

    The file's first line is assumed to be a header and is skipped.
    Each remaining line is expected to end with ',<label>\\n' where
    <label> is a single digit; everything before that suffix is the
    review text.
    """
    with open(path, 'r', encoding='utf-8') as handle:
        next(handle)  # discard the header row
        for row in handle:
            # Last three characters are ',', the digit label, and '\n'.
            yield row[:-3], int(row[-2])
# -
next(stream_docs(path='movie_data.csv'))
def get_minibatch(doc_stream, size):
    """Pull the next *size* documents and labels from *doc_stream*.

    Returns a (docs, labels) pair of lists, or (None, None) once the
    stream runs out before a full batch is collected — any partial
    batch is discarded, matching the training loop's stop condition.
    """
    batch_docs = []
    batch_labels = []
    for _ in range(size):
        try:
            text, label = next(doc_stream)
        except StopIteration:
            return None, None
        batch_docs.append(text)
        batch_labels.append(label)
    return batch_docs, batch_labels
# +
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
# HashingVectorizer is stateless (no vocabulary to fit), which makes it
# suitable for out-of-core learning; 2**21 features keeps hash collisions
# rare, and our own tokenizer handles cleaning and stop-word removal.
vect = HashingVectorizer(decode_error='ignore',
                         n_features=2**21,
                         preprocessor=None,
                         tokenizer=tokenizer)
# +
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
# NOTE(review): `distutils` is deprecated (removed in Python 3.12) and the
# two imports above appear unused here — presumably left over from a
# version check; confirm before removing.
# NOTE(review): loss='log' was renamed to 'log_loss' in scikit-learn 1.1+;
# confirm the installed scikit-learn version accepts this spelling.
clf = SGDClassifier(loss='log', random_state=1)
doc_stream = stream_docs(path='movie_data.csv')
# +
import pyprind
# Progress bar over 45 minibatches (45 * 1000 = 45,000 training documents).
pbar = pyprind.ProgBar(45)
classes = np.array([0, 1])
for _ in range(45):
    X_train, y_train = get_minibatch(doc_stream, size=1000)
    # get_minibatch returns (None, None) once the stream is exhausted.
    if not X_train:
        break
    # Hash the raw documents into sparse feature vectors, then update the
    # model incrementally; `classes` must be passed on every partial_fit.
    X_train = vect.transform(X_train)
    clf.partial_fit(X_train, y_train, classes=classes)
    pbar.update()
# +
#X_test, y_test = get_minibatch(doc_stream, size=5000)
#X_test = vect.transform(X_test)
#print('Accuracy: %.3f' % clf.score(X_test, y_test))
##Accuracy: 0.868
# -
| .ipynb_checkpoints/Untitled1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# # STA 208: Homework 3
# This is based on the material in Chapter 4 of 'Elements of Statistical Learning' (ESL), in addition to lectures 7-8. Chunzhe Zhang came up with the dataset and the analysis in the second section.
# ## Instructions
#
# We use a script that extracts your answers by looking for cells in between the cells containing the exercise statements (beginning with __Exercise X.X__). So you
#
# - MUST add cells in between the exercise statements and add answers within them and
# - MUST NOT modify the existing cells, particularly not the problem statement
#
# To make markdown, please switch the cell type to markdown (from code) - you can hit 'm' when you are in command mode - and use the markdown language. For a brief tutorial see: https://daringfireball.net/projects/markdown/syntax
#
# In the conceptual exercises you should provide an explanation, with math when necessary, for any answers. When answering with math you should use basic LaTeX, as in
# $$E(Y|X=x) = \int_{\mathcal{Y}} f_{Y|X}(y|x) dy = \int_{\mathcal{Y}} \frac{f_{Y,X}(y,x)}{f_{X}(x)} dy$$
# for displayed equations, and $R_{i,j} = 2^{-|i-j|}$ for inline equations. (To see the contents of this cell in markdown, double click on it or hit Enter in escape mode.) To see a list of latex math symbols see here: http://web.ift.uib.no/Teori/KURS/WRK/TeX/symALL.html
#
# When writing pseudocode, you should use enumerated lists, such as
#
# __Algorithm: Ordinary Least Squares Fit__
# (Input: X, y;
# Output: $\beta$)
# 1. Initialize the $p \times p$ Gram matrix, $G \gets 0$, and the vector $b \gets 0$.
# 2. For each sample, $x_i$:
# 1. $G \gets G + x_i x_i^\top$.
# 2. $b \gets b + y_i x_i$
# 3. Solve the linear system $G \beta = b$ and return $\beta$
#
# __Exercise 1.1__ (10 pts - 2 each)
#
# Recall that surrogate losses for large margin classification take the form, $\phi(y_i x_i^\top \beta)$ where $y_i \in \{-1,1\}$ and $\beta, x_i \in \mathbb R^p$.
#
# The following functions are used as surrogate losses for large margin classification. Demonstrate if they are convex or not, and follow the instructions.
#
# 1. exponential loss: $\phi(x) = e^{-x}$
# 1. truncated quadratic loss: $\phi(x) = (\max\{1-x,0\})^2$
# 1. hinge loss: $\phi(x) = \max\{1-x,0\}$
# 1. sigmoid loss: $\phi(x) = 1 - \tanh(\kappa x)$, for fixed $\kappa > 0$
# 1. Plot these as a function of $x$.
#
# (This problem is due to notes of <NAME>.)
#
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# __1. Exponential Loss__
# First, $\phi'(x) = -e^{-x}$.
# Now, $\phi''(x) = e^{-x}$, which is $>0 \ \ \forall x$. Thus, it is convex.
# The plot is below.
x1 = np.linspace(-15,15,100)
y1 = np.exp(-x1)
plt.title('Exponential Loss')
plt.plot(x1,y1)
plt.show()
# __2. Truncated Quadratic Loss__
# First, $\phi'(x) = (2x-2)1_{\{x \le 1\}}$.
# Now, $\phi''(x) = 2 \cdot 1_{\{x \le 1\}}$, which is $\ge 0 \ \ \forall x$ (where it exists).
# Since the second derivative is nonnegative everywhere and $\phi$ is continuously differentiable (including at $x = 1$), the truncated quadratic loss is convex on all of $\mathbb{R}$.
# The plot is below.
x2 = np.linspace(-15,1,100)
y2 = np.power(1-x2,2)
plt.title('Truncated Quadratic Loss')
plt.plot(x2,y2)
plt.show()
# __3. Hinge Loss__
# First, $\phi'(x) = (-1)1_{\{x \le 1\}}$ (undefined at $x = 1$).
# Where it exists, $\phi''(x) = 0 \ge 0$.
# Moreover, $\phi(x) = \max\{1-x, 0\}$ is the pointwise maximum of two affine functions, so the hinge loss is convex (though not strictly convex, and not differentiable at $x = 1$).
# The plot is below.
x3 = np.linspace(-15,1,100)
y3 = 1-x3
plt.title('Hinge Loss')
plt.plot(x3,y3)
plt.show()
# __4. Sigmoid Loss__
# First, $\phi'(x) = -ksech^2(kx)$.
# Now, $\phi''(x) = 2k^2tanh(kx)sech^2(kx)$, which is positive for $x > 0$.
# Thus, this loss is convex for $x>0$.
# The plot is below.
x4 = np.linspace(-15,15,100)
y4 = 1-np.tanh(x4)
plt.title('Sigmoid Loss')
plt.plot(x4,y4)
plt.show()
# __Exercise 1.2__ (10 pts)
#
# Consider the truncated quadratic loss from (1.1.2). For brevity let $a_+ = max\{a,0\}$ denote the positive part of $a$.
#
# $$\ell(y_i,x_i,\beta) = \phi(y_i x_i^\top \beta) = (1-y_i x_i^\top \beta)_+^2$$
#
# 1. Consider the empirical risk, $R_n$ (the average loss over a training set) for the truncated quadratic loss. What is gradient of $R_n$ in $\beta$? Does it always exists?
# 1. Demonstrate that the gradient does not have continuous derivative everywhere.
# 1. Recall that support vector machines used the hinge loss $(1 - y_i x_i^\top)_+$ with a ridge regularization. Write the regularized optimization method for the truncated quadratic loss, and derive the gradient of the regularized empirical risk.
# 1. Because the loss does not have continuous Hessian, instead of the Newton method, we will use a quasi-Newton method that replaces the Hessian with a quasi-Hessian (another matrix that is meant to approximate the Hessian). Consider the following quasi-Hessian of the regularized objective to be $$G(\beta) = \frac 1n \sum_i 2 (x_i x_i^\top 1\{ y_i x_i^\top \beta > 1 \}) + 2 \lambda.$$ Demonstrate that the quasi-Hessian is positive definite, and write pseudo-code for quasi-Newton optimization. (There was a correction in the lectures, that when minimizing a function you should subtract the gradient $\beta \gets \beta - H^{-1} g$).
# #### Solutions
# __1.__ First, we compute the gradient of $\ell (y_i, x_i, \beta)$ in $\beta$.
# Expanding, we have $\ell(y_i,x_i,\beta) = (1-y_ix_{i1}\beta_0-...-y_ix_{ip}\beta_{p-1})_+^2$.
# For ease of notation, let $w = (1-y_ix_{i1}\beta_0-...-y_ix_{ip}\beta_{p-1})$
# Now, $\nabla_\beta\ell = (\partial_{\beta_0}\ell,...,\partial_{\beta_{p-1}}\ell) = (-2y_ix_{i1}w,...,-2y_ix_{ip}w)$.
# So $\nabla_\beta R_n = \frac{1}{n}\sum_i(-2y_ix_{i1}w,...,-2y_ix_{ip}w)1_{\{y_ix_i^\top\beta \le 1\}}$
# __2.__ The gradient does not have continuous derivative at $y_ix_i^\top\beta = 1$.
# __3.__ For truncated quadratic loss, the regularized optimization problem is
# $\min_{\beta} \sum_i(1-y_ix_i^\top\beta)_+^2 + \lambda ||\beta||_2^2$,
# with $\nabla_\beta\lambda||\beta||_2^2 = 2\lambda\beta$.
# Thus, $\nabla_\beta R_n^* = \frac{1}{n}\sum_i(-2y_ix_{i1}w,...,-2y_ix_{ip}w)1_{\{y_ix_i^\top\beta \le 1\}} + 2\lambda\beta$.
# __4.__ Pseudo-code for quasi-Newton method:
# 1. Start with initial estimate for $\beta: \beta^{(0)}$
# 2. For $t = 0,1,...$, compute
# $\beta^{(t+1)} = \beta^{(t)} - \frac{\nabla_\beta R_n^*}{G(\beta)}$
# # HW3 Logistic, LDA, SVM
import sklearn.linear_model as skl_lm
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve, roc_curve, auc
from sklearn import preprocessing, svm
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
# dataset path
data_dir = "~/Dropbox/208/208-HW3-pamelot317"
# The following code reads the data, subselects the $y$ and $X$ variables, and makes a training and test split. This is the Abalone dataset and we will be predicting the age. V9 is age, 1 represents old, 0 represents young.
# +
sample_data = pd.read_csv(data_dir+"/hw3.csv", delimiter=',')
sample_data.V1=sample_data.V1.factorize()[0]
X = np.array(sample_data.iloc[:,range(0,8)])
y = np.array(sample_data.iloc[:,8])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=0)
# -
# __Exercise 2.1__ (10 pts) Perform logistic regression using Newton conjugate gradient. You should save the predicted probabilities, and save the roc and pr curves (using roc_curve and precision_recall_curve) computed using the test set.
# Logistic regression fit with the Newton conjugate-gradient solver.
clf = skl_lm.LogisticRegression(solver='newton-cg')
clf.fit(X_train, y_train)
prob1 = clf.predict_proba(X_test)
# The model is already fitted above; calling fit() again just repeats the
# identical training run, so use the fitted estimator directly.
y_score1 = clf.decision_function(X_test)
fpr1, tpr1, roc_thresholds1 = roc_curve(y_test, y_score1)
precision1, recall1, thresholds1 = precision_recall_curve(y_test, y_score1)
# __Exercise 2.2__ (10 pts) Do the same for linear discriminant analysis.
# Linear discriminant analysis: fit once, then reuse the fitted model
# (the original refit the estimator on the same data for every call).
lda = LinearDiscriminantAnalysis()
lda.fit(X_train, y_train)
pred = lda.predict(X_test)
pred_p = lda.predict_proba(X_test)
y_score2 = lda.decision_function(X_test)
fpr2, tpr2, roc_thresholds2 = roc_curve(y_test, y_score2)
precision2, recall2, thresholds2 = precision_recall_curve(y_test, y_score2)
# __Exercise 2.3__ (10 pts) Do the same for support vector machines.
# Support vector machine (RBF kernel by default).  probability=True enables
# predict_proba via internal cross-validated Platt scaling.
svm1 = svm.SVC(probability=True)
svm1.fit(X_train, y_train)
prob2 = svm1.predict_proba(X_test)
# Reuse the fitted model instead of refitting: refitting an SVC with
# probability=True repeats an expensive internal cross-validation.
y_score3 = svm1.decision_function(X_test)
fpr3, tpr3, roc_thresholds3 = roc_curve(y_test, y_score3)
precision3, recall3, thresholds3 = precision_recall_curve(y_test, y_score3)
# __Exercise 2.4__ (10 pts) Plot and compare the ROC and PR curves for the above methods.
plt.title('Receiver Operating Characteristic')
plt.plot(fpr1,tpr1,'b-',label='Logistic Regression')
plt.plot(fpr2,tpr2,'r-',label='LDA')
plt.plot(fpr3,tpr3,'y-',label='SVM')
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.legend(loc='best')
plt.show()
# The ROC curves look very similar among the three methods. Overall, the methods did well in terms of prediction, with better tpr comparied to fpr. However, it appears that SVM did not perform quite as well as the others, though it still did well.
plt.title('Precision Recall Curve')
plt.plot(recall1,precision1,'b-',label='Logistic Regression')
plt.plot(recall2,precision2,'r-',label='LDA')
plt.plot(recall3,precision3,'y-',label='SVM')
plt.ylabel('Precision')
plt.xlabel('Recall')
plt.legend(loc='best')
plt.show()
# Overall, the three methods have similar precision recall curves, though it appears that SVM is not as good at higher recall than the other two methods.
| HW3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import variables
# !pip install hyperopt
from hyperopt import tpe
from hyperopt import STATUS_OK
from hyperopt import Trials
from hyperopt import hp
from hyperopt import fmin
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
import pandas
import mlflow
# # Load Data
pandas_df = pandas.read_csv("training_data.csv")
X=pandas_df.iloc[:,:-1]
y=pandas_df.iloc[:,-1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=4284, stratify=y)
# # Define objective function
# +
N_FOLDS = 4
MAX_EVALS = 10
def objective(params, n_folds=N_FOLDS):
    """Objective function for Logistic Regression hyperparameter tuning.

    Evaluates one hyperparameter candidate with n_folds-fold cross-validation
    (macro-F1) inside a nested MLflow run and returns the dict format that
    hyperopt's fmin expects.  hyperopt minimizes, so loss = 1 - best score.
    """
    # Log parameters/metrics of the nested run automatically.
    mlflow.sklearn.autolog()
    with mlflow.start_run(nested=True):
        clf = LogisticRegression(**params, random_state=0, verbose=0)
        # BUG FIX: cv was hard-coded to 5, silently ignoring the n_folds
        # argument (and the N_FOLDS = 4 constant).  Use the parameter.
        scores = cross_val_score(clf, X_train, y_train, cv=n_folds,
                                 scoring='f1_macro')
        # Extract the best score across folds
        best_score = max(scores)
        # Loss must be minimized
        loss = 1 - best_score
        # Dictionary with information for evaluation
        return {'loss': loss, 'params': params, 'status': STATUS_OK}
# -
# # Define parameter space
# +
space = {
'warm_start' : hp.choice('warm_start', [True, False]),
'fit_intercept' : hp.choice('fit_intercept', [True, False]),
'tol' : hp.uniform('tol', 0.00001, 0.0001),
'C' : hp.uniform('C', 0.05, 3),
'solver' : hp.choice('solver', ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']),
'max_iter' : hp.choice('max_iter', range(5,1000))
}
# -
# # Create experiment
mlflow.set_experiment("Hyperopt_Optimization")
# # Define Optimization Trials
# +
# Algorithm
tpe_algorithm = tpe.suggest
# Trials object to track progress
bayes_trials = Trials()
with mlflow.start_run():
best = fmin(fn = objective, space = space, algo = tpe.suggest, max_evals = MAX_EVALS, trials = bayes_trials)
# -
best
| Chapter04/gradflow/notebooks/hyperopt_optimization_logistic_regression_mlflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RamanEbrahimi/ComputationalPhysics/blob/main/RandomDeposition.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/", "height": 627} id="_rPWgL9kzqig" outputId="4ba974f5-1473-421b-dac1-006e63c60db2"
import matplotlib.pyplot as plt
import numpy as np
import random as rn
# Graphical Visualization
# First we set our initial conditions and variables
# Random-deposition growth: particles drop onto uniformly random lattice
# columns; every `step` depositions the newly added layer is drawn in an
# alternating color so the growth bands are visible.
total_number_of_particles = 1000000   # total depositions to simulate
time = 1                              # deposition counter
lattice_size = 200                    # number of lattice columns (L)
place = [0] * lattice_size            # current column heights
temp_place = place.copy()             # heights at the last drawn snapshot
step = 100000                         # depositions between snapshots
while time < total_number_of_particles:
    # Randomly adding particles:
    place[rn.randint(0, lattice_size-1)] += 1
    # Plotting and coloring conditions:
    time += 1
    if time % step == 0:
        # Alternate blue/green bands for successive snapshots.
        if (time/step) % 2 == 0:
            for i in range(0, lattice_size):
                plt.bar(i, place[i]-temp_place[i], bottom=temp_place[i], width=1, align='edge', color='blue')
            temp_place = place.copy()
        else:
            for i in range(0, lattice_size):
                plt.bar(i, place[i]-temp_place[i], bottom=temp_place[i], width=1, align='edge', color='green')
            temp_place = place.copy()
plt.title('L = ' + str(lattice_size) + ', particles = ' + str(total_number_of_particles))
plt.style.use('bmh')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 322} id="DZVo4BtmIdba" outputId="33969396-c694-4634-ba88-815f37faff83"
# Variance-Time plot
# Setting our initial conditions and variables:
# The deposition count doubles each outer iteration and the interface width
# is recorded after each doubling, giving evenly spaced points on a log-log
# time axis; the fitted slope estimates the growth exponent.
total_number_of_particles = 4000000
time = 1
lattice_size = 200
place = [0] * lattice_size
step = 2                      # geometric factor between sample times
variance_points = []          # measured width w(t) at each sample time
time_points = []              # the sample times t
while time < total_number_of_particles:
    # Random placement loop:
    for i in range(time, step * time):
        place[rn.randint(0, lattice_size-1)] += 1
    time *= step
    # Variance calculation loop:
    # NOTE(review): `var` is the square root of the summed squared deviations
    # from the mean height t/L -- an unnormalized width (no 1/L factor).
    # This only rescales w(t) by a constant, so the log-log slope is
    # unaffected, but confirm before comparing magnitudes with references.
    var = 0
    for i in range(0, lattice_size):
        var += np.power(place[i]-time/lattice_size, 2)
    var = np.power(var, 0.5)
    variance_points.append(var)
    time_points.append(time)
plt.loglog(time_points, variance_points, 'b.')
plt.title('L = ' + str(lattice_size) + ' , particles = ' + str(total_number_of_particles))
plt.xlabel('t')
plt.ylabel('w(t)')
plt.style.use('bmh')
m, b = np.polyfit(np.log10(time_points), np.log10(variance_points), 1)
print('Slope is = ' + str(m))
plt.show()
# + id="ENoR1jJDJEuX"
# for style in plt.style.available:
# print(style)
| RandomDeposition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
import lambda_function
# -
# The handler under test ignores the Lambda context object, so None suffices.
context = None
# Canned Amazon API Gateway proxy-integration event.  Only "body" carries the
# test-specific payload (a JSON-encoded search query and user id); the other
# keys reproduce the standard API Gateway sample event so the handler sees a
# realistically shaped request.
event = {
    "body": "{\"query\":\"тендер на канцтовары\", \"user\":\"90872\"}",
    "resource": "/{proxy+}",
    "requestContext": {
        "resourceId": "123456",
        "apiId": "1234567890",
        "resourcePath": "/{proxy+}",
        "httpMethod": "POST",
        "requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef",
        "accountId": "123456789012",
        "identity": {
            "apiKey": None,
            "userArn": None,
            "cognitoAuthenticationType": None,
            "caller": None,
            "userAgent": "Custom User Agent String",
            "user": None,
            "cognitoIdentityPoolId": None,
            "cognitoIdentityId": None,
            "cognitoAuthenticationProvider": None,
            "sourceIp": "127.0.0.1",
            "accountId": None
        },
        "stage": "prod"
    },
    # Sample OAuth-style query parameters from the canned event (unused by
    # the search query in "body" above).
    "queryStringParameters": {
        "code": "4/BAEIPRHLj6EoJZwLSJVJ3Y2nSGykpGhrr-ProDBlgCdbdcWUGD07-V9bDPB49zqPKjh_awnfnzAAiAWURHHwPb4",
        "scope": "https://www.googleapis.com/auth/drive.file",
        "state": "{\"id\": \"1\", \"salt\": \"$6$3YM57aicXf18WbdG\"}"
    },
    "headers": {
        "Via": "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)",
        "Accept-Language": "en-US,en;q=0.8",
        "CloudFront-Is-Desktop-Viewer": "true",
        "CloudFront-Is-SmartTV-Viewer": "false",
        "CloudFront-Is-Mobile-Viewer": "false",
        "X-Forwarded-For": "127.0.0.1, 127.0.0.2",
        "CloudFront-Viewer-Country": "US",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Upgrade-Insecure-Requests": "1",
        "X-Forwarded-Port": "443",
        "Host": "1234567890.execute-api.us-east-1.amazonaws.com",
        "X-Forwarded-Proto": "https",
        "X-Amz-Cf-Id": "cDehVQoZnx43VYQb9j2-nvCh-9z396Uhbp027Y2JvkCPNLmGJHqlaA==",
        "CloudFront-Is-Tablet-Viewer": "false",
        "Cache-Control": "max-age=0",
        "User-Agent": "Custom User Agent String",
        "CloudFront-Forwarded-Proto": "https",
        "Accept-Encoding": "gzip, deflate, sdch"
    },
    "pathParameters": {
        "proxy": "path/to/resource"
    },
    "httpMethod": "POST",
    "stageVariables": {
        "baz": "qux"
    },
    "path": "/path/to/resource"
}
# Invoke the handler directly, as Lambda would, and show its response.
print(lambda_function.lambda_handler(event, context))
| lambda_func/bot_test_integration_request.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.append('/home/jiajunb/neural-dimension-reduction')
# +
import os
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.nn import functional as F
from src.models.distance_modeling import SurveyorDataSet, Surveyor, thesis_kl_div_add_mse_loss
import copy
from sklearn.metrics import classification_report
torch.manual_seed(0)
# +
def far_func2(sorted_dist: torch.Tensor, indices: torch.Tensor):
    """Select the 'far' neighbors: every column from index 21 onward.

    Columns 1..20 are the close neighborhood (see close_func2); column 0 is
    presumably each point's distance to itself, since close_func2 skips it
    too -- confirm against the dataset builder.  Returns the (distances,
    indices) tail slices with row order unchanged.

    Note: annotations fixed from ``torch.tensor`` (a factory function) to the
    actual type ``torch.Tensor``.  A commented-out variant that sampled 20
    random far neighbors from the tail was removed as dead code.
    """
    return sorted_dist[:, 21:], indices[:, 21:]
def close_func2(sorted_dist: torch.tensor, indices: torch.tensor):
    """Select the 20 nearest neighbors: columns 1 through 20 of each row."""
    close = slice(1, 21)
    return sorted_dist[:, close], indices[:, close]
test_dataset = SurveyorDataSet.from_df('/home/jiajunb/neural-dimension-reduction/data/processed/sample/dev.csv', close_func2, far_func2)
# +
def val_one_epoch(val_loader, model):
    """Run one evaluation pass over val_loader.

    Returns (avg cross-entropy loss, avg thesis loss, accuracy) and prints a
    sklearn classification report as a side effect.

    NOTE(review): relies on the module-level ``device``.  Each loss term
    accumulated below is already a per-batch mean, yet the totals are divided
    by the number of *examples* (len(y_preds)) -- confirm this matches how
    the training-time metrics were normalized before comparing numbers.
    """
    model.eval()  # switch off dropout / batch-norm updates
    loss_fn1 = nn.CrossEntropyLoss()
    loss_fn2 = thesis_kl_div_add_mse_loss
    preds_list = list()
    labels_list = list()
    val_xentropy_loss = 0.
    val_thesis_loss = 0.
    with torch.no_grad():  # no gradients needed during evaluation
        for i, batch in enumerate(val_loader):
            x1, x2, labels, q = batch
            x1, x2, q = x1.to(device), x2.to(device), q.to(device)
            # Model returns classification logits plus auxiliary outputs.
            logits, p, out1, out2 = model(x1, x2, q, labels=None)
            # argmax over softmax equals argmax over logits; kept as written.
            preds = torch.argmax(F.softmax(logits, dim=1), dim=1)
            preds_list.append(preds.cpu())
            labels_list.append(labels.cpu())
            labels = labels.to(device)
            val_xentropy_loss += loss_fn1(logits, labels).item()
            val_thesis_loss += loss_fn2(p, q).item()
    y_preds = torch.cat(preds_list)
    y_golds = torch.cat(labels_list)
    accuracy = float((y_preds == y_golds).sum().item()) / len(y_preds)
    print(classification_report(y_golds, y_preds))
    return val_xentropy_loss / len(y_preds), val_thesis_loss / len(y_preds), accuracy
# -
device = torch.device('cuda:1')
best_model = Surveyor()
best_model.load_state_dict(torch.load('../saves/surveyor.on.multi-neg.sample.100')['best_model'])
best_model.eval()
best_model = best_model.to(device)
# +
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=1000, pin_memory=True)
avg_xentropy_loss, avg_thesis_loss, val_accuracy = val_one_epoch(test_loader, best_model)
print(f'val_cross_entropy_loss: {avg_xentropy_loss:.4f} '
f'val_thesis_loss: {avg_thesis_loss:.4f} '
f'val_accuracy: {val_accuracy:.4f} ')
# -
len(test_dataset)
torch.sum(test_dataset.labels)
len(test_dataset.labels)
torch.sum(1 - test_dataset.labels)
len(test_dataset)
| notebooks/binary_evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="eCMLmXwiWXnQ"
import pandas as pd
# + id="KBwJYXZsWjqn"
df=pd.read_csv("/content/diabetes.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="jxTuqhfeWoJA" outputId="07081ce7-d3bb-4d19-d877-c5664222b3da"
df
# + colab={"base_uri": "https://localhost:8080/", "height": 346} id="0DD2pmlTEMEk" outputId="67af3e58-c606-4f5b-efc7-d7b59b19e81e"
df.corr()
# + id="y9TQSjU9Wq4x" colab={"base_uri": "https://localhost:8080/", "height": 417} outputId="49acc6fb-550c-475b-cfb4-edbd89b4be98"
df = df.drop(columns=["BloodPressure","SkinThickness"])
df
# + [markdown] id="FYHW61RJJeC4"
# #**1.SVM**
# + id="dCT9OQ05EtOF"
x = df.iloc[:,0:-1].values
y = df.iloc[:,-1].values
# + id="K1EOUQiPW-Hc"
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.1, random_state = 0)
# + id="0jo7IhcRXU_b"
from sklearn.svm import SVC
# + id="HkeV09TYFRDX"
svc=SVC(kernel="linear",random_state=0)
# + id="SJt9URIQFSj3"
svc1=SVC(kernel="rbf",random_state=0)
# + colab={"base_uri": "https://localhost:8080/"} id="X0zFLDnKFS0_" outputId="cd2e978f-87e3-4d6a-ffb0-81aa5a88aa21"
svc.fit(x_train,y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="fg-DoxUDFZfz" outputId="8846628d-6092-49ec-bcaf-87d0a8dcd0f2"
svc1.fit(x_train,y_train)
# + id="1GN7AIICFeUf"
y_pred=svc.predict(x_test)
# + id="Dbg-TkDgFhOG"
y_pred1=svc1.predict(x_test)
# + colab={"base_uri": "https://localhost:8080/"} id="b73gqWPaFjna" outputId="ae5e2511-86fb-4106-9623-50bb61251db7"
y_pred
# + colab={"base_uri": "https://localhost:8080/"} id="VMYZ-_YBFlR2" outputId="9bcb0ccb-840d-43eb-f4f3-03b1af7e62b9"
y_pred1
# + colab={"base_uri": "https://localhost:8080/"} id="wMXWdqbUFoSS" outputId="149a7661-efc5-41ba-8039-c8f772f872fe"
y_test
# + id="2DkU0W-_Foi_"
from sklearn.metrics import confusion_matrix,accuracy_score
# + colab={"base_uri": "https://localhost:8080/"} id="5dT1BXooFvn-" outputId="7829f359-6090-42eb-e3b7-8ce628bf415f"
confusion_matrix(y_test,y_pred)
# + colab={"base_uri": "https://localhost:8080/"} id="Q4Lh0anoFweg" outputId="9e58d94d-efdb-46b6-a49b-0c9e5577f579"
accuracy_score(y_test,y_pred)
# + colab={"base_uri": "https://localhost:8080/"} id="iXAEq0LgFw5Y" outputId="bdda8303-00cb-4b15-a67e-906e20b6e38f"
confusion_matrix(y_test,y_pred1)
# + colab={"base_uri": "https://localhost:8080/"} id="rKobvX1-F4Sj" outputId="29269504-1f8d-4257-cfb0-8f158c056c89"
accuracy_score(y_test,y_pred1)
# + [markdown] id="iQ978r_gbpwX"
# # **2.Logistic Regression**
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="CcGr0oF-bteQ" outputId="abbbd3fa-afe7-403a-bd47-5046c596533a"
df
# + id="G2LHAaEucL4X"
x = df.iloc[:,0:-1].values
y = df.iloc[:,-1].values
# + colab={"base_uri": "https://localhost:8080/"} id="gnhceGa9cdvP" outputId="d922026c-dac7-44f8-bdec-9b9898807e40"
x
# + colab={"base_uri": "https://localhost:8080/"} id="c3K04GOgceEu" outputId="f8a55d67-f5c2-4e3a-aef7-0288134a1287"
y
# + id="j6IP0tyLcfhq"
from sklearn.model_selection import train_test_split
# + id="tnRjqFCjcl-A"
x_train,x_test,y_train,y_test = train_test_split(x,y, test_size = 0.1, random_state=0)
# + id="xo3BetL7ctm8"
from sklearn.linear_model import LogisticRegression
# + id="Js249bDNcuz7"
Log = LogisticRegression()
# + colab={"base_uri": "https://localhost:8080/"} id="70Ufe_jpcv4P" outputId="8e45dd61-2762-4072-d375-cdd77fd918b8"
Log.fit(x_train, y_train)
# + id="moxs2jtMcxMp"
y_pred = Log.predict(x_test)
# + colab={"base_uri": "https://localhost:8080/"} id="m4b9MST6cylo" outputId="fa6c3160-c33f-4fa5-975c-667595b84116"
y_pred
# + colab={"base_uri": "https://localhost:8080/"} id="XmTzyoqxczld" outputId="fb12a22c-8c6f-43c8-f1da-5a50521fefbd"
y_test
# + id="bWpual7xc1b5"
from sklearn.metrics import accuracy_score, confusion_matrix
# + colab={"base_uri": "https://localhost:8080/"} id="o4zYeVlqc2pk" outputId="66d2a89a-58e3-458c-c6cb-3873ca6714bf"
accuracy_score(y_test, y_pred)
# + colab={"base_uri": "https://localhost:8080/"} id="1n8DRUc7c5jG" outputId="4213d789-37a4-469a-f4d2-d74a993f5a81"
confusion_matrix(y_test, y_pred)
# + id="98sBKgYggK2b"
# + [markdown] id="w9cGJS7HUgI6"
# #**Decision Tree**
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="7TV-wOF1UkPv" outputId="1145bfe6-3d2f-45aa-f295-dd334edc2e44"
df
# + id="qd6PxVNWVGVa"
x = df.iloc[:,:-1].values
y = df.iloc[:,-1].values
# + id="NxOhROJFVKfR"
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.1, random_state = 0)
# + colab={"base_uri": "https://localhost:8080/"} id="EFv-QEwQVLl6" outputId="b7c7debe-7555-44ad-aab1-99c2fe4edfec"
x_test.shape
# + colab={"base_uri": "https://localhost:8080/"} id="z9WfssWnVMkC" outputId="32550d3c-5e50-43ba-b19b-31589c7d3ee4"
y_test.shape
# + id="mf96uDKAVN3o"
from sklearn.tree import DecisionTreeClassifier
# + id="rh9VAkGmVO7x"
dtc = DecisionTreeClassifier(random_state=0)
# + colab={"base_uri": "https://localhost:8080/"} id="MMekcP1RVQES" outputId="8052c5e7-4326-4292-be03-24a4993322ae"
dtc.fit(x_train, y_train)
# + id="d9gtEz3eVRXg"
y_pred = dtc.predict(x_test)
# + colab={"base_uri": "https://localhost:8080/"} id="YHIcAx-yVUUu" outputId="4c9d82a4-c096-472c-d8f0-eb90354e348e"
y_pred
# + id="dEcqsvBfVV3e"
from sklearn.metrics import confusion_matrix, accuracy_score
# + colab={"base_uri": "https://localhost:8080/"} id="pDVr9Ea5VXrI" outputId="8e6a55ef-a573-4a33-f2ca-381c2a40e6d9"
confusion_matrix(y_test, y_pred)
# + colab={"base_uri": "https://localhost:8080/"} id="yd5asIUaVYyw" outputId="1001abdf-df89-47c4-ed7a-a8b239f2638b"
accuracy_score(y_test, y_pred)
# + id="zPZ9X9dMVbhJ"
# + [markdown] id="dW6gPwlbVoWW"
# # **KNN**
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="qXz7cG3fVrYU" outputId="d6eb2a5a-cbf9-4985-c096-e3a451568192"
df
# + id="vQuUiQ8GVuvm"
x=df.iloc[:,0:-1].values
y=df.iloc[:,-1].values
# + colab={"base_uri": "https://localhost:8080/"} id="Gsfny5n2VyAY" outputId="0a852a5b-0ff9-4d75-9414-ab38df1a96d3"
x
# + id="hvZr_H8GWTJt"
x_train,x_test,y_train,y_test = train_test_split(x,y, test_size = 0.1, random_state= 0)
# + id="K-V363KKVzUf"
from sklearn.neighbors import KNeighborsClassifier
# + id="zFUGjguhV5dM"
# k-nearest neighbors with k=5 and Euclidean distance.
# NOTE(review): KNN is distance-based and the features are used unscaled
# here; consider standardizing x before fitting -- verify whether the other
# models in this notebook were evaluated the same way.
Knn = KNeighborsClassifier(n_neighbors=5, metric="euclidean")
# + colab={"base_uri": "https://localhost:8080/"} id="w_rPt12KV7Oo" outputId="5eef3e26-54fe-4ed6-f6da-be3ce44d3c45"
Knn.fit(x_train, y_train)
# + id="fn0odmWuV8ed"
y_pred = Knn.predict(x_test)
# + colab={"base_uri": "https://localhost:8080/"} id="2Caqkpw9V9x_" outputId="c5c7722b-3551-4f02-b0eb-801d7dc422bf"
y_pred
# + colab={"base_uri": "https://localhost:8080/"} id="TWlGRJ8wV-up" outputId="10925f48-d17e-4f1b-897a-8fe2da0350db"
y_test
# + colab={"base_uri": "https://localhost:8080/"} id="Irenf4V2WA0p" outputId="223cc081-9e89-4103-d56c-e3a09feee46c"
accuracy_score(y_test, y_pred)
# + colab={"base_uri": "https://localhost:8080/"} id="RJ59IZOMWEdZ" outputId="f0140a73-4c26-45fc-fa68-c25dfd64822c"
confusion_matrix(y_test, y_pred)
# + id="QZTORu0BWGT9"
| Major_Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib notebook
# %gui qt
# ## Peer Review Demo
#
# This demo uses a laptop web camera as an **Ophyd** device to illustrate how **typhon** and a **Jupyter Notebook** can work in tandem to help guide an operator through a specific process
#
# First, let us load the laptop and create and automatically create a working display:
from laptop import lp
# The laptop should be ready to use. Simply calling `trigger` will grab a new image and push it through the facial recognition pipeline
status = lp.trigger()
# Let's take a look at the image in the notebook real quick using `matplotlib`
import matplotlib.pyplot as plt
plt.imshow(lp.image.get())
# ## Display Setup
# **Typhon** should quickly be able to autogenerate a screen for us. We just have to add it to the suite
import typhon
suite = typhon.TyphonSuite()
suite.show()
suite.add_device(lp)
suite.show_subdisplay(lp)
# Not bad, we can do better. Let us load some of the nice templates we made before. In practice, the rules of which devices use which screens will be kept in a common file so this step will be unnecessary. We can also switch to the darker style that ships with **Typhon**
# +
import os.path
embedded = os.path.abspath('embedded_screen.ui')
detailed = os.path.abspath('detailed_screen.ui')
suite.setStyleSheet("""\
TyphonDeviceDisplay[device_class='laptop.Laptop']
{{qproperty-force_template: '{}';}}
TyphonDeviceDisplay[device_class='laptop.Laptop'][display_type='1']
{{qproperty-force_template: '{}';}}
""".format(embedded, detailed))
# -
typhon.use_stylesheet(dark=True)
# ## Automated Procedures
#
# Besides giving us a nice way to inspect the structure of devices, **Ophyd** also gives a uniform interface to **bluesky**. In other words, since we have implemented the **trigger** function. **bluesky** can use our laptop as a detector in any of its builtin scans. Let's setup a **RunEngine** and check it out
# +
import bluesky.plans as bp
from bluesky import RunEngine
from bluesky.callbacks import LivePlot
from bluesky.utils import install_qt_kicker, install_nb_kicker
install_qt_kicker(update_rate=0.01)
install_nb_kicker()
# -
RE = RunEngine()
face_plot = LivePlot('teddy_laptop_faces')
# We can contract the ROI that our webcam uses by an arbitrary value either through the command line or through the GUI
lp.h.put(200)
# Now we are going to execute a very simple scan where we move our ROI vertically downwards. We want to use the laptop as our only detector, and then we can use the **y** configuration attribute to scan from two points with a specified number of steps. We also pass in our plotting callback to give some in-situ feedback
RE(bp.scan([lp], lp.y, 100, 500, 10), (face_plot))
# Now we may have a routine that requires some action from the operator. In these cases we often know the end condition we are looking for, just not how to get there. For instance, the scan below will continue taking pictures until we see a U.S President.
president_plot = LivePlot('teddy_laptop_presidents')
from plans import wait_for_value
RE(wait_for_value([lp], 'teddy_laptop_presidents', 1, 0), (face_plot, president_plot))
| jupyter_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **This notebook is an exercise in the [SQL](https://www.kaggle.com/learn/intro-to-sql) course. You can reference the tutorial at [this link](https://www.kaggle.com/dansbecker/as-with).**
#
# ---
#
# # Introduction
#
# You are getting to the point where you can own an analysis from beginning to end. So you'll do more data exploration in this exercise than you've done before. Before you get started, run the following set-up code as usual.
# Set up feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.sql.ex5 import *
print("Setup Complete")
# You'll work with a dataset about taxi trips in the city of Chicago. Run the cell below to fetch the `chicago_taxi_trips` dataset.
# +
from google.cloud import bigquery
# Create a "Client" object
client = bigquery.Client()
# Construct a reference to the "chicago_taxi_trips" dataset
dataset_ref = client.dataset("chicago_taxi_trips", project="bigquery-public-data")
# API request - fetch the dataset
dataset = client.get_dataset(dataset_ref)
# -
# # Exercises
#
# You are curious how much slower traffic moves when traffic volume is high. This involves a few steps.
#
# ### 1) Find the data
# Before you can access the data, you need to find the table name with the data.
#
# *Hint*: Tab completion is helpful whenever you can't remember a command. Type `client.` and then hit the tab key. Don't forget the period before hitting tab.
# Your code here to find the table name
tables = list(client.list_tables(dataset_ref))
[table.table_id for table in tables]
# +
# Write the table name as a string below
table_name = 'taxi_trips'
# Check your answer
q_1.check()
# -
# For the solution, uncomment the line below.
# +
#q_1.solution()
# -
# ### 2) Peek at the data
#
# Use the next code cell to peek at the top few rows of the data. Inspect the data and see if any issues with data quality are immediately obvious.
# Your code here
table_ref = dataset_ref.table('taxi_trips')
table = client.get_table(table_ref)
# Preview the first five rows to eyeball data-quality issues
client.list_rows(table, max_results=5).to_dataframe()
# After deciding whether you see any important issues, run the code cell below.
# Check your answer (Run this code cell to receive credit!)
q_2.solution()
# ### 3) Determine when this data is from
#
# If the data is sufficiently old, we might be careful before assuming the data is still relevant to traffic patterns today. Write a query that counts the number of trips in each year.
#
# Your results should have two columns:
# - `year` - the year of the trips
# - `num_trips` - the number of trips in that year
#
# Hints:
# - When using **GROUP BY** and **ORDER BY**, you should refer to the columns by the alias `year` that you set at the top of the **SELECT** query.
# - The SQL code to **SELECT** the year from `trip_start_timestamp` is <code>SELECT EXTRACT(YEAR FROM trip_start_timestamp)</code>
# - The **FROM** field can be a little tricky until you are used to it. The format is:
# 1. A backtick (the symbol \`).
# 2. The project name. In this case it is `bigquery-public-data`.
# 3. A period.
# 4. The dataset name. In this case, it is `chicago_taxi_trips`.
# 5. A period.
# 6. The table name. You used this as your answer in **1) Find the data**.
# 7. A backtick (the symbol \`).
# +
# Your code goes here
# Count taxi trips per calendar year.
# NOTE: the fully-qualified table path is backtick-quoted, as the
# instructions above describe -- the project id contains hyphens, which
# BigQuery standard SQL otherwise rejects/misparses.
rides_per_year_query = """
                       SELECT EXTRACT(YEAR FROM trip_start_timestamp) AS year, COUNT(1) AS num_trips
                       FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
                       GROUP BY year
                       ORDER BY year
                       """
# Set up the query (cancel the query if it would use too much of
# your quota)
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
rides_per_year_query_job = client.query(rides_per_year_query, job_config=safe_config)  # Your code goes here
# API request - run the query, and return a pandas DataFrame
rides_per_year_result = rides_per_year_query_job.to_dataframe()  # Your code goes here
# View results
print(rides_per_year_result)
# Check your answer
q_3.check()
# -
# For a hint or the solution, uncomment the appropriate line below.
# +
#q_3.hint()
#q_3.solution()
# -
# ### 4) Dive slightly deeper
#
# You'd like to take a closer look at rides from 2017. Copy the query you used above in `rides_per_year_query` into the cell below for `rides_per_month_query`. Then modify it in two ways:
# 1. Use a **WHERE** clause to limit the query to data from 2017.
# 2. Modify the query to extract the month rather than the year.
# +
# Your code goes here
# Count 2017 trips per month.
# NOTE: hyphenated project ids must be backtick-quoted in BigQuery
# standard SQL (see the hint in the previous exercise).
rides_per_month_query = """
                        SELECT EXTRACT(MONTH FROM trip_start_timestamp) AS month, COUNT(1) AS num_trips
                        FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
                        WHERE EXTRACT(YEAR FROM trip_start_timestamp) = 2017
                        GROUP BY month
                        ORDER BY month
                        """
# Set up the query
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
rides_per_month_query_job = client.query(rides_per_month_query, job_config=safe_config)  # Your code goes here
# API request - run the query, and return a pandas DataFrame
rides_per_month_result = rides_per_month_query_job.to_dataframe()  # Your code goes here
# View results
print(rides_per_month_result)
# Check your answer
q_4.check()
# -
# For a hint or the solution, uncomment the appropriate line below.
q_4.hint()
#q_4.solution()
# ### 5) Write the query
#
# It's time to step up the sophistication of your queries. Write a query that shows, for each hour of the day in the dataset, the corresponding number of trips and average speed.
#
# Your results should have three columns:
# - `hour_of_day` - sort by this column, which holds the result of extracting the hour from `trip_start_timestamp`.
# - `num_trips` - the count of the total number of trips in each hour of the day (e.g. how many trips were started between 6AM and 7AM, independent of which day it occurred on).
# - `avg_mph` - the average speed, measured in miles per hour, for trips that started in that hour of the day. Average speed in miles per hour is calculated as `3600 * SUM(trip_miles) / SUM(trip_seconds)`. (The value 3600 is used to convert from seconds to hours.)
#
# Restrict your query to data meeting the following criteria:
# - a `trip_start_timestamp` between **2017-01-01** and **2017-07-01**
# - `trip_seconds` > 0 and `trip_miles` > 0
#
# You will use a common table expression (CTE) to select just the relevant rides. Because this dataset is very big, this CTE should select only the columns you'll need to create the final output (though you won't actually create those in the CTE -- instead you'll create those in the later **SELECT** statement below the CTE).
#
# This is a much harder query than anything you've written so far. Good luck!
# +
# Your code goes here
# Per-hour trip counts and average speed for the first half of 2017.
# The CTE keeps only the columns needed downstream, to limit bytes scanned.
# NOTE: hyphenated project ids must be backtick-quoted in BigQuery
# standard SQL.
speeds_query = """
               WITH RelevantRides AS
               (
                   SELECT EXTRACT(HOUR FROM trip_start_timestamp) AS hour_of_day, trip_miles, trip_seconds
                   FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
                   WHERE trip_start_timestamp > '2017-01-01' AND trip_start_timestamp < '2017-07-01' AND trip_seconds > 0 AND trip_miles > 0
               )
               SELECT hour_of_day, COUNT(1) AS num_trips, 3600 * SUM(trip_miles) / SUM(trip_seconds) AS avg_mph
               FROM RelevantRides
               GROUP BY hour_of_day
               ORDER BY hour_of_day
               """
# Set up the query
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
speeds_query_job = client.query(speeds_query, job_config=safe_config)  # Your code here
# API request - run the query, and return a pandas DataFrame
speeds_result = speeds_query_job.to_dataframe()  # Your code here
# View results
print(speeds_result)
# Check your answer
q_5.check()
# -
# For the solution, uncomment the appropriate line below.
q_5.solution()
# That's a hard query. If you made good progress towards the solution, congratulations!
# # Keep going
#
# You can write very complex queries now with a single data source. But nothing expands the horizons of SQL as much as the ability to combine or **JOIN** tables.
#
# **[Click here](https://www.kaggle.com/dansbecker/joining-data)** to start the last lesson in the Intro to SQL micro-course.
# ---
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161314) to chat with other Learners.*
| Intro to SQL/5 As & With/exercise-as-with.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## For process the raw data collected from Mturk
#
# input: main_batches.csv; initial_batches.csv \\\
# generate: all_processed
# +
import pandas as pd
import ast
import json
import re
import numpy as np
import warnings
warnings.filterwarnings('ignore')
import random
import traceback
import html #for unescape & < >
import emoji
import os
# +
# NOTE(review): the notebook header says "initial_batches.csv" but the code
# reads 'initial_batch.csv' -- confirm which filename is correct.
def _collect_annotations(df, token_split_regex, strict=False):
    """Aggregate per-tweet MTurk annotations from a raw batch DataFrame.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw batch export with (at least) the columns InputId, WorkerId,
        Input.tweet, Answer.optradio, Answer.related_index,
        Answer.no-entity, Answer.html_output, WorkTimeInSeconds,
        AssignmentId.
    token_split_regex : str
        Regex with a capturing group (so delimiters are kept) used to
        tokenize the unescaped tweet text.
    strict : bool
        When True, assert that each labelled token span matches the text
        the worker highlighted (used for the small initial batch).

    Returns
    -------
    dict mapping tweet id -> record with the tweet text, its tokens, and
    per-worker ids, work times, sentence scores, BIO entity labels,
    relation labels and assignment ids.
    """
    # set tweet_id+worker_id as index and group by tweet_id
    grouped = {n: grp.loc[n].to_dict('index')
               for n, grp in df.set_index(['InputId', 'WorkerId']).groupby(level='InputId')}
    out = {}
    for tweet_id, collection in grouped.items():
        basic_info = list(collection.values())[0]
        # separate strings but keep delimiters
        txt = html.unescape(basic_info['Input.tweet'])
        temp = list(filter(None, re.split(token_split_regex, txt)))
        # remove space strings from list and convert into np array
        tweet_split = np.array(list(filter(str.strip, temp)))
        workerids = list(collection.keys())
        sentence_score = []
        entity_label = []
        related_label = []
        workertime = []
        assignmentID = []
        for workerid, record in collection.items():
            sentence_score.append(record['Answer.optradio'])
            workertime.append(record['WorkTimeInSeconds'])
            token_labels = np.array(['O'] * len(tweet_split), dtype=np.dtype(('U', 10)))
            relation_lables = np.array([0] * len(tweet_split))
            assignmentID.append(record['AssignmentId'])
            try:
                if record['Answer.related_index'] != '[]':
                    # flatten space-separated index groups into one int list
                    relation_lables_idx_str = sum(
                        [i.split(' ') for i in ast.literal_eval(record['Answer.related_index'])], [])
                    relation_lables_idx = list(map(int, relation_lables_idx_str))
                    relation_lables[relation_lables_idx] = 1
                # Answer.no-entity is NaN when the worker DID label entities
                if np.isnan(record['Answer.no-entity']) and re.split('[|]', record["Answer.html_output"])[1] != '':
                    html_output_list = ast.literal_eval(re.split('[|]', record["Answer.html_output"])[1])
                    for e in html_output_list:
                        if 'idx' in list(e.keys()):
                            if ' ' in e['idx']:
                                idx = list(map(int, e['idx'].split(' ')))
                            else:
                                idx = ast.literal_eval(e['idx'])
                            if type(idx) is int:
                                if strict:
                                    assert tweet_split[idx] == e['text']
                                token_labels[idx] = 'B-' + e['className'].split('-')[1]
                            else:
                                if strict:
                                    assert tweet_split[idx][0] == e['text'].split()[0] \
                                        and tweet_split[idx][-1] == e['text'].split()[-1]
                                idx = list(idx)
                                token_labels[idx[0]] = 'B-' + e['className'].split('-')[1]
                                token_labels[idx[1:]] = 'I-' + e['className'].split('-')[1]
            except Exception:
                traceback.print_exc()
                print('AssignmentId:', record['AssignmentId'],
                      'Answer.related_index:', record['Answer.related_index'])
            entity_label.append(token_labels.tolist())
            related_label.append(relation_lables.tolist())
        out[tweet_id] = {'tweet': txt, 'tweet_tokens': tweet_split.tolist(),
                         'workerid': workerids, 'workertime': workertime,
                         'sentence_score': sentence_score,
                         'entity_label': entity_label, 'related_label': related_label,
                         'assignmentID': assignmentID}
    return out


# Main batch: simpler delimiter set, no span-consistency asserts.
data_3000 = pd.read_csv('main_batches.csv')
new_dict = _collect_annotations(data_3000, '([,.!?:()[\]"\s+])')

# Initial batch: extra delimiters (slashes, curly quotes) and strict checks.
data_200 = pd.read_csv('initial_batch.csv')
new_dict_i = _collect_annotations(data_200, '([,.!?:()[\]\\/"“”\s+])', strict=True)

d_3000 = pd.DataFrame.from_dict(new_dict).T
d_200 = pd.DataFrame.from_dict(new_dict_i).T
d_all_processed = pd.concat([d_3000, d_200]).drop_duplicates(subset='tweet')
# -
| data_all/process_all_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # *CitedLoci* pipeline: a step-by-step quick start guide
# This notebook will show you step-by-step how to use the *CitedLoci* pipeline to index canonical citations (e.g. Hom. *Il.* 1,1-10) from plain text documents.
#
# The diagram below indicates the pipeline components that are involved at each step of the process.
# 
# # Running the pipeline
# ## Introduction
# Upon installation, the Python library `CitationExtractor` will also install a command-line script called `citedloci-pipeline` which allows you to execute the various steps of the citation extraction pipeline directly from the command-line.
# !citedloci-pipeline --version
# To simplify the pipeline execution, all configuration parameters are stored in the file [`config/project.ini`](config/project.ini), which looks as follows:
# !cat config/project.ini
# To speed up processing, some Python objects that have longer initialisation time (e.g. because they require some training) are already pre-computed and stored as **pickled objects** in the folder [`pickles/`](pickles/).
#
# Input and output data (both intermediate and final) can be found respectively in [`data/orig/`](data/orig/) (input) and [`data/json/`](data/json/) (final JSON output).
# ## Pre-processing
# Pre-processing is applied to all input text files, and consists of the following operations:
# - sentence splitting
# - tokenization and part-of-speech tagging (using `TreeTagger`)
# - language identification (using `langid`)
#
# The pre-processed files are then written to [`data/iob/`](data/iob/) as an IOB-formatted file.
# %%time
# !citedloci-pipeline do preproc --config=config/project.ini
# ## Named entity recognition (NER)
# The NER step is responsible for extracting citation components that can be found in a text.
# Each component is tagged with a named entity tag:
# - a mention of "Homer" will be tagged as `<AAUTHOR>Homer</AAUTHOR>` – where `AAUTHOR` means ancient author)
# - "*Iliad*" will be tagged as `<AWORK>Iliad</AWORK>` – where `AWORK` means ancient work)
# - and "Hom. Il. 1.1-10" will be tagged as `<REFAUWORK>Hom. Il. </REFAUWORK>` and `<REFSCOPE>1.1-10</REFSCOPE>`
# Process all files:
# !citedloci-pipeline do ner --config=config/project.ini
# Instead of the batch processing mode, it's also possible to process one specific document (from input folders):
# +
# this command is equivalent to the one below as there is only that one
# input text document in any case
# !citedloci-pipeline do ner --config=config/project.ini --doc=bmcr_2013-01-10.txt
# -
# At this point, the JSON output file will contain, among other things, a list of the extracted named entities (i.e. citation components).
# cat data/json/bmcr_2013-01-10.json | jq ".entities"
# ## Relation extraction
# The relation extraction step groups together components that are part of the same citation. This step is necessary to reconstruct the existing logical relation between consecutive citations to the same work.
# !citedloci-pipeline do relex --config=config/project.ini
# Each relation receives an ID (e.g. `R4`) and is made of two components (that we call *arguments*). Each argument is the ID of the corresponding entity.
# cat data/json/bmcr_2013-01-10.json | jq ".|.relations"
# ## Named entity linking
# Finally, the last step consists in assigning a unique identifier (CTS URN) to each canonical citation (relation) that has been previously extracted from text.
# ⚠️If you are executing this notebook from Binder, this step is likely to take a very long time to execute, due to the limitations of the remote knowledge base that is used by default. For faster processing times, it's recommended to set up a local triple store for the knowledge base, and then point to it in the `[ned]` section of the project configuration file ([`config/project.ini`](config/project.ini)).
# !citedloci-pipeline do ned --config=config/project.ini
# If you inspect again now the JSON output file, you will notice that some entities were enriched with attributes like `urn` and `work_uri`. These attributes indicates that the entity was disambiguated and linked with the corresponding record in the HuCit knowledge base.
# cat data/json/bmcr_2013-01-10.json | jq ".|.entities"
# # Read extracted citations
# Now that the processing is complete, let's see how to compile a list of extracted citations, together with their identifiers.
# To do so, it is necessary to read the JSON output file and prepare the data so that it can stored, for example, into a pandas' `DataFrame`.
# + code_folding=[5]
import os
import codecs
import json
import pandas as pd
def read_json(doc_dir, doc_id):
    """Load the pipeline's JSON output for one document and flatten its
    citation relations into a list of record dicts (one per relation).

    Each record carries the document id, the citation's surface form and,
    when the citation was disambiguated, its CTS passage URN, work URN
    and work URI (None otherwise).
    """
    records = []
    # read input file
    with codecs.open(os.path.join(doc_dir, doc_id), 'r', 'utf-8') as inpfile:
        doc = json.load(inpfile)

    # iterate through the extracted relations
    for rel_id in doc['relations']:
        # each relation points at two entity ids: the work/author part
        # and the scope part; resolve both to their JSON records
        first_id, second_id = doc['relations'][rel_id]
        first = doc['entities'][first_id]
        second = doc['entities'][second_id]

        # unlinked relations carry a NIL identifier and have no scope
        linked = first['urn'] != 'urn:cts:GreekLatinLit:NIL'
        records.append({
            "docid": doc_id,
            "surface": first['surface'] + " " + second['surface'],
            "passage_urn": first['urn'] + ":" + second['norm_scope'] if linked else None,
            "work_urn": first['urn'],
            "work_uri": first.get('work_uri'),
        })
    return records
# +
# we use a custom function (see cell above) to read the
# output JSON file into a DataFrame
data = pd.DataFrame(
    read_json('data/json/', 'bmcr_2013-01-10.json')
).set_index('docid')
# -
# Bug fix: the notebook displayed `df`, which is never defined here;
# the DataFrame built above is bound to `data`.
data
# Now you can compare the above list of extracted citations with the original input document. As you can see, some references were correctly identified while others were missed.
#
# Interestingly "Dike 12-13," looked like a canonical citation to the extractor, but ultimately it did not receive a URN.
# !cat data/orig/bmcr_2013-01-10.txt
| step-by-step.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.0 64-bit
# name: python3
# ---
# # Poisson Image Editing
#
# In this notebook, the [poisson image editing](https://www.cs.virginia.edu/~connelly/class/2014/comp_photo/proj2/poisson.pdf) paper is implemented and studied. Poisson image editing is ...
#
# Our goal is to extract the Statue of Liberty from this image (the source image):
# 
#
# And merge it to the target image:
# 
#
# The `./figs/mask1.jpg` is used to extract the Statue of Liberty part out from `./figs/source1.jpg`. This mask is a black and white image. When we apply it to `source1.jpg`, we'll filter out everything except for the Statue of Liberty and a little bit surrounding area. I will discuss how to generate this mask later. If we merge the extracted part directly to our target image, then we'll get:
# 
#
# We can see apparently the statue is from another image. Note even if the mask is perfect, the result still won't be ideal, the boundary of the two images can be easily identified. Poisson image editing can be used to seamlessly blend these two images.
#
# When we directly merge the two image, we set the pixels in the merging region of the target image to be those of the source image. The pixel values accross the boundary will have sharp changes. One way is to set a transition region and make the pixel values change smoothly from one image to the other. In this way, the boundary will become less visible, but the colors and lighting will still be unnatural since we didn't make any changes outside this transition region around the boundary.
#
# Instead of copy-and-past, we interpolate the copied region by solving the Poisson equation. More specifically, Eq. (10) in [Pérez 2003] is used here for seamless cloning. Let $\mathbf{f}$ be the pixel values in the region to paste (which is what we're trying to solve), let $\mathbf{g}$ be the corresponding region in the source image. Instead of letting $\mathbf{f}=\mathbf{g}$, we let $\nabla \mathbf{f} = \nabla \mathbf{g}$, and let the pixel values on the boundary to be the same as those from the target image. In this way, the pixel values $\mathbf{f}$'s distribution is "guided" by the source image, and gradually blends to the target image on the boundary. More rigorous derivations can be found in [Pérez 2003], the main idea is to let the pixel values be the same on the boundary, and find a distribution that resembles the source image and changes to the target image smoothly.
#
# The only technical difficulty is to implement the Laplacian operator. [This](https://en.wikipedia.org/wiki/Discrete_Poisson_equation) is a good reference. We first build the matrix $\mathbf{D}$, then build matrix $\mathbf{A}$ by setting $\mathbf{D}$ as block diagonal element, and set the $\mathbf{I}$'s:
#
# +
import scipy.sparse
def laplacian_matrix(n, m):
    """Assemble the (n*m) x (n*m) sparse 2-D discrete Laplacian operator
    for an n-row by m-column grid, in LIL format.

    Each diagonal block is the 1-D operator tridiag(-1, 4, -1); the
    off-diagonal -I blocks couple vertically adjacent grid rows.
    """
    block = scipy.sparse.lil_matrix((m, m))
    block.setdiag(4)
    for off in (-1, 1):
        block.setdiag(-1, off)
    lap = scipy.sparse.block_diag([block] * n).tolil()
    lap.setdiag(-1, m)
    lap.setdiag(-1, -m)
    return lap
laplacian_matrix(3, 3).todense()  # sanity check: dense view of the 9x9 operator
# -
# Now our equation becomes:
# $\mathbf{Af} = \mathbf{Ag}$, inside the region;
# $\mathbf{f} = \mathbf{t}$, outside the region.
#
# where $\mathbf{g}$ is the pixel value of the source image, $\mathbf{t}$ is the pixel value of the target image.
#
# Now we are ready to implement our Poisson editting algorithm. First, we load the images and the masks:
# +
from os import path
import cv2
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
scr_dir = 'figs/example1'
# Load source/target as BGR arrays (cv2 convention) and the mask as
# single-channel grayscale.
source = cv2.imread(path.join(scr_dir, "source1.jpg"))
target = cv2.imread(path.join(scr_dir, "target1.jpg"))
# NOTE(review): the text above mentions "mask1.jpg" but the code loads
# "mask1.png" -- confirm which file ships with the repo.
mask = cv2.imread(path.join(scr_dir, "mask1.png"), cv2.IMREAD_GRAYSCALE)
# (x, y) translation applied to the source before blending (see below)
offset = (0, 66)
print('Source image size:', source.shape[:-1])
plt.imshow(source[:,:,::-1]) # this is a trick to display the image in here
plt.show()
print('Target image size:', target.shape[:-1])
plt.imshow(target[:,:,::-1])
plt.show()
print('Mask size:', mask.shape)
plt.imshow(mask, cmap='gray')
plt.show()
# -
# Note the mask tells us what region to extract from the source image, when we insert to the target image, we may want to translate it, so we need an offset parameter. In here, I set the offset value directly, I'll talk about how to find the desired value later.
#
# Now we translate the source image according to the offset:
# +
# Work in the target image's coordinate frame from here on.
y_max, x_max = target.shape[:-1]
y_min, x_min = 0, 0
x_range = x_max - x_min
y_range = y_max - y_min
# 2x3 affine matrix encoding a pure translation by `offset`, then warp
# the source onto a canvas the size of the target
M = np.float32([[1, 0, offset[0]], [0, 1, offset[1]]])
source = cv2.warpAffine(source, M, (x_range, y_range))
print('Source image size:', source.shape[:-1])
plt.imshow(source[:,:,::-1])
plt.show()
# -
# Convert the mask's value to {0, 1} (it was {0, 255}):
mask = mask[y_min:y_max, x_min:x_max]
mask[mask != 0] = 1
# Now we need to generate the matrix $\mathbf{A}$. First, apply our function to get the Laplacian matrix:
mat_A = laplacian_matrix(y_range, x_range)
# Keep an unmodified copy of the full Laplacian: it is applied to the
# source image later to build the right-hand side (guidance field).
laplacian = mat_A.tocsc()
# We only want to apply the Laplacian operator inside the blending region, so for the outside part, we set it to identity. Note for each row in mat_A, if it takes the Laplacian, then the row will have a "4" on the diagonal and four "-1", so to set it to identity, we want to set the "4" to "1", and the rest to "0":
for y in range(1, y_range - 1):
    for x in range(1, x_range - 1):
        if mask[y, x] == 0:
            # k: row index of pixel (x, y) in the flattened image
            k = x + y * x_range
            mat_A[k, k] = 1
            mat_A[k, k + 1] = 0
            mat_A[k, k - 1] = 0
            mat_A[k, k + x_range] = 0
            mat_A[k, k - x_range] = 0
# LIL is fast to modify; convert to CSC once edits are done for solving
mat_A = mat_A.tocsc()
# Now
# +
from scipy.sparse.linalg import spsolve
mask_flat = mask.flatten()
# Solve the Poisson system independently for each color channel.
for channel in range(source.shape[2]):
    source_flat = source[y_min:y_max, x_min:x_max, channel].flatten()
    target_flat = target[y_min:y_max, x_min:x_max, channel].flatten()
    # inside the mask:
    # \Delta f = div v = \Delta g
    alpha = 1
    mat_b = laplacian.dot(source_flat)*alpha
    # outside the mask:
    # f = t
    mat_b[mask_flat == 0] = target_flat[mask_flat == 0]
    x = spsolve(mat_A, mat_b)
    x = x.reshape((y_range, x_range))
    # Clip to the valid 8-bit pixel range before casting back to uint8
    x[x > 255] = 255
    x[x < 0] = 0
    x = x.astype('uint8')
    # Paste the solved channel back into the target image in place
    target[y_min:y_max, x_min:x_max, channel] = x
# +
plt.imshow(target[:,:,::-1])
plt.show()
out_dir = 'figs'
cv2.imwrite(path.join(out_dir, "possion1.png"), target)
# -
# # Reference
#
# <NAME>, <NAME>, and <NAME>. 2003. *Poisson image editing*. ACM Trans. Graph. 22, 3 (July 2003), 313-318. DOI: https://doi.org/10.1145/882262.882269
| ImageBot/data_augmentation/poisson_merge/poisson_image_editing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from nlp_tools.preprocessing import Preprocessing
from nlp_tools.loaders import MdLoader
from nlp_tools.representations import MergedMatrixRepresentation
from nlp_tools.classifiers import ClassificationProcessor, NaiveBayseTfIdfClassifier
TRAIN_PATH = './demo_training.md'
def build_classifier():
    """Train an intent classifier on TRAIN_PATH and return a prediction closure.

    The returned callable maps a raw sentence to an (intent, score) pair.
    """
    md_loader = MdLoader(TRAIN_PATH)
    preprocessing = Preprocessing(md_loader)
    representation = MergedMatrixRepresentation(preprocessing.data)
    model = ClassificationProcessor(NaiveBayseTfIdfClassifier(), representation.data)
    model.train()

    def predict(text: str):
        """Classify one sentence; returns (intent, score)."""
        features = representation.process_new_data(preprocessing.process_sentence(text))
        return model.predict(features)

    return predict
# -
# Build the classifier once; the returned closure is reused per query.
predict = build_classifier()
predict('je ne sais pas quoi regarder ce soir')
| demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W0D4_Calculus/student/W0D4_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -
# # Tutorial 1: Basics of Differential and Integral Calculus
# **Week 0, Day 4: Calculus**
#
# **By Neuromatch Academy**
#
# __Content creators:__ <NAME>, <NAME> with help from <NAME>
#
# __Content reviewers:__ ??
#
# __Production editors:__ <NAME>, <NAME>
# ---
# # Tutorial Objectives
#
# In this tutorial, we will cover aspects of differential calculus that will be frequently used in the main NMA course. We assume that you have some familiarity with differential calculus, but may be a bit rusty or may not have done much practice. Specifically, the objectives of this tutorial are
#
# * Get an intuitive understanding of derivative and integration operations
# * Learn to calculate the derivatives of 1- and 2-dimensional functions/signals numerically
# * Familiarize with the concept of neuron transfer function in 1- and 2-dimensions.
# * Familiarize with the idea of numerical integration using Riemann sum
# * Learn about the notion of eigenfunction
#
#
#
# + cellView="form"
#@title Video 1: Why do we care about calculus?
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="781o_1hRtpk", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
# Returning the object renders the embedded player as the cell output
video
# -
# ---
# # Setup
# +
# Imports
# !pip install sympy --quiet
import numpy as np
import scipy.optimize as opt # import root-finding algorithm
import sympy as sp # Python toolbox for symbolic maths
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # Toolbox for rendring 3D figures
from mpl_toolkits import mplot3d # Toolbox for rendring 3D figures
# + cellView="form"
# @title Figure Settings
import ipywidgets as widgets  # interactive display
from ipywidgets import interact
# %config InlineBackend.figure_format = 'retina'
# use NMA plot style
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
my_layout = widgets.Layout()
# Shared figure geometry and font sizes applied to every plot below
fig_w, fig_h = 12, 4.5
my_fontsize = 16
my_params = {'axes.labelsize': my_fontsize,
             'axes.titlesize': my_fontsize,
             'figure.figsize': [fig_w, fig_h],
             'font.size': my_fontsize,
             'legend.fontsize': my_fontsize-4,
             'lines.markersize': 8.,
             'lines.linewidth': 2.,
             'xtick.labelsize': my_fontsize-2,
             'ytick.labelsize': my_fontsize-2}
plt.rcParams.update(my_params)
# + cellView="form"
# @title Plotting Functions
def move_sympyplot_to_axes(p, ax):
    """Render a sympy plot object `p` onto an existing matplotlib Axes `ax`.

    NOTE(review): relies on sympy's private matplotlib backend API
    (`p.backend`, `backend.ax`, `process_series`), which has changed
    across sympy releases -- confirm against the pinned sympy version.
    """
    backend = p.backend(p)
    backend.ax = ax
    # Draw the plot's series into our axes, then restyle the spines so
    # the x-axis sits at y=0 and the top/right box lines are hidden.
    backend.process_series()
    backend.ax.spines['right'].set_color('none')
    backend.ax.spines['bottom'].set_position('zero')
    backend.ax.spines['top'].set_color('none')
    # Close the figure the backend created; only the axes content is kept.
    plt.close(backend.fig)
def plot_functions(function, show_derivative, show_integral):
    """Plot one of a fixed set of example functions, optionally together
    with its derivative and/or its integral, using sympy's plotting.

    Args:
      function (str): one of 'Linear', 'Parabolic', 'Exponential',
        'Sine', 'Sigmoid'.
      show_derivative (bool): also plot the symbolic derivative (blue).
      show_integral (bool): also plot the antiderivative, shifted so it
        is zero at t = -10 (green).

    The original implementation duplicated the whole plotting sequence
    for every on/off combination of the two flags; here the curves are
    collected in a list and plotted once.
    """
    # For sympy we first define our symbolic variable
    x, y, z, t, f = sp.symbols('x y z t f')

    # We define our function
    if function == 'Linear':
        f = -2*t
        name = r'$-2t$'
    elif function == 'Parabolic':
        f = t**2
        name = r'$t^2$'
    elif function == 'Exponential':
        f = sp.exp(t)
        name = r'$e^t$'
    elif function == 'Sine':
        f = sp.sin(t)
        name = r'$sin(t)$'
    elif function == 'Sigmoid':
        f = 1/(1 + sp.exp(-(t-5)))
        name = r'$\frac{1}{1+e^{-(t-5)}}$'

    # Collect (expression, line color, legend label) for every curve to draw
    curves = [(f, 'r', 'Function')]
    if show_derivative:
        diff_f = sp.diff(f)
        print('Derivative of', f, 'is ', diff_f)
        curves.append((diff_f, 'b', 'Derivative'))
    if show_integral:
        int_f = sp.integrate(f)
        # Fix the integration constant so the antiderivative is 0 at t = -10
        int_f = int_f - int_f.subs(t, -10)
        print('Integral of', f, 'is ', int_f)
        curves.append((int_f, 'g', 'Integral'))

    p1 = sp.plot(*[expr for expr, _, _ in curves], show=False)
    for i, (_, color, label) in enumerate(curves):
        p1[i].line_color = color
        p1[i].label = label
    p1.legend = True
    p1.title = 'Function = ' + name + '\n'
    p1.show()
def plot_alpha_func(t, f, df_dt):
    """Plot the alpha function (top panel) and its derivative (bottom)."""
    fig = plt.figure()
    ax_top = fig.add_subplot(2, 1, 1)
    ax_top.plot(t, f, 'r', label='Alpha function')
    ax_top.set_xlabel('Time (au)')
    ax_top.set_ylabel('Voltage')
    ax_top.set_title('Alpha function (f(t))')
    ax_bottom = fig.add_subplot(2, 1, 2)
    ax_bottom.plot(t, df_dt, 'b', label='Derivative')
    ax_bottom.set_title('Derivative of alpha function')
    ax_bottom.set_xlabel('Time (au)')
    ax_bottom.set_ylabel('df/dt')
def plot_rate_and_gain(I, rate, gain):
    """Plot a neuron's transfer function (left) and its gain (right).

    Args:
      I: array of injected-current values
      rate: firing rate at each current value
      gain: numerical derivative of the rate (one element shorter than I)
    """
    plt.figure()

    # Left panel: firing rate as a function of injected current
    left = plt.subplot(1, 2, 1)
    left.plot(I, rate)
    left.set_xlabel('Injected current (au)')
    left.set_ylabel('Output firing rate (normalized)')
    left.set_title('Transfer function')

    # Right panel: gain. The finite-difference gain has one sample fewer
    # than I, so the last current value is dropped.
    right = plt.subplot(1, 2, 2)
    right.plot(I[0:-1], gain)
    right.set_xlabel('Injected current (au)')
    right.set_ylabel('Gain')
    right.set_title('Gain')
def plot_charge_transfer(t, PSP, numerical_integral):
    """Plot a PSP (left) alongside its cumulative integral (right).

    Args:
      t: array of time points
      PSP: postsynaptic-potential values at each time point
      numerical_integral: running (cumulative) integral of the PSP
    """
    fig, (ax_psp, ax_charge) = plt.subplots(1, 2)

    # Left: the postsynaptic potential itself
    ax_psp.plot(t, PSP)
    ax_psp.set_xlabel('t')
    ax_psp.set_ylabel('PSP')

    # Right: cumulative area under the PSP = charge transferred so far
    ax_charge.plot(t, numerical_integral)
    ax_charge.set_xlabel('t')
    ax_charge.set_ylabel('Charge Transferred')
# -
# ---
# # Section 1: What is differentiation and integration?
#
# + cellView="form"
#@title Video 2: What is differentiation and integration?
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="eOyGG3m-7gA", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
# Leaving `video` as the cell's last expression renders the player inline
video
# -
# Calculus is a part of mathematics concerned with **continuous change**. There are two branches of calculus: differential calculus and integral calculus. Both of these concepts are useful not only in science, but also in daily life. We encounter differentiation and integration everywhere.
#
#
# Differentiation of a function $f(t)$ gives you the derivative of that function $\frac{d(f(t))}{dt}$. A derivative captures how sensitive a function is to slight changes in the input for different ranges of inputs. Geometrically, the derivative of a function at a certain input is the slope of the function at that input. For example, as you drive, the distance traveled changes continuously with time. If you take the derivative of the distance traveled with respect to time, you get the velocity of the vehicle at each point in time. The velocity tells you the rate of change of the distance traveled at different points in time. If you have slow velocity (a small derivative), the distance traveled doesn't change much for small changes in time. A high velocity (big derivative) means that the distance traveled changes a lot for small changes in time.
#
# The sign of the derivative of a function (or signal) tells whether the signal is increasing or decreasing. For a signal going through changes as a function of time, the derivative will become zero when the signal changes its direction of change (e.g. from increasing to decreasing). That is, at local minimum or maximum values, the slope of the signal will be zero. This property is used in optimizing problems. But we can also use it to find peaks in a signal.
#
# Integration can be thought of as the reverse of differentiation. If we integrate the velocity with respect to time, we can calculate the distance traveled. By integrating a function, we are basically trying to find functions that would have the original one as their derivative. When we integrate a function, our integral will have an added unknown scalar constant, $C$.
# For example, if $$ g(t) = 1.5t^2 + 4t - 1$$,
# our integral function $f(t)$ will be:
# $$ f(t) = \int g(t) dt = 0.5t^3 + 2t^2 - t + C$$.
#
# This constant exists because the derivative of a constant is 0 so we cannot know what the constant should be. This is an indefinite integral. If we compute a definite integral, that is the integral between two limits of the input, we will not have this unknown constant and the integral of a function will capture the area under the curve of that function between those two limits.
#
# Some functions, when differentiated or integrated, equal a scalar times the same function. This is a similar idea to eigenvectors of a matrix being those that, when multiplied by the matrix, equal a scalar times themselves, as you saw yesterday!
#
# When
#
# \begin{align*}
# \frac{d(f(t))}{dt} = a\cdot f(t),
# \end{align*}
#
# we say that $f(t)$ is an **eigenfunction** for derivative operator, where $a$ is a scaling factor. Similarly, when
#
# \begin{align*}
# \int f(t)dt = a\cdot f(t),
# \end{align*}
#
# we say that $f(t)$ is an **eigenfunction** for integral operator.
#
# As you can imagine, working with eigenfunctions can make mathematical analysis easy.
#
# ### Interactive Demo 1: Geometrical understanding
#
# In the interactive demo below, you can pick different functions to examine in the drop down menu. You can then choose to show the derivative function and/or the integral function.
#
# For the integral, we have chosen the unknown constant $C$ so that the integral function at the left x-axis limit is 0 (f(t = -10) = 0). So the integral will reflect the area under the curve starting from that position.
#
# For each function:
#
# * Examine just the function first. Discuss and predict what the derivative and integral will look like. Remember that derivative = slope of function, integral = area under curve from t = -10 to that t.
# * Check the derivative - does it match your expectations?
# * Check the integral - does it match your expectations?
# * Identify whether the function is an eigenfunction for the derivative operator, an eigenfunction for the integral operator, or neither.
#
#
# + cellView="form"
# @markdown Execute this cell to enable the widget
# Dropdown selecting which function plot_functions displays
function_options = widgets.Dropdown(
    options=['Linear', 'Parabolic', 'Exponential', 'Sine', 'Sigmoid'],
    description='Function',
    disabled=False,
)

# Checkbox toggling the derivative overlay
derivative = widgets.Checkbox(
    value=False,
    description='Show derivative',
    disabled=False,
    indent=False
)

# Checkbox toggling the integral overlay
integral = widgets.Checkbox(
    value=False,
    description='Show integral',
    disabled=False,
    indent=False
)

def on_value_change(change):
    # When a new function is picked, clear both overlays so the user
    # re-enables derivative/integral deliberately for each function
    derivative.value = False
    integral.value = False

function_options.observe(on_value_change, names='value')

# Wire the widgets to the plotting function defined earlier in this notebook
interact(plot_functions, function = function_options, show_derivative = derivative, show_integral = integral);
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_2ce1573c.py)
#
#
# -
# ---
# # Section 2: Analytical & Numerical Differentiation
# + cellView="form"
#@title Video 3: Analytical & Numerical Differentiation
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="C7U8zgI5rdk", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
# Leaving `video` as the cell's last expression renders the player inline
video
# -
#
# In this section, we will delve into how we actually find the derivative of a function, both analytically and numerically.
#
# ## Section 2.1: Analytical Differentiation
#
# When we find the derivative analytically, we are finding the exact formula for the derivative function.
#
# To do this, instead of having to do some fancy math every time, we can often consult [an online resource](https://en.wikipedia.org/wiki/Differentiation_rules) for a list of common derivatives, in this case our trusty friend Wikipedia.
#
# If I told you to find the derivative of $f(t) = x^3$, you could consult that site and find in Section 2.1, that if $f(t) = x^r$, then $\frac{d(f(t))}{dt} = rx^{r-1}$. So you would be able to tell me that the derivative of $f(t) = x^3$ is $\frac{d(f(t))}{dt} = 3x^{2}$.
#
# This list of common derivatives often contains only very simple functions. Luckily, as we'll see in the next two sections, we can often break the derivative of a complex function down into the derivatives of more simple components.
# ### Section 2.1.1: Product Rule
# Sometimes we encounter functions which are the product of two functions that both depend on the variable.
# How do we take the derivative of such functions? For this we use the [Product Rule](https://en.wikipedia.org/wiki/Product_rule).
#
# \begin{align}
# f(t) = u(t)\cdot v(t)\\
# \frac{d(f(t))}{dt} = v\cdot \frac{du}{dt} + u\cdot \frac{dv}{dt}\\
# \end{align}
#
# #### Coding Exercise 2.1.1: Derivative of the postsynaptic potential alpha function
#
# Let's use the product rule to get the derivative of the post-synaptic potential alpha function. As we saw in Video 3, the shape of the postsynaptic potential is given by the so called alpha function:
#
# \begin{align*}
# f(t) = t \cdot exp(-\frac{t}{\tau})
# \end{align*}
#
# Here $f(t)$ is a product of $t$ and $exp(-\frac{t}{\tau})$. The variable $\tau$ is the time constant of the synapse.
#
# We have defined $u(t)$ and $v(t)$ in the code below, in terms of the variable $t$ which is an array of time steps from 0 to 10. Define $\frac{du}{dt}$ and $\frac{dv}{dt}$, then compute the full derivative of the alpha function using the product rule. You can always consult wikipedia to figure out $\frac{du}{dt}$ and $\frac{dv}{dt}$!
# +
# Define time, time constant
t = np.arange(0, 10, .1)
tau = 0.5

# Compute alpha function: f(t) = t * exp(-t/tau)
f = t * np.exp(-t/tau)

# Define u(t), v(t) -- the two factors of the alpha function
u_t = t
v_t = np.exp(-t/tau)

# Define du/dt, dv/dt (student exercise: fill in from a derivative table)
du_dt = ...
dv_dt = ...

# Define full derivative via the product rule: df/dt = v*du/dt + u*dv/dt
df_dt = ...

# Uncomment below to visualize
#plot_alpha_func(t, f, df_dt)
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_366c0574.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=843 height=303 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D4_Calculus/static/W0D4_Tutorial1_Solution_366c0574_0.png>
#
#
# -
# ### Section 2.1.2: Chain Rule
# Many times we encounter situations in which the variable $a$ is changing with time ($t$) and affecting another variable $r$. How can we estimate the derivative of $r$ with respect to $a$ i.e. $\frac{dr}{da} = ?$
#
# To calculate $\frac{dr}{da}$ we use the [Chain Rule](https://en.wikipedia.org/wiki/Chain_rule).
#
# \begin{align}
# \frac{dr}{da} = \frac{dr}{dt}\cdot\frac{dt}{da}
# \end{align}
#
# That is, we calculate the derivative of both variables with respect to time and divide the time derivative of $r$ by that of time derivative of $a$.
#
# We will step back from applications for a second: we can use this to simplify taking derivatives of complex functions, as you will see in the next exercise.
#
#
# #### Math Exercise 2.1.2: Chain Rule
#
# Let's say that:
# $$ r(a) = e^{a^4 + 1} $$
#
# What is $\frac{dr}{da}$? This is a more complex function so we can't simply consult a table of common derivatives. Can you use the chain rule to help?
#
# Hint: we didn't define t but you could set t equal to the function in the exponent
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_a0e42694.py)
#
#
# -
# ### Section 2.2.3: Derivatives in Python using Sympy
#
# There is a useful Python library for getting the analytical derivatives of functions: Sympy. We actually used in Interactive Demo 1, under the hood.
#
# See the following cell for an example of setting up a sympy function and finding the derivative.
# +
# For sympy we first define our symbolic variables
f, t = sp.symbols('f, t')

# Function definition (sigmoid, centered at t = 5)
f = 1/(1 + sp.exp(-(t-5)))

# Get the derivative (t is the only free symbol in f, so sp.diff
# differentiates with respect to t)
diff_f = sp.diff(f)

# Print the resulting function
print('Derivative of', f, 'is ', diff_f)
# -
# ## Section 2.2: Numerical Differentiation
#
#
# Formally, the derivative of a function $\mathcal{f}(x)$ at any value $a$ is given by the finite difference formula (FD):
#
# \begin{align*}
# FD = \frac{f(a+h) - f(a)}{h}
# \end{align*}
#
# As $h\rightarrow 0$, the FD approaches the actual value of the derivative. Let's check this.
#
# *Note that the numerical estimate of the derivative will result
# in a time series whose length is one short of the original time series.*
# ### Interactive Demo 2.2: Numerical Differentiation of the Sine Function
#
# Below, we find the numerical derivative of the sine function for different values of $h$, and compare the result to the analytical solution.
#
# - What values of h result in more accurate numerical derivatives?
# + cellView="form"
# @markdown *Execute this cell to enable the widget.*
def numerical_derivative_demo(h = 0.2):
    """Compare the finite-difference derivative of sin(t), computed with
    step size h, against the analytical derivative cos(t)."""
    # Now let's create a sequence of numbers which change according to the sine function
    dt = 0.01
    tx = np.arange(-10, 10, dt)
    sine_fun = np.sin(tx)
    # symbolic differentiation tells us that the derivative of sin(t) is cos(t)
    cos_fun = np.cos(tx)

    # Numerical derivative using difference formula
    n_tx = np.arange(-10,10,h)  # create new time axis
    n_sine_fun = np.sin(n_tx)  # calculate the sine function on the new time axis
    sine_diff = (n_sine_fun[1:] - n_sine_fun[0:-1]) / h

    fig = plt.figure()
    ax = plt.subplot(111)
    plt.plot(tx, sine_fun, label='sine function')
    plt.plot(tx, cos_fun, label='analytical derivative of sine')
    with plt.xkcd():
        # notice that numerical derivative will have one element less
        plt.plot(n_tx[0:-1], sine_diff, label='numerical derivative of sine')
        plt.xlim([-10, 10])
        plt.xlabel('Time (au)')
        plt.ylabel('f(x) or df(x)/dt')
        ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),
                  ncol=3, fancybox=True)
        plt.show()

# Slider over the finite-difference step h
_ = widgets.interact(numerical_derivative_demo, h = (0.01, 0.5, .02))
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_36cd3b93.py)
#
#
# -
# ## Section 2.3: Transfer Function and Gain of a Neuron
# When we inject a constant current (DC) in a neuron, its firing rate changes as a function of strength of the injected current. This is called the **input-output transfer function** or just the *transfer function* or *I/O Curve* of the neuron. For most neurons this can be approximated by a sigmoid function e.g.
#
# \begin{align}
# rate(I) = \frac{1}{1+\text{e}^{-a*(I-\theta)}} - \frac{1}{exp(a*\theta)} + \eta
# \end{align}
#
# where $I$ is injected current, $rate$ is the neuron firing rate and $\eta$ is noise (Gaussian noise with zero mean and $\sigma$ standard deviation).
#
# *You will visit this equation in a different context in Week 3*
# ### Coding Exercise 2.1: Calculating the Transfer Function and Gain of a Neuron
# The slope of a neuron's input-output transfer function ($\frac{d(r(I))}{dI}$) is called the **gain** of the neuron, as it tells how the neuron output will change if the input is changed.
#
# Estimate the gain of the following neuron transfer function using numerical differentiation. We will use our timestep as h.
#
#
# + cellView="form"
# @markdown *Execute this cell to enable the numerical differentiation function: `numerical_derivative`*
def numerical_derivative(x, h):
    '''Numerical derivative via the forward-difference formula.

    Args:
      x: array of samples of a function, taken at a fixed step h
      h: time step between consecutive samples

    Returns:
      Array of finite differences (x[i+1] - x[i]) / h, one element
      shorter than x.
    '''
    # np.diff computes adjacent differences in one vectorized call.
    # (The original pre-allocated a zeros array that was immediately
    # discarded by the next assignment -- dead code, removed.)
    return np.diff(x) / h
# +
def compute_rate_and_gain(I, a, theta, current_timestep):
    """ Compute rate and gain of neuron based on parameters

    Args:
      I (ndarray): different possible values of the current
      a (scalar): parameter of the transfer function
      theta (scalar): parameter of the transfer function
      current_timestep (scalar): the time we're using to take steps

    Returns:
      (ndarray, ndarray): rate and gain for each possible value of I
    """
    ########################################################################
    ## TODO for students: calculate the gain of the neural firing rate ##
    ## Complete line of code and remove
    raise NotImplementedError("Calculate the gain")
    ########################################################################

    # Compute rate: sigmoid transfer function, shifted so rate(0) = 0
    rate = (1+np.exp(-a*(I-theta)))**-1 - (1+np.exp(a*theta))**-1

    # Compute gain (student exercise: numerical derivative of rate,
    # with current_timestep as the step size h)
    gain = ...

    return rate, gain


# Sample injected currents from 0 to 8 in steps of current_timestep
current_timestep = 0.1
I = np.arange(0, 8, current_timestep)

# Neuron transfer function parameters
a = 1.2  # You can change this value
theta = 5  # You can change this value

# Compute rate and gain
rate, gain = compute_rate_and_gain(I, a, theta, current_timestep)

# Visualize rate and gain
plot_rate_and_gain(I, rate, gain)
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_9fc5d678.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=843 height=303 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D4_Calculus/static/W0D4_Tutorial1_Solution_9fc5d678_0.png>
#
#
# -
# The slope of the transfer function tells us in which range of inputs the neuron is most sensitive to changes in its input. Change the parameters of the neuron transfer function (i.e. $a$ and $\theta$) and see if you can predict the value of $I$ for which the neuron has maximal slope and which parameter determines the peak value of the gain.
# # Section 3: Functions of Multiple Variables
#
#
# + cellView="form"
#@title Video 4: Functions of Multiple Variables
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="rLsLOWsNOGw", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
# Leaving `video` as the cell's last expression renders the player inline
video
# -
# In the previous section, you looked at function of single variable $t$ or $x$. In most cases, we encounter functions of multiple variables. For example, in the brain, the firing rate of a neuron is a function of both excitatory and inhibitory input rates. In the following, we will look into how to calculate derivatives of such functions.
# First, let's create a function of two variables. We take the example of a neuron driven by excitatory and inhibitory inputs. Because this is for illustrative purposes, we will not go in the details of the numerical range of the input and output variables.
#
# In the function below, we assume that the firing rate of a neuron increases monotonically with an increase in excitation and decreases monotonically with an increase in inhibition. The inhibition is modelled as a subtraction. Like for the 1-dimensional transfer function, here we assume that we can approximate the transfer function as a sigmoid function.
# + cellView="form"
# @markdown Execute this cell to visualize the neuron firing rate surface
def sigmoid_function(x, a, theta):
    """Population activation function.

    Args:
      x: the population input
      a: the gain of the function
      theta: the threshold of the function

    Returns:
      The population activation response F(x) for input x, shifted by a
      constant baseline so that F(0) = 0.
    """
    # Standard sigmoid centered on the threshold theta
    sigmoid = 1 / (1 + np.exp(-a * (x - theta)))
    # Value the sigmoid takes at x = 0; subtracting it pins F(0) to zero
    baseline = 1 / (1 + np.exp(a * theta))
    return sigmoid - baseline
# Neuron Transfer function: sample excitatory and inhibitory input ranges
step_size = 0.1
exc_input = np.arange(2,9,step_size)
inh_input = np.arange(0,7,step_size)

# Sigmoid parameters for the excitatory and inhibitory pathways
exc_a = 1.2
exc_theta = 2.4
inh_a = 1.
inh_theta = 4.

# Output rate = sigmoid of excitation minus half the sigmoid of inhibition
# (inhibition enters subtractively)
rate = np.zeros((len(exc_input),len(inh_input)))
for ii in range(len(exc_input)):
    for jj in range(len(inh_input)):
        rate[ii,jj] = sigmoid_function(exc_input[ii],exc_a,exc_theta) - sigmoid_function(inh_input[jj],inh_a,inh_theta)*0.5

with plt.xkcd():
    X, Y = np.meshgrid(exc_input, inh_input)
    fig = plt.figure(figsize=(15,15))

    # Top-left: rate vs excitation, at three fixed inhibition levels
    ax1 = fig.add_subplot(2,2,1)
    lg_txt = 'Inhibition = ' + str(inh_input[0])
    ax1.plot(exc_input,rate[:,0],label=lg_txt)
    lg_txt = 'Inhibition = ' + str(inh_input[20])
    ax1.plot(exc_input,rate[:,20],label=lg_txt)
    lg_txt = 'Inhibition = ' + str(inh_input[40])
    ax1.plot(exc_input,rate[:,40],label=lg_txt)
    ax1.legend()
    ax1.set_xlabel('Excitatory input (au)')
    ax1.set_ylabel('Neuron output rate (au)');

    # Top-right: rate vs inhibition, at three fixed excitation levels
    ax2 = fig.add_subplot(2,2,2)
    lg_txt = 'Excitation = ' + str(exc_input[0])
    ax2.plot(inh_input,rate[0,:],label=lg_txt)
    lg_txt = 'Excitation = ' + str(exc_input[20])
    ax2.plot(inh_input,rate[20,:],label=lg_txt)
    lg_txt = 'Excitation = ' + str(exc_input[40])
    ax2.plot(inh_input,rate[40,:],label=lg_txt)
    ax2.legend()
    ax2.set_xlabel('Inhibitory input (au)')
    ax2.set_ylabel('Neuron output rate (au)');

    # Bottom: full rate surface as a function of both inputs
    ax3 = fig.add_subplot(2, 1, 2, projection='3d')
    surf= ax3.plot_surface(Y.T, X.T, rate, rstride=1, cstride=1,
                           cmap='viridis', edgecolor='none')
    ax3.set_xlabel('Inhibitory input (au)')
    ax3.set_ylabel('Excitatory input (au)')
    ax3.set_zlabel('Neuron output rate (au)');
    fig.colorbar(surf)
# -
# In the **Top-Left** plot, we see how the neuron output rate increases as a function of excitatory input (e.g. the blue trace). However, as we increase inhibition, expectedly the neuron output decreases and the curve is shifted downwards. This constant shift in the curve suggests that the effect of inhibition is subtractive, and the amount of subtraction does not depend on the neuron output.
#
# We can alternatively see how the neuron output changes with respect to inhibition and study how excitation affects that. This is visualized in the **Top-Right** plot.
#
# This type of plotting is very intuitive, but it becomes very tedious to visualize when there are larger numbers of lines to be plotted. A nice solution to this visualization problem is to render the data as color, as surfaces, or both.
#
# This is what we have done in the plot on the bottom. The colormap on the right shows the output of the neuron as a function of inhibitory input and excitatory input. The output rate is shown both as height along the z-axis and as the color. Blue means low firing rate and yellow means high firing rate (see the color bar).
#
# In the above plot, the output rate of the neuron goes below zero. This is of course not physiological. In models, we either choose the operating point such that the output does not go below zero, or else we clamp the neuron output to zero if it goes below zero. You will learn about it more in Week 3.
# ## Section 3.1: Partial derivatives
# The above function is like a surface and when we are thinking of the derivative of the surface we can make a physical analogy.
#
# Consider putting a ball on this surface. In which direction the ball will move?
#
# The movement along one of the directions (along the x-axis) will be determined by inhibitory input and in the other direction (along y-axis) it will be determined by excitatory inputs. The effective movement direction will be the vector sum of the two (perhaps you recall vector sum from yesterday).
#
# That is, we can calculate the derivative of the surface for the inhibitory input and then for the excitatory inputs.
#
# When we take the derivative of a multivariable function with respect to one of the variables it is called the **partial derivative**. For example if we have a function:
#
# \begin{align}
# f(x,y) = x^2 + 2xy + y^2
# \end{align}
#
# Then we can define the partial derivatives as
#
# \begin{align}
# \frac{\partial(f(x,y))}{\partial x} = 2x + 2y + 0 \\\\
# \frac{\partial(f(x,y))}{\partial y} = 0 + 2x + 2y
# \end{align}
#
# In the above, the derivative of the last term ($y^2$) with respect to $x$ is zero because it does not change with respect to $x$. Similarly, the derivative of $x^2$ with respect to $y$ is also zero.
#
#
# ### Interactive Demo 3.1: Visualize partial derivatives
#
# In the demo below, you can input any function of x and y and then visualize both the function and partial derivatives.
#
# We visualized the 2-dimensional function as a surface plot in which the values of the function are rendered as color. Yellow represents a high value and blue represents a low value. The height of the surface also shows the numerical value of the function. The first plot is that of our function. And the two bottom plots are the derivative surfaces with respect to $x$ and $y$ variables.
#
# 1. Ensure you understand how the plots relate to each other - if not, review the above material
# 2. Can you come up with a function where the partial derivative with respect to x will be a linear plane and the derivative with respect to y will be more curvy?
# 3. What happens to the partial derivatives if there are no terms involving multiplying x and y together?
#
#
# + cellView="form"
# @markdown Execute this widget to enable the demo
# Let's use sympy to calculate Partial derivatives of a function of 2-variables
@interact(f2d_string = 'x**2 + 2*x*y + y**2')
def plot_partial_derivs(f2d_string):
    """Parse a user-typed expression in x and y, then plot the function
    surface and both partial-derivative surfaces."""
    f, x, y = sp.symbols('f, x, y')
    # NOTE(review): eval on free-form widget text executes arbitrary code;
    # acceptable in a local teaching notebook, never on untrusted input.
    f2d = eval(f2d_string)
    # Partial derivatives with respect to each symbolic variable
    f2d_dx = sp.diff(f2d,x)
    f2d_dy = sp.diff(f2d,y)
    print('Partial derivative of ', f2d, 'with respect to x is', f2d_dx)
    print('Partial derivative of ', f2d, 'with respect to y is', f2d_dy)
    # Three 3D surfaces: the function, df/dx, and df/dy
    p1 = sp.plotting.plot3d(f2d, (x, -5, 5), (y, -5, 5),show=True,xlabel='x', ylabel='y', zlabel='f(x,y)',title='Our function')
    p2 = sp.plotting.plot3d(f2d_dx, (x, -5, 5), (y, -5, 5),show=True,xlabel='x', ylabel='y', zlabel='df(x,y)/dx',title='Derivative w.r.t. x')
    p3 = sp.plotting.plot3d(f2d_dy, (x, -5, 5), (y, -5, 5),show=True,xlabel='x', ylabel='y', zlabel='df(x,y)/dy',title='Derivative w.r.t. y')
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_5deca1d0.py)
#
#
# -
# ### Section 3.2: Numerical calculation of partial derivatives
# Now that you have an intuition about multivariable functions and partial derivatives we can go back to the neuron transfer function we evaluated earlier.
# To evaluate the partial derivatives we can use the same numerical differentiation as before but now we apply it to each row and column separately.
# + cellView="form"
# @markdown Execute this cell to visualize the transfer function
# Neuron Transfer Function: sample excitatory and inhibitory input ranges
step_size = 0.1
exc_input = np.arange(1,10,step_size)
inh_input = np.arange(0,7,step_size)
exc_a = 1.2
exc_theta = 2.4
inh_a = 1.
inh_theta = 4.

# Rate surface: sigmoid of excitation minus half the sigmoid of inhibition
rate = np.zeros((len(exc_input),len(inh_input)))
for ii in range(len(exc_input)):
    for jj in range(len(inh_input)):
        rate[ii,jj] = sigmoid_function(exc_input[ii],exc_a,exc_theta) - sigmoid_function(inh_input[jj],inh_a,inh_theta)*0.5

# Derivative with respect to excitatory input rate, column by column
rate_de = np.zeros((len(exc_input)-1,len(inh_input)))  # this will have one row less than the rate matrix
for ii in range(len(inh_input)):
    rate_de[:,ii] = (rate[1:,ii] - rate[0:-1,ii])/step_size

# Derivative with respect to inhibitory input rate, row by row
rate_di = np.zeros((len(exc_input),len(inh_input)-1))  # this will have one column less than the rate matrix
for ii in range(len(exc_input)):
    rate_di[ii,:] = (rate[ii,1:] - rate[ii,0:-1])/step_size

with plt.xkcd():
    X, Y = np.meshgrid(exc_input, inh_input)
    fig = plt.figure(figsize=(20,8))

    # Left: the rate surface itself
    ax1 = fig.add_subplot(1, 3, 1, projection='3d')
    surf1 = ax1.plot_surface(Y.T, X.T, rate, rstride=1, cstride=1, cmap='viridis', edgecolor='none')
    ax1.set_xlabel('Inhibitory input (au)')
    ax1.set_ylabel('Excitatory input (au)')
    ax1.set_zlabel('Neuron output rate (au)')
    ax1.set_title('Rate as a function of Exc. and Inh');
    ax1.view_init(45, 10)
    fig.colorbar(surf1)

    # Middle: partial derivative w.r.t. excitation (excitatory axis is one
    # sample shorter because of the finite difference)
    Xde, Yde = np.meshgrid(exc_input[0:-1], inh_input)
    ax2 = fig.add_subplot(1, 3, 2, projection='3d')
    surf2 = ax2.plot_surface(Yde.T, Xde.T, rate_de, rstride=1, cstride=1, cmap='viridis', edgecolor='none')
    ax2.set_xlabel('Inhibitory input (au)')
    ax2.set_ylabel('Excitatory input (au)')
    ax2.set_zlabel('Neuron output rate (au)');
    ax2.set_title('Derivative wrt Excitation');
    ax2.view_init(45, 10)
    fig.colorbar(surf2)

    # Right: partial derivative w.r.t. inhibition (inhibitory axis shorter)
    Xdi, Ydi = np.meshgrid(exc_input, inh_input[:-1])
    ax3 = fig.add_subplot(1, 3, 3, projection='3d')
    surf3 = ax3.plot_surface(Ydi.T, Xdi.T, rate_di, rstride=1, cstride=1, cmap='viridis', edgecolor='none')
    ax3.set_xlabel('Inhibitory input (au)')
    ax3.set_ylabel('Excitatory input (au)')
    ax3.set_zlabel('Neuron output rate (au)');
    ax3.set_title('Derivative wrt Inhibition');
    ax3.view_init(15, -115)
    fig.colorbar(surf3)
# -
# Is this what you expected? Vary the inputs and see if your intuitions are correct. Change the time varying variable and test your intuitions.
# ---
# # Section 4: Numerical Integration
#
# + cellView="form"
#@title Video 5: Numerical Integration
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="sj_83_811j0", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
# Leaving `video` as the cell's last expression renders the player inline
video
# -
# Geometrically, integration is the area under the curve. This interpretation gives two formal ways to calculate the integral of a function numerically.
#
# **[Riemann sum](https://en.wikipedia.org/wiki/Riemann_sum)**:
# If we wish to integrate a function $f(t)$ with respect to $t$, then first we divide the function into $n$ intervals of size $dt = a-b$, where $a$ is the start of the interval. Thus, each interval gives a rectangle with height $f(a)$ and width $dt$. By summing the area of all the rectangles, we can approximate the area under the curve. As the size $dt$ approaches zero, our estimate of the integral approaches the analytical calculation. Essentially, the Riemann sum is cutting the region under the curve into vertical stripes, calculating the area of each stripe and summing them up.
#
# **[Lebesgue integral](https://en.wikipedia.org/wiki/Lebesgue_integral)**: In the Lebesgue integral, we divide the area under the curve into horizontal stripes. That is, instead of the independent variable, the range of the function $f(t)$ is divided into small intervals.
# ## Section 4.1: Demonstration of the Riemann Sum
# ### Interactive Demo 4.1: Riemann Sum vs. Analytical Integral with changing step size
#
# Below, we will compare numerical integration using the Riemann Sum with the analytical solution. You can change the interval size $dt$ using the slider.
#
#
#
# 1. What values of dt result in the best numerical integration?
# 2. What is the downside of choosing that value of dt?
# 3. With large dt, why are we underestimating the integral (as opposed to overestimating)?
#
#
#
# + cellView="form"
# @markdown Run this cell to enable the widget!
def riemann_sum_demo(dt = 0.5):
    """Visualize the Riemann-sum approximation of the integral of
    f(t) = t^2 - t + 1 on [0, 10], next to the analytical integral.

    Args:
      dt: width of each Riemann rectangle
    """
    step_size = 0.1
    min_val = 0.
    max_val = 10.
    tx = np.arange(min_val, max_val, step_size)

    # Our function
    ftn = tx**2 - tx + 1
    # And the integral analytical formula, calculated using sympy
    int_ftn = tx**3/3 - tx**2/2 + tx

    # Numerical integration of f(t) using Riemann Sum
    n = int((max_val-min_val)/dt)
    r_tx = np.zeros(n)
    fun_value = np.zeros(n)
    for ii in range(n):
        a = min_val+ii*dt  # left edge of this rectangle
        fun_value[ii] = a**2 - a + 1
        r_tx[ii] = a;

    # Riemann sum is just the cumulative sum of the fun_value multiplied by the
    # rectangle width dt
    r_sum = np.cumsum(fun_value)*dt

    with plt.xkcd():
        plt.figure(figsize=(20,5))
        ax = plt.subplot(1,2,1)
        plt.plot(tx,ftn,label='Function')
        # Draw each Riemann rectangle as a red outline
        for ii in range(n):
            plt.plot([r_tx[ii], r_tx[ii], r_tx[ii]+dt, r_tx[ii]+dt], [0, fun_value[ii], fun_value[ii], 0] ,color='r')
        plt.xlabel('Time (au)')
        plt.ylabel('f(t)')
        plt.title('f(t)')
        plt.grid()

        plt.subplot(1,2,2)
        plt.plot(tx,int_ftn,label='Analytical')
        plt.plot(r_tx+dt,r_sum,color = 'r',label='Riemann Sum')
        plt.xlabel('Time (au)')
        plt.ylabel('int(f(t))')
        plt.title('Integral of f(t)')
        plt.grid()
        plt.legend()
        plt.show()

# Slider over the rectangle width dt
_ = widgets.interact(riemann_sum_demo, dt = (0.1, 1., .02))
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_fd942e45.py)
#
#
# -
# There are more advanced methods of numerical integration, such as Runge-Kutta. In any case, the Riemann sum is the basis of Euler's method of integration for solving ordinary differential equations - something you will do in a later tutorial today.
#
# See Bonus Section 1 to work through some examples of neural applications of numerical integration.
# ## Section 4.2: Neural Applications of Numerical Integration
# ### Coding Exercise 4.2: Calculating Charge Transfer with Excitatory Input
# An incoming spike elicits a change in the post-synaptic membrane potential which can be captured by the following function
#
# \begin{align}
# PSP(t) = J\times t\times exp\big(-\frac{t-t_{sp}}{\tau_{s}}\big)
# \end{align}
#
# where $J$ is the synaptic amplitude, $t_{sp}$ is the spike time and $\tau_s$ is the synaptic time constant.
#
# Estimate the total charge transferred to the postsynaptic neuron during a PSP with amplitude $J=1.0$, $\tau_s = 1.0$ and $t_{sp} = 1.$ (that is, the spike occurred at 1 ms). The total charge will be the integral of the PSP function.
# +
# Set up parameters
J = 1      # synaptic amplitude
tau_s = 1  # synaptic time constant
t_sp = 1   # spike time
dt = .1    # integration step (rectangle width)
t = np.arange(0, 10, dt)

# Code PSP formula (student exercise):
# PSP(t) = J * t * exp(-(t - t_sp) / tau_s)
PSP = ...

# Compute numerical integral
# We already have PSP at every time step (height of rectangles). We need to
# multiply by the width of the rectangles (dt) to get areas
rectangle_areas = ...

# Cumulatively sum rectangles (hint: use np.cumsum)
numerical_integral = ...

# Visualize
# plot_charge_transfer(t, PSP, numerical_integral)
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_200c1e98.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=843 height=303 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D4_Calculus/static/W0D4_Tutorial1_Solution_200c1e98_0.png>
#
#
# -
# You can see from the figure that the total charge transferred is a little over 2.5.
# ---
# # Section 5: Integration and Differentiation as Filtering Operations
#
# + cellView="form"
#@title Video 6: Filtering Operations
# Embed the lecture video by its YouTube ID (fs=1 enables the fullscreen control)
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="TQ0t-S3__OA", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
# -
# In the above, we used the notions that geometrically integration is the area under the curve and differentiation is the slope of the curve. There is another interpretation of these two operations.
#
# As we calculate the derivative of a function, we take the difference of adjacent values of the function. This results in the removal of the common part between the two values. As a consequence, we end up removing the unchanging part of the signal. If we now think in terms of frequencies, differentiation removes low frequencies, or slow changes. That is, differentiation acts as a high pass filter.
#
# Integration does the opposite because in the estimation of an integral we keep adding adjacent values of the signal. So, again thinking in terms of frequencies, integration is akin to the removal of high frequencies or fast changes (low-pass filter). The shock absorbers in your bike are an example of integrators.
#
# We can see this behavior in the demo below. Here we will not work with functions, but with signals. For our purposes, functions and signals are the same thing; it is just that in most cases our signals are measurements with respect to time.
# + cellView="form"
# @markdown Execute this cell to see visualization
h = 0.01  # sampling step (s)
tx = np.arange(0,2,h)
noise_signal = np.random.uniform(0,1,(len(tx)))*0.5
x1 = np.sin(0.5*np.pi*tx) + noise_signal # noisy sine: sin(0.5*pi*t) has period 4 s (0.25 Hz), so we see half a cycle over 2 s
# In the signal x1 we have added random noise which contributes the high frequencies
# Take the derivative equivalent of the signal i.e. subtract the adjacent values
x1_diff = (x1[1:] - x1[:-1])
# Take the integration equivalent of the signal i.e. sum the adjacent values. And divide by 2 (take average essentially)
x1_integrate = (x1[1:] + x1[:-1])/2
plt.figure(figsize=(15,10))
plt.subplot(3,1,1)
plt.plot(tx,x1,label='Original Signal')
#plt.xlabel('Time (sec)')
plt.ylabel('Signal Value(au)')
plt.legend()
plt.subplot(3,1,2)
plt.plot(tx[0:-1],x1_diff,label='Differentiated Signal')
# plt.xlabel('Time (sec)')
plt.ylabel('Differentiated Value(au)')
plt.legend()
plt.subplot(3,1,3)
plt.plot(tx,x1,label='Original Signal')
plt.plot(tx[0:-1],x1_integrate,label='Integrate Signal')
plt.xlabel('Time (sec)')
plt.ylabel('Integrate Value(au)')
plt.legend()
# -
# Notice how the differentiation operation amplifies the fast changes which were contributed by noise. By contrast, the integration operation suppresses the fast changing noise. Such sums and subtractions form the basis of digital filters.
# Vary the signal characteristics to see how you can use these operations to enhance or suppress noise.
#
# Also if you want to be adventurous, you may try to use these operations in series and see what happens.
# ---
# # Summary
# * Geometrically, integration is the area under the curve and differentiation is the slope of the function
# * The concepts of slope and area can be easily extended to higher dimensions. We saw this when we took the derivative of a 2-dimensional transfer function of a neuron
# * Numerical estimates of both derivatives and integrals require us to choose a time step $h$. The smaller the $h$, the better the estimate, but for small values of $h$, more computations are needed. So there is always some tradeoff.
# * Partial derivatives are just the estimate of the slope along one of the many dimensions of the function. We can combine the slopes in different directions using vector sum to find the direction of the slope.
# * Because the derivative of a function is zero at the local peak or trough, derivatives are used to solve optimization problems.
# * When thinking of signal, integration operation is equivalent to smoothening the signals (i.e. remove fast changes)
# * Differentiation operations remove slow changes and enhance high frequency content of a signal
| tutorials/W0D4_Calculus/student/W0D4_Tutorial1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# + tags=["remove-cell"]
library(repr) ; options(repr.plot.width = 5, repr.plot.height = 6) # Change plot sizes (repr dimensions are in inches, not cm)
# -
# # Model Fitting the Bayesian Way
# ## Introduction
#
#
# In this Chapter we will work through various examples of model fitting to biological data using Bayesian Methods. It is recommended that you see the [lecture](https://github.com/mhasoba/TheMulQuaBio/tree/master/content/lectures/ModelFitting) on model fitting in Ecology and Evolution.
# Recall from the [lectures](https://github.com/vectorbite/VBiTraining2/tree/master/lectures) that for Bayesian model fitting/inference, we need to:
#
#
# 1. **Assess MCMC convergence**: MCMC is a family of algorithms for sampling probability distributions so that they can be adequately characterized (in the Bayesian context, the posterior distribution). The MCMC procedure reaches *convergence* once we have sufficient random draws from the posterior distribution. To assess convergence we look at trace plots. The goal is to get "fuzzy caterpillar"-looking curves.
#
# 2. **Summarize MCMC draws**: Summarize and visualize outcome of the random draws using histograms for all draws for each parameter, and calculate expectation, variance, credibility interval, etc.
#
# 3. **Prior Sensitivity**: Assess prior sensitivity by changing prior values and check whether it affects the results or not. If it does, that means that the results are too sensitive to that prior, not good!
#
# 4. **Make inferences**: We use the values from item (2) to make inferences and answer the research question.
#
# Because likelihoods form the basis for Bayesian model fitting, we will first do an exercise to understand their calculation.
# We will use R. For starters, clear all variables and graphic devices and load necessary packages:
rm(list = ls())  # clear all objects from the workspace
graphics.off()   # close any open graphics devices
# ## A Likelihoods exercise
#
# ### The Binomial Distribution
#
# The Binomial distribution is used to model the number of "successes" in a set of trials (e.g., number of heads when you flip a coin $N$ times). The pmf is
#
# $$
# {N \choose x} p^x(1-p)^{N-x}
# $$
#
# such that $\mathrm{E}[x] = Np $. Throughout this "experiment", you will assume that your experiment consists of flipping 20 coins, so that $N = 20$.
#
# Let's use the Binomial distribution to practice two methods of estimating parameters for a probability distribution: method of moments and maximum likelihood.
#
# #### Simulating from the Binomial using R
#
# First take 50 draws from a binomial (using _rbinom_) for each $p\in$ 0.1, 0.5, 0.8 with $N=20$. For this, lets set seed so that we can reproduce this exact sequence of sampling (why?):
set.seed(54321) # fix the RNG state so the draws below are reproducible
## 50 draws with each p
pp <- c(0.1, 0.5, 0.8) # success probabilities to compare
N <- 20                # number of coin flips per draw
reps <- 50             # number of draws for each value of p
# Now plot the histograms of these draws together with the density functions.
# +
## histograms + density here
x <- seq(0, 50, by=1) # support over which to evaluate the binomial pmf
par(mfrow=c(1,3), bty="n") # one panel per value of p
# Write more code here
# -
# **Q1: Do the histograms look like the distributions for all 3 values of $p$? If not, what do you think is going on?**
#
# You'll notice that for $p=0.1$ the histogram and densities don't look quite the same -- the `hist()` function is lumping together the zeros and ones which makes it look off. This is typical for distributions that are truncated.
#
# ### Method of Moments (MoM) Estimators
#
# To obtain a method of moments estimator, we equate the theoretical moments (which will be a function of the parameters of the distribution) with the corresponding sample moments, and solve for the parameters in order to obtain an estimate. For the binomial distribution, there is only one parameter, $p$.
#
# **Q2: Given the analytic expected value, above, and assuming that the sample mean is $m$ (the mean number of observed heads across replicates), what is the MoM estimator for $p$?**
#
# Now calculate the MoM estimator for each of your 3 sets of simulated data sets to get the estimates for each of your values of $p$.
## MOM estimators for 3 simulated sets
# **Q3: How good are your estimates for $p$? Do you get something close to the true value?**
#
# For 1 of your values of $p$, take 20 draws from the binomial with $N=20$ and calculate the MoM. Repeat this 100 times (hint: the `replicate()` and `lapply` functions may be useful.) Make a histogram of your estimates, and add a line/point to the plot to indicate the real value of $p$ that you used to simulate the data.
# +
## MoM estimates, histogram
# -
# **Q4: Is the MoM successfully estimating $p$? Does your histogram for $p$ look more or less normal? If so, what theorem might explain why this would be the case?**
#
# ### MLE for Binomial Distribution
#
# #### Likelihood and Log Likelihood
#
# Imagine that you flip a coin $N$ times, and then repeat the experiment $n$ times. Thus, you have data $x = x_1, x_2, \dots, x_n$ that are the number of times you observed a head in each trial. $p$ is the probability of obtaining a head.
#
# **Q5: Write down the likelihood and log-likelihood for the data. Take the derivative of the negative log-likelihood, set this equal to zero, and find the MLE, $\hat{p}$.**
# ### Computing the likelihood and MLE in R
#
# Simulate some data with $p=0.25$, $N=10$, and 10 replicates. Calculate the negative log-likelihood of your simulated data across a range of $p$ (from 0 to 1), and plot them. You may do this by using the built in functions in R (specifically `dbinom`) or write your own function. This is called a "likelihood profile''. Plot your likelihood profile with a line indicating the true value of $p$. Add lines indicating the MLE $\hat{p}$ and the MoM estimator for $p$ to your likelihood profile.
# +
pp <- .25   # true success probability used to simulate the data
N <- 10     # number of coin flips per replicate
reps <- 10  # number of replicates
## Make one set of data
## the likelihood is always exactly zero
## at p=0,1, so I skip those values
ps <- seq(0.01, 0.99, by=0.01) # grid of p values for the likelihood profile
## Likelihood
## MLE/MoM estimators
## now plot the negative log likelihood profile
# -
# **Q6: How does your MLE compare to the true parameter value? How could you estimate the MLE from the likelihood profile if you didn't have a way to calculate the MLE directly? If you chose another version of the random seed, do you get the same answer?**
# ## Example: Midge Wing Length
#
# We will use this simple example to go through the steps of assessing a Bayesian model and we'll see that MCMC can allow us to approximate the posterior distribution.
#
# Grogan and Wirth (1981) provide data on the wing length (in millimeters) of nine members of a species of midge (small, two-winged flies).
#
# From these measurements we wish to make inference about the population mean $\mu$.
# +
WL.data <- read.csv("../data/MidgeWingLength.csv") # midge wing lengths (mm); nine observations per the text
Y <- WL.data$WingLength
n <- length(Y) # sample size
hist(Y,breaks=10,xlab="Wing Length (mm)")
# -
# ### Non-Bayesian analysis
#
# We might expect that these midge data could be draws from a _Normal_ distribution $\mathcal{N}(\mu, \sigma^2)$. [Recall](./21-ModelFitting-MLE.ipynb) that the MLEs for $\mu$ and $\sigma^2$ here are simply the *sample mean* and *sample variance* respectively:
m <- sum(Y)/n            # sample mean (MLE of mu)
s2 <- sum((Y-m)^2)/(n-1) # sample variance; NOTE(review): n-1 gives the unbiased estimator, the strict MLE divides by n
round(c(m, s2), 3)
x <- seq(1.4,2.2, length=50)
hist(Y,breaks=10,xlab="Wing Length (mm)", xlim=c(1.4, 2.2), freq=FALSE)
lines(x, dnorm(x, mean=m, sd=sqrt(s2)), col=2) ## overlay the fitted normal density
# ```{note}
#
# We have plotted the estimate of the _population_ distribution here, but this is not the *predictive distribution* (which would be a Student's $t$ because we're estimating both the mean and variance.)
# ```
#
# The non-Bayesian version here has the advantage of being quick and familiar. However, from our point of view it has two weaknesses:
#
# 1. Because we have so few data points estimates of the accuracy of our predictions aren't available. 9 points is only barely enough to estimate a mean, so we don't trust any of the variance calculations.
#
# 2. We can't easily incorporate things that we might already know about midges into our analysis.
#
# Let's see how we can do a similar analysis using a Bayesian approach, first analytically, and the with JAGS.
#
# ### Setting up the Bayesian Model
#
# We need to define the likelihood and the priors for our Bayesian analysis. Given the analysis that we've just done, let's assume that our data come from a normal distribution with unknown mean, $\mu$ but that we know the variance is $\sigma^2 = 0.025$. That is:
#
# $$
# \mathbf{Y} \stackrel{\mathrm{iid}}{\sim} \mathcal{N}(\mu, 0.025^2)
# $$
#
#
# #### Prior Information
#
# Studies from other populations suggest that wing lengths are usually around 1.9 mm, so we set $\mu_0 = 1.9$
#
# We also know that lengths must be positive ($\mu >0$)
#
# We can approximate this restriction with a normal prior distribution for $\mu$ as follows:
#
# Since most of the normal density is within two standard deviations of the mean we choose $\tau^2_0$ so that
#
# $$ \mu_0 - 2\sigma_0 >0 \Rightarrow \sigma_0 <1.9/2 = 0.95 $$
# I will choose $\sigma_0=0.8$ here. Thus our prior for mu will be:
# $$
# \mu \sim \mathcal{N}(1.9, 0.8^2)
# $$
#
# ----
#
# Together, then, our full model is:
#
# \begin{align*}
# \mathbf{Y} & \stackrel{\mathrm{iid}}{\sim} \mathcal{N}(\mu, 0.025^2)\\
# \mu &\sim \mathcal{N}(1.9, 0.8^2)
# \end{align*}
#
# ### Analytic Posterior
#
# For this very simple case it is easy to write down the posterior distribution (up to some constant). First, note that the likelihood for the data can be written as
#
# \begin{align*}
# \mathcal{L} &\propto \prod_{i=1}^n \frac{1}{\sigma} \exp\left(-\frac{1}{2\sigma^2}(Y_i-\mu)^2 \right) \\
# & = \frac{1}{\sigma^n} \exp\left(-\frac{1}{2\sigma^2}\sum_{i=1}^n (Y_i-\mu)^2 \right)\\
# & \propto \exp\left(-\frac{n}{2\sigma^2} (\bar{Y}-\mu)^2 \right)
# \end{align*}
#
# Multiplying the prior through we get the following for the posterior:
#
# $$
# \mathrm{P}(\mu|\mathbf{Y}) \propto \exp \left(-\frac{n}{2\sigma^2} (\bar{Y}-\mu)^2 \right) \exp\left(-\frac{1}{2\sigma_0^2}(\mu-\mu_0)^2 \right)
# $$
#
# You can re-arrange, complete the square, etc, to get a new expression that is like
#
# $$
# \mathrm{P}(\mu|\mathbf{Y}) \propto \exp \left(-\frac{1}{2\sigma_p^2} (\mu_p-\mu)^2 \right)
# $$
#
# where
#
# \begin{align*}
# \mu_p & = \frac{n\sigma_0^2}{\sigma^2 + n\sigma_0^2} \bar{Y} + \frac{\sigma^2}{\sigma^2 + n\sigma_0^2} \mu_0\\
# & \\
# \sigma_p^2 & = \left( \frac{n}{\sigma^2} + \frac{1}{\sigma_0^2} \right)^{-1}
# \end{align*}
#
# Instead of writing this last in terms of the variances, we could instead use precision (the inverse variance) which gives a simpler expression:
#
# $$
# \tau_p = n\tau + \tau_0
# $$
#
# Just like in our earlier example, our estimate of the mean is a weighted average of the data and the prior, with the variance being determined by the data and prior variances.
#
# So lets write a little function to calculate $\mu_p$ and $\tau_p$ and the plug in our numbers:
## Posterior precision for a normal mean with known data precision:
## tau_p = n * tau + tau0.
tau.post <- function(tau, tau0, n) {
  return(n * tau + tau0)
}
## Posterior mean for a normal mean with known variance: a weighted average
## of the sample mean (Ybar) and the prior mean (mu0), with weights
## n*sig20 and sig2 respectively.
mu.post <- function(Ybar, mu0, sig20, sig2, n) {
  denom <- n * sig20 + sig2
  (n * sig20 * Ybar + sig2 * mu0) / denom
}
# Let's plot 3 things together -- the data histogram, the prior, and the posterior:
# +
mu0 <- 1.9  # prior mean for mu (mm), from other midge populations
# NOTE(review): the text chooses sigma0 = 0.8 (a standard deviation), so the
# prior *variance* would be 0.64; here 0.8 is used directly as the variance -- confirm
s20 <- 0.8
s2 <- 0.025 ## "true" variance
mp <- mu.post(Ybar=m, mu0=mu0, sig20=s20, sig2=s2, n=n) # posterior mean
tp <- tau.post(tau=1/s2, tau0=1/s20, n=n)               # posterior precision
# -
# Let's plot the result:
x <- seq(1.3,2.3, length=1000)
hist(Y,breaks=10,xlab="Wing Length (mm)", xlim=c(1.3, 2.3),
freq=FALSE, ylim=c(0,8))
lines(x, dnorm(x, mean=mu0, sd=sqrt(s20)), col=2, lty=2, lwd=2) ## prior
lines(x, dnorm(x, mean=mp, sd=sqrt(1/tp)), col=4, lwd=2) ## posterior (sd = 1/sqrt(precision))
legend("topleft", legend=c("prior", "posterior"), col=c(2,4), lty=c(2,1), lwd=2)
# ### Exercise: Prior sensitivity
#
# Change the values of the mean and the variance that you choose for the prior ("hyperparameters"). What does this do to the posterior distribution. E.g., what happens if the variance you choose is small, and $\mu_0 =2.5$ or so. Is this what you expect?
#
#
# ### Numerical evaluation of the posterior with JAGS
#
# Let's show that we can get the same thing from JAGS that we were able to get from the analytic results. You'll need to make sure you have installed JAGS (which must be done outside of R) and then the libraries `rjags` and `coda`.
# Load libraries
# Interfaces to JAGS and MCMC diagnostics
require(rjags) # does the fitting
require(coda) # makes diagnostic plots
##require(mcmcplots) # another option for diagnostic plots
# #### Specifying the model
#
# First we must encode our choices for our data model and priors to pass them to the fitting routines in JAGS. This involves setting up a ${\tt model}$ that includes the likelihood for each data point and a prior for every parameter we want to estimate. Here is an example of how we would do this for the simple model we fit for the midge data (note that JAGS uses the precision instead of the variance or sd for the normal distribution:
# +
# JAGS model specification: iid normal likelihood with known precision tau and
# a normal prior on the mean mu. (The body below is a string handed to JAGS,
# so its internal "##" comments are JAGS-side comments, not R comments.
# JAGS parameterizes dnorm by mean and precision, not variance.)
model1 <- "model{
## Likelihood
for(i in 1:n){
Y[i] ~ dnorm(mu,tau)
}
## Prior for mu
mu ~ dnorm(mu0,tau0)
} ## close model
"
# -
# Now create the JAGS model:
# Compile the JAGS model, passing the data and fixed hyperparameters
model <- jags.model(textConnection(model1),
n.chains = 1, ## usually do more
data = list(Y=Y,n=n, ## data
mu0=mu0, tau0=1/s20, ## hyperparams
tau = 1/s2 ## known precision
),
inits=list(mu=3) ## set a starting value
)
# Now we'll run the MCMC and see how the output looks for a short chain with no burnin:
# +
samp <- coda.samples(model,
variable.names=c("mu"),
n.iter=1000, progress.bar="none")
plot(samp)
# -
# MCMC is a rejection algorithm that often needs to converge or "burn-in" -- that is we need to potentially move until we're taking draws from the correct distribution. Unlike for optimization problems, this does not mean that the algorithm heads toward a single value. Instead we're looking for a pattern where the draws are seemingly unrelated and random. To assess convergence we look at trace plots, the goal is to get traces that look like "fuzzy caterpillars".
#
# Sometimes at the beginning of a run, if we start far from the area near the posterior mean of the parameter, we will instead get something that looks like a trending time series. If this is the case we have to drop the samples that were taken during the burn-in phase. Here's an example of how to do that:
# +
update(model, 10000, progress.bar="none") # Burnin for 10000 samples (discarded)
# Draw 20000 post-burn-in samples of mu
samp <- coda.samples(model,
variable.names=c("mu"),
n.iter=20000, progress.bar="none")
plot(samp)
# -
# This is a very fuzzy caterpillar!
#
# We can also use the summary function to examine the samples generated:
summary(samp) # posterior summary statistics for the sampled chain
# Let's compare these draws to what we got with our analytic solution:
x <- seq(1.3,2.3, length=1000)
hist(samp[[1]], xlab="mu", xlim=c(1.3, 2.3),
freq=FALSE, ylim=c(0,8), main ="posterior samples")
lines(x, dnorm(x, mean=mu0, sd=sqrt(s20)), col=2, lty=2, lwd=2) ## prior
lines(x, dnorm(x, mean=mp, sd=sqrt(1/tp)), col=4, lwd=2) ## analytic posterior
legend("topleft", legend=c("prior", "analytic posterior"), col=c(2,4), lty=c(2,1), lwd=2)
# It worked!
#
#
# As with the analytic approach, it's always a good idea when you run your analyses to see how sensitive is your result to the priors you choose. Unless you are purposefully choosing an informative prior, we usually want the prior and posterior to look different.
#
#
# ### Estimating the population variance
#
# One advantage of the numerical approach is that we can choose almost anything we want for the priors on multiple parameters without worrying if they are conjugate, or if we want to include additional information. For example, let's say that, now, we want to force the mean to be positive (and also the data, perhaps), and concurrently estimate the variance. Here is a possible model.
# +
# JAGS model with the data truncated at zero and an unknown precision tau,
# which gets a vague gamma prior; the variance s2 is derived inside the model.
model2 <- "model{
# Likelihood
for(i in 1:n){
Y[i] ~ dnorm(mu,tau) T(0,) ## truncates at 0
}
# Prior for mu
mu ~ dnorm(mu0,tau0)
# Prior for the precision
tau ~ dgamma(a, b)
# Compute the variance
s2 <- 1/tau
}"
## hyperparams for tau
a <- 0.01
b <- 0.01
m2 <- jags.model(textConnection(model2),
n.chains = 1,
data = list(Y=Y, n=n,
mu0=mu0, tau0=1/s20, ## mu hyperparams
a=a, b=b ## tau hyperparams
),
inits=list(mu=3, tau=10) ## starting vals
)
samp <- coda.samples(m2,
variable.names=c("mu","s2"),
n.iter=1000, progress.bar="none")
plot(samp)
summary(samp)
# -
# Now we plot each with their priors:
# +
par(mfrow=c(1,2), bty="n")
hist(samp[[1]][,1], xlab="samples of mu", main="mu")
lines(x, dnorm(x, mean=mu0, sd=sqrt(s20)),
col=2, lty=2, lwd=2) ## prior
x2 <- seq(0, 200, length=1000)
# column 2 of the samples holds s2, so 1/s2 recovers draws of the precision tau
hist(1/samp[[1]][,2], xlab="samples of tau", main="tau")
lines(x2, dgamma(x2, shape = a, rate = b),
col=2, lty=2, lwd=2) ## prior
# -
# We also want to look at the joint distribution of $\mu$ and $\sigma^2$:
plot(as.numeric(samp[[1]][,1]), samp[[1]][,2], xlab="mu", ylab="s2")
# ### Exercise: Updating the Bayesian model
#
# Redo the previous analysis placing a gamma prior on $\mu$ as well. Set the prior so that the mean and variance are the same as in the normal example from above (use moment matching). Do you get something similar?
# ## Aedes data revisited using Bayesian fitting
#
# Now let's do some Bayesian model fitting to *Aedes* thermal performance data. Let's try out the `R2jags` package for this.
require(R2jags) # fitting
require(coda) # diagnostic plots
set.seed(1234) # reproducible MCMC runs
# Load the data:
Aaeg.data <- read.csv("../data/AeaegyptiTraitData.csv") # Aedes aegypti trait data
# ### The Data
#
# These data are traits from *Aedes aegypti* mosquitoes measured across temperature in lab experiments. The traits for which we have thermal performance data are:
# - pEA: proportion surviving from egg to adulthood
# - MDR: mosquito development rate
# - PDR: parasite development rate (= 1/EIP the extrinsic incubation period)
# - $\mu$ (mu): death rate (= 1/longevity)
#
# Note that some of the traits come in multiple forms (e.g., $\mu$ and 1/$\mu$, PDR and EIP).
#
# Have a look at the data:
head(Aaeg.data)
mu.data <- subset(Aaeg.data, trait.name == "mu")   # death rate observations
lf.data <- subset(Aaeg.data, trait.name == "1/mu") # lifespan observations
par(mfrow=c(1,2), bty="l")
plot(trait ~ T, data = mu.data, ylab="mu")
plot(trait ~ T, data = lf.data, ylab="1/mu")
# Note that the $\mu$ data is u-shaped and the lifespan data is unimodal (hump-shaped).
#
# Since thermal biology theory is based on unimodal thermal responses, we want to fit the trait as lifespan instead of $\mu$. Thus, we'll need to convert the $\mu$ data to lifespan by taking the inverse. The combined data should have a nice unimodal shape that we can fit a function to:
# +
mu.data.inv <- mu.data # make a copy of the mu data
mu.data.inv$trait <- 1/mu.data$trait # take the inverse of the trait values to convert mu to lifespan
lf.data.comb <- rbind(mu.data.inv, lf.data) # combine both lifespan data sets together
plot(trait ~ T, data = lf.data.comb, ylab="1/mu")
# -
# ### Two thermal performance curve models
# Most thermal response curves can be reasonably fit using one of two thermal reponses. Traits that respond unimodally but symmetrically to temperature can be fit with a quadratic function:
#
# $ B = q (T-T_0) (T-T_m)$
#
# Traits that respond unimodally but asymmetrically can be fitted with a Briere function (see definition [here](Miniproj-TPCs-Models)).
#
# In both models, $T_0$ is the lower thermal limit, $T_m$ is the upper thermal limit (i.e., where the trait value goes to zero on either end), and $q$ scales the elevation of the curve, (and so also the value at the optimum temperature).
#
# ### The thermal response model file
# Unlike the previous Bayesian example, here we will provide JAGS with the model written as a `.txt` file. This can be in your working directory, or elsewhere (but then input the full path to it --- ideally a relative path).
#
# You can either write the text yourself directly to the file, or create it using the sink() function via your R script (see below):
# Write the quadratic thermal-response model to a file JAGS can read.
# (The cat() body is a string: its comments and indicator terms like
# (cf.Tm > temp[i]) -- which zero the curve outside [T0, Tm] -- are JAGS code.)
sink("quad.txt") # create a file
cat("
model{
## Priors
cf.q ~ dunif(0, 1)
cf.T0 ~ dunif(0, 24)
cf.Tm ~ dunif(25, 45)
cf.sigma ~ dunif(0, 1000)
cf.tau <- 1 / (cf.sigma * cf.sigma)
## Likelihood
for(i in 1:N.obs){
trait.mu[i] <- -1 * cf.q * (temp[i] - cf.T0) * (temp[i] - cf.Tm) * (cf.Tm > temp[i]) * (cf.T0 < temp[i])
trait[i] ~ dnorm(trait.mu[i], cf.tau)
}
## Derived Quantities and Predictions
for(i in 1:N.Temp.xs){
z.trait.mu.pred[i] <- -1 * cf.q * (Temp.xs[i] - cf.T0) * (Temp.xs[i] - cf.Tm) * (cf.Tm > Temp.xs[i]) * (cf.T0 < Temp.xs[i])
}
} # close model
",fill=T)
sink()
# Note that the model file `quad.txt` has two mandatory sections (the priors and the likelihood) and one optional section (derived measures calculated from your fitted parameters).
#
# In the example below for a quadratic function, most of the priors are specified via uniform distributions (the two arguments specify the lower and upper bounds, respectively). Note that unlike in R and most other programs, in jags, the inverse of the variance of the normal distribution is used, denoted by $\tau\ (= \frac{1}{\sigma^2})$.
#
# The likelihood can be interpreted as follows: the observed data are normally distributed where the mean at a given temperature follows the quadratic equation.
# Now, prepare the data for jags:
# +
# Parameters to Estimate
parameters <- c("cf.q", "cf.T0", "cf.Tm","cf.sigma", "z.trait.mu.pred")
# Initial values for the parameters
inits <- function(){list(
cf.q = 0.01,
cf.Tm = 35,
cf.T0 = 5,
cf.sigma = rlnorm(1))}
# MCMC Settings: number of posterior dist elements = [(ni - nb) / nt ] * nc
ni <- 25000 # number of iterations in each chain
nb <- 5000 # number of 'burn in' iterations to discard
nt <- 8 # thinning rate - jags saves every nt iterations in each chain
nc <- 3 # number of chains
# Temperature sequence for derived quantity calculations
Temp.xs <- seq(0, 45, 0.2) # 0-45C in 0.2C steps (226 values)
N.Temp.xs <- length(Temp.xs)
### Fitting the trait thermal response; Pull out data columns as vectors
data <- lf.data.comb # this lets us reuse the same generic code: we only change this first line
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
# Bundle all data in a list for JAGS
jag.data <- list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs)
# -
# Now run the fitting using jags:
lf.fit <- jags(data=jag.data, inits=inits, parameters.to.save=parameters,
model.file="quad.txt", n.thin=nt, n.chains=nc, n.burnin=nb,
n.iter=ni, DIC=T, working.directory=getwd())
# Change into "mcmc" type samples for visualization with the `coda` package:
lf.fit.mcmc <- as.mcmc(lf.fit)
# ### Running diagnostics
#
# View the parameters (only the first 5 lines, or it will also show you all of your derived quantities):
lf.fit$BUGSoutput$summary[1:5,]
# Plot the chains:
plot(lf.fit.mcmc[,c(1,3,4)])
# ### Plot the fits
plot(trait ~ T, xlim = c(0, 45), ylim = c(0,42), data = lf.data.comb, ylab = "Lifespan for Ae. aegypti", xlab = "Temperature")
lines(lf.fit$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2) # lower 95% credible bound of the predicted curve
lines(lf.fit$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2) # upper 95% credible bound
lines(lf.fit$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs) # posterior mean prediction
# ### Additional analyses
#
# You can use the `which.max()` function to find the optimal temperature for adult lifespan:
Temp.xs[which.max(as.vector(lf.fit$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"]))]
# You can also pull out the lifespan values for each iteration of the MCMC chain over the temperature gradient to calculate $R_0$:
lf.grad <- lf.fit$BUGSoutput$sims.list$z.trait.mu.pred
dim(lf.grad) # A matrix with 7500 iterations of the MCMC chains at 226 temperatures
# ## Fitting of abundance data
#
# We will now perform a bayesian analysis of population growth data.
# Libraries for the Bayesian analysis of the abundance (growth) data
require(R2jags) # does the fitting
require(coda) # makes diagnostic plots
library(IDPmisc) # makes nice colored pairs plots to look at joint posteriors
# ### The Data
#
# These data are observations of the amphibian fungal pathogen _Batrachochytrium dendrobatidis_ being grown in liquid culture at multiple different temperatures. The experiment is conducted in 96 well plates with a fixed initial inoculation of fungal spores in each well, and the plate placed in a constant temperature incubator. Each day, 8 wells per plate are observed and the optical density (OD) is measured. We will focus on a single temperature trial across multiple plates with OD as the response.
#
# We will fit a logistic model to these growth data.
#
# Let's have a look at the data first:
dat <- read.csv("../data/lb_ab_temps.csv") # Bd optical density growth data
head(dat)
# We are only interested in a subset of these data, so we will subset out only those from experiment 2, and a temperature of 12$^\circ$C.
d2 <- dat[which(dat$EXP==2),2:8] # keep experiment 2 rows; columns 2-8 only
d2 <- d2[which(d2$TEMP==12),]    # keep only the 12C trials
summary(d2)
# Now plot it:
# +
# NOTE(review): despite the name, Temps is a grid of *times* (days) used as the x-axis
Temps <- seq(0,max(d2$DAY)-1, by=0.05)
mycol <- 1
my.ylim <- c(0, 0.5)
my.title <- "LB-AB isolate, T=12C"
plot(d2$DAY-1, d2$OD, xlim=c(0,max(Temps)), ylim=my.ylim,
pch=(mycol+20),
xlab="time (days)", ylab="",
main=my.title,
col=mycol+1, cex=1.5)
# -
# ### Specifying the growth curve
#
# Although logistic growth is often written as a differential equation, here we will work with the analytic solution of the model:
#
# $$
# \mu(t) = \frac{KY_0}{Y_0+(K-Y_0)\exp{(-rt)}}
# $$
#
# This gives the mean function that we want to fit. We will assume log-normal noise around this response, as the optical density is bounded to be greater than 0 and since we also have increasing variance over time (as the optical density increases).
#
#
# ### The thermal response model file
#
# JAGS needs the model written as a `.txt` or `.bug` file inside the working directory. You can either make the text file directly, or create it using the `sink()` function in your R script, as follows:
# Write the logistic-growth model (analytic solution as the mean, log-normal
# observation noise) to a file JAGS can read. The cat() body is JAGS code.
sink("jags-logistic.bug")
cat("
model {
## Likelihood
for (i in 1:N) {
Y[i] ~ dlnorm(log(mu[i]), tau)
mu[i] <- K * Y0/(Y0+(K-Y0) * exp(-r * t[i]))
}
## Priors
r~dexp(1000)
K ~ dunif(0.01, 0.6)
Y0 ~ dunif(0.09, 0.15)
tau <- 1/sigma^2
sigma ~ dexp(0.1)
} # close model
",fill=T)
sink()
# Note that the model file has two mandatory sections (the priors and the likelihood) and one optional section (derived quantities calculated from your fitted parameters).
#
# In the example below we will build the model function with the log-normal likelihood for the logistic growth function. Priors are a combination of uniform and exponential distributions. As with the normal distribution, jags uses $\tau$ to parameterize the variance of the normal distribution ($\tau = 1/(\sigma^2)$). However it can be easier to specify the prior on sigma directly. In this example we will generate posterior samples of derived quantities outside of JAGS (so you can see what this is actually doing).
#
#
# ### Additional settings for jags
#
# Now for some additional settings/specifications for jags:
# +
# Parameters to Estimate
parameters <- c('Y0', 'K', 'r', 'sigma')
# Initial values for the parameters
inits <- function(){list(
Y0 = 0.1,
K = 0.4,
r = 0.1,
sigma = rlnorm(1))}
# MCMC Settings: number of posterior dist elements = [(ni - nb) / nt ] * nc
ni <- 6000 # number of iterations in each chain
nb <- 1000 # number of 'burn in' iterations to discard
nt <- 1 # thinning rate - jags saves every nt iterations in each chain
nc <- 5 # number of chains
# -
# ### Fitting the model
#
# Now we can run jags:
# +
# Pull out data columns as vectors
data <- d2 # this lets us reuse the same generic code: we only change this first line
Y <- data$OD   # optical density (response)
N <- length(Y)
t <- data$DAY  # observation day
# Bundle all data in a list for JAGS
jag.data <- list(Y = Y, N = N, t = t)
# -
# Run JAGS
OD.12C <- jags(data=jag.data, inits=inits, parameters.to.save=parameters,
model.file="jags-logistic.bug", n.thin=nt, n.chains=nc, n.burnin=nb,
n.iter=ni, DIC=T, working.directory=getwd())
# Change into "mcmc" type samples for visualization with `coda`:
OD.12C.mcmc <- as.mcmc(OD.12C) # convert to coda's mcmc format for diagnostics
# ### Diagnostics
#
# As you did in the [Traits Bayesian fitting example](#Aedes-data-revisited-using-Bayesian-fitting), there are a number of model diagnostics that we need to check. First we want to look at the chains and confirm that they look like "fuzzy caterpillars" -- no linear/non-linear patterns across the chains, low auto-correlation, etc.
#
# First view the fitted parameters:
OD.12C$BUGSoutput$summary
# Plot the chains using the coda package:
plot(OD.12C.mcmc[,c(1,2,4)])
# We can examine the ACF of the chains as well, similarly to a time series:
s1 <- as.data.frame(OD.12C.mcmc[[1]])
par(mfrow=c(2,2))
for(i in 2:5) acf(s1[,i], lag.max=20) # columns 2:5 are the model parameters (column 1 is presumably the deviance -- verify)
# There is still a bit of autocorrelation, but it isn't too bad. The chain for $\sigma$ is mixing best. We could reduce the autocorrelation even further by thinning the chain (i.e., change the `nt` parameter to 5 or 10).
#
# The last important diagnostic is to compare the prior and posterior distributions. Various packages in R have bespoke functions to do this. Here we use functions that we provide in the `mcmc_utils.R` file provided on the website.
source("../code/mcmc_utils.R") # provides plot.hists() used below
# We also can write a function to put the samples into a convenient format for visualizing, etc:
# +
# Stack the posterior draws from all nc chains into one data frame.
samps <- NULL
for(i in 1:nc){
    samps <- rbind(samps, as.data.frame(OD.12C.mcmc[[i]]))
}
# Keep only the four model parameters, reordered to (Y0, K, r, sigma).
# NOTE(review): assumes column 1 of the mcmc output is the deviance --
# confirm against colnames(samps).
samps <- samps[,c(5,2,3,4)]
# -
# And also, we can build a list to hold all the information about the priors for each parameter:
# Prior specification used for the prior-vs-posterior plots below: the
# distribution family and hyper-parameters for each monitored parameter
# (columns = Y0, K, r, sigma; NA pads unused hyper-parameter slots).
priors <- list()
priors$names <- c("Y0", "K", "r","sigma")
priors$fun <- c("uniform", "uniform", "exp","exp")
priors$hyper <- matrix(NA, ncol=4, nrow=3)
priors$hyper[,1] <- c(0.09, 0.15, NA)   # Y0 ~ Unif(0.09, 0.15)
priors$hyper[,2] <- c(0.01, 0.6, NA)    # K  ~ Unif(0.01, 0.6)
priors$hyper[,3] <- c(1000, NA, NA)     # r  ~ Exp(1000)
priors$hyper[,4] <- c(0.1, NA, NA)      # sigma ~ Exp(0.1)
# Now we can plot the histograms of the posterior samples together with the prior distributions:
plot.hists(samps, my.par=c(2,2), n.hists=4, priors=priors, mai=c(0.5, 0.5, 0.25, 0.2))
# The prior distribution here is very different from the posterior. These data are highly informative for the parameters of interest and are very unlikely to be influenced much by the prior distribution (although you can always change the priors to check this). However, notice that $Y_0$ (the initial condition) is truncated by the prior. This is a fairly strong prior, because we know something about the initial optical density that is typical for the experimental set-up with the density of inoculum used and with a properly calibrated set-up.
# ### Visualizing the joint posterior of parameters
#
# It's often useful to also look at the joint distribution of all of your parameters together. Of course, if you have a high dimensional posterior, rendering a 2-D representation can be difficult. Instead, the standard is to examine the pair-wise posterior distribution, for instance as follows (using the `s1` data frame we created above):
ipairs(s1[,2:5], ztransf = function(x){x[x<1] <- 1; log2(x)})
# As you can see, estimates of $r$ and $K$ are highly correlated -- not surprising given the interplay between them in the logistic growth function. This correlation is an important feature of the system, and we use the full posterior distribution that includes this correlation when we want to build the corresponding posterior distribution of the behavior of the logistic function.
#
# ### The posterior distribution of the mean function
#
# The final step is to check how well we are fitting the data. To do this we usually examine the posterior distribution of the mean function of our system, in this case the distribution of the logistic solution and compare this to the data. To do this, for each of our posterior samples (or a thinned subset), we plug the parameters for the $i^{\mathrm th}$ sample $\theta_i$ into our function of interest, and evaluate the function as a desired set of $x$'s. For instance, for logistic growth, we'll evaluate
# $$
# \mu(t) = \frac{K_iY_{0,i}}{Y_{0,i}+(K_i-Y_{0,i})\exp{(-r_it)}}
# $$
# for the $i^{\mathrm th}$ set of parameters for a sequence of times, $t$. Thus we obtain points describing the curve $\mu_i(t)$ for each set of parameters. Here is one way to do this:
# +
# Logistic growth solution: mu(t) = K*Y0 / (Y0 + (K - Y0) * exp(-r*t)).
my.logistic <- function(t, Y0, K, r){
    return(K * Y0/(Y0+(K-Y0) * exp(-r * t)))
}

# Time grid and a thinned subset of posterior draws (every 10th sample).
ts <- seq(0, 40, length=100)
ss <- seq(1, dim(samps)[1], by=10)
my.curves <- matrix(NA, nrow=length(ss), ncol=length(ts))
# BUG FIX: the original indexed samps$Y0[i] etc., i.e. it used the first
# length(ss) consecutive draws; the thinning indices in `ss` were computed
# but never applied.  Index through ss[i] so every 10th draw is evaluated.
for(i in 1:length(ss)){
    my.curves[i,] <- my.logistic(t=ts, Y0=samps$Y0[ss[i]], K=samps$K[ss[i]], r=samps$r[ss[i]])
}
# -
# We can now plot all of these curves:
plot(ts, my.curves[1,], col=1, type="l", ylim=c(0.09, 0.36),
ylab="predicted OD", xlab="time (days)")
for(i in 2:length(ss)) lines(ts, my.curves[i,], col=i)
# Then we can summarize this posterior using the `apply` function to find the mean and the (for simplicity) quantile based 95% CI:
# Pointwise posterior summaries of the mean curve across the time grid.
m.log <- apply(my.curves, 2, mean)                  # posterior mean
l.log <- apply(my.curves, 2, quantile, probs=0.025) # lower 95% quantile
u.log <- apply(my.curves, 2, quantile, probs=0.975) # upper 95% quantile
# For comparison, here is how to find the 95% HPD Interval across time, using the `HPDinterval` function from the `coda` package:
# Column-bind into a 2 x length(ts) matrix (row 1 = lower, row 2 = upper).
hpd.log <- NULL
for(i in 1:length(ts)){
    hpd.log <- cbind(hpd.log, as.numeric(HPDinterval(mcmc(my.curves[,i]))))
}
# And plot these together with the data (in this case the HPD and quantile based intervals are indistinguishable):
# +
# Plot the raw data together with the posterior summaries of the mean curve.
my.ylim <- c(0.09, 0.45)
my.title <- "LB-AB isolate, T=12C"
# NOTE(review): xlim uses max(Temps), defined elsewhere in the notebook --
# presumably the maximum observation time; confirm it matches the data range.
plot(d2$DAY-1, d2$OD, xlim=c(0,max(Temps)), ylim=my.ylim,
     pch=(mycol+20),
     xlab="time (days)", ylab="",
     main=my.title,
     col="grey", cex=1.5)
lines(ts, m.log, col=1, lwd=2)               # posterior mean
lines(ts, l.log, col=2, lwd=2, lty=2)        # quantile-based 95% CI
lines(ts, u.log, col=2, lwd=2, lty=2)
lines(ts, hpd.log[1,], col=3, lwd=2, lty=3)  # 95% HPD interval
lines(ts, hpd.log[2,], col=3, lwd=2, lty=3)
# -
# Note that this only shows the uncertainty in the *mean function* -- the assumed model with log normal noise says that the observations simply have this mean. The fit is attributing the majority of the observed noise to process error rather than parameter uncertainty.
# ## Readings and Resources <a id='Readings'></a>
#
# * <NAME>. Ecological models and data in R. (Princeton University Press, 2008).
#
# * https://cran.r-project.org/web/packages/bayestestR/vignettes/bayestestR.html
| content/_build/jupyter_execute/notebooks/5-ModelFitting-Bayesian.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Distribucion chi cuadrado
import scipy.stats as ss
import pandas as pd
# +
# ss.chisquare?
# -
# **1.**
# +
# Observed and expected counts for a 4-category goodness-of-fit test.
f_observed = [15, 11, 10, 12]
f_expected = [12, 12, 12, 12]
k_chi = 4  # number of categories
m_chi = 0  # number of parameters estimated from the data
df = k_chi - m_chi - 1
# -
# BUG FIX: scipy's third positional argument is `ddof` (the *extra* degrees
# of freedom to subtract beyond k-1), NOT the degrees of freedom.  Passing
# df here made scipy use k - 1 - df = 0 dof.  With m_chi estimated
# parameters the correct adjustment is ddof=m_chi.
ss.chisquare(f_observed, f_expected, ddof=m_chi)
ss.chi2.ppf(0.95, df)  # critical value at alpha = 0.05 with df = 3
# ****
# **2.**
def np(n, p):
    """Expected count for n trials with success probability p."""
    return n * p
# +
f_observed = [62, 10, 13]
f_expected = [np(85, 0.6), np(85, 0.1), np(85, 0.3)]
k_chi = 3  # number of categories
m_chi = 0  # number of parameters estimated from the data
df_chi = k_chi - m_chi - 1
# -
# BUG FIX: the third positional argument of ss.chisquare is `ddof`, not the
# degrees of freedom; passing df_chi shrank the test's dof to
# k - 1 - df_chi = 0.  The correct adjustment is ddof=m_chi.
ss.chisquare(f_observed, f_expected, ddof=m_chi)
ss.chi2.ppf(0.90, df_chi)  # critical value at alpha = 0.10 with df = 2
# ***
# **3.**
# Problem 3 setup: hypothesized N(600, 10) population, n = 1000 samples.
mu = 600  # hypothesized population mean
sd = 10   # hypothesized standard deviation
n = 1000  # sample size
f_observed = [20, 142, 310, 370, 128, 30]  # observed counts in 6 bins
# NOTE(review): the `...` below is a bare Ellipsis placeholder -- the
# expected frequencies and the chi-square test were never written.
...
# ***
# **4.**
# +
# Problem 4: 3x3 contingency table of income level vs. perceived importance.
ingreso_bajo = [83, 52, 63]         # low-income row
ingreso_medio = [62, 71, 58]        # middle-income row
ingreso_alto = [37, 49, 63]         # high-income row
importancia_grande = [83, 62, 37]   # column views of the same table (unused below)
importancia_moderado = [52, 71, 49]
importancia_poco = [63, 58, 63]
alpha = 0.01
# -
# Flatten the table into 9 cells; expected counts were precomputed by hand.
f_observed = ingreso_bajo + ingreso_medio + ingreso_alto
f_expected = [66.98, 63.32, 67.72] + [64.62, 61.06, 65.32] + [50.41, 47.64, 50.96]
df = (3 - 1)*(3 - 1)
df
# NOTE(review): the third positional argument of ss.chisquare is `ddof`, not
# df.  Here it happens to work out: with k = 9 cells, dof = k - 1 - ddof =
# 9 - 1 - 4 = 4, which equals the contingency-table df -- correct by
# coincidence, not by intent.  Prefer spelling it ddof=k - 1 - df explicitly.
ss.chisquare(f_observed, f_expected, df)
ss.chi2.ppf(0.99, df)
# ***
# **5.**
# Problem 5 (unfinished).
f_observed = [55, 47, 98]
# NOTE(review): `np()` is called with no arguments, but the helper defined
# above requires (n, p) -- this line raises TypeError as written; the
# expected-frequency parameters were never filled in.
f_expected = [np()]
| examples/CHI+CUADRADO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="http://hilpisch.com/tpq_logo.png" alt="The Python Quants" width="35%" align="right" border="0"><br>
# # Python for Finance (2nd ed.)
#
# **Mastering Data-Driven Finance**
#
# © Dr. <NAME> | The Python Quants GmbH
#
# <img src="http://hilpisch.com/images/py4fi_2nd_shadow.png" width="300px" align="left">
# # Trading Strategies (b)
import numpy as np
import pandas as pd
import datetime as dt
from pylab import mpl, plt
import warnings
warnings.simplefilter('ignore')
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
np.random.seed(1000)
# %matplotlib inline
# ## Linear OLS Regression
# ### The Data
raw = pd.read_csv('../../source/tr_eikon_eod_data.csv',
index_col=0, parse_dates=True).dropna()
raw.columns
symbol = 'EUR='
data = pd.DataFrame(raw[symbol])
data['returns'] = np.log(data / data.shift(1))
data.dropna(inplace=True)
data['direction'] = np.sign(data['returns']).astype(int)
data.head()
data['returns'].hist(bins=35, figsize=(10, 6));
# plt.savefig('../../images/ch15/strat_ml_01.png')
lags = 2

def create_lags(data):
    """Add `lags` lagged-return columns to *data*.

    Column names ('lag_1', 'lag_2', ...) are recorded in the module-level
    global `cols`, which later cells use as the feature list.
    """
    global cols
    cols = ['lag_{}'.format(k) for k in range(1, lags + 1)]
    for k, col in enumerate(cols, start=1):
        data[col] = data['returns'].shift(k)
create_lags(data)
data.head()
data.dropna(inplace=True)
data.plot.scatter(x='lag_1', y='lag_2', c='returns',
cmap='coolwarm', figsize=(10, 6), colorbar=True)
plt.axvline(0, c='r', ls='--')
plt.axhline(0, c='r', ls='--');
# plt.savefig('../../images/ch15/strat_ml_02.png');
# ### Regression
from sklearn.linear_model import LinearRegression
model = LinearRegression()
# Regress next-day returns (pos_ols_1) and direction (pos_ols_2) on the
# lagged returns; the in-sample predictions are used directly as signals.
data['pos_ols_1'] = model.fit(data[cols], data['returns']).predict(data[cols])
data['pos_ols_2'] = model.fit(data[cols], data['direction']).predict(data[cols])
data[['pos_ols_1', 'pos_ols_2']].head()
# Turn the real-valued predictions into long/short positions (+1/-1).
data[['pos_ols_1', 'pos_ols_2']] = np.where(
    data[['pos_ols_1', 'pos_ols_2']] > 0, 1, -1)
data['pos_ols_1'].value_counts()
data['pos_ols_2'].value_counts()
# Number of position changes (trades) for each variant.
(data['pos_ols_1'].diff() != 0).sum()
(data['pos_ols_2'].diff() != 0).sum()
# Strategy log return = position * market log return.
data['strat_ols_1'] = data['pos_ols_1'] * data['returns']
data['strat_ols_2'] = data['pos_ols_2'] * data['returns']
data[['returns', 'strat_ols_1', 'strat_ols_2']].sum().apply(np.exp)
# Hit ratio: how often the predicted position matches the realized direction.
(data['direction'] == data['pos_ols_1']).value_counts()
(data['direction'] == data['pos_ols_2']).value_counts()
data[['returns', 'strat_ols_1', 'strat_ols_2']].cumsum(
    ).apply(np.exp).plot(figsize=(10, 6));
# plt.savefig('../../images/ch15/strat_ml_03.png');
# ## Clustering
from sklearn.cluster import KMeans
model = KMeans(n_clusters=2, random_state=0) # <1>
model.fit(data[cols])
data['pos_clus'] = model.predict(data[cols])
data['pos_clus'] = np.where(data['pos_clus'] == 1, -1, 1)
data['pos_clus'].values
plt.figure(figsize=(10, 6))
plt.scatter(data[cols].iloc[:, 0], data[cols].iloc[:, 1],
c=data['pos_clus'], cmap='coolwarm');
# plt.savefig('../../images/ch15/strat_ml_04.png');
data['strat_clus'] = data['pos_clus'] * data['returns']
data[['returns', 'strat_clus']].sum().apply(np.exp)
(data['direction'] == data['pos_clus']).value_counts()
data[['returns', 'strat_clus']].cumsum().apply(np.exp).plot(figsize=(10, 6));
# plt.savefig('../../images/ch15/strat_ml_05.png');
# ## Frequency Approach
def create_bins(data, bins=None):
    """Digitize each lag column named in the global `cols` into integer bins.

    Adds a `<col>_bin` column per lag column and records the new column
    names in the module-level global `cols_bin`.

    Parameters
    ----------
    data : DataFrame containing the columns listed in `cols`.
    bins : sequence of bin edges for np.digitize; defaults to [0]
        (i.e. negative vs. non-negative returns).
    """
    global cols_bin
    # BUG FIX: the original used a mutable default argument (bins=[0]);
    # use the None sentinel and build the default inside the call instead.
    if bins is None:
        bins = [0]
    cols_bin = []
    for col in cols:
        col_bin = col + '_bin'
        data[col_bin] = np.digitize(data[col], bins=bins)
        cols_bin.append(col_bin)
create_bins(data)
data[cols_bin + ['direction']].head()
grouped = data.groupby(cols_bin + ['direction'])
grouped.size()
res = grouped['direction'].size().unstack(fill_value=0)
def highlight_max(s):
is_max = s == s.max()
return ['background-color: yellow' if v else '' for v in is_max]
res.style.apply(highlight_max, axis=1)
data['pos_freq'] = np.where(data[cols_bin].sum(axis=1) == 2, -1, 1)
(data['direction'] == data['pos_freq']).value_counts()
data['strat_freq'] = data['pos_freq'] * data['returns']
data[['returns', 'strat_freq']].sum().apply(np.exp)
data[['returns', 'strat_freq']].cumsum().apply(np.exp).plot(figsize=(10, 6));
# plt.savefig('../../images/ch15/strat_ml_06.png');
# ## Classification Algorithms
from sklearn import linear_model
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
C = 1
models = {
'log_reg': linear_model.LogisticRegression(C=C),
'gauss_nb': GaussianNB(),
'svm': SVC(C=C)
}
def fit_models(data):
    """Fit every classifier in the global `models` dict on the binned lag
    features (`cols_bin`) against the market direction.

    The estimators are fitted in place; nothing is returned.
    """
    # The original built a {name: fitted_model} dict and discarded it;
    # .fit() already mutates the estimators, so just call it.
    for model in models.values():
        model.fit(data[cols_bin], data['direction'])
fit_models(data)
def derive_positions(data):
    """Store each fitted model's predicted market position in a
    `pos_<model-name>` column of *data*."""
    for name, model in models.items():
        data['pos_' + name] = model.predict(data[cols_bin])
derive_positions(data)
def evaluate_strats(data):
    """Compute per-model strategy returns (`strat_<name>` columns).

    The module-level global `sel` is rebuilt to list the benchmark
    'returns' column first, followed by every strategy column.
    """
    global sel
    strat_cols = []
    for name in models:
        strat_col = 'strat_' + name
        data[strat_col] = data['pos_' + name] * data['returns']
        strat_cols.append(strat_col)
    sel = ['returns'] + strat_cols
evaluate_strats(data)
sel.insert(1, 'strat_freq')
data[sel].sum().apply(np.exp)
data[sel].cumsum().apply(np.exp).plot(figsize=(10, 6));
# plt.savefig('../../images/ch15/strat_ml_07.png')
data = pd.DataFrame(raw[symbol])
data['returns'] = np.log(data / data.shift(1))
data['direction'] = np.sign(data['returns'])
lags = 5
create_lags(data)
data.dropna(inplace=True)
create_bins(data)
cols_bin
data[cols_bin].head()
data.dropna(inplace=True)
fit_models(data)
derive_positions(data)
evaluate_strats(data)
data[sel].sum().apply(np.exp)
data[sel].cumsum().apply(np.exp).plot(figsize=(10, 6));
# plt.savefig('../../images/ch15/strat_ml_08.png');
mu = data['returns'].mean()
v = data['returns'].std()
bins = [mu - v, mu, mu + v]
bins
create_bins(data, bins)
data[cols_bin].head()
fit_models(data)
derive_positions(data)
evaluate_strats(data)
data[sel].sum().apply(np.exp)
data[sel].cumsum().apply(np.exp).plot(figsize=(10, 6));
# plt.savefig('../../images/ch15/strat_ml_09.png')
# ### Sequential Train-Test Split
split = int(len(data) * 0.5)
train = data.iloc[:split].copy()
fit_models(train)
test = data.iloc[split:].copy()
derive_positions(test)
evaluate_strats(test)
test[sel].sum().apply(np.exp)
test[sel].cumsum().apply(np.exp).plot(figsize=(10, 6));
# plt.savefig('../../images/ch15/strat_ml_10.png');
# ### Randomized Train-Test Split
from sklearn.model_selection import train_test_split
train, test = train_test_split(data, test_size=0.5,
shuffle=True, random_state=100)
train = train.copy().sort_index()
train[cols_bin].head()
test = test.copy().sort_index()
fit_models(train)
derive_positions(test)
evaluate_strats(test)
test[sel].sum().apply(np.exp)
test[sel].cumsum().apply(np.exp).plot(figsize=(10, 6));
# plt.savefig('../../images/ch15/strat_ml_11.png');
# ## Deep Neural Network
# ### DNN with scikit-learn
from sklearn.neural_network import MLPClassifier
model = MLPClassifier(solver='lbfgs', alpha=1e-5,
hidden_layer_sizes=2 * [250], random_state=1)
# %time model.fit(data[cols_bin], data['direction'])
data['pos_dnn_sk'] = model.predict(data[cols_bin])
data['strat_dnn_sk'] = data['pos_dnn_sk'] * data['returns']
data[['returns', 'strat_dnn_sk']].sum().apply(np.exp)
data[['returns', 'strat_dnn_sk']].cumsum().apply(np.exp).plot(figsize=(10, 6));
# plt.savefig('../../images/ch15/strat_ml_12.png');
train, test = train_test_split(data, test_size=0.5, random_state=100)
train = train.copy().sort_index()
test = test.copy().sort_index()
model = MLPClassifier(solver='lbfgs', alpha=1e-5, max_iter=500,
hidden_layer_sizes=3 * [500], random_state=1)
# %time model.fit(train[cols_bin], train['direction'])
test['pos_dnn_sk'] = model.predict(test[cols_bin])
test['strat_dnn_sk'] = test['pos_dnn_sk'] * test['returns']
test[['returns', 'strat_dnn_sk']].sum().apply(np.exp)
test[['returns', 'strat_dnn_sk']].cumsum().apply(np.exp).plot(figsize=(10, 6));
# plt.savefig('../../images/ch15/strat_ml_13.png');
# ### DNN with TensorFlow
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
fc = [tf.contrib.layers.real_valued_column('lags', dimension=lags)]
model = tf.contrib.learn.DNNClassifier(hidden_units=3 * [500],
n_classes=len(bins) + 1,
feature_columns=fc)
def input_fn():
    """Feed function for tf.contrib.learn: (features, labels) tensors.

    Reads the module-level `data` DataFrame (deliberately re-bound to
    train/test before each fit/evaluate call below).  The direction label
    is mapped to {0, 1} for the binary classifier.
    """
    fc = {'lags': tf.constant(data[cols_bin].values)}
    la = tf.constant(data['direction'].apply(lambda x: 0 if x < 0 else 1).values,
                     shape=[data['direction'].size, 1])
    return fc, la
# %time model.fit(input_fn=input_fn, steps=500)
model.evaluate(input_fn=input_fn, steps=1)
pred = np.array(list(model.predict(input_fn=input_fn)))
pred[:10]
data['pos_dnn_tf'] = np.where(pred > 0, 1, -1)
data['strat_dnn_tf'] = data['pos_dnn_tf'] * data['returns']
data[['returns', 'strat_dnn_tf']].sum().apply(np.exp)
data[['returns', 'strat_dnn_tf']].cumsum(
).apply(np.exp).plot(figsize=(10, 6));
# plt.savefig('../../images/ch15/strat_ml_14.png');
model = tf.contrib.learn.DNNClassifier(hidden_units=3 * [500],
n_classes=len(bins) + 1,
feature_columns=fc)
data = train
# %time model.fit(input_fn=input_fn, steps=2500)
data = test
model.evaluate(input_fn=input_fn, steps=1)
pred = np.array(list(model.predict(input_fn=input_fn)))
test['pos_dnn_tf'] = np.where(pred > 0, 1, -1)
test['strat_dnn_tf'] = test['pos_dnn_tf'] * test['returns']
test[['returns', 'strat_dnn_sk', 'strat_dnn_tf']].sum().apply(np.exp)
test[['returns', 'strat_dnn_sk', 'strat_dnn_tf']].cumsum(
).apply(np.exp).plot(figsize=(10, 6));
# plt.savefig('../../images/ch15/strat_ml_15.png');
# <img src="http://hilpisch.com/tpq_logo.png" alt="The Python Quants" width="35%" align="right" border="0"><br>
#
# <a href="http://tpq.io" target="_blank">http://tpq.io</a> | <a href="http://twitter.com/dyjh" target="_blank">@dyjh</a> | <a href="mailto:<EMAIL>"><EMAIL></a>
| code/ch15/15_trading_strategies_b.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# load packages
import pandas as pd
import statsmodels.tsa.stattools as stats
import statsmodels.graphics.tsaplots as sg
import matplotlib.pyplot as plt
import matplotlib
# %matplotlib inline
import sys
from datetime import datetime
import numpy as np
import networkx as nx
from nxpd import draw
from nxpd import nxpdParams
nxpdParams['show'] = 'ipynb'
sys.path.append("../pipelines")
import Pipelines as tdw
data_folder = "/projects/p20519/roller_output/optimizing_window_size/RandomForest/insilico_size10_1/"
output_path = "/home/jjw036/Roller/insilico_size10_1"
current_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
data_folder = "../output/insilico_size10_1"
file_path = "../data/gnw_insilico/network_data/Yeast/Yeast-3_timeseries.tsv"
# Parameters for a single time-delayed-window/RandomForest pipeline run;
# see Pipelines.get_td_stats for the meaning of each key.
run_params = {'data_folder': data_folder,
              'file_path': file_path,
              'td_window': 10,          # width of the sliding time window
              'min_lag': 1,
              'max_lag': 3,
              'n_trees': 10,
              'permutation_n': 10,
              'lag_method': 'mean_mean',
              'calc_mse': False,
              'bootstrap_n': 1000,
              'n_trials': 1,
              'run_time': current_time,
              'sort_by': 'rank',
              'iterating_param': 'td_window',
              }
# ROC / precision-recall summaries and the fitted roller object.
roc, pr, tdr = tdw.get_td_stats(**run_params)
# -
def get_experiment_list(filename):
    """Split a stacked GNW time-series TSV into one DataFrame per experiment.

    The file holds 10 stacked time courses of 21 rows each (time points
    0..20).  Each returned DataFrame is indexed by its 'Time' column.
    """
    # load the full time course
    timecourse = pd.read_csv(filename, sep="\t")
    # divide into list of dataframes, one 21-row chunk per experiment
    experiments = []
    expected_experiments = 10
    rows_per_experiment = 21
    # BUG FIX / modernization: the original used DataFrame.ix (removed in
    # pandas >= 1.0) with inclusive integer slices (.ix[i:i+20] -> 21 rows);
    # .iloc with an exclusive end index selects the same chunks.
    for start in range(0, expected_experiments * rows_per_experiment,
                       rows_per_experiment):
        experiments.append(timecourse.iloc[start:start + rows_per_experiment])
    # reformat: re-index each chunk by its time column
    for idx, exp in enumerate(experiments):
        experiments[idx] = exp.set_index('Time')
    return experiments
experiments=get_experiment_list("../data/gnw_insilico/network_data/Yeast/Yeast-3_dream4_timeseries.tsv")
kinds = 10
colors = plt.get_cmap('hsv')(np.linspace(0, 0.8, kinds))
# +
# plot the G1 time series for every experiment on shared axes
for idx, experiment in enumerate(experiments):
    ax = experiment['G1'].plot(linewidth=2, label=str(idx+1), color=colors[idx])
ax.set_ylabel('Normalized Intensity', fontweight='bold')
ax.set_xlabel('Time', fontweight='bold')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# -
# BUG FIX: the trailing `tdr.` was an unfinished attribute access and a
# SyntaxError that broke the cell; commented out until the intended
# expression is known.
# tdr.
| scripts/notebooks/Checking the shape of residuals of certain experiments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from collections import defaultdict
import json
import requests
from IPython.display import Image
import networkx as nx
from py2cytoscape.util import from_networkx
# -
#XXX
def get_psiquic_uniprot(query, **kwargs):
    """Query the UniProt PSICQUIC web service and return the response text.

    Any extra keyword arguments are forwarded as URL query parameters;
    the MITAB 'format' defaults to 'tab27' unless overridden.
    """
    kwargs.setdefault('format', 'tab27')
    server = ('http://www.ebi.ac.uk/Tools/webservices/psicquic/'
              'uniprot/webservices/current/search/query')
    response = requests.get('%s/%s' % (server, query), params=kwargs)
    return response.content.decode('utf-8')
# +
genes_species = defaultdict(set)
interactions = {}
def get_gene_name(my_id, alt_names):
    """Extract the gene name from a '|'-separated MITAB alt-names field.

    Entries look like 'db:NAME(gene name)'; if no '(gene name)' entry is
    present, fall back to '<my_id>?'.
    """
    for entry in alt_names.split('|'):
        if not entry.endswith('(gene name)'):
            continue
        start = entry.find(':') + 1
        stop = entry.find('(')
        return entry[start:stop]
    return my_id + '?'  # no gene-name annotation found
def get_vernacular_tax(tax):
    """Return the text inside the parentheses of the first '|'-separated
    taxonomy entry, e.g. 'taxid:9606(Homo sapiens)|...' -> 'Homo sapiens'.

    NOTE: the parenthesis offset is located on the *full* string (mirroring
    the original); equivalent as long as the first entry contains the
    field's first '('.
    """
    first_entry = tax.split('|')[0]
    open_paren = tax.find('(')
    return first_entry[open_paren + 1:-1]
def add_interactions(species):
for rec in species.split('\n'):
toks = rec.rstrip().split('\t')
if len(toks) < 15:
continue # empty line at the end
id1 = toks[0][toks[0].find(':') + 1:]
id2 = toks[1][toks[1].find(':') + 1:]
gene1, gene2 = get_gene_name(id1, toks[4]), get_gene_name(id2, toks[5])
#print(toks[6])
tax1, tax2 = get_vernacular_tax(toks[9]), get_vernacular_tax(toks[10])
inter_type = toks[11][toks[11].find('(') + 1:-1]
miscore = float(toks[14].split(':')[1])
genes_species[tax1].add(gene1)
genes_species[tax2].add(gene2)
interactions[((tax1, gene1), (tax2, gene2))] = {'score': miscore, 'type': inter_type}
# -
human = get_psiquic_uniprot('uniprotkb:P04637')
add_interactions(human)
rat = get_psiquic_uniprot('uniprotkb:P10361')
add_interactions(rat)
mouse = get_psiquic_uniprot('uniprotkb:P02340')
add_interactions(mouse)
# +
def get_node_id(species, gene):
    """Return a display label for *gene*, disambiguated by *species*.

    Human genes keep their bare name; mouse/rat are tagged with a single
    letter, any other species with its full name.
    """
    if species == 'human':
        return gene
    tag = species[0] if species in ('mouse', 'rat') else species
    return '%s (%s)' % (gene, tag)
graph = nx.Graph()
# One node per (species, gene), labeled via get_node_id; species and gene
# are kept as node attributes so the Cytoscape style can map on them later.
for species, genes in genes_species.items():
    #print(species)
    for gene in genes:
        name = get_node_id(species, gene)
        #print(gene, name)
        graph.add_node(get_node_id(species, gene),
                       species=species, gene=gene)
# One edge per recorded interaction, annotated with its type and MI score.
for (i1, i2), attribs in interactions.items():
    tax1, gene1 = i1
    tax2, gene2 = i2
    graph.add_edge(get_node_id(tax1, gene1),
                   get_node_id(tax2, gene2),
                   interaction=attribs['type'],
                   score=attribs['score'])
# +
#XXX
server = 'http://localhost:1234/v1'
p53_interactions = from_networkx(graph)
p53_net = requests.post(server + '/networks', data=json.dumps(p53_interactions),
headers={'Content-Type': 'application/json'})
net_id = p53_net.json()['networkSUID']
requests.get('%s/apply/layouts/circular/%d' % (server, net_id))
requests.get('%s/apply/styles/Gradient1/%d' % (server, net_id)) #XXX
Image('%s/networks/%d/views/first.png' % (server, net_id))
# -
res = requests.get(server + '/networks',
headers={'Content-Type': 'application/json'})
print(res.content)
res = requests.get(server + '/apply/styles',
headers={'Content-Type': 'application/json'})
print(res.json())
res = requests.get(server + '/styles',
headers={'Content-Type': 'application/json'})
res.content
res = requests.get(server + '/styles/default',
headers={'Content-Type': 'application/json'})
print(json.dumps(json.loads(res.content), indent=4))
# Custom Cytoscape visual style: node fill colour keyed on the 'species'
# attribute, node label passed through from 'gene', and the edge tooltip
# from the 'interaction' attribute.
ustyle = {
    'title': 'Color style',
    'mappings': [
        {'mappingType': 'discrete',
         'map': [
             {'key': 'human', 'value': '#00FF00'},
             {'key': 'rat', 'value': '#FF00FF'},
             {'key': 'mouse', 'value': '#00FFFF'}],
         'visualProperty': 'NODE_FILL_COLOR',
         'mappingColumnType': 'String',
         'mappingColumn': 'species'},
        {
            'mappingType': 'passthrough',
            'visualProperty': 'NODE_LABEL',
            'mappingColumnType': 'String',
            'mappingColumn': 'gene'},
        {
            'mappingType': 'passthrough',
            'visualProperty': 'EDGE_TOOLTIP',
            'mappingColumnType': 'String',
            'mappingColumn': 'interaction'
        }],
    # Fallback colour for species without a discrete mapping entry.
    'defaults': [{"visualProperty": "NODE_FILL_COLOR",
                  "value": "#FFFFFF"}]}
# +
res = requests.post(server + "/styles", data=json.dumps(ustyle),
headers={'Content-Type': 'application/json'})
requests.get('%s/apply/layouts/force-directed/%d' % (server, net_id))
res = requests.get('%s/apply/styles/Color style/%d' % (server, net_id),
headers={'Content-Type': 'application/json'})
Image('%s/networks/%s/views/first.png' % (server, net_id))
# -
| Chapter10/Cytoscape.ipynb |