# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Coriolis data # + c_mediantime_u2c = [18.64910125732422, 20.380020141601562, 22.42565155029297, 23.2696533203125, 25.140047073364258, 26.30472183227539, 27.899742126464844, 29.104948043823242, 30.965805053710938, 32.634735107421875, 33.63490104675293, 34.868717193603516, 36.48042678833008, 37.41025924682617, 38.77878189086914, 40.209293365478516, 41.86868667602539, 42.83547401428223, 44.42930221557617, 45.56536674499512, 46.65970802307129, 48.329830169677734, 49.550533294677734, 50.54473876953125, 51.83100700378418, 52.85024642944336, 54.35943603515625, 55.435895919799805, 56.77461624145508, 58.32552909851074, 59.34953689575195, 60.42957305908203, 62.09135055541992, 63.7054443359375, 65.28019905090332, 66.13016128540039, 67.2292709350586, 69.31543350219727, 69.99969482421875, 71.04992866516113, 72.25990295410156, 73.55928421020508, 75.40583610534668, 77.24523544311523, 77.76021957397461, 78.98449897766113, 80.52587509155273, 81.6798210144043, 83.30106735229492, 84.11169052124023, 85.39557456970215, 86.91906929016113, 88.25063705444336, 89.25914764404297, 90.78025817871094, 91.69578552246094, 92.79012680053711, 94.05970573425293, 94.85006332397461, 96.37951850891113] c_mediantime_kdl = [4.117488861083984, 6.435513496398926, 9.155511856079102, 10.486602783203125, 13.03708553314209, 14.961957931518555, 16.76809787750244, 18.2039737701416, 22.426128387451172, 22.84705638885498, 25.113463401794434, 26.46350860595703, 28.499364852905273, 29.76202964782715, 31.69703483581543, 30.338525772094727, 34.574031829833984, 35.83359718322754, 37.642478942871094, 39.30199146270752, 40.798068046569824, 43.19453239440918, 43.807387351989746, 45.5775260925293, 46.96798324584961, 48.28500747680664, 49.95584487915039, 51.602959632873535, 53.21156978607178, 56.24985694885254, 56.090593338012695, 57.94048309326172, 63.721060752868645, 65.39106369018555, 66.51949882507324, 68.55809688568115, 70.23537158966064, 72.20292091369629, 74.01394844055176, 75.62804222106934, 76.41994953155518, 78.02450656890869, 81.16114139556885, 81.99048042297363, 83.39691162109375, 84.87904071807861, 86.87245845794678, 88.41896057128906, 89.12348747253418, 90.92998504638672, 92.13197231292725, 93.87993812561035, 96.56190872192383, 97.88048267364502, 99.04098510742188, 100.4629135131836, 101.98497772216797, 104.00497913360596, 104.94303703308105, 106.24003410339355] c_mediantime_rbdl = [2.4919509887695312, 3.7300586700439453, 4.9304962158203125, 6.162047386169434, 7.366418838500977, 8.558392524719238, 9.774565696716309, 11.063575744628906, 12.217521667480469, 13.454079627990723, 14.842033386230469, 15.93053340911865, 17.27902889251709, 18.459439277648926, 19.8519229888916, 21.511554718017578, 22.70197868347168, 24.052023887634277, 25.29597282409668, 26.21936798095703, 27.533411979675293, 28.84650230407715, 30.360937118530273, 31.448006629943848, 32.40799903869629, 33.50496292114258, 35.588979721069336, 36.52298450469971, 38.152098655700684, 39.792537689208984, 41.00656509399414, 42.181968688964844, 43.37489604949951, 44.217467308044434, 45.8369255065918, 46.6158390045166, 48.394083976745605, 49.49784278869629, 50.67849159240723, 52.19447612762451, 53.591012954711914, 54.6414852142334, 55.84454536437988, 57.07049369812012, 58.89463424682617, 60.02843379974365, 61.81204319000245, 64.35596942901611, 66.10047817230225, 
66.4759874343872, 68.28546524047852, 69.91684436798096, 70.11997699737549, 71.64037227630615, 73.20737838745117, 73.36342334747314, 74.43904876708984, 76.19643211364746, 77.24857330322266, 77.93164253234863] c_mediantime_pb = [2.1539926528930664, 3.2196044921875, 4.279494285583496, 5.330085754394531, 6.478548049926758, 7.5664520263671875, 8.534550666809082, 9.559154510498047, 10.650992393493652, 11.770963668823242, 12.817025184631348, 13.890981674194336, 15.01917839050293, 16.110539436340332, 17.28653907775879, 18.354058265686035, 19.620418548583984, 20.54297924041748, 21.502017974853516, 22.614598274230957, 23.53835105895996, 24.584054946899414, 25.924086570739746, 27.024507522583008, 27.907967567443848, 28.985977172851562, 29.915571212768555, 31.078100204467773, 32.52303600311279, 33.42747688293457, 34.480929374694824, 35.5679988861084, 36.55600547790527, 37.66000270843506, 38.70546817779541, 39.62206840515137, 41.22352600097656, 41.996002197265625, 43.47407817840576, 44.171929359436035, 44.97992992401123, 46.469926834106445, 47.39546775817871, 48.85852336883545, 49.990057945251465, 50.899386405944824, 52.05643177032471, 53.40993404388428, 54.5344352722168, 55.592894554138184, 56.026458740234375, 57.17957019805908, 58.703064918518066, 59.72254276275635, 60.86409091949463, 62.18349933624268, 63.02201747894288, 64.37647342681885, 65.29247760772705, 67.31152534484863] # - # # Inertia Matrix data m_mediantime_u2c = [11.04593276977539, 12.140035629272461, 12.778043746948242, 13.712882995605469, 14.522075653076172, 15.7470703125, 16.708850860595703, 17.9140567779541, 18.76997947692871, 20.158052444458008, 20.84803581237793, 21.976947784423828, 22.9339599609375, 24.44291114807129, 25.02894401550293, 26.507854461669922, 27.024030685424805, 28.831005096435547, 29.722213745117188, 31.453847885131832, 31.87084198, 32.1400165558, 34.2885255814, 36.2010002136, 36.9510650635, 38.7690067291, 40.4341220856, 41.5549278259, 42.4410104752, 44.2705154419, 45.3280210495, 46.9764471054, 48.0484962463, 49.4686365128, 50.2220392227, 51.1219501495, 52.4309873581, 53.1059503555, 56.6220283508, 57.7000379562, 60.578584671, 61.3634586334, 61.4830255508, 62.8745555878, 61.9555711746, 64.868927002, 64.6095275879, 65.4614181519, 66.7331218719, 67.8889751434, 67.9500102997, 68.8090324402, 69.8690414429, 73.560880661, 75.0380754471, 75.9049654007, 79.2629718781, 80.1260786057, 81.2560319901, 82.5889110565] m_mediantime_kdl = [3.7598609924316406, 5.660057067871094, 7.52568244934082, 8.740425109863281, 11.289119720458984, 12.55035400390625, 13.728141784667969, 15.025138854980469, 18.210411071777344, 20.13564109802246, 21.409988403320312, 22.93109893798828, 23.870468139648438, 25.799274444580078, 27.085542678833008, 25.129318237304688, 29.768943786621094, 31.45933151245117, 32.56082534790039, 34.050941467285156, 34.815073013305664, 35.88557243347168, 38.71560096740723, 40.79937934875488, 42.10472106933594, 41.43953323364258, 43.43986511230469, 44.7392463684082, 46.42963409423828, 48.06995391845703, 49.15475845336914, 51.03468894958496, 58.06922912597656, 58.441162109375, 61.889886856079094, 62.069892883300774, 64.81051445007324, 65.74392318725586, 66.75004959106445, 69.78034973144531, 69.94962692260742, 72.00479507446289, 73.78101348876953, 75.93035697937012, 76.81012153625488, 80.22427558898926, 80.1396369934082, 82.40938186645508, 83.86492729187012, 85.42537689208984, 88.87529373168945, 90.53468704223633, 92.30375289916992, 92.35501289367676, 95.46041488647461, 96.46058082580566, 98.85072708129883, 102.41508483886719, 
102.89430618286133, 103.7454605102539] m_mediantime_rbdl = [1.7786026000976562, 2.460479736328125, 3.3807754516601562, 3.9386749267578125, 4.730224609375, 5.509853363037109, 6.279945373535156, 7.169246673583984, 8.001327514648438, 8.838176727294922, 9.739398956298828, 10.671615600585938, 11.699199676513672, 12.481212615966797, 13.430118560791016, 14.710426330566406, 15.687942504882812, 16.56055450439453, 17.53091812133789, 18.59903335571289, 19.750595092773438, 20.580291748046875, 21.93927764892578, 23.349523544311523, 25.415420532226562, 25.04110336303711, 26.434659957885742, 27.32992172241211, 28.960704803466797, 30.00020980834961, 31.51893615722656, 32.47976303100586, 33.75053405761719, 35.130977630615234, 36.48996353149414, 37.51516342163086, 39.2913818359375, 40.19021987915039, 41.66126251220703, 42.94872283935547, 44.44003105163574, 45.76921463012695, 47.39046096801758, 48.82097244262695, 50.339698791503906, 52.79421806335449, 53.46059799194336, 54.80527877807617, 56.75554275512695, 58.48407745361328, 60.23883819580078, 61.34986877441406, 63.33589553833007, 64.71037864685059, 66.46990776062012, 68.69077682495117, 69.6706771850586, 71.9451904296875, 73.62484931945801, 74.72038269042969] m_mediantime_pb = [2.7108192443847656, 3.6787986755371094, 4.544258117675781, 5.738735198974609, 6.527900695800781, 7.5244903564453125, 8.6212158203125, 9.940862655639648, 10.960102081298828, 12.30478286743164, 13.320446014404297, 14.74142074584961, 15.990734100341795, 17.51422882080078, 18.949508666992188, 20.890235900878906, 22.3541259765625, 23.903846740722656, 26.170015335083008, 27.489662170410156, 29.64496612548828, 30.98964691162109, 32.78493881225586, 35.250186920166016, 36.67473793029785, 38.90514373779297, 40.58122634887695, 42.78421401977539, 44.83938217163086, 46.999454498291016, 49.09992218017578, 52.36029624938965, 54.035186767578125, 57.28006362915039, 58.90965461730957, 61.26999855041504, 64.95952606201172, 66.38050079345703, 71.3646411895752, 72.91555404663086, 76.06983184814453, 78.63044738769531, 79.74386215209961, 83.30464363098145, 86.55428886413574, 95.36981582641602, 92.64945983886719, 96.67634963989258, 99.20120239257812, 99.69949722290039, 104.3844223022461, 113.25478553771973, 115.97037315368652, 120.66483497619629, 124.19939041137694, 127.22015380859375, 131.1182975769043, 137.40062713623047, 141.27016067504883, 143.55897903442383] # + #m_mediantime_pb = [3.5175085067749023, 4.42659854888916, 5.229473114013672, 6.476044654846191, 7.431387901306152, 8.818984031677246, 9.890556335449219, 11.23189926147461, 12.416481971740723, 13.454437255859375, 14.88792896270752, 16.224026679992676, 17.449378967285156, 19.863009452819824, 20.735502243041992, 22.911548614501953, 24.59096908569336, 25.933504104614258, 27.73904800415039, 29.400110244750977, 31.47304058074951, 34.17706489562988, 35.48848628997803, 37.6816987991333, 39.52789306640625, 42.05167293548584, 43.267011642456055, 45.38094997406006, 47.11496829986572, 49.68106746673584, 52.46901512145996, 56.107401847839355, 57.186007499694824, 60.510993003845215, 62.11853027343749, 67.51465797424316, 68.53759288787842, 71.05803489685059, 75.4094123840332, 76.94792747497559, 81.26354217529297, 83.05299282073975, 85.18445491790771, 87.94105052947998, 92.79251098632812, 94.04850006103516, 98.14047813415527, 101.12738609313965, 104.63082790374756, 103.20150852203369, 108.00695419311523, 115.7374382019043, 122.65110015869142, 126.07145309448244, 128.32999229431152, 133.07499885559082, 135.86747646331787, 142.05753803253174, 144.62244510650635, 
150.02501010894775] #m_mediantime_rbdl = [1.611948013305664, 2.3889541625976562, 3.1033754348754883, 3.9070844650268555, 4.903554916381836, 5.527496337890625, 6.384611129760742, 7.376432418823242, 8.125066757202148, 9.069561958312988, 9.950995445251465, 10.905027389526367, 11.819124221801758, 12.701988220214844, 13.746023178100586, 14.968514442443848, 16.173481941223145, 16.9374942779541, 18.002033233642578, 19.014477729797363, 20.142555236816406, 21.61848545074463, 22.63045310974121, 23.71954917907715, 24.70099925994873, 25.843501091003418, 27.00495719909668, 27.99665927886963, 29.49702739715576, 30.93957901000977, 32.195091247558594, 33.450961112976074, 34.926533699035645, 36.1325740814209, 37.31942176818848, 38.79594802856445, 40.521979331970215, 41.765570640563965, 43.196916580200195, 44.57402229309082, 48.748016357421875, 47.58501052856445, 49.16954040527344, 50.568580627441406, 51.74684524536133, 53.44057083129883, 55.696964263916016, 60.89198589324951, 62.875986099243164, 59.81254577636719, 61.38849258422852, 62.783956527709954, 64.93496894836426, 66.14506244659424, 67.85190105438232, 69.44000720977783, 71.39551639556885, 73.59004020690918, 75.18362998962402, 77.1646499633789] #m_mediantime_kdl = [3.6329030990600586, 5.401849746704102, 7.786393165588378, 8.836984634399414, 11.50202751159668, 12.853860855102539, 14.652490615844727, 15.48302173614502, 18.937110900878906, 20.114541053771973, 22.31597900390625, 23.79012107849121, 25.232434272766113, 26.260972023010254, 28.041958808898926, 26.27706527709961, 30.606985092163086, 31.948804855346676, 34.66308116912842, 34.18588638305664, 36.9490385055542, 37.578582763671875, 39.68203067779541, 40.749430656433105, 41.86356067657471, 43.27356815338135, 45.53103446960449, 45.881032943725586, 48.54893684387207, 50.22537708282471, 52.15859413146973, 53.415894508361816, 59.69703197479248, 61.68866157531738, 62.31260299682617, 65.6435489654541, 66.50495529174805, 68.09592247009277, 69.52095031738281, 70.75798511505127, 74.12254810333252, 74.91445541381836, 76.97951793670654, 78.46498489379883, 81.7270278930664, 84.07139778137207, 84.91158485412598, 85.77144145965576, 87.10205554962158, 88.8904333114624, 93.06061267852783, 94.34759616851807, 94.12848949432373, 97.33104705810547, 98.50549697875977, 100.13353824615479, 101.1359691619873, 104.6684980392456, 106.06098175048828, 110.97145080566406] # - # # Gravity data g_mediantime_pb = [1.8095755577087402, 2.475109100341797, 3.139989376068115, 3.834383487701416, 4.501025676727295, 5.107426643371581, 5.9264326095581055, 6.471765041351318, 7.154989242553711, 7.872254848480224, 8.479957580566404, 9.129445552825928, 9.955053329467773, 10.631892681121826, 11.319694519042969, 11.981468200683594, 12.754757404327394, 13.193230628967285, 13.812751770019531, 14.689686298370361, 15.489659309387207, 16.187498569488522, 16.861143112182617, 17.383425235748287, 17.9993462562561, 18.74884605407715, 19.566819667816162, 19.97016668319702, 20.650088787078857, 21.367990970611572, 22.234876155853275, 23.235337734222416, 23.58630418777466, 24.453125, 25.355980396270752, 25.67467451095581, 26.227688789367672, 27.818915843963627, 28.383493423461914, 29.031071662902832, 29.52975988388061, 29.992406368255615, 30.54370641708374, 31.40413761138916, 31.935918331146247, 32.356059551239014, 33.39998006820679, 33.85247945785522, 34.60090160369873, 35.58570146560669, 35.895805358886726, 36.80224895477295, 37.76356935501099, 38.18687677383423, 39.00727987289429, 40.049707889556885, 40.52453279495239, 41.6756272315979, 
43.04287672042847, 42.77008056640625] g_mediantime_rbdl = [2.374606132507324, 3.210878372192383, 3.9236021041870113, 4.79036808013916, 5.509002208709717, 6.341042518615722, 7.135868072509766, 8.132548332214355, 9.01064395904541, 9.65226411819458, 10.557887554168701, 11.405014991760254, 12.382495403289795, 13.136789798736572, 14.110348224639893, 15.024368762969969, 15.87709426879883, 16.514391899108887, 17.251763343811035, 18.253695964813232, 19.114110469818115, 20.11986494064331, 20.9744930267334, 21.523876190185543, 22.66432046890259, 23.577570915222168, 24.190232753753662, 24.87872838973999, 26.12510919570923, 27.62578010559082, 28.67181062698364, 28.32266330718994, 29.303653240203857, 31.35446071624756, 32.73613452911377, 31.62386655807495, 32.811436653137214, 33.948259353637695, 35.29504299163819, 35.70797920227051, 36.50183439254761, 37.649619579315186, 38.77591848373413, 39.26527500152588, 40.43872594833374, 41.17545127868652, 42.14541196823121, 42.697439193725586, 43.35010051727295, 43.437981605529785, 45.01125574111939, 45.605230331420906, 46.17865800857543, 46.91054105758667, 48.045477867126465, 49.06139135360718, 49.68689203262329, 51.94620609283447, 52.8057861328125, 53.72889518737793] g_mediantime_kdl = [3.8357138633728023, 5.305647850036621, 7.698192596435548, 8.42416763305664, 10.901355743408203, 12.145826816558838, 13.322329521179201, 14.325551986694336, 17.49695301055908, 18.587660789489746, 20.375888347625732, 22.024636268615723, 22.582294940948483, 23.733305931091305, 24.401531219482422, 23.799991607666016, 26.30746364593506, 28.33914041519165, 28.336141109466553, 30.34139633178711, 30.964453220367435, 32.15212106704711, 34.600629806518555, 33.99909019470215, 35.706915855407715, 36.98373079299927, 37.346930503845215, 38.474719524383545, 39.72282886505127, 41.01938724517822, 42.141242027282715, 43.00230264663696, 48.83130550384521, 49.98097896575928, 50.92024803161621, 52.06825733184815, 53.52915287017822, 54.38394546508789, 55.24159669876098, 56.782608032226555, 59.30697441101075, 58.48400115966797, 60.49091577529908, 60.87111711502075, 62.77466297149658, 64.17330265045166, 64.39862489700316, 65.3045892715454, 65.96392393112183, 67.58105278015135, 68.10322046279907, 70.16756534576415, 70.81200122833252, 71.9815993309021, 73.02575826644896, 73.77649068832397, 75.3312611579895, 78.17374706268312, 78.52348089218138, 78.82068157196046] g_mediantime_u2c = [11.18612289428711, 12.346982955932617, 13.24009895324707, 13.983011245727539, 14.953851699829102, 15.485048294067383, 16.077041625976562, 16.869783401489258, 17.420053482055664, 18.084049224853516, 18.81694793701172, 19.488096237182617, 20.1570987701416, 20.964860916137695, 21.463871002197266, 22.57704734802246, 23.0710506439209, 24.495840072631836, 24.66106414794922, 25.84099769592285, 26.08013153076172, 27.240991592407227, 27.80294418334961, 28.54299545288086, 29.22201156616211, 29.72698211669922, 30.545949935913086, 31.442880630493164, 31.620025634765625, 32.8369140625, 33.22410583496094, 33.644914627075195, 34.81888771057129, 35.501956939697266, 36.06414794921875, 36.914825439453125, 37.652015686035156, 38.491010665893555, 38.89179229736328, 39.78395462036133, 39.94917869567871, 40.708065032958984, 41.62406921386719, 42.08707809448242, 42.79589653015137, 43.52903366088867, 44.10505294799805, 44.899940490722656, 45.185089111328125, 46.53620719909668, 46.64897918701172, 47.99818992614746, 47.95408248901367, 49.02291297912598, 49.076080322265625, 50.55403709411621, 50.6739616394043, 52.08301544189453, 51.86915397644043, 
53.266048431396484] # # Inverse Dynamics data id_mediantime_pb = [2.716064453125, 4.394412040710449, 6.011486053466797, 7.660984992980957, 9.321451187133789, 10.712146759033203, 12.46500015258789, 14.139413833618164, 15.620946884155275, 17.244458198547363, 18.814563751220703, 20.50197124481201, 22.353053092956543, 23.831963539123535, 25.29597282409668, 27.240514755249023, 28.882503509521484, 30.49290180206299, 31.927585601806637, 33.678412437438965, 35.1715087890625, 37.46938705444336, 38.6735200881958, 40.16900062561035, 41.730642318725586, 43.32256317138672, 44.8685884475708, 47.110557556152344, 48.954010009765625, 51.19752883911133, 51.80704593658447, 53.558945655822754, 55.091023445129395, 56.85746669769287, 58.35151672363281, 59.750914573669434, 61.977505683898926, 63.22908401489258, 64.31436538696289, 66.25556945800781, 67.84951686859131, 69.72885131835938, 71.2820291519165, 73.5020637512207, 74.96249675750732, 76.97057723999023, 78.07600498199463, 80.12938499450684, 82.9010009765625, 83.9163064956665, 85.19697189331055, 86.49599552154541, 88.84108066558838, 90.39199352264404, 91.98546409606934, 92.95153617858887, 94.66993808746338, 96.55797481536865, 98.4489917755127, 99.70200061798096] id_mediantime_rbdl = [3.462553024291992, 5.229949951171875, 7.110953330993652, 8.92651081085205, 10.658502578735352, 12.4284029006958, 14.149069786071777, 16.25990867614746, 18.267512321472168, 20.40565013885498, 21.80945873260498, 23.618459701538086, 25.47454833984375, 27.588367462158203, 29.30295467376709, 31.32450580596924, 33.35404396057129, 35.01307964324951, 36.7358922958374, 38.78462314605713, 40.28940200805664, 42.88351535797119, 44.388413429260254, 45.758843421936035, 48.317909240722656, 50.14336109161377, 51.94294452667236, 54.701924324035645, 56.66804313659668, 58.359503746032715, 60.22942066192627, 62.2025728225708, 63.89200687408448, 65.32740592956543, 67.43156909942627, 69.04804706573486, 71.82538509368896, 73.0050802230835, 74.80347156524658, 76.67112350463867, 78.51755619049072, 80.08849620819092, 81.74693584442139, 84.50710773468018, 86.22097969055176, 88.9195203781128, 91.24112129211426, 93.97399425506592, 97.02098369598389, 97.47946262359619, 99.93398189544678, 100.66854953765869, 103.25801372528076, 104.8588752746582, 106.2229871749878, 108.0085039138794, 110.26036739349365, 112.0675802230835, 113.17145824432373, 114.68005180358887] # + #id_mediantime_u2c = [25.963544845581055, 28.63490581512451, 30.745029449462894, 32.66751766204834, 35.02500057220459, 36.80062294006348, 39.017438888549805, 40.37344455718994, 43.07401180267334, 44.404029846191406, 46.78499698638916, 48.265933990478516, 50.560951232910156, 54.018378257751465, 54.13353443145752, 55.191993713378906, 58.24851989746094, 59.35096740722656, 61.963558197021484, 63.26007843017578, 65.75798988342285, 66.79248809814453, 69.28050518035889, 70.1138973236084, 72.57997989654541, 73.81808757781982, 76.83050632476807, 77.506422996521, 79.49197292327881, 81.35604858398438, 82.89647102355957, 84.3355655670166, 86.9990587234497, 88.53960037231445, 90.80660343170166, 92.62394905090332, 93.87099742889404, 96.14193439483643, 97.72396087646484, 99.43461418151855, 100.72100162506104, 103.76596450805664, 104.26783561706543, 105.71408271789551, 106.01359367370605, 106.14699935913086, 107.916593552, 108.526919365, 110.712051392, 112.300515175, 115.656992912, 117.425918579, 122.579455376, 122.116668224, 122.965565109, 123.91607666, 123.323559761, 124.918937683, 128.096461296, 130.904555321] id_mediantime_u2c = [25.963544845581055, 
28.63490581512451, 30.745029449462894, 32.66751766204834, 35.02500057220459, 36.80062294006348, 39.017438888549805, 40.37344455718994, 43.07401180267334, 44.404029846191406, 46.78499698638916, 48.265933990478516, 50.560951232910156, 54.018378257751465, 54.13353443145752, 55.191993713378906, 58.24851989746094, 59.35096740722656, 61.963558197021484, 63.26007843017578, 65.75798988342285, 66.79248809814453, 69.28050518035889, 70.1138973236084, 72.57997989654541, 73.81808757781982, 76.83050632476807, 77.506422996521, 79.49197292327881, 81.35604858398438, 82.89647102355957, 84.3355655670166, 86.9990587234497, 88.53960037231445, 90.80660343170166, 92.62394905090332, 93.87099742889404, 96.14193439483643, 97.72396087646484, 97.0984697341919, 99.40099716186523, 101.8819808959961, 105.04662990570068, 105.99148273468018, 107.30350017547607, 110.05055904388428, 111.13643646240234, 113.37780952453613, 115.81432819366455, 116.9663667678833, 119.4080114364624, 120.4071044921875, 122.72107601165773, 123.86465072631835, 125.98204612731935, 126.9575357437134, 129.64951992034912, 130.59663772583008, 132.85958766937256, 134.30416584014893] id_avgtime_u2c = [26.291744709014893, 28.748497962951664, 30.858638286590576, 32.759544849395745, 35.13521671295166, 36.8880009651184, 39.089438915252686, 40.830135345458984, 43.16704750061035, 44.51874017715454, 46.956140995025635, 48.37707757949829, 50.65943956375122, 53.82273674011231, 54.85280990600586, 55.2659010887146, 58.34175109863282, 59.4596791267395, 62.081618309020996, 63.31167221069337, 66.01354837417603, 66.9071912765503, 69.39935445785522, 70.2116584777832, 72.64317989349365, 73.9315414428711, 77.55669116973878, 77.62797355651855, 79.54386472702028, 81.43540382385254, 82.97004699707031, 84.39784288406373, 87.07303285598755, 89.12618160247803, 90.97753047943115, 92.84667015075682, 94.00177478790283, 96.26190423965454, 97.8384280204773, 99.53284740447998, 100.91297626495361, 103.80102634429932, 104.44962978363037, 105.84684848785399, 108.6009407043457, 110.42568683624268, 112.16062068939209, 115.34934043884277, 120.7785439491272, 117.74121761322021, 122.4449348449707, 121.50227308273314, 127.06228494644165, 126.55387878417969, 130.4536509513855, 126.70815944671631, 131.68154239654544, 131.79272174835205, 137.77571201324463, 135.4137110710144] id_mintime_u2c = [25.65598487854004, 28.400897979736328, 30.483007431030273, 32.444000244140625, 34.85298156738281, 36.589860916137695, 38.789987564086914, 40.17901420593262, 42.78397560119629, 44.0831184387207, 46.58913612365723, 48.06399345397949, 50.364017486572266, 51.431894302368164, 53.79009246826172, 55.0079345703125, 57.97982215881348, 59.16404724121094, 61.742067337036126, 63.0350112915039, 65.51122665405273, 66.62201881408691, 69.07892227172852, 69.90790367126465, 72.39413261413574, 73.60410690307617, 76.19404792785645, 77.2390365600586, 79.23293113708496, 81.09617233276367, 82.63111114501953, 84.14912223815918, 86.72595024108887, 88.10782432556152, 90.46196937561035, 92.32211112976074, 93.65510940551758, 95.87597846984863, 97.45597839355469, 99.20501708984375, 100.48913955688477, 103.50418090820312, 103.93214225769043, 105.47494888305664, 108.28304290771484, 109.90500450134277, 111.76800727844238, 113.99388313293457, 118.57008934020996, 117.02609062194824, 121.68288230895996, 120.49007415771484, 125.37789344787599, 123.48008155822754, 127.69317626953124, 126.32918357849123, 131.32214546203613, 130.706787109375, 135.47086715698242, 133.3320140838623] # - print len(id_mediantime_u2c) # # Forward Dynamics data # 
fd_mediantime_rbdl = [3.3309459686279297, 5.360603332519531, 7.278919219970703, 9.443521499633789, 10.923624038696289, 12.730956077575684, 14.852523803710938, 16.733527183532715, 19.748449325561523, 21.495580673217773, 23.22089672088623, 24.94049072265625, 26.54111385345459, 28.814077377319336, 30.373454093933105, 34.60049629211426, 36.70454025268555, 38.233399391174316, 39.50190544128418, 43.06745529174805, 43.43247413635254, 45.30441761016846, 48.589348793029785, 49.01242256164551, 53.09700965881348, 56.04696273803711, 59.83603000640869, 59.84354019165039, 62.66200542449952, 63.12739849090577, 65.85502624511719, 65.27304649353027, 68.59898567199707, 71.33102416992188, 71.69544696807861, 74.28395748138428, 76.62606239318848, 77.54397392272949, 80.5286169052124, 82.07094669342041, 87.89694309234619, 86.32588386535645, 88.24443817138672, 90.81900119781494, 91.76361560821533, 95.39604187011719, 99.31445121765137, 98.43003749847412, 101.27794742584229, 103.0435562133789, 105.50343990325928, 108.22558403015137, 110.76986789703369, 116.33944511413574, 114.38167095184326, 115.34810066223145, 120.19062042236328, 124.35853481292723, 122.61199951171876, 128.07011604309082] # + #uten gravity fd_mediantime_aba = [25.479912757873535, 27.719974517822266, 29.914140701293945, 32.25445747375488, 34.28757190704346, 36.71741485595703, 38.57243061065674, 41.708946228027344, 43.16890239715576, 45.942068099975586, 47.30796813964844, 50.406455993652344, 52.01137065887451, 54.500579833984375, 56.10239505767822, 58.4559440612793, 60.538530349731445, 62.65711784362793, 64.85247611999512, 67.19303131103516, 68.67086887359619, 70.95146179199219, 73.0900764465332, 75.0119686126709, 80.91557025909424, 78.33552360534668, 83.8710069656372, 82.275390625, 88.89651298522949, 86.54356002807617, 93.50800514221191, 89.96546268463135, 97.29409217834473, 94.41256523132324, 103.19292545318604, 98.79112243652344, 106.781005859375, 101.53055191040039, 111.16290092468262, 106.37903213500977, 116.23251438140869, 111.45544052124023, 120.74100971221924, 113.30389976501465, 123.98552894592285, 117.01250076293945, 127.00605392456055, 120.8735704421997, 130.5074691772461, 124.49049949645995, 133.87656211853027, 130.4309368133545, 136.80708408355713, 132.83050060272217, 139.89746570587158, 137.06040382385254, 143.3091163635254, 140.30420780181885, 144.57201957702637, 143.93043518066406] #med gravity fd_mediantime_aba = [25.32219886779785, 28.49602699279785, 30.61699867248535, 32.87196159362793, 35.49504280090332, 37.28294372558594, 39.38102722167969, 41.53609275817871, 43.59316825866699, 45.81093788146973, 47.966957092285156, 50.12392997741699, 52.3531436920166, 54.4888973236084, 56.52189254760742, 58.54392051696777, 62.92796134948731, 63.18283081054687, 65.2279853821, 65.8234357834, 67.0300722122, 70.6385374069, 72.8874206543, 73.6606121063, 76.0700702667, 78.9189338684082, 81.4218521118164, 82.82208442687988, 85.05487442016602, 86.82703971862793, 88.81187438964844, 90.25382995605469, 93.56307983398438, 95.33214569091797, 97.43404388427734, 99.0760326385498, 101.43494606018066, 102.42104530334473, 105.24415969848633, 106.97698593139648, 108.56103897094727, 111.41681671142578, 113.22307586669922, 115.21100997924805, 116.38402938842773, 116.369962692, 119.8740005493164, 121.870517731, 123.52395057678223, 122.743606567, 125.586032867, 126.476837158, 130.911111832, 134.246826171875, 135.8180046081543, 139.8448944091797, 141.16597175598145, 141.24798774719238, 143.5680389404297, 146.8369960784912] fd_mediantime_crba = [27.7111530304, 
30.4946899414, 33.3769321442, 35.7730388641, 37.8789901733, 40.1902198792, 43.3161258698, 46.7920303345, 50.5328178406, 52.7658462524, 56.6718578339, 59.2319965363, 63.2162094116, 67.6121711731, 71.5041160583, 76.315164566, 80.8000564575, 86.7438316345, 92.2029018402, 103.83105278, 103.554010391, 109.008073807, 112.440109253, 121.117115021, 129.366874695, 141.524076462, 144.619941711, 154.141902924, 165.724039078, 172.572851181, 183.619976044, 188.264846802, 199.967861176, 215.013027191, 226.817131042, 238.371133804, 250.201940536, 263.705015182, 277.276992798, 293.082714081, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None]

# +
fd_mediantime_aba40 = [None]*40
fd_mediantime_crba40 = [None]*40
fd_mediantime_rbdl40 = [None]*40

for i in range(0, 40):
    fd_mediantime_aba40[i] = fd_mediantime_aba[i]
    fd_mediantime_crba40[i] = fd_mediantime_crba[i]
    fd_mediantime_rbdl40[i] = fd_mediantime_rbdl[i]

# +
c1 = ["#802B67", "#AA8039", "#2C4870", "#84A136"]
c2 = ["#2D0320", "#3C2704", "#061327", "#2A3803"]
c3 = ["#571142", "#735117", "#142B4C", "#556D15"]
c4 = ["#AA508F", "#E1B56A", "#4D6A95", "#B7D564"]
c5 = ["#D380BA", "#FFDA9A", "#7C98C2", "#DCF594"]
# -

# # Graphs
#
# ## Gravity

# +
import matplotlib.pyplot as plt

color = ["#DA9815", "#8ECB14", "#AF116A", "#1C4092"]

ndofs = 60
joint_nr = list(range(ndofs + 1))
joint_nr.pop(0)

g_fig = plt.figure(figsize = (8, 8))
plt.scatter(joint_nr, g_mediantime_kdl, c=c4[2], label='KDL')
#plt.scatter(joint_nr, mediantime_u2c, c='hotpink', label='u2c')
plt.scatter(joint_nr, g_mediantime_u2c, c=c4[0], label='u2c')
plt.scatter(joint_nr, g_mediantime_rbdl, c=c4[3], label='RBDL')
plt.scatter(joint_nr, g_mediantime_pb, c=c4[1], label='PyBullet')
plt.xlabel("number of dof")
plt.ylabel("time (us)")
#plt.title("evaluation time G")
#plt.xlim(-2, 2)
plt.ylim(0, 85)
plt.legend(loc='upper left')
g_fig.show()
plt.savefig("./plots/G60.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)

# +
#data
timing_scores = [g_mediantime_pb, g_mediantime_rbdl, g_mediantime_u2c, g_mediantime_kdl]
names = ["PyBullet", "RBDL", "u2c", "KDL"]

#figure instance
boxplot = plt.figure(figsize = (8, 8))
plt.ylabel("time (us)")

#create ax instance
ax = boxplot.add_subplot(111)

#create box plot
bp = ax.boxplot(timing_scores, patch_artist = True)
ax.set_xticklabels(names)

color1 = [c1[1], c1[3], c1[0], c1[2]]
color5 = [c5[1], c5[3], c5[0], c5[2]]
for patch, color, color2 in zip(bp['boxes'], color5, color5):
    patch.set_facecolor(color)
    patch.set_color(color2)
for patch, color in zip(bp['medians'], color1):
    patch.set_color(color)

plt.savefig("./plots/G60box.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)

# +
c_fig = plt.figure(figsize = (8, 8))
plt.scatter(joint_nr, c_mediantime_kdl, c=c4[2], label='KDL')
plt.scatter(joint_nr, c_mediantime_u2c, c=c4[0], label='u2c')
plt.scatter(joint_nr, c_mediantime_rbdl, c=c4[3], label='RBDL')
plt.scatter(joint_nr, c_mediantime_pb, c=c4[1], label='PyBullet')
plt.xlabel("number of dof")
plt.ylabel("time (us)")
#plt.title("evaluation time C")
#plt.xlim(-2, 2)
plt.ylim(0, 120)
plt.legend(loc='upper left')
c_fig.show()
plt.savefig("./plots/C60.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf",
            transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)

# +
#data
timing_scores = [c_mediantime_pb, c_mediantime_rbdl, c_mediantime_u2c, c_mediantime_kdl]
names = ["PyBullet", "RBDL", "u2c", "KDL"]

#figure instance
boxplot = plt.figure(figsize = (8, 8))
plt.ylabel("time (us)")

#create ax instance
ax = boxplot.add_subplot(111)

#create box plot
bp = ax.boxplot(timing_scores, patch_artist = True)
ax.set_xticklabels(names)

color1 = [c1[1], c1[3], c1[0], c1[2]]
color5 = [c5[1], c5[3], c5[0], c5[2]]
for patch, color, color2 in zip(bp['boxes'], color5, color5):
    patch.set_facecolor(color)
    patch.set_color(color2)
for patch, color in zip(bp['medians'], color1):
    patch.set_color(color)

plt.savefig("./plots/C60box.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)

# +
id_fig = plt.figure(figsize = (8, 8))
#plt.scatter(joint_nr, c_mediantime_kdl, c=color[3], label='KDL')
#plt.scatter(joint_nr, mediantime_u2c, c='hotpink', label='u2c')
plt.scatter(joint_nr, id_mediantime_u2c, c=c4[0], label='u2c')
plt.scatter(joint_nr, id_mediantime_rbdl, c=c4[2], label='RBDL')
plt.scatter(joint_nr, id_mediantime_pb, c=c4[1], label='PyBullet')
plt.xlabel("number of dof")
plt.ylabel("time (us)")
#plt.title("evaluation time ID")
#plt.xlim(-2, 2)
plt.ylim(0, 140)
plt.legend(loc='upper left')
id_fig.show()
plt.savefig("./plots/ID60.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)

# +
#data (three series only: no KDL inverse-dynamics timings in this notebook)
timing_scores = [id_mediantime_pb, id_mediantime_rbdl, id_mediantime_u2c]
names = ["PyBullet", "RBDL", "u2c"]

#figure instance
boxplot = plt.figure(figsize = (8, 8))
plt.ylabel("time (us)")

#create ax instance
ax = boxplot.add_subplot(111)

#create box plot
bp = ax.boxplot(timing_scores, patch_artist = True)
ax.set_xticklabels(names)

color1 = [c1[1], c1[3], c1[0]]
color5 = [c5[1], c5[3], c5[0]]
for patch, color, color2 in zip(bp['boxes'], color5, color5):
    patch.set_facecolor(color)
    patch.set_color(color2)
for patch, color in zip(bp['medians'], color1):
    patch.set_color(color)

plt.savefig("./plots/ID60box.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)

# +
id_fig = plt.figure(figsize = (8, 8))
plt.scatter(joint_nr, m_mediantime_kdl, c=c4[2], label='KDL')
plt.scatter(joint_nr, m_mediantime_u2c, c=c4[0], label='u2c')
plt.scatter(joint_nr, m_mediantime_rbdl, c=c4[3], label='RBDL')
plt.scatter(joint_nr, m_mediantime_pb, c=c4[1], label='PyBullet')
plt.xlabel("number of joints")
plt.ylabel("time (us)")
plt.ylim(0, 140)
plt.legend(loc='upper left')
id_fig.show()
plt.savefig("./plots/M60.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)

# +
#data
timing_scores = [m_mediantime_pb, m_mediantime_rbdl, m_mediantime_u2c, m_mediantime_kdl]
names = ["PyBullet", "RBDL", "u2c", "KDL"]

#figure instance
boxplot = plt.figure(figsize = (8, 8))
plt.ylabel("time (us)")

#create ax instance
ax = boxplot.add_subplot(111)

#create box plot
bp = ax.boxplot(timing_scores, patch_artist = True)
ax.set_xticklabels(names)

color1 = [c1[1], c1[3], c1[0], c1[2]]
color5 = [c5[1], c5[3], c5[0], c5[2]]
for patch, color, color2 in zip(bp['boxes'], color5, color5):
    patch.set_facecolor(color)
    patch.set_color(color2)
for patch, color in zip(bp['medians'], color1):
    patch.set_color(color)

plt.savefig("./plots/M60box.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)
# -

# ## forward dynamics

# +
id_fig = plt.figure(figsize = (8, 8))
plt.scatter(joint_nr, fd_mediantime_rbdl, c=c4[3], label='RBDL')
plt.scatter(joint_nr, fd_mediantime_aba, c=c4[0], label='u2c (ABA)')
plt.scatter(joint_nr, fd_mediantime_crba, c=c4[2], label='u2c (CRBA)')
plt.xlabel("number of joints")
plt.ylabel("time (us)")
plt.ylim(0, 300)
plt.legend(loc='upper left')
id_fig.show()
plt.savefig("./plots/FD60.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)

# +
#data
timing_scores = [fd_mediantime_rbdl40, fd_mediantime_aba40, fd_mediantime_crba40]
names = ["RBDL", "u2c (ABA)", "u2c (CRBA)"]

#figure instance
boxplot = plt.figure(figsize = (8, 8))
plt.ylabel("time (us)")

#create ax instance
ax = boxplot.add_subplot(111)

#create box plot
bp = ax.boxplot(timing_scores, patch_artist = True)
ax.set_xticklabels(names)

color1 = [c1[3], c1[0], c1[2]]
color5 = [c5[3], c5[0], c5[2]]
for patch, color, color2 in zip(bp['boxes'], color5, color5):
    patch.set_facecolor(color)
    patch.set_color(color2)
for patch, color in zip(bp['medians'], color1):
    patch.set_color(color)

plt.savefig("./plots/FD60box40.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)
# -

# # Variable graphs

c5 = ["#D58EAF", "#6B949E", "#FFF3AA"]
c4 = ["#B15983", "#447784", "#D4C56A"]
c3 = ["#8E2F5C", "#255C69", "#AA9A39"]
c2 = ["#6A123C", "#0F434F", "#807015"]
#c5 = ["#", "#", "#"]

# ## u2c

# +
u2c_fig = plt.figure(figsize = (8, 8))
plt.scatter(joint_nr, g_mediantime_u2c, c=c4[2], label='G')
plt.scatter(joint_nr, c_mediantime_u2c, c=c4[1], label='C')
plt.scatter(joint_nr, id_mediantime_u2c, c=c4[0], label='ID')
#plt.scatter(joint_nr, m_mediantime_pb, c=color[0], label='pybullet')
plt.xlabel("number of dof")
plt.ylabel("time (us)")
#plt.title("evaluation time u2c")
#plt.xlim(-2, 2)
plt.ylim(0, 140)
plt.legend(loc='upper left')
u2c_fig.show()
plt.savefig("./plots/rnea_var_u2c.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)

#data
timing_scores = [g_mediantime_u2c, c_mediantime_u2c, id_mediantime_u2c]
names = ["G", "C", "ID"]

#figure instance
boxplot = plt.figure(figsize = (8, 8))
plt.ylabel("time (us)")

#create ax instance
ax = boxplot.add_subplot(111)

#create box plot
bp = ax.boxplot(timing_scores, patch_artist = True)
ax.set_xticklabels(names)

#color1 = [c1[3], c1[0], c1[0]]
color5 = [c5[2], c5[1], c5[0]]
color2 = [c2[1], c2[2], c2[0]]
color1 = [c1[1], c1[2], c1[0]]
for patch, color in zip(bp['boxes'], color5):
    patch.set_facecolor(color)
    patch.set_color(color)
for patch, color in zip(bp['medians'], color1):
    patch.set_color(color)

plt.savefig("./plots/rnea_var_u2c_box.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)
# -

# ## kdl

# +
kdl_fig = plt.figure(figsize = (8, 8))
plt.scatter(joint_nr, g_mediantime_kdl, c=c4[2], label='G')
plt.scatter(joint_nr, c_mediantime_kdl, c=c4[1], label='C')
plt.xlabel("number of dof")
plt.ylabel("time (us)")
plt.ylim(0, 120)
plt.legend(loc='upper left')
kdl_fig.show()
plt.savefig("./plots/rnea_var_kdl.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)

#data (only G and C were timed for KDL)
timing_scores = [g_mediantime_kdl, c_mediantime_kdl]
names = ["G", "C"]

#figure instance
boxplot = plt.figure(figsize = (8, 8))
plt.ylabel("time (us)")

#create ax instance
ax = boxplot.add_subplot(111)

#create box plot
bp = ax.boxplot(timing_scores, patch_artist = True)
ax.set_xticklabels(names)

#color1 = [c1[3], c1[0], c1[0]]
color5 = [c5[2], c5[1]]
color2 = [c2[1], c2[2]]
color1 = [c1[1], c1[2]]
for patch, color in zip(bp['boxes'], color5):
    patch.set_facecolor(color)
    patch.set_color(color)
for patch, color in zip(bp['medians'], color1):
    patch.set_color(color)

plt.savefig("./plots/rnea_var_kdl_box.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)
# -

# ## rbdl

# +
rbdl_fig = plt.figure(figsize = (8, 8))
plt.scatter(joint_nr, g_mediantime_rbdl, c=c4[2], label='G')
plt.scatter(joint_nr, c_mediantime_rbdl, c=c4[1], label='C')
plt.scatter(joint_nr, id_mediantime_rbdl, c=c4[0], label='ID')
#plt.scatter(joint_nr, m_mediantime_pb, c=color[0], label='pybullet')
plt.xlabel("number of dof")
plt.ylabel("time (us)")
#plt.title("evaluation time rbdl")
#plt.xlim(-2, 2)
plt.ylim(0, 120)
plt.legend(loc='upper left')
rbdl_fig.show()
plt.savefig("./plots/rnea_var_rbdl.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)

#data
timing_scores = [g_mediantime_rbdl, c_mediantime_rbdl, id_mediantime_rbdl]
names = ["G", "C", "ID"]

#figure instance
boxplot = plt.figure(figsize = (8, 8))
plt.ylabel("time (us)")

#create ax instance
ax = boxplot.add_subplot(111)

#create box plot
bp = ax.boxplot(timing_scores, patch_artist = True)
ax.set_xticklabels(names)

#color1 = [c1[3], c1[0], c1[0]]
color5 = [c5[2], c5[1], c5[0]]
color2 = [c2[1], c2[2], c2[0]]
color1 = [c1[1], c1[2], c1[0]]
for patch, color in zip(bp['boxes'], color5):
    patch.set_facecolor(color)
    patch.set_color(color)
for patch, color in zip(bp['medians'], color1):
    patch.set_color(color)

plt.savefig("./plots/rnea_var_rbdl_box.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)
# -

# ## pybullet

# +
pb_fig = plt.figure(figsize = (8, 8))
plt.scatter(joint_nr, g_mediantime_pb, c=c4[2], label='G')
plt.scatter(joint_nr, c_mediantime_pb, c=c4[1], label='C')
plt.scatter(joint_nr, id_mediantime_pb, c=c4[0], label='ID')
#plt.scatter(joint_nr, m_mediantime_pb, c=color[0], label='pybullet')
plt.xlabel("number of dof")
plt.ylabel("time (us)")
#plt.title("evaluation time pb")
#plt.xlim(-2, 2)
plt.ylim(0, 110)
plt.legend(loc='upper left')
pb_fig.show()
plt.savefig("./plots/rnea_var_pb.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)

#data
timing_scores = [g_mediantime_pb, c_mediantime_pb, id_mediantime_pb]
names = ["G", "C", "ID"]

#figure instance
boxplot = plt.figure(figsize = (8, 8))
plt.ylabel("time (us)")

#create ax instance
ax = boxplot.add_subplot(111)

#create box plot
bp = ax.boxplot(timing_scores, patch_artist = True)
ax.set_xticklabels(names)

#color1 = [c1[3], c1[0], c1[0]]
color5 = [c5[2], c5[1], c5[0]]
color2 = [c2[1], c2[2], c2[0]]
color1 = [c1[1], c1[2], c1[0]]
for patch, color in zip(bp['boxes'], color5):
    patch.set_facecolor(color)
    patch.set_color(color)
for patch, color in zip(bp['medians'], color1):
    patch.set_color(color)

plt.savefig("./plots/rnea_var_pb_box.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)
# -

# # FD vs ID (#operations)

# +
#colors dynamics (c6 must be defined before its first use below)
c6 = ["#88468A", "#D4876A", "#555794", "#BDCE67", "#499371", "#D4B96A"]
c5 = ["#A46FA6", "#D4876A", "#7F81B2", "#BDCE67", "#499371", "#D4B96A"]

# +
pb_fig = plt.figure(figsize = (8, 8))
plt.scatter(joint_nr, fd_mediantime_aba, c=c6[2], label='FD(ABA)')
plt.scatter(joint_nr, id_mediantime_u2c, c=c6[1], label='ID')
plt.xlabel("number of dof")
plt.ylabel("time (us)")
plt.ylim(0, 150)
plt.legend(loc='upper left')
pb_fig.show()
plt.savefig("./plots/rnea_aba_u2c.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)

#data
timing_scores = [fd_mediantime_aba, id_mediantime_u2c]
names = ["FD (ABA)", "ID"]

#figure instance
boxplot = plt.figure(figsize = (8, 8))
plt.ylabel("time (us)")

#create ax instance
ax = boxplot.add_subplot(111)

#create box plot
bp = ax.boxplot(timing_scores, patch_artist = True)
ax.set_xticklabels(names)

#color1 = [c1[3], c1[0], c1[0]]
cdark = ['#350037', '#551700']
clight = ['#A46FA6', '#FFC1AA']
color5 = [c6[2], c6[1]]
color2 = [c2[1], c2[2]]
color1 = [c1[1], c1[2]]
for patch, color in zip(bp['boxes'], color5):
    patch.set_facecolor(color)
    patch.set_color(color)
for patch, color in zip(bp['medians'], color1):
    patch.set_color(color)

plt.savefig("./plots/rnea_aba_u2c_box.pdf", dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="pdf", transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None, metadata=None)
# -

# # All dynamics 1 library
#
# ## u2c

# +
u2c_fig = plt.figure(figsize = (8, 8))
plt.scatter(joint_nr, g_mediantime_u2c, color = c6[0], label='G')
plt.scatter(joint_nr, c_mediantime_u2c, color = c6[1], label='C')
plt.scatter(joint_nr, id_mediantime_u2c, color = c6[2], label='ID')
plt.scatter(joint_nr, m_mediantime_u2c, color = c6[3], label='M')
plt.scatter(joint_nr, fd_mediantime_aba, color = c6[4], label='FD (ABA)')
plt.scatter(joint_nr, fd_mediantime_crba, color = c6[5], label='FD (CRBA)')
#plt.scatter(joint_nr, m_mediantime_pb, c=color[0], label='pybullet')
plt.xlabel("number of dof")
plt.ylabel("time (us)")
#plt.title("evaluation time u2c")
#plt.xlim(-2, 2)
plt.ylim(0, 145)
plt.legend(loc='upper left')
u2c_fig.show()

timing_scores = [g_mediantime_u2c, c_mediantime_u2c, id_mediantime_u2c, m_mediantime_u2c]
names = ["G", "C", "ID", "M"]#, "RBDL", "pybullet"]
boxplot = plt.figure(figsize = (8, 8))
boxplot.suptitle("Library Timing Comparison")
ax = boxplot.add_subplot(111)
ax.boxplot(timing_scores)
#plt.boxplot(timing_scores)
ax.set_xticklabels(names)
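# -

# # Appendix: a reusable boxplot helper
#
# The styled box-plot cells above all repeat the same figure/axis/patch-styling
# pattern. A small helper along these lines could replace them; this is a
# sketch only, and `styled_boxplot` is a name introduced here for illustration,
# not part of the original benchmark code.

# +
def styled_boxplot(timing_scores, names, box_colors, median_colors, path):
    """Draw one styled box per timing series and save the figure as a PDF."""
    fig = plt.figure(figsize = (8, 8))
    plt.ylabel("time (us)")
    ax = fig.add_subplot(111)
    bp = ax.boxplot(timing_scores, patch_artist = True)
    ax.set_xticklabels(names)
    # color the box faces and outlines
    for patch, box_color in zip(bp['boxes'], box_colors):
        patch.set_facecolor(box_color)
        patch.set_color(box_color)
    # color the median lines
    for line, median_color in zip(bp['medians'], median_colors):
        line.set_color(median_color)
    plt.savefig(path, dpi=300, format="pdf")

# For example, a box-plot cell above would then reduce to:
#styled_boxplot(timing_scores, names, color5, color1, "./plots/G60box.pdf")
# -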
examples/timing/60dof/timing_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Import modules

# +
import tensorflow as tf
import numpy as np
from konlpy.tag import Twitter
import pandas as pd
import enum
import os
import re
import json

from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import matplotlib.pyplot as plt

from preprocess import *
# -

# # Visualization function

def plot_graphs(history, string):
    plt.plot(history.history[string])
    plt.plot(history.history['val_'+string], '')
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, 'val_'+string])
    plt.show()

# # Define training data paths

DATA_IN_PATH = './data_in/'
DATA_OUT_PATH = './data_out/'
TRAIN_INPUTS = 'train_inputs.npy'
TRAIN_OUTPUTS = 'train_outputs.npy'
TRAIN_TARGETS = 'train_targets.npy'
DATA_CONFIGS = 'data_configs.json'

# # Fix the random seed

SEED_NUM = 1234
tf.random.set_seed(SEED_NUM)

# # Load the files

index_inputs = np.load(open(DATA_IN_PATH + TRAIN_INPUTS, 'rb'))
index_outputs = np.load(open(DATA_IN_PATH + TRAIN_OUTPUTS, 'rb'))
index_targets = np.load(open(DATA_IN_PATH + TRAIN_TARGETS, 'rb'))
prepro_configs = json.load(open(DATA_IN_PATH + DATA_CONFIGS, 'r'))

# # Define the model hyperparameters

# +
char2idx = prepro_configs['char2idx']
end_index = prepro_configs['end_symbol']
model_name = 'transformer'
vocab_size = prepro_configs['vocab_size']
BATCH_SIZE = 64
MAX_SEQUENCE = 25
EPOCHS = 20
VALID_SPLIT = 0.1

kargs = {'model_name': model_name,
         'num_layers': 2,
         'd_model': 512,
         'num_heads': 8,
         'dff': 2048,
         'input_vocab_size': vocab_size,
         'target_vocab_size': vocab_size,
         'maximum_position_encoding': MAX_SEQUENCE,
         'end_token_idx': char2idx[end_index],
         'rate': 0.1
         }
# -

# # Declare and compile the model
#
# ## Padding and look-ahead masking

def create_padding_mask(seq):
    seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
    # add extra dimensions to add the padding
    # to the attention logits.
    return seq[:, tf.newaxis, tf.newaxis, :]  # (batch_size, 1, 1, seq_len)

def create_look_ahead_mask(size):
    mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
    return mask  # (seq_len, seq_len)

def create_masks(inp, tar):
    # Encoder padding mask
    enc_padding_mask = create_padding_mask(inp)

    # Used in the 2nd attention block in the decoder.
    # This padding mask is used to mask the encoder outputs.
    dec_padding_mask = create_padding_mask(inp)

    # Used in the 1st attention block in the decoder.
    # It is used to pad and mask future tokens in the input received by
    # the decoder.
    look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])
    dec_target_padding_mask = create_padding_mask(tar)
    combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)

    return enc_padding_mask, combined_mask, dec_padding_mask

enc_padding_mask, look_ahead_mask, dec_padding_mask = create_masks(index_inputs, index_outputs)

# ## Positional encoding

def get_angles(pos, i, d_model):
    angle_rates = 1 / np.power(10000, (2 * i//2) / np.float32(d_model))
    return pos * angle_rates

def positional_encoding(position, d_model):
    angle_rads = get_angles(np.arange(position)[:, np.newaxis],
                            np.arange(d_model)[np.newaxis, :],
                            d_model)

    # apply sin to even indices in the array; 2i
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])

    # apply cos to odd indices in the array; 2i+1
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])

    pos_encoding = angle_rads[np.newaxis, ...]
    return tf.cast(pos_encoding, dtype=tf.float32)

# +
pos_encoding = positional_encoding(50, 512)
print (pos_encoding.shape)

plt.pcolormesh(pos_encoding[0], cmap='RdBu')
plt.xlabel('Depth')
plt.xlim((0, 512))
plt.ylabel('Position')
plt.colorbar()
plt.show()
# -

# ## Attention

def scaled_dot_product_attention(q, k, v, mask):
    """Calculate the attention weights.
    q, k, v must have matching leading dimensions.
    k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
    The mask has different shapes depending on its type (padding or look ahead)
    but it must be broadcastable for addition.

    Args:
      q: query shape == (..., seq_len_q, depth)
      k: key shape == (..., seq_len_k, depth)
      v: value shape == (..., seq_len_v, depth_v)
      mask: Float tensor with shape broadcastable
            to (..., seq_len_q, seq_len_k). Defaults to None.

    Returns:
      output, attention_weights
    """

    matmul_qk = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)

    # scale matmul_qk
    dk = tf.cast(tf.shape(k)[-1], tf.float32)
    scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)

    # add the mask to the scaled tensor.
    if mask is not None:
        scaled_attention_logits += (mask * -1e9)

    # softmax is normalized on the last axis (seq_len_k) so that the scores
    # add up to 1.
    attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)  # (..., seq_len_q, seq_len_k)

    output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)

    return output, attention_weights

# ## Multi-head attention

class MultiHeadAttention(tf.keras.layers.Layer):
    def __init__(self, **kargs):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = kargs['num_heads']
        self.d_model = kargs['d_model']

        assert self.d_model % self.num_heads == 0

        self.depth = self.d_model // self.num_heads

        self.wq = tf.keras.layers.Dense(kargs['d_model'])
        self.wk = tf.keras.layers.Dense(kargs['d_model'])
        self.wv = tf.keras.layers.Dense(kargs['d_model'])

        self.dense = tf.keras.layers.Dense(kargs['d_model'])

    def split_heads(self, x, batch_size):
        """Split the last dimension into (num_heads, depth).
        Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
        """
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, v, k, q, mask):
        batch_size = tf.shape(q)[0]

        q = self.wq(q)  # (batch_size, seq_len, d_model)
        k = self.wk(k)  # (batch_size, seq_len, d_model)
        v = self.wv(v)  # (batch_size, seq_len, d_model)

        q = self.split_heads(q, batch_size)  # (batch_size, num_heads, seq_len_q, depth)
        k = self.split_heads(k, batch_size)  # (batch_size, num_heads, seq_len_k, depth)
        v = self.split_heads(v, batch_size)  # (batch_size, num_heads, seq_len_v, depth)

        # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
        # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
        scaled_attention, attention_weights = scaled_dot_product_attention(
            q, k, v, mask)

        scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])  # (batch_size, seq_len_q, num_heads, depth)

        concat_attention = tf.reshape(scaled_attention,
                                      (batch_size, -1, self.d_model))  # (batch_size, seq_len_q, d_model)

        output = self.dense(concat_attention)  # (batch_size, seq_len_q, d_model)

        return output, attention_weights

# ## Point-wise feed-forward network

def point_wise_feed_forward_network(**kargs):
    return tf.keras.Sequential([
        tf.keras.layers.Dense(kargs['dff'], activation='relu'),  # (batch_size, seq_len, dff)
        tf.keras.layers.Dense(kargs['d_model'])  # (batch_size, seq_len, d_model)
    ])

# ## Encoder layer

class EncoderLayer(tf.keras.layers.Layer):
    def __init__(self, **kargs):
        super(EncoderLayer, self).__init__()

        self.mha = MultiHeadAttention(**kargs)
        self.ffn = point_wise_feed_forward_network(**kargs)

        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)

        self.dropout1 = tf.keras.layers.Dropout(kargs['rate'])
        self.dropout2 = tf.keras.layers.Dropout(kargs['rate'])

    def call(self, x, mask):
        attn_output, _ = self.mha(x, x, x, mask)  # (batch_size, input_seq_len, d_model)
        attn_output = self.dropout1(attn_output)
        out1 = self.layernorm1(x + attn_output)  # (batch_size, input_seq_len, d_model)

        ffn_output = self.ffn(out1)  # (batch_size, input_seq_len, d_model)
        ffn_output = self.dropout2(ffn_output)
        out2 = self.layernorm2(out1 + ffn_output)  # (batch_size, input_seq_len, d_model)

        return out2

# ## Decoder layer

class DecoderLayer(tf.keras.layers.Layer):
    def __init__(self, **kargs):
        super(DecoderLayer, self).__init__()

        self.mha1 = MultiHeadAttention(**kargs)
        self.mha2 = MultiHeadAttention(**kargs)

        self.ffn = point_wise_feed_forward_network(**kargs)

        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)

        self.dropout1 = tf.keras.layers.Dropout(kargs['rate'])
        self.dropout2 = tf.keras.layers.Dropout(kargs['rate'])
        self.dropout3 = tf.keras.layers.Dropout(kargs['rate'])

    def call(self, x, enc_output, look_ahead_mask, padding_mask):
        # enc_output.shape == (batch_size, input_seq_len, d_model)

        attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask)  # (batch_size, target_seq_len, d_model)
        attn1 = self.dropout1(attn1)
        out1 = self.layernorm1(attn1 + x)

        attn2, attn_weights_block2 = self.mha2(
            enc_output, enc_output, out1, padding_mask)  # (batch_size, target_seq_len, d_model)
        attn2 = self.dropout2(attn2)
        out2 = self.layernorm2(attn2 + out1)  # (batch_size, target_seq_len, d_model)

        ffn_output = self.ffn(out2)  # (batch_size, target_seq_len, d_model)
        ffn_output = self.dropout3(ffn_output)
        out3 = self.layernorm3(ffn_output + out2)  # (batch_size, target_seq_len, d_model)

        return out3, attn_weights_block1, attn_weights_block2

# ## Encoder

class Encoder(tf.keras.layers.Layer):
    def __init__(self, **kargs):
        super(Encoder, self).__init__()

        self.d_model = kargs['d_model']
        self.num_layers = kargs['num_layers']

        self.embedding = tf.keras.layers.Embedding(kargs['input_vocab_size'], self.d_model)
        self.pos_encoding = positional_encoding(kargs['maximum_position_encoding'],
                                                self.d_model)

        self.enc_layers = [EncoderLayer(**kargs) for _ in range(self.num_layers)]

        self.dropout = tf.keras.layers.Dropout(kargs['rate'])

    def call(self, x, mask):
        seq_len = tf.shape(x)[1]

        # adding embedding and position encoding.
        x = self.embedding(x)  # (batch_size, input_seq_len, d_model)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]

        x = self.dropout(x)

        for i in range(self.num_layers):
            x = self.enc_layers[i](x, mask)

        return x  # (batch_size, input_seq_len, d_model)

# ## Decoder

class Decoder(tf.keras.layers.Layer):
    def __init__(self, **kargs):
        super(Decoder, self).__init__()

        self.d_model = kargs['d_model']
        self.num_layers = kargs['num_layers']

        self.embedding = tf.keras.layers.Embedding(kargs['target_vocab_size'], self.d_model)
        self.pos_encoding = positional_encoding(kargs['maximum_position_encoding'], self.d_model)

        self.dec_layers = [DecoderLayer(**kargs) for _ in range(self.num_layers)]

        self.dropout = tf.keras.layers.Dropout(kargs['rate'])

    def call(self, x, enc_output, look_ahead_mask, padding_mask):
        seq_len = tf.shape(x)[1]
        attention_weights = {}

        x = self.embedding(x)  # (batch_size, target_seq_len, d_model)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]

        x = self.dropout(x)

        for i in range(self.num_layers):
            x, block1, block2 = self.dec_layers[i](x, enc_output, look_ahead_mask, padding_mask)

            attention_weights['decoder_layer{}_block1'.format(i+1)] = block1
            attention_weights['decoder_layer{}_block2'.format(i+1)] = block2

        # x.shape == (batch_size, target_seq_len, d_model)
        return x, attention_weights

# ## Transformer model

class Transformer(tf.keras.Model):
    def __init__(self, **kargs):
        super(Transformer, self).__init__(name=kargs['model_name'])
        self.end_token_idx = kargs['end_token_idx']

        self.encoder = Encoder(**kargs)
        self.decoder = Decoder(**kargs)

        self.final_layer = tf.keras.layers.Dense(kargs['target_vocab_size'])

    def call(self, x):
        inp, tar = x
        enc_padding_mask, look_ahead_mask, dec_padding_mask = create_masks(inp, tar)
        enc_output = self.encoder(inp, enc_padding_mask)  # (batch_size, inp_seq_len, d_model)

        # dec_output.shape == (batch_size, tar_seq_len, d_model)
        dec_output, _ = self.decoder(
            tar, enc_output, look_ahead_mask, dec_padding_mask)

        final_output = self.final_layer(dec_output)  # (batch_size, tar_seq_len, target_vocab_size)

        return final_output

    def inference(self, x):
        inp = x
        tar = tf.expand_dims([STD_INDEX], 0)

        enc_padding_mask, look_ahead_mask, dec_padding_mask = create_masks(inp, tar)
        enc_output = self.encoder(inp, enc_padding_mask)

        predict_tokens = list()
        for t in range(0, MAX_SEQUENCE):
            dec_output, _ = self.decoder(tar, enc_output, look_ahead_mask, dec_padding_mask)
            final_output = self.final_layer(dec_output)
            outputs = tf.argmax(final_output, -1).numpy()
            pred_token = outputs[0][-1]
            if pred_token == self.end_token_idx:
                break
            predict_tokens.append(pred_token)
            tar = tf.expand_dims([STD_INDEX] + predict_tokens, 0)
            _, look_ahead_mask, dec_padding_mask = create_masks(inp, tar)

        return predict_tokens
## 모델 로스 정의 # + loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy') def loss(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_mean(loss_) def accuracy(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) mask = tf.expand_dims(tf.cast(mask, dtype=pred.dtype), axis=-1) pred *= mask acc = train_accuracy(real, pred) return tf.reduce_mean(acc) # - model = Transformer(**kargs) model.compile(optimizer=tf.keras.optimizers.Adam(1e-4), loss=loss, metrics=[accuracy]) # # Callback 선언 # + # overfitting을 막기 위한 ealrystop 추가 earlystop_callback = EarlyStopping(monitor='val_accuracy', min_delta=0.0001, patience=10) # min_delta: the threshold that triggers the termination (acc should at least improve 0.0001) # patience: no improvment epochs (patience = 1, 1번 이상 상승이 없으면 종료) checkpoint_path = DATA_OUT_PATH + model_name + '/weights.h5' checkpoint_dir = os.path.dirname(checkpoint_path) # Create path if exists if os.path.exists(checkpoint_dir): print("{} -- Folder already exists \n".format(checkpoint_dir)) else: os.makedirs(checkpoint_dir, exist_ok=True) print("{} -- Folder create complete \n".format(checkpoint_dir)) cp_callback = ModelCheckpoint( checkpoint_path, monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True) # - # # 모델 학습 history = model.fit([index_inputs, index_outputs], index_targets, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_split=VALID_SPLIT, callbacks=[earlystop_callback, cp_callback]) # # 결과 플롯 plot_graphs(history, 'accuracy') plot_graphs(history, 'loss') # # 베스트 모델 불러오기 # + DATA_OUT_PATH = './data_out/' SAVE_FILE_NM = 'weights.h5' model.load_weights(os.path.join(DATA_OUT_PATH, model_name, SAVE_FILE_NM)) # - # # 모델 결과 출력하기 char2idx = prepro_configs['char2idx'] idx2char = prepro_configs['idx2char'] # + text = "남자친구 승진 선물로 뭐가 좋을까?" test_index_inputs, _ = enc_processing([text], char2idx) outputs = model.inference(test_index_inputs) print(' '.join([idx2char[str(o)] for o in outputs]))
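# As a convenience, the inference call above can be wrapped in a small helper so several prompts can be tried in a row. This is a minimal sketch reusing only names defined earlier in this notebook (`enc_processing`, `char2idx`, `idx2char`, `model`); the example prompt is the same one used above.

# +
def reply(sentence):
    """Run greedy decoding for one input sentence and return the detokenized answer."""
    token_ids, _ = enc_processing([sentence], char2idx)
    output_tokens = model.inference(token_ids)
    return ' '.join(idx2char[str(o)] for o in output_tokens)

print(reply("남자친구 승진 선물로 뭐가 좋을까?"))
# -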
6.CHATBOT/6.5.transformer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Crawling #2 : API

# +
# If, for some reason(?), you come across an API that returns data without requiring
# authentication, it is the easiest way to pull information from an external platform.
# => "That said, you are not really supposed to crawl sites in the first place."

# +
import json

import pandas as pd
import requests

# +
# No separate authentication step is required here (do not build a real service this way).
# Via the Zigbang API v1 we fetch Zigbang's listing data (location, price, contact info, etc.)
# — not crawling (X), just plain fetching (O).
response = requests.get("https://api.zigbang.com/v1/items?detail=true&item_ids=3430448&item_ids=3367854&item_ids=3288446&item_ids=3467204&item_ids=3150497&item_ids=3440906&item_ids=3376834&item_ids=3139708&item_ids=3331511&item_ids=3373198&item_ids=3236734&item_ids=3376434&item_ids=3322860&item_ids=3303061&item_ids=3287167&item_ids=3262172&item_ids=3228631&item_ids=3505011&item_ids=3249401&item_ids=3330951&item_ids=3494055&item_ids=3317227&item_ids=3405679&item_ids=3103384&item_ids=3418616&item_ids=3240028&item_ids=3456814&item_ids=3416910&item_ids=3429455&item_ids=3181675&item_ids=3309372&item_ids=3501566&item_ids=3240513&item_ids=3346510&item_ids=3406670&item_ids=3412596&item_ids=3214438&item_ids=3459715&item_ids=3493254&item_ids=3475114&item_ids=3501607&item_ids=3484623&item_ids=3190410&item_ids=3350050&item_ids=3495453&item_ids=3446619&item_ids=3451078&item_ids=3248718&item_ids=3345445&item_ids=3332380&item_ids=3450803&item_ids=3471049&item_ids=3438747&item_ids=3330749&item_ids=3446920&item_ids=3151682&item_ids=3501554&item_ids=3258532&item_ids=3435418&item_ids=3501551")
# -

zigbang_result = json.loads(response.text)

zigbang_result.get("count_agent")  # How many listings does the response currently contain?
# Keys like "count_agent" were not known in advance — they were discovered by printing the response.

room_df = pd.DataFrame(columns=[
    "직방 매물번호",  # Zigbang listing ID
    "연락처",         # phone number
    "이메일",         # email
    "부동산",         # agency
    "보증금",         # deposit
    "월세",           # monthly rent
])

for room_information in zigbang_result.get("items"):
    zigbang_id = room_information.get("item").get("id")
    phonenumber = room_information.get("item").get("original_user_phone")
    email = room_information.get("item").get("agent_address1")
    agent_name = room_information.get("item").get("agent_name")
    deposit = room_information.get("item").get("deposit")
    rent = room_information.get("item").get("rent")

    room_df.loc[len(room_df)] = [zigbang_id, phonenumber, email, agent_name, deposit, rent]

room_df

room_df.to_csv("zigbang_room.csv", index=False)
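# The hard-coded URL above packs dozens of item_ids into one query string. A hedged sketch of a more maintainable variant: `requests` encodes a list passed via `params` as repeated query parameters, and a timeout plus `raise_for_status()` make failures visible. The helper name and the trimmed ID list are illustrative, not part of the original notebook.

def fetch_items(item_ids, detail=True):
    response = requests.get(
        "https://api.zigbang.com/v1/items",
        params={"detail": str(detail).lower(), "item_ids": item_ids},
        timeout=10,
    )
    response.raise_for_status()  # surface 4xx/5xx instead of parsing an error body
    return response.json()

# example: fetch_items([3430448, 3367854, 3288446])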
crawling/crawling-with-api.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # File Authentication System # - GOAL: build a file authentication system that lets browsers authenticate and play video chunks without having to wait for the entire file. from hashlib import sha256 import os def read_block_reverse(filepath, buffersize): with open(filepath, 'rb') as f: f.seek(0, os.SEEK_END) filesize = f.tell() firstchunk = filesize % buffersize if firstchunk != 0: f.seek(filesize - firstchunk) yield f.read(firstchunk) f.seek(-firstchunk-buffersize, os.SEEK_END) move = -2*buffersize while True: yield f.read(buffersize) if f.tell() <= buffersize: break f.seek(move, 1) def compute_first_hash(filepath, buffersize=1024): gen = read_block_reverse(filepath, buffersize) h = bytes() for i in gen: h = sha256(i + h).digest() return h # + def write_all_hashes(filepath, hashfilepath=None, buffersize=1024): if hashfilepath is None: hashfilepath = filepath + "hash" f = open(hashfilepath,"wb") gen = read_block_reverse(filepath, buffersize) h = bytes() for i in gen: h = sha256(i + h).digest() f.write(h) f.close() def read_block_hash(filepath, hashfilepath=None, buffersize=1024, hashsize=32): if hashfilepath is None: hashfilepath = filepath + "hash" with open(hashfilepath, "rb") as hashfile: hashfile.seek(0, os.SEEK_END) hashfile.seek(-hashsize, os.SEEK_END) yield hashfile.read(hashsize) move = -2 * hashsize with open(filepath, 'rb') as file: while True: hashfile.seek(move, 1) yield file.read(buffersize) + hashfile.read(hashsize) if hashfile.tell() == hashsize: yield file.read(buffersize) break # + answer1 = '5b96aece304a1422224f9a41b228416028f9ba26b0d1058f400200f06a589949' answer2 = "03c08f4ee0b576fe319338139c045c89c3e8e9409633bea29442e21425006ea8" filepath1 = './data/6.1.intro.mp4_download' filepath2 = './data/6.2.birthday.mp4_download' h1 = compute_first_hash(filepath1) assert h1.hex() == answer1 h2 = compute_first_hash(filepath2) assert h2.hex() == answer2 # - def is_authentic(filepath, hashsize=32): gen = read_block_hash(filepath) h = next(gen) for chunk in gen: if sha256(chunk).digest() != h: return False h = chunk[-hashsize:] return True write_all_hashes(filepath1) write_all_hashes(filepath2) assert is_authentic(filepath1) assert is_authentic(filepath2)
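# What makes streaming verification work above is a reverse hash chain: hashes are computed from the last block backwards, so each block can be shipped together with the hash of the *next* block, and a player holding only the first hash can authenticate chunks as they arrive. A minimal in-memory sketch of that property, on toy byte strings rather than the notebook's video files:

# +
blocks = [b"chunk-a", b"chunk-b", b"chunk-c"]

# build hashes back to front: hashes[i] authenticates blocks[i:]
h = b""
hashes = []
for block in reversed(blocks):
    h = sha256(block + h).digest()
    hashes.append(h)
hashes.reverse()

# stream verification: holding only hashes[0], check each block plus the next hash
trusted = hashes[0]
for i, block in enumerate(blocks):
    nxt = hashes[i + 1] if i + 1 < len(blocks) else b""
    assert sha256(block + nxt).digest() == trusted
    trusted = nxt
print("all chunks verified")
# -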
simple-exercises/basic-cryptography/3-integrity.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# # 2. Building the vocabulary
#
# We will initially start from the HoG features to build our vocabulary, although it could be done with any other features.
#
# Import the features:

# +
import pickle

path = '../../rsc/obj/'
X_train_path = path + 'X_train.sav'
train_features = pickle.load(open(X_train_path, 'rb'))

# import pickle  # serialization module
# import numpy as np
# path = '..//..//rsc//obj//BoW_features//'
# for i in (15000,30000,45000,53688):
#     daisy_features_path = path + 'BoW_features'+ str(i) +'.sav'
#     if i == 15000:
#         train_features = pickle.load(open(daisy_features_path, 'rb'))
#     set_to_add = pickle.load(open(daisy_features_path, 'rb'))
#     train_features = np.vstack((train_features,set_to_add))
# -

# # B. Building the vocabulary with a clustering algorithm
#
# The reason we use a clustering algorithm is to group these words into a given number of clusters, so that the groups of words become visual patterns that give the classifier more information and therefore allow a more efficient classification.
#
# We will train two variants of clustering algorithms:
# - KMeans
# - MiniBatchKMeans
#

# +
# %%time
from sklearn.cluster import MiniBatchKMeans as MiniKMeans
import warnings
warnings.filterwarnings("ignore")

# Initialize the KMeans algorithm with the desired number of clusters
mini_kmeans = MiniKMeans(500)
# Fit the clusters on all features of the training set
mini_kmeans.fit(train_features)
# -

# ## Serializing KMeans

# +
import pickle  # serialization module

path = '../../rsc/obj/'
mini_kmeans_path = path + 'mini_kmeans.sav'
pickle.dump(mini_kmeans, open(mini_kmeans_path, 'wb'))
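# With the clusters fitted, each image can then be encoded as a bag-of-visual-words histogram by assigning its local descriptors to the nearest cluster centre. A hedged sketch of that step — `descriptors` stands in for one image's feature matrix of shape (n_descriptors, n_dims), and the helper itself is illustrative:

# +
import numpy as np

def bow_histogram(descriptors, kmeans, n_clusters=500):
    words = kmeans.predict(descriptors)            # nearest visual word per descriptor
    hist = np.bincount(words, minlength=n_clusters)
    return hist / max(hist.sum(), 1)               # normalised frequency vector

# example: hist = bow_histogram(train_features[:100], mini_kmeans)
# -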
code/notebooks/Prototypes/BoW/2B_Clustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.1 64-bit (''.venv'': poetry)' # name: python3 # --- # + import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns from jax import random from numpyro import diagnostics, infer from sklearn import datasets from sklearn import linear_model from myapp import spike_and_slab np.random.seed(12345) # - # # Data # + x, y = datasets.load_diabetes(return_X_y=True) y = y[:, None] x.shape, y.shape # - # # Full scratch hyperparams = spike_and_slab.SpikeAndSlabHyperParams( a_w=1, b_w=1, nu_psi=5, q_psi=4, r=0.001 ) posterior_samples = spike_and_slab.gibbs_sampling(x, y, hyperparams) # + plt.figure(figsize=(12, 8)) plt.subplot(321) plt.plot(posterior_samples.mu) plt.title("mu") plt.subplot(322) plt.plot(posterior_samples.alpha) plt.title("alpha") plt.subplot(323) plt.plot(posterior_samples.sigma_2) plt.title("sigma_2") plt.subplot(324) plt.plot(posterior_samples.delta) plt.title("delta") plt.subplot(325) plt.plot(posterior_samples.psi) plt.title("psi") plt.subplot(326) plt.plot(posterior_samples.w) plt.title("w") plt.tight_layout() plt.show() # - # # Prediction y_pred = spike_and_slab.predict(x, posterior_samples) plt.scatter(y.ravel(), np.median(y_pred, 0).ravel()) plt.plot([20, 350], [20, 350], "--r") plt.xlabel("True") plt.ylabel("Prediction") plt.show()
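# A hedged follow-up: in spike-and-slab models the headline quantity is usually the posterior inclusion probability per coefficient — the fraction of Gibbs samples in which the spike/slab indicator is on. This sketch assumes `posterior_samples.delta` holds the sampled binary indicators with shape (n_samples, n_features), matching how it is traced in the plot above.

inclusion_prob = np.mean(posterior_samples.delta, axis=0)
for j, p in enumerate(inclusion_prob):
    print("feature {}: P(include) = {:.2f}".format(j, p))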
notes/spike_and_slab.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: py36
#     language: python
#     name: py36
# ---

# +
# Given a string S, check whether it contains any special character or not. Print 'YES' if it does, else 'NO'.
# Input Format:
# The first line contains the string S
# Output Format:
# Print 'YES' or 'NO'
# Example:
# Input:
# Hi$my*name
# Output:
# YES
# -

ip = input()

# Build the whitelist of allowed characters: digits, space, and upper/lower-case letters.
l = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ' ']
for i in range(65, 91):   # 'A'..'Z' (the stop value is exclusive, so 91 is needed to include 'Z')
    l.append(chr(i))
for i in range(97, 123):  # 'a'..'z' (123 is needed to include 'z')
    l.append(chr(i))

for i in ip:
    if i not in l:
        print("YES", end='')
        break
else:
    # for/else: runs only if the loop finished without hitting a special character
    print("NO", end='')
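# For reference, the same check can be written without hand-building the whitelist. Note that `str.isalnum()` also accepts non-ASCII letters, so this is a close but not byte-identical variant of the solution above:

s = "Hi$my*name"  # sample input from the problem statement
print("YES" if any(not (c.isalnum() or c == ' ') for c in s) else "NO")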
Week_10_Programming_Assignments/1_Special Character.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={}
# ## JSSP Genetic Algorithm Example
#
# This notebook demonstrates how to use the genetic algorithm.

# + pycharm={}
from JSSP import CSVData

# initialize data
data_directory = '../data/given_data'
instance_data = CSVData(data_directory + '/sequenceDependencyMatrix.csv',
                        data_directory + '/machineRunSpeed.csv',
                        data_directory + '/jobTasks.csv')

# uncomment this to print the data that was read in
# print(instance_data)

# +
from JSSP.genetic_algorithm import GASelectionEnum

output_dir = './example_output'

# optional
output_results = True
auto_open = True
benchmark = True
verbose = True
progress_bar = True

# GA parameters
runtime = 30  # in seconds
iterations = 100
population_size = 500
selection_method = GASelectionEnum.FITNESS_PROPORTIONATE
selection_size = 5
mutation_probability = 0.1

# + pycharm={}
from JSSP.solver import Solver

# create solver
solver = Solver(instance_data)

# run genetic algorithm
solution = solver.genetic_algorithm_time(runtime=runtime,
                                         population_size=population_size,
                                         selection_method_enum=selection_method,
                                         selection_size=selection_size,
                                         mutation_probability=mutation_probability,
                                         benchmark=benchmark,
                                         verbose=verbose,
                                         progress_bar=progress_bar
                                         )

print('\nSolution')
print(solution)

if output_results:
    if benchmark:
        solver.output_benchmark_results(output_dir, name='example_benchmark', auto_open=auto_open)
    else:
        solution.create_schedule(output_dir)
# -

# #### You can also run the genetic algorithm for a certain number of iterations instead of time

# + pycharm={"name": "#%%\n"}
from JSSP.solver import Solver

# create solver
solver = Solver(instance_data)

# run genetic algorithm
solution = solver.genetic_algorithm_iter(iterations=iterations,
                                         population_size=population_size,
                                         selection_method_enum=selection_method,
                                         selection_size=selection_size,
                                         mutation_probability=mutation_probability,
                                         benchmark=benchmark,
                                         verbose=verbose
                                         )

print('\nSolution')
print(solution)

if output_results:
    if benchmark:
        solver.output_benchmark_results(output_dir, name='example_benchmark', auto_open=auto_open)
    else:
        solution.create_schedule_xlsx_file(output_dir)
examples/GA_Example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import sv_utils
import torch
import numpy as np
import pylab as plt
from matplotlib import pyplot
from matplotlib.pyplot import figure
from os.path import dirname, realpath
from singleNeuron import preSpikes, nextSpikes, STDPLIFDensePopulation

# +
alpha = float(np.exp(-1e-3/10e-3))
beta = float(np.exp(-1e-3/2e-5))
print(alpha)
print(beta)
# -

# ## Sequence Visualizer

# The purpose of this notebook is to plot both the input and the output of the DSNN network that recognizes patterns in a sequence of random numbers.

Sin_tmp = np.load('./sequences/all_nums/all_nums.npy')
print(Sin_tmp.shape)
Sin = np.reshape(Sin_tmp, (Sin_tmp.shape[0]*Sin_tmp.shape[1], Sin_tmp.shape[2]))
Sin = np.transpose(Sin)
print(Sin.shape)

# ### Input spike train

plt.rcParams["figure.figsize"] = (15, 6)
sv_utils.plotLIF(U=None, S=Sin[:200])
print("Spikes: " + str(np.count_nonzero(Sin)))

# ### Output spike train (without delay)

Sin_out_s = torch.load('../cleanDSNN/results/layer_5_conv.pt')
sv_utils.plotLIF(U=None, S=Sin_out_s[:20000].data.numpy())
print(Sin_out_s.shape)
print("Spikes: " + str(np.count_nonzero(Sin_out_s)))

# ### Output spike train (with delay)

Sin_out_d = torch.load('../cleanDSNN/results/delayed_layer_5_conv.pt')
sv_utils.plotLIF(U=None, S=Sin_out_d[:200].data.numpy())
print(Sin_out_d.shape)
print("Spikes: " + str(np.count_nonzero(Sin_out_d)))

# ### Concatenating both outputs

Sin_out = torch.cat((Sin_out_s, Sin_out_d), 1)
sv_utils.plotLIF(U=None, S=Sin_out[:2000].data.numpy())
print(Sin_out.shape)
print("Spikes: " + str(np.count_nonzero(Sin_out)))

# ## Training the output neuron

# +
pat_times = np.load('./sequences/all_nums/pat_all_nums.npy')
Sin_tmp = Sin_out_s.clone().detach()
T = Sin_tmp.shape[0]
N_out = 1
N_in = Sin_tmp.shape[1]
th = 3.5

singleNeuron = STDPLIFDensePopulation(in_channels=N_in, out_channels=N_out,
                                      weight=0.7,
                                      alpha=float(np.exp(-1e-3/10e-3)),
                                      beta=float(np.exp(-1e-3/2e-5)),
                                      delay=0,
                                      th=th,
                                      a_plus=.03125,
                                      a_minus=.055625,
                                      w_max=1.)

Sin = Sin_tmp.clone().detach()

# Pre-process PSpikes and NSpikes
dt_ltp = 20  # number of timesteps to look back
dt_ltd = 60  # number of timesteps to look ahead
PSpikes = preSpikes(T, dt_ltp, torch.zeros(T, N_in), Sin)
NSpikes = nextSpikes(T, dt_ltd, torch.zeros(T, N_in), Sin)

# Run the STDP training
Uprobe = np.empty([T, N_out])
Iprobe = np.empty([T, N_out])
Sprobe = np.empty([T, N_out])
for n in range(T):
    state = singleNeuron.forward(Sin[n].unsqueeze(0), PSpikes[n], NSpikes[n-1])
    Uprobe[n] = state.U.data.numpy()
    Iprobe[n] = state.I.data.numpy()
    Sprobe[n] = state.S.data.numpy()

sv_utils.plotLIF(U=Uprobe[0:5000], S=Sprobe[0:5000], pat_times=pat_times[0:5000], th=th)
sv_utils.plotLIF(U=Uprobe[5000:10000], S=Sprobe[5000:10000], pat_times=pat_times[5000:10000], th=th)
sv_utils.plotLIF(U=Uprobe[10000:15000], S=Sprobe[10000:15000], pat_times=pat_times[10000:15000], th=th)
sv_utils.plotLIF(U=Uprobe[15000:20000], S=Sprobe[15000:20000], pat_times=pat_times[15000:20000], th=th)
sv_utils.plotLIF(U=Uprobe[20000:25000], S=Sprobe[20000:25000], pat_times=pat_times[20000:25000], th=th)
sv_utils.plotLIF(U=Uprobe[25000:30000], S=Sprobe[25000:30000], pat_times=pat_times[25000:30000], th=th)
sv_utils.plotLIF(U=Uprobe[30000:35000], S=Sprobe[30000:35000], pat_times=pat_times[30000:35000], th=th)
sv_utils.plotLIF(U=Uprobe[35000:37500], S=Sprobe[35000:37500], pat_times=pat_times[35000:37500], th=th)
# -
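# A quick, hedged summary of the trained neuron: total output spikes and mean firing rate, read straight off the probes filled in above. A 1 ms timestep is assumed, matching the exp(-1e-3/...) decay constants at the top of the notebook.

n_spikes = int(np.count_nonzero(Sprobe))
print("output spikes:", n_spikes)
print("mean firing rate: {:.2f} Hz".format(n_spikes / (T * 1e-3)))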
cleanDSNN/results/old_results/all_nums/with_decay/SequenceVisualizer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Inference from pyannote.database import get_protocol, FileFinder protocol = get_protocol('Debug.SpeakerDiarization.Debug', preprocessors={"audio": FileFinder()}) # ## Train and apply a voice activity detection model from pyannote.audio.tasks import VoiceActivityDetection from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel import pytorch_lightning as pl vad = VoiceActivityDetection(protocol, duration=2., batch_size=16, num_workers=4) model = SimpleSegmentationModel(task=vad) trainer = pl.Trainer(max_epochs=1, default_root_dir='inference/vad') _ = trainer.fit(model) from pyannote.audio import Inference inference = Inference(model, step=0.1, batch_size=128) # inference dev_file = next(protocol.development()) scores = inference(dev_file) scores # inference on an excerpt from pyannote.core import Segment scores = inference.crop(dev_file, Segment(10, 15)) scores # inference on an excerpt shorter than sliding window duration scores = inference.crop(dev_file, Segment(10, 11.5)) scores # inference on a whole chunk inference = Inference(model, window="whole") scores = inference.crop(dev_file, Segment(10, 15)) # inference with progress bar inference = Inference(model, step=0.1, batch_size=32, progress_hook='Processing...') scores = inference(dev_file) inference = Inference(model, step=0.1, batch_size=1, progress_hook=True) scores = inference.crop(dev_file, Segment(10, 15)) # ## Load and apply a pretrained VAD model from pyannote.audio import Model model = Model.from_pretrained('inference/vad/lightning_logs/version_0/checkpoints/epoch=0-step=8.ckpt') inference = Inference(model, step=0.1, batch_size=128) scores = inference(dev_file) scores # ## Train and apply a speaker embedding model from pyannote.audio.tasks import SpeakerEmbedding emb = SpeakerEmbedding(protocol, duration=2., num_workers=4) from pyannote.audio.models.embedding.debug import SimpleEmbeddingModel model = SimpleEmbeddingModel(task=emb) trainer = pl.Trainer(max_epochs=1, default_root_dir='inference/emb') _ = trainer.fit(model) # + # inference using a sliding window inference = Inference(model, duration=1., step=0.5) embeddings = inference(dev_file) data, window = embeddings.data, embeddings.sliding_window data.shape, window.start, window.duration, window.step # + # inference using a sliding window on an excerpt embeddings = inference.crop(dev_file, Segment(5, 12)) data, window = embeddings.data, embeddings.sliding_window data.shape, window.start, window.duration, window.step # + # inference using a sliding window on an excerpt shorter than sliding window embeddings = inference.crop(dev_file, Segment(11.1, 12)) data, window = embeddings.data, embeddings.sliding_window data.shape, window.start, window.duration, window.step # + # inference on a whole chunk inference = Inference(model, window="whole") embeddings = inference.crop(dev_file, Segment(5, 12)) embeddings.shape # + # inference on a whole chunk shorter than training duration embeddings = inference.crop(dev_file, Segment(5, 5.2)) embeddings.shape # + # inference on a collection of chunks embeddings = inference.crop(dev_file, [Segment(0, 4), Segment(5, 12)]) embeddings.shape
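# A hedged follow-up to the whole-window embeddings above: such embeddings are typically compared with cosine distance. The segments below are arbitrary, and the crops are flattened defensively in case the returned array carries a leading batch dimension.

# +
import numpy as np
from scipy.spatial.distance import cosine

inference = Inference(model, window="whole")
e1 = np.ravel(inference.crop(dev_file, Segment(0, 4)))
e2 = np.ravel(inference.crop(dev_file, Segment(5, 12)))
print("cosine distance:", cosine(e1, e2))
# -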
notebook/inference.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # In this notebook we test our implementation of bayesian regression. We build a gradient-descent version and compare it to the sk-learn result to see if it works. # + # General imports import numpy as np import torch from deepymod.data import Dataset from deepymod.data.burgers import BurgersDelta from sklearn.linear_model import BayesianRidge, ARDRegression # - # # Making data # + # Making dataset v = 0.1 A = 1.0 x = np.linspace(-3, 4, 100) t = np.linspace(0.5, 5.0, 50) x_grid, t_grid = np.meshgrid(x, t, indexing='ij') dataset = Dataset(BurgersDelta, v=v, A=A) y = dataset.time_deriv(x_grid.reshape(-1, 1), t_grid.reshape(-1, 1)) # observations X = dataset.library(x_grid.reshape(-1, 1), t_grid.reshape(-1, 1), poly_order=2, deriv_order=3) # covariates print(y.shape, X.shape) # - y += np.std(y) * 0.1 * np.random.randn(*y.shape) # # Baseline regressor = BayesianRidge(fit_intercept=False, compute_score=True, alpha_1=0, alpha_2=0, lambda_1=0, lambda_2=0) regressor.fit(X, y.squeeze()) baseline_coeffs = regressor.coef_[:, None] print(baseline_coeffs) baseline_noise_precision = regressor.alpha_ print(baseline_noise_precision) baseline_prior_precision = regressor.lambda_ print(baseline_prior_precision) regressor.scores_ # # Own implementation X = torch.tensor(X) y = torch.tensor(y) # + alpha_ = torch.tensor(baseline_noise_precision) lambda_ = torch.tensor(baseline_prior_precision) N = X.shape[0] M = X.shape[1] # - Sn = torch.inverse(lambda_ * torch.eye(M) + alpha_ * X.T @ X) mn = alpha_ * Sn @ X.T @ y print(mn) # This matches the SK learn values, so thats correct. Now to calculate the neg LL # %%time mu_post = X @ mn sigma_post = alpha_ * torch.eye(N) + X @ Sn @ X.T L = torch.inverse(sigma_post) log_p = 1/2 * (-torch.trace(torch.log(L)) - (y - mu_post).T @ L @ (y - mu_post) - N * np.log(2*np.pi)) print(log_p) # Seems close enough to the sklearn implementation, although we can make many things much more efficient (e.g. use woodbury inversion etc). 
Now let's try and do gradient descent: # + alpha_ = torch.nn.Parameter(1/torch.var(y)) lambda_ = torch.nn.Parameter(torch.ones(1)) X = torch.tensor(X) y = torch.tensor(y) N = X.shape[0] M = X.shape[1] # - optimizer = torch.optim.Adam([alpha_, lambda_], lr=1.0) max_epochs = 1e4 for epoch in torch.arange(max_epochs): Sn = torch.inverse(lambda_ * torch.eye(M) + alpha_ * X.T @ X) mn = alpha_ * Sn @ X.T @ y mu_post = X @ mn sigma_post = alpha_ * torch.eye(N) + X @ Sn @ X.T L = torch.inverse(sigma_post) log_p = 1/2 * (-torch.trace(torch.log(L)) - (y - mu_post).T @ L @ (y - mu_post) - N * np.log(2*np.pi)) loss = -log_p optimizer.zero_grad() loss.backward() optimizer.step() if epoch % 1e3 == 0: print(log_p) # # Own implementation, efficient: # + alpha_ = torch.tensor(baseline_noise_precision) lambda_ = torch.tensor(baseline_prior_precision) N = X.shape[0] M = X.shape[1] Sn = lambda_ * torch.eye(M) + alpha_ * X.T @ X mn = alpha_ * torch.inverse(Sn) @ X.T @ y # - E = alpha_ / 2 * torch.sum((y - X @ mn)**2) + lambda_ / 2 * mn.T @ mn log_p = M / 2 * torch.log(lambda_) + N / 2 * torch.log(alpha_) - E - 1/2 * torch.trace(torch.log(Sn)) - N / 2 * np.log(2*np.pi) # + alpha_ = torch.nn.Parameter(1/torch.var(y)) lambda_ = torch.nn.Parameter(torch.ones(1)) X = torch.tensor(X) y = torch.tensor(y) N = X.shape[0] M = X.shape[1] # - optimizer = torch.optim.Adam([alpha_, lambda_]) max_epochs = 1e5 for epoch in torch.arange(max_epochs): A = lambda_ * torch.eye(M) + alpha_ * X.T @ X mn = alpha_ * torch.inverse(A) @ X.T @ y E = alpha_ / 2 * torch.sum((y - X @ mn)**2) + lambda_ / 2 * mn.T @ mn log_p = M / 2 * torch.log(lambda_) + N / 2 * torch.log(alpha_) - E - 1/2 * torch.trace(torch.log(Sn)) - N / 2 * np.log(2*np.pi) loss = -log_p optimizer.zero_grad() loss.backward() optimizer.step() if epoch % 1e4 == 0: print(log_p) alpha_ lambda_ # Cool, it works. 
Now let's optimize for big and small values: # + X = torch.tensor(X) y = torch.tensor(y) N = X.shape[0] M = X.shape[1] a = torch.nn.Parameter(-torch.log(torch.var(y))) l = torch.nn.Parameter(torch.zeros(M)) # - optimizer = torch.optim.Adam([a, l], lr=1.0) max_epochs = 1e4 for epoch in torch.arange(max_epochs): lambda_ = torch.exp(l) alpha_ = torch.exp(a) A = lambda_ * torch.eye(M) + alpha_ * X.T @ X mn = alpha_ * torch.inverse(A) @ X.T @ y E = alpha_ * torch.sum((y - X @ mn)**2) + lambda_ * mn.T @ mn loss = E + torch.sum(torch.log(torch.diag(A))) - (M * l + N * a) optimizer.zero_grad() loss.backward() optimizer.step() if epoch % 1e3 == 0: print(loss) mn lambda_ alpha_ a # # Sparse bayesian learning # + X = torch.tensor(X) y = torch.tensor(y) N = X.shape[0] M = X.shape[1] a = torch.nn.Parameter(-torch.log(torch.var(y))) l = torch.nn.Parameter(torch.zeros(M, dtype=torch.float64)) # - optimizer = torch.optim.Adam([a, l], lr=1.0) max_epochs = 1e4 for epoch in torch.arange(max_epochs): lambda_ = torch.exp(l) alpha_ = torch.exp(a) A = torch.diag(lambda_) + alpha_ * X.T @ X mn = alpha_ * torch.inverse(A) @ X.T @ y E = alpha_ * torch.sum((y - X @ mn)**2) + mn.T @ torch.diag(lambda_) @ mn loss = E + torch.sum(torch.log(torch.diag(A))) - (torch.sum(l) + N * a) optimizer.zero_grad() loss.backward() optimizer.step() if epoch % 1e3 == 0: print(loss) X = torch.tensor(X) y = torch.tensor(y) # + N = X.shape[0] M = X.shape[1] a = torch.nn.Parameter(-torch.log(torch.var(y))) l = torch.nn.Parameter(torch.zeros(M, dtype=torch.float64)) threshold = 1e4 # - optimizer = torch.optim.Adam([a, l], lr=1.0) max_epochs = 1e4 for epoch in torch.arange(max_epochs): lambda_ = torch.exp(l) alpha_ = torch.exp(a) A = torch.diag(lambda_) + alpha_ * X.T @ X mn = (lambda_ < threshold)[:, None] * (alpha_ * torch.inverse(A) @ X.T @ y) E = alpha_ * torch.sum((y - X @ mn)**2) + mn.T @ torch.diag(lambda_) @ mn loss = E + torch.sum(torch.log(torch.diag(A)[lambda_ < threshold])) - (torch.sum(l[lambda_ < threshold]) + N * a) optimizer.zero_grad() loss.backward() optimizer.step() if epoch % 1e3 == 0: print(loss) mn torch.diag(A) mn l[lambda_ < threshold] torch.min(lambda_, torch.tensor(1e4, dtype=torch.float64)) # # SBL alternative regressor = ARDRegression(fit_intercept=False, compute_score=True, alpha_1=0, alpha_2=0, lambda_1=0, lambda_2=0) regressor.fit(X, y.squeeze()) baseline_coeffs = regressor.coef_[:, None] print(baseline_coeffs) regressor.sigma_ regressor.alpha_ regressor.lambda_ # + beta = torch.tensor(regressor.alpha_) alpha = torch.tensor(regressor.lambda_) M = X.shape[1] N = X.shape[0] # - Cinv = beta * (torch.eye(N) - X @ torch.inverse(beta**-1 * torch.diag(alpha) + X.T @ X) @ X.T) - 1/2 * (N * np.log(2*np.pi) - torch.sum(torch.log(torch.diag(Cinv))) + y.T @ Cinv @ y) regressor.scores_
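# A hedged sanity check to close the loop: the posterior mean found by the gradient-descent SBL loop should sit close to sklearn's ARD coefficients, since both were fit on the same X and y. This only prints the variables as they stand at the end of the section.

print("SBL posterior mean:   ", mn.detach().numpy().squeeze())
print("sklearn ARD coefficients:", baseline_coeffs.squeeze())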
notebooks/Bayes/Bayesian_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Predicting Respiratory Rate # #### Team Name: The Pandas # # Part of the AI for Healthcare Hackathon 2019 # # <NAME>, <NAME>, <NAME> # import libraries import matplotlib.pyplot as plt import pandas as pd import math import pycwt as wavelet import numpy as np # + # set up the working directory path = r'.' # use your path #Link to download data: https://www.dropbox.com/s/5j0r1c8phl4891e/bidmc_csv_zip.zip?dl=0 #Original data had to be scraped in order to make it easier to batch download all of the individual csv files. #Click direct download in the link above, and then set your path as shown in the 2nd line. # create an empty list that will contain the files li = [] # read files and add them too the list 'li' for n in range(1,53): s = str(n) if len(s) == 1: s = "0" + s else: s = s pb = path + "/bidmc_" + s + "_Breaths.csv" ps = path + "/bidmc_" + s + "_Signals.csv" df1 = pd.read_csv(pb, index_col=None, header=0) df2 = pd.read_csv(ps, index_col=None, header=0) df2['breaths'] = 0 # if there is a breath at a certain point in time, then put 1, otherwise 0 for i in range(df1.shape[0]): o = df1.iloc[i,0] if math.isnan(o) == False: c = df2.shape[1]-1 df2.iloc[int(o),c] = 1 df2['id'] = n li.append(df2) # - # display summary statistics of the first file li[0].describe() # + # create the wavelet function def calculateCWT(t,s,steps): mother = wavelet.Morlet(7) deltaT = t[1] - t[0] dj = 1 / steps s0 = 2 * deltaT wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(s, deltaT, dj, s0, -1, mother) power = (np.abs(wave)) ** 2 return power,scales,coi,freqs def cwt_output(power,scale): id=next(i for i, v in enumerate(scale) if v>60) xp=power[:id,:] maxidx=np.argmax(xp,axis=0) return scale[maxidx] #Credit for function: https://github.com/danielefranceschi/wavelets-climatological-time-series # + # for each dataset, apply wavelet function to remove noise from the PLETH column # the new column will be used as independent variable to predict 'Resp' colum from 'Signals' data for i in range(0, 52): power,scales,coi,freqs=calculateCWT(range(0,len(li[i])),li[i][' PLETH'],50) wcl=cwt_output(power,scales) li[i]['WavePLETH'] = wcl # show the head of the first dataset; last column was computed above li[0].head() # - # for the first patient, display 'Resp' vs 'Pleth' vs 'WavePleth' (new column) line_fig = plt.figure(figsize = (12, 6)) _ = plt.plot(li[0][0:3500][' RESP']) _ = plt.plot(li[0][0:3500][' PLETH']) _ = plt.plot(li[0][0:3500]['WavePLETH'] - 42) # substracting 42 to show the graphs closer to each other _ = plt.legend(loc=2) _ = plt.xlabel("Reading Time (in 0.008 seconds)", fontsize = 15) _ = plt.ylabel("Data Value", fontsize = 15) _ = plt.title("Resp, Pleth & WavePleth over Time", fontsize = 15) plt.show() # ##### The Pleth variable doesn't show any clear trend that would help predict Resp. After decomposing the Pleth variable, the WavePleth variable seems to have a similar pattern to the Resp variable. # ##### Therefore, WavePleth will be used in LSTM model to predict RESP. # ## LSTM (Long Short-Term Memory) Modeling # + from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from keras.layers import Input from sklearn.preprocessing import MinMaxScaler # + #Restrict patients to retain no more than 60,000 records. 
This will ensure everyone will have the same amount of records. list_full = [] for i in range(52): df = li[i].loc[0:59999,:] list_full.append(df) # + #Shape the data into the proper input/output for LSTM. The predictor is WavePLETH, and the target is RESP. scaler = MinMaxScaler() #Training #X_Input X_input_list = [] for i in range(52): df = pd.DataFrame(columns=['WavePLETH']) df['WavePLETH'] = list_full[i]['WavePLETH'] df['WavePLETH'] = scaler.fit_transform(np.array(df['WavePLETH']).reshape(60000,1)) X_input_array = np.asarray(df).reshape((60000,1,1)) X_input_list.append(X_input_array) #Y_Input Y_input_list = [] for i in range(52): df = pd.DataFrame(columns=['RESP']) df['RESP'] = list_full[i][' RESP'] df['RESP'] = scaler.fit_transform(np.array(df['RESP']).reshape(60000,1)) Y_input_array = np.asarray(df).reshape((60000,1)) Y_input_list.append(Y_input_array) # + #Model building model5 = Sequential() model5.add(LSTM(500, batch_input_shape = (None,1,1), return_sequences = True)) model5.add(LSTM(1, activation = 'relu', return_sequences = False)) #Compile model model5.compile(loss = 'mse', optimizer = 'adam', metrics = ['accuracy']) model5.summary() # + #Fitting the model with the training data (first 40 patients). for i in range(40): print("Fitting to", i) history = model5.fit(X_input_list[i], Y_input_list[i], epochs = 5, batch_size = 6000) # - # The above model was trained on the data for the first 40 patients. We then use the trained model to predict the RESP time series for unseen data. Here is an example of the trained model predicting patient 42, (top), compared to the actual RESP of patient 42 (bottom). The important thing to note is that we care only about the peaks, since this indicates breathing and can be used to get the respiratory rate. As seen below, the model does do a good job in this case of matching the peaks in the correct time span. (Subset was used because all 60,000 is difficult to interpret all at once). # + #Predicting for out-of-sample patients preds42 = model5.predict(X_input_list[43]) plt.plot(preds42[5000:6000]) # - plt.plot(Y_input_list[43][5000:6000]) # Below is another example from a different out-of-sample patient. # + preds43 = model5.predict(X_input_list[44]) plt.plot(preds43[2000:3000]) # - plt.plot(Y_input_list[44][2000:3000]) # ### Final thoughts: # # This analysis showed great potential in being able to identify respiratory rates based on more available and cheaper methods. While our model did not show the best accuracy, we view this as an excellent proof of concept, and show that a fairly shallow LSTM model is able to detect similar peaks on unseen data. We would like to see this work further improved in the future with more complicated modeling. As this project was done within a 24 hour period, we believe that this is a great starting point.
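# Closing the gap from "matching peaks" to an actual respiratory rate: the waveform is sampled every 0.008 s (125 Hz, per the axis label earlier), so counting predicted peaks over the record length gives breaths per minute. A hedged sketch with scipy's peak finder — the height and distance parameters are illustrative guesses, not tuned values.

# +
from scipy.signal import find_peaks

fs = 125  # samples per second (one reading every 0.008 s)

peaks, _ = find_peaks(preds42.ravel(), height=0.5, distance=fs)  # at most ~1 breath per second
duration_min = len(preds42) / fs / 60
print("estimated respiratory rate: %.1f breaths/min" % (len(peaks) / duration_min))
# -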
Predict Respiratory Rate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import zipfile covid = pd.read_csv("covid19_cleaned.zip", low_memory = False) covid[:3] # + dropcols = ["admin_id", "notes_for_discussion", "source", "geo_resolution", "lives_in_Wuhan", "additional_information", "sequence_available", "data_moderator_initials", "location", "country_new", "Unnamed: 0", "Unnamed: 0.1"] covid = covid.drop(covid[dropcols], axis = 1) print(covid.shape) # + #cleaning of the age column age = np.array(covid.age) sym = ["weeks", "+", "-", " "] for x,y in enumerate(age): if type(y) is float: age[x] = y elif sym[0] in y: age[x] = 0 elif sym[1] in y: age[x] = y.replace('+','') age[x] = int(round(float(age[x]))) elif sym[2] in y: if len(y) >= 4: mat = y.split("-") if "Sep" in mat: age[x] = 0 else: age[x] = (int(mat[0]) + int(mat[1]))/2 else: age[x] = y.replace('-','') age[x] = int(round(float(age[x]))) elif sym[3] in y: num, month = y.split(" ") num = int(num) if num < 6: age[x] = 0 elif num >= 6 and num < 18: age[x] = 1 else: age[x] = 2 for x,y in enumerate(age): age[x] = float(y) covid["age"] = age covid["age"].fillna(covid["age"].mean(), inplace = True) covid['age'] = covid['age'].astype(float) # + #converts bool to binary. covid.chronic_disease_binary = [1 if x == True else 0 for x in covid.chronic_disease_binary] # + #drop missing country values. conveniently drops missing lat and long vals. covid = covid.dropna(subset = ["country"]) # + #nan becomes a zero, if they traveled it becomes a one covid.travel_history_binary = [1 if x == True else 0 for x in covid["travel_history_binary"]] # + # craft a boolean array and for values False (NaN), cut in half and assign half female/male?? sex_e = np.array(["male", "female"]) sex = np.array(covid.sex) sex_is_bool = [True if x == "male" or x == "female" else False for x in covid["sex"]] for x, y in enumerate(sex_is_bool): if y == False: sex[x] = np.random.choice(sex_e) covid.sex = sex covid.sex.value_counts() # - covid.to_csv("covid19cleaned_sexandothers.zip", compression = "zip")
Cleaning - Ages and Genders.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + from __future__ import print_function from typing import List, Tuple, Sequence from collections import namedtuple import random import simpy from serversim import * def simulate_deployment_scenario(num_users, weight1, weight2, server_range1, server_range2): # type: (int, float, float, Sequence[int], Sequence[int]) -> Result Result = namedtuple("Result", ["num_users", "weight1", "weight2", "server_range1", "server_range2", "servers", "grp"]) def cug(mid, delta): """Computation units generator""" def f(): return random.uniform(mid - delta, mid + delta) return f def ld_bal(svc_name): """Application server load-balancer.""" if svc_name == "svc_1": svr = random.choice(servers1) elif svc_name == "svc_2": svr = random.choice(servers2) else: assert False, "Invalid service type." return svr simtime = 200 hw_threads = 10 sw_threads = 20 speed = 20 svc_1_comp_units = 2.0 svc_2_comp_units = 1.0 quantiles = (0.5, 0.95, 0.99) env = simpy.Environment() n_servers = max(server_range1[-1] + 1, server_range2[-1] + 1) servers = [Server(env, hw_threads, sw_threads, speed, "AppServer_%s" % i) for i in range(n_servers)] servers1 = [servers[i] for i in server_range1] servers2 = [servers[i] for i in server_range2] svc_1 = CoreSvcRequester(env, "svc_1", cug(svc_1_comp_units, svc_1_comp_units*.9), ld_bal) svc_2 = CoreSvcRequester(env, "svc_2", cug(svc_2_comp_units, svc_2_comp_units*.9), ld_bal) weighted_txns = [(svc_1, weight1), (svc_2, weight2) ] min_think_time = 2.0 # .5 # 4 max_think_time = 10.0 # 1.5 # 20 svc_req_log = [] # type: List[Tuple[str, SvcRequest]] grp = UserGroup(env, num_users, "UserTypeX", weighted_txns, min_think_time, max_think_time, quantiles, svc_req_log) grp.activate_users() env.run(until=simtime) return Result(num_users=num_users, weight1=weight1, weight2=weight2, server_range1=server_range1, server_range2=server_range2, servers=servers, grp=grp) # - random.seed(123456) sc1 = simulate_deployment_scenario(num_users=720, weight1=2, weight2=1, server_range1=range(0, 10), server_range2=range(0, 10)) # + import seaborn as sns from typing import TYPE_CHECKING, Sequence, Tuple from collections import OrderedDict import matplotlib.pyplot as plt import pandas as pd if TYPE_CHECKING: from serversim import UserGroup def bands(time_resolution, grp): # type: (float, UserGroup) -> Tuple[Sequence[float], Sequence[float], Sequence[float], Sequence[float], Sequence[float], Sequence[float]] times = ((svc_req.time_dict["submitted"] // time_resolution) * time_resolution for (_, svc_req) in grp.svc_req_log if svc_req.is_completed) vals = (svc_req.time_dict["completed"] - svc_req.time_dict["submitted"] for (_, svc_req) in grp.svc_req_log if svc_req.is_completed) series = pd.Series(vals, index=times) grouped = series.groupby(level=0) counts_ser = grouped.count() ts = counts_ser.index.values counts = counts_ser.values means = grouped.mean().values q_50 = grouped.quantile(.50).values q_95 = grouped.quantile(.95).values q_05 = grouped.quantile(.05).values return ts, counts, means, q_50, q_95, q_05 def errorfill(x, y, ylow, yhigh, color='blue', alpha_fill=0.3, ax=None): ax = ax if ax is not None else plt.gca() if color is None: color = ax._get_lines.color_cycle.next() ax.plot(x, y, color=color) ax.fill_between(x, ylow, yhigh, color=color, alpha=alpha_fill) plt.show() ts, _, means, _, q_95, q_05 = 
bands(5, sc1.grp) errorfill(ts, means, q_05, q_95)
_ErrorFillExample.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.9 64-bit (conda) # name: python3 # --- # # Population Segmentation Dataset # ### Objective # - An unsupervised learning model to seek clusters in the Population Segmentation Data and US Census Data that reveal some sort of demographic traits the define their similarities for different regions (Indexed by concatenating State-County) in the United States # # # ## 1. DATA COLLECTION WITH AWS S3 # - Use AWS S3 bucket is a storage location to Pop Segmentation dataset # # ## 2. EXPLORATORY DATA ANALYSIS & DATA CLEANING # - Find missing values (replace them with mean/media/mode, KNN, or KMEANs) # - plot distribution # - plot pie chart # # ## 3. FEATURE ENGINEERING # - KMeans and DBSCAN uses euclidean distance # - We want to minimize the distance length between two points in Euclidean space (it's required to scale the data) # - Find different ways to scale the data (Gussian Distribution) # - Logarithmic Transformation # - Box-Cox Transformation # - Standard Scaler (z-score) # # ## 4. FEATURE SELECTION # - Dropping low variance variables # - Pearson Correlation # - Principal Component Analysis import pandas as pd import numpy as np import boto3 import io import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.feature_selection import VarianceThreshold import scipy.stats as stats from pprint import pprint import pylab # # 1. DATA COLLECTION WITH AWS S3 # + # use boto3 client to get S3 bucket s3_client = boto3.client('s3') obj_list=s3_client.list_objects(Bucket='aws-ml-blog-sagemaker-census-segmentation') # print objects in S3 bucket files=[] for contents in obj_list['Contents']: files.append(contents['Key']) file_name=files[0] data_object = s3_client.get_object(Bucket='aws-ml-blog-sagemaker-census-segmentation', Key=file_name) data_body = data_object["Body"].read() data_stream = io.BytesIO(data_body) # create a pandas dataframe df = pd.read_csv(data_stream, header=0, delimiter=",") df.head() # - # Combine 'State' and 'County' and index the column df.index = df['State'] + '-' + df['County'] # drop the 'CensusId', 'State', 'County' columns df = df.drop(['CensusId', 'State', 'County'], axis=1) # # EXPLORATORY DATA ANALYSIS & DATA CLEANING # provides a quantitative summary for each column (count, mean, std, min, max, etc) df.describe() print(f'Number of rows of DataFrame {df.shape[0]}') print(f'Number of columns of DataFrame {df.shape[1]}') # get datatype for each feature (string, float, int, bool, object, etc) # State and County are object pprint(df.columns.to_series().groupby(df.dtypes).groups) # + features = df.columns numerical_feat = [features for features in df.columns if df[features].dtypes != 'O'] print(f'Number of Numerical Features: {len(numerical_feat)}') categorical_feat = [features for features in df.columns if df[features].dtypes == 'O'] print(f'Number of Categorical Features: {len(categorical_feat)}') # - # # Find Missing Values # find null values in DataFrame if df.isnull().sum().any() == False: print('Data is Clean, No Null values found') else: print('Found Null Values') # create a pandas dataframe with data_type, num_null_values, and percentage of null_values for each feature df_details = pd.DataFrame(df.dtypes).T.rename(index={0:'datatype'}) df_details = 
df_details.append(pd.DataFrame(df.isnull().sum()).T.rename(index={0:'num_null_values'})) df_details = df_details.append(pd.DataFrame(df.isnull().sum()/len(df)*100).T.rename(index={0:'pct_null_values'})) df_details # # Fill Null Values with Median since there's outliers # Null values can be dropped, replaced by Median, Mean, or t-SNE # Before filling Null values, analyz the data distribution for [ChildPoverty, IncomeErr, Income] null_col = ['ChildPoverty', 'IncomeErr', 'Income'] plt.subplots(figsize=(15,8)) sns.boxplot(data=df[null_col], orient="h", palette="Set2") # given the boxplot, there's alot of extreme values in IncomeErr and Income # fill in null values with median, not mean (mean is sensitive to extreme values) df = df.fillna(df.median()) # Verify if DataFrame still has null values if df.isnull().sum().any() == False: print('Data is Clean, No Null values found') else: print('Found Null Values') df.columns # + # income_list features historgram income_list = ['Income', 'IncomeErr', 'IncomePerCap', 'IncomePerCapErr', 'Poverty', 'ChildPoverty'] plt.subplots(figsize=(15,8)) sns.histplot(df[income_list].astype(float)) # pie chart of income sum sizes = [df['Income'].sum(), df['IncomeErr'].sum(), df['IncomePerCap'].sum(), df['IncomePerCapErr'].sum(), df['Poverty'].sum(), df['ChildPoverty'].sum() ] fig1, ax1 = plt.subplots(figsize=(8,8)) ax1.pie(sizes, labels=income_list, autopct='%1.2f%%') plt.show() # + # occupation_list features historgram occupation_list = ['Professional', 'Service', 'Office', 'Construction', 'Production'] plt.subplots(figsize=(15,8)) sns.histplot(df[occupation_list].astype(float)) sizes = [df['Professional'].sum(), df['Service'].sum(), df['Office'].sum(), df['Construction'].sum(), df['Production'].sum(), ] fig1, ax1 = plt.subplots(figsize=(8,8)) ax1.pie(sizes, labels=occupation_list, autopct='%1.2f%%', shadow=False) plt.show() # + # transportation features historgram transportation_list = ['Drive', 'Carpool', 'Transit', 'Walk', 'OtherTransp'] plt.subplots(figsize=(15,8)) sns.histplot(df[transportation_list].astype(float)) # pie chart of transportation_list sizes = [df['Drive'].sum(), df['Carpool'].sum(), df['Transit'].sum(), df['Walk'].sum(), df['OtherTransp'].sum(), ] fig1, ax1 = plt.subplots(figsize=(8,8)) ax1.pie(sizes, labels=transportation_list, autopct='%1.2f%%', shadow=False) plt.show() # + # race features historgram race_list = ['Hispanic', 'White', 'Black', 'Native', 'Asian','Pacific'] plt.subplots(figsize=(15,8)) sns.histplot(df[race_list].astype(float)) # pie chart of race_list sizes = [df['Hispanic'].sum(), df['White'].sum(), df['Black'].sum(), df['Native'].sum(), df['Asian'].sum(), df['Pacific'].sum() ] fig1, ax1 = plt.subplots(figsize=(8,8)) ax1.pie(sizes, labels=race_list, autopct='%1.2f%%', shadow=False) plt.show() # - # # VISUALIZE PEARSON CORRELATION # ## Helps identify any strong correlation between variables # + # compute corr array and generate a mask for the upper triangle corr = df.corr() mask = np.triu(np.ones_like(corr, dtype=bool)) plt.subplots(figsize=(15,8)) # Generate a custom diverging colormap cmap = sns.diverging_palette(230, 20, as_cmap=True) # plt heatmap with mask and ratio sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5}) # looks like [Poverty, ChildPoverty] are highly correlated with [Income, Income Cap] # - # # FEATURE ENGINEERING # ## Define Index as State + County old_columns = df.columns # # Box-Plot # ## Identifies outliers gender_list = ['Men', 'Women',] 
plt.subplots(figsize=(20,7)) sns.boxplot(data=df[gender_list], orient="h", palette="Set2") # # Box Plot for Race Features plt.subplots(figsize=(20,7)) sns.boxplot(data=df[race_list], orient="h", palette="Set2") # # Box Plot for Occupations Features plt.subplots(figsize=(20,7)) sns.boxplot(data=df[occupation_list], orient="h", palette="Set2") # # Box Plot for Transportation Features plt.subplots(figsize=(20,7)) sns.boxplot(data=df[transportation_list], orient="h", palette="Set2") # # Box Plot for Income Features plt.subplots(figsize=(20,7)) sns.boxplot(data=df[income_list], orient="h", palette="Set2") # # Implement Guassian Transformation for each cluster # - Logarithmic Transformation # - Reciprocal Transformation # - Square-Root Transformation # - Exponential Transformation # - Box-Cox Transformation # plot two diagram (distribution histogram and boxplot) def plot_distribution(df, feature): plt.figure(figsize=(10,6)) # first row, 1st column plt.subplot(1, 2, 1) df[feature].hist() # first row, 2 column plt.subplot(1, 2, 2) stats.probplot(df[feature], dist='norm', plot=pylab) plt.show() # plot two diagram (distribution histogram and boxplot) def plot_data_transformation(df, feature, feature_transform): plt.figure(figsize=(20,6)) plt.subplot(1, 4, 1) stats.probplot(df[feature], dist='norm', plot=pylab) plt.subplot(1, 4, 2) df[feature].hist() plt.subplot(1, 4, 3) stats.probplot(df[feature_transform], dist='norm', plot=pylab) plt.subplot(1, 4, 4) df[feature_transform].hist() plt.show() # + # Gender Transformation using Logarithmic def transform_feature_log(df, feature): df[feature+'_log'] = np.log(df[feature]) for i in gender_list: transform_feature_log(df, i) plot_data_transformation(df, 'Women', 'Women_log') # + def transform_feature_scale(df, feature): scaler = StandardScaler() df[[feature+'_scale']] = scaler.fit_transform(df[[feature]]) for i in race_list: transform_feature_scale(df, i) # + # Occupation Tranformation def transform_feature_box(df, feature): df[feature+'_boxcox'], _ = stats.boxcox(df[feature] + 1) for i in occupation_list: transform_feature_log(df, i) plot_data_transformation(df, 'Professional', 'Professional_log') # + # Transportation Transformation using Boxcox for i in transportation_list: transform_feature_box(df, i) plot_data_transformation(df, 'Walk', 'Walk_boxcox') # + # Income Transformation using Logarithmic for i in income_list: transform_feature_log(df, i) plot_data_transformation(df, 'IncomePerCapErr', 'IncomePerCapErr_log') # - transform_feature_log(df, 'TotalPop') plot_data_transformation(df, 'TotalPop', 'TotalPop_log') # + scaler = StandardScaler() df[["MeanCommute_scale"]] = scaler.fit_transform(df[["MeanCommute"]]) plot_distribution(df, 'MeanCommute_scale') # - df = df.drop(old_columns, axis = 1) # # Feature Selection # - Dropping low variance variables # - Pearson Correlation # - Principal Component Analysis # + def clean_dataset(df): assert isinstance(df, pd.DataFrame), "df needs to be a pd.DataFrame" df.dropna(inplace=True) indices_to_keep = ~df.isin([np.nan, np.inf, -np.inf]).any(1) return df[indices_to_keep].astype(np.float64) df = clean_dataset(df) # + # compute corr array and generate a mask for the upper triangle corr = df.corr() mask = np.triu(np.ones_like(corr, dtype=bool)) plt.subplots(figsize=(15,8)) # Generate a custom diverging colormap cmap = sns.diverging_palette(230, 20, as_cmap=True) # plt heatmap with mask and ratio sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5}) # + 
var_threshold = VarianceThreshold(threshold=0.05)
var_threshold.fit(df)

# get the feature columns with (near-)zero variance
constant_col = [col for col in df.columns if col not in df.columns[var_threshold.get_support()]]

# drop features with (near-)zero variance (constant features)
df.drop(constant_col, axis=1, inplace=True)
print(constant_col)

# +
def drop_corr(df, threshold):
    # set of the names of all correlated columns
    col_corr = set()
    corr_matrix = df.corr()
    for i in range(len(corr_matrix.columns)):
        for j in range(i):
            # get the coefficient value
            if corr_matrix.iloc[i, j] > threshold:
                colname = corr_matrix.columns[i]
                col_corr.add(colname)
    return col_corr

corr_features = drop_corr(df, 0.80)
print(corr_features)
df.drop(corr_features, axis=1, inplace=True)

# +
# compute the correlation matrix and generate a mask for the upper triangle
corr = df.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))

plt.subplots(figsize=(15,8))

# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)

# plot the heatmap with mask and ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True,
            linewidths=.5, cbar_kws={"shrink": .5})
# -

# note: index=False drops the State-County index built earlier; keep the index if it is needed downstream
df.to_csv('../inputs/population_seg_clean.csv', index=False)

# # implement this in a separate .py file in the repo
# PCA for dimensionality reduction
# Clustering the transformed data with k-means
# Use the KMeans clustering model and evaluate it
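# The closing notes point at PCA plus k-means as the next step. A hedged, self-contained sketch of that pipeline on the cleaned frame — the component and cluster counts are placeholders to be tuned (e.g. with an elbow plot), not recommended settings.

# +
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans

pca = PCA(n_components=0.9)  # keep enough components to explain 90% of the variance
components = pca.fit_transform(df)

kmeans = KMeans(n_clusters=8, random_state=42)
labels = kmeans.fit_predict(components)

print(pca.n_components_, "components kept")
print("cluster sizes:", np.bincount(labels))
# -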
notebook/population_segmentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # # **</>** Coding with Clarkey # # > **Homework Volume 1!** # # _This aims to build a little on what we did with dictionaries this week._ # # # Welcome to our library! # We aren't very good yet though. We don't have many books, and we don't know much about them # ___ # # ## -> A quick refresher # # In programming there are structures called 'key/value' stores, which store information about a certain _something_ # e.g. the dewey decimal system for books, where the reference number is the `key`, and the `value` that is stored is the book itself. # # `dictionaries` are the key/value structure in python. In a somewhat meta example, here is how you would store the data found when you google [for the definition of the word 'wow'](https://www.google.com.au/search?ei=SLEXW42iOM308AWF36zgAw&q=define%3A+wows&oq=define%3A+wows&gs_l=psy-ab.3..0i10i67k1j0l6j0i10k1l2j0.4894.5478.0.5482.4.4.0.0.0.0.267.267.2-1.1.0....0...1c.1.64.psy-ab..3.1.267....0.eo7n_HHkw9M#dobs=wow) # + wow = { 'formality': 'informal', 'type': 'noun', 'plural': 'wows', 'definition': 'a sensational success.', 'example': "your play's a dick" } # You access the information by using the 'keys', # e.g. to get the example of the word, use: wow['example'] # - # ___ # # ## -> Our dictionary # # I've made a slightly larger dictionary that contains several items, meant to represent a library # # Currently, it only contains two bits of info on each book: the author and the year it was published. # # > Currently, it's filled with 4 of my all time favourites :) # wow["yolo"] library = { 'The Count of Monte Cristo': { 'author': '<NAME>', 'year': 1844 }, 'The Thorn Birds': { 'author': '<NAME>', 'year': 1977 }, 'The Handmaid\'s Tale': { 'author': '<NAME>', 'year': 1985 }, 'The Book Thief': { 'author': '<NAME>', 'year': 2005 } } # Now that we have our `library`, let's make a simple method to `display` what we have on offer. # # # > **Side note:** This method contains a `docstring`, a sentence surrounded by single quotes that describes what the method does. _It's not always needed, but is often present in nice code._ def display(): 'Displays information about the books in the library' for title, info in library.items(): print(title, info) # Run me! display() print(library['The Book Thief']['year']) # ___ # # # ## -> Printing 'stuff' # # Printing is one of the most underrated areas of programming, but one of the hardest to master! Even using very quick, basic techniques can make a huge difference. # # ### Decisions, decisions # # There are several ways to print variables in python! This is unusual among programming languages, and you'll probably end up just using one. # + # 1. using `.format` activity = 'coding' person = 'clarkey' '{} with {}'.format(activity, person) # + # 2. using an `f` string activity = 'coding' person = 'clarkasdfey' emoji = '🌹' f'{activity} with {emoji} {person}' # - # > Recommending a single version is difficult: I prefer the `f` strings, but they only exist in Python 3.6+, so if you use an older version of Python then it won't work. # # I'd probably recommend being familiar with both methods, they are very similar, which becomes apparent as you use them more. 
# # ___ # # Let's try a better way of displaying our books by accessing their info: # + def display(): 'Displays information about the books in the library' for title, info in library.items(): # title, print(f"-- {title} ({info['year']}), by {info['author']}") # Now let's run our new improved method display() # - # ___ # # Here's a little peek of the power of string formatting in python - instead of using the usual `{}` to print the `title` variable, you can align things! # Let's see what happens when we align things left and right, with a certain width '1{:>10}'.format('test') '{:<10}'.format('test') # ### Combining all this... # # Now, taking what we've tried out with **print formatting** and accessing **dictionary keys/values**, we can give our display another red-hot go. # # + def display(): 'Displays information about the books in the library' for title, info in library.items(): print("-- {:<30} ({}), by {}".format(title, info['year'], info['author'])) # Now let's try our new equivalent display() # - # ___ # # _**<insert_mic_drop>**_ # ## Exercises to the reader # # 1. Retrieve and print all books by a certain author # 2. _(optional bonus)_ How might you find and print the oldest book? # 1. def books_by_author(author): for title, info in library.items(): if info['author']== author: print(f'found book: {title}') books_by_author('<NAME>') # + # 2. def oldest_book(): current_year = 2018 oldest_title = '' for title, info in library.items(): print('-----------------') print(f'comparing, current year is: {current_year}') print(f'comparing against: {title}, {info}') if info['year'] < current_year: print('=> found new oldest book! resetting current') oldest_title = title current_year = info['year'] print(f'the oldest book found was {oldest_title}') oldest_book() # -
beginner/lesson001/homework001_print_formatting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

SUMMARIES_FOLDER = "/n/sd7/trung/csp/log/swda_split_20"

import os
import pandas as pd

# +
with open(os.path.join(SUMMARIES_FOLDER, "eval_ret.txt"), "r") as f:
    lines = f.read().split('\n')
# drop blank lines and the "LER..." summary lines
lines = list(filter(lambda s: s.strip() != "" and s[:3] != "LER", lines))
res = [(int(line.split('\t')[0]), int(line.split('\t')[1])) for line in lines]

# +
data = []
da_tagids = {'fo_o_fw_"_by_bc': 0, 'qw': 1, 'h': 2, 'sd': 3, 'sv': 4, 'b': 5, 'x': 6,
             '%': 7, '+': 8, 'qy': 9, 'qrr': 10, 'na': 11, 'bk': 12, 'ba': 13, 'ny': 14,
             '^q': 15, 'aa': 16, 'nn': 17, 'fc': 18, 'ad': 19, 'qo': 20, 'qh': 21,
             'no': 22, 'ng': 23, '^2': 24, 'bh': 25, 'qy^d': 26, 'br': 27, 'b^m': 28,
             '^h': 29, 'bf': 30, 'fa': 31, 'oo_co_cc': 32, 'ar': 33, 'bd': 34, 't1': 35,
             'arp_nd': 36, 't3': 37, 'ft': 38, '^g': 39, 'qw^d': 40, 'fp': 41, 'aap_am': 42}
# invert the mapping once instead of scanning the dict on every iteration
id2tag = {tag_id: tag for tag, tag_id in da_tagids.items()}

for i in range(len(da_tagids)):
    tag = id2tag[i]
    cnt = sum(r[0] == i for r in res)
    # counts relative to tag i, pairing the first column of eval_ret.txt
    # against the second
    tp = sum(r[0] == i and r[1] == i for r in res)
    fp = sum(r[0] == i and r[1] != i for r in res)
    fn = sum(r[0] != i and r[1] == i for r in res)
    prec = tp / (tp + fp) if tp + fp != 0 else None
    recall = tp / (tp + fn) if tp + fn != 0 else None
    # harmonic mean; undefined when either score is missing or zero
    f1 = 2 / (1 / prec + 1 / recall) if prec and recall else None
    data.append([tag, cnt, tp, prec, recall, f1])

df = pd.DataFrame(data, columns=["Tag", "No.", "tp", "precision", "recall", "f1"])
df.index += 1
df
# -
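# A quick follow-up (an added sketch, not in the original analysis): with the per-tag scores in `df`, macro-averaged precision/recall/f1 are just the column means over the tags that have a defined score.

# +
# pandas' mean() skips the None/NaN entries produced for rare tags
macro = df[["precision", "recall", "f1"]].astype(float).mean()
macro
# -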
src/inspect/da.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import numpy as np import matplotlib.pyplot as plt from pandas import DataFrame, Series, Index import pandas as pd # + # check that CENSUS_KEY is defined import settings assert settings.CENSUS_KEY is not None # - # The census documentation has example URLs but needs your API key to work. In this notebook, we'll use the IPython notebook HTML display mechanism to help out. # # http://api.census.gov/data/2010/sf1/geo.html from IPython.core.display import HTML HTML("<iframe src='http://api.census.gov/data/2010/sf1/geo.html' width='800px'/>") # %%HTML <b>hi there</b> # + try: from urllib.parse import urlparse, urlencode, parse_qs, urlunparse except ImportError: from urlparse import urlparse, parse_qs, urlunparse from urllib import urlencode from IPython.core.display import HTML def add_census_key(url, api_key=settings.CENSUS_KEY): """Take an input example Census API call and a key parameter""" pr = urlparse(url) # we're going to modify the query, which is the 5th element in the tuple (index 4) pr1 = list(pr) # convert pr.query from string to dict # see http://stackoverflow.com/a/10233141/7782 for meaning of doseq pr_query = parse_qs(pr.query) pr_query["key"]= api_key pr1[4] = urlencode(pr_query, doseq=True) return urlunparse(pr1) def c_url (url, title=None, api_key=settings.CENSUS_KEY): url_with_key = add_census_key(url, api_key) if title is None: title = url return HTML("""<a href="{url}">{title}</a>""".format(url=url_with_key, title=title)) #add_census_key("http://api.census.gov/data/2010/sf1?get=P0010001&for=county:*") c_url("http://api.census.gov/data/2010/sf1?get=NAME,P0010001&for=state:*") # - # # Scraping the examples # + import requests from lxml.html import parse, fromstring url = "http://api.census.gov/data/2010/sf1/geo.html" r = requests.get(url).content doc = fromstring(r) rows = doc.xpath("//table/tr") # first row is the header headers = [col.text for col in rows[0].findall('th')] headers # next rows are the census URL examples # + row = rows[1] cols = row.findall('td') # col[s0]: Summmary Level print (cols[0].text) # cols[1]: Description print (cols[1].text) # + from itertools import islice from lxml.html import parse # let's actually now decorate the urls def decorated_parse_examples(examples, api_key=settings.CENSUS_KEY): for row in examples: new_row = row.copy() # need to change URLs example_urls_col = new_row[headers[2]] #urls_with_key = [add_census_key(url) for url in example_urls_col] new_row[headers[2]] = "<br/>".join( ["""<a href="{url_with_key}">{url}</a>""".format( url=url, url_with_key=add_census_key(url) ) for url in example_urls_col ]) yield new_row def parse_urls_col(col): # http://stackoverflow.com/a/15074386/7782 return [child for child in col.itertext()] def parse_census_examples(): url = "http://api.census.gov/data/2010/sf1/geo.html" doc = parse(url) rows = doc.xpath("//table/tr") # first row is the header headers = [col.text for col in rows[0].findall('th')] for row in rows[1:]: cols = row.findall('td') yield ({headers[0]:cols[0].text, headers[1]:cols[1].text, headers[2]:parse_urls_col(cols[2])}) #parsed_examples = list(islice(parse_census_examples(),None)) parsed_examples = parse_census_examples() # + # let's redisplay the table with from IPython.display import HTML from jinja2 import Template URLS_TEMPLATE= """ <table> 
<tr> {% for header in headers %} <th>{{header}}</th> {% endfor %} </tr> {% for row in rows %} <tr> {% for header in headers %} <td>{{row[header]}}</td> {% endfor %} </tr> {% endfor %} </table>""" template = Template(URLS_TEMPLATE) HTML(template.render(headers=headers, rows=decorated_parse_examples(parsed_examples))) # -
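# As a follow-up (an added sketch, reusing `parse_census_examples` from above): the same parsed examples can be loaded into a DataFrame for quick filtering and inspection.

# +
examples_df = pd.DataFrame(list(parse_census_examples()))
examples_df.head()
# -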
03_02_Displaying_Census_URLs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# Loading the coil coordinates from an NBS eXimia session file
#

# + pycharm={"metadata": false, "name": "#%%\n", "is_executing": false}
filename = 's01_eximia_coords.txt'
with open(filename, 'r') as f:
    x = f.readlines()
print(x)

# + pycharm={"metadata": false, "name": "#%%\n", "is_executing": false}
print(x[0])
subj = x[1]
print(subj)
nasion = x[10]
leftear = x[11]
rightear = x[12]
print(nasion)

# + pycharm={"metadata": false, "name": "#%%\n"}
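# A possible next step (a sketch only; the exact eXimia line layout is an assumption here):
# if each fiducial line is a label followed by whitespace-separated x, y, z values,
# the numeric coordinates can be pulled out like this.
def parse_coords(line):
    # keep the last three whitespace-separated tokens and convert them to floats
    return [float(v) for v in line.split()[-3:]]

print(parse_coords(nasion))
# -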
load_nbe.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %load_ext autoreload # %autoreload 2 # %matplotlib widget import os import sys import time import numpy as np import math import json from tqdm import tqdm_notebook as tqdm import pathlib from pathlib import Path import time import matplotlib.pyplot as plt # %matplotlib inline import torch import torch.nn as nn import torch.nn.functional as F import torch_geometric from torch_geometric.nn import (NNConv, GMMConv, GraphConv, Set2Set) from torch_geometric.nn import (SplineConv, graclus, max_pool, max_pool_x, global_mean_pool) #from neuralnet_pytorch.metrics import chamfer_loss import trimesh from visualization_utils import plot_mesh_3d import deep_sdf import deep_sdf.workspace as ws from models import * from datasets import * from custom_utils import * from sklearn.neighbors import KDTree # + def computeAvgTransform(): objects = list() for (dirpath, dirnames, filenames) in os.walk("/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/transforms/"): objects += [os.path.join(dirpath, file) for file in filenames if file[-4:] == '.npy'] matricies = [] for obj in objects: matricies.append(np.load(obj)) return np.mean(np.array(matricies), axis=0) AvgTransform = computeAvgTransform() def transformPoints(points, matrix): matrix = torch.cuda.FloatTensor(matrix) column = torch.zeros((len(points), 1), device="cuda:0") + 1 stacked = torch.cat([points, column], dim=1) transformed = torch.matmul(matrix, stacked.T).T[:, :3] return transformed def make_mesh_from_points(points, ply_mesh): transformed_points = transformPoints(points, AvgTransform) edges = trimesh.geometry.faces_to_edges(ply_mesh['face']['vertex_indices']) np_points = transformed_points.cpu().detach().numpy() edge_attr = [np_points[a] - np_points[b] for a, b in edges] data = torch_geometric.data.Data(x = transformed_points, pos= transformed_points, face = torch.tensor(ply_mesh['face']['vertex_indices'], dtype=torch.long).to('cuda:0').t(), edge_attr=torch.tensor(edge_attr, dtype=torch.float).to('cuda:0'), edge_index=torch.tensor(edges, dtype=torch.long).t().contiguous().to('cuda:0')) return data def boundsLoss(points, box=[(-1, 1, 0)]): loss = 0 for l, r, i in box: loss += torch.sum(F.relu(-points[:, i] + l)) + torch.sum(F.relu(points[:, i] - r)) return loss def innerBoundsLoss(points, r=1, center=(0, 0, 0)): radiuses = torch.sum( (points - torch.Tensor(center).to('cuda:0')) ** 2 , dim=1) return torch.sum(F.relu(r - radiuses)) def calculate_loss(mesh, local_preds, signs=None, axis=0, constraint_rad=0.1): # if signs is None: # signs = SAVED_SIGNS loss = (1 - axis) * compute_lift_faces_diff(mesh, local_preds, axis=0) + \ axis * compute_lift_faces_diff(mesh, local_preds, axis=1) loss += boundsLoss(points, box=[(-0.99, 0.99, 0)]) loss += innerBoundsLoss(points, r=(constraint_rad * 2)**2) \ + innerBoundsLoss(points, r=constraint_rad**2, center=(0.55, 0, 0)) #loss = compute_lift_faces_diff_signs(mesh, local_preds, axis=axis) return loss # + import torch import torch.nn as nn import torch.nn.functional as F import torch_geometric from torch_geometric.nn import (NNConv, GMMConv, GraphConv, Set2Set) from torch_geometric.nn import (SplineConv, graclus, max_pool, max_pool_x, global_mean_pool) class SplineBlock(nn.Module): def __init__(self, num_in_features, num_outp_features, mid_features, kernel=3, dim=3): super(SplineBlock, 
self).__init__() self.conv1 = SplineConv(num_in_features, mid_features, dim, kernel, is_open_spline=False) # self.batchnorm1 = torch.nn.BatchNorm1d(mid_features) self.conv2 = SplineConv(mid_features, 2 * mid_features, dim, kernel, is_open_spline=False) self.batchnorm2 = torch.nn.BatchNorm1d(2 * mid_features) self.conv3 = SplineConv(2 * mid_features + 3, num_outp_features, dim, kernel, is_open_spline=False) def forward(self, res, data): # res = F.elu(self.batchnorm1(self.conv1(res, data.edge_index, data.edge_attr))) res = F.elu(self.conv1(res, data.edge_index, data.edge_attr)) res = F.elu(self.batchnorm2(self.conv2(res, data.edge_index, data.edge_attr))) # res = F.elu(self.conv2(res, data.edge_index, data.edge_attr)) res = torch.cat([res, data.pos], dim=1) res = self.conv3(res, data.edge_index, data.edge_attr) return res class SplineCNN8(nn.Module): def __init__(self, num_features, kernel=3, dim=3): super(SplineCNN8, self).__init__() self.block1 = SplineBlock(num_features, 16, 8, kernel, dim) self.block2 = SplineBlock(16, 64, 32, kernel, dim) self.block3 = SplineBlock(64, 64, 128, kernel, dim) self.block4 = SplineBlock(64, 8, 16, kernel, dim) self.block5 = SplineBlock(8, 32, 16, kernel, dim) self.block6 = SplineBlock(32, 64, 32, kernel, dim) self.block7 = SplineBlock(64, 64, 128, kernel, dim) self.block8 = SplineBlock(64, 4, 16, kernel, dim) def forward(self, data): res = data.x res = self.block1(res, data) res = self.block2(res, data) res = self.block3(res, data) res = self.block4(res, data) res = self.block5(res, data) res = self.block6(res, data) res = self.block7(res, data) res = self.block8(res, data) return res # + # data_instance= make_data_instance_from_stl( # '/cvlabdata2/home/artem/Data/cars_remeshed_dsdf/outputs/fld/0003_0015.fld') # /cvlabdata2/home/artem/Data/cars_refined/simulated/fld/0002_0005.fld model = SplineCNN8(3) model.load_state_dict(torch.load("Expirements/SplineCNN8.nn")) device = "cuda:0" model = model.to(device) # + experiment_directory = "/cvlabdata2/home/artem/DeepSDF/examples/cars_cleared/" checkpoint = "latest" decoder = load_model(experiment_directory, checkpoint) latent_vectors = ws.load_latent_vectors(experiment_directory, checkpoint) latent_size = latent_vectors[0].size()[0] print(f"{len(latent_vectors)} of latent vectors, each {latent_size} long") # - # # Vertex-vise Optimization # + def optimize_shape_by_vertex(model, inp, num_iters=30, save_to_dir='Expirements/Optimization', lr=0.05, decreased_by=2, adjust_lr_every=10, verbose=None, constraint_rad=0.1, axis=0): def adjust_learning_rate( initial_lr, optimizer, num_iterations, decreased_by, adjust_lr_every ): lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every)) for param_group in optimizer.param_groups: param_group["lr"] = lr return lr if not os.path.exists(os.path.join(save_to_dir, 'meshes')): os.makedirs(os.path.join(save_to_dir, 'meshes')) if not os.path.exists(os.path.join(save_to_dir, 'predictions')): os.makedirs(os.path.join(save_to_dir, 'predictions')) try: model.eval() data_instance = inp.clone() data_instance.x.requires_grad = True optimizer = torch.optim.SGD([data_instance.x], lr=lr) meshes = [] lifts = [] lr_plot = [] for i in range(num_iters): cur_lr = adjust_learning_rate(lr, optimizer, i, decreased_by, adjust_lr_every) save_path = os.path.join(save_to_dir, 'meshes/' + str(i).zfill(5) + ".ply") preds_save_path = os.path.join(save_to_dir, 'predictions/' + str(i).zfill(5) + ".npy") optimizer.zero_grad() local_preds = model(data_instance) # Don't ask me why 
points_in_deepSDF = transformPoints(data_instance.x, np.linalg.inv(AvgTransform)) #print(points_in_deepSDF) loss = calculate_loss(data_instance, local_preds, axis, constraint_rad=constraint_rad) loss.backward() #print('Avg grad: ', torch.mean(torch.sum(data_instance.x.grad ** 2, axis=1))) tri_mesh = get_trimesh_from_torch_geo_with_colors(data_instance, local_preds) tri_mesh.export(save_path) np.save(preds_save_path, local_preds.cpu().detach().numpy()) # data_instance.x.grad[torch.isnan(data_instance.x.grad)] = 0 optimizer.step() if verbose is not None and i % verbose == 0: print('Iter ', i, 'Loss: ', loss.detach().cpu().numpy(), ' LR: ', cur_lr) #plot_points_from_torch(points) lifts.append(loss.detach().cpu().numpy()) lr_plot.append(cur_lr) np.save(os.path.join(save_to_dir, "loss_plot.npy"), lifts) np.save(os.path.join(save_to_dir, "lr_plot.npy"), lr_plot) except KeyboardInterrupt: print("Paused") # + # optimize_shape_by_vertex(model, MESH_TO_OPTIMIZE, verbose=1, save_to_dir='Expirements/OptimizationPaper/NewGenVertexDragTight', # lr=0.002, adjust_lr_every=20) # - # # BB Optimization def optimize_shape_by_scaling(model, inp, num_iters=100, save_to_dir='Expirements/Optimization', lr=0.05, decreased_by=2, adjust_lr_every=10, verbose=None, constraint_rad=0.1, axis=0): def transform(data, scale): answ = data.clone() answ.x = data.x * scale return answ def adjust_learning_rate( initial_lr, optimizer, num_iterations, decreased_by, adjust_lr_every ): lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every)) for param_group in optimizer.param_groups: param_group["lr"] = lr return lr if not os.path.exists(os.path.join(save_to_dir, 'meshes')): os.makedirs(os.path.join(save_to_dir, 'meshes')) if not os.path.exists(os.path.join(save_to_dir, 'predictions')): os.makedirs(os.path.join(save_to_dir, 'predictions')) try: model.eval() data_instance = inp.clone() data_instance.x.requires_grad = True scale = torch.Tensor([1.0, 1.0, 1.0]).to("cuda:0") scale.requires_grad = True optimizer = torch.optim.SGD([scale], lr=lr) meshes = [] lifts = [] lr_plot = [] for i in range(num_iters): cur_rl = adjust_learning_rate(lr, optimizer, i, decreased_by, adjust_lr_every) save_path = os.path.join(save_to_dir, 'meshes/' + str(i).zfill(5) + ".ply") preds_save_path = os.path.join(save_to_dir, 'predictions/' + str(i).zfill(5) + ".npy") optimizer.zero_grad() transformed_mesh = transform(data_instance, scale) # Restriction # min_dist = compute_min_distance(transformed_mesh) # scale_clapped = scale * min(1, GLOBAL_MIN_DIST / min_dist) #transformed_mesh = transform(data_instance, scale) local_preds = model(transformed_mesh) points_in_deepSDF = transformPoints(transformed_mesh.x, np.linalg.inv(AvgTransform)) loss = calculate_loss(transformed_mesh, local_preds, axis=axis, constraint_rad=constraint_rad) loss.backward() optimizer.step() if verbose is not None and i % verbose == 0: print('Iter ', i, 'Loss: ', loss.detach().cpu().numpy(), ' LR: ', cur_rl) #plot_points_from_torch(points) tri_mesh = get_trimesh_from_torch_geo_with_colors(transformed_mesh, local_preds) tri_mesh.export(save_path) np.save(preds_save_path, local_preds.cpu().detach().numpy()) lifts.append(loss.detach().cpu().numpy()) lr_plot.append(cur_rl) np.save(os.path.join(save_to_dir, "loss_plot.npy"), lifts) np.save(os.path.join(save_to_dir, "lr_plot.npy"), lr_plot) except KeyboardInterrupt: print("Paused") # + # optimize_shape_by_scaling(model, MESH_TO_OPTIMIZE, verbose=1, save_to_dir='Expirements/OptimizationPaper/BboxDragDiff', lr=0.05) # - # # 
Free Foam Optimization def optimize_shape_as_pierre(model, inp, num_iters=100, save_to_dir='Expirements/Optimization', lr=0.05, decreased_by=2, adjust_lr_every=10, verbose=None, constraint_rad=0.1, axis=0): def transform(data, scale): answ = data.clone() answ.x = \ data.x * (scale[1] + scale[2] * data.x + \ scale[3] * torch.cos(data.x[:, (2, 0, 1)] * 2 * math.pi) + \ scale[4] * torch.cos(data.x[:, (1, 2, 0)] * 2 * math.pi) + \ scale[5] * torch.sin(data.x[:, (2, 0, 1)] * 2 * math.pi) + \ scale[5] * torch.sin(data.x[:, (1, 2, 0)] * 2 * math.pi)) return answ def adjust_learning_rate( initial_lr, optimizer, num_iterations, decreased_by, adjust_lr_every ): lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every)) for param_group in optimizer.param_groups: param_group["lr"] = lr return lr if not os.path.exists(os.path.join(save_to_dir, 'meshes')): os.makedirs(os.path.join(save_to_dir, 'meshes')) if not os.path.exists(os.path.join(save_to_dir, 'predictions')): os.makedirs(os.path.join(save_to_dir, 'predictions')) try: model.eval() data_instance = inp.clone() data_instance.x.requires_grad = True scale = torch.Tensor([[0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0]]).to("cuda:0").t() scale.requires_grad = True optimizer = torch.optim.SGD([scale], lr=lr) meshes = [] lifts = [] lr_plot = [] for i in range(num_iters): cur_rl = adjust_learning_rate(lr, optimizer, i, decreased_by, adjust_lr_every) save_path = os.path.join(save_to_dir, 'meshes/' + str(i).zfill(5) + ".ply") preds_save_path = os.path.join(save_to_dir, 'predictions/' + str(i).zfill(5) + ".npy") optimizer.zero_grad() transformed_mesh = transform(data_instance, scale) local_preds = model(transformed_mesh) points_in_deepSDF = transformPoints(transformed_mesh.x, np.linalg.inv(AvgTransform)) loss = calculate_loss(transformed_mesh, local_preds, axis=axis, constraint_rad=constraint_rad) loss.backward() # print("Grad: ", scale.grad) optimizer.step() if verbose is not None and i % verbose == 0: print('Iter ', i, 'Loss: ', loss.detach().cpu().numpy(), ' LR: ', cur_rl) #plot_points_from_torch(points) tri_mesh = get_trimesh_from_torch_geo_with_colors(transformed_mesh, local_preds) tri_mesh.export(save_path) np.save(preds_save_path, local_preds.cpu().detach().numpy()) lifts.append(loss.detach().cpu().numpy()) lr_plot.append(cur_rl) np.save(os.path.join(save_to_dir, "loss_plot.npy"), lifts) np.save(os.path.join(save_to_dir, "lr_plot.npy"), lr_plot) except KeyboardInterrupt: print("Paused") # + # optimize_shape_as_pierre(model, MESH_TO_OPTIMIZE, verbose=1, # save_to_dir='Expirements/OptimizationPaper/PierreDragDiff', lr=0.005) # - # # Free Foam Furrier Optimization def optimize_shape_as_pierre_furrier(model, inp, num_params=88, num_iters=100, save_to_dir='Expirements/Optimization', lr=0.05, decreased_by=2, adjust_lr_every=10, verbose=None, constraint_rad=0.1, axis=0): def transform(data, scale): answ = data.clone() answ.x = scale[0] + \ data.x * (scale[1] + scale[2] * data.x) for k, idx in enumerate(range(3, num_params - 3, 4)): answ.x += data.x * (scale[idx + 0] * torch.cos((k + 1) * data.x[:, (2, 0, 1)] * 2 * math.pi) + \ scale[idx + 1] * torch.cos((k + 1) * data.x[:, (1, 2, 0)] * 2 * math.pi) + \ scale[idx + 2] * torch.sin((k + 1) * data.x[:, (2, 0, 1)] * 2 * math.pi) + \ scale[idx + 3] * torch.sin((k + 1) * data.x[:, (1, 2, 0)] * 2 * math.pi)) return answ def adjust_learning_rate( initial_lr, optimizer, num_iterations, decreased_by, adjust_lr_every ): lr = initial_lr * ((1 / 
decreased_by) ** (num_iterations // adjust_lr_every)) for param_group in optimizer.param_groups: param_group["lr"] = lr return lr if not os.path.exists(os.path.join(save_to_dir, 'meshes')): os.makedirs(os.path.join(save_to_dir, 'meshes')) if not os.path.exists(os.path.join(save_to_dir, 'predictions')): os.makedirs(os.path.join(save_to_dir, 'predictions')) try: model.eval() data_instance = inp.clone() data_instance.x.requires_grad = True scale = torch.zeros((3, num_params), dtype=torch.float).to("cuda:0").t() scale[1] = 1 scale.requires_grad = True optimizer = torch.optim.SGD([scale], lr=lr) meshes = [] lifts = [] lr_plot = [] for i in range(num_iters): cur_rl = adjust_learning_rate(lr, optimizer, i, decreased_by, adjust_lr_every) save_path = os.path.join(save_to_dir, 'meshes/' + str(i).zfill(5) + ".ply") preds_save_path = os.path.join(save_to_dir, 'predictions/' + str(i).zfill(5) + ".npy") optimizer.zero_grad() transformed_mesh = transform(data_instance, scale) local_preds = model(transformed_mesh) points_in_deepSDF = transformPoints(transformed_mesh.x, np.linalg.inv(AvgTransform)) loss = calculate_loss(transformed_mesh, local_preds, axis=axis, constraint_rad=constraint_rad) loss.backward() optimizer.step() if verbose is not None and i % verbose == 0: print('Iter ', i, 'Loss: ', loss.detach().cpu().numpy(), ' LR: ', cur_rl) #plot_points_from_torch(points) tri_mesh = get_trimesh_from_torch_geo_with_colors(transformed_mesh, local_preds) tri_mesh.export(save_path) np.save(preds_save_path, local_preds.cpu().detach().numpy()) lifts.append(loss.detach().cpu().numpy()) lr_plot.append(cur_rl) np.save(os.path.join(save_to_dir, "loss_plot.npy"), lifts) np.save(os.path.join(save_to_dir, "lr_plot.npy"), lr_plot) except KeyboardInterrupt: print("Paused") # + # optimize_shape_as_pierre_furrier(model, MESH_TO_OPTIMIZE, verbose=1, # save_to_dir='Expirements/OptimizationPaper/FurrierDragDiff', lr=0.005) # - # # DeepSDF Optimization # + def method4_to_arbitatry_loss(points, ply_mesh, model, constraint_rad=0.1, axis=0): initial_dir = points.grad.clone() points.grad.data.zero_() mesh = make_mesh_from_points(points, ply_mesh) #signs = compute_signs_for_loss(mesh, transformPoints(normals, AvgTransform)) local_preds = model(mesh) loss = calculate_loss(mesh, local_preds, axis=axis, constraint_rad=constraint_rad) loss.backward() sign = [-p1.dot(p2) for p1, p2 in zip(initial_dir, points.grad)] return sign, loss, local_preds, mesh def optimize_shape_deepSDF(decoder, latent, ref_latent, initial_points=None, num_points=None, num_iters=100, point_iters=100, punch_lr_at_reindex_by=1, num_neignours_constr=10, reindex_latent_each=50, reindex_num_iterations=500, reindex_num_samples=100, lr=0.2, decreased_by=2, adjust_lr_every=10, alpha_penalty=0.05, multiplier_func=method4_to_arbitatry_loss, verbose=None, save_to_dir=None, N=256): def adjust_learning_rate( initial_lr, optimizer, num_iterations, decreased_by, adjust_lr_every ): lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every)) \ * ((punch_lr_at_reindex_by) ** (num_iterations // reindex_latent_each)) for param_group in optimizer.param_groups: param_group["lr"] = lr return lr if not os.path.exists(os.path.join(save_to_dir, 'meshes')): os.makedirs(os.path.join(save_to_dir, 'meshes')) if not os.path.exists(os.path.join(save_to_dir, 'predictions')): os.makedirs(os.path.join(save_to_dir, 'predictions')) decoder.eval() latent = latent.clone() latent.requires_grad = True optimizer = torch.optim.SGD([latent], lr=lr) loss_plot = [] latent_dist = [] lr_plot 
= [] if initial_points is not None: points = initial_points.clone() else: points = get_points_from_latent(decoder, latent, N=N, point_num=num_points) for i in range(num_iters): time_start = time.time() save_path = os.path.join(save_to_dir, 'meshes/' + str(i).zfill(5) + ".ply") preds_save_path = os.path.join(save_to_dir, 'predictions/' + str(i).zfill(5) + ".npy") # if i > 0 and i == reindex_latent_each: # new_latent = get_latent_from_mesh(decoder, latent_size=latent.size()[1], # num_iterations=reindex_num_iterations, # num_samples=reindex_num_samples) # latent = torch.Tensor(new_latent.cpu().detach().numpy()).cuda() # latent.requires_grad = True # optimizer = torch.optim.Adam([latent], lr=lr) cur_rl = adjust_learning_rate(lr, optimizer, i, decreased_by, adjust_lr_every) with torch.no_grad(): ply_mesh = create_mesh( decoder, latent, N=N, max_batch=int(2 ** 18), offset=None, scale=None) points = torch.cuda.FloatTensor(np.hstack(( ply_mesh['vertex']['x'][:, None], ply_mesh['vertex']['y'][:, None], ply_mesh['vertex']['z'][:, None]))) #points = get_points_from_latent(decoder, latent, N=128, point_num=num_points, save_path=save_path) # grid #points = get_points_on_surface(decoder, latent, points, num_iters=point_iters) # optimization points.requires_grad = True sdf_value = deep_sdf.utils.decode_sdf(decoder, latent, points) sdf_value.backward(torch.ones([len(points), 1], dtype=torch.float32).cuda()) mults, loss_value, preds, transformed_mesh = multiplier_func(points, ply_mesh) multipliers = torch.cuda.FloatTensor(mults) optimizer.zero_grad() sdf_value = torch.squeeze(deep_sdf.utils.decode_sdf(decoder, latent, points)) final_loss = torch.sum(sdf_value * multipliers) final_loss.backward() # print( "Latent size: ", torch.sum(latent ** 2) ) # print( "Latent grad: ", torch.sum(latent.grad ** 2) ) # sdf_value.backward(multipliers) # Soft-constraints distances, indeces = LATENT_KD_TREE.query(latent.cpu().detach(), k=num_neignours_constr) penalty = torch.mean( torch.stack([torch.sum( (latent - latent_vectors[indeces[0][i]]) ** 2 ) for i in range(len(indeces[0]))] ) ) apenalty = penalty * alpha_penalty apenalty.backward() #print("Latent grad penalized: ", torch.sum(latent.grad ** 2)) optimizer.step() # Hard-constraints # if (torch.sum(latent ** 2) > 1.2): # latent *= 1.2 / torch.sum(latent ** 2) # loss_value, preds = loss_func(points, ply_mesh) tri_mesh = get_trimesh_from_torch_geo_with_colors(transformed_mesh, preds) tri_mesh.export(save_path) np.save(preds_save_path, preds.cpu().detach().numpy()) if save_to_dir is not None: plot_points_from_torch loss_plot.append(loss_value.cpu().detach().numpy()) latent_dist.append(torch.sum((latent - ref_latent) ** 2 ).cpu().detach().numpy() ) lr_plot.append(penalty) time_end = time.time() if verbose is not None and i % verbose == 0: print('Iter ', i, 'Loss: ', loss_value.detach().cpu().numpy(), ' LD: ', lr_plot[-1]) #plot_points_from_torch(points) # print("Time: ", time_end - time_start) np.save(os.path.join(save_to_dir, "loss_plot.npy"), loss_plot) np.save(os.path.join(save_to_dir, "latent_dist.npy"), latent_dist) np.save(os.path.join(save_to_dir, "lr_plot.npy"), lr_plot) def make_full_transformation(initial_latent, ref_latent, experiment_name, decoder, model, alpha_penalty=0.05, constraint_rad=0.1, axis=0, **kwargs): ''' kwargs: num_iters=1000, adjust_lr_every=10, decreased_by=1.2, lr=0.005, reindex_latent_each=100, punch_lr_at_reindex_by=1, reindex_num_iterations=500, reindex_num_samples=100, verbose=10, ''' #ref_points = get_points_from_latent(decoder, ref_latent, 
N=128) save_to_dir = experiment_name if not os.path.exists(save_to_dir): os.makedirs(save_to_dir) #np.save(os.path.join(save_to_dir, "target_verts.npy"), ref_points) optimize_shape_deepSDF(decoder, initial_latent, ref_latent, initial_points=None, alpha_penalty=alpha_penalty, num_points=None, point_iters=2, multiplier_func=lambda x, y: method4_to_arbitatry_loss(x, y, model, constraint_rad=constraint_rad, axis=axis), save_to_dir=save_to_dir, **kwargs) # - # ## Run Everything # + experiment_dir = 'FairLossOld' num_iters = 30 N = 256 nice_shapes = [20, 16, 15, 19, 22] for i in nice_shapes + list(range(30, 100)): LATENT_TO_OPTIMIZE = latent_vectors[i] LATENT_KD_TREE = KDTree(np.array([lv.cpu().detach().numpy()[0] for lv in latent_vectors])) with torch.no_grad(): ply_mesh = create_mesh( decoder, LATENT_TO_OPTIMIZE, N=N, max_batch=int(2 ** 18), offset=None, scale=None) points = torch.cuda.FloatTensor(np.hstack(( ply_mesh['vertex']['x'][:, None], ply_mesh['vertex']['y'][:, None], ply_mesh['vertex']['z'][:, None]))) normals = torch.cuda.FloatTensor(np.hstack(( ply_mesh['normals']['x'][:, None], ply_mesh['normals']['y'][:, None], ply_mesh['normals']['z'][:, None]))) MESH_TO_OPTIMIZE = make_mesh_from_points(points, ply_mesh) #SAVED_SIGNS = compute_signs_for_loss(MESH_TO_OPTIMIZE, transformPoints(normals, AvgTransform)) print("Starting") for a, name in zip([1, 0.5], ['Down', 'Mix']): # DeepSDF mesh = make_full_transformation(LATENT_TO_OPTIMIZE.detach(), LATENT_TO_OPTIMIZE.clone(), 'Expirements/OptimizationPaper/' + experiment_dir + '/DeepSDF' + name + '/' + str(i), decoder, model, alpha_penalty=0.2, axis=a, num_iters=num_iters, adjust_lr_every=20, decreased_by=1.1, lr=0.2, verbose=None, N=N, num_neignours_constr=10, reindex_latent_each=10000, punch_lr_at_reindex_by=1, reindex_num_iterations=500, reindex_num_samples=100) # FreeFormFurrier optimize_shape_as_pierre_furrier(model, MESH_TO_OPTIMIZE, verbose=None, constraint_rad=0.1, num_iters=num_iters, save_to_dir='Expirements/OptimizationPaper/' + experiment_dir + '/FreeformFurrie' + name + '/' + str(i), lr=0.005, adjust_lr_every=20, axis=a) # FreeForm optimize_shape_as_pierre(model, MESH_TO_OPTIMIZE, verbose=None, constraint_rad=0.1, num_iters=num_iters, save_to_dir='Expirements/OptimizationPaper/' + experiment_dir + '/Freeform' + name + '/' + str(i), lr=0.05, adjust_lr_every=20, axis=a) # Scaling optimize_shape_by_scaling(model, MESH_TO_OPTIMIZE, verbose=None, constraint_rad=0.1, num_iters=num_iters, save_to_dir='Expirements/OptimizationPaper/' + experiment_dir + '/Scaling' + name + '/' + str(i), lr=0.5, adjust_lr_every=20, axis=a) # Vertex optimize_shape_by_vertex(model, MESH_TO_OPTIMIZE, verbose=None, constraint_rad=0.1, num_iters=num_iters, save_to_dir='Expirements/OptimizationPaper/' + experiment_dir + '/Vertex' + name + '/' + str(i), lr=0.05, adjust_lr_every=20, axis=a) # - # # Check Simulation from numpy import genfromtxt my_data = genfromtxt('../Expirements/SavedTransforms/DeepSDF-CFD8-m3-holes/report/00000/output/cloud_p_k_omega_nut.csv', delimiter=',', skip_header=1) mesh = trimesh.load("../Data/cars_remeshed_dsdf/inputs/0_input.stl") norm = mpl.colors.Normalize(vmin= -2, vmax=2) cmap = cm.hot m = cm.ScalarMappable(norm=norm, cmap=cmap) # + fld_tree = KDTree(my_data[:, :3]) distances, indeces = fld_tree.query(mesh.vertices, k=1) interpolations = my_data[:, 0][indeces].squeeze() mesh = trimesh.Trimesh(vertices=mesh.vertices, faces=mesh.faces, vertex_colors=list(map(lambda c: m.to_rgba(c), interpolations))) # - mesh = 
trimesh.load("../Data/cars_refined/simulated/stl/0005.stl") mesh.show() # #### From processed di = make_data_instance_from_stl("/cvlabdata2/home/artem/Data/check_simulations/Processed/fld/0002_0005.fld") di.to("cuda:0") mesh = get_trimesh_from_torch_geo_with_colors(di, di.y) lift = compute_lift_faces(di, di.y) print('Lift: ', lift) plt.hist(di.y[:, 0].detach().cpu().numpy(), bins=100) plt.show() mesh.show() npar = np.genfromtxt("/cvlabdata2/home/artem/Data/check_simulations/Processed/fld/0002_0005.fld", delimiter=',', skip_header=1) plt.hist(npar[:, 3], bins=100, alpha = 0.5) npar = np.genfromtxt("/cvlabdata2/home/artem/Data/check_simulations/Processed/fld/0000_0005.fld", delimiter=',', skip_header=1) plt.hist(npar[:, 3], bins=100, alpha = 0.5) npar = np.genfromtxt("/cvlabdata2/home/artem/Data/check_simulations/Processed/fld/0001_0005.fld", delimiter=',', skip_header=1) plt.hist(npar[:, 3], bins=100, alpha = 0.5) plt.show() npar = np.genfromtxt("/cvlabdata2/home/artem/Data/cars_refined/simulated/fld/0002_0005.fld", delimiter=',', skip_header=1) plt.hist(npar[:, 3], bins=100, alpha = 0.5) npar = np.genfromtxt("/cvlabdata2/home/artem/Data/cars_refined/simulated/fld/0003_0005.fld", delimiter=',', skip_header=1) plt.hist(npar[:, 3], bins=100, alpha = 0.5) npar = np.genfromtxt("/cvlabdata2/home/artem/Data/cars_refined/simulated/fld/0007_0005.fld", delimiter=',', skip_header=1) plt.hist(npar[:, 3], bins=100, alpha = 0.5) plt.show() npar = np.genfromtxt("/cvlabdata2/home/artem/Data/check_simulations/ListenerCheck/processed/fld/0000_0005.fld", delimiter=',', skip_header=1) plt.hist(npar[:, 3], bins=100, alpha = 0.5) npar = np.genfromtxt("/cvlabdata2/home/artem/Data/check_simulations/ListenerCheck/processed/fld/0001_0005.fld", delimiter=',', skip_header=1) plt.hist(npar[:, 3], bins=100, alpha = 0.5) npar = np.genfromtxt("/cvlabdata2/home/artem/Data/check_simulations/ListenerCheck/processed/fld/0002_0005.fld", delimiter=',', skip_header=1) plt.hist(npar[:, 3], bins=100, alpha = 0.5) plt.show() di0 = make_data_instance_from_stl("../Expirements/SimulatedResults/foam_npy/fld/0000_0005.fld") di1 = make_data_instance_from_stl("../Expirements/SimulatedResults/foam_npy/fld/0001_0005.fld") plt.hist(di0.y[:, 0], bins=100) plt.show() plt.hist(di1.y[:, 0], bins=100) plt.show() mesh = trimesh.load('/cvlabdata2/home/artem/Data/cars_refined/simulated/stl/0005.stl') mesh.show() path = '../Expirements/SavedTransforms/DeepSDF-CFD8-m3-holes/meshes/00000.ply' tri_mesh = trimesh.load(path) points = torch.tensor(tri_mesh.vertices, dtype=torch.float).to("cuda:0") innerBoundsLoss(points, r=0.12**2, center=(0.55, 0, 0)) from numpy import genfromtxt bvecs = genfromtxt('examples/cars/vine_latentVecs_tll_0.1_1.csv', delimiter=' ', skip_header=1) bvecs = [torch.FloatTensor(v).to('cuda:0') for v in bvecs] # + ply_mesh = create_mesh( decoder, latent_vectors[0], N=128, max_batch=int(2 ** 18), offset=None, scale=None) points = torch.cuda.FloatTensor( np.array([ply_mesh['vertex']['x'], ply_mesh['vertex']['y'], ply_mesh['vertex']['z']]).transpose() ) transformed_points = make_mesh_from_points(points, ply_mesh) # - pred = model(transformed_points) mesh = get_trimesh_from_torch_geo_with_colors(transformed_points, pred.cpu().detach()) # + # mesh.show() # - n = mesh.export('examples/cars/BeautifiedMeshes/o0.ply') get_latent_from_mesh_cpu() edges = trimesh.geometry.faces_to_edges(ply_mesh['face']['vertex_indices']) edge_attr = torch.stack([points[a] - points[b] for a, b in edges]) print(edge_attr[:10]) cloud = 
PyntCloud(pd.DataFrame(points.cpu().detach().numpy(), columns=['x', 'y', 'z'])) cloud.plot(background='white', initial_point_size=0.003) cloud = PyntCloud(pd.DataFrame(points_transformed.cpu().detach().numpy(), columns=['x', 'y', 'z'])) cloud.plot(background='white', initial_point_size=0.003) di = make_data_instance_from_stl('/cvlabdata2/home/artem/Data/cars_refined/simulated/fld/0007_0005.fld') cm = get_trimesh_from_torch_geo_with_colors(di, di.y) preds = model() cm.show() transformed_points cloud = PyntCloud(pd.DataFrame(di.pos.cpu().detach().numpy(), columns=['x', 'y', 'z'])) cloud.plot(background='white', initial_point_size=0.003) cloud = PyntCloud(pd.DataFrame(transformed_points.pos.cpu().detach().numpy(), columns=['x', 'y', 'z'])) cloud.plot(background='white', initial_point_size=0.003) # + with torch.no_grad(): ply_mesh = create_mesh( decoder, latent, N=128, max_batch=int(2 ** 18), offset=None, scale=None) points = torch.cuda.FloatTensor(np.hstack(( ply_mesh['vertex']['x'][:, None], ply_mesh['vertex']['y'][:, None], ply_mesh['vertex']['z'][:, None]))) mesh = trimesh.Trimesh(vertices=points.cpu().detach(), faces=ply_mesh['face']['vertex_indices']) edge_attr = [mesh.vertices[a] - mesh.vertices[b] for a, b in mesh.edges] data = torch_geometric.data.Data(x = points, pos= torch.tensor(mesh.vertices, dtype=torch.float).to('cuda:0'), faces = torch.tensor(mesh.faces, dtype=torch.long).to('cuda:0'), edge_attr = torch.tensor(edge_attr, dtype=torch.float).to('cuda:0'), edge_index= torch.tensor(mesh.edges, dtype=torch.long).t().contiguous().to('cuda:0')) # - data_instance= make_data_instance_from_stl( '/cvlabsrc1/cvlab/dataset_shapenet/code/foam_npy/fld/0100_0005.fld', data_step=1) mesh = trimesh.Trimesh(vertices=data_instance.pos, faces=data_instance.faces) a = mesh.export('../Expirements/data/original_mesh.ply') # # Expirements with PreprocessMesh mesh.show() latent = get_latent_from_mesh_cpu(decoder, latent_size, mesh, num_iterations=300, num_samples=200000) latent_gpu = get_latent_from_mesh(decoder, latent_size, mesh_path='../Expirements/data/original_mesh.ply', num_iterations=300, num_samples=200000) gpu_points = deep_sdf.data.read_sdf_samples_into_ram('../Expirements/data/original_SDF.npz') gpu_points[1][:, 3].max() cloud = PyntCloud(pd.DataFrame(np.array(gpu_points[0][:, :3]), columns=['x', 'y', 'z'])) cloud.plot(background='white', initial_point_size=0.03, elev=-45, azim=45) points, sdf = sample_sdf_near_surface(mesh) sdfs = np.hstack((points, sdf[:, None])) data_sdf = [torch.from_numpy(sdfs[sdfs[:, 3] > 0, :]), torch.from_numpy(sdfs[sdfs[:, 3] < 0, :])] print(data_sdf[0].shape) print(gpu_points[0].shape) cloud = PyntCloud(pd.DataFrame(np.array(data_sdf[0][:, :3]), columns=['x', 'y', 'z'])) cloud.plot(background='white', initial_point_size=0.03, elev=-45, azim=45) plot_mesh_from_vector(decoder, latent, N=128) plydata = create_mesh(decoder, latent_gpu) plydata.write("../Expirements/mesh.ply")
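# ___
# A refactoring note (an addition, not part of the original experiments): every optimizer above re-defines the same step-decay schedule inline. A minimal standalone sketch of that shared helper, with the same semantics as the inline copies:

# +
def step_decay_learning_rate(initial_lr, optimizer, num_iterations, decreased_by, adjust_lr_every):
    # divide the learning rate by `decreased_by` once every `adjust_lr_every` iterations
    lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every))
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr
    return lr
# -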
OptimisationRepeatOld.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from chempy.units import default_units as u
from chempy.util.regression import least_squares, plot_fit, irls, plot_least_squares_fit, least_squares_units
# %matplotlib inline

help(least_squares)

x = [0, 1, 2, 3, 4 + 1e-9]
y = [3, 3.8, 5.2, 5.2, 10.8]

# Simple OLS with plotting:

res = least_squares(x, y)
plot_least_squares_fit(x, y, res)

# Iteratively reweighted least squares (IRLS):

res_irls = irls(x, y, irls.gaussian, itermax=20)
plot_least_squares_fit(x, y, res_irls)

x

y*u.m

res_units = least_squares_units(x*u.s, y*u.m)
#plot_least_squares_fit(x*u.s, y*u.m, res_units, x_unit=u.s, y_unit=u.m)
res_units

err = [.1, .2, .2, .8, 3]*u.m
res_weighted = least_squares_units(x*u.s, y*u.m, err**-2)
plot_least_squares_fit(x*u.s, y*u.m, res_weighted, err, x_unit=u.s, y_unit=u.m)
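# As a cross-check (an added sketch, not part of the chempy example): numpy's `polyfit` also supports weighted fits. Note that `polyfit` expects weights proportional to 1/sigma, not 1/sigma**2:

import numpy as np
np.polyfit(x, y, 1, w=[1 / e for e in [.1, .2, .2, .8, 3]])  # [slope, intercept]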
examples/_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from math import ceil, log from functools import reduce import re from scipy.stats import linregress as linear_regression import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns pd.set_option("display.precision", 16) # - class DBMeta(object): def __init__(self, T, K, Z, B, E, bpe, L, phi=1): self.T, self.K, self.Z, self.B, self.E, self.L = int(T), int(K), int(Z), int(B), int(E), int(L) self.bpe, self.phi = bpe, phi self.is_tiering = True if (K == T - 1) else False self.N = self.estimate_N() self.mfilt = int(self.bpe * self.N) >> 3 def estimate_N(self): num_entries = 0 for level in range(1, int(self.L) + 1): num_entries += (self.B / self.E) * (self.T - 1) * (self.T ** (level - 1)) return num_entries def level(self): return ceil(log((self.N * self.E) / self.B + 1, self.T)) def write_cost(self): cost = self.T * (1 + self.phi) * (self.L / self.B) cost /= self.T if self.is_tiering else 2 return cost def fp(self, curr_level): alpha = (self.T ** (self.T / (self.T - 1))) / (self.T ** (self.L + 1 - curr_level)) return alpha * (np.e ** (-1 * self.mfilt * (np.log(2) ** 2) / self.N)) def non_empty_read_cost(self): cost = 0 run_probs = [self.B * (self.T ** (i)) / (self.E * self.N) for i in range(0, self.L)] fp_levels = [self.fp(i + 1) for i in range(0, self.L)] fp_levels_sum = [0] + list(np.cumsum(fp_levels)) if self.is_tiering: for i in range(0, self.L): for r in range(1, self.T): cost += run_probs[i] * (1 + (self.T - 1) * fp_levels_sum[i] + (r - 1) * fp_levels[i]) else: for i in range(0 , self.L): cost += (self.T - 1) * run_probs[i] * (1 + fp_levels_sum[i]) return cost def old_non_empty_read_cost(self): cost = 1 + self.empty_read_cost() sub = np.e ** ((-1 * self.mfilt * (np.log(2) ** 2) / (self.N * self.L)) + (1 / (self.L * (self.T - 1)))) if self.is_tiering: sub *= (self.T / (2 * (self.T - 1))) cost -= sub return cost def empty_read_cost(self): if self.is_tiering: alpha = self.T ** (self.T / (self.T - 1)) else: alpha = (self.T ** (self.T / (self.T - 1))) / (self.T - 1) beta = np.e ** (-1 * self.mfilt * (np.log(2) ** 2) / self.N) return alpha * beta def dost_write_cost(self): return (1 / self.B) * (((self.T - 1) * (self.level() - 1) / (self.K + 1)) + (self.T - 1) / (self.Z + 1)) # # Generating Data # + def set_style(): # This sets reasonable defaults for font size for a paper sns.set_context("paper") # Set the font to be serif sns.set(font='serif')#, rc={'text.usetex' : True}) # Make the background white, and specify the specific font family sns.set_style("white", { "font.family": "serif", "font.serif": ["Times", "Palatino", "serif"] }) # Set tick size for axes sns.set_style("ticks", {"xtick.major.size": 6, "ytick.major.size": 6}) fsz = 14 plt.rc('font', size=fsz) plt.rc('axes', titlesize=fsz) plt.rc('axes', labelsize=fsz) plt.rc('xtick', labelsize=fsz) plt.rc('ytick', labelsize=fsz) plt.rc('legend', fontsize=fsz) plt.rc('figure', titlesize=fsz) def plot_old_new_model(df, xaxis='T'): is_tiering = df.iloc[-1]['is_tiering'] fig, ax = plt.subplots(figsize=(15,5)) ax.set_xlabel(xaxis) ax.set_ylabel('Expected I/Os') ax.plot(df[xaxis], df['model_non_empty_read_cost'], color='red') ax.plot(df[xaxis], df['old_non_empty_read_cost'], color='black') ax.axhline(y=1, color='black', alpha=0.5, linestyle='--') ax.tick_params(axis='y') ax.set_ylim(bottom=0) 
ax.legend(['New', 'Old']) B, E = df.iloc[-1][['B', 'E']] title = [ f'Buffer, Entry, Policy : ({B / (1 << 20)} MB, {E / (1 << 10)} KB, {"Tiering" if is_tiering else "Leveling"})' ] ax.set_title('\n'.join(title)) return fig # + def row(T=2, K=1, Z=1, L=3, B=(8*1048576), E=1024, bpe=9.0, is_tiering=False): return {'T' : T, 'K' : K, 'Z' : Z, 'L' : L, 'B' : B, 'E' : E, 'bpe' : bpe, 'is_tiering' : is_tiering} MIN_T, MAX_T = 2, 25 set_style() df = [row(T=T, K=1, Z=1, is_tiering=False) for T in range(MIN_T, MAX_T)] df = pd.DataFrame(df) df['model_non_empty_read_cost'] = df.apply(lambda row: DBMeta(row['T'], row['K'], row['Z'], row['B'], row['E'], row['bpe'], row['L']).non_empty_read_cost(), axis=1) df['old_non_empty_read_cost'] = df.apply(lambda row: DBMeta(row['T'], row['K'], row['Z'], row['B'], row['E'], row['bpe'], row['L']).old_non_empty_read_cost(), axis=1) _ = plot_old_new_model(df, 'T') df = [row(T=T, K=T-1, Z=T-1, is_tiering=True) for T in range(MIN_T, MAX_T)] df = pd.DataFrame(df) df['model_non_empty_read_cost'] = df.apply(lambda row: DBMeta(row['T'], row['K'], row['Z'], row['B'], row['E'], row['bpe'], row['L']).non_empty_read_cost(), axis=1) df['old_non_empty_read_cost'] = df.apply(lambda row: DBMeta(row['T'], row['K'], row['Z'], row['B'], row['E'], row['bpe'], row['L']).old_non_empty_read_cost(), axis=1) _ = plot_old_new_model(df, 'T') # + set_style() MIN_BPE, MAX_BPE = 1, 10 T = 20 df = [row(T=T, K=1, Z=1, is_tiering=False, bpe=bpe) for bpe in range(MIN_BPE, MAX_BPE)] df = pd.DataFrame(df) df['model_non_empty_read_cost'] = df.apply(lambda row: DBMeta(row['T'], row['K'], row['Z'], row['B'], row['E'], row['bpe'], row['L']).non_empty_read_cost(), axis=1) df['old_non_empty_read_cost'] = df.apply(lambda row: DBMeta(row['T'], row['K'], row['Z'], row['B'], row['E'], row['bpe'], row['L']).old_non_empty_read_cost(), axis=1) _ = plot_old_new_model(df, 'bpe') df = [row(T=T, K=T-1, Z=T-1, is_tiering=True, bpe=bpe) for bpe in range(MIN_BPE, MAX_BPE)] df = pd.DataFrame(df) df['model_non_empty_read_cost'] = df.apply(lambda row: DBMeta(row['T'], row['K'], row['Z'], row['B'], row['E'], row['bpe'], row['L']).non_empty_read_cost(), axis=1) df['old_non_empty_read_cost'] = df.apply(lambda row: DBMeta(row['T'], row['K'], row['Z'], row['B'], row['E'], row['bpe'], row['L']).old_non_empty_read_cost(), axis=1) _ = plot_old_new_model(df, 'bpe') # -
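# A single configuration can also be queried directly (a small added example, using the same buffer/entry defaults as `row` above):

# +
db = DBMeta(T=10, K=1, Z=1, B=8 * 1048576, E=1024, bpe=9.0, L=3)
print("levels:", db.L, "estimated entries:", int(db.N))
print("empty read cost:    ", db.empty_read_cost())
print("non-empty read cost:", db.non_empty_read_cost())
# -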
script/visualization/model_func_graph.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="S9_vgfdxISgG" colab_type="text" # # DCGAN # + [markdown] id="5KmzubwgIovr" colab_type="text" # ## Imports # + id="nfen8HBmIXD3" colab_type="code" colab={} import numpy as np import itertools import time import matplotlib.pyplot as plt import tensorflow as tf from sklearn.utils import shuffle import pdb from tensorflow.examples.tutorials.mnist import input_data from google.colab import files import warnings # + [markdown] id="NpYy68hFIq8Y" colab_type="text" # ## Load data # + id="6Kvar8EjI9Z4" colab_type="code" outputId="a5ea8c26-e0aa-4c2a-bad6-2b857018d0bc" colab={"base_uri": "https://localhost:8080/", "height": 391} IMAGE_SIZE = 28 tf.reset_default_graph() mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=[]) X_train = mnist.train.images X_train = (X_train - 0.5) / 0.5 def leaky_relu(X, leak=0.2): f1 = 0.5 * (1 + leak) f2 = 0.5 * (1 - leak) return f1 * X + f2 * tf.abs(X) # + [markdown] id="4bWwxBHw3KzA" colab_type="text" # ## Placeholder # + id="6O1mDTe83NkL" colab_type="code" colab={} x = tf.placeholder(tf.float32, shape=(None, IMAGE_SIZE, IMAGE_SIZE, 1)) noise = tf.placeholder(tf.float32, shape=(None, 1, 1, 100)) Training = tf.placeholder(dtype=tf.bool) keep_prob = tf.placeholder(dtype=tf.float32, name='keep_prob') # + [markdown] id="_qMaCMk2JAoA" colab_type="text" # ## Generator # + id="0VdTQKIZJFsG" colab_type="code" colab={} def Generator(x, keep_prob=keep_prob, Training=True, reuse=False): with tf.variable_scope('Generator', reuse=reuse): W = tf.truncated_normal_initializer(mean=0.0, stddev=0.02) b = tf.constant_initializer(0.0) out_1 = tf.layers.conv2d_transpose(x, 256, [7, 7], strides=(1, 1), padding='valid', kernel_initializer=W, bias_initializer=b) out_1 = tf.layers.dropout(out_1, keep_prob) out_1 = tf.layers.batch_normalization(out_1, training=Training)#batch norm out_1 = leaky_relu(out_1, 0.2) out_2 = tf.layers.conv2d_transpose(out_1, 128, [5, 5], strides=(2, 2), padding='same', kernel_initializer=W, bias_initializer=b) out_2 = tf.layers.dropout(out_2, keep_prob) out_2 = tf.layers.batch_normalization(out_2, training=Training)#batch norm out_2 = leaky_relu(out_2, 0.2) out_3 = tf.layers.conv2d_transpose(out_2, 1, [5, 5], strides=(2, 2), padding='same', kernel_initializer=W, bias_initializer=b) out_3 = tf.nn.tanh(out_3) return out_3 # + [markdown] id="6ajNg7X6JNRm" colab_type="text" # ## Discriminator # + id="uPGatNMnJOqa" colab_type="code" colab={} def Discriminator(x, keep_prob=keep_prob, Training=True, reuse=False): with tf.variable_scope('Discriminator', reuse=reuse): W = tf.truncated_normal_initializer(mean=0.0, stddev=0.02) b = tf.constant_initializer(0.0) out_1 = tf.layers.conv2d(x, 128, [5, 5], strides=(2, 2), padding='same', kernel_initializer=W, bias_initializer=b) out_1 = tf.layers.dropout(out_1, keep_prob) out_1 = tf.layers.batch_normalization(out_1, training=Training)#batch norm out_1 = leaky_relu(out_1, 0.2) out_2 = tf.layers.conv2d(out_1, 256, [5, 5], strides=(2, 2), padding='same', kernel_initializer=W, bias_initializer=b) out_2 = tf.layers.dropout(out_2, keep_prob) out_2 = tf.layers.batch_normalization(out_2, training=Training)#batch norm out_2 = leaky_relu(out_2, 0.2) logits = tf.layers.conv2d(out_2, 1, [7, 7], strides=(1, 1), padding='valid', kernel_initializer=W, bias_initializer=b) out_3 = tf.nn.sigmoid(logits) return 
out_3 ,logits # + [markdown] id="QiTPVdsbKjVI" colab_type="text" # ## Parameters # + id="tn3RJESPKm19" colab_type="code" colab={} EPOCH = 20 BATCH_SIZE = 200 keep_prob_train = 0.6 BETA1 = 0.5 lr = 0.0002 label_smooth = 1 # + [markdown] id="TycJaszI0_QV" colab_type="text" # ## Loss function # + id="8b0WLH2nJ613" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="46960bb8-f86d-475f-b57d-b15ec32df4ae" # Generate images G_noise = Generator(noise, keep_prob, Training) # D D_real, D_real_logits = Discriminator(x, Training) D_fake, D_fake_logits = Discriminator(G_noise, Training, reuse=True) # D real loss Dis_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real_logits, labels=tf.multiply(tf.ones_like(D_real_logits), (label_smooth)))) # D generated image loss Dis_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.zeros([BATCH_SIZE, 1, 1, 1]))) # D total loss Dis_loss = Dis_loss_real + Dis_loss_fake # G loss Gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.ones([BATCH_SIZE, 1, 1, 1]))) # get all variables tf_vars = tf.trainable_variables() Dis_vars = [var for var in tf_vars if var.name.startswith('Discriminator')] Gen_vars = [var for var in tf_vars if var.name.startswith('Generator')] # optimise with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): D_optim = tf.train.AdamOptimizer(lr, beta1=BETA1).minimize(Dis_loss, var_list=Dis_vars) G_optim = tf.train.AdamOptimizer(lr, beta1=BETA1).minimize(Gen_loss, var_list=Gen_vars) # + [markdown] id="POC9cA26KMoM" colab_type="text" # ## Training # + id="D2rrQ0mLKQ_8" colab_type="code" outputId="1775345e-72af-422a-8237-412a4fa94814" colab={"base_uri": "https://localhost:8080/", "height": 595} saver = tf.train.Saver() num_examples = len(X_train) k = num_examples % BATCH_SIZE num_examples = num_examples - k G_loss = [] D_loss = [] D_r = [] D_f = [] with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in range(EPOCH): start = time.time() X_train = shuffle(X_train) for offset in range(0, num_examples, BATCH_SIZE): train_d = True train_g = True end = offset + BATCH_SIZE batch = X_train[offset:end] noise_ = np.random.normal(0, 1, (BATCH_SIZE, 1, 1, 100)) #calculate loss d_ls = sess.run(Dis_loss,{noise: noise_, x: batch, Training: False}) g_ls = sess.run(Gen_loss,{noise: noise_, x: batch, Training: False}) #Gobal loss # d_ls_real, d_ls_fake = sess.run([Dis_loss_real, Dis_loss_fake], {noise: noise_,x: batch, Training: False}) d_r = sess.run([D_real], {x: batch, Training: False}) d_f = sess.run([D_fake], {noise: noise_, Training: False}) d_r = np.mean(d_r) d_f = np.mean(d_f) #break D_r.append(d_r) D_f.append(d_f) D_loss.append(d_ls) G_loss.append(g_ls) if g_ls * 1.5 < d_ls: train_g = False pass if d_ls * 3 < g_ls: train_d = False pass #Update D if train_d: sess.run(D_optim, {x: batch, noise: noise_,keep_prob: keep_prob_train,Training: True}) #Update G if train_g: sess.run(G_optim, {noise: noise_, x: batch,keep_prob: keep_prob_train, Training: True}) end = time.time() elapsed = end - start #break if ((i+1)%2 == 0)or(i==0): print("EPOCH {} ...".format(i+1)) print("G_loss = {:.3f} D_loss = {:.3f} Time used = {:.3f}".format(g_ls, d_ls,elapsed)) print() saver.save(sess, './lenet') print("Model saved") # + [markdown] id="9boaRjia6EDt" colab_type="text" # ## D real and fake loss # + id="AMwoncA46EqV" colab_type="code" outputId="809fb114-d1de-4354-f8d8-d62edb16817f" 
colab={"base_uri": "https://localhost:8080/", "height": 952} D_r_mean = [] D_f_mean = [] N = len(D_r) length = N // (EPOCH) for k in range(0,EPOCH): D_r_mean.append( np.mean(D_r[(k+1)*length -10 : (k+1)*length + 10] )) D_f_mean.append( np.mean(D_f[(k+1)*length -10 : (k+1)*length + 10] )) print("Average D real loss") print(D_r_mean) print("Average D fake loss") print(D_f_mean) index = np.arange(1,EPOCH+1,1) f_d = plt.figure(1) plt.plot(index, D_r_mean, 'r',label='D Real') plt.plot(index, D_f_mean, 'b',label='D Fake') plt.ylabel("D Loss") plt.xlabel("EPOCH") plt.legend(framealpha=1, frameon=True) plt.show() f_d.savefig('Real and fake Loss.png', dpi=600) files.download('Real and fake Loss.png') # + [markdown] id="2j4rAJOm6RAc" colab_type="text" # ## Plot loss # + id="e3TjGFfl6R3d" colab_type="code" outputId="8a86f33d-73c3-4c1d-80d4-9a5698613135" colab={"base_uri": "https://localhost:8080/", "height": 429} d_s_mean = [] g_s_mean = [] N = len(D_loss) length = N // (EPOCH) for k in range(0,EPOCH): d_s_mean.append( np.mean(D_loss[(k+1)*length -10 : (k+1)*length + 10] )) g_s_mean.append( np.mean(G_loss[(k+1)*length -10 : (k+1)*length + 10] )) print("Average D loss") print(d_s_mean) print("Average G loss") print(g_s_mean) index = np.arange(1,EPOCH+1,1) f = plt.figure(1) plt.plot(index, d_s_mean, 'r',label='D Loss') plt.plot(index, g_s_mean, 'b',label='G Loss') plt.xlabel("EPOCH") plt.legend(framealpha=1, frameon=True) plt.show() f.savefig('Loss.png', dpi=600) files.download('Loss.png') # + [markdown] id="xRAQaJPmLPKZ" colab_type="text" # ## Visualization # + id="OfSdVjgBNg_u" colab_type="code" outputId="cf372a09-09c8-4aa7-8c76-df285797c91f" colab={"base_uri": "https://localhost:8080/", "height": 398} def plot_images(images,save = True): assert len(images) == 100 img_shape = (28, 28) # Create figure with 3x3 sub-plots. fig, axes = plt.subplots(10, 10) fig.subplots_adjust(hspace=0.1, wspace=0.1) for i, ax in enumerate(axes.flat): # Plot image. ax.imshow(images[i].reshape(img_shape), cmap='binary') # Remove ticks from the plot. ax.set_xticks([]) ax.set_yticks([]) plt.show() if save: fig.savefig('G images_white.png', dpi=600) files.download('G images_white.png') n = np.random.normal(0.0, 1.0, [100,1,1,100]).astype(np.float32) with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('.')) images = sess.run(G_noise, {noise: n, Training: False}) plot_images(images)
results/dcgan_more_d/DCGAN_more_d.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np print("Opening...") with open("a1a.train") as f: train_raw = f.read() with open("a1a.test") as f: test_raw = f.read() print("done opening.") def process_data(raw_data): train_lines = raw_data.splitlines() num_examples = len(train_lines) num_features = 123 X = np.zeros((num_examples, num_features)) Y = np.zeros((num_examples, 1)) for i, line in enumerate(train_lines): tokens = line.split() #label = tokens[0] label = (int(tokens[0]) + 1) / 2 # Change label from {-1,1} to {0,1} Y[i] = label for token in tokens[1:]: index = int(token[:-2]) - 1 X[i, index] = 1 return X, Y def normalize_data(Xtrain, Xtest): normalizer = max(np.max(np.linalg.norm(Xtrain, axis=1)), np.max(np.linalg.norm(Xtest, axis=1))) Xtrain = Xtrain / normalizer Xtest = Xtest / normalizer return Xtrain, Xtest print("Processing...") Xtrain, Ytrain = process_data(train_raw) Xtest, Ytest = process_data(test_raw) print("done processing.") print("Normalizing...") Xtrain, Xtest = normalize_data(Xtrain, Xtest) print("done normalizing.") # + from math import exp, sqrt from scipy.special import erf from scipy.optimize import root_scalar def get_eps_AGM(sigma, GS, delta, min_eps=1e-6, max_eps=10, tol=1e-12): # Compute the epsilon corresponding to a Gaussian perturbation normalized_sigma = sigma / GS def Phi(t): return 0.5*(1.0 + erf(float(t)/sqrt(2.0))) def get_delta(s, e): return Phi(-e*s+1.0/(2*s)) - exp(e)*Phi(-e*s-1.0/(2*s)) def f(x): return get_delta(normalized_sigma, x) - delta assert get_delta(normalized_sigma, min_eps) >= delta assert get_delta(normalized_sigma, max_eps) <= delta sol = root_scalar(f, bracket=[min_eps,max_eps], xtol=tol) assert sol.converged return sol.root # - get_eps_AGM(1,0.1,1e-6) # + import psgd psgd.get_eps_AGM(0.1,1.3,1e-6,max_eps=500) # + # ProjSGDClassifier is an sklearn model that needs to be compiled locally # See README in parent folder from sklearn.linear_model import ProjSGDClassifier def dp_proj_sgd(Xtrain, Xtest, reg_lambda=0.001, sigma=0.1, delta=1e-6, R=10): # Define the model clf = ProjSGDClassifier(loss="log", penalty="l2", learning_rate="bolton", alpha=reg_lambda, radius=1.0/reg_lambda, max_iter=10, verbose=0, fit_intercept=False) #print(clf.get_params()) scores = [] for r in range(R): # Train the model clf.fit(Xtrain, Ytrain.ravel()) # Privatize the model Z = sigma*np.random.standard_normal(size=clf.coef_.shape) clf.coef_ += Z # Evaluate the model accuracy score = clf.score(Xtest, Ytest) scores.append(score) # Evaluate the model privacy # Compute the global sensitivity m = Xtrain.shape[0] GS = 4.0/(m*reg_lambda) epsilon = get_eps_AGM(sigma, GS, delta) return np.average(scores), epsilon # - dp_proj_sgd(Xtrain, Xtest, reg_lambda=0.01, sigma=0.1, delta=1e-6)
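# To trace the privacy/utility trade-off (an added sketch, reusing `dp_proj_sgd` above; note that the default bracket in `get_eps_AGM` only covers epsilon in [1e-6, 10], so very small sigmas will trip its assertions):

# +
for sigma in [0.2, 0.3, 0.5, 1.0]:
    acc, eps = dp_proj_sgd(Xtrain, Xtest, reg_lambda=0.01, sigma=sigma, delta=1e-6)
    print(f"sigma={sigma}: accuracy={acc:.4f}, epsilon={eps:.4f}")
# -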
experiments/output_perturbation/scikit-learn/sgd-adult.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Visualising crystal structures with fresnel and matplotlib # This example takes some crystal structures and visualises them with fresnel and matplotlib. # It requires fresnel to be installed from conda-forge with `conda install fresnel -c conda-forge`. from matador.utils.viz_utils import fresnel_plot from matador.utils.optimade_utils import optimade2dict import requests crystals = [ optimade2dict(structure) for structure in requests.get("https://optimade.materialsproject.org/v1/structures?page_limit=6").json()["data"] ] import matplotlib.pyplot as plt fig, axes, scenes = fresnel_plot(crystals, fig_rows=2, figsize=(8, 8)) plt.tight_layout() # Optional: install PySide2 to use interactive scene views with fresnel. Useful for finding the desired camera angle for a given structure. from PySide2 import QtCore # %gui qt import fresnel.interact view = fresnel.interact.SceneView(scenes[5]) view.show() breakpoint() # Move camera to desired position and observe changes repr(scenes[5].camera) from matador.utils.viz_utils import rerender_scenes_to_axes rerender_scenes_to_axes(scenes, axes) fig
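# As an optional final step (added here; `crystal_structures.png` is an illustrative filename, not from the original notebook), the re-rendered figure can be written to disk with standard matplotlib:

fig.savefig("crystal_structures.png", dpi=300, bbox_inches="tight")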
docs/src/notebooks/interactive/visualisation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Images in Python

# + jupyter={"outputs_hidden": false}
import numpy as np
import matplotlib.pyplot as plt
# -

# ### Working with images in python is essentially a visual way of working with 2-d arrays (matrices)

# + jupyter={"outputs_hidden": false}
my_matrix = np.array(
    [[0, 3, 7, 8, 2, 3],
     [2, 1, 8, 7, 3, 0],
     [4, 9, 3, 2, 9, 2],
     [3, 8, 4, 0, 8, 5],
     [8, 0, 2, 1, 3, 9],
     [9, 2, 4, 2, 4, 8]]
)

# + jupyter={"outputs_hidden": false}
my_matrix
# -

# ### All of the normal numpy commands work with matrices (of any dimension)

# + jupyter={"outputs_hidden": false}
my_matrix.mean()   # mean of the whole matrix
# -

# ### You can work over just the rows or columns of the matrix
#
# * axis = 0 (COLUMNS)
# * axis = 1 (ROWS)

# + jupyter={"outputs_hidden": false}
my_matrix.mean(axis=0)      # mean of the columns

# + jupyter={"outputs_hidden": false}
my_matrix.mean(axis=0)[0]   # mean of the 0th column

# + jupyter={"outputs_hidden": false}
my_matrix.mean(axis=1)      # mean of the rows

# + jupyter={"outputs_hidden": false}
my_matrix.flatten()         # convert to 1D (useful for some plotting)
# -

# ### Math on matrices/images applies to every value

# + jupyter={"outputs_hidden": false}
another_matrix = my_matrix + 2

print(another_matrix)
# -

# ## `.imshow` will display 2-d arrays as images

# + jupyter={"outputs_hidden": false}
plt.style.use('ggplot')
plt.rc('axes', grid=False)   # turn off the background grid for images

# + jupyter={"outputs_hidden": false}
plt.imshow(my_matrix, cmap=plt.cm.GnBu);
# -

# ### Show the image representation with a colorbar

# + jupyter={"outputs_hidden": false}
plt.imshow(my_matrix, cmap=plt.cm.GnBu)
plt.colorbar();
# -

# ### Colormap reference: http://matplotlib.org/examples/color/colormaps_reference.html

# + jupyter={"outputs_hidden": false}
fig, ax = plt.subplots(1,5)
fig.set_size_inches(16,4)
fig.tight_layout()

ax[0].imshow(my_matrix, cmap=plt.cm.viridis)
ax[0].set_xlabel('viridis', fontsize = 20)

ax[1].imshow(my_matrix, cmap=plt.cm.hot)
ax[1].set_xlabel('hot', fontsize = 20)

ax[2].imshow(my_matrix, cmap=plt.cm.magma)
ax[2].set_xlabel('magma', fontsize = 20)

ax[3].imshow(my_matrix, cmap=plt.cm.Spectral)
ax[3].set_xlabel('Spectral', fontsize = 20)

ax[4].imshow(my_matrix, cmap=plt.cm.gray)
ax[4].set_xlabel('gray', fontsize = 20);
# -

# ### Images are just arrays so they can be sliced.
#
# * The origin is the upper left hand corner `[0,0]`
# * `[Start_Row:End_Row, Start_Column:End_Column]`
#   - Start: first element you want
#   - End: first element you DON'T want
#   - `:` is the whole range

# + jupyter={"outputs_hidden": false}
fig, ax = plt.subplots(1,4)
fig.set_size_inches(16,4)
fig.tight_layout()

ax[0].imshow(my_matrix, cmap=plt.cm.GnBu)
ax[0].set_xlabel('Original', fontsize = 20)

ax[1].imshow(my_matrix[:,0:2], cmap=plt.cm.GnBu)
ax[1].set_xlabel('[:,0:2]', fontsize = 20)         # all rows, 2 columns

ax[2].imshow(my_matrix[0:2,:], cmap=plt.cm.GnBu)   # 2 rows, all columns
ax[2].set_xlabel('[0:2,:]', fontsize = 20)

ax[3].imshow(my_matrix[2:4,2:4], cmap=plt.cm.GnBu);
ax[3].set_xlabel('[2:4,2:4]', fontsize = 20) ;     # center
# -

# ### Images are just arrays so they can be [manipulated](https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.array-manipulation.html).
# + jupyter={"outputs_hidden": false} fig, ax = plt.subplots(1,5) fig.set_size_inches(16,4) fig.tight_layout() ax[0].imshow(my_matrix, cmap=plt.cm.GnBu) ax[0].set_xlabel('my_matrix', fontsize = 20) ax[1].imshow(np.flip(my_matrix,axis=0), cmap=plt.cm.GnBu) # axis = 0 (COLUMNS) ax[1].set_xlabel('np.flip(my_matrix,axis=0)', fontsize = 15) ax[2].imshow(np.flip(my_matrix,axis=1), cmap=plt.cm.GnBu) # axis = 1 (ROWS) ax[2].set_xlabel('np.flip(my_matrix,axis=1)', fontsize = 15) ax[3].imshow(np.rot90(my_matrix), cmap=plt.cm.GnBu) # Rotate 90 counter-clockwise ax[3].set_xlabel('np.rot90(my_matrix)', fontsize = 15) ax[4].imshow(np.roll(my_matrix,1,axis=0), cmap=plt.cm.GnBu) # axis = 0 (COLUMNS) ax[4].set_xlabel('np.roll(my_matrix,1,axis=0)', fontsize = 15); # - # --- # ### WARNING! Common image formats DO NOT preserve dynamic range of original data!! # - Common image formats: jpg, gif, png, tiff # - Common image formats will re-scale your data values to [0:1] # - Common image formats are **NOT** suitable for scientific data! # + jupyter={"outputs_hidden": false} # Write my_matrix to a PNG file plt.imsave('Square.png', my_matrix, cmap=plt.cm.gray) # Read in the PNG file my_png = plt.imread('Square.png') # + jupyter={"outputs_hidden": false} print(f"The original data has a min = {my_matrix.min():.2f} and a max = {my_matrix.max():.2f}") print(f"The PNG file has a min = {my_png.min():.2f} and a max = {my_png.max():.2f}") # - # --- # # ## [FITS file (Flexible Image Transport System)](https://en.wikipedia.org/wiki/FITS) # # # * The data format most widely used within astronomy # * Primarily designed to store scientific data sets # * Multidimensional arrays (images) # * 2-dimensional tables organized into rows and columns of information. # + jupyter={"outputs_hidden": false} import astropy.io.fits as fits # - # ## FITS files consist of at least two parts - `Header` and `Data` # # * A FITS file is comprised of segments called Header/Data Units (HDUs). # * The first `[0]` HDU is called the `Primary HDU`. # * The primary data array can contain a 1-D spectrum, a 2-D image, or a 3-D data cube. # * Every HDU consists of an ASCII formatted `Header Unit`. 
# * Each header unit contains a sequence of fixed-length 80-character keyword (`Cards`) # + jupyter={"outputs_hidden": false} data_file = "./Data/bsg01.fits" fits.info(data_file) # + jupyter={"outputs_hidden": false} image_data = fits.getdata(data_file, 0) image_header = fits.getheader(data_file, 0) # + jupyter={"outputs_hidden": false} image_header # + jupyter={"outputs_hidden": false} image_data[0:2] # + jupyter={"outputs_hidden": false} print(f"The image has a shape [height,width] of {image_data.shape}") # - # ## FITS format preserves the full dynamic range of data # + jupyter={"outputs_hidden": false} print(f"The image has a maximum value of {image_data.max():.2f}") print(f"The image has a minimum value of {image_data.min():.2f}") # + jupyter={"outputs_hidden": false} fig, ax = plt.subplots(1,2) fig.set_size_inches(12,6) fig.tight_layout() ax[0].imshow(image_data,cmap=plt.cm.gray) ax[0].set_title("OK, Boomer!") ax[1].set_xlabel("Pixel Values") ax[1].set_ylabel("Number of Pixels") ax[1].hist(image_data.flatten(),bins=20); # - # ## Just show some values # # * `vmin =` lowest value # * `vmax =` highest value # + fig, ax = plt.subplots(1,2) fig.set_size_inches(12,6) fig.tight_layout() ax[0].imshow(image_data, cmap=plt.cm.gray, vmin = 60, vmax = 101) ax[0].set_title("OK, Boomer!") ax[1].set_xlabel("Pixel Values") ax[1].set_ylabel("Number of Pixels") ax[1].hist(image_data.flatten(),bins=20) ax[1].vlines(60, 0, 50000, color = 'Navy', linewidth = 5, linestyle = '--'); # - # ## Modifying pixel values # + jupyter={"outputs_hidden": false} copy_data = np.copy(image_data) # make a copy of the data to work with copy_data.min(), copy_data.mean(), copy_data.max() # - # #### Set all pixel_values > 2 * mean pixel_value to pixel_value maximum # + jupyter={"outputs_hidden": false} copy_data[copy_data > 2 * copy_data.mean()] = copy_data.max() # + jupyter={"outputs_hidden": false} fig, ax = plt.subplots(1,2) fig.set_size_inches(12,6) fig.tight_layout() ax[0].imshow(copy_data,cmap=plt.cm.gray) my_bins = np.arange(0,110,5) ax[1].set_xlabel("Pixel Values") ax[1].set_ylabel("Number of Pixels") ax[1].hist(image_data.flatten(), bins=my_bins) ax[1].hist(copy_data.flatten(), bins=my_bins, histtype = "step", linewidth = "4", color = "b"); # - # ## You can add and subtract images # + jupyter={"outputs_hidden": false} another_image_file = "./Data/bsg02.fits" another_image_data = fits.getdata(another_image_file) # a quick way to just get the data # + jupyter={"outputs_hidden": false} fig, ax = plt.subplots(1,2) fig.set_size_inches(8,6) fig.tight_layout(w_pad = 3.0) ax[0].set_title("Original Boomer Image") ax[1].set_title("Another Boomer Image") ax[0].imshow(image_data, cmap=plt.cm.gray) ax[1].imshow(another_image_data, cmap=plt.cm.gray); # - # ## The two images above may look the same but they are not! # # ### Subtracting the two images reveals the truth. 
# + jupyter={"outputs_hidden": false} real_image = image_data - another_image_data # Subtract the images pixel by pixel # + jupyter={"outputs_hidden": false} fig, ax = plt.subplots(1,3) fig.set_size_inches(12,6) fig.tight_layout(w_pad = 3.0) ax[0].set_title("Original Boomer Image") ax[1].set_title("Another Boomer Image") ax[2].set_title("Real Boomer Image!") ax[0].imshow(image_data, cmap=plt.cm.gray) ax[1].imshow(another_image_data, cmap=plt.cm.gray); ax[2].imshow(real_image, cmap=plt.cm.gray); # + jupyter={"outputs_hidden": false} print(f"The real image has a maximum value of {real_image.max():.2f}") print(f"The real image has a minimum value of {real_image.min():.2f}") # - # --- # ## FITS Images - An astronomical example # + jupyter={"outputs_hidden": false} data_file = "./Data/M51_Blue.fits" fits.info(data_file) # + jupyter={"outputs_hidden": false} object_data = fits.getdata(data_file, 0) object_header = fits.getheader(data_file, 0) # - object_header # + jupyter={"outputs_hidden": false} print(f"Image min = {object_data.min():9.1f}") print(f"Image max = {object_data.max():9.1f}") print(f"Image mean = {object_data.mean():9.1f}") print(f"Image std = {object_data.std():9.1f}") # - # #### Notice the VERY wide range of pixel values (dynamic range) # + jupyter={"outputs_hidden": false} fig, ax = plt.subplots(1,2) fig.set_size_inches(12,6) fig.tight_layout(w_pad = 5) ax[0].set_title("Object Image") ax[0].imshow(object_data, cmap=plt.cm.gray) ax[1].set_xlabel("Image Pixel Values") ax[1].set_ylabel("Number of Pixels") ax[1].hist(object_data.flatten(),bins=100); # - # #### Notice the origin is in the upper left corner (the image is upside down) # + jupyter={"outputs_hidden": false} fig, ax = plt.subplots(1,2) fig.set_size_inches(12,6) fig.tight_layout(w_pad = 5) ax[0].set_title("Object Image") ax[0].imshow(object_data, cmap=plt.cm.gray, origin = "lower") ax[1].set_xlabel("Image Pixel Values") ax[1].set_ylabel("Number of Pixels") ax[1].hist(object_data.flatten(),bins=100); # - # #### Better, the origin is in the lower left corner # --- # # ### Like most astronomical data, all of the interesting stuff is in a very small percentage of the total pixels in the image. 
# + fig, ax = plt.subplots(1,2) fig.set_size_inches(12,6) fig.tight_layout(w_pad = 5) ax[0].set_title("Object Image") ax[0].imshow(object_data, cmap=plt.cm.gray, origin = "lower", vmin = 10000, vmax = 25000) ax[1].set_xlabel("Image Pixel Values") ax[1].set_ylabel("Number of Pixels") ax[1].set_ylim(0,50000) ax[1].hist(object_data.flatten(),bins=100) ax[1].vlines(10000, 0, 50000, color = 'Navy', linewidth = 5, linestyle = '--'); # - np.size(object_data) np.size(object_data[object_data > 10000]) np.size(object_data[object_data > 10000]) / np.size(object_data) * 100 # --- # ## World Coordinate System `WCS` # # * Most FITS images contain coordinate information # * This information is part of the FITS header # + jupyter={"outputs_hidden": false} from astropy.wcs import WCS # + jupyter={"outputs_hidden": false} my_wcs = WCS(object_header) # Ignore the warning # + jupyter={"outputs_hidden": false} my_wcs # - # #### To plot the coordinates on your image you have to use a different version of the `subplot` command # # * You have to define EACH axes with a `fig.add_subplot()` command # + jupyter={"outputs_hidden": false} fig = plt.figure() ax1 = fig.add_subplot(121, projection = my_wcs) ax2 = fig.add_subplot(122) fig.set_size_inches(12,6) fig.tight_layout(w_pad = 6) ax1.set_title("Object Image") ax1.set_xlabel("RA") ax1.set_ylabel("Dec") ax1.imshow(object_data, cmap=plt.cm.gray, origin="lower") ax2.set_xlabel("Image Pixel Values") ax2.set_ylabel("Number of Pixels") ax2.hist(object_data.flatten(),bins=100); # + jupyter={"outputs_hidden": false}
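# The lines below are an added illustration (not in the original notebook):
# a WCS object can also convert pixel coordinates to sky coordinates; here
# we query the image center with `wcs_pix2world`.
ny, nx = object_data.shape
ra, dec = my_wcs.wcs_pix2world(nx / 2, ny / 2, 0)
print(f"Image center: RA = {ra:.4f} deg, Dec = {dec:.4f} deg")
# -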
Python_Images.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Install TensorTrade # !python3 -m pip install git+https://github.com/tensortrade-org/tensortrade.git # ## Setup Data Fetching # + import pandas as pd import tensortrade.env.default as default from tensortrade.data.cdd import CryptoDataDownload from tensortrade.feed.core import Stream, DataFeed from tensortrade.oms.exchanges import Exchange from tensortrade.oms.services.execution.simulated import execute_order from tensortrade.oms.instruments import USD, BTC, ETH from tensortrade.oms.wallets import Wallet, Portfolio from tensortrade.agents import DQNAgent # %matplotlib inline # + cdd = CryptoDataDownload() data = cdd.fetch("Bitstamp", "USD", "BTC", "1h") # - data.head() # ## Create features with the feed module # + def rsi(price: Stream[float], period: float) -> Stream[float]: r = price.diff() upside = r.clamp_min(0).abs() downside = r.clamp_max(0).abs() rs = upside.ewm(alpha=1 / period).mean() / downside.ewm(alpha=1 / period).mean() return 100*(1 - (1 + rs) ** -1) def macd(price: Stream[float], fast: float, slow: float, signal: float) -> Stream[float]: fm = price.ewm(span=fast, adjust=False).mean() sm = price.ewm(span=slow, adjust=False).mean() md = fm - sm signal = md - md.ewm(span=signal, adjust=False).mean() return signal features = [] for c in data.columns[1:]: s = Stream.source(list(data[c]), dtype="float").rename(data[c].name) features += [s] cp = Stream.select(features, lambda s: s.name == "close") features = [ cp.log().diff().rename("lr"), rsi(cp, period=20).rename("rsi"), macd(cp, fast=10, slow=50, signal=5).rename("macd") ] feed = DataFeed(features) feed.compile() # - for i in range(5): print(feed.next()) # ## Setup Trading Environment # + bitstamp = Exchange("bitstamp", service=execute_order)( Stream.source(list(data["close"]), dtype="float").rename("USD-BTC") ) portfolio = Portfolio(USD, [ Wallet(bitstamp, 10000 * USD), Wallet(bitstamp, 10 * BTC) ]) renderer_feed = DataFeed([ Stream.source(list(data["date"])).rename("date"), Stream.source(list(data["open"]), dtype="float").rename("open"), Stream.source(list(data["high"]), dtype="float").rename("high"), Stream.source(list(data["low"]), dtype="float").rename("low"), Stream.source(list(data["close"]), dtype="float").rename("close"), Stream.source(list(data["volume"]), dtype="float").rename("volume") ]) env = default.create( portfolio=portfolio, action_scheme="managed-risk", reward_scheme="risk-adjusted", feed=feed, renderer_feed=renderer_feed, renderer=default.renderers.PlotlyTradingChart(), window_size=20 ) # - env.observer.feed.next() # ## Setup and Train DQN Agent # + agent = DQNAgent(env) agent.train(n_steps=200, n_episodes=2, save_path="agents/")
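# ## Inspect performance (added sketch)

# The cell below is an added sketch, not part of the original notebook. It
# assumes the portfolio keeps TensorTrade's usual step-indexed `performance`
# ledger with a `net_worth` column; adapt the attribute access if your
# version differs.
performance = pd.DataFrame.from_dict(portfolio.performance, orient='index')
performance['net_worth'].plot(title='Net worth over training')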
examples/train_and_evaluate.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + id="XTMDgP4kloQC"
# %tensorflow_version 1.x
from __future__ import print_function
import tensorflow.keras
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras import backend as K
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.optimizers import SGD,Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from tensorflow.keras.callbacks import ModelCheckpoint, Callback, EarlyStopping, ReduceLROnPlateau

# Loading the required libraries for the modeling
import numpy as np
import os
import errno
import cv2
import matplotlib.pyplot as plt
# %matplotlib inline
import pandas as pd   # data processing, CSV file I/O (e.g. pd.read_csv)
from skimage.color import rgb2gray
from scipy import ndimage

# Image processing
import seaborn as sns
import imageio
import skimage
import skimage.io
import skimage.transform
from imageio import imread

import math             # MATHEMATICAL OPERATIONS
from glob import glob   # FILE OPERATIONS
import itertools

# + colab={"base_uri": "https://localhost:8080/"} id="salTYkKOlu_H" outputId="76135e5d-88c0-4374-de72-dc3ae931a5cb"
from google.colab import drive
drive.mount('/content/drive/')

# + id="7AzisCGimFfd"
MODEL_PATH='/content/drive/My Drive/Conf_COVID19/'

# + id="JVAh1OIhl3-v"
from tensorflow.keras import models
classification_model = models.load_model(MODEL_PATH+'model_corona.h5')

# + id="5sWyk-4Hm2jF"
img='79858395-3452-426d-bb99-e01af7961b44.png'
classification_target_size=(224,224)
mask_rcnn_target_size=(256,256)
PNEU='/pneumonia/'
NORMAL='/normal/'

# + id="OOJg6LvfmyWM"
test_img = cv2.imread(MODEL_PATH+img)
test_img = cv2.resize(test_img,classification_target_size)

# + id="tXcPEPIEnyoA"
test_img= test_img.reshape(1,224,224,3)

# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="jWGNfOUgniel" outputId="b483222e-befd-469d-9b03-d61123e54ad6"
y = classification_model.predict(test_img)
y_test_non_category = [ np.argmax(t) for t in y ]
for pred in y_test_non_category:
    if pred == 0:
        prediction = 'pneumonia'
    else:
        prediction = 'normal'
prediction

# + colab={"base_uri": "https://localhost:8080/"} id="2VhA4hKUqlZ4" outputId="d59dab3e-dd17-4db0-f398-10a352960077"
# Import Mask RCNN
#sys.path.append(os.path.join(ROOT_DIR, 'Mask_RCNN'))  # To find local version of the library
# !pip install mrcnn
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log

# + colab={"base_uri": "https://localhost:8080/"} id="tLoZMNPFr7sF" outputId="6147777e-21bd-475f-d503-b600070a3b67"
# The following parameters have been selected to reduce running time for demonstration purposes
# These are not optimal

class DetectorConfig(Config):
    """Configuration for training pneumonia detection on the RSNA pneumonia dataset.
    Overrides values in the base Config class.
    """

    # Give the configuration a recognizable name
    NAME = 'pneumonia'

    # Train on 1 GPU and 8 images per GPU. We can put multiple images on each
    # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1 IMAGES_PER_GPU = 8 BACKBONE = 'resnet50' NUM_CLASSES = 2 # background + 1 pneumonia classes IMAGE_MIN_DIM = 256 IMAGE_MAX_DIM = 256 RPN_ANCHOR_SCALES = (16, 32, 64, 128) TRAIN_ROIS_PER_IMAGE = 32 MAX_GT_INSTANCES = 4 DETECTION_MAX_INSTANCES = 3 DETECTION_MIN_CONFIDENCE = 0.78 ## match target distribution DETECTION_NMS_THRESHOLD = 0.01 STEPS_PER_EPOCH = 200 config = DetectorConfig() config.display() # + id="Fn2r-INQrJSW" class InferenceConfig(DetectorConfig): GPU_COUNT = 1 IMAGES_PER_GPU = 1 inference_config = InferenceConfig() model = modellib.MaskRCNN(mode='inference', config=inference_config, model_dir=MODEL_PATH) model_path = os.path.join(MODEL_PATH,'Mask_pneumonia.h5') model.load_weights(model_path, by_name=True) # + id="m0HE5VontE-7" original_image = cv2.imread(MODEL_PATH+img) original_image = cv2.resize(original_image,mask_rcnn_target_size) # + id="5v7NKBhit9ue" # set color for class def get_colors_for_class_ids(class_ids): colors = [] for class_id in class_ids: if class_id == 1: colors.append((.941, .204, .204)) return colors # + id="9PrwFqQAxQUA" # COCO Class names class_names = ['normal','pneumonia'] # + colab={"base_uri": "https://localhost:8080/"} id="U7sp0Bnz1eYA" outputId="2b2b8870-e39e-4221-ddf4-f38a9ad411fd" from numpy import expand_dims original_image = expand_dims(original_image, 0) original_image.shape # + colab={"base_uri": "https://localhost:8080/", "height": 589} id="P8Pv7_aMs3Yt" outputId="89cb5002-6a7e-4e43-f901-2377abc8194b" i=0 results = model.detect(original_image,verbose=0) #, verbose=1) r = results[0] fig = plt.figure(figsize=(10, 30)) plt.subplot(1, 1, 1*i + 1) visualize.display_instances(original_image.reshape(256,256,3), r['rois'], r['masks'], r['class_ids'], class_names, r['scores'],colors=get_colors_for_class_ids(r['class_ids']), ax=fig.axes[-1])
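# + [markdown]
# The cell below is an added sketch (not part of the original notebook): it
# prints the raw detections held in `r` above, one bounding box per predicted
# region together with its confidence score.

# +
for roi, score in zip(r['rois'], r['scores']):
    print('box (y1, x1, y2, x2):', roi, '  score: %.3f' % score)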
Group1_Stage1+stage2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# This code demonstrates the LPME algorithm. LPME assumes the hidden (cost) metric $s$ can be expressed as:
# $$ s = \langle a, r \rangle + c. $$
#
# It uses only oracle comparisons to recover $a$. It works using a binary-search-like algorithm on each coordinate.

# +
# %load_ext autoreload
# %autoreload 2

# +
import numpy as np
from scipy import spatial
import matplotlib.pyplot as plt

import sys
# sys.path.append('../')
from common import Sphere, Oracle, normalize
from lpme import LPME, compute_vector

# +
np.random.seed(7)

nc = 5                         # number of classes
r = np.random.uniform(0, 10)   # sphere radius
search_tol = 1e-2              # search tolerance
q = nc**2 - nc

# +
# search space is a Sphere
sphere = Sphere(np.random.randn(q), r, q)

# linear performance metric
a = np.random.randn(q)
a = normalize(a)

# B = 0 makes oracle linear
B = np.matrix(np.zeros((q, q)))
# -

oracle = Oracle(a, B)

lpm = LPME(sphere, oracle, search_tol)
ahat = lpm.run_lpme(0.)

lpm = LPME(sphere, oracle, search_tol)
ahat = lpm.run_lpme(0.1)

a

ahat

print("error:", np.linalg.norm(ahat - a))
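# (Added sketch, not in the original notebook.) Since `normalize` fixes only
# the scale, cosine similarity is a scale-free complement to the norm error
# above; values near 1 mean `ahat` points along `a`.
print("cosine similarity:", 1 - spatial.distance.cosine(ahat, a))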
PTA algorithm/lpme.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Ex2 - Getting and Knowing your Data

# This time we are going to pull data directly from the internet.
# Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.
#
# ### Step 1. Import the necessary libraries

import pandas as pd
import numpy as np

# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv).

# ### Step 3. Assign it to a variable called chipo.

chipo = pd.read_csv('https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv', delimiter='\t')

# ### Step 4. See the first 10 entries

chipo.head(10)

# ### Step 5. What is the number of observations in the dataset?

# Solution 1
len(chipo.index)

# Solution 2
chipo.describe(include="all")

# ### Step 6. What is the number of columns in the dataset?

len(chipo.columns)

# ### Step 7. Print the name of all the columns.

for col in chipo.columns:
    print(col)

# ### Step 8. How is the dataset indexed?

chipo.index

#chipo.notna()
chipo.isnull().sum()

# ### Step 9. Which was the most-ordered item?

# +
#chipo.describe(include="all")["item_name"]["top":"freq"]
#chipo.describe(include="all").loc["top":"freq", "item_name"] # order is reversed when using loc

item_quants = chipo.groupby(["item_name"]).agg({"quantity": "sum"})
item_quants.sort_values(by="quantity", ascending=False)[:10]
# -

# ### Step 10. For the most-ordered item, how many items were ordered?

#chipo.describe(include="all").loc["top":"freq", "item_name"]
item_quants.sort_values(by="quantity", ascending=False)[:1]

# ### Step 11. What was the most ordered item in the choice_description column?

# +
#chipo.describe(include="all").loc["top":"freq", "choice_description"]
choice_description_quants = chipo.groupby(["choice_description"]).agg({"quantity": "sum"})
choice_description_quants.sort_values(by="quantity", ascending=False)[:10]
# -

# ### Step 12. How many items were ordered in total?

chipo["quantity"].sum()

# ### Step 13. Turn the item price into a float

chipo_float = chipo.copy()
#chipo_float["item_price"] = chipo_float["item_price"].str.replace("$", "")
chipo_float["item_price"] = chipo_float["item_price"].str.slice(1).astype(float)  # np.float was removed in NumPy 1.24; the builtin float is equivalent
#chipo_float["item_price"] = chipo_float["item_price"].astype(float)

# #### Step 13.a. Check the item price type

chipo_float["item_price"].dtype

# #### Step 13.b. Create a lambda function and change the type of item price

change_type_lambda = lambda x: float(x[1:])
chipo_float["item_price"] = chipo_float["item_price"].apply(change_type_lambda)

# #### Step 13.c. Check the item price type

chipo_float["item_price"].dtype

# ### Step 14. How much was the revenue for the period in the dataset?

chipo_float["item_total"] = chipo_float["item_price"]*chipo_float["quantity"]
revenue = chipo_float["item_total"].sum()
revenue

chipo_float.head()

# ### Step 15. How many orders were made in the period?

#len(chipo_float["order_id"].unique().tolist())
unique_order_count = chipo_float.order_id.nunique()
unique_order_count

# ### Step 16. What is the average revenue amount per order?

# Solution 1
revenue / unique_order_count

# +
# Solution 2: equivalent groupby-based check (filled in)
chipo_float.groupby("order_id")["item_total"].sum().mean()
# -

# ### Step 17. How many different items are sold?

chipo_float["item_name"].nunique()
01_Getting_&_Knowing_Your_Data/Chipotle/Exercises.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd

d = pd.read_excel("http://zhuang.harvard.edu/MERFISHData/data_for_release.zip",
                  usecols=[0, 1, 2, 3, 4, 6])  # `usecols` replaces the removed `parse_cols` keyword in current pandas
# -

readouts = pd.DataFrame()
readouts["experiment"] = d["experiment"]
readouts["cell"] = d["cellID"]
readouts["feat"] = d["geneName"]
readouts["codeword"] = d["intCodeword"].apply("{:016b}".format)

readouts.to_csv("140genesData.readouts.txt", index=False, sep="\t")
tests/data/collect-readouts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import statsmodels.api as sm # + df=pd.read_csv("soft_pf0.2_sp015_lp020_reduced10.overlaps",delim_whitespace=True) df = df[df.time < 4.2] df['n_tangled'] = df['n_total_crossings_init']-df['n_total_crossings_complete'] df['net']=df['n_tangled']-df['n_instant_bond_overlaps'] df2=pd.read_csv("soft_pf0.2_sp025_lp020_reduced10.overlaps",delim_whitespace=True) df2 = df2[df2.time < 4.2] df2['n_tangled'] = df2['n_total_crossings_init']-df2['n_total_crossings_complete'] df2['net']=df2['n_tangled']-df2['n_instant_bond_overlaps'] df3=pd.read_csv("soft_pf0.2_sp050_lp020_reduced10.overlaps",delim_whitespace=True) df3 = df3[df3.time < 4.2] df3['n_tangled'] = df3['n_total_crossings_init']-df3['n_total_crossings_complete'] df3['net']=df3['n_tangled']-df3['n_instant_bond_overlaps'] # - fig,ax = plt.subplots(1,3,figsize=(12,3),sharey=True) df.plot(x="time",y="n_instant_bond_overlaps",ax=ax[0]) df.plot(x="time",y="n_tangled",ax=ax[0]) ax[0].set_title(r'$U_{max}=15$') ax[1].set_title(r'$U_{max}=25$') ax[2].set_title(r'$U_{max}=50$') df2.plot(x="time",y="n_instant_bond_overlaps",ax=ax[1]) df2.plot(x="time",y="n_tangled",ax=ax[1]) df3.plot(x="time",y="n_instant_bond_overlaps",ax=ax[2]) df3.plot(x="time",y="n_tangled",ax=ax[2]) for a in ax: a.set_ylabel("Number of overlaps",fontsize=16) a.set_xlabel(r"Sim time ($\tau$)",fontsize=16) #a.legend([]) a.legend(['Bond overlaps','Filament crossings'],loc=4) a.grid(True,linestyle='--') fig.tight_layout(w_pad=0) fig,ax = plt.subplots(1,1,figsize=(7,5)) df.plot(x="time",y="n_total_crossings_init",ax=ax,linewidth=4,style='red') dftemp = df[df.time > 1] model=sm.OLS(dftemp.n_total_crossings_init,sm.add_constant(dftemp.time)) model=model.fit() fit=lambda y: model.params[0] + model.params[1]*y xx=np.linspace(0,4.2,10) ax.plot(xx,fit(xx),'k--') df2.plot(x="time",y="n_total_crossings_init",ax=ax,linewidth=4) df2temp = df2[df2.time > 1] model2=sm.OLS(df2temp.n_total_crossings_init,sm.add_constant(df2temp.time)) model2=model2.fit() fit2=lambda y: model2.params[0] + model2.params[1]*y ax.plot(xx,fit2(xx),'k-.') df3.plot(x="time",y="n_total_crossings_init",ax=ax,linewidth=4) df3temp = df3[df3.time > 1] model3=sm.OLS(df3temp.n_total_crossings_init,sm.add_constant(df3temp.time)) model3=model3.fit() fit3=lambda y: model3.params[0] + model3.params[1]*y ax.plot(xx,fit3(xx),'k.') ax.set_ylabel("Number of overlaps initiated",fontsize=16) ax.set_xlabel(r"Sim time ($\tau$)",fontsize=16) ax.legend([r'$U_{max}=15$','fit slope = %5.0f'%model.params[1],r'$U_{max}=25$','fit slope = %5.0f'%model2.params[1],r'$U_{max}=50$','fit slope = %5.0f'%model3.params[1]]) ax.grid(True,linestyle='--') fig,ax = plt.subplots(1,1,figsize=(7,5)) df.plot(x="time",y="n_tangled",ax=ax) df.plot(x="time",y="net",ax=ax) ax.set_ylabel("Number of tangled filaments",fontsize=16) ax.set_xlabel(r"Sim time ($\tau$)",fontsize=16) ax.legend([]) ax.grid(True,linestyle='--')
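# (Added sketch, not in the original notebook.) Report the fitted initiation
# rates together with their standard errors, which the plot legend rounds away.
for label, m in [('U_max=15', model), ('U_max=25', model2), ('U_max=50', model3)]:
    print(label, ': slope = %.1f +/- %.1f' % (m.params[1], m.bse[1]))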
notebooks/overlapAnalysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# pip install numpy
# conda install numpy
import numpy as np

# +
print(type([1,2,3]))
print(type((1,2,3)))
print(type(np.array([1,2,3])))
print(type(np.array((1,2,3))))
# -

np.zeros(5)       # a single number for a 1-D array

np.zeros([5,5])   # a list or tuple for an n-dimensional matrix

np.zeros((5,5))

np.zeros((2,3,4))

np.zeros((1,2,2,2,2,2))   # 6 dimensions

np.zeros((1000,200))      # 1000 rows x 200 columns

np.eye(5)

np.arange(5)          # 1 parameter: the number of elements

np.arange(4,20)       # [4, 20)

np.arange(-12,1)      # [-12, 1)

np.arange(0,20,2)     # from [0, 20) in steps of 2

np.arange(0,2.1,0.1)  # from [0, 2.1) in steps of 0.1

np.arange(9).reshape(3,3)

np.zeros(20).reshape(2,5,2).reshape(20)

len(np.ones(12))   # len returns the length of the array

len(np.ones(12).reshape(4,3))

np.ones(12).reshape(4,3)         # change the dimensions

np.ones(12).reshape(4,3).shape   # returns the shape (dimensions) of the array

np.arange(12).reshape(4,3)[1,2]  # returns the value at that position in the array

a = np.array([10,10,10])
b = np.array([5,5,5])

a + b   # Addition

a - b   # Subtraction

a + 7   # Addition

a - 8   # Subtraction

a * b   # Multiplication

a * 3   # Multiplication

a / b   # Division

a / 2   # Division

a % b   # Modulo

a % 3   # Modulo

muestra = np.arange(20).reshape(5,2,2)
muestra

respuesta_glucosa = muestra   # put a condition on the array

respuesta_glucosa

muestra[respuesta_glucosa]    # filters values within an array

# +
# Glucose values greater than 5 and less than 14 that are even.
# -

muestra[(respuesta_glucosa > 5) & (respuesta_glucosa < 14) & (respuesta_glucosa % 2 == 0)]

A = np.arange(4).reshape((2,2))
A

B = np.arange(4,8).reshape((2,2))
B

# Dot product
A.dot(B)

np.dot(A,B)

B.dot(A)

np.dot(B,A)

# Create the array
arreglo_doce = np.arange(12)
arreglo_doce

# Sum of the array's elements
arreglo_doce.sum()

# Minimum of the array
arreglo_doce.min()

# Maximum of the array
arreglo_doce.max()

# Select the range to sum
arreglo_doce[5:].sum()

arreglo_doce.reshape(3,2,2)

arreglo_doce.reshape(3,2,2).sum()

arreglo_doce.reshape(3,2,2).sum(axis = 0)

arreglo_doce.reshape(3,2,2).sum(axis = 1)

arreglo_doce.reshape(3,2,2).sum(axis = 2)
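# (Added note, not in the original notebook.) keepdims=True preserves the
# reduced axis, which makes the result broadcast cleanly against the array:
arreglo_doce.reshape(3,2,2).sum(axis = 0, keepdims = True).shape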
02-NUMPY/01-INTRODUCCION.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.6.13 64-bit (''venv'': conda)' # name: python3 # --- import os os.chdir('..') import glob from utils.geotif_io import readTiff, writeTiff from utils.raster_vec import raster2vec # + paths_pred = sorted(glob.glob('data/dset-l578/scene/*_pred.tif')) len(paths_pred) paths_pred[0] # + for i in range(len(paths_pred)): print('pred_map:', paths_pred[i]) path_save = paths_pred[i].split('.')[0]+'.gpkg' if os.path.exists(path_save): continue raster2vec(raster_path=paths_pred[i], \ output_path=path_save, \ dn_values=[1]) print('saved gpkg-->:', path_save)
notebooks/raster2vec.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_amazonei_pytorch_latest_p36
#     language: python
#     name: conda_amazonei_pytorch_latest_p36
# ---

# %pip install -q -r requirements.txt

# %load_ext autoreload
# %autoreload 2

import numpy as np

# load hyperparams instance
from hyper_params import HyperParams
params = HyperParams()

DATA_DIR='data'

from dataproc import io
io.DATA_DIR='data'

# # Prepare data for training and testing, without AutoEncoder

# ## 1. build all features dataset, for all 54k admissions

# build all features dataset
import featues_datasets_all_patients
featues_datasets_all_patients.DATA_DIR = DATA_DIR
df_final_dataset = featues_datasets_all_patients.run(params,
                                                     binning_numerics=False,
                                                     create_patients_list_view=False,
                                                     create_lab_events=True
                                                     )

# ## 2. build a labeled cohort

df_final_dataset = io.load_dataframe('df_final_dataset')
df_final_dataset.shape

# create a cohort dataframe and numpy file
import build_cohort_dataset
df_cohort = build_cohort_dataset.build_cohort(params, df_final_dataset)

# ## To train a model:
#
# The cohort data is now available in the `df_cohort` dataframe, and also in `data/fulldata.npy`
#
# Good luck!
#
#

df_cohort.drop(columns=['index']).to_csv('data/fulldata.csv', sep=',', index=False)

# # build features dataset for use with Autoencoder

# %%time
# build all features dataset for autoencoder
import featues_datasets_all_patients
featues_datasets_all_patients.DATA_DIR = DATA_DIR
df_final_dataset_binned = featues_datasets_all_patients.run(params,
                                                            binning_numerics=True,
                                                            create_patients_list_view=True,
                                                            create_lab_events=True
                                                            )

io.write_dataframe(df_final_dataset_binned, 'df_final_dataset_binned')

# ## 2. build a labeled cohort
df_final_dataset_binned = io.load_dataframe('df_final_dataset_binned')
df_final_dataset_binned.shape

# create a cohort dataframe and numpy file
import build_cohort_dataset
df_cohort = build_cohort_dataset.build_cohort(params, df_final_dataset_binned)

dnp = df_final_dataset.to_numpy()

import numpy as np
np.max(dnp), np.min(dnp)

df_final_dataset=df_final_dataset.drop(columns=['hadm_id'])

df_final_dataset.describe()

df_final_dataset = io.load_dataframe('df_final_dataset')

training_matrix = np.load('data/autoencoder_fulldata.npy')

training_matrix.shape, np.max(training_matrix), np.min(training_matrix)

training_matrix = training_matrix[:10000,:]

np.save('data/autoencoder_fulldata_mini.npy', training_matrix)

0.25**2

0.075**0.5

# # Build multi labels cohort

# create a cohort dataframe and numpy file
import build_cohort_dataset
df_cohort_multi_bacteria_labels = build_cohort_dataset.build_cohort_multi_bact(params, df_final_dataset_binned)

df_cohort_multi_bacteria_labels

io.write_dataframe(df_cohort_multi_bacteria_labels, 'df_cohort_multi_bacteria_labels')

df_cohort_multi_bacteria_labels = io.load_dataframe('df_cohort_multi_bacteria_labels')

df_cohort_multi_bacteria_labels.columns[:20]

# # TSNE

import pandas as pd

uniquevals = df_cohort.nunique()
uniquevals.sort_values(ascending=False)

from sklearn.manifold import TSNE

# dataset_file = 'data/fulldata.npy'
dataset_file = 'data/fulldata_multi_bacteria_label.npy'
dataset = np.load(dataset_file)
dataset.shape

X = dataset[:,0:-6]
y_all = dataset[:,-6:-1]
y = y_all[:,2]
X.shape, y.shape

X = dataset[:,9:]
y_all = dataset[:,:9]
y = y_all[:,2]
X.shape, y.shape

y = np.argmax(y_all, axis=1).reshape(-1,1)
y.shape, X.shape, y_all.shape

dataset_for_encoding = np.concatenate([X, y_all], axis=1)
dataset_for_encoding.shape

np.save( 'data/np_autoencoded_multi_labeled_data.npy', dataset_for_encoding)

y_multi = dataset[:-6:-1]

len(np.unique(y))

import seaborn as sns
sns.set(rc={'figure.figsize':(11.7,8.27)})

tsne = TSNE()
X_embedded = tsne.fit_transform(X)

palette = sns.color_palette("bright", len(np.unique(y)))
sns.scatterplot(X_embedded[:,0], X_embedded[:,1], hue=y, legend='full', palette=palette)

np_labeled_data = np.load('data/np_autoencoded_labeled_data.npy')

X = np_labeled_data[:,:-1]
y = np_labeled_data[:,-1]
X.shape, y.shape

tsne = TSNE()
X_embedded = tsne.fit_transform(X)

y.mean()

palette = sns.color_palette("bright", len(np.unique(y)))
sns.scatterplot(X_embedded[:,0], X_embedded[:,1], hue=y, legend='full', palette=palette)

# ## Tools to explore too many unique values

df_dataset_unprocessed[categorical_cols].nunique().sort_values(ascending=False)[0:20]

allvals = df_dataset_unprocessed['51508'].unique().tolist()
len(allvals), df_dataset_unprocessed['51508'].notna().sum()

df_dataset_unprocessed['51508'].value_counts()

[x for x in allvals if x and not x == np.nan and not str(x).replace('.','').isnumeric()]

pd.Series(sorted([float(x) for x in allvals if x and str(x).replace('.','').isnumeric() and float(x) > 1000.0])).describe()

df_dataset_unprocessed.nunique().sort_values(ascending=False)

# ## THE END

import torch
torch.__version__

fulldata
research/build_dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Evaluating performance of FFT2 and IFFT2 and checking for accuracy. <br><br>
# Note that the ffts from fft_utils perform the transformation in place to save memory.<br><br>
# As a rule of thumb, it's good to increase the number of threads as the size of the transform increases until one hits a limit <br><br>
# pyFFTW uses lower memory and is slightly slower (using icc to compile fftw might fix this, haven't tried it)

import numpy as np
import matplotlib.pyplot as plt
#from multislice import fft_utils
import pyfftw,os
import scipy.fftpack as sfft
# %load_ext memory_profiler
# %run obj_fft

# Loading libraries and the profiler to be used

N = 15000 #size of transform
t = 12    #number of threads.

# Creating a test signal to perform on which we will perform 2D FFT

a= np.random.random((N,N))+1j*np.random.random((N,N))
print('time for numpy forward')
# %timeit np.fft.fft2(a)
del(a)

a = np.random.random((N,N))+1j*np.random.random((N,N))
print('time for scipy forward')
# %timeit sfft.fft2(a,overwrite_x='True')
del(a)

a = np.random.random((N,N))+1j*np.random.random((N,N))
fft_obj = FFT_2d_Obj(np.shape(a),direction='FORWARD',flag='PATIENT',threads=t)
print('time for pyFFTW forward')
# %timeit fft_obj.run_fft2(a)
del(a)

a = np.random.random((N,N))+1j*np.random.random((N,N))
print('Memory for numpy forward')
# %memit np.fft.fft2(a)
del(a)

a = np.random.random((N,N))+1j*np.random.random((N,N))
print('Memory for scipy forward')
# %memit sfft.fft2(a,overwrite_x='True')
del(a)

a = np.random.random((N,N))+1j*np.random.random((N,N))
print('Memory for pyFFTW forward')
# %memit fft_obj.run_fft2(a)
del(a)

# The results depend on how the libraries are compiled. mkl linked scipy is fast but the fftw uses less memory. Also note that the fftw used in this test wasn't installed using icc.

# Creating a test signal to perform on which we will perform 2D IFFT.

a= np.random.random((N,N))+1j*np.random.random((N,N))
print('time for numpy backward')
# %timeit np.fft.ifft2(a)
del(a)

a = np.random.random((N,N))+1j*np.random.random((N,N))
print('time for scipy backward')
# %timeit sfft.ifft2(a,overwrite_x='True')
del(a)

a = np.random.random((N,N))+1j*np.random.random((N,N))
del fft_obj
fft_obj = FFT_2d_Obj(np.shape(a),direction='BACKWARD',flag='PATIENT',threads=t)
print('time for pyFFTW backward')
# %timeit fft_obj.run_ifft2(a)
del(a)

a = np.random.random((N,N))+1j*np.random.random((N,N))
print('Memory for numpy backward')
# %memit np.fft.ifft2(a)
del(a)

a = np.random.random((N,N))+1j*np.random.random((N,N))
print('Memory for scipy backward')
# %memit sfft.ifft2(a,overwrite_x='True')
del(a)

a = np.random.random((N,N))+1j*np.random.random((N,N))
print('Memory for pyFFTW backward')
# %memit fft_obj.run_ifft2(a)
del(a)

# The results depend on how the libraries are compiled. mkl linked scipy is fast but the fftw uses less memory. Also note that the fftw used in this test wasn't installed using icc.

# Testing for accuracy of 2D FFT:

N = 5000
a = np.random.random((N,N)) + 1j*np.random.random((N,N))
fft_obj = FFT_2d_Obj(np.shape(a),threads=t)
A1 = np.fft.fft2(a)
fft_obj.run_fft2(a)
np.allclose(A1,a)

# Testing for accuracy of 2D IFFT:

N = 5000
a = np.random.random((N,N)) + 1j*np.random.random((N,N))
A1 = np.fft.ifft2(a)
fft_obj.run_ifft2(a)
np.allclose(A1,a)
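# (Added sketch, not part of the original benchmark.) With flag='PATIENT' the
# FFTW planning step is expensive; pyFFTW "wisdom" can be pickled and reloaded
# so that later runs skip re-planning the same transform sizes.
import pickle
with open('fftw_wisdom.pkl', 'wb') as fh:
    pickle.dump(pyfftw.export_wisdom(), fh)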
tests/test_fft.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report
from sklearn import metrics
from sklearn import tree

data = pd.read_csv('C:/Users/Lenovo/Desktop/Crop_recommendation.csv')

data.head()

# +
x=data.drop("label",axis=1)
y=data["label"]

from sklearn.model_selection import train_test_split
Xtrain, Xtest, Ytrain, Ytest = train_test_split(x,y,test_size = 0.2,random_state =0)

# +
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier

clf = RandomForestClassifier(n_estimators=20,max_depth=5,n_jobs=-1)
print(clf)
clf.fit(Xtrain,Ytrain)

predicted_values = clf.predict(Xtest)
acc = metrics.accuracy_score(Ytest, predicted_values)  # renamed from `x` to avoid clobbering the feature frame
print(acc)

print(classification_report(Ytest,predicted_values))
# -

import pickle
pickle.dump(clf, open('C:/Users/Lenovo/Desktop/croprecommender.pkl','wb'))

model = pickle.load(open('C:/Users/Lenovo/Desktop/croprecommender.pkl','rb'))

print(model.predict([[90,42,43,20.879744,82.002744,6.502985,202.935536]]))
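# (Added sketch, not in the original notebook.) Impurity-based feature
# importances are a quick way to see which soil/climate inputs drive the
# recommendations of the fitted random forest.
for name, imp in sorted(zip(Xtrain.columns, clf.feature_importances_), key=lambda t: -t[1]):
    print(f"{name}: {imp:.3f}")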
Recommender/CropRecommender/CROP-RECOMMENDER.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] nbsphinx="hidden"
# This notebook is part of the `galgebra` documentation: https://galgebra.readthedocs.io/.
# -

# # Introduction to using GAlgebra
#
# This is a tutorial to introduce you to `galgebra`, a symbolic geometric algebra library for python.
#
# A geometric algebra is defined by a set of symbols that represent the basis vectors of a real vector space, a metric tensor, and possibly a set of coordinate symbols. If coordinates are defined, the metric tensor can be a function of them.
#
# The following cell imports all the functions needed in the tutorial from `sympy`, `ga`, and `printer`.

# +
import sympy
from galgebra.ga import Ga
from galgebra.printer import latex
from IPython.display import Math

# tell sympy to use our printing by default
sympy.init_printing(latex_printer=latex, use_latex='mathjax')
# -

# ## Printing in sympy
#
# Sympy will show pretty $\LaTeX$ renderings of symbolic expressions by default

sympy.S('n')**2

# But if we want to append freeform text on the same line, we must use `Math`, `latex`, and f-strings in tandem:

Math(f"y = { latex(sympy.S('n')**2) }")

# ## Creating an algebra
#
# To start with we will define the geometric algebra of a 3 dimensional Euclidean vector space, `o3d`, with coordinates $x$, $y$, and $z$ and unit vectors $e_x$, $e_y$, and $e_z$.

xyz = (x, y, z) = sympy.symbols('x y z', real=True)
o3d = Ga('e_x e_y e_z', g=[1, 1, 1], coords=xyz)
grad = o3d.grad

# The metric tensor $g$ is:

Math(f'g = {latex(o3d.g)}')

# ## Creating multivectors

# The most general element of a geometric algebra is a multivector. To define a scalar `S`, a vector `V`, a bivector `B`, and a pseudo-scalar `I` (these are the only pure grade multivectors we can have in three dimensions):

o3d.mv('S', 'scalar')

o3d.mv('V', 'vector')

o3d.mv('B', 'bivector')

o3d.mv('I', 'pseudo')

# We can also extract the basis vectors from `o3d`. If we name them `ex`, `ey`, and `ez` and form vectors from linear combinations of them:

ex, ey, ez = o3d.mv()
Math(f'{latex(ex)}, {latex(ey)}, {latex(ez)}')

# ## Multivector operators

# Binary operations that we can apply to vectors or multivectors in general are addition, `+`, subtraction, `-`, geometric product, `*`, inner (dot) product, `|`, outer (wedge) product, `^`, left contraction, `<`, right contraction, `>`.

# Because operator precedence is immutable in Python, we need to always use parentheses to determine the correct order of the operations in our expression. Examples for `+`, `-`, `*`, `|`, and `^` follow:

a = o3d.mv('a','vector')
b = o3d.mv('b','vector')
Math(fr'''
\begin{{align}}
a &= {latex(a)} \\
b &= {latex(b)}
\end{{align}}
''')

Math(fr'''
\begin{{align}}
a+b &= {latex(a+b)} \\
a-b &= {latex(a-b)} \\
ab &= {latex(a*b)} \\
a\cdot b &= {latex(a|b)} \\
a \rfloor b &= {latex(a<b)} \\
a \lfloor b &= {latex(a>b)} \\
a\wedge b &= {latex(a^b)}
\end{{align}}
''')

B = o3d.mv('B','bivector')
B

Math(fr'''
\begin{{align}}
BB &= {latex(B*B)} \\
a+B &= {latex(a+B)} \\
a-B &= {latex(a-B)} \\
aB &= {latex(a*B)} \\
a\cdot B &= {latex(a|B)} \\
a \rfloor B &= {latex(a<B)} \\
a \lfloor B &= {latex(a>B)} \\
a\wedge B &= {latex(a^B)} \\
\end{{align}}
''')

# ## More examples
#
# Additionally, we can define multivector fields that are functions of the coordinates.
# Some concrete examples are (vector and bivector fields):

Vf = x**2*ex + y**2*ey + z**2*ez
Bf = x*(ey^ez) + y*(ex^ez) + z*(ex^ey)
Math(fr'''
\begin{{align}}
\text{{Vector Field:}} && V_f &= {latex(Vf)} \\
\text{{Bivector Field:}} && B_f &= {latex(Bf)}
\end{{align}}
''')

# In addition to binary algebraic operations the most important member functions for multivectors are `grade(i)`, `rev()`, and `norm2()`. For a general multivector, `M`, we have:

M = o3d.mv('M', 'mv')
Math('M = %s' % latex(M))

Math(fr'''
\begin{{align}}
\text{{Grade 0:}} && \left<M\right>_0 &= {latex(M.grade(0))} \\
\text{{Grade 1:}} && \left<M\right>_1 &= {latex(M.grade(1))} \\
\text{{Grade 2:}} && \left<M\right>_2 &= {latex(M.grade(2))} \\
\text{{Grade 3:}} && \left<M\right>_3 &= {latex(M.grade(3))} \\
\text{{Reverse:}} && M^\dagger &= {latex(M.rev())}
\end{{align}}
''')

# ## More printing options

# A problem in displaying multivectors is that the expression can be very long and does not display nicely on the page. To alleviate this problem one can use the multivector member function `Fmt()`. The default is `Fmt(1)`, which displays the multivector on one line; `Fmt(2)` displays the multivector one grade per line; and `Fmt(3)` displays the multivector one base or basis blade per line. Some examples are:

M.Fmt(1)

M.Fmt(2)

M.Fmt(3)
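# The member-function list above names `norm2()` alongside `grade(i)` and `rev()` but never demonstrates it; the cell below is added to fill that gap for the vector `a`:

Math(fr'\left| a \right|^2 = {latex(a.norm2())}')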
examples/ipython/tutorial_algebra.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/DJCordhose/ml-workshop/blob/master/notebooks/tf2/tf-keras-classifier-metrics.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="XdfXfXK2-CiG" # # Metrics on binary classifications with TensorFlow 2 # # To generate models first run: https://colab.research.google.com/github/DJCordhose/ml-workshop/blob/master/notebooks/tf2/tf-keras-classifier.ipynb # + id="j6tt1Z9cEV9m" colab_type="code" colab={} import matplotlib.pyplot as plt # plt.xkcd() # plt.style.use('ggplot') # %matplotlib inline import matplotlib as mpl mpl.rcParams['figure.figsize'] = (20, 8) # + id="32Gu35CuCthq" colab_type="code" outputId="9ce2fe47-6eea-41f1-e412-9e05eaa73b6d" colab={"base_uri": "https://localhost:8080/", "height": 513} #@title Configure our example { display-mode: "form", run: "auto" } # https://colab.research.google.com/notebooks/forms.ipynb n = 100 #@param {type:"slider", min:1, max:1000, step:1} m = -1 #@param {type:"slider", min:-10, max:10, step: 0.1} b = 1 #@param {type:"slider", min:-10, max:10, step: 0.1} noise_level = 0.1 #@param {type:"slider", min:0.0, max:1.0, step:0.1} dim_1_label = 'x1' #@param {type:"string"} dim_2_label = 'x2' #@param {type:"string"} import numpy as np # all points X = np.random.uniform(0, 1, (n, 2)) # below or above line determines which category they belong to (plus noise) noise = np.random.normal(0, noise_level, n) y = X[:, 1] > m*X[:, 0]+b + noise y = y.astype(int) from matplotlib.colors import ListedColormap plt.xlabel(dim_1_label) plt.ylabel(dim_2_label) plt.title('Categories expressed as colors') plt.scatter(X[:,0], X[:,1], c=y, cmap=ListedColormap(['#FF6666', '#6666FF']), marker='o', edgecolors='k'); # + [markdown] id="s0dSyyNzSOtE" colab_type="text" # ## Load our two competing models # + colab_type="code" id="u8IonVMfAelY" outputId="13c64c5e-47f0-40ab-9f73-c86e6d2210e9" colab={"base_uri": "https://localhost:8080/", "height": 52} try: # # %tensorflow_version only exists in Colab. 
    # %tensorflow_version 2.x
except Exception:
    pass

import tensorflow as tf
tf.__version__

# + colab_type="code" id="a9IjwBVlvnCk" outputId="f48995f0-c758-451a-d7b6-06866465cb77" colab={"base_uri": "https://localhost:8080/", "height": 34}
x = tf.constant(X, dtype='float32')
y_true = tf.constant(y, dtype='float32')

x.shape, y.shape

# + id="B_M2IRlUQ1ap" colab_type="code" outputId="1dca8166-48db-4cf5-e806-bed5e548a965" colab={"base_uri": "https://localhost:8080/", "height": 192}
# !curl -O https://raw.githubusercontent.com/DJCordhose/ml-workshop/master/notebooks/tf2/linear-mse.h5
# !curl -O https://raw.githubusercontent.com/DJCordhose/ml-workshop/master/notebooks/tf2/sigmoid-crossentropy.h5
# !ls -l

# + id="heP1B2sU5Rf_" colab_type="code" outputId="be1d243a-9cdd-4a6c-e3b7-9e4ddcd71dfa" colab={"base_uri": "https://localhost:8080/", "height": 499}
model_linear_mse = tf.keras.models.load_model('linear-mse.h5')

y_pred_model_linear_mse = model_linear_mse.predict(x)

plt.hist(y_pred_model_linear_mse, bins=n)
plt.title('Distribution of predictions for linear model using MSE');

# + id="HJxV8Dc6_XPF" colab_type="code" outputId="04fe9ca2-bf72-4856-dee2-e6b3418f65e8" colab={"base_uri": "https://localhost:8080/", "height": 571}
model_sigmoid_crossentropy = tf.keras.models.load_model('sigmoid-crossentropy.h5')

y_pred_model_sigmoid_crossentropy = model_sigmoid_crossentropy.predict(x)

plt.hist(y_pred_model_sigmoid_crossentropy, bins=n)
plt.title('Distribution of predictions for sigmoid model using crossentropy');

# + [markdown] id="l-1mjovL10O3" colab_type="text"
# ## Basic Metrics
#
# * https://en.wikipedia.org/wiki/Precision_and_recall
# * https://en.wikipedia.org/wiki/F1_score

# + [markdown] id="uEpOlk5TZqhX" colab_type="text"
# ### Most typical metrics
#
# * _Precision_: ability not to label a sample as positive (1) that is negative (0)
#
# * _Recall_: ability to find all the positive (1) samples
#
# * _F1-Score_
#   * harmonic mean of the precision and recall
#   * harmonic mean is a good mean for ratios
#   * best value at 1 and worst at 0
#
# * _Support_: number of occurrences of samples for each class
#

# + id="ufaBsWxjQgH7" colab_type="code" colab={}
# TensorFlow: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/metrics
# Scikit-learn: https://scikit-learn.org/stable/modules/model_evaluation.html
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html

from sklearn.metrics import precision_recall_fscore_support, classification_report

# + [markdown] id="eFqiyFo-UBh_" colab_type="text"
# ### Scores for linear model with MSE, higher is better

# + id="aS-c3o3gTBrs" colab_type="code" outputId="f6d151ce-e196-45b6-c181-5593547aa120" colab={"base_uri": "https://localhost:8080/", "height": 87}
precision, recall, f1_score, support = precision_recall_fscore_support(y_true, (y_pred_model_linear_mse > 0.5).astype(int).reshape(-1))
precision, recall, f1_score, support

# + id="q55OMzGXXcSM" colab_type="code" outputId="5fdebb26-78a2-4284-fd13-711c455722d5" colab={"base_uri": "https://localhost:8080/", "height": 174}
print(classification_report(y_true, (y_pred_model_linear_mse > 0.5).astype(int).reshape(-1)))

# + [markdown] id="1p6fHOKIUHWD" colab_type="text"
# ### Scores for sigmoid model with cross entropy, higher is better

# + id="r6fIOXBqTD1l" colab_type="code" outputId="825ac474-e245-4bca-fefc-72f12b3442ad" colab={"base_uri": "https://localhost:8080/", "height": 87}
precision, recall, f1_score, support = precision_recall_fscore_support(y_true, (y_pred_model_sigmoid_crossentropy > 0.5).astype(int).reshape(-1)) precision, recall, f1_score, support # + id="4iEjcqPfXz70" colab_type="code" outputId="88660378-248d-4bad-ec8c-b93e61434bef" colab={"base_uri": "https://localhost:8080/", "height": 174} print(classification_report(y_true, (y_pred_model_sigmoid_crossentropy > 0.5).astype(int).reshape(-1))) # + [markdown] id="_iyvuq1b5L6w" colab_type="text" # ## How would the scores change if we changed the threshold? # + [markdown] id="Q3eR9rda4Z0G" colab_type="text" # ### ROC and AUC # # _graphical plot which illustrates the performance of a binary classifier system as its discrimination threshold is varied_ # # _ROC curves typically feature true positive rate on the Y axis, and false positive rate on the X axis. This means that the top left corner of the plot is the “ideal” point - a false positive rate of zero, and a true positive rate of one. This is not very realistic, but it does mean that a larger area under the curve (AUC) is usually better._ # # * https://towardsdatascience.com/understanding-auc-roc-curve-68b2303cc9c5 # * https://developers.google.com/machine-learning/crash-course/classification/roc-and-auc # # + id="tb-sT6GXQ2yX" colab_type="code" outputId="d397bf2e-13e5-457e-fa8f-d38fbe43c663" colab={"base_uri": "https://localhost:8080/", "height": 139} # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.auc.html # https://scikit-learn.org/stable/modules/model_evaluation.html#roc-metrics from sklearn.metrics import roc_curve, auc # # roc_curve? fpr, tpr, thresholds = roc_curve(y_true, y_pred_model_linear_mse) fpr, tpr, thresholds # + id="ImYTSMC7SFtj" colab_type="code" outputId="293b8824-2f24-4900-8f2a-00649245371e" colab={"base_uri": "https://localhost:8080/", "height": 34} roc_auc = auc(fpr, tpr) roc_auc # + id="tyiiOKRFWDGM" colab_type="code" outputId="5f69bc73-ec14-4dee-9414-46be518771cd" colab={"base_uri": "https://localhost:8080/", "height": 513} # code more or less taken from # https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic, linear model using MSE') plt.legend(loc="lower right"); # + id="U9-NAmNJTLpr" colab_type="code" outputId="02e98572-14af-4fa2-ed9c-834a2461335f" colab={"base_uri": "https://localhost:8080/", "height": 513} fpr, tpr, thresholds = roc_curve(y_true, y_pred_model_sigmoid_crossentropy) roc_auc = auc(fpr, tpr) lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic, sigmoid model using crossentropy') plt.legend(loc="lower right"); # + id="pO0lhmBmVysA" colab_type="code" colab={}
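# The lines below are an added sketch (not in the original notebook): a
# precision-recall curve complements ROC when the classes are imbalanced,
# and sklearn exposes it in the same style as roc_curve.
from sklearn.metrics import precision_recall_curve

prec, rec, thresholds_pr = precision_recall_curve(y_true, y_pred_model_sigmoid_crossentropy)

plt.plot(rec, prec, color='darkorange', lw=2)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall curve, sigmoid model using crossentropy');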
notebooks/tf2/tf-keras-classifier-metrics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: venv_project_X # language: python # name: venv_project_x # --- # # Principal Component Analysis # https://jakevdp.github.io/PythonDataScienceHandbook/05.09-principal-component-analysis.html # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import seaborn as sns; sns.set() rng = np.random.RandomState(1) X = np.dot(rng.rand(2, 2), rng.randn(2, 200)).T plt.scatter(X[:, 0], X[:, 1]) plt.axis('equal'); from sklearn.decomposition import PCA pca = PCA(n_components=2) pca.fit(X) print(pca.components_) print(pca.explained_variance_) # + def draw_vector(v0, v1, ax=None): ax = ax or plt.gca() arrowprops=dict(arrowstyle='->', linewidth=2, shrinkA=0, shrinkB=0) ax.annotate('', v1, v0, arrowprops=arrowprops) # plot data plt.scatter(X[:, 0], X[:, 1], alpha=0.2) for length, vector in zip(pca.explained_variance_, pca.components_): v = vector * 3 * np.sqrt(length) draw_vector(pca.mean_, pca.mean_ + v) plt.axis('equal'); # - # ## Plot data in the feature space and in the principal components space def draw_vector(v0, v1, ax=None): ax = ax or plt.gca() arrowprops=dict(arrowstyle='->', linewidth=2, shrinkA=0, shrinkB=0) ax.annotate('', v1, v0, arrowprops=arrowprops) # + rng = np.random.RandomState(1) X = np.dot(rng.rand(2, 2), rng.randn(2, 200)).T pca = PCA(n_components=2, whiten=True) pca.fit(X) fig, ax = plt.subplots(1, 2, figsize=(16, 6)) fig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1) # plot data ax[0].scatter(X[:, 0], X[:, 1], alpha=0.2) for length, vector in zip(pca.explained_variance_, pca.components_): v = vector * 3 * np.sqrt(length) draw_vector(pca.mean_, pca.mean_ + v, ax=ax[0]) ax[0].axis('equal'); ax[0].set(xlabel='x', ylabel='y', title='input') # plot principal components X_pca = pca.transform(X) ax[1].scatter(X_pca[:, 0], X_pca[:, 1], alpha=0.2) draw_vector([0, 0], [0, 3], ax=ax[1]) draw_vector([0, 0], [3, 0], ax=ax[1]) ax[1].axis('equal') ax[1].set(xlabel='component 1', ylabel='component 2', title='principal components', xlim=(-5, 5), ylim=(-3, 3.1)) # - # ## PCA as dimensionality reduction pca = PCA(n_components=1) pca.fit(X) X_pca = pca.transform(X) print("original shape: ", X.shape) print("transformed shape:", X_pca.shape) # compute inverse transform and plot with original data X_new = pca.inverse_transform(X_pca) plt.scatter(X[:, 0], X[:, 1], alpha=0.2) plt.scatter(X_new[:, 0], X_new[:, 1], alpha=0.8) plt.axis('equal'); # ## PCA for visualization: Hand-written digits # download hand-written digits from sklearn.datasets import load_digits digits = load_digits() digits.data.shape pca = PCA(2) projected = pca.fit_transform(digits.data) print(digits.data.shape) print(projected.shape) # Plot points projected on the first two principal components plt.scatter(projected[:, 0], projected[:, 1], c=digits.target, edgecolor='none', alpha=0.5, cmap=plt.cm.get_cmap('Spectral', 10)) plt.xlabel('component 1') plt.ylabel('component 2') plt.colorbar(); # ## Choosing the number of components pca = PCA().fit(digits.data) plt.plot(np.cumsum(pca.explained_variance_ratio_)) plt.xlabel('number of components') plt.ylabel('cumulative explained variance'); pca = PCA().fit(digits.data) plt.plot(pca.explained_variance_ratio_) plt.xlabel('number of components') plt.ylabel('explained variance ratio');
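# ## Choosing components by a variance threshold # A minimal sketch building on the cumulative-variance plot above; it relies on scikit-learn's documented behavior that a float `n_components` between 0 and 1 keeps just enough components to explain that fraction of the variance. pca = PCA(0.95) # keep enough components to explain 95% of the variance projected_95 = pca.fit_transform(digits.data) print(pca.n_components_) print(projected_95.shape)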
PrincipalComponentAnalysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Plan robust trajectory for spacecraft using randUP # + # %load_ext autoreload # %autoreload 2 import sys, os sys.path.append('../..') import numpy as np import matplotlib.pyplot as plt from matplotlib import rc from scipy.spatial import ConvexHull import time from models.spacecraft import Model, SpacecraftProblem # - # ### Define Problem # + s13 = np.sqrt(1./3.) x_init = np.array([1,0.2,0.0, 1e-4,1e-4,1e-4, -0.5,0.5,-0.5,0.5, 0,0,0]) x_goal = np.array([1.2,4.8,2.0, 1e-4,1e-4,1e-4, s13,0.,s13,s13, 0,0,0]) N = 21 problem = SpacecraftProblem(x0=x_init, xgoal=x_goal, N=N) # - # ### Add obstacles problem.sphere_obstacles[0] = [[1.25, 1.5, 0.0], 0.4] problem.sphere_obstacles[1] = [[1., 3.50, 0.0], 0.4] problem.sphere_obstacles.append([[0,3.25,0.],0.4]) # ### Load model model = Model() model # ### Define controller from ccscp.ccscp_controller import CCSCPController controller = CCSCPController(model=model, problem=problem) print(controller.model.UP_config['N_MC']) print(controller.UP_config['N_MC']) print(controller.UP_config['B_feedback']) print(controller.model.UP_config['B_feedback']) # ### Plan! from time import time # + start = time() Xtraj, Utraj, UP_vals, UP_config = controller.plan(N=N, init_method='straightline') print('elapsed_time=',time()-start) start = time() Xtraj_2, Utraj_2, UP_vals_2, UP_config_2 = controller.plan(N=N, init_method='left') print('elapsed_time=',time()-start) # - # ### Plot results # + from utils.spacecraft_plot import * idx=[0,1] from matplotlib import rc from matplotlib import rcParams rcParams['font.family'] = 'serif' rcParams['font.size'] = 14 fig, ax = plt.subplots(figsize=(4, 7)) # plot trajectory for t in range(1,N): hull = ConvexHull(UP_vals['parts'][:,idx,t]) for simplex in hull.simplices: plt.plot(UP_vals['parts'][simplex,idx[0],t], UP_vals['parts'][simplex,idx[1],t], 'b--', alpha=0.7) plt.plot(Xtraj[0,:], Xtraj[1,:], 'b-o', alpha=0.8, markersize=5, linewidth=2) for obs in problem.sphere_obstacles: pos, radius = obs[0], obs[1] ax = plot_circle(ax, pos, radius, color='r', alpha=0.3) plt.text(pos[idx[0]]-0.23, pos[idx[1]]-0.1, r'$\mathcal{X}_{obs}$', fontsize=26) rc('text', usetex=True) plt.scatter(x_init[idx[0]], x_init[idx[1]], s=400, color='black', marker='+', linewidth=3) plt.scatter(x_goal[idx[0]], x_goal[idx[1]], s=400, color='black', marker='+', linewidth=3) plt.text( x_init[idx[0]]+0.2, x_init[idx[1]]-0.1, r'$x_{0}$', fontsize=32, weight="bold") plt.text( x_goal[idx[0]]-1.2, x_goal[idx[1]]+0.15, r'${x}_{N}$', fontsize=32, weight="bold") plt.arrow(x_goal[idx[0]]-0.7, x_goal[idx[1]]+0.2, 0.5, -0.1, color='black', width=0.015) ax.tick_params("both", labelsize=26) plt.xlim([-0.5,2.5]) plt.ylim([0,5.5]) # + from utils.spacecraft_plot import * idx=[0,1] from matplotlib import rc from matplotlib import rcParams rcParams['font.family'] = 'serif' rcParams['font.size'] = 14 fig, ax = plt.subplots(figsize=(4, 7)) # plot trajectory for t in range(1,N): for simplex in ConvexHull(UP_vals_2['parts'][:,idx,t]).simplices: plt.plot(UP_vals_2['parts'][simplex,idx[0],t], UP_vals_2['parts'][simplex,idx[1],t], 'r--', alpha=0.7) plt.plot(Xtraj_2[idx[0],:], Xtraj_2[idx[1],:], 'r-o', alpha=0.8, markersize=5, linewidth=2) for obs in problem.sphere_obstacles: pos, radius = obs[0], obs[1] ax = plot_circle(ax, pos, radius, color='r', alpha=0.3) 
plt.text(pos[idx[0]]-0.23, pos[idx[1]]-0.1, r'$\mathcal{X}_{obs}$', fontsize=26) rc('text', usetex=True) plt.scatter(x_init[idx[0]], x_init[idx[1]], s=400, color='black', marker='+', linewidth=3) plt.scatter(x_goal[idx[0]], x_goal[idx[1]], s=400, color='black', marker='+', linewidth=3) plt.text( x_init[idx[0]]+0.2, x_init[idx[1]]-0.1, r'$x_{0}$', fontsize=32, weight="bold") plt.text( x_goal[idx[0]]-1.2, x_goal[idx[1]]+0.15, r'${x}_{N}$', fontsize=32, weight="bold") plt.arrow(x_goal[idx[0]]-0.7, x_goal[idx[1]]+0.2, 0.5, -0.1, color='black', width=0.015) ax.tick_params("both", labelsize=26) plt.xlim([-0.5,2.5]) plt.ylim([0,5.5]) # + from utils.spacecraft_plot import * idx=[0,1] from matplotlib import rc from matplotlib import rcParams rcParams['font.family'] = 'serif' rcParams['font.size'] = 14 fig, ax = plt.subplots(figsize=(4, 9)) # plot trajectory for t in range(1,N): for simplex in ConvexHull(UP_vals_2['parts'][:,idx,t]).simplices: plt.plot(UP_vals_2['parts'][simplex,idx[0],t], UP_vals_2['parts'][simplex,idx[1],t], 'r--', alpha=0.6) for simplex in ConvexHull(UP_vals['parts'][:,idx,t]).simplices: plt.plot(UP_vals['parts'][simplex,idx[0],t], UP_vals['parts'][simplex,idx[1],t], 'b--', alpha=1) plt.plot(Xtraj_2[idx[0],:], Xtraj_2[idx[1],:], 'r-o', alpha=0.7, markersize=5, linewidth=2) plt.plot(Xtraj[idx[0],:], Xtraj[idx[1],:], 'b-o', alpha=1, markersize=5, linewidth=2) for obs in problem.sphere_obstacles: pos, radius = obs[0], obs[1] ax = plot_circle(ax, pos, radius, color='r', alpha=0.25) plt.text(pos[idx[0]]-0.23, pos[idx[1]]-0.1, r'$\mathcal{X}_{obs}$', fontsize=26) rc('text', usetex=True) plt.scatter(x_init[idx[0]], x_init[idx[1]], s=400, color='black', marker='+', linewidth=3) plt.scatter(x_goal[idx[0]], x_goal[idx[1]], s=400, color='black', marker='+', linewidth=3) plt.text( x_init[idx[0]]+0.2, x_init[idx[1]]-0.1, r'$x_{0}$', fontsize=32, weight="bold") plt.text( x_goal[idx[0]]-1.2, x_goal[idx[1]]+0.15, r'${x}_{N}$', fontsize=32, weight="bold") plt.arrow(x_goal[idx[0]]-0.7, x_goal[idx[1]]+0.2, 0.5, -0.1, color='black', width=0.015) ax.tick_params("both", labelsize=26) plt.xlim([-0.5,2.1]) plt.ylim([-0.25,5.75]) # - problem.sphere_obstacles[0] # ### positions plt.plot(np.arange(len(Xtraj[0,:])), Xtraj[0,:], label='x') plt.plot(np.arange(len(Xtraj[0,:])), Xtraj[1,:], label='y') plt.plot(np.arange(len(Xtraj[0,:])), Xtraj[2,:], label='z') plt.legend() # ### quaternions plt.plot(np.arange(len(Xtraj[0,:])), Xtraj[6,:]) plt.plot(np.arange(len(Xtraj[0,:])), Xtraj[7,:]) plt.plot(np.arange(len(Xtraj[0,:])), Xtraj[8,:]) plt.plot(np.arange(len(Xtraj[0,:])), Xtraj[9,:]) # ### controls plt.plot(np.arange(len(Utraj[0,:])), Utraj[0,:]) plt.plot(np.arange(len(Utraj[0,:])), Utraj[1,:]) plt.plot(np.arange(len(Utraj[0,:])), Utraj[2,:]) # ## Parameters sensitivity analysis vec_t_close = np.array([8, 14]) vec_j_close = [] for t_close in vec_t_close: t = t_close if t_close < 10: obs_close = problem.sphere_obstacles[0] else: obs_close = problem.sphere_obstacles[1] pos_obs = obs_close[0][:2] # replicate to compute distances N_MC = UP_vals['parts'].shape[0] pos_obs = np.repeat(np.array([pos_obs]), N_MC, axis=0) # compute distances pos_t_randUP = UP_vals['parts'][:,idx,t] dists_to_obs = np.linalg.norm(pos_t_randUP-pos_obs, axis=1) j_closest = np.argmin(dists_to_obs) vec_j_close.append(j_closest) # print('t close = ', t_close) print('mass closest = ', model.masses_MC[j_closest]) print('J00 closest = ', model.Js_MC[j_closest,0,0]) print('J11 closest = ', model.Js_MC[j_closest,1,1]) print('J22 closest = ', 
model.Js_MC[j_closest,2,2]) print('w_2 closest = ', 1e3 * model.ws_MC[j_closest,0,:3], ' (pos)') print('w_2 closest = ', 1e3 * model.ws_MC[j_closest,0,3:6], ' (vel)') model.ws_MC[j_closest,6,3:6] * 1e3 vec_j_close # + from utils.spacecraft_plot import * from utils.viz import plot_pts_2d, plot_trajs_2d idx=[0,1] from matplotlib import rc from matplotlib import rcParams rcParams['font.family'] = 'serif' rcParams['font.size'] = 14 fig, ax = plt.subplots(figsize=(4, 9)) # plot trajectory for t in range(1,N): if t in vec_t_close: j_closest = vec_j_close[np.argwhere(vec_t_close==t)[0,0]] plot_pts_2d(UP_vals['parts'][:,idx,t], idx=idx, color='c', alpha=0.6, markerwidth=10) for simplex in ConvexHull(UP_vals['parts'][:,idx,t]).simplices: plt.plot(UP_vals['parts'][simplex,idx[0],t], UP_vals['parts'][simplex,idx[1],t], 'b--', alpha=1) plt.scatter(UP_vals['parts'][j_closest,idx[0],t], UP_vals['parts'][j_closest,idx[1],t], color='r', s=70) plt.plot(Xtraj[idx[0],:], Xtraj[idx[1],:], 'b-o', alpha=1, markersize=5, linewidth=2) for obs in problem.sphere_obstacles: pos, radius = obs[0], obs[1] ax = plot_circle(ax, pos, radius, color='r', alpha=0.25) plt.text(pos[idx[0]]-0.18, pos[idx[1]]-0.08, r'$\mathcal{X}_{obs}$', fontsize=35) rc('text', usetex=True) plt.scatter(x_init[idx[0]], x_init[idx[1]], s=400, color='black', marker='+', linewidth=3) plt.scatter(x_goal[idx[0]], x_goal[idx[1]], s=400, color='black', marker='+', linewidth=3) ax.tick_params("both", labelsize=26) plt.xlim([0.5,2.]) plt.ylim([1.,4.25])
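# ### Sanity check: particle-to-obstacle distances # A hedged sketch, not part of the original experiment: verify in the plotted (x, y) plane that no randUP particle penetrates any spherical obstacle at any time step; a positive margin means the plotted hulls clear the obstacle. for obs_pos, obs_radius in problem.sphere_obstacles: center = np.asarray(obs_pos)[:2] # obstacle center projected onto the plotted plane dists = np.linalg.norm(UP_vals['parts'][:, idx, 1:N] - center[None, :, None], axis=1) print('margin:', dists.min() - obs_radius)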
exps/spacecraft/3spheres_2homotopyClasses.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # `ApJdataFrames` Muzic2015 # --- # `Title`: SUBSTELLAR OBJECTS IN NEARBY YOUNG CLUSTERS (SONYC) IX: THE PLANETARY-MASS DOMAIN OF CHAMAELEON-I AND UPDATED MASS FUNCTION IN LUPUS-3 # `Authors`: <NAME>. # # Data is from this paper: # http://iopscience.iop.org/article/10.1088/0004-637X/810/2/159/meta # + # %pylab inline import seaborn as sns sns.set_context("notebook", font_scale=1.5) #import warnings #warnings.filterwarnings("ignore") # - import pandas as pd # ## Table 2 - Photometric Candidates Included in the Spectroscopic Follow-up with Sofi # *Awful* multiple-line formatting. Whyyyy? # # **Data Cleaning:** # You have to do a regex search and replace using `\n{8}\t` as the search match, and replace with nothing. Once that is complete, trim the footer and then save the file. (A scripted sketch of this cleanup appears at the end of this notebook.) # + #tbl2 = pd.read_csv("http://iopscience.iop.org/0004-637X/810/2/159/suppdata/apj517985t2_ascii.txt", # sep='\t|\n{8}', skiprows=11, skipfooter=3, engine='python', na_values="cdots") #tbl2 # - names = ['id','alpha(J2000)','delta(J2000)','Date','Slit','I','J','K','Comments'] tbl2 = pd.read_csv('../data/Muzic2015/muzic2015_tbl2.txt', sep='\t', na_values='cdots', names=names, header=1, index_col=False) tbl2 # The $I-$band data is Cousins $I$ for Cha, and DENIS $i$ for Lupus. tbl2.to_csv("../data/Muzic2015/tbl2.csv", index=False) # ## Plots I_J = tbl2.I - tbl2.J sns.distplot(I_J, axlabel='$I-J$')
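# ## Scripted version of the data cleaning # A sketch of the regex cleanup described above, assuming the raw ASCII table has been downloaded to a local file first (the input path is a hypothetical stand-in for wherever the raw download was saved): # + import re with open('../data/Muzic2015/apj517985t2_ascii.txt') as f: # hypothetical path to the raw download raw = f.read() cleaned = re.sub(r'\n{8}\t', '', raw) # remove the 8-newline-plus-tab runs described above with open('../data/Muzic2015/muzic2015_tbl2.txt', 'w') as f: f.write(cleaned) # the footer still has to be trimmed by hand # -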
notebooks/Muzic2015.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="12IqtL0xW4ay" import pandas as pd import seaborn as sns # + id="ahKDbBzRYekv" df = pd.read_csv("/content/drive/MyDrive/dataset_aaic/demo_data/3.concertriccir2.csv", names = ['x', 'y', 'class']) # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="JnfKuSv0Yiqb" outputId="ef482436-dffe-4060-e6ff-9c6b8d8e9d82" df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 350} id="lhUoFpqTYjoK" outputId="470191d5-1114-433e-b761-03a33feea320" x = df.iloc[:, 0:2] y = df['class'] sns.scatterplot(df['x'],df['y'], hue = df['class']) # + id="BtCWAQqJY3am" from sklearn.model_selection import train_test_split, cross_val_score, KFold, GridSearchCV from sklearn.metrics import accuracy_score, confusion_matrix from sklearn.pipeline import make_pipeline from sklearn.svm import SVC # + id="UvQQVwR2eQ-e" x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.2) # + id="bcaMlYpVhy0Z" params = {'C':list(range(1,100)), "kernel":['linear', 'poly', 'rbf', 'sigmoid']} # + id="gUL8VFJQ8ZTp" gs = GridSearchCV(SVC(), params, n_jobs=-1, verbose=1) # + colab={"base_uri": "https://localhost:8080/"} id="Smxfvdnv9ek1" outputId="0b4ff244-df51-4d9e-ff62-c31677d5eaad" best_model = gs.fit(x_train, y_train) # + colab={"base_uri": "https://localhost:8080/"} id="IIh1Fjcv-pOO" outputId="f9be6440-ac32-4c48-9098-2a3c3d2868ed" print(best_model.best_estimator_) # + colab={"base_uri": "https://localhost:8080/"} id="2CtHtFpMCUSn" outputId="39cf707b-e855-457b-e990-8e49c61ec21f" svc = best_model.best_estimator_.fit(x_train, y_train) svc_pred = svc.predict(x_test) svcaccuracy = accuracy_score(y_test, svc_pred) print(svcaccuracy) # + colab={"base_uri": "https://localhost:8080/"} id="QD2E1frDDT32" outputId="4bee3e0f-d6ab-4753-d0c5-43bb0af36e10" confusion_matrix(y_test, svc_pred) # + id="lvZlwyq_KT_B"
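# + # A hedged visualization sketch, not in the original notebook: the data are concentric circles, so the grid search should favor a non-linear kernel; plotting the fitted decision regions of the best estimator makes that visible. import numpy as np import matplotlib.pyplot as plt xx, yy = np.meshgrid(np.linspace(x['x'].min() - 1, x['x'].max() + 1, 200), np.linspace(x['y'].min() - 1, x['y'].max() + 1, 200)) zz = svc.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape) # predicted class on a dense grid plt.contourf(xx, yy, zz, alpha=0.3) sns.scatterplot(df['x'], df['y'], hue=df['class'])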
SVM/svc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 # + import argparse import os import torch import pytorch_lightning as pl import matplotlib.pyplot as plt from tqdm.notebook import tqdm, trange from diffmask.utils.callbacks import CallbackToyTask, CallbackToyTaskDiffMask from diffmask.models.toy_task import ToyTaskModel from diffmask.models.toy_task_diffmask import ToyTaskModelDiffMask from diffmask.attributions.integrated_gradient import integrated_gradient from diffmask.attributions.schulz import schulz_explainer, toy_hidden_states_statistics from diffmask.attributions.guan import guan_explainer from diffmask.utils.getter_setter import toy_getter, toy_setter # - # # Train the Toy Task model # + parser = argparse.ArgumentParser() parser.add_argument("--gpu", type=str, default="0") parser.add_argument("--epochs", type=int, default=50) parser.add_argument("--num_embeddings", type=int, default=10) parser.add_argument("--embedding_dim", type=int, default=64) parser.add_argument("--hidden_size", type=int, default=64) parser.add_argument("--data_size", type=int, default=10000) parser.add_argument("--data_length", type=int, default=10) parser.add_argument("--batch_size", type=int, default=128) parser.add_argument("--learning_rate", type=float, default=1e-3) parser.add_argument("--learning_rate_alpha", type=float, default=3e-1) parser.add_argument("--eps", type=float, default=5e-2) parser.add_argument("--seed", type=float, default=0) parser.add_argument( "--model_path", type=str, default="./outputs/toy/version_2/checkpoints", ) hparams, _ = parser.parse_known_args() torch.manual_seed(hparams.seed) os.environ["CUDA_VISIBLE_DEVICES"] = hparams.gpu # - model_orig = ToyTaskModel(hparams) trainer = pl.Trainer( gpus=int(hparams.gpu != ""), progress_bar_refresh_rate=10, max_epochs=hparams.epochs, logger=pl.loggers.TensorBoardLogger("outputs", name="toy"), callbacks=[CallbackToyTask()] ) trainer.fit(model_orig) hparams.model_path_ = os.path.join(hparams.model_path, os.listdir(hparams.model_path)[0]) model_orig = ToyTaskModel.load_from_checkpoint(hparams.model_path_) model_orig.prepare_data() # # Create an example # + device = "cpu:0" model_orig.to(device) i = 0 query_ids, input_ids, mask, labels = model_orig.val_dataset[:] filtered = ( (labels == 1) & (mask.sum(-1) == 6) & (input_ids == query_ids[:,0].unsqueeze(-1)).any(-1) & (input_ids == query_ids[:,1].unsqueeze(-1)).any(-1) ) query_ids, input_ids, mask, labels = tuple(e[filtered][i].unsqueeze(0).to(device) for e in model_orig.val_dataset[:]) attributions_gt = ((input_ids == query_ids[0, 0]) | (input_ids == query_ids[0, 1])).float().cpu() query_ids, input_ids, mask, labels, attributions_gt # - def plot_attribution( query_ids, input_ids, attributions, attributions_gt, name=None, legend=False, save=False, ymax=1 ): fig, axs = plt.subplots(1, 1,figsize=(6, 4), sharex=True) fig.patch.set_facecolor('white') plt.xticks( torch.arange(input_ids.shape[-1]), [r"$\bf{{{}}}$".format(e) if e in query_ids else "${}$".format(e) for e in input_ids], size=32 ) axs.bar( torch.arange(input_ids.shape[-1]), attributions / attributions.sum(), label="attribution" ) axs.locator_params(axis='y', nbins=5) axs.axis(ymin=0, ymax=ymax) axs.tick_params(axis='y', labelsize=24) if save: plt.savefig(name, bbox_inches="tight", pad_inches=0) else: plt.show() hidden_state_idx = 1 
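# A hedged sanity check, not in the original notebook: before attributing, confirm the trained model actually predicts the positive label for the selected example. This assumes `toy_getter` returns `(outputs, hidden_states)` with `outputs` being logits, matching how it is used further down in this notebook. outputs, _ = toy_getter(model_orig, {"query_ids": query_ids, "input_ids": input_ids, "mask": mask, "labels": labels}) print(torch.sigmoid(outputs), labels)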
# # Sundararajan et al. (2017) - Integrated Gradient attributions_ig = integrated_gradient( model_orig, {"query_ids": query_ids, "input_ids": input_ids, "mask": mask, "labels": labels}, getter=toy_getter, setter=toy_setter, label_getter=lambda outputs, inputs_dict: outputs, hidden_state_idx=hidden_state_idx, steps=500, ).sum(-1).abs().cpu() plot_attribution( query_ids[0], input_ids[0,:mask.sum()], attributions_ig[0,:mask.sum()], attributions_gt[0,:mask.sum()], ymax=1.1 ) # # Schulz et al. (2020) all_q_z_loc, all_q_z_scale = toy_hidden_states_statistics(model_orig) attributions_schulz = schulz_explainer( model_orig, {"query_ids": query_ids, "input_ids": input_ids, "labels": labels, "mask": mask}, getter=toy_getter, setter=toy_setter, q_z_loc=all_q_z_loc[hidden_state_idx].to(device).unsqueeze(0).detach(), q_z_scale=all_q_z_scale[hidden_state_idx].to(device).unsqueeze(0).detach(), loss_fn=lambda outputs, hidden_states, inputs_dict: torch.nn.functional.binary_cross_entropy_with_logits( outputs, inputs_dict["labels"].float(), reduction="none" ).mean(-1), loss_kl_fn=lambda kl, inputs_dict: (( kl * inputs_dict["mask"]).sum(-1) / inputs_dict["mask"].sum(-1)).mean(-1), hidden_state_idx=hidden_state_idx, steps=500, lr=1e-1, la=10, ).cpu() plot_attribution( query_ids[0], input_ids[0,:mask.sum()], attributions_schulz[0,:mask.sum()], attributions_gt[0,:mask.sum()], ymax=1.1 ) # # Guan et al. (2019) attributions_guan = 1 / guan_explainer( model_orig, {"query_ids": query_ids, "input_ids": input_ids, "labels": labels, "mask": mask}, getter=toy_getter, setter=toy_setter, s_fn=lambda outputs, hidden_states: outputs, loss_l2_fn=lambda s, inputs_dict: sum(s_i.sum(-1).mean(-1) for s_i in s), loss_h_fn=lambda h, inputs_dict: (h * inputs_dict["mask"]).sum(-1).mean(-1), hidden_state_idx=hidden_state_idx, steps=500, lr=1e-1, la=1, ).cpu() plot_attribution( query_ids[0], input_ids[0,:mask.sum()], attributions_guan[0,:mask.sum()], attributions_gt[0,:mask.sum()], ymax=1.1, ) # # Plotting latent space # + _, hidden_states = toy_getter(model_orig,{ "query_ids": model_orig.val_dataset[:][0].to(device), "input_ids": model_orig.val_dataset[:][1].to(device), "mask": model_orig.val_dataset[:][2].to(device), "labels": model_orig.val_dataset[:][3].to(device), },) hidden_state = hidden_states[2].flatten(0, 1).detach().cpu()[ model_orig.val_dataset[:][2].flatten(0, 1).bool() ] plt.figure(figsize=(6, 6)) mask0 = (model_orig.val_dataset[:][1] == model_orig.val_dataset[:][0][:,:1]).flatten(0, 1)[ model_orig.val_dataset[:][2].flatten(0, 1).bool() ] plt.scatter(*hidden_state[mask0].T, marker="x", s=100, label="$=m$") mask1 = (model_orig.val_dataset[:][1] == model_orig.val_dataset[:][0][:,1:]).flatten(0, 1)[ model_orig.val_dataset[:][2].flatten(0, 1).bool() ] plt.scatter(*hidden_state[mask1].T, marker="+", s=100, label="$=n$") plt.scatter(*hidden_state[~mask0 & ~mask1].T, marker=".", s=100, label=r"$\neq m,n$") plt.legend(fontsize=16, ) plt.tick_params(axis='both', labelsize=16) # plt.savefig("plots/toy-hidden-projection.pdf", bbox_inches="tight", pad_inches=0) plt.show() # - # # Training DiffMask model = ToyTaskModelDiffMask(hparams) model.load_state_dict(model_orig.state_dict(), strict=False) model.train_dataset = model_orig.train_dataset model.val_dataset = model_orig.val_dataset model.hparams.learning_rate = 3e-4 model.hparams.learning_rate_alpha = 3e-1 model.hparams.epochs = 100 model.hparams.eps = 0.05 # + _, hidden_states = toy_getter(model_orig,{ "query_ids": model_orig.val_dataset[:][0].to(device), "input_ids": 
model_orig.val_dataset[:][1].to(device), "mask": model_orig.val_dataset[:][2].to(device), "labels": model_orig.val_dataset[:][3].to(device), },) model.placeholder[0,0,0].data[:] = hidden_states[1].flatten(0, 1).detach().cpu()[ model_orig.val_dataset[:][2].flatten(0, 1).bool() ].mean(0) model.placeholder[0,1,0].data[:2] = hidden_states[2].flatten(0, 1).detach().cpu()[ model_orig.val_dataset[:][2].flatten(0, 1).bool() ].mean(0) # + # Uncomment to load a trained model # hparams.model_path = "./outputs/toy-diffmask/version_0/checkpoints" # hparams.model_path_ = os.path.join(hparams.model_path, os.listdir(hparams.model_path)[0]) # model = ToyTaskModelDiffMask.load_from_checkpoint(hparams.model_path_).to(device) # model.to(device) # model.train_dataset = model_orig.train_dataset # model.val_dataset = model_orig.val_dataset # - trainer = pl.Trainer( gpus=int(hparams.gpu != ""), progress_bar_refresh_rate=10, max_epochs=hparams.epochs, logger=pl.loggers.TensorBoardLogger("outputs", name="toy-diffmask"), callbacks=[CallbackToyTaskDiffMask()], ) trainer.fit(model) # + model.to(device) attributions_ours = [ model.forward_explainer( query_ids=query_ids, input_ids=input_ids, mask=mask, layer_drop=0, layer_pred=0, )[3].exp().cpu().detach(), model.forward_explainer( query_ids=query_ids, input_ids=input_ids, mask=mask, layer_drop=1, layer_pred=1, )[3].exp().cpu().detach(), ] # + plot_attribution( query_ids[0], input_ids[0,:mask.sum()], attributions_ours[0][0,:mask.sum()], attributions_gt[0,:mask.sum()], name="plots/toy-ours-input.pdf", ymax=1.1, ) plot_attribution( query_ids[0], input_ids[0,:mask.sum()], attributions_ours[1][0,:mask.sum()], attributions_gt[0,:mask.sum()], name="plots/toy-ours-hidden.pdf", ymax=1.1, ) # + plot_attribution( query_ids[0], input_ids[0,:mask.sum()], attributions_ig[0,:mask.sum()], attributions_gt[0,:mask.sum()], name="plots/toy-ig.pdf", ymax=1.1, save=True ) plot_attribution( query_ids[0], input_ids[0,:mask.sum()], attributions_schulz[0,:mask.sum()], attributions_gt[0,:mask.sum()], "plots/toy-schulz.pdf", ymax=1.1, save=True ) plot_attribution( query_ids[0], input_ids[0,:mask.sum()], attributions_guan[0,:mask.sum()], attributions_gt[0,:mask.sum()], "plots/toy-guan.pdf", ymax=1.1, save=True ) plot_attribution( query_ids[0], input_ids[0,:mask.sum()], attributions_ours[0][0,:mask.sum()], attributions_gt[0,:mask.sum()], "plots/toy-ours-input.pdf", ymax=1.1, save=True, legend=True ) plot_attribution( query_ids[0], input_ids[0,:mask.sum()], attributions_ours[1][0,:mask.sum()], attributions_gt[0,:mask.sum()], "plots/toy-ours-hidden.pdf", ymax=1.1, save=True, legend=True ) plot_attribution( query_ids[0], input_ids[0,:mask.sum()], torch.tensor([1.] 
+ [0] * (mask.sum().item() - 1)), attributions_gt[0,:mask.sum()], "plots/toy-exact.pdf", ymax=1.1, save=True, legend=True ) # - # # Evaluate on all validation set hidden_state_idx = 2 attributions_ig_all = integrated_gradient( model_orig, { "query_ids": model_orig.val_dataset[:][0].to(device), "input_ids": model_orig.val_dataset[:][1].to(device), "mask": model_orig.val_dataset[:][2].to(device), "labels": model_orig.val_dataset[:][3].to(device), }, getter=toy_getter, setter=toy_setter, label_getter=lambda outputs, inputs_dict: outputs, hidden_state_idx=hidden_state_idx, steps=500, ).sum(-1).abs().cpu() * model_orig.val_dataset[:][2] attributions_schulz_all = schulz_explainer( model_orig, { "query_ids": model.val_dataset[:][0].to(device), "input_ids": model.val_dataset[:][1].to(device), "mask": model.val_dataset[:][2].to(device), "labels": model.val_dataset[:][3].to(device), }, getter=toy_getter, setter=toy_setter, q_z_loc=all_q_z_loc[hidden_state_idx].to(device).unsqueeze(0).detach(), q_z_scale=all_q_z_scale[hidden_state_idx].to(device).unsqueeze(0).detach(), loss_fn=lambda outputs, hidden_states, inputs_dict: torch.nn.functional.binary_cross_entropy_with_logits( outputs, inputs_dict["labels"].float(), reduction="none" ).mean(-1), loss_kl_fn=lambda kl, inputs_dict: kl.mean(-1).mean(-1), hidden_state_idx=hidden_state_idx, steps=500, lr=1e-1, la=10, ).cpu() * model.val_dataset[:][2] attributions_guan_all = 1 / guan_explainer( model_orig, { "query_ids": model.val_dataset[:][0].to(device), "input_ids": model.val_dataset[:][1].to(device), "mask": model.val_dataset[:][2].to(device), "labels": model.val_dataset[:][3].to(device), }, getter=toy_getter, setter=toy_setter, s_fn=lambda outputs, hidden_states: outputs, loss_l2_fn=lambda s, inputs_dict: sum(s_i.sum(-1).mean(-1) for s_i in s), loss_h_fn=lambda h, inputs_dict: h.sum(-1).mean(-1), hidden_state_idx=hidden_state_idx, steps=500, lr=1e-1, la=1, ).cpu() * model.val_dataset[:][2] attributions_ours_all = model.forward_explainer( query_ids=model.val_dataset[:][0].to(device), input_ids=model.val_dataset[:][1].to(device), mask=model.val_dataset[:][2].to(device), layer_drop=1, layer_pred=1, )[3].exp().cpu() * model.val_dataset[:][2] attributions_gt_all = ((model.val_dataset[:][1] == model.val_dataset[:][0][:,:1]) | ( model.val_dataset[:][1] == model.val_dataset[:][0][:,1:])).float().cpu() * model.val_dataset[:][2] attributions_exact_tmp = (torch.where( model.val_dataset[:][3].unsqueeze(-1).bool(), (model.val_dataset[:][1] == model.val_dataset[:][0][:,:1]).float(), (model.val_dataset[:][1] == model.val_dataset[:][0][:,1:]).float(), ) * model.val_dataset[:][2]) attributions_exact = torch.zeros_like(attributions_exact_tmp) attributions_exact[attributions_exact_tmp.sum(-1) > 0, attributions_exact_tmp[attributions_exact_tmp.sum(-1) > 0].argmax(-1),] = 1 import pandas as pd from collections import defaultdict # + metrics = defaultdict(dict) for method_name, attr in zip( ("exact", "ig", "cv", "nlp", "ours"), (attributions_exact, attributions_ig_all, attributions_schulz_all, attributions_guan_all, attributions_ours_all) ): attr = attr[attributions_gt_all.sum(-1) > 0] p = torch.distributions.Categorical(probs=attr / attr.sum(-1, keepdim=True)) q = torch.distributions.Categorical( probs=(attributions_gt_all / attributions_gt_all.sum(-1, keepdim=True))[attributions_gt_all.sum(-1) > 0] ) m = torch.distributions.Categorical(probs=0.5 * (p.probs + q.probs)) metrics[method_name]["$D_\mathrm{KL}$"] = torch.distributions.kl_divergence(q, p) 
metrics[method_name]["$D_\mathrm{JS}$"] = 0.5 * ( torch.distributions.kl_divergence(p, m) + torch.distributions.kl_divergence(q, m) ) # - pd.DataFrame( {k: {kk: "{:.4f} +/- {:.3f}".format(vv.mean().item(), vv.std().item()) for kk, vv in v.items()} for k, v in metrics.items()} ).T # + name_map = { "exact": "Exact", "ig": "\citet{sundararajan2017axiomatic}", "cv": "\citet{schulz2020restricting}", "nlp": "\citet{guan2019towards}", "ours": "\textsc{DiffMask}", } print( pd.DataFrame( {name_map[k]: {kk: "${:.2f}$".format(vv.mean().item(), vv.std().item()) for kk, vv in v.items() } for k, v in metrics.items() } ).T.to_latex() .replace("\$", "$") .replace(r"\textbackslash ", "\\") .replace("\{", "{") .replace("\}", "}") .replace("\_", "_") ) # -
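# For reference, the two metrics reported above are the Kullback-Leibler and Jensen-Shannon divergences between the ground-truth attribution distribution $q$ and each method's predicted distribution $p$: $D_\mathrm{KL}(q \| p)$ and $D_\mathrm{JS}(p, q) = \tfrac{1}{2} D_\mathrm{KL}(p \| m) + \tfrac{1}{2} D_\mathrm{KL}(q \| m)$ with the mixture $m = \tfrac{1}{2}(p + q)$, exactly as computed in the metrics cell above.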
ToyTask.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <center>PIPELINE</center> # + active="" # - ADH: Atlas of Human Development # - # # ADH # # The Atlas of Human Development in Brazil (Atlas do Desenvolvimento Humano no Brasil) is a site that provides the Municipal Human Development Index (IDHM) and 200 other indicators on demographics, education, income, labor, housing, and vulnerability for Brazilian municipalities. # # https://basedosdados.org/en/dataset/mundo-onu-adh # # https://basedosdados.org/en/dataset/mundo-onu-adh/resource/2fa04257-b35c-4a6a-a0db-69ae665318ac import os import pandas as pd # ! pip install basedosdados import basedosdados as bd # To load the data directly into pandas df = bd.read_table(dataset_id='mundo_onu_adh', table_id='municipios', billing_project_id='lgdccovid19malaria') df output_path = os.path.join(os.getcwd(), 'data_06') output_file = 'data_06_brazil_indicators.csv' df.to_csv(os.path.join(output_path, output_file))
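# A hedged alternative, not part of the original pipeline: to avoid downloading the full table, `basedosdados` can also run a SQL query against the same BigQuery table (the table path below assumes the usual `basedosdados.<dataset_id>.<table_id>` convention). query = 'SELECT * FROM `basedosdados.mundo_onu_adh.municipios` LIMIT 1000' df_small = bd.read_sql(query, billing_project_id='lgdccovid19malaria') df_small.head()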
sprint_01_data_collection/data_collection_01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # This cell is added by sphinx-gallery # !pip install mrsimulator --quiet # %matplotlib inline import mrsimulator print(f'You are using mrsimulator v{mrsimulator.__version__}') # - # # # ¹⁷O 2D DAS NMR of Coesite # # Coesite is a high-pressure (2-3 GPa) and high-temperature (700°C) polymorph of silicon # dioxide $\text{SiO}_2$. Coesite has five crystallographic $^{17}\text{O}$ # sites. The experimental dataset used in this example is published in # Grandinetti `et al.` [#f1]_ # # # + import numpy as np import csdmpy as cp import matplotlib.pyplot as plt from lmfit import Minimizer from mrsimulator import Simulator from mrsimulator.methods import Method2D from mrsimulator import signal_processing as sp from mrsimulator.utils import spectral_fitting as sf from mrsimulator.utils import get_spectral_dimensions from mrsimulator.utils.collection import single_site_system_generator from mrsimulator.method.event import SpectralEvent # - # ## Import the dataset # # # + filename = "https://sandbox.zenodo.org/record/814455/files/DASCoesite.csdf" experiment = cp.load(filename) # standard deviation of noise from the dataset sigma = 921.6698 # For spectral fitting, we only focus on the real part of the complex dataset experiment = experiment.real # Convert the coordinates along each dimension from Hz to ppm. _ = [item.to("ppm", "nmr_frequency_ratio") for item in experiment.dimensions] # plot of the dataset. max_amp = experiment.max() levels = (np.arange(14) + 1) * max_amp / 15 # contours are drawn at these levels. options = dict(levels=levels, alpha=0.75, linewidths=0.5) # plot options plt.figure(figsize=(4.25, 3.0)) ax = plt.subplot(projection="csdm") ax.contour(experiment, colors="k", **options) ax.invert_xaxis() ax.set_ylim(30, -30) plt.grid() plt.tight_layout() plt.show() # - # ## Create a fitting model # **Guess model** # # Create a guess list of spin systems. # # # + shifts = [29, 39, 54.8, 51, 56] # in ppm Cq = [6.1e6, 5.4e6, 5.5e6, 5.5e6, 5.1e6] # in Hz eta = [0.1, 0.2, 0.15, 0.15, 0.3] abundance_ratio = [1, 1, 2, 2, 2] abundance = np.asarray(abundance_ratio) / 8 * 100 # in % spin_systems = single_site_system_generator( isotope="17O", isotropic_chemical_shift=shifts, quadrupolar={"Cq": Cq, "eta": eta}, abundance=abundance, ) # - # **Method** # # Create the DAS method. # # # + # Get the spectral dimension parameters from the experiment. spectral_dims = get_spectral_dimensions(experiment) DAS = Method2D( channels=["17O"], magnetic_flux_density=11.744, # in T spectral_dimensions=[ dict( **spectral_dims[0], events=[ SpectralEvent( fraction=0.5, rotor_angle=37.38 * 3.14159 / 180, transition_query=[{"P": [-1], "D": [0]}], ), SpectralEvent( fraction=0.5, rotor_angle=79.19 * 3.14159 / 180, transition_query=[{"P": [-1], "D": [0]}], ), ], ), # The last spectral dimension block is the direct-dimension dict( **spectral_dims[1], events=[ SpectralEvent( rotor_angle=54.735 * 3.14159 / 180, transition_query=[{"P": [-1], "D": [0]}], ) ], ), ], experiment=experiment, # also add the measurement to the method. ) # Optimize the script by pre-setting the transition pathways for each spin system from # the das method. 
for sys in spin_systems: sys.transition_pathways = DAS.get_transition_pathways(sys) # - # **Guess Spectrum** # # # + # Simulation # ---------- sim = Simulator(spin_systems=spin_systems, methods=[DAS]) sim.config.number_of_sidebands = 1 # no sidebands are required for this dataset. sim.run() # Post Simulation Processing # -------------------------- processor = sp.SignalProcessor( operations=[ # Gaussian convolution along both dimensions. sp.IFFT(dim_index=(0, 1)), sp.apodization.Gaussian(FWHM="0.15 kHz", dim_index=0), sp.apodization.Gaussian(FWHM="0.1 kHz", dim_index=1), sp.FFT(dim_index=(0, 1)), sp.Scale(factor=4e7), ] ) processed_data = processor.apply_operations(data=sim.methods[0].simulation).real # Plot of the guess Spectrum # -------------------------- plt.figure(figsize=(4.25, 3.0)) ax = plt.subplot(projection="csdm") ax.contour(experiment, colors="k", **options) ax.contour(processed_data, colors="r", linestyles="--", **options) ax.invert_xaxis() ax.set_ylim(30, -30) plt.grid() plt.tight_layout() plt.show() # - # ## Least-squares minimization with LMFIT # Use the :func:`~mrsimulator.utils.spectral_fitting.make_LMFIT_params` for a quick # setup of the fitting parameters. # # params = sf.make_LMFIT_params(sim, processor) print(params.pretty_print(columns=["value", "min", "max", "vary", "expr"])) # **Solve the minimizer using LMFIT** # # minner = Minimizer(sf.LMFIT_min_function, params, fcn_args=(sim, processor, sigma)) result = minner.minimize(method="powell") result # ## The best fit solution # # # + best_fit = sf.bestfit(sim, processor)[0] # Plot the spectrum plt.figure(figsize=(4.25, 3.0)) ax = plt.subplot(projection="csdm") ax.contour(experiment, colors="k", **options) ax.contour(best_fit, colors="r", linestyles="--", **options) ax.invert_xaxis() ax.set_ylim(30, -30) plt.grid() plt.tight_layout() plt.show() # - # ## Image plots with residuals # # # + residuals = sf.residuals(sim, processor)[0] fig, ax = plt.subplots( 1, 3, sharey=True, figsize=(10, 3.0), subplot_kw={"projection": "csdm"} ) vmax, vmin = experiment.max(), experiment.min() for i, dat in enumerate([experiment, best_fit, residuals]): ax[i].imshow(dat, aspect="auto", vmax=vmax, vmin=vmin) ax[i].invert_xaxis() ax[0].set_ylim(30, -30) plt.tight_layout() plt.show() # - # .. [#f1] <NAME>., <NAME>., <NAME>., <NAME>., # <NAME>. and <NAME>. # Solid-State $^{17}\text{O}$ Magic-Angle and Dynamic-Angle Spinning NMR # Study of the $\text{SiO}_2$ Polymorph Coesite, J. Phys. Chem. 1995, # **99**, *32*, 12341-12348. # `DOI: 10.1021/j100032a045 <https://doi.org/10.1021/j100032a045>`_ # #
docs/notebooks/fitting/2D_fitting/plot_2_Coesite_DAS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [Table of Contents](http://nbviewer.ipython.org/github/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/table_of_contents.ipynb) # # Gaussian Probabilities #format the book # %matplotlib inline from __future__ import division, print_function from book_format import load_style load_style() # ## Introduction # # The last chapter ended by discussing some of the drawbacks of the Discrete Bayesian filter. For many tracking and filtering problems our desire is to have a filter that is *unimodal* and *continuous*. That is, we want to model our system using floating point math (continuous) and to have only one belief represented (unimodal). For example, we want to say an aircraft is at (12.34, -95.54, 2389.5) where that is latitude, longitude, and altitude. We do not want our filter to tell us "it might be at (1.65, -78.01, 2100.45) or it might be at (34.36, -98.23, 2543.79)." That doesn't match our physical intuition of how the world works, and as we discussed, it can be prohibitively expensive to compute the multimodal case. And, of course, multiple position estimates makes navigating impossible. # # We desire a unimodal, continuous way to represent probabilities that models how the real world works, and that is computationally efficient to calculate. Gaussian distributions provide all of these features. # ## Mean, Variance, and Standard Deviations # # ### Random Variables # # # Each time you roll a die the *outcome* will be between 1 and 6. If we rolled a fair die a million times we'd expect to get 1 1/6 of the time. Thus we say the *probability*, or *odds* of the outcome 1 is 1/6. Likewise, if I asked you the chance of 1 being the result of the next roll you'd reply 1/6. # # This combination of values and associated probabilities is called a [*random variable*](https://en.wikipedia.org/wiki/Random_variable). Here *random* does not mean the process is nondeterministic, only that we lack information. The result of a die toss is deterministic, but we lack enough information to compute the result. We don't know what will happen, except probabilistically. # # While we are defining things, the range of values is called the [*sample space*](https://en.wikipedia.org/wiki/Sample_space). For a die the sample space is {1, 2, 3, 4, 5, 6}. For a coin the sample space is {H, T}. *Space* is a mathematical term which means a set with structure. The sample space for the die is a subset of the natural numbers in the range of 1 to 6. # # Another example of a random variable is the heights of students in a university. Here the sample space is a range of values in the real numbers between two limits defined by biology. # # Random variables such as coin tosses and die rolls are *discrete random variables*. This means their sample space is represented by either a finite number of values or a countably infinite number of values such as the natural numbers. Heights of humans are called *continuous random variables* since they can take on any real value between two limits. # # Do not confuse the *measurement* of the random variable with the actual value. If we can only measure the height of a person to 0.1 meters we would only record values from 0.1, 0.2, 0.3...2.7, yielding 27 discrete choices. 
Nonetheless a person's height can take any arbitrary real value between those limits, and so height is a continuous random variable. # # In statistics capital letters are used for random variables, usually from the latter half of the alphabet. So, we might say that $X$ is the random variable representing the die toss, or $Y$ are the heights of the students in the freshmen poetry class. Later chapters use linear algebra to solve these problems, and so there we will follow the convention of using lower case for vectors, and upper case for matrices. Unfortunately these conventions clash, and you will have to determine which an author is using from context. # ## Probability Distribution # # # The [*probability distribution*](https://en.wikipedia.org/wiki/Probability_distribution) gives the probability for the random variable to take any value in a sample space. For example, for a fair six sided die we might say: # # |Value|Probability| # |-----|-----------| # |1|1/6| # |2|1/6| # |3|1/6| # |4|1/6| # |5|1/6| # |6|1/6| # # Some sources call this the *probability function*. Using ordinary function notation, we would write: # # $$P(X{=}4) = f(4) = \frac{1}{6}$$ # # This states that the probability of the die landing on 4 is $\frac{1}{6}$. $P(X{=}x_k)$ is notation for "the probability of $X$ being $x_k$". Some texts use $Pr$ or $Prob$ instead of $P$. # # # Another example is a fair coin. It has the sample space {H, T}. The coin is fair, so the probability for heads (H) is 50%, and the probability for tails (T) is 50%. We write this as # # $$\begin{gathered}P(X{=}H) = 0.5\\P(X{=}T)=0.5\end{gathered}$$ # # Sample spaces are not unique. One sample space for a die is {1, 2, 3, 4, 5, 6}. Another valid sample space would be {even, odd}. Another might be {dots in all corners, not dots in all corners}. A sample space is valid so long as it covers all possibilities, and any single event is described by only one element. {even, 1, 3, 4, 5} is not a valid sample space for a die since a value of 4 is matched both by 'even' and '4'. # # The probabilities for all values of a *discrete random variable* are known as the *discrete probability distribution* and the probabilities for all values of a *continuous random variable* are known as the *continuous probability distribution*. # # To be a probability distribution the probability of each value $x_i$ must be $P(X{=}x_i) \ge 0$, since no probability can be less than zero. Secondly, the sum of the probabilities for all values must equal one. This should be intuitively clear for a coin toss: if the odds of getting heads is 70%, then the odds of getting tails must be 30%. We formalize this requirement as # # $$\sum\limits_u P(X{=}u)= 1$$ # # for discrete distributions, and as # # $$\int\limits_u P(X{=}u) \,du= 1$$ # # for continuous distributions. # ### The Mean, Median, and Mode of a Random Variable # # Given a set of data we often want to know a representative or average value for that set. There are many measures for this, and the concept is called a [*measure of central tendency*](https://en.wikipedia.org/wiki/Central_tendency). For example we might want to know the *average* height of the students in a class. We all know how to find the average, but let me belabor the point so I can introduce more formal notation and terminology. Another word for average is the *mean*. We compute the mean by summing the values and dividing by the number of values.
If the heights of the students in meters is # # $$X = \{1.85, 2.0, 1.7, 1.9, 1.6\}$$ # # we compute the mean as # # $$\mu = \frac{1.85 + 2.0 + 1.7 + 1.9 + 1.6}{5} = 1.81$$ # # It is traditional to use the symbol $\mu$ (mu) to denote the mean. # # We can formalize this computation with the equation # # $$ \mu = \frac{1}{n}\sum^n_{i=1} x_i$$ # # NumPy provides `numpy.mean()` for computing the mean. import numpy as np x = [1.85, 2.0, 1.7, 1.9, 1.6] print(np.mean(x)) # The *mode* of a set of numbers is the number that occurs most often. If only one number occurs most often we say it is a *unimodal* set, and if two or more numbers occur the most with equal frequency then the set is *multimodal*. For example the set {1, 2, 2, 2, 3, 4, 4, 4} has modes 2 and 4, which is multimodal, and the set {5, 7, 7, 13} has the mode 7, and so it is unimodal. We will not be computing the mode in this manner in this book, but we do use the concepts of unimodal and multimodal in a more general sense. For example, in the **Discrete Bayes** chapter we talked about our belief in the dog's position as a *multimodal distribution* because we assigned different probabilities to different positions. # # Finally, the *median* of a set of numbers is the middle point of the set so that half the values are below the median and half are above the median. Here, above and below is in relation to the set being sorted. If the set contains an even number of values then the two middle numbers are averaged together. # # NumPy provides `numpy.median()` to compute the median. As you can see the median of {1.85, 2.0, 1.7, 1.9, 1.6} is 1.85, because 1.85 is the third element of this set after being sorted. print(np.median(x)) # ## Expected Value of a Random Variable # # The [*expected value*](https://en.wikipedia.org/wiki/Expected_value) of a random variable is the average value it would have if we took an infinite number of samples of it and then averaged those samples together. Let's say we have $x=[1,3,5]$ and each value is equally probable. What value would we *expect* $x$ to have, on average? # # It would be the average of 1, 3, and 5, of course, which is 3. That should make sense; we would expect equal numbers of 1, 3, and 5 to occur, so $(1+3+5)/3=3$ is clearly the average of that infinite series of samples. In other words, here the expected value is the *mean* of the sample space. # # Now suppose that each value has a different probability of happening. Say 1 has an 80% chance of occurring, 3 has a 15% chance, and 5 has only a 5% chance. In this case we compute the expected value by multiplying each value of $x$ by the percent chance of it occurring, and summing the result. For this case we could compute # # $$\mathbb E[X] = (1)(0.8) + (3)(0.15) + (5)(0.05) = 1.5$$ # # Here I have introduced the notation $\mathbb E[X]$ for the expected value of $x$. Some texts use $E(x)$. The value 1.5 for $x$ makes intuitive sense because $x$ is far more likely to be 1 than 3 or 5, and 3 is more likely than 5 as well. # # We can formalize this by letting $x_i$ be the $i^{th}$ value of $X$, and $p_i$ be the probability of its occurrence.
This gives us # # $$\mathbb E[X] = \sum_{i=1}^n p_ix_i$$ # # A trivial bit of algebra shows that if the probabilities are all equal, the expected value is the same as the mean: # # $$\mathbb E[X] = \sum_{i=1}^n p_ix_i = \sum_{i=1}^n \frac{1}{n}x_i = \mu_x$$ # # If $x$ is continuous we substitute the sum for an integral, like so # # $$\mathbb E[X] = \int_{-\infty}^\infty x\, f(x) \,dx$$ # # where $f(x)$ is the probability distribution function of $x$. We won't be using this equation yet, but we will be using it in the next chapter. # ### Variance of a Random Variable # # The computation above tells us the average height of the students, but it doesn't tell us everything we might want to know. For example, suppose we have three classes of students, which we label $X$, $Y$, and $Z$, with these heights: X = [1.8, 2.0, 1.7, 1.9, 1.6] Y = [2.2, 1.5, 2.3, 1.7, 1.3] Z = [1.8, 1.8, 1.8, 1.8, 1.8] # Using NumPy we see that the mean height of each class is the same. print(np.mean(X)) print(np.mean(Y)) print(np.mean(Z)) # The mean of each class is 1.8 meters, but notice that there is a much greater amount of variation in the heights in the second class than in the first class, and that there is no variation at all in the third class. # # The mean tells us something about the data, but not the whole story. We want to be able to specify how much *variation* there is between the heights of the students. You can imagine a number of reasons for this. Perhaps a school district needs to order 5,000 desks, and they want to be sure they buy sizes that accommodate the range of heights of the students. # # Statistics has formalized this concept of measuring variation into the notion of [*standard deviation*](https://en.wikipedia.org/wiki/Standard_deviation) and [*variance*](https://en.wikipedia.org/wiki/Variance). The equation for computing the variance is # # $$\mathit{VAR}(X) = E[(X - \mu)^2]$$ # # Ignoring the squared terms for a moment, you can see that the variance is the *expected value* for how much the sample space ($X$) varies from the mean. I will explain the purpose of the squared term later. We have the formula for the expected value $E[X] = \sum\limits_{i=1}^n p_ix_i$, and we will assume that any height is equally probable, so we can substitute that into the equation above to get # # $$\mathit{VAR}(X) = \frac{1}{n}\sum_{i=1}^n (x_i - \mu)^2$$ # # Let's compute the variance of the three classes to see what values we get and to become familiar with this concept. # # The mean of $X$ is 1.8 ($\mu_x = 1.8$) so we compute # # $$ # \begin{aligned} # \mathit{VAR}(X) &=\frac{(1.8-1.8)^2 + (2-1.8)^2 + (1.7-1.8)^2 + (1.9-1.8)^2 + (1.6-1.8)^2} {5} \\ # &= \frac{0 + 0.04 + 0.01 + 0.01 + 0.04}{5} \\ # \mathit{VAR}(X)&= 0.02 \, m^2 # \end{aligned}$$ # # NumPy provides the function `var()` to compute the variance: print(np.var(X), "meters squared") # This is perhaps a bit hard to interpret. Heights are in meters, yet the variance is meters squared. Thus we have a more commonly used measure, the *standard deviation*, which is defined as the square root of the variance: # # $$\sigma = \sqrt{\mathit{VAR}(X)}=\sqrt{\frac{1}{n}\sum_{i=1}^n(x_i - \mu)^2}$$ # # It is typical to use $\sigma$ for the *standard deviation* and $\sigma^2$ for the *variance*. In most of this book I will be using $\sigma^2$ instead of $\mathit{VAR}(X)$ for the variance; they symbolize the same thing. 
# # For the first class we compute the standard deviation with # # $$ # \begin{aligned} # \sigma_x &=\sqrt{\frac{(1.8-1.8)^2 + (2-1.8)^2 + (1.7-1.8)^2 + (1.9-1.8)^2 + (1.6-1.8)^2} {5}} \\ # &= \sqrt{\frac{0 + 0.04 + 0.01 + 0.01 + 0.04}{5}} \\ # \sigma_x&= 0.1414 # \end{aligned}$$ # # We can verify this computation with the NumPy method `numpy.std()` which computes the standard deviation. 'std' is a common abbreviation for standard deviation. print('std {:.4f}'.format(np.std(X))) print('var {:.4f}'.format(np.std(X)**2)) # And, of course, $0.1414^2 = 0.02$, which agrees with our earlier computation of the variance. # # What does the standard deviation signify? It tells us how much the heights vary amongst themselves. "How much" is not a mathematical term. We will be able to define it much more precisely once we introduce the concept of a Gaussian in the next section. For now I'll say that for many things 68% of all values lie within one standard deviation of the mean. In other words we can conclude that for a random class 68% of the students will have heights between 1.66 (1.8-0.1414) meters and 1.94 (1.8+0.1414) meters. # # We can view this in a plot: # + from book_format import set_figsize, figsize from code.gaussian_internal import plot_height_std import matplotlib.pyplot as plt plot_height_std(X) # - # For only 5 students we obviously will not get exactly 68% within one standard deviation. We do see that 3 out of 5 students are within $\pm1\sigma$, or 60%, which is as close as you can get to 68% with only 5 samples. I haven't yet introduced enough math or Python for you to fully understand the next bit of code, but let's look at the results for a class with 100 students. # # > We write one standard deviation as $1\sigma$, which is pronounced "one standard deviation", not "one sigma". Two standard deviations is $2\sigma$, and so on. # + from numpy.random import randn data = [1.8 + .1414*randn() for i in range(100)] plot_height_std(data, lw=2) print('mean = {:.3f}'.format(np.mean(data))) print('std = {:.3f}'.format(np.std(data))) # - # We can see by eye that roughly 68% of the heights lie within $\pm1\sigma$ of the mean 1.8. # # We'll discuss this in greater depth soon. For now let's compute the standard deviation for # # $$Y = [2.2, 1.5, 2.3, 1.7, 1.3]$$ # # The mean of $Y$ is $\mu=1.8$ m, so # # $$ # \begin{aligned} # \sigma_y &=\sqrt{\frac{(2.2-1.8)^2 + (1.5-1.8)^2 + (2.3-1.8)^2 + (1.7-1.8)^2 + (1.3-1.8)^2} {5}} \\ # &= \sqrt{0.152} = 0.39 \ m # \end{aligned}$$ # # We will verify that with NumPy with print('std of Y is {:.4f} m'.format(np.std(Y))) # This corresponds with what we would expect. There is more variation in the heights for $Y$, and the standard deviation is larger. # # Finally, let's compute the standard deviation for $Z$. There is no variation in the values, so we would expect the standard deviation to be zero. We show this to be true with # # $$ # \begin{aligned} # \sigma_z &=\sqrt{\frac{(1.8-1.8)^2 + (1.8-1.8)^2 + (1.8-1.8)^2 + (1.8-1.8)^2 + (1.8-1.8)^2} {5}} \\ # &= \sqrt{\frac{0+0+0+0+0}{5}} \\ # \sigma_z&= 0.0 \ m # \end{aligned}$$ print(np.std(Z)) # Before we continue I need to point out that I'm ignoring that on average men are taller than women. In general the height variance of a class that contains only men or women will be smaller than a class with both sexes. This is true for other factors as well. Well nourished children are taller than malnourished children. Scandinavians are taller than Italians. 
When designing experiments statisticians need to take these factors into account. # # I suggested we might be performing this analysis to order desks for a school district. For each age group there are likely to be two different means - one clustered around the mean height of the females, and a second mean clustered around the mean heights of the males. The mean of the entire class will be somewhere between the two. If we bought desks for the mean of all students we are likely to end up with desks that fit neither the males nor the females in the school! # # We will not normally be faced with these problems in this book. Consult any standard probability text if you need to learn techniques to deal with these issues. # ### Why the Square of the Differences # # Why are we taking the *square* of the differences for the variance? I could go into a lot of math, but let's look at this in a simple way. Here is a chart of the values of $X$ plotted against the mean for $X=[3,-3,3,-3]$ X = [3, -3, 3, -3] mean = np.average(X) for i in range(len(X)): plt.plot([i ,i], [mean, X[i]], color='k') plt.axhline(mean) plt.xlim(-1, len(X)) plt.tick_params(axis='x', labelbottom='off') # If we didn't take the square of the differences the signs would cancel everything out: # # $$\frac{(3-0) + (-3-0) + (3-0) + (-3-0)}{4} = 0$$ # # This is clearly incorrect, as there is more than 0 variance in the data. # # Maybe we can use the absolute value? We can see by inspection that the result is $12/4=3$ which is certainly correct — each value varies by 3 from the mean. But what if we have $Y=[6, -2, -3, 1]$? In this case we get $12/4=3$. $Y$ is clearly more spread out than $X$, but the computation yields the same value. If we use the formula with squares we get a variance of 12.25 for $Y$ ($\sigma = 3.5$) versus 9 for $X$ ($\sigma = 3$), which reflects $Y$'s larger variation. # # This is not a proof of correctness. Indeed, <NAME>, the inventor of the technique, recognized that it is somewhat arbitrary. If there are outliers then squaring the difference gives disproportionate weight to that term. For example, let's see what happens if we have $X = [1,-1,1,-2,3,2,100]$. X = [1, -1, 1, -2, 3, 2, 100] print('Variance of X = {:.2f}'.format(np.var(X))) # Is this "correct"? You tell me. Without the outlier of 100 we get $\sigma^2=2.89$, which accurately reflects how $X$ is varying absent the outlier. The one outlier swamps the computation. Do we want to swamp the computation so we know there is an outlier, or robustly incorporate the outlier and still provide an estimate close to the value absent the outlier? I will not continue down this path; if you are interested you might want to look at the work that <NAME> has done on this problem, in a field called *Bayesian robustness*, or the excellent publications on *robust statistics* by <NAME> [3]. # ## Gaussians # # We are now ready to learn about [Gaussians](https://en.wikipedia.org/wiki/Gaussian_function). Let's remind ourselves of the motivation for this chapter. # # > We desire a unimodal, continuous way to represent probabilities that models how the real world works, and that is computationally efficient to calculate. # # Let's look at a graph of a Gaussian distribution to get a sense of what we are talking about. from filterpy.stats import plot_gaussian_pdf plt.figure() ax = plot_gaussian_pdf(mean=1.8, variance=0.1414**2, xlabel='Student Height', ylabel='pdf') # This curve is a [*probability density function*](https://en.wikipedia.org/wiki/Probability_density_function) or *pdf* for short.
It shows the relative likelihood for the random variable to take on a value. In the chart above, a student is somewhat more likely to have a height near 1.8 m than 1.7 m, and far more likely to have a height of 1.9 m vs 1.4 m. # # > I explain how to plot Gaussians, and much more, in the Notebook *Computing_and_Plotting_PDFs* in the # Supporting_Notebooks folder. You can read it online [here](https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/Supporting_Notebooks/Computing_and_plotting_PDFs.ipynb) [1]. # # This may be recognizable to you as a 'bell curve'. This curve is ubiquitous because under real world conditions many observations are distributed in such a manner. I will not use the term 'bell curve' to refer to a Gaussian because many probability distributions have a similar bell curve shape. Non-mathematical sources might not be as precise, so be judicious in what you conclude when you see the term used without definition. # # This curve is not unique to heights — a vast amount of natural phenomena exhibits this sort of distribution, including the sensors that we use in filtering problems. As we will see, it also has all the attributes that we are looking for — it represents a unimodal belief or value as a probability, it is continuous, and it is computationally efficient. We will soon discover that it also has other desirable qualities which we may not realize we desire. # # To further motivate you, recall the shapes of the probability distributions in the *Discrete Bayes* chapter: import code.book_plots as book_plots belief = [ 0.,0., 0., 0.1, 0.15, 0.5, 0.2, .15, 0, 0] book_plots.bar_plot(belief) # They were not perfect Gaussian curves, but they were similar. We will be using Gaussians to replace the discrete probabilities used in that chapter! # ## Nomenclature # # A bit of nomenclature before we continue - this chart depicts the *probability density* of a *random variable* having any value between ($-\infty..\infty)$. What does that mean? Imagine we take an infinite number of infinitely precise measurements of the speed of automobiles on a section of highway. We could then plot the results by showing the relative number of cars going past at any given speed. If the average was 120 kph, it might look like this: ax = plot_gaussian_pdf(mean=120, variance=17**2, xlabel='speed(kph)') # The y-axis depicts the *probability density* — the relative amount of cars that are going the speed at the corresponding x-axis. # # You may object that human heights or automobile speeds cannot be less than zero, let alone $-\infty$ or $\infty$. This is true, but this is a common limitation of mathematical modeling. “The map is not the territory” is a common expression, and it is true for Bayesian filtering and statistics. The Gaussian distribution above models the distribution of the measured automobile speeds, but being a model it is necessarily imperfect. The difference between model and reality will come up again and again in these filters. Gaussians are used in many branches of mathematics, not because they perfectly model reality, but because they are easier to use than any other relatively accurate choice. However, even in this book Gaussians will fail to model reality, forcing us to use computationally expensive alternatives. # # You will hear these distributions called *Gaussian distributions* or *normal distributions*. *Gaussian* and *normal* both mean the same thing in this context, and are used interchangeably. 
I will use both throughout this book as different sources will use either term, and I want you to be used to seeing both. Finally, as in this paragraph, it is typical to shorten the name and talk about a *Gaussian* or *normal* — these are both typical shortcut names for the *Gaussian distribution*. # ## Gaussian Distributions # # Let's explore how Gaussians work. A Gaussian is a *continuous probability distribution* that is completely described with two parameters, the mean ($\mu$) and the variance ($\sigma^2$). It is defined as: # # $$ # f(x, \mu, \sigma) = \frac{1}{\sigma\sqrt{2\pi}} \exp\big [{-\frac{(x-\mu)^2}{2\sigma^2} }\big ] # $$ # # $\exp[x]$ is notation for $e^x$. # # <p> Don't be dissuaded by the equation if you haven't seen it before; you will not need to memorize or manipulate it. The computation of this function is stored in `stats.py` with the function `gaussian(x, mean, var)`. # # > **Optional:** Let's remind ourselves how to look at a function stored in a file by using the *%load* magic. If you type *%load -s gaussian stats.py* into a code cell and then press CTRL-Enter, the notebook will create a new input cell and load the function into it. # # ```python # # %load -s gaussian stats.py # # def gaussian(x, mean, var): # """returns normal distribution for x given a # gaussian with the specified mean and variance. # """ # return (np.exp((-0.5*(np.asarray(x)-mean)**2)/var) / # math.sqrt(2*math.pi*var)) # # ``` # # <p><p><p><p>We will plot a Gaussian with a mean of 22 $(\mu=22)$, with a variance of 4 $(\sigma^2=4)$, and then discuss what this means. # + from filterpy.stats import gaussian, norm_cdf ax = plot_gaussian_pdf(22, 4, mean_line=True, xlabel='$^{\circ}C$') # - # What does this curve *mean*? Assume we have a thermometer which reads 22°C. No thermometer is perfectly accurate, and so we expect that each reading will be slightly off the actual value. However, a theorem called [*Central Limit Theorem*](https://en.wikipedia.org/wiki/Central_limit_theorem) states that if we make many measurements that the measurements will be normally distributed. When we look at this chart we can "sort of" think of it as representing the probability of the thermometer reading a particular value given the actual temperature of 22°C. # # Recall that a Gaussian distribution is *continuous*. Think of an infinitely long straight line - what is the probability that a point you pick randomly is at 2. Clearly 0%, as there is an infinite number of choices to choose from. The same is true for normal distributions; in the graph above the probability of being *exactly* 2°C is 0% because there are an infinite number of values the reading can take. # # What is this curve? It is something we call the *probability density function.* The area under the curve at any region gives you the probability of those values. So, for example, if you compute the area under the curve between 20 and 22 the resulting area will be the probability of the temperature reading being between those two temperatures. # # We can think of this in Bayesian terms or frequentist terms. As a Bayesian, if the thermometer reads exactly 22°C, then our belief is described by the curve - our belief that the actual (system) temperature is near 22 is very high, and our belief that the actual temperature is near 18 is very low. As a frequentist we would say that if we took 1 billion temperature measurements of a system at exactly 22°C, then a histogram of the measurements would look like this curve. 
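# One way to make the frequentist reading concrete is to simulate a large number of readings drawn from $\mathcal{N}(22, 4)$ and overlay the Gaussian pdf on their normalized histogram; the histogram should hug the curve. This code cell is my addition, a minimal sketch assuming `numpy`, `matplotlib`, and `scipy` are available.

# +
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

np.random.seed(42)                  # make the simulation repeatable
readings = norm(22, 2).rvs(100000)  # std=2, i.e. variance=4
plt.hist(readings, bins=100, density=True, alpha=0.5, label='simulated readings')
xs = np.linspace(14, 30, 200)
plt.plot(xs, norm(22, 2).pdf(xs), label='Gaussian pdf')
plt.legend();
# -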
#
# How do you compute the probability, or area under the curve? You integrate the equation for the Gaussian
#
# $$ \int^{x_1}_{x_0} \frac{1}{\sigma\sqrt{2\pi}} e^{-\frac{1}{2}{(x-\mu)^2}/\sigma^2 } dx$$
#
# I wrote `filterpy.stats.norm_cdf` which computes the integral for you. For example, we can compute

print('Probability of range 21.5 to 22.5 is {:.2f}%'.format(
    norm_cdf((21.5, 22.5), 22, 4)*100))
print('Probability of range 23.5 to 24.5 is {:.2f}%'.format(
    norm_cdf((23.5, 24.5), 22, 4)*100))

# The mean ($\mu$) is what it sounds like — the average of all possible values, weighted by how probable each value is. Because of the symmetric shape of the curve it is also the tallest part of the curve. The thermometer reads 22°C, so that is what we used for the mean.
#
# The notation for a normal distribution for a random variable $X$ is $X \sim\ \mathcal{N}(\mu,\sigma^2)$ where $\sim$ means *distributed according to*. This means I can express the temperature reading of our thermometer as
#
# $$\text{temp} \sim \mathcal{N}(22,4)$$
#
# This is an extremely important result. Gaussians allow me to capture an infinite number of possible values with only two numbers! With the values $\mu=22$ and $\sigma^2=4$ I can compute the distribution of measurements over any range.
#
# > Some sources use $\mathcal N (\mu, \sigma)$ instead of $\mathcal N (\mu, \sigma^2)$. Either is fine, they are both conventions. You need to keep in mind which form is being used if you see a term such as $\mathcal{N}(22,4)$. In this book I always use $\mathcal N (\mu, \sigma^2)$, so $\sigma=2$, $\sigma^2=4$ for this example.

# ## The Variance and Belief
#
# Since this is a probability density function it is required that the area under the curve always equals one. This should be intuitively clear — the area under the curve represents all possible outcomes, *something* happened, and the probability of *something happening* is one, so the density must sum to one. We can prove this ourselves with a bit of code. (If you are mathematically inclined, integrate the Gaussian equation from $-\infty$ to $\infty$)

print(norm_cdf((-1e8, 1e8), mu=0, var=4))

# This leads to an important insight. If the variance is small the curve will be narrow. This is because the variance is a measure of *how much* the samples vary from the mean. To keep the area equal to 1, the curve must also be tall. On the other hand if the variance is large the curve will be wide, and thus it will also have to be short to make the area equal to 1.
#
# Let's look at that graphically:

# +
import numpy as np
import matplotlib.pyplot as plt

xs = np.arange(15, 30, 0.05)
plt.plot(xs, gaussian(xs, 23, 0.05), label='$\sigma^2$=0.05', c='b')
plt.plot(xs, gaussian(xs, 23, 1), label='$\sigma^2$=1', ls=':', c='b')
plt.plot(xs, gaussian(xs, 23, 5), label='$\sigma^2$=5', ls='--', c='b')
plt.legend();
# -

# What is this telling us? The Gaussian with $\sigma^2=0.05$ is very narrow. It is saying that we believe $x=23$, and that we are very sure about that. In contrast, the Gaussian with $\sigma^2=5$ also believes that $x=23$, but we are much less sure about that. Our belief that $x=23$ is lower, and so our belief about the likely possible values for $x$ is spread out — we think it is quite likely that $x=20$ or $x=26$, for example. $\sigma^2=0.05$ has almost completely eliminated $22$ or $24$ as possible values, whereas $\sigma^2=5$ considers them nearly as likely as $23$.
#
# If we think back to the thermometer, we can consider these three curves as representing the readings from three different thermometers. The curve for $\sigma^2=0.05$ represents a very accurate thermometer, and the curve for $\sigma^2=5$ represents a fairly inaccurate one. Note the very powerful property the Gaussian distribution affords us — we can entirely represent both the reading and the error of a thermometer with only two numbers — the mean and the variance.
#
# An equivalent formulation for a Gaussian is $\mathcal{N}(\mu,1/\tau)$ where $\mu$ is the *mean* and $\tau$ the *precision*. $1/\tau = \sigma^2$; it is the reciprocal of the variance. While we do not use this formulation in this book, it underscores that the variance is a measure of how precise our data is. A small variance yields large precision — our measurement is very precise. Conversely, a large variance yields low precision — our belief is spread out across a large area. You should become comfortable with thinking about Gaussians in these equivalent forms. In Bayesian terms Gaussians reflect our *belief* about a measurement, they express the *precision* of the measurement, and they express how much *variance* there is in the measurements. These are all different ways of stating the same fact.
#
# I'm getting ahead of myself, but in the next chapters we will use Gaussians to express our belief in things like the estimated position of the object we are tracking, or the accuracy of the sensors we are using.

# ## The 68-95-99.7 Rule
#
# It is worth spending a few words on standard deviation now. The standard deviation is a measure of how much variation from the mean exists. For Gaussian distributions, 68% of all the data falls within one standard deviation ($\pm1\sigma$) of the mean, 95% falls within two standard deviations ($\pm2\sigma$), and 99.7% within three ($\pm3\sigma$). This is often called the [68-95-99.7 rule](https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule). If you were told that the average test score in a class was 71 with a standard deviation of 9.4, you could conclude that 95% of the students received a score between 52.2 and 89.8 if the distribution is normal (that is calculated with $71 \pm (2 * 9.4)$).
#
# Finally, these are not arbitrary numbers. If the Gaussian for our position is $\mu=22$ meters, then the standard deviation also has units of meters. Thus $\sigma=0.2$ implies that 68% of the measurements range from 21.8 to 22.2 meters. Variance is the standard deviation squared, thus $\sigma^2 = .04$ meters$^2$.
#
# The following graph depicts the relationship between the standard deviation and the normal distribution.

from code.gaussian_internal import display_stddev_plot
display_stddev_plot()

# ## Interactive Gaussians
#
# For those that are reading this in a Jupyter Notebook, here is an interactive version of the Gaussian plots. Use the sliders to modify $\mu$ and $\sigma^2$. Adjusting $\mu$ will move the graph to the left and right because you are adjusting the mean, and adjusting $\sigma^2$ will make the bell curve thicker and thinner.

# +
import math
from ipywidgets import interact, interactive, fixed

set_figsize(y=3)

def plt_g(mu, variance):
    plt.figure()
    xs = np.arange(2, 8, 0.1)
    ys = gaussian(xs, mu, variance)
    plt.plot(xs, ys)
    plt.ylim((0, 1))

interact(plt_g, mu=(0., 10), variance=(.2, 1.));
# -

# Finally, if you are reading this online, here is an animation of a Gaussian. First, the mean is shifted to the right. Then the mean is centered at $\mu=5$ and the variance is modified.
#
# <img src='animations/04_gaussian_animate.gif'>

# ## Computational Properties of Gaussians
#
# A remarkable property of Gaussians is that the sum of two independent Gaussian random variables is another Gaussian! The product is not Gaussian, but proportional to a Gaussian.
#
# The discrete Bayes filter works by multiplying and adding arbitrary probability distributions. The Kalman filter uses Gaussians instead of arbitrary distributions, but the rest of the algorithm remains the same. This means we will need to multiply and add Gaussians.
#
# The Gaussian is a nonlinear function. Typically, if you multiply a nonlinear equation by itself you end up with a different type of equation. For example, the shape of `sin(x)sin(x)` is very different from `sin(x)`. But the result of multiplying two Gaussians is proportional to yet another Gaussian. This is a key reason why Kalman filters are computationally feasible. Said another way, Kalman filters use Gaussians *because* they are computationally nice.
#
# The product of two independent Gaussians is given by:
#
# $$\begin{aligned}\mu &=\frac{\sigma_1^2\mu_2 + \sigma_2^2\mu_1}{\sigma_1^2+\sigma_2^2}\\
# \sigma^2 &=\frac{\sigma_1^2\sigma_2^2}{\sigma_1^2+\sigma_2^2}
# \end{aligned}$$
#
# The sum of two Gaussians is given by
#
# $$\begin{gathered}\mu = \mu_1 + \mu_2 \\
# \sigma^2 = \sigma^2_1 + \sigma^2_2
# \end{gathered}$$
#
# The remainder of this section is optional. I will derive the equations for the sum and product of two Gaussians. You will not need to understand this material to understand the rest of the book, so long as you accept the results.

# ### Product of Gaussians
#
# You can find this result by multiplying the equation for two Gaussians together and combining terms. The algebra gets messy. I will derive it using Bayes theorem. We can state the problem as: let the prior be $\mathcal N(\bar\mu, \bar\sigma^2)$, and the measurement be $z \propto \mathcal N(z, \sigma_z^2)$. What is the posterior x given the measurement z?
#
# Write the posterior as $P(x \mid z)$. Now we can use Bayes Theorem to state
#
# $$P(x \mid z) = \frac{P(z \mid x)P(x)}{P(z)}$$
#
# $P(z)$ is a normalizing constant, so we can create a proportionality
#
# $$P(x \mid z) \propto P(z|x)P(x)$$
#
# Now we substitute in the equations for the Gaussians, which are
#
# $$P(z \mid x) = \frac{1}{\sqrt{2\pi\sigma_z^2}}\exp \Big[-\frac{(z-x)^2}{2\sigma_z^2}\Big]$$
#
# $$P(x) = \frac{1}{\sqrt{2\pi\bar\sigma^2}}\exp \Big[-\frac{(x-\bar\mu)^2}{2\bar\sigma^2}\Big]$$
#
# We can drop the leading terms, as they are constants, giving us
#
# $$\begin{aligned}
# P(x \mid z) &\propto \exp \Big[-\frac{(z-x)^2}{2\sigma_z^2}\Big]\exp \Big[-\frac{(x-\bar\mu)^2}{2\bar\sigma^2}\Big]\\
# &\propto \exp \Big[-\frac{(z-x)^2}{2\sigma_z^2}-\frac{(x-\bar\mu)^2}{2\bar\sigma^2}\Big] \\
# &\propto \exp \Big[-\frac{1}{2\sigma_z^2\bar\sigma^2}[\bar\sigma^2(z-x)^2+\sigma_z^2(x-\bar\mu)^2]\Big]
# \end{aligned}$$
#
# Now we multiply out the squared terms and group in terms of the posterior $x$.
#
# $$\begin{aligned}
# P(x \mid z) &\propto \exp \Big[-\frac{1}{2\sigma_z^2\bar\sigma^2}[\bar\sigma^2(z^2 -2xz + x^2) + \sigma_z^2(x^2 - 2x\bar\mu+\bar\mu^2)]\Big ] \\
# &\propto \exp \Big[-\frac{1}{2\sigma_z^2\bar\sigma^2}[x^2(\bar\sigma^2+\sigma_z^2)-2x(\sigma_z^2\bar\mu + \bar\sigma^2z) + (\bar\sigma^2z^2+\sigma_z^2\bar\mu^2)]\Big ]
# \end{aligned}$$
#
# The last parentheses do not contain the posterior $x$, so the term can be treated as a constant and discarded.
#
# $$P(x \mid z) \propto \exp \Big[-\frac{1}{2}\frac{x^2(\bar\sigma^2+\sigma_z^2)-2x(\sigma_z^2\bar\mu + \bar\sigma^2z)}{\sigma_z^2\bar\sigma^2}\Big ]
# $$
#
# Divide numerator and denominator by $\bar\sigma^2+\sigma_z^2$ to get
#
# $$P(x \mid z) \propto \exp \Big[-\frac{1}{2}\frac{x^2-2x(\frac{\sigma_z^2\bar\mu + \bar\sigma^2z}{\bar\sigma^2+\sigma_z^2})}{\frac{\sigma_z^2\bar\sigma^2}{\bar\sigma^2+\sigma_z^2}}\Big ]
# $$
#
# Proportionality allows us to create or delete constants at will, so we can factor this into
#
# $$P(x \mid z) \propto \exp \Big[-\frac{1}{2}\frac{(x-\frac{\sigma_z^2\bar\mu + \bar\sigma^2z}{\bar\sigma^2+\sigma_z^2})^2}{\frac{\sigma_z^2\bar\sigma^2}{\bar\sigma^2+\sigma_z^2}}\Big ]
# $$
#
# A Gaussian is
#
# $$N(\mu,\, \sigma^2) \propto \exp\Big [-\frac{1}{2}\frac{(x - \mu)^2}{\sigma^2}\Big ]$$
#
# So we can see that $P(x \mid z)$ has a mean of
#
# $$\mu_\mathtt{posterior} = \frac{\sigma_z^2\bar\mu + \bar\sigma^2z}{\bar\sigma^2+\sigma_z^2}$$
#
# and a variance of
# $$
# \sigma^2_\mathtt{posterior} = \frac{\sigma_z^2\bar\sigma^2}{\bar\sigma^2+\sigma_z^2}
# $$
#
# I've dropped the constants, and so the result is not a normal, but proportional to one. Bayes theorem normalizes with the $P(z)$ divisor, ensuring that the result is normal. We normalize in the update step of our filters, ensuring the filter estimate is Gaussian.
#
# $$\mathcal N_1 = \| \mathcal N_2\cdot \mathcal N_3\|$$

# ### Sum of Gaussians
#
# The sum of two Gaussians is given by
#
# $$\begin{gathered}\mu = \mu_1 + \mu_2 \\
# \sigma^2 = \sigma^2_1 + \sigma^2_2
# \end{gathered}$$
#
# There are several proofs for this. I will use convolution since we used convolution in the previous chapter for the histograms of probabilities.
#
# To find the density function of the sum of two Gaussian random variables we convolve the density functions of each. They are continuous functions, so the convolution is computed with an integral. If the random variables $p$ and $z$ (e.g. prior and measurement) are independent we can compute this with
#
# $p(x) = \int\limits_{-\infty}^\infty f_p(x-z)f_z(z)\, dz$
#
# This is the equation for a convolution. Now we just do some math:
#
# $p(x) = \int\limits_{-\infty}^\infty f_2(x-x_1)f_1(x_1)\, dx_1$
#
# $= \int\limits_{-\infty}^\infty
# \frac{1}{\sqrt{2\pi}\sigma_z}\exp\left[-\frac{(x - x_1 - \mu_z)^2}{2\sigma^2_z}\right]
# \frac{1}{\sqrt{2\pi}\sigma_p}\exp\left[-\frac{(x_1 - \mu_p)^2}{2\sigma^2_p}\right] \, dx_1$
#
# $= \int\limits_{-\infty}^\infty
# \frac{1}{\sqrt{2\pi}\sqrt{\sigma_p^2 + \sigma_z^2}} \exp\left[ -\frac{(x - (\mu_p + \mu_z))^2}{2(\sigma_z^2+\sigma_p^2)}\right]
# \frac{1}{\sqrt{2\pi}\frac{\sigma_p\sigma_z}{\sqrt{\sigma_p^2 + \sigma_z^2}}} \exp\left[ -\frac{\left(x_1 - \frac{\sigma_p^2(x-\mu_z) + \sigma_z^2\mu_p}{\sigma_p^2 + \sigma_z^2}\right)^2}{2\left(\frac{\sigma_p\sigma_z}{\sqrt{\sigma_z^2+\sigma_p^2}}\right)^2}\right] \, dx_1$
#
# $= \frac{1}{\sqrt{2\pi}\sqrt{\sigma_p^2 + \sigma_z^2}} \exp\left[ -\frac{(x - (\mu_p + \mu_z))^2}{2(\sigma_z^2+\sigma_p^2)}\right] \int\limits_{-\infty}^\infty
# \frac{1}{\sqrt{2\pi}\frac{\sigma_p\sigma_z}{\sqrt{\sigma_p^2 + \sigma_z^2}}} \exp\left[ -\frac{\left(x_1 - \frac{\sigma_p^2(x-\mu_z) + \sigma_z^2\mu_p}{\sigma_p^2 + \sigma_z^2}\right)^2}{2\left(\frac{\sigma_p\sigma_z}{\sqrt{\sigma_z^2+\sigma_p^2}}\right)^2}\right] \, dx_1$
#
# The expression inside the integral is a normal distribution over $x_1$. The area under a normal distribution is one, hence the integral is one.
# This gives us
#
# $$p(x) = \frac{1}{\sqrt{2\pi}\sqrt{\sigma_p^2 + \sigma_z^2}} \exp\left[ -\frac{(x - (\mu_p + \mu_z))^2}{2(\sigma_z^2+\sigma_p^2)}\right]$$
#
# This is in the form of a normal, where
#
# $$\begin{gathered}\mu_x = \mu_p + \mu_z \\
# \sigma_x^2 = \sigma_z^2+\sigma_p^2\, \square\end{gathered}$$

# ## Computing Probabilities with scipy.stats
#
# In this chapter I used code from [FilterPy](https://github.com/rlabbe/filterpy) to compute and plot Gaussians. I did that to give you a chance to look at the code and see how these functions are implemented. However, Python comes with "batteries included" as the saying goes, and it comes with a wide range of statistics functions in the module `scipy.stats`. So let's walk through how to use scipy.stats to compute statistics and probabilities.
#
# The `scipy.stats` module contains a number of objects which you can use to compute attributes of various probability distributions. The full documentation for this module is here: http://docs.scipy.org/doc/scipy/reference/stats.html. We will focus on the `norm` variable, which implements the normal distribution. Let's look at some code that uses `scipy.stats.norm` to compute a Gaussian, and compare its value to the value returned by the `gaussian()` function from FilterPy.

from scipy.stats import norm
import filterpy.stats
print(norm(2, 3).pdf(1.5))
print(filterpy.stats.gaussian(x=1.5, mean=2, var=3*3))

# The call `norm(2, 3)` creates what scipy calls a 'frozen' distribution - it creates and returns an object with a mean of 2 and a standard deviation of 3. You can then use this object multiple times to get the probability density of various values, like so:

n23 = norm(2, 3)
print('pdf of 1.5 is %.4f' % n23.pdf(1.5))
print('pdf of 2.5 is also %.4f' % n23.pdf(2.5))
print('pdf of 2 is %.4f' % n23.pdf(2))

# The documentation for [scipy.stats.norm](http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html#scipy.stats.norm) [2] lists many other functions. For example, we can generate $n$ samples from the distribution with the `rvs()` function.

np.set_printoptions(precision=3, linewidth=50)
print(n23.rvs(size=15))

# We can get the [*cumulative distribution function (CDF)*](https://en.wikipedia.org/wiki/Cumulative_distribution_function), which is the probability that a randomly drawn value from the distribution is less than or equal to $x$.

# probability that a random value is less than the mean 2
print(n23.cdf(2))

# We can get various properties of the distribution:

print('variance is', n23.var())
print('standard deviation is', n23.std())
print('mean is', n23.mean())

# ## Fat Tails
#
# Earlier I mentioned the *central limit theorem*, which states that under certain conditions the arithmetic sum of independent random variables will be normally distributed, regardless of how those random variables are themselves distributed. This is important to us because nature is full of distributions which are not normal, but when we apply the central limit theorem over large populations we end up with normal distributions.
#
# However, a key part of the proof is "under certain conditions". These conditions often do not hold for the physical world. The resulting distributions are called *fat tailed*. "Tails" is a colloquial term for the far left and right parts of the curve where the probability density is close to zero.
#
# Let's consider a trivial example. We think of things like test scores as being normally distributed. If you have ever had a professor "grade on a curve" you have been subject to this assumption. But of course test scores cannot follow a normal distribution. This is because the distribution assigns a nonzero probability to *any* value, no matter how far from the mean. So, for example, say your mean is 90 and the standard deviation is 13. The normal distribution assumes that there is a large chance of somebody getting a 90, and a small chance of somebody getting a 40. However, it also implies that there is a tiny chance of somebody getting a grade of -10, or 150. It assigns an infinitesimal chance of getting a score of $-10^{300}$ or $10^{32986}$. The tails of a Gaussian distribution are infinitely long.
#
# But for a test we know this is not true. Ignoring extra credit, you cannot get less than 0, or more than 100. Let's plot this range of values using a normal distribution.

xs = np.arange(10, 100, 0.05)
ys = [gaussian(x, 90, 30) for x in xs]
plt.plot(xs, ys, label='var=30')
plt.xlim((0, 120))
plt.ylim(0, 0.09);

# The area under the curve cannot equal 1, so it is not a probability distribution. What actually happens is that more students than predicted by a normal distribution get scores nearer the upper end of the range (for example), and that tail becomes "fat". Also, the test is probably not able to perfectly distinguish minute differences in skill in the students, so the distribution to the left of the mean is also probably a bit bunched up in places. The resulting distribution is called a [*fat tail distribution*](https://en.wikipedia.org/wiki/Fat-tailed_distribution).
#
# Sensors measure the world. The errors in a sensor's measurements are rarely truly Gaussian. It is far too early to be talking about the difficulties that this presents to the Kalman filter designer. It is worth keeping in the back of your mind the fact that the Kalman filter math is based on an idealized model of the world. For now I will present a bit of code that I will be using later in the book to form fat tail distributions to simulate various processes and sensors. This distribution is called the [*Student's $t$-distribution*](https://en.wikipedia.org/wiki/Student%27s_t-distribution).
#
# Let's say I want to model a sensor that has some white noise in the output. For simplicity, let's say the signal is a constant 10, and the standard deviation of the noise is 2. We can use the function `numpy.random.randn()` to get a random number with a mean of 0 and a standard deviation of 1. I can simulate this with:

from numpy.random import randn
def sense():
    return 10 + randn()*2

# Let's plot that signal and see what it looks like.

zs = [sense() for i in range(5000)]
plt.plot(zs, lw=1)

# That looks like I would expect. The signal is centered around 10. A standard deviation of 2 means that 68% of the measurements will be within $\pm$ 2 of 10, and 99.7% will be within $\pm$ 6 of 10, and that looks like what is happening.
#
# Now let's look at a fat tailed distribution generated with the Student's $t$-distribution. I will not go into the math, but just give you the source code for it and then plot a distribution using it.

# +
import random
import math

def rand_student_t(df, mu=0, std=1):
    """return random number distributed by Student's t
    distribution with `df` degrees of freedom with the
    specified mean and standard deviation.
    """
    x = random.gauss(0, std)
    y = 2.0*random.gammavariate(0.5*df, 2.0)
    return x / (math.sqrt(y / df)) + mu

# +
def sense_t():
    return 10 + rand_student_t(7)*2

zs = [sense_t() for i in range(5000)]
plt.plot(zs, lw=1)
# -

# We can see from the plot that while the output is similar to the normal distribution there are outliers that go more than 3 standard deviations from the mean, well outside the 4 to 16 range that $\pm3\sigma$ would predict. This is what causes the 'fat tail'.
#
# It is unlikely that the Student's $t$-distribution is an accurate model of how your sensor (say, a GPS or Doppler) performs, and this is not a book on how to model physical systems. However, it does produce reasonable data to test your filter's performance when presented with real world noise. We will be using distributions like these throughout the rest of the book in our simulations and tests.
#
# This is not an idle concern. The Kalman filter equations assume the noise is normally distributed, and perform sub-optimally if this is not true. Designers for mission-critical filters, such as the filters on spacecraft, need to master a lot of theory and empirical knowledge about the performance of the sensors on their spacecraft.
#
# The code for `rand_student_t` is included in `filterpy.stats`. You may use it with
#
# ```python
# from filterpy.stats import rand_student_t
# ```

# ## Summary and Key Points
#
# This chapter is a poor introduction to statistics in general. I've only covered the concepts needed to use Gaussians in the remainder of the book, no more. What I've covered will not get you very far if you intend to read the Kalman filter literature. If this is a new topic to you I suggest reading a statistics textbook. I've always liked the Schaum series for self study, and <NAME>'s *Think Stats* [5] is also very good.
#
# The following points **must** be understood by you before we continue:
#
# * Normals express a continuous probability distribution
# * They are completely described by two parameters: the mean ($\mu$) and variance ($\sigma^2$)
# * $\mu$ is the average of all possible values
# * The variance $\sigma^2$ represents how much our measurements vary from the mean
# * The standard deviation ($\sigma$) is the square root of the variance ($\sigma^2$)
# * Many things in nature approximate a normal distribution

# ## References
# [1] https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/Supporting_Notebooks/Computing_and_plotting_PDFs.ipynb
#
# [2] http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html
#
# [3] http://docs.scipy.org/doc/scipy/reference/tutorial/stats.html
#
# [4] Huber, <NAME>. *Robust Statistical Procedures*, Second Edition. Society for Industrial and Applied Mathematics, 1996.
#
# [5] <NAME>. *Think Stats*, Second Edition. O'Reilly Media.
#
# https://github.com/AllenDowney/ThinkStats2
#
# http://greenteapress.com/thinkstats/
03-Gaussians.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd # initialize list of lists data = [['<NAME>', "South", 2.8, 2.4], ['Cupertino', "North", 3.3, 3.4], ['Campbell', "Central", 2.9, 3.0]] # + # Create the pandas DataFrame df = pd.DataFrame(data, columns = ['city', 'loc', "jan", "feb"]) # print dataframe. # df.set_index('city') # df.set_index('month') print(df) # + df["sum"] = df.sum(axis=1) print(df) # - print(f'{df["sum"].max():.1f}') df[df.values == df["sum"].max()]["city"] # data # data.drop(columns=["loc"], inplace=True) data = pd.read_csv("sc_county.csv", index_col="city") print(data, '\n') print(data.shape, "\n") print(data.head(5), "\n") # data.drop(columns=["loc"]) type(data) data.drop(columns=["loc"], inplace=True) print(data, "\n") data["sum"] = data.sum(axis=1) print(data, "\n") print(data[data.values == data["sum"].max()]) print(data[data.values == data["sum"].max()].index.values) index = data.index print(index) # + # print(index[data.values == data["sum"].max()]) # - data.loc["San Jose"].min() total = data.sum()
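# A more direct pandas idiom for the lookup above: `idxmax()` returns the index label of the row holding the maximum value, so no boolean mask is needed. This cell is an added sketch; it assumes the `data` frame (indexed by city, with the computed "sum" column) is still in scope.

# city with the largest row total, via idxmax
print(data["sum"].idxmax())
# the full row for that city
print(data.loc[data["sum"].idxmax()])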
CIS009/3_Machine_Learning_with_scikit-learn/Midterm1_EC.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # ECDSA tag verification

# ## Prepare
#
# To interact with the Ethereum network, you need an Ethereum node up and running.
# You can run both the notebook and the ganache-cli node emulator with the `./start.sh` script.
#
# Or you can run the ganache-cli Ethereum emulator in a separate terminal
# `npx ganache-cli -m "dawn finish orchard pluck festival genuine absorb van bike mirror kiss loop"`
# (the 12 words are a seed passphrase that keeps the addresses and keys constant)

# ### Connect to Web3

# Now connect to the Ethereum provider via web3 RPC

from web3 import Web3
w3 = Web3(Web3.HTTPProvider('http://127.0.0.1:8545'))
w3.eth.defaultAccount = w3.eth.accounts[0]

# Check that web3 is up and running

w3.eth.getBlock('latest')

# ### Solidity version

# The solc compiler has to be installed on your machine. Check the solidity version (it should match the pragma statement in your contract)

# +
import subprocess, re, json
solc = subprocess.Popen(['solc', '--version'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
solc_output = solc.communicate()
m = re.search(r"Version: ([\w\.\+]+)", str(solc_output[0]))
m.group(1)
# -

# ### Solidity TagRegistry contract

# The contract stores the tag registry and verifies the signatures reported by authenticators

contract_source_code = b"""
pragma solidity ^0.5.7;

contract TagRegistry {

    mapping (address => bool) public allowedTagIDs;

    function allow(address tagID) public {
        allowedTagIDs[tagID] = true;
    }

    function ban(address tagID) public {
        allowedTagIDs[tagID] = false;
    }

    function isTagIdAllowed(bytes32 signedData, uint8 v, bytes32 r, bytes32 s) view public returns(bool) {
        address tagId = ecrecover(signedData, v, r, s);
        return allowedTagIDs[tagId];
    }
}
"""

# Actually compile and generate the ABI

solc = subprocess.Popen(['solc', '--combined-json', 'bin,abi', '-'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
solc_output = solc.communicate(contract_source_code)

# See the contract bytecode

bytecode = json.loads(solc_output[0])['contracts']['<stdin>:TagRegistry']['bin']
bytecode

# See the contract ABI interfaces

abi = json.loads(solc_output[0])['contracts']['<stdin>:TagRegistry']['abi']
abi

# ### Deploy

# Instantiate the contract factory and deploy the contract on the network

TagRegistry = w3.eth.contract(abi=abi, bytecode=bytecode)
tx_hash = TagRegistry.constructor().transact()
tx_hash

# Get the Tx receipt (see contractAddress)

tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
tx_receipt

# Initialize the contract instance at the given address

tag_registry = w3.eth.contract(
    address=tx_receipt.contractAddress,
    abi=abi,
)
tag_registry

# # Manufacturing

# A tag is a passive wireless device with an ECDSA IC on board which:
# * stores the secret and never exposes it to the outside
# * reports its public key
# * implements ECDSA SECP256k1 signatures and can sign a 32-byte data object with its secret key and send back the result

# +
from eth_keys import keys

class Tag:
    _priv_key = None  # never exposed outside
    pub_key = None

    def __init__(self, priv_key):
        self._priv_key = keys.PrivateKey(priv_key)

    def get_pub_key(self):
        return self._priv_key.public_key

    def sign(self, data):
        return w3.eth.account.signHash(data, private_key=self._priv_key)
# -

# The manufacturer produces a lot of tags, each with its own random private key.
# We take two tags:
# allowed_tag - will be registered in the registry as genuine
# banned_tag - another tag that will have no record in the registry

# Produce two tags with random secret keys
# use SHA-256 here for randomization; any hash with a 256-bit output would work
import hashlib, os
m = hashlib.sha256()
m.update(os.urandom(1024))
allowed_tag = Tag(m.digest())
m = hashlib.sha256()
m.update(os.urandom(1024))
banned_tag = Tag(m.digest())
allowed_tag, banned_tag

# # Provisioning

# When the tag gets mounted on the rail, it gets authorized as genuine equipment.
# It gets scanned, its address gets calculated and stored in the registry contract.

allowed_tag_address = allowed_tag.get_pub_key().to_checksum_address()
tag_registry.functions.allow(allowed_tag_address).transact()

# When the blockchain transaction gets mined, the record is securely stored in the contract.
# The allowed tag ID is now in the allowed list.

tag_registry.functions.allowedTagIDs(allowed_tag_address).call()

# banned_tag has never been provisioned to the registry, so it's disallowed

tag_registry.functions.allowedTagIDs(banned_tag.get_pub_key().to_checksum_address()).call()

# # Operation

# The scanner found the tag and received the public key of its ECDSA keypair.
# This is an optional step; the tag could also continuously broadcast `.sign()` messages

allowed_tag.get_pub_key()

# To ensure the tag is not a stub radio transmitter broadcasting a public key copied from another ID, the reader authenticates it.

# The reader generates a one-time random number, 32 bytes long,

m = hashlib.sha256()
m.update(os.urandom(1024))
rnd = m.digest()
rnd.hex()

# and sends the number to the tag. The tag signs it and responds with its signature

allowed_tag_signature = allowed_tag.sign(rnd)
allowed_tag_signature

# The receiver verifies the number and its signature via the `isTagIdAllowed` method of the registry

tag_registry.functions.isTagIdAllowed(
    rnd,
    allowed_tag_signature.v,
    allowed_tag_signature.r.to_bytes(32, byteorder='big'),
    allowed_tag_signature.s.to_bytes(32, byteorder='big')).call()

# Next, the reader discovers a tag which wasn't allowed or was revoked (banned)

banned_tag.get_pub_key()

# The reader generates a new one-time random number

m = hashlib.sha256()
m.update(os.urandom(1024))
rnd = m.digest()
rnd.hex()

banned_tag_signature = banned_tag.sign(rnd)
banned_tag_signature

tag_registry.functions.isTagIdAllowed(
    rnd,
    banned_tag_signature.v,
    banned_tag_signature.r.to_bytes(32, byteorder='big'),
    banned_tag_signature.s.to_bytes(32, byteorder='big')).call()
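# The challenge and response steps above can be wrapped in a single helper. This is an added sketch (the `authenticate` function name is my own, not part of the original notebook); it assumes `tag_registry`, `hashlib`, and `os` are in scope as defined above.

def authenticate(tag):
    """Challenge a tag with a fresh 32-byte nonce and check it against the registry."""
    nonce = hashlib.sha256(os.urandom(1024)).digest()   # one-time challenge
    sig = tag.sign(nonce)                               # tag signs with its secret key
    return tag_registry.functions.isTagIdAllowed(
        nonce, sig.v,
        sig.r.to_bytes(32, byteorder='big'),
        sig.s.to_bytes(32, byteorder='big')).call()

# expect True for the provisioned tag and False for the banned one
print(authenticate(allowed_tag), authenticate(banned_tag))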
eth_tag_ecdsa.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: venv
#     language: python
#     name: venv
# ---

# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#DVC-+-MLflow-=-experiments-management" data-toc-modified-id="DVC-+-MLflow-=-experiments-management-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>DVC + MLflow = experiments management</a></span><ul class="toc-item"><li><span><a href="#Create-a-new-log_metrics-pipeline-stage" data-toc-modified-id="Create-a-new-log_metrics-pipeline-stage-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Create a new <code>log_metrics</code> pipeline stage</a></span></li></ul></li><li><span><a href="#Experimenting" data-toc-modified-id="Experimenting-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Experimenting</a></span><ul class="toc-item"><li><span><a href="#Experiment-1" data-toc-modified-id="Experiment-1-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Experiment 1</a></span></li><li><span><a href="#Experiment-2:-params-tuning" data-toc-modified-id="Experiment-2:-params-tuning-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Experiment 2: params tuning</a></span></li></ul></li><li><span><a href="#Show-metrics-and-params-diffs-with-DVC" data-toc-modified-id="Show-metrics-and-params-diffs-with-DVC-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Show metrics and params diffs with DVC</a></span></li></ul></div>
# -

# # DVC + MLflow = experiments management

# ## Create a new `log_metrics` pipeline stage
#
# Then we can add it to DVC as a stage and run it together with the other stages through `dvc repro`

# Install the `catboost` flavour, which is not supported out of the box:

# ! pip install src/utils/cb_flavor/.

# Check that the newly created `log_metrics` pipeline stage is functioning properly:

# ! python -m src.pipelines.log_metrics --config=config/params.yaml
#
# ! mlflow ui

# Add this new pipeline stage into DVC (`dvc.yaml`):

# + language="bash"
# dvc run -f \
#     -n log_metrics \
#     -d data/processed/features.feather \
#     -d models/model.joblib \
#     -d reports/train_metrics.json \
#     -d reports/raw_metrics.csv \
#     -O reports/mlflow_report.md \
#     -p config/params.yaml:train,log_metrics \
#     python -m src.pipelines.log_metrics --config=config/params.yaml
# -

# ! dvc dag

# # Experimenting

# ## Experiment 1

# - Create a new branch `experiment_1`

# ! git checkout -b "experiment_1"

# - Modify any train parameters in `params.yaml`
#
# For example:
#
# ```yaml
# train:
#   estimator: catboost
#   catboost_params:
#     iterations: 10
#     thread_count: 50
#     has_time: true
#     depth: 6
#     learning_rate: 0.15
#     allow_writing_files: false
# ```

# - Run the experiment:

# ! dvc repro

# Note that `dvc.lock` has changed accordingly, reflecting the updated model parameters.
#
# - Commit the experiment:

# ```bash
# git add .
# git commit -m "Make experiment 1"
# ```

# - **Add a tag**

# Use the MLflow `run_id` as a part of the tag

# ```bash
# git tag exp-1-[MLFLOW_RUN_ID] -a
# ```

# ```bash
# # ! git tag exp-1-89997d6ead9845babd4d6263b85bc217 -a
# ```

# ! git tag

# - **DVC Push** using `dvc push`:

# ! dvc push -r myremote

# - **Push to `origin experiments`**:

# ! git push origin experiment_1

# **Create a Merge Request and merge it into `dev`**

# ## Experiment 2: params tuning

# **GitHub**:
#
# - Create an issue: `Add experiment 2`
# - Create a new branch from `experiments`
#
# **Git fetch** the experiment branch to the local workspace
#
# ```bash
# git fetch
# git checkout <EXPERIMENT_BRANCH_NAME>
# ```

# ! git branch

# ! git fetch

# ! git branch -a

# ! git checkout remotes/origin/experiment_2

# Used PyCharm to check out the remote branch (it automatically creates a local copy of a remote branch).

# ! git branch -a

# **Modify any train parameters in `params.yaml`**
#
# For example:
#
# ```yaml
# base:
#   ...
#   exp_name: 'Experiment 2'
#
# catboost_params:
#   iterations: 30
# ```

# - **Run the experiment** via the DVC pipeline

# ! dvc repro

# We can now check the results in the MLflow server and in `mlflow_report.md` to see the current experiment and run details.

# - **Commit the experiment**

# - **Add a tag** using PyCharm

# ! git tag

# - **git push**

# ! git branch

# ! git push

# # Show metrics and params diffs with DVC

# ! dvc metrics show

# !dvc metrics diff exp-1-89997d6ead9845babd4d6263b85bc217 exp-2-dca173dc365e4ab5b4bb7decc0e83bab
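# In addition to metrics, DVC can diff the parameters between the two tagged experiments. This cell is an added example; it assumes a DVC version that provides `dvc params diff` and reuses the two experiment tags created above.

# !dvc params diff exp-1-89997d6ead9845babd4d6263b85bc217 exp-2-dca173dc365e4ab5b4bb7decc0e83bab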
3.2-antong-step-5-MLflow-experiment-management.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
import matplotlib
matplotlib.rcParams['animation.embed_limit'] = 256
import shutil
import os
import numpy as np
import torch
from torch import nn, optim, autograd
import torch.nn.init as init
import torch.nn.functional as F
import torchvision.utils as vutils
import torch.utils.data as udata
import torchvision.datasets as vdatasets
import torchvision.transforms as transforms
from PIL import Image
from tqdm import tqdm
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from jupyterthemes import jtplot
jtplot.style(theme='onedork')
from IPython.display import HTML


# +
class EqualLR:
    def __init__(self, name):
        self.name = name

    def compute_weight(self, module):
        # compute the famous fan-in used by Kaiming initialization
        weight = getattr(module, self.name + '_orig')
        fan_in = weight.data.size(1) * weight.data[0][0].numel()
        return weight * np.sqrt(2 / fan_in)

    @staticmethod
    def apply(module, name):
        fn = EqualLR(name)
        weight = getattr(module, name)
        del module._parameters[name]
        module.register_parameter(name + '_orig', nn.Parameter(weight.data))
        module.register_forward_pre_hook(fn)
        return fn

    def __call__(self, module, input):
        weight = self.compute_weight(module)
        setattr(module, self.name, weight)


def equal_lr(module, name='weight'):
    EqualLR.apply(module, name)
    return module


class EqualConv2d(nn.Module):
    def __init__(self, *args, **kwargs):
        super().__init__()
        conv = nn.Conv2d(*args, **kwargs)
        conv.weight.data.normal_()
        conv.bias.data.zero_()
        self.conv = equal_lr(conv)

    def forward(self, input):
        return self.conv(input)


# -

class PixelNorm(nn.Module):
    # pixel-wise feature normalization
    def __init__(self):
        super().__init__()

    def forward(self, input):
        return input / torch.sqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)


class ConvBlock(nn.Module):
    def __init__(self, in_channel, out_channel, kernel1, pad1, kernel2, pad2, pixel_norm=True):
        super().__init__()
        self.kernel1 = kernel1
        self.kernel2 = kernel2
        self.stride1 = 1
        self.stride2 = 1
        self.pad1 = pad1
        self.pad2 = pad2
        if pixel_norm:
            self.conv = nn.Sequential(EqualConv2d(in_channel, out_channel, self.kernel1, self.stride1, self.pad1),
                                      PixelNorm(),
                                      nn.LeakyReLU(0.2),
                                      EqualConv2d(out_channel, out_channel, self.kernel2, self.stride2, self.pad2),
                                      PixelNorm(),
                                      nn.LeakyReLU(0.2))
        else:
            self.conv = nn.Sequential(EqualConv2d(in_channel, out_channel, self.kernel1, self.stride1, self.pad1),
                                      nn.LeakyReLU(0.2),
                                      EqualConv2d(out_channel, out_channel, self.kernel2, self.stride2, self.pad2),
                                      nn.LeakyReLU(0.2))

    def forward(self, input):
        out = self.conv(input)
        return out


class Generator(nn.Module):
    def __init__(self, code_dim=512):
        super().__init__()
        self.code_norm = PixelNorm()
        self.progression = nn.ModuleList([ConvBlock(512, 512, 4, 3, 3, 1),
                                          ConvBlock(512, 512, 3, 1, 3, 1),
                                          ConvBlock(512, 512, 3, 1, 3, 1),
                                          ConvBlock(512, 512, 3, 1, 3, 1),
                                          ConvBlock(512, 256, 3, 1, 3, 1),
                                          ConvBlock(256, 128, 3, 1, 3, 1)])
        self.to_rgb = nn.ModuleList([nn.Conv2d(512, 3, 1),
                                     nn.Conv2d(512, 3, 1),
                                     nn.Conv2d(512, 3, 1),
                                     nn.Conv2d(512, 3, 1),
                                     nn.Conv2d(256, 3, 1),
                                     nn.Conv2d(128, 3, 1)])

    def forward(self, input, expand=0, alpha=-1):
        out = self.code_norm(input)
        for i, (conv, to_rgb) in enumerate(zip(self.progression, self.to_rgb)):
            if i > 0 and expand > 0:
                upsample = F.interpolate(out, scale_factor=2)
                out = conv(upsample)
            else:
                out = conv(out)
            if i == expand:
                out = to_rgb(out)
                if i > 0 and 0 <= alpha < 1:
                    skip_rgb = self.to_rgb[i - 1](upsample)
                    out = (1 - alpha) * skip_rgb + alpha * out
                break
        return out


class Discriminator(nn.Module):
    def __init__(self):
        super().__init__()
        self.progression = nn.ModuleList([ConvBlock(128, 256, 3, 1, 3, 1, pixel_norm=False),
                                          ConvBlock(256, 512, 3, 1, 3, 1, pixel_norm=False),
                                          ConvBlock(512, 512, 3, 1, 3, 1, pixel_norm=False),
                                          ConvBlock(512, 512, 3, 1, 3, 1, pixel_norm=False),
                                          ConvBlock(512, 512, 3, 1, 3, 1, pixel_norm=False),
                                          ConvBlock(513, 512, 3, 1, 4, 0, pixel_norm=False)])
        self.from_rgb = nn.ModuleList([nn.Conv2d(3, 128, 1),
                                       nn.Conv2d(3, 256, 1),
                                       nn.Conv2d(3, 512, 1),
                                       nn.Conv2d(3, 512, 1),
                                       nn.Conv2d(3, 512, 1),
                                       nn.Conv2d(3, 512, 1)])
        self.n_layer = len(self.progression)
        self.linear = nn.Linear(512, 1)

    def forward(self, input, expand=0, alpha=-1):
        for i in range(expand, -1, -1):
            index = self.n_layer - i - 1
            if i == expand:
                out = self.from_rgb[index](input)
            if i == 0:
                mean_std = input.std(0).mean()
                mean_std = mean_std.expand(input.size(0), 1, 4, 4)
                out = torch.cat([out, mean_std], 1)
            out = self.progression[index](out)
            if i > 0:
                out = F.avg_pool2d(out, 2)
                if i == expand and 0 <= alpha < 1:
                    skip_rgb = F.avg_pool2d(input, 2)
                    skip_rgb = self.from_rgb[index + 1](skip_rgb)
                    out = (1 - alpha) * skip_rgb + alpha * out
        out = out.squeeze(2).squeeze(2)
        out = self.linear(out)
        return out

dataroot = '/home/samael/github/image_generation/dcgan/'
workers = 8
# batch size map {4:32, 8:32, 16:32, 32:16, 64:16, 128:16, 256:12, 512:3, 1024:1}
batch_size = 32
image_size = 4
nc = 3
nz = 512
num_epochs = 300
ngpu = 2
device = torch.device('cuda:0')


# +
def modify_data(root, image_size):
    transform = transforms.Compose([
        transforms.Resize(image_size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    dataset = vdatasets.ImageFolder(root=root, transform=transform)
    return dataset

dataset = modify_data(dataroot, image_size)
dataloader = udata.DataLoader(dataset, batch_size=batch_size,
                              shuffle=True, num_workers=workers)
real_batch = next(iter(dataloader))
plt.figure(figsize=(8,8))
plt.axis("off")
plt.title("Training Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:16], padding=1,
                                         normalize=True, nrow=4).cpu(), (1,2,0)))
plt.show()
# -

netG = Generator(nz).to(device)
netD = Discriminator().to(device)
if (device.type == 'cuda') and (ngpu > 1):
    netG = nn.DataParallel(netG, list(range(ngpu)))
    netD = nn.DataParallel(netD, list(range(ngpu)))

fixed_noise = torch.randn(64, nz, 1, 1, device=device)
criterion = nn.BCEWithLogitsLoss()
real_label = 1
fake_label = 0
lr = 0.001
beta1 = 0.0
g_optimizer = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.99))
d_optimizer = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.99))
experiment_path = 'checkpoint/pggan'

img_list = []
G_losses = []
D_losses = []
D_losses_tmp = []
Grad_penalty = []
i = 0
iters = 0
total_iters = 0
expand = 0
n_critic = 1
step = 0
alpha = 0
CLAMP = 0.01
one = torch.FloatTensor([1]).cuda()
mone = one * -1
print('Training start!')
for epoch in range(num_epochs):
    if epoch != 0 and epoch % 50 == 0:
        alpha = 0
        iters = 0
        expand += 1
        if expand >= 3:
            batch_size = 16
        if expand > 5:
            alpha = 1
            expand = 5
        dataset = modify_data(dataroot, image_size * 2 ** expand)
        dataloader = udata.DataLoader(dataset, batch_size=batch_size,
                                      shuffle=True, num_workers=workers)
    for i, data in enumerate(dataloader):
        real_cpu = data[0].to(device)
        b_size = real_cpu.size(0)
        if step < n_critic:
            netD.zero_grad()
            for p in netD.parameters():
p.requires_grad = True # p.data.clamp_(-CLAMP, CLAMP) output = netD(real_cpu, expand, alpha).view(-1) errD_real = (output.mean() - 0.001 * (output ** 2).mean()).view(1) errD_real.backward(mone) noise = torch.randn(b_size, nz, 1, 1, device=device) fake = netG(noise, expand, alpha) output = netD(fake.detach(), expand, alpha).view(-1) errD_fake = output.mean().view(1) errD_fake.backward(one) eps = torch.rand(b_size, 1, 1, 1, device=device) x_hat = eps * real_cpu.data + (1 - eps) * fake.data x_hat.requires_grad = True hat_predict = netD(x_hat, expand, alpha) grad_x_hat = autograd.grad(outputs=hat_predict.sum(), inputs=x_hat, create_graph=True)[0] grad_penalty = ((grad_x_hat.view(grad_x_hat.size(0), -1).norm(2, dim=1) - 1) ** 2).mean() grad_penalty = 10 * grad_penalty grad_penalty.backward() errD = errD_real - errD_fake d_optimizer.step() D_losses_tmp.append(errD.item()) step += 1 else: for p in netD.parameters(): p.requires_grad = False netG.zero_grad() noise = torch.randn(b_size, nz, 1, 1, device=device) fake = netG(noise, expand, alpha) output = netD(fake, expand, alpha).view(-1) errG = -output.mean().view(1) errG.backward() g_optimizer.step() D_losses.append(np.mean(D_losses_tmp)) G_losses.append(errG.item()) D_losses_tmp = [] step = 0 if (total_iters+1) % 200 == 0: print('[%d/%d][%d/%d](%d)\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f\tGrad: %.4f' % (epoch+1, num_epochs, i+1, len(dataloader), total_iters + 1, errD.item(), errG.item(), errD_real.data.mean(), errD_fake.data.mean(), grad_penalty.data)) # Check how the generator is doing by saving G's output on fixed_noise if (total_iters % 5000 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)): with torch.no_grad(): fake = netG(fixed_noise, expand, alpha).detach().cpu() img = vutils.make_grid(fake, padding=2, normalize=True) vutils.save_image(img, 'checkpoint/pggan/fake_image/fake_iter_{0}.jpg'.format(total_iters)) img_list.append(img) iters += 1 total_iters += 1 if (epoch + 1) % 50 == 0: torch.save(netG.state_dict(), '{0}/netG_epoch_{1}.pth'.format(experiment_path, epoch+1)) torch.save(netD.state_dict(), '{0}/netD_epoch_{1}.pth'.format(experiment_path, epoch+1)) plt.figure(figsize=(20,10)) plt.title("Generator and Discriminator Loss During Training") plt.plot(G_losses,label="G") plt.plot(D_losses,label="D") plt.xlabel("iterations") plt.ylabel("Loss") plt.legend() plt.show() # + fig = plt.figure(figsize=(15,15)) plt.axis("off") ims = [[plt.imshow(np.transpose(i,(1,2,0)), animated=True)] for i in img_list] ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True) HTML(ani.to_jshtml())
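# As a final check we can render one more grid of samples from the trained generator. This cell is an added sketch; it assumes training has finished and that `netG`, `fixed_noise`, `expand`, and `alpha` still hold their final values.

# +
with torch.no_grad():
    final_fake = netG(fixed_noise, expand, alpha).detach().cpu()

plt.figure(figsize=(15, 15))
plt.axis("off")
plt.title("Final generated samples")
plt.imshow(np.transpose(vutils.make_grid(final_fake, padding=2, normalize=True), (1, 2, 0)))
plt.show()
# -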
pggan-101.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Randomized Layer Experiments
#
# In these experiments, explanations for images output from the same model with randomized weights at different layers are compared for similarity using scikit-image's SSIM. The folders of images are outputs from the explainability methods run on the same set of images from the scrambled layer models. This is for sanity check #2.

# +
# %matplotlib inline
import pandas as pd
import numpy as np
from PIL import Image
from skimage.measure import compare_ssim as ssim
import os
from glob import glob
import matplotlib.pyplot as plt
# -

path_dict = {}

# Get paths to all images
for i in range(7):
    path_dict[i] = glob('Explanations_TEST_Scramble_{}/*.png'.format(i), recursive=True)

path_frame = pd.DataFrame.from_dict(path_dict)
path_frame

# Load all images, add images to dataframe
for i in path_dict.keys():
    image_list = []
    path_list = path_frame[i].tolist()
    for j in path_list:
        image_list.append(np.asarray(Image.open(j)))
    path_frame['Folder_{}_image'.format(i)] = image_list

# Add placeholder columns for SSIM; we will compare the SSIM of the unscrambled model to the progressively scrambled models
for a in range(7):
    path_frame['SSIM_0_to_{}'.format(a)] = np.nan

# SSIM of the same image should be one
ssim(path_frame['Folder_0_image'][0], path_frame['Folder_0_image'][0], win_size=3)

# Calculate all SSIMs between all models for all images
for i in path_frame.index:
    for j in range(7):
        path_frame['SSIM_0_to_{}'.format(j)][i] = ssim(path_frame['Folder_0_image'][i],
                                                       path_frame['Folder_{}_image'.format(j)][i],
                                                       win_size=9, multichannel=True,
                                                       use_sample_covariance=True)

path_frame.head()

# +
# update dataframe with a column of the explanation type
path_frame['SHAP or GradCAM'] = [
    'Grad' if 'Grad.png' in path_frame.iloc[i][0] else 'Shap'
    for i in path_frame.index
]

# parse image id from the filename
path_frame['ID'] = [
    os.path.split(i)[1].split('_')[1]
    for i in path_frame[0]
]

# drop all unneeded columns
path_frame = path_frame.drop(list(range(7)) + ['Folder_{}_image'.format(i) for i in range(7)], axis=1)
# -

path_frame.head()

df = path_frame

df_shap = df['SHAP or GradCAM'] == 'Shap'
df_shap = df[df_shap]
df_shap = df_shap.drop('SHAP or GradCAM', axis=1)
df_shap

df_grad = df['SHAP or GradCAM'] == 'Grad'
df_grad = df[df_grad]
df_grad = df_grad.drop('SHAP or GradCAM', axis=1)
df_grad

# Save data and descriptions
df_shap.to_csv('shap_random_layers.csv')
df_grad.to_csv('grad_random_layers.csv')
shap_describe = df_shap.describe()
grad_describe = df_grad.describe()
shap_describe.to_csv('shap_random_layers_stats.csv')
grad_describe.to_csv('grad_random_layers_stats.csv')

# Describe and plot all SHAP data
shap_describe

shap_describe.loc['mean']

shap_dict = {}
shap_dict['SSIM'] = shap_describe.loc['mean']
shap_dict['Layers Scrambled'] = [0, 1, 2, 3, 4, 5, 6]
plt.xlabel('Layers Scrambled')
plt.ylabel('SSIM')
plt.title('')
plt.plot('Layers Scrambled', 'SSIM', data=shap_dict, color='black', lw=1, marker='.')

# Describe and plot all GradCAM data
grad_describe

grad_dict = {}
grad_dict['SSIM'] = grad_describe.loc['mean']
grad_dict['Layers Scrambled'] = [0, 1, 2, 3, 4, 5, 6]
plt.xlabel('Layers Scrambled')
plt.ylabel('SSIM')
plt.title('')
plt.plot('Layers Scrambled', 'SSIM', data=grad_dict, color='black', lw=1, marker='.')

# ### Change to SSIM difference
#
# We now calculate statistics on the differences of SSIMs, from scrambled layers 0 to 1, 1 to 2, etc.

diff_shap = df.loc[df['SHAP or GradCAM'] == 'Shap']
diff_grad = df.loc[df['SHAP or GradCAM'] == 'Grad']

diff_shap['Diff_0_to_1'] = abs(diff_shap['SSIM_0_to_0'] - diff_shap['SSIM_0_to_1'])
diff_shap['Diff_1_to_2'] = abs(diff_shap['SSIM_0_to_1'] - diff_shap['SSIM_0_to_2'])
diff_shap['Diff_2_to_3'] = abs(diff_shap['SSIM_0_to_2'] - diff_shap['SSIM_0_to_3'])
diff_shap['Diff_3_to_4'] = abs(diff_shap['SSIM_0_to_3'] - diff_shap['SSIM_0_to_4'])
diff_shap['Diff_4_to_5'] = abs(diff_shap['SSIM_0_to_4'] - diff_shap['SSIM_0_to_5'])
diff_shap['Diff_5_to_6'] = abs(diff_shap['SSIM_0_to_5'] - diff_shap['SSIM_0_to_6'])

diff_shap.head()

diff_shap = diff_shap[['Diff_0_to_1', 'Diff_1_to_2', 'Diff_2_to_3', 'Diff_3_to_4', 'Diff_4_to_5', 'Diff_5_to_6']]

diff_shap.describe()

# The same for GradCAM
diff_grad['Diff_0_to_1'] = abs(diff_grad['SSIM_0_to_0'] - diff_grad['SSIM_0_to_1'])
diff_grad['Diff_1_to_2'] = abs(diff_grad['SSIM_0_to_1'] - diff_grad['SSIM_0_to_2'])
diff_grad['Diff_2_to_3'] = abs(diff_grad['SSIM_0_to_2'] - diff_grad['SSIM_0_to_3'])
diff_grad['Diff_3_to_4'] = abs(diff_grad['SSIM_0_to_3'] - diff_grad['SSIM_0_to_4'])
diff_grad['Diff_4_to_5'] = abs(diff_grad['SSIM_0_to_4'] - diff_grad['SSIM_0_to_5'])
diff_grad['Diff_5_to_6'] = abs(diff_grad['SSIM_0_to_5'] - diff_grad['SSIM_0_to_6'])

diff_grad = diff_grad[['Diff_0_to_1', 'Diff_1_to_2', 'Diff_2_to_3', 'Diff_3_to_4', 'Diff_4_to_5', 'Diff_5_to_6']]

diff_grad.describe()

diff_grad.describe().to_csv('random_layer_differences_grad.csv')
diff_shap.describe().to_csv('random_layer_differences_shap.csv')
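# To mirror the SSIM plots above, we can also plot the mean per-step differences. This cell is an added sketch; it assumes `diff_shap`, `diff_grad`, and `plt` are still in scope from the cells above.

# Mean absolute change in SSIM at each scramble step, for both explanation methods
steps = [1, 2, 3, 4, 5, 6]   # transition 0->1 is step 1, and so on
plt.xlabel('Scramble step')
plt.ylabel('Mean SSIM difference')
plt.plot(steps, diff_shap.describe().loc['mean'], marker='.', label='SHAP')
plt.plot(steps, diff_grad.describe().loc['mean'], marker='.', label='GradCAM')
plt.legend()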
Randomised_layer_experiments/Random_Layer_SSIM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 # + jupyter={"outputs_hidden": false} from sklearn.datasets import load_iris from matplotlib import pyplot as plt from tsne import bh_sne # + jupyter={"outputs_hidden": false} iris = load_iris() # + jupyter={"outputs_hidden": false} X = iris.data # + jupyter={"outputs_hidden": false} y = iris.target # + jupyter={"outputs_hidden": false} X_2d = bh_sne(X) # + jupyter={"outputs_hidden": false} plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y) # -
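# If the `tsne` package is unavailable, scikit-learn's t-SNE gives a comparable 2-D
# embedding. A minimal sketch (assuming scikit-learn is installed); the exact layout will
# differ from `bh_sne` because t-SNE is stochastic.

# + jupyter={"outputs_hidden": false}
from sklearn.manifold import TSNE

X_2d_alt = TSNE(n_components=2, perplexity=30).fit_transform(X)
plt.scatter(X_2d_alt[:, 0], X_2d_alt[:, 1], c=y)
# -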
examples/iris.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Peak Finder # # <img align="right" width="50%" src="./images/peak_finder_app.png"> # # This application has been designed for the detection of anomalies measured along survey lines. Simple detection parameters based on amplitude and peak migration allow for easy grouping of anomalies across data channels. Additionally, the application lets users compute: # # - Amplitude # - Dip direction # - Mad Tau (TEM data only) # - Anomaly skewness # # While initially designed for TEM data, the same application can be used for the characterization of anomalies of mixed data types (e.g. magnetics, gravity, topography, etc.). # # New user? Visit the [**Getting Started**](https://geoapps.readthedocs.io/en/latest/content/installation.html) page. # # [**Online Documentation**](https://geoapps.readthedocs.io/en/latest/content/applications/peak_finder.html) # # Click on the cell below and press **Shift+Enter** to run the application. # + from geoapps.processing.peak_finder import PeakFinder app = PeakFinder() app() # - # Need help? Contact us at <EMAIL> #
geoapps/applications/Peak_Finder.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:tf2p0alpha] # language: python # name: python3 # --- # + from tensorflow.keras.datasets import mnist from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D from tensorflow.keras.layers import LeakyReLU from tensorflow.keras.layers import UpSampling2D, Conv2D from tensorflow.keras.models import Sequential, Model from tensorflow.keras.optimizers import Adam from tensorflow.keras import initializers import matplotlib.pyplot as plt import sys import numpy as np import tqdm # - # Load the TensorBoard notebook extension # %load_ext tensorboard # + # Set the seed for reproducible results np.random.seed(1000) randomDim = 10 # Load MNIST data (X_train, _), (_, _) = mnist.load_data() X_train = (X_train.astype(np.float32) - 127.5)/127.5 X_train = X_train.reshape(60000, 784) # + # Optimizer adam = Adam(lr=0.0002, beta_1=0.5) generator = Sequential() generator.add(Dense(256, input_dim=randomDim)) #, kernel_initializer=initializers.RandomNormal(stddev=0.02))) generator.add(LeakyReLU(0.2)) generator.add(Dense(512)) generator.add(LeakyReLU(0.2)) generator.add(Dense(1024)) generator.add(LeakyReLU(0.2)) generator.add(Dense(784, activation='tanh')) #generator.compile(loss='binary_crossentropy', optimizer=adam) # - discriminator = Sequential() discriminator.add(Dense(1024, input_dim=784, kernel_initializer=initializers.RandomNormal(stddev=0.02))) discriminator.add(LeakyReLU(0.2)) discriminator.add(Dropout(0.3)) discriminator.add(Dense(512)) discriminator.add(LeakyReLU(0.2)) discriminator.add(Dropout(0.3)) discriminator.add(Dense(256)) discriminator.add(LeakyReLU(0.2)) discriminator.add(Dropout(0.3)) discriminator.add(Dense(1, activation='sigmoid')) discriminator.compile(loss='binary_crossentropy', optimizer=adam) # + # Combined network discriminator.trainable = False ganInput = Input(shape=(randomDim,)) x = generator(ganInput) ganOutput = discriminator(x) gan = Model(inputs=ganInput, outputs=ganOutput) gan.compile(loss='binary_crossentropy', optimizer=adam) dLosses = [] gLosses = [] # + # Plot the loss from each batch def plotLoss(epoch): plt.figure(figsize=(10, 8)) plt.plot(dLosses, label='Discriminative loss') plt.plot(gLosses, label='Generative loss') plt.xlabel('Epoch') plt.ylabel('Loss') plt.legend() plt.savefig('images/gan_loss_epoch_%d.png' % epoch) # Create a wall of generated MNIST images def saveGeneratedImages(epoch, examples=100, dim=(10, 10), figsize=(10, 10)): noise = np.random.normal(0, 1, size=[examples, randomDim]) generatedImages = generator.predict(noise) generatedImages = generatedImages.reshape(examples, 28, 28) plt.figure(figsize=figsize) for i in range(generatedImages.shape[0]): plt.subplot(dim[0], dim[1], i+1) plt.imshow(generatedImages[i], interpolation='nearest', cmap='gray_r') plt.axis('off') plt.tight_layout() plt.savefig('images/gan_generated_image_epoch_%d.png' % epoch) # - def train(epochs=1, batchSize=128): batchCount = int(X_train.shape[0] / batchSize) print ('Epochs:', epochs) print ('Batch size:', batchSize) print ('Batches per epoch:', batchCount) for e in range(1, epochs+1): print ('-'*15, 'Epoch %d' % e, '-'*15) for _ in range(batchCount): # Get a random set of input noise and images noise = np.random.normal(0, 1, size=[batchSize, randomDim]) imageBatch = X_train[np.random.randint(0,
X_train.shape[0], size=batchSize)] # Generate fake MNIST images generatedImages = generator.predict(noise) # print np.shape(imageBatch), np.shape(generatedImages) X = np.concatenate([imageBatch, generatedImages]) # Labels for generated and real data yDis = np.zeros(2*batchSize) # One-sided label smoothing yDis[:batchSize] = 0.9 # Train discriminator discriminator.trainable = True dloss = discriminator.train_on_batch(X, yDis) # Train generator noise = np.random.normal(0, 1, size=[batchSize, randomDim]) yGen = np.ones(batchSize) discriminator.trainable = False gloss = gan.train_on_batch(noise, yGen) # Store loss of most recent batch from this epoch dLosses.append(dloss) gLosses.append(gloss) if e == 1 or e % 20 == 0: saveGeneratedImages(e) # Plot losses from every epoch plotLoss(e) train(200, 128) # %tensorboard --logdir=logs
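# A compatibility aside (not part of the original training run): this notebook targets the
# TF 2.0 alpha, where `Adam(lr=...)` was still accepted. Later TensorFlow releases renamed
# the argument, so on a current install the optimizer would be built as in this sketch:
# adam = Adam(learning_rate=0.0002, beta_1=0.5)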
Chapter 6/VanillaGAN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python3.6 # --- # + [markdown] heading_collapsed=true # # Colon Adenocarcinoma (COAD) # [Jump to the urls to download the GCT and CLS files](#Downloads) # + [markdown] hidden=true # **Authors:** <NAME>, <NAME> and <NAME> # **Contact info:** Email Edwin at [<EMAIL>](mailto:<EMAIL>) or post a question in http://www.genepattern.org/help # + [markdown] hidden=true # This notebook provides the steps to download all the COAD samples from The Cancer Genome Atlas (TCGA) contained in the Genomic Data Commons (GDC) Data portal. These samples can be downloaded as a GCT file and phenotype labels (primary tumor vs normal samples) can be downloaded as a CLS file. These files are compatible with other GenePattern Analyses. # + [markdown] hidden=true # ![image.png](attachment:image.png) # + [markdown] heading_collapsed=true # # Overview # + [markdown] hidden=true # <p>Adenocarcinoma is the most common type of colon cancer.</p> # # <p>Colorectal adenocarcinomas develop in the lining of the colon or rectum, which make up the large intestine. They tend to start in the inner lining and then spread to other layers.</p> # # <p>It starts out as a small polyp, or growth, that's usually harmless at first but can turn into cancer.</p> # # <p>Your glands make fluids that your body needs to stay moist and work well. You get adenocarcinoma when cells in the glands that line your organs grow out of control. They may spread to other places and harm healthy tissue.</p> # # + [markdown] hidden=true # <p><img alt="Related image" src="https://www.researchgate.net/publication/292189985/figure/fig2/AS:323420265959428@1454120808307/Rectal-cancer-detected-on-initial-colonoscopy-4-to-5cm-from-anal-verge.png" style="width: 590px; height: 500px;" /></p> # # + [markdown] heading_collapsed=true # # COAD Statistics # + [markdown] hidden=true # <p>Number of New Cases and Deaths per 100,000:&nbsp;The number of new cases of colorectal cancer was 39.4 per 100,000 men and women per year. The number of deaths was 14.5 per 100,000 men and women per year. These rates are age-adjusted and based on 2011-2015 cases and deaths.</p> # # <p>Lifetime Risk of Developing Cancer: Approximately 4.2 percent of men and women will be diagnosed with colorectal cancer at some point during their lifetime, based on 2013-2015 data.</p> # # <p>Prevalence of This Cancer: In 2015, there were an estimated 1,332,085 people living with colorectal cancer in the United States.</p> # # <figure>&nbsp;</figure> # # + [markdown] hidden=true # ![image.png](attachment:image.png) # https://seer.cancer.gov/statfacts/html/colorect.html # + [markdown] hidden=true # ## Dataset's Demographic Information # + [markdown] hidden=true # <p>TCGA contained 521 COAD&nbsp;samples (304 primary cancer samples and 3&nbsp;normal tissue samples; the rest are ignored) from 456&nbsp;people. Below is a summary of the demographic information represented in this dataset.
If you are interested in viewing the complete study, as well as the files on the GDC Data Portal, you can follow <a href="https://portal.gdc.cancer.gov/repository?facetTab=cases&amp;filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.project.project_id%22%2C%22value%22%3A%5B%22TCGA-UVM%22%5D%7D%7D%2C%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22files.analysis.workflow_type%22%2C%22value%22%3A%5B%22HTSeq%20-%20Counts%22%5D%7D%7D%2C%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22files.experimental_strategy%22%2C%22value%22%3A%5B%22RNA-Seq%22%5D%7D%7D%5D%7D&amp;searchTableTab=cases" target="_blank">this link</a> (these data were gathered on July 17th, 2018).</p> # # + [markdown] hidden=true # ![image.png](attachment:image.png) # - # # Login to GenePattern # # <div class="alert alert-info"> # <h3 style="margin-top: 0;"> Instructions <i class="fa fa-info-circle"></i></h3> # # <ol> # <li>Log in to the *GenePattern Cloud* server.</li> # </ol> # # </div> # + genepattern={"name": "Login", "server": "https://gp-beta-ami.genepattern.org/gp", "type": "auth"} # Requires GenePattern Notebook: pip install genepattern-notebook import gp import genepattern # Username and password removed for security reasons. genepattern.display(genepattern.session.register("https://gp-beta-ami.genepattern.org/gp", "", "")) # - # # Downloading RNA-Seq HTSeq Counts Using TCGAImporter # # Use the TCGAImporter module to download RNA-Seq HTSeq counts from the GDC Data Portal using a Manifest file and a Metadata file. # <p><strong>Input files</strong></p> # # <ul> # <li><em>Manifest file</em>: a file containing the list of RNA-Seq samples to be downloaded.</li> # <li><em>Metadata file</em>: a file containing information about the files present at the GDC Data Portal. Instructions for downloading the Manifest and Metadata files can be found here: <a href="https://github.com/genepattern/TCGAImporter/blob/master/how_to_download_a_manifest_and_metadata.pdf" target="_blank">https://github.com/genepattern/TCGAImporter/blob/master/how_to_download_a_manifest_and_metadata.pdf</a></li> # </ul> # # <p><strong>Output files</strong></p> # # <ul> # <li><em>COAD_TCGA.gct</em> - This is a tab delimited file that contains the gene expression&nbsp;(HTSeq&nbsp;counts) from the samples listed on the Manifest file. For more info on GCT files, look at reference <a href="#References">1</a><em> </em></li> # <li><em>COAD_TCGA.cls</em> - The CLS file defines phenotype labels (in this case Primary Tumor and Normal Sample) and associates each sample in the GCT file with a label.
For more info on CLS files, look at reference <a href="#References">2</a></li> # </ul> # # <div class="alert alert-info"> # <h3 style="margin-top: 0;"> Instructions <i class="fa fa-info-circle"></i></h3> # # <ol> # <li>Load the manifest file into the **Manifest** parameter.</li> # <li>Load the metadata file into the **Metadata** parameter.</li> # <li>Click **run**.</li> # </ol> # # </div> # <p><strong>Estimated run time for TCGAImporter</strong>: ~10 minutes</p> # # + genepattern={"description": "This module imports data from TCGA by taking in a GDC manifest file, downloading the files listed on that manifest, renaming them to be human-friendly, and compiling them into a GCT file to be computer-friendly.", "name": "TCGAImporter", "param_values": {"cls": null, "gct": null, "manifest": ["https://cloud.genepattern.org/gp/users/marylu257/tmp/run2146805051291540277.tmp/COAD_manifest.txt"], "metadata": ["https://cloud.genepattern.org/gp/users/marylu257/tmp/run4917959005523454698.tmp/COAD_metadata.json"], "output_file_name": null, "translate_gene_id": null}, "type": "task"} tcgaimporter_task = gp.GPTask(genepattern.session.get(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00369') tcgaimporter_job_spec = tcgaimporter_task.make_job_spec() tcgaimporter_job_spec.set_parameter("manifest", "https://cloud.genepattern.org/gp/users/marylu257/tmp/run2146805051291540277.tmp/COAD_manifest.txt") tcgaimporter_job_spec.set_parameter("metadata", "https://cloud.genepattern.org/gp/users/marylu257/tmp/run4917959005523454698.tmp/COAD_metadata.json") tcgaimporter_job_spec.set_parameter("output_file_name", "COAD_TCGA") tcgaimporter_job_spec.set_parameter("gct", "True") tcgaimporter_job_spec.set_parameter("translate_gene_id", "True") tcgaimporter_job_spec.set_parameter("cls", "True") genepattern.display(tcgaimporter_task) job35206 = gp.GPJob(genepattern.session.get(0), 35206) genepattern.display(job35206) # + genepattern={"description": "Collapses all probe set values for a gene into a single vector of values", "name": "CollapseDataset", "param_values": {"chip.platform": null, "collapse.mode": null, "dataset.file": null, "output.file.name": null}, "type": "task"} collapsedataset_task = gp.GPTask(genepattern.session.get(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00134') collapsedataset_job_spec = collapsedataset_task.make_job_spec() collapsedataset_job_spec.set_parameter("dataset.file", "https://cloud.genepattern.org/gp/jobResults/32366/COAD_TCGA.gct") collapsedataset_job_spec.set_parameter("chip.platform", "ftp://ftp.broadinstitute.org/pub/gsea/annotations/ENSEMBL_human_gene.chip") collapsedataset_job_spec.set_parameter("collapse.mode", "Maximum") collapsedataset_job_spec.set_parameter("output.file.name", "<dataset.file_basename>.collapsed") genepattern.display(collapsedataset_task) job32374 = gp.GPJob(genepattern.session.get(0), 32374) genepattern.display(job32374) # - # # Downloads # <p>You can download the input and output files of TCGAImporter for this cancer type here:</p> # # <p><strong>Inputs:</strong></p> # # <ul> # <li><a href="https://datasets.genepattern.org/data/TCGA_HTSeq_counts/COAD/COAD_MANIFEST.txt" target="_blank">https://datasets.genepattern.org/data/TCGA_HTSeq_counts/COAD/COAD_MANIFEST.txt</a></li> # <li><a href="https://datasets.genepattern.org/data/TCGA_HTSeq_counts/COAD/COAD_METADATA.json" target="_blank">https://datasets.genepattern.org/data/TCGA_HTSeq_counts/COAD/COAD_METADATA.json</a></li> # </ul> # # <p><strong>Outputs:</strong></p> # # <ul> #
<li><a href="https://datasets.genepattern.org/data/TCGA_HTSeq_counts/KIRP/KIRP_TCGA.gct" target="_blank">https://datasets.genepattern.org/data/TCGA_HTSeq_counts/COAD/COAD_TCGA.gct</a></li> # <li><a href="https://datasets.genepattern.org/data/TCGA_HTSeq_counts/KIRP/KIRP_TCGA.cls" target="_blank">https://datasets.genepattern.org/data/TCGA_HTSeq_counts/COAD/COAD_TCGA.cls</a></li> # </ul> # # If you'd like to download similar files for other TCGA datasets, visit this link: # - https://datasets.genepattern.org/?prefix=data/TCGA_HTSeq_counts/ # + [markdown] heading_collapsed=true # # References # + [markdown] hidden=true # [1] http://software.broadinstitute.org/cancer/software/genepattern/file-formats-guide#GCT # + [markdown] hidden=true # [2] http://software.broadinstitute.org/cancer/software/genepattern/file-formats-guide#CLS # + [markdown] hidden=true # [3] https://www.webmd.com/colorectal-cancer/what-is-adenocarcinoma#1&nbsp;</p> # # [4] https://www.cancercenter.com/colorectal-cancer/types/tab/colorectal-adenocarcinoma/&nbsp;</p> # # [5] https://seer.cancer.gov/statfacts/html/colorect.html</p> #
TCGA_HTSeq_counts/COAD/Colon Adenocarcinoma (COAD).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib as mpl #mpl.use('pdf') import matplotlib.pyplot as plt import numpy as np plt.rc('font', family='serif', serif='Times') plt.rc('text', usetex=True) plt.rc('xtick', labelsize=6) plt.rc('ytick', labelsize=6) plt.rc('axes', labelsize=6) #axes.linewidth : 0.5 plt.rc('axes', linewidth=0.5) #ytick.major.width : 0.5 plt.rc('ytick.major', width=0.5) plt.rcParams['xtick.direction'] = 'in' plt.rcParams['ytick.direction'] = 'in' plt.rc('ytick.minor', visible=True) #plt.style.use(r"..\..\styles\infocom.mplstyle") # Insert your save location here # width as measured in inkscape fig_width = 3.487 fig_height = fig_width / 1.618 #fig_height = fig_width / 1.3 / 2 # + small_link_data = np.zeros((5, 10)) small_node_data = np.zeros((5, 10)) print(small_link_data) with open("failure/small-link-utilization.csv", "r") as f: f1 = f.readlines() for index in range(1, 11): line = f1[index] line = line.split(",") for i in range(0, 5): small_link_data[i, index-1] = line[i] print(line) print(type(f1)) with open("failure/small-node_utilization.csv", "r") as f: f1 = f.readlines() for index in range(1, 11): line = f1[index] line = line.split(",") for i in range(0, 5): small_node_data[i, index-1] = line[i] print(line) print(type(f1)) print(small_link_data) print(type(small_link_data[0,0])) #x = np.arange(0.0, 3*np.pi , 0.1) #plt.plot(x, np.sin(x)) #plt.show() small_link_data = 100 * small_link_data small_node_data = 100 * small_node_data # + x = [0, '60', '70', '80'] x_tick_label_list = ['60', '70', '80'] #objective = [[455, 521, 566, 566, 630], # CC-ILP # [436, 491, 562, 562, 623],# proposed # [346, 465, 502, 502, 530]] # NC objective = [[1165, 2343, 3675], [2047, 3400, 4176], [3508, 4739, 5055], [3753, 4769, 5153], [4433, 5920, 5953]] time_means = [[0.36, 0.17, 0.33], [5.28, 1.81, 40.84], [2613.23, 8.17, 45313.95], [2628.48, 18.8, 181778.9], [43.23, 7.00, 33.4]]; error = [[0, 0, 0.0564, 20, 0], [0, 0, 0, 0, 0], [0, 0, 0.0494, 24, 0]] # + N = 3 ind = np.arange(N) width = 1 / 6 fig, (ax1) = plt.subplots(1, 1) #ax1.bar(x, objective) #ax1.bar(x, objective[0]) label_list = ['No-rec', 'Link-rec', 'Lim-rec(3, 0)', 'Lim-rec(3, 1)', 'Any-rec'] patterns = ('//////','\\\\\\','---', 'ooo', 'xxx', '\\', '\\\\','++', '*', 'O', '.') plt.rcParams['hatch.linewidth'] = 0.25 # previous pdf hatch linewidth #plt.rcParams['hatch.linewidth'] = 1.0 # previous svg hatch linewidth #plt.rcParams['hatch.color'] = 'r' for i in range(5): if i == 2: ax1.bar(ind + width * (i-2), objective[i], width, label=label_list[i], hatch=patterns[i], alpha=0.7, tick_label=x_tick_label_list) #yerr=error[i], ecolor='black', capsize=1) else: ax1.bar(ind + width * (i-2), objective[i], width, label=label_list[i], hatch=patterns[i], alpha=0.7) #ax1.set_xticklabels(x) ax1.set_ylabel('Accumulated weighted working slice resources') ax1.set_xlabel('Percentage of working links and nodes in substrate network (\%)') ax1.set_ylim(0, 7000) #ax1.xaxis.set_label_coords(0.5,-0.17) #ax1.yaxis.set_label_coords(-0.17,0.5) ax1.legend(loc='upper left',# bbox_to_anchor=(0.5, 1.2), ncol=2, prop={'size': 5}) #ax1.legend(loc='best') fig.set_size_inches(fig_width, fig_height) #mpl.pyplot.subplots_adjust(wspace = 0.3) fig.subplots_adjust(left=.13, bottom=.13, right=.97, top=.95) #ax1.grid(color='b', ls = '-.', lw = 0.25) ax1.grid(lw = 
0.25) plt.show() fig.savefig('test-heuristic-failure.pdf')
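# The manual line-by-line CSV parsing above can be collapsed into a single `np.loadtxt`
# call. A minimal sketch, assuming the same file layout (one header row followed by ten
# rows with at least five comma-separated values); the transpose restores the (5, 10)
# orientation used above.
small_link_data_alt = 100 * np.loadtxt("failure/small-link-utilization.csv",
                                       delimiter=",", skiprows=1,
                                       usecols=range(5)).T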
python-plot/slice-restoration/compare-heuristic-failure.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + from openpyxl import load_workbook from bs4 import BeautifulSoup from selenium import webdriver from time import sleep import csv from random import randint import json, io from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import Select from selenium.webdriver.support.ui import WebDriverWait from selenium.common.exceptions import TimeoutException from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import NoSuchElementException from selenium.common.exceptions import NoAlertPresentException from selenium.webdriver.common.action_chains import ActionChains import urllib import urllib3 import requests urllib3.disable_warnings() header = {'User-Agent':'Mozilla/6.0'} chrome_options = webdriver.ChromeOptions() chrome_options.add_argument('--user-agent=Mozilla/6.0') chrome_options.add_argument("user-data-dir=selenium") driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=r'chromedriver.exe') # cookies = json.load(open('cookiesdict.txt')) # for cookie in cookies: # driver.add_cookie(cookie) # - driver.get('https://www.tripadvisor.com/Hotel_Review-g295424-d1022759-Reviews-Atlantis_The_Palm-Dubai_Emirate_of_Dubai.html') for i in range(100): driver.refresh() sleep(3) # + driver.get('https://web.binck.nl/logon/') sleep(5) username=driver.find_element_by_id('UserName') username.send_keys('vars001') #you can enter credential here for automatic login sleep(5) Password=driver.find_element_by_id('Password') Password.send_keys('14<PASSWORD>') sleep(5) driver.find_elements_by_class_name('call-to-action')[0].click() # - driver.find_element_by_link_text('Inloggen zonder code').click() driver.find_element_by_tag_name('h6').click() driver.find_element_by_link_text("Turbo's").click() # scratch note: (data-stream-id="1412425") td so1=BeautifulSoup(driver.page_source, 'lxml') so1.find_all('td') # + model=driver.find_elements_by_class_name('selectSearchMenuItem') modelstorage=[] modelstoragename=[] for q in range(len(model)): if len(model[q].text)>0: modelstorage.append(q) modelstoragename.append(model[q].text) ww=0 for w in modelstorage: model_name0=driver.find_elements_by_class_name('selectSearchMenuItem')[w].text print model_name0,w # - savedata1=[] driver.find_element_by_xpath('//*[@id="make"]').click() sleep(1) Brands=driver.find_elements_by_class_name('selectSearchMenuItem') Brandsstoragename=[] for z in range(len(Brands)): Brandsstoragename.append(Brands[z].text) ii=0 for i in range(0,5): Brand_Name=Brandsstoragename[i] if ii>0: driver.find_element_by_xpath('//*[@id="make"]').click() Brands=driver.find_elements_by_class_name('selectSearchMenuItem') Brands[i].click() sleep(1) ii=1 driver.find_element_by_xpath('//*[@id="model"]').click() sleep(1) model=driver.find_elements_by_class_name('selectSearchMenuItem') modelstorage=[] modelstoragename=[] for q in range(len(model)): if len(model[q].text)>0: modelstorage.append(q) modelstoragename.append(model[q].text) ww=0 for w in modelstorage: model_name0=driver.find_elements_by_class_name('selectSearchMenuItem')[w] model_name=modelstoragename[modelstorage.index(w)] if ww>0: driver.find_element_by_xpath('//*[@id="model"]').click() model_name0.click() sleep(1) ww=1
driver.find_element_by_xpath('//*[@id="year"]').click() sleep(1) year=driver.find_elements_by_class_name('selectSearchMenuItem') yearstorage=[] yearnamestorage=[] for e in range(len(year)): if len(year[e].text)>0: yearstorage.append(e) yearnamestorage.append(year[e].text) rr=0 for r in yearstorage: year_name0=driver.find_elements_by_class_name('selectSearchMenuItem')[r] year_name=yearnamestorage[yearstorage.index(r)] if rr>0: driver.find_element_by_xpath('//*[@id="year"]').click() year_name0.click() sleep(1) rr=1 if year_name=='Other': Email=driver.find_element_by_name('email') sleep(1) Email.clear() sleep(1) Email.send_keys('<EMAIL>') print (Brand_Name,model_name,year_name,trim_name,transmission_name) driver.find_element_by_class_name('buttonText').click() sleep(4.5) so1=BeautifulSoup(driver.page_source, 'lxml') evaluationQuoteCard=so1.find_all('div',class_="evaluationQuoteCard") QuoteFirst=evaluationQuoteCard[0].find(class_="evaluationQuoteCardQuoteFirst").text QuoteMain=evaluationQuoteCard[0].find(class_="evaluationQuoteCardQuoteMain").text CarInfo=evaluationQuoteCard[0].find(class_="evaluationQuoteCardCarInfo").text dataset=['-']*8 dataset[0]=Brand_Name dataset[1]=model_name dataset[2]=year_name dataset[3]='-' dataset[4]='-' dataset[5]=QuoteFirst dataset[6]=QuoteMain dataset[7]=CarInfo print dataset savedata1.append(dataset) driver.back() sleep(2) else: driver.find_element_by_xpath('//*[@id="trim"]').click() sleep(1) trim=driver.find_elements_by_class_name('selectSearchMenuItem') trimstorage=[] trimstoragename=[] for t in range(len(trim)): if len(trim[t].text)>0: trimstorage.append(t) trimstoragename.append(trim[t].text) yy=0 for y in trimstorage: trim_name0=driver.find_elements_by_class_name('selectSearchMenuItem')[y] trim_name=trimstoragename[trimstorage.index(y)] if yy>0: driver.find_element_by_xpath('//*[@id="trim"]').click() trim_name0.click() sleep(1) yy=1 if trim_name=='Other': Email=driver.find_element_by_name('email') sleep(1) Email.clear() sleep(1) Email.send_keys('<EMAIL>') print (Brand_Name,model_name,year_name,trim_name,transmission_name) driver.find_element_by_class_name('buttonText').click() sleep(4.5) so1=BeautifulSoup(driver.page_source, 'lxml') evaluationQuoteCard=so1.find_all('div',class_="evaluationQuoteCard") QuoteFirst=evaluationQuoteCard[0].find(class_="evaluationQuoteCardQuoteFirst").text QuoteMain=evaluationQuoteCard[0].find(class_="evaluationQuoteCardQuoteMain").text CarInfo=evaluationQuoteCard[0].find(class_="evaluationQuoteCardCarInfo").text dataset=['-']*8 dataset[0]=Brand_Name dataset[1]=model_name dataset[2]=year_name dataset[3]=trim_name dataset[4]='-' dataset[5]=QuoteFirst dataset[6]=QuoteMain dataset[7]=CarInfo print dataset savedata1.append(dataset) driver.back() sleep(2) else: driver.find_element_by_xpath('//*[@id="transmission"]').click() sleep(1) transmission=driver.find_elements_by_class_name('selectSearchMenuItem') transmissionstorage=[] transmissionstoragename=[] for u in range(len(transmission)): if len(transmission[u].text)>0: transmissionstorage.append(u) transmissionstoragename.append(transmission[u].text) oo=0 for o in transmissionstorage: transmission_name0=driver.find_elements_by_class_name('selectSearchMenuItem')[o] transmission_name=transmissionstoragename[transmissionstorage.index(o)] if oo>0: driver.find_element_by_xpath('//*[@id="transmission"]').click() transmission_name0.click() sleep(1) oo=1 Email=driver.find_element_by_name('email') sleep(1) Email.clear() sleep(1) Email.send_keys('<EMAIL>') print 
(Brand_Name,model_name,year_name,trim_name,transmission_name) driver.find_element_by_class_name('buttonText').click() sleep(4.5) so1=BeautifulSoup(driver.page_source, 'lxml') evaluationQuoteCard=so1.find_all('div',class_="evaluationQuoteCard") QuoteFirst=evaluationQuoteCard[0].find(class_="evaluationQuoteCardQuoteFirst").text QuoteMain=evaluationQuoteCard[0].find(class_="evaluationQuoteCardQuoteMain").text CarInfo=evaluationQuoteCard[0].find(class_="evaluationQuoteCardCarInfo").text dataset=['-']*8 dataset[0]=Brand_Name dataset[1]=model_name dataset[2]=year_name dataset[3]=trim_name dataset[4]=transmission_name dataset[5]=QuoteFirst dataset[6]=QuoteMain dataset[7]=CarInfo print dataset savedata1.append(dataset) driver.back() sleep(2) # + import warnings from openpyxl import Workbook wb = Workbook(write_only=True) ws = wb.create_sheet() # write each scraped record as a row for irow in savedata1: ws.append(irow) # save the file wb.save('belimobilgue.co.idvolvo.xlsx') # + #[u'Isuzu', u'Panther', u'1997', u'Other', '-', u'Rp 34,000,000', u'Rp 42,000,000', u'1997 Isuzu Panther'] # - len(savedata1) # + # [u'Chery', u'QQ', u'2012', u'Other', '-', u'Rp 387,500,000', u'Rp 479,000,000', u'2012 Chery QQ'] # repeat till start from 0. repeat 4 entries # - cookiesdict=driver.get_cookies() cookiesdict import json, io with io.open('cookiesdict.txt', 'w', encoding='utf8') as json_file: data3 = json.dumps(cookiesdict, ensure_ascii=False, encoding='utf8',indent=4, sort_keys=True) json_file.write(unicode(data3))
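# +
# An optional hardening sketch: the fixed sleep() calls above are fragile on slow pages.
# Selenium's explicit waits (WebDriverWait, EC, and By are already imported at the top)
# block only until the element is actually clickable. The element ids ('make', 'model',
# 'year', ...) are the ones used in the loops above.
def wait_and_click(element_id, timeout=10):
    WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((By.ID, element_id))
    ).click()

# e.g. wait_and_click('make') instead of find_element_by_xpath(...).click() + sleep(1)
# -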
belimobilgue.co.id/belimobilgue_code.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.6.2 # language: julia # name: julia-1.6 # --- # # GMM # # This notebook shows a simple example of how GMM can be used to estimate model parameters. It starts with an exactly identified case and then moves on to different ways of estimating an overidentified case (pre-defined weighting matrix, recombining the moment conditions, optimal weighting matrix). # ## Load Packages and Extra Functions # + using Printf, DelimitedFiles, Statistics, LinearAlgebra, Optim, NLsolve include("jlFiles/printmat.jl") include("jlFiles/CovNWFn.jl") # - # # Loading Data # + x = readdlm("Data/FFmFactorsPs.csv",',',skipstart=1) #start on line 2, column 1 x = x[:,2] #excess market returns, in % T = size(x,1) # - # # GMM I # # This section describes the basic (exactly identified) GMM, that is, when we have as many moment conditions as parameters. (In this case GMM is the same as the classical method of moments.) # ## Traditional Estimation of Mean and Variance # # The next cell applies the traditional way of estimating the mean and the variance. # + μ = mean(x) σ² = var(x,corrected=false) #"false" to use 1/T formula par_a = [μ;σ²] printblue("Traditional estimates:\n") xx = [par_a [sqrt((σ²/T));sqrt(2*σ²^2/T)]] colNames = ["coef","std"] parNames = ["μ","σ²"] printmat(xx;colNames,rowNames=parNames) # ; since keywords with same name # - # ## GMM Point Estimates and Distribution # # To estimate the mean and variance of $x_{t}$, use the following moment conditions # # $ # \frac{1}{T}\sum\nolimits_{t=1}^{T}g_{t}=0 \: \text{ where } # $ # # $ # g_{t}(\mu,\sigma^{2})= # \begin{bmatrix} # x_{t}-\mu\\ # (x_{t}-\mu)^{2}-\sigma^{2} # \end{bmatrix}. # $ # # The parameter values ($\mu,\sigma^2$) that make these moment conditions hold must be the same as from the traditional method. In general, we have to solve the moment conditions for the GMM estimates. Although this is simple in this linear case, it may be trickier in non-linear models. Therefore, the code below solves for the parameters by a numerical method (and also double checks that they indeed are the same as before). # # The distribution of the estimates is # # $ # \sqrt{T}(\hat{\mu}-\mu_{0})\overset{d}{\rightarrow}N(0,V) # \: \text{ where } \: # V = (D_{0}^{\prime}S_{0}^{-1}D_{0}) ^{-1} # $ # # Clearly, $D_{0}=-\textrm{I}$ and if data is iid then $S_{0}=\text{Var}(g_{t})$.
""" Gmm2MomFn(par,x) Calculate traditional 2 moment conditions for estimating [μ,σ²] # Input - `par::Vector`: [μ,σ²] - `x::Vector`: T-vector with data # Output - `g::Matrix`: Tx2, moment conditions - `gbar::Vector`: 2-vector, means of each column in g """ function Gmm2MomFn(par,x) (μ,σ²) = (par[1],par[2]) g = [(x .- μ) ((x .- μ).^2 .- σ²)] #Tx2 gbar = vec(mean(g,dims=1)) #2-element vector return g,gbar end # + Sol = nlsolve(p->Gmm2MomFn(p,x)[2],par_a) #numerically solve for the estimates par_1 = Sol.zero printblue("GMM estimates:") printmat(par_1,rowNames=parNames) (g,gbar) = Gmm2MomFn(par_1,x) #Tx2, moment conditions printblue("Checking if mean of moment conditions = 0") printmat(gbar,rowNames=["g₁","g₂"]) # + D = -I(2) #Jacobian, does not really matter here S = CovNWFn(g,1)/T #Newey-West with 1 lag V1 = inv(D'inv(S)*D) printblue("GMM estimates:\n") xx = [par_1 sqrt.(diag(V1/T))] printmat(xx;colNames,rowNames=parNames) printstyled("Compare with the traditional estimates",color=:red,bold=true) # - # # GMM II # # This section expands the GMM calculations by doing an overidentified case: more moment conditions than parameters. # # Warning: some of the variables (```g,S,etc```) are overwritten with new values. # ## The Moment Conditions # # If $x_{t}$ is $N(\mu,\sigma^{2})$, then the following moment conditions should # all be zero (in expectation) # # $ # g_{t}(\mu,\sigma^{2})= # \begin{bmatrix} # x_{t}-\mu\\ # (x_{t}-\mu)^{2}-\sigma^{2}\\ # (x_{t}-\mu)^{3}\\ # (x_{t}-\mu)^{4}-3\sigma^{4} # \end{bmatrix}. # $ # # The first moment condition defines the mean $\mu$, the second defines the # variance $\sigma^{2}$, while the third and forth are the skewness and excess # kurtosis respectively. # + """ Gmm4MomFn(par,x) Calculate 4 moment conditions for estimating [μ,σ²] # Input - `par::Vector`: [μ,σ²] - `x::Vector`: T-vector with data """ function Gmm4MomFn(par,x) (μ,σ²) = (par[1],par[2]) g = [(x .- μ) ((x .- μ).^2 .- σ²) ((x .- μ).^3) ((x .- μ).^4 .- 3*σ²^2)] #Tx4 gbar = vec(mean(g,dims=1)) #4-element vector return g,gbar end """ DGmm4MomFn(par,x) Calculate (analytical) Jacobian of Gmm4MomFn(), 4x2 """ function DGmm4MomFn(par,x) (μ,σ²) = (par[1],par[2]) D = [-1 0 ; #Jacobian of Gmm4MomFn, 4x2 -2*mean(x.-μ) -1 ; -3*mean((x.-μ).^2) 0 ; -4*mean((x.-μ).^3) -6*σ²] return D end # - # ## GMM: Minimizing gbar'W*gbar # # # The following code applies a numerical method to solve a minimization problem with the weighting matrix # # $ # W= # \begin{bmatrix} # 1 & 0 & 0 & 0\\ # 0 & 1 & 0 & 0\\ # 0 & 0 & 0 & 0\\ # 0 & 0 & 0 & 0 # \end{bmatrix} # $ # # The results should be the same (or at least very close to) the previous results, since the $W$ matrix puts all weight on the first two moments (basically mimicking the estimations above). Changing $W$, for instance, by setting $W[3,3]=0.0001$ will give other estimates. # # We define the loss function as # # $ # \bar{g}'W\bar{g} # $ # # As a practical matter, it is often the case that a derivative-free method works better than other optimization routines. """ Gmm4MomLossFn(par,x,W=1) Calculate loss function from moment conditions in Gmm4MomFn() and a weighting matrix W. 
# Input -`par`: see Gmm4MomFn -`x`: see Gmm4MomFn -`W::Number or Matrix`: weighting matrix (or just any positive number) # Output - `Loss:: Number`: Loss function value """ function Gmm4MomLossFn(par,x,W=1) (g,gbar) = Gmm4MomFn(par,x) Loss = 1.0 + gbar'W*gbar #to be minimized return Loss end # + W = diagm(0=>[1.0,1.0,0.0,0.0]) #weighting matrix, try changing it #W[3,3] = 0.0001 Sol = optimize(par->Gmm4MomLossFn(par,x,W),par_a) par_2 = Optim.minimizer(Sol) printblue("GMM estimates from minimizing gbar'W*gbar:") printmat(par_2,rowNames=parNames) # + momNames = ["g₁","g₂","g₃","g₄"] D = DGmm4MomFn(par_2,x) #Jacobian, 4x2 printblue("The Jacobian is:\n") printmat(D,rowNames=momNames,colNames=parNames) g, = Gmm4MomFn(par_2,x) #Tx4, moment conditions, evaluated at point estimates S = CovNWFn(g,1)/T #variance of sqrt(T)*gbar, NW with 1 lag V2 = inv(D'W*D)*D'W*S*W'D*inv(D'W*D) #see lecture notes for V2 printblue("Weighting matrix:\n") printmat(W,colNames=momNames,rowNames=momNames) printblue("GMM estimates (gbar'W*gbar):\n") xx = [par_2 sqrt.(diag(V2/T))] printmat(xx;colNames,rowNames=parNames) # - # ## GMM: A*g = 0 # # # The following code estimates the parameters (mean and # variance) by combining the 4 original moment conditions in $\bar{g}$ into 2 # effective moment conditions, $A\bar{g}$, where $A$ is a $2\times4$ matrix # # $ # A= # \begin{bmatrix} # 1 & 0 & 0 & 0\\ # 0 & 1 & 0 & 0 # \end{bmatrix} # $ # # This particular $A$ matrix implies that we use the classical # estimators of the mean and variance. Changing $A$, for instance by setting $A[1,3]=0.001$, will give different estimates. # + A = [1 0 0 0; #A in A*gbar=0 (here: all weight on first two moments) 0 1 0 0] #try setting A[1,3] = 0.001 Sol = nlsolve(p->A*Gmm4MomFn(p,x)[2],par_a) #solve for the GMM estimates par_3 = Sol.zero printblue("GMM estimates from A*gbar=0:") printmat(par_3,rowNames=parNames) (g,gbar) = Gmm4MomFn(par_3,x) #Tx4, moment conditions. Warning: overwriting old g q = size(g,2) printblue("\nChecking if mean of A*g_t = 0") printmat(A*gbar,rowNames=["A₁g","A₂g"]) # + D = DGmm4MomFn(par_3,x) #Jacobian printblue("The Jacobian is:") printmat(D,colNames=parNames,rowNames=momNames) S = CovNWFn(g,1)/T V3 = inv(A*D)*A*S*A'inv(A*D)' #see lecture notes for V3 printblue("GMM estimates (A*gbar):\n") xx = [par_3 sqrt.(diag(V3/T))] printmat(xx;colNames,rowNames=parNames) printstyled("Compare with the exactly identified GMM (above)",color=:red,bold=true) # - # # GMM: Minimizing gbar'W*gbar, Iterating over W # # # The following code iterates over the weighting matrix by using $W=S^{-1}$, where # # $S = \text{Cov}(\sqrt{T}\bar{g})$ # # is from the previous iteration.
# + println("\niterated GMM, using optimal weighting matrix, starting with S from previous estimation") (par_4,par0) = (copy(par_1),copy(par_1)) (Δpar,i) = (Inf,1) println("\n\niterating over W starting with the W choice above") while (Δpar > 1e-3) || (i < 2) #require at least one iteration #global Δpar, par_4, par0, i, W, S #only needed in script local Sol, g println("-------iteration $i, old and new parameters--------") W = inv(S) Sol = optimize(par->Gmm4MomLossFn(par,x,W),par0) par_4 = Optim.minimizer(Sol) printlnPs(par0') printlnPs(par_4') g, = Gmm4MomFn(par_4,x) S = CovNWFn(g,1)/T Δpar = maximum(abs,par_4-par0) #same as maximum(abs.(par_4-par0)) par0 = copy(par_4) #par0=par_4 would make them always identical i = i + 1 end V2 = inv(D'W*D)*D'W*S*W'D*inv(D'W*D) #if non-optimal weighting matrix V1 = inv(D'inv(S)*D) #with optimal weighting matrix printblue("\nGMM estimates (gbar'W*gbar, iteration over W):") xx = [par_4 sqrt.(diag(V2/T)) sqrt.(diag(V1/T))] printmat(xx,colNames=[colNames;"std ver. 2"],rowNames=parNames,width=12) # + printblue("W matrix used in the last iteration, (times 10000):\n") printmat(W*10000,colNames=momNames,rowNames=momNames) # -
Ch25_GMM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ocandata # language: python # name: ocandata # --- # + # default_exp datatools # - # # datatools # > API details. #hide from nbdev.showdoc import * # + # export import requests, zipfile, io import os import hashlib def hash(data: str): return hashlib.sha1(data.encode()).hexdigest() def unzip_data(zip_url: str, path="."): response = requests.get(zip_url) zip_file = zipfile.ZipFile(io.BytesIO(response.content)) zip_file.extractall(path=path) return tuple([os.path.join(path, f) for f in zip_file.namelist()]) def get_filename_from_url(path: str): """ Get filename from path """ return path.split("/")[-1] def download_file(url: str, path="."): response = requests.get(url) filename = get_filename_from_url(url) if path: filename = os.path.join(path, filename) with open(filename, "wb") as fd: for chunk in response.iter_content(chunk_size=128): fd.write(chunk) return filename # - from nbdev.export import * notebook2script()
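# A quick usage sketch for the helpers above (the URL is hypothetical; `download_file`
# and `unzip_data` need network access, while the other two run locally):
print(get_filename_from_url("https://example.com/data/archive.zip"))  # -> archive.zip
print(hash("some text"))  # -> sha1 hex digest of the string
# files = unzip_data("https://example.com/data/archive.zip", path="data")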
05_datatools.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 101 Pandas Exercises for Data Analysis # ## Index # #### 61. How to know the maximum possible correlation value of each column against other columns? # #### 62. How to create a column containing the minimum by maximum of each row? # #### 63. How to create a column that contains the penultimate value in each row? # #### 64. How to normalize all columns in a dataframe? # #### 65. How to compute the correlation of each row with the succeeding row? # #### 66. How to replace both the diagonals of dataframe with 0? # #### 67. How to get the particular group of a groupby dataframe by key? # #### 68. How to get the n’th largest value of a column when grouped by another column? # #### 69. How to compute grouped mean on pandas dataframe and keep the grouped column as another column (not index)? # #### 70. How to join two dataframes by 2 columns so they have only the common rows? # # ## 61. How to know the maximum possible correlation value of each column against other columns? import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline df = pd.DataFrame(np.random.randint(1,100, 80).reshape(8, -1), columns=list('pqrstuvwxy'), index=list('abcdefgh')) df plt.figure(figsize = (15,10)) sns.heatmap(df.corr(), annot=True) plt.show() # Solution abs_corrmat = np.abs(df.corr()) abs_corrmat max_corr = abs_corrmat.apply(lambda x: sorted(x)[-2]) max_corr print('Maximum Correlation possible for each column: ', np.round(max_corr.tolist(), 2)) df = pd.read_csv('Cars93_miss.csv') df.head() plt.figure(figsize = (15,10)) sns.heatmap(df.corr(), annot=True) plt.show() abs_corrmat2 = np.abs(df.corr()) abs_corrmat2 abs_corrmat2.iloc[:,1] sorted(abs_corrmat2.iloc[:,1]) sorted(abs_corrmat2.iloc[:,1])[-2] max_corr2 = abs_corrmat2.apply(lambda x: sorted(x)[-2]) max_corr2 print('Maximum Correlation possible for each column: ', np.round(max_corr2.tolist(), 2)) # ## 62. How to create a column containing the minimum by maximum of each row? df = pd.DataFrame(np.random.randint(1, 100, 80).reshape(8, -1), columns = list('pqrstuvwxy'), index = list('abcdefgh')) df # Solution 1 min_by_max = df.apply(lambda x: np.min(x)/np.max(x), axis=1) df # Solution 2 min_by_max = np.min(df, axis=1)/np.max(df, axis=1) df_min = df.loc[:,:].min(axis = 1) df_min df_max = df.loc[:,:].max(axis = 1) df_max df['MinbyMax'] = np.min(df, axis=1)/np.max(df, axis=1) df df['Min_Max'] = df_min/df_max df # ## 63. How to create a column that contains the penultimate value in each row? df = pd.DataFrame(np.random.randint(10, 40, 60).reshape(-1, 4)) df ultimate = np.max(df, axis = 1) ultimate df['Ultimate'] = ultimate df # Solution out = df.apply(lambda x: x.sort_values().unique()[-2], axis=1) df['Penultimate'] = out print(df) # ## 64. How to normalize all columns in a dataframe? # #### Normalize all columns of df by subtracting the column mean and dividing by the standard deviation. # #### Range all columns of df such that the minimum value in each column is 0 and max is 1.
df = pd.read_csv('Cars93_miss.csv') new_df = df.copy() new_df.info() new_df.columns num_df = new_df[['Min.Price', 'Price', 'Max.Price', 'MPG.city', 'MPG.highway', 'EngineSize', 'Horsepower', 'RPM', 'Rev.per.mile', 'Fuel.tank.capacity', 'Passengers', 'Length', 'Wheelbase', 'Width', 'Turn.circle', 'Rear.seat.room', 'Luggage.room', 'Weight']] num_df.head() num_df.mean() num_df.std() NEW_df = (num_df - num_df.mean())/ num_df.std() NEW_df.head() NEW_df.shape NEW_df.describe().T NEW = NEW_df[(NEW_df >= 0) & (NEW_df <= 1)] NEW.head(10) NEW2 = (NEW_df.max()-NEW_df)/(NEW_df.max()-NEW_df.min()) NEW2.head() NEW2.describe().T # Solution 2 df = pd.DataFrame(np.random.randint(1,100, 80).reshape(8, -1)) df # Solution Q1 out1 = df.apply(lambda x: ((x - x.mean())/x.std()).round(2)) print('Solution Q1\n',out1) out1.describe().T # Solution Q2 out2 = df.apply(lambda x: ((x.max() - x)/(x.max() - x.min())).round(2)) print('Solution Q2\n', out2) out2.describe().T # ## 65. How to compute the correlation of each row with the succeeding row? df = pd.DataFrame(np.random.randint(1,100, 80).reshape(8, -1)) df # Solution [df.iloc[i].corr(df.iloc[i+1]).round(2) for i in range(df.shape[0])[:-1]] a = df.iloc[:1,:] b = df.iloc[1:2, :] c = df.iloc[2:3, :] np.corrcoef(a,b) # ## 66. How to replace both the diagonals of dataframe with 0? df = pd.DataFrame(np.random.randint(1,100, 100).reshape(10, -1)) df # Solution for i in range(df.shape[0]): df.iat[i, i] = 0 df.iat[df.shape[0]-i-1, i] = 0 df # ## 67. How to get the particular group of a groupby dataframe by key? # + active="" # This is a question related to understanding grouped dataframes. # From df_grouped, get the group belonging to 'apple' as a dataframe. # # + df = pd.DataFrame({'col1': ['apple', 'banana', 'orange'] * 3, 'col2': np.random.rand(9), 'col3': np.random.randint(0, 15, 9)}) df_grouped = df.groupby(['col1']) df # - # Solution 1 df_grouped.get_group('apple') # Solution 2 for i, dff in df_grouped: if i == 'apple': print(dff) df = pd.read_csv('Cars93_miss.csv') df.head(100) df_model = df.groupby('Manufacturer') df_model.get_group('Audi') df_model.get_group('Volkswagen') df_model2 = df.groupby('Passengers') df_model2.get_group(7) # ## 68. How to get the n’th largest value of a column when grouped by another column? df = pd.DataFrame({'fruit': ['apple', 'banana', 'orange'] * 3, 'taste': np.random.rand(9), 'price': np.random.randint(0, 15, 9)}) df # Solution df_grpd = df['taste'].groupby(df.fruit) df_grpd df_grpd.get_group('banana') df_grpd.get_group('banana').sort_values() df_grpd.get_group('banana').sort_values().iloc[-2] # ## 69. How to compute grouped mean on pandas dataframe and keep the grouped column as another column (not index)? # + #In df, Compute the mean price of every fruit, while keeping the fruit as another column instead of an index. # - df = pd.DataFrame({'fruit': ['apple', 'banana', 'orange'] * 3, 'rating': np.random.rand(9), 'price': np.random.randint(0, 15, 9)}) df df.groupby('fruit').mean() # Solution out = df.groupby('fruit', as_index=False)['price'].mean() print(out) # ## 70. How to join two dataframes by 2 columns so they have only the common rows?
df1 = pd.DataFrame(np.random.randint(1,100, 40).reshape(10, -1), columns=list('pqrs'), index=list('abcdefghij')) df1 df2 = pd.DataFrame(np.random.randint(100,200, 40).reshape(10, -1), columns=list('uvwx'), index=list('abcdefghij')) df2 df1.join(df2,how = 'left') # + # Input df1 = pd.DataFrame({'fruit': ['apple', 'banana', 'orange'] * 3, 'weight': ['high', 'medium', 'low'] * 3, 'price': np.random.randint(0, 15, 9)}) df1 # - df2 = pd.DataFrame({'pazham': ['apple', 'orange', 'pine'] * 2, 'kilo': ['high', 'low'] * 3, 'price': np.random.randint(0, 15, 6)}) df2 # Solution pd.merge(df1, df2, how='inner', left_on=['fruit', 'weight'], right_on=['pazham', 'kilo'], suffixes=['_left', '_right'])
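# Going back to exercise 66, here is a vectorized alternative to the `iat` loop (a sketch
# with a fresh frame so it is self-contained). `np.fill_diagonal` works in place, and
# `np.fliplr` returns a view, so zeroing the flipped view's diagonal zeroes the
# anti-diagonal of the original array.
df66 = pd.DataFrame(np.random.randint(1, 100, 100).reshape(10, -1))
arr = df66.values
np.fill_diagonal(arr, 0)            # main diagonal
np.fill_diagonal(np.fliplr(arr), 0) # anti-diagonal
df66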
1_Pandas_Tips/Pandas_Exercises_for_Data_Analysis_7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # teradataml: preparing the features and training the model # ## Connect to Vantage #import the teradataml package for Vantage access from teradataml import * import getpass from teradataml import display #display.print_sqlmr_query=True from sqlalchemy.sql.expression import select, case as case_when, func from sqlalchemy import TypeDecorator, Integer, String import warnings warnings.filterwarnings('ignore') Vantage = 'tdap1627t2.labs.teradata.com' User = 'alice' Pass = '<PASSWORD>' Vantage = '10.25.251.121' User = 'USER10' Pass = '<PASSWORD>' # + #Pass = getpass.getpass(prompt="pwd: ") # - print(Vantage,User) con = create_context(Vantage, User, Pass) # ## Prepare the explain text feature # Data Set Selection dbqlog = DataFrame.from_table(in_schema("dbc", "dbqlogtbl")).drop("ZoneId", axis = 1) dbqlexplain = DataFrame.from_table(in_schema("dbc", "dbqlexplaintbl")).drop("ZoneID", axis = 1) dbqldata = dbqlog.join(other = dbqlexplain, on = ["QueryID"], lsuffix = "t1", rsuffix = "t2") \ .select(['t1_QueryID','ExplainText','QueryBand','QueryText']) # Workaround until ELE-2072. dbqldata.to_sql('prediction_sentiment', if_exists="replace") dbqldata = DataFrame.from_table('prediction_sentiment') # ## Setup training and join condition features # Feature Extraction df_select_query_column_projection = [ dbqldata.t1_QueryID.expression.label("queryid"), dbqldata.ExplainText.expression.label("explaintext"), dbqldata.QueryBand.expression.label("queryband"), func.REGEXP_SUBSTR(dbqldata.QueryBand.expression, '(collected_statistics|no_statistics)', 1, 1, 'i').label("training"), func.REGEXP_SUBSTR(dbqldata.QueryText.expression, 'SELECT', 1, 1, 'i').label("select_info"), func.REGEXP_SUBSTR(func.REGEXP_SUBSTR(dbqldata.ExplainText.expression, '(joined using a *[A-z \-]+ join,)', 1, 1, 'i'), '[A-z]+', 15, 1, 'i').label("join_condition")] training_data = DataFrame.from_query(str(select(df_select_query_column_projection) .where(Column('join_condition') != None) .where(Column('training') != None) .compile(compile_kwargs={"literal_binds": True}))) # ## Filter to provide the training set and testing set # Establish training data training_data = DataFrame.from_query(str(select(df_select_query_column_projection) .compile(compile_kwargs={"literal_binds": True}))) data_filter = (training_data.join_condition != None) & (training_data.training != None) \ & (training_data.select_info != None) data_set = training_data[data_filter] # Split data set into training and testing sets training_set = Sampling(data = data_set, sample_fraction = 0.5, seed = 2).result testing_set = data_set.join(other = training_data, on = ["queryid<>queryid"], lsuffix = "t1", rsuffix = "t2") training_set[training_set.training == 'collected_statistics'].count() # + #testing = tsample.result.join(other = training_data, on = ["queryid<>queryid"], lsuffix = "t1", rsuffix = "t2") # + #training_set = tsample.result # - # Custom sentiment dictionary dictionary = DataFrame.from_table('dbql_sentiment') # Extract confidence features Features = SentimentExtractor( #dict_data = dictionary, newdata = training_set, level = "document", text_column = "explaintext", accumulate = ['queryid','join_condition','training'] ).result # + #help(SentimentExtractor) # - Features.count() # ## These are the features for training the model # + #features =
td_sentiment_extractor_out.result # - # ## Training and saving the model # Train model stats_model = NaiveBayes( formula="training ~ out_polarity + join_condition", data=Features) stats_model td_save_model(model = stats_model, name = "Stats_collection_model_final") stats_model.result.to_sql("stats_model_final", if_exists="replace") target_collection = NaiveBayesPredict(newdata=Features, modeldata = stats_model, formula="training ~ out_polarity + join_condition", id_col = "queryid", responses = ["collected_statistics","no_statistics"] ) summary = target_collection.result.join(other = Features, on = ["queryid"], lsuffix = "t1", rsuffix = "t2") ConfusionMatrix(data = summary, prediction = 'prediction', reference = 'training').accuracytable ConfusionMatrix(data = summary, prediction = 'prediction', reference = 'join_condition').accuracytable help(NaiveBayes) # ***
MoveModel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:lithosphere] # language: python # name: conda-env-lithosphere-py # --- # # Bending of the lithosphere during subduction # # **Goal:** Implement the solution to the 1D flexure equation that describes the bending of the subducting oceanic lithosphere and compare its predictions against observations of gravity disturbances and bathymetry. # # Using the 1D flexure equation, we can model the bending of the oceanic lithosphere at a subduction zone. **Our assumption is that the plate is perfectly elastic and is bending under a load placed at one of its ends.** The solution for the amount of deflection (upwards or downwards) $w$ can be expressed as: # # $$ # w(x) = w_b\ e^{\pi/4} \sqrt{2} # \exp\left( -\dfrac{\pi}{4}\left(\dfrac{x - x_0}{x_b - x_0}\right) \right) # \sin\left(\dfrac{\pi}{4}\left(\dfrac{x - x_0}{x_b - x_0}\right) \right) # $$ # # in which $w_b$ is the deflection at the peak of the forebulge, $x_b$ is the position of the peak of the forebulge, and $x_0$ is the position in which the deflection crosses the zero line. The figure below can better illustrate these parameters. # # <img style="width: 500px; margin: 10px auto;" src="https://github.com/leouieda/lithosphere/raw/77a1da95fe7fcd65528bd8e30bbdcea949f5c3c7/lectures/assets/flexure-setup.svg"> # <p style="text-align: left; width: 500px; max-width: 100%; margin: 30px auto;"> # Figure caption: Sketch of the elastic plate flexure for an end load (placed on the left) and the geometric parameters used to derive the solution. # </p> # # We can also predict the observed gravity disturbances due to the density anomalies caused by the downward deflection of the oceanic crust. The figure below illustrates a conceptual model of the density anomalies causing the observed data. # # <img style="width: 500px; margin: 10px auto;" src="https://github.com/leouieda/lithosphere/raw/77a1da95fe7fcd65528bd8e30bbdcea949f5c3c7/lectures/assets/disturbance-subduction-model.svg"> # <p style="text-align: left; width: 500px; max-width: 100%; margin: 30px auto;"> # Figure caption: Model of the density anomalies causing the observed gravity disturbance due to subduction. # </p> # # The gravitational effect of these mass anomalies can be estimated using a **Bouguer plate approximation** ($g \approx 2 \pi G \Delta\rho w$) if we know the deflection $w$ and the density of water and mantle (in practice, the density of the crust cancels out). # # **In this notebook**, we'll write functions to: # # * Calculate the deflection given the geometric parameters $x_0$, $x_b$, and $w_b$ # * Estimate the gravity disturbance caused by the deflection # * Extract a profile from our global grid datasets along a great-circle path # # We'll also analyse the fit of the model to observations in different profiles to see its limitations. # ## The data # # **Download** (if you haven't already): [global-geophysical-data.nc](https://github.com/leouieda/lithosphere/raw/main/data/global-geophysical-data.nc) (place it in the **same folder** as this notebook) # # The data grids are stored in a [netCDF](https://en.wikipedia.org/wiki/NetCDF) file, which is a very common standard for storing and distributing gridded data. It's used throughout the Earth, ocean, and climate sciences and can be read and manipulated with most software platforms. It contains global grids of gravity, topography, and heat flow.
All grids were interpolated onto the same resolution of 0.2 degrees (a compromise between resolution and size of the file) so that we can easily compare and use all of the data together. # # * **Gravity**: generated from the [EIGEN-6C4 spherical harmonic model](https://doi.org/10.5880/icgem.2015.1) through the [ICGEM webservice](http://icgem.gfz-potsdam.de/home). It includes two grids: the gravity disturbance calculated with the WGS84 ellipsoid and the Bouguer gravity disturbance calculated using a topography density of 2670 kg/m³. The data for both grids was generated on top of the Earth's surface (the water surface in the oceans and topography on land). # * **Topography and bathymetry**: a downsampled version of [ETOPO1](https://doi.org/10.7289/V5C8276M) and includes a version smoothed using a Gaussian filter with 1 degree width (~100 km). # * **Heat flow**: an upsampled version of the compilation by [Lucazeau (2019)](https://doi.org/10.1029/2019GC008389) (originally 0.5 degree resolution). # * **Lithosphere age (oceanic)**: a downsampled version of the grid by [Müller et al. (2008)](https://doi.org/10.1029/2007GC001743) (originally 6 arc-minutes). # ## Import the required libraries # # Load the required software to load the data, make maps, and perform calculations. # For arrays and linear algebra import numpy as np # To make figures and maps import matplotlib.pyplot as plt # To load and manipulate grids import xarray as xr # ## Load the data grids # # Once again, we'll use xarray to load the data from the netCDF file. data = xr.load_dataset("global-geophysical-data.nc") data data.topography.plot(figsize=(16, 8)) # ## Extracting a profile # # The first thing we'll do is extract a profile from our grid. The profile will follow a [great circle](https://en.wikipedia.org/wiki/Great_circle) path (we'll assume that the Earth is a sphere). The trick here is producing the latitude and longitude coordinates of points that are evenly spaced along the profile. This is the problem of finding [way points along a great circle](https://en.wikipedia.org/wiki/Great-circle_navigation#Finding_way-points), which is not trivial but has a well established solution. # # The function below employs this solution to generate longitude and latitude coordinates for points along a great circle path between start and end points. The links provided in the code explain where the equations come from and why we return `xarray.DataArray`s instead of just the longitude and latitude numpy arrays. def profile_points(start, end, npoints): """ Generate the evenly spaced points between start and end. Both start and end should be (longitude, latitude) pairs. The profile points follow a great circle. Returns the longitude and latitude coordinates of each point in a format that can be used with xarray. """ lon1, lat1 = np.radians(start) lon2, lat2 = np.radians(end) # These are needed to calculate the lon/lat coordinates of the profile.
    # See https://en.wikipedia.org/wiki/Great-circle_navigation#Finding_way-points
    azimuth1 = np.arctan2(
        np.cos(lat2) * np.sin(lon2 - lon1),
        np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(lon2 - lon1)
    )
    azimuth_equator = np.arctan2(
        np.sin(azimuth1) * np.cos(lat1),
        np.sqrt(np.cos(azimuth1) ** 2 + np.sin(azimuth1) ** 2 * np.sin(lat1) ** 2)
    )
    great_circle_equator = np.arctan2(np.tan(lat1), np.cos(azimuth1))
    lon_equator = lon1 - np.arctan2(
        np.sin(azimuth_equator) * np.sin(great_circle_equator),
        np.cos(great_circle_equator)
    )
    # The great-circle distance between start and end (in radians)
    # This is the haversine formula: https://en.wikipedia.org/wiki/Haversine_formula
    great_circle_distance = 2 * np.arcsin(
        np.sqrt(
            np.sin((lat2 - lat1) / 2) ** 2
            + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2
        )
    )
    # Generate evenly spaced points along the great circle
    distances = np.linspace(start=0, stop=great_circle_distance, num=npoints)
    # Make the distances relative to where the great circle crosses the equator
    # This is needed for the calculations below.
    distances_equator = distances + great_circle_equator
    # Calculate the lon/lat coordinates of each point given their arc distance
    # and the azimuth of the great circle
    latitudes = np.arctan2(
        np.cos(azimuth_equator) * np.sin(distances_equator),
        np.sqrt(np.cos(distances_equator) ** 2 + (np.sin(azimuth_equator) * np.sin(distances_equator)) ** 2)
    )
    longitudes = lon_equator + np.arctan2(
        np.sin(azimuth_equator) * np.sin(distances_equator),
        np.cos(distances_equator)
    )
    # Convert the arc-distances into kilometers
    mean_earth_radius = 6_371
    distances *= mean_earth_radius
    # xarray stuff so we can interpolate values on these coordinates
    # see: http://xarray.pydata.org/en/stable/interpolation.html#advanced-interpolation
    longitude = xr.DataArray(np.degrees(longitudes), coords={"distance": distances}, dims="distance")
    latitude = xr.DataArray(np.degrees(latitudes), coords={"distance": distances}, dims="distance")
    return longitude, latitude

# We can now use this function to generate points for a profile cutting across the subduction zone close to Japan.

longitudes, latitudes = profile_points(start=(148, 50), end=(155, 35), npoints=300)

# Plot the topography grid and the profile points on a map so we can see where it's located.

# Slice the grid to make it easier to visualize
japan = data.sel(longitude=slice(130, 160), latitude=slice(25, 55))
japan.topography.plot(figsize=(8, 8))
plt.plot(longitudes, latitudes, "-k", linewidth=3)
plt.grid()

# The profile goes from the shallow [Sea of Okhotsk](https://en.wikipedia.org/wiki/Sea_of_Okhotsk) between Russia and Japan, across the subduction zone, and into the Pacific. Notice that the subduction zone is fairly straight here and the profile is perpendicular to it. **These are good features since our model assumes that there are no 2D variations in the subduction geometry.**
#
# Once we know the longitude and latitude coordinates of the points along the profile, we can interpolate the grid values at these locations.

profile = data.interp(longitude=longitudes, latitude=latitudes)
profile

# The `profile` includes all of our grid data interpolated onto the profile. It also includes the distance from the starting point along the profile, which makes it easier to make our plots and calculations.
#
# Let's plot the topography along the profile to see what we have.
plt.figure(figsize=(16, 5))
plt.plot(profile.distance, profile.topography)
plt.grid()
plt.xlabel("distance (km)")
plt.ylabel("bathymetry (m)")
plt.xlim(profile.distance.min(), profile.distance.max())

# Our model only includes the subducting plate, so we have to crop our profile at the trench. To do so, let's find the distance coordinate of the trench, assuming that it is the deepest point in our profile.

index_trench = profile.topography.argmin()
distance_trench = profile.distance[index_trench].values
print(distance_trench)

# We can now select only the points in our profile that are at greater distances than the trench.

# In slice, None stands for "all the way to the end"
# Like doing array[10:]
profile_subducting = profile.sel(distance=slice(distance_trench, None))

plt.figure(figsize=(16, 5))
plt.plot(profile_subducting.distance, profile_subducting.topography)
plt.grid()
plt.xlabel("distance (km)")
plt.ylabel("bathymetry (m)")
plt.xlim(profile_subducting.distance.min(), profile_subducting.distance.max())

# This figure now looks like our sketch at the start of the notebook. You can clearly spot:
#
# 1. The place where the deflection is zero and the plate tends to a particular depth (on the right)
# 2. The forebulge to the right of the trench
#
# Now we need to implement our flexure model and see if we can match these observations.

# ## Calculating the flexure
#
# Let's write a function that calculates the flexure equation:
#
# $$
# w(x) = w_b\ e^{\pi/4} \sqrt{2}
# \exp\left( -\dfrac{\pi}{4}\left(\dfrac{x - x_0}{x_b - x_0}\right) \right)
# \sin\left(\dfrac{\pi}{4}\left(\dfrac{x - x_0}{x_b - x_0}\right) \right)
# $$
#
# Its inputs will be (all in kilometers):
#
# * The distance along the profile ($x$)
# * The distance at which the deflection crosses the zero line ($x_0$)
# * The distance at which the forebulge peaks ($x_b$)
# * The highest amplitude of the forebulge ($w_b$)
#
# The function will return the predicted deflection $w$ in meters.

def flexure_end_load(x, zero_crossing, forebulge_peak, forebulge_amplitude):
    """
    Calculate the flexure of a plate under an end load.
    Inputs should be in km. The output will be in meters.
    """
    aux = (np.pi / 4) * (x - zero_crossing) / (forebulge_peak - zero_crossing)
    deflection = (
        forebulge_amplitude * np.exp(np.pi / 4) * np.sqrt(2)
        * np.exp(-aux) * np.sin(aux)
    )
    # Convert to meters
    return 1000 * deflection

# Let's calculate the deflection based on input values derived visually from the profile.

deflection = flexure_end_load(
    x=profile_subducting.distance,
    zero_crossing=730,
    forebulge_peak=800,
    forebulge_amplitude=0.7,
)

plt.figure(figsize=(16, 5))
plt.plot(profile_subducting.distance, deflection)
plt.grid()
plt.xlabel("distance (km)")
plt.ylabel("deflection (m)")
plt.xlim(profile_subducting.distance.min(), profile_subducting.distance.max())

# Note that this is **not the bathymetry** but the amount of deflection experienced by the plate. To arrive at the actual bathymetry, we have to add the depth of the non-deflected bathymetry (the mean value at the right end of the profile).

predicted_bathymetry = -5700 + deflection

plt.figure(figsize=(16, 5))
plt.plot(profile_subducting.distance, profile_subducting.topography)
plt.plot(profile_subducting.distance, predicted_bathymetry)
plt.grid()
plt.xlabel("distance (km)")
plt.ylabel("bathymetry (m)")
plt.xlim(profile_subducting.distance.min(), profile_subducting.distance.max())

# With the values used in the model, we can fit the bathymetry data pretty well.
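
# As a quick quantitative check of that visual fit, we can compute the root-mean-square
# (RMS) misfit between the observed and predicted bathymetry. This is a minimal sketch
# that only uses variables already defined above.

misfit = profile_subducting.topography.values - predicted_bathymetry.values
print("RMS misfit: {:.1f} m".format(np.sqrt(np.mean(misfit ** 2))))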
# The three geometric parameters can also tell us about the flexural rigidity and effective elastic thickness of the lithosphere. Both are important physical parameters for modelling the behaviour of plates (see the Geodynamics book for the equations).

# ## Predicting gravity
#
# With the deflection that we just calculated, we can predict the gravity disturbance caused by it using the Bouguer plate approximation $g \approx 2 \pi G \Delta\rho h$. The thickness of the Bouguer plate $h$ is the absolute value of the deflection $w$, and the density contrast is determined by the figure below.
#
# <img style="width: 500px; margin: 10px auto;" src="https://github.com/leouieda/lithosphere/raw/77a1da95fe7fcd65528bd8e30bbdcea949f5c3c7/lectures/assets/disturbance-subduction-model.svg">
# <p style="text-align: left; width: 500px; max-width: 100%; margin: 30px auto;">
# Figure caption: Model of the density anomalies causing the observed gravity disturbance due to subduction.
# </p>
#
# The predicted gravity disturbance will be the sum of the effects of the top and bottom density anomalies. If we add the two Bouguer plate values, we arrive at:
#
# * $\Delta\rho = \rho_m - \rho_w$ for $w > 0$
# * $\Delta\rho = \rho_w - \rho_m$ for $w < 0$
#
# So in the end, the density of the crust doesn't come into the calculations.
#
# Let's make a function that predicts gravity disturbances given the deflection and the densities of water and mantle. The value returned will be in mGal and all inputs in SI units (meters and kg/m³).

def gravity_flexure(deflection, density_water, density_mantle):
    """
    Predict the gravity disturbance due to the flexure of the lithosphere
    from subduction.
    """
    G = 6.67430e-11
    density_contrast = xr.where(
        deflection >= 0,
        density_mantle - density_water,
        density_water - density_mantle,
    )
    # 10^5 converts from m/s² to mGal
    disturbance = 1e5 * 2 * np.pi * G * density_contrast * np.abs(deflection)
    return disturbance

# Now predict the gravity disturbance assuming $\rho_w = 1000\ kg/m^3$ and $\rho_m = 3300\ kg/m^3$.

predicted_gravity = gravity_flexure(deflection, density_water=1000, density_mantle=3300)

plt.figure(figsize=(16, 5))
plt.plot(profile_subducting.distance, profile_subducting.gravity_disturbance)
plt.plot(profile_subducting.distance, predicted_gravity)
plt.grid()
plt.xlabel("distance (km)")
plt.ylabel("gravity disturbance (mGal)")
plt.xlim(profile_subducting.distance.min(), profile_subducting.distance.max())

# The fit is also very good, which indicates that our simple model with an oceanic crust of uniform density and thickness is a reasonable approximation of reality.

# ---
#
# ## **Your turn!**
#
# In groups:
#
# 1. Select one or more profiles at different locations:
#    * This could be a different part of the Japan subduction zone or a different subduction zone entirely.
#    * Aim for places that may not be as uniform as the profile we used, which has a pretty straight subduction geometry.
#    * It helps if you determine a region first, then extract a subsection of the grid and plot it to pick the coordinates for the start and end of the profile.
# 1. Try to fit both bathymetry and gravity using the functions we generated above.
#    * Notice that the values of $x_0$ and $x_b$ change the wavelength of the forebulge while $w_b$ only changes the amplitude.
# 1. Discuss:
#    * **Why** the model is/isn't able to fit the data
#    * What that can tell us about the subduction zone
# 1. Share your plots and main discussion points with the class.
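
# ---
#
# ## Optional: from geometry to effective elastic thickness
#
# The conversion mentioned above (from $x_0$, $x_b$, and $w_b$ to flexural rigidity and
# effective elastic thickness) isn't implemented in this notebook. The sketch below assumes
# the standard end-load relations from the Geodynamics book: flexural parameter
# $\alpha = 4 (x_b - x_0) / \pi$, rigidity $D = (\rho_m - \rho_w) g \alpha^4 / 4$, and
# $T_e = \left(12 (1 - \nu^2) D / E\right)^{1/3}$, with assumed typical values
# $E = 70$ GPa and $\nu = 0.25$. Treat it as a back-of-the-envelope estimate, not part of
# the exercise itself.

def elastic_thickness(zero_crossing, forebulge_peak, density_water=1000,
                      density_mantle=3300, young=70e9, poisson=0.25, gravity=9.81):
    """
    Estimate the flexural parameter (m), rigidity (N m), and effective
    elastic thickness (m) from x0 and xb (both in km).
    """
    alpha = 4 * (forebulge_peak - zero_crossing) * 1000 / np.pi  # km to m
    rigidity = (density_mantle - density_water) * gravity * alpha ** 4 / 4
    thickness = (12 * (1 - poisson ** 2) * rigidity / young) ** (1 / 3)
    return alpha, rigidity, thickness

alpha, rigidity, te = elastic_thickness(zero_crossing=730, forebulge_peak=800)
print("alpha = {:.0f} km | D = {:.2e} N m | Te = {:.1f} km".format(alpha / 1000, rigidity, te / 1000))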
practicals/practical3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <center>
#     <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" />
# </center>
#
# # Write and Save Files in Python
#
# Estimated time needed: **25** minutes
#
# ## Objectives
#
# After completing this lab you will be able to:
#
# - Write to files using Python libraries

# <h2>Table of Contents</h2>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
#     <ul>
#         <li><a href="write">Writing Files</a></li>
#         <li><a href="Append">Appending Files</a></li>
#         <li><a href="add">Additional File modes</a></li>
#         <li><a href="copy">Copy a File</a></li>
#     </ul>
#
# </div>
#
# <hr>

# <h2 id="write">Writing Files</h2>
#
# We can open a file object and use the method <code>write()</code> to save text to the file. To write to a file, the mode argument must be set to write, <b>w</b>. Let's write a file <b>Example2.txt</b> with the line: <b>"This is line A"</b>

# Write a line to the file
exmp2 = '/resources/data/Example2.txt'
with open(exmp2, 'w') as writefile:
    writefile.write("This is line A")

# We can read the file to see if it worked:

# +
# Read file
with open(exmp2, 'r') as testwritefile:
    print(testwritefile.read())
# -

# We can write multiple lines:

# +
# Write lines to file
with open(exmp2, 'w') as writefile:
    writefile.write("This is line A\n")
    writefile.write("This is line B\n")
# -

# The method <code>.write()</code> works similarly to the method <code>.readline()</code>, except that instead of reading a new line it writes a new line. The process is illustrated in the figure below; the different colour coding of the grid represents a new line added to the file after each method call.
#
# <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%204/images/WriteLine.png" width="500" />
#
# You can check the file to see if your results are correct.

# +
# Check whether the write to the file worked
with open(exmp2, 'r') as testwritefile:
    print(testwritefile.read())
# -

# We can write a list to a <b>.txt</b> file as follows:

# +
# Sample list of text
Lines = ["This is line A\n", "This is line B\n", "This is line C\n"]
Lines

# +
# Write the strings in the list to the text file
with open('Example2.txt', 'w') as writefile:
    for line in Lines:
        print(line)
        writefile.write(line)
# -

# We can verify the file is written by reading it and printing out the values:

# +
# Verify that writing to the file was successfully executed
with open('Example2.txt', 'r') as testwritefile:
    print(testwritefile.read())
# -

# However, note that setting the mode to **w** overwrites all the existing data in the file.

with open('Example2.txt', 'w') as writefile:
    writefile.write("Overwrite\n")
with open('Example2.txt', 'r') as testwritefile:
    print(testwritefile.read())

# <hr>
# <h2 id="Append">Appending Files</h2>
#
# We can write to files without losing any of the existing data by setting the mode argument to append, **a**.
# You can append a new line as follows:

# +
# Write new lines to the text file
with open('Example2.txt', 'a') as testwritefile:
    testwritefile.write("This is line C\n")
    testwritefile.write("This is line D\n")
    testwritefile.write("This is line E\n")
# -

# You can verify the file has changed by running the following cell:

# +
# Verify that the new lines are in the text file
with open('Example2.txt', 'r') as testwritefile:
    print(testwritefile.read())
# -

# <hr>
# <h2 id="add">Additional modes</h2>
#
# It's fairly inefficient to open the file in **a** or **w** mode and then reopen it in **r** to read the lines. Luckily we can access the file in the following modes:
#
# - **r+** : Reading and writing. Cannot truncate the file.
# - **w+** : Writing and reading. Truncates the file.
# - **a+** : Appending and reading. Creates a new file if none exists.
#
# You don't have to dwell on the specifics of each mode for this lab.
#
# Let's try out the **a+** mode:

with open('Example2.txt', 'a+') as testwritefile:
    testwritefile.write("This is line E\n")
    print(testwritefile.read())

# There were no errors, but <code>read()</code> also did not output anything. This is because of our location in the file.
#
# Most of the file methods we've looked at work at a certain location in the file. <code>.write()</code> writes at a certain location in the file, <code>.read()</code> reads at a certain location in the file, and so on. You can think of this as moving your pointer around in a notepad to make changes at a specific location.
#
# Opening the file in **w** is akin to opening the .txt file, moving your cursor to the beginning of the text file, writing new text, and deleting everything that follows.
# Whereas opening the file in **a** is similar to opening the .txt file, moving your cursor to the very end and then adding the new pieces of text. <br>
# It is often very useful to know where the 'cursor' is in a file and to be able to control it. The following methods allow us to do precisely this (a compact recap sketch also appears at the end of this notebook):
#
# - <code>.tell()</code> - returns the current position in bytes
# - <code>.seek(offset, from)</code> - changes the position by 'offset' bytes with respect to 'from'. 'from' can take the values 0, 1, or 2, corresponding to the beginning of the file, the current position, and the end of the file
#
# Now let's revisit **a+**:

with open('Example2.txt', 'a+') as testwritefile:
    print("Initial Location: {}".format(testwritefile.tell()))

    data = testwritefile.read()
    if (not data):  # empty strings return false in python
        print('Read nothing')
    else:
        print(testwritefile.read())

    testwritefile.seek(0, 0)  # move 0 bytes from the beginning

    print("\nNew Location : {}".format(testwritefile.tell()))
    data = testwritefile.read()
    if (not data):
        print('Read nothing')
    else:
        print(data)

    print("Location after read: {}".format(testwritefile.tell()))

# Finally, a note on the difference between **w+** and **r+**. Both of these modes allow access to the read and write methods. However, opening a file in **w+** overwrites it and deletes all existing data. <br>
# To work with a file's existing data, use **r+** and **a+**. While using **r+**, it can be useful to add a <code>.truncate()</code> call at the end of your data. This will reduce the file to your data and delete everything that follows. <br>
# In the following code block, run the code as it is first, and then run it again with the <code>.truncate()</code> line uncommented.
with open('Example2.txt', 'r+') as testwritefile:
    data = testwritefile.readlines()
    testwritefile.seek(0, 0)  # write at the beginning of the file

    testwritefile.write("Line 1" + "\n")
    testwritefile.write("Line 2" + "\n")
    testwritefile.write("Line 3" + "\n")
    testwritefile.write("finished\n")
    # Uncomment the line below
    # testwritefile.truncate()
    testwritefile.seek(0, 0)
    print(testwritefile.read())

# <hr>
#
# <h2 id="copy">Copy a File</h2>
#
# Let's copy the file <b>Example2.txt</b> to the file <b>Example3.txt</b>:

# +
# Copy one file to another
with open('Example2.txt', 'r') as readfile:
    with open('Example3.txt', 'w') as writefile:
        for line in readfile:
            writefile.write(line)
# -

# We can read the file to see if everything works:

# +
# Verify that the copy was successfully executed
with open('Example3.txt', 'r') as testwritefile:
    print(testwritefile.read())
# -

# After reading files, we can also write data into files and save them in different file formats like **.txt, .csv, .xls (for Excel files), etc.** You will come across these in further examples.
#
# Now go to the directory to ensure the <b>.txt</b> file exists and contains the data that we wrote.
#
# <hr>

# <h2> Exercise </h2>
#
# Your local university's Raptors fan club maintains a register of its active members in a .txt document. Every month they update the file by removing the members who are not active. You have been tasked with automating this with your Python skills. <br>
# Given the file currentMem, remove each member with a 'no' in their Active column (these are the inactive members). Keep track of each of the removed members and append them to the exMem file. Make sure the format of the original files is preserved. (_Hint: Do this by reading/writing whole lines and ensuring the header remains._)
# <br>
# Run the code block below prior to starting the exercise. The skeleton code has been provided for you; edit only the cleanFiles function.
# # + #Run this prior to starting the exercise from random import randint as rnd memReg = 'members.txt' exReg = 'inactive.txt' fee =('yes','no') def genFiles(current,old): with open(current,'w+') as writefile: writefile.write('Membership No Date Joined Active \n') data = "{:^13} {:<11} {:<6}\n" for rowno in range(20): date = str(rnd(2015,2020))+ '-' + str(rnd(1,12))+'-'+str(rnd(1,25)) writefile.write(data.format(rnd(10000,99999),date,fee[rnd(0,1)])) with open(old,'w+') as writefile: writefile.write('Membership No Date Joined Active \n') data = "{:^13} {:<11} {:<6}\n" for rowno in range(3): date = str(rnd(2015,2020))+ '-' + str(rnd(1,12))+'-'+str(rnd(1,25)) writefile.write(data.format(rnd(10000,99999),date,fee[1])) genFiles(memReg,exReg) # - # Start your solution below: # # + def cleanFiles(currentMem,exMem): with open(currentMem,'r+') as writeFile: with open(exMem, 'a+') as appendFile: #get the data writeFile.seek(0) members = writeFile.readlines() #remove header header = members[0] members.pop(0) inactive = [] for member in members: if 'no' in member: inactive.append(member) #go to the beginning of the write file writeFile.seek(0) writeFile.write(header) for member in members: if (member in inactive): appendFile.write(member) else: writeFile.write(member) writeFile.truncate() # Code to help you see the files # Leave as is memReg = 'members.txt' exReg = 'inactive.txt' cleanFiles(memReg,exReg) headers = "Membership No Date Joined Active \n" with open(memReg,'r') as readFile: print("Active Members: \n\n") print(readFile.read()) with open(exReg,'r') as readFile: print("Inactive Members: \n\n") print(readFile.read()) # - # Run the following to verify your code: # # + def testMsg(passed): if passed: return 'Test Passed' else : return 'Test Failed' testWrite = "testWrite.txt" testAppend = "testAppend.txt" passed = True genFiles(testWrite,testAppend) with open(testWrite,'r') as file: ogWrite = file.readlines() with open(testAppend,'r') as file: ogAppend = file.readlines() try: cleanFiles(testWrite,testAppend) except: print('Error') with open(testWrite,'r') as file: clWrite = file.readlines() with open(testAppend,'r') as file: clAppend = file.readlines() # checking if total no of rows is same, including headers if (len(ogWrite) + len(ogAppend) != len(clWrite) + len(clAppend)): print("The number of rows do not add up. 
Make sure your final files have the same header and format.") passed = False for line in clWrite: if 'no' in line: passed = False print("Inactive members in file") break else: if line not in ogWrite: print("Data in file does not match original file") passed = False print ("{}".format(testMsg(passed))) # - # <details><summary>Click here for the solution</summary> # # ```python # def cleanFiles(currentMem,exMem): # with open(currentMem,'r+') as writeFile: # with open(exMem,'a+') as appendFile: # #get the data # writeFile.seek(0) # members = writeFile.readlines() # #remove header # header = members[0] # members.pop(0) # # inactive = [member for member in members if ('no' in member)] # ''' # The above is the same as # # for member in active: # if 'no' in member: # inactive.append(member) # ''' # #go to the beginning of the write file # writeFile.seek(0) # writeFile.write(header) # for member in members: # if (member in inactive): # appendFile.write(member) # else: # writeFile.write(member) # writeFile.truncate() # # memReg = 'members.txt' # exReg = 'inactive.txt' # cleanFiles(memReg,exReg) # # # code to help you see the files # # headers = "Membership No Date Joined Active \n" # # with open(memReg,'r') as readFile: # print("Active Members: \n\n") # print(readFile.read()) # # with open(exReg,'r') as readFile: # print("Inactive Members: \n\n") # print(readFile.read()) # # ``` # # </details> # # <hr> # <h2>The last exercise!</h2> # <p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href="https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/" target="_blank">this article</a> to learn how to share your work. # <hr> # # ## Author # # <a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank"><NAME></a> # # ### Other Contributors # # <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a"><NAME></a> # # ## Change Log # # | Date (YYYY-MM-DD) | Version | Changed By | Change Description | # | ----------------- | ------- | ----------- | ----------------------------------- | # | 2020-10-16 | 1.3 | <NAME> | Added exercise | # | 2020-10-16 | 1.2 | <NAME> | Added section additional file modes | # | 2020-10-16 | 1.1 | <NAME> | Made append a different section | # | 2020-08-28 | 0.2 | Lavanya | Moved lab to course repo in GitLab | # # <hr> # # ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/> #
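
# <hr>
#
# <h2>Appendix: a compact seek/tell recap</h2>
#
# Here is a short, self-contained sketch recapping the cursor-position methods from the "Additional modes" section. It uses a throwaway file name, <b>Example4.txt</b> (not one of the lab files), so it won't interfere with the exercise above.

with open('Example4.txt', 'w+') as recapfile:
    recapfile.write("abcdef")
    print(recapfile.tell())   # 6: the cursor sits after the last byte written
    recapfile.seek(0, 0)      # jump back to the beginning of the file
    print(recapfile.read(3))  # 'abc': read the first three bytes
    print(recapfile.tell())   # 3: reading advanced the cursor
    recapfile.truncate()      # cut the file at the current position (byte 3)
    recapfile.seek(0, 0)
    print(recapfile.read())   # 'abc': only the first three bytes remain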
4_Python for Data Science, AI & Development/PY0101EN-4-2-WriteFile.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Watch Me Code 1: Read and Write Message
#
# A simple example of reading and writing to a file.

filename = "WMC1-Message.txt"

# +
# Let's write to the file!
message = input("Type a message and I'll write it to %s:" % filename)
with open(filename, 'w') as file_out:
    file_out.write(message)
print("Done!")

# +
with open(filename, 'r') as file_in:
    message = file_in.read()
print("Here's the message in %s: %s" % (filename, message))
# -

# NOTE: Show that this is really persistent.
#
# - open WMC1-Message.txt from outside Jupyter.
# - edit the message. Re-run the 2nd example.

# +
def put_text_in_file(text):
    with open("test-messages.txt", 'a', encoding='utf-8') as dog:
        dog.write(text + "\n")

def get_text_out_file():
    with open("test-messages.txt", 'r') as file:
        contents = []
        for line in file:
            contents.append(line.strip())
    return contents

while True:
    user_input = input("Please enter a message (Enter to view all messages)")
    if user_input == '':
        break
    put_text_in_file(user_input)

messages = get_text_out_file()
print(", ".join(messages))
# -
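
# Since mode 'a' creates the file if it doesn't exist yet, the messages accumulate across
# runs. As a small follow-up sketch (assuming test-messages.txt was created by the loop
# above), we can print the stored messages one per line with their position:

with open("test-messages.txt", 'r', encoding='utf-8') as file_in:
    for number, line in enumerate(file_in, start=1):
        print("%d: %s" % (number, line.strip()))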
content/lessons/08/Watch-Me-Code/WMC1-Read-And-Write-File.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import slideseg3 import os import sys params = slideseg3.load_parameters('Parameters.txt') print('running __main__ with parameters: {0}'.format(params)) # + if not os.path.isdir(params["slide_path"]): path, filename = os.path.split(params["slide_path"]) xpath, xml_filename = os.path.split(params["xml_path"]) params["slide_path"] = path params["xml_path"] = xpath print('loading {0}'.format(filename)) slideseg3.run(params, filename) else: for filename in os.listdir(params["slide_path"]): slideseg3.run(params, filename) # -
SlideSeg3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + jupyter={"outputs_hidden": true} from IPython.core.debugger import set_trace import numpy as np import sys import collections import re f=open('day19input.txt') #not with read because thats probably the whole file lines = [line.rstrip('\n') for line in f] # - print (lines[0:4]) # + 15400012 seti [ 0 10551343 0 2 1925000 1]seti 1 0 4 set ip to 2 15400013 mulr [ 0 10551343 1925000 3 1925000 1]mulr 5 4 2 set reg 2 to multiply reg 4 and reg 5 15400014 eqrr [ 0 10551343 0 4 1925000 1]eqrr 2 1 2 if reg 2 = reg 1 set 2 to 1 else set 2 to 0 15400015 addr [ 0 10551343 0 5 1925000 1]addr 2 3 3 add reg 2 and 3 and set to 3 ---> 0+5 no change 15400016 addi [ 0 10551343 0 7 1925000 1]addi 3 1 3 add reg 3 and value 1 save in 3 ---> ip increment 15400017 addi [ 0 10551343 0 8 1925001 1]addi 4 1 4 DONE add reg 4 and 1 save in 4 --> increment 4 1 15400018 gtrr [ 0 10551343 0 9 1925001 1]gtrr 4 1 2 reg 2 to 1 if register 4 is greater than register 1 15400019 addr [ 0 10551343 0 10 1925001 1]addr 3 2 3 reg 3 to reg 2+reg3 11244 gtrr [1, 10551343, 0, 9, 1404, 2] def addr(reg,ins): # #(add register) stores into register C the result of adding register A and register B. result = reg[ins[1]]+reg[ins[2]] reg[ins[3]]=result return reg def addi(reg,ins):# #addi (add immediate) stores into register C the result of adding register A and value B. reg[ins[3]]=reg[ins[1]]+ins[2] return reg def mulr(reg,ins):# #mulr (multiply register) stores into register C the result of multiplying register A and register B. #set_trace() reg[ins[3]]=reg[ins[1]]*reg[ins[2]] return reg def muli(reg,ins):# #muli (multiply immediate) stores into register C the result of multiplying register A and value B. reg[ins[3]]=reg[ins[1]]*ins[2] return reg def banr(reg,ins):# #banr (bitwise AND register) stores into register C the result of the bitwise AND of register A and register B. reg[ins[3]]=reg[ins[1]]&reg[ins[2]] return reg def bani(reg,ins):# #bani (bitwise AND immediate) stores into register C the result of the bitwise AND of register A and value B. reg[ins[3]]=reg[ins[1]]&ins[2] return reg def borr(reg,ins):# #borr (bitwise OR register) stores into register C the result of the bitwise OR of register A and register B. reg[ins[3]]=reg[ins[1]]|reg[ins[2]] return reg def bori(reg,ins):# #bori (bitwise OR immediate) stores into register C the result of the bitwise OR of register A and value B. reg[ins[3]]=reg[ins[1]]|ins[2] return reg def setr(reg,ins):# #setr (set register) copies the contents of register A into register C. (Input B is ignored.) reg[ins[3]]=reg[ins[1]] return reg def seti(reg,ins):# #seti (set immediate) stores value A into register C. (Input B is ignored.) reg[ins[3]]=ins[1] return reg def gtir(reg,ins):# #gtir (greater-than immediate/register) sets register C to 1 if value A is greater than register B. Otherwise, register C is set to 0. if ins[1]>reg[ins[2]]: reg[ins[3]]=1 else: reg[ins[3]]=0 return reg def gtri(reg,ins):# #gtri (greater-than register/immediate) sets register C to 1 if register A is greater than value B. Otherwise, register C is set to 0. if reg[ins[1]]>ins[2]: reg[ins[3]]=1 else: reg[ins[3]]=0 return reg def gtrr(reg,ins):# #gtrr (greater-than register/register) sets register C to 1 if register A is greater than register B. Otherwise, register C is set to 0. 
if reg[ins[1]]>reg[ins[2]]: reg[ins[3]]=1 else: reg[ins[3]]=0 return reg def eqir(reg,ins):# #eqir (equal immediate/register) sets register C to 1 if value A is equal to register B. Otherwise, register C is set to 0. if ins[1]==reg[ins[2]]: reg[ins[3]]=1 else: reg[ins[3]]=0 return reg def eqri(reg,ins):# #qri (equal register/immediate) sets register C to 1 if register A is equal to value B. Otherwise, register C is set to 0. if reg[ins[1]]==ins[2]: reg[ins[3]]=1 else: reg[ins[3]]=0 return reg def eqrr(reg,ins): #eqrr (equal register/register) sets register C to 1 if register A is equal to register B. Otherwise, register C is set to 0. if reg[ins[1]]==reg[ins[2]]: reg[ins[3]]=1 else: reg[ins[3]]=0 return reg # + jupyter={"outputs_hidden": true} instructions=[] functions =[] for i,line in enumerate(lines): newline =[0] for j,x in enumerate(line.split(' ')): print (j,x) if j>0: newline.append(int(x)) else: functions.append(x) instructions.append (newline) instructions = np.array(instructions) instructions # + ip = 3 before = np.zeros(shape=6) before = before.astype(int) before[ip]+=-1 #before = np.array([1, 943, 0, 6, 660, 941]) before = np.array([ 1, 943, 0, 10, 382, 12]) #before[0]=1 #before=np.array([0,10551343,0,8,10551342,1]) #before =np.array([1, 10551343, 0, 8, 10551342, 2]) #before =np.array([1, 10551343, 0, 8, 10551342, 3]) #before =np.array([1, 10551343, 0, 8, 10551342, 4]) #before =np.array([1, 10551343, 0, 8, 0, 10551303]) #before = np.array([1, 10551343, 0, 8, 0, 10551342]) #before = np.array([10551344, 10551343, 0, 8, 10551342, 10551343]) #before = before.astype(np.int64) for i in range(100000000): before[ip]+=1 if before[ip]>=len(instructions): print (before) sys.exit(0) else: print (before,i,functions[before[ip]],eval(functions[before[ip]])(before.copy(),instructions[before[ip]])) before = eval(functions[before[ip]])(before.copy(),instructions[before[ip]]) # - np.unique(functions) before =np.array([1, 10551343, 0, 8, 10551342, 3]) 10551343 # + jupyter={"outputs_hidden": true} 10551344 too low 10551343 21102687too high 10551344 # + jupyter={"outputs_hidden": true} # - total =[] for x in range (1,10551344): if 10551343%x==0: total.append(x) 1+23+41+943 sum(total) #theanswer # + jupyter={"outputs_hidden": true}
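# -

# The disassembled program above appears to compute the sum of the divisors of the large
# target number (10551343 for part 2), which is what the brute-force divisor scan above
# does in roughly 10 million iterations. Assuming that reading of the program is correct,
# the same answer comes out of an O(sqrt(n)) sketch:

# +
def divisor_sum(n):
    total = 0
    d = 1
    while d * d <= n:
        if n % d == 0:
            total += d
            if d != n // d:
                total += n // d  # add the paired divisor n/d
        d += 1
    return total

print(divisor_sum(943))       # sanity check: 1 + 23 + 41 + 943 = 1008
print(divisor_sum(10551343))  # part 2 answer
# -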
advent_of_code_2018/Day 19 computer registers go with the flow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from SimPEG import * from simpegEM1D import * from scipy.constants import mu_0 import numpy as np cs = 10. nearthick = np.logspace(-1, 1, 3) linthick = np.ones(15)*cs deepthick = np.logspace(1, 2, 3) hx = np.r_[nearthick, linthick, deepthick, deepthick[-1]] mesh1D = Mesh.TensorMesh([hx], [0.]) depth = -mesh1D.gridN[:-1] LocSigZ = -mesh1D.gridCC FDsurvey = EM1DSurveyFD( rx_location = np.array([0., 0., 100.+30.]), src_location = np.array([0., 0., 100.+30.]), field_type = 'secondary', rx_type = 'ppm', src_type = 'VMD', offset = np.r_[8.], topo = np.r_[0., 0., 100.], depth = depth, frequency = np.r_[130100.] ) sig_half = 1e-2 sig_blk = 1e-1 chi_half = 0. sig = np.ones(FDsurvey.n_layer)*sig_half blk_ind = (-50>LocSigZ) & (-100<LocSigZ) sig[blk_ind] = sig_blk prob = EM1D( mesh1D, sigma=sig, chi= np.zeros(FDsurvey.n_layer) ) if prob.ispaired: prob.unpair() if FDsurvey.ispaired: FDsurvey.unpair() prob.pair(FDsurvey) d_true = FDsurvey.dpred([]) # - # %pylab inline fig, ax = subplots(1,1, figsize=(5, 8)) Utils1D.plotLayer(sig, mesh1D, showlayers=False) # + mesh1D = Mesh.TensorMesh([1], [0.]) depth = -mesh1D.gridN[:-1] LocSigZ = -mesh1D.gridCC FDsurvey = EM1DSurveyFD( rx_location = np.array([0., 0., 100.+30.]), src_location = np.array([0., 0., 100.+30.]), field_type = 'secondary', rx_type = 'ppm', src_type = 'VMD', offset = np.r_[8.], topo = np.r_[0., 0., 100.], depth = depth, frequency = np.r_[130100.], half_switch=True ) sig_half = 1e-2 sig_blk = 1e0 chi_half = 0. wires = Maps.Wires(('sigma', mesh1D.nC),('h', 1)) expmap = Maps.ExpMap(mesh1D) # expmap_h = Maps.ExpMap(nP=1) sigmaMap = expmap * wires.sigma # hMap = expmap_h*wires.h sig = np.ones(1)*sig_half prob = EM1D( mesh1D, sigmaMap=sigmaMap, hMap=wires.h, chi= np.zeros(FDsurvey.n_layer) ) if prob.ispaired: prob.unpair() if FDsurvey.ispaired: FDsurvey.unpair() prob.pair(FDsurvey) FDsurvey.dtrue = d_true std = 0.05 floor = 0. np.random.seed(1) uncert = std*abs(FDsurvey.dtrue)+floor noise = std*FDsurvey.dtrue*np.random.randn(FDsurvey.dtrue.size) FDsurvey.dobs = FDsurvey.dtrue+noise dmisfit = DataMisfit.l2_DataMisfit(FDsurvey) dmisfit.W = 1./(abs(FDsurvey.dobs)*std+floor) m0 = np.r_[np.log(np.ones_like(sig)*sig_half*0.1), FDsurvey.h] reg_sigma = Regularization.Sparse( mesh1D, mapping=wires.sigma, alpha_s = 0.01 ) p=0. qx, qz = 2., 2. reg_sigma.norms = np.c_[p, qx, qz, 0.] IRLS = Directives.Update_IRLS( maxIRLSiter=10, minGNiter=1, fix_Jmatrix=True, coolingRate=2, betaSearch=False, chifact_start = 1. ) mesh_h = Mesh.TensorMesh([1]) reg_h = Regularization.Sparse( mesh_h, mapping=wires.h, alpha_s = 1., alpha_x=0. ) reg = reg_sigma + reg_h opt = Optimization.ProjectedGNCG(maxIter = 20) opt.maxIterLS = 5 invProb = InvProblem.BaseInvProblem(dmisfit, reg, opt) beta = Directives.BetaSchedule(coolingFactor=2., coolingRate=1.) betaest = Directives.BetaEstimate_ByEig(beta0_ratio=1e0) target = Directives.TargetMisfit() inv = Inversion.BaseInversion(invProb, directiveList=[beta,betaest, target]) # inv = Inversion.BaseInversion(invProb, directiveList=[IRLS,betaest]) prob.counter = opt.counter = Utils.Counter() opt.LSshorten = 0.5 opt.remember('xc') mopt = inv.run(m0) # - print (wires.h*m0, wires.h*mopt, FDsurvey.h) print (sigmaMap*m0, sig, sigmaMap*mopt)
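
# As a quick sanity check on the recovered model, we can compare the observed data with the
# data predicted by the inverted model. This is a minimal sketch; `FDsurvey.dpred` is the
# same method used above to simulate `d_true`.

d_pred = FDsurvey.dpred(mopt)
print("observed: ", FDsurvey.dobs)
print("predicted:", d_pred)
print("normalized residual:", (FDsurvey.dobs - d_pred) / (abs(FDsurvey.dobs) * std + floor))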
notebooks/examples/EM1D_inversion_FD_height_correction_halfspace.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # First part of the Spark activity

import pyspark
sc = pyspark.SparkContext('local[*]')

# Read the JSON file and filter for the trending and English tweets.

# +
import json
from time import time

t0 = time()
input = sc.textFile("../data/raw/Datasets/Tweets/tweets2.json")
t1 = time() - t0
tweet = input.map(lambda x: json.loads(x))
print("read file took {} seconds".format(round(t1, 3)))

# +
# Filter English tweets
english_tweets = tweet.filter(lambda t: "en" in t["lang"]).map(lambda t: {"id": t["id"], "name": t["user"]["name"], "text": t["text"], "lang": t["lang"]})
# Trending tweets are those whose text starts with a hashtag
trendings = tweet.filter(lambda t: "#" in t["text"][0]).map(lambda t: {"id": t["id"], "name": t["user"]["name"], "text": t["text"], "lang": t["lang"]})
print(trendings.first())
print(english_tweets.first())
# -

# Create a sequence file in order to save the filtered tweets.

# +
import os, shutil
# If the output files exist, remove them
if os.path.exists("../reports/sequence_files"):
    shutil.rmtree("../reports/sequence_files")

trending_tweet = trendings.map(lambda t: (t["id"], t["text"]))
# Save the trending tweets as a sequence file.
trending_tweet.saveAsSequenceFile("../reports/sequence_files")
print(trending_tweet.first())
# -

# Count the words, using the sequence file (just the filtered data) as the input,
# and then save the results to a text file.

# +
text_file = sc.textFile("../reports/sequence_files/part-00000")
counts = text_file.flatMap(lambda line: line.split(" ")).map(lambda word: (word, 1)).reduceByKey(lambda a, b: a + b)
counts.saveAsTextFile("../reports/results")
counts.count()
# -

# # Read the saved sequence file and then select the first 10 tweets

# +
t0 = time()
seq = sc.sequenceFile("../reports/sequence_files", "org.apache.hadoop.io.Text", "org.apache.hadoop.io.IntWritable")
t1 = time() - t0
print("read sequence file took {} seconds".format(round(t1, 3)))
# -

# Save the first 10 tweets into a text file.

# +
if os.path.exists("../reports/top_10"):
    shutil.rmtree("../reports/top_10")

# Write the tweets into a text file.
top_10 = sc.parallelize(seq.take(10))
top_10.saveAsTextFile("../reports/top_10")
# -
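
# As a follow-up, we can rank the word counts to see the most frequent words. `takeOrdered`
# is a standard RDD action, so this sketch just reuses the `counts` RDD from above.

# +
top_words = counts.takeOrdered(10, key=lambda pair: -pair[1])
for word, count in top_words:
    print("{}: {}".format(word, count))
# -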
notebooks/twitter_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #import necessary libraries import pandas as pd import time import numpy as np import os # + #support functions def yaml_header(title='papertitle', date='today', author='admin', tags = ['article']): #clean empty values flatTags = [item for sublist in tags for item in sublist] try: for i in range(len(flatTags)): if flatTags[i]=="None": flatTags.pop(i) except IndexError: print(range(len(tags))) string = "---\n\r"+ \ "title: \"{}\"\n\r".format(title) + \ "date: \"{}\"\n\r".format(str(date))+ \ "authors: {}\n\r".format(str(author)) +\ "tags: {}\n\r".format(str(tags)) +\ "layout: \"post\"\n\r"+\ "---"+"\n\r" return string def body(#abstract='lorem ipsum', publishingDate = '19/10/2020', publishedOn = "journaljournal", summary='interesting lorem ipsum', authors = ['Anonymous'], link=['google.com'], linkVideo=['www.youtube.com']): #clean empty values #for i in range(len(authors)): # if authors[i]=="None": # authors.pop(i) # #clean empty values for i in range(len(link)): if link[i]=="None": link.pop(i) #clean empty values for i in range(len(linkVideo)): if linkVideo[i]=="None": linkVideo.pop(i) #print(link) string = "Publishing date: {}\n\r".format(str(publishingDate)) + \ "\n\r" + \ "Published on: {}\n\r".format(str(publishedOn)) + \ "\n\r" + \ "summary: {}\n\r".format(summary) + \ "\n\r" + \ "authors: {}\n\r".format(authors) + \ "\n\r" + \ "link to paper: {}\n\r".format(link) + \ "\n\r" + \ 'Icons made by <a href="https://www.flaticon.com/free-icon/bookshelves_3576884" title="catkuro">catkuro</a> from <a href="https://www.flaticon.com/" title="Flaticon"> www.flaticon.com</a>' return string # "abstract: '{}'\n\r".format(abstract) + \ # "\n\r" + \ # "link to video: '{}'\n\r".format(linkVideo) + \ # "\n\r" + \ def folder_name(name="folder awesome: so much clenaing to do!"): cleaned = name.lower() cleaned = cleaned.replace(" ","_") cleaned = cleaned.replace(":","") cleaned = cleaned.replace("!","") cleaned = cleaned.replace("?","") cleaned = cleaned.replace("-","") cleaned = cleaned.replace("|","") cleaned = cleaned.replace(",","") cleaned = cleaned.replace("#","") cleaned = cleaned.replace('"',"") cleaned = cleaned.replace("'","") cleaned = cleaned.replace("‘","") cleaned = cleaned.replace("’","") cleaned = cleaned.replace(".","") return cleaned # - # post structure: # # --- (YAML header) # # Title # date # author # layout # categories # tags # # --- # paper abstract # editors summary # # Published on: # # authors # # link # # link to videos # # *** # post automatically generated by: # # *** # # + data = pd.read_csv("plos-items.csv") data = data.fillna("None") root = "./content/en/post/" #print(range(len(data))) for index, row in data.iterrows(): #print(index) #print(row["Source (URL items only)" ]) titleClean = row['Title (URL items only)'] titleClean = titleClean.replace("\"", "") temp = yaml_header (title=titleClean, date=row['Date Published (URL items only)'], author=['admin'], tags = [row["Content Type (URL items only - Research Article, Web Article, Commentary, Video, Poster)"], row["Featured Rank (1 = Editor's Pick, 2-6 = Featured Research, 7-12 = Related Content)" ], row["Source (URL items only)" ] ]) temp1 = body(#abstract=[""], publishingDate =row['Date Published (URL items only)'], publishedOn =row['Source (URL items only)'], summary=row['Summary'], authors = row['Authors 
(URL items only)'], link=row['URI (DOI or URL)'], #linkVideo=["None"] ) folder = folder_name(name = row['Title (URL items only)']+"_"+row['Source (URL items only)']) try: os.mkdir(root+folder) os.popen('cp '+ './images/featured.png '+root+folder+'/featured.png') except FileExistsError: print("folder already exists") except FileNotFoundError: print("file+folder_name do not match") #if index <5000: try: with open(root+folder+"/index.md","w") as file: print(root+folder) file.write(temp) file.write(temp1) except FileNotFoundError: print ("ERROR: "+ folder) #print(folder) #print(temp) #print(temp1) # -
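
# A quick sanity check of the slug helper above (a minimal sketch with a made-up title):

print(folder_name(name="A Title: So Much Cleaning, To Do!"))
# prints: a_title_so_much_cleaning_to_do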
convert_plos_to_md.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Getting Started with Shape Cohort Generator

# ## Before you start!
#
# - This notebook assumes that the shapeworks conda environment has been activated using `conda activate shapeworks` on the terminal.
# - See [Getting Started with Notebooks](getting-started-with-notebooks.ipynb) for information on setting up your environment and notebook shortcuts.
# - See [Getting Started with Meshes](getting-started-with-meshes.ipynb) to learn how to load and visualize meshes.
# - See [Getting Started with Segmentations](getting-started-with-segmentations.ipynb) to learn how to load and visualize binary segmentations.
# - Note: example output was generated on a Linux/Mac environment and may look different on Windows.
#
# ## In this notebook, you will learn:
#
# How to use the `ShapeCohortGenerator` package to generate meshes and segmentations (binary images) for synthetic shape cohorts, i.e., parameterized families of shapes.
#
# ## About `ShapeCohortGenerator`
#
# `ShapeCohortGenerator` is a python package that generates synthetic shape cohorts with groundtruth surface correspondences by varying different parameters describing such shape families.
#
# ### What is a shape cohort?
#
# A shape cohort is a collection of geometric shapes that attain clear differences in shape; however, they share common characteristics that stem from the underlying mechanisms involved in their formation. For real-world shapes, e.g., anatomical structures, such common characteristics (or factors of variation) are not known in advance; hence ShapeWorks discovers such factors of variation directly from surface meshes or binary segmentations of such shapes. `ShapeCohortGenerator` uses the true factors of variation known for synthetic shapes that are analytically parameterized.
#
# ### Why `ShapeCohortGenerator`?
#
# We require a shape population dataset to run the shape modeling workflow. Each population dataset requires unique grooming steps. Developing and testing complicated grooming pipelines for large-scale datasets can consume a lot of computational resources and time. Hence, having a few toy datasets, which are lightweight and robust in variability, can make this development and debugging process easier and simpler. These cohorts can also be used to test the optimization workflow.
#
# ### What families of shapes can be generated by `ShapeCohortGenerator`?
#
# `ShapeCohortGenerator` currently supports two main families of synthetic shapes, namely [ellipsoids](https://en.wikipedia.org/wiki/Ellipsoid) and [supershapes](https://link.springer.com/article/10.1007/s00158-018-2034-z?shared-article-renderer), along with the related variants described below (2D supershapes, joint ellipsoids, and tori).
#
# #### Ellipsoids
# An [ellipsoid](https://en.wikipedia.org/wiki/Ellipsoid) is symmetrical about three mutually perpendicular axes that intersect at the center. If a, b, and c are the principal semiaxes, the general equation of such an ellipsoid is
# $$\frac{x^2}{a^2} + \frac{y^2}{b^2} + \frac{z^2}{c^2} = 1$$
#
# #### Supershapes
# [Supershapes](https://link.springer.com/article/10.1007/s00158-018-2034-z?shared-article-renderer) are an extension of [superellipses](https://pubmed.ncbi.nlm.nih.gov/21659124/) that can exhibit variable symmetry as well as asymmetry.
# Supershapes can be described through a single equation, the so-called superformula, that parametrizes a wide variety of shapes, including geometric primitives. The superformula is given by:
#
# $$ r(\theta) = \left[ \left| \frac{1}{a} \cos \left( \frac{m\theta}{4} \right) \right|^{n_2} + \left| \frac{1}{b} \sin \left(\frac{m\theta}{4} \right) \right|^{n_3} \right]^{-\frac{1}{n_1}} $$
#
# Unlike superellipses, supershapes need not be symmetric; the parameter $m$ controls the rotational symmetry. The values of $a$ and $b$ control the size, and the exponents $n_1$, $n_2$, and $n_3$ control the curvature of the sides. The superformula can produce a wide range of shapes, including many shapes found in nature.
#
# The `ShapeCohortGenerator` package allows the user to specify the rotational symmetry $m$ and the size. The values of $n_1$, $n_2$, and $n_3$ are randomly selected to create shapes with different curvatures. Examples of these supershapes with different $m$ values can be seen below.
#
# <p><img src="https://sci.utah.edu/~shapeworks/doc-resources/gifs/supershapes_examples.png"></p>
#
# #### 2D Supershapes
# 2D supershapes are the same as supershapes but two-dimensional instead of three-dimensional. When generation is called, contours are created rather than meshes. When generate_segmentations and generate_images are called, 2D .png images are created rather than 3D .nrrd images.
#
# #### Joint Ellipsoids
# Joint ellipsoids are two ellipsoids positioned in a parameterized way in relation to each other. This cohort allows for exploring modes of variation such as relative rotation.
#
# #### Torus
# Tori are donut-shaped surfaces generated by a circle rotated about an axis in its plane that does not intersect the circle. For the tori mesh generation, the arguments are the same as for ellipsoids, except that instead of `randomize_x_radius` and `randomize_y_radius` we have `randomize_ring_radius` (outside) and `randomize_cross_section_radius` (inside).
# <p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/tori.png"></p>
#
# ### What can you do with `ShapeCohortGenerator`?
#
# The `ShapeCohortGenerator` package can be used to generate collections of ellipsoids or supershapes, where the user can control the number of shapes in the cohort and the variability of the members of the cohort.
#
# Each cohort will have mesh data (vtk format; supershapes will be saved in stl format) and segmentation image data (nrrd format). The cohorts generated by the package can be run directly with `ShapeWorks`. Generating these cohorts in the `Output` folder would be a good way to start.
#
# We will also define modular/generic helper functions as we walk through these items to reuse functionality without duplicating code.

# ### Importing the `shapeworks` library

import shapeworks as sw

# ### Importing the `ShapeCohortGen` library
# To use this package, first a `generator` is defined, then `generate()` is called to create shapes in vtk mesh format. Then segmentations (binary images) and images (synthetic intensities that mimic imaging data for real shapes) can be created from those meshes.
#
# Each generator has three functions:
#
# - `generate()` for mesh generation (function specific to the generator type)
# - `generate_segmentations()` for segmentation generation based on the meshes (general function shared by all generator types)
# - `generate_images()` for image generation based on the segmentations (general function shared by all generator types)

import ShapeCohortGen

# ### Defining parameters for the `pyvista` plotter

# +
# define parameters that control the plotter

# common for volumes and meshes visualization
notebook = False          # True will set the renderings inline
show_borders = True       # show borders for each rendering window
show_axes = True          # show a vtk axes widget for each rendering window
show_bounds = True        # show volume bounding box
show_all_edges = True     # add an unlabeled and unticked box at the boundaries of the plot
font_size = 10            # text font size for windows
link_views = True         # link all rendering windows so that they share the same camera and axes boundaries

# for volumes
shade_volumes = True      # use shading when performing volume rendering
color_map = 'coolwarm'    # color map for volume rendering, e.g., 'bone', 'coolwarm', 'cool', 'viridis', 'magma'

# for meshes
meshes_color = 'tan'      # color to be used for meshes (can be a list with the same size as meshList if different colors are needed)
mesh_style = 'surface'    # visualization style of the mesh: style='surface', style='wireframe', style='points'
show_mesh_edges = False   # show mesh edges
# -

# ## Generating an Ellipsoid Cohort
#
# ### Step 1: Initialize the Ellipsoid Generator
#
# Here, we will initialize an ellipsoid cohort generator. The output directory needs to be specified; otherwise, an output directory will be generated automatically.
#
# Argument:
# - `out_dir`: path where the dataset should be saved<br>
#   Datatype: `string`<br>
#   Default value: `current_directory/generated_ellipsoid_cohort/`<br>

out_dir = "../Output/Generated_Ellipsoids/"
ellipsoid_generator = ShapeCohortGen.EllipsoidCohortGenerator(out_dir)

# ### Step 2: Generate Meshes
#
# For the ellipsoid mesh generation, you can specify the following arguments:
# - `num_samples`: number of samples in the cohort (dataset)<br>
#   Datatype: `int`<br>
#   Default value: 3<br>
# - `randomize_center`: randomizes the centers for ellipsoid mesh generation if set to `True`<br>
#   Datatype: `bool`<br>
#   Default value: `True`<br>
# - `randomize_rotation`: randomizes the orientation of the ellipsoid if set to `True`<br>
#   Datatype: `bool`<br>
#   Default value: `True`<br>
# - `randomize_x_radius`: randomizes the radius of the ellipsoid along the x-axis if set to `True`, or else the value is fixed at 20 for all ellipsoids<br>
#   Datatype: `bool`<br>
#   Default value: `True`<br>
# - `randomize_y_radius`: randomizes the radius of the ellipsoid along the y-axis if set to `True`, or else the value is fixed at 10 for all ellipsoids<br>
#   Datatype: `bool`<br>
#   Default value: `True`<br>
# - `randomize_z_radius`: randomizes the radius of the ellipsoid along the z-axis if set to `True`, or else the value is fixed at 10 for all ellipsoids<br>
#   Datatype: `bool`<br>
#   Default value: `True`<br>

num_samples = 8
meshFiles = ellipsoid_generator.generate(num_samples)

# +
meshList = []
for i in range(len(meshFiles)):
    shapeMesh = sw.Mesh(meshFiles[i])
    meshList.append(shapeMesh)

# Plot the meshes
sw.plot_meshes(meshList, use_same_window=True, notebook=notebook)
# -

# <p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/generated_ellipsoids.png"></p>

# ### Step 3: Generate Segmentations
#
# For segmentation generation, you can specify the following arguments:
#
# - `randomize_size`: randomize the size of the images to include more background if set to `True`<br>
#   Datatype: `bool`<br>
#   Default value: `True`<br>
# - `spacing`: set the spacing of the segmentation image<br>
#   Datatype: `list`<br>
#   Default value: `[1,1,1]`<br>
# - `allow_on_boundary`: if set to `True`, randomly selects 20% of the samples and ensures that those shapes are touching two randomly selected axes out of `[x,y,z]`<br>
#   Datatype: `bool`<br>
#   Default value: `True`<br>

segFiles = ellipsoid_generator.generate_segmentations()

# Let's visualize the generated segmentations.

# +
shapeSegList = []
shapeNames = []
for segFile in segFiles:
    shapeSegList.append(sw.Image(segFile))
    shapeNames.append(segFile.split('/')[-1])
print(shapeNames)

sw.plot_volumes(shapeSegList,
                volumeNames=shapeNames,
                notebook=notebook,
                show_borders=show_borders,
                shade_volumes=shade_volumes,
                show_axes=show_axes,
                show_bounds=show_bounds,
                show_all_edges=show_all_edges,
                font_size=font_size,
                link_views=True
               )
# -

# <p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/generated_ellipsoids_segs.png"></p>

# ### Step 4: Generate Images - Turning segmentations into non-binary images
#
# For the image generation, a Gaussian distribution is used to define foreground and background pixel values, and a blur factor is used to blur the boundary with a Gaussian filter. You can specify the following arguments:
#
# - `blur_factor`: size of the Gaussian filter to use for boundary blurring<br>
#   Datatype: `int`<br>
#   Default value: `1`<br>
# - `foreground_mean`: mean of the foreground pixel value distribution<br>
#   Datatype: `int`<br>
#   Default value: `180`<br>
# - `foreground_var`: variance of the foreground pixel value distribution<br>
#   Datatype: `int`<br>
#   Default value: `30`<br>
# - `background_mean`: mean of the background pixel value distribution<br>
#   Datatype: `int`<br>
#   Default value: `80`<br>
# - `background_var`: variance of the background pixel value distribution<br>
#   Datatype: `int`<br>
#   Default value: `30`<br>

imageFiles = ellipsoid_generator.generate_images()

# Let's compare a segmentation to its corresponding image.

print("Segmentation:")
seg0 = sw.Image(segFiles[0])
sw.plot_volumes(seg0)

# <p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/generated_ellipsoid_seg.png"></p>

print("Image:")
img0 = sw.Image(imageFiles[0])
sw.plot_volumes(img0)

# <p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/generated_ellipsoid_image.png"></p>

# ## Generating a Supershapes Cohort
#
# Supershapes are parameterized shapes whose geometry is based on a given number of lobes, $m$.

# ### Step 1: Initialize the Supershapes Generator
#
# Here, we will initialize a supershapes cohort generator. The output directory needs to be specified; otherwise, an output directory will be generated automatically.
#
# Argument:
# - `out_dir`: path where the dataset should be saved<br>
#   Datatype: `string`<br>
#   Default value: `current_directory/generated_supershapes_cohort/`<br>

out_dir = "../Output/Generated_Supershapes/"
ss_generator = ShapeCohortGen.SupershapesCohortGenerator(out_dir)

# ### Step 2: Generate Meshes
#
# For the supershapes mesh generation, you can specify the following arguments:
# - `num_samples`: number of samples in the cohort (dataset)<br>
#   Datatype: `int`<br>
#   Default value: 3<br>
# - `randomize_center`: randomizes the centers for supershape mesh generation if set to `True`<br>
#   Datatype: `bool`<br>
#   Default value: `True`<br>
# - `randomize_rotation`: randomizes the orientation of the supershapes if set to `True`<br>
#   Datatype: `bool`<br>
#   Default value: `True`<br>
# - `m`: number of lobes the supershapes should have<br>
#   Datatype: `int`<br>
#   Default value: `3`<br>
# - `size`: size of the meshes (they won't extend more than `size` away from the center in any direction)<br>
#   Datatype: `int`<br>
#   Default value: `20`<br>

num_samples = 8
meshFiles = ss_generator.generate(num_samples)

# +
meshList = []
for i in range(len(meshFiles)):
    shapeMesh = sw.Mesh(meshFiles[i])
    meshList.append(shapeMesh)

# Plot the meshes
sw.plot_meshes(meshList, use_same_window=True, notebook=notebook)
# -

# <p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/generated_supershapes.png"></p>

# ### Step 3: Generate Segmentations
#
# This step is data type independent; the options are the same as they were for the ellipsoids.

segFiles = ss_generator.generate_segmentations()

# Let's visualize the generated segmentations.

# +
shapeSegList = []
shapeNames = []
for segFile in segFiles:
    shapeSegList.append(sw.Image(segFile))
    shapeNames.append(segFile.split('/')[-1])
print(shapeNames)

sw.plot_volumes(shapeSegList,
                volumeNames=shapeNames,
                notebook=notebook,
                show_borders=show_borders,
                shade_volumes=shade_volumes,
                show_axes=show_axes,
                show_bounds=show_bounds,
                show_all_edges=show_all_edges,
                font_size=font_size,
                link_views=True
               )
# -

# <p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/generated_supershapes_segs.png"></p>

# ### Step 4: Generate Images
#
# This is also a standard function and has all the same options as listed before.

imageFiles = ss_generator.generate_images()

# Let's compare a segmentation to its corresponding image.

print("Segmentation:")
seg0 = sw.Image(segFiles[0])
sw.plot_volumes(seg0, notebook=notebook)

# <p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/generated_supershapes_seg.png"></p>

print("Image:")
img0 = sw.Image(imageFiles[0])
sw.plot_volumes(img0, notebook=notebook)

# <p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/generated_supershapes_image.png"></p>
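
# ## Other cohort types
#
# The 2D supershape, joint-ellipsoid, and torus cohorts described at the start of this
# notebook follow the same three-step pattern (generate meshes, then segmentations, then
# images). The cell below sketches that pattern for a torus cohort; the generator class
# name `ToriCohortGenerator` is our assumption based on the naming convention of the two
# generators used above, so check it against the `ShapeCohortGen` package before running.

out_dir = "../Output/Generated_Tori/"
# Assumed class name, following the Ellipsoid/Supershapes naming convention:
torus_generator = ShapeCohortGen.ToriCohortGenerator(out_dir)
meshFiles = torus_generator.generate(num_samples)
segFiles = torus_generator.generate_segmentations()
imageFiles = torus_generator.generate_images()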
docs/notebooks/getting-started-with-shape-cohort-generation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np import nltk from collections import Counter from nltk.corpus import stopwords from sklearn.metrics import log_loss from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from scipy.optimize import minimize stops = set(stopwords.words("english")) import xgboost as xgb from sklearn.model_selection import train_test_split import multiprocessing import difflib import time def diff_ratios(st1, st2): seq = difflib.SequenceMatcher() seq.set_seqs(str(st1).lower(), str(st2).lower()) return seq.ratio() def word_match_share(row): q1words = {} q2words = {} for word in str(row['question1']).lower().split(): if word not in stops: q1words[word] = 1 for word in str(row['question2']).lower().split(): if word not in stops: q2words[word] = 1 if len(q1words) == 0 or len(q2words) == 0: return 0 shared_words_in_q1 = [w for w in q1words.keys() if w in q2words] shared_words_in_q2 = [w for w in q2words.keys() if w in q1words] R = (len(shared_words_in_q1) + len(shared_words_in_q2))/(len(q1words) + len(q2words)) return R def get_features2(df_features): print('nouns...') df_features['question1_nouns'] = df_features.question1.map(lambda x: [w for w, t in nltk.pos_tag(nltk.word_tokenize(str(x).lower())) if t[:1] in ['N']]) df_features['question2_nouns'] = df_features.question2.map(lambda x: [w for w, t in nltk.pos_tag(nltk.word_tokenize(str(x).lower())) if t[:1] in ['N']]) df_features['z_noun_match'] = df_features.apply(lambda r: sum([1 for w in r.question1_nouns if w in r.question2_nouns]), axis=1) #takes long print('lengths...') df_features['z_len1'] = df_features.question1.map(lambda x: len(str(x))) df_features['z_len2'] = df_features.question2.map(lambda x: len(str(x))) df_features['z_word_len1'] = df_features.question1.map(lambda x: len(str(x).split())) df_features['z_word_len2'] = df_features.question2.map(lambda x: len(str(x).split())) print('difflib...') df_features['z_match_ratio'] = df_features.apply(lambda r: diff_ratios(r.question1, r.question2), axis=1) #takes long print('word match...') df_features['z_word_match'] = df_features.apply(word_match_share, axis=1) print('tfidf...') z1 = tfidf.transform(df_features.question1) z2 = tfidf.transform(df_features.question2) df_features['z_tfidf_sum1'] = z1.sum(axis = 1) df_features['z_tfidf_sum2'] = z2.sum(axis = 1) df_features['z_tfidf_len1'] = z1.sign().sum(axis = 1) df_features['z_tfidf_len2'] = z2.sign().sum(axis = 1) df_features['z_tfidf_mean1'] = df_features['z_tfidf_sum1']/df_features['z_tfidf_len1'] df_features['z_tfidf_mean2'] = df_features['z_tfidf_sum2']/df_features['z_tfidf_len2'] return df_features.fillna(0.0) def get_full(train_df, test_df): t1 = time.time() full = pd.concat((train_df, test_df), axis = 0) print(full.shape) final = get_features2(full) df_train = final.iloc[:train_df.shape[0], :] df_test = final.iloc[train_df.shape[0]:, :] print(df_train.shape, df_test.shape) df_train.to_csv('train_the_1owl_features.csv', index=False) df_test.to_csv('test_the_1owl_features.csv', index=False) print('Time it took:', time.time() - t1) return # + src_train = 'df_train_spacy_lemmat.csv' src_test = 'df_test_spacy_lemmat.csv' train = pd.read_csv(src_train) test = pd.read_csv(src_test) train.fillna('empty', inplace = True) test.fillna('empty', inplace = True) tfidf =
TfidfVectorizer(stop_words='english', ngram_range=(1, 1)) #cvect = CountVectorizer(stop_words='english', ngram_range=(1, 1)) tfidf_txt = pd.Series(train['question1'].tolist() + train['question2'].tolist() + test['question1'].tolist() + test['question2'].tolist()).astype(str) tfidf.fit_transform(tfidf_txt) #cvect.fit_transform(tfidf_txt) # - get_full(train, test)
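# A quick sanity check of the hand-crafted pair features defined above on a toy
# question pair (illustrative only; the column names match the Quora data used
# in this script):
toy = pd.DataFrame({'question1': ['How do I learn Python quickly?'],
                    'question2': ['What is the fastest way to learn Python?']})
print(diff_ratios(toy.question1[0], toy.question2[0]))  # difflib ratio in [0, 1]
print(toy.apply(word_match_share, axis=1)[0])           # shared non-stopword share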
features/Extraction - the_1owl 15.04.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # deactivate package reload - gives trouble when pickling objects # %load_ext autoreload # %autoreload 2 from os.path import join, expanduser import logging import matplotlib.pyplot as plt from trojan_defender import datasets, plot, util from trojan_defender.poison import patch, poison # + # config logging logging.basicConfig(level=logging.INFO) # matplotlib size plt.rcParams['figure.figsize'] = (8, 8) # - # ## Poisoning MNIST mnist = datasets.mnist() p = patch.Patch('sparse', proportion=0.01, input_shape=mnist.input_shape, dynamic_mask=False, dynamic_pattern=False) objective = util.make_objective_class(0, mnist.num_classes) patched = mnist.poison(objective, p, fraction=0.20) plot.grid(patched.x_train, patched.y_train_cat, n=9) p = patch.Patch('sparse', proportion=0.05, input_shape=mnist.input_shape, dynamic_mask=True, dynamic_pattern=True) objective = util.make_objective_class(0, mnist.num_classes) patched = mnist.poison(objective, p, fraction=0.5) plot.grid(patched.x_train, patched.y_train_cat) # ## Poisoning CIFAR-10 cifar10 = datasets.cifar10() p = patch.Patch('block', proportion=0.05, input_shape=cifar10.input_shape, dynamic_mask=True, dynamic_pattern=True) objective = util.make_objective_class(0, cifar10.num_classes) patched = cifar10.poison(objective, p, fraction=0.5) plot.grid(patched.x_train, patched.y_train_cat) p = patch.Patch('sparse', proportion=0.05, input_shape=cifar10.input_shape, dynamic_mask=True, dynamic_pattern=True) objective = util.make_objective_class(0, cifar10.num_classes) patched = cifar10.poison(objective, p, fraction=0.5) plot.grid(patched.x_train, patched.y_train_cat)
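# For intuition, a "sparse" patch like the ones above can be pictured as a fixed
# random pixel mask plus a fixed pattern that overwrites a small fraction of every
# poisoned image. A minimal numpy sketch of the idea (not the trojan_defender
# implementation; `apply_sparse_patch` is a made-up helper):
import numpy as np

def apply_sparse_patch(x, proportion=0.01, seed=0):
    """Overwrite a random `proportion` of pixel positions with a static pattern."""
    rng = np.random.default_rng(seed)
    mask = rng.random(x.shape[1:]) < proportion  # static mask, shared by all images
    pattern = rng.random(x.shape[1:])            # static pattern
    x = x.copy()
    x[:, mask] = pattern[mask]
    return x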
experiments/poisoning-data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] nteract={"transient": {"deleting": false}} # # Stock Forecasting using Darts Part 2 # + [markdown] nteract={"transient": {"deleting": false}} # https://github.com/unit8co/darts # + # Libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt from darts import TimeSeries from darts.models import NBEATSModel from darts.metrics import mape from darts.dataprocessing.transformers import Scaler import warnings warnings.filterwarnings("ignore") import yfinance as yf yf.pdr_override() # - stock = 'AMD' # input start = '2017-01-01' # input end = '2021-11-08' # input df = yf.download(stock, start, end) df.head() plt.figure(figsize=(16,8)) plt.plot(df['Adj Close']) plt.title('Stock Price') plt.ylabel('Price') plt.show() scaler_close = Scaler() dfm = df.resample('M').mean() dfm = dfm.reset_index() series = TimeSeries.from_dataframe(dfm, 'Date', 'Adj Close') series close = round(series,2) close_scaled = scaler_close.fit_transform(series) close_scaled.plot(label = "Closing") close_series_train, close_series_val = close_scaled[:-12], close_scaled[-12:] type(close_series_train) model = NBEATSModel(input_chunk_length=24 , output_chunk_length=12, n_epochs = 100 , random_state = 15) model.fit([close_series_train],verbose = True) pred = model.predict(n = 12, series = close_series_train) print("Mape = {:.2f}%".format(mape(close_scaled, pred))) close_scaled.plot(label = "actual") pred.plot(label = "forecasted") # validation data set plt.legend()
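# Note the MAPE above is computed on the scaled series. To read the forecast in
# the original price units, the fitted Scaler can be inverted (inverse_transform
# is part of the darts Scaler API used above):
pred_prices = scaler_close.inverse_transform(pred)
pred_prices.plot(label="forecast (price scale)")
plt.legend()
plt.show()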
Python_Stock/Time_Series_Forecasting/Stock_Forecasting_Darts_Part2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import requests from bs4 import BeautifulSoup # ## Reading out the HTML text r = requests.get("https://www.zahnarztvergleich.ch/zahnarzt.php?p=20&geschlecht=0") r = r.text # ## Reading out the HTML text and saving it right away url = "https://www.zahnarztvergleich.ch/zahnarzt.php?p=20&geschlecht=0" res = requests.get(url) page_content = res.text with open("test.html", "w") as file: file.write(page_content) # ## Visiting 4 different pages and saving individual files # First build the URLs we want to visit lst = ["20", "30", "40", "50"] urllst = [] for number in lst: url = "https://www.zahnarztvergleich.ch/zahnarzt.php?p="+ number + "&geschlecht=0" urllst.append(url) # And now build the for loop to visit all URLs and read out the contents. number = 0 for page in urllst: page_content = requests.get(page) page_content = page_content.text with open(str(number)+".html", "w") as file: file.write(page_content) number +=1
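# BeautifulSoup is imported above but never used yet. As a sketch of the natural
# next step, we can parse one of the saved files (the tags queried here are
# generic; the real selectors depend on the page's actual structure):
with open("0.html") as file:
    soup = BeautifulSoup(file.read(), "html.parser")
print(soup.title)                                  # page title element
links = [a.get("href") for a in soup.find_all("a")]
print(len(links), "links found")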
12 Pandas Teil 4/uebungen_aus_projekten/Webpages besuchen mit Requests besuchen und den ganzen Code ablegen.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 # + import numpy as np import tensorflow as tf from sklearn.metrics import accuracy_score from support.neural_nets import * from sklearn.preprocessing import OneHotEncoder # import MNIST from tensorflow mnist = tf.keras.datasets.mnist (x_train, y_train),(x_test, y_test) = mnist.load_data() x_train = x_train.reshape(-1, 28**2) x_test = x_test.reshape(-1, 28**2) # from ag's workbook x_train = x_train.astype(np.float32).reshape(-1, 28*28) / 255.0 x_test = x_test.astype(np.float32).reshape(-1, 28*28) / 255.0 y_train = y_train.astype(np.int32) y_test = y_test.astype(np.int32) # y_train = y_train.reshape(-1, 1) # y_test = y_test.reshape(-1, 1) # oh = OneHotEncoder(sparse=False) # oh.fit(y_train) # y_train = oh.transform(y_train) # y_test = oh.transform(y_test) # - x_train.shape, y_train.shape, x_test.shape, y_test.shape n_inputs = x_train.shape[1] # replace by getting actual inputs from X data n_classes = 10 # same as above # + from datetime import datetime now = datetime.utcnow().strftime("%Y%m%d%H%M%S") root_logdir = "tf_logs" logdir = "{}/run-{}/".format(root_logdir, now) learning_rate=0.01 tf.reset_default_graph() # Construction phase X = tf.placeholder(tf.float32, shape=(None, n_inputs), name='X') y = tf.placeholder(tf.int32, shape=(None), name='y') h1_units = 400 h2_units = 300 h3_units = 225 with tf.name_scope('dnn'): hidden1 = tf.layers.dense(X, h1_units, activation=tf.nn.relu, name='hidden1') hidden2 = tf.layers.dense(hidden1, h2_units, activation=tf.nn.relu, name='hidden2') hidden3 = tf.layers.dense(hidden2, h3_units, activation=tf.nn.relu, name='hidden3') logits = tf.layers.dense(hidden3, n_classes, name='outputs') with tf.name_scope('loss'): xn = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits, name='cross_entropy') loss = tf.reduce_mean(xn) loss_summary = tf.summary.scalar('Loss', loss) with tf.name_scope('training'): opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) train_op = opt.minimize(loss) with tf.name_scope('eval'): correct = tf.nn.in_top_k(logits, y, 1, name='correct') accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name='accuracy') acc_summary = tf.summary.scalar('Accuracy', accuracy) init = tf.global_variables_initializer() saver = tf.train.Saver() file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph()) # - # ![Image of the Graph](images/tf_graph_img.png) # + # Execution phase epochs = 150 batch_size = 500 n_batches = int(np.ceil(x_train.shape[0] / batch_size)) with tf.Session() as sess: init.run() for epoch in range(epochs): if epoch % 10 == 0: saver.save(sess, "./tmp/dnn/dnn_ckpt_{}.ckpt".format(epoch)) for batch, (X_batch, y_batch) in enumerate(fetch_batch(x_train, y_train, batch_size)): sess.run(train_op, feed_dict={X: X_batch, y: y_batch}) if batch % 10 == 0: acc_train, loss_train = sess.run([accuracy, loss], feed_dict={X: x_train, y: y_train}) acc_test, loss_test, acc_summary_str, loss_summary_str = sess.run([accuracy, loss, acc_summary, loss_summary], feed_dict={X: x_test, y: y_test}) step = epoch * n_batches + batch file_writer.add_summary(acc_summary_str, step) file_writer.add_summary(loss_summary_str, step) print("Epoch {}:\tTrain accuracy: {:.2%}\tTest accuracy: {:.2%}".format(epoch, acc_train, acc_test)) save_path = saver.save(sess, 
"./dnn_final.ckpt") # - # ![Accuracy](images/tf_accuracy_plot.png)
taylor_completed_exercises/exercises_10_artificial_neural_networks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # ### <NAME> - EC581 - 29.10.2016 # # Assignment #3 # # We will test two trading strategies based on return runs. # - Trend Following Strategy # - Buy after n days of positive return # - Sell after m days of negative return # - Mean Reversion Strategy # - Buy after n days of negative return # - Sell after m days of positive return # # ### Steps # 1. Apply two different trading algorithms for these strategies in quantstrat # 2. Apply these strategies to BIST100 index data # 3. For each strategy, optimize n and m using a grid search over the set 1,2,...,100 # 4. Compare optimized versions of these two strategies # 5. Is it better to be a trend-follower or a contrarian in BIST? # load libraries library(quantstrat) library(Quandl) # + # define instruments currency("USD") stock("BIST", currency="USD", multiplier=1) # get data date_from = "2005-08-01" date_to = "2016-05-25" BIST<-Quandl("GOOG/INDEXIST_XU100", type="xts", start_date = date_from, end_date = date_to) BIST<-na.omit(BIST) BIST<-xts(coredata(BIST), as.POSIXct(time(BIST))) # + # define strategy component names portfolio_name = "investiphi" #strategy_trend = "trend_following" strategy_mean = "mean_reversion" #account_trend = "account_trend" account_mean = "account_mean" # remove if defined before rm.strat(portfolio_name) #rm.strat(strategy_trend) rm.strat(strategy_mean) #rm.strat(account_trend) rm.strat(account_mean) # create .blotter and .strategy environments .blotter<-new.env() .strategy<-new.env() # init portfolio and account in .blotter init_eq <- 100000 # 100k init_date <- as.character(as.Date(date_from) - 1) initPortf(portfolio_name, symbols="BIST", initDate=init_date, currency="USD") #initAcct(account_trend, portfolios=portfolio_name, initDate=init_date, currency="USD", initEq = init_eq) initAcct(account_mean, portfolios=portfolio_name, initDate=init_date, currency="USD", initEq = init_eq) initOrders(portfolio_name, initDate=init_date) # - # init strategies #strategy(strategy_trend, store=TRUE) strategy(strategy_mean, store=TRUE) # you can see what's inside temp <- get("USD", envir=FinancialInstrument:::.instrument) summary(temp) # + consecutive_days<-function(days_pos,days_neg, stock, posneg = TRUE) { #days_pos <- 4 #days_neg <- 4 #n_day_signals <- data.frame(positive = logical(length(time(stock))), negative = logical(length(time(stock)))) n_day_signals <- data.frame(sigcol = logical(length(time(stock)))) n_day_signals <- xts( n_day_signals, as.POSIXct(time(stock)) ) n_day_signals[1,1] <- NA #Signal <- xts(c("Positive", "Negative"), as.POSIXct(time(BIST))) sign_counter <- 1 sign_last <- -1 for (i in 2:length(time(stock))) { sign_temp <- sign( as.numeric ( as.numeric( stock[i,4]) - as.numeric( stock[i-1,4]) ) ) if (sign_temp == sign_last) { sign_counter <- sign_counter + 1 } else { sign_counter <- 1 sign_last <- sign_temp } if (posneg) { if (sign_counter == days_pos && sign_last == 1) { n_day_signals[i,1] <- TRUE } else { n_day_signals[i,1] <- NA } } else { if (sign_counter == days_neg && sign_last == -1) { n_day_signals[i,1] <- TRUE } else { n_day_signals[i,1] <- NA } } } # the same signal column is returned for both directions return(n_day_signals$sigcol) } # + add.signal(strategy_mean, name="consecutive_days", arguments = list(days_pos = 6, days_neg = 6, stock=BIST, posneg=TRUE), label="short" ) add.signal(strategy_mean,
name="consecutive_days", arguments = list(days_pos = 6, days_neg = 6, stock=BIST, posneg=FALSE), label="long" ) # - order_qty = 1 # + add.rule(strategy_mean, name='ruleSignal', arguments=list(sigcol='sigcol.short', sigval=1, orderside='short', ordertype='market', orderqty=-order_qty, TxnFees=0, replace=FALSE), type='enter', label='EnterShort' ) add.rule(strategy_mean, name='ruleSignal', arguments=list(sigcol='sigcol.long', sigval=1, orderside='long', ordertype='market', orderqty='all', TxnFees=0, replace=TRUE), type='exit', label='Exit2Long' ) # + add.rule(strategy_mean, name='ruleSignal', arguments=list(sigcol='sigcol.long', sigval=TRUE, orderside='long', ordertype='market', orderqty=order_qty, TxnFees=0, replace=FALSE), type='enter', label='EnterLong' ) add.rule(strategy_mean, name='ruleSignal', arguments=list(sigcol='sigcol.short', sigval=TRUE, orderside='short', ordertype='market', orderqty='all', TxnFees=0, replace=TRUE), type='exit', label='Exit2Short' ) # - summary(get("mean_reversion", envir=.strategy)) # apply strategy applyStrategy(strategy_mean, portfolio_name) # update portfolio updatePortf(portfolio_name) updateAcct(account_mean) updateEndEq(account_mean) OB<-getOrderBook(portfolio_name) getEndEq(account_mean, date_to) OB$investiphi$BIST chart.Posn(portfolio_name, "BIST")
assignments/3/tests/3_trend-and-mean-strategies.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Pythonic Syntactic Sugar # # The Image Basics Notebook was straightforward and closely follows ITK's C++ interface. # # Sugar is great: it gives you energy to get things done faster! SimpleITK has applied a generous amount of syntactic sugar to help get things done faster too. import matplotlib.pyplot as plt import matplotlib as mpl mpl.rc('image', aspect='equal') # %matplotlib inline import SimpleITK as sitk # Download data to work on from downloaddata import fetch_data as fdata # Let us begin by developing a convenient method for displaying images in our notebooks. img = sitk.GaussianSource(size=[64]*2) plt.imshow(sitk.GetArrayFromImage(img)) img = sitk.GaborSource(size=[64]*2, frequency=.03) plt.imshow(sitk.GetArrayFromImage(img)) def myshow(img): nda = sitk.GetArrayFromImage(img) plt.imshow(nda) myshow(img) # ## Multi-dimension slice indexing # # If you are familiar with numpy's slice indexing, then this should be a piece of cake for the SimpleITK image. The Python standard slice interface for a 1-D object: # # <table> # <tr><td>Operation</td> <td>Result</td></tr> # <tr><td>d[i]</td> <td>ith item of d, starting index 0</td></tr> # <tr><td>d[i:j]</td> <td>slice of d from i to j</td></tr> # <tr><td>d[i:j:k]</td> <td>slice of d from i to j with step k</td></tr> # </table> # # With this convenient syntax many basic tasks can be easily done. img[24,24] # ### Cropping myshow(img[16:48,:]) myshow(img[:,16:-16]) myshow(img[:32,:32]) # ### Flipping img_corner = img[:32,:32] myshow(img_corner) myshow(img_corner[::-1,:]) myshow(sitk.Tile(img_corner, img_corner[::-1,::],img_corner[::,::-1],img_corner[::-1,::-1], [2,2])) # ### Slice Extraction # # A 2D image can be extracted from a 3D one. # + img = sitk.GaborSource(size=[64]*3, frequency=0.05) # Why does this produce an error? myshow(img) # - myshow(img[:,:,32]) myshow(img[16,:,:]) # ### Sub-sampling myshow(img[:,::3,32]) # ## Mathematical Operators # # Most python mathematical operators are overloaded to call the SimpleITK filter which performs the same operation on a per-pixel basis. They can operate on two images or an image and a scalar. # # If two images are used then both must have the same pixel type. The output image type is usually the same. # # As these operators basically call ITK filters, which just use raw C++ operators, care must be taken to prevent overflow, division by zero, etc. # # <table> # <tr><td>Operators</td></tr> # <tr><td>+</td></tr> # <tr><td>-</td></tr> # <tr><td>\*</td></tr> # <tr><td>/</td></tr> # <tr><td>//</td></tr> # <tr><td>**</td></tr> # </table> # img = sitk.ReadImage(fdata("cthead1.png")) img = sitk.Cast(img,sitk.sitkFloat32) myshow(img) img[150,150] timg = img**2 myshow(timg) timg[150,150] # ### Division Operators # # All three Python division operators are implemented: `__floordiv__`, `__truediv__`, and `__div__`. # # The true division's output is a double pixel type. # # See [PEP 238](http://www.python.org/peps/pep-0238) to see why Python changed the division operator in Python 3.
# ### Bitwise Logic Operators # # <table> # <tr><td>Operators</td></tr> # <tr><td>&</td></tr> # <tr><td>|</td></tr> # <tr><td>^</td></tr> # <tr><td>~</td></tr> # </table> img = sitk.ReadImage(fdata("cthead1.png")) myshow(img) # ## Comparative Operators # <table> # <tr><td>Operators</td></tr> # <tr><td>\></td></tr> # <tr><td>\>=</td></tr> # <tr><td><</td></tr> # <tr><td><=</td></tr> # <tr><td>==</td></tr> # </table> # # These comparative operators follow the same convention as the rest of SimpleITK for binary images. They have the pixel type of ``sitkUInt8`` with values of 0 and 1. # img = sitk.ReadImage(fdata("cthead1.png")) myshow(img) # ### Amazingly, these make common trivial tasks really trivial myshow(img>90) myshow(img>150) myshow((img>90)+(img>150))
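# The binary images produced by the comparative operators combine naturally with
# the arithmetic operators, e.g. to zero out everything below a threshold (a
# small illustration consistent with the operators shown above):
mask = img > 90                                    # sitkUInt8 image of 0s and 1s
masked = sitk.Cast(mask, img.GetPixelID()) * img   # keep intensities where mask==1
myshow(masked)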
02_Pythonic_Image.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Basic Graphics in MsPASS # ## Introduction # This tutorial will introduce you to some basic graphics capabilities in MsPASS. Like obspy, our graphics module is based on matplotlib's pyplot module. Obspy more or less advises the user to utilize matplotlib graphics directly to build MATLAB-like plots for visualization. To aid users we created a simple set of graphics classes in python that make plotting all standard mspass data objects much simpler. For all such data objects that step can be as simple as creating an instance of the plotting class and then calling its plot method. This tutorial demonstrates that functionality for all four MsPASS data object types (TimeSeries, Seismogram, TimeSeriesEnsemble, and SeismogramEnsemble) with the two different plot types: SeismicPlotter and SectionPlotter. The initial examples use some simulation data created by calls to a local python module. The final section plots some real data. # # First we need to do some initialization to run this tutorial. Run the following code block: #import mspasspy.ccore as mspass # This is our recommended way to load the C++ core library import numpy as np import matplotlib.pyplot as pyplot # This is the internal module used only for this tutorial import graphicstutorial as tutorial # We use these two objects in this tutorial so we use this shorthand for convenience from mspasspy.graphics import SeismicPlotter from mspasspy.graphics import SectionPlotter # ## Displaying Data with SeismicPlotter # ### Basic Plotting # Most seismologists will likely prefer to plot their data with the class called SeismicPlotter. It displays seismic data with time as the x-axis, increasing in the normal way from left to right. Later we will examine the companion class SectionPlotter that makes seismic reflection style plots with time on the y axis and reversed (time increases from top to bottom). # # The approach we use for graphics in MsPASS is very much object oriented. We first create an instance of the class we call SeismicPlotter with this construct: plotter=SeismicPlotter() # This creates an instance of the plotting class SeismicPlotter that we associate with the python variable name plotter. It is a normal class so you can view the docstring in the usual way. Run the following block to see it, but don't worry about the details of the lengthy documentation this produces for now. The point is that plotter is now an instance of a typical python object. help(plotter) # To see how this works let's first create a simple synthetic data ensemble. The small function below will create a TimeSeries ensemble with 20 members that have a linear moveout. ts=tutorial.makets() d=tutorial.maketsens(ts) # This creates a simple ricker wavelet in the variable ts, which is actually an mspass.TimeSeries object. We can plot it immediately like this: plotter.plot(ts) # and we can plot the ensemble created from this basic ricker synthetic with the exact same command but with d replacing the symbol ts. That is: plotter.plot(d) # We currently support only one decoration that can be added directly, which is a title.
Below we add the title "First Plot" to the above: plotter.title="First Plot" plotter.plot(d) # The SeismicPlotter class can produce four different styles of plots: (1) wiggle trace only, (2) wiggle trace variable area, (3) image plot, and (4) wiggle trace variable area with an image plot overlay (the default seen above). To switch styles use the change_style method. The change_style method has one argument that is an abbreviation for the plot types listed. They are: wt, wtva, img, and wtvaimg respectively. Run the following block to better see what these symbols mean: plotter.change_style('wt') plotter.title='Wiggle trace plot (wt)' plotter.plot(d) pyplot.show() plotter.change_style('wtva') plotter.title='Wiggle trace variable area plot (wtva)' plotter.plot(d) pyplot.show() plotter.change_style('img') plotter.title='image only plot' plotter.plot(d) pyplot.show() plotter.change_style('wtvaimg') plotter.title='wtva plot with image overlay (default)' plotter.plot(d) pyplot.show() # We have seen how this plot engine can plot single TimeSeries and TimeSeriesEnsemble data. It can also plot single Seismogram and SeismogramEnsemble data objects. How to display three-component data and ensembles of three-component data involves some choices we have made that are locked into the SeismicPlotter engine. Run the next block to create a single three-component seismogram (Seismogram object): seis=tutorial.makeseis() plotter.change_style('wtva') plotter.plot(seis) # Notice the components are displayed on the y axis centered at 0, 1, and 2. The order corresponds to component numbers 0, 1, and 2 of the Seismogram plotted (normally EW, NS, and Z if oriented to a geographic reference, but the Seismogram object allows the data to be in any reference frame that does not use a singular transformation matrix). # # SeismogramEnsembles present a similar order-of-presentation problem. The SeismicPlotter class uses one approach, which is to plot each ensemble component in a separate window. Run the following code block to see what we mean by that statement: ens3c=tutorial.makeseisens(seis,n=10) plotter.title='SeismogramEnsemble test plot' plotter.change_style('wtva') # Change to wtva to make the amplitude difference between windows clearer plotter.plot(ens3c) # We intentionally reduced the size of the ensemble to 10 to show a default feature of these plots. All three windows are plotted at a common scale. The makeseisens function simply creates 10 time-shifted copies of seis to create this ensemble. The component amplitudes are scaled by 1, 1/2 and 1/3 for 0, 1, and 2 respectively. The amplitudes in these plots reflect that difference. # ### Class Level Plotting Control # The SeismicPlotter class has four attributes, plus a pair of methods, that control the basic appearance of the plots generated independently of the style chosen. These are: # 1. *normalize* is a boolean that, when set true, causes the data to be normalized before plotting to improve the visual display. # 2. *scale* can be used to adjust the gain up or down in combination with normalize (see examples below). # 3. *color_map* is the pyplot name of a color map to use for image plots. # 4. *fill_color* is a pyplot color code to use for the fill color for variable area shading. # 5. The default plots ensembles in order, filling the frame from the bottom upward. This can be switched to fill from top to bottom by calling the *topdown* method, and it can be restored by calling the inverse *bottomup* method. # # We illustrate these with small code segments below.
First, we illustrate the use of normalize and scale. We create a new 10-element TimeSeriesEnsemble and scale each member by a linearly increasing factor of ten times the member number plus one. The script then plots the ensemble to illustrate the amplitude problem this creates for the plot. ens=tutorial.maketsens(ts,n=10) for i in range(10): for j in range(ens.member[i].npts): ens.member[i].data[j] *= 10.0*float(i+1) plotter.title='unscaled ensemble showing need for normalization' plotter.plot(ens) # To prevent excessive clipping we just rerun the plot with normalize set to true. plotter.normalize=True plotter.title='Plot normalized by plotter internal' plotter.plot(ens) # Scale is used to adjust the overall gain level. The default assumes the data are scaled to have values of order 1.0. Setting *scale* less than one will reduce the overall amplitude and setting it greater than one will increase the overall amplitude. Here is an example with the same data as above but with scale set to 0.5. plotter.scale=0.5 plotter.title='Normalized plot with scale 0.5' plotter.plot(ens) # The *normalize* and *scale* attributes in SeismicPlotter have fixed scaling properties, but have the advantage of not altering the input data. An alternative is to use the amplitude scaling function *scale* in mspass. *scale* has a number of options for different scaling metrics and approaches. That function does, however, alter the data amplitude, using the *calib* Metadata value to retain the original absolute amplitude. Here is an example using *scale* to apply a single gain to the entire section: from mspasspy.algorithms.window import scale amp=scale(ens,scale_by_section=True) print('computed section gain=',amp) plotter.plot(ens) # Alternatively, we can use individual trace scaling with a different metric. Here is an example using rms scaling: amps=scale(ens,method='rms',scale_by_section=False,level=3.0) print('Vector of computed amplitudes for each signal=',amps) plotter.title='data scaled by rms metric to level=3' plotter.plot(ens) # Note you can experiment with varying the *level* parameter to vary the overall amplitude level of the plot. Repeated calls to scale illustrate how the data are altered by the scaling function. # # SeismicPlotter has two color-related attributes called *color_map* and *fill_color*. Because these attributes depend on the chosen plot style, they are not accessible directly as attributes. Instead, both should only be set as arguments to change_style.
# # To illustrate this point, here is an example that makes an image plot of the rescaled data in the previous frame but uses the 'jet' color map defined in pyplot: plotter.change_style('img',color_map='jet') plotter.title='image plot with jet color map' plotter.plot(ens) # Similarly, here is an example plotting these data with a wiggle trace variable area plot but with the positive shading in blue instead of the default black: plotter.change_style('wtva',fill_color='b') plotter.title='wtva plot with blue fill' plotter.plot(ens) # Next we add an image background with a greyscale color map and a red fill (wtvaimg): plotter.change_style('wtvaimg',fill_color='r',color_map='Greys') plotter.title='wtvaimg plot with grey background and red va fill' plotter.plot(ens) # Finally, here is the same using the topdown method to reverse the data order: plotter.topdown() plotter.title='Plotted in topdown mode' plotter.plot(ens) # ## Displaying Data with SectionPlotter # Data can also be displayed in seismic reflection style plots where the vertical axis is time and time increases downward. Such displays are necessary, for example, to plot things like receiver function images that are conceptually similar in many cases to a seismic reflection section. # # The API is (mostly) the same for SectionPlotter and SeismicPlotter. Here is an example plotting the same TimeSeriesEnsemble data as above but in SectionPlotter style. ts=tutorial.makets() d=tutorial.maketsens(ts) pl2=SectionPlotter() pl2.title='Simple SectionPlotter example' pl2.plot(d) # All the other api functions should behave the same. For example, here is the same data plotted as an image plot. pl2.change_style('img') pl2.title='Simple SectionPlotter example with image plot' pl2.plot(d) # And here as wtva with the default black fill: pl2.change_style('wtva') pl2.title='Simple SectionPlotter example with wtva' pl2.plot(d)
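# Since the API is shared, the style controls demonstrated for SeismicPlotter
# carry over to SectionPlotter. For example, an image-backed wtva section with a
# grey color map and blue fill (same change_style arguments as shown above):
pl2.change_style('wtvaimg', color_map='Greys', fill_color='b')
pl2.title = 'SectionPlotter wtvaimg with grey background and blue fill'
pl2.plot(d)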
notebooks/BasicGraphics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # <center><h1 style="font-size:40px;">Project 3<br>Object detection</h1></center> # # --- # This project will develop an object detection system with **Single-shot multibox detectors (SSD)**. The task is to use the [COCO](https://cocodataset.org) dataset to predict bounding boxes. # # All **tasks** include **TODO's** that are expected to be done before the deadline. The highlighted **Questions** should be answered in the report. Keep the answers separate so they are easy to read during grading. Some sections include asserts or an expected result as a sanity check. Some sections do not contain any **TODO's**, but it is good to understand them. # # For the **report** we have prepared a *Report.ipynb* notebook. The report should act as a summary of your findings and motivate your choice of approach. A better motivation shows your understanding of the lab. Don't forget to include all **parts** in the report! # # Good luck! # # --- # # Task # # **TODO:** Implement **SSD** to predict bounding boxes on the training set. # # **TODO:** Select a good metric to evaluate the model's performance and argue why the metric gives a good overview of the performance. # # **TODO:** Is it possible to reduce the number of bounding boxes? If yes, implement it and explain why it is a good solution. # # # **Question:** Explain in the report what you did and why. Present a pipeline of the code. # # Import packages # %load_ext autoreload # %autoreload 2 # Hacky solution to access the global utils package import sys,os sys.path.append(os.path.dirname(os.path.realpath('..'))) # + from config import LabConfig import torchmetrics import transforms as T import torch from utils.dataset import CocoDataset from utils.plot import Detection # - # ## Load config # Note that this lab does not support the fineGrained dataset. # + tags=[] cfg = LabConfig() cfg.todict() # - # # Dataset # COCO contains a wide variety of images and the corresponding annotations. To download the dataset we use aria2 and the script found in *download.sh*. Note that the dataset is quite large, so we need plenty of space on the hard drive. # # Change the code below to True if you want to download the data. Don't forget to change it back, since it will download the data each time you run the full notebook. if False: # !sh ./download.sh $cfg.COCO_DATASET # ## Augmentation # To ensure that each image has the same shape, we pad the images. It is possible to resize the images, but then the bounding boxes also have to be transformed to the correct shape, which is not covered in this project!
train_transform = T.Compose([ T.ToTensor(), T.SquarePad(480, 640) ]) test_transform = T.Compose([ T.ToTensor(), T.SquarePad(480, 640) ]) # ## Dataloader # + # collate_fn is needed for batching def collate_fn(batch): return tuple(zip(*batch)) train_dataloader = torch.utils.data.DataLoader( CocoDataset( root=cfg.training_img_dir, annotation=cfg.training_annotations, transforms=train_transform ), batch_size=cfg.BATCH_SIZE, shuffle=True, num_workers=cfg.NUM_WORKERS, collate_fn=collate_fn) validation_dataloader = torch.utils.data.DataLoader( CocoDataset( root=cfg.validation_img_dir, annotation=cfg.validation_annotations, transforms=train_transform ), batch_size=cfg.BATCH_SIZE, shuffle=True, num_workers=cfg.NUM_WORKERS, collate_fn=collate_fn) # - # ## Visualisations # Print the following to get an idea of the different categories cats = train_dataloader.dataset.coco.loadCats(train_dataloader.dataset.coco.getCatIds()) ", ".join([cat['name'] for cat in cats]) # To get an idea of what bounding boxes are we can plot a subset of the dataset. Each color represents a class, which we can access through the variable *t_y*. We use colors to get an idea of the different objects in order to separate them from each other. t_x, t_y = next(iter(train_dataloader)) Detection.data(t_x, t_y, max(train_dataloader.dataset.coco.cats.keys()), plot_kwargs={'nimages':10}, width=5, font_size=30) # # Implementation # # ---
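# One standard way to reduce the number of predicted bounding boxes (see the TODO
# above) is non-maximum suppression, which keeps only the highest-scoring box
# among heavily overlapping candidates. torchvision ships an implementation; a
# minimal sketch of how it could be wired into the evaluation step:
import torchvision

def reduce_boxes(boxes, scores, iou_threshold=0.5):
    """boxes: (N, 4) float tensor in (x1, y1, x2, y2) format; scores: (N,) tensor."""
    keep = torchvision.ops.nms(boxes, scores, iou_threshold)
    return boxes[keep], scores[keep]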
Projects/project3_Detection/Project - Detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.0 64-bit (''green_compute'': venv)' # name: python3 # --- # # UNet implementation using pytorch for MRI segmentation # - unet: https://github.com/mateuszbuda/brain-segmentation-pytorch # - pytorch FLOP counter: https://github.com/sovrasov/flops-counter.pytorch import os import numpy as np import pandas as pd import pickle import seaborn as sns import torch import torchvision.models as models import matplotlib.pyplot as plt from pypapi import events, papi_high as high from ptflops import get_model_complexity_info # ### Paths project_dir = '../' data_dir = '{}data/'.format(project_dir) results_dir = '{}results/'.format(project_dir) # ## Measure FLOPs # ### Pre-trained example # + init_features = 32 input_size = 256 #i.e. 256x256 n_channels = 3 model = torch.hub.load('mateuszbuda/brain-segmentation-pytorch', 'unet', in_channels=n_channels, out_channels=1, init_features=init_features, pretrained=True) macs, params = get_model_complexity_info(model, (n_channels, input_size, input_size), as_strings=True, print_per_layer_stat=False, verbose=False) print('{:<30} {:<8}'.format('Computational complexity: ', macs)) print('{:<30} {:<8}'.format('Number of parameters: ', params)) # + resnet18 = models.resnet18(pretrained=True) alexnet = models.alexnet(pretrained=True) squeezenet = models.squeezenet1_0(pretrained=True) inception_v3 = models.inception_v3(pretrained=True) googlenet = models.googlenet(pretrained=True) mobilenet = models.mobilenet_v2(pretrained=True) # fcn_resnet50 = models.segmentation.fcn_resnet50(pretrained=True) fcn_resnet101 = models.segmentation.fcn_resnet101(pretrained=True) vgg19 = models.vgg19(pretrained=True) unet = torch.hub.load('mateuszbuda/brain-segmentation-pytorch', 'unet', in_channels=3, out_channels=1, init_features=64, pretrained=False) # + model_dict = { 'mobilenet':mobilenet, 'squeezenet':squeezenet, 'alexnet':alexnet, 'googlenet':googlenet, 'resnet18':resnet18, 'fcn_resnet101':fcn_resnet101, 'vgg19':vgg19, 'UNet_tumor':unet} input_size_list = [64, 256] # + tags=[] init_features = 'n/a' n_channels = 3 FLOP_df = pd.DataFrame(columns=['model','input_size','init_features','n_channels','FLOPs','FLOP_unit','n_parameters', 'param_unit']) i = 0 for name, model in model_dict.items(): for input_size in input_size_list: macs, params = get_model_complexity_info(model, (n_channels, input_size, input_size), as_strings=True, print_per_layer_stat=False, verbose=False) if name == 'UNet_tumor': init_features = 64 # matches the torch.hub load above else: init_features = 'n/a' # init_features only applies to the unet FLOP_df.loc[i] = [name,input_size,init_features,n_channels] + macs.split(' ') + params.split(' ') i = i + 1 FLOP_df.head() # - # ## Add FastSurfer model stats # + # input 256: flops: 30913921024.0 params: 1799206 # input 64: flops: 1932120064.0 params: 1799206 # multiplying by 3 since the model is run three times (axial, coronal, sagittal) FLOP_df.loc[i] = ['FastSurferCNN', 256, 'n/a', 3] + [3*30913921024.0/1e9, 'GMac', 3*1799206.0/1e6, 'M'] FLOP_df.loc[i+1] = ['FastSurferCNN', 64, 'n/a', 3] + [3*1932120064.0/1e9, 'GMac', 3*1799206.0/1e6, 'M'] # + plot_df = FLOP_df.copy() plot_df[['FLOPs','n_parameters']] = plot_df[['FLOPs','n_parameters']].astype(float) plot_df['FLOPs'] = 1e9*plot_df['FLOPs'] plot_df.loc[plot_df['param_unit']=='k','n_parameters'] = plot_df[plot_df['param_unit']=='k']['n_parameters'].values*1e3
plot_df.loc[plot_df['param_unit']=='M','n_parameters'] = plot_df[plot_df['param_unit']=='M']['n_parameters'].values*1e6 sns.set(font_scale = 1.5) with sns.axes_style("whitegrid"): g = sns.catplot(y='model',x='FLOPs', hue='input_size', kind='bar', data=plot_df,palette='Set2',aspect=3) g.set(xscale="log") # - # ### plot of FLOPs # + plot_df = FLOP_df.copy() plot_df['FLOPs'] = 1e9*plot_df['FLOPs'].astype(float) plot_df['n_parameters'] = 1e6*plot_df['n_parameters'].astype(float) plot_df['domain'] = 'computer-vision' plot_df.loc[plot_df['model'].isin(['FastSurferCNN','UNet_tumor']), 'domain'] = 'neuroimaging' sns.set(font_scale = 6) palette = ['chocolate','firebrick'] # sns.color_palette("husl", 2) #sns.color_palette("husl", 2) with sns.axes_style("whitegrid"): fig, ax1 = plt.subplots(figsize=(40,20),sharex=True,sharey=True) g = sns.scatterplot(x='n_parameters',y='FLOPs', hue='input_size', size='input_size', sizes=[1000,2000], data=plot_df, palette=palette,ax=ax1) g.set(xscale="log",yscale='log') g.grid(True,which="both",ls="--",c='lightgray') # plt.ylabel('compute cost') #'FLOP count -- energy (μJ) plt.title('Model Sizes and FLOPs for Various DeepNets') g.set(xlim=(1e6, 2e8)) # edit Legend for clarity new_labels = ['64x64','256x256'] ax1.legend(loc='upper left') leg = ax1.get_legend() leg.set_title('input size') for t, l in zip(leg.texts, new_labels): t.set_text(l) # add model names as bubble labels def label_point(x, y, val, ax, x_shift=200000, y_shift=200000): a = pd.concat({'x': x, 'y': y, 'val': val}, axis=1) for i, point in a.iterrows(): ax.text(point['x']+x_shift, point['y']+y_shift, str(point['val']), fontsize=40) label_point(plot_df['n_parameters'], plot_df['FLOPs'], plot_df['model'], plt.gca()) # - # ## Unet architecture variations (with non-pretrained models) # - input_size # - init_features # #### FLOPs # + model_name = 'unet' input_size_list = [256] ## variable input size does not impact model size in fully-conv-nets init_features_list = [8, 16, 32, 64] n_channels_list = [3] FLOP_df = pd.DataFrame(columns=['model','input_size','init_features','n_channels','FLOPs','FLOP_unit','n_parameters', 'param_unit']) i = 0 for input_size in input_size_list: for init_features in init_features_list: for n_channels in n_channels_list: model = torch.hub.load('mateuszbuda/brain-segmentation-pytorch', 'unet', in_channels=n_channels, out_channels=1, init_features=init_features, pretrained=False) macs, params = get_model_complexity_info(model, (n_channels, input_size, input_size), as_strings=True, print_per_layer_stat=False, verbose=False) FLOP_df.loc[i] = [model_name,input_size,init_features,n_channels] + macs.split(' ') + params.split(' ') i = i + 1 FLOP_df.head()
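# The per-unit .loc edits above can be centralized in one helper that converts a
# ptflops (value, unit) pair into a plain float (a small refactoring sketch; the
# exact unit suffixes emitted by ptflops may vary by version):
def to_count(value, unit):
    scale = {'k': 1e3, 'KMac': 1e3, 'M': 1e6, 'MMac': 1e6, 'GMac': 1e9}
    return float(value) * scale.get(unit, 1.0)

FLOP_df['FLOP_count'] = [to_count(v, u) for v, u in
                         zip(FLOP_df['FLOPs'], FLOP_df['FLOP_unit'])]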
lit_review/notebooks/deepnet_archs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Unit 3.5.2 - Capstone Submission - Collect Data, Part I: Neuroscience data # ## <NAME>, 12/21/2021 # ### Dataset 1: See 'Applications of Graph Theory and ML in Neuroscience' (https://docs.google.com/document/d/1MsBeSa2sujr_A1eT3QPI7kjxqXY9FjGO2xeVg_gJLJY/edit?usp=sharing) # #### I downloaded the first portion of the LMU_2 dataset (http://fcon_1000.projects.nitrc.org/indi/CoRR/html/lmu_2.html), namely LMU_2_0025362_0025389. # #### This contains fMRI data for subjects 0025362 through 0025389 and is gzipped tarball approximately 2.6 GB in size. # #### I unzipped and untarred the data for a single subject, 0026362 via the 'tar -xvf' and 'tar -xf' Unix commands. This produced a directory structure containing five .nii files with a total size of about 170 MB. (See https://radiopaedia.org/articles/nifti-file-format?lang=us for more information about the NIfTI file format used in Neuroimaging.) # #### I installed Git LFS on my laptop and followed the instructions at https://git-lfs.github.com/ to track and commit the 170 MB of .nii files to a LFS server. # ## How to access a .nii file: (see https://nipy.org/nibabel/installation.html, https://nipy.org/nibabel/gettingstarted.html, https://nipy.org/nibabel/api.html#api, and https://nipy.org/nibabel/manual.html) #conda install -c conda-forge nibabel import nibabel as nib import os import numpy as np from nibabel.testing import data_path # #### First, load a sample .nii file included in the nibabel package example_filename = os.path.join(data_path, 'example4d.nii.gz') img = nib.load(example_filename) img.shape img.get_data_dtype() == np.dtype(np.int16) img.affine img.affine.shape header = img.header print(header) print(header.get_data_shape()) print(header.get_data_dtype()) print(header.get_zooms()) # #### Now, load a .nii file from the LMU_2 dataset base_path = os.getcwd() file_path = r'0025362\session_1\anat_1\anat-nii\anat.nii' full_file_path = os.path.join(base_path, file_path) print(full_file_path) img = nib.load(full_file_path) img.shape img.affine img.affine.shape header = img.header print(header) print(header.get_data_shape()) print(header.get_data_dtype()) print(header.get_zooms())
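# To inspect the voxel data itself, we can pull the array out of the NIfTI object
# with get_fdata (the standard nibabel accessor) and plot a middle slice along
# the third axis:
import matplotlib.pyplot as plt

data = img.get_fdata()
print(data.shape, data.dtype)
mid = data.shape[2] // 2
plt.imshow(data[:, :, mid].T, cmap='gray', origin='lower')
plt.title('Middle slice of anat.nii')
plt.show()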
Data-collection/Neuroscience-data/Unit-3.5.2-Capstone-Submission-Collect-Data-Part-I.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd #extracting lines for the simplified version open('2-fft-normal-n-0-12.txt','w').writelines([ line for line in open("2-fft-normal-n-0-12.log") if "Enqueue" in line]) print ("done") #extracting content from lines csv_out = open('2-fft-normal-n-0-12-csv.txt','w') with open ('2-fft-normal-n-0-12.txt', 'rt') as fft: csv_out.write("time,router,outport,inport,packet_address,packet_type,flit_id,flit_type,vnet,vc,src_ni,src_router,dst_ni,dst_router,enq_time\n") for line in fft: line_split = line.split() time = line_split[line_split.index("time:") + 1] router = line_split[line_split.index("SwitchAllocator") + 3] outport = line_split[line_split.index("outport") + 1] inport = line_split[line_split.index("inport") + 1] packet_address = line_split[line_split.index("addr") + 2][1:-1] packet_type = line_split[line_split.index("addr") + 7] flit_id = line_split[line_split.index("[flit::") + 1][3:] flit_type = line_split[line_split.index("Id="+str(flit_id)) + 1][5:] vnet = line_split[line_split.index("Type="+str(flit_type)) + 1][5:] vc = line_split[line_split.index("Vnet="+str(vnet)) + 1][3:] src_ni = line_split[line_split.index("VC="+str(vc)) + 2][3:] src_router = line_split[line_split.index("NI="+str(src_ni)) + 2][7:] dst_ni = line_split[line_split.index("Router="+str(src_router)) + 2][3:] dst_router = line_split[line_split.index("NI="+str(dst_ni)) + 2][7:] enq_time = str(line_split[line_split.index("Enqueue") + 1][5:]) line_csv = time+","+router+","+outport+","+inport+","+packet_address+","+packet_type+","+flit_id+","+flit_type+","+vnet+","+vc+","+src_ni+","+src_router+","+dst_ni+","+dst_router+","+enq_time+"\n" csv_out.write(line_csv) csv_out.close() # flush the buffer so the next cell reads complete data print ("done") # + #convert txt to csv df = pd.read_csv("2-fft-normal-n-0-12-csv.txt",delimiter=',') df.to_csv('2-fft-normal-n-0-12.csv',index=False)
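# A quick sanity check on the parsed output: load the final CSV back and confirm
# the row count and inferred column types before any analysis.
check = pd.read_csv('2-fft-normal-n-0-12.csv')
print(check.shape)
print(check.dtypes)
check.head()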
[03 - Results]/dos results ver 1/router fetch/dataset fetch/2-fft-normal-n-0-12-fetch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pickle import networkx as nx from stats_calc import * from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) # + def run_analysis(dataset_name): simple_classify_f1(dataset_name) modified_classify_f1(dataset_name) classify_analysis(dataset_name) # - run_analysis('citeseer') run_analysis('pubmed') run_analysis('cora')
gcn_stats_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # %load_ext autoreload # %autoreload 2 import os import yaml import time import pickle import warnings import scipy import scipy.sparse.linalg from collections import defaultdict from numpy import matrix, asmatrix from scipy.sparse.sputils import asmatrix import random import numpy as np import fastmat as fm # need 0.2a3 or later? import matplotlib.gridspec as gridspec warnings.filterwarnings("ignore") from pylab import rcParams from matplotlib import container from matplotlib import pyplot as plt from IPython.core.display import HTML import os, sys module_path = os.path.abspath(os.path.join('../../')) if module_path not in sys.path: sys.path.append(module_path) os.environ['PRJ'] = "/Users/ymohit/skigp/" #from src.nmpy.solvers import cg random.seed(1337) np.random.seed(1337) import os import decimal from collections import defaultdict # create a new context for this task ctx = decimal.Context() # 4 significant digits should be enough here ctx.prec = 4 def float_to_str(f): """ Convert the given float to a string, without resorting to scientific notation """ d1 = ctx.create_decimal(repr(f)) return format(d1, 'f') # + # Importing required packages from fkigp.configs import GridSizeFunc from experiments.plotting import plot_attribute_gs as plot_attribute from experiments.plotting import get_fmt, M_rep, attributes # - class PrecipitationDataDump(object): def __init__(self, fname): self.fname = fname self.data = None assert os.path.exists(fname), fname + " does not exist!" self.extract_values(fname) def extract_values(self, fname): assert os.path.exists(fname), fname self.data = yaml.safe_load(open(fname)) def get_att(self, att_name='#iters'): attributes = ['#iters', "time/iter", 'total', "time-preprocess", 'inf_time', 'grid_size'] if att_name == attributes[0]: return self.data['num_iters'] elif att_name == attributes[1]: return self.data['inf_time'] / self.data['num_iters'] elif att_name == attributes[2]: return self.data['inf_time'] + self.data['pre_time'] elif att_name == attributes[3]: return self.data['pre_time'] elif att_name == attributes[4]: return self.data['inf_time'] elif att_name == attributes[5]: return self.data['grid_size'] else: raise NotImplementedError # + def read_dumps(class_, sweep_id = 'pwe8kptd'): log_dir_path = os.environ['PRJ'] + 'logs/precipitation/' + sweep_id assert os.path.exists(log_dir_path) == True runs = [log_dir_path + '/' + fname + '/results.yaml' for fname in os.listdir(log_dir_path) if fname.startswith('rid')] seeds = [1, 23, 67, 971, 23427, 431241, 2423717, 9871] dumps = {} for run in runs: dump = class_(run) data = dump.data run_index = seeds.index(data['seed']) dumps[(data['grid_size'], run_index, methods[data['method']-1])] = dump return dumps methods = ['kissgp', 'gsgp'] dumps = read_dumps(PrecipitationDataDump, sweep_id = 'pwe8kptd') # - plot_attribute(dumps,attribute='inf_time', x_logscale=True, y_logscale=True, show_legends=True, set_zero_min_y_limit=True, x_label='m', y_label = 'Inference Time (in secs)', set_y_limit=-50) plot_attribute(dumps,attribute='time-preprocess', x_logscale=True, y_logscale=True, set_zero_min_y_limit=True, x_label='m', y_label = 'Processing Time (in secs)', set_y_limit=-50, show_legends=True) # + # Load dumps corresponding to llk # llk_dumps = read_dumps(PrecipitationDataDump, sweep_id =
'llk_sweepid') # plot_attribute(dumps,attribute='inf_time', x_logscale=True, y_logscale=True, print_values=True, # x_label='m', y_label = 'Log-det Time (in secs)', # set_y_limit=-50, show_legends=True)
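# Example of the 4-significant-digit formatter defined above (the decimal context
# rounds on create_decimal, so long floats are trimmed without scientific
# notation):
print(float_to_str(0.123456789))  # -> 0.1235
print(float_to_str(1234567.0))    # -> 1235000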
ipynb/precipitation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Dependencies import json import requests import pandas as pd from config import api_key from config import census_api_key # + url = f"https://api.census.gov/data/timeseries/asm/state?get=NAICS_TTL,EMP,GEO_TTL&for=state:*&YEAR=2016&NAICS=32*&key={census_api_key}" response = requests.get(url) # - manuf_info = response.json() manufacturing_df = pd.DataFrame(manuf_info) manufacturing_df del manufacturing_df[5] manufacturing_df manufact_edited = manufacturing_df.rename(columns={0:"Job Category", 1:"People Employed", 2:"State", 3:"Year", 4:"NAICS Code"}) manufact_edited.head() # Mounir added .reset_index(drop=True) to reset the index. This doesn't happen automatically and when you do things like groupby it can keep adding to it. Screwing it up. manufact_edited = manufact_edited.drop(0).reset_index(drop=True) manufact_edited.head() manufact_edited.dtypes manufact_edited["People Employed"]= pd.to_numeric(manufact_edited["People Employed"]) manufact_edited.count() manufac_by_state = manufact_edited.sort_values(by="State").reset_index(drop=True) manufac_by_state.head(25) manufac_by_state["People Employed"].sum() list(set(manufac_by_state('Job Category')) manufac_by_industry = manufact_edited.sort_values(by="Job Category").reset_index(drop=True) manufac_by_industry.head() total_manufac_worker = manufac_by_industry.groupby(["State"]) workers_per_state = total_manufac_worker["People Employed"].sum() Employed_per_State = pd.DataFrame({"Workers per State":workers_per_state}) Employed_per_State total_manufac_category = manufac_by_industry.groupby(["Job Category"]) workers_per_category = total_manufac_category["People Employed"].sum() People_per_manu_cat = pd.DataFrame({"People per Category":workers_per_category}) People_per_manu_cat # + #Exporting manufac_by_state.to_csv("CSV Files/Manufacturing Jobs by State", index=False, header=True) manufac_by_industry.to_csv("CSV Files/Manufacturing Jobs by Industry", index=False, header=True) Employed_per_State.to_csv("CSV Files/Total Jobs per State", header=True) People_per_manu_cat.to_csv("CSV Files/Total Worker per Category", header=True) # -
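# If IPAexGothic is not already visible to matplotlib, the font file can be
# registered explicitly before setting rcParams (addfont requires matplotlib 3.2+;
# the path below is an assumption, point it at wherever ipaexg.ttf lives on your
# system):
font_manager.fontManager.addfont("/usr/share/fonts/opentype/ipaexfont-gothic/ipaexg.ttf")
rcParams["font.family"] = "IPAexGothic"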
Test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import matplotlib.pyplot as plt import numpy as np import math from matplotlib import font_manager from matplotlib import rcParams rcParams["font.family"] = "IPAexGothic" # - font_manager.FontProperties().get_name() theta = np.linspace(0, 100, 10000) a, b = 0.1, 0.2 def x_func(a, b, theta): return a*math.pow(math.e, b*theta)*math.cos(theta) def y_func(a, b, theta): return a*math.pow(math.e, b*theta)*math.sin(theta) # + x = [] for i in range(len(theta)): v = x_func(a, b, theta[i]) x.append(v) y = [] for i in range(len(theta)): v = y_func(a, b, theta[i]) y.append(v) # + plt.plot(x, y, color = "cornflowerblue", label = "ラベル:対数螺旋") plt.legend(loc = "upper left") plt.title("タイトル:対数螺旋") plt.xlabel("X軸") plt.ylabel("Y軸") plt.grid(True) plt.show() # -
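# After training, it is worth persisting the distilled student weights explicitly
# (save_weights is the standard Keras API on the wrapped model; the file name is
# arbitrary):
save_path = os.path.join(LOG_DIR, "mask_rcnn_mimic_student.h5")
model.keras_model.save_weights(save_path)
print("Saved mimic weights to", save_path)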
notebooks/jpn_font_with_matplotlib.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # !pip install bokeh # + import numpy as np import matplotlib.pyplot as plt from matplotlib.backends.backend_agg import FigureCanvas from scipy.spatial import distance import random import pickle from scipy import stats import bokeh # allow us to re-use the framework from the src directory import sys, os sys.path.append(os.path.abspath(os.path.join('../'))) import max_ent.examples.grid_9_by_9 as G from max_ent.utility.support import * from max_ent.gridworld import Directions import pandas as pd from bokeh.io import output_file, show from bokeh.models import (BasicTicker, ColorBar, ColumnDataSource, LinearColorMapper, PrintfTickFormatter) from bokeh.plotting import figure from bokeh.sampledata.unemployment1948 import data from bokeh.transform import transform random.seed(123) np.random.seed(123) # - def compute_mean(target = None): df = pd.DataFrame() if target: for i in range(0, len(target.time_stat)): #print(f"Stat: {mca.trajectory_stat[i]}") mask_1 = np.array(target.trajectory_stat[i]) == 1 mask_2 = np.array(target.trajectory_stat[i]) == 0 #Create np array from time array selected = np.array(target.time_stat[i]) #Select actions in trajectory based on which system computed them selected_1= selected[mask_1] selected_2= selected[mask_2] #Compute total time per solver time_s1 = np.sum(selected_1) time_s2 = np.sum(selected_2) #Create np array from trajectory array selected = np.array(target.trajectory_stat[i]) #Select entries in trajectory based on which system computed them selected_1= selected[mask_1] selected_2= selected[mask_2] #Compute total usage per solver usage_s1 = np.sum(selected_1) usage_s2 = len(target.trajectory_stat[i]) - np.sum(selected_1) #Create np array from reward array selected = np.array(target.action_reward[i]) #Select rewards based on which system computed them selected_1= selected[mask_1] selected_2= selected[mask_2] #Compute total reward per solver reward_s1 = np.sum(selected_1) reward_s2 = np.sum(selected_2) selected = np.array(target.thresholds_stat[i]) thresholds_stat_s1 = np.mean(selected[mask_1], axis=0) selected = np.array(target.thresholds_stat[i]) thresholds_stat_s2 = np.mean(selected[mask_2], axis=0) dict_mca = {} dict_mca['traj_n'] = i dict_mca['length'] = len(target.trajectory_stat[i]) dict_mca['reward'] = np.sum(target.action_reward[i]) dict_mca['time'] = np.sum(target.time_stat[i]) dict_mca['sub_type'] = "s1" dict_mca['time_agent'] = time_s1 dict_mca['avg_time'] = time_s1 / usage_s1 dict_mca['reward_agent'] = reward_s1 dict_mca['avg_reward'] = reward_s1 / usage_s1 dict_mca['usage']= usage_s1 dict_mca['perc_usage']= usage_s1 / len(target.trajectory_stat[i]) dict_mca['confidence'] = thresholds_stat_s1[2] temp_df = pd.DataFrame(data=dict_mca, index=[i]) df = pd.concat([df, temp_df]) dict_mca = {} dict_mca['traj_n'] = i dict_mca['length'] = len(target.trajectory_stat[i]) dict_mca['reward'] = np.sum(target.action_reward[i]) dict_mca['time'] = np.sum(target.time_stat[i]) dict_mca['sub_type'] = "s2" dict_mca['time_agent'] = time_s2 dict_mca['avg_time'] = time_s2 / usage_s2 dict_mca['reward_agent'] = reward_s2 dict_mca['avg_reward'] = reward_s2 / usage_s2 dict_mca['usage']= usage_s2 dict_mca['perc_usage']= usage_s2 / len(target.trajectory_stat[i]) dict_mca['confidence'] = thresholds_stat_s2[2] temp_df = pd.DataFrame(data=dict_mca,
notebooks/trainval/Mimicking/train_on_coco.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !pip install bokeh # + import numpy as np import matplotlib.pyplot as plt from matplotlib.backends.backend_agg import FigureCanvas from scipy.spatial import distance import random import pickle from scipy import stats import bokeh # allow us to re-use the framework from the src directory import sys, os sys.path.append(os.path.abspath(os.path.join('../'))) import max_ent.examples.grid_9_by_9 as G from max_ent.utility.support import * from max_ent.gridworld import Directions import pandas as pd from bokeh.io import output_file, show from bokeh.models import (BasicTicker, ColorBar, ColumnDataSource, LinearColorMapper, PrintfTickFormatter) from bokeh.plotting import figure from bokeh.transform import transform random.seed(123) np.random.seed(123) # - def compute_mean(target = None): df = pd.DataFrame() if target: for i in range(0, len(target.time_stat)): #print(f"Stat: {mca.trajectory_stat[i]}") mask_1 = np.array(target.trajectory_stat[i]) == 1 mask_2 = np.array(target.trajectory_stat[i]) == 0 # Create np array from time array selected = np.array(target.time_stat[i]) # Select times based on which system computed each action selected_1= selected[mask_1] selected_2= selected[mask_2] # Compute total time per solver time_s1 = np.sum(selected_1) time_s2 = np.sum(selected_2) # Create np array from trajectory array selected = np.array(target.trajectory_stat[i]) # Select entries based on which system computed each action selected_1= selected[mask_1] selected_2= selected[mask_2] # Compute total usage per solver usage_s1 = np.sum(selected_1) usage_s2 = len(target.trajectory_stat[i]) - np.sum(selected_1) # Create np array from reward array selected = np.array(target.action_reward[i]) # Select rewards based on which system computed each action selected_1= selected[mask_1] selected_2= selected[mask_2] # Compute total reward per solver reward_s1 = np.sum(selected_1) reward_s2 = np.sum(selected_2) selected = np.array(target.thresholds_stat[i]) thresholds_stat_s1 = np.mean(selected[mask_1], axis=0) selected = np.array(target.thresholds_stat[i]) thresholds_stat_s2 = np.mean(selected[mask_2], axis=0) dict_mca = {} dict_mca['traj_n'] = i dict_mca['length'] = len(target.trajectory_stat[i]) dict_mca['reward'] = np.sum(target.action_reward[i]) dict_mca['time'] = np.sum(target.time_stat[i]) dict_mca['sub_type'] = "s1" dict_mca['time_agent'] = time_s1 dict_mca['avg_time'] = time_s1 / usage_s1 dict_mca['reward_agent'] = reward_s1 dict_mca['avg_reward'] = reward_s1 / usage_s1 dict_mca['usage']= usage_s1 dict_mca['perc_usage']= usage_s1 / len(target.trajectory_stat[i]) dict_mca['confidence'] = thresholds_stat_s1[2] temp_df = pd.DataFrame(data=dict_mca, index=[i]) df = pd.concat([df, temp_df]) dict_mca = {} dict_mca['traj_n'] = i dict_mca['length'] = len(target.trajectory_stat[i]) dict_mca['reward'] = np.sum(target.action_reward[i]) dict_mca['time'] = np.sum(target.time_stat[i]) dict_mca['sub_type'] = "s2" dict_mca['time_agent'] = time_s2 dict_mca['avg_time'] = time_s2 / usage_s2 dict_mca['reward_agent'] = reward_s2 dict_mca['avg_reward'] = reward_s2 / usage_s2 dict_mca['usage']= usage_s2 dict_mca['perc_usage']= usage_s2 / len(target.trajectory_stat[i]) dict_mca['confidence'] = thresholds_stat_s2[2] temp_df = pd.DataFrame(data=dict_mca,
index=[i]) df = pd.concat([df, temp_df]) else: dict_mca = {} dict_mca['traj_n'] = 0 dict_mca['length'] = 0 dict_mca['reward'] = 0 dict_mca['time'] = 0 dict_mca['sub_type'] = "null" dict_mca['time_agent'] = 0 dict_mca['avg_time'] = 0 dict_mca['reward_agent'] = 0 dict_mca['avg_reward'] = 0 dict_mca['usage']= 0 temp_df = pd.DataFrame(data=dict_mca, index=[0]) df = pd.concat([df, temp_df]) #print(dict_mca) return df # + n_trajectories = 500 threshold1 = 200 threshold2 = 0.5 threshold3 = 0.5 threshold4 = 0 #this one varies in [-1,1] threshold5 = 0 #means default w=[1,0] threshold7 = 0.9 # - ## Set the grid blue = [21, 9, 59, 1, 0, 20] green = [42, 18, 76, 41, 23, 30] cs = [63, 74, 13, 39, 48, 38] ca = [Directions.DOWN_LEFT, Directions.UP_LEFT] start =7 goal = 65 #blue, green, cs, ca, start, goal = generate_constraints(9) n, n_cfg, demo_n, f1 = create_world('Nominal', blue, green, start=start, goal=goal) cc = [1, 2] c, c_cfg, demo_c, f2 = create_world('Constrained', blue, green, cs, ca, cc, start=start, goal=goal, check=True) # + #demo_c.policy # + from max_ent.algorithms.gridworld_icrl import Demonstration demo_test = Demonstration([], demo_c.policy) import pickle as pkl import numpy as np fileName = 'qvalues' fileObject = open(fileName, 'wb') pkl.dump(demo_c.policy, fileObject) fileObject.close() f1 = G.plot_world(f'MCA', c, c_cfg.state_penalties, c_cfg.action_penalties, c_cfg.color_penalties, demo_test, c_cfg.blue, c_cfg.green, vmin=-50, vmax=10) # + from mc.mca import * from max_ent.algorithms.gridworld_icrl import Demonstration mca = MCA(n=n, c=c, demo=None, threshold1 = threshold1, threshold3 = threshold3, threshold4 = threshold4, threshold5 = 1) demo_mca = mca.generate_trajectories(n_trajectories) #mca = MCA(s1,s2,modelSelf, threshold1=threshold1, threshold2=threshold2, threshold3 = threshold3, threshold5 = threshold5, threshold7 = threshold7) #print(c) f1 = G.plot_world(f'MCA', c, c_cfg.state_penalties, c_cfg.action_penalties, c_cfg.color_penalties, demo_mca, c_cfg.blue, c_cfg.green, vmin=-50, vmax=10) # + print(mca.__dict__.keys()) mca.__dict__['action_reward'] average_stat = compute_mean(mca) np.mean(mca.thresholds_stat[0], axis=0)[2] mca.thresholds_mask # + average_stat#.loc[average_stat['sub_type']=='s1'].length[0] average_stat.loc[average_stat['sub_type']=='s2'].reward_agent[0] average_stat.loc[average_stat['sub_type']=='s1'].reward_agent[0] average_stat.reward mask_1 = np.array(mca.trajectory_stat[11]) == 1 selected = np.array(mca.thresholds_stat[11]) np.mean(mca.thresholds_stat[11], axis=0) average_stat # + #np.mean(selected[mask_1], axis=0) # - # mca.thresholds_stat[11][mask_1] # + from bokeh.plotting import figure, show x_traj = [x for x in range(n_trajectories)] y_action = mca.__dict__['action_reward'] y_time = mca.__dict__['time_stat'] y_mean_action = [np.mean(m) for m in y_action] y_mean_time = [np.mean(m) for m in y_time] #print(y_time) # + max_x = np.max([len(t) for t in y_action]) min_y_action = np.min([min(t) for t in y_action]) - 5 max_y_action = np.max([max(t) for t in y_action]) + 5 min_y_time = np.min([min(t) for t in y_time]) - 5 max_y_time = np.max([max(t) for t in y_time]) + 5 min_y_reward = np.min(average_stat.reward) # - max_y_action df_confidence = dict(y = [np.mean(np.nonzero(mca.thresholds_stat[xi]), axis=0)[2] for xi in range(n_trajectories)]) df_confidence # + index_stat=150 data = pd.DataFrame() for traj in range(len(mca.thresholds_mask)): for i in range(len(mca.thresholds_mask[traj])): for t in range(7): temp = {} temp['transition'] = i temp['traj'] = traj 
if t==0: temp['threshold'] = str("#Traj") elif t==1: temp['threshold'] = str("Reward") elif t==2: temp['threshold'] = str("Conf.") elif t==4: temp['threshold'] = str("Use S2") elif t==5: temp['threshold'] = str("#Traj S2") elif t==6: temp['threshold'] = str("Chance") else: temp['threshold'] = str("t"+str(t+1)) temp['value'] = mca.thresholds_mask[traj][i][t] #print( pd.DataFrame(temp, index=[0])) data = pd.concat([data, pd.DataFrame(temp, index=[0])], ignore_index=True) data.traj = data.traj.astype(str) data.transition = data.transition.astype(str) data = data.set_index('traj') # - data.iloc[data.index=='1'] # + print(bokeh.__version__) from bokeh.models import Div, CustomJS, Slider, RadioGroup from bokeh.layouts import layout, gridplot, row from bokeh.plotting import ColumnDataSource, output_file df_action = dict(x = [xi for xi in range(max_x)], y=y_action[0]) df_time = dict(x = [xi for xi in range(max_x)], y=y_time[0]) #df_confidence = dict(x = [np.mean(mca.thresholds_stat[xi], axis=0)[2] for xi in range(n_trajectories)], y=y_time[0]) df_length = dict(x=[0], top=[average_stat.loc[average_stat['sub_type']=='s2'].length[0]]) systems_type = ["S1", "S2"] colors_bar = ["blue", "orange"] df_reward = dict(x=[0], S2=[average_stat.loc[average_stat['sub_type']=='s2'].reward_agent[0]], S1=[average_stat.loc[average_stat['sub_type']=='s1'].reward_agent[0]]) df_time_agent = dict(x=[0], S2=[average_stat.loc[average_stat['sub_type']=='s2'].time_agent[0]], S1=[average_stat.loc[average_stat['sub_type']=='s1'].time_agent[0]]) source = ColumnDataSource(data=df_action) source_time = ColumnDataSource(data=df_time) source_length = ColumnDataSource(data=df_length) source_reward = ColumnDataSource(data=df_reward) source_time_agent = ColumnDataSource(data=df_time_agent) avg_plot = figure(title="Avg. action reward per trajectory", x_range=(0,n_trajectories), width=400, height=250) avg_plot.circle(x_traj, average_stat.loc[average_stat['sub_type']=='s1'].avg_reward, color='orange', fill_alpha=0.6, line_width=2, line_alpha=0.6, legend_label="S1") avg_plot.circle(x_traj, average_stat.loc[average_stat['sub_type']=='s2'].avg_reward, color='blue', fill_alpha=0.6, line_width=2, line_alpha=0.6, legend_label="S2") avg_plot_time = figure(title="Avg. action time per trajectory",x_range=(0,n_trajectories), width=400, height=250) avg_plot_time.circle(x_traj, average_stat.loc[average_stat['sub_type']=='s1'].avg_time, color='orange', fill_alpha=0.6, line_width=2, line_alpha=0.6, legend_label="S1") avg_plot_time.circle(x_traj, average_stat.loc[average_stat['sub_type']=='s2'].avg_time, color='blue', fill_alpha=0.6, line_width=2, line_alpha=0.6, legend_label="S2") avg_plot_usage = figure(title="Frac. 
use times per trajectory",x_range=(0,n_trajectories), width=400, height=250) avg_plot_usage.circle(x_traj, average_stat.loc[average_stat['sub_type']=='s1'].perc_usage, color='orange', fill_alpha=0.6, line_width=2, line_alpha=0.6, legend_label="S1") avg_plot_usage.circle(x_traj, average_stat.loc[average_stat['sub_type']=='s2'].perc_usage, color='blue', fill_alpha=0.6, line_width=2, line_alpha=0.6, legend_label="S2") avg_plot_confidence = figure(title="Avg confidence per trajectory",x_range=(0,n_trajectories), width=400, height=250) avg_plot_confidence.circle(x_traj, df_confidence['y'], color='orange', fill_alpha=0.6, line_width=2, line_alpha=0.6, legend_label="S1") #avg_plot_confidence.circle(x_traj, average_stat.loc[average_stat['sub_type']=='s2'].perc_usage, color='blue', fill_alpha=0.6, line_width=2, line_alpha=0.6, legend_label="S2") ########################################## df = pd.DataFrame(data.iloc[data.index=='0']) df_all = pd.DataFrame(data) #print(df) source_threshold_mask = ColumnDataSource(df) source_all = ColumnDataSource(df_all) # this is the colormap from the original NYTimes plot colors = ["#ff0000", "#00ff00"] mapper = LinearColorMapper(palette=colors, low=0, high=1) p_threshold_mask = figure(width=400, height=250, \ x_range=list(pd.unique(data.iloc[data.index=='0'].transition)), \ y_range=list(reversed(pd.unique(data.iloc[data.index=='0']['threshold'].values))), toolbar_location=None, tools="", x_axis_location="above") p_threshold_mask.rect(x="transition", y="threshold", width=1, height=1, source=source_threshold_mask, line_color="white", fill_color=transform('value', mapper)) color_bar = ColorBar(color_mapper=mapper, ticker=BasicTicker(desired_num_ticks=len(colors)), formatter=PrintfTickFormatter(format="%d%%")) #p_threshold_mask.add_layout(color_bar, 'right') p_threshold_mask.axis.axis_line_color = None p_threshold_mask.axis.major_tick_line_color = None p_threshold_mask.axis.major_label_text_font_size = "7px" p_threshold_mask.axis.major_label_standoff = 0 p_threshold_mask.xaxis.major_label_orientation = 1.0 ########################################### p = figure(x_range=(0,max_x), y_range=(min_y_action,max_y_action),width=400, height=250, title="Reward per action") p.line('x', 'y', source=source, line_width=3, line_alpha=0.6) slider = Slider(start=0, end=n_trajectories, value=0, step=1, title="Select a trajectory") callback = CustomJS(args=dict(source=source, source_time=source_time, source_length=source_length, #source_reward=source_reward, #source_time_agent=source_time_agent, source_threshold_mask=source_threshold_mask, fig_range = p_threshold_mask.x_range, new_data_thresholds_mask=mca.thresholds_mask, slider=slider, new_data=y_action, new_data_time=y_time, df_length = average_stat.loc[average_stat['sub_type']=='s2'].length, df_reward_s2 = average_stat.loc[average_stat['sub_type']=='s2'].reward_agent , df_reward_s1 = average_stat.loc[average_stat['sub_type']=='s1'].reward_agent, df_time_s2 = average_stat.loc[average_stat['sub_type']=='s2'].time_agent , df_time_s1 = average_stat.loc[average_stat['sub_type']=='s1'].time_agent), code=""" const data = source.data; const data_time = source_time.data; const data_length = source_length.data; //const data_reward = source_reward.data; //const data_time_agent = source_time_agent.data; const x = data['x'] const y = data['y'] const y_time = data_time['y'] const y_length = data_length['top'] //const y_reward_s1 = data_reward['S1'] //const y_reward_s2 = data_reward['S2'] //const y_time_s1 = data_time_agent['S1'] //const y_time_s2 
= data_time_agent['S2'] for (let i = 0; i < x.length; i++) { y[i] = new_data[slider.value][i] y_time[i] = new_data_time[slider.value][i] } y_length[0] = df_length[slider.value] console.log('data_reward: ' + df_reward_s1[slider.value]) //y_reward_s1[0] = df_reward_s1[slider.value] //y_reward_s2[0] = df_reward_s2[slider.value] //y_time_s1[0] = df_time_s1[slider.value] //y_time_s2[0] = df_time_s2[slider.value] const data_thresholds_mask = source_threshold_mask.data; var x_thresholds_mask = data_thresholds_mask['transition'] var y_thresholds_mask = data_thresholds_mask['threshold'] var value_rect = data_thresholds_mask['value'] console.log('old_value: ' + value_rect) console.log('x: ' + x_thresholds_mask) console.log('y: ' + y_thresholds_mask) fig_range.start = 0 fig_range.end = new_data_thresholds_mask[slider.value].length var arr = {0:'#Traj', 1:'Reward', 2:'Conf.', 3:'t3', 4:'Use S2', 5:'#Traj S2', 6:'Chance'} for (let i = 0; i < new_data_thresholds_mask[slider.value].length; i++) { //data['value'][i]=new_data_thresholds_mask[slider.value][i][j] for (let j = 0; j < new_data_thresholds_mask[slider.value][i].length; j++) { data_thresholds_mask['value'][i*7 + j]=new_data_thresholds_mask[slider.value][i][j] //data_thresholds_mask['x'][i*7 + j]= i //data_thresholds_mask['y'][i*7 + j]= arr[j] console.log('new data i,3: ' + new_data_thresholds_mask[slider.value][i][j]) } } source_threshold_mask.change.emit() source.change.emit(); source_time.change.emit(); source_length.change.emit(); //source_reward.change.emit(); //source_time_agent.change.emit(); """) slider.js_on_change('value', callback) p_time = figure(x_range=(0,max_x), y_range=(min_y_time,max_y_time),width=400, height=250, title="Time per action") p_time.line('x', 'y', source=source_time, line_width=3, line_alpha=0.6) '''tot_reward = figure(height=250, width=150, title="Total Reward", toolbar_location=None, tools="", y_range=(min_y_reward,0)) tot_reward.vbar_stack(systems_type, width=0.9, legend_label=systems_type, color=colors_bar, source=source_reward) tot_reward.xgrid.grid_line_color = None tot_time = figure(height=250, width=150, title="Total Time", toolbar_location=None, tools="", y_range=(0,max_y_time)) tot_time.vbar_stack(systems_type, width=0.9, legend_label=systems_type, color=colors_bar, source=source_time_agent) tot_time.xgrid.grid_line_color = None tot_length = figure(height=250, width=150, title="Total Length", toolbar_location=None, tools="", y_range=(0,max_x)) tot_length.vbar(source=source_length, width=0.9) tot_length.xgrid.grid_line_color = None tot_length.y_range.start = 0''' #l = layout([[p, [p_time, tot_length]], slider, [avg_plot, avg_plot_time]]) #l = layout([[p, p_time], slider, [avg_plot, avg_plot_time]]) #row1 = row(p, tot_reward, p_time, tot_time, tot_length) row1 = row(p, p_time, p_threshold_mask) row3 = row(avg_plot, avg_plot_time, avg_plot_confidence) l2 = layout([row1, slider, row3 ]) output_file("results.html") show(l2) #show(p) # + #p.renderers[0].data_source.data # + from bokeh.models import Div, CustomJS, Slider, RadioGroup from bokeh.layouts import layout, gridplot, row from bokeh.plotting import ColumnDataSource, output_file df = pd.DataFrame(data.iloc[data.index=='0']) df_all = pd.DataFrame(data) #print(df) source_threshold_mask = ColumnDataSource(df) source_all = ColumnDataSource(df_all) # this is the colormap from the original NYTimes plot colors = ["#000000", "#00ff00","#aaaaaa", "#0000ff"] mapper = LinearColorMapper(palette=colors, low=0, high=1) p_threshold_mask = figure(width=400, height=250, \ 
x_range=list(pd.unique(data.iloc[data.index=='0'].transition)), \ y_range=list(reversed(pd.unique(data.iloc[data.index=='0']['threshold'].values))), toolbar_location=None, tools="", x_axis_location="above") p_threshold_mask.rect(x="transition", y="threshold", width=1, height=1, source=source_threshold_mask, line_color="white", fill_color=transform('value', mapper)) color_bar = ColorBar(color_mapper=mapper, ticker=BasicTicker(desired_num_ticks=len(colors)), formatter=PrintfTickFormatter(format="%d%%")) p_threshold_mask.add_layout(color_bar, 'right') p_threshold_mask.axis.axis_line_color = None p_threshold_mask.axis.major_tick_line_color = None p_threshold_mask.axis.major_label_text_font_size = "7px" p_threshold_mask.axis.major_label_standoff = 0 p_threshold_mask.xaxis.major_label_orientation = 1.0 slider = Slider(start=0, end=n_trajectories, value=0, step=1, title="Select a trajectory") callback = CustomJS(args=dict(source=source_threshold_mask, slider=slider, fig_range = p_threshold_mask.x_range, new_data=mca.thresholds_mask, ), code=""" const data = source.data; console.log('length: ' + new_data[slider.value].length) console.log('new data 0: ' + new_data[0]) var x = data['transition'] var y = data['threshold'] var value_rect = data['value'] console.log('old_value: ' + value_rect) console.log('x: ' + x) console.log('y: ' + y) fig_range.start = 0 fig_range.end = new_data[slider.value].length for (let i = 0; i < new_data[slider.value].length; i++) { //data['value'][i]=new_data[slider.value][i][j] for (let j = 0; j < new_data[slider.value][i].length; j++) { data['value'][i*7 + j]=new_data[slider.value][i][j] console.log('new data i,3: ' + new_data[slider.value][i][j]) } } source.change.emit(); """) '''def callback(attr, old, new): N = slider.value df = pd.DataFrame(data.iloc[data.index==str(N)]) source = ColumnDataSource(df)''' slider.js_on_change('value', callback) #slider.on_change('value', callback) l = layout([p_threshold_mask, slider]) output_file("results_mask.html") show(l) # - len(df_time['x'])
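# The mask-and-sum pattern used throughout `compute_mean` above can be illustrated in
# isolation. A minimal sketch with made-up data (in the real arrays, `1` marks
# transitions computed by S1 and `0` those computed by S2):

# +
traj_flags = np.array([1, 0, 0, 1, 1])             # hypothetical trajectory_stat entry
step_times = np.array([0.2, 1.5, 1.1, 0.3, 0.4])   # hypothetical time_stat entry
mask_s1 = traj_flags == 1
time_s1 = step_times[mask_s1].sum()                # total time spent by S1
time_s2 = step_times[~mask_s1].sum()               # total time spent by S2
print(time_s1, time_s2)                            # ~0.9 and ~2.6
# -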
notebooks/.ipynb_checkpoints/new_stat_on_threshold-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # In this notebook, we track the changes in annotations between the _master_ and [v3.0.0-alpha](https://github.com/MTG/otmm_tonic_dataset/tree/v3.0.0-alpha). _v3.0.0-alpha_ is the pre-release of the dataset when all the tonic test datasets had been merged prior to annotation verification. import urllib2 import json import numpy as np from unittests.validate_annotations import test_annotations import warnings from morty.converter import Converter from morty.pitchdistribution import PitchDistribution from matplotlib import pyplot as plt # + # load master data anno_master = json.load(open('../annotations.json')) # load v3.0.0-alpha data anno_alpha_url = 'https://raw.githubusercontent.com/MTG/otmm_tonic_dataset/v3.0.0-alpha/annotations.json' response = urllib2.urlopen(anno_alpha_url) anno_alpha = json.load(response) # - # First, we run the [automatic validation tests](https://github.com/MTG/otmm_tonic_dataset/blob/master/unittests/validate_annotations.py#L11) on _master_ to find how many recordings pass the current validation checks. Note that this is also done by [Travis CI](https://travis-ci.org/MTG/otmm_tonic_dataset). Here we run it for the sake of completeness. test_annotations(anno_master) # The _master_ does not have any tonic frequency mismatches in the 908 recordings tested out of 1994. By this test, about 45% of the dataset is validated automatically. # # Next, we run the [automatic validation tests](https://github.com/MTG/otmm_tonic_dataset/blob/master/unittests/validate_annotations.py#L11) on _v3.0.0-alpha_ to find how many recordings had annotation inconsistencies before verification. # + try: test_annotations(anno_alpha) except AssertionError as err: # get the number of mismatches print err num_err = [int(s) for s in err.args[0].split() if s.isdigit()][0] inconsistent_percent = float(num_err) * 100 / (2007-1589) # taken from the penultimate warning print("The human annotators show inconsistencies in {:.1f}% of " "the {:d} tested recordings.".format(inconsistent_percent, 2007-1589)) # - # As can be seen, _v3.0.0-alpha_ has inconsistencies in the annotation of 23 recordings according to the automatic tests. Nevertheless, there are some additional cases which are not found by the automatic validation. Therefore, the automatic validation result above must be interpreted as the minimum number of recordings with inconsistencies. The two major reasons are explained below: # # - In _v3.0.0-alpha_, there were several recordings where the tonic frequency/symbol varies throughout the recording (e.g. [Isfahan Peşrev by Mesut Cemil](http://musicbrainz.org/recording/ed189797-5c50-4fde-abfa-cb1c8a2a2571)). This variation was not annotated, so the tonic information is only partially correct. # # Most of these recordings have been removed due to the rigor required in re-annotating or the ambiguity in where the change occurs (e.g. in geçiş taksims). See [removed.json](https://github.com/MTG/otmm_tonic_dataset/blob/master/removed.json) for detailed justifications. # # - As seen from the output warning for the validation test, only 2007 - 1589 = 418 recordings (~20% of the dataset) have been validated. Therefore, the coverage of the automatic validation is not adequate in _v3.0.0-alpha_. Note that the human annotators show inconsistencies in 5.5% of the tested recordings.
# # To compensate, we added the automatic tonic annotations obtained by joint analysis (see the commit [1655db0](https://github.com/MTG/otmm_tonic_dataset/commit/1655db0593c031d6db44841d168419a226e21ddc)). There were 822 automatic annotations added. 19 of these were duplicates, which were later removed in the commit [eca1c12](https://github.com/MTG/otmm_tonic_dataset/commit/eca1c122fc7535c5b3d44a95a8c697f84f51396f), resulting in 803 unique annotations. Then, the recordings with automatic annotations went through a final verification by a human annotator. After verification, 789 automatic annotations (in 782 recordings; see the statistics printed in the 7th cell below) were found to be correct and the remaining 14 (around 1.74% error) were discarded. The accuracy of the automatic annotation method is less than but still close to the 99% accuracy reported in the original paper (Ş<NAME>., <NAME>., and Serra, X., 2013), and hence it is more reliable than human annotators. # Below, we compare the annotations from v3.0.0-alpha to master and see what has changed rec_stats = {} cent_dev = [] for aa_key, aa_val in anno_alpha.items(): try: # get the relevant recording entry in master am_val = anno_master[aa_key] rec_stats[aa_key] = {'num_deleted_anno': 0, 'status': 'kept', 'num_added_anno': 0, 'num_unchanged_anno': 0, 'num_modified_anno': 0, 'num_auto_anno': 0, 'verified': am_val['verified']} # note automatic annotations in master; they did not exist in v3.0.0-alpha for jj, am_anno in reversed(list(enumerate(am_val['annotations']))): if 'jointanalyzer' in am_anno['source']: rec_stats[aa_key]['num_auto_anno'] += 1 am_val['annotations'].pop(jj) # start comparison from v3.0.0-alpha to master for ii, aa_anno in reversed(list(enumerate(aa_val['annotations']))): passed_break = False for jj, am_anno in reversed(list(enumerate(am_val['annotations']))): if aa_anno['source'] == am_anno['source']: # annotation exists # unchanged anno; allow a change of less than 0.06 Hz due to # decimal point rounding if abs(aa_anno['value'] - am_anno['value']) < 0.06: rec_stats[aa_key]['num_unchanged_anno'] += 1 else: # modified anno (by a human verifier) rec_stats[aa_key]['num_modified_anno'] += 1 # find the introduced octave-wrapped deviation temp_dev = Converter.hz_to_cent( aa_anno['value'], am_anno['value']) # hz to cent conversion temp_dev = temp_dev % 1200 # octave wrap temp_dev = min(temp_dev, 1200-temp_dev) # get minimum distance cent_dev.append(temp_dev) # pop annotations am_val['annotations'].pop(jj) aa_val['annotations'].pop(ii) break # the remainders are human additions and deletions rec_stats[aa_key]['num_added_anno'] = len(am_val['annotations']) rec_stats[aa_key]['num_deleted_anno'] = len(aa_val['annotations']) except KeyError as kerr: # removed rec_stats[kerr.args[0]] = {'num_deleted_anno':len(aa_val['annotations']), 'status': 'removed', 'num_added_anno': 0, 'num_modified_anno': 0, 'num_unchanged_anno': 0, 'num_auto_anno': 0, 'verified': True} # There are a few additions to the master, let's also add them to the comparison: new_recs = set(anno_master.keys()) - set(anno_alpha.keys()) for am_key in new_recs: am_val = anno_master[am_key] rec_stats[am_key] = {'num_deleted_anno': 0, 'status': 'new', 'num_added_anno': 0, 'num_unchanged_anno': 0, 'num_modified_anno': 0, 'num_auto_anno': 0, 'verified': am_val['verified']} # note automatic annotations; they did not exist in v3.0.0-alpha for jj, am_anno in reversed(list(enumerate(am_val['annotations']))): if 'jointanalyzer' in am_anno['source']:
rec_stats[am_key]['num_auto_anno'] += 1 am_val['annotations'].pop(jj) # the remainders are human additions rec_stats[am_key]['num_added_anno'] = len(am_val['annotations']) # Finally, we are reporting all the differences: # + # removed rm_recs_in_json = json.load(open('../removed.json')).keys() # TODO add statistics num_removed_rec = 0 num_new_rec = 0 num_changed_rec = 0 # num recordings with changes, incl. automatic annotations num_human_changed_rec = 0 # num recordings with human changes num_anno = 0 # total number of annotations num_verified_anno = 0 # total number of verified annotations num_human_verified_anno = 0 # total number of annotations verified by humans num_additions = 0 # number of added annotations num_deletions = 0 # number of deleted annotations num_modifications = 0 # number of modified annotations num_unchanged = 0 # number of unchanged annotations num_auto = 0 # number of automatic annotations num_rec_add = 0 # number of recordings with additions num_rec_del = 0 # number of recordings with deletions num_rec_mod = 0 # number of recordings with modifications num_rec_auto = 0 # number of recordings with automatic annotations for rk, rs in rec_stats.items(): # get the number of removed and new recordings if rs['status'] == 'removed': num_removed_rec += 1 if rk not in rm_recs_in_json: # verify they are listed in removed.json warnings.warn('%s is removed but not listed in removed.json' % rk) elif rs['status'] == 'new': num_new_rec += 1 num_anno += (rs['num_added_anno'] + rs['num_auto_anno'] + rs['num_modified_anno'] + rs['num_unchanged_anno']) # how many recordings have changed if any([rs['num_added_anno'], rs['num_auto_anno'], rs['num_deleted_anno'], rs['num_modified_anno']]): num_changed_rec += 1 num_verified_anno += (rs['num_added_anno'] + rs['num_auto_anno'] + rs['num_modified_anno'] + rs['num_unchanged_anno']) # how many recordings have changed only by humans if any([rs['num_added_anno'], rs['num_deleted_anno'], rs['num_modified_anno']]): num_human_changed_rec += 1 num_human_verified_anno += (rs['num_added_anno'] + rs['num_auto_anno'] + rs['num_modified_anno'] + rs['num_unchanged_anno']) if not rs['verified']: warnings.warn("%s has changes but verified flag is False" % rk) # how many automatic annotations in how many recordings num_auto += rs['num_auto_anno'] num_rec_auto += rs['num_auto_anno'] > 0 # how many annotation modifications/additions/deletions in how many recordings num_additions += rs['num_added_anno'] num_rec_add += rs['num_added_anno'] > 0 num_deletions += rs['num_deleted_anno'] num_rec_del += rs['num_deleted_anno'] > 0 num_modifications += rs['num_modified_anno'] num_rec_mod += rs['num_modified_anno'] > 0 # how many unchanged annotations num_unchanged += rs['num_unchanged_anno'] # distribution of human frequency modifications # print print('In master, there are %d annotations in total in %d recordings.' % (num_anno, len(anno_master))) print('Since v3.0.0-alpha, %d recordings are removed and %d new recordings are added.' % (num_removed_rec, num_new_rec)) print('%d recordings are changed (incl. automatic annotations). ' '%d of the annotations are verified in these recordings.' % (num_changed_rec, num_verified_anno)) print('%d annotations in %d recordings are changed by humans in total. ' '%d of the annotations are verified by humans in these recordings.' % (num_additions + num_deletions + num_modifications, num_human_changed_rec, num_human_verified_anno)) print('%d annotations are added to %d recordings by humans.'
%(num_additions, num_rec_add)) print('%d annotations are deleted from %d recordings by humans.' %(num_deletions, num_rec_del)) print('%d annotations are modified in %d recordings by humans.' %(num_modifications, num_rec_mod)) print('%d automatic annotations are added to %d recordings.' %(num_auto, num_rec_auto)) # - # Finally, we compute the distribution of octave-wrapped cent deviation of the modified annotations. # + dev_dist = PitchDistribution.from_cent_pitch( cent_dev, step_size=10, kernel_width=0, norm_type=None) dev_dist.bar() plt.title('Distribution of absolute deviation from \naverage of annotation pitch-class') plt.xlabel('Absolute deviation (cents)') plt.xlim([min(dev_dist.bins), max(dev_dist.bins) + dev_dist.step_size/2]) plt.xticks(np.arange(0, 600, 50)) plt.show() print("%d of %d modifications were done within a 20 cent octave-wrapped window." % (sum(c < 20 for c in cent_dev), len(cent_dev))) print("%d of %d modifications were done within a 50 cent octave-wrapped window." % (sum(c < 50 for c in cent_dev), len(cent_dev))) # - # As can be seen above, almost half of the modifications were simply refinements on the annotation. This is understandable given the rigor of the task for humans. Below, we list some of the issues: # # - Due to recording quality, the tonic of historical recordings may be hard to annotate accurately. Moreover, the playback speed, and hence the tonic frequency, may not be very stable in many historical recordings. Many annotators disregard these factors. It is typically better to avoid historical recordings if the annotator is not highly trained (or disciplined). # - Heterophonic recordings could be challenging due to the complex interactions between musical sources. # - As the annotators label more, they will get better through practice. Therefore, it is possible for an annotator to make more mistakes at the start. On the other hand, annotating without pauses for a long time increases cognitive fatigue (and boredom), which might make an annotator make more mistakes than usual. # - The makam performances typically finish on the tonic note. Therefore, annotating the tonic pitch/pitch-class from the final note is the most straightforward way for manual annotation. Nevertheless, the musicians may rarely choose to finish on another note for artistic purposes (you can listen to [this recording of <NAME>](http://musicbrainz.org/recording/132cb194-c083-4002-9cc9-720b8cbfc61c) as an example). Unless they are highly trained on makams, many annotators will not notice this artistic decision. # # As discussed earlier, the score-informed tonic identification method (<NAME>., <NAME>., and <NAME>., 2013) typically outputs more accurate results than human annotators. Moreover, it is much faster than humans. When there are multiple performances, the method can identify the tonic for each sub-performance separately. This case will be extra tedious for humans as they have to identify the time-intervals of the sub-performances first. Therefore, using this method is much cheaper, faster and more reliable when the music score of a composition performed in a recording is available. # Reference # ---------- # <NAME>., <NAME>., and <NAME>. (2013). Score informed tonic identification for makam music of Turkey. In Proceedings of 14th International Society for Music Information Retrieval Conference (ISMIR 2013), pages 175–180, Curitiba, Brazil.
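# For reference, the octave-wrapped cent deviation used in the comparison above can be
# written as a small standalone helper. A minimal sketch in plain NumPy instead of
# morty's `Converter` (it assumes the usual definition of the cent scale,
# 1200 * log2(hz1 / hz2)):

# +
def octave_wrapped_cent_deviation(hz1, hz2):
    """Smallest pitch-class distance in cents between two frequencies."""
    dev = 1200.0 * np.log2(float(hz1) / float(hz2))  # hz to cent conversion
    dev = dev % 1200                                 # octave wrap
    return min(dev, 1200 - dev)                      # minimum circular distance

print(octave_wrapped_cent_deviation(440.0, 220.0))  # exactly one octave apart -> 0.0
# -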
extras/compare_annotations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score import pandas as pd import numpy as np # - # ## To load the dataset directly from the internet we need some extra libraries # + import urllib # for python 2 import urllib.request # for python 3 # - # #### Click here for viewing [DataSet](https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data) # + url = "https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data" # raw_data = urllib.urlopen(url)  # for python 2 raw_data = urllib.request.urlopen(url) dataset = np.loadtxt(raw_data, delimiter = ',') # + # printing the first row print (dataset[0]) # - features = dataset[:, 0:48] # select columns 0-47 (the 48 word-frequency features) for all rows # + # according to the documentation, the last column is the label, so select the last column as target/label labels = dataset[:, -1] # - features_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size= .10, random_state = 17) mlnNB = MultinomialNB() # + mlnNB.fit(features_train, labels_train) print (mlnNB) # - pred_on_test_data = mlnNB.predict(features_test) score = accuracy_score(labels_test, pred_on_test_data) # accuracy_score(y_true, y_pred) print ("Accuracy Score : ", score * 100, "%")
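# Accuracy alone can be misleading for spam filtering, where false positives (legitimate
# mail flagged as spam) are usually costlier than false negatives. A hedged sketch of a
# slightly richer evaluation using standard scikit-learn utilities alongside
# `accuracy_score`:

# +
from sklearn.metrics import confusion_matrix, classification_report

print(confusion_matrix(labels_test, pred_on_test_data))       # rows: true class, cols: predicted class
print(classification_report(labels_test, pred_on_test_data))  # per-class precision/recall/F1
# -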
Source Codes/2nd way using internet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ezbdsmb/hand_detection/blob/main/hand_detection_nn.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="v2AMEYvSA_HV" import os import tensorflow as tf import pathlib data_dir = tf.keras.utils.get_file( origin="http://vision.soic.indiana.edu/egohands_files/egohands_data.zip", fname="hands_photos.zip", extract=True) data_dir = pathlib.Path(data_dir).parent # + colab={"base_uri": "https://localhost:8080/", "height": 724} id="huT4qEbfBPKx" outputId="9b4b4ecc-bfea-4de3-8561-f72570d8e60a" import PIL import PIL.Image hands = list(data_dir.glob('*/*/*.jpg')) PIL.Image.open(str(hands[0])) # + colab={"base_uri": "https://localhost:8080/"} id="Hfw0tK6rJfid" outputId="2abbdc46-5849-42dd-f1ac-c78e2c983496" os.listdir(data_dir) # + id="Ldt7kCRjKq4V" import scipy.io as sio sio.loadmat(data_dir / 'metadata.mat')
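# `scipy.io.loadmat` returns a plain dict of MATLAB variables. A minimal sketch for
# inspecting what the EgoHands metadata file actually contains (no key names are assumed
# beyond scipy's standard `__header__`/`__version__`/`__globals__` entries):

# +
meta = sio.loadmat(data_dir / 'metadata.mat')
print(meta.keys())  # top-level MATLAB variables stored in the file
for key, value in meta.items():
    if not key.startswith('__'):
        print(key, type(value), getattr(value, 'shape', None))  # array shape for each real entry
# -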
hand_detection_nn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Predicting sentiment from product reviews # # # The goal of this first notebook is to explore logistic regression and feature engineering with existing Turi Create functions. # # In this notebook you will use product review data from Amazon.com to predict whether the sentiments about a product (from its reviews) are positive or negative. # # * Use SFrames to do some feature engineering # * Train a logistic regression model to predict the sentiment of product reviews. # * Inspect the weights (coefficients) of a trained logistic regression model. # * Make a prediction (both class and probability) of sentiment for a new product review. # * Given the logistic regression weights, predictors and ground truth labels, write a function to compute the **accuracy** of the model. # * Inspect the coefficients of the logistic regression model and interpret their meanings. # * Compare multiple logistic regression models. # # Let's get started! # # ## Fire up Turi Create # Make sure you have the latest version of Turi Create. from __future__ import division import turicreate import math import string # # Data preparation # # We will use a dataset consisting of baby product reviews on Amazon.com. products = turicreate.SFrame('amazon_baby.sframe/') # Now, let us see a preview of what the dataset looks like. products # ## Build the word count vector for each review # Let us explore a specific example of a baby product. # products[269] # Now, we will perform 2 simple data transformations: # # 1. Remove punctuation using [Python's built-in](https://docs.python.org/2/library/string.html) string functionality. # 2. Transform the reviews into word-counts. # # **Aside**. In this notebook, we remove all punctuations for the sake of simplicity. A smarter approach to punctuations would preserve phrases such as "I'd", "would've", "hadn't" and so forth. See [this page](https://www.cis.upenn.edu/~treebank/tokenization.html) for an example of smart handling of punctuations. # + import string def remove_punctuation(text): try: # python 2.x text = text.translate(None, string.punctuation) except: # python 3.x translator = text.maketrans('', '', string.punctuation) text = text.translate(translator) return text review_without_punctuation = products['review'].apply(remove_punctuation) products['word_count'] = turicreate.text_analytics.count_words(review_without_punctuation) # - # Now, let us explore what the sample example above looks like after these 2 transformations. Here, each entry in the **word_count** column is a dictionary where the key is the word and the value is a count of the number of times the word occurs. products[269]['word_count'] # ## Extract sentiments # # We will **ignore** all reviews with *rating = 3*, since they tend to have a neutral sentiment. products = products[products['rating'] != 3] len(products) # Now, we will assign reviews with a rating of 4 or higher to be *positive* reviews, while the ones with rating of 2 or lower are *negative*. For the sentiment column, we use +1 for the positive class label and -1 for the negative class label. products['sentiment'] = products['rating'].apply(lambda rating : +1 if rating > 3 else -1) products # Now, we can see that the dataset contains an extra column called **sentiment** which is either positive (+1) or negative (-1). 
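# Before splitting, it is worth sanity-checking the class balance this labeling produces.
# A minimal sketch using the same SFrame comparison-and-sum idiom that appears again at
# the end of this notebook (in this dataset positive reviews far outnumber negative ones):

# +
num_pos_reviews = (products['sentiment'] == +1).sum()
num_neg_reviews = (products['sentiment'] == -1).sum()
print(num_pos_reviews, num_neg_reviews)
# -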
# ## Split data into training and test sets # Let's perform a train/test split with 80% of the data in the training set and 20% of the data in the test set. We use `seed=1` so that everyone gets the same result. train_data, test_data = products.random_split(.8, seed=1) print(len(train_data)) print(len(test_data)) # # Train a sentiment classifier with logistic regression # # We will now use logistic regression to create a sentiment classifier on the training data. This model will use the column **word_count** as a feature and the column **sentiment** as the target. We will use `validation_set=None` to obtain same results as everyone else. # # **Note:** This line may take 1-2 minutes. sentiment_model = turicreate.logistic_classifier.create(train_data, target = 'sentiment', features=['word_count'], validation_set=None) sentiment_model # **Aside**. You may get a warning to the effect of "Terminated due to numerical difficulties --- this model may not be ideal". It means that the quality metric (to be covered in Module 3) failed to improve in the last iteration of the run. The difficulty arises as the sentiment model puts too much weight on extremely rare words. A way to rectify this is to apply regularization, to be covered in Module 4. Regularization lessens the effect of extremely rare words. For the purpose of this assignment, however, please proceed with the model above. # Now that we have fitted the model, we can extract the weights (coefficients) as an SFrame as follows: weights = sentiment_model.coefficients weights.column_names() # There are a total of `121713` coefficients in the model. Recall from the lecture that positive weights $w_j$ correspond to weights that cause positive sentiment, while negative weights correspond to negative sentiment. # # Fill in the following block of code to calculate how many *weights* are positive ( >= 0). (**Hint**: The `'value'` column in SFrame *weights* must be positive ( >= 0)). # + num_positive_weights = ... num_negative_weights = ... print("Number of positive weights: %s " % num_positive_weights) print("Number of negative weights: %s " % num_negative_weights) # - # **Quiz Question:** How many weights are >= 0? # ## Making predictions with logistic regression # # Now that a model is trained, we can make predictions on the **test data**. In this section, we will explore this in the context of 3 examples in the test dataset. We refer to this set of 3 examples as the **sample_test_data**. sample_test_data = test_data[10:13] print(sample_test_data['rating']) sample_test_data # Let's dig deeper into the first row of the **sample_test_data**. Here's the full review: sample_test_data[0]['review'] # That review seems pretty positive. # # Now, let's see what the next row of the **sample_test_data** looks like. As we could guess from the sentiment (-1), the review is quite negative. sample_test_data[1]['review'] # We will now make a **class** prediction for the **sample_test_data**. The `sentiment_model` should predict **+1** if the sentiment is positive and **-1** if the sentiment is negative. Recall from the lecture that the **score** (sometimes called **margin**) for the logistic regression model is defined as: # # $$ # \mbox{score}_i = \mathbf{w}^T h(\mathbf{x}_i) # $$ # # where $h(\mathbf{x}_i)$ represents the features for example $i$. We will write some code to obtain the **scores** using Turi Create. For each row, the **score** (or margin) is a number in the range **[-inf, inf]**. 
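# Returning to the fill-in block above: a hedged sketch of one way to count the positive
# and negative weights, using the `'value'` column mentioned in the hint (an SArray
# comparison yields 0/1 values, so `.sum()` counts the matches; the `_sketch` names are
# introduced here):

# +
num_positive_weights_sketch = (weights['value'] >= 0).sum()
num_negative_weights_sketch = (weights['value'] < 0).sum()
print("Number of positive weights: %s " % num_positive_weights_sketch)
print("Number of negative weights: %s " % num_negative_weights_sketch)
# -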
scores = sentiment_model.predict(sample_test_data, output_type='margin') print(scores) # ### Predicting sentiment # # These scores can be used to make class predictions as follows: # # $$ # \hat{y} = # \left\{ # \begin{array}{ll} # +1 & \mathbf{w}^T h(\mathbf{x}_i) > 0 \\ # -1 & \mathbf{w}^T h(\mathbf{x}_i) \leq 0 \\ # \end{array} # \right. # $$ # # Using scores, write code to calculate $\hat{y}$, the class predictions: # Run the following code to verify that the class predictions obtained by your calculations are the same as that obtained from Turi Create. print("Class predictions according to Turi Create:") print(sentiment_model.predict(sample_test_data)) # **Checkpoint**: Make sure your class predictions match with the one obtained from Turi Create. # # ### Probability predictions # # Recall from the lectures that we can also calculate the probability predictions from the scores using: # $$ # P(y_i = +1 | \mathbf{x}_i,\mathbf{w}) = \frac{1}{1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))}. # $$ # # Using the variable **scores** calculated previously, write code to calculate the probability that a sentiment is positive using the above formula. For each row, the probabilities should be a number in the range **[0, 1]**. # **Checkpoint**: Make sure your probability predictions match the ones obtained from Turi Create. print("Class predictions according to Turi Create:") print(sentiment_model.predict(sample_test_data, output_type='probability')) # ** Quiz Question:** Of the three data points in **sample_test_data**, which one (first, second, or third) has the **lowest probability** of being classified as a positive review? # # Find the most positive (and negative) review # We now turn to examining the full test dataset, **test_data**, and use Turi Create to form predictions on all of the test data points for faster performance. # # Using the `sentiment_model`, find the 20 reviews in the entire **test_data** with the **highest probability** of being classified as a **positive review**. We refer to these as the "most positive reviews." # # To calculate these top-20 reviews, use the following steps: # 1. Make probability predictions on **test_data** using the `sentiment_model`. (**Hint:** When you call `.predict` to make predictions on the test data, use option `output_type='probability'` to output the probability rather than just the most likely class.) # 2. Sort the data according to those predictions and pick the top 20. (**Hint:** You can use the `.topk` method on an SFrame to find the top k rows sorted according to the value of a specified column.) # **Quiz Question**: Which of the following products are represented in the 20 most positive reviews? [multiple choice] # # # Now, let us repeat this exercise to find the "most negative reviews." Use the prediction probabilities to find the 20 reviews in the **test_data** with the **lowest probability** of being classified as a **positive review**. Repeat the same steps above but make sure you **sort in the opposite order**. # **Quiz Question**: Which of the following products are represented in the 20 most negative reviews? [multiple choice] # ## Compute accuracy of the classifier # # We will now evaluate the accuracy of the trained classifier. 
Recall that the accuracy is given by # # # $$ # \mbox{accuracy} = \frac{\mbox{# correctly classified examples}}{\mbox{# total examples}} # $$ # # This can be computed as follows: # # * **Step 1:** Use the trained model to compute class predictions (**Hint:** Use the `predict` method) # * **Step 2:** Count the number of data points when the predicted class labels match the ground truth labels (called `true_labels` below). # * **Step 3:** Divide the total number of correct predictions by the total number of data points in the dataset. # # Complete the function below to compute the classification accuracy: def get_classification_accuracy(model, data, true_labels): # First get the predictions ## YOUR CODE HERE ... # Compute the number of correctly classified examples ## YOUR CODE HERE ... # Then compute accuracy by dividing num_correct by total number of examples ## YOUR CODE HERE ... return accuracy # Now, let's compute the classification accuracy of the **sentiment_model** on the **test_data**. get_classification_accuracy(sentiment_model, test_data, test_data['sentiment']) # **Quiz Question**: What is the accuracy of the **sentiment_model** on the **test_data**? Round your answer to 2 decimal places (e.g. 0.76). # # **Quiz Question**: Does a higher accuracy value on the **training_data** always imply that the classifier is better? # ## Learn another classifier with fewer words # # There were a lot of words in the model we trained above. We will now train a simpler logistic regression model using only a subset of words that occur in the reviews. For this assignment, we selected 20 words to work with. These are: significant_words = ['love', 'great', 'easy', 'old', 'little', 'perfect', 'loves', 'well', 'able', 'car', 'broke', 'less', 'even', 'waste', 'disappointed', 'work', 'product', 'money', 'would', 'return'] len(significant_words) # For each review, we will use the **word_count** column and trim out all words that are **not** in the **significant_words** list above. We will use the [SArray dictionary trim by keys functionality]( https://dato.com/products/create/docs/generated/graphlab.SArray.dict_trim_by_keys.html). Note that we are performing this on both the training and test set. train_data['word_count_subset'] = train_data['word_count'].dict_trim_by_keys(significant_words, exclude=False) test_data['word_count_subset'] = test_data['word_count'].dict_trim_by_keys(significant_words, exclude=False) # Let's see what the first example of the dataset looks like: train_data[0]['review'] # The **word_count** column we had been working with before looks like the following: print(train_data[0]['word_count']) # Since we are only working with a subset of these words, the column **word_count_subset** is a subset of the above dictionary. In this example, only 2 `significant words` are present in this review. print(train_data[0]['word_count_subset']) # ## Train a logistic regression model on a subset of data # We will now build a classifier with **word_count_subset** as the feature and **sentiment** as the target. simple_model = turicreate.logistic_classifier.create(train_data, target = 'sentiment', features=['word_count_subset'], validation_set=None) simple_model # We can compute the classification accuracy using the `get_classification_accuracy` function you implemented earlier.
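# If you have not completed the fill-in function above yet, here is a hedged sketch that
# follows the three steps described earlier (it relies on `predict` returning class
# labels by default, as shown before, and on the `from __future__ import division` at
# the top of this notebook making `/` true division):

# +
def get_classification_accuracy(model, data, true_labels):
    predictions = model.predict(data)                 # Step 1: class predictions (+1/-1)
    num_correct = (predictions == true_labels).sum()  # Step 2: count matching labels
    accuracy = num_correct / len(data)                # Step 3: fraction correct
    return accuracy
# -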
get_classification_accuracy(simple_model, test_data, test_data['sentiment']) # Now, we will inspect the weights (coefficients) of the **simple_model**: simple_model.coefficients # Let's sort the coefficients (in descending order) by the **value** to obtain the coefficients with the most positive effect on the sentiment. simple_model.coefficients.sort('value', ascending=False).print_rows(num_rows=21) # **Quiz Question**: Consider the coefficients of **simple_model**. There should be 21 of them, an intercept term + one for each word in **significant_words**. How many of the 20 coefficients (corresponding to the 20 **significant_words** and *excluding the intercept term*) are positive for the `simple_model`? # **Quiz Question**: Are the positive words in the **simple_model** (let us call them `positive_significant_words`) also positive words in the **sentiment_model**? # # Comparing models # We will now compare the accuracy of the **sentiment_model** and the **simple_model** using the `get_classification_accuracy` method you implemented above. # # First, compute the classification accuracy of the **sentiment_model** on the **train_data**: # Now, compute the classification accuracy of the **simple_model** on the **train_data**: # **Quiz Question**: Which model (**sentiment_model** or **simple_model**) has higher accuracy on the TRAINING set? # Now, we will repeat this exercise on the **test_data**. Start by computing the classification accuracy of the **sentiment_model** on the **test_data**: # Next, we will compute the classification accuracy of the **simple_model** on the **test_data**: # **Quiz Question**: Which model (**sentiment_model** or **simple_model**) has higher accuracy on the TEST set? # ## Baseline: Majority class prediction # # It is quite common to use the **majority class classifier** as a baseline (or reference) model for comparison with your classifier model. The majority class classifier predicts the majority class for all data points. At the very least, you should healthily beat the majority class classifier; otherwise, the model is (usually) pointless. # # What is the majority class in the **train_data**? num_positive = (train_data['sentiment'] == +1).sum() num_negative = (train_data['sentiment'] == -1).sum() print(num_positive) print(num_negative) # Now compute the accuracy of the majority class classifier on **test_data**. # # **Quiz Question**: Enter the accuracy of the majority class classifier model on the **test_data**. Round your answer to two decimal places (e.g. 0.76). # **Quiz Question**: Is the **sentiment_model** definitely better than the majority class classifier (the baseline)?
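# A hedged sketch of the majority-class baseline on the test set: predict the training
# majority class for every test example and measure the fraction of matches (it reuses
# `num_positive`/`num_negative` computed just above):

# +
majority_class = +1 if num_positive > num_negative else -1
baseline_accuracy = (test_data['sentiment'] == majority_class).sum() / len(test_data)
print(baseline_accuracy)
# -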
2_ML_Combined_Courses/Course/Week-2/Colab_CLA02-NB01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 2 - Exploring Network Generation II # In this tutorial we explore the different types of network orderings that the `network_layout()` function from the `generate` module can generate. # ### Imports import geopandas as gpd import pandas as pd import netsim.generate as ng import netsim.utils as utils from pathlib import Path # ### Set data path data_path = Path.cwd().parent / "data" data_path # ### Read a sample shapefile fn_shp = data_path / "sample" / "sample15.shp" # We use geopandas to read the shapefile that contains the locations and the columns needed to run the simulation. In case the location file is a simple text file, e.g. *comma-delimited* or *csv*, use # ```python # import pandas as pd # df = pd.read_csv(filename) # ``` # instead. # df = gpd.read_file(fn_shp) df # We shall use a smaller version of these locations. df = df.drop(df.index[9:15]) df # We run the ```check()``` function in the `generate` module to check whether the table with the information needed to run the simulation has all the required columns and valid values. df = ng.check(df) # ### Creating a network generator # # Next, we run the ```create_network_generator()``` function to create a *network generator* that we can use to produce different versions, or iterations, of our network. netgentor, net_info, total_iterations = ng.create_network_generator(df) # ### Creating a network of paths # The ```network_layout()``` function transforms an iteration generated by the generator created using the ```create_network_generator()``` function into a network of paths. This function can generate several different types of networks. Let's explore this using a single iteration: sample_iteration = list(next(netgentor)) sample_iteration # #### *close* network # Let's generate a 'close' network of paths, the *default* network type. This type of network is defined as a closed circuit of paths amongst the different locations in each group. Each group is independent of (not connected to) the others. df_net = ng.network_layout(df, sample_iteration, 1) df_net # We can generate a very simple visualization of the network using ```plot_network()``` in the *utils* module. utils.plot_network(df_net) # Note that with the exception of the first group (which is only made of two locations), there is only one path for each pair of locations. If we want, we can change this by including the parameter ```twoway=True``` when calling the function (this parameter can be used with all network options). df_net = ng.network_layout(df, sample_iteration, 1, twoway=True) df_net utils.plot_network(df_net) # *N.B.* If you plot this new version with ```plot_network()``` you will not observe any qualitative difference (it might look different because networks are rendered slightly differently each time) as 'return' paths are drawn on top of the original ones. # # With the *opt* parameter ```network_layout()``` can define other types of networks.
Currently *opt* can take the following values: ```['close', 'central', 'decentral', 'distributed', 'all']``` # # #### *central*ized network df_net = ng.network_layout(df, sample_iteration, 1, opt='central') df_net utils.plot_network(df_net) # This option defines a network of paths that consists of a centralized set of paths from the locations of the first group to all of the locations in the remaining groups. Paths amongst the locations in the first group are generated first, and then paths from the remaining locations (regardless of whether they are in different groups) to each of the first group locations are generated. # # #### *decentral*ized network df_net = ng.network_layout(df, sample_iteration, 1, opt='decentral') df_net utils.plot_network(df_net) # This option defines a network of paths so that the locations of each group are connected to the locations of the following (lower level) group. Paths amongst the locations in the first group are generated first, paths from all locations in the second group are generated to all locations in the first group, then paths from all locations in the second group to all locations in the third group, and so on. # # #### *distributed* network df_net = ng.network_layout(df, sample_iteration, 1, opt='distributed') df_net utils.plot_network(df_net) # This option defines a network of paths similar to *decentral*, where the locations of each group are connected to the locations of the following (lower level) group. However, in addition, locations within each group are connected amongst themselves. # # #### *all* network df_net = ng.network_layout(df, sample_iteration, 1, opt='all') df_net utils.plot_network(df_net) # This option defines a network of paths from all to all locations, regardless of whether they are from different groups. # # Finally, it is possible to define a network for more than one iteration. In order to do this, however, we need to provide the ```network_layout()``` function with a blank dataframe. This is shown in the following cell: # + # create a blank dataframe net = {'origin': 'int32', 'destination': 'int32', 'iteration': 'int32'} df_net = pd.DataFrame(columns=list(net.keys())).astype(net) for i in range(3): iteration = list(next(netgentor)) print(iteration) df_net = ng.network_layout(df, iteration, i, df_net=df_net, opt='close') df_net # - # This concludes this tutorial. In this tutorial you learnt how to generate different network layouts given a set sequence, or order, of locations.
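# As a compact recap of the options demonstrated above, all four alternative layouts can
# be generated and plotted in one loop (a hedged sketch reusing only the functions
# already shown in this tutorial):

# +
for opt in ['close', 'central', 'decentral', 'distributed', 'all']:
    df_opt = ng.network_layout(df, sample_iteration, 1, opt=opt)
    utils.plot_network(df_opt)
# -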
notebooks/2 - Exploring Network Generation II.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="Goq4mAMtgfVv" import csv import numpy as np import pandas as pd import matplotlib.pyplot as plt from datetime import datetime import warnings import seaborn as sns # from plotly.subplots import make_subplots # import plotly.graph_objects as go warnings.filterwarnings("ignore") # + colab={"base_uri": "https://localhost:8080/", "height": 406} id="GZs_jDjygfWH" outputId="dd0f4677-71ac-4533-ff53-812ee3715fd6" city_day = pd.read_csv('city_day.csv') del city_day['Unnamed: 0'] city_day # + colab={"base_uri": "https://localhost:8080/"} id="i726IxbQgfWK" outputId="5e392821-6296-4383-e1c1-8e7cd1148180" city_day.isnull().sum() # + id="mL5dSwyfgfWK" from sklearn.experimental import enable_iterative_imputer from sklearn.impute import IterativeImputer city_day = city_day.copy(deep=True) # NOTE: fitting the imputer on one column at a time reduces the imputation to a univariate fill; fitting a single IterativeImputer on all of these columns at once would let it use cross-column information. mice_imputer = IterativeImputer() city_day['PM2.5'] = mice_imputer.fit_transform(city_day[['PM2.5']]) city_day['PM10'] = mice_imputer.fit_transform(city_day[['PM10']]) city_day['NO'] = mice_imputer.fit_transform(city_day[['NO']]) city_day['NOx'] = mice_imputer.fit_transform(city_day[['NOx']]) city_day['NH3'] = mice_imputer.fit_transform(city_day[['NH3']]) city_day['CO'] = mice_imputer.fit_transform(city_day[['CO']]) city_day['SO2'] = mice_imputer.fit_transform(city_day[['SO2']]) city_day['O3'] = mice_imputer.fit_transform(city_day[['O3']]) city_day['Benzene'] = mice_imputer.fit_transform(city_day[['Benzene']]) city_day['Toluene'] = mice_imputer.fit_transform(city_day[['Toluene']]) city_day['Xylene'] = mice_imputer.fit_transform(city_day[['Xylene']]) city_day['AQI'] = mice_imputer.fit_transform(city_day[['AQI']]) city_day['NO2'] = mice_imputer.fit_transform(city_day[['NO2']]) # + colab={"base_uri": "https://localhost:8080/"} id="vAVw8BC7gfWL" outputId="39fd947a-a01f-4127-8656-a28f76540a75" city_day.isnull().sum() # + colab={"base_uri": "https://localhost:8080/", "height": 593} id="_Zl1cSyagfWM" outputId="6173b6f9-eaa3-495b-f2bb-a18b17c992da" city_day['AQI_Bucket'] = np.where( (city_day['AQI'] >= 0) & (city_day['AQI'] <= 50) ,'Good' , city_day['AQI_Bucket'] ) city_day['AQI_Bucket'] = np.where( (city_day['AQI'] > 50) & (city_day['AQI'] <= 100) ,'Satisfactory' , city_day['AQI_Bucket'] ) city_day['AQI_Bucket'] = np.where( (city_day['AQI'] > 100) & (city_day['AQI'] <= 200) ,'Moderate' , city_day['AQI_Bucket'] ) city_day['AQI_Bucket'] = np.where( (city_day['AQI'] > 200) & (city_day['AQI'] <= 300) ,'Poor' , city_day['AQI_Bucket'] ) city_day['AQI_Bucket'] = np.where( (city_day['AQI'] > 300) & (city_day['AQI'] <= 400) ,'Very Poor' , city_day['AQI_Bucket'] ) city_day['AQI_Bucket'] = np.where( (city_day['AQI'] > 400) & (city_day['AQI'] <= 500) ,'Severe' , city_day['AQI_Bucket'] ) city_day # + id="tOc4lL08gfWN" city_day.to_csv('city_day.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 406} id="wBlBFQyAgfWN" outputId="cbfa673a-0e51-4247-b894-10f73898f548" def mergeColumns(data): data['Date'] = pd.to_datetime(data['Date']) data['BTX'] = data['Benzene'] + data['Toluene'] + data['Xylene'] data = data.drop(['Benzene','Toluene','Xylene'], axis=1) data['Particulate_Matter'] = data['PM2.5'] + data['PM10'] return data def subsetColumns(data): pollutants = ['Particulate_Matter', 'NO2', 'CO','SO2', 'O3', 'BTX'] columns = ['Date', 'City', 'AQI', 'AQI_Bucket'] + pollutants 
data = data[columns] return data, pollutants def handleMissingValues(data): # missing_values = getMissingValues(data) newCityData = mergeColumns(data) newCityData, pollutants = subsetColumns(newCityData) return newCityData, pollutants newCityData, newColumns = handleMissingValues(city_day) newCityData #print(newColumns) # + colab={"base_uri": "https://localhost:8080/"} id="EeMgUuGpgfWO" outputId="2678a3bb-5f2f-439c-8753-ac67282d7371" newCityData.isnull().sum() # + colab={"base_uri": "https://localhost:8080/", "height": 593} id="EuOPKmAYgfWP" outputId="b6672b0f-5c77-47ff-d3db-e95bc6fe5f69" min_Particulate_Matter = newCityData['Particulate_Matter'].min() max_Particulate_Matter = newCityData['Particulate_Matter'].max() newCityData['Particulate_Matter_new'] = [(x-min_Particulate_Matter)/(max_Particulate_Matter-min_Particulate_Matter) for x in newCityData['Particulate_Matter']] min_NO2 = newCityData['NO2'].min() max_NO2 = newCityData['NO2'].max() newCityData['NO2_new'] = [(x-min_NO2)/(max_NO2-min_NO2) for x in newCityData['NO2']] min_CO = newCityData['CO'].min() max_CO = newCityData['CO'].max() newCityData['CO_new'] = [(x-min_CO)/(max_CO-min_CO) for x in newCityData['CO']] min_SO2 = newCityData['SO2'].min() max_SO2 = newCityData['SO2'].max() newCityData['SO2_new'] = [(x-min_SO2)/(max_SO2-min_SO2) for x in newCityData['SO2']] min_O3 = newCityData['O3'].min() max_O3 = newCityData['O3'].max() newCityData['O3_new'] = [(x-min_O3)/(max_O3-min_O3) for x in newCityData['O3']] min_BTX = newCityData['BTX'].min() max_BTX = newCityData['BTX'].max() newCityData['BTX_new'] = [(x-min_BTX)/(max_BTX-min_BTX) for x in newCityData['BTX']] newCityData # + id="zTn4eiIdgfWQ" newCityData = newCityData[['City','Date','AQI','AQI_Bucket', 'Particulate_Matter_new','NO2_new','CO_new','SO2_new','O3_new', 'BTX_new']] newCityData.to_csv('newCityData.csv') # + id="OddjFa08gfWQ" pollutant = ['Particulate_Matter_new','NO2_new','CO_new','SO2_new','O3_new','BTX_new'] column = ['Date', 'City', 'AQI', 'AQI_Bucket'] + pollutant nCityData = newCityData[column] nCityData.to_csv('nCityData.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 807} id="T-RR8T7qgfWR" outputId="0d2bc52c-fa43-414d-c03b-2ff9a6f9429c" def visualisepollutants(udata, column): data = udata.copy() data.set_index('Date',inplace=True) axes = data[column].plot(marker='.', alpha=0.5, linestyle='None', figsize=(16, 15), subplots=True) for ax in axes: ax.set_xlabel('Years') ax.set_ylabel('ug/m3') visualisepollutants(nCityData, pollutant) # + colab={"base_uri": "https://localhost:8080/", "height": 518} id="i5cSEiMZgfWS" outputId="0fa82762-398d-4f94-b453-b811750f1906" def trend_plot(nCityData, value): data = nCityData.copy() data['Year'] = [d.year for d in data.Date] data['Month'] = [d.strftime('%b') for d in data.Date] years = data['Year'].unique() fig, axes = plt.subplots(1, 2, figsize=(12,3), dpi= 80) sns.boxplot(x='Year', y=value, data=data, ax=axes[0]) sns.lineplot(x='Month', y=value, data=data.loc[~data.Year.isin([2015, 2020]), :], ax=axes[1]) axes[0].set_title('Year-wise Plot i.e. the trend', fontsize=18); axes[1].set_title('Month-wise Plot i.e. 
the seasonality', fontsize=18) plt.show() value='Particulate_Matter_new' trend_plot(nCityData,value) value='NO2_new' trend_plot(nCityData,value) # + colab={"base_uri": "https://localhost:8080/", "height": 220} id="MxlH8Rw_gfWT" outputId="23b3d135-5594-48c2-f12d-d363416b3fbb" def visualiseAQI(udata, column): data = udata.copy() data.set_index('Date',inplace=True) axes = data[column].plot(marker='.', alpha=0.5, linestyle='None', figsize=(16, 3), subplots=True) for ax in axes: ax.set_xlabel('Years') ax.set_ylabel('AQI') visualiseAQI(nCityData, ['AQI']) # + id="r-NxwKCqgfWU" from pandas import DataFrame from pandas import concat # + id="-iBZeg79gfWU" def series_to_supervised(data, n_in=1, n_out=1, dropnan=True): n_vars = 1 if type(data) is list else data.shape[1] df = DataFrame(data) cols, names = list(), list() # input sequence (t-n, ... t-1) for i in range(n_in, 0, -1): cols.append(df.shift(i)) names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)] # forecast sequence (t, t+1, ... t+n) for i in range(0, n_out): cols.append(df.shift(-i)) if i == 0: names += [('var%d(t)' % (j+1)) for j in range(n_vars)] else: names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)] # put it all together agg = concat(cols, axis=1) agg.columns = names # drop rows with NaN values if dropnan: agg.dropna(inplace=True) return agg # + id="eBmocxQqgfWV" newCityData = pd.read_csv('newCityData.csv', header=0, index_col=0) values = newCityData.values # + colab={"base_uri": "https://localhost:8080/"} id="W4BPWSokgfWV" outputId="2408a5c5-adba-4509-e1ab-722a17c7dbb1" values[:] # + id="jPmZRUsTgfWV" cities = ['Mumbai','Shillong','Lucknow','Delhi','Visakhapatnam','Patna','Bhopal'] somecityday = newCityData[newCityData['Date'] >= '2015-01-01'] AQI = somecityday[somecityday.City.isin(cities)][['Date','City','AQI','AQI_Bucket']] # + id="9qryW5YfgfWW" AQI_pivot = AQI.pivot(index='Date', columns='City', values='AQI') # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="cARzRAQrgfWW" outputId="962441e3-36a4-4687-c379-866708685061" def getColorBar(city): col = [] for val in AQI_pivot[city]: if val < 50: col.append('royalblue') elif val > 50 and val < 101: col.append('lightskyblue') #cornflowerblue elif val > 100 and val < 201: col.append('lightsteelblue') elif val > 200 and val < 301: col.append('peachpuff') elif val > 300 and val < 401: col.append('lightcoral') elif val> 400: col.append('firebrick') else: col.append('white') return col ah = getColorBar('Mumbai') de = getColorBar('Shillong') mu = getColorBar('Lucknow') ko = getColorBar('Delhi') hy = getColorBar('Visakhapatnam') ch = getColorBar('Patna') bp=getColorBar('Bhopal') colors = {'Good':'royalblue', 'Satisfactory':'lightskyblue', 'Moderate':'lightsteelblue', 'Poor':'peachpuff', 'Very Poor':'lightcoral', 'Severe':'firebrick'} labels = list(colors.keys()) handles = [plt.Rectangle((0,0),1,1, color=colors[label]) for label in labels] f, ((ax1, ax2, ax3, ax4, ax5, ax6, ax7)) = plt.subplots(7, 1, sharex='col', sharey='row', figsize=(15,18)) ax1.bar(AQI_pivot.index, AQI_pivot['Mumbai'], color = ah, width = 0.75) ax2.bar(AQI_pivot.index, AQI_pivot['Shillong'], color = de, width = 0.75) ax3.bar(AQI_pivot.index, AQI_pivot['Lucknow'], color = mu, width = 0.75) ax4.bar(AQI_pivot.index, AQI_pivot['Delhi'], color = ko, width = 0.75) ax5.bar(AQI_pivot.index, AQI_pivot['Visakhapatnam'], color = hy, width = 0.75) ax6.bar(AQI_pivot.index, AQI_pivot['Patna'], color = ch, width = 0.75) ax7.bar(AQI_pivot.index, AQI_pivot['Bhopal'], color = bp, width = 0.75) 
ax1.legend(handles, labels, loc='upper left') ax2.legend(handles, labels, loc='upper left') ax3.legend(handles, labels, loc='upper left') ax4.legend(handles, labels, loc='upper left') ax5.legend(handles, labels, loc='upper left') ax6.legend(handles, labels, loc='upper left') ax7.legend(handles, labels, loc='upper left') ax1.title.set_text('Mumbai') ax2.title.set_text('Shillong') ax3.title.set_text('Lucknow') ax4.title.set_text('Delhi') ax5.title.set_text('Visakhapatnam') ax6.title.set_text('Patna') ax7.title.set_text('Bhopal') ax1.set_ylabel('AQI') ax2.set_ylabel('AQI') ax3.set_ylabel('AQI') ax4.set_ylabel('AQI') ax5.set_ylabel('AQI') ax6.set_ylabel('AQI') ax7.set_ylabel('AQI') # + colab={"base_uri": "https://localhost:8080/", "height": 643} id="RQHnHpQegfWX" outputId="b80e8344-6fea-4da9-e1c2-aa92a5f34b54" AQI_beforeLockdown = AQI_pivot['2015-01-01':'2020-03-25'] AQI_afterLockdown = AQI_pivot['2020-03-26':'2020-07-01'] limits = [50, 100, 200, 300, 400, 510] #palette = sns.light_palette("Spectral", len(limits), reverse = True) palette = sns.color_palette("coolwarm", len(limits)) for city in cities: aqi_before = AQI_beforeLockdown[city].mean() aqi_after = AQI_afterLockdown[city].mean() fig, (ax1, ax2) = plt.subplots(1,2,figsize=(27, 1.5)) ax1.set_yticks([1]) ax1.set_yticklabels([city]) ax1.spines['bottom'].set_visible(False) ax1.spines['top'].set_visible(False) ax1.spines['right'].set_visible(False) ax1.spines['left'].set_visible(False) prev_limit = 0 for idx, lim in enumerate(limits): ax1.barh([1], lim-prev_limit, left=prev_limit, height=15, color=palette[idx]) prev_limit = lim ax1.barh([1], aqi_before, color='black', height=5) # after lockdown ax2.set_yticks([1]) ax2.set_yticklabels([city]) ax2.spines['bottom'].set_visible(False) ax2.spines['top'].set_visible(False) ax2.spines['right'].set_visible(False) ax2.spines['left'].set_visible(False) prev_limit = 0 for idx, lim in enumerate(limits): ax2.barh([1], lim-prev_limit, left=prev_limit, height=15, color=palette[idx]) prev_limit = lim ax2.barh([1], aqi_after, color='black', height=5) ax1.set_title('Before Lockdown') ax2.set_title('After Lockdown') rects = ax1.patches labels=["Good", "Satisfactory", "Moderate", "Poor", 'Very Poor', 'Severe'] for rect, label in zip(rects, labels): height = rect.get_height() ax1.text( rect.get_x() + rect.get_width() / 2, -height * .4, label, ha='center', va='bottom', color='black') ax2.text( rect.get_x() + rect.get_width() / 2, -height * .4, label, ha='center', va='bottom', color='black') # + id="zsGWjnqkgfWY" Delhi_data = newCityData[newCityData['City']=='Delhi'] Delhi_data.set_index('Date',inplace=True, drop = False) # + colab={"base_uri": "https://localhost:8080/", "height": 436} id="s563-RMCgfWY" outputId="89e7e5c1-041f-4d86-d48a-e65956ebec34" Delhi_data # + id="CHuDZDIMgfWZ" val = 'AQI' final_data = pd.DataFrame(index=np.arange('2015-01-01','2020-07-02',dtype='datetime64[D]'), columns = [val]) final_data[val] = Delhi_data[val] final_data=final_data.astype('float64') final_data[val] = final_data[val].fillna(final_data[val].mean(axis=0)) # + colab={"base_uri": "https://localhost:8080/", "height": 406} id="lts5D4apgfWZ" outputId="91399dfd-7194-4305-972e-ce62dd9fe54d" seasonal_data = final_data seasonal_data = seasonal_data.resample(rule='MS').mean() seasonal_data # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="ZuP-CNKKgfWa" outputId="853c857c-755a-4ba8-fcee-c0993db3c6b2" from statsmodels.tsa.seasonal import seasonal_decompose Delhi_AQI = seasonal_data[val] result = 
seasonal_decompose(Delhi_AQI, model='multiplicative') result.plot(); # + colab={"base_uri": "https://localhost:8080/"} id="uBPc7NB0gfWa" outputId="1897e1f4-77d1-4b6a-92b6-3709402a1f62" # !pip install pmdarima import pmdarima as pm # + id="QvN_comogfWa" from statsmodels.tsa.statespace.sarimax import SARIMAX from pmdarima import auto_arima # + colab={"base_uri": "https://localhost:8080/"} id="zMo_wzHpgfWb" outputId="21561ec9-6393-4b31-df30-b81d77513601" auto_arima(y=Delhi_AQI,start_p=0,start_P=0,start_q=0,start_Q=0,seasonal=True, m=12) # + id="6AcxYRFZgfWb" train = Delhi_AQI[:42] # Jan 2015 - Jun 2018 test = Delhi_AQI[42:54] # Jul 2018 - Jun 2019 # + colab={"base_uri": "https://localhost:8080/"} id="_hjs5dWFgfWc" outputId="68191972-b7f2-4cdd-be1b-100758d75f29" test # + colab={"base_uri": "https://localhost:8080/", "height": 387} id="VoqeQMSXgfWc" outputId="bcc67c0a-645d-47ea-d8fb-4e19ac6f4bb8" model=SARIMAX(train,order=(1,0,0),seasonal_order=(1,0,1,12)) results=model.fit() results.summary() # + id="xHAD1-B3gfWc" predictions = results.predict(start=42, end=53, typ='levels').rename('Predictions') # + colab={"base_uri": "https://localhost:8080/", "height": 276} id="KD0KTJNXgfWd" outputId="b6259fcc-0ade-4916-90cf-418bf1b073cf" predictions.plot(legend=True) test.plot(legend=True,title="Delhi Prediction data"); # + colab={"base_uri": "https://localhost:8080/"} id="56oSayvNgfWd" outputId="825de3f2-1dc4-47ac-fa82-ec0a01771c5b" from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, explained_variance_score, max_error,mean_poisson_deviance,mean_gamma_deviance import math RMSE=np.sqrt(mean_squared_error(predictions,test)) print('Root Mean Squared Error: ', RMSE) print('Mean AQI:',test.mean()) forecast_errors = [test[i]-predictions[i] for i in range(len(test))] bias = sum(forecast_errors) * 1.0/len(test) print('Bias: %f' % bias) mse = mean_squared_error(test, predictions) print('MSE: '+str(mse)) mae = mean_absolute_error(test, predictions) print('MAE: '+str(mae)) rmse = math.sqrt(mean_squared_error(test, predictions)) print('RMSE: '+str(rmse)) mape = np.mean(np.abs(predictions - test)/np.abs(test)) print('MAPE: '+str(mape)) r2score=r2_score(test, predictions) print('r2score: '+str(r2score)) explainedVariance_score=explained_variance_score(test, predictions) print('explainedVariance_score: '+str(explainedVariance_score)) me=max_error(test, predictions) print('me: '+str(me)) mpd=mean_poisson_deviance(test, predictions) print('mpd: '+str(mpd)) mgd=mean_gamma_deviance(test, predictions) print('mgd: '+str(mgd)) # + colab={"base_uri": "https://localhost:8080/", "height": 483} id="ozaIOkyCgfWe" outputId="35b9806c-371a-4e38-98ed-265243f6b449" # Forming the model: final_model = SARIMAX(train,order=(1,0,0),seasonal_order=(1,0,1,12)) results = final_model.fit() # Obtaining predicted values: predictions = results.predict(start=64, end=77, typ='levels').rename('Predictions') # Plotting predicted values against the true values: predictions.plot(legend=True) Delhi_AQI.plot(legend=True,figsize=(12,8),grid=True,title="Delhi AQI"); # - df_anova = pd.read_csv('newCityData.csv') df_anova = df_anova[['AQI','City']] # + from scipy import stats Citys = pd.unique(df_anova.City.values) d_data = {city:df_anova['AQI'][df_anova.City == city] for city in Citys} F, p = stats.f_oneway(d_data['Mumbai'], d_data['Shillong'], d_data['Lucknow']) print("p-value for significance is: ", p) if p<0.05: print("We reject the null hypothesis") else: print("We fail to reject the null hypothesis") # + from scipy import stats Citys = 
pd.unique(df_anova.City.values) d_data = {city:df_anova['AQI'][df_anova.City == city] for city in Citys} F, p = stats.f_oneway(d_data['Delhi'], d_data['Visakhapatnam']) print("p-value for significance is: ", p) if p<0.05: print("We reject the null hypothesis") else: print("We fail to reject the null hypothesis") # + from scipy import stats Citys = pd.unique(df_anova.City.values) d_data = {city:df_anova['AQI'][df_anova.City == city] for city in Citys} F, p = stats.f_oneway(d_data['Patna'], d_data['Bhopal']) print("p-value for significance is: ", p) if p<0.05: print("We reject the null hypothesis") else: print("We fail to reject the null hypothesis")
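# The three cells above compare hand-picked pairs of cities. As a hedged generalization (a minimal sketch reusing `stats` and the `d_data` dictionary built in the previous cell, with the same 0.05 threshold), a single one-way ANOVA can compare every city at once:

F, p = stats.f_oneway(*d_data.values())
print("p-value for significance across all cities is: ", p)
if p < 0.05:
    print("We reject the null hypothesis (at least one city's mean AQI differs)")
else:
    print("We fail to reject the null hypothesis")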
SARIMA_implementation_delhi.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (py39) # language: python # name: py39 # --- # + import numpy as np import matplotlib.pyplot as plt import xarray as xr from salishsea_tools import evaltools as et, viz_tools, places import datetime as dt import matplotlib.dates as mdates import matplotlib as mpl # %matplotlib inline # - # ### load 2015-present atmospheric forcing files # ### NOTE: the wind grid rotation is not the same as the SalishSeaCast grid rotation startdate=dt.datetime(2015,3,1) enddate=dt.datetime(2015,3,31) opsdir='/results/forcing/atmospheric/GEM2.5/operational' flist=et.index_model_files(startdate,enddate,opsdir,nam_fmt='ops',flen=1,ftype='None',tres=1) flist windsMarch=xr.open_mfdataset(flist['paths']) jw,iw=places.PLACES['S3']['GEM2.5 grid ji'] # GEM2.5 grid ji is atm forcing grid for ops files (naming... starts with ops) windsMarch plt.plot(windsMarch.nav_lon.isel(time_counter=0), windsMarch.nav_lat.isel(time_counter=0),'r*'); # grab interesting values: u_wind=windsMarch.u_wind.isel(y=jw,x=iw) v_wind=windsMarch.v_wind.isel(y=jw,x=iw) tt=windsMarch.time_counter fig,ax=plt.subplots(1,1,figsize=(8,2)) ax.plot(tt,u_wind,'c-') ax.plot(tt,v_wind,'b-') ax.set_xlim(startdate,enddate) ax.set_title('Wind speed') wspeed=np.sqrt(u_wind**2 + v_wind**2) # wind direction in degrees from east d = np.arctan2(v_wind, u_wind) winddirec=np.rad2deg(d + (d < 0)*2*np.pi) fig,ax=plt.subplots(1,1,figsize=(8,2)) q=ax.quiver(tt, np.zeros(len(tt)), u_wind, v_wind,scale=8*10e0); ax.set_yticklabels([]); fig.autofmt_xdate(bottom=0.3, rotation=30, ha='right') yearsFmt = mdates.DateFormatter('%b %d') ax.xaxis.set_major_formatter(yearsFmt) ax.set_xlim(startdate,enddate) solar=windsMarch.solar.isel(y=jw,x=iw) fig,ax=plt.subplots(1,1,figsize=(8,2)) ax.plot(tt,solar,'r-') ax.set_xlim(startdate,enddate) ax.set_title('Solar radiation') # calculate daily average solar radiation: ttday=tt[12::24] # start at 12th value and take every 24th solardaily=list() for ii in range(0,int(len(solar)/24)): solardaily.append(np.mean(solar[(ii*24):((ii+1)*24)])) solardaily=np.array(solardaily) # convert to numpy array from list to be able to plot fig,ax=plt.subplots(1,1,figsize=(8,2)) ax.plot(ttday,solardaily,'m-') ax.set_xlim(startdate,enddate) ax.set_title('Daily average solar radiation')
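# The direction computed above is measured in degrees counter-clockwise from east, pointing in the direction the wind blows *toward*. Below is a hedged helper for converting to the meteorological convention (degrees clockwise from north, the direction the wind comes *from*); the formula is standard trigonometry, but double-check it matches the convention your application expects:

def met_direction(deg_ccw_from_east):
    # oceanographic "toward, CCW from east" -> meteorological "from, CW from north"
    return (270 - deg_ccw_from_east) % 360

# e.g. a wind blowing toward the east (0 deg) is a westerly (270 deg),
# and one blowing toward the north (90 deg) is a southerly (180 deg):
print(met_direction(0), met_direction(90))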
notebooks/Examples/modelAtmosForcingExample.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Meyer Penny Game # + import pyquil.quil as pq from pyquil import api from pyquil.gates import I, H, X import numpy as np # - def meyer_penny_program(): """ Returns the program to simulate the Meyer-Penny Game The full description is available in ../docs/source/exercises.rst :return: pyQuil Program """ prog = pq.Program() ro = prog.declare("ro", memory_size=2) picard_register = ro[1] answer_register = ro[0] then_branch = pq.Program(X(0)) else_branch = pq.Program(I(0)) # Prepare Qubits in Heads state or superposition, respectively prog.inst(X(0), H(1)) # Q puts the coin into a superposition prog.inst(H(0)) # Picard makes a decision and acts accordingly prog.measure(1, picard_register) prog.if_then(picard_register, then_branch, else_branch) # Q undoes his superposition operation prog.inst(H(0)) # The outcome is recorded into the answer register prog.measure(0, answer_register) return prog # + n_trials = 10 qvm = api.QVMConnection() outcomes = np.asarray(qvm.run(meyer_penny_program(), [0, 1], trials=n_trials)) print("Number of games: {}".format(n_trials)) print("Q's winning average: {}".format(outcomes[:, 0].mean())) print("Picard's flip-decision average: {}".format(outcomes[:, 1].mean()))
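# In the ideal, noiseless game Q always wins: his first Hadamard puts the coin into the |-> state, which only picks up a global phase under Picard's classical flip, and the second Hadamard maps it back to heads with certainty. A hedged sanity check on the `outcomes` array collected above (this assumes the noiseless QVM used in this notebook):

assert outcomes[:, 0].mean() == 1.0, "on a noiseless QVM Q should win every round"
print("Q wins every game, whatever Picard decides")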
notebooks/MeyerPennyGame.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: mcd # language: python # name: mcd # --- import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px import plotly.graph_objects as go import dash_bootstrap_components as dbc import dash from dash import html from dash import dcc from dash.dependencies import Input, Output, State # + app = dash.Dash(external_stylesheets=[dbc.themes.YETI]) server = app.server ds = pd.read_csv("https://raw.githubusercontent.com/danielmc81/ing_caracteristicas/main/datos_limpios.csv") ds.drop(["city_hmo","city_nog","city_obr"], axis=1, inplace=True) ds["date"] = pd.to_datetime(ds.date) # + dsn = ds.copy() column = ["HQprcp_hmo","prcp_hmo","HQprcp_nog","prcp_nog","HQprcp_obr","prcp_obr"] dsn[column] = ds[column]/ds[column].abs().max() # - dsn_hmo = dsn[["date","HQprcp_hmo","prcp_hmo"]] dsn_nog = dsn[["date","HQprcp_nog","prcp_nog"]] dsn_obr = dsn[["date","HQprcp_obr","prcp_obr"]] dsn_hmo = ds[["date","HQprcp_hmo","prcp_hmo"]] dsn_nog = ds[["date","HQprcp_nog","prcp_nog"]] dsn_obr = ds[["date","HQprcp_obr","prcp_obr"]] dsn_hmo.rename(columns={"prcp_hmo":"Prometeus","HQprcp_hmo":"Nasa"}, inplace=True) dsn_nog.rename(columns={"prcp_nog":"Prometeus","HQprcp_nog":"Nasa"}, inplace=True) dsn_obr.rename(columns={"prcp_obr":"Prometeus","HQprcp_obr":"Nasa"}, inplace=True) def grafica(ds, ciudad): fig = px.line(ds, x=ds["date"], y=["Prometeus","Nasa"], labels={"variable": "Pronóstico", "date": "Fecha", "value": "Precip. (mm)" }) fig.update_xaxes(showgrid=True, visible=True, title_text=ciudad) fig.update_yaxes(tickformat=".2f", title_text="Precipitación (mm)") return fig # + def mapa(): lats = [29.095200, 31.318611, 27.486389] lons = [-111.051100, -110.945833, -109.94083] nlats = [29.049999, 31.350002, 27.450001] nlons = [-111.049995, -110.949997, -109.949997] fig = go.Figure(go.Scattermapbox( mode = "markers+lines", lat = [29.095200, 29.049999], lon = [-111.051100, -111.049995], marker = {'size': 10}, text=["Prometeus"+'<br>'"Dist. entre ambos 5.01 Km","Nasa"+'<br>'"Dist. entre ambos 5.01 Km"], name="Hermosillo")) fig.add_trace(go.Scattermapbox( mode = "markers+lines", lat = [31.318611, 31.350002], lon = [-110.945833, -110.949997], marker = {'size': 10}, text=["Prometeus"+'<br>'"Dist. entre ambos 3.50 Km","Nasa"+'<br>'"Dist. entre ambos 3.50 Km"], name="Heroica Nogales")) fig.add_trace(go.Scattermapbox( mode = "markers+lines", lat = [27.486389, 27.450001], lon = [-109.94083, -109.949997], marker = {'size': 10}, text=["Prometeus"+'<br>'"Dist. entre ambos 4.13 Km","Nasa"+'<br>'"Dist. 
entre ambos 4.13 Km"], name="Ciudad Obregón")) fig.update_layout( margin ={'l':0,'t':0,'b':0,'r':0}, mapbox = { 'center': {'lon': -111, 'lat': 29.5}, 'style': "open-street-map", 'zoom': 6}) # fig.show() return fig # + app.layout = \ dbc.Container\ ([ dbc.Row([ html.H2("Reporte PROMETEUS"), html.H5("Comparación de los pronósticos de precipitación que genera PROMETEUS y la NASA"), html.Br(), html.Br(), html.Br(), html.Br() ]), dbc.Row([ dbc.Col([ html.H5("Se utilizaron 3 ciudades de Sonora para revisión ya que por el momento solo contamos con información para estas localidades en ambos datasets"), html.H5("En la siguiente gráfica podemos ver la poca similitud entre ambos pronósticos."), html.H5("Esto se debe en parte a que los valores de la Nasa están aproximadamente a 5 Km de los de Prometeus, lo cual es suficiente para que sean tan diferentes"), html.Br(), html.Br() ], align="center", lg=4, md=12, xs=12), dbc.Col([ dcc.Dropdown( id="menu", options=[{"label": "Hermosillo", "value": "dsn_hmo"}, {"label": "Heroica Nogales", "value": "dsn_nog"}, {"label": "Ciudad Obregón", "value": "dsn_obr"} ], value="dsn_hmo" ), dcc.Graph( id="grafica", className="dropgraph", ) ], align="center", lg=8, md=12, xs=12) ]), dbc.Row([ dbc.Col([ html.H5("En el mapa que se presenta, está graficada la distancia entre los puntos de ambos datasets, también se indica la distancia en Km que existe entre ellos"), html.H5("Como trabajo futuro podríamos plantear la posibilidad de utilizar las mismas coordenadas de la Nasa en Prometeus para ver si eso mejora la similitud en cuanto a pronóstico"), html.Br() ], align="center", lg=4, md=12, xs=12), dbc.Col([ dcc.Graph(figure=mapa()) ], align="center", lg=8, md=12, xs=12) ]), dbc.Row([ html.Br(), ]), dbc.Row([ html.Br(), html.H5("Toda la información a detalle acerca de cómo se generó esta información además del código para reproducir este tablero se encuentra en el siguiente repositorio https://github.com/danielmc81/ing_caracteristicas.git") ]) ]) @app.callback(Output("grafica", "figure"), Input("menu", "value")) def grafica(value): if value == "dsn_hmo": ds = dsn_hmo ciudad = "Hermosillo" elif value == "dsn_nog": ds = dsn_nog ciudad = "Heroica Nogales" else: ds = dsn_obr ciudad = "Ciudad Obregón" fig = px.line(ds, x=ds["date"], y=["Prometeus","Nasa"], labels={"variable": "Pronóstico", "date": "Fecha", "value": "Precip. (mm)" }) fig.update_xaxes(showgrid=True, visible=True, title_text=ciudad) fig.update_yaxes(tickformat=".2f", title_text="Precipitación (mm)") return fig if __name__ == '__main__': app.run_server() # -
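# The distances quoted in the map tooltips ("Dist. entre ambos ... Km") can be reproduced with a standard haversine helper. This is a minimal, self-contained sketch; the coordinates below are the same ones hard-coded in mapa(), and the Earth radius of 6371 km is the usual spherical approximation:

from math import radians, sin, cos, asin, sqrt

def haversine_km(lat1, lon1, lat2, lon2):
    # great-circle distance between two (lat, lon) points, in kilometres
    lat1, lon1, lat2, lon2 = map(radians, (lat1, lon1, lat2, lon2))
    a = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    return 2 * 6371 * asin(sqrt(a))

print(haversine_km(29.095200, -111.051100, 29.049999, -111.049995))  # Hermosillo, ~5.0 km
print(haversine_km(31.318611, -110.945833, 31.350002, -110.949997))  # Heroica Nogales, ~3.5 km
print(haversine_km(27.486389, -109.94083, 27.450001, -109.949997))   # Ciudad Obregón, ~4.1 km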
codigo/reporte.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (statmath) # language: python # name: statmath # --- # + import sys import os import numpy as np import pandas as pd from matplotlib import pyplot as plt from sklearn.metrics import mean_squared_error, mean_absolute_error, accuracy_score from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.manifold import TSNE from sklearn.metrics.pairwise import manhattan_distances import tensorflow as tf from tensorflow.keras import Model from tensorflow.keras.layers import Dense, Input, Dropout, Flatten, Reshape from tensorflow.keras.constraints import MinMaxNorm from tensorflow.keras.optimizers import Adam from tensorflow.keras.models import load_model sys.path.append("../") sys.path.append("../dbal") from query_methods import * from training_models import * from utils import * tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) pd.set_option('display.max_columns', 500) # + source = "synth" target = "svhn" encoder = load_model("../datasets/models_digits/encoder.h5") Xs, ys = load_digits(source) Xt, yt = load_digits(target) Xs = encoder.predict(Xs[:,:,:,np.newaxis]) Xt = encoder.predict(Xt[:,:,:,np.newaxis]) Xs, ys, Xt, yt, convert_y = preprocessing_digits(Xs, ys, Xt, yt) X = np.concatenate((Xs, Xt)) y = np.concatenate((ys, yt)) # - def get_base_model(input_shape=X.shape[1:], output_shape=10, activation="softmax", C=1): inputs = Input(input_shape) modeled = Dense(100, activation='relu', kernel_constraint=MinMaxNorm(0, C), bias_constraint=MinMaxNorm(0, C))(inputs) modeled = Dense(100, activation='relu', kernel_constraint=MinMaxNorm(0, C), bias_constraint=MinMaxNorm(0, C))(modeled) modeled = Dense(10, activation=activation, kernel_constraint=MinMaxNorm(0, C), bias_constraint=MinMaxNorm(0, C))(modeled) model = Model(inputs, modeled) model.compile(optimizer=Adam(0.001), loss='categorical_crossentropy', metrics=["accuracy"]) return model # ## Visualization # + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5)) pca = PCA(2) pca.fit(X) Xs_pca = pca.transform(Xs) Xt_pca = pca.transform(Xt) ax1.plot(Xs_pca[:, 0], Xs_pca[:, 1], '.', alpha=0.5, label="Source") ax1.plot(Xt_pca[:, 0], Xt_pca[:, 1], '.', alpha=0.5, label="Target") ax2.hist(ys.argmax(1).ravel(), bins=20, alpha=0.5, density=False, weights=np.ones(len(ys)) * 1/len(y), label="Source") ax2.hist(yt.argmax(1).ravel(), bins=20, alpha=0.5, density=False, weights=np.ones(len(yt)) * 1/len(y), label="Target") ax1.set_title("PCA on input features X", fontsize=16) ax1.set_xlabel(r"PCA first component") ax1.set_ylabel(r"PCA second component") ax2.set_title("Output Distribution", fontsize=16) ax2.set_xlabel("Classes") ax2.set_ylabel("Probabilities") ax1.legend() ax2.legend() plt.show() # - # ## Parameters n_queries = 50 seed = 1 fit_params = dict(epochs=30, batch_size=128, verbose=0) model = BalanceWeighting(get_base_model) # ## Query Methods -- No Adversarial features # ### Random # + np.random.seed(seed); tf.random.set_seed(seed) random = RandomQuery() random.fit(Xt, Xs, ys, n_queries) queries = random.predict(n_queries) model.fit(Xs, ys, Xt[queries], yt[queries], **fit_params); test_index = np.array(list(set(np.arange(len(Xt))) - set(queries))) y_pred = convert_y(model.predict(Xt[test_index])) y_true = convert_y(yt[test_index]) score_random = accuracy_score(y_true, y_pred) print("Score on Unlabeled Target: %.3f"%score_random) # 
- # ### K-means # + np.random.seed(seed); tf.random.set_seed(seed) kmeans = KMeansQuery(minibatch=True) kmeans.fit(Xt, Xs, ys, n_queries) queries = kmeans.predict(n_queries) model.fit(Xs, ys, Xt[queries], yt[queries], **fit_params); test_index = np.array(list(set(np.arange(len(Xt))) - set(queries))) y_pred = convert_y(model.predict(Xt[test_index])) y_true = convert_y(yt[test_index]) score_kmeans = accuracy_score(y_true, y_pred) print("Score on Unlabeled Target: %.3f"%score_kmeans) # - # ### Diversity # + np.random.seed(seed); tf.random.set_seed(seed) sub_index = np.random.choice(len(Xs), 1000, replace=False) divers = DiversityQuery() divers.fit(Xt, Xs[sub_index], ys[sub_index], n_queries) queries = divers.predict(n_queries) model.fit(Xs, ys, Xt[queries], yt[queries], **fit_params); test_index = np.array(list(set(np.arange(len(Xt))) - set(queries))) y_pred = convert_y(model.predict(Xt[test_index])) y_true = convert_y(yt[test_index]) score_divers = accuracy_score(y_true, y_pred) print("Score on Unlabeled Target: %.3f"%score_divers) # - # ### BVSB # + np.random.seed(seed); tf.random.set_seed(seed) bvsb_model = BVSB(get_base_model) bvsb_model.fit(Xs, ys, **fit_params) bvsb = OrderedQuery() bvsb.fit(Xt, Xs, ys, n_queries, sample_weight=bvsb_model.uncertainties(Xt)) queries = bvsb.predict(n_queries) model.fit(Xs, ys, Xt[queries], yt[queries], **fit_params); test_index = np.array(list(set(np.arange(len(Xt))) - set(queries))) y_pred = convert_y(model.predict(Xt[test_index])) y_true = convert_y(yt[test_index]) score_bvsb = accuracy_score(y_true, y_pred) print("Score on Unlabeled Target: %.3f"%score_bvsb) # - # ### K-centers # + np.random.seed(seed); tf.random.set_seed(seed) Xt_emb = bvsb_model.embeddings(Xt) Xs_emb = bvsb_model.embeddings(Xs) kcenters = KCentersQuery(nn_algorithm="kdt-forest", n_trees=50) kcenters.fit(Xt_emb, Xs_emb, ys, n_queries) queries = kcenters.predict(n_queries) model.fit(Xs, ys, Xt[queries], yt[queries], **fit_params); test_index = np.array(list(set(np.arange(len(Xt))) - set(queries))) y_pred = convert_y(model.predict(Xt[test_index])) y_true = convert_y(yt[test_index]) score_kcenters = accuracy_score(y_true, y_pred) print("Score on Unlabeled Target: %.3f"%score_kcenters) # - # ### K-medoids W+E # + np.random.seed(seed); tf.random.set_seed(seed) Xt_emb = bvsb_model.embeddings(Xt) Xs_emb = bvsb_model.embeddings(Xs) kmedoids = KMedoidsAccelerated(verbose=1, nn_algorithm="kdt-forest", n_trees=50, batch_size_init=5000) kmedoids.fit(Xt_emb, Xs_emb, ys, n_queries, sample_weight=bvsb_model.uncertainties(Xt)) queries = kmedoids.predict(n_queries) model.fit(Xs, ys, Xt[queries], yt[queries], **fit_params); test_index = np.array(list(set(np.arange(len(Xt))) - set(queries))) y_pred = convert_y(model.predict(Xt[test_index])) y_true = convert_y(yt[test_index]) score_kmedoids = accuracy_score(y_true, y_pred) print("Score on Unlabeled Target: %.3f"%score_kmedoids) # - # ### AADA discriminator = load_model("../datasets/models_digits/discriminator.h5") task = load_model("../datasets/models_digits/task.h5") y_disc = discriminator.predict(Xt).ravel() y_task = task.predict(Xt) # + np.random.seed(seed); tf.random.set_seed(seed) aada = OrderedQuery() aada.fit(Xt, Xs, ys, n_queries, sample_weight=AADA(y_task, y_disc).uncertainties(Xt)) queries = aada.predict(n_queries) model.fit(Xs, ys, Xt[queries], yt[queries], **fit_params); test_index = np.array(list(set(np.arange(len(Xt))) - set(queries))) y_pred = convert_y(model.predict(Xt[test_index])) y_true = convert_y(yt[test_index]) score_aada = 
accuracy_score(y_true, y_pred) print("Score on Unlabeled Target: %.3f"%score_aada) # - # ## Results Summary # + np.random.seed(seed) fig, axes = plt.subplots(2, 3, figsize=(16, 8)) for method, ax, name, score in zip([kmedoids, random, kcenters, divers, kmeans, bvsb], axes.ravel(), ["K-medoids", "Random", "K-centers", "Diversity", "K-means", "BVSB"], [score_kmedoids, score_random, score_kcenters, score_divers, score_kmeans, score_bvsb]): queries = method.predict(n_queries) ax.plot(Xs_pca[:, 0], Xs_pca[:, 1], '.', ms=8, label="Source", alpha=0.5) ax.plot(Xt_pca[:, 0], Xt_pca[:, 1], '.', ms=8, label="Target", alpha=0.5) ax.plot(Xt_pca[queries, 0], Xt_pca[queries, 1], 's', ms=10, markerfacecolor=(0,0,0,0.5), label="Queries", markeredgecolor=(0,0,0,1)) ax.set_yticklabels([]) ax.set_xticklabels([]) ax.tick_params(direction ='in') ax.set_title("%s -- Score: %.3f"%(name, score), fontsize=16) if method.__class__.__name__ == "KMedoidsAccelerated": lgd = ax.legend(loc='upper left', framealpha=1, fontsize=14) lgd.legendHandles[0]._legmarker.set_markersize(20) lgd.legendHandles[1]._legmarker.set_markersize(20) plt.show()
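# A hedged side-by-side view of the accuracies collected above (a minimal sketch reusing the score_* variables and the matplotlib import from earlier cells):

names = ["K-medoids", "Random", "K-centers", "Diversity", "K-means", "BVSB", "AADA"]
scores = [score_kmedoids, score_random, score_kcenters, score_divers, score_kmeans, score_bvsb, score_aada]
plt.figure(figsize=(8, 3))
plt.bar(names, scores)
plt.ylabel("Accuracy on unlabeled target")
plt.show()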
notebooks/Digits.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.11 64-bit (''CryptoSent'': conda)' # language: python # name: python3 # --- import pandas as pd sent=pd.read_pickle(r'C:\Users\Ben\Desktop\Diplomatiki\CryptoSent\Datasets\Datasets Heisenberg\submissions_heisenberg.pickle') import os import sys import numpy as np import tensorflow as tf from sklearn import preprocessing import matplotlib.pyplot as plt tf.random.set_seed(123) np.random.seed(123) from tqdm import tqdm import datetime tqdm.pandas() # connect local PATH p = os.path.abspath(r'C:\Users\Ben\Desktop\Diplomatiki') sys.path.append(str(p)) from CryptoSent.tools.crypto_coin_actual import get_btc_actual_hourly from CryptoSent.tools import evaluation as eval btc=get_btc_actual_hourly() def result_processing(sentiment,coin_price,resample_period='d'): sdmin=datetime.datetime.strftime(sentiment.index.min(),'%Y-%m-%d') sdmax=datetime.datetime.strftime(sentiment.index.max(),'%Y-%m-%d') sent=sentiment.resample(resample_period).sum() btc=coin_price[sdmin:sdmax].resample(resample_period).mean() merres=pd.concat([sent,btc],axis=1) merres.loc[:,'avg_hl_diff']=merres.avg_hl.diff() merres.loc[:,'avg_hl_pct_change']=merres.avg_hl.pct_change() merres.loc[:,'sent_db_shift']=sent.shift(1) merres.loc[:,'sent_db_ptc_change']=sent.pct_change() return merres merres=result_processing(sent.polarity_lr,btc.avg_hl,resample_period='d') merres=merres[['polarity_lr','avg_hl']] ft_pred=2 validate_merres=merres.tail(ft_pred) merres.drop(merres.tail(ft_pred).index,inplace=True) def custom_ts_multi_data_prep(dataset, target, start, end, window, horizon): X = [] y = [] start = start + window if end is None: end = len(dataset) - horizon for i in range(start, end): indices = range(i-window, i) X.append(dataset[indices]) indicey = range(i+1, i+1+horizon) y.append(target[indicey]) return np.array(X), np.array(y) # scale between 0 and 1 x_scaler = preprocessing.MinMaxScaler() y_scaler = preprocessing.MinMaxScaler() dataX = x_scaler.fit_transform(merres) dataY = y_scaler.fit_transform(merres[['avg_hl']]) hist_window = 4 horizon = ft_pred TRAIN_SPLIT = 600 x_train_multi, y_train_multi = custom_ts_multi_data_prep( dataX, dataY, 0, TRAIN_SPLIT, hist_window, horizon) x_val_multi, y_val_multi = custom_ts_multi_data_prep( dataX, dataY, TRAIN_SPLIT, None, hist_window, horizon) print ('Single window of past history') print(x_train_multi[0]) print ('\n Target horizon') print (y_train_multi[0]) # + BATCH_SIZE = 256 BUFFER_SIZE = 150 train_data_multi = tf.data.Dataset.from_tensor_slices((x_train_multi, y_train_multi)) train_data_multi = train_data_multi.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat() val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi)) val_data_multi = val_data_multi.batch(BATCH_SIZE).repeat() # - Bi_lstm_model = tf.keras.models.Sequential([ tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(10, return_sequences=False), input_shape=x_train_multi.shape[-2:]), #tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(5)), #tf.keras.layers.Dense(5, activation='tanh'), tf.keras.layers.Dropout(0.2), tf.keras.layers.LayerNormalization(), #tf.keras.layers.MaxPool2D(pool_size=(2, 2),strides=(1, 1), padding='valid'), tf.keras.layers.Dense(units=horizon), ]) Bi_lstm_model.compile(optimizer='adam', loss='mse') x_train_multi.shape[-2:] model_path = 
r'C:\Users\Ben\Desktop\Diplomatiki\CryptoSent\Papers\hands-on-time-series-analylsis-python-master\Chapter 7\Bidirectional_LSTM_Multivariate.h5' EVALUATION_INTERVAL = 200 EPOCHS = 20 history = Bi_lstm_model.fit(train_data_multi, epochs=EPOCHS, steps_per_epoch=EVALUATION_INTERVAL, validation_data=val_data_multi, validation_steps=50, verbose=1,callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='min'), tf.keras.callbacks.ModelCheckpoint(model_path, monitor='val_loss', save_best_only=True, mode='min', verbose=0)]) Trained_model = tf.keras.models.load_model(model_path) Trained_model.summary() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('Model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train loss', 'validation loss'], loc='upper left') plt.rcParams["figure.figsize"] = [16,9] plt.show() # + data_val = x_scaler.fit_transform(merres.tail(hist_window)) val_rescaled = data_val.reshape(1, data_val.shape[0], data_val.shape[1]) Predicted_results = Trained_model.predict(val_rescaled) # - Predicted_results Predicted_results_Inv_trans = y_scaler.inverse_transform(Predicted_results) Predicted_results_Inv_trans from sklearn import metrics def timeseries_evaluation_metrics_func(y_true, y_pred): def mean_absolute_percentage_error(y_true, y_pred): y_true, y_pred = np.array(y_true), np.array(y_pred) return np.mean(np.abs((y_true - y_pred) / y_true)) * 100 print('Evaluation metric results:-') print(f'MSE is : {metrics.mean_squared_error(y_true, y_pred)}') print(f'MAE is : {metrics.mean_absolute_error(y_true, y_pred)}') print(f'RMSE is : {np.sqrt(metrics.mean_squared_error(y_true, y_pred))}') print(f'MAPE is : {mean_absolute_percentage_error(y_true, y_pred)}') print(f'R2 is : {metrics.r2_score(y_true, y_pred)}',end='\n\n') len(Predicted_results_Inv_trans[0]) len(validate_merres['avg_hl']) timeseries_evaluation_metrics_func(validate_merres['avg_hl'],Predicted_results_Inv_trans[0]) validate_merres['avg_hl'] Predicted_results_Inv_trans[0] plt.plot( list(validate_merres['avg_hl'])) plt.plot( list(Predicted_results_Inv_trans[0])) plt.title("Actual vs Predicted") plt.ylabel("Traffic volume") plt.legend(('Actual','predicted')) plt.show() validate_merres.avg_hl.plot() # # Example from the book # import pandas as pd import numpy as np import tensorflow as tf from sklearn import preprocessing import matplotlib.pyplot as plt tf.random.set_seed(123) np.random.seed(123) df = pd.read_csv(r'C:\Users\Ben\Desktop\Diplomatiki\CryptoSent\Papers\hands-on-time-series-analylsis-python-master\Data\Metro_Interstate_Traffic_Volume.csv') df.head() df.describe() df.drop_duplicates(subset=['date_time'], keep=False,inplace=True) holiday_le = preprocessing.LabelEncoder() #labeling df['holiday_le'] = holiday_le.fit_transform(df['holiday']) weather_main_le = preprocessing.LabelEncoder() df['weather_main_le'] = weather_main_le.fit_transform(df['weather_main']) weather_description_le = preprocessing.LabelEncoder() df['weather_description_le'] = weather_description_le.fit_transform(df['weather_description']) df = df[['rain_1h','temp', 'snow_1h', 'clouds_all', 'holiday_le', 'weather_main_le', 'weather_description_le','traffic_volume']] validate = df[['rain_1h','temp', 'snow_1h', 'clouds_all', 'holiday_le', 'weather_main_le', 'weather_description_le','traffic_volume']].tail(10) df.drop(df.tail(10).index,inplace=True) def custom_ts_multi_data_prep(dataset, target, start, end, window, horizon): X = [] y = [] start = start + window if end is None: 
end = len(dataset) - horizon for i in range(start, end): indices = range(i-window, i) X.append(dataset[indices]) indicey = range(i+1, i+1+horizon) y.append(target[indicey]) return np.array(X), np.array(y) # scale between 0 and 1 x_scaler = preprocessing.MinMaxScaler() y_scaler = preprocessing.MinMaxScaler() dataX = x_scaler.fit_transform(df[['rain_1h','temp', 'snow_1h', 'clouds_all', 'holiday_le', 'weather_main_le', 'weather_description_le','traffic_volume']]) dataY = y_scaler.fit_transform(df[['traffic_volume']]) hist_window = 48 horizon = 10 TRAIN_SPLIT = 30000 x_train_multi, y_train_multi = custom_ts_multi_data_prep( dataX, dataY, 0, TRAIN_SPLIT, hist_window, horizon) x_val_multi, y_val_multi = custom_ts_multi_data_prep( dataX, dataY, TRAIN_SPLIT, None, hist_window, horizon) print ('Single window of past history') print(x_train_multi[0]) print ('\n Target horizon') print (y_train_multi[0]) # + BATCH_SIZE = 256 BUFFER_SIZE = 150 train_data_multi = tf.data.Dataset.from_tensor_slices((x_train_multi, y_train_multi)) train_data_multi = train_data_multi.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat() val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi)) val_data_multi = val_data_multi.batch(BATCH_SIZE).repeat() # - Bi_lstm_model = tf.keras.models.Sequential([ tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(150, return_sequences=True), input_shape=x_train_multi.shape[-2:]), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(50)), tf.keras.layers.Dense(20, activation='tanh'), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(units=horizon), ]) Bi_lstm_model.compile(optimizer='adam', loss='mse') model_path = r'C:\Users\Ben\Desktop\Diplomatiki\CryptoSent\Papers\hands-on-time-series-analylsis-python-master\Chapter 7\Bidirectional_LSTM_Multivariate.h5' EVALUATION_INTERVAL = 20 EPOCHS = 15 history = Bi_lstm_model.fit(train_data_multi, epochs=EPOCHS, steps_per_epoch=EVALUATION_INTERVAL, validation_data=val_data_multi, validation_steps=50, verbose=1,callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='min'), tf.keras.callbacks.ModelCheckpoint(model_path, monitor='val_loss', save_best_only=True, mode='min', verbose=0)]) Trained_model = tf.keras.models.load_model(model_path) # Show the model architecture Trained_model.summary() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('Model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train loss', 'validation loss'], loc='upper left') plt.rcParams["figure.figsize"] = [16,9] plt.show() data_val = x_scaler.fit_transform(df[['rain_1h','temp', 'snow_1h', 'clouds_all', 'holiday_le', 'weather_main_le', 'weather_description_le','traffic_volume']].tail(48)) val_rescaled = data_val.reshape(1, data_val.shape[0], data_val.shape[1]) Predicted_results = Trained_model.predict(val_rescaled) Predicted_results Predicted_results_Inv_trans = y_scaler.inverse_transform(Predicted_results) Predicted_results_Inv_trans from sklearn import metrics def timeseries_evaluation_metrics_func(y_true, y_pred): def mean_absolute_percentage_error(y_true, y_pred): y_true, y_pred = np.array(y_true), np.array(y_pred) return np.mean(np.abs((y_true - y_pred) / y_true)) * 100 print('Evaluation metric results:-') print(f'MSE is : {metrics.mean_squared_error(y_true, y_pred)}') 
print(f'MAE is : {metrics.mean_absolute_error(y_true, y_pred)}') print(f'RMSE is : {np.sqrt(metrics.mean_squared_error(y_true, y_pred))}') print(f'MAPE is : {mean_absolute_percentage_error(y_true, y_pred)}') print(f'R2 is : {metrics.r2_score(y_true, y_pred)}',end='\n\n') timeseries_evaluation_metrics_func(validate['traffic_volume'],Predicted_results_Inv_trans[0]) plt.plot( list(validate['traffic_volume'])) plt.plot( list(Predicted_results_Inv_trans[0])) plt.title("Actual vs Predicted") plt.ylabel("Traffic volume") plt.legend(('Actual','predicted')) plt.show()
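# Since scikit-learn 0.24 the MAPE computed manually above is also available as a built-in metric. A hedged equivalent (note the built-in returns a fraction, so it is multiplied by 100 here to match the percentage printed by timeseries_evaluation_metrics_func):

from sklearn.metrics import mean_absolute_percentage_error

print('MAPE is :', 100 * mean_absolute_percentage_error(validate['traffic_volume'],
                                                        Predicted_results_Inv_trans[0]))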
SandBox/multivarite_LSTM_ts.ipynb