repo_name
stringlengths 6
77
| path
stringlengths 8
215
| license
stringclasses 15
values | cells
list | types
list |
|---|---|---|---|---|
masve/saav-deliveries
|
project/Weather Data.ipynb
|
mit
|
[
"Weather and Motor Vehicle Collisions",
"import pandas as pd\nimport numpy as np\nimport datetime\nfrom datetime import date\nfrom dateutil.rrule import rrule, DAILY\nfrom __future__ import division\nimport geoplotlib as glp\nfrom geoplotlib.utils import BoundingBox, DataAccessObject\n\npd.set_option('display.max_columns', None)\n%matplotlib inline ",
"Download weather data",
"start_date = date(2012, 7, 1)\nend_date = date(2016, 2, 29)\n\n# data = pd.DataFrame()\nframes = []\nurl_template = 'https://www.wunderground.com/history/airport/KNYC/%s/%s/%s/DailyHistory.html?req_city=New+York&req_state=NY&req_statename=New+York&reqdb.zip=10001&reqdb.magic=4&reqdb.wmo=99999&format=1.csv'\n\nmonth = \"\"\n\nfor dt in rrule(DAILY, dtstart=start_date, until=end_date):\n if (month != dt.strftime(\"%m\")):\n month = dt.strftime(\"%m\")\n print 'Downloading to memory: ' + dt.strftime(\"%Y-%m\") \n frames.append(pd.read_csv(url_template % (dt.strftime(\"%Y\"),dt.strftime(\"%m\"), dt.strftime(\"%d\"))))\n\nprint \"Saving data to csv...\"\ndata = pd.concat(frames)\ndata.to_csv('weather_data_nyc.csv', sep=',')",
"Cleaning the weather dataset\nConvert weather DateUTC to local time",
"from datetime import datetime\nfrom dateutil import tz\n\nweather = pd.read_csv('datasets/weather_data_nyc_clean.csv')\n\ndef UTCtoActual(utcDate):\n from_zone = tz.gettz('UTC')\n to_zone = tz.gettz('America/New_York')\n \n utc = datetime.strptime(utcDate.DateUTC, '%m/%d/%Y %H:%M:%S')\\\n .replace(tzinfo=from_zone)\\\n .astimezone(to_zone)\n s = pd.Series([utc.year, utc.month, utc.day, utc.hour])\n s.columns = ['Year', 'Month', 'Day', 'Hour']\n return s\n \n#weather['DateActual'] = weather.DateUTC.map()\n\nweather[['Year', 'Month', 'Day', 'Hour']] = weather.apply(UTCtoActual, axis=1)\nweather.to_csv('datasets/weather_data_nyc_clean2.csv')",
"Merge weather and NYPD MVC datasets",
"incidents = pd.read_csv('datasets/NYPD_Motor_Vehicle_Collisions.csv')\nweather = pd.read_csv('datasets/weather_data_nyc_clean2.csv')\nweather.head(1)\n\nweather[(weather.Year == 2015) & (weather.Month == 11) & (weather.Day == 27)]\n\nfeatures0 = ['Conditions', 'TemperatureC']\nfeatures = ['Conditions', 'Precipitationmm',\\\n 'TemperatureC', 'VisibilityKm']\n\ndef lookup_weather2(year, month, day, hour):\n w = weather[(weather.Year == year) & (weather.Month == month) & (weather.Day == day) & (weather.Hour == hour)]\n return w\n\ndef lookup_weather(date, time):\n month = int(date.split('/')[0])\n day = int(date.split('/')[1])\n year = int(date.split('/')[2])\n hour = int(time.split(':')[0])\n d = lookup_weather2(year, month, day, hour).head(1)\n if (d.empty):\n dt_back = datetime.datetime(year, month, day, hour) - datetime.timedelta(hours=1)\n dt_forward = datetime.datetime(year, month, day, hour) + datetime.timedelta(hours=1)\n \n d_back = lookup_weather2(dt_back.year, dt_back.month, dt_back.day, dt_back.hour)\n if (not d_back.empty): return d_back\n \n d_forward = lookup_weather2(dt_forward.year, dt_forward.month, dt_forward.day, dt_forward.hour)\n if (not d_forward.empty): return d_forward\n return d\n\n\n\ndef merge_weather(incident):\n date = incident.DATE\n time = incident.TIME\n #print \"0\"\n w = lookup_weather(date, time)\n #[unnamed, condition, dateUTC, Dew, Events, Gust, Humidity,Precipitationmm,Sea_Level_PressurehPa, TemperatureC] = w.values[0]\n\n #print \"1\"\n try:\n #print \"2\"\n #print w\n con = \"-\"\n temp = \"-\"\n rainmm = \"-\"\n viskm = \"-\"\n #print \"2.5\"\n if (not pd.isnull(w['Conditions'].iloc[0])):\n con = w['Conditions'].iloc[0]\n if (not pd.isnull(w['TemperatureC'].iloc[0])):\n temp = w['TemperatureC'].iloc[0]\n if (not pd.isnull(w['Precipitationmm'].iloc[0])):\n rainmm = w['Precipitationmm'].iloc[0]\n if (not pd.isnull(w['VisibilityKm'].iloc[0])):\n viskm = w['VisibilityKm'].iloc[0]\n \n #print 'con %s, temp %s, rainmm %s, viskm %s' 
% (con, temp, rainmm, viskm)\n \n #print \"2.75\"\n s = pd.Series([con, rainmm, temp, viskm])\n #print \"3\"\n #print str(len(w.values[0]))\n #s = pd.Series(w.values[0])\n #s = pd.Series([w['Conditions'].iloc[0], w['Dew PointC'].iloc[0], w['Gust SpeedKm/h'].iloc[0]])\n\n #s.columns = features\n return s\n except:\n #print \"4\"\n print date + \"x\" + time\n s = pd.Series([None,None,None,None])\n #s = pd.Series([\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"])\n #s = pd.Series([])\n #s.columns = features\n return s\n \n \n \n\n#lookup_weather2(2016, 2, 14, 7)\n#lookup_weather('03/14/2016', '3:27').values[0]\n#[unnamed, condition, dateUTC, Dew, Events, Gust, Humidity,Precipitationmm,Sea_Level_PressurehPa, TemperatureC] = lookup_weather('01/27/2016', '3:27').values[0]\n\nprint \"Applying weather data to incidents...\"\nincidents[features] = incidents[incidents.DATE.str.split('/').str.get(2) != '2016'].apply(merge_weather, axis=1)\nprint \"Saving weather in-riched incident data...\"\nincidents.to_csv('datasets/NYPD_Motor_Vehicle_Collisions_weather3.csv', sep=',')\n\nincidents[incidents.DATE.str.split('/').str.get(2) == '2016']",
"Make some nice data analysis",
"# Read dataset\nincidents = pd.read_csv('datasets/NYPD_Motor_Vehicle_Collisions_weather3.csv')\n# Filter 2016 incidents\nincidents = incidents[(incidents.DATE.str.split('/').str.get(2) != '2016') \n & (pd.notnull(incidents.Conditions))]\n\n# Distribution of incidents by weather conditions\nys = []\nxs = []\n\nfor c in incidents.Conditions.unique():\n mask = (incidents.Conditions == c)\n filtered_incidents = incidents[mask]\n ys.append(len(filtered_incidents.index))\n xs.append(c)\n\ndf = pd.DataFrame(pd.Series(ys, index=xs, name=\"Incidents by weather conditions\").sort_values())\ndf.plot(kind='barh', figsize=(8,8))\n\ndf",
"Now lets try to find out if there are any condition that causes more incidents than others. We do this by plotting out heatmaps to get an idea of the distributions in the NYC area",
"def plot_zip_weather(condition, data):\n ys = []\n xs = []\n\n for z in data['ZIP CODE'].unique():\n mask = (data['ZIP CODE'] == z)\n filtered_incidents = data[mask]\n ys.append(len(filtered_incidents.index))\n xs.append(z)\n\n df = pd.DataFrame(pd.Series(ys, index=xs, name=\"%s incidents by zip code\" % condition).sort_values())\n df.plot(kind='barh', figsize=(8,32))\n\ndef draw_kde(data):\n bbox = BoundingBox(north=data.LATITUDE.max()-0.055,\\\n west=data.LONGITUDE.min()+0.055,\\\n south=data.LATITUDE.min()-0.055,\\\n east=data.LONGITUDE.max()+0.055)\n\n coords = {'lat': data.LATITUDE.values.tolist(), 'lon': data.LONGITUDE.values.tolist()}\n \n glp.kde(coords, bw=5, cut_below=1e-4)\n glp.set_bbox(bbox)\n glp.inline()\n \ndef plot_stuff(conditions, data):\n print \"%s conditions\" % conditions \n plot_zip_weather(conditions, data)\n draw_kde(data)\n\nsnowy = incidents[incidents['Conditions'].str.contains('Snow')]\nrainy = incidents[incidents['Conditions'].str.contains('Rain')]\nclear = incidents[incidents['Conditions'].str.contains('Clear')]\ncloudy = incidents[(incidents['Conditions'].str.contains('Cloud')) | (incidents['Conditions'].str.contains('Overcast'))]\nhaze = incidents[incidents['Conditions'].str.contains('Haze')]\nplot_stuff(\"Snowy\", snowy)\nplot_stuff(\"Rainy\", rainy)\nplot_stuff(\"Clear\", clear)\nplot_stuff(\"Cloudy\", cloudy)\nplot_stuff(\"Hazy\", haze)",
"Finding the ratio between conditions that resulted in an incident. Borough level",
"# What is the probability of an incident based on the weather condition?\n# Normalize incidents based on the conditions.\n\nfrom collections import Counter\nConditionIncidentCounter = Counter(incidents.Conditions.values)\n\np_incident = {}\nfor k,v in ConditionIncidentCounter.most_common():\n p_incident[k] = v/len(incidents)\n\np_incident\n\n# Do the same again but for individual areas of NYC\np_incident_district = {}\nl = len(incidents)\nfor district in incidents[pd.notnull(incidents.BOROUGH)].BOROUGH.unique():\n filtered = incidents[incidents.BOROUGH == district]\n counter = Counter(filtered.Conditions.values)\n p_incident_district[district] = {}\n for k,v in counter.most_common():\n p_incident_district[district][k] = v / len(list(counter.elements()));\n \np_incident_district\n\n# Are there any areas in NYC that experience incidents based \n# on a condition unusually higher or lower compared to other areas?\n# Calculate the ratio of incidents based on the condition.\ndef calcRatioForDistrict(districtCounter, overAllCounter, district):\n ys = []\n xs = []\n for con in incidents.Conditions.unique():\n ys.append(districtCounter[con] / overAllCounter[con])\n xs.append(con)\n return pd.Series(ys, index=xs)\n \nseries = {}\nfor b in incidents[pd.notnull(incidents.BOROUGH)].BOROUGH.unique():\n series[b] = calcRatioForDistrict(p_incident_district[b], p_incident, b)\n\ndf = pd.DataFrame(series)\ndf.plot(kind=\"bar\", subplots=True, figsize=(14,14),layout=(7,2), legend=False,sharey=True)",
"Let's try to look at zip codes in Brooklyn only",
"# What is the probability of an incident based on the weather condition?\n# Normalize incidents based on the conditions.\n\nfrom collections import Counter\nborough = incidents[incidents.BOROUGH == 'MANHATTAN']\nConditionIncidentCounter = Counter(borough.Conditions.values)\n\np_incident = {}\nfor k,v in ConditionIncidentCounter.most_common():\n p_incident[k] = v/len(borough)\n\np_incident\n\n# Do the same again but for individual areas of NYC\np_incident_borough_zip = {}\nl = len(borough)\nfor z in borough[pd.notnull(incidents['ZIP CODE'])]['ZIP CODE'].unique():\n filtered = borough[incidents['ZIP CODE'] == z]\n counter = Counter(filtered.Conditions.values)\n# z = str(z).split(\".\")[0]\n p_incident_borough_zip[z] = {}\n for k,v in counter.most_common():\n p_incident_borough_zip[z][k] = v / len(list(counter.elements()));\n \np_incident_borough_zip\n\n# Are there any areas in NYC that experience incidents based \n# on a condition unusually higher or lower compared to other areas?\n# Calculate the ratio of incidents based on the condition.\ndef calcRatioForDistrict(districtCounter, overAllCounter, district):\n ys = []\n xs = []\n for con in incidents.Conditions.unique():\n if (con in districtCounter):\n ys.append(districtCounter[con] / overAllCounter[con])\n else:\n ys.append(0)\n xs.append(con)\n return pd.Series(ys, index=xs)\n \nseries = {}\nfor z in borough[pd.notnull(incidents['ZIP CODE'])]['ZIP CODE'].unique():\n series[z] = calcRatioForDistrict(p_incident_borough_zip[z], p_incident, b)\n\ndf = pd.DataFrame(series)\n\ndf.plot(kind=\"bar\", subplots=True, figsize=(14,100), layout=(50,2), legend=False, sharey=False)\n\nworst_day = incidents.DATE.value_counts().index[0]\nworst_day_count = incidents.DATE.value_counts()[0]\n\nincidents[incidents.DATE == worst_day]\n\nincidents.DATE.value_counts()\n\nincidents['CONTRIBUTING FACTOR VEHICLE 1'].unique()",
"Looking at weather based causes",
"# Read dataset\nincidents = pd.read_csv('datasets/NYPD_Motor_Vehicle_Collisions_weather4.csv')\n# Filter 2016 incidents\nincidents = incidents[(incidents.DATE.str.split('/').str.get(2) != '2016') \n & (pd.notnull(incidents.Conditions))]\n\ndef count_contributing(cont):\n temp = incidents[(incidents['CONTRIBUTING FACTOR VEHICLE 1'] == cont) | \\\n (incidents['CONTRIBUTING FACTOR VEHICLE 2'] == cont) | \\\n (incidents['CONTRIBUTING FACTOR VEHICLE 3'] == cont) | \\\n (incidents['CONTRIBUTING FACTOR VEHICLE 4'] == cont) | \\\n (incidents['CONTRIBUTING FACTOR VEHICLE 5'] == cont) ]\n return temp.shape[0]\n\nprint \"Accidents caused by Pavement Slippery: %s\" % count_contributing('Pavement Slippery')\nprint \"Accidents caused by Glare: %s \" % count_contributing('Glare')\nprint \"Accidents caused by Pavement Defective: %s \" % count_contributing('Pavement Defective')",
"There seems to be a lot of incidents caused by slippery pavement. Let's look at the weather conditions for those incidents.",
"weather_incidents = incidents[(incidents['CONTRIBUTING FACTOR VEHICLE 1'] == 'Pavement Slippery') | \\\n (incidents['CONTRIBUTING FACTOR VEHICLE 2'] == 'Pavement Slippery') | \\\n (incidents['CONTRIBUTING FACTOR VEHICLE 3'] == 'Pavement Slippery') | \\\n (incidents['CONTRIBUTING FACTOR VEHICLE 4'] == 'Pavement Slippery') | \\\n (incidents['CONTRIBUTING FACTOR VEHICLE 5'] == 'Pavement Slippery') ]\n\n# Distribution of incidents by weather conditions\nys = []\nxs = []\n\nfor c in weather_incidents.Conditions.unique():\n mask = (weather_incidents.Conditions == c)\n filtered_incidents = weather_incidents[mask]\n ys.append(filtered_incidents.shape[0])\n xs.append(c)\n\ndf = pd.DataFrame(pd.Series(ys, index=xs, name=\"Weather conditions during 'slippery pavement' based incidents\").sort_values())\ndf.plot(kind='barh', figsize=(8,8))\n\n# Export to json for d3 viz\nfrom collections import OrderedDict\nimport json\nwith open('datasets/slippery_pavement.json', 'w') as fp:\n json.dump(OrderedDict(sorted(dict(zip(xs, ys)).items(), key=lambda x: x[1], reverse=True)), fp)",
"Okay, the overcast and clear weather still are the top 2. The assumption that the type of incidents are caused by weather conditions might still hold true. It could be that top 2 are caused by pavement conditions independent of the weather, such as water or oil on the roads. In any case, lets try to plot out where these incidents occur.",
"def draw_dot(data, type_color):\n bbox = BoundingBox(north=incidents.LATITUDE.max()-0.055,\\\n west=incidents.LONGITUDE.min()+0.055,\\\n south=incidents.LATITUDE.min()-0.055,\\\n east=incidents.LONGITUDE.max()+0.055)\n \n gridDots = {'lat': data.LATITUDE.values.tolist(), 'lon': data.LONGITUDE.values.tolist()}\n \n glp.set_bbox(bbox)\n glp.dot(gridDots, color=type_color)\n \ndef get_spaced_colors(n):\n max_value = 16581375 #255**3\n interval = int(max_value / n)\n colors = [hex(I)[2:].zfill(6) for I in range(0, max_value, interval)]\n \n return [[int(i[:2], 16), int(i[2:4], 16), int(i[4:], 16), 255] for i in colors]\n\ncolormap = get_spaced_colors(weather_incidents['Conditions'].unique().size)\n \nfor idx, wi in enumerate(weather_incidents['Conditions'].unique().tolist()):\n filtered = weather_incidents[weather_incidents['Conditions'] == wi]\n print \"%s %s\" % (wi, str(len(filtered.index)))\n draw_dot(filtered, colormap[idx])\n draw_dot(filtered, 'r')\n glp.inline()\n\n#glp.inline()\n\nbbox = BoundingBox(north=incidents.LATITUDE.max()-0.055,\\\n west=incidents.LONGITUDE.min()+0.055,\\\n south=incidents.LATITUDE.min()-0.055,\\\n east=incidents.LONGITUDE.max()+0.055)\n\nglp.set_bbox(bbox)\n\nglp.kde({'lat': weather_incidents.LATITUDE.values.astype('float'), 'lon': weather_incidents.LONGITUDE.values.astype('float')},bw=5, cut_below=1e-4)\nglp.inline()\n\n",
"Looking at the intersections we can find those most dangerous based on the number of incidents happening there which are in some way caused by slippery pavement.",
"top10 = weather_incidents.LOCATION.value_counts()[:20]\n\ntop10.to_csv('datasets/top20slippery')",
"Ignoring incidents happening outside intersections the top 3 looks like this:\n\n40.5732661, -74.1469613 (Google Street View)\n40.8726198, -73.9046763 (Google Street View)\n40.7460347, -73.9344002 (Google Street View)\n\nWhat we find is - that atleast for top 3 - incidents occouring because of slippery pavement happens because of steep angled roads leading into a intersection. Or very bad road pavement conditions.",
"locations = weather_incidents[weather_incidents.LOCATION.isin(top10.index)].drop_duplicates('LOCATION','first')\\\n[['TIME','BOROUGH','ZIP CODE','LATITUDE','LONGITUDE','LOCATION','ON STREET NAME','CROSS STREET NAME']]\n\nloca = locations.copy()\n\n\n\ndef m(r):\n return top10[top10.index == r.LOCATION].iloc[0]\n\nloca['COUNT'] = loca.apply(m, axis=1)\n\nloca.sort_values(by='COUNT', ascending=False).to_csv('../app/datasets/slippery.csv', sep=',')",
"fin",
"lightsnow = incidents[incidents['Conditions'] == 'Light Snow']\nprint \"Accidents happening because of light snow: %s\" % str(lightsnow.size)\n\nprint \"Injuries: %s\" % lightsnow['NUMBER OF PERSONS INJURED'].sum()\nprint \"Killed: %s\" % lightsnow['NUMBER OF PERSONS KILLED'].sum()\n\nprint \"Top intersections:\"\nlightsnow.LOCATION.value_counts()[:3]\n\nlightrain = incidents[incidents['Conditions'] == 'Light Rain']\nprint \"Accidents happening because of light rain: %s\" % str(lightrain.size)\n\nprint \"Injuries: %s\" % lightrain['NUMBER OF PERSONS INJURED'].sum()\nprint \"Killed: %s\" % lightrain['NUMBER OF PERSONS KILLED'].sum()\n\nprint \"Top intersections:\"\nlightrain.LOCATION.value_counts()[:3]"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
kingsgeocomp/code-camp
|
notebook-09-iteration.ipynb
|
mit
|
[
"Notebook-9: Loops and Iteration\nLesson Content\n\nTwo Ways to Iterate\nWhile Statement\nWhile Loop\nBreak and Continue\nLoop over a list \n\n\nFor Loop \nCode (Applied Geo-example)\n\nIn this lesson we cover the concept of iteration, which is basically the idea of repeating the same set of instructions until a certain condition is met. This sequence of instructions is also called a loop. \nThis is an extremely important and powerful concept, as it allows us to finally automate tasks! Remember that a fundamental feature of programmers is that they are lazy (refresh Larry Wall's \"Three Virtues\" that we saw in the first notebook!). The more you can delegate to the machine and avoid repeating boring and repetitive tasks yourself, the better!\nOf course, as the fantastic Randall Munroe likes to remind us, reality sometimes challenges this idea...\n\nTwo Ways to Iterate\nThe two most common ways to iterate (or repeat) a set of commands are the while loop and the for loop. That's why in Python the words while, for (and in) are reserved words and you can't use them for variable names. \nThe video below is a nice (if slightly scary) intro to each of the approaches; you might remember them the next time you're in the gym:\n\nThe 'right' approach you use usually depends on what sort of data structure you are iterating over and whether we know before we start how many times we want to repeat the commands. That might sound a little vague at this point, but come back and have a think about it once you have a better understanding of how loops work.\nSo let's first see how while loops work, then we'll look at actual loops in Python.\nWHILE Loops\nRemember if statements? When the Python interpreter finds an if statement in your code, it checks if the specified condition evalutes to True. If the condition is True then it runs the remainder of the indented code block following the if. 
The 'true' block of code is only run once.\nUsing the while statement, the 'true' block is run for as long as the condition evalutes to True. So if the statment continues to evaluation to True then the block of code is run again and again and again and again... and again until some stopping condition is reached (usually the while condition becomes False for 'some reason').\nThis allows us to finally do some interesting stuff in code:",
"counter = 1 # Starting condition: counter is at 1\n\nprint(\"The while loop is ready to begin.\")\n\nwhile counter <= 10: # Test the condition\n print(\"The counter is at: \" + str(counter))\n counter += 1 # Increment the counter\n \nprint(\"The while loop has ended.\")\n",
"If that looks confusing, don't worry! It's perfectly normal as that's your first example of iteration. \nLet's take a deeper look:\n- First, we defined a variable counter and we initialised it with a value of 1. \n- Then we used the while statement to check if the value of counter was less than or equal to 10. \n- Since that condition evaluated to True, we printed the value of counter and then added 1 to the counter. \n- The indendented block of code was then run again and again by Python until the while statement returned a False (this happened because the value of counter went all the way to 11).\n- After that Python simply continued to execute the code outside the while block (i.e. the last line of non-indented code)\nThe fundamental idea is thus: while this condition holds, repeat the instructions in the indented code block.\nMany beginner programmers get confused by the fact that the value of counter is increasing without the code advancing to the point where in prints out 'The while loop has ended'. Remeber: we are starting the instruction from the beginning of the while block. So the first time that we hit it, counter has value of 1 because this is what we set it to outside of the while loop. \nBut the second time the while conditional is evaluted, counter has been incremented to 2 because the last line of the while block is counter += 1! So we increased its value while inside the while block. At the beginning of the third iteration it will have a value of 3, while at its end it will be incremented to 4. And so on...\nCAVEAT: pay attention to how you write your while loops! They can potentially run forever (for as long as the condition they are evaluating is True) maxing out your machine's memory. For example:\n```python\ndon't run this!\nor if you do, save everything first\nand then be prepared to stop the code execution manually\n(usually by pressing CTRL+D or Cmd+D)\nin the terminale/console\nwhile True:\n print(\"Forever Loop! 
Yeeee!\")\n```\nA challenge for you!\nComplete the code below to run a while loop that prints only odd numbers under 10.",
"otherCounter = 1\n\nwhile ??? <= ???:\n print(\"This is an odd number: \" + str(???))\n otherCounter += 2\n\notherCounter = 1\n\nwhile otherCounter <= 10:\n print(\"This is an odd number: \" + str(otherCounter))\n otherCounter += 2",
"You can also run while loops decrementally until they meet a certain condition (run the code block to see how):",
"myThirdCounter = 10\n\nwhile myThirdCounter >= 0:\n print(\"Decrementing counter: \" + str(myThirdCounter))\n myThirdCounter -= 1",
"Break and Continue\nTo prematurely interrupt the execution of a while loop (before the while condition become false) you can use the break statement",
"myFourthCounter = 1\n\nwhile myFourthCounter < 10:\n \n print(str(myFourthCounter))\n myFourthCounter += 1\n \n if myFourthCounter == 5:\n print(\"Time to escape this madness!\")\n break",
"Nesting Conditions\nThat last example shows that you can 'nest' an if (if myFourthCounter) inside of a while loop -- this approach allows us to add much more complex logic to our code than we could before. Here's an example, but see if you can figure out what it will print out (and how) before you run the code block!",
"i=0\nwhile True:\n i += 1\n\n if i % 2 != 0:\n print( str(i) + \" is odd.\")\n if i == 9:\n print(\"\\t i is 9\")\n else:\n print(\"\\t i is not 9\")\n else:\n print( str(i) + \" is even.\")\n if i % 4 == 0:\n print(\"\\t i is divisible by 4.\")\n elif i == 8:\n print(\"\\t i is 8\")\n\n if i == 21:\n break",
"In terms of explanation:\n\n\nPython is going to keep running the code (while True) until it is told to stop by a break (which happens at i == 21). In fact, if you remove the break then you will crash jupyter because the computer will print out every even number to infinity (which the computer can’t handle because it runs out of memory).\n\n\n'Inside' the while loop there is a main if/else block:\nif i % 2 != 0:\n ... do something with odd numbers...\nelse:\n ... do something else with even numbers...\n\n\n'Inside' the odd numbers section we now have a second if/else block:\nif i == 9:\n ... do something if the odd number is 9...\nelse:\n ... do something else if the odd number is not 9...\n\n\nAnd it’s a similar story in the even numbers section:\nif i % 4 == 0:\n ... do something if the even number is divisible by 4...\nelif i == 8\n ... do something else if the even number is 8...\nWe'll not give you the answer to how to print out WOOT in place of 7 below, but at this point you all the clues you need. It’s the concept that is the hard part and following what’s going on when you start to nest conditions inside of conditions. We can talk this through more if anyone needs more help getting to grips with this!\n\n\ni and j in Loops\nIf you want to skip to the next iteration then you can continue the exection of a while loop using the continue statement. In the following example we are going to skip all even numbers and print WOOT! if we hit a lucky 7 (or any number divisible by 7). We'll break out of the loop after hitting the 21<sup>st</sup> iteration.\nOne other thing to notice is that we've switched from looooong counter names like myFourthCounter to i. A common programming trick (which is well-known and so actually increases the legibility of your code to others) is to use i and j for counters in loops (you can add k if you ever need a third level loop).",
"# So use i and j as counters because \n# this is a stylistic convention and \n# helps you to to write more concise code\n# (and be constructively lazy).\n\ni = 0\n\nwhile True:\n \n i += 1\n \n if i % 2 != 0:\n print(i)\n else:\n continue\n \n if i % 7 == 0:\n print(\"WOOT!\")\n \n if i == 25:\n break\n \nprint(\"Done!\")",
"How would you change the code above so that it printed only the odd number or 'WOOT!', but not both? In other words, change the code so that it prints:\n1\n3\n5\nWOOT!\n9\n11\n13\n15\n17\n19\nWOOT!",
"i = 0\n\nwhile True:\n \n i += 1\n \n if i % 2 != 0:\n if i % 7 == 0:\n print(\"WOOT!\")\n else:\n print(i)\n \n if i == 25:\n break\n \nprint(\"Done!\")",
"A challenge for you!\nNow, replace the ??? in the code below and use it to print only even numbers less than 22.",
"i = 0\nwhile True:\n i += 1\n \n if i % 2 != 0:\n continue\n if i == ???:\n break\n print(???)\n\nprint(\"Done!\")\n\ni = 0\nwhile True:\n i += 1\n \n if i % 2 != 0:\n continue\n if i == 22:\n break\n print(i)\n\nprint(\"Done!\")",
"Iterating over a List\nWhat you just saw with the while statement is a way of iterating: a way of repeating a certain set of instruction until a given condition is met. We can use to our advantage not only to print stuff, but also to 'iterate over' the elements in a list:",
"# remember our friends, the british computer scientists?\nbritishCompList = [\"babbage\", \"lovelace\", \"turing\"]\n\n# this is the condition python is going to check against\nstoppingCondition = len(britishCompList)\n\ncounter = 1\nwhile counter < stoppingCondition:\n print(britishCompList[counter] + \" was a british computer scientist\")\n # don't forget to increment the counter!!!\n counter += 1",
"Wow, lot of stuff in that chunk of code, eh? Well, once again, take a deep breath and go through it line by line.\nThe important bits are:\n- notice that this time we used the len of britishCompList as stopping condition, instead of specifying ourselves a number.\n- we accessed the items in the list with a regular index, like we have done in the past. The difference is that this time the index was the variable counter, as at each iteration counter assumes the value of 0, 1 ... until the stopping condition is met. This is equivalent to writing :\npython\nprint(britishCompList[0]) # on the first iteration\nprint(britishCompList[1]) # on the second iteration\nA challenge for you!\nBut wait a second... what about the great Babbage? Why isn't his name displayed? Certainly not because he's not worth a mention! Can you spot the reason why the iteration skipped him? Can you fix the code to include Babbage?\nHint: check (using print) the values of counter and britishCompList. What is the condition we are asking Python to use?",
"#your code here\n\ncounter = 0 # To include Babbage, we need to start counter from 0\nwhile counter < len(britishCompList):\n print(britishCompList[counter] + \" was a british computer scientist\")\n # don't forget to increment the counter!!!\n counter += 1",
"Other Overlooked Computers\nIf you think that being skipped over in the loop above was tough for old Babbage, then perhaps you might be interested to hear about the history of the NASA 'computers' who helped put a man on the moon and are only getting a film made about them in 2016.\n\nA challenge for you!\nComplete the following code:",
"counter = ???\nnonBritishProgrammers = [\"Torvald\", \"Knuth\", \"Swartz\"]\nstoppingCondition = len(???)\n\nwhile counter < stoppingCondition :\n print(\"This is a computer genius too: \" + nonBritishProgrammers[counter])\n # always remember to increment the counter!!!\n counter += 1\n\ncounter = 0\nnonBritishProgrammers = [\"Torvald\", \"Knuth\", \"Swartz\"]\nstoppingCondition = len(nonBritishProgrammers)\n\nwhile counter < stoppingCondition :\n print(\"This is a computer genius too: \" + nonBritishProgrammers[counter])\n # always remember to increment the counter!!!\n counter += 1",
"CAVEAT: An important condition to remember when iterating over a list is thus that lists are zero-indexed! If if you start you counter from 1 you will certainly miss the first item in the list (which has an index of 0). \nBut watch out! There's more:\nAnother challenge for you!\nCan you guess why I needed to subtract -1 to the list's len? \n[Hint: Check the condition again. Is the same as before? (Run the code below before continuing)]",
"counter = 0\nnonBritishProgrammers = [\"Torvald\", \"Knuth\", \"Swartz\"]\nstoppingCondition = len(nonBritishProgrammers) -1\n\nwhile counter <= stoppingCondition :\n print(\"These are geniuses too! \" + nonBritishProgrammers[counter])\n # always remember to increment the counter!!!\n counter +=1",
"We can see that from the code above the while condition is slightly different:\n while counter < stoppingCondition : versus while counter <= stoppingCondition\nBecause len counts the number of elements, if we use <= we will access the variable counter at each iteration counter with the value of 0, 1, 2, 3. This would result in an indexError since we only have three variables.\nFOR Loop\nWe've just seen that we can use a while loop to iterate over a list, but it's kind of clunky and inelegant. All those counters and things makes for a lot of extra typing, especially when you consider how terse Python's language usually is! Surely there must be another way?\nYou guessed right, my friend! Let me introduce you to the 'for ... in:' statement:",
"for programmer in britishCompList:\n print(programmer)",
"As you can see, the for loop statement is much more concise: you simply tell Python to repeat a certain instruction (print the list item in this example) for EVERY ITEM in A SEQUENCE. The sequence here is the list of British computer scientists britishCompList created in the code block above.\nNow, Python will stop automatically when the sequence is finished without you having to worry about specifying the stopping condition (which you have to do when using a while loop).\nNotice also that we didn't have to initialise the counter value!\nSo, the biggest difference between a while and a for loop is thus not merely stylistic, it's also conceptual!\nLet's recap with another example:",
"# WHILE LOOP\nwhileCounter = 0\nmyList = [0,1,2,3,4]\nstoppingCondition = len(myList)\nwhile whileCounter < stoppingCondition:\n print(\"Element number: \" + str(myList[whileCounter]))\n whileCounter +=1 \n\n# FOR LOOP\nfor element in myList:\n print(\"Element number: \" + str(element))",
"SIDENOTE: See how the value of myList[whileCounter] and that of element in the two loops are the same? That's because Python is doing the indexing job for you behind the scenes.\nA challenge for you!\nPrint only the odd numbers in the list. HINT: remember the modulo operator?",
"numbers = [1,2,3,4,5,6,7,8,9,10]\nfor n in numbers:\n if (???):\n print(n)\n\nnumbers = [1,2,3,4,5,6,7,8,9,10]\nfor n in numbers:\n if (n % 2 != 0):\n print(n)",
"Now, as we are lazy programmers, let's repeat the above example combining the range function with a for loop. It will save us the hassle of typing all those numbers!",
"for i in range(10):\n if (i % 2 != 0):\n print(i)",
"Cool, we have seen how to iterate over a list, but what about a dictionary? Well, if you remember we said that you might think of a dictionary as a kind of list where each element isn't indexed by an integer, but rather by a unique identifier (key).\nHence, as with lists where we iterate over the indexes, with dictionaries we are going to iterate over the keys!",
"programmers = {\n \"Charles\": \"Babbage\",\n \"Ada\": \"Lovelace\",\n \"Alan\":\"Turing\"\n}\nfor k in programmers:\n print(k)",
"NOTE: I've used the variable k. This is simply an arbitrary word that I've choosen and not some kind of special variable. You could have used anyRandomNameForWhatMatters. \nWhat if you want to retrieve the values? In that case you should use not only two variables, (the first for the keys and the second for the values) but also invoke the method items() on the dictionary, like so:",
"for k,v in programmers.items():\n print(\"this is the value: \\'\" + v + \"\\' for the key: \\'\" + k +\"\\'\")",
"A Challenge for you!\nIterate over the GeoJSON marker and print its \"properties\".",
"KCL_marker = {\n \"type\": \"Feature\",\n \"properties\": {\n \"marker-color\": \"#7e7e7e\",\n \"marker-size\": \"medium\",\n \"marker-symbol\": \"\",\n \"name\": \"KCL\"\n },\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [\n -0.11630058288574219,\n 51.51135999349117\n ]\n }\n }\nfor ???,v in KCL_marker[\"???\"].items():\n print(\"KCL_marker has a property: '\" + ??? + \"' for the key: \" + k)\n\n\nKCL_marker = {\n \"type\": \"Feature\",\n \"properties\": {\n \"marker-color\": \"#7e7e7e\",\n \"marker-size\": \"medium\",\n \"marker-symbol\": \"\",\n \"name\": \"KCL\"\n },\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [\n -0.11630058288574219,\n 51.51135999349117\n ]\n }\n }\nfor k,v in KCL_marker[\"properties\"].items():\n print(\"KCL_marker has a property: '\" + v + \"' for the key: \" + k)\n",
"Very good! Let's summarise some facts about loops:\n\nyou can increment the counter\nbut also decrement it (effectively counting down!)\nthe increment doesn't need to be 1 every time (you can increment by 2, 50, whatever..)\ndon’t forget to indent the block of code after the colon!\n\nCode (Applied Geo-example)\nThe geo-exercise I'll give you this time is a real-world problem that you might face one day in your career as geospatial professionals. In principle it is possible to work out everything below based on what we've done up to this point; however, this exercise is also hard since it is a big jump conceptually that mixes up the ideas in a new way. So if you can't quite make out the answer don't worry, just try to understand the concepts above and see if you can solve parts of the problem. \nLet's say a colleague of yours used a GPS to survey at regular intervals the dispersion of pollutants in a patch of terrain. Unfortunately, after a good start they forgot to record all the remaining points! \nBut that's not a terrible problem, as the transect has a perfect West-East orientation and direction, and all the points are spaced by the same value dX (short for 'delta-X', the change in X between each point) of 0.03 degrees longitude, i.e.:\n(0.0102, 51.592)-----(X+dX,Y)-----(X+2dX,Y)-----(X+3dX,Y)--->\nUsing what we've seen so far, try to create a GeoJSON featureCollection of points. To give you a head start, I've provided some scaffolding.\nHINT: Being the skilled geographer that you are, you immediately realise that actually you've got all the coordinates that you need, even for the missing points (i.e. the latitude values will remain constant...)",
"# define a new featureCollection: it is basically a very fancy dictionary\n# to which we are going to add new 'features' (which are points on a map\n# but represented as *data* by a dictionary). We need to add one feature\n# at a time when building our transect...\n# initial coordinate list\ninit_coords = [-0.0200, 51.592]\n# dX delta \ndx = 0.03\ngap = 0\n\ntransect = {\n \"type\": \"FeatureCollection\",\n \"features\": [\n {\n \"type\": \"Feature\",\n \"properties\": {},\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": init_coords\n }\n }\n# -------------------------------------------------------------\n# here is where the remaining three points have to be\n# added using *code* and not manually\n# -------------------------------------------------------------\n ]\n}\n\n# new empty list where I'm going to put all the new dictionaries \n# a.k.a. all the new points\nthree_new_points = []\n\nfor i in range(3):\n# define a new point \n new_point = {\n \"type\": \"Feature\",\n \"properties\": {},\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": []\n\n }\n }\n \n # increment the longitude\n gap += dx\n \n # create a new list with the updated coordinates\n new_coordinates = [init_coords[0] + gap, init_coords[1]]\n\n # assign the new coordinates to the coordinates key\n # in the new point dictionary\n new_point[\"geometry\"][\"coordinates\"] = new_coordinates\n new_point[\"properties\"][\"name\"] = \"Point \" + str(i+1)\n\n # append the new point dictionary to the list of new points\n three_new_points.append(new_point)\n\n\n# append to the feature list the three new points\n# that we created\ntransect[\"features\"].extend(three_new_points)\nprint(transect)",
"This output on its own makes very little sense, but it's actually a real world data structure called JSON. Below we use a handy library (that you might not yet have installed on your computer) to turn that JSON into something easier to read, and then we'll see how you can use it!",
"import json \nparsed = json.loads(str(transect).replace(\"\\'\", \"\\\"\"))\nprint(json.dumps(parsed, indent=4))\n\nfrom ipyleaflet import Map, GeoJSON, basemaps\nm = Map(center = (51.51, -0.10), zoom=10, min_zoom=5, max_zoom=20, \n basemap=basemaps.OpenTopoMap)\ngeo = GeoJSON(data=parsed)\nm.add_layer(geo)\nm",
"Further references:\nGeneral list of resources\n- Awesome list of resources\n- Python Docs\n- HitchHiker's guide to Python\n- Python for Informatics\n- Learn Python the Hard Way - Lists\n- Learn Python the Hard Way - Dictionaries\n- CodeAcademy\nCredits!\nContributors:\nThe following individuals have contributed to these teaching materials: \n- James Millington\n- Jon Reades\n- Michele Ferretti\n- Zahratu Shabrina\nLicense\nThe content and structure of this teaching project itself is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 license, and the contributing source code is licensed under The MIT License.\nAcknowledgements:\nSupported by the Royal Geographical Society (with the Institute of British Geographers) with a Ray Y Gildea Jr Award.\nPotential Dependencies:\nThis notebook may depend on the following libraries: None"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
orbitfold/tardis
|
docs/notebooks/to_hdf.ipynb
|
bsd-3-clause
|
[
"Example to_hdf calls\nInitialize the simulation with the tardis_example.yml configuration file.",
"from tardis.io.config_reader import Configuration\nfrom tardis.model import Radial1DModel\nfrom tardis.simulation import Simulation\n\n# Must have the tardis_example folder in the working directory.\nconfig_fname = 'tardis_example/tardis_example.yml'\ntardis_config = Configuration.from_yaml(config_fname)\n\nmodel = Radial1DModel(tardis_config)\nsimulation = Simulation(tardis_config)",
"Run the simulation while storing all its iterations to an HDF file.\nThe first parameter is the path where the HDF file should be stored. The second parameter determines which properties will be stored. When its value is 'input', only Input plasma properties will be stored. The third parameter, hdf_last_only, if True will only store the last iteration of the simulation, otherwise every iteration will be stored.",
"simulation.legacy_run_simulation(model, '/tmp/full_example.hdf', 'full', hdf_last_only=False)",
"Open the stored HDF file with pandas and print its structure.",
"import pandas as pd\ndata = pd.HDFStore('/tmp/full_example.hdf')\nprint data",
"Access model.plasma.density of the 9th simulation, which is a one-dimensional array",
"print data['/simulation9/model/plasma/density']",
"Scalars are stored in a scalars pandas.Series for every module. For example to access model.t_inner of the 9th iteration of the simulation, one would need to do the following.\nNote: Quantities are always stored as their SI values.",
"print data['/simulation9/model/scalars']['t_inner']",
"Breakdown of the various to_hdf methods\nEvery module in TARDIS has its own to_hdf method responsible to store its own data to an HDF file.\nPlasma\nThe following call will store every plasma property to /tmp/plasma_output.hdf under /parent/plasma",
"model.plasma.to_hdf('/tmp/plasma_output.hdf', path='parent')\n\nimport pandas\nwith pandas.HDFStore('/tmp/plasma_output.hdf') as data:\n print data",
"Plasma's to_hdf method can also accept a collection parameter which can specify which types of plasma properties will be stored. For example if we wanted to only store Input plasma properties, we would do the following:",
"from tardis.plasma.properties.base import Input\nmodel.plasma.to_hdf('/tmp/plasma_input_output.hdf', collection=[Input])\n\nimport pandas\nwith pandas.HDFStore('/tmp/plasma_input_output.hdf') as data:\n print data",
"Model\nThe following call will store properties of the Radial1DModel to /tmp/model_output.hdf under /model. Additionally, it will automatically call model.plasma.to_hdf, since plasma is also a property of the model.",
"model.to_hdf('/tmp/model_output.hdf')",
"MontecarloRunner\nThe following call will store properties of the MontecarloRunner to /tmp/runner_output.hdf under /runner",
"simulation.runner.to_hdf('/tmp/runner_output.hdf')\n\nimport pandas\nwith pandas.HDFStore('/tmp/runner_output.hdf') as data:\n print data"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tensorflow/docs-l10n
|
site/ja/tutorials/generative/style_transfer.ipynb
|
apache-2.0
|
[
"Copyright 2018 The TensorFlow Authors.",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"ニューラル画風変換\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td> <img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\"><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/tutorials/generative/style_transfer.ipynb\">TensorFlow.org で表示</a> </td>\n <td> <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\"><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tutorials/generative/style_transfer.ipynb\">Google Colab で実行</a> </td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/style_transfer.ipynb\"><img src=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/tutorials/generative/style_transfer.ipynb\">GitHub でソースを表示</a></td>\n <td> <img src=\"https://www.tensorflow.org/images/download_logo_32px.png\"><a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/generative/style_transfer.ipynb\">ノートブックをダウンロード</a> </td>\n <td> <a href=\"https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2\"><img src=\"https://www.tensorflow.org/images/hub_logo_32px.png\"> TF Hub モデルを参照</a> </td>\n</table>\n\nこのチュートリアルでは、ディープラーニングを使用して、ある画像を別の画像の画風で作成します(ピカソやゴッホのように描いてみたいと思ったことはありませんか?)。これはニューラル画風変換として知られるテクニックで、「<a href=\"https://arxiv.org/abs/1508.06576\" class=\"external\">A Neural Algorithm of Artistic Style</a>」(Gatys et al.)に説明されています。\n注意: このチュートリアルでは、元の画風変換アルゴリズムを実演しています。画像のコンテンツを特定の画風に最適化するものです。最新のアプローチでは、画風が適用された画像を直接生成するモデルをトレーニングします(cyclegan に類似)。このアプローチはより高速に実行できます(最大 1000 倍)。\n画風変換の簡単なアプリケーションについては、このチュートリアルをご覧ください。TensorFlow Hub から事前トレーニング済みの任意の画風変換モデルを使用する方法や TensorFlow Lite で画風変換モデルを使用する方法について詳しく記載されています。 \nニューラル画風変換は、コンテンツ画像と画風参照画像(有名な画家の作品など)という 2 つの画像をブレンドしてコンテンツ画像に見えるが、画風参照画像の画風で「描かれた」ように見える画像を出力するための最適化テクニックです。\nこれは、コンテンツ画像のコンテンツ統計と画風参照画像の画風統計に一致するように出力画像を最適化することで実装されています。これらの統計は、畳み込みネットワークを使用して画像から抽出されます。\nたとえば、以下の犬の画像とワシリー・カジンスキーの作品 7 
を使用しましょう。\n<img src=\"https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg\" class=\"\">\nウィキペディアコモンズの Yellow Labrador Looking。作成者、Elf。ライセンス CC BY-SA 3.0\n<img src=\"https://storage.googleapis.com/download.tensorflow.org/example_images/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg\">\nカジンスキーがこの画風のみを使用してこの犬の絵を描こうとしたら、どのようになるのか見てみましょう。以下のようになるでしょうか?\n<img src=\"https://tensorflow.org/tutorials/generative/images/stylized-image.png\" style=\"width: 500px;\"> \nセットアップ\nモジュールのインポートと構成",
"import os\nimport tensorflow as tf\n# Load compressed models from tensorflow_hub\nos.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'\n\nimport IPython.display as display\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.rcParams['figure.figsize'] = (12, 12)\nmpl.rcParams['axes.grid'] = False\n\nimport numpy as np\nimport PIL.Image\nimport time\nimport functools\n\ndef tensor_to_image(tensor):\n tensor = tensor*255\n tensor = np.array(tensor, dtype=np.uint8)\n if np.ndim(tensor)>3:\n assert tensor.shape[0] == 1\n tensor = tensor[0]\n return PIL.Image.fromarray(tensor)",
"画像をダウンロードして、画風画像とコンテンツ画像を選択します。",
"content_path = tf.keras.utils.get_file('YellowLabradorLooking_new.jpg', 'https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg')\nstyle_path = tf.keras.utils.get_file('kandinsky5.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg')",
"入力を視覚化する\n画像を読み込んで、その最大寸法を 512 ピクセルに制限する関数を定義します。",
"def load_img(path_to_img):\n max_dim = 512\n img = tf.io.read_file(path_to_img)\n img = tf.image.decode_image(img, channels=3)\n img = tf.image.convert_image_dtype(img, tf.float32)\n\n shape = tf.cast(tf.shape(img)[:-1], tf.float32)\n long_dim = max(shape)\n scale = max_dim / long_dim\n\n new_shape = tf.cast(shape * scale, tf.int32)\n\n img = tf.image.resize(img, new_shape)\n img = img[tf.newaxis, :]\n return img",
"画像を表示する単純な関数を作成します。",
"def imshow(image, title=None):\n if len(image.shape) > 3:\n image = tf.squeeze(image, axis=0)\n\n plt.imshow(image)\n if title:\n plt.title(title)\n\ncontent_image = load_img(content_path)\nstyle_image = load_img(style_path)\n\nplt.subplot(1, 2, 1)\nimshow(content_image, 'Content Image')\n\nplt.subplot(1, 2, 2)\nimshow(style_image, 'Style Image')",
"TF-Hub を使用した高速画風変換\nこのチュートリアルでは、画像コンテンツを特定の画風に最適化する、元の画風変換アルゴリズムを実演します。詳細に踏み込む前に、TensorFlow Hub が何を行うのかを確認しましょう。",
"import tensorflow_hub as hub\nhub_model = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')\nstylized_image = hub_model(tf.constant(content_image), tf.constant(style_image))[0]\ntensor_to_image(stylized_image)",
"コンテンツと画風の表現を定義する\nモデルの中間レイヤーを使用して、画像のコンテンツと画風の表現を取得します。ネットワークの入力レイヤーから数えて最初のいくつかのレイヤーアクティベーションは、エッジやテクスチャといった低レベルの特徴を表します。ネットワークをさらに深く探ると、最後のいくつかのレイヤーは、車輪や目といった高レベルの特徴を表します。この場合、VGG19 ネットワークアーキテクチャという、事前トレーニング済みの画像分類ネットワークを使用しています。中間レイヤーは、画像からコンテンツと画風の表現を定義するために必要となるレイヤーです。入力画像については、これらの中間レイヤーにある、対応する画風とコンテンツターゲットの表現に一致するようにします。\nVGG19 を読み込んで、画像で実行し、正しく使用されていることを確認します。",
"x = tf.keras.applications.vgg19.preprocess_input(content_image*255)\nx = tf.image.resize(x, (224, 224))\nvgg = tf.keras.applications.VGG19(include_top=True, weights='imagenet')\nprediction_probabilities = vgg(x)\nprediction_probabilities.shape\n\npredicted_top_5 = tf.keras.applications.vgg19.decode_predictions(prediction_probabilities.numpy())[0]\n[(class_name, prob) for (number, class_name, prob) in predicted_top_5]",
"分類ヘッドを除く VGG19 を読み込み、レイヤー名をリストします。",
"vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')\n\nprint()\nfor layer in vgg.layers:\n print(layer.name)",
"画像の画風とコンテンツを表現する中間レイヤーをネットワークから選択します。",
"content_layers = ['block5_conv2'] \n\nstyle_layers = ['block1_conv1',\n 'block2_conv1',\n 'block3_conv1', \n 'block4_conv1', \n 'block5_conv1']\n\nnum_content_layers = len(content_layers)\nnum_style_layers = len(style_layers)",
"画風とコンテンツ用の中間レイヤー\nでは、事前トレーニング済みの画像分類ネットワーク内のこれらの中間出力によって画風とコンテンツの表現を定義できるのはなぜでしょうか。\n大まかに言えば、ネットワークが画像分類を行うには(このネットワークがトレーニングされた目的)、画像を理解している必要があります。これには、生の画像を入力ピクセルとして取り、生の画像ピクセルを、画像内に存在する特徴の複雑な理解への変換する内部表現が必要です。\nこれが、畳み込みニューラルネットワークがうまく一般化できる理由でもあります。不変性をキャプチャして、バックグラウンドノイズやその他の邪魔なものにとらわれない特徴をクラス内(猫と犬など)定義することができます。そのため、生の画像がモデルにフェードされる場所と出力分類ラベルの間のどこかで、モデルは複雑な特徴量抽出器としての役割を果たしていることになります。モデルの中間レイヤーにアクセスすることによって、入力画像のコンテンツと画風を説明することができます。\nモデルを構築する\ntf.keras.applications のネットワークは、Keras の Functional API を使用して、中間レイヤーの値を簡単に抽出できるように設計されています。\nFunctional API を使用してモデルを定義するには、入力と出力を以下のように指定します。\nmodel = Model(inputs, outputs)\n以下の関数は、中間レイヤーの出力を返す VGG19 モデルを構築します。",
"def vgg_layers(layer_names):\n \"\"\" Creates a vgg model that returns a list of intermediate output values.\"\"\"\n # Load our model. Load pretrained VGG, trained on imagenet data\n vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')\n vgg.trainable = False\n \n outputs = [vgg.get_layer(name).output for name in layer_names]\n\n model = tf.keras.Model([vgg.input], outputs)\n return model",
"モデルを作成するには、以下のように行います。",
"style_extractor = vgg_layers(style_layers)\nstyle_outputs = style_extractor(style_image*255)\n\n#Look at the statistics of each layer's output\nfor name, output in zip(style_layers, style_outputs):\n print(name)\n print(\" shape: \", output.numpy().shape)\n print(\" min: \", output.numpy().min())\n print(\" max: \", output.numpy().max())\n print(\" mean: \", output.numpy().mean())\n print()",
"画風を計算する\n画像のコンテンツは、中間特徴量マップの値によって表現されます。\nこれにより、画像の画風は、異なる特徴量マップの平均と相関によって表現できるようになります。各場所での特徴ベクトルの外積を取り、その外積をすべての場所で平均することにより、この情報を含むグラム行列を計算します。特定のレイヤーのグラム行列は以下のように計算できます。\n$$G^l_{cd} = \\frac{\\sum_{ij} F^l_{ijc}(x)F^l_{ijd}(x)}{IJ}$$\nこれは、tf.linalg.einsum 関数を使用して、明確に実装することができます。",
"def gram_matrix(input_tensor):\n result = tf.linalg.einsum('bijc,bijd->bcd', input_tensor, input_tensor)\n input_shape = tf.shape(input_tensor)\n num_locations = tf.cast(input_shape[1]*input_shape[2], tf.float32)\n return result/(num_locations)",
"画風とコンテンツを抽出する\n画風とコンテンツのテンソルを返すモデルを構築します。",
"class StyleContentModel(tf.keras.models.Model):\n def __init__(self, style_layers, content_layers):\n super(StyleContentModel, self).__init__()\n self.vgg = vgg_layers(style_layers + content_layers)\n self.style_layers = style_layers\n self.content_layers = content_layers\n self.num_style_layers = len(style_layers)\n self.vgg.trainable = False\n\n def call(self, inputs):\n \"Expects float input in [0,1]\"\n inputs = inputs*255.0\n preprocessed_input = tf.keras.applications.vgg19.preprocess_input(inputs)\n outputs = self.vgg(preprocessed_input)\n style_outputs, content_outputs = (outputs[:self.num_style_layers],\n outputs[self.num_style_layers:])\n\n style_outputs = [gram_matrix(style_output)\n for style_output in style_outputs]\n\n content_dict = {content_name: value\n for content_name, value\n in zip(self.content_layers, content_outputs)}\n\n style_dict = {style_name: value\n for style_name, value\n in zip(self.style_layers, style_outputs)}\n\n return {'content': content_dict, 'style': style_dict}",
"画像に対して呼び出されると、このモデルは style_layers のグラム行列(画風)と content_layers のコンテンツを返します。",
"extractor = StyleContentModel(style_layers, content_layers)\n\nresults = extractor(tf.constant(content_image))\n\nprint('Styles:')\nfor name, output in sorted(results['style'].items()):\n print(\" \", name)\n print(\" shape: \", output.numpy().shape)\n print(\" min: \", output.numpy().min())\n print(\" max: \", output.numpy().max())\n print(\" mean: \", output.numpy().mean())\n print()\n\nprint(\"Contents:\")\nfor name, output in sorted(results['content'].items()):\n print(\" \", name)\n print(\" shape: \", output.numpy().shape)\n print(\" min: \", output.numpy().min())\n print(\" max: \", output.numpy().max())\n print(\" mean: \", output.numpy().mean())\n",
"勾配下降法を実行する\nこの画風とコンテンツの抽出器を使用して、画風変換アルゴリズムを実装できるようになりました。各ターゲットに相対する画像の出力の平均二乗誤差を計算し、これらの損失の加重和を取って行います。\n画風とコンテンツターゲット値を設定します。",
"style_targets = extractor(style_image)['style']\ncontent_targets = extractor(content_image)['content']",
"最適化する画像を含めて tf.Variable を定義します。これを素早く行うには、コンテンツ画像で初期化します(tf.Variable は、コンテンツ画像と同じ形状である必要があります)。",
"image = tf.Variable(content_image)",
"これは浮動小数点数の画像であるため、ピクセル値を 0 から 1 に維持する関数を定義します。",
"def clip_0_1(image):\n return tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)",
"オプティマイザを作成します。論文では、LBFGS が推奨されていますが、Adam も使用できます。",
"opt = tf.optimizers.Adam(learning_rate=0.02, beta_1=0.99, epsilon=1e-1)",
"これを最適化するために、重みづけされた、2 つの損失の組み合わせを使用して合計損失を取得します。",
"style_weight=1e-2\ncontent_weight=1e4\n\ndef style_content_loss(outputs):\n style_outputs = outputs['style']\n content_outputs = outputs['content']\n style_loss = tf.add_n([tf.reduce_mean((style_outputs[name]-style_targets[name])**2) \n for name in style_outputs.keys()])\n style_loss *= style_weight / num_style_layers\n\n content_loss = tf.add_n([tf.reduce_mean((content_outputs[name]-content_targets[name])**2) \n for name in content_outputs.keys()])\n content_loss *= content_weight / num_content_layers\n loss = style_loss + content_loss\n return loss",
"tf.GradientTape を使用して画像を更新します。",
"@tf.function()\ndef train_step(image):\n with tf.GradientTape() as tape:\n outputs = extractor(image)\n loss = style_content_loss(outputs)\n\n grad = tape.gradient(loss, image)\n opt.apply_gradients([(grad, image)])\n image.assign(clip_0_1(image))",
"いくつかのステップを実行してテストします。",
"train_step(image)\ntrain_step(image)\ntrain_step(image)\ntensor_to_image(image)",
"機能しているので、さらに長い最適化を実行します。",
"import time\nstart = time.time()\n\nepochs = 10\nsteps_per_epoch = 100\n\nstep = 0\nfor n in range(epochs):\n for m in range(steps_per_epoch):\n step += 1\n train_step(image)\n print(\".\", end='', flush=True)\n display.clear_output(wait=True)\n display.display(tensor_to_image(image))\n print(\"Train step: {}\".format(step))\n \nend = time.time()\nprint(\"Total time: {:.1f}\".format(end-start))",
"総変動損失\nこの基本実装には、 高周波アーチファクトが多く生成されるという難点があります。これらは、画像の高周波コンポーネントに明示的な正規化項を使用することで低下させることができます。画風変換では、通常これは総変動損失と呼ばれています。",
"def high_pass_x_y(image):\n x_var = image[:, :, 1:, :] - image[:, :, :-1, :]\n y_var = image[:, 1:, :, :] - image[:, :-1, :, :]\n\n return x_var, y_var\n\nx_deltas, y_deltas = high_pass_x_y(content_image)\n\nplt.figure(figsize=(14, 10))\nplt.subplot(2, 2, 1)\nimshow(clip_0_1(2*y_deltas+0.5), \"Horizontal Deltas: Original\")\n\nplt.subplot(2, 2, 2)\nimshow(clip_0_1(2*x_deltas+0.5), \"Vertical Deltas: Original\")\n\nx_deltas, y_deltas = high_pass_x_y(image)\n\nplt.subplot(2, 2, 3)\nimshow(clip_0_1(2*y_deltas+0.5), \"Horizontal Deltas: Styled\")\n\nplt.subplot(2, 2, 4)\nimshow(clip_0_1(2*x_deltas+0.5), \"Vertical Deltas: Styled\")",
"これは、高周波コンポーネントがどのように高まったのかを示します。\nまた、この高周波コンポーネントは基本的にエッジ検出器でもあります。似たような出力は、Sobel エッジ検出器でも見られます。以下に例を示します。",
"plt.figure(figsize=(14, 10))\n\nsobel = tf.image.sobel_edges(content_image)\nplt.subplot(1, 2, 1)\nimshow(clip_0_1(sobel[..., 0]/4+0.5), \"Horizontal Sobel-edges\")\nplt.subplot(1, 2, 2)\nimshow(clip_0_1(sobel[..., 1]/4+0.5), \"Vertical Sobel-edges\")",
"これに伴う正規化損失は、値の二乗の和です。",
"def total_variation_loss(image):\n x_deltas, y_deltas = high_pass_x_y(image)\n return tf.reduce_sum(tf.abs(x_deltas)) + tf.reduce_sum(tf.abs(y_deltas))\n\ntotal_variation_loss(image).numpy()",
"何を行うかについては実演されましたが、これを自分で実装する必要はありません。TensorFlow には、標準実装が含まれています。",
"tf.image.total_variation(image).numpy()",
"最適化を再実行する\ntotal_variation_loss の重みを選択します。",
"total_variation_weight=30",
"train_step 関数にそれを含めます。",
"@tf.function()\ndef train_step(image):\n with tf.GradientTape() as tape:\n outputs = extractor(image)\n loss = style_content_loss(outputs)\n loss += total_variation_weight*tf.image.total_variation(image)\n\n grad = tape.gradient(loss, image)\n opt.apply_gradients([(grad, image)])\n image.assign(clip_0_1(image))",
"最適化変数を再初期化します。",
"image = tf.Variable(content_image)",
"そして、最適化を実行します。",
"import time\nstart = time.time()\n\nepochs = 10\nsteps_per_epoch = 100\n\nstep = 0\nfor n in range(epochs):\n for m in range(steps_per_epoch):\n step += 1\n train_step(image)\n print(\".\", end='', flush=True)\n display.clear_output(wait=True)\n display.display(tensor_to_image(image))\n print(\"Train step: {}\".format(step))\n\nend = time.time()\nprint(\"Total time: {:.1f}\".format(end-start))",
"最後に、結果を保存します。",
"file_name = 'stylized-image.png'\ntensor_to_image(image).save(file_name)\n\ntry:\n from google.colab import files\nexcept ImportError:\n pass\nelse:\n files.download(file_name)",
"詳細情報\nこのチュートリアルでは、元の画風変換アルゴリズムを紹介しました。画風変換の簡単なアプリケーションについては、このチュートリアルを参照してください。TensorFlow Hub から任意の画風変換モデルを使用する方法についての詳細が記載されています。"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
tpin3694/tpin3694.github.io
|
python/priority_queues.ipynb
|
mit
|
[
"Title: Priority Queues\nSlug: priority_queues\nSummary: Priority Queues Using Python.\nDate: 2017-02-02 12:00\nCategory: Python\nTags: Basics\nAuthors: Chris Albon \nPreliminaries",
"import heapq",
"Create A Priority Queue Object",
"# Create a priority queue abstract base class\nclass priority_queue:\n # Initialize the instance\n def __init__(self):\n # Create a list to use as the queue\n self._queue = []\n # Create an index to use as ordering\n self._index = 0\n\n # Create a function to add a task to the queue\n def add_task(self, item, priority):\n # Push the arguments to the _queue using a heap\n heapq.heappush(self._queue, (-priority, self._index, item))\n # Add one to the index\n self._index += 1\n\n # Create a function to get the next item from the queue\n def next_task(self):\n # Return the next item in the queue\n return heapq.heappop(self._queue)[-1]\n\n# Create a priority queue called task_list\ntask_list = priority_queue()",
"Add Items To Queue",
"# Add an item to the queue\ntask_list.add_task('Clean Dishes', 1)\n\n# Add an item to the queue\ntask_list.add_task('Wash Car', 2)\n\n# Add an item to the queue\ntask_list.add_task('Walk Dog', 3)",
"Retrieve Items From Queue By Priority",
"# Retrieve items from the queue\ntask_list.next_task()\n\n# Retrieve items from the queue\ntask_list.next_task()\n\n# Retrieve items from the queue\ntask_list.next_task()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
unoebauer/public-astro-tools
|
jupyter/wind_tutorial.ipynb
|
mit
|
[
"# some imports specific for the notebook\nimport warnings\nwarnings.filterwarnings('ignore')\nimport sys\nsys.path.append(\"../wind_structure/\")\n\n# some other module imports\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport wind_structure as ws",
"Wind Structure Calculator\nThis Python module provides a number of simple calculators with which the structure of line-driven winds can be determined following different analytic approaches. In the following, we briefly demonstrate the usage of these calculators using the example of the well-studied O-star zeta-Puppis.\nSetting the stellar Parameters\nThese parameters describe the basic properties of the star. These are adopted from Noebauer & Sim 2015, tables 2 and 3.",
"mstar = 52.5 # mass; if no astropy units are provided, the calculators will assume units of solar masses\nlstar = 1e6 # luminosity; if no astropy units are provided, the calculators will assume units of solar luminosities\nteff = 4.2e4 # effective temperature; if no astropy units are provided, the calculators will assume kelvin\nsigma = 0.3 # reference electron scattering cross section; if no astropy units are provided, cm^2/g is assumed\ngamma = 0.502 # Eddington factor with respect to electron scattering",
"CAK force multiplier parameters. Again these are adopted from Noebauer & Sim 2015, table 3",
"alpha = 0.595\nk = 0.381",
"The grid for dimensional radii (i.e. r/Rstar) for which the wind velocity and density will be later determined",
"x = np.logspace(-3, 3, 1024) + 1.",
"Setting up the calculators\n\nWindStructureCak75: wind structure based on the seminal work by Castor, Abbott and Klein 1975; the central star is assumed to be a point source\nWindStructureFa86: wind structure based on fits to the numerical results obtained by Friend and Abbott 1986; only the influence of the finite extent of the central star is taken into account\nWindStructureKppa89: wind structure based on the approximate analytic description by Kudritzki, Pauldrach, Puls and Abbott 1989; the finite extent of the central star is taken into account\n\nNote: in all wind structure calculators it is assumed that the ionization state is frozen-in, i.e. constant throughout the wind.",
"wind_cak75 = ws.WindStructureCak75(mstar=mstar, lstar=lstar, teff=teff, gamma=gamma, sigma=sigma, k=k, alpha=alpha)\nwind_fa86 = ws.WindStructureFa86(mstar=mstar, lstar=lstar, teff=teff, gamma=gamma, sigma=sigma, k=k, alpha=alpha)\nwind_kppa89 = ws.WindStructureKppa89(mstar=mstar, lstar=lstar, teff=teff, gamma=gamma, sigma=sigma, k=k, alpha=alpha)\n\nwinds = [wind_cak75, wind_fa86, wind_kppa89]\nlabels = [\"CAK75\", \"FA86\", \"KPPA89\"]\nlinestyles = [\"solid\", \"dashed\", \"dashdot\"]",
"Calculating the mass-loss rates",
"print(\" | vterm [km/s] | Mdot [solMass/yr] \")\nprint(\"==========================================\")\nfor wind, label in zip(winds, labels):\n print(\"{:6s} | {:7.2f} | {:.4e}\".format(label, wind.vterm.value, wind.mdot.value))",
"Determining and visualizing the wind structure\nLet's first have a look at the wind velocity in absolute terms as predicted by the different descriptions of the wind structure.",
"plt.figure()\nfor wind, label, ls in zip(winds, labels, linestyles):\n plt.plot(x, wind.v(x), ls=ls, label=label)\nplt.legend(frameon=False, loc=\"lower right\")\nplt.xlabel(r\"$r/R_{\\star}$\")\nplt.ylabel(r\"$v$ [km/s]\")\nplt.xlim([0.8, 1e1])",
"The following illustration compares how fast the terminal wind speed is reached in the various wind descriptions",
"plt.figure()\nfor wind, label, ls in zip(winds, labels, linestyles):\n plt.plot(x - 1., wind.v(x) / wind.vterm, ls=ls, label=label)\nplt.xscale(\"log\")\nplt.xlim([1e-3, 1e3])\nplt.ylim([0, 1])\nplt.legend(loc=\"upper left\", frameon=False)\nplt.xlabel(r\"$r/R_{\\star} - 1$\")\nplt.ylabel(r\"$v/v_{\\infty}$\")",
"Finally, we have a look at the predicted wind density:",
"plt.figure()\nfor wind, label, ls in zip(winds, labels, linestyles):\n plt.plot(x, wind.rho(x), ls=ls, label=label)\nplt.yscale(\"log\")\nplt.ylim([1e-15, 1e-10])\nplt.xlim([0.8, 10])\nplt.xlabel(r\"$r/R_{\\star}$\")\nplt.ylabel(r\"$\\rho$ $[\\mathrm{g\\,cm^{-3}}]$\")\nplt.legend(loc=\"upper right\", frameon=False)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
SParadiso18/juliasets
|
juliaplots.ipynb
|
mit
|
[
"Julia Set Plotting Extension\nLoad module for a JuliaSet that conforms to the specified interface.\nIt is wise to run the test suite in test_juliaset.py with nosetests prior to attempting to plot here.",
"from juliaset import JuliaSet",
"Load additional libraries needed for plotting and profiling.",
"# Math libraries\nimport numpy as np\nfrom math import sqrt\n\n# Matplotlib plotting libraries\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# Bokeh plotting libraries\nimport bokeh.plotting as blt\nblt.output_notebook()",
"Extend JuliaSet class with additional functionality.",
"class JuliaSetPlot(JuliaSet):\n \"\"\"Extend JuliaSet to add plotting functionality\"\"\"\n \n def __init__(self, *args, **kwargs):\n # Invoke constructor for JuliaSet first, unaltered\n JuliaSet.__init__(self, *args, **kwargs)\n # Add another attribute: a rendered image array\n self.img = np.array([])\n \n def get_dim(self):\n \"\"\"Return linear number of points in axis\"\"\"\n return int(4.0 / self._d)\n \n def render(self):\n \"\"\"Render image as square array of ints\"\"\"\n if not self.set: self.generate()\n # Convert inefficient list to efficient numpy array\n self.img = np.array(self.set)\n # Reshape array into a 2d complex plane\n dim = int(sqrt(len(self.img)))\n self.img = np.reshape(self.img, (dim,dim)).T\n \n def show(self):\n \"\"\"Use matplotlib to plot image as an efficient mesh\"\"\"\n if not self.img.size: self.render()\n plt.figure(1, figsize=(12,9))\n xy = np.linspace(-2,2,self.get_dim())\n plt.pcolormesh(xy, xy, self.img, cmap=plt.cm.hot)\n plt.colorbar()\n plt.show()\n \n def interact(self):\n \"\"\"Use bokeh to plot an interactive image\"\"\"\n from matplotlib.colors import rgb2hex\n if not self.img.size: self.render()\n # Mimic matplotlib \"hot\" color palette\n colormap = plt.cm.get_cmap(\"hot\")\n bokehpalette = [rgb2hex(m) for m in colormap(np.arange(colormap.N))]\n # Create bokeh figure\n f = blt.figure(x_range=(-2,2), y_range=(-2,2), plot_width=600, plot_height=600)\n f.image(image=[self.img], x=[-2], y=[-2], dw=[4], dh=[4], palette=bokehpalette, dilate=True)\n blt.show(f)",
"Visualize a Julia set using matplotlib.",
"j = JuliaSetPlot(-1.037 + 0.17j)\n%time j.set_spacing(0.006)\n%time j.generate()\n%time j.show()",
"Visualize a different Julia set using Bokeh as an interactive Javascript plot.",
"j = JuliaSetPlot(-0.624 + 0.435j)\n%time j.set_spacing(0.006)\n%time j.generate()\n%time j.interact()\n\n%prun j.generate()\n\n%load_ext line_profiler\n%lprun -f j.generate j.generate()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
synthicity/synthpop
|
demos/census_api.ipynb
|
bsd-3-clause
|
[
"%load_ext autoreload\n%autoreload 2\nfrom synthpop.census_helpers import Census\nfrom synthpop import categorizer as cat\nimport pandas as pd\nimport numpy as np\nimport os\npd.set_option('display.max_columns', 500)",
"The census api needs a key - you can register for can sign up\nhttp://api.census.gov/data/key_signup.html",
"c = Census(os.environ[\"CENSUS\"])",
"Here we get aggregate information on households from ACS - note some variables are associated with block groups and others with tracts",
"income_columns = ['B19001_0%02dE'%i for i in range(1, 18)]\nvehicle_columns = ['B08201_0%02dE'%i for i in range(1, 7)]\nworkers_columns = ['B08202_0%02dE'%i for i in range(1, 6)]\nfamilies_columns = ['B11001_001E', 'B11001_002E']\nblock_group_columns = income_columns + families_columns\ntract_columns = vehicle_columns + workers_columns\nh_acs = c.block_group_and_tract_query(block_group_columns,\n tract_columns, \"06\", \"075\", \n merge_columns=['tract', 'county', 'state'],\n block_group_size_attr=\"B11001_001E\",\n tract_size_attr=\"B08201_001E\",\n tract=\"030600\")\nh_acs",
"And here is aggregate information on people from ACS",
"population = ['B01001_001E']\nsex = ['B01001_002E', 'B01001_026E']\nrace = ['B02001_0%02dE'%i for i in range(1,11)]\nmale_age_columns = ['B01001_0%02dE'%i for i in range(3,26)]\nfemale_age_columns = ['B01001_0%02dE'%i for i in range(27,50)]\nall_columns = population + sex + race + male_age_columns + female_age_columns\np_acs = c.block_group_query(all_columns, \"06\", \"075\", tract=\"030600\")\np_acs",
"Get the puma for our test tracts - this actually downloads the mapping file from the census website so it might take a few seconds",
"puma = c.tract_to_puma(\"06\", \"075\", \"030600\")\npuma\n\npuma10 = puma[0]\npuma00 = puma[1]",
"Download PUMS for people records for a PUMA from our server (we processed the large files into smaller ones for you)",
"p_pums = c.download_population_pums(\"06\", puma10=puma10, puma00=puma00)\np_pums.head(5)",
"Download PUMS for household records for a PUMA",
"h_pums = c.download_household_pums(\"06\", puma10=puma10, puma00=puma00)\nh_pums.head(5)",
"Now the job is to categorize acs and pums into the same categories - we start with the household acs data",
"h_acs_cat = cat.categorize(h_acs, {\n (\"households\", \"total\"): \"B11001_001E\",\n (\"children\", \"yes\"): \"B11001_002E\",\n (\"children\", \"no\"): \"B11001_001E - B11001_002E\",\n (\"income\", \"lt35\"): \"B19001_002E + B19001_003E + B19001_004E + \"\n \"B19001_005E + B19001_006E + B19001_007E\",\n (\"income\", \"gt35-lt100\"): \"B19001_008E + B19001_009E + \"\n \"B19001_010E + B19001_011E + B19001_012E\"\n \"+ B19001_013E\",\n (\"income\", \"gt100\"): \"B19001_014E + B19001_015E + B19001_016E\"\n \"+ B19001_017E\",\n (\"cars\", \"none\"): \"B08201_002E\",\n (\"cars\", \"one\"): \"B08201_003E\",\n (\"cars\", \"two or more\"): \"B08201_004E + B08201_005E + B08201_006E\",\n (\"workers\", \"none\"): \"B08202_002E\",\n (\"workers\", \"one\"): \"B08202_003E\",\n (\"workers\", \"two or more\"): \"B08202_004E + B08202_005E\" \n}, index_cols=['NAME'])\nh_acs_cat\n\nassert np.all(cat.sum_accross_category(h_acs_cat) < 2)",
"And the same for ACS population - the output of the categorization is the MARGINALS for each variable category",
"p_acs_cat = cat.categorize(p_acs, {\n (\"population\", \"total\"): \"B01001_001E\",\n (\"age\", \"19 and under\"): \"B01001_003E + B01001_004E + B01001_005E + \"\n \"B01001_006E + B01001_007E + B01001_027E + \"\n \"B01001_028E + B01001_029E + B01001_030E + \"\n \"B01001_031E\",\n (\"age\", \"20 to 35\"): \"B01001_008E + B01001_009E + B01001_010E + \"\n \"B01001_011E + B01001_012E + B01001_032E + \"\n \"B01001_033E + B01001_034E + B01001_035E + \"\n \"B01001_036E\",\n (\"age\", \"35 to 60\"): \"B01001_013E + B01001_014E + B01001_015E + \"\n \"B01001_016E + B01001_017E + B01001_037E + \"\n \"B01001_038E + B01001_039E + B01001_040E + \"\n \"B01001_041E\",\n (\"age\", \"above 60\"): \"B01001_018E + B01001_019E + B01001_020E + \"\n \"B01001_021E + B01001_022E + B01001_023E + \"\n \"B01001_024E + B01001_025E + B01001_042E + \"\n \"B01001_043E + B01001_044E + B01001_045E + \"\n \"B01001_046E + B01001_047E + B01001_048E + \"\n \"B01001_049E\", \n (\"race\", \"white\"): \"B02001_002E\",\n (\"race\", \"black\"): \"B02001_003E\",\n (\"race\", \"asian\"): \"B02001_005E\",\n (\"race\", \"other\"): \"B02001_004E + B02001_006E + B02001_007E + \"\n \"B02001_008E\",\n (\"sex\", \"male\"): \"B01001_002E\",\n (\"sex\", \"female\"): \"B01001_026E\"\n}, index_cols=['NAME'])\np_acs_cat\n\nassert np.all(cat.sum_accross_category(p_acs_cat) < 2)",
"To get the marginals a series for one geography do this",
"p_acs_cat.iloc[0].transpose()",
"Now categorize the PUMS population data into the same categories",
"def age_cat(r):\n if r.AGEP <= 19: return \"19 and under\"\n elif r.AGEP <= 35: return \"20 to 35\"\n elif r.AGEP <= 60: return \"35 to 60\"\n return \"above 60\"\n\ndef race_cat(r):\n if r.RAC1P == 1: return \"white\"\n elif r.RAC1P == 2: return \"black\"\n elif r.RAC1P == 6: return \"asian\"\n return \"other\"\n\ndef sex_cat(r):\n if r.SEX == 1: return \"male\"\n return \"female\"\n\n_, jd_persons = cat.joint_distribution(\n p_pums,\n cat.category_combinations(p_acs_cat.columns),\n {\"age\": age_cat, \"race\": race_cat, \"sex\": sex_cat}\n)\njd_persons ",
"Do the same for households - the output of this step is the JOINT DISTRIBUTIONS for the cross product of all possible categories",
"def cars_cat(r):\n if r.VEH == 0: return \"none\"\n elif r.VEH == 1: return \"one\"\n return \"two or more\"\n\ndef children_cat(r):\n if r.NOC > 0: return \"yes\"\n return \"no\"\n\ndef income_cat(r):\n if r.FINCP > 100000: return \"gt100\"\n elif r.FINCP > 35000: return \"gt35-lt100\"\n return \"lt35\"\n\ndef workers_cat(r):\n if r.WIF == 3: return \"two or more\"\n elif r.WIF == 2: return \"two or more\"\n elif r.WIF == 1: return \"one\"\n return \"none\"\n\n_, jd_households = cat.joint_distribution(\n h_pums,\n cat.category_combinations(h_acs_cat.columns),\n {\"cars\": cars_cat, \"children\": children_cat, \n \"income\": income_cat, \"workers\": workers_cat}\n)\njd_households",
"With marginals (aggregate, from ACS) and joint distribution (disaggregate, from PUMS) we're ready for some synthesis",
"\"TBD\""
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ecervera/mindstorms-nb
|
task/navigation.ipynb
|
mit
|
[
"Exercici de navegació\n<span title=\"Roomba navigating around furniture\"><img src=\"img/roomba.jpg\" align=\"right\" width=200></span>\nUn robot mòbil com el Roomba de la imatge ha d'evitar xocar amb els obstacles del seu entorn, i si arriba a col·lisionar, ha de reaccionar per a no fer, ni fer-se mal.\nAmb el sensor de tacte no podem evitar el xoc, però si detectar-lo un cop es produeix, i reaccionar.\nL'objectiu d'aquest exercici és programar el següent comportament en el robot:\n\nmentre no detecte res, el robot va cap avant\ndesprés de xocar, el robot anirà cap enrere i girarà\n\nConnecteu el robot:",
"from functions import connect, touch, forward, backward, left, right, stop, disconnect, next_notebook\nfrom time import sleep\nconnect()",
"Versió 1.0\nUtilitzeu el codi de l'exemple anterior del bucle while: només heu d'afegir que, després del xoc, el robot vaja cap enrere, gire una mica (cap al vostre costat preferit), i pare.",
"while ___:\n ___\n___",
"Versió 2.0\nSe suposa que la maniobra del robot li permet evitar l'obstacle, i per tant tornar a anar cap avant. Com ho podem programar?\nCal repetir tot el bloc d'instruccions del comportament, incloent el bucle. Cap problema, els llenguatges de programació permeten posar un bucle dins d'un altre, el que s'anomena bucles anidats.\nUtilitzeu un bucle for (com el que vam vore a l'exercici del quadrat) per a repetir 5 vegades el codi anterior.",
"for ___:\n while ___:\n ___\n ___",
"Versió 3.0\n<img src=\"img/interrupt.png\" align=\"right\">\nI si en lloc de repetir 5, 10 o 20 vegades, volem que el robot continue fins que el parem nosaltres? Ho podem fer amb un bucle infinit, i indicarem al programa que pare amb el botó interrupt kernel.\nEn Python, un bucle infinit s'escriu així:\npython\nwhile True:\n statement\nQuan s'interromp el programa, s'abandona la instrucció que s'estava executant en eixe moment, i cal parar el robot. En Python, aquest procés s'anomena excepció i es gestiona d'aquesta manera:\npython\ntry:\n while True:\n statement # ací anirà el comportament\nexcept KeyboardInterrupt:\n statement # ací pararem el robot\nUtilitzeu un bucle infinit per a repetir el comportament del robot fins que el pareu.",
"try:\n while True:\n while ___:\n ___\n ___\nexcept KeyboardInterrupt:\n stop() ",
"Versió 4.0\nEl comportament del robot, girant sempre cap al mateix costat, és una mica previsible, no vos sembla?\nAnem a introduir un component d'atzar: en els llenguatges de programació, existeixen els generadors de números aleatoris, que són com els daus dels ordinadors.\nExecuteu el següent codi vàries vegades amb Ctrl+Enter i comproveu els resultats.",
"from random import random\nrandom()",
"La funció random és com llançar un dau, però en compte de donar una valor d'1 a 6, dóna un número real entre 0 i 1.\nAleshores, el robot pot utilitzar eixe valor per a decidir si gira a esquerra o dreta. Com? Doncs si el valor és major que 0.5, gira a un costat, i si no, cap a l'altre. Aleshores, girarà a l'atzar, amb una probabilitat del 50% per a cada costat.\nIncorporeu la decisió a l'atzar per a girar al codi de la versió anterior:",
"try:\n while True:\n while ___:\n ___\n if ___:\n ___\n else:\n ___\nexcept KeyboardInterrupt:\n stop() ",
"Recapitulem\nTot el que hem vist en aquest exercici:\n\nbucles anidats\nexcepcions\nnúmeros aleatoris\n\nNo està malament, quasi hem vist el temari d'un primer curs de programació, i això només amb un sensor!\nPassem a vore doncs el següent sensor.\nAbans de continuar, desconnecteu el robot:",
"disconnect()\nnext_notebook('sound')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
GoogleCloudPlatform/training-data-analyst
|
courses/machine_learning/deepdive2/machine_learning_in_the_enterprise/labs/sdk_custom_xgboost.ipynb
|
apache-2.0
|
[
"Migrating Custom XGBoost Model with Pre-built Training Container\nLearning Objectives\n\nTrain a model.\nUpload a model.\nMake a batch and online predictions.\nDeploy a model.\n\nIntroduction\nThe dataset used for this tutorial is the Iris dataset from TensorFlow Datasets. This dataset does not require any feature engineering. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of Iris flower species from a class of three species: setosa, virginica, or versicolor.\nEach learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the solution notebook.\nInstallation\nInstall the latest version of Vertex SDK for Python.",
"# import necessary libraries\nimport os\n\n# Google Cloud Notebook\nif os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n USER_FLAG = \"--user\"\nelse:\n USER_FLAG = \"\"\n\n! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG",
"Install the latest GA version of google-cloud-storage library as well.",
"! pip3 install -U google-cloud-storage $USER_FLAG\n\nif os.getenv(\"IS_TESTING\"):\n ! pip3 install --upgrade tensorflow $USER_FLAG",
"Restart the kernel\nOnce you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.",
"import os\n\nif not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)",
"Set up your Google Cloud project",
"PROJECT_ID = \"<your-project>\" # replace with your project ID\n\nif PROJECT_ID == \"\" or PROJECT_ID is None or PROJECT_ID == \"[your-project-id]\":\n # Get your GCP project id from gcloud\n shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID:\", PROJECT_ID)\n\n! gcloud config set project $PROJECT_ID",
"Region\nYou can also change the REGION variable, which is used for operations\nthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.\n\nAmericas: us-central1\nEurope: europe-west4\nAsia Pacific: asia-east1\n\nYou may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.\nLearn more about Vertex AI regions",
"REGION = \"us-central1\"",
"Timestamp\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.",
"from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")",
"Create a Cloud Storage bucket\nWhen you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.\nSet the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.",
"BUCKET_NAME = \"gs://<your-bucket>\" # replace bucket name\n\nif BUCKET_NAME == \"\" or BUCKET_NAME is None or BUCKET_NAME == \"gs://[your-bucket-name]\":\n BUCKET_NAME = \"gs://\" + PROJECT_ID + \"aip-\" + TIMESTAMP",
"Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.",
"! gsutil mb -l $REGION $BUCKET_NAME",
"Finally, validate access to your Cloud Storage bucket by examining its contents:",
"! gsutil ls -al $BUCKET_NAME",
"Set up variables\nNext, set up some variables used in this notebook.\nImport libraries and define constants",
"import google.cloud.aiplatform as aip",
"Initialize Vertex SDK for Python\nInitialize the Vertex SDK for Python for your project and corresponding bucket.",
"aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)",
"Set pre-built containers\nSet the pre-built Docker container image for training and prediction.\nFor the latest list, see Pre-built containers for training.\nFor the latest list, see Pre-built containers for prediction.",
"TRAIN_VERSION = \"xgboost-cpu.1-1\"\nDEPLOY_VERSION = \"xgboost-cpu.1-1\"\n\nTRAIN_IMAGE = \"gcr.io/cloud-aiplatform/training/{}:latest\".format(TRAIN_VERSION)\nDEPLOY_IMAGE = \"gcr.io/cloud-aiplatform/prediction/{}:latest\".format(DEPLOY_VERSION)",
"Set machine type\nNext, set the machine type to use for training and prediction.\n\nSet the variables TRAIN_COMPUTE and DEPLOY_COMPUTE to configure the compute resources for the VMs you will use for for training and prediction.\nmachine type\nn1-standard: 3.75GB of memory per vCPU.\nn1-highmem: 6.5GB of memory per vCPU\nn1-highcpu: 0.9 GB of memory per vCPU\n\n\nvCPUs: number of [2, 4, 8, 16, 32, 64, 96 ]\n\nNote: The following is not supported for training:\n\nstandard: 2 vCPUs\nhighcpu: 2, 4 and 8 vCPUs\n\nNote: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs.",
"import os\n\nif os.getenv(\"IS_TESTING_TRAIN_MACHINE\"):\n MACHINE_TYPE = os.getenv(\"IS_TESTING_TRAIN_MACHINE\")\nelse:\n MACHINE_TYPE = \"n1-standard\"\n\nVCPU = \"4\"\nTRAIN_COMPUTE = MACHINE_TYPE + \"-\" + VCPU\nprint(\"Train machine type\", TRAIN_COMPUTE)\n\nif os.getenv(\"IS_TESTING_DEPLOY_MACHINE\"):\n MACHINE_TYPE = os.getenv(\"IS_TESTING_DEPLOY_MACHINE\")\nelse:\n MACHINE_TYPE = \"n1-standard\"\n\nVCPU = \"4\"\nDEPLOY_COMPUTE = MACHINE_TYPE + \"-\" + VCPU\nprint(\"Deploy machine type\", DEPLOY_COMPUTE)",
"Examine the training package\nPackage layout\nBefore you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.\n\nPKG-INFO\nREADME.md\nsetup.cfg\nsetup.py\ntrainer\n__init__.py\ntask.py\n\nThe files setup.cfg and setup.py are the instructions for installing the package into the operating environment of the Docker image.\nThe file trainer/task.py is the Python script for executing the custom training job. Note, when we referred to it in the worker pool specification, we replace the directory slash with a dot (trainer.task) and dropped the file suffix (.py).\nPackage Assembly\nIn the following cells, you will assemble the training package.",
"# Make folder for Python training script\n! rm -rf custom\n! mkdir custom\n\n# Add package information\n! touch custom/README.md\n\nsetup_cfg = \"[egg_info]\\n\\ntag_build =\\n\\ntag_date = 0\"\n! echo \"$setup_cfg\" > custom/setup.cfg\n\nsetup_py = \"import setuptools\\n\\nsetuptools.setup(\\n\\n install_requires=[\\n\\n 'tensorflow_datasets==1.3.0',\\n\\n ],\\n\\n packages=setuptools.find_packages())\"\n! echo \"$setup_py\" > custom/setup.py\n\npkg_info = \"Metadata-Version: 1.0\\n\\nName: Iris tabular classification\\n\\nVersion: 0.0.0\\n\\nSummary: Demostration training script\\n\\nHome-page: www.google.com\\n\\nAuthor: Google\\n\\nAuthor-email: aferlitsch@google.com\\n\\nLicense: Public\\n\\nDescription: Demo\\n\\nPlatform: Vertex\"\n! echo \"$pkg_info\" > custom/PKG-INFO\n\n# Make the training subfolder\n! mkdir custom/trainer\n! touch custom/trainer/__init__.py\n\n%%writefile custom/trainer/task.py\n# Single Instance Training for Iris\n\nimport datetime\nimport os\nimport subprocess\nimport sys\nimport pandas as pd\nimport xgboost as xgb\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model-dir', dest='model_dir',\n default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')\nargs = parser.parse_args()\n\n# Download data\niris_data_filename = 'iris_data.csv'\niris_target_filename = 'iris_target.csv'\ndata_dir = 'gs://cloud-samples-data/ai-platform/iris'\n\n# gsutil outputs everything to stderr so we need to divert it to stdout.\nsubprocess.check_call(['gsutil', 'cp', os.path.join(data_dir,\n iris_data_filename),\n iris_data_filename], stderr=sys.stdout)\nsubprocess.check_call(['gsutil', 'cp', os.path.join(data_dir,\n iris_target_filename),\n iris_target_filename], stderr=sys.stdout)\n\n\n# Load data into pandas, then use `.values` to get NumPy arrays\niris_data = pd.read_csv(iris_data_filename).values\niris_target = pd.read_csv(iris_target_filename).values\n\n# Convert one-column 2D array into 1D array for use with 
XGBoost\niris_target = iris_target.reshape((iris_target.size,))\n\n\n# Load data into DMatrix object\ndtrain = xgb.DMatrix(iris_data, label=iris_target)\n\n\n# Train XGBoost model\nbst = xgb.train({}, dtrain, 20)\n\n# Export the classifier to a file\nmodel_filename = 'model.bst'\nbst.save_model(model_filename)\n\n# Upload the saved model file to Cloud Storage\ngcs_model_path = os.path.join(args.model_dir, model_filename)\nsubprocess.check_call(['gsutil', 'cp', model_filename, gcs_model_path],\n stderr=sys.stdout)",
"Store training script on your Cloud Storage bucket\nNext, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.",
"! rm -f custom.tar custom.tar.gz\n! tar cvf custom.tar custom\n! gzip custom.tar\n! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_iris.tar.gz",
"Train a model (training.create-python-pre-built-container)\nCreate and run custom training job\nTo train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job.\nCreate custom training job\nA custom training job is created with the CustomTrainingJob class, with the following parameters:\n\ndisplay_name: The human readable name for the custom training job.\ncontainer_uri: The training container image.\nrequirements: Package requirements for the training container image (e.g., pandas).\nscript_path: The relative path to the training script.",
"# constructs a Custom Training Job using a Python script\njob = # TODO: Your code goes here\n\nprint(job)",
"Run the custom training job\nNext, you run the custom job to start the training job by invoking the method run, with the following parameters:\n\nreplica_count: The number of compute instances for training (replica_count = 1 is single node training).\nmachine_type: The machine type for the compute instances.\nbase_output_dir: The Cloud Storage location to write the model artifacts to.\nsync: Whether to block until completion of the job.",
"MODEL_DIR = \"{}/{}\".format(BUCKET_NAME, TIMESTAMP)\n\n\njob.run(\n replica_count=1, machine_type=TRAIN_COMPUTE, base_output_dir=MODEL_DIR, sync=True\n)\n\nMODEL_DIR = MODEL_DIR + \"/model\"\nmodel_path_to_deploy = MODEL_DIR",
"The custom training job will take some time to complete.\nUpload the model (general.import-model)\nNext, upload your model to a Model resource using Model.upload() method, with the following parameters:\n\ndisplay_name: The human readable name for the Model resource.\nartifact_uri: The Cloud Storage location of the trained model artifacts.\nserving_container_image_uri: The serving container image.\nsync: Whether to execute the upload asynchronously or synchronously.\n\nIf the upload() method is run asynchronously, you can subsequently block until completion with the wait() method.",
"model = # TODO: Your code goes here\n\nmodel.wait()",
"Make batch predictions (predictions.batch-prediction)\nMake test items\nYou will use synthetic data as a test data items. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.",
"INSTANCES = [[1.4, 1.3, 5.1, 2.8], [1.5, 1.2, 4.7, 2.4]]",
"Make the batch input file\nNow make a batch input file, which you will store in your local Cloud Storage bucket. Each instance in the prediction request is a list of the form:\n [ [ content_1], [content_2] ]\n\n\ncontent: The feature values of the test item as a list.",
"import tensorflow as tf\n\ngcs_input_uri = BUCKET_NAME + \"/\" + \"test.jsonl\"\nwith tf.io.gfile.GFile(gcs_input_uri, \"w\") as f:\n for i in INSTANCES:\n f.write(str(i) + \"\\n\")\n\n! gsutil cat $gcs_input_uri",
"Make the batch prediction request\nNow that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters:\n\njob_display_name: The human readable name for the batch prediction job.\ngcs_source: A list of one or more batch request input files.\ngcs_destination_prefix: The Cloud Storage location for storing the batch prediction resuls.\ninstances_format: The format for the input instances, either 'csv' or 'jsonl'. Defaults to 'jsonl'.\npredictions_format: The format for the output predictions, either 'csv' or 'jsonl'. Defaults to 'jsonl'.\nmachine_type: The type of machine to use for training.\nsync: If set to True, the call will block while waiting for the asynchronous batch job to complete.",
"MIN_NODES = 1\nMAX_NODES = 1\n\nbatch_predict_job = # TODO: Your code goes here (\n job_display_name=\"iris_\" + TIMESTAMP,\n gcs_source=gcs_input_uri,\n gcs_destination_prefix=BUCKET_NAME,\n instances_format=\"jsonl\",\n predictions_format=\"jsonl\",\n model_parameters=None,\n machine_type=DEPLOY_COMPUTE,\n starting_replica_count=MIN_NODES,\n max_replica_count=MAX_NODES,\n sync=False,\n)\n\nprint(batch_predict_job)",
"Batch prediction request will take 25-30 mins to complete.\nGet the predictions\nNext, get the results from the completed batch prediction job.\nThe results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. Each file contains one or more prediction requests in a JSON format:\n\ninstance: The prediction request.\nprediction: The prediction response.",
"import json\n\nbp_iter_outputs = batch_predict_job.iter_outputs()\n\nprediction_results = list()\nfor blob in bp_iter_outputs:\n if blob.name.split(\"/\")[-1].startswith(\"prediction\"):\n prediction_results.append(blob.name)\n\ntags = list()\nfor prediction_result in prediction_results:\n gfile_name = f\"gs://{bp_iter_outputs.bucket.name}/{prediction_result}\"\n with tf.io.gfile.GFile(name=gfile_name, mode=\"r\") as gfile:\n for line in gfile.readlines():\n line = json.loads(line)\n print(line)\n break",
"Make online predictions (predictions.deploy-model-api)\nDeploy the model\nNext, deploy your model for online prediction. To deploy the model, you invoke the deploy method, with the following parameters:\n\ndeployed_model_display_name: A human readable name for the deployed model.\ntraffic_split: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs.\nIf only one model, then specify as { \"0\": 100 }, where \"0\" refers to this model being uploaded and 100 means 100% of the traffic.\nIf there are existing models on the endpoint, for which the traffic will be split, then use model_id to specify as { \"0\": percent, model_id: percent, ... }, where model_id is the model id of an existing model to the deployed endpoint. The percents must add up to 100.\nmachine_type: The type of machine to use for training.\nmin_replica_count: The number of compute instances to initially provision.\nmax_replica_count: The maximum number of compute instances to scale to. In this tutorial, only one instance is provisioned.",
"DEPLOYED_NAME = \"iris-\" + TIMESTAMP\n\nTRAFFIC_SPLIT = {\"0\": 100}\n\nMIN_NODES = 1\nMAX_NODES = 1\n\nendpoint = # TODO: Your code goes here",
"Model deployment will take some time to complete.\nMake test item (predictions.online-prediction-automl)\nYou will use synthetic data as a test data item. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.",
"INSTANCE = [1.4, 1.3, 5.1, 2.8]",
"Make the prediction\nNow that your Model resource is deployed to an Endpoint resource, you can do online predictions by sending prediction requests to the Endpoint resource.\nRequest\nThe format of each instance is:\n[feature_list]\n\nSince the predict() method can take multiple items (instances), send your single test item as a list of one test item.\nResponse\nThe response from the predict() call is a Python dictionary with the following entries:\n\nids: The internal assigned unique identifiers for each prediction request.\npredictions: The predicted confidence, between 0 and 1, per class label.\ndeployed_model_id: The Vertex AI identifier for the deployed Model resource which did the predictions.",
"instances_list = [INSTANCE]\n\nprediction = endpoint.predict(instances_list)\nprint(prediction)",
"Undeploy the model\nWhen you are done doing predictions, you undeploy the model from the Endpoint resouce. This deprovisions all compute resources and ends billing for the deployed model.",
"endpoint.undeploy_all()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
karthikrangarajan/intro-to-sklearn
|
04.Supervised Learning.ipynb
|
bsd-3-clause
|
[
"Learning Algorithms - Supervised Learning\n\nReminder: All supervised estimators in scikit-learn implement a fit(X, y) method to fit the model and a predict(X) method that, given unlabeled observations X, returns the predicted labels y. (direct quote from sklearn docs)\n\n\nGiven that Iris is a fairly small, labeled dataset with relatively few features...what algorithm would you start with and why?\n\n\n\"Often the hardest part of solving a machine learning problem can be finding the right estimator for the job.\"\n\"Different estimators are better suited for different types of data and different problems.\"\n\n<a href = \"http://scikit-learn.org/stable/tutorial/machine_learning_map/index.html\" style = \"float: right\">-Choosing the Right Estimator from sklearn docs</a>\n<b>An estimator for recognizing a new iris from its measurements</b>\n\nOr, in machine learning parlance, we <i>fit</i> an estimator on known samples of the iris measurements to <i>predict</i> the class to which an unseen iris belongs.\n\nLet's give it a try! (We are actually going to hold out a small percentage of the iris dataset and check our predictions against the labels)",
"from sklearn.datasets import load_iris\nfrom sklearn.cross_validation import train_test_split\n\n# Let's load the iris dataset\niris = load_iris()\nX, y = iris.data, iris.target\n\n# split data into training and test sets using the handy train_test_split func\n# in this split, we are \"holding out\" only one value and label (placed into X_test and y_test)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3)\n\n# Let's try a decision tree classification method\nfrom sklearn import tree\n\nt = tree.DecisionTreeClassifier(max_depth = 4,\n criterion = 'entropy', \n class_weight = 'balanced',\n random_state = 2)\nt.fit(X_train, y_train)\n\nt.score(X_test, y_test) # what performance metric is this?\n\n# What was the label associated with this test sample? (\"held out\" sample's original label)\n# Let's predict on our \"held out\" sample\ny_pred = t.predict(X_test)\nprint(y_pred)\n\n# fill in the blank below\n\n# how did our prediction do for first sample in test dataset?\nprint(\"Prediction: %d, Original label: %d\" % (y_pred[0], y_test[0])) # <-- fill in blank\n\n# Here's a nifty way to cross-validate (useful for quick model evaluation!)\nfrom sklearn import cross_validation\n\nt = tree.DecisionTreeClassifier(max_depth = 4,\n criterion = 'entropy', \n class_weight = 'balanced',\n random_state = 2)\n\n# splits, fits and predicts all in one with a score (does this multiple times)\nscore = cross_validation.cross_val_score(t, X, y)\nscore",
"QUESTIONS: What do these scores tell you? Are they too high or too low you think? If it's 1.0, what does that mean?\nWhat does the graph look like for this decision tree? i.e. what are the \"questions\" and \"decisions\" for this tree...\n\nNote: You need both Graphviz app and the python package graphviz (It's worth it for this cool decision tree graph, I promise!)\nTo install both on OS X:\nsudo port install graphviz\nsudo pip install graphviz\nFor general Installation see this guide",
"from sklearn.tree import export_graphviz\nimport graphviz\n\n# Let's rerun the decision tree classifier\nfrom sklearn import tree\n\nt = tree.DecisionTreeClassifier(max_depth = 4,\n criterion = 'entropy', \n class_weight = 'balanced',\n random_state = 2)\nt.fit(X_train, y_train)\n\nt.score(X_test, y_test) # what performance metric is this?\n\nexport_graphviz(t, out_file=\"mytree.dot\", \n feature_names=iris.feature_names, \n class_names=iris.target_names, \n filled=True, rounded=True, \n special_characters=True)\n\nwith open(\"mytree.dot\") as f:\n dot_graph = f.read()\n\ngraphviz.Source(dot_graph, format = 'png')",
"From Decision Tree to Random Forest",
"from sklearn.datasets import load_iris\nimport pandas as pd\nimport numpy as np\n\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3)\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nforest = RandomForestClassifier(max_depth=4,\n criterion = 'entropy', \n n_estimators = 100, \n class_weight = 'balanced',\n n_jobs = -1,\n random_state = 2)\n\n#forest = RandomForestClassifier()\nforest.fit(X_train, y_train)\n\ny_preds = iris.target_names[forest.predict(X_test)]\n\nforest.score(X_test, y_test)\n\n# Here's a nifty way to cross-validate (useful for model evaluation!)\nfrom sklearn import cross_validation\n\n# reinitialize classifier\nforest = RandomForestClassifier(max_depth=4,\n criterion = 'entropy', \n n_estimators = 100, \n class_weight = 'balanced',\n n_jobs = -1,\n random_state = 2)\n\nscore = cross_validation.cross_val_score(forest, X, y)\nscore",
"QUESTION: Comparing to the decision tree method, what do these accuracy scores tell you? Do they seem more reasonable?\nSplitting into train and test set vs. cross-validation\n<p>We can be explicit and use the `train_test_split` method in scikit-learn ( [train_test_split](http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.train_test_split.html) ) as in (and as shown above for `iris` data):<p>\n\n```python\n# Create some data by hand and place 70% into a training set and the rest into a test set\n# Here we are using labeled features (X - feature data, y - labels) in our made-up data\nimport numpy as np\nfrom sklearn import linear_model\nfrom sklearn.cross_validation import train_test_split\nX, y = np.arange(10).reshape((5, 2)), range(5)\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.70)\nclf = linear_model.LinearRegression()\nclf.fit(X_train, y_train)\n```\n\nOR\n\nBe more concise and\n\n```python\nimport numpy as np\nfrom sklearn import cross_validation, linear_model\nX, y = np.arange(10).reshape((5, 2)), range(5)\nclf = linear_model.LinearRegression()\nscore = cross_validation.cross_val_score(clf, X, y)\n```\n\n<p>There is also a `cross_val_predict` method to create estimates rather than scores and is very useful for cross-validation to evaluate models ( [cross_val_predict](http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.cross_val_predict.html) )\n\nCreated by a Microsoft Employee.\n\nThe MIT License (MIT)<br>\nCopyright (c) 2016 Micheleen Harris"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
cdawei/flickr-photo
|
src/flickr_analysis.ipynb
|
gpl-2.0
|
[
"Trajectory Analysis\nIn this notebook file, we will give a detail analysis of trajectories extracted from trajectory_construction notebook file.\nThe analysis is based on two files trajectory_photos.csv and trajectory_stats.csv. The first file keeps the detail information about each point(photo/video) in each trajectory, and the second file keeps the detail statistics about each trajectory.\nAnd at the end of the notebook, we will show how to generate KML data which helps to plot trajectories on various map services (e.g. Google Map), and show some example of interesting trajectories on the Google Map.\nTable of Contents\n\n1. Basic data files\n1.1. Contents of trajectory table\n1.2. Contents of trajectory statistics table\n\n\n2. Basic Statistics\n2.1. Number of users, trajectories, and average trajectories per user\n2.2. Min/Max/Median/Mean of each attribute\n2.3 Number of Photos by year\n\n\n3. Distributions\n3.1. Bar chart: Accuracy of photos\n3.2. Histogram: Travel time\n3.3. Histogram: Distances of trajectory\n3.4. Histogram: Average Speed \n\n\n4. Trajectory Visualization\n4.1. Generating KML files with trajectory ID\n4.2. Example of interesting trajectories\n\n\n\nBefore analyzing, we need to include some libraries.",
"%matplotlib inline\n\nimport os\nimport matplotlib\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntoday = pd.datetime.strftime(pd.datetime.today(),'%Y%m%d')",
"1. Basic data files\nPandas provide various data analysis tools. We will load two trajectory table files using Pandas library.",
"# read data and convert timestamps\ndata_dir = '../data/'\nphoto_table = os.path.join(data_dir, 'trajectory_photos.csv')\ntraj_table = os.path.join(data_dir, 'trajectory_stats.csv')\ntraj = pd.read_csv(photo_table, delimiter=',', parse_dates=[3], skipinitialspace=True)\ntraj_stats = pd.read_csv(traj_table, delimiter=',', parse_dates=[3], skipinitialspace=True)",
"1.1. Contents of trajectory table\nThe first table is from trajectory-photos.csv file.\nHere's five sample entries from the trajectory table. Each entry of trajectory table corresponds to single photo/video.\nThe table consists of following attributes(columns):\n\nTrajectory_ID: trajectory ID of entry (multiple entries belong to the same trajectory will have the same trajectory ID)\nPhoto_ID: Unique Photo ID of entry\nUser_ID: User ID\nTimestamp: Timestamp of when the photo was taken\nLongitude: Longitude of entry \nLatitude: Latitude of entry\nAccuracy: GPS Accuracy level (16 - the most accurate, 1 - the least accurate)\nMarker: 0 if the entry is photo, 1 if the entry is video\nURL: flickr URL to the entry",
"traj.head()",
"1.2. Contents of trajectory statistics table\nThe second table is about statistics for each trajectory. Each entry of this table corresponds to single trajectory.\nThis table consists of following attributes(columns):\n\nTrajectory_ID: Unique trajectory ID\nUser_ID: User ID\n#Photo: Number of photos in the trajectory\nStart_Time: When the first photo was taken\nTravel_Distance(km): Sum of the distances between consecutive photos (Euclidean Distance)\nTotal_Time(min): The time gap between the first photo and the last photo\nAverage_Speed(km/h): Travel_Distances(km)/Total_Time(h)",
"traj_stats.head()",
"2. Basic Statistics\n2.1. Number of users, trajectories, and average trajectories per user",
"num_user = traj_stats['User_ID'].unique().size\nnum_traj = traj_stats['Trajectory_ID'].unique().size\navg_traj_per_user = num_traj/num_user\nprint('# users :', num_user)\nprint('# trajectories :', num_traj)\nprint('Average trajectories per user :', avg_traj_per_user)",
"2.2. Min/Max/Median/Mean of each attribute",
"basic_stats = pd.DataFrame([traj_stats.min(), traj_stats.max(), traj_stats.median(), traj_stats.mean()], \\\n index=['min','max', 'median', 'mean'])\nbasic_stats.drop('Start_Time', axis=1, inplace=True)\nbasic_stats.drop('Trajectory_ID', axis=1, inplace=True)\nbasic_stats.drop('User_ID', axis=1, inplace=True)\nbasic_stats",
"2.3 Number of Photos by year\nHere's the bar chart that plots the number of photos taken by each year.",
"yeardict = dict()\nfor i in traj.index:\n dt = traj.ix[i]['Timestamp']\n if dt.year not in yeardict: yeardict[dt.year] = 1\n else: yeardict[dt.year] += 1\n\nplt.figure(figsize=[9, 5])\nplt.xlabel('Year')\nplt.ylabel('#Photos')\nX = list(sorted(yeardict.keys()))\nY = [yeardict[x] for x in X]\nplt.bar(X, Y, width=1)",
"3. Distributions\n3.1. Bar chart: Accuracy of photos\nThe original dataset provide the accuracy of geo-tag for each photo. Accuracy of 16 is the most accurate and the accuracy of 1 is the least accurate. (The default accuracy is set to 16 by Flickr when the user does not provide any accuracy information about photo.)\nHere's the Description about accuracy in Tumblr API",
"print(\"Number and % of points at max accuracy (16)\", len(traj[traj['Accuracy']==16]), \\\n 1.*len(traj[traj['Accuracy']==16])/len(traj))\nprint(\"Number and % of points at accuracy >=12\", len(traj[traj['Accuracy']>=11]), \\\n 1.*len(traj[traj['Accuracy']>=11])/len(traj))\n\nax1 = plt.figure(figsize=[10,3]).add_subplot(111)\ntraj.hist(column=['Accuracy'], bins=15, ax=ax1)",
"3.2. Histogram: Travel time",
"ax1 = plt.figure(figsize=[10,3]).add_subplot(111)\ntraj_stats.hist(column='Total_Time(min)', bins=50, ax=ax1)",
"3.3. Histogram: Distances of trajectory",
"ax1 = plt.figure(figsize=[10,3]).add_subplot(111)\ntraj_stats.hist(column='Travel_Distance(km)', bins=50, ax=ax1)",
"3.4. Histogram: Average Speed",
"ax1 = plt.figure(figsize=[10,3]).add_subplot(111)\ntraj_stats.hist(column='Average_Speed(km/h)', bins=50, ax=ax1)",
"4. Trajectory Visualization\nKML file is a useful tool to visualize trajectories on commercial map services such as Google Map. \nIn this section, we provide how to generate KML files for visualize trajectories on the map of Melbourne.\n4.1. Generating KML files with trajectory ID\nWe implemented KML file generator in traj_visualise.py file. So let's first import that file.",
"import traj_visualise # for visualization on map",
"traj_visualise.gen_kml function takes list of trajectories as an input and generate KML files for the list of trajectories.\npython\ndef gen_kml(fname, traj_data, traj_stats, traj_id_list, traj_name_list=None)\n\nfname: output file path\ntraj_data: Trajectory table\ntraj_stats: Trajectory stat table\ntraj_id_list: List of trajectory IDs\ntraj_name_list: List of names for each trajectory in traj_id_list\n\nUpload the generated KML files to my Google map helps navigating trajectories over map.\n4.2. Example of interesting trajectories\nIn this section, we show statistics of some extreme cases in our dataset and plot these trajectories on Google map.\nTrajectory with the most number of photos: Link to Google Map\n<img src=\"./img/most_photos.png\" style=\"width: 600px;\"/>",
"mostphoto_idx = traj_stats['#Photo'].idxmax()\nmostphoto_traj_id = traj_stats.ix[mostphoto_idx].Trajectory_ID\n\noutput_file = '../data/most_photos.kml'\ntraj_visualise.gen_kml(output_file, traj, traj_stats, [mostphoto_traj_id], ['The most number of photos'])\ntraj_stats.ix[mostphoto_idx]",
"Trajectory with the longest travel time: Link to Google Map\n<img src=\"./img/longest_time.png\" style=\"width: 600px;\"/>",
"time_idx = traj_stats['Total_Time(min)'].idxmax()\ntime_traj_id = traj_stats.ix[time_idx].Trajectory_ID\n\noutput_file = '../data/longest_time.kml'\ntraj_visualise.gen_kml(output_file, traj, traj_stats, [time_traj_id], ['The longest travel time'])\ntraj_stats.ix[time_idx]",
"Longest travel distance: Link to Google Map\n<img src=\"./img/longest_distance.png\" style=\"width: 600px;\"/>",
"longest_idx = traj_stats['Travel_Distance(km)'].idxmax()\nlongest_traj_id = traj_stats.ix[longest_idx].Trajectory_ID\n\noutput_file = '../data/longest.kml'\ntraj_visualise.gen_kml(output_file, traj, traj_stats, [longest_traj_id], ['longest_traj'])\ntraj_stats.ix[longest_idx]",
"Fastest trajectory: Link to Google Map\n<img src=\"./img/fastest_speed.png\" style=\"width: 600px;\"/>",
"fastest_idx = traj_stats['Average_Speed(km/h)'].idxmax()\nfastest_traj_id = traj_stats.ix[fastest_idx].Trajectory_ID\n\noutput_file = '../data/fastest.kml'\ntraj_visualise.gen_kml(output_file, traj, traj_stats, [fastest_traj_id], ['fastest_traj'])\ntraj_stats.ix[fastest_idx]"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
chloeyangu/BigDataAnalytics
|
The Airbnb Scoop/Source Code/2. Data Preparation Part 1 (Listings).ipynb
|
mit
|
[
"From Command Line - Import CSV file (Raw Data) into MongoDB\nmongoimport --db airbnb --type csv --file listings_new.csv -c listings_new\nmongoimport --db airbnb --type csv --file barcelona_attractions.csv -c attractions",
"import pymongo\nfrom pymongo import MongoClient",
"Connect Python to MongoDB",
"client = MongoClient('mongodb://localhost:27017/')",
"Retrieve from Database\nDatabase named as \"airbnb\"",
"db = client.airbnb",
"Retrieve Tables from Database",
"listings = db.listings_new\nattractions = db.attractions",
"Store data in a pandas dataframe for further analysis",
"import pandas as pd\n\nlistings_df = pd.DataFrame(list(db.listings_new.find()))\n\nlistings_df.head()\n\nlistings_df.columns.values",
"Convert numeric variables",
"listings_df = listings_df.convert_objects(convert_numeric=True)\n\nlistings_df['price'] = listings_df['price'].str[1:]\nlistings_df['price'] = listings_df.price.replace(',', '',regex=True)\nlistings_df['price'] = listings_df.price.astype(float).fillna(0.0)\n\nlistings_df['extra_people'] = listings_df['extra_people'].str[1:]\nlistings_df['extra_people'] = listings_df.extra_people.replace(',', '',regex=True).replace('', '0',regex=True)\nlistings_df['extra_people'] = listings_df.extra_people.astype(float).fillna(0.0)\n\nlistings_df['weekly_price'] = listings_df['weekly_price'].str[1:]\nlistings_df['weekly_price'] = listings_df.weekly_price.replace(',', '',regex=True).replace('', '0',regex=True)\nlistings_df['weekly_price'] = listings_df.weekly_price.astype(float).fillna(0.0)\n\nlistings_df['monthly_price'] = listings_df['monthly_price'].str[1:]\nlistings_df['monthly_price'] = listings_df.monthly_price.replace(',', '',regex=True).replace('', '0',regex=True)\nlistings_df['monthly_price'] = listings_df.monthly_price.astype(float).fillna(0.0)\n\nlistings_df['security_deposit'] = listings_df['security_deposit'].str[1:]\nlistings_df['security_deposit'] = listings_df.security_deposit.replace(',', '',regex=True).replace('', '0',regex=True)\nlistings_df['security_deposit'] = listings_df.security_deposit.astype(float).fillna(0.0)\n\nlistings_df['cleaning_fee'] = listings_df['cleaning_fee'].str[1:]\nlistings_df['cleaning_fee'] = listings_df.cleaning_fee.replace(',', '',regex=True).replace('', '0',regex=True)\nlistings_df['cleaning_fee'] = listings_df.cleaning_fee.astype(float).fillna(0.0)",
"Convert Amenities to Dummy Variables",
"listings_df['amenities_split'] = listings_df[\"amenities\"].apply(lambda x: x[1:-1].split(','))\n\n#Get unique amenities\nunique_amenities = list(set(x for l in listings_df[\"amenities_split\"] for x in l))\nunique_amenities = unique_amenities[0:2] + unique_amenities[3:]\nunique_amenities\n\nnum_col = len(unique_amenities) #number of columns\ndata_array = []\nfor n in range(0, len(listings_df)):\n lst = []\n for i in range (0, num_col):\n row = listings_df[\"amenities_split\"][n]\n if unique_amenities[i] in row:\n lst.append(1)\n else:\n lst.append(0)\n data_array.append(lst)\n\ndf = pd.DataFrame(data_array, columns=unique_amenities)\n\nlistings_df2 = listings_df.join(df)\n\nlistings_df2.head()",
"Combine Attractions Data",
"attractions = pd.DataFrame(list(db.attractions.find()))\n\nattractions.head()\n\n#Calculate distance between 2 lat long points\n#Returns distance in km\ndef distance(lat1, long1, lat2, long2):\n from math import sin, cos, sqrt, atan2, radians\n\n # approximate radius of earth in km\n R = 6373.0\n\n lat1 = radians(lat1)\n long1 = radians(long1)\n lat2 = radians(lat2)\n long2 = radians(long2)\n\n dlong = long2 - long1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlong / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n\n return distance\n\nfor n in range(0, len(listings_df2)):\n nearest_attr = attractions['attraction'][0]\n nearest_attr_rating = attractions['rating'][0]\n nearest_attr_lat = attractions['lat'][0]\n nearest_attr_long = attractions['long'][0]\n\n list_lat = listings_df2['latitude'][n]\n list_long = listings_df2['longitude'][n]\n\n #Distance from first attraction to listing\n dist_nearest = distance(list_lat, list_long, nearest_attr_lat, nearest_attr_long)\n\n for i in range(1, len(attractions)):\n attr_lat = attractions['lat'][i]\n attr_long = attractions['long'][i]\n dist = distance(list_lat, list_long, attr_lat, attr_long)\n\n if dist < dist_nearest:\n nearest_attr = attractions['attraction'][i]\n nearest_attr_rating = attractions['rating'][i]\n nearest_attr_lat = attractions['lat'][i]\n nearest_attr_long = attractions['long'][i]\n dist_nearest = dist \n\n listings_df2.loc[n, 'nearest_attr'] = nearest_attr\n listings_df2.loc[n, 'nearest_attr_rating'] = nearest_attr_rating\n listings_df2.loc[n, 'nearest_attr_lat'] = nearest_attr_lat\n listings_df2.loc[n, 'nearest_attr_long'] = nearest_attr_long\n listings_df2.loc[n, 'nearest_attr_dist'] = dist_nearest\n\nlistings_df2.head()\n\n#listings_df2.to_csv(\"listings_31Mar.csv\")"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ramhiser/Keras-Tutorials
|
notebooks/04_deep_multilayer_perceptron.ipynb
|
mit
|
[
"from IPython.display import Image, SVG\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\nimport numpy as np\nimport keras\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\n\nfrom keras.utils.vis_utils import model_to_dot",
"Data Loading and Preprocessing",
"# Loads the training and test data sets\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nfirst_image = X_train[0, :, :]\n\n# To interpret the values as a 28x28 image, we need to reshape\n# the numpy array, which is one dimensional.\nplt.imshow(first_image, cmap=plt.cm.Greys);\n\nnum_classes = len(np.unique(y_train))\nnum_classes\n\n# 60K training 28 x 28 (pixel) images\nX_train.shape\n\n# 10K test 28 x 28 (pixel) images\nX_test.shape\n\ninput_dim = np.prod(X_train.shape[1:])\ninput_dim\n\n# The training and test data sets are integers, ranging from 0 to 255.\n# We reshape the training and test data sets to be matrices with 784 (= 28 * 28) features.\nX_train = X_train.reshape(60000, input_dim).astype('float32')\nX_test = X_test.reshape(10000, input_dim).astype('float32')\n\n# Scales the training and test data to range between 0 and 1.\nmax_value = X_train.max()\nX_train /= max_value\nX_test /= max_value\n\n# The training and test labels are integers from 0 to 9 indicating the class label\n(y_train, y_test)\n\n# We convert the class labels to binary class matrices\ny_train = np_utils.to_categorical(y_train, num_classes)\ny_test = np_utils.to_categorical(y_test, num_classes)",
"Multilayer Perceptron\nIn this example, we increase the depth of the network to have two hidden layers.\nWe also apply dropout, which is a regularization technique that randomly drops a percentage of the nodes (neurons) in the network.",
"model = Sequential()\nmodel.add(Dense(512, activation='relu', input_shape=(input_dim,)))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(num_classes, activation='softmax'))",
"Different Ways to Summarize Model",
"model.summary()\n\nSVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))\n\nimport json\njson.loads(model.to_json())",
"Train Classifier",
"# Trains the model, iterating on the training data in batches of 32 in 3 epochs.\n# Using the Adam optimizer.\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(X_train, y_train, batch_size=32, epochs=3, verbose=1)",
"Model Evaluation",
"# Test accuracy is ~98%.\nmodel.evaluate(X_test, y_test)",
"Predicting a Couple of Held-Out Images",
"first_test_image = X_test[0, :]\nplt.imshow(first_test_image.reshape(28, 28), cmap=plt.cm.Greys);\n\nsecond_test_image = X_test[1, :]\nplt.imshow(second_test_image.reshape(28, 28), cmap=plt.cm.Greys);\n\nmodel.predict_classes(X_test[[0, 1], :])"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Becksteinlab/PSAnalysisTutorial
|
psa_short.ipynb
|
gpl-3.0
|
[
"Performing a quick distance comparison using PSA\nIn this example, the trajectories have been pre-aligned using the fitting scheme described in:\nS.L. Seyler, A. Kumar, M.F. Thorpe, and O. Beckstein, Path\nSimilarity Analysis: a Method for Quantifying Macromolecular\nPathways. arXiv:1505.04807v1_ [q-bio.QM], 2015.",
"%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\n# Suppress FutureWarning about element-wise comparison to None\n# Occurs when calling PSA plotting functions\nimport warnings\nwarnings.filterwarnings('ignore')",
"1) Set up input data for PSA using MDAnalysis",
"from MDAnalysis import Universe\nfrom MDAnalysis.analysis.psa import PSAnalysis\nfrom pair_id import PairID",
"Initialize lists for the methods on which to perform PSA. PSA will be performed for four different simulations methods with three runs for each: DIMS, FRODA, rTMD-F, and rTMD-S. Also initialize a PSAIdentifier object to keep track of the data corresponding to comparisons between pairs of simulations.",
"method_names = ['DIMS', 'FRODA', 'GOdMD', 'MDdMD', 'rTMD-F', 'rTMD-S',\n 'ANMP', 'iENM', 'MAP', 'MENM-SD', 'MENM-SP',\n 'Morph', 'LinInt']\nlabels = [] # Heat map labels\nsimulations = [] # List of simulation topology/trajectory filename pairs\nuniverses = [] # List of MDAnalysis Universes representing simulations",
"For each method, get the topology and each of three total trajectories (per method). Each simulation is represented as a (topology, trajectory) pair of file names, which is appended to a master list of simulations.",
"for method in method_names:\n # Note: DIMS uses the PSF topology format\n topname = 'top.psf' if 'DIMS' in method or 'TMD' in method else 'top.pdb'\n pathname = 'fitted_psa.dcd'\n method_dir = 'methods/{}'.format(method)\n if method is not 'LinInt':\n for run in xrange(1, 4): # 3 runs per method\n run_dir = '{}/{:03n}'.format(method_dir, run)\n topology = '{}/{}'.format(method_dir, topname)\n trajectory = '{}/{}'.format(run_dir, pathname)\n labels.append(method + '(' + str(run) + ')')\n simulations.append((topology, trajectory))\n else: # only one LinInt trajectory\n topology = '{}/{}'.format(method_dir, topname)\n trajectory = '{}/{}'.format(method_dir, pathname)\n labels.append(method)\n simulations.append((topology, trajectory))",
"Generate a list of universes from the list of simulations.",
"for sim in simulations:\n universes.append(Universe(*sim))",
"2) Compute and plot all-pairs distances using PSA\nInitialize a PSA comparison from the universe list using a C$_\\alpha$ trajectory representation, then generate PSA Paths from the universes.",
"psa_short = PSAnalysis(universes, path_select='name CA', labels=labels)\npsa_short.generate_paths()",
"Computing mutual distances using Hausdorff and (discrete) Fréchet path metrics\nHausdorff: compute the Hausdorff distances between all unique pairs of Paths and store the distance matrix.",
"psa_short.run(metric='hausdorff')\nhausdorff_distances = psa_short.get_pairwise_distances()",
"Plot clustered heat maps using Ward hierarchical clustering. The first heat map is plotted with the corresponding dendrogram and is fully labeled by the method names; the second heat map is annotated by the Hausdorff distances.",
"psa_short.plot(filename='dh_ward_psa-short.pdf', linkage='ward');\n\npsa_short.plot_annotated_heatmap(filename='dh_ward_psa-short_annot.pdf', linkage='ward');",
"Fréchet: compute the (discrete) Fréchet distances between all unique pairs of Paths and store the distance matrix.",
"psa_short.run(metric='discrete_frechet')\nfrechet_distances = psa_short.get_pairwise_distances()",
"As above, plot heat maps for (discrete) Fréchet distances.",
"psa_short.plot(filename='df_ward_psa-short.pdf', linkage='ward');\n\npsa_short.plot_annotated_heatmap(filename='df_ward_psa-short_annot.pdf', linkage='ward');",
"3) Extract specific data from PSA\nGet the Simulation IDs and PSA ID for the second DIMS simulation (DIMS 2) and third rTMD-F simulation (rTMD-F 3).",
"identifier = PairID()\nfor name in method_names:\n run_ids = [1] if 'LinInt' in name else [1,2,3]\n identifier.add_sim(name, run_ids)\n\nsid1 = identifier.get_sim_id('DIMS 2')\nsid2 = identifier.get_sim_id('rTMD-F 3')\npid = identifier.get_pair_id('DIMS 2', 'rTMD-F 3')",
"Use the Simulation IDs to locate Hausdorff and (discrete) Fréchet distances DIMS 2/rTMD-F 3 comparison:",
"print hausdorff_distances[sid1,sid2]\nprint frechet_distances[sid1,sid2]",
"Use the Pair ID when the distances are in the form of a distance vector (see scipy.spatial.distance.squareform)",
"from scipy.spatial.distance import squareform\nhausdorff_vectorform = squareform(hausdorff_distances)\nfrechet_vectorform = squareform(frechet_distances)\n\nprint hausdorff_vectorform[pid]\nprint frechet_vectorform[pid]",
"Check that data obtained from the distance matrix is the same as that accessed from the distance vector",
"print hausdorff_distances[sid1,sid2] == hausdorff_vectorform[pid]\nprint frechet_distances[sid1,sid2] == frechet_vectorform[pid]"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Juanlu001/poliastro
|
docs/source/examples/Going to Mars with Python using poliastro.ipynb
|
mit
|
[
"Going to Mars with Python using poliastro\n<img src=\"https://docs.poliastro.space/en/latest/_images/logo_text.png\" />\nThis is an example on how to use poliastro, a little library I've been working on to use in my Astrodynamics lessons. It features conversion between classical orbital elements and position vectors, propagation of Keplerian orbits, initial orbit determination using the solution of the Lambert's problem and orbit plotting.\nIn this example we're going to draw the trajectory of the mission Mars Science Laboratory (MSL), which carried the rover Curiosity to the surface of Mars in a period of something less than 9 months.\nNote: This is a very simplistic analysis which doesn't take into account many important factors of the mission, but can serve as an starting point for more serious computations (and as a side effect produces a beautiful plot at the end).\nFirst of all, we import the necessary modules. Apart from poliastro we will make use of astropy to deal with physical units and time definitions and jplephem to compute the positions and velocities of the planets.",
"import numpy as np\n\nimport astropy.units as u\nfrom astropy import time\n\nfrom poliastro import iod\nfrom poliastro.bodies import Earth, Mars, Sun\nfrom poliastro.ephem import Ephem\nfrom poliastro.twobody import Orbit\nfrom poliastro.maneuver import Maneuver\nfrom poliastro.util import time_range\n\nimport plotly.io as pio\npio.renderers.default = \"notebook_connected\"",
"We need a binary file from NASA called SPICE kernel to compute the position and velocities of the planets. Astropy downloads it for us:",
"from astropy.coordinates import solar_system_ephemeris\nsolar_system_ephemeris.set(\"jpl\")",
"The initial data was gathered from Wikipedia: the date of the launch was on November 26, 2011 at 15:02 UTC and landing was on August 6, 2012 at 05:17 UTC. We compute then the time of flight, which is exactly what it sounds.",
"# Initial data\ndate_launch = time.Time(\"2011-11-26 15:02\", scale=\"utc\").tdb\ndate_arrival = time.Time(\"2012-08-06 05:17\", scale=\"utc\").tdb",
"To compute the transfer orbit, we have the useful function lambert : according to a theorem with the same name, the transfer orbit between two points in space only depends on those two points and the time it takes to go from one to the other. We could make use of the raw algorithms available in poliastro.iod for solving this but working with the poliastro.maneuvers is even easier!\nWe just need to create the orbits for each one of the planets at the specific departure and arrival dates.",
"earth = Ephem.from_body(Earth, time_range(date_launch, end=date_arrival))\nmars = Ephem.from_body(Mars, time_range(date_launch, end=date_arrival))\n\n# Solve for departure and target orbits\nss_earth = Orbit.from_ephem(Sun, earth, date_launch)\nss_mars = Orbit.from_ephem(Sun, mars, date_arrival)",
"We can now solve for the maneuver that will take us from Earth to Mars. After solving it, we just need to apply it to the departure orbit to solve for the transfer one.",
"# Solve for the transfer maneuver\nman_lambert = Maneuver.lambert(ss_earth, ss_mars)\n\n# Get the transfer and final orbits\nss_trans, ss_target = ss_earth.apply_maneuver(man_lambert, intermediate=True)",
"Let's plot this transfer orbit in 3D!",
"from poliastro.plotting import OrbitPlotter3D\n\nplotter = OrbitPlotter3D()\nplotter.set_attractor(Sun)\n\nplotter.plot_ephem(earth, date_launch, label=\"Earth at launch position\")\nplotter.plot_ephem(mars, date_arrival, label=\"Mars at arrival position\")\nplotter.plot_trajectory(\n ss_trans.sample(max_anomaly=180 * u.deg), color=\"black\", label=\"Transfer orbit\"\n)\nplotter.set_view(30 * u.deg, 260 * u.deg, distance=3 * u.km)",
"Not bad! Let's celebrate with some music!",
"from IPython.display import YouTubeVideo\nYouTubeVideo('zSgiXGELjbc')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ninoxcello/mscs710-project
|
waterfall_checkpoint/demo/Demo.ipynb
|
mit
|
[
"Waterfall Checkpoint Demo\nTeam: IBM (Itty Bitty Money)\n\nPatrick Handley\nBhargavi Madhunala\nMatt Maffa\nAntonino Tan-Marcello\n\nDate: 10-05-2017\nThis notebook is used for our waterfall checkpoint presentation demo.\nDependencies\n\nPython3\nPandas\nNumpy\nMatplotlib\nSeaborn",
"import numpy as np # Linear Alg\nimport pandas as pd # CSV file I/O & data processing\n\n# Visualization\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport seaborn as sns\nimport warnings \nfrom matplotlib import style\nfrom matplotlib.finance import candlestick_ohlc\nwarnings.filterwarnings(\"ignore\")\nstyle.use('ggplot')\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (12.0, 8.0)\n\nfrom subprocess import check_output",
"Dataset\nWe are using the Cryptocurrency Historical Prices dataset from Kaggle.",
"data_dir = '../../input'\n# Check what files our dataset contain\nprint('Our dataset contains the following files: \\n')\nprint(check_output([\"ls\", data_dir]).decode(\"utf8\"))",
"Bitcoin\nWe will explore the bitcoin data for the purpose of a demo.",
"# Reading in bitcoin price file\nbitcoin_price = pd.read_csv('{}/bitcoin_price.csv'.format(data_dir), parse_dates=['Date'], index_col=0)\nbitcoin_price.dtypes\n\n# Lets look at the first 10 rows\nbitcoin_price.head(10)\n\nbitcoin_price.tail(10)\n\nprint('Date of newest data: {}'.format(bitcoin_price.index[0]))\nprint('Date of oldest data: {}'.format(bitcoin_price.index[-1]))\n\nbtc_ohlc = ['Open', 'High', 'Low', 'Close']\n\nfor feat in btc_ohlc:\n print('---------------------------------------------')\n print('Statistics for Bitcoin {} values:'.format(feat))\n print(bitcoin_price[feat].describe())\n\n# Lets plot the Open, Close, High, Low values on a line plot\nfor feat in btc_ohlc:\n plt.plot(bitcoin_price[feat], label=feat)\n\nplt.xlabel('Time(Yr-M)')\nplt.ylabel('Value(USD)')\nplt.legend()\nplt.show()\n\n# Lets looks at the more recent data\nn_days = 365 # number of recent days\n\nfor feature in btc_ohlc:\n plt.plot(bitcoin_price[feature].iloc[:n_days], label=feature)\n\nplt.title('Pricing Trend(last year)')\nplt.xlabel('Time(Yr-M)')\nplt.ylabel('Value(USD)')\nplt.legend()\nplt.show()\n\n# Candlestick graph for Bitcoin Closing Price\nbtc_close_ohlc = bitcoin_price['Close'][:n_days].resample('10D').ohlc()\nbtc_close_ohlc.reset_index(inplace=True)\nbtc_close_ohlc['Date'] = btc_close_ohlc['Date'].map(mdates.date2num)\n\nfig, ax = plt.subplots()\n\ncandlestick_ohlc(ax, btc_close_ohlc.values, width=2, colorup='g')\nax.xaxis_date()\n\nplt.title('Candlestick Chart')\nplt.xlabel('Time(Yr-M)')\nplt.ylabel('Value(USD)')\nplt.legend()\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
PyLCARS/PythonUberHDL
|
myHDL_DigitalSignalandSystems/DigitalAnalog_AnalogDigital/DACDeltaSigma1Bit.ipynb
|
bsd-3-clause
|
[
"\\title{myHDL 1Bit $\\Delta\\Sigma$ Pulse Density Modulated Digital to Analog Converter}\n\\author{Steven K Armour}\n\\maketitle\nThis is revamp of the Original myHDL $\\Delta\\Sigma$ DAC Core done by the late George Pantazopoulos\n<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\" style=\"margin-top: 1em;\"><ul class=\"toc-item\"><li><span><a href=\"#References\" data-toc-modified-id=\"References-1\"><span class=\"toc-item-num\">1 </span>References</a></span></li><li><span><a href=\"#Setup\" data-toc-modified-id=\"Setup-2\"><span class=\"toc-item-num\">2 </span>Setup</a></span><ul class=\"toc-item\"><li><span><a href=\"#Libraries\" data-toc-modified-id=\"Libraries-2.1\"><span class=\"toc-item-num\">2.1 </span>Libraries</a></span></li><li><span><a href=\"#Helper-Function\" data-toc-modified-id=\"Helper-Function-2.2\"><span class=\"toc-item-num\">2.2 </span>Helper Function</a></span></li><li><span><a href=\"#Architecture-Setup\" data-toc-modified-id=\"Architecture-Setup-2.3\"><span class=\"toc-item-num\">2.3 </span>Architecture Setup</a></span></li></ul></li><li><span><a href=\"#$\\Delta$-Difference-Section\" data-toc-modified-id=\"$\\Delta$-Difference-Section-3\"><span class=\"toc-item-num\">3 </span>$\\Delta$ Difference Section</a></span><ul class=\"toc-item\"><li><span><a href=\"#myHDL-Implementation\" data-toc-modified-id=\"myHDL-Implementation-3.1\"><span class=\"toc-item-num\">3.1 </span>myHDL Implementation</a></span></li><li><span><a href=\"#myHDL-Testing\" data-toc-modified-id=\"myHDL-Testing-3.2\"><span class=\"toc-item-num\">3.2 </span>myHDL Testing</a></span></li><li><span><a href=\"#myHDL-to-Verilog\" data-toc-modified-id=\"myHDL-to-Verilog-3.3\"><span class=\"toc-item-num\">3.3 </span>myHDL to Verilog</a></span></li></ul></li><li><span><a href=\"#Comparator\" data-toc-modified-id=\"Comparator-4\"><span class=\"toc-item-num\">4 </span>Comparator</a></span><ul class=\"toc-item\"><li><span><a href=\"#myHDL--Implementation\" 
data-toc-modified-id=\"myHDL--Implementation-4.1\"><span class=\"toc-item-num\">4.1 </span>myHDL Implementation</a></span></li><li><span><a href=\"#myHDL-Testing\" data-toc-modified-id=\"myHDL-Testing-4.2\"><span class=\"toc-item-num\">4.2 </span>myHDL Testing</a></span></li><li><span><a href=\"#myHDL-to-Verilog\" data-toc-modified-id=\"myHDL-to-Verilog-4.3\"><span class=\"toc-item-num\">4.3 </span>myHDL to Verilog</a></span></li></ul></li><li><span><a href=\"#Rail-To-Rail-Digital-Scaler\" data-toc-modified-id=\"Rail-To-Rail-Digital-Scaler-5\"><span class=\"toc-item-num\">5 </span>Rail To Rail Digital Scaler</a></span><ul class=\"toc-item\"><li><span><a href=\"#myHDL-Implementation\" data-toc-modified-id=\"myHDL-Implementation-5.1\"><span class=\"toc-item-num\">5.1 </span>myHDL Implementation</a></span></li><li><span><a href=\"#myHDL-Testing\" data-toc-modified-id=\"myHDL-Testing-5.2\"><span class=\"toc-item-num\">5.2 </span>myHDL Testing</a></span></li><li><span><a href=\"#myHDL-to-Verilog\" data-toc-modified-id=\"myHDL-to-Verilog-5.3\"><span class=\"toc-item-num\">5.3 </span>myHDL to Verilog</a></span></li></ul></li><li><span><a href=\"#$\\Sigma$-Integrator\" data-toc-modified-id=\"$\\Sigma$-Integrator-6\"><span class=\"toc-item-num\">6 </span>$\\Sigma$ Integrator</a></span><ul class=\"toc-item\"><li><span><a href=\"#myHDL-Implementation\" data-toc-modified-id=\"myHDL-Implementation-6.1\"><span class=\"toc-item-num\">6.1 </span>myHDL Implementation</a></span></li><li><span><a href=\"#myHDL-Testing\" data-toc-modified-id=\"myHDL-Testing-6.2\"><span class=\"toc-item-num\">6.2 </span>myHDL Testing</a></span></li><li><span><a href=\"#myHDL-to-Verilog\" data-toc-modified-id=\"myHDL-to-Verilog-6.3\"><span class=\"toc-item-num\">6.3 </span>myHDL to Verilog</a></span></li></ul></li><li><span><a href=\"#1Bit-$\\Delta\\Sigma$-DAC\" data-toc-modified-id=\"1Bit-$\\Delta\\Sigma$-DAC-7\"><span class=\"toc-item-num\">7 </span>1Bit $\\Delta\\Sigma$-DAC</a></span><ul 
class=\"toc-item\"><li><span><a href=\"#myHDL-Implementation\" data-toc-modified-id=\"myHDL-Implementation-7.1\"><span class=\"toc-item-num\">7.1 </span>myHDL Implementation</a></span></li><li><span><a href=\"#myHDL-Testing\" data-toc-modified-id=\"myHDL-Testing-7.2\"><span class=\"toc-item-num\">7.2 </span>myHDL Testing</a></span></li><li><span><a href=\"#myHDL-to-Verilog\" data-toc-modified-id=\"myHDL-to-Verilog-7.3\"><span class=\"toc-item-num\">7.3 </span>myHDL to Verilog</a></span></li></ul></li></ul></div>\n\nReferences\n@article{cheung_raj_2005,\ntitle={Implementation of 12-bit delta-sigma DAC with MSC12xx controller},\nvolume={Q1},\nurl={http://www.ti.com/lit/an/slyt076/slyt076.pdf},\njournal={Analog Applications Journal},\nauthor={Cheung, Hugo and Raj, Sreeja},\nyear={2005},\npages={27-32}\n},\n@misc{pantazopoulos_2006,\ntitle={DSX1000 ?S DAC Core [MyHDL]},\nurl={http://old.myhdl.org/doku.php/projects:dsx1000},\nauthor={Pantazopoulos, George},\nyear={2006}\n}\nSetup\nLibraries",
"from myhdl import *\nimport pandas as pd\nfrom myhdlpeek import Peeker\nimport numpy as np",
"Helper Function",
"#helper functions to read in the .v and .vhd generated files into python\ndef VerilogTextReader(loc, printresult=True):\n with open(f'{loc}.v', 'r') as vText:\n VerilogText=vText.read()\n if printresult:\n print(f'***Verilog modual from {loc}.v***\\n\\n', VerilogText)\n return VerilogText\n\ndef VHDLTextReader(loc, printresult=True):\n with open(f'{loc}.vhd', 'r') as vText:\n VerilogText=vText.read()\n if printresult:\n print(f'***VHDL modual from {loc}.vhd***\\n\\n', VerilogText)\n return VerilogText",
"Architecture Setup",
"BitWidth=16\n#the max in excluded in intbv \nMaxV=int(2**(BitWidth-1)); MinV=-int(2**(BitWidth-1))\na=intbv(0)[BitWidth:]; a=a.signed()\nlen(a), a.min, MinV, a.max, MaxV",
"$\\Delta$ Difference Section\n<img src='DiffSect.png'>\nThe Delta ($\\Delta$) section takes in the input Digital word $x(n)$ signal and subtracts from the input word the feedback word which is\n$$\\Delta(n)=x(n)-\\begin{cases}\n 2^{W}-1, & \\text{if}\\ y(n)=1 \\\n 0, & \\text{if}\\ y(n)=0\n \\end{cases}$$\nwhere $y(n)$ is the output one bit from the DAC that after leaving the digital device still needs to processed by a Analog low pass filter. And $W$ is the architecture word length. The conversion from the the 1bit output to a word is done by the DigitalRailScaler and the output is feed to the Integrator ($\\Sigma$) section.",
"x1N=np.random.randint(MinV, MaxV, 20)\nx2N=np.random.randint(MinV, MaxV, 20)\nResultsDiff=pd.DataFrame(columns=['x1', 'x2', 'yN'])\nResultsDiff['x1']=x1N; ResultsDiff['x2']=x2N\nResultsDiff['yN']=x1N-x2N\n\nResultsDiff=ResultsDiff[ResultsDiff['yN']>=MinV]\nResultsDiff=ResultsDiff[ResultsDiff['yN']<=MaxV]\nResultsDiff.reset_index(drop=True, inplace=True)\nResultsDiff",
"myHDL Implementation",
"@block\ndef Difference(x1, x2, y):\n \"\"\"\n Prototype Delta Section such that `x1` is th input word, \n `x2` is the Feedback word and `y` is the ouput to the Sigma Section\n Inputs:\n x1 (2's): The From\n x2 (2's): the suptraction amount\n Outputs:\n y (2's): x1-x2\n \"\"\"\n @always_comb\n def logic():\n y.next=x1-x2\n return logic",
"myHDL Testing",
"Peeker.clear()\nx1=Signal(modbv(0, min=MinV, max=MaxV)); Peeker(x1, 'x1')\nx2=Signal(modbv(0, min=MinV, max=MaxV)); Peeker(x2, 'x2')\ny=Signal(modbv(0, min=MinV, max=MaxV)); Peeker(y, 'y')\n\nDUT=Difference(x1, x2, y)\n\ndef Difference_TB():\n for i, Row in ResultsDiff.iterrows():\n x1.next=int(Row['x1']); x2.next=int(Row['x2'])\n yield (delay(1))\n raise StopSimulation\n \nsim=Simulation(DUT, Difference_TB(), *Peeker.instances()).run()\nPeeker.to_wavedrom()\n\nResultsDiff=pd.merge(ResultsDiff, Peeker.to_dataframe().astype(int, copy=False), how='left')\nComp=(ResultsDiff['yN']==ResultsDiff['y']).all()\nprint(f'Compersion of Sim and myHDL equal: {Comp}')\nResultsDiff\n",
"myHDL to Verilog",
"if Comp:\n DUT.convert()\n VerilogTextReader('Difference')\n",
"Comparator\n<img src='Comparator.png'>\nThe Comparator section performers the same actions as would a Analog Computer does. It takes a input and compares that values to a reference where if the input is greater then the reference then the output is 1 else 0. Thus the Comparator performs a 1Bit quantization of the result of the Integration ($\\Sigma$) section. \nMathematically the 1Bit Digital Comparator is \n$$y(n)=\\begin{cases}\n 1, & \\text{if}\\ x>\\text{Ref} \\\n 0, & \\text{otherwise}\n \\end{cases}$$",
"xN=np.random.randint(MinV, MaxV, 20)\nRef=np.random.randint(MinV, MaxV, 20)\nResultsComp=pd.DataFrame(columns=['x', 'Ref', 'yN'])\nResultsComp['x']=x1N; ResultsComp['Ref']=Ref\n\nfor i , Row in ResultsComp.iterrows():\n ResultsComp['yN'].loc[i]=int(Row['x']>Row['Ref'])\n\nResultsComp",
"myHDL Implementation",
"@block\ndef Comparator(x, Ref, y):\n \"\"\"\n Prototype Comparator section used to convert word to binary \n ouput \n Inputs:\n x (2's): Input Word\n Ref (2's): Reference to compare too\n \n Outputs:\n y(bool): Outcome of Comparison; True if Input (`x`) is \n Greater Than the Reference, False Otherwise\n \"\"\"\n @always_comb\n def logic():\n if x>Ref:\n y.next=1\n else:\n y.next=0\n \n return logic",
"myHDL Testing",
"Peeker.clear()\nx=Signal(intbv(0, min=MinV, max=MaxV)); Peeker(x, 'x')\nRef=Signal(intbv(0, min=MinV, max=MaxV)); Peeker(Ref, 'Ref')\ny=Signal(bool(0)); Peeker(y, 'y')\n\nDUT=Comparator(x, Ref, y)\n\ndef Comparator_TB():\n for i, Row in ResultsComp.iterrows():\n x.next=int(Row['x']); Ref.next=int(Row['Ref'])\n yield (delay(1))\n raise StopSimulation\n \nsim=Simulation(DUT, Comparator_TB(), *Peeker.instances()).run()\nPeeker.to_wavedrom()\n\nResultsComp=pd.merge(ResultsComp, Peeker.to_dataframe().astype(int, copy=False), how='left')\nComp=(ResultsComp['yN']==ResultsComp['y']).all()\nprint(f'Compersion of Sim and myHDL equal: {Comp}')\nResultsComp\n",
"myHDL to Verilog",
"if Comp:\n DUT.convert()\n VerilogTextReader('Comparator')\n",
"Rail To Rail Digital Scaler\n<img src='RailToRail.png'>\nThe Rail to Rail Digital Scaler is part of the feedback loop of the $\\Delta\\Sigma$ DAC where it takes the 1Bit output and converts it to digital word that is feed to the Difference ($\\Delta$) Section. Is is called a Rail to Rail since the output range is to constrained to two extremes; where is this is similar to Rail to Rail behavior in Analog Circuits.\nThe Rail to Rail Digital Scaler can be modeled as \n$$y(n)=\\begin{cases}\n \\text{+Rail} , & \\text{if}\\ x(n)=1 \\\n \\text{-Rail}, & \\text{if}\\ x(n)=0\n \\end{cases}$$",
"x=np.random.randint(0, 2, 20)\nPRail=MaxV-1; NRail=MinV+1\nDigitalRailScaleComp=pd.DataFrame(columns=['x', 'yN'])\nDigitalRailScaleComp['x']=x\nDigitalRailScaleComp['yN']=[PRail if i is 1 else NRail for i in DigitalRailScaleComp['x']]\nDigitalRailScaleComp",
"myHDL Implementation",
"@block\ndef DigitalRailScaler(x, y, PRail, NRail):\n \"\"\"\n Prototype RailToRailDigital Scaler \n Inputs:\n x (bool): Binary Bit input select scaling\n Ouputs:\n y (2's): the Rail ouput word will be Positive Rail value\n if `x` is the else will be Negative Rail value\n Parms:\n PRail (int): Positive Word Value\n NRail (int): Negative Word Value\n \"\"\"\n @always_comb\n def logic():\n if x:\n y.next=PRail\n else:\n y.next=NRail\n return logic",
"myHDL Testing",
"Peeker.clear()\nx=Signal(bool(0)); Peeker(x, 'x')\ny=Signal(intbv(0, min=MinV, max=MaxV)); Peeker(y, 'y')\n\nDUT=DigitalRailScaler(x, y, PRail, NRail)\ndef DigitalRailScaler_TB():\n for i, Row in DigitalRailScaleComp.iterrows():\n x.next=int(Row['x'])\n yield (delay(1))\n raise StopSimulation\n \nsim=Simulation(DUT, DigitalRailScaler_TB(), *Peeker.instances()).run()\nPeeker.to_wavedrom()\n\nSimData=Peeker.to_dataframe().astype(int, copy=False, inplace=True)\nDigitalRailScaleComp=DigitalRailScaleComp.loc[SimData.index]\nDigitalRailScaleComp=pd.merge(DigitalRailScaleComp, SimData, left_index=True, right_index=True, how='left')\nDigitalRailScaleComp.drop('x_y', axis=1, inplace=True)\nDigitalRailScaleComp.rename(columns={'x_x':'x'}, inplace=True)\n\nComp=(DigitalRailScaleComp['yN']==DigitalRailScaleComp['y']).all()\nprint(f'Compersion of Sim and myHDL equal: {Comp}')\nDigitalRailScaleComp",
"myHDL to Verilog",
"if Comp:\n DUT.convert()\n VerilogTextReader('DigitalRailScaler')\n",
"$\\Sigma$ Integrator\n<img src='Integrator.png'>\nThe Integrator ($\\Sigma$) section is not a true integrator in the calculus since but instead acts as a accumulator of of the error from the $\\Delta$ sections feedback. Where then the accumulated error is feed to the Comparator to convert to a binary value. \nIn the discrete domain the Integrator is expressed as \n$$y(n)=y(n-1)+x(n)$$\nand in the frequency domain as either\n$$\\dfrac{Y}{X}=\\dfrac{1}{1-Z^{-1}}$$\nor \n$$Y=Z^{-1}Y+X$$\nwhere the $z^{-1}$ indicates the use of a D Register to store the past value of $y$",
"x=np.arange(-11, 13+1)\ny=[]\nfor i, xi in enumerate(x):\n if i==0:\n y.append(0)\n else:\n y.append(y[-1]+xi)\n\nIntegratorComp=pd.DataFrame(columns=['x', 'yN'])\nIntegratorComp['x']=x; IntegratorComp['yN']=y\nIntegratorComp",
"myHDL Implementation",
"@block\ndef Integrator(x, y, clk, rst):\n '''\n Prototype Simple Integrator/ Accumultor for the Sigma Section\n Inputs:\n x (2's): the x(n) data in feed\n ------------------------\n \n clk(bool): clock feed\n rst(bool): reset feed\n \n Outputs:\n y (2's): the y(n) output of y(n)=y(n-1)+x(n)\n \n '''\n @always(clk.posedge)\n def logic():\n if rst:\n y.next=0\n else:\n #y(n)=y(n-1)+x(n)\n y.next=y+x\n \n \n return logic\n ",
"myHDL Testing",
"Peeker.clear()\n\nx=Signal(intbv(0, min=MinV, max=MaxV)); Peeker(x, 'x')\ny=Signal(intbv(0, min=MinV, max=MaxV)); Peeker(y, 'y')\n\nclk, rst=[Signal(bool(0)) for _ in range(2)]\nPeeker(clk, 'clk'); Peeker(rst, 'rst')\n\nDUT=Integrator(x, y, clk, rst)\n\ndef Integrator_TB():\n \n @always(delay(1)) ## delay in nano seconds\n def clkGen():\n clk.next = not clk\n \n @instance\n def stimulus():\n for i, Row in IntegratorComp.iterrows():\n if i==0:\n x.next=0 \n elif i<(IntegratorComp.shape[0]-2):\n x.next=int(Row['x'])\n else:\n x.next=int(Row['x'])\n rst.next=True\n \n yield clk.posedge\n \n raise StopSimulation\n \n \n return instances()\nsim = Simulation(DUT, Integrator_TB(), *Peeker.instances()).run()\nPeeker.to_wavedrom()\n\nSimData=Peeker.to_dataframe()\nSimData=SimData.reindex(columns=['x', 'y', 'rst', 'clk'])\nSimData\n\n#remove clk and rst \nSimData=SimData[SimData.clk!=0]\nSimData=SimData[SimData.rst!=1]\nSimData.drop(['clk', 'rst'], axis=1, inplace=True)\nSimData.reset_index(drop=True, inplace=True)\nSimData.drop([SimData.index[-1]], axis=0, inplace=True)\nSimData\n\nIntegratorComp['x']=IntegratorComp.x.shift(-1)\nIntegratorComp=IntegratorComp.loc[SimData.index]\nIntegratorComp=IntegratorComp.astype(int, copy=False)\nIntegratorComp\n\nIntegratorComp=pd.merge(IntegratorComp, SimData,how='left')\nComp=(IntegratorComp['yN']==IntegratorComp['y']).all()\nprint(f'Compersion of Sim and myHDL equal: {Comp}')\nIntegratorComp\n",
"myHDL to Verilog",
"if Comp:\n DUT.convert()\n VerilogTextReader('Integrator')\n",
"1Bit $\\Delta\\Sigma$-DAC\n<img src='DAC.png'>\nThe final topology of the 1Bit $\\Delta\\Sigma$-DAC is shown in it's block diagram where the section that where developed above are are stitched via the following\nThe Delta Section, which consists the DAC input and the feedback subtraction after the binary output is converted to the feed back word performs the following\n$$\\Delta(n)=x(n)-\\begin{cases}\n 2^{W}-1, & \\text{if}\\ y(n)=1 \\\n 0, & \\text{if}\\ y(n)=0\n \\end{cases}$$\nThe Sigma section is the error integrator performing (note: $\\Sigma(n)$ is a variable and not summation from calculus)\n$$\\Sigma(n+1)=\\Sigma(n)+\\Delta(n)$$\nThe Comparator converts the output error to a binary output based on wither the sight of the $\\Sigma$ sections accumulated error is positive or negative where in this case 0 is defined to be negative resulting in\n$$y(n)=\\begin{cases}\n 1, & \\text{if}\\ \\Sigma(n)>0 \\\n 0, & \\text{otherwise}\n \\end{cases}$$",
"TestData=pd.DataFrame(columns=['Ref', 'QRef'])\nrefvals = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n\n# Generate the actual DAC codes based on the DAC resolution\ndac_codes = list()\n\nfor value in refvals:\n\n dac_code = int(value * 2**(BitWidth-2))\n\n if dac_code == 2**BitWidth:\n dac_code = 2**BitWidth-1\n elif dac_code > 2**BitWidth:\n raise Exception\n\n dac_codes.append(dac_code)\nTestData['Ref']=refvals; TestData['QRef']=dac_codes\nTestData\n\nclass DACSim:\n def __init__(self):\n self.yStore=[]\n self.FB=0\n self.Sigma=0\n \n def Action(self, x):\n self.DeltaAction(x, self.FB)\n self.SigmaAction(self.Delta)\n self.CompAction(self.Sigma)\n self.yStore.append(self.y)\n self.FBScale(self.y)\n \n def DeltaAction(self, x, FB):\n self.Delta=x-FB\n \n def SigmaAction(self, Delta):\n self.Sigma=self.Sigma+Delta\n \n def CompAction(self, Sigma):\n if Sigma>0:\n self.y=1\n else:\n self.y=0\n \n def FBScale(self, y):\n Scale=2**BitWidth-1\n if y==1:\n self.FB=Scale\n else:\n self.FB=0\n \n\nDAC=DACSim()\nDAC.Action(0)\nprint(f'x: {0}, Delta: {DAC.Delta}, Sigma: {DAC.Sigma}, y: {DAC.y}, FB: {DAC.FB}')\n\nfor i in TestData['QRef']:\n DAC.Action(i)\n print(f'x: {i}, Delta: {DAC.Delta}, Sigma: {DAC.Sigma}, y: {DAC.y}, FB: {DAC.FB}')\n",
"myHDL Implementation",
"@block\ndef DAC_1Bit(x, y, clk, rst):\n \"\"\"\n A 1bit SigmaDelta DAC based on \n http://old.myhdl.org/doku.php/projects:dsx1000\n using a Instantiation approach\n Inputs:\n x (2's): Input Digital word to be translated to PDM ouput\n -----------------------------\n clk (bool): system clock input\n rst (bool); reset signal\n Ouput:\n y (bool): the 1BIt Sigma Delta PDM output \n \"\"\"\n #Internal Parameter Calculations; Dont show up in conversion\n #Though values are set to the various subcomponets in the\n #Conversion\n \n RES = len(x)\n \n DREF_NEG = 0\n DREF_POS = (2**RES)-1\n \n MIN = -2**(RES-1)\n MAX = +2**(RES-1)\n \n #Buss and Wires\n #Comparter Ref: 0\n Ref=Signal(intbv(0)[RES:])\n #bus from the Delta Section to the Sigma Section\n diff_o = Signal(intbv(0, min=4*MIN, max=4*MAX))\n #But from the Sigma Section to the Comparator\n Intgral_o = Signal(intbv(0, min=4*MIN, max=4*MAX))\n #Wire from Compater to synchronized output\n comp_o = Signal(bool(0))\n #bus feedback from the DigitalRailScaler to the Delta Section\n ddc_o = Signal(intbv(0, min=4*MIN, max=4*MAX))\n \n \n # Delta\n Diff=Difference(x, ddc_o, diff_o)\n #Sigma\n Intgrat=Integrator(diff_o, Intgral_o,clk, rst)\n #Comparator agenst 0\n Comp=Comparator(Intgral_o, Ref, comp_o)\n #Digital Scale for FB: Scale is (2**RES)-1:0\n DDC=DigitalRailScaler(comp_o, ddc_o, DREF_POS, DREF_NEG)\n \n #create synchronized ouput\n @always(clk.posedge)\n def Ouput():\n if rst:\n y.next=0\n else:\n y.next=comp_o\n \n return instances()",
"myHDL Testing",
"Peeker.clear()\nclk, rst=[Signal(bool(0)) for i in range(2)]\nPeeker(clk, 'clk'); Peeker(rst, 'rst')\n\nx=Signal(intbv(0, min=MinV, max=MaxV)); Peeker(x, 'x')\ny=Signal(bool(0)); Peeker(y, 'y')\n\nDUT=DAC_1Bit(x, y, clk, rst)\n\ndef DAC_TB():\n \n @always(delay(1)) ## delay in nano seconds\n def clkGen():\n clk.next = not clk\n \n \n @instance\n def Stimules():\n for i, Row in TestData.iterrows():\n x.next=int(Row['QRef'])\n yield clk.posedge\n \n raise StopSimulation\n \n return instances()\n \n \n\nsim=Simulation(DUT, DAC_TB(), *Peeker.instances()).run()\nPeeker.to_wavedrom()\n\nSimData=Peeker.to_dataframe()\nSimData=SimData[SimData.clk!=0]\nSimData.drop(['clk', 'rst'], axis=1, inplace=True)\nSimData.reset_index(drop=True, inplace=True)\nSimData",
"myHDL to Verilog",
"DUT.convert()\nVerilogTextReader('DAC_1Bit');"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ajdawson/python_for_climate_scientists
|
course_content/solutions/iris_exercise_3.ipynb
|
gpl-3.0
|
[
"Extra exercise\n1. Load 'A1B_north_america.nc' from the iris sample data",
"import iris\nfilename = iris.sample_data_path(\"A1B_north_america.nc\")\ncube = iris.load_cube(filename)\nprint(cube)",
"2. Extract just data from the year 1980 and beyond from the loaded cube",
"tcoord = cube.coord('time')\n\ndef since_1980(cell):\n return tcoord.units.num2date(cell.point).year >= 1980\n\ntcon = iris.Constraint(time=since_1980)\ncube = cube.extract(tcon)\n\ntcoord = cube.coord('time')\n\nprint(tcoord.units.num2date(tcoord.points.min()))\nprint(tcoord.units.num2date(tcoord.points.max()))",
"3. Define a function which takes a coordinate and a single time point as arguments, and returns the decade. For example, your function should return 2010 for the following:\n time = iris.coords.DimCoord([10], 'time', units='days since 2018-01-01')\n print your_decade_function(time, time.points[0])",
"def get_decade(coord, point):\n year = coord.units.num2date(point).year\n return (year // 10) * 10\n\ntime = iris.coords.DimCoord([10], 'time', units='days since 2018-01-01')\nprint(get_decade(time, time.points[0]))",
"4. Add a \"decade\" coordinate to the loaded cube using your function and the coord categorisation module",
"import iris.coord_categorisation as coord_cat\n\ncoord_cat.add_categorised_coord(cube, 'decade', 'time', get_decade)\nprint(cube.coord('decade'))",
"5. Calculate the decadal means cube for this scenario",
"import iris.analysis\n\ncube = cube.aggregated_by('decade', iris.analysis.MEAN)\nprint(cube)",
"6. Create a figure with 3 rows and 4 columns displaying the decadal means, with the decade displayed prominently in each axes' title (hint: the slices or slices_over method of the cube will be helpful, especially combined with the built-in enumerate function in a for-loop)",
"import matplotlib.pyplot as plt\nimport iris.plot as iplt\n\nplt.figure(figsize=(12, 6))\n\nplt.suptitle('Decadal means for the A1B scenario')\nfor i, decade_cube in enumerate(cube.slices(['latitude', 'longitude'])):\n plt.subplot(3, 4, i+1)\n iplt.contourf(decade_cube, 20, cmap='viridis')\n plt.title('{}'.format(decade_cube.coord('decade').points[0]))\n plt.gca().coastlines()\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
alexandrnikitin/workshops
|
automated-feature-engineering-selection/notebooks/3-featuretools-scale.ipynb
|
mit
|
[
"Scaling Featuretools with Dask\n\nhttps://dask.pydata.org/en/latest/\n\"Dask provides advanced parallelism for analytics, enabling performance at scale for the tools you love\"\n\"Dask's schedulers scale to thousand-node clusters and its algorithms have been tested on some of the largest supercomputers in the world.\"\nWorks with NumPy, Pandas, Scikit-Learn. Mimic their APIs.",
"import os\nfrom datetime import datetime\nfrom glob import glob\n\nimport numpy as np\nimport pandas as pd\nimport featuretools as ft\n\nfrom dask import bag\nfrom dask.diagnostics import ProgressBar\nfrom featuretools.primitives import *\n\npbar = ProgressBar()\npbar.register()",
"1. Partition data",
"# data is taken from kaggle.com/c/talkingdata-adtracking-fraud-detection\ninput_file = '../data/train_sample.csv'\noutput_dir = \"../data/partitioned\"\n\ndef partition_by(df, column, output_dir):\n directory = f\"{output_dir}/{column}\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n df.groupby(column).apply(lambda x: x.to_csv(f\"{directory}/train_{x.name}.csv\", index=False))\n\npartition_by(pd.read_csv(input_file), 'app', output_dir)",
"2. Create distributed EntitySets",
"input_path = '../data/partitioned/app'\n\ndtypes = {\n 'ip': 'uint32',\n 'app': 'uint16',\n 'device': 'uint16',\n 'os': 'uint16',\n 'channel': 'uint16',\n 'is_attributed': 'uint8'\n}\nto_read = ['app', 'device', 'os', 'channel', 'is_attributed', 'click_time']\nto_parse = ['click_time']\n\nfilenames = glob(f\"{input_path}/train_*.csv\")\n\ndef createEntitySet(filename):\n df = pd.read_csv(filename, usecols=to_read, dtype=dtypes, parse_dates=to_parse)\n df['id'] = range(len(df))\n \n es = ft.EntitySet(id='clicks')\n es = es.entity_from_dataframe(\n entity_id='clicks',\n dataframe=df,\n index='id',\n time_index='click_time',\n \n variable_types={\n 'app': ft.variable_types.Categorical,\n 'device': ft.variable_types.Categorical,\n 'os': ft.variable_types.Categorical,\n 'channel': ft.variable_types.Categorical,\n 'is_attributed': ft.variable_types.Boolean,\n }\n )\n\n es = es.normalize_entity(base_entity_id='clicks', new_entity_id='apps', index='app', make_time_index=False)\n es.add_last_time_indexes()\n return es\n\nb = bag.from_sequence(filenames)\nentity_sets = b.map(createEntitySet)",
"3. Calculate feature matrices and definitions",
"def calc_feature_matrix(es, entity_id, cutoff_time):\n feature_matrix, feature_defs = ft.dfs(\n entityset=es,\n target_entity=entity_id,\n cutoff_time=cutoff_time,\n training_window=ft.Timedelta(\"3 days\"),\n max_depth=3\n )\n\n return feature_matrix, feature_defs\n\n# For the sake of simplicity we take predefined time\ncutoff_time = datetime.datetime(2017, 11, 9, 15, 59, 51)\n\nfeature_matrices = entity_sets.map(calc_feature_matrix, entity_id='apps', cutoff_time=cutoff_time)",
"4. Compute the distributed features",
"out = feature_matrices.compute()\n_, feature_defs = out[0]\nfeature_matrices = list(map(list, zip(*out)))[0]\nfeature_matrix = pd.concat(feature_matrices)\n\nfeature_defs\n\nfeature_matrix"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ptpro3/ptpro3.github.io
|
Projects/Challenges/challenge_set_5_prashant.ipynb
|
mit
|
[
"Topic: Challenge Set 5\nSubject: Linear Regression and Train/Test Split\nDate: 02/07/2017\nName: Prashant Tatineni",
"import pandas as pd\nimport patsy\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.cross_validation import train_test_split\n\n%matplotlib inline\n\ndf = pd.read_csv('2013_movies.csv')\n\ndf.head()\n\ny, X = patsy.dmatrices('DomesticTotalGross ~ Budget + Runtime', data=df, return_type=\"dataframe\")\n\nX.head()",
"Challenge 1",
"model = sm.OLS(y, X['Intercept'])\nfit = model.fit()\nfit.summary()",
"This model is representing the null hypothesis.",
"records = range(89)\n\nplt.scatter(records, y, color='g')\nplt.scatter(records, fit.predict(X['Intercept']))\n\nplt.hist((y['DomesticTotalGross'] - fit.predict(X['Intercept'])));",
"Challenge 2",
"model = sm.OLS(y, X[['Intercept','Budget']])\nfit = model.fit()\nfit.summary()\n\nplt.scatter(X['Budget'], y, color='g')\nplt.scatter(X['Budget'], fit.predict(X[['Intercept','Budget']]))\n\nplt.scatter(X['Budget'], fit.predict(X[['Intercept','Budget']]) - y['DomesticTotalGross'])",
"For higher budget, higher grossing movies there is some spread in the data and the model's residuals are higher\nChallenge 3",
"y3, X3 = patsy.dmatrices('DomesticTotalGross ~ Rating', data=df, return_type=\"dataframe\")\n\nX3.head()\n\nmodel = sm.OLS(y3, X3)\nfit = model.fit()\nfit.summary()\n\nrecords3 = range(100)\n\nplt.scatter(records3, y3, color='g')\nplt.scatter(records3, fit.predict(X3))\n\nplt.hist((y3['DomesticTotalGross'] - fit.predict(X3)));",
"Here, the model is using the 'rating' to predict Domestic gross. Since there's 4 ratings, it's predicting one of 4 domestic gross values.\nChallenge 4",
"y4, X4 = patsy.dmatrices('DomesticTotalGross ~ Budget + Runtime + Rating', data=df, return_type=\"dataframe\")\n\nX4.head()\n\nmodel = sm.OLS(y4, X4)\nfit = model.fit()\nfit.summary()\n\nplt.scatter(records, y4, color='g')\nplt.scatter(records, fit.predict(X4))",
"Challenge 5",
"X_train, X_test, y_train, y_test = train_test_split(X4, y4, test_size=0.25)\n\ny_test.shape\n\nmodel = sm.OLS(y_train, X_train)\nfit = model.fit()\nfit.summary()\n\nrecords5 = range(23)\n\nplt.scatter(records5, y_test, color='g')\nplt.scatter(records5, fit.predict(X_test))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
aliakbars/uai-ai
|
scripts/tugas1b.ipynb
|
mit
|
[
"Kecerdasan Buatan\nTugas 1: Model Linear\nMekanisme\nAnda hanya diwajibkan untuk mengumpulkan file ini saja ke uploader yang disediakan di http://elearning2.uai.ac.id/. Ganti nama file ini saat pengumpulan menjadi tugas1_NIM.ipynb.\nKeterlambatan: Pengumpulan tugas yang melebihi tenggat yang telah ditentukan tidak akan diterima. Keterlambatan akan berakibat pada nilai nol untuk tugas ini.\nKolaborasi: Anda diperbolehkan untuk berdiskusi dengan teman Anda, tetapi dilarang keras menyalin kode maupun tulisan dari teman Anda.\nPetunjuk\nPackages yang Anda akan gunakan dalam mengerjakan tugas ini antara lain:\n\nkeras\nmatplotlib\nnumpy\npandas\npillow\nscipy\nseaborn\n\nAnda diperbolehkan (jika dirasa perlu) untuk mengimpor modul tambahan untuk tugas ini. Namun, seharusnya modul yang tersedia sudah cukup untuk memenuhi kebutuhan Anda. Untuk kode yang Anda ambil dari sumber lain, cantumkan URL menuju referensi tersebut jika diambil dari internet!\nPerhatikan poin untuk tiap soal! Semakin kecil poinnya, berarti kode yang diperlukan untuk menjawab soal tersebut seharusnya semakin sedikit!\nNilai akhir: XX/40\nDeskripsi Dataset\nPada tugas kali ini, Anda akan mencoba menggunakan metode machine learning untuk melakukan dua jenis prediksi: regresi dan klasifikasi.\nUntuk kasus regresi, Anda diminta untuk memprediksi jumlah penjualan berdasarkan uang yang dihabiskan pada media iklan yang digunakan. Terdapat tiga media iklan, yaitu TV, Radio dan Newspaper. Dengan detail atribut sebagai berikut:\n\nTV: biaya yang dihabiskan untuk iklan tayangan TV untuk setiap satu produk dalam sebuah pasar (dalam ribuan dollar)\nRadio: biaya yang dihabiskan untuk iklan di radio (dalam ribuan dollar)\nNewspaper: biaya yang dihabiskan untuk iklan di koran (dalam ribuan dollar)\nSales: penjualan dari setiap satuan produk pada suatu pasar (dalam ribuan widget)\n\nUntuk kasus klasifikasi, Anda akan menggunakan dataset Food-101 yang memiliki 101 kategori makanan dengan total 101.000 gambar makanan. 
Dataset untuk tugas ini diambil dari Food-101 (https://www.vision.ee.ethz.ch/datasets_extra/food-101/). Untuk versi yang lebih sederhana, Anda hanya akan membandingkan apakah gambar yang diberikan berupa sushi atau pizza. Anda akan melakukan klasifikasi menggunakan algoritma regresi logistik dan neural networks dalam tugas ini.\nMengimpor Modul dan Dataset",
"from __future__ import print_function, division # Gunakan print(...) dan bukan print ...\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport random\nimport requests\n\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\nfrom sklearn.metrics import accuracy_score, confusion_matrix, mean_squared_error, r2_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\n\n%matplotlib inline\n\nRANDOM_STATE = 1337\nnp.random.seed(RANDOM_STATE)",
"1. Eksplorasi Awal Data - Advertising (6 poin)",
"df = pd.read_csv('https://github.com/aliakbars/uai-ai/raw/master/datasets/advertising.csv', index_col=0)",
"Soal 1.1.a (1 poin)\nLaporkan deskripsi dari Advertising dataset dengan menggunakan metode dari Pandas!\nSoal 1.1.b (2 poin)\nBerapa nilai sales paling rendah dan nilai sales paling tinggi dari data yang Anda miliki? Berapa ribu dollar uang yang dihabiskan untuk membayar biaya iklan di TV, radio, dan newspaper untuk produk tersebut?\nSoal 1.2 (3 poin)\nGambarkan scatter plot dari sales terhadap media iklan TV, radio, dan newspaper.\n2. Prediksi Penjualan Berdasarkan Biaya Media Iklan dengan Regresi Linear (19 poin)\nSoal 2.1 (4 poin)\nKita akan membuat simple linear regression dengan satu fitur. Dalam kasus ini, mari mencoba melihat hubungan antara sales dengan biaya untuk media iklan di TV.\nAmbil fitur dari kolom TV dan response dari kolom sales, kemudian buat sebuah model linear regression menggunakan pustaka scikit-learn dan latih model tersebut dengan data yang Anda miliki! Laporkan nilai bias dan koefisiennya. Lalu, jelaskan bagaimana intepretasi Anda terhadap koefisien dari model yang Anda miliki.\nPetunjuk: Lihat cara penggunaan pustakanya di sini.\nJawaban Anda di sini\nSoal 2.2.a (3 poin)\nMari kita lihat seberapa baik garis regresi yang dibuat dari model yang Anda miliki. Buatlah prediksi dari biaya TV yang paling minimum dan biaya TV yang paling maksimum! Gambarkan scatter plot dan garis regresi model Anda atas prediksi tersebut. Bagaimana garis tersebut mencocokkan data Anda?\nSoal 2.2.b (3 poin)\nCoba lakukan kembali regresi pada data tersebut, tetapi kali ini gunakan fungsi basis polinomial orde 3. Gambarkan kembali scatter plot dan fungsi regresinya.\nSoal 2.2.c (3 poin)\nSalah satu cara untuk memastikan bahwa model yang Anda hasilkan sudah cukup baik pada model regresi adalah dengan menghitung nilai mean squared error (MSE). Coba hitung nilai MSE untuk regresi dengan dan tanpa fungsi basis polinomial seperti yang Anda kerjakan pada bagian a dan b. Apa yang dapat Anda amati? 
Apakah nilainya sesuai dengan ekspektasi Anda?\nJawaban Anda di sini\nSoal 2.3.a (4 poin)\nSekarang kita akan melakukan Multiple Linear Regression. Buatlah sebuah model dengan menggunakan Linear Regression dari scikit-learn untuk fitur TV, radio, dan newspaper. Variabel dependen yang digunakan adalah sales. Keluarkan pula nilai bias dan nilai koefisien ketiga fitur tersebut. Sebelum itu, bagi dataset menjadi data latih dan data uji dengan proporsi data uji sebanyak 20%.\nSoal 2.3.b (2 poin)\nLakukan evaluasi model multiple linear regression yang Anda miliki dari data uji dengan menggunakan mean squared error.\n3. Eksplorasi Awal Data Food-101 (3 poin)\nPertama, kita akan memuat data menggunakan kode di bawah ini. X merupakan gambar yang telah diterjemahkan dalam bentuk tensor atau array multidimensi. Dimensi pertama menunjukkan jumlah datanya, dua dimensi berikutnya menunjukkan panjang dan lebar dari gambarnya, dan dimensi keempat merupakan channels (RGB). Di sisi lain, y adalah kelas dari masing-masing gambar yang diberikan dalam X sehingga X.shape[0] == y.shape[0].",
"def load_file(url):\n filename = url.split('/')[-1]\n with open(filename, 'wb') as f:\n resp = requests.get(url)\n f.write(resp.content)\n return np.load(filename)\n\nX = load_file('https://github.com/aliakbars/uai-ai/raw/master/datasets/food.npy')\ny = load_file('https://github.com/aliakbars/uai-ai/raw/master/datasets/food_labels.npy')\n\nX.shape",
"Soal 3.1 (1 poin)\nBerapa banyak gambar sushi dan pizza masing-masing dalam dataset ini?\nSoal 3.2 (2 poin)\nGambarkan satu contoh pizza (indeks 0-1000) dan satu contoh sushi (indeks 1001-2000) dari dataset yang digunakan.\nPetunjuk: Anda dapat menggunakan plt.imshow()\n4. Klasifikasi Gambar Pizza dan Sushi Menggunakan Logistic Regression (12 poin)\nSoal 4.1 (1 poin)\nBagi dataset Anda menjadi 70-30 untuk training-test sets.\nSoal 4.2.a (4 poin)\nBuat gambar yang berbentuk matriks pixel yang ada menjadi flat, lalu lakukan regresi logistik ke data yang telah Anda bagi tadi.\nSoal 4.2.b (2 poin)\nApa pendapat Anda tentang hasil klasifikasi tersebut? Apakah hasilnya sudah cukup baik?\nJawaban Anda di sini\nSoal 4.3 (5 poin)\nApa kesimpulan Anda dari eksperimen sejauh ini? Apa yang dapat dilakukan untuk memperbaiki kinerja model regresi logistik?\nJawaban Anda di sini\n5. Bonus: Klasifikasi Gambar Pizza dan Sushi Menggunakan Deep Learning (5 poin)\nCoba klasifikasikan data Food-101 untuk kedua jenis makanan diatas dengan menggunakan pustaka dari Keras untuk deep learning atau neural networks. Jangan lupa untuk melaporkan akurasi dan confusion matrix-nya. Adaptasi kode di atas dan lihat pula contoh kode di sini. Coba buat Convolutional Neural Network dan jalankan kodenya. Kode deep learning Anda hanya dinilai jika berhasil berjalan.\nPetunjuk: Anda mungkin perlu mengubah dimensi dari data latih dan data uji Anda menjadi $n \\times d \\times d \\times 1$ dengan 1 dimensi terakhir adalah channel hitam-putih. Nilainya bisa menjadi 3 jika kita menggunakan channel warna RGB.\nAnda mungkin perlu menggunakan representasi one-of-K untuk label yang digunakan. Anda dapat menggunakan keras.utils.to_categorical untuk melakukan hal ini. Silakan mencontoh kode untuk deep neural networks sederhana di sini atau dengan menggunakan convolutional neural networks di sini."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
rileyrustad/pdxapartmentfinder
|
analysis/Munge-Copy2.ipynb
|
mit
|
[
"import numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\nimport json",
"Load the data from our JSON file.\nThe data is stored as a dictionary of dictionaries in the json file. We store it that way beacause it's easy to add data to the existing master data file. Also, I haven't figured out how to get it in a database yet.",
"with open('../pipeline/data/Day90ApartmentData.json') as f:\n my_dict1 = json.load(f)\n\n\nwith open('../pipeline/data/ProcessedDay90ApartmentData.json') as g:\n my_dict2 = json.load(g)\n\ndframe1 = DataFrame(my_dict1)\ndframe1 = dframe1.T\ndframe1 = dframe1[['content', 'laundry', 'price', 'dog', 'bed', \n'bath', 'feet', 'long', 'parking', 'lat', 'smoking', 'getphotos', \n'cat', 'hasmap', 'wheelchair', 'housingtype']]\ndframe1.head(50)\n\n\ndframe2 = DataFrame(my_dict2)\ndframe2 = dframe2.T\ndframe2 = dframe2[['content', 'laundry', 'price', 'dog', 'bed', \n'bath', 'feet', 'long', 'parking', 'lat', 'smoking', 'getphotos', \n'cat', 'hasmap', 'wheelchair', 'housingtype']]\ndframe2.describe()\n\ndframe = pd.get_dummies(dframe2, columns = ['laundry', 'parking', 'smoking', 'wheelchair', 'housingtype'])\n\npd.set_option('display.max_columns', 500)\ndframe\n\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(\n\tdframe.drop('price', axis = 1), dframe.price, test_size=0.33)\n\ndef listing_cleaner(entry):\n print entry\n \n\ndf = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],\n 'C': [1, 2, 3]})\ndf\n\npd.get_dummies(df, columns=['A','C'])\n\nlisting_cleaner(my_dict['5465197037'])\n\ntype(dframe['bath']['5399866740'])",
"Clean up the data a bit\nRight now the 'shared' and 'split' are included in number of bathrooms. If I were to convert that to a number I would consider a shared/split bathroom to be half or 0.5 of a bathroom.",
"dframe.bath = dframe.bath.replace('shared',0.5)\ndframe.bath = dframe.bath.replace('split',0.5)\ndframe.smoking = dframe.smoking.replace(np.nan, 'smoking')\ndframe.furnished = dframe.furnished.replace(np.nan,'not furnished')\ndframe.wheelchair = dframe.wheelchair.replace(np.nan, 'not wheelchair accessible')\n\ndframe.describe()\n\ndframe.bed.unique()\n\nfrom sklearn.preprocessing import Imputer, LabelEncoder\n\ndef meanimputer(column):\n imp = Imputer(missing_values='NaN', strategy='mean', axis=1)\n imp.fit(column)\n X = imp.transform(column)\n return X[0]\n\narr = np.array([np.nan, 'house', 'boat', 'houseboat', 'house', np.nan, 'house','houseboat'])\nprac_df = DataFrame()\nprac_df['arr'] = arr\nprac_df['arr']\nmodeimputer(prac_df['arr'])\n\n\ndef modeimputer(column):\n\n le = LabelEncoder()\n column = le.fit_transform(column)\n print le.classes_\n print type(le.classes_[0])\n print column\n nan = le.transform([np.nan])[0]\n print nan\n print type(column)\n column = list(column)\n for _,i in enumerate(column):\n if i == nan:\n column[_] = np.nan\n \n imp = Imputer(missing_values='NaN', strategy='most_frequent', axis=1)\n imp.fit(column)\n\n X = imp.transform(column)\n \n for _,i in enumerate(X[0]):\n if np.isnan(i):\n X[_] = 0\n X = X.astype(int)\n\n\n Y = le.inverse_transform(X)\n\n return Y\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.base import TransformerMixin\nclass ModeImputer(TransformerMixin):\n\n def __init__(self):\n \"\"\"Impute missing values.\n\n Columns of dtype object are imputed with the most frequent value \n in column.\n\n Columns of other types are imputed with mean of column.\n \n Credit:http://stackoverflow.com/questions/25239958/\n impute-categorical-missing-values-in-scikit-learn\n\n \"\"\"\n def fit(self, X, y=None):\n\n self.fill = pd.Series([X[c].value_counts().index[0]\n if X[c].dtype == np.dtype('O') else X[c].mean() for c in X],\n index=X.columns)\n\n return self\n\n def transform(self, X, y=None):\n return 
X.fillna(self.fill)\n\ndata = [\n ['a', 1, 2],\n ['b', 1, 1],\n ['b', 2, 2],\n [np.nan, np.nan, np.nan]\n]\n\nX = pd.DataFrame(data)\nxt = ModeImputer().fit_transform(X)\n\nprint('before...')\nprint(X)\nprint('after...')\nprint(xt)\n\ndframe = ModeImputer().fit_transform(dframe)\n\ndframe.head()\n\ndframe.describe(include = 'all')\n\ndframe.bed.mean()\n\ndframe.parking.unique()\n\nu_dframe = DataFrame()\ndframe['bath'] = meanimputer(dframe['bath'])\ndframe['bed'] = meanimputer(dframe['bed'])\ndframe['feet'] = meanimputer(dframe['feet'])\ndframe['lat'] = meanimputer(dframe['lat'])\ndframe['long'] = meanimputer(dframe['long'])\n\n\n\n\n\n\n\n\ndframe.head()\n\ndframe.describe(include='all')\n\ndata = dframe[dframe.lat > 45.4][dframe.lat < 45.6][dframe.long < -122.0][dframe.long > -123.5]\nplt.figure(figsize=(15,10))\nplt.scatter(data = data, x = 'long',y='lat')\n",
"It looks like Portland!!!\nLet's cluster the data. Start by creating a list of [['lat','long'], ...]",
"XYdf = dframe[dframe.lat > 45.4][dframe.lat < 45.6][dframe.long < -122.0][dframe.long > -123.5]\ndata = [[XYdf['lat'][i],XYdf['long'][i]] for i in XYdf.index]\n\n",
"We'll use K Means Clustering because that's the clustering method I recently learned in class! There may be others that work better, but this is the tool that I know",
"from sklearn.cluster import KMeans\nkm = KMeans(n_clusters=40)\nkm.fit(data)\nneighborhoods = km.cluster_centers_\n\n\n%pylab inline\nfigure(1,figsize=(20,12))\nplot([row[1] for row in data],[row[0] for row in data],'b.')\nfor i in km.cluster_centers_: \n plot(i[1],i[0], 'g*',ms=25)\n'''Note to Riley: come back and make it look pretty'''",
"We chose our neighborhoods!\nI've found that every once in a while the centers end up in different points, but are fairly consistant. Now let's process our data points and figure out where the closest neighborhood center is to it!",
"neighborhoods = neighborhoods.tolist()\nfor i in enumerate(neighborhoods):\n i[1].append(i[0])\nprint neighborhoods",
"Create a function that will label each point with a number coresponding to it's neighborhood",
"def clusterer(X, Y,neighborhoods):\n neighbors = []\n for i in neighborhoods:\n distance = ((i[0]-X)**2 + (i[1]-Y)**2)\n neighbors.append(distance)\n closest = min(neighbors)\n return neighbors.index(closest)\n\nneighborhoodlist = []\nfor i in dframe.index:\n neighborhoodlist.append(clusterer(dframe['lat'][i],dframe['long'][i],neighborhoods))\ndframe['neighborhood'] = neighborhoodlist\n\n\ndframe",
"Here's the new Part. We're breaking out the neighborhood values into their own columns. Now the algorithms can read them as categorical data rather than continuous data.",
"from sklearn import preprocessing\ndef CategoricalToBinary(dframe,column_name):\n le = preprocessing.LabelEncoder()\n listy = le.fit_transform(dframe[column_name])\n dframe[column_name] = listy\n unique = dframe[column_name].unique()\n serieslist = [list() for _ in xrange(len(unique))]\n \n \n for column, _ in enumerate(serieslist):\n for i, item in enumerate(dframe[column_name]):\n if item == column:\n serieslist[column].append(1)\n else:\n serieslist[column].append(0)\n dframe[column_name+str(column)] = serieslist[column]\n\n \n return dframe\n\n\n\n\n\npd.set_option('max_columns', 100)\ndframe = CategoricalToBinary(dframe,'housingtype')\ndframe = CategoricalToBinary(dframe,'parking')\ndframe = CategoricalToBinary(dframe,'laundry')\ndframe = CategoricalToBinary(dframe,'smoking')\ndframe = CategoricalToBinary(dframe,'wheelchair')\ndframe = CategoricalToBinary(dframe,'neighborhood')\ndframe\n\n\ndframe = dframe.drop('date',1)\ndframe = dframe.drop('housingtype',1)\ndframe = dframe.drop('parking',1)\ndframe = dframe.drop('laundry',1)\ndframe = dframe.drop('smoking',1)\ndframe = dframe.drop('wheelchair',1)\ndframe = dframe.drop('neighborhood',1)\ndframe = dframe.drop('time',1)\n\n\n\n\ncolumns=list(dframe.columns)\n\n\nfrom __future__ import division\nprint len(dframe)\ndf2 = dframe[dframe.price < 10000][columns].dropna()\nprint len(df2)\nprint len(df2)/len(dframe)\n\nprice = df2[['price']].values\ncolumns.pop(columns.index('price'))\nfeatures = df2[columns].values\n\nfrom sklearn.cross_validation import train_test_split\nfeatures_train, features_test, price_train, price_test = train_test_split(features, price, test_size=0.1, random_state=42)",
"Ok, lets put it through Decision Tree!\nWhat about Random Forest?",
"from sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import r2_score\nreg = RandomForestRegressor()\nreg = reg.fit(features_train, price_train)\n\n\nforest_pred = reg.predict(features_test)\nforest_pred = np.array([[item] for item in forest_pred])\n\nprint r2_score(forest_pred, price_test)\nplt.scatter(forest_pred,price_test)\n\n\ndf2['predictions'] = reg.predict(df2[columns])\n\ndf2['predictions_diff'] = df2['predictions']-df2['price']\n\nsd = np.std(df2['predictions_diff'])\nsns.kdeplot(df2['predictions_diff'][df2['predictions_diff']>-150][df2['predictions_diff']<150])\nsns.plt.xlim(-150,150)\n\ndata = df2[dframe.lat > 45.45][df2.lat < 45.6][df2.long < -122.4][df2.long > -122.8][df2['predictions_diff']>-150][df2['predictions_diff']<150]\nplt.figure(figsize=(15,10))\nplt.scatter(data = data, x = 'long',y='lat', c = 'predictions_diff',s=10,cmap='coolwarm')\n\ndframe\n\nprint np.mean([1,2,34,np.nan])\n\ndef averager(dframe):\n dframe = dframe.T\n dframe.dropna()\n averages = {}\n for listing in dframe:\n try:\n key = str(dframe[listing]['bed'])+','+str(dframe[listing]['bath'])+','+str(dframe[listing]['neighborhood'])+','+str(dframe[listing]['feet']-dframe[listing]['feet']%50)\n if key not in averages:\n averages[key] = {'average_list':[dframe[listing]['price']], 'average':0}\n elif key in averages:\n averages[key]['average_list'].append(dframe[listing]['price'])\n except TypeError:\n continue\n for entry in averages:\n averages[entry]['average'] = np.mean(averages[entry]['average_list'])\n return averages\n \n \n \n \n \n\naverages = averager(dframe)\nprint averages\n\ndframe['averages']= averages[str(dframe['bed'])+','+str(dframe['bath'])+','+str(dframe['neighborhood'])+','+str(dframe['feet']-dframe['feet']%50)]\n\ndframe.T\n",
"Wow! up to .87! That's our best yet! What if we add more trees???",
"reg = RandomForestRegressor(n_estimators = 100)\nreg = reg.fit(features_train, price_train)\n\nforest_pred = reg.predict(features_test)\nforest_pred = np.array([[item] for item in forest_pred])\n\nprint r2_score(forest_pred, price_test)\nprint plt.scatter(pred,price_test)\n\nfrom sklearn.tree import DecisionTreeRegressor\nreg = DecisionTreeRegressor(max_depth = 5)\nreg.fit(features_train, price_train)\nprint len(features_train[0])\ncolumns = [str(x) for x in columns]\nprint columns\nfrom sklearn.tree import export_graphviz\nexport_graphviz(reg,feature_names=columns)",
"Up to .88!\nSo what is our goal now? I'd like to see if adjusting the number of neighborhoods increases the accuracy. same for the affect with the number of trees",
"def neighborhood_optimizer(dframe,neighborhood_number_range, counter_num):\n XYdf = dframe[dframe.lat > 45.4][dframe.lat < 45.6][dframe.long < -122.0][dframe.long > -123.5]\n data = [[XYdf['lat'][i],XYdf['long'][i]] for i in XYdf.index]\n r2_dict = []\n for i in neighborhood_number_range:\n counter = counter_num\n average_accuracy_list = []\n while counter > 0:\n km = KMeans(n_clusters=i)\n km.fit(data)\n neighborhoods = km.cluster_centers_\n neighborhoods = neighborhoods.tolist()\n for x in enumerate(neighborhoods):\n x[1].append(x[0])\n neighborhoodlist = []\n for z in dframe.index:\n neighborhoodlist.append(clusterer(dframe['lat'][z],dframe['long'][z],neighborhoods))\n dframecopy = dframe.copy()\n dframecopy['neighborhood'] = Series((neighborhoodlist), index=dframe.index)\n df2 = dframecopy[dframe.price < 10000][['bath','bed','feet','dog','cat','content','getphotos', 'hasmap', 'price','neighborhood']].dropna()\n features = df2[['bath','bed','feet','dog','cat','content','getphotos', 'hasmap', 'neighborhood']].values\n price = df2[['price']].values\n features_train, features_test, price_train, price_test = train_test_split(features, price, test_size=0.1)\n reg = RandomForestRegressor()\n reg = reg.fit(features_train, price_train)\n forest_pred = reg.predict(features_test)\n forest_pred = np.array([[item] for item in forest_pred])\n counter -= 1\n average_accuracy_list.append(r2_score(forest_pred, price_test))\n total = 0\n for entry in average_accuracy_list:\n total += entry\n r2_accuracy = total/len(average_accuracy_list)\n r2_dict.append((i,r2_accuracy))\n print r2_dict\n return r2_dict\n\nneighborhood_number_range = [i for _,i in enumerate(range(2,31,2))]\nneighborhood_number_range\n\nr2_dict = neighborhood_optimizer(dframe,neighborhood_number_range,10)\n\nr2_dict[:][0]\n\nplt.scatter([x[0] for x in r2_dict],[x[1] for x in r2_dict])",
"Looks like the optimum is right around 10 or 11, and then starts to drop off. Let's get a little more granular and look at a smaller range",
"neighborhood_number_range = [i for _,i in enumerate(range(7,15))]\nneighborhood_number_range\n\nr2_dict = neighborhood_optimizer(dframe,neighborhood_number_range,10)\n\nprint r2_dict\nplt.scatter([x[0] for x in r2_dict],[x[1] for x in r2_dict])",
"Trying a few times, it looks like 10, 11 and 12 get the best results at ~.85. Of course, we'll need to redo some of these optomizations after we properly process our data. Hopefully we'll see some more consistency then too.",
"r2_dict = neighborhood_optimizer(dframe,[10,11,12],25)",
"Note #1 to Riley: (From Last time) Perhaps look into another regressor? see if there's one that's inherantly better at this kind of thing.\nNote #2 to Riley: Figure out how to process data so that you don't have to drop null values\nNote #3 to Riley: convert categorical data into binary\nNote #4 to Riley: I wonder if increasing the number of neighborhoods would become more accurate as we collect more data? like you could create a bunch of little accurate models instead of a bunch of bigger ones.\nLearned: If you plan on using Decision Tree/Random Forest from SKLearn, make sure you collect your discrete variables in separate columns and make them binary yes or no(0 or 1)."
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
flohorovicic/pynoddy
|
docs/notebooks/Gippsland-topology.ipynb
|
gpl-2.0
|
[
"Topology analysis for the Gippsland Basin\nIn this notebook, we are testing the combination of the pynoddy Experiment class with the topology analysis and apply it to the Gippsland Basin model. This notebook will evaluate three aspects:\n\nApplication of the Experiment class to set-up a topology experiment\nRunning the topology analysis for a range of models\nEvaluating and visualising results\n\nFurther steps might include the parallelisation of the random model generation, let's see where we get...",
"from IPython.core.display import HTML\ncss_file = 'pynoddy.css'\nHTML(open(css_file, \"r\").read())\n\n%matplotlib inline\n\n# here the usual imports. If any of the imports fails, make sure that pynoddy is installed\n# properly, ideally with 'python setup.py develop' or 'python setup.py install'\nimport sys, os\nimport matplotlib.pyplot as plt\nimport numpy as np\n# adjust some settings for matplotlib\nfrom matplotlib import rcParams\n# print rcParams\nrcParams['font.size'] = 15\n# determine path of repository to set paths corretly below\nrepo_path = os.path.realpath('../..')\nimport pynoddy.history\nimport pynoddy.experiment\nrcParams.update({'font.size': 20})",
"Creating an experiment object\nFirst, we start with generating a pynoddy experiment object. The experiment class inherits all the methods from the base pynoddy.history class and we can directly import the Gippsland Basin model that we want to analyse into the object:",
"reload(pynoddy.experiment)\n# the model itself is now part of the repository, in the examples directory:\nhistory_file = os.path.join(repo_path, \"examples/GBasin_Ve1_V4_b.his\")\ngipps_topo_ex = pynoddy.experiment.Experiment(history = history_file) ",
"Some basic information about the model can be obtained with:",
"gipps_topo_ex.info()",
"We can have a quick look at the model in a section view (note that Noddy is now executed in the background when required - and the output automatically generated in the required resolution):",
"gipps_topo_ex.plot_section('y')",
"The base plot is not very useful - but we can create a section plot with a define vertical exaggeration (keyword ve) and plot the colorbar in horizontal orientation:",
"# gipps_topo_ex.determine_model_stratigraphy()\ngipps_topo_ex.plot_section('x', ve = 5, position = 'centre',\n cmap = 'YlOrRd',\n title = '',\n colorbar = False)\ngipps_topo_ex.plot_section('y', position = 100, ve = 5.,\n cmap = 'YlOrRd',\n title = '',\n colorbar_orientation = 'horizontal')",
"Note: The names of the model stratigraphy (colorbar labels) are unfortunately not defined correctly in the input file - we need to fix that, then we should get useful labels, as well!\nLoading parameters ranges from file\nWe now need to define the parameter ranges. This step can either be done through explicit definition in the notebook (see the previous notebook on the Experiment class), or a list of parameters and defined ranges plus statistics can be read in from a csv file. This enables the convenient parameter definition in a spreadsheed (for example through Excel).\nIn order to be read in correctly, the header should contain the labels:\n\n'event' : event id\n'parameter' : Noddy parameter ('Dip', 'Dip Direction', etc.)\n'min' : minimum value\n'max' : maximum value\n'initial' : initial value\n\nIn addition, it is possible to define PDF type and parameters. For now, the following settings are supported:\n\n'type' = 'normal' \n'stdev' : standard deviation\n'mean' : mean value (default: 'initial' value)\n\nWe can read in the parameters simply with:",
"reload(pynoddy.experiment)\n# the model itself is now part of the repository, in the examples directory:\nhistory_file = os.path.join(repo_path, \"examples/GBasin_Ve1_V4_b.his\")\ngipps_topo_ex = pynoddy.experiment.Experiment(history = history_file) \ngipps_topo_ex.load_parameter_file(os.path.join(repo_path, \"examples/gipps_params.csv\"))",
"Generating random perturbations of the model\nBefore generating random prerturbations, we should now store the base version so that we can always revert to it at a later stage:",
"gipps_topo_ex.freeze()",
"For a reproducible experiment, we can also set the random seed:",
"gipps_topo_ex.set_random_seed(12345)",
"And now, let's perturb the model:",
"gipps_topo_ex.random_perturbation()",
"Let's see what happened: we can compare the new model to the base model as we stored it before:",
"fig = plt.figure(figsize = (12,6))\nax1 = fig.add_subplot(211)\nax2 = fig.add_subplot(212)\ngipps_topo_ex.plot_section(ax = ax1, direction = 'x', model_type = \"base\", \n colorbar = False, title = \"\", ve = 5.)\ngipps_topo_ex.plot_section(ax = ax2, direction = 'x', colorbar = False, \n title = \"\", ve = 5.)\n\nb1 = gipps_topo_ex.get_section('x', resolution = 50, model_type = 'base')\n# b1.plot_section(direction = 'x', colorbar = False, title = \"\", ve = 5.)\nb2 = gipps_topo_ex.get_section('x', resolution = 50, model_type = 'current')\ndiff = b1 - b2\n\nprint b1\n\nb1.shape",
"...and another perturbation:",
"gipps_topo_ex.random_perturbation()\n\nfig = plt.figure(figsize = (12,6))\nax1 = fig.add_subplot(311)\nax2 = fig.add_subplot(312)\nax3 = fig.add_subplot(313)\ngipps_topo_ex.plot_section(ax = ax1, direction = 'x', model_type = \"base\", \n colorbar = False, title = \"\", ve = 5.)\ngipps_topo_ex.plot_section(ax = ax2, direction = 'x', colorbar = False, \n title = \"\", ve = 5.)\n# plot difference\n\n\nfig = plt.figure(figsize = (12,6))\nax1 = fig.add_subplot(211)\nax2 = fig.add_subplot(212)\ngipps_topo_ex.plot_section(ax = ax1, direction = 'x', model_type = \"base\", \n colorbar = False, title = \"\", ve = 5.)\ngipps_topo_ex.plot_section(ax = ax2, direction = 'x', colorbar = False, \n title = \"\", ve = 5.)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
pligor/predicting-future-product-prices
|
dfa/notebook/dfa_simulations.ipynb
|
agpl-3.0
|
[
"DFA robustness simulations\nDominik Krzemiński",
"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport scipy.signal as ss\nimport scipy.stats as st\n\nplt.style.use('ggplot')\n%matplotlib inline",
"Detrended Fluctuation Analysis is a method which allows to measure self-affinity properties of time series. It is claimed to be very roboust method for Hurst exponent estimation even for nonstationary signals. It consists of three main steps:\n1) Cumulative sum calculation;\n2) Detrending time series in windows $\\Delta n$ equally distributed on logarithmic scale;\n3) Mean squared residuals $F$ calculation on a set of windows $\\Delta n_i$;\nFinally, to determine DFA exponent one need to fit a line to so-called fluctuation function $F(\\Delta n)$. A slope of the line is our Hurst exponent estimator.\nIn the following simulations we test robustness of the method to short and high amplitude artifacs and signal slicing. We use self-implemented version of DFA algorithm, which may be slower but for testing reasons is more transparent and thus easier to understand.",
"def calc_rms(x, scale):\n \"\"\"\n Root Mean Square in windows with linear detrending.\n \n Args:\n -----\n *x* : numpy.array\n one dimensional data vector\n *scale* : int\n length of the window in which RMS will be calculaed\n Returns:\n --------\n *rms* : numpy.array\n RMS data in each window with length len(x)//scale\n \"\"\"\n # making an array with data divided in windows\n shape = (x.shape[0]//scale, scale)\n X = np.lib.stride_tricks.as_strided(x,shape=shape)\n # vector of x-axis points to regression\n scale_ax = np.arange(scale)\n rms = np.zeros(X.shape[0])\n for e, xcut in enumerate(X):\n coeff = np.polyfit(scale_ax, xcut, 1)\n xfit = np.polyval(coeff, scale_ax)\n # detrending and computing RMS of each window\n rms[e] = np.sqrt(np.mean((xcut-xfit)**2))\n return rms\n\ndef dfa(x, scale_lim=[5,9], scale_dens=0.25, show=False):\n \"\"\"\n Detrended Fluctuation Analysis - algorithm with measures power law\n scaling of the given signal *x*.\n More details about algorithm can be found e.g. here:\n Hardstone, R. et al. Detrended fluctuation analysis: A scale-free \n view on neuronal oscillations, (2012).\n \n Args:\n -----\n *x* : numpy.array\n one dimensional data vector\n *scale_lim* = [5,9] : list of lenght 2 \n boundaries of the scale where scale means windows in which RMS\n is calculated. 
Numbers from list are indexes of 2 to the power\n of range.\n *scale_dens* = 0.25 : float\n density of scale divisions\n *show* = False\n if True it shows matplotlib picture\n Returns:\n --------\n *scales* : numpy.array\n vector of scales\n *fluct* : numpy.array\n fluctuation function\n *alpha* : float\n DFA exponent\n \"\"\"\n # cumulative sum of data with substracted offset\n y = np.cumsum(x - np.mean(x))\n scales = (2**np.arange(scale_lim[0], scale_lim[1], scale_dens)).astype(np.int)\n fluct = np.zeros(len(scales))\n # computing RMS for each window\n for e, sc in enumerate(scales):\n fluct[e] = np.mean(np.sqrt(calc_rms(y, sc)**2))\n # fitting a line to rms data\n coeff = np.polyfit(np.log2(scales), np.log2(fluct), 1)\n if show:\n fluctfit = 2**np.polyval(coeff,np.log2(scales))\n plt.loglog(scales, fluct, 'bo')\n plt.loglog(scales, fluctfit, 'r', label=r'$\\alpha$ = %0.2f'%coeff[0])\n plt.title('DFA')\n plt.xlabel(r'$\\log_{10}$(time window)')\n plt.ylabel(r'$\\log_{10}$<F(t)>')\n plt.legend()\n plt.show()\n return scales, fluct, coeff[0]\n\n\ndef power_law_noise(n, alpha, var=1):\n '''\n Generale power law noise. \n \n Args:\n -----\n *n* : int\n number of data points\n *alpha* : float\n DFA exponent\n *var* = 1 : float\n variance\n Returns:\n --------\n *x* : numpy.array\n generated noisy data with exponent *alpha*\n\n Based on:\n N. Jeremy Kasdin, Discrete simulation of power law noise (for\n oscillator stability evaluation)\n '''\n # computing standard deviation from variance\n stdev = np.sqrt(np.abs(var))\n beta = 2*alpha-1\n hfa = np.zeros(2*n)\n hfa[0] = 1\n for i in range(1,n):\n hfa[i] = hfa[i-1] * (0.5*beta + (i-1))/i\n # sample white noise\n wfa = np.hstack((-stdev +2*stdev * np.random.rand(n), np.zeros(n)))\n fh = np.fft.fft(hfa)\n fw = np.fft.fft(wfa)\n fh = fh[1:n+1]\n fw = fw[1:n+1]\n ftot = fh * fw\n # matching the conventions of the Numerical Recipes\n ftot = np.hstack((ftot, np.zeros(n-1)))\n x = np.fft.ifft(ftot) \n return np.real(x[:n])\n",
"Firstly let's just test our implemetation on randomly generated power-law data.",
"n = 2**12\ndfa_alpha = 0.7\nx = power_law_noise(n, dfa_alpha)\nscales, fluct, esta = dfa(x, show=1)\nprint(\"DFA exponent {}\".format(esta))",
"We got acceptable estimation of the initial value of $\\alpha$=0.7.\nSimulation 1: artifacts\nNow we are ready to perform the first simulation. In biomedical signals (EEG in particular) many high amplitude artifacts appear. Those can be caused by body movements, eyes blinking or just by recording device. Typically, in most of the studies researchers inspect signals visually and remove parts of them when neccessary. Although some more sophisticated methods exist, this is still the most common choice giving the best efficiency. However, because DFA is considered to be valid also for non-stationary time series we could take an adventage of that property. Beforehand let's test it if it is true.\nFirst of all, we need some model of signal artifacs.",
"mr = ss.morlet(100, w=0.9, s=0.3)\nplt.plot(mr.real)",
"Artifacts look very often as a big unexpected peak with much higher amplitude than the rest of the signal. I decided to model it as a Morlet wavelet with low frequency. I multiply part of the signal by that shape with some arbitrarly big amplitude.\nThe picture below shows an example of signal with artifact.",
"x = power_law_noise(n, dfa_alpha)\nplt.figure(figsize=(9,7))\nplt.subplot(211)\nplt.plot(x)\nplt.title(\"Original signal\")\nplt.ylim([-2.3,2.3])\nncut = 500\nidx = 400\nmr = ss.morlet(ncut, w=1, s=0.3)\nx[idx:idx+ncut] *= 10*mr.real\nplt.subplot(212)\nplt.plot(x)\nplt.ylim([-2.3,2.3])\nplt.xlabel('time')\nplt.title(\"Signal with artifact\")\nscales, fluct, esta = dfa(x)\nprint(\"DFA exponent {}\".format(esta))",
"Now we perform bootstrapping, so in principle repeat such an operation Nrep times adding artifacts in random places with random amplitudes and lengths.",
"Nrep = 1000 # how many resamplings\nx_down, x_top = 400, 3500 # range of artifacts beginnings\nsig_amp, mu_amp = 3.5, 10 # amplitude parameters (to random Gauss generator)\nsig_ncut, mu_ncut = 100, 500 # length of the artifact\n\ndfavec = np.zeros(Nrep)\nfor i in range(Nrep):\n if i%10==0: print(i)#, end=' ')\n x = power_law_noise(n, dfa_alpha)\n idx = np.random.randint(x_down, x_top)\n ncut = int(np.random.randn()*sig_ncut+mu_ncut)\n mr = ss.morlet(ncut, w=np.random.randn()*0.1+1, s=np.random.randn()*0.1+0.3)\n amp = np.random.randn()*sig_amp+mu_amp\n if idx+ncut-x.shape[0] > 0: idx = x.shape[0]-ncut-1 # checks if idxs are in range of x\n x[idx:idx+ncut] *= amp*mr.real\n scales, fluct, estalpha = dfa(x)\n dfavec[i] = estalpha",
"As a result we get a histogram with confidence level values marked by red dashed lines and actual value marked as a purple line. We see that we cannot reject a null-hypothesis that artifacts (those generated as above) don't have any impact on DFA exponent estimation.",
"alpha = 0.05\nv1 = st.scoreatpercentile(dfavec, 0.5*alpha*100)\nv2 = st.scoreatpercentile(dfavec, 100-0.5*alpha*100)\nplt.figure(figsize=(9,6))\nplt.hist(dfavec, color='#57aefc')\nplt.axvline(v1, color='r', linestyle='--')\nplt.axvline(v2, color='r', linestyle='--')\nplt.axvline(dfa_alpha, color='m')\nplt.ylabel('Counts')\nplt.xlabel('DFA-exp')\nplt.title('Histogram - artifacts')\nplt.show()",
"Simulation 2: slicing\nIn the second simulation we are going to check what happens if we slice the signal and join two pieces together. Does it affect DFA value?\nAs it happened before firstly we consider only signle case.",
"n = 2**13\nx = power_law_noise(n, dfa_alpha)\nplt.figure(figsize=(9,7))\nplt.subplot(211)\nplt.plot(x)\nplt.title(\"Original signal\")\nplt.subplot(212)\nidx = 1400\ngap_width = 400\nx_c = np.concatenate((x[:idx],x[idx+gap_width:]))\nplt.plot(x_c)\nplt.xlabel('time')\nplt.title(\"Sliced signal\")\nscales, fluct, estaalpha = dfa(x_c)\nprint(\"DFA exponent {}\".format(estaalpha))",
"And now we test it by bootstrapping.",
"Nrep = 1000 # how many resamplings\nx_down, x_top = int(0.1*n), int(0.9*n) # range of slice\nsig_gw, mu_gw = 100, 300 # gap width\n\ngap_width = 200\n\ndfavec = np.zeros(Nrep)\nfor i in range(Nrep):\n if i%10==0: print(i, end=\" \")\n x = power_law_noise(n, dfa_alpha)\n idx = np.random.randint(x_down, x_top)\n gap_width = int(np.random.randn()*sig_gw+mu_gw)\n x_c = np.concatenate((x[:idx],x[idx+gap_width:]))\n scales, fluct, estalpha = dfa(x)\n dfavec[i] = estalpha",
"Once again the initial value is in between confidence intervals so we can infer that slicing has no effect on DFA estimation.",
"alpha = 0.05\nv1 = st.scoreatpercentile(dfavec, 0.5*alpha*100)\nv2 = st.scoreatpercentile(dfavec, 100-0.5*alpha*100)\nplt.figure(figsize=(9, 6))\nplt.hist(dfavec, color='#57aefc')\nplt.axvline(v1, color='r', linestyle='--')\nplt.axvline(v2, color='r', linestyle='--')\nplt.axvline(dfa_alpha, color='m')\nplt.ylabel('Counts')\nplt.xlabel('DFA-exp')\nplt.title('Histogram - slicing')\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
shareactorIO/pipeline
|
oreilly.ml/high-performance-tensorflow/notebooks/05_Debug_Model.ipynb
|
apache-2.0
|
[
"Debug Models Using tfdbg\nOpen a Terminal through Jupyter Notebook\n(Menu Bar -> Terminal -> New Terminal)\n\nRun the Next Cell to Display the Code\nFind the DebugWrapper around the tf.Session\nsess = tf.Session(config=config)\nsess = tf_debug.LocalCLIDebugWrapperSession(sess)",
"%%bash\n\ncat /root/src/main/python/debug/debug_model_cpu.py",
"Run the following in the Terminal (CPU):\npython /root/src/main/python/debug/debug_model_cpu.py",
"%%bash\n\ncat /root/src/main/python/debug/debug_model_gpu.py",
"Run the following in the Terminal (GPU):\npython /root/src/main/python/debug/debug_model_gpu.py"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
superliaoyong/plist-forsource
|
python第一课课件.ipynb
|
apache-2.0
|
[
"人生苦短,我用python\n\npython课程\n课表\n一、 python基础 - 变量与数据类型,及常见数据类型的用法\n二、 python基础 - 条件、循环、函数、类\n三、 python爬虫 - python爬虫并用Mysql数据库存储\n四、 pandas通览 - 用pandas做数据处理与分析\n五、 实战 - 泰坦尼克幸存者预测\n\n学完本课程之后,你会:\n1、 掌握基本的python语法,并编写简单的python程序\n2、 可以阅读别人写的python代码\n3、 编写简单的爬虫\n4、 进行基本的数据分析\n\n机器学习需要:\n1、 数学理论知识 (统计、线性代数、微积分 。。。)\n2、 编程能力(python)\n\npython的特性\n1、python语法简单,容易理解,容易学习\n2、跨平台,可在windows/linux/mac os等系统上运行\n3、可以做网站、爬虫、大数据处理、机器学习\n4、拥有强大、丰富的第三方库 numpy、pandas ...\n\n最简单的开始",
"print('hello, \"world')\n\nprint(\"hello, 'world\")\n\nimport this",
"从计算器开始",
"5 + 100\n\n100 / 10\n\n100 * 10\n\n100 - 99\n\n10 % 3\n\n10 ** 3\n\n10 ** (1/3)\n\nimport math\n\nmath.pi\n\nmath.sin(math.pi/2)\n\nmath.floor(9.23432)\n\nmath.ceil(9.234)",
"应用题:\n1、小姐姐去买水果,苹果5元一斤,葡萄15元一斤,现在小姐姐买了2斤苹果,1.5斤葡萄,问,小姐姐买这两种水果分别花了多少钱?总共花了多少钱?",
"# 苹果的花费\nprint(5*2)\n\n# 葡萄的花费\nprint(15*1.5)\n\n# 总花费\nprint((5*2) + (15*1.5))",
"三个问题:\n1、如果脱离了题干和注释,我们无法理解 5 * 2 是什么意思\n2、当计算总价的时候,重复计算了苹果和葡萄的花费\n3、输出是一个数字,表达不清晰",
"apple_price = 5\napple_weight = 2\napple_cost = apple_price * apple_weight\n\ngrape_price = 15\ngrape_weight = 1.5\ngrape_cost = grape_price * grape_weight\n\ntotal_cost = apple_cost + grape_cost\n\nprint(apple_cost, total_cost)",
"增强的格式化字符串函数 format",
"\"苹果的花费为:{};葡萄的花费为:{};总花费为:{}\".format(apple_cost, grape_cost, total_cost)",
"变量: 代表某个值得名称",
"number + 2",
"语法糖",
"a = 10 \nb = 20\n\na, b = b, a\n\nprint(\"a is {}, b is {}\".format(a, b))",
"命名规范\n1、标识符的第一个字符必须是字母表中的字母(大写或小写)或者一个下划线\n2、标识符名称的其他部分可以由字母(大写或小写)、下划线(‘_’)或数字(0-9)组成。\n3、标识符名称是对大小写敏感的。",
"n = 10\nN = 10\n\n# 错误: 首字符为数字\n3thidnf = 10 \n\n# 字母中间有空格,为非法字符\nmy name = 100\n\n# -是变量命名中的非法字符\nmy-name = 10\n\nround(100/3, 3)",
"代码规范建议\n\n1、不要使用单字符\n2、变量名能清晰表达变量的意思\n3、合理使用字母中间下划线\n\n变量类型\n1、字符串 str\n2、数字 int float complex ..\n3、列表 list\n4、元组 tuple\n5、字典 dict\n\n数值类型",
"number = 10\nnumber = number + 10\nnumber += 10\n\nnumber -= 10\nnumber *= 10\nnumber /= 10\n\nimport math\n\n# 乘方、开放\nmath.pow(3, 10)\n\n# 推荐是用\n3 ** 10\n\nmath.floor(2.23242)\n\nmath.ceil(2.234234)\n\n# 度的转换\nmath.radians(180)\n\nmath.sin(math.pi/2)\n\nmin(10, 12, 234, 100, 1)\n\nmax(10, 12, 234, 100, 1)\n\nsum([10, 12, 234, 100, 1])\n\ndivmod(10, 3)",
"bool型",
"True, False\n\nTrue == 1\n\nFalse == 0\n\n# 不建议这样写,没有意义, 有一个特例,后面会讲到\nTrue + 10\n\n100 > 10",
"bool类型,运算: 与运算、或运算、非运算",
"# 与运算,同为真则为真\nTrue and False\n\n# 或运算, 只要一个为真则为真\nTrue or False\n\n# 非运算,取反\nnot False",
"| 操作符 | 解释 |\n| --- | --- |\n| < | 小于 |\n| <= | 小于等于 |\n| > | 大于 |\n| >= | 大于等于 |\n| == | 等于 |\n| != | 不等于 |\n| is | 是相同对象 |\n字符串",
"# 字符串可以用双引号,也可以用单引号。通过单、双引号的恰当使用,可以避免不必要的字符转义(escape),也就是说,可以避免使用 \\ (转义字符)\n\nline = \"hello world\"\nline = \"hello world\\\"\"\nline = 'hello \\'world'\n\n# 字符串的加法操作\nline_1 = \"ni hao, \"\nline_2 = 'xiaojiejie'\nline_1 + line_2\n\n# 字符串的乘法操作\nline = 'nihao '\nline * 10\n\n# 字符串是不可变类型的变量\nline\n\nline = 'buhao '\nline\n\n# 返回字符串的长度\nlen(line)\n\nline = 'nihao '\nline_copy = line\n# id函数,返回一个身份识别符,可以理解为一个变量的内存地址\nid(line), id(line_copy)\n\nline = 'buhao '\nid(line), id(line_copy)\n\n23 + 10",
"markdown中表格的写法\n| 序号 | 名称 | 年龄 |\n| --- | :---: | --- |\n| 1 | wong | 18 |\n| 2 | sun xing zhe | 500 |\n切片",
"line = 'huan ying da jia lai wan men shang ke'",
"取前十个字符",
"line[0:10]\n\nline[0:20:3]",
"取后十个字符",
"line[-10:]",
"翻转字符",
"line[::-1]",
"单字符\n单字符是不可以进行赋值的",
"line[-1] = 'E'\n\nline.capitalize()\n\nline = \"ASDFASDFEWFSDF\"\nline.capitalize()\n\n# 居中\nline.center(20)\n\n# 计数\nline.count('Z')",
"字符串首尾判断",
"line.endswith('FEWFSDD')\n\nline.startswith('ASDFA')\n\nline\n\nline.find('A', 2)\n\n# 当字符不存在时,报错\nline.index('Z')\n\nline = 'Aslkdfjsldkf'\n\nline.upper()\n\nline.lower()\n\nline.istitle()\n\nline.isupper()\n\nline.islower()\n\nline = ' lskdas k \\n\\t '\n\nline.strip()\n\nline.rstrip()\n\nline.lstrip()\n\nline = 'Aslkdfjsldkf'\n\nline.swapcase()",
"【重点】上面我们用到的所有字符串函数,都是为我们生成了一个新的字符串,原有的字符串是不变的",
"line = \"ni hao\"\nid(line)\n\nnew_line = line.upper()\nid(line), id(new_line)",
"列表",
"# 空列表\nvaribals = []\nvaribals = list()",
"可以容纳任意类型的对象,任意数量的对象 【重点】列表是可变类型的",
"varibals = [1, 2, 3, 'ni hao', 'hello, python', [], [100, 100]]\n\nvaribals = []\nvaribals.append(1)\nvaribals.append(2)\nvaribals.append('ni hao')\nvaribals\n\nvaribals[0] = 10\nvaribals",
"python是一种动态类型的语言,一个变量是什么类型,要看程序在运行过程中变量所代表的值是什么",
"var = 10\ntype(var)\nvar = 'str'\ntype(var)",
"切片",
"varibals[-2:]\n\nvaribals + [1,23]\n\nvaribals * 4",
"序列 列表是一种容器型的序列;字符串是一种扁平型的序列",
"len(varibals)\n\n# 没有返回值,而是修改了我们列表对象本身\nvaribals.append(1)\nvaribals\n\n# 清空\nvaribals.clear()\n\nvaribals = [1,12,23,4234, [1,2]]\n\nnew_varibals = varibals.copy()\n\nnew_varibals[-1][0] = 99999\nnew_varibals\n\nvaribals\n\nid(new_varibals[-1]), id(varibals[-1])\n\na = [1,2]\nb = [3,4]\na + b\n\na.extend(b)\na\n\na\n\na.insert(0, 100)\n\na\n\na.pop(0)\n\na\n\na.remove('z')\n\na\n\na.remove?\n\na\n\na.sort(reverse=True)\n\na\n\n5 in a",
"tuple",
"var = tuple()\nvar = ()\ntype(var)\n\nvar = (1,2, 1, 3,4,5, [23,34,43])\n\nvar.count(1)\n\nvar.index(5)\n\na, b = 10, 20\n\na = 10, 20\n\na",
"| 元组变量 | 字符串变量 | 列表变量 | \n| --- | --- | -- |\n| t_1 = [1,2,3,4,5] | s_1 = 'ni hao' | l_1 = [1,2,3,4,5] |\n| t_2 = [5,6,7,8,9] | s_2 = 'how are you' | l_2 = [6,7,8,9,10] |\n| 函数 | 元组 | 实例 | 字符串 | 实例 | 列表 | 实例 |\n| :--- | :---: | :--- | :---: | :--- | :---: | :-- | :--- |\n| + | ✅ | t_1 + t_2 | ✅ | s_1 + s_2 | ✅ | l_1 + l_2 |\n| * | ✅ | t_1 * 2 | ✅ | s_1 * 2 | ✅ | l_1 * 2 |\n| > < | ✅ | t_1 > t_2 | ✅ | s_1 > s_2 | ✅ | l_1 > l2 |\n| [index] | ✅ | t_1[0] | ✅ | s_1[0] | ✅ | l_1[0] | 列表可以索引赋值,字符串、元组不可以 |\n| [::] | ✅ | t_1[::] | ✅ | s_1[0:10:1] | ✅ | l_1[0:10:2] | 列表可以切片赋值,字符串、元组不可以 |\n| len | ✅ | len(t_1) |✅ | len(s_1) | ✅ | len(l_1) |\n| bool | ✅ | bool(t_1) |✅ | bool(s_1) | ✅ | bool(l_1) | 空字符串、空列表、空元组转换为布尔型为False |\n| count | ✅ | t_1.count(1) | ✅ | s_1.count('n') | ✅ | l_1.count(1) |\n| index | ✅ | t_1.index(3) | ✅ | s_1.index('n') | ✅ | l_1.index(1) | \n| replace | | | ✅ | s_1.replace('n', 'N') | | | 字符串replace函数返回一个新字符串,原来的变量不变 |\n| sort | | | | | ✅ | l_1.sort() |\n| reverse | | | | | ✅ | l_1.reverse() | 字符串不可更改,只能通过生成一个新的字符串来翻转 | \n| append | | | | | ✅ | l_1.append(100) |\n| extend | | | | | ✅ | l_1.extend(l_2) |\n| remove | | | | | ✅ | l_1.remove(1) |\n| pop | | | | | ✅ | l_1.pop() |\n字典类型",
"var = {}\nvar = dict()\ntype(var)\n\nvar = {\n '中': 100,\n '左': 200\n}\n\nvar['中']\n\nwords = ['中', '左']\nlocation = [100, 200]\n\nlocation[words.index('中')]",
"拉锁函数",
"new_var = list(zip(words, location))\n\nnew_var\n\ndict(new_var)\n\nlist(zip([1,2], [3,4, 5], [4,5,5]))\n\nstudents = ['wong', 'li', 'sun', 'zhao', 'qian']\n\nmoney = dict.fromkeys(students, 10)",
"访问字典中的值",
"money['wong']\n\na = money.get('ww', '100')\n\nprint(a)\n\nmoney.keys()\n\nmoney.values()\n\nmoney.items()\n\n# 删除操作\nmoney.pop('wong')\n\nmoney\n\nmoney['nihao'] = 100\n\nmoney\n\nmoney.setdefault('haha', 1000)\n\nmoney"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
H-E-L-P/XID_plus
|
docs/build/html/notebooks/examples/XID+example_run_script_profile.ipynb
|
mit
|
[
"XID+ Example Run Script\n(This is based on a Jupyter notebook, available in the XID+ package and can be interactively run and edited)\nXID+ is a probababilistic deblender for confusion dominated maps. It is designed to:\n\nUse a MCMC based approach to get FULL posterior probability distribution on flux\nProvide a natural framework to introduce additional prior information\nAllows more representative estimation of source flux density uncertainties\nProvides a platform for doing science with the maps (e.g XID+ Hierarchical stacking, Luminosity function from the map etc)\n\nCross-identification tends to be done with catalogues, then science with the matched catalogues.\nXID+ takes a different philosophy. Catalogues are a form of data compression. OK in some cases, not so much in others, i.e. confused images: catalogue compression loses correlation information. Ideally, science should be done without compression.\nXID+ provides a framework to cross identify galaxies we know about in different maps, with the idea that it can be extended to do science with the maps!!\nPhilosophy: \n\nbuild a probabilistic generative model for the SPIRE maps\nInfer model on SPIRE maps\n\nBayes Theorem\n$p(\\mathbf{f}|\\mathbf{d}) \\propto p(\\mathbf{d}|\\mathbf{f}) \\times p(\\mathbf{f})$\nIn order to carry out Bayesian inference, we need a model to carry out inference on.\nFor the SPIRE maps, our model is quite simple, with likelihood defined as:\n $L = p(\\mathbf{d}|\\mathbf{f}) \\propto |\\mathbf{N_d}|^{-1/2} \\exp\\big{ -\\frac{1}{2}(\\mathbf{d}-\\mathbf{Af})^T\\mathbf{N_d}^{-1}(\\mathbf{d}-\\mathbf{Af})\\big}$\nwhere:\n $\\mathbf{N_{d,ii}} =\\sigma_{inst.,ii}^2+\\sigma_{conf.}^2$\nSimplest model for XID+ assumes following:\n\nAll sources are known and have positive flux (fi)\nA global background (B) contributes to all pixels \nPRF is fixed and known\nConfusion noise is constant and not correlated across pixels\n\n\nBecause we are getting the joint probability distribution, our model 
is generative i.e. given parameters, we generate data and vica-versa\nCompared to discriminative model (i.e. neural network), which only obtains conditional probability distribution i.e. Neural network, give inputs, get output. Can't go other way'\nGenerative model is full probabilistic model. Allows more complex relationships between observed and target variables\nXID+ SPIRE\nXID+ applied to GALFORM simulation of COSMOS field\n\nSAM simulation (with dust) ran through SMAP pipeline_ similar depth and size as COSMOS\nUse galaxies with an observed 100 micron flux of gt. $50\\mathbf{\\mu Jy}$. Gives 64823 sources\nUninformative prior: uniform $0 - 10{^3} \\mathbf{mJy}$\n\nImport required modules",
"from astropy.io import ascii, fits\nimport pylab as plt\n%matplotlib inline\nfrom astropy import wcs\n\n\nimport numpy as np\nimport xidplus\nfrom xidplus import moc_routines\nimport pickle",
"Set image and catalogue filenames",
"xidplus.__path__[0]\n\n#Folder containing maps\nimfolder=xidplus.__path__[0]+'/../test_files/'\n\npswfits=imfolder+'cosmos_itermap_lacey_07012015_simulated_observation_w_noise_PSW_hipe.fits.gz'#SPIRE 250 map\npmwfits=imfolder+'cosmos_itermap_lacey_07012015_simulated_observation_w_noise_PMW_hipe.fits.gz'#SPIRE 350 map\nplwfits=imfolder+'cosmos_itermap_lacey_07012015_simulated_observation_w_noise_PLW_hipe.fits.gz'#SPIRE 500 map\n\n\n#Folder containing prior input catalogue\ncatfolder=xidplus.__path__[0]+'/../test_files/'\n#prior catalogue\nprior_cat='lacey_07012015_MillGas.ALLVOLS_cat_PSW_COSMOS_test.fits'\n\n\n#output folder\noutput_folder='./'",
"Load in images, noise maps, header info and WCS information",
"#-----250-------------\nhdulist = fits.open(pswfits)\nim250phdu=hdulist[0].header\nim250hdu=hdulist[1].header\n\nim250=hdulist[1].data*1.0E3 #convert to mJy\nnim250=hdulist[2].data*1.0E3 #convert to mJy\nw_250 = wcs.WCS(hdulist[1].header)\npixsize250=3600.0*w_250.wcs.cd[1,1] #pixel size (in arcseconds)\nhdulist.close()\n#-----350-------------\nhdulist = fits.open(pmwfits)\nim350phdu=hdulist[0].header\nim350hdu=hdulist[1].header\n\nim350=hdulist[1].data*1.0E3 #convert to mJy\nnim350=hdulist[2].data*1.0E3 #convert to mJy\nw_350 = wcs.WCS(hdulist[1].header)\npixsize350=3600.0*w_350.wcs.cd[1,1] #pixel size (in arcseconds)\nhdulist.close()\n#-----500-------------\nhdulist = fits.open(plwfits)\nim500phdu=hdulist[0].header\nim500hdu=hdulist[1].header \nim500=hdulist[1].data*1.0E3 #convert to mJy\nnim500=hdulist[2].data*1.0E3 #convert to mJy\nw_500 = wcs.WCS(hdulist[1].header)\npixsize500=3600.0*w_500.wcs.cd[1,1] #pixel size (in arcseconds)\nhdulist.close()",
"Load in catalogue you want to fit (and make any cuts)",
"hdulist = fits.open(catfolder+prior_cat)\nfcat=hdulist[1].data\nhdulist.close()\ninra=fcat['RA']\nindec=fcat['DEC']\n# select only sources with 100micron flux greater than 50 microJy\nsgood=fcat['S100']>0.050\ninra=inra[sgood]\nindec=indec[sgood]",
"XID+ uses Multi Order Coverage (MOC) maps for cutting down maps and catalogues so they cover the same area. It can also take in MOCs as selection functions to carry out additional cuts. Lets use the python module pymoc to create a MOC, centered on a specific position we are interested in. We will use a HEALPix order of 15 (the resolution: higher order means higher resolution), have a radius of 100 arcseconds centered around an R.A. of 150.74 degrees and Declination of 2.03 degrees.",
"from astropy.coordinates import SkyCoord\nfrom astropy import units as u\n#c = SkyCoord(ra=[150.74]*u.degree, dec=[2.03]*u.degree) \nc = SkyCoord(ra=inra*u.degree, dec=indec*u.degree) \nimport pymoc\nmoc=pymoc.util.catalog.catalog_to_moc(c,100,15)",
"XID+ is built around two python classes. A prior and posterior class. There should be a prior class for each map being fitted. It is initiated with a map, noise map, primary header and map header and can be set with a MOC. It also requires an input prior catalogue and point spread function.",
"#---prior250--------\nprior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertianty map, wcs info and primary header\nprior250.prior_cat(inra,indec,prior_cat)#Set input catalogue\nprior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian pdf with mu and sigma)\n#---prior350--------\nprior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc)\nprior350.prior_cat(inra,indec,prior_cat)\nprior350.prior_bkg(-5.0,5)\n\n#---prior500--------\nprior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc)\nprior500.prior_cat(inra,indec,prior_cat)\nprior500.prior_bkg(-5.0,5)",
"Set PRF. For SPIRE, the PRF can be assumed to be Gaussian with a FWHM of 18.15, 25.15, 36.3 '' for 250, 350 and 500 $\\mathrm{\\mu m}$ respectively. Lets use the astropy module to construct a Gaussian PRF and assign it to the three XID+ prior classes.",
"#pixsize array (size of pixels in arcseconds)\npixsize=np.array([pixsize250,pixsize350,pixsize500])\n#point response function for the three bands\nprfsize=np.array([18.15,25.15,36.3])\n#use Gaussian2DKernel to create prf (requires stddev rather than fwhm hence pfwhm/2.355)\nfrom astropy.convolution import Gaussian2DKernel\n\n##---------fit using Gaussian beam-----------------------\nprf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101)\nprf250.normalize(mode='peak')\nprf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101)\nprf350.normalize(mode='peak')\nprf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101)\nprf500.normalize(mode='peak')\n\npind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms of pixel scale of map\npind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms of pixel scale of map\npind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in terms of pixel scale of map\n\nprior250.set_prf(prf250.array,pind250,pind250)#requires PRF as 2d grid, and x and y bins for grid (in pixel scale)\nprior350.set_prf(prf350.array,pind350,pind350)\nprior500.set_prf(prf500.array,pind500,pind500)\n\nprint('fitting '+ str(prior250.nsrc)+' sources \\n')\nprint('using ' + str(prior250.snpix)+', '+ str(prior250.snpix)+' and '+ str(prior500.snpix)+' pixels')\n\n\nimport pickle\n#from moc, get healpix pixels at a given order\nfrom xidplus import moc_routines\norder=9\ntiles=moc_routines.get_HEALPix_pixels(order,prior250.sra,prior250.sdec,unique=True)\norder_large=6\ntiles_large=moc_routines.get_HEALPix_pixels(order_large,prior250.sra,prior250.sdec,unique=True)\nprint('----- There are '+str(len(tiles))+' tiles required for input catalogue and '+str(len(tiles_large))+' large tiles')\noutput_folder='./'\noutfile=output_folder+'Master_prior_test.pkl'\nwith open(outfile, 'wb') as f:\n pickle.dump({'priors':[prior250,prior350,prior500],'tiles':tiles,'order':order,'version':xidplus.io.git_version()},f)\n\nfrom xidplus.stan_fit import 
SPIRE as SPIRE_stan\nfrom xidplus.numpyro_fit import SPIRE as SPIRE_numpyro\nn_sources=[]\nn_area=[]\ntime_stan=[]\ntime_numpyro=[]\ndef run_xidplus_numpyro(priors):\n fit=SPIRE_numpyro.all_bands(priors)\n return xidplus.posterior_numpyro(fit,priors)\n \n\nfor i,order in enumerate(range(7,13)):\n with open(outfile, 'rb') as f:\n obj=pickle.load(f)\n priors=obj['priors']\n moc=moc=moc_routines.get_fitting_region(order,\n moc_routines.get_HEALPix_pixels(order,150.2,2.3,unique=True))\n for p in priors:\n p.moc=moc\n p.cut_down_prior()\n p.prior_bkg(0.0,5)\n p.get_pointing_matrix()\n p.upper_lim_map()\n\n print('fitting '+ str(priors[0].nsrc)+' sources \\n')\n print('there are '+ str(priors[0].snpix)+' pixels')\n print(' order: '+str(order))\n n_sources.append(priors[0].nsrc)\n n_area.append(priors[0].moc.area_sq_deg)\n #t_s= %timeit -n1 -r1 -o SPIRE_stan.all_bands(priors[0],priors[1],priors[2],iter=1000)\n t_np = %timeit -n1 -r1 -o run_xidplus_numpyro(priors)\n #time_stan.append(t_s)\n time_numpyro.append(t_np)\n \n \n\n\n[t.average for t in time_stan]\n\ntime_stan=[2889.8496906470973, 287.8023751830915, 32.09536925505381, 11.379522859933786]\n\nfig, ax_f = plt.subplots()\nax_c = ax_f.twiny()\n\n# automatically update ylim of ax2 when ylim of ax1 changes.\n#ax_f.plot(n_area,time_stan,label='Stan')\nax_f.plot(n_area,[t.best for t in time_numpyro],label='NumPyro')\nax_f.set_ylabel('Time (s)')\nax_c.set_xlim(n_sources[-1],n_sources[0])\nax_f.set_title('Profiling')\nax_f.set_xlabel('Area (sq. deg)')\nax_c.set_xlabel('No. Sources')\nax_f.legend()\nplt.show()\n\nfig, ax_f = plt.subplots()\nax_c = ax_f.twiny()\n\n# automatically update ylim of ax2 when ylim of ax1 changes.\nax_f.plot(n_area,np.asarray(time_stan)/np.asarray([t.best for t in time_numpyro]),)\n\nax_c.set_xlim(n_sources[-1],n_sources[0])\nax_f.set_title('Profiling')\nax_f.set_xlabel('Area (sq. deg)')\nax_c.set_xlabel('No. 
Sources')\nax_f.set_ylabel('Stan/NumPyro')\nax_f.legend()\nplt.show()\n\norder=9\nwith open(outfile, 'rb') as f:\n obj=pickle.load(f)\npriors=obj['priors']\nmoc=moc=moc_routines.get_fitting_region(order,\n moc_routines.get_HEALPix_pixels(order,150.2,2.3,unique=True))\nfor p in priors:\n p.moc=moc\n p.cut_down_prior()\n p.prior_bkg(0.0,5)\n p.get_pointing_matrix()\n p.upper_lim_map()\n\nprint('fitting '+ str(priors[0].nsrc)+' sources \\n')\nprint('there are '+ str(priors[0].snpix)+' pixels')\nprint(' order: '+str(order))\n\n#fit_stan=SPIRE_stan.all_bands(priors[0],priors[1],priors[2],iter=1000)\nfit_pyro=SPIRE_numpyro.all_bands(priors)\nposterior_numpyro=xidplus.posterior_numpyro(fit_pyro,priors)\nxidplus.save(priors,posterior_numpyro,'test_numpyro_order_'+str(order))\n#posterior=xidplus.posterior_stan(fit_stan,priors)\n#xidplus.save(priors,posterior,'test_stan_order_'+str(order))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
AlienVault-Labs/OTX-Python-SDK
|
howto_use_python_otx_api.ipynb
|
apache-2.0
|
[
"Using the OTX-Python-SDK\nAPI Key Configuration",
"from OTXv2 import OTXv2, IndicatorTypes\n\nfrom pandas.io.json import json_normalize\n\nfrom datetime import datetime, timedelta\n\notx = OTXv2(\"\")",
"Replace YOUR_KEY with your OTX API key. You can find it on your settings page https://otx.alienvault.com/settings.\nSubscriptions\nThe getall() method accesses your subscriptions. It downloads all the OTX pulses and their assocciated indicators of compromise (IOCs) from your account. This includes:\n- All pulses you subscribe to directly\n- All pulses by users you subscribe to\n- OTX pulses you created (including private pulses)\nIf this is the first time you are using your account, the download includes all pulses created by AlienVault. All users are subscribed to the AlienVault user by default.",
"pulses = otx.getall()\n\nlen(pulses)",
"Let's list a few pulses:",
"json_normalize(pulses)[0:5]",
"author_name: The username of the OTX User that created the pulse\ncreated: Date when the pulse was created in the system\ndescription: Describes the pulse in terms of the type of threat it poses, and any other facts that may link it to other threat indicators.\nid: Unique identifier of the pulse\nindicators: Collection of Indicators Of Compromise \nmodified: Date when the pulse was last modified\nname: Name of the pulse\nreferences: List of references to papers, websites or blogs related to the threat described in the pulse\nrevision: Revision number that increments each time pulse contents change\ntags: List of tags that provide information about pulse content, for example, Phshing, malware, C&C, and apt.\n\nLet's explore the indicators object:",
"json_normalize(pulses[1][\"indicators\"])",
"_id: Unique identifier of the IOC\ncreated: Date IOC was added to the pulse\ndescription: Describe the Indicator Of Compromise\nindicator: The IOC\nindicator_type: Type of indicator\n\nThe following Indicator Types are supported (also defined in IndicatorTypes.py):",
"indicator_types = [\n\t\t\t{\n\t\t\t \"name\": \"IPv4\", \n\t\t\t \"description\": \"An IPv4 address indicating the online location of a server or other computer.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \"name\": \"IPv6\", \n\t\t\t \"description\": \"An IPv6 address indicating the online location of a server or other computer.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \"name\": \"domain\", \n\t\t\t \"description\": \"A domain name for a website or server. Domains encompass a series of hostnames.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \"name\": \"hostname\", \n\t\t\t \"description\": \"The hostname for a server located within a domain.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \n\t\t\t \"name\": \"email\", \n\t\t\t \"description\": \"An email associated with suspicious activity.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \"name\": \"URL\", \n\t\t\t \"description\": \" Uniform Resource Location (URL) summarizing the online location of a file or resource.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \n\t\t\t \"name\": \"URI\", \n\t\t\t \"description\": \"Uniform Resource Indicator (URI) describing the explicit path to a file hosted online.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \"name\": \"FileHash-MD5\", \n\t\t\t \"description\": \"A MD5-format hash that summarizes the architecture and content of a file.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \"name\": \"FileHash-SHA1\", \n\t\t\t \"description\": \"A SHA-format hash that summarizes the architecture and content of a file.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \"name\": \"FileHash-SHA256\", \n\t\t\t \"description\": \"A SHA-256-format hash that summarizes the architecture and content of a file.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \n\t\t\t \"name\": \"FileHash-PEHASH\", \n\t\t\t \"description\": \"A PEPHASH-format hash that summarizes the architecture and content of a file.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \n\t\t\t \"name\": \"FileHash-IMPHASH\", \n\t\t\t \"description\": \"An IMPHASH-format hash that summarizes the architecture and content of a file.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \"name\": \"CIDR\", \n\t\t\t 
\"description\": \"Classless Inter-Domain Routing (CIDR) address, which describes both a server's IP address and the network architecture (routing path) surrounding that server.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \n\t\t\t \"name\": \"FilePath\", \n\t\t\t \"description\": \"A unique location in a file system.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \n\t\t\t \"name\": \"Mutex\", \n\t\t\t \"description\": \"The name of a mutex resource describing the execution architecture of a file.\"\n\t\t\t}, \n\t\t\t{\n\t\t\t \"name\": \"CVE\", \n\t\t\t \"description\": \"Common Vulnerability and Exposure (CVE) entry describing a software vulnerability that can be exploited to engage in malicious activity.\"\n\t\t\t}]\n\njson_normalize(indicator_types)\n\nmtime = (datetime.now() - timedelta(days=1)).isoformat()\n\nmtime",
"Events\nBesides receiving the pulse information, there is another function that can retrieve different events that are ocurring in the OTX system and affect your account.",
"events = otx.getevents_since(mtime)\n\njson_normalize(events)",
"id: object id of this event. Unique reference identifier\naction : \"[subscribe | unsubscribe | delete]\", Currently supports subscribe / unsubscribe events for users and pulses and delete events for pulses\nobject_type : \"[pulse | user]\", // Currently supports events for pulse and user objects\nobject_id : \"[pulse id | author id]\", // Unique id can be used to lookup pulses and users (e.g. to remove them from system, they would remove all pulses by author_id or an individual pulse by pulse \"id\".\n\"created\" : <timestamp of event>\n\nWhen developing an application, you must decide how you want to handle different types of events. For instance, if one OTX user unsubscribes from another user, do you want to delete the IOCs the second user contributed from your application? How do you plan to reconcile the data on the server versus the data in your application?\nThe same question comes up when users delete a pulse.\nUsing Search and get Pulse by ID\nThe OTX API allows you to search for pulses and users by keyword. This allows you to obtain pulses that you're not (yet) subscribed to.",
"pulses = otx.search_pulses(\"Russian\")\n\njson_normalize(pulses[\"results\"])",
"Let's say we're interested in viewing the full details (including indicators) from one of our search results. For example maybe we're interested in the Enigma Ransomware:",
"pulse_id = pulses[\"results\"][1][\"id\"]\n\npulse_details = otx.get_pulse_details(pulse_id)\n\njson_normalize(pulse_details)",
"Indicator details\nLet's investigate an indicator included in the Enigma Ransomware pulse.",
"indicator = pulse_details[\"indicators\"][4][\"indicator\"]\n\nindicator_details = otx.get_indicator_details_full(IndicatorTypes.IPv4, indicator)",
"Indicator details are divided into sections for convenience:",
"indicator_details.keys()\n\njson_normalize(indicator_details[\"url_list\"])\n\njson_normalize(indicator_details[\"passive_dns\"].get('passive_dns'))",
"Indicator details are not available for all supported indicator types. IndicatorTypes.supported_api_types contains a list of the indicator types you can use with get_indicator_details_by_section and get_indicator_details_full. \nCreate pulse\nYou can create new pulses using the create_pulse function. A name string is required. Public boolean is also required but will be set True if not provided:",
"indicators = [{\"indicator\": \"82.194.84.121\", \"description\":\"\", \"type\": \"IPv4\"}, {\"indicator\": \"82.194.84.122\", \"description\":\"\", \"type\": \"IPv4\"}]\n\nnew_pulse = otx.create_pulse(name=\"IPy Notebook Test\", indicators=indicators, public=False)\n\njson_normalize(new_pulse)",
"The following fields can be passed into create_pulse:\n- name(string, required) pulse name\n- public(boolean, required) long form description of threat\n- description(string) long form description of threat\n- tlp(string, white/green/amber/red) Traffic Light Protocol level for threat sharing\n- tags(list of strings) short keywords to associate with your pulse\n- references(list of strings, preferably URLs) external references for this threat\n- indicators(list of objects) IOCs to include in pulse"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
emjotde/UMZ
|
Wyklady/10/Sieci neuronowe.ipynb
|
cc0-1.0
|
[
"%load_ext tikzmagic\n%matplotlib inline\n\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom IPython.core.display import HTML\ndef css_styling():\n styles = \"\"\"\n<style>\n.output_png { text-align: center; }\n</style>\n \"\"\"\n return HTML(styles)\ncss_styling()\n",
"Sieci neuronowe\nMetody uczenia\nTrochę historii: Perceptron liniowy\nMark 1 perceptron (Frank Rosenblatt, 1957): \n* Aparat przeznaczony do rozpoznawania obrazu;\n* 400 fotokomórek\n* Wagi to potencjometry;\n* Wagi aktualizowana za pomocą silniczków.\nThe New York Times, 1958:\n\n[...] the embryo of an electronic computer that the Navy expects will be able to walk, talk, see, write, reproduce itself and be conscious of its existence.\n\n<img style=\"margin: auto\" width=\"80%\" src=\"http://m.natemat.pl/b94a41cd7322e1b8793e4644e5f82683,641,0,0,0.png\" alt=\"Frank Rosenblatt\"/>\n<img style=\"margin: auto\" src=\"http://m.natemat.pl/02943a7dc0f638d786b78cd5c9e75742,641,0,0,0.png\" width=\"70%\" alt=\"Frank Rosenblatt\"/>\n<img style=\"margin:auto\" src=\"http://www.newyorker.com/wp-content/uploads/2012/11/frank-rosenblatt-perception.jpg\" width=\"60%\"/>",
"from IPython.display import YouTubeVideo\nYouTubeVideo('cNxadbrN_aI', width=800, height=600)",
"Uczenie (bez ingerencji człowieka)\nCykl uczenia perceptronu (w sumie 2000 \"epok\"):\n* pokazanie (do kamery cyfrowej) planszy z kolejnym obiektem (np. trójkat, koło, kwadrat,...);\n* zaobserwowanie, jaka lampka się zapaliła na wyjściu;\n* sprawdzenie, czy jest to właściwa lampka (arbitralnie ustalona);\n* wysłanie sygnału \"nagrody\" lub \"kary\".\n* Człowiek tylko \"podaje\" informacje.\nPerceptron: formalizacja\nNieliniowa funkcja aktywacji (Rosenblatt: funkcja schodkowa):",
"import matplotlib\nimport matplotlib.pyplot as plt\n\nmatplotlib.rcParams.update({'font.size': 16})\n\n\nplt.figure(figsize=(10,6))\nx = [-1,-.23,1] \ny = [-1, -1, 1]\nplt.ylim(-1.2,1.2)\nplt.xlim(-1.2,1.2)\nplt.plot([-2,2],[1,1], color='black', ls=\"dashed\")\nplt.plot([-2,2],[-1,-1], color='black', ls=\"dashed\")\nplt.step(x, y, lw=3)\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.xaxis.set_ticks_position('bottom')\nax.spines['bottom'].set_position(('data',0))\nax.yaxis.set_ticks_position('left')\nax.spines['left'].set_position(('data',0))\n\nplt.annotate(r'$\\theta_0$',\n xy=(-.23,0), xycoords='data',\n xytext=(-50, +50), textcoords='offset points', fontsize=26,\n arrowprops=dict(arrowstyle=\"->\"))\n\nplt.show()",
"$$ g(z) = \\left{ \n\\begin{array}{rl}\n1 & \\textrm{gdy $z > \\theta_0$} \\\n-1 & \\textrm{wpp.}\n\\end{array}\n\\right. $$\ngdzie $z = \\theta_0x_0 + \\ldots + \\theta_nx_n$. Niech $\\theta_0$ to próg aktywacji, ustalamy $x_0 = 1$. \nSkładamy wszystko w całość",
"%%tikz -l arrows,automata,positioning,shapes,shapes.geometric,fit -f png -s 2000,1400\n\\tikzstyle{every node}=[font=\\large]\n\\tikzstyle{every path}=[line width=1pt]\n\n\\node[state] (x0) {$1$};\n\\node[state] (x1) [below=0.5cm of x0] {$x_1$};\n\\node[state] (x2) [below=0.5cm of x1] {$x_2$};\n\\node[state, draw=none,fill=none] (dots) [below=0.5cm of x2] {$\\cdots$};\n\\node[state] (xn) [below=0.5cm of dots] {$x_n$};\n\n\\node[state,circle,label=above:{Funkcja wej\\'{s}cia}] (sum) [right=2cm of x2] {$z=\\displaystyle\\sum_{i=0}^{n}\\theta_ix_i$};\n\\node[state,rectangle,label=above:{Funkcja aktywacji}] (g) [right=of sum] \n{$g(z) = \\left\\{\\begin{array}{rl} 1 & \\textrm{gdy } z > \\theta_0 \\\\ -1 & \\textrm{wpp.} \\end{array}\\right.$};\n\\node[state] (output) [right=of g] {Wyj\\'{s}cie};\n\n\n\\path[->] \n(x0) edge node [above, pos=0.4] {$\\theta_0$} (sum)\n(x1) edge node [above, pos=0.4] {$\\theta_1$} (sum)\n(x2) edge node [above, pos=0.4] {$\\theta_2$} (sum)\n(xn) edge node [above, pos=0.4] {$\\theta_n$} (sum)\n(sum) edge node {} (g)\n(g) edge node {} (output);\n \n\\node [draw,dashed, fit= (x0) (x1) (x2) (dots) (xn),label=above:Cechy, label=below:{Warstwa 0}] {};\n\\node [draw,dashed, fit= (sum) (g) (output),label=above:Neuron, label=below:{Warstwa 1}, inner sep=0.65cm] {};",
"Pseudokod\n\nUstal wartości początkowe $\\theta$ (wektor 0 lub liczby losowe blisko 0)\nDla każdego przykładu $(x^{(i)}, y^{(i)})$, dla $i=1,\\ldots,m$\nOblicz wartość wyjścia $o^{(i)}$:\n$$o^{(i)} = g(\\theta^{T}x^{(i)}) = g(\\sum_{j=0}^{n} \\theta_jx_j^{(i)})$$\nWykonaj aktualizację wag (tzw. perceptron rule):\n$$ \\theta := \\theta + \\Delta \\theta $$\n$$ \\Delta \\theta = \\alpha(y^{(i)}-o^{(i)})x^{(i)} $$\n\n\n\nPytania:\n* Co nam to przypomina?\n* Jakie wartości może przyjąć wyrażenie $\\Delta \\theta_j$?\n Reguła perceptronowa: \n$$\\theta_j := \\theta_j + \\Delta \\theta_j $$\nPoprawnie zaklasyfikowane:\n\n$y^{(i)}=1$ oraz $o^{(i)}=1$ : $$\\Delta\\theta_j = \\alpha(1 - 1)x_j^{(i)} = 0$$\n$y^{(i)}=-1$ oraz $o^{(i)}=-1$ : $$\\Delta\\theta_j = \\alpha(-1 - -1)x_j^{(i)} = 0$$\n\nSkoro trafiłeś, to nic nie zmieniaj!\n Reguła perceptronowa: \n$$\\theta_j := \\theta_j + \\Delta \\theta_j $$\nNiepoprawnie zaklasyfikowane:\n\n$y^{(i)}=1$ oraz $o^{(i)}=-1$ : $$\\Delta\\theta_j = \\alpha(1 - -1)x_j^{(i)} = 2 \\alpha x_j^{(i)}$$\n$y^{(i)}=-1$ oraz $o^{(i)}=1$ : $$\\Delta\\theta_j = \\alpha(-1 - 1)x_j^{(i)} = -2 \\alpha x_j^{(i)}$$\n\nPrzesuń w wagi w odpowiednią stronę:\n* Czyli zmniejsz jeśli, niepoprawnie przekroczono próg; \n* Zwiększ, jeśli nie przekroczono.\nZalety:\n\nDosyć intuicyjny i prosty\nŁatwa implementacja\nWykazano, że konwerguje w skończonym czase, gdy dane można linowo oddzielić.\n\nWady:\n\nMoże \"skakać\" w nieskończoność dla danych, których nie da się oddzielić liniowo.",
"\n\nplt.figure(figsize=(16,7))\nplt.subplot(121)\nx = [-2,-.23,2] \ny = [-1, -1, 1]\nplt.ylim(-1.2,1.2)\nplt.xlim(-2.2,2.2)\nplt.plot([-2,2],[1,1], color='black', ls=\"dashed\")\nplt.plot([-2,2],[-1,-1], color='black', ls=\"dashed\")\nplt.step(x, y, lw=3)\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.xaxis.set_ticks_position('bottom')\nax.spines['bottom'].set_position(('data',0))\nax.yaxis.set_ticks_position('left')\nax.spines['left'].set_position(('data',0))\n\nplt.annotate(r'$\\theta_0$',\n xy=(-.23,0), xycoords='data',\n xytext=(-50, +50), textcoords='offset points', fontsize=26,\n arrowprops=dict(arrowstyle=\"->\"))\n\nimport numpy as np\n\nplt.subplot(122)\nx2 = np.linspace(-2,2,100)\ny2 = np.tanh(x2+ 0.23)\nplt.ylim(-1.2,1.2)\nplt.xlim(-2.2,2.2)\nplt.plot([-2,2],[1,1], color='black', ls=\"dashed\")\nplt.plot([-2,2],[-1,-1], color='black', ls=\"dashed\")\nplt.plot(x2, y2, lw=3)\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.xaxis.set_ticks_position('bottom')\nax.spines['bottom'].set_position(('data',0))\nax.yaxis.set_ticks_position('left')\nax.spines['left'].set_position(('data',0))\n\nplt.annotate(r'$\\theta_0$',\n xy=(-.23,0), xycoords='data',\n xytext=(-50, +50), textcoords='offset points', fontsize=26,\n arrowprops=dict(arrowstyle=\"->\"))\n\nplt.show()",
"Rozwiązanie: SGD!\nPerceptron a regresja liniowa",
"%%tikz -l arrows,automata,positioning,shapes,shapes.geometric,fit -f png -s 2000,1400\n\\tikzstyle{every node}=[font=\\large]\n\\tikzstyle{every path}=[line width=1pt]\n\n\\node[state] (x0) {$1$};\n\\node[state] (x1) [below=0.5cm of x0] {$x_1$};\n\\node[state] (x2) [below=0.5cm of x1] {$x_2$};\n\\node[state, draw=none,fill=none] (dots) [below=0.5cm of x2] {$\\cdots$};\n\\node[state] (xn) [below=0.5cm of dots] {$x_n$};\n\n\\node[state,circle,label=above:{Funkcja wej\\'{s}cia}] (sum) [right=2cm of x2] {$z=\\displaystyle\\sum_{i=0}^{n}\\theta_ix_i$};\n\\node[state,rectangle,label=above:{Funkcja aktywacji}] (g) [right=of sum] \n{$g(z) = z$};\n\\node[state] (output) [right=of g] {Wyj\\'{s}cie};\n\n\n\\path[->] \n(x0) edge node [above, pos=0.4] {$\\theta_0$} (sum)\n(x1) edge node [above, pos=0.4] {$\\theta_1$} (sum)\n(x2) edge node [above, pos=0.4] {$\\theta_2$} (sum)\n(xn) edge node [above, pos=0.4] {$\\theta_n$} (sum)\n(sum) edge node {} (g)\n(g) edge node {} (output);\n \n\\node [draw,dashed, fit= (x0) (x1) (x2) (dots) (xn),label=above:Cechy, label=below:{Warstwa 0}] {};\n\\node [draw,dashed, fit= (sum) (g) (output),label=above:Neuron, label=below:{Warstwa 1}, inner sep=0.65cm] {};",
"Uczenie regresji liniowej:\n\nModel: $$h_{\\theta}(x) = \\sum_{i=0}^n \\theta_ix_i$$\n\nFunkcja kosztu (błąd średniokwadratowy): $$J(\\theta) = \\frac{1}{m} \\sum_{i=1}^{m} (h_{\\theta}(x^{(i)}) - y^{(i)})^2$$\n\n\nPo obliczeniu $\\nabla J(\\theta)$, zwykły SGD\n\n\nPerceptron a\nbinarna regresja logistyczna",
"%%tikz -l arrows,automata,positioning,shapes,shapes.geometric,fit -f png -s 2000,1400\n\\tikzstyle{every node}=[font=\\large]\n\\tikzstyle{every path}=[line width=1pt]\n\n\\node[state] (x0) {$1$};\n\\node[state] (x1) [below=0.5cm of x0] {$x_1$};\n\\node[state] (x2) [below=0.5cm of x1] {$x_2$};\n\\node[state, draw=none,fill=none] (dots) [below=0.5cm of x2] {$\\cdots$};\n\\node[state] (xn) [below=0.5cm of dots] {$x_n$};\n\n\\node[state,circle,label=above:{Funkcja wej\\'{s}cia}] (sum) [right=2cm of x2] {$z=\\displaystyle\\sum_{i=0}^{n}\\theta_ix_i$};\n\\node[state,rectangle,label=above:{Funkcja aktywacji}] (g) [right=of sum] \n{$g(z) = \\displaystyle\\frac{1}{1+e^{-z}}$};\n\\node[state] (output) [right=of g] {Wyj\\'{s}cie};\n\n\n\\path[->] \n(x0) edge node [above, pos=0.4] {$\\theta_0$} (sum)\n(x1) edge node [above, pos=0.4] {$\\theta_1$} (sum)\n(x2) edge node [above, pos=0.4] {$\\theta_2$} (sum)\n(xn) edge node [above, pos=0.4] {$\\theta_n$} (sum)\n(sum) edge node {} (g)\n(g) edge node {} (output);\n \n\\node [draw,dashed, fit= (x0) (x1) (x2) (dots) (xn),label=above:Cechy, label=below:{Warstwa 0}] {};\n\\node [draw,dashed, fit= (sum) (g) (output),label=above:Neuron, label=below:{Warstwa 1}, inner sep=0.65cm] {};\n",
"Uczenie regresji logistycznej binarnej:\n\nModel: $$h_{\\theta}(x) = \\sigma(\\sum_{i=0}^n \\theta_ix_i) = P(1|x,\\theta)$$\n\nFunkcja kosztu (entropia krzyżowa): $$\\begin{eqnarray} J(\\theta) &=& -\\frac{1}{m} \\sum_{i=1}^{m} [y^{(i)}\\log P(1|x^{(i)},\\theta) \\ && + (1-y^{(i)})\\log(1-P(1|x^{(i)},\\theta))]\\end{eqnarray}$$\n\n\nPo obliczeniu $\\nabla J(\\theta)$, zwykły SGD\n\n\nPerceptron a\nwieloklasowa regresja logistyczna",
"%%tikz -l arrows,automata,positioning,shapes,shapes.geometric,fit -f png -s 1000,600\n\\tikzstyle{every node}=[font=\\large]\n\\tikzstyle{every path}=[line width=1pt]\n\n\\node[state] (x0) {$1$};\n\\node[state] (x1) [below=0.5cm of x0] {$x_1$};\n\\node[state] (x2) [below=0.5cm of x1] {$x_2$};\n\\node[state, draw=none,fill=none] (dots) [below=0.5cm of x2] {$\\cdots$};\n\\node[state] (xn) [below=0.5cm of dots] {$x_n$};\n\n\\node[state,circle] (sum1) [right=4cm of x1] {$g(\\sum)$};\n\\node[state,circle] (sum2) [right=4cm of x2] {$g(\\sum)$};\n\\node[state,circle] (sum3) [right=4cm of dots] {$g(\\sum)$};\n\n\\node[state, draw=none,fill=none] (p1) [right=0.5cm of sum1] {$P(c=0)$};\n\\node[state, draw=none,fill=none] (p2) [right=0.5cm of sum2] {$P(c=1)$};\n\\node[state, draw=none,fill=none] (p3) [right=0.5cm of sum3] {$P(c=2)$};\n\n\\path[->] \n(x0) edge node [above, pos=0.5] {$\\theta^{(0)}_{0}$} (sum1)\n(x1) edge node [above, pos=0.5] {$\\theta^{(0)}_{1}$} (sum1)\n(x2) edge node [above, pos=0.5] {$\\theta^{(0)}_{2}$} (sum1)\n(xn) edge node [above, pos=0.5] {$\\theta^{(0)}_{n}$} (sum1);\n \n\\path[-, thin, dotted] \n(x0) edge node {} (sum2)\n(x1) edge node {} (sum2)\n(x2) edge node {} (sum2)\n(xn) edge node {} (sum2)\n\n(x0) edge node {} (sum3)\n(x1) edge node {} (sum3)\n(x2) edge node {} (sum3)\n(xn) edge node [below, pos=0.5] {$\\theta^{(2)}_{n}$} (sum3);\n \n\\node [draw, dashed, fit= (x0) (x1) (x2) (dots) (xn),label=above:Cechy, label=below:{Warstwa 0}] (w0) {};\n\\node [draw, dashed, fit= (sum1) (sum2) (sum3), label=below:{Warstwa 1}, label=above:{$g(\\cdot) = \\mathrm{softmax}(\\cdot)$}] (w1) {};\n\n\\node[draw=none,fill=none] (theta) [below=1cm of w1] \n{$\\Theta = \\left[% \n \\begin{array}{ccc} %\n \\theta_0^{(0)} & \\theta_0^{(1)} & \\theta_0^{(2)} \\\\%\n \\theta_1^{(0)} & \\theta_1^{(1)} & \\theta_1^{(2)} \\\\%\n \\vdots & \\vdots & \\vdots \\\\%\n \\theta_n^{(0)} & \\theta_n^{(1)} & \\theta_n^{(2)} \\\\%\n \\end{array} \\right]$\n};\n\n",
"Wieloklasowa regresji logistyczna\n\nModel (dla $c$ klasyfikatorów binarnych): \n$$\\begin{eqnarray}\nh_{(\\theta^{(1)},\\dots,\\theta^{(c)})}(x) &=& \\mathrm{softmax}(\\sum_{i=0}^n \\theta_{i}^{(1)}x_i, \\ldots, \\sum_{i=0}^n \\theta_i^{(c)}x_i) \\ \n&=& \\left[ P(k|x,\\theta^{(1)},\\dots,\\theta^{(c)}) \\right]_{k=1,\\dots,c} \n\\end{eqnarray}$$\n\nFunkcja kosztu (przymując model regresji binarnej): $$\\begin{eqnarray} J(\\theta^{(k)}) &=& -\\frac{1}{m} \\sum_{i=1}^{m} [y^{(i)}\\log P(k|x^{(i)},\\theta^{(k)}) \\ && + (1-y^{(i)})\\log P(\\neg k|x^{(i)},\\theta^{(k)})]\\end{eqnarray}$$\n\n\nPo obliczeniu $\\nabla J(\\theta)$, c-krotne uruchomienie SGD, zastosowanie $\\mathrm{softmax}(X)$ do niezależnie uzyskanych klasyfikatorów binarnych.\n\n\nPrzyjmijmy: \n$$ \\Theta = (\\theta^{(1)},\\dots,\\theta^{(c)}) $$\n\n\n$$h_{\\Theta}(x) = \\left[ P(k|x,\\Theta) \\right]_{k=1,\\dots,c}$$\n$$\\delta(x,y) = \\left{\\begin{array}{cl} 1 & \\textrm{gdy } x=y \\ 0 & \\textrm{wpp.}\\end{array}\\right.$$\n\n\nWieloklasowa funkcja kosztu $J(\\Theta)$ (kategorialna entropia krzyżowa):\n$$ J(\\Theta) = -\\frac{1}{m}\\sum_{i=1}^{m}\\sum_{k=1}^{c} \\delta({y^{(i)},k}) \\log P(k|x^{(i)},\\Theta) $$\n\n\nGradient $\\nabla J(\\Theta)$:\n$$ \\dfrac{\\partial J(\\Theta)}{\\partial \\Theta_{j,k}} = -\\frac{1}{m}\\sum_{i = 1}^{m} (\\delta({y^{(i)},k}) - P(k|x^{(i)}, \\Theta)) x^{(i)}_j \n$$\n\n\nLiczymy wszystkie wagi jednym uruchomieniem SGD\n\n\nPodsumowanie sieci jednowarstwowych\n\nW przypadku jednowarstowej sieci neurnowej wystarczy znać gradient funkcji kosztu.\nWtedy liczymy tak samo jak w przypadku regresji liniowej, logisticznej, wieloklasowej logistycznej itp.\nWymienione modele to szczególne przypadki jednowarstwowych sieci neuronowych.\nRegresja liniowa i binarna logistyczna to jeden neuron.\nWieloklasowa regresja logistyczna to tyle neuronów ile klas.\nDobieramy funkcję aktywacji i funkcję kosztu do problemu.\n\nSieci wielowarstwowe - Przypomnienie",
"%%tikz -l arrows,automata,positioning,shapes,shapes.geometric,fit -f png -s 1000,600\n\n\\node[state] (x1) {$x_1$};\n\\node[state] (x2) [below=0.5cm of x1] {$x_2$};\n\\node[state, draw=none,fill=none] (dots) [below=0.5cm of x2] {$\\cdots$};\n\\node[state] (xn) [below=0.5cm of dots] {$x_n$};\n\n\\node[state,circle] (a1) [below right=-0.33cm and 3cm of x1] {$a^{(1)}_1$};\n\\node[state,circle] (a2) [below=0.5cm of a1] {$a^{(1)}_2$};\n\\node[state,circle] (a3) [below=0.5cm of a2] {$a^{(1)}_3$};\n\\node[state] (b1) [above left=1cm and 1cm of a1] {$1$};\n\n\\node[state,circle] (a21) [right=2cm of a1] {$a^{(2)}_1$};\n\\node[state,circle] (a22) [below=0.5cm of a21] {$a^{(2)}_2$};\n\\node[state,circle] (a23) [below=0.5cm of a22] {$a^{(2)}_3$};\n\\node[state] (b2) [above left=1cm and 1cm of a21] {$1$};\n\n\\node[state,circle] (a31) [right=2cm of a22] {$a^{(3)}_1$};\n\\node[state] (b3) [right=2cm of b2] {$1$};\n\n\\path[-] \n(b1) edge node [above=.2cm, pos=0.5] {$\\beta^{(1)}_{1}$} (a1)\n(x1) edge node [above, pos=0.5] {$\\Theta^{(1)}_{1,1}$} (a1)\n(x2) edge node [above, pos=0.5] {$\\Theta^{(1)}_{2,1}$} (a1)\n(xn) edge node [above, pos=0.5] {$\\Theta^{(1)}_{n,1}$} (a1);\n \n\\path[-, thin, dotted] \n(b1) edge node {} (a2)\n(x1) edge node {} (a2)\n(x2) edge node {} (a2)\n(xn) edge node {} (a2)\n\n(b1) edge node {} (a3)\n(x1) edge node {} (a3)\n(x2) edge node {} (a3)\n(xn) edge node [below, pos=0.5] {$\\Theta^{(1)}_{n,3}$} (a3);\n\n\\path[-] \n(b2) edge node [above=.2cm, pos=0.5] {$\\beta^{(2)}_{1}$} (a21)\n(a1) edge node [above, pos=0.5] {$\\Theta^{(2)}_{1,1}$} (a21)\n(a2) edge node [above, pos=0.5] {$\\Theta^{(2)}_{2,1}$} (a21)\n(a3) edge node [above, pos=0.5] {$\\Theta^{(2)}_{3,1}$} (a21);\n\n\\path[-, thin, dotted] \n(b2) edge node {} (a22)\n(a1) edge node {} (a22)\n(a2) edge node {} (a22)\n(a3) edge node {} (a22)\n\n(b2) edge node {} (a23)\n(a1) edge node {} (a23)\n(a2) edge node {} (a23)\n(a3) edge node [below, pos=0.5] {$\\Theta^{(2)}_{3,3}$} (a23);\n\n\\path[-] \n(b3) 
edge node [above=.5cm, pos=0.5] {$\\beta^{(3)}_{1}$} (a31)\n(a21) edge node [above, pos=0.5] {$\\Theta^{(3)}_{1,1}$} (a31)\n(a22) edge node [above, pos=0.5] {$\\Theta^{(3)}_{2,1}$} (a31)\n(a23) edge node [above, pos=0.5] {$\\Theta^{(3)}_{3,1}$} (a31);\n\n\\node [draw, dashed, fit= (x1) (x2) (dots) (xn)] (w0) {};\n\\node [draw, dashed, fit= (a1) (a2) (a3)] (w1) {};\n\\node [draw, dashed, fit= (a21) (a22) (a23)] (w2) {};\n\\node [draw, dashed, fit= (a31)] (w3) {};\n\n\\node [draw, draw=none, fill=none, below=0.5cm of w0] (mw0) {\\small$a^{(0)}=x$};\n\\node [draw, draw=none, fill=none, right=1.1cm of mw0] (mw1) \n{\\small$\\begin{array}{l}z^{(1)} = a^{(0)} \\Theta^{(1)} + \\beta^{(1)}\\\\g^{(1)}(x)=\\tanh(x)\\\\a^{(1)}=g^{(1)}(z^{(1)})\\end{array}$};\n\\node [draw, draw=none, fill=none, right=-.3cm of mw1] (mw2) \n{\\small$\\begin{array}{l}z^{(2)} = a^{(1)} \\Theta^{(2)} + \\beta^{(2)}\\\\g^{(2)}(x)=\\tanh(x)\\\\a^{(2)}=g^{(2)}(z^{(2)})\\end{array}$};\n\\node [draw, draw=none, fill=none, right=-.3cm of mw2] (mw3) \n{\\small$\\begin{array}{l}z^{(3)} = a^{(2)} \\Theta^{(3)} + \\beta^{(3)}\\\\g^{(3)}(x)=\\tanh(x)\\\\a^{(3)}=g^{(3)}(z^{(3)})\\end{array}$};\n",
"Feedforward 1\n\nMając daną $n$-warstwową sieć neuronową oraz jej parametry $\\Theta^{(1)}, \\ldots, \\Theta^{(L)} $ oraz $\\beta^{(1)}, \\ldots, \\beta^{(L)} $ liczymy: \n$$a^{(l)} = g^{(l)}\\left( a^{(l-1)} \\Theta^{(l)} + \\beta^{(l)} \\right). $$ \nParametry $\\Theta$ to wagi na połączeniach miedzy neuronami dwóch warstw. Rozmiar macierzy $\\Theta^{(l)}$, czyli macierzy wag na połączeniach warstw $a^{(l-1)}$ i $a^{(l)}$, to $\\dim(a^{(l-1)}) \\times \\dim(a^{(l)})$. \nParametry $\\beta$ zastępują tutaj dodawanie kolumny z jedynkami do naszej macierzy cech. Macierz $\\beta^{(l)}$ ma rozmiar równy liczbie neuronów w odpowiedniej warstwie, czyli $1 \\times \\dim(a^{(l)})$.\n\nFeedforward 2\n\nFunkcje $g^{(l)}$ to tzw. funkcje aktywacji.\nDla $i = 0$ przyjmujemy $a^{(0)} = \\mathrm{x}$ (wektor wierszowy cech) oraz $g^{(0)}(x) = x$ (identyczność).\nW przypadku klasyfikacji, często dla ostatniej warstwy $L$ (o rozmiarze równym liczbie klas) przyjmuje się $g^{(L)}(x) = \\mathop{\\mathrm{softmax}}(x)$.\nPozostałe funkcje aktywacji najcześciej mają postać sigmoidy (np. funkcja logistyczna lub tangens hiperboliczny, $\\tanh$).\nW przypadku regresji często mamy pojedynczy neuron wyjściowy jak na obrazku. Funkcją aktywacji może wtedy być np. funkcja identycznościowa.\n\nUczenie wielowarstwowych sieci\nMając algorytm SGD oraz gradienty wszystkich wag, moglibyśmy trenować każdą sieć. \n\n\nNiech:\n$$\\Theta = (\\Theta^{(1)},\\Theta^{(2)},\\Theta^{(3)},\\beta^{(1)},\\beta^{(2)},\\beta^{(3)})$$\n\n\nFunkcja sieci neuronowej z grafiki:\n\n\n$$\\small h_\\Theta(x) = \\tanh(\\tanh(\\tanh(x\\Theta^{(1)}+\\beta^{(1)})\\Theta^{(2)} + \\beta^{(2)})\\Theta^{(3)} + \\beta^{(3)})$$\n* Funkcja kosztu dla regresji:\n$$J(\\Theta) = \\dfrac{1}{2m} \\sum_{i=1}^{m} (h_\\Theta(x^{(i)})- y^{(i)})^2 $$\n* Jak wyglądają gradienty?\n$$\\nabla_{\\Theta^{(1)}} J(\\Theta) = ? 
\\quad \\nabla_{\\beta^{(3)}} J(\\Theta) = ?$$\nPropagacja wsteczna (Backpropagation)\nDla jednego przykładu (x,y):\n\nWejście: Ustaw aktywacje w warstwie cech $a^{(0)}=x$ \nFeedforward: dla $l=1,\\dots,L$ oblicz \n$$z^{(l)} = a^{(l-1)} \\Theta^{(l)} + \\beta^{(l)} \\textrm{ oraz } a^{(l)}=g^{(l)}(z^{(l)})$$\nBłąd wyjścia $\\delta^{(L)}$: oblicz wektor $$\\delta^{(L)}= \\nabla_{a^{(L)}}J(\\Theta) \\odot {g^{\\prime}}^{(L)}(z^{(L)})$$\nPropagacja wsteczna błędu: dla $l = L-1,L-2,\\dots,1$ oblicz $$\\delta^{(l)} = \\delta^{(l+1)}(\\Theta^{(l+1)})^T \\odot {g^{\\prime}}^{(l)}(z^{(l)})$$\nGradienty: \n$\\dfrac{\\partial}{\\partial \\Theta_{ij}^{(l)}} J(\\Theta) = a_i^{(l-1)}\\delta_j^{(l)} \\textrm{ oraz } \\dfrac{\\partial}{\\partial \\beta_{j}^{(l)}} J(\\Theta) = \\delta_j^{(l)}$\n\n\n\n$$\\small J(\\Theta) = \\frac{1}{2}(a^{(L)} - y)^2 $$\n$$\\small \\dfrac{\\partial}{\\partial a^{(L)}} J(\\Theta) = a^{(L)} - y$$\n$$\\small \\tanh^{\\prime}(x) = 1 - \\tanh^2(x)$$",
"%%tikz -l arrows,automata,positioning,shapes,shapes.geometric,fit -f png -s 1000,600\n\n\\node[state] (x1) {$x_1$};\n\\node[state] (x2) [below=0.5cm of x1] {$x_2$};\n\\node[state, draw=none,fill=none] (dots) [below=0.5cm of x2] {$\\cdots$};\n\\node[state] (xn) [below=0.5cm of dots] {$x_n$};\n\n\\node[state,circle] (a1) [below right=-0.33cm and 3cm of x1] {$a^{(1)}_1$};\n\\node[state,circle] (a2) [below=0.5cm of a1] {$a^{(1)}_2$};\n\\node[state,circle] (a3) [below=0.5cm of a2] {$a^{(1)}_3$};\n\\node[state] (b1) [above left=1cm and 1cm of a1] {$1$};\n\n\\node[state,circle] (a21) [right=2cm of a1] {$a^{(2)}_1$};\n\\node[state,circle] (a22) [below=0.5cm of a21] {$a^{(2)}_2$};\n\\node[state,circle] (a23) [below=0.5cm of a22] {$a^{(2)}_3$};\n\\node[state] (b2) [above left=1cm and 1cm of a21] {$1$};\n\n\\node[state,circle] (a31) [right=2cm of a22] {$a^{(3)}_1$};\n\\node[state] (b3) [right=2cm of b2] {$1$};\n\n\\node[draw=none, fill=none] (delta3) [below right=0.5cm and -1cm of a31] \n{$\\delta^{(3)}=(a^{(3)}-y) \\odot (1-\\tanh^2(z^{(3)}))$};\n\n\\node[draw=none, fill=none] (delta2) [below right=0.5cm and -1cm of a23] \n{$\\delta^{(2)}= \\delta^{(3)}(\\Theta^{(3)})^T \\odot (1-\\tanh^2(z^{(2)}))$};\n\n\\node[draw=none, fill=none] (delta1) [below right=1.5cm and -4cm of a3] \n{$\\delta^{(1)}= \\delta^{(2)}(\\Theta^{(2)})^T \\odot (1-\\tanh^2(z^{(1)}))$};\n\n\n\\path[-] \n(b1) edge node [above=.2cm, pos=0.5] {$\\beta^{(1)}_{1}$} (a1)\n(x1) edge node [above, pos=0.5] {$\\Theta^{(1)}_{1,1}$} (a1)\n(x2) edge node [above, pos=0.5] {$\\Theta^{(1)}_{2,1}$} (a1)\n(xn) edge node [above, pos=0.5] {$\\Theta^{(1)}_{n,1}$} (a1);\n \n\\path[-, thin, dotted] \n(b1) edge node {} (a2)\n(x1) edge node {} (a2)\n(x2) edge node {} (a2)\n(xn) edge node {} (a2)\n\n(b1) edge node {} (a3)\n(x1) edge node {} (a3)\n(x2) edge node {} (a3)\n(xn) edge node [below, pos=0.5] {$\\Theta^{(1)}_{n,3}$} (a3);\n\n\\path[-] \n(b2) edge node [above=.2cm, pos=0.5] {$\\beta^{(2)}_{1}$} (a21)\n(a1) edge node 
[above, pos=0.5] {$\\Theta^{(2)}_{1,1}$} (a21)\n(a2) edge node [above, pos=0.5] {$\\Theta^{(2)}_{2,1}$} (a21)\n(a3) edge node [above, pos=0.5] {$\\Theta^{(2)}_{3,1}$} (a21);\n\n\\path[-, thin, dotted] \n(b2) edge node {} (a22)\n(a1) edge node {} (a22)\n(a2) edge node {} (a22)\n(a3) edge node {} (a22)\n\n(b2) edge node {} (a23)\n(a1) edge node {} (a23)\n(a2) edge node {} (a23)\n(a3) edge node [below, pos=0.5] {$\\Theta^{(2)}_{3,3}$} (a23);\n\n\\path[-] \n(b3) edge node [above=.5cm, pos=0.5] {$\\beta^{(3)}_{1}$} (a31)\n(a21) edge node [above, pos=0.5] {$\\Theta^{(3)}_{1,1}$} (a31)\n(a22) edge node [above, pos=0.5] {$\\Theta^{(3)}_{2,1}$} (a31)\n(a23) edge node [above, pos=0.5] {$\\Theta^{(3)}_{3,1}$} (a31);\n\n\\node [draw, dashed, fit= (x1) (x2) (dots) (xn)] (w0) {};\n\\node [draw, dashed, fit= (a1) (a2) (a3)] (w1) {};\n\\node [draw, dashed, fit= (a21) (a22) (a23)] (w2) {};\n\\node [draw, dashed, fit= (a31)] (w3) {};\n\n\\node [draw, draw=none, fill=none, below=2cm of w0] (mw0) {\\small$a^{(0)}=x$};\n\\node [draw, draw=none, fill=none, right=.5cm of mw0] (mw1) \n{\\small$\\begin{array}{l}z^{(1)} = a^{(0)} \\Theta^{(1)} + \\beta^{(1)}\\\\g^{(1)}(x)=\\tanh(x)\\\\a^{(1)}=g^{(1)}(z^{(1)})\\end{array}$};\n\\node [draw, draw=none, fill=none, right=.5cm of mw1] (mw2) \n{\\small$\\begin{array}{l}z^{(2)} = a^{(1)} \\Theta^{(2)} + \\beta^{(2)}\\\\g^{(2)}(x)=\\tanh(x)\\\\a^{(2)}=g^{(2)}(z^{(2)})\\end{array}$};\n\\node [draw, draw=none, fill=none, right=.5cm of mw2] (mw3) \n{\\small$\\begin{array}{l}z^{(3)} = a^{(2)} \\Theta^{(3)} + \\beta^{(3)}\\\\g^{(3)}(x)=\\tanh(x)\\\\a^{(3)}=g^{(3)}(z^{(3)})\\end{array}$};\n\n\\path[->] \n(mw0) edge node {} (mw1)\n(mw1) edge node {} (mw2)\n(mw2) edge node {} (mw3)\n(mw3.east) edge[in=320,out=0] node {} (delta3.south)\n;\n\n\\path[->] \n(delta3) edge node {} (delta2)\n(delta2) edge node {} (delta1)\n;",
"SGD z propagacją wsteczną\nJedna iteracja:\n* Dla parametrów $\\Theta = (\\Theta^{(1)},\\ldots,\\Theta^{(L)})$ utwórz pomocnicze macierze zerowe $\\Delta = (\\Delta^{(1)},\\ldots,\\Delta^{(L)})$ o takich samych wymiarach (dla uproszczenia opuszczono wagi $\\beta$).\n* Dla $m$ przykładów w wsadzie (batch), $i = 1,\\ldots,m$:\n * Wykonaj algortym propagacji wstecz dla przykładu $(x^{(i)}, y^{(i)})$ i przechowaj gradienty $\\nabla_{\\Theta}J^{(i)}(\\Theta)$ dla tego przykładu;\n * $\\Delta := \\Delta + \\dfrac{1}{m}\\nabla_{\\Theta}J^{(i)}(\\Theta)$\n* Wykonaj aktualizację wag: $\\Theta := \\Theta - \\alpha \\Delta$"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
quaquel/EMAworkbench
|
docs/source/indepth_tutorial/general-introduction.ipynb
|
bsd-3-clause
|
[
"General Introduction\nSince 2010, I have been working on the development of an open source toolkit for supporting decision-making under deep uncertainty. This toolkit is known as the exploratory modeling workbench. The motivation for this name is that in my opinion all model-based deep uncertainty approaches are forms of exploratory modeling as first introduced by Bankes (1993). The design of the workbench has undergone various changes over time, but it has started to stabilize in the fall of 2016. In the summer 0f 2017, I published a paper detailing the workbench (Kwakkel, 2017). There is an in depth example in the paper, but for the documentation I want to provide a more tutorial style description of the functionality of the workbench.\nAs a starting point, I will use the Direct Policy Search example that is available for Rhodium (Quinn et al 2017). A quick note on terminology is in order here. I have a background in transport modeling. Here we often use discrete event simulation models. These are intrinsically stochastic models. It is standard practice to run these models several times and take descriptive statistics over the set of runs. In discrete event simulation, and also in the context of agent based modeling, this is known as running replications. The workbench adopts this terminology and draws a sharp distinction between designing experiments over a set of deeply uncertain factors, and performing replications of this experiment to cope with stochastic uncertainty.",
"import math\n\n# more or less default imports when using\n# the workbench\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom scipy.optimize import brentq\n\n\ndef get_antropogenic_release(xt, c1, c2, r1, r2, w1):\n \"\"\"\n\n Parameters\n ----------\n xt : float\n polution in lake at time t\n c1 : float\n center rbf 1\n c2 : float\n center rbf 2\n r1 : float\n ratius rbf 1\n r2 : float\n ratius rbf 2\n w1 : float\n weight of rbf 1\n\n Returns\n -------\n float\n\n note:: w2 = 1 - w1\n\n \"\"\"\n\n rule = w1 * (abs(xt - c1) / r1) ** 3 + (1 - w1) * (abs(xt - c2) / r2) ** 3\n at1 = max(rule, 0.01)\n at = min(at1, 0.1)\n\n return at\n\n\ndef lake_model(\n b=0.42,\n q=2.0,\n mean=0.02,\n stdev=0.001,\n delta=0.98,\n alpha=0.4,\n nsamples=100,\n myears=100,\n c1=0.25,\n c2=0.25,\n r1=0.5,\n r2=0.5,\n w1=0.5,\n seed=None,\n):\n \"\"\"runs the lake model for nsamples stochastic realisation using\n specified random seed.\n\n Parameters\n ----------\n b : float\n decay rate for P in lake (0.42 = irreversible)\n q : float\n recycling exponent\n mean : float\n mean of natural inflows\n stdev : float\n standard deviation of natural inflows\n delta : float\n future utility discount rate\n alpha : float\n utility from pollution\n nsamples : int, optional\n myears : int, optional\n c1 : float\n c2 : float\n r1 : float\n r2 : float\n w1 : float\n seed : int, optional\n seed for the random number generator\n\n Returns\n -------\n tuple\n\n \"\"\"\n np.random.seed(seed)\n Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5)\n\n X = np.zeros((myears,))\n average_daily_P = np.zeros((myears,))\n reliability = 0.0\n inertia = 0\n utility = 0\n\n for _ in range(nsamples):\n X[0] = 0.0\n decision = 0.1\n\n decisions = np.zeros(\n myears,\n )\n decisions[0] = decision\n\n natural_inflows = np.random.lognormal(\n math.log(mean**2 / math.sqrt(stdev**2 + mean**2)),\n math.sqrt(math.log(1.0 + stdev**2 / mean**2)),\n size=myears,\n 
)\n\n for t in range(1, myears):\n\n # here we use the decision rule\n decision = get_antropogenic_release(X[t - 1], c1, c2, r1, r2, w1)\n decisions[t] = decision\n\n X[t] = (\n (1 - b) * X[t - 1]\n + X[t - 1] ** q / (1 + X[t - 1] ** q)\n + decision\n + natural_inflows[t - 1]\n )\n average_daily_P[t] += X[t] / nsamples\n\n reliability += np.sum(X < Pcrit) / (nsamples * myears)\n inertia += np.sum(np.absolute(np.diff(decisions) < 0.02)) / (nsamples * myears)\n utility += (\n np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples\n )\n max_P = np.max(average_daily_P)\n return max_P, utility, inertia, reliability",
"Connecting a python function to the workbench\nNow we are ready to connect this model to the workbench. We have to specify the uncertainties, the outcomes, and the policy levers. For the uncertainties and the levers, we can use real valued parameters, integer valued parameters, and categorical parameters. For outcomes, we can use either scalar, single valued outcomes or time series outcomes. For convenience, we can also explicitly control constants in case we want to have them set to a value different from their default value.",
"from ema_workbench import RealParameter, ScalarOutcome, Constant, Model\nfrom dps_lake_model import lake_model\n\nmodel = Model(\"lakeproblem\", function=lake_model)\n\n# specify uncertainties\nmodel.uncertainties = [\n RealParameter(\"b\", 0.1, 0.45),\n RealParameter(\"q\", 2.0, 4.5),\n RealParameter(\"mean\", 0.01, 0.05),\n RealParameter(\"stdev\", 0.001, 0.005),\n RealParameter(\"delta\", 0.93, 0.99),\n]\n\n# set levers\nmodel.levers = [\n RealParameter(\"c1\", -2, 2),\n RealParameter(\"c2\", -2, 2),\n RealParameter(\"r1\", 0, 2),\n RealParameter(\"r2\", 0, 2),\n RealParameter(\"w1\", 0, 1),\n]\n\n# specify outcomes\nmodel.outcomes = [\n ScalarOutcome(\"max_P\"),\n ScalarOutcome(\"utility\"),\n ScalarOutcome(\"inertia\"),\n ScalarOutcome(\"reliability\"),\n]\n\n# override some of the defaults of the model\nmodel.constants = [\n Constant(\"alpha\", 0.41),\n Constant(\"nsamples\", 150),\n Constant(\"myears\", 100),\n]",
"Performing experiments\nNow that we have specified the model with the workbench, we are ready to perform experiments on it. We can use evaluators to distribute these experiments either over multiple cores on a single machine, or over a cluster using ipyparallel. Using any parallelization is an advanced topic, in particular if you are on a windows machine. The code as presented here will run fine in parallel on a mac or Linux machine. If you are trying to run this in parallel using multiprocessing on a windows machine, from within a jupyter notebook, it won't work. The solution is to move the lake_model and get_antropogenic_release to a separate python module and import the lake model function into the notebook. \nAnother common practice when working with the exploratory modeling workbench is to turn on the logging functionality that it provides. This will report on the progress of the experiments, as well as provide more insight into what is happening in particular in case of errors. \nIf we want to perform experiments on the model we have just defined, we can use the perform_experiments method on the evaluator, or the stand alone perform_experiments function. We can perform experiments over the uncertainties and/or over the levers. Any given parameterization of the levers is known as a policy, while any given parametrization over the uncertainties is known as a scenario. Any policy is evaluated over each of the scenarios. So if we want to use 100 scenarios and 10 policies, this means that we will end up performing 100 * 10 = 1000 experiments. By default, the workbench uses Latin hypercube sampling for both sampling over levers and sampling over uncertainties. However, the workbench also offers support for full factorial, partial factorial, and Monte Carlo sampling, as well as wrappers for the various sampling schemes provided by SALib. 
\nThe ema_workbench offers support for parallelization of the execution of the experiments using either the multiprocessing or ipyparallel. There are various OS specific concerns you have to keep in mind when using either of these libraries. Please have a look at the documentation of these libraries, before using them.",
"from ema_workbench import MultiprocessingEvaluator, ema_logging, perform_experiments\n\nema_logging.log_to_stderr(ema_logging.INFO)\n\nwith MultiprocessingEvaluator(model) as evaluator:\n results = evaluator.perform_experiments(scenarios=1000, policies=10)",
"Processing the results of the experiments\nBy default, the return of perform_experiments is a tuple of length 2. The first item in the tuple is the experiments. The second item is the outcomes. Experiments and outcomes are aligned by index. The experiments are stored in a Pandas DataFrame, while the outcomes are a dict with the name of the outcome as key, and the values are in a numpy array.",
"experiments, outcomes = results\nprint(experiments.shape)\nprint(list(outcomes.keys()))",
"We can easily visualize these results. The workbench comes with various convenience plotting functions built on top of matplotlib and seaborn. We can however also easily use seaborn or matplotlib directly. For example, we can create a pairplot using seaborn where we group our outcomes by policy. For this, we need to create a dataframe with the outcomes and a policy column. By default the name of a policy is a string representation of the dict with lever names and values. We can replace this easily with a number as shown below.",
"policies = experiments[\"policy\"]\nfor i, policy in enumerate(np.unique(policies)):\n experiments.loc[policies == policy, \"policy\"] = str(i)\n\ndata = pd.DataFrame(outcomes)\ndata[\"policy\"] = policies",
"Next, all that is left is to use seaborn's pairplot function to visualize the data.",
"sns.pairplot(data, hue=\"policy\", vars=list(outcomes.keys()))\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
steinam/teacher
|
jup_notebooks/data-science-ipython-notebooks-master/python-data/unit_tests.ipynb
|
mit
|
[
"This notebook was prepared by Donne Martin. Source and license info is on GitHub.\nNose Unit Tests with IPython Notebook\nNose\nTesting is a vital part of software development. Nose extends unittest to make testing easier.\nInstall Nose\nRun the following command line:",
"!pip install nose",
"Create the Code\nSave your code to a file with the %%file magic:",
"%%file type_util.py\nclass TypeUtil:\n\n @classmethod\n def is_iterable(cls, obj):\n \"\"\"Determines if obj is iterable.\n\n Useful when writing functions that can accept multiple types of\n input (list, tuple, ndarray, iterator). Pairs well with\n convert_to_list.\n \"\"\"\n try:\n iter(obj)\n return True\n except TypeError:\n return False\n\n @classmethod\n def convert_to_list(cls, obj):\n \"\"\"Converts obj to a list if it is not a list and it is iterable, \n else returns the original obj.\n \"\"\"\n if not isinstance(obj, list) and cls.is_iterable(obj):\n obj = list(obj)\n return obj\n",
"Create the Nose Tests\nSave your test to a file with the %%file magic:",
"%%file tests/test_type_util.py\nfrom nose.tools import assert_equal\nfrom ..type_util import TypeUtil\n\n\nclass TestUtil():\n\n def test_is_iterable(self):\n assert_equal(TypeUtil.is_iterable('foo'), True)\n assert_equal(TypeUtil.is_iterable(7), False)\n\n def test_convert_to_list(self):\n assert_equal(isinstance(TypeUtil.convert_to_list('foo'), list), True)\n assert_equal(isinstance(TypeUtil.convert_to_list(7), list), False)",
"Run the Nose Tests\nRun the following command line:",
"!nosetests tests/test_type_util.py -v"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
kit-cel/wt
|
nt1/uebung/AWGN_Demo.ipynb
|
gpl-2.0
|
[
"Demonstration: Low-pass Filtered Additive White Gaussian Noise (AWGN)",
"# import necessary libraries\nimport numpy as np # basic vector / matrix tools, numerical math\nfrom matplotlib import pyplot as plt # Plotting\nimport seaborn # prettier plots\nimport math # General math functions\nfrom scipy import signal, stats # Signal analysis, filter design; statistic/stochastics\n\n# Show all plots inline (not in new window), make text large, and fix the figure size\n%matplotlib inline\nseaborn.set(font_scale=2)\nplt.rc(\"figure\", figsize = (1200/72, 400/72), dpi=72)",
"Generate noisy samples\nDraw real and imaginary part of each sample from independent normal distributions:",
"# Time reference: \nT_S = 1e-6 # seconds\nf_S = 1 / T_S # sampling frequency\nf_nyquist = f_S / 2 # nyquist frequency\n\nprint(\"\"\"\nSampling rate: {rate} kHz\nSample period: {period} µs\nNyquist frequency: {nyquist} kHz\"\"\".format(rate=f_S/1000, period=T_S*1e6, nyquist=f_nyquist/1000))\n\nN_0 = 1; # W/Hz\nvariance = N_0*f_S/2 # variance of each component (I, Q)\nsigma = math.sqrt(variance) # standard deviation of each component\nnum_samples = 10000 # number of samples (= length of vector)\n\ncomplex_noisy_samples = np.random.normal(0, sigma, num_samples)\\\n + 1j * np.random.normal(0, sigma, num_samples)",
"Noise samples in complex plane",
"plt.axis('square');\nplt.scatter(complex_noisy_samples.real, complex_noisy_samples.imag, s=6, alpha=0.3)\nplt.xlim(-3000, 3000); plt.ylim(-3000, 3000)\nplt.xlabel('In-phase component'); plt.ylabel('Quadrature component')\nplt.tight_layout()",
"Can you see the Gaussian bell?\nWhere's the Gaussian?",
"seaborn.jointplot(complex_noisy_samples.real, complex_noisy_samples.imag, kind=\"reg\", size=6, joint_kws={\"scatter_kws\": {\"s\":6, \"alpha\":0.3}})\nplt.xlim(-3000, 3000); plt.ylim(-3000, 3000)\nplt.xlabel('In-phase component'); plt.ylabel('Quadrature component')",
"Visualize Noise",
"t = np.arange(num_samples) * T_S # vector of sampling time instances\nplt.plot(t*1e3, complex_noisy_samples.real, t*1e3, complex_noisy_samples.imag, alpha=0.7)\nplt.title(\"Time Domain of I an Q components\")\nplt.xlabel('Time / ms'); plt.ylabel('Amplitude'); plt.legend(('inphase', 'quadrature'));",
"Autocorrelation function",
"plt.subplot(121)\nplt.acorr(complex_noisy_samples.real, usevlines=True, maxlags=50)\nplt.ylabel('$\\phi_{\\Re\\Re}$'); plt.xlabel('lag / Samples'); plt.axis('tight')\n\nplt.subplot(122)\nplt.acorr(complex_noisy_samples.imag, usevlines=True, maxlags=50)\nplt.ylabel('$\\phi_{\\Im\\Im}$'); plt.xlabel('lag / Samples'); plt.axis('tight');",
"Histogram & PDF of normal distribution",
"# Plot normalized histogram\nplt.hist(complex_noisy_samples.real, bins=40, normed=True, alpha=0.5);\nplt.xlabel('Amplitude'); plt.ylabel('Probability')\n\n# Plot normal distribution\nx = np.linspace(-3000, 3000, 100)\n_ = plt.plot(x,stats.norm.pdf(x,0,sigma))",
"Amplitudes are normally distributed. Try to play with number of samples and number of bins.\nYou may also want to take a look at histogram of quadrature component. What do you expect?\nPower spectral density using Welch method",
"freqs, Pxx = signal.welch(complex_noisy_samples,\n fs=f_S, nfft=1024, noverlap=0,\n window=\"hanning\", scaling=\"density\",\n return_onesided=False)\nfreqs = np.fft.fftshift(freqs); Pxx = np.fft.fftshift(Pxx)\n# Plot PSD, use logarithmic scale:\nplt.plot(freqs / 1000, 10*np.log10(np.abs(Pxx)))\nplt.ylim(-70, 10)\nplt.ylabel('$\\Phi_{XX}(f)$ [dB]'); plt.xlabel('$f$ / kHz');",
"Not quite a constant, why not?\nLow-Pass filtered Gaussian noise",
"cutoff_freq = 1e5 # cutoff frequency of lowpass filter: 100 kHz\nnumtaps = 51 # number of filter taps \n\n# FIR filter design:\nlpass_taps = signal.firwin(numtaps, cutoff_freq, nyq=f_nyquist) # Get filter taps\nfreq_norm, response = signal.freqz(lpass_taps) # filter response in frequency domain\nfreq = freq_norm * f_nyquist / np.pi\n\n# Plot frequency response:\nplt.plot(freq / 1e3, 10*np.log10(np.abs(response))) \nplt.title('Frequency response of lowpass filter'); plt.ylabel('$H(f)$ [dB]'); plt.xlabel('$f$ / kHz');",
"Filter noisy samples\nActually generate low-pass filtered Gaussian noise.",
"# Filter noise with lowpass:\nfiltered_x = signal.lfilter(lpass_taps, 1.0, complex_noisy_samples)\n# Calculate PSD:\nfreqs, Pxx = signal.welch(filtered_x,\n nfft=1024, fs=f_S, window=\"hanning\", noverlap=0, scaling=\"density\", return_onesided=False)\nplt.plot(np.fft.fftshift(freqs),\n 10*np.log10(np.abs(np.fft.fftshift(Pxx))))\n# Plot PSD, use logarithmic scale:\nplt.title('PSD of low-pass filtered Gaussian noise');\nplt.axis('tight'); plt.ylim(-70, 10); plt.ylabel('$P_{XX}(f)$'); plt.xlabel('$f$ / kHz');",
"Autocorrelation function",
"plt.acorr(filtered_x.real, usevlines=False, maxlags=50, marker=None, linestyle='-')\nplt.acorr(filtered_x.imag, usevlines=False, maxlags=50, marker=None, linestyle='-')\nplt.xlabel('lag / Samples')\nplt.legend(('inphase', 'quadrature'));",
"If you compare this with the autocorrelation function of the unfiltered noise, can you explain what happened?\nDownsampling",
"# Take every 5th element of filtered signal\nfactor = 5; filt_x_dwnsampled = filtered_x[::factor]\nplt.acorr(filt_x_dwnsampled.real, usevlines=False, maxlags=50, marker=None, linestyle='-')\nplt.acorr(filt_x_dwnsampled.imag, usevlines=False, maxlags=50, marker=None, linestyle='-')\nplt.title('Autocorrelation function of downsampled signal')\nplt.xlabel('lag / Samples'); plt.axis('tight'); plt.legend(('inphase', 'quadrature'));",
"What happened? Why did we take every 5th sample? \nHint: take a look at the cutoff frequency of the filter and at the nyquist frequency.\nPSD after downsampling",
"freqs, Pxx = signal.welch(filt_x_dwnsampled,\n fs=f_S/factor,nfft=1024, window=\"hanning\", noverlap=0, scaling=\"density\", return_onesided=False)\n# Plot PSD, use logarithmic scale:\nplt.plot(np.fft.fftshift(freqs),\n 10*np.log10(np.abs(np.fft.fftshift(Pxx))))\nplt.axis('tight'); plt.ylim(-70, 10)\nplt.ylabel('$P_{XX}$'); plt.xlabel('$f$ / kHz');",
"Author: Johannes Fink, April 2016. <br/>\nUpdates: Marcus Müller mueller@kit.edu, May 2018."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
wasat/JupyTEPIDE
|
notebooks/grass/bash/import.ipynb
|
apache-2.0
|
[
"Import and export of data from different sources in GRASS GIS\nGRASS GIS Location can contain data only in one coordinate reference system (CRS)\nin order to have full control over reprojection\nand avoid issues coming from on-the-fly reprojection.\nWhen starting a project, decide which CRS you will use. Create\na new Location using Location Wizard (accessible from GRASS GIS start-up page).\nSpecify desired CRS either by providing\nEPSG code (can be found e.g. at epsg.io)\nor by providing a georeferenced file (such as Shapefile) which has\nthe CRS you want.\nImporting data in common vector and raster formats\nFor basic import of raster and vector files, use r.import\nand v.import, respectively.\nThese modules will reproject the input data if necessary.\nIf the input data's CRS matches the Location's CRS, we can use\nr.in.gdal or v.in.ogr\nfor importing raster and vector.\nAlternatively, you can use a two-step approach\nfor the cases when the data's CRS doesn't match the Location's CRS.\nFirst create a new temporary Location\nbased on the CRS of the data you want to import, switch to this Location\nand then use r.in.gdal or v.in.ogr\nto import raster and vector data, respectively. 
Then switch to the Location\nof your project and use\nr.proj and v.proj\nto reproject data from the temporary Location to your project Location.\nThis approach is necessary for formats which are not supported by\nr.import and v.import modules.\nModules r.proj and v.proj\ncan be also used for bringing raster and vector maps from one Location to another.\nModules r.in.gdal and v.in.ogr\ncheck whether the CRS of the imported data matches the Location's CRS.\nSometimes the CRS of imported data is not specified correctly\nor is missing and therefore import fails.\nIf you know that the actual CRS matches the Location's CRS,\nit is appropriate to use r.in.gdal's\nor v.in.ogr's -o flag to overwrite the projection\ncheck and import the data as they are.\nIf you zoom to raster or vector in GRASS GUI and it does not fit with\nthe rest of the data, it means that it was imported with wrong projection\ninformation (or with the -o flag when the coordinates in fact don't match).\nYou can use r.info and v.info to get the information\nabout the extents of (already imported) rasters and vectors.\nImporting CSV and other ASCII data\nThere are many formats of plain text files. In the context of GIS we usually\ntalk about ASCII formats and CSV files. CSV files usually hold only\ncoordinates and sometimes attributes of points.\nThese files usually don't have CRS information attached to them,\nso we must be very careful and import the data only if the coordinates\nare in the CRS of the Location we are using.\nLet's create a CSV file called points.txt\nusing a text editor (Notepad++, TextEdit, MS Notepad), for example:\n637803.6,223804.7\n641835.5,223761.2\n643056.0,217419.0\nThe coordinates we entered are in EPSG:3358 and we assume that the\nGRASS Location is using this CRS as well.\nThis file can be imported to GRASS GIS using:",
"!v.in.ascii input=points.txt output=test_ascii separator=comma x=1 y=2",
"Notice, we have to specify the column number where the X and Y (optionally Z)\ncoordinates are stored. In this example, X coordinates are in the first column\nY in the second one. Don't forget to specify correct column delimiter.\nIf the data are not in the CRS we are using, create a new Location\nwith matching CRS,\nimport the data and use v.proj as described above.\nImporting lidar point clouds\nLidar point clouds can be imported in two ways: as raster maps using binning\nor as vector points. However, one must explore the dataset first.\nIn command line, we can check the projection information and other metadata\nabout a LAS file using lasinfo tool:",
"!lasinfo tile_0793_016_spm.las",
"r.in.lidar module can be used to scan the spatial extent\nof the dataset:",
"!r.in.lidar input=tile_0793_016_spm.las -s",
"Binning\nBefore creating the actual raster with elevation, we need to decide the extent\nand the resolution we will use for the binning. We can use\nr.in.lidar module for that by setting the resolution\ndirectly and using a -e flag to use dataset extent instead of taking it from\nthe computational region.\nWe are interested in the density of points, so we use method=n:",
"!r.in.lidar input=tile_0793_016_spm.las output=tile_0793_016_n method=n -e resolution=2",
"After determining the optimal resolution for binning and the desired area,\nwe can use g.region to set the computational region.\nr.in.lidar without the additional parameters above\nwill create a raster map from points using binning with resolution and extent\ntaken from the computational region:",
"!r.in.lidar input=tile_0793_016_spm.las output=tile_0793_016",
"Interpolation\nWhen the result of binning contains a lot of NULL cells or when it is not\nsmooth enough for further analysis, we can import the point cloud as vector\npoints and interpolate a raster.\nSupposing that we already determined the desired extent and resolution\n(using r.in.lidar as described above) we can use\nv.in.lidar lidar for import (and using class filter\nto get only ground points):",
"!v.in.lidar input=tile_0793_016_spm.las output=tile_0793_016 class=2 -r -t -b",
"This import only the points of class 2 (ground)\nin the current computational region\nwithout the attribute table and building the topology.\nThen we follow with interpolation using,\ne.g. v.surf.rst module:",
"!v.surf.rst input=tile_0793_016 elevation=tile_0793_016_elevation slope=tile_0793_016_slope aspect=tile_0793_016_aspect npmin=100 tension=20 smooth=1",
"Importing data in different CRS\nIn case the CRS of the file doesn't match the CRS\nused in the GRASS Location, reprojection can be done before importing\nusing las2las tool.\nThe following example command is for reprojecting tiles\nin NAD83/North Carolina in feet (EPSG:2264)\ninto NAD83/North Carolina in meters (EPSG:3358):",
"!las2las --a_srs=EPSG:2264 --t_srs=EPSG:3358 -i input_spf.las -o output_spm.las",
"Importing data with broken projection information\nModules r.in.lidar and v.in.lidar\ncheck whether the CRS of the imported data matches the Location's CRS.\nSometimes the CRS of imported data is not specified correctly\nor is missing and therefore import fails.\nIf you know that the actual CRS matches the Location's CRS,\nit is appropriate to use r.in.lidar's\nor v.in.lidar's -o flag to overwrite the projection\ncheck and import the data as they are.",
"!r.in.lidar input=tile_0793_016_spm.las -s -o",
"Transferring GRASS GIS data between two computers\nIf two GRASS GIS users want to exchange data, they can use GRASS GIS native\nexchange format -- packed map. A vector or raster map can be\nexported from a GRASS Location in this format using\nv.pack or r.pack respectively.\nThis format preserves everything for a map in a way as it is stored in\na GRASS Database. Projection of the source and target GRASS Locations\nmust be the same.\nIf GRASS GIS users wish to exchange GRASS Mapsets, they can do so as long as\nthe source and target GRASS Locations have the same projection.\nThe PERMANENT Mapset should not be usually exchanged as it is a crucial part\nof the given Location.\nLocations can be easily transferred in between GRASS Database directories\non different computers as they carry all data and projection information\nwithin them and the storage format used in the background is platform independent.\nLocations as well as whole GRASS Databases can be copied and moved\nin the same way as any other directories on the computer.\nFurther resources\n\n\nGRASS GIS manual\n\n\nAbout GRASS GIS Database structure\n\n\nGRASS GIS for ArcGIS users\n\n\nepsg.io (Repository of EPSG codes)",
"# end the GRASS session\nos.remove(rcfile)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
jdsanch1/SimRC
|
02. Parte 2/13. Clase 13/.ipynb_checkpoints/06Class NB-checkpoint.ipynb
|
mit
|
[
"Clase 6: Generación de números aleatorios y simulación Montecarlo\nJuan Diego Sánchez Torres, \nProfesor, MAF ITESO\n\nDepartamento de Matemáticas y Física\ndsanchez@iteso.mx\nTel. 3669-34-34 Ext. 3069\nOficina: Cubículo 4, Edificio J, 2do piso\n\n1. Motivación\nPresentar los métodos básicos para la generación de números aleatorios uniformes y normales.",
"import numpy as np\nimport seaborn as sns\nimport scipy.stats as stats\n%matplotlib inline",
"2. Generador congruencial lineal",
"def lcg(n, m=2**31-1, a=16807, c=0, seed=2**30):\n x = np.zeros(n+1)\n x[0]=seed\n for i in range(1,n+1):\n x[i] = (a * x[i-1]+c)%m\n return x[1:]/m",
"Ejemplo",
"lcg(10, m=31, a=13, c=0, seed=3)",
"Generador mínimo estándar",
"x=lcg(10000)\nsns.distplot(x, color=\"b\", fit=stats.uniform);",
"Generado Randu (Usado por IBM)",
"x=lcg(10000, m=2**31, a=2**16+3, c=0, seed=3)\nsns.distplot(x, color=\"b\", fit=stats.uniform);",
"3. Método de Box-Muller",
"def bm(n):\n m=2**31-1\n a=16807\n c=0\n seed=2**30\n x = np.zeros(n+1)\n x[0]=seed\n for i in range(1,n+1):\n x[i] = (a * x[i-1]+c)%m\n u=x[1:]/m\n u1=u[:int((n/2))]\n u2=u[int(n/2):]\n nn=np.concatenate((np.sqrt(-2*np.log(1-u1))*np.cos(2*np.pi*u2), np.sqrt(-2*np.log(1-u1))*np.sin(2*np.pi*u2)),axis=0)\n return nn\n\ny=bm(100000)\nsns.distplot(y, color=\"b\", fit=stats.norm);"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
azhurb/deep-learning
|
reinforcement/Q-learning-cart.ipynb
|
mit
|
[
"Deep Q-learning\nIn this notebook, we'll build a neural network that can learn to play games through reinforcement learning. More specifically, we'll use Q-learning to train an agent to play a game called Cart-Pole. In this game, a freely swinging pole is attached to a cart. The cart can move to the left and right, and the goal is to keep the pole upright as long as possible.\n\nWe can simulate this game using OpenAI Gym. First, let's check out how OpenAI Gym works. Then, we'll get into training an agent to play the Cart-Pole game.",
"import gym\nimport tensorflow as tf\nimport numpy as np",
"Note: Make sure you have OpenAI Gym cloned into the same directory with this notebook. I've included gym as a submodule, so you can run git submodule --init --recursive to pull the contents into the gym repo.",
"# Create the Cart-Pole game environment\nenv = gym.make('CartPole-v0')",
"We interact with the simulation through env. To show the simulation running, you can use env.render() to render one frame. Passing in an action as an integer to env.step will generate the next step in the simulation. You can see how many actions are possible from env.action_space and to get a random action you can use env.action_space.sample(). This is general to all Gym games. In the Cart-Pole game, there are two possible actions, moving the cart left or right. So there are two actions we can take, encoded as 0 and 1.\nRun the code below to watch the simulation run.",
"env.reset()\nrewards = []\nfor _ in range(100):\n env.render()\n state, reward, done, info = env.step(env.action_space.sample()) # take a random action\n rewards.append(reward)\n if done:\n rewards = []\n env.reset()",
"To shut the window showing the simulation, use env.close().",
"env.close()",
"If you ran the simulation above, we can look at the rewards:",
"print(rewards[-20:])",
"The game resets after the pole has fallen past a certain angle. For each frame while the simulation is running, it returns a reward of 1.0. The longer the game runs, the more reward we get. Then, our network's goal is to maximize the reward by keeping the pole vertical. It will do this by moving the cart to the left and the right.\nQ-Network\nWe train our Q-learning agent using the Bellman Equation:\n$$\nQ(s, a) = r + \\gamma \\max{Q(s', a')}\n$$\nwhere $s$ is a state, $a$ is an action, and $s'$ is the next state from state $s$ and action $a$.\nBefore we used this equation to learn values for a Q-table. However, for this game there are a huge number of states available. The state has four values: the position and velocity of the cart, and the position and velocity of the pole. These are all real-valued numbers, so ignoring floating point precisions, you practically have infinite states. Instead of using a table then, we'll replace it with a neural network that will approximate the Q-table lookup function.\n<img src=\"assets/deep-q-learning.png\" width=450px>\nNow, our Q value, $Q(s, a)$ is calculated by passing in a state to the network. The output will be Q-values for each available action, with fully connected hidden layers.\n<img src=\"assets/q-network.png\" width=550px>\nAs I showed before, we can define our targets for training as $\\hat{Q}(s,a) = r + \\gamma \\max{Q(s', a')}$. Then we update the weights by minimizing $(\\hat{Q}(s,a) - Q(s,a))^2$. \nFor this Cart-Pole game, we have four inputs, one for each value in the state, and two outputs, one for each action. To get $\\hat{Q}$, we'll first choose an action, then simulate the game using that action. This will get us the next state, $s'$, and the reward. With that, we can calculate $\\hat{Q}$ then pass it back into the $Q$ network to run the optimizer and update the weights.\nBelow is my implementation of the Q-network. I used two fully connected layers with ReLU activations. 
Two seems to be good enough, three might be better. Feel free to try it out.",
"class QNetwork:\n def __init__(self, learning_rate=0.01, state_size=4, \n action_size=2, hidden_size=10, \n name='QNetwork'):\n # state inputs to the Q-network\n with tf.variable_scope(name):\n self.inputs_ = tf.placeholder(tf.float32, [None, state_size], name='inputs')\n \n # One hot encode the actions to later choose the Q-value for the action\n self.actions_ = tf.placeholder(tf.int32, [None], name='actions')\n one_hot_actions = tf.one_hot(self.actions_, action_size)\n \n # Target Q values for training\n self.targetQs_ = tf.placeholder(tf.float32, [None], name='target')\n \n # ReLU hidden layers\n self.fc1 = tf.contrib.layers.fully_connected(self.inputs_, hidden_size)\n self.fc2 = tf.contrib.layers.fully_connected(self.fc1, hidden_size)\n\n # Linear output layer\n self.output = tf.contrib.layers.fully_connected(self.fc2, action_size, \n activation_fn=None)\n \n ### Train with loss (targetQ - Q)^2\n # output has length 2, for two actions. This next line chooses\n # one value from output (per row) according to the one-hot encoded actions.\n self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1)\n \n self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q))\n self.opt = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)",
"Experience replay\nReinforcement learning algorithms can have stability issues due to correlations between states. To reduce correlations when training, we can store the agent's experiences and later draw a random mini-batch of those experiences to train on. \nHere, we'll create a Memory object that will store our experiences, our transitions $<s, a, r, s'>$. This memory will have a maxmium capacity, so we can keep newer experiences in memory while getting rid of older experiences. Then, we'll sample a random mini-batch of transitions $<s, a, r, s'>$ and train on those.\nBelow, I've implemented a Memory object. If you're unfamiliar with deque, this is a double-ended queue. You can think of it like a tube open on both sides. You can put objects in either side of the tube. But if it's full, adding anything more will push an object out the other side. This is a great data structure to use for the memory buffer.",
"from collections import deque\nclass Memory():\n def __init__(self, max_size = 1000):\n self.buffer = deque(maxlen=max_size)\n \n def add(self, experience):\n self.buffer.append(experience)\n \n def sample(self, batch_size):\n idx = np.random.choice(np.arange(len(self.buffer)), \n size=batch_size, \n replace=False)\n return [self.buffer[ii] for ii in idx]",
"Exploration - Exploitation\nTo learn about the environment and rules of the game, the agent needs to explore by taking random actions. We'll do this by choosing a random action with some probability $\\epsilon$ (epsilon). That is, with some probability $\\epsilon$ the agent will make a random action and with probability $1 - \\epsilon$, the agent will choose an action from $Q(s,a)$. This is called an $\\epsilon$-greedy policy.\nAt first, the agent needs to do a lot of exploring. Later when it has learned more, the agent can favor choosing actions based on what it has learned. This is called exploitation. We'll set it up so the agent is more likely to explore early in training, then more likely to exploit later in training.\nQ-Learning training algorithm\nPutting all this together, we can list out the algorithm we'll use to train the network. We'll train the network in episodes. One episode is one simulation of the game. For this game, the goal is to keep the pole upright for 195 frames. So we can start a new episode once meeting that goal. The game ends if the pole tilts over too far, or if the cart moves too far the left or right. When a game ends, we'll start a new episode. 
Now, to train the agent:\n\nInitialize the memory $D$\nInitialize the action-value network $Q$ with random weights\nFor episode = 1, $M$ do\nFor $t$, $T$ do\nWith probability $\\epsilon$ select a random action $a_t$, otherwise select $a_t = \\mathrm{argmax}_a Q(s,a)$\nExecute action $a_t$ in simulator and observe reward $r_{t+1}$ and new state $s_{t+1}$\nStore transition $<s_t, a_t, r_{t+1}, s_{t+1}>$ in memory $D$\nSample random mini-batch from $D$: $<s_j, a_j, r_j, s'_j>$\nSet $\\hat{Q}j = r_j$ if the episode ends at $j+1$, otherwise set $\\hat{Q}_j = r_j + \\gamma \\max{a'}{Q(s'_j, a')}$\nMake a gradient descent step with loss $(\\hat{Q}_j - Q(s_j, a_j))^2$\n\n\nendfor\nendfor\n\nHyperparameters\nOne of the more difficult aspects of reinforcememt learning are the large number of hyperparameters. Not only are we tuning the network, but we're tuning the simulation.",
"train_episodes = 1000 # max number of episodes to learn from\nmax_steps = 200 # max steps in an episode\ngamma = 0.99 # future reward discount\n\n# Exploration parameters\nexplore_start = 1.0 # exploration probability at start\nexplore_stop = 0.01 # minimum exploration probability \ndecay_rate = 0.0001 # exponential decay rate for exploration prob\n\n# Network parameters\nhidden_size = 64 # number of units in each Q-network hidden layer\nlearning_rate = 0.0001 # Q-network learning rate\n\n# Memory parameters\nmemory_size = 10000 # memory capacity\nbatch_size = 20 # experience mini-batch size\npretrain_length = batch_size # number experiences to pretrain the memory\n\ntf.reset_default_graph()\nmainQN = QNetwork(name='main', hidden_size=hidden_size, learning_rate=learning_rate)",
"Populate the experience memory\nHere I'm re-initializing the simulation and pre-populating the memory. The agent is taking random actions and storing the transitions in memory. This will help the agent with exploring the game.",
"# Initialize the simulation\nenv.reset()\n# Take one random step to get the pole and cart moving\nstate, reward, done, _ = env.step(env.action_space.sample())\n\nmemory = Memory(max_size=memory_size)\n\n# Make a bunch of random actions and store the experiences\nfor ii in range(pretrain_length):\n # Uncomment the line below to watch the simulation\n # env.render()\n\n # Make a random action\n action = env.action_space.sample()\n next_state, reward, done, _ = env.step(action)\n\n if done:\n # The simulation fails so no next state\n next_state = np.zeros(state.shape)\n # Add experience to memory\n memory.add((state, action, reward, next_state))\n \n # Start new episode\n env.reset()\n # Take one random step to get the pole and cart moving\n state, reward, done, _ = env.step(env.action_space.sample())\n else:\n # Add experience to memory\n memory.add((state, action, reward, next_state))\n state = next_state",
"Training\nBelow we'll train our agent. If you want to watch it train, uncomment the env.render() line. This is slow because it's rendering the frames slower than the network can train. But, it's cool to watch the agent get better at the game.",
"# Now train with experiences\nsaver = tf.train.Saver()\nrewards_list = []\nwith tf.Session() as sess:\n # Initialize variables\n sess.run(tf.global_variables_initializer())\n \n step = 0\n for ep in range(1, train_episodes):\n total_reward = 0\n t = 0\n while t < max_steps:\n step += 1\n # Uncomment this next line to watch the training\n # env.render() \n \n # Explore or Exploit\n explore_p = explore_stop + (explore_start - explore_stop)*np.exp(-decay_rate*step) \n if explore_p > np.random.rand():\n # Make a random action\n action = env.action_space.sample()\n else:\n # Get action from Q-network\n feed = {mainQN.inputs_: state.reshape((1, *state.shape))}\n Qs = sess.run(mainQN.output, feed_dict=feed)\n action = np.argmax(Qs)\n \n # Take action, get new state and reward\n next_state, reward, done, _ = env.step(action)\n \n total_reward += reward\n \n if done:\n # the episode ends so no next state\n next_state = np.zeros(state.shape)\n t = max_steps\n \n print('Episode: {}'.format(ep),\n 'Total reward: {}'.format(total_reward),\n 'Training loss: {:.4f}'.format(loss),\n 'Explore P: {:.4f}'.format(explore_p))\n rewards_list.append((ep, total_reward))\n \n # Add experience to memory\n memory.add((state, action, reward, next_state))\n \n # Start new episode\n env.reset()\n # Take one random step to get the pole and cart moving\n state, reward, done, _ = env.step(env.action_space.sample())\n\n else:\n # Add experience to memory\n memory.add((state, action, reward, next_state))\n state = next_state\n t += 1\n \n # Sample mini-batch from memory\n batch = memory.sample(batch_size)\n states = np.array([each[0] for each in batch])\n actions = np.array([each[1] for each in batch])\n rewards = np.array([each[2] for each in batch])\n next_states = np.array([each[3] for each in batch])\n \n # Train network\n target_Qs = sess.run(mainQN.output, feed_dict={mainQN.inputs_: next_states})\n \n # Set target_Qs to 0 for states where episode ends\n episode_ends = (next_states == 
np.zeros(states[0].shape)).all(axis=1)\n target_Qs[episode_ends] = (0, 0)\n \n targets = rewards + gamma * np.max(target_Qs, axis=1)\n\n loss, _ = sess.run([mainQN.loss, mainQN.opt],\n feed_dict={mainQN.inputs_: states,\n mainQN.targetQs_: targets,\n mainQN.actions_: actions})\n \n saver.save(sess, \"checkpoints/cartpole.ckpt\")\n",
"Visualizing training\nBelow I'll plot the total rewards for each episode. I'm plotting the rolling average too, in blue.",
"%matplotlib inline\nimport matplotlib.pyplot as plt\n\ndef running_mean(x, N):\n cumsum = np.cumsum(np.insert(x, 0, 0)) \n return (cumsum[N:] - cumsum[:-N]) / N \n\neps, rews = np.array(rewards_list).T\nsmoothed_rews = running_mean(rews, 10)\nplt.plot(eps[-len(smoothed_rews):], smoothed_rews)\nplt.plot(eps, rews, color='grey', alpha=0.3)\nplt.xlabel('Episode')\nplt.ylabel('Total Reward')",
"Testing\nLet's checkout how our trained agent plays the game.",
"test_episodes = 10\ntest_max_steps = 400\nenv.reset()\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n \n for ep in range(1, test_episodes):\n t = 0\n while t < test_max_steps:\n env.render() \n \n # Get action from Q-network\n feed = {mainQN.inputs_: state.reshape((1, *state.shape))}\n Qs = sess.run(mainQN.output, feed_dict=feed)\n action = np.argmax(Qs)\n \n # Take action, get new state and reward\n next_state, reward, done, _ = env.step(action)\n \n if done:\n t = test_max_steps\n env.reset()\n # Take one random step to get the pole and cart moving\n state, reward, done, _ = env.step(env.action_space.sample())\n\n else:\n state = next_state\n t += 1\n\nenv.close()",
"Extending this\nSo, Cart-Pole is a pretty simple game. However, the same model can be used to train an agent to play something much more complicated like Pong or Space Invaders. Instead of a state like we're using here though, you'd want to use convolutional layers to get the state from the screen images.\n\nI'll leave it as a challenge for you to use deep Q-learning to train an agent to play Atari games. Here's the original paper which will get you started: http://www.davidqiu.com:8888/research/nature14236.pdf."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
trsherborne/learn-python
|
lesson5.ipynb
|
mit
|
[
"LSESU Applicable Maths Python Lesson 5\n22/11/16\nToday we will be learning about\n* Introductory Object Oriented Programming (OOP)\n * What is a class?\n * Creating a class and initialising the class\n * Methods of a class\n * Magic Methods\n * Using a class in a program\n* A peek at Inheritance\n Recap from Week 4\n\nLists\nTuples\n\ndef my_swap_function(a,b):\n return (b,a)\n\nDictionaries\n\n```\nfor country in europe:\n if country['speak_german']:\n for key, value in country.items():\n print('{}\\t{}'.format(key,value))\n print()\n```\nOOP in Python - \"Everything is an object\"\nObject Oriented structure is one of the most popular way people who write software will develop a program. You might have also heard of Functional or Dynamic programming which have their own benefits but we will only focus on OOP. Before we start writing any code, we have to go through some definitions.",
"# We've interacted with objects and classes before when we used the type() function\n\nmy_list = [1,2,3,4,5]\ntype(my_list)\n\n# We looked at class methods before when using the .items() method of a dictionary\n\nmy_dict = {'name':'Hilbert Space','age':154,'favourite_subject':'Maths'}\n\nprint(my_dict.items())\nprint()\n\nfor key,value in my_dict.items():\n print('{}:\\t{}'.format(key,value))",
"Creating a class\nDefining your own classes allows you to create a completely customisable data type for your own needs. This will be guided by the problem you are solving, you might need to create a class Cell() for a computational biology task, when you are modelling lots of kinds of cells. Or you might create a class Location() for a location extraction task, which stores the address, latitude and longtitude and many other features of a location (I've done something very similar to this working with production Python!).\nThe formal definition we just looked at:\n * Class: A user-defined prototype for an object that defines a set of attributes that characterize any object of the class. The attributes are data members (class variables and instance variables) and methods, accessed via dot notation.",
"# Let me create a class Dog\n\n# This is the how you declare a class\nclass Dog(object):\n # You can then declare Data members of the class Dog()\n name = ''\n age = ''\n breed = ''\n animal = 'Dog'\n# Creating object of class Dog is now as easy as creating any of the standard variables\nmy_dog = Dog()\nmy_dog.name = 'Lucky'\nmy_dog.age = '3'\nmy_dog.breed = 'Golden Retriever'\n\n# This is not very useful at all, look at what happens when I print this...\nprint(my_dog)",
"Initialising a class with a constructor\nWe can initialise our class in a smarter way using a constructor, which means we can initialise the Dog() object on one line as below. \nThe self keyword you see when making a class is the way that we refer to the specific instance of the class we want inside the class itself.",
"class Dog(object):\n # You can then declare Data members of the class Dog()\n animal = 'Dog'\n \n # This is similar to all the other functions you have written but the args start with self\n def __init__(self,name,age,breed):\n self.name = name\n self.age = age\n self.breed = breed\n\n# We now create 2 instances of the Dog class which will have unique names, ages and breeds\nmy_dog = Dog('Lucky',3,'Golden Retriever')\nmy_other_dog = Dog('Hamilton',2,'Pembroke Corgi')\n\nprint(my_dog.name)\nprint(my_other_dog.breed)\n\n# However the animal attribute is shared among ALL instances of the dog class\nprint(my_dog.animal)\nprint(my_other_dog.animal)\n\n# We can also make the animal variable \"pseudo private\" so it is not easily accessible. \n# You won't ever really have to do this but it's a demonstration of how to get it done\n\nclass Dog(object):\n # The __ before the variable means someone cannot type my_dog.animal to get to it\n __animal = 'Dog'\n \n def __init__(self,name,age,breed):\n self.name = name\n self.age = age\n self.breed = breed\n \nmy_dog = Dog('Lucky',3,'Golden Retriever')\n\n# This isn't going to work as the variable name is \"mangled\"\nprint(my_dog.__animal)\n\n# You need a _ClassName before to get to the variable\nprint(my_dog._Dog__animal)",
"Creating methods of a class\nFirst things first. When we tried to print the Dog() class we didn't get something that was\nentirely useful. This is because when we want to print our own classes, we need to tell Python\nhow to print it, and we do that using the __str__ method. \nOne note: the __str__ method and the __repr__ method are very similar. Keep in mind that __repr__ is meant to be unambiguous and the __str__ method is just meant to be readable",
"class Dog(object):\n __animal = 'Dog'\n \n def __init__(self,name,age,breed):\n self.name = name\n self.age = age\n self.breed = breed\n \n def __str__(self):\n return 'Hi my name is {} and I am a {} year old {}'.format(self.name,self.age,self.breed)\n \nmy_other_dog = Dog('Hamilton',2,'Pembroke Corgi')\nprint(my_other_dog)",
"Any method between the __example__ double underscores is a special method called a magic method which we will look at further below. We can also define other methods as function attributes of the class which can take arguments like an isolated function",
"class Dog(object):\n __animal = 'Dog'\n \n def __init__(self,name,age,breed,favourite_toy):\n self.name = name\n self.age = age\n self.breed = breed\n self.favourite_toy = favourite_toy\n \n def __str__(self):\n return 'Hi my name is {} and I am a {} year old {}'.format(self.name,self.age,self.breed)\n \n def age_in_dog_years(self):\n return 7*self.age\n \n def is_favourite_toy(self,toy):\n if toy==self.favourite_toy:\n return 'That is my favourite toy!'\n else:\n return 'That is not my favourite toy :('\n \nmy_other_dog = Dog('Hamilton',2,'Pembroke Corgi','Tennis Ball')\nprint(my_other_dog)\nprint()\n\n# We can access the method using dot notation\nprint(my_other_dog.age_in_dog_years())\nprint()\n\n# Methods of classes can also take arguments\nprint(my_other_dog.is_favourite_toy('Stuffed Bear'))\nprint()\nprint(my_other_dog.is_favourite_toy('Tennis Ball'))",
"Magic Methods\nMagic methods (like __str__) are what allows your own classes to behave similarly to the Python built in types. Instinctively it is hard to image how to do dog_a > dog_b, but if we use magic methods then this behaviour is definable.\nA detailed list of the definitions of the different kind of Magic Methods can be found here",
"class Dog(object):\n __animal = 'Dog'\n \n def __init__(self,name,age,breed,favourite_toy):\n self.name = name\n self.age = age\n self.breed = breed\n self.favourite_toy = favourite_toy\n \n # THESE ARE ALL MAGIC METHODS\n def __str__(self):\n return 'Hi my name is {} and I am a {} year old {}'.format(self.name,self.age,self.breed)\n \n # I've chosen to compare on a Dog's age but this is up to the designer of the class.\n def __lt__(self,other):\n return self.age < other.age\n \n def __le__(self,other):\n return self.age <= other.age\n \n def __gt__(self,other):\n return self.age > other.age\n \n def __ge__(self,other):\n return self.age >= other.age \n \n def __eq__(self,other):\n return (self.age == other.age)and(self.name==other.name)and(self.breed==other.breed)\n \n def __ne__(self,other):\n return not self==other\n \n # THESE ARE STANDARD ATTRIBUTE METHODS\n def age_in_dog_years(self):\n return 7*self.age\n \n def is_favourite_toy(self,toy):\n if toy==self.favourite_toy:\n return 'That is my favourite toy!'\n else:\n return 'That is not my favourite toy :('\n\nmy_dog = Dog('Lucky',1,'Golden Retriever','Stick')\nmy_other_dog = Dog('Hamilton',2,'Pembroke Corgi','Tennis Ball')\nmy_other_other_dog = Dog('Socks',4,'Pug','Giant Pillow')\n\n# We can now compare objects of the Dog class using their base type attributes\nprint(my_dog > my_other_dog)\nprint(my_dog <= my_other_other_dog)\nprint(my_dog==my_other_dog)\nprint(my_dog!=my_other_dog)",
"Challenge for today\n\nCreate a class Human() which has attributes 'name', 'age' and 'height' (in centimetres). \nAdd a constructor to the class to be able to create Human objects in a program.\nAdd magic methods to the class to be able to compare Humans by age\nAdd a method to class Human() to return the age of the Human in Dog years (1 human year = 7 dog years)\n\nThe definition is given to you, the final block of code is the Test block. Do not modify the test block, use it to test your code, running it when you think your class is ready.",
"class Human(object):\n #Everything goes here!",
"This is the test block, run this to evaluate your code.",
"joe = Human('Joe',19,174)\nlisa = Human('Lisa',23,181)\nchu = Human('Chu',23,160)\n\n# Test the attributes set by the Constructor\ntry:\n print(joe.name)\n print(lisa.age)\n print(chu.height)\nexcept AttributeError as e:\n print('Your attributes are not working correctly :(\\n')\nelse:\n print('All your attributes are working correctly!\\n')\n \n# Test the Magic methods \ntry:\n print(joe < lisa)\n print(lisa <= lisa)\n print(chu == lisa)\n print(joe > chu)\n print(lisa >= chu)\nexcept TypeError as e:\n print('Your magic methods are not working correctly :(\\n')\nelse:\n print('All your magic methods are working correctly!\\n')\n \n# Test the age_in_dog_years() function\ntry:\n print('{}\\'s age in dog years is {}'.format(joe.name,joe.age_in_dog_years()))\n print('{}\\'s age in dog years is {}'.format(lisa.name,lisa.age_in_dog_years()))\n print('{}\\'s age in dog years is {}'.format(chu.name,chu.age_in_dog_years()))\nexcept AttributeError as e:\n print('Your method is not working correctly :(\\n')\nelse:\n print('Your method is working correctly!\\n')\n print('Congratulations! Your class is all correct!')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
deepfield/ibis
|
docs/source/notebooks/tutorial/7-Advanced-Topics-ComplexFiltering.ipynb
|
apache-2.0
|
[
"Advanced Topics: Additional Filtering\nThe filtering examples we've shown to this point have been pretty simple, either comparisons between columns or fixed values, or set filter functions like isin and notin. \nIbis supports a number of richer analytical filters that can involve one or more of:\n\nAggregates computed from the same or other tables\nConditional aggregates (in SQL-speak these are similar to \"correlated subqueries\")\n\"Existence\" set filters (equivalent to the SQL EXISTS and NOT EXISTS keywords)\n\nSetup",
"import ibis\nimport os\nhdfs_port = os.environ.get('IBIS_WEBHDFS_PORT', 50070)\nhdfs = ibis.hdfs_connect(host='quickstart.cloudera', port=hdfs_port)\ncon = ibis.impala.connect(host='quickstart.cloudera', database='ibis_testing',\n hdfs_client=hdfs)\nibis.options.interactive = True",
"Using scalar aggregates in filters",
"table = con.table('functional_alltypes')\ntable.limit(5)",
"We could always compute some aggregate value from the table and use that in another expression, or we can use a data-derived aggregate in the filter. Take the average of a column for example:",
"table.double_col.mean()",
"You can use this expression as a substitute for a scalar value in a filter, and the execution engine will combine everything into a single query rather than having to access Impala multiple times:",
"cond = table.bigint_col > table.double_col.mean()\nexpr = table[cond & table.bool_col].limit(5)\nexpr",
"Conditional aggregates\nSuppose that we wish to filter using an aggregate computed conditional on some other expressions holding true. Using the TPC-H datasets, suppose that we want to filter customers based on the following criteria: Orders such that their amount exceeds the average amount for their sales region over the whole dataset. This can be computed any numbers of ways (such as joining auxiliary tables and filtering post-join)\nAgain, from prior examples, here are the joined up tables with all the customer data:",
"region = con.table('tpch_region')\nnation = con.table('tpch_nation')\ncustomer = con.table('tpch_customer')\norders = con.table('tpch_orders')\n\nfields_of_interest = [customer,\n region.r_name.name('region'), \n orders.o_totalprice,\n orders.o_orderdate.cast('timestamp').name('odate')]\n\ntpch = (region.join(nation, region.r_regionkey == nation.n_regionkey)\n .join(customer, customer.c_nationkey == nation.n_nationkey)\n .join(orders, orders.o_custkey == customer.c_custkey)\n [fields_of_interest])\n\ntpch.limit(5)",
"In this particular case, filtering based on the conditional average o_totalprice by region requires creating a table view (similar to the self-join examples from earlier) that can be treated as a distinct table entity in the expression. This would not be required if we were computing a conditional statistic from some other table. So this is a little more complicated than some other cases would be:",
"t2 = tpch.view()\nconditional_avg = t2[(t2.region == tpch.region)].o_totalprice.mean()",
"Once you've done this, you can use the conditional average in a filter expression",
"amount_filter = tpch.o_totalprice > conditional_avg\ntpch[amount_filter].limit(10)",
"By looking at the table sizes before and after applying the filter you can see the relative size of the subset taken.",
"tpch.count()\n\ntpch[amount_filter].count()",
"Or even group by year and compare before and after:",
"tpch.schema()\n\nyear = tpch.odate.year().name('year')\n\npre_sizes = tpch.group_by(year).size()\npost_sizes = tpch[amount_filter].group_by(year).size().view()\n\npercent = ((post_sizes['count'] / pre_sizes['count'].cast('double'))\n .name('fraction'))\n\nexpr = (pre_sizes.join(post_sizes, pre_sizes.year == post_sizes.year)\n [pre_sizes.year, \n pre_sizes['count'].name('pre_count'),\n post_sizes['count'].name('post_count'),\n percent])\nexpr",
"\"Existence\" filters\nSome filtering involves checking for the existence of a particular value in a column of another table, or amount the results of some value expression. This is common in many-to-many relationships, and can be performed in numerous different ways, but it's nice to be able to express it with a single concise statement and let Ibis compute it optimally.\nHere's some examples:\n\nFilter down to customers having at least one open order\nFind customers having no open orders with 1-URGENT status\nFind stores (in the stores table) having the same name as a vendor (in the vendors table).\n\nWe'll go ahead and solve the first couple of these problems using the TPC-H tables to illustrate the API:",
"customer = con.table('tpch_customer')\norders = con.table('tpch_orders')\n\norders.limit(5)",
"We introduce the any reduction:",
"has_open_orders = ((orders.o_orderstatus == 'O') & \n (customer.c_custkey == orders.o_custkey)).any()",
"This is now a valid filter:",
"customer[has_open_orders].limit(10)",
"For the second example, in which we want to find customers not having any open urgent orders, we write down the condition that they do have some first:",
"has_open_urgent_orders = ((orders.o_orderstatus == 'O') & \n (orders.o_orderpriority == '1-URGENT') & \n (customer.c_custkey == orders.o_custkey)).any()",
"Now, we can negate this condition and use it as a filter:",
"customer[-has_open_urgent_orders].count()",
"In this case, it is true that customer.c_custkey has no duplicate values, but that need not be the case. There could be multiple copies of any given value in either table column being compared, and the behavior will be the same (existence or non-existence is verified)."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
IST256/learn-python
|
content/lessons/11-WebAPIs/Slides.ipynb
|
mit
|
[
"IST256 Lesson 11\nWeb Services and API's\n\nAssigned Reading\n https://ist256.github.io/spring2020/readings/Web-APIs-In-Python.html\n\nLinks\n\nParticipation: https://poll.ist256.com\nZoom Chat!\n\nFEQT (Future Exam Questions Training) 1\nWhat prints on the last line of this program?",
"import requests\nw = 'http://httpbin.org/get'\nx = { 'a' :'b', 'c':'d'}\nz = { 'w' : 'r'}\nresponse = requests.get(w, params = x, headers = z)\nprint(response.url)",
"A. http://httpbin.org/get?a=b\nB. http://httpbin.org/get?c=d\nC. http://httpbin.org/get?a=b&c=d\nD. http://httpbin.org/get \nVote Now: https://poll.ist256.com\nFEQT (Future Exam Questions Training) 2\nWhich line de-serializes the response?",
"import requests # <= load up a bunch of pre-defined functions from the requests module\nw = 'http://httpbin.org/ip' # <= string\nresponse = requests.get(w) # <= w is a url. HTTP POST/GET/PUT/DELETE Verbs of HTTP\nresponse.raise_for_status() # <= response code.if not 2??, throw exception 4 client 5 server\nd = response.json() #<= de-serilaize!\nd['origin']",
"A. 2\nB. 3\nC. 4\nD. 5 \nVote Now: https://poll.ist256.com\nAgenda\n\n\nLesson 10 Homework Solution\n\n\nA look at web API's \n\nPlaces to find web API's\nHow to read API documentation\nExamples of using API's\n\nConnect Activity\nQuestion: A common two-step verification process uses by API's discussed in the reading is\nA. OAUTH2\nB. Multi-Factor\nC. API Key in Header\nD. JSON format \nVote Now: https://poll.ist256.com\nThe Web Has Evolved….\nFrom User-Consumption\n\nCheck the news / weather in your browser\nSearch the web for \"George Washington's birthday\"\nInternet is for people.\n\nTo Device-Consumption\n\nGet news/ weather alerts on your Phone\nAsk Alexa “When is George Washingon's Birthday?\"\nInternet of Things.\n\nDevice Consuption Requires a Web API\n\nAPI = Application Program Interface. In essence is a formal definition of functions exposed by a service.\nWeb API - API which works over HTTP.\nIn essence you can call functions and access services using the HTTP protocol.\nBasic use starts with an HTTP request and the output is typically a JSON response.\nWe saw examples of this previously with:\nOpen Street Maps Geocoding: https://nominatim.openstreetmap.org/search?q=address&format=json\nWeather Data Service: https://openweathermap.org\nThanks to APIs' we can write programs to interact with a variety of services. \n\nFinding API's requires research…\nStart googling…\"foreign exchange rate api\"\n\nThen start reading the documentation on fixer.io …\n\nThen start hacking away in Python …",
"import requests\nurl = \"http://data.fixer.io/api/latest?access_key=159f1a48ad7a3d6f4dbe5d5a\"\nresponse = requests.get(url)\nresponse.json()",
"API’s R Awesum!\n\n- Leverage the power of the Internet to do almost anything!\n- If you can USE it on a web page or mobile phone It probably has an API!\n- Whether or not you can access the API is up to the provider.\n- Not all API’s are free.\n- Some require registration + authentication\nAPI Registries\n\nhttp://www.programmableweb.com/ \nhttps://market.mashape.com/ \nhttps://github.com/toddmotto/public-apis \n1,000's of APIs to search through\nExamples of how to use them and where to find documentation.\nAgain: Not all are free. Most require a key.\n\nOther Sources of API's\n\nCloud Providers: \nAmazon, Google (CC Req'd), Microsoft Azure, IBM.\nExecellent source of API's for the services they offer.\nAll have free / trial versions.\nYour favorite mobile app:\nIf it is mobile, it has an API.\nTry searching their website for a developers page.\n\nCheck Yourself 1\nA good source of Web API's are:\nA. API Registries\nB. Cloud Providers\nC. Services you know have a mobile app\nD. All of the above \nVote Now: https://poll.ist256.com\nProject Sources!\n\nWhat interests you? \nChoose: API or Python Modules: https://pypi.org (You might need to figure out which is best).\nThree is the minimum\nThink how you can \"mash up\" multiple services to build an application or tell a story. \nWe will not help you learn your API or module - that's on you.\n\nResearch… No Magic Bullet Here.\n\nExpect to spend hours doing research.\nExpect to read a lot of documentation.\nExpect to write a lot of \"throw away code\" just to learn how to use the API.\nExpect to hit a lot of dead ends.\nExpect to get frustrated in the process.\nSounds a lot like everything we do in this course... do you see the pattern? Start Small. 
Start Simple.\n\nEnd-To-End Example:\nGet the most positive and negative online reviews about a business\n\nLearn the Yelp API https://www.yelp.com/developers\nSearch for a business and get reviews\nUse Azure Text Analytics https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/\nGet sentiment of reviews\nIPython interactive and displaying images\nDemonstrate the problem simplification approach\n\nConclusion Activity : Exit Ticket\nWhat type of API Authentication was required by the Yelp API?\nA. OAUTH2\nB. API Key in Query String\nC. API Key in Header\nD. Other\nVote Now: https://poll.ist256.com"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
sarathid/Learning
|
Analyzing_Subway_Data_NDFDSI.ipynb
|
gpl-3.0
|
[
"Subway Data Analysis\nIntroduction\nThe NYC public transportantion system - Metro Transit Authority - provides data for download via csv files. Part of the information available are data from the subway turnstiles, containing weekly logs for cumulative entries and exits by turnstile and by subway station during a provided timeframe.\nFor this project, we will only use the information available at: http://web.mta.info/developers/turnstile.html.\nAbout this project\nFor this project, you will apply the knowledge acquired in the first month of this course. We will practice basic data acquisition and data cleaning tasks to find out fundamental stuff about the data using what we learned in the Statistics course. \nThe goal of this project is to explore the relationship between data from the NYC Subway turnstiles and the city weather. For this, besides data from the subway, we will also need data from the weather in NYC. \nHere are the main points that will be considered in this work:\n\nGathering data from the Internet\nUsing Statistics for Data Analysis\nData handling and simple graphics creation with Pandas\n\nHow to find help: We suggest that you try the following channels, in the following order:\n| Type of Question\\Channels | Google | Forum | Slack | Email |\n|------------------------------- |-------- |------- |------- |------- |\n| Pandas and Python Programming | 1 | 2 | 3 | |\n| Projects Requiriments | | 1 | 2 | 3 |\n| Projects Specific Parts | | 1 | 2 | 3 |\nHere is the address for each of these channels:\n\nForum: https://discussions.udacity.com/c/ndfdsi-project\nSlack: Big Data Foundations\nEmail: india@udacity.com\n\nThe student is expected to submit this report including:\n\nAll TODO's completed, as they are crucial for the code to run accordingly\nThe ipynb file, exported as html\n\nTo submit this project, go to the classroom, and submit your zipped .ipynb and html.\nReminders\nBefore we start, there are a few things you must have in mind while using 
iPython notebooks:\n\nRemember you can see, in the left side of a code cell, when was the last time it ran, if there is a number inside the keys.\nWhen starting a new session in the notebook, please make sure to run all cells up to the point where you last left it. Even if the output can still be viewed from the moment you ran your cells in the previews session, the kernel starts in a new state, so you will need to reload all data, etc. in a new session.\nThe previous point is useful to have in mind if your answers do not match what is expected from the quizzes in the classroom. Try reloading the data and running all processing steps, one by one, to make sure you're working with the same variables and data from each step of the quizz.\n\nSession 1 - Data Gathering\nExercise 1.1\nLet's do it!! Now it's your turn to gather data. Please write bellow a Python code to access the link http://web.mta.info/developers/turnstile.html and download all files from June 2017. The file must be named turnstile_100617.txt, where 10/06/17 is the file's date.\nPlease see below a few commands that might help you:\nUse the urllib library to open and redeem a webpage. Use the command below, where url is the webpage path to the following file:\npython\nu = urllib.urlopen(url)\nhtml = u.read()\nUse the BeautifulSoup library to search for the link to the file you want to donwload in the page. Use the command below to create your soup object and search for all 'a' tags in the document:\npython\nsoup = BeautifulSoup(html, \"html.parser\")\nlinks = soup.find_all('a')\nA tip to only download the files from June is to check data in the name of the file. For instance, to donwload the 17/06/2017 file, please see if the link ends with \"turnstile_170610.txt\". If you forget to do this, you will download all files from that page. 
In order to do this, you can use the following command:\npython\nif '1706' in link.get('href'):\nOur final tip is to use the command bellow to download the txt file:\npython\nurllib.urlretrieve(link_do_arquivo, filename)\nPlease remember - you first have to load all packages and functions that will be used in your analysys.",
"import urllib\nfrom bs4 import BeautifulSoup\n\n#your code here\n",
"Exercise 1.2\nWrite down a function that takes the list of all names of the files you downloaded in Exercise 1.1 and compile them into one single file. There must be only one header line in the output file. \nFor example, if file_1 has:\nline 1...\nline 2...\nand the other file, file_2, has:\nline 3...\nline 4...\nline 5...\nWe must combine file_1 and file_2 into one master file, as follows:\n'C/A, UNIT, SCP, DATEn, TIMEn, DESCn, ENTRIESn, EXITSn'\nline 1...\nline 2...\nline 3...\nline 4...\nline 5...",
"def create_master_turnstile_file(filenames, output_file):\n with open(output_file, 'w') as master_file:\n master_file.write('C/A,UNIT,SCP,STATION, LINENAME, DIVISION, DATEn,TIMEn,DESCn,ENTRIESn,EXITSn\\n')\n for filename in filenames:\n # your code here",
"Exercise 1.3\nFor this exercise, you will write a function that reads the master_file created in the previous exercise and load it into a Pandas Dataframe. This function can be filtered, so that the Dataframe only has lines where column \"DESCn\" has the value \"Regular\".\nFor example, if the Pandas Dataframe looks like this:\n,C/A,UNIT,SCP,DATEn,TIMEn,DESCn,ENTRIESn,EXITSn\n0,A002,R051,02-00-00,05-01-11,00:00:00,REGULAR,3144312,1088151\n1,A002,R051,02-00-00,05-01-11,04:00:00,DOOR,3144335,1088159\n2,A002,R051,02-00-00,05-01-11,08:00:00,REGULAR,3144353,1088177\n3,A002,R051,02-00-00,05-01-11,12:00:00,DOOR,3144424,1088231\n\nThe Dataframe must look like the following, after filtering only the lines where column DESCn has the value REGULAR:\n0,A002,R051,02-00-00,05-01-11,00:00:00,REGULAR,3144312,1088151\n2,A002,R051,02-00-00,05-01-11,08:00:00,REGULAR,3144353,1088177",
"import pandas\n\ndef filter_by_regular(filename):\n \n turnstile_data = # your code here\n # more of your code here\n return turnstile_data",
"Exercise 1.4\nThe NYC Subway data has cumulative entry and exit data in each line. Let's assume you have a Dataframe called df, which contains only lines for one particular turnstile (unique SCP, C/A, and UNIT). The following function must change these cumulative entries for counting all entries since the last reading (entries from the last line of the Dataframe).\nMore specifically, there are two things you should do:\n1 - Create a new column, called ENTRIESn_hourly\n2 - Insert in this column the difference between ENTRIESn in the current and the previous column. If the line has any NAN, fill it out/replace by 1.\nTip: The funtions shift() and fillna() in Pandas might be usefull for this exercise.\nBelow you will find and example of how your Dataframe should look by the end of this exercise:\n C/A UNIT SCP DATEn TIMEn DESCn ENTRIESn EXITSn ENTRIESn_hourly\n0 A002 R051 02-00-00 05-01-11 00:00:00 REGULAR 3144312 1088151 1\n1 A002 R051 02-00-00 05-01-11 04:00:00 REGULAR 3144335 1088159 23\n2 A002 R051 02-00-00 05-01-11 08:00:00 REGULAR 3144353 1088177 18\n3 A002 R051 02-00-00 05-01-11 12:00:00 REGULAR 3144424 1088231 71\n4 A002 R051 02-00-00 05-01-11 16:00:00 REGULAR 3144594 1088275 170\n5 A002 R051 02-00-00 05-01-11 20:00:00 REGULAR 3144808 1088317 214\n6 A002 R051 02-00-00 05-02-11 00:00:00 REGULAR 3144895 1088328 87\n7 A002 R051 02-00-00 05-02-11 04:00:00 REGULAR 3144905 1088331 10\n8 A002 R051 02-00-00 05-02-11 08:00:00 REGULAR 3144941 1088420 36\n9 A002 R051 02-00-00 05-02-11 12:00:00 REGULAR 3145094 1088753 153\n10 A002 R051 02-00-00 05-02-11 16:00:00 REGULAR 3145337 1088823 243",
"import pandas\n\ndef get_hourly_entries(df):\n \n \n #your code here\n return df",
"Exercise 1.5\nDo the same thing you did in the previous exercise, but taking into account the exits, column EXITSn.\nFor this, you need to create a column called EXITSn_hourly and insert the difference between the column EXITSn in the current line vs he previous line. If there is any NaN, fill it out/replace by 0.",
"import pandas\n\ndef get_hourly_exits(df):\n \n #your code here\n return df",
"Exercise 1.6\nGiven an entry variable that represents time, in the format:\n \"00:00:00\" (hour: minutes: seconds)\n \nWrite a function to extract the hour part from the time in the entry variable\nAnd return it as an integer. For example:\n \n 1) if hour is 00, your code must return 0\n 2) if hour is 01, your code must return 1\n 3) if hour is 21, your code must return 21\n \nPlease return te hour as an integer.",
"def time_to_hour(time):\n \n hour = # your code here\n return hour",
"Exercise 2 - Data Analysis\nExercise 2.1\nTo understand the relationship between the Subway activity and the weather, please complete the data from the file already downloaded with the weather data.\nWe provided you with the file containing NYC weather data and made it available with the Support Material. You can access it through the link: https://s3.amazonaws.com/content.udacity-data.com/courses/ud359/turnstile_data_master_with_weather.csv\nNow that we have our data in a csv file, write Python code that reads this file and saves it into a Pandas Dataframe. \nTip: \nUse the command below to read the file:\npython\npd.read_csv('output_list.txt', sep=\",\")",
"import pandas as pd\n\nfilename = \"turnstile_data_master_with_weather.csv\"\n\n\n#your code here",
"Exercise 2.2\nNow, create a function that calculates the number of rainy days. For this, return the count of the number of days where the column \"rain\" is equal to 1.\nTip: You might think that interpreting numbers as integers or floats might not\n work at first. To handle this issue, it might be useful to convert\n these numbers into integers. You can do this by writting cast (column as integer).\n So, for example, if we want to launch the column maxtempi as an integer, we have to\n write something like cast (maxtempi as integer) = 76, instead of just\n where maxtempi = 76.",
"\ndef num_rainy_days(df):\n\n \n\n\n #your code here\n return",
"Exercise 2.3\nCalculate if the day was cloudy or not (0 or 1) and the maximum temperature for fog (i.e. the maximum temperature \n for cloudy days).",
"\ndef max_temp_aggregate_by_fog(df):\n \n #your code here \n return\n \n\n",
"*Exercise 2.4\nNow, calculate the mean for 'meantempi' for the days that are Saturdays or Sundays (weekend):",
"def avg_weekend_temperature(filename):\n \n mean_temp_weekends = \n \n return mean_temp_weekends",
"*Exercise 2.5\nCalculate the mean of the minimum temperature 'mintempi' for the days when the minimum temperature was greater that 55 degrees:",
"def avg_min_temperature(filename):\n\n avg_min_temp_rainy = \n \n return avg_min_temp_rainy",
"*Exercise 2.6\nBefore you make any analysis, it might be useful to look at the data we want to analyse. More specifically, we will evaluate the entries by hour in our data from the NYC Subway to determine the data distribution. This data is stored in the column ['ENTRIESn_hourly'].\n \nDraw two histogramns in the same axis, to show the entries when it's raining vs when it's not. \nBelow, you will find an example of how to draw histogramns with Pandas and Matplotlib:\n \npython\nTurnstile_weather ['column_to_graph']. Hist ()",
"import numpy as np\nimport pandas\nimport matplotlib.pyplot as plt\n\ndef entries_histogram(turnstile_weather):\n \n \n \n plt.figure()\n turnstile_weather['...'] # your code here to plot a historgram for hourly entries when it is raining\n turnstile_weather['...'] # your code here to plot a histogram for hourly entries when it is not raining\n return plt\n",
"*Exercise 2.7\nThe data you just plotted is in what kind of distribution? Is there a difference in distribution between rainy and non-rainy days?\n Answer : Replace this text with your answer!\n*Exercise 2.8\nBuild a function that returns:\n\nThe mean of entries when it's raining\nThe mean of entries when it's not raining",
"import numpy as np\n\nimport pandas\n\ndef means(turnstile_weather):\n \n \n ### YOUR CODE HERE ###\n \n return with_rain_mean, without_rain_mean, p # leave this line for the grader",
"Answer to the following questions according to your functions' exits:\n\nWhat is the mean of entries when it's raining?\nWhat is the mean of entries when it's not raining?\n\n Answer : Replace this text with your answer!\nExercise 3 - Map Reduce\nExercise 3.1\nThe entry for this exercise is the same file from the previous session (Exercise 2). You can download the file from this link:\nhttps://s3.amazonaws.com/content.udacity-data.com/courses/ud359/turnstile_data_master_with_weather.csv\nNow, we will create a mapper. For each entry line, the mapper exit must PRINT (not return) UNIT as a key, and the number of ENTRIESn_hourly as the value. Separate the key and the value with a tab. For example: 'R002 \\ t105105.0'\nExport your mapper into a file named mapper_result.txt and send it with your submission. The code for exporting your mapper is already written in the code bellow.",
"import sys\n\ndef mapper():\n \n\n for line in sys.stdin:\n # your code here\n\n\nmapper()\nsys.stdin = open('turnstile_data_master_with_weather.csv')\nsys.stdout = open('mapper_result.txt', 'w')",
"Exercise 3.2\nNow, create the reducer. Given the mapper result from the previous exercise, the reducer must print (not return) one line per UNIT, with the total number of ENTRIESn_hourly during May (which is our data duration), separated by a tab. An example of exit line from the reducer may look like this: 'R001 \\ t500625.0'\nYou can assume that the entry for the reducer is ordered in a way that all lines corresponding to a particular unit are grouped. However, the reducer exit will have repetition, as there are stores that appear in different files' locations.\nExport your reducer into a file named reducer_result.txt and send it with your submission.",
"def reducer():\n \n\n for line in sys.stdin:\n # your code here\n\n \nreducer()\n"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
kemerelab/NeuroHMM
|
KLscore.ipynb
|
mit
|
[
"KLscore.ipynb\nKullback-Leibler divergence based sequence scoring for hidden Markov models\nNOTE: After some discussion, Caleb and I reformulated our desired score, where we now evaluate the sequential part using the Kullback-Leibler divergence. The score has not quite been finalized, and I will write a more comprehensive introduction/motivation here soon, but this notebook serves as a testbed and proof-of-concept for the KL-based approach. So far it seems to do remarkably well, and it is definitely conceptually nicer. The contextual score has also been modified slightly, where we now use more information than simply the maximum most likely state at each time point.\nHere I will compute and evaluate the Kullback–Leibler (KL) divergence based sequence score for HMMs.\nIn particular, I base the contextual component on\n$$\\dfrac{1}{|\\mathcal{S}|}\\sum_{S\\in\\mathcal{S}} \\text{Pr}(\\mathbf{y}_t|S)$$\nwhere we sum over all possible states. The choice of not weighing by $p(S)$ is intentional (usually we marginalize as $p(\\mathbf{y}_t) = \\langle p(\\mathbf{y}_t | S), p(S) \\rangle$).\nThe sequential component is based on the average KL divergence from the expected state evolution to the posterior state distribution.\nIf we start with $\\boldsymbol{\\pi}$, we can compute (and visualize!) its state distribution evolution ${S'0, S_1', S_2', \\ldots} \\equiv {\\boldsymbol{\\pi}, \\boldsymbol{\\pi}\\mathbf{A}, \\boldsymbol{\\pi}\\mathbf{A}^2, \\ldots }$ This is the _a priori expected state evolution. Note that we assume $\\mathbf{A}{ij} \\equiv \\text{Pr}(S{t+1}=j|S_t=i)$. If however we use our observations to arrive at posterior state estimates, then we have the posterior state evolution (using, e.g. the forward-backward algorithm). 
For the sequential component, we then consider\n$$\nD_\\text{KL}(U||V) \\text{ with } U \\stackrel{\\Delta}{=} p(S_{t+1}|\\mathbf{y}{1:T}) \\text{ and } V \\stackrel{\\Delta}{=} p(S_t)\\mathbf{A}\n$$\nwhere $p(S{t+1})$ and $p(S_t)$ are posterior state distributions. Here we probably have to be a little more careful with notation. Note that in general $p(S_t)\\mathbf{A} \\neq S_{t+1}'$.\nLet $m=|\\mathcal{S}|$, so that $\\mathbf{A}\\in \\mathbb{R}^{m\\times m}$ and $p(S_t)\\in \\mathbb{R}^{1\\times m}$.\nSee also https://www.quora.com/What-is-a-good-laymans-explanation-for-the-Kullback-Leibler-Divergence",
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport sys\n\nsys.path.insert(0, 'helpers')\n\nfrom efunctions import * # load my helper function(s) to save pdf figures, etc.\nfrom hc3 import load_data, get_sessions\nimport klabtools as klab\nimport seqtools as sq\n\n%matplotlib inline\n#mpld3.enable_notebook()\n\nsns.set(rc={'figure.figsize': (12, 4),'lines.linewidth': 1, 'font.size': 18, 'axes.labelsize': 16,\n 'legend.fontsize': 12, 'ytick.labelsize': 12, 'xtick.labelsize': 12 })\nsns.set_style(\"white\")\n\nfrom hmmlearn import hmm # see https://github.com/ckemere/hmmlearn\nimport importlib\n\nimportlib.reload(sq) # reload module here only while prototyping...\nimportlib.reload(klab) # reload module here only while prototyping...\n\nimport sys \nimport time\n\nfrom IPython.display import display, clear_output\n#for i in range(10):\n# time.sleep(0.25)\n# clear_output(wait=True)\n# print(i)\n# sys.stdout.flush()",
"Load data",
"datadirs = ['/home/etienne/Dropbox/neoReader/Data',\n 'C:/etienne/Dropbox/neoReader/Data',\n '/Users/etienne/Dropbox/neoReader/Data']\n\nfileroot = next( (dir for dir in datadirs if os.path.isdir(dir)), None)\n\nanimal = 'gor01'; month,day = (6,7); session = '11-26-53' # WARNING! POSITION DATA INCOMPLETE!\nanimal = 'gor01'; month,day = (6,7); session = '16-40-19' # 91 units\n\n#animal = 'gor01'; month,day = (6,12); session = '15-55-31' # 55 units\n#animal = 'gor01'; month,day = (6,12); session = '16-53-46' # 55 units\n\n#animal = 'gor01'; month,day = (6,13); session = '14-42-6' # 58 units\n#animal = 'gor01'; month,day = (6,13); session = '15-22-3' # 58 units\n\n#animal = 'vvp01'; month,day = (4,9); session = '16-40-54' # ?? units\n#animal = 'vvp01'; month,day = (4,9); session = '17-29-30' # ?? units\n\n#animal = 'vvp01'; month,day = (4,10); session = '12-25-50' # lin1; ?? units\n#animal = 'vvp01'; month,day = (4,10); session = '12-58-3' # lin2; ?? units\n#animal = 'vvp01'; month,day = (4,10); session = '19-11-57' # lin2; ?? units\n#animal = 'vvp01'; month,day = (4,10); session = '21-2-40' # lin1; ?? units\n\n#animal = 'vvp01'; month,day = (4,18); session = '13-06-01' # lin1; ?? units\n#animal = 'vvp01'; month,day = (4,18); session = '13-28-57' # lin2; ?? units\n#animal = 'vvp01'; month,day = (4,18); session = '15-23-32' # lin1; ?? units\n#animal = 'vvp01'; month,day = (4,18); session = '15-38-02' # lin2; ?? 
units\n\nspikes = load_data(fileroot=fileroot, datatype='spikes',animal=animal, session=session, month=month, day=day, fs=32552, verbose=True)\neeg = load_data(fileroot=fileroot, datatype='eeg', animal=animal, session=session, month=month, day=day,channels=[0,1,2], fs=1252, starttime=0, verbose=True)\nposdf = load_data(fileroot=fileroot, datatype='pos',animal=animal, session=session, month=month, day=day, verbose=True)\nspeed = klab.get_smooth_speed(posdf,fs=60,th=8,cutoff=0.5,showfig=True,verbose=True)\n\n## bin ALL spikes\nds = 0.125 # bin spikes into 125 ms bins (theta-cycle inspired)\nbinned_spikes = klab.bin_spikes(spikes.data, ds=ds, fs=spikes.samprate, verbose=True)\n\n## identify boundaries for running (active) epochs and then bin those observations into separate sequences:\nrunbdries = klab.get_boundaries_from_bins(eeg.samprate,bins=speed.active_bins,bins_fs=60)\nbinned_spikes_bvr = klab.bin_spikes(spikes.data, fs=spikes.samprate, boundaries=runbdries, boundaries_fs=eeg.samprate, ds=ds)\n\n## stack data for hmmlearn:\nseq_stk_bvr = sq.data_stack(binned_spikes_bvr, verbose=True)\nseq_stk_all = sq.data_stack(binned_spikes, verbose=True)\n\n## split data into train, test, and validation sets:\ntr_b,vl_b,ts_b = sq.data_split(seq_stk_bvr, tr=60, vl=10, ts=30, randomseed = 0, verbose=True)\n\n## train HMM on active behavioral data; training set (with a fixed, arbitrary number of states for now):\nmyhmm = sq.hmm_train(tr_b, num_states=35, n_iter=50, verbose=False)",
"Basic visualization of hmm state evolutions",
"myhmm.transmat_.sum(axis=1) # confirm orientation of transition prob matrix\nA = myhmm.transmat_.copy()\nfig, ax = plt.subplots(figsize=(3.5, 3))\nim = ax.matshow(A, interpolation='none', cmap='RdPu')\n# Make an axis for the colorbar on the right side\ncax = fig.add_axes([0.9, 0.1, 0.03, 0.8])\nfig.colorbar(im, cax=cax)\n\nm = myhmm.n_components\nPi = myhmm.startprob_.copy()\nPi = np.reshape(Pi,(1,m))\nfig, ax = plt.subplots(figsize=(6, 2))\nax.stem(np.transpose(Pi),':k')\n\nfig, ax = plt.subplots(figsize=(6, 2))\nax.matshow(Pi)\nPiA = np.dot(Pi,A)\nax.matshow(np.vstack([Pi,PiA,np.dot(PiA,A)]))\n\ndef advance_states(St,A,n):\n count = 1\n St = np.dot(St,A)\n while count <= n:\n yield St\n count += 1\n St = np.dot(St,A)\n\nnumsteps = 50\nprior_evo = np.zeros((numsteps+1,m))\nprior_evo[0,:] = Pi\nfor ii, S in enumerate(advance_states(Pi, A, numsteps)):\n prior_evo[ii+1,:] = S\n \nfig, ax = plt.subplots(figsize=(10, 3))\nax.matshow(np.transpose(prior_evo))",
"Remark: Note that steady-state state information is related to the eigenvectors of $\\mathbf{A}$, since $p(S)\\mathbf{A} = p(S) \\implies p(S)$ is a steady-state state distribution, such that $p(S)$ is an eigenvector of $\\mathbf{A}$ with associated eigenvalue $\\lambda = 1$.",
"seq_id = 0\ntmpseqbdries = [0]; tmpseqbdries.extend(np.cumsum(ts_b.sequence_lengths).tolist());\nobs = ts_b.data[tmpseqbdries[seq_id]:tmpseqbdries[seq_id+1],:]\nll, pp = myhmm.score_samples(obs)\nfig, ax = plt.subplots(figsize=(10, 3))\nax.matshow(np.transpose(pp))\n\ndef advance_states_one(pp, A):\n return np.dot(pp, A)\n\nppp = advance_states_one(np.vstack([Pi,pp[:pp.shape[0]-1,:]]), A)\n\nnumsteps = pp.shape[0]-1\nprior_evo = np.zeros((numsteps+1,m))\nprior_evo[0,:] = Pi\nfor ii, S in enumerate(advance_states(Pi, A, numsteps)):\n prior_evo[ii+1,:] = S\n \nfig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(6, 3))\nax1.matshow(np.transpose(prior_evo),cmap='OrRd'); ax1.set_xlabel('prior')\nax2.matshow(np.transpose(ppp),cmap='OrRd'); ax2.set_xlabel('predicted')\nax3.matshow(np.transpose(pp),cmap='OrRd'); ax3.set_xlabel('posterior')\n\n# sort model states:\nnew_order = [0]\nrem_states = np.arange(1,m).tolist()\ncs = 0\n\nfor ii in np.arange(0,m-1):\n nstilde = np.argmax(A[cs,rem_states])\n ns = rem_states[nstilde]\n rem_states.remove(ns)\n cs = ns\n new_order.append(cs)\n \nAnew = A[:, new_order][new_order]\n\nfig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(6, 3))\nax1.matshow(np.transpose(prior_evo)[new_order,:],cmap='OrRd'); ax1.set_xlabel('prior')\nax2.matshow(np.transpose(ppp)[new_order,:],cmap='OrRd'); ax2.set_xlabel('predicted')\nax3.matshow(np.transpose(pp)[new_order,:],cmap='OrRd'); ax3.set_xlabel('posterior')\n\nfig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 8))\nim = ax1.matshow(myhmm.means_, cmap='OrRd', vmin=0, vmax=16); ax1.set_xlabel('before sorting states')\nim = ax2.matshow(myhmm.means_[new_order,:], cmap='OrRd', vmin=0, vmax=16); ax2.set_xlabel('after sorting states')\n\n# Make an axis for the colorbar on the right side\ncax = fig.add_axes([0.9, 0.1, 0.03, 0.8])\nfig.colorbar(im, cax=cax)\n\nds = 1/60 # bin spikes into 1/60 ms bins, corresponding to video sampling period\nbinned_spikes = klab.bin_spikes(spikes.data, ds=ds, fs=spikes.samprate, 
verbose=True)\nrunidx = speed.active_bins\nlin_pos = (posdf.x1.values + posdf.x2.values)/2\npfs, pfbincenters, pindex = klab.estimate_place_fields(lin_pos[runidx],binned_spikes.data[runidx],fs=60, \n x0=0,xl=100, max_meanfiringrate = 5,min_maxfiringrate=3,num_pos_bins=100,sigma=1, verbose=True,showfig=True)\n\nklab.show_place_fields(pfs,pfbincenters,pindex,min_maxfiringrate=2)\n\nfig, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 4))\nMM = myhmm.means_.copy()\nfor cell in np.arange(0,MM.shape[1]):\n if cell not in pindex:\n MM[:,cell] = np.nan\nim = ax1.matshow(MM, cmap='OrRd', vmin=0, vmax=15); ax1.set_xlabel('before sorting states, only place cells')\nim = ax2.matshow(MM[new_order,:], cmap='OrRd', vmin=0, vmax=15); ax2.set_xlabel('after sorting states, only place cells')\n\n# Make an axis for the colorbar on the right side\ncax = fig.add_axes([0.9, 0.1, 0.03, 0.8])\nfig.colorbar(im, cax=cax)\n\nfrom random import shuffle\n\nlp, pth = myhmm.decode(obs,algorithm='map')\ntrj_shfl_idx = np.arange(0,len(pth))\nshuffle(trj_shfl_idx)\nobs_shfl = np.array([obs[i] for i in trj_shfl_idx])\nlp_shfl, pp_shfl = myhmm.score_samples(obs_shfl)\nppp_shfl = advance_states_one(np.vstack([Pi,pp_shfl[:pp_shfl.shape[0]-1,:]]), A)\n\n\nfig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(1, 5, figsize=(15, 3))\nax1.matshow(np.transpose(prior_evo)[new_order,:],cmap='OrRd'); ax1.set_xlabel('prior'); ax1.set_ylabel('State')\nax2.matshow(np.transpose(ppp)[new_order,:],cmap='OrRd'); ax2.set_xlabel('predicted')\nax3.matshow(np.transpose(pp)[new_order,:],cmap='OrRd'); ax3.set_xlabel('posterior')\nax4.matshow(np.transpose(ppp_shfl)[new_order,:],cmap='OrRd'); ax4.set_xlabel('pred shfl')\nax5.matshow(np.transpose(pp_shfl)[new_order,:],cmap='OrRd'); ax5.set_xlabel('post shfl')",
"Remark: What if our pmfs contain any zeros? Problem! \nOne way to think about your problem is that you don't really have confidence in the PMF you have calculated from the histogram. You might need a slight prior in your model. Since if you were confident in the PMF, then the KL divergence should be infinity since you got values in one PMF that are impossible in the other PMF. If, on the other hand you had a slight, uninformative prior then there is always some small probability of seeing a certain outcome. One way of introducing this would be to add a vector of ones times some scalar to the histogram. The theoretical prior distribution you would be using is the dirichlet distribution, which is the conjugate prior of the categorical distribution. But for practical purposes you can do something like\npython\n pmf_unnorm = scipy.histogram(samples, bins=bins, density=True)[0] + w * scipy.ones(len(bins)-1)\n pmf = pmf_unnor / sum(pmf_unnorm)\nwhere w is some positive weight, depending on how strong a prior you want to have.",
"from scipy.stats import entropy as KLD\n\nKLlist = []\nKLlist_shfl = []\n\nfor ii in np.arange(1,len(pth)):\n KLlist.append(KLD(pp[ii,:],ppp[ii,:]))\n KLlist_shfl.append(KLD(pp_shfl[ii,:],ppp_shfl[ii,:]))\n\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 3))\n\nax1.plot(np.cumsum(KLlist), label='sequence', lw=1.5)\nax1.plot(np.cumsum(KLlist_shfl), label='trajectory shuffled', lw=1.5)\nax1.legend()\nax1.set_xlabel('bin')\nax1.set_title('Cumulative KL divergence')\n\nseqscore = np.cumsum(KLlist) / np.arange(1,len(pth))\nseqscore_shfl = np.cumsum(KLlist_shfl) / np.arange(1,len(pth))\n\nax2.plot(seqscore, label='sequence', lw=1.5)\nax2.plot(seqscore_shfl, label='trajectory shuffled', lw=1.5)\nax2.legend()\nax2.set_xlabel('bin')\nax2.set_title('Running average KL divergence')",
"First look at shuffle distribution for single sequence",
"def KL(distr_matU, distr_matV):\n from scipy.stats import entropy as KLD\n num_bins = distr_matU.shape[0]\n\n KLarray = np.zeros(num_bins)\n \n for ii in np.arange(1,num_bins):\n KLarray[ii-1] = KLD(distr_matU[ii,:],distr_matV[ii,:])\n \n return KLarray.mean()\n\nfrom random import shuffle\n\n###########################################################3\nstacked_data = ts_b\nseq_id = 0\nn_shuffles = 500\n###########################################################3\n\ntmpseqbdries = [0]; tmpseqbdries.extend(np.cumsum(stacked_data.sequence_lengths).tolist());\nobs = stacked_data.data[tmpseqbdries[seq_id]:tmpseqbdries[seq_id+1],:]\nll, pp = myhmm.score_samples(obs)\nnum_bins = obs.shape[0] \nppp = advance_states_one(np.vstack([Pi,pp[:num_bins-1,:]]), A)\ntrj_shfl_idx = np.arange(0, num_bins); \nKL_true = KL(pp,ppp)\n\nKL_shuffles = np.zeros(n_shuffles)\nfor nn in np.arange(0,n_shuffles):\n shuffle(trj_shfl_idx)\n obs_shfl = np.array([obs[i] for i in trj_shfl_idx])\n ll_shfl, pp_shfl = myhmm.score_samples(obs_shfl)\n ppp_shfl = advance_states_one(np.vstack([Pi,pp_shfl[:num_bins-1,:]]), A)\n KL_shuffles[nn] = KL(pp_shfl,ppp_shfl)\n\nfig, ax1 = plt.subplots(1, 1, figsize=(6, 2))\n\nsns.distplot(KL_shuffles, ax=ax1, bins=40)\nax1.axvline(x=KL_true, ymin=0, ymax=1, linewidth=2, color = 'k', linestyle='dashed', label='true sequence')\nx1.legend()",
"Now look at entire data set, with shuffles for each sequence\nIn particular, we consider all the sequences in the test set of running behavior, ts_b, none of which have been seen by the HMM before.",
"from random import shuffle\n\n###########################################################3\nstacked_data = ts_b\nn_shuffles = 250 # shuffles PER sequence in data set\n###########################################################3\n\nnum_sequences = len(stacked_data.sequence_lengths)\ntmpseqbdries = [0]; tmpseqbdries.extend(np.cumsum(stacked_data.sequence_lengths).tolist());\n\nKL_true = np.zeros(num_sequences)\nKL_shuffles = np.zeros((num_sequences,n_shuffles))\n\nfor seq_id in np.arange(0,num_sequences):\n\n obs = stacked_data.data[tmpseqbdries[seq_id]:tmpseqbdries[seq_id+1],:]\n ll, pp = myhmm.score_samples(obs)\n num_bins = obs.shape[0] \n ppp = advance_states_one(np.vstack([Pi,pp[:num_bins-1,:]]), A)\n trj_shfl_idx = np.arange(0, num_bins); \n KL_true[seq_id] = KL(pp,ppp)\n\n for nn in np.arange(0,n_shuffles):\n shuffle(trj_shfl_idx)\n obs_shfl = np.array([obs[i] for i in trj_shfl_idx])\n ll_shfl, pp_shfl = myhmm.score_samples(obs_shfl)\n ppp_shfl = advance_states_one(np.vstack([Pi,pp_shfl[:num_bins-1,:]]), A)\n KL_shuffles[seq_id,nn] = KL(pp_shfl,ppp_shfl)\n\nfig, ax1 = plt.subplots(1, 1, figsize=(6, 2))\n\nsns.distplot(KL_true, ax=ax1, label='true sequences')\nsns.distplot(KL_shuffles.flatten(), bins=80, ax=ax1, label='trajectory shuffled')\n#ax1.axvline(x=KL_true, ymin=0, ymax=1, linewidth=2, color = 'k', linestyle='dashed', label='true sequence')\n#ax1.set_xlim([0,5])\nax1.legend()",
"Discussion and further considerations\nThe KL-based sequence score actually works remarkably well when looking at individual examples. On a population level, the results seem good, but not great, but this might be due to several \"bad\" sequences in the data. For example, some sequences are quite short (4 bins) where only one or two states are visited. Consequently, reshuffling the data using a trajectory shuffle does nothing to the sequence, and we get false positives for shuffled data that was classified as a true sequence.\nRemaining important characterizations:\nI should consider other distributions here, such as the RUN > 8 vs NORUN < 4 distributions, and I should also split up data into lin1a, lin1b (splitting when the track was shortened) and lin2a and lin2b data. Each of those subsets of data can also be split into RUN > 8 and NORUN < 4 subsets to try and observe finer discrimination.\nOf course, I should also characterize this score with the numerous other shuffling strategies that are commonly employed, including the unit-ID shuffle, etc.\nKL is not symmetric:\nThere can be some debate about whether it is more appropriate to use $D_\\text{KL}(U||V)$ or $D_\\text{KL}(V||U)$. As of now, I am leaning in favor of $D_\\text{KL}(U||V)$, where $V$ is the expected model evolution, and $U$ is the actual observed behavior. I have tested both, and both consistently gives a lower score (more similarity) to true sequences than to shuffled sequences.\nAbout the place fields:\nThe place fields here also don't look that great, but that is partly due to the fact that I don't consider directionality here, and also I don't split the experiment into the parts before and after the track was shortened. However, we don't actually use any of the place field information in this entire analysis, so that it should not be of any real concern. 
We can of course estimate them better, if we really have to...\nNotation:\nImportantly, I should re-write the introduction to be more friendly, making my notation consistent and final, and I should demonstrate why this approach makes both intuitive and mathematical sense. I do like the notation used above, where $p(S_t|\\mathbf{y}{1:T})$ is the _posterior state distribution at time $t$ having observed the sequence $\\mathbf{y}_{1:T}$, and I also like the notation for distinguishing between distributions $p(\\cdot)$ and probabilities $P(\\cdot)$ although if I draw attention to this distinction, I have to be very careful to follow such a convention consistently.\nWhat's next? How can we derive an effective final score?\nFinally, what then, is my final sequence score? I still have both sequential and contextual components, but I need to think more carefully about the best way to make the KL-score into the sequential component. For one thing, a score is typically better if it is larger, but so far we have opposite desired directions for the contextual and sequential components (larger ctx is good, smaller KL is good). Another issue is interpretability, both local and global. Local interpretability might answer how likely we are to observe a KL score that small or smaller, based on a shuffle distribution of the sequence under consideration, but it would be more appealing to not have to compute shuffle distributions first, and also to have global interpretability, which would allow us to say \"this sequence is more consistent with the underlying model than that sequence\"."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
AllenDowney/ProbablyOverthinkingIt
|
convolution.ipynb
|
mit
|
[
"Bayesian filtering in the frequency domain\nThis notebook contains an exploration of the characteristic function as a way to represent a probability distribution, yielding an efficient implementation of a Bayes filter.\nCopyright 2015 Allen Downey\nMIT License: http://opensource.org/licenses/MIT",
"from __future__ import print_function, division\n\nimport thinkstats2\nimport thinkplot\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(style=\"white\", palette=\"muted\", color_codes=True)\n\n%matplotlib inline",
"Suppose you are tracking a rotating part and want to estimate its angle, in degrees, as a function of time, given noisy measurements of its position and velocity.\nI'll represent possible positions using a vector of 360 values in degrees.",
"n = 360\nxs = np.arange(n)",
"I'll represent distributions using a Numpy array of probabilities. The following function takes a Numpy array and normalizes it so the probabilities add to 1.",
"def normalize(dist):\n dist /= sum(dist)",
"The following function creates a discrete approximation of a Gaussian distribution with the given parameters, evaluated at the given positions, xs:",
"def gaussian(xs, mu, sigma):\n dist = stats.norm.pdf(xs, loc=180, scale=sigma)\n dist = np.roll(dist, mu-180)\n normalize(dist)\n return dist",
"Suppose that initially we believe that the position of the part is 180 degrees, with uncertainty represented by a Gaussian distribution with $\\sigma=4$.\nHere's what that looks like:",
"pos = gaussian(xs, mu=180, sigma=4)\nplt.plot(xs, pos);",
"And suppose that we believe the part is rotating at an angular velocity of 15 degrees per time unit, with uncertainty represented by a Gaussian distribution with $\\sigma=3$:",
"move = gaussian(xs, mu=15, sigma=3)\nplt.plot(xs, move);",
"The predict step\nWhat should we believe about the position of the part after one time unit has elapsed?\nA simple way to estimate the answer is to draw samples from the distributions of position and velocity, and add them together.\nThe following function draws a sample from a distribution (again, represented by a Numpy array of probabilities). I'm using a Pandas series because it provides a function that computes weighted samples.",
"def sample_dist(xs, dist, n=1000):\n series = pd.Series(xs)\n return series.sample(n=n, weights=dist, replace=True).values",
"As a quick check, the sample from the position distribution has the mean and standard deviation we expect.",
"pos_sample = sample_dist(xs, pos)\npos_sample.mean(), pos_sample.std()",
"And so does the sample from the distribution of velocities:",
"move_sample = sample_dist(xs, move)\nmove_sample.mean(), move_sample.std()",
"When we add them together, we get a sample from the distribution of positions after one time unit.\nThe mean is the sum of the means, and the standard deviation is the hypoteneuse of a triangle with the other two standard deviations. In this case, it's a 3-4-5 triangle:",
"sample = pos_sample + move_sample\nsample.mean(), sample.std()",
"Based on the samples, we can estimate the distribution of the sum.\nTo compute the distribution of the sum exactly, we can iterate through all possible values from both distributions, computing the sum of each pair and the product of their probabilities:",
"def add_dist(xs, dist1, dist2):\n res = np.zeros_like(dist1)\n for x1, p1 in zip(xs, dist1):\n for x2, p2 in zip(xs, dist2):\n x = (x1 + x2) % 360\n res[x] = res[x] + p1 * p2\n return res",
"This algorithm is slow (taking time proportional to $N^2$, where $N$ is the length of xs), but it works:",
"new_pos = add_dist(xs, pos, move)",
"Here's what the result looks like:",
"plt.plot(xs, new_pos);",
"And we can check the mean and standard deviation:",
"def mean_dist(xs, dist):\n return sum(xs * dist)",
"The mean of the sum is the sum of the means:",
"mu = mean_dist(xs, new_pos)\nmu",
"And the standard deviation of the sum is the hypoteneuse of the standard deviations:",
"def std_dist(xs, dist, mu):\n return np.sqrt(sum((xs - mu)**2 * dist))",
"Which should be 5:",
"sigma = std_dist(xs, new_pos, mu)\nsigma",
"What we just computed is the convolution of the two distributions.\nNow we get to the fun part. The characteristic function is an alternative way to represent a distribution. It is the Fourier transform of the probability density function, or for discrete distributions, the DFT of the probability mass function.\nI'll use Numpy's implementation of FFT. The result is an array of complex, so I'll just plot the magnitude and ignore the phase.",
"from numpy.fft import fft\n\nchar_pos = fft(pos)\nplt.plot(xs, np.abs(char_pos));",
"The Fourier transform of a Gaussian is also a Gaussian, which we can see more clearly if we rotate the characteristic function before plotting it.",
"plt.plot(xs, np.roll(np.abs(char_pos), 180));",
"We can also compute the characteristic function of the velocity distribution:",
"char_move = fft(move)\nplt.plot(xs, abs(char_move));",
"You might notice that the narrower (more certain) the distribution is in the space domain, the wider (less certain) it is in the frequency domain. As it turns out, the product of the two standard deviations is constant.\nWe can see that more clearly by plotting the distribution side-by-side in the space and frequency domains:",
"def plot_dist_and_char(dist):\n f, (ax1, ax2) = plt.subplots(1, 2, figsize=(7, 4))\n ax1.plot(xs, dist)\n ax1.set_xlabel('space domain')\n char = fft(dist)\n ax2.plot(xs, np.roll(abs(char), 180))\n ax2.set_xlabel('frequency domain')",
"The following function plots Gaussian distributions with given parameters.",
"def plot_gaussian_dist_and_char(mu=180, sigma=3):\n dist = gaussian(xs, mu, sigma)\n plot_dist_and_char(dist)",
"Here's a simple example:",
"plot_gaussian_dist_and_char(mu=180, sigma=3)",
"Now we can make sliders to control mu and sigma.",
"from IPython.html.widgets import interact, fixed\nfrom IPython.html import widgets\n\nslider1 = widgets.IntSliderWidget(min=0, max=360, value=180)\nslider2 = widgets.FloatSliderWidget(min=0, max=100, value=3)\ninteract(plot_gaussian_dist_and_char, mu=slider1, sigma=slider2);",
"As you increase sigma, the distribution gets wider in the space domain and narrower in the frequency domain.\nAs you vary mu, the location changes in the space domain, and the phase changes in the frequency domain, but the magnitudes are unchanged.\nBut enough of that; I still haven't explained why characteristic functions are useful. Here it is: If the characteristic function of X is $\\phi_X$ and the characteristic function of Y is $\\phi_Y$, the characteristic function of the sum X+Y is the elementwise product of $\\phi_X$ and $\\phi_Y$.\nSo the characteristic function of the new position (after one time step) is the product of the two characteristic functions we just computed:",
"char_new_pos = char_pos * char_move\nplt.plot(xs, abs(char_new_pos));",
"If we compute the inverse FFT of the characteristic function, we get the PMF of the new position:",
"from numpy.fft import ifft\n\nnew_pos = ifft(char_new_pos).real\nplt.plot(xs, new_pos);",
"We can check the mean and standard deviation of the result:",
"def mean_std_dist(xs, dist):\n xbar = mean_dist(xs, dist)\n s = std_dist(xs, dist, xbar)\n return xbar, s",
"Yup, that's what we expected (forgiving some floating-point errors):",
"mean_std_dist(xs, new_pos)",
"We can encapsulate this process in a function that computes the convolution of two distributions:",
"def fft_convolve(dist1, dist2):\n prod = fft(dist1) * fft(dist2)\n dist = ifft(prod).real\n return dist",
"Since FFT is $N \\log N$, and elementwise multiplication is linear, the whole function is $N \\log N$, which is better than the $N^2$ algorithm we started with.\nThe results from the function are the same:",
"new_pos = fft_convolve(pos, move)\nmean_std_dist(xs, new_pos)",
"The update step\nNow suppose that after the move we measure the position of the rotating part with a noisy instrument. If the measurement is 197 and the standard deviation of measurement error is 4, the following distribution shows the likelihood of the observed measurement for each possible, actual, position of the part:",
"likelihood = gaussian(xs, mu=197, sigma=4)\nplt.plot(xs, new_pos);",
"Now we can take our belief about the position of the part and update it using the observed measurement. By Bayes's theorem, we compute the product of the prior distribution and the likelihood, then renormalize to get the posterior distribution:",
"new_pos = new_pos * likelihood\nnormalize(new_pos)\nplt.plot(xs, new_pos);",
"The prior mean was 195 and the measurement was 197, so the posterior mean is in between, at 196.2 (closer to the measurement because the measurement error is 4 and the standard deviation of the prior is 5).\nThe posterior standard deviation is 3.1, so the measurement decreased our uncertainty about the location.",
"mean_std_dist(xs, new_pos)",
"We can encapsulate the prediction step in a function:",
"def predict(xs, pos, move):\n new_pos = fft_convolve(pos, move)\n return new_pos",
"And likewise the update function:",
"def update(xs, pos, likelihood):\n new_pos = pos * likelihood\n normalize(new_pos)\n return new_pos",
"The following function takes a prior distribution, velocity, and a measurement, and performs one predict-update step.\n(The uncertainty of the velocity and measurement are hard-coded in this function, but could be parameters.)",
"def predict_update(xs, pos1, velocity, measure):\n # predict\n move = gaussian(xs, velocity, 3)\n pos2 = predict(xs, pos1, move)\n \n #update\n likelihood = gaussian(xs, measure, 4)\n pos3 = update(xs, pos2, likelihood)\n \n #plot\n plt.plot(xs, pos1, label='pos1')\n plt.plot(xs, pos2, label='pos2')\n plt.plot(xs, pos3, label='pos3')\n plt.legend()\n \n return pos3",
"In the figure below, pos1 is the initial belief about the position, pos2 is the belief after the predict step, and pos3 is the posterior belief after the measurement.\nThe taller the distribution, the narrower it is, indicating more certainty about position. In general, the predict step makes us less certain, and the update makes us more certain.",
"pos1 = gaussian(xs, 180, 4)\npos3 = predict_update(xs, pos1, velocity=15, measure=197)",
"So far I've been using Gaussian distributions for everything, but in that case we could skip all the computation and get the results analytically.\nThe implementation I showed generalizes to arbitrary distribitions. For example, suppose our initial beliefs are multimodal, for example, if we can't tell whether the part has been rotated 90 degrees.",
"pos1 = (gaussian(xs, 0, 4) + gaussian(xs, 90, 4) + \n gaussian(xs, 180, 4) + gaussian(xs, 270, 4))\nnormalize(pos1)\nplt.plot(xs, pos1);",
"Now we can do the same predict-update step:",
"pos3 = predict_update(xs, pos1, velocity=15, measure=151)",
"After the predict step, our belief is still multimodal.\nThen I chose a measurement, 151, that's halfway between two modes. The result is bimodal (with the other two modes practically eliminated).\nIf we perform one more step:",
"pos5 = predict_update(xs, pos3, velocity=15, measure=185)",
"Now the posterior is unimodal again (and very close to Gaussian). In general, if the likelihood function is Gaussian, the result will converge to Gaussian over time, regardless of the initial distribution."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ToqueWillot/M2DAC
|
FDMS/TME5/TME4_5_FiltrageCollaboratif_tsne.ipynb
|
gpl-2.0
|
[
"TME4 FDMS Collaborative Filtering\nFlorian Toqué & Paul Willot",
"%matplotlib inline\nfrom random import random\nimport math\nimport numpy as np\nimport copy\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport pickle as pkl\nfrom scipy.spatial import distance\nimport seaborn as sns\nsns.set_style('darkgrid')",
"Loading the data",
"def loadMovieLens(path='./data/movielens'):\n #Get movie titles\n movies={}\n rev_movies={}\n for idx,line in enumerate(open(path+'/u.item')):\n idx,title=line.split('|')[0:2]\n movies[idx]=title\n rev_movies[title]=idx\n\n # Load data\n prefs={}\n for line in open(path+'/u.data'):\n (user,movieid,rating,ts)=line.split('\\t')\n prefs.setdefault(user,{})\n prefs[user][movies[movieid]]=float(rating)\n \n return prefs,rev_movies\n\ndata,movies = loadMovieLens(\"data/ml-100k\")",
"Splitting data between train/test\nWe avoid to let unseen data form the train set in the test set.\nWe also try to minimise the dataset reduction by splitting on each user.",
"def getRawArray(data):\n d = []\n for u in data.keys():\n for i in data[u].keys():\n d.append([u,i,data[u][i]])\n return np.array(d)\n# splitting while avoiding to reduce the dataset too much\ndef split_train_test(data,percent_test):\n test={}\n train={}\n movie={}\n for u in data.keys():\n test.setdefault(u,{})\n train.setdefault(u,{})\n for movie in data[u]:\n #print(data[u][movie])\n if (random()<percent_test):\n test[u][movie]=data[u][movie]\n else:\n train[u][movie]=data[u][movie]\n return train, test\ndef split_train_test_by_movies(data,percent_test):\n test={}\n train={}\n movie={}\n for u in data.keys():\n for movie in data[u]:\n if (random()<percent_test):\n try:\n test[movie][u]=data[u][movie]\n except KeyError:\n test.setdefault(movie,{})\n test[movie][u]=data[u][movie]\n else:\n try:\n train[movie][u]=data[u][movie]\n except KeyError:\n train.setdefault(movie,{})\n train[movie][u]=data[u][movie]\n return train, test",
"split used for convenience on the average by movie baseline",
"percent_test = 0.2\ntrain,test = split_train_test(data,percent_test)\nm_train,m_test = split_train_test_by_movies(data,percent_test)",
"cleaning\n18 movies have no ratings at all",
"def deleteUnseenInTest(train,test):\n for k in test.keys():\n try:\n train[k]\n except KeyError:\n test.pop(k,None)\n\ndef deleteUnknowData(triplet_test, trainUsers, trainItems) :\n to_Del = []\n for i,t in enumerate(triplet_test):\n if not t[0] in trainUsers:\n to_Del.append(i)\n elif not t[1] in trainItems:\n to_Del.append(i)\n return np.delete(triplet_test, to_Del, 0)\n\ndeleteUnseenInTest(train,test)\ndeleteUnseenInTest(m_train,m_test)",
"Matrix used for fast evaluation",
"def getRawArray(data):\n rawArray = []\n for u in data.keys():\n for i in data[u].keys():\n rawArray.append([u,i,data[u][i]])\n return rawArray\n\ndef getDataByUsers(rawArray) :\n usr = {}\n for t in rawArray:\n if not t[0] in usr.keys():\n usr[t[0]] = {}\n usr[t[0]][t[1]] = float(t[2])\n return usr\n\ndef getDataByItems(rawArray) :\n itm = {}\n for t in rawArray:\n if not t[1] in itm.keys():\n itm[t[1]] = {}\n itm[t[1]][t[0]] = float(t[2])\n return itm\n\n# Split l'ensemble des rawArrays \ndef splitTrainTest(rawArray, testProp) :\n perm = np.random.permutation(rawArray)\n splitIndex = int(testProp * len(rawArray))\n return perm[splitIndex:], perm[:splitIndex]\n\n# supprime des données de test les données inconnus en train\ndef deleteUnknowData(rawArray_test, trainUsers, trainItems) :\n to_Del = []\n for i,t in enumerate(rawArray_test):\n if not t[0] in trainUsers:\n to_Del.append(i)\n elif not t[1] in trainItems:\n to_Del.append(i)\n return np.delete(rawArray_test, to_Del, 0)\n \n\n%%time\n\nrawArray = getRawArray(data)\n\narrayTrain, arrayTest = splitTrainTest(rawArray , 0.2)\n\ntrainUsers = getDataByUsers(arrayTrain)\ntrainItems = getDataByItems(arrayTrain)\n\narrayTest = deleteUnknowData(arrayTest, trainUsers, trainItems)\n\ntestUsers = getDataByUsers(arrayTest)\ntestItems = getDataByItems(arrayTest)",
"Content example",
"arrayTest[:10,:10]",
"Baseline: mean by user",
"class baselineMeanUser:\n def __init__(self):\n self.users={}\n def fit(self,train):\n for user in train.keys():\n note=0.0\n for movie in train[user].keys():\n note+=train[user][movie]\n note=note/len(train[user])\n self.users[user]=note\n \n def predict(self,users):\n return [self.users[u] for u in users]\n\nbaseline_mu= baselineMeanUser()\nbaseline_mu.fit(train)\npred = baseline_mu.predict(arrayTest[:,0])\nprint(\"Mean Error %0.6f\" %(\n (np.array(pred) - np.array(arrayTest[:,2], float)) ** 2).mean())\n\nclass baselineMeanMovie:\n def __init__(self):\n self.movies={}\n def fit(self,train):\n for movie in train.keys():\n note=0.0\n for user in train[movie].keys():\n note+=train[movie][user]\n note=note/len(train[movie])\n self.movies[movie]=note\n \n def predict(self,movies):\n res=[]\n for m in movies:\n try:\n res.append(self.movies[m])\n except:\n res.append(3)\n return res\n\nbaseline_mm= baselineMeanMovie()\nbaseline_mm.fit(m_train)\npred = baseline_mm.predict(arrayTest[:,1])\nprint(\"Mean Error %0.6f\" %(\n (np.array(pred) - np.array(arrayTest[:,2], float)) ** 2).mean())",
"Raw matrix are used for convenience and clarity.\nStructure like scipy sparse matrix or python dictionnaries may be used for speedup.\nComplete dataset",
"rawMatrix = np.zeros((943,1682))\nfor u in data:\n for m in data[u]:\n rawMatrix[int(u)-1][int(movies[m])-1] = data[u][m]\n\nprint(np.shape(rawMatrix))\nrawMatrix[:5,:5]",
"Train and test dataset",
"rawMatrixTrain = np.zeros((len(data.keys()),1682))\nfor u in train:\n for m in train[u]:\n rawMatrixTrain[int(u)-1][int(movies[m])-1] = train[u][m]\n \nrawMatrixTest = np.zeros((len(data.keys()),1682))\nfor u in test:\n for m in test[u]:\n rawMatrixTest[int(u)-1][int(movies[m])-1] = test[u][m]",
"Non-negative Matrix Factorization\nFast implementation using numpy's matrix processing.",
"def nmf(X, latent_features, max_iter=100, eps = 1e-5,printevery=100):\n\n print \"NMF with %d latent features, %d iterations.\"%(latent_features, max_iter)\n\n # mask used to ignore null element (coded by zero)\n mask = np.sign(X)\n\n # randomly initialized matrix\n rows, columns = X.shape\n A = np.random.rand(rows, latent_features)\n \n Y = np.random.rand(latent_features, columns)\n # Not used as I couldn't get it to bring significant improvments\n # Y = linalg.lstsq(A, X)[0] # initializing that way as recommanded in a blog post\n # Y = np.maximum(Y, eps) # avoiding too low values\n\n masked_X = mask * X\n masktest = np.sign(rawMatrixTest) # used for prints\n masktrain = np.sign(rawMatrixTrain) # used for prints\n\n for i in range(1, max_iter + 1):\n\n top = np.dot(masked_X, Y.T)\n bottom = (np.dot((mask * np.dot(A, Y)), Y.T)) + eps\n A *= top / bottom\n \n top = np.dot(A.T, masked_X)\n bottom = np.dot(A.T, mask * np.dot(A, Y)) + eps\n Y *= top / bottom\n\n\n # evaluation\n if i % printevery == 0 or i == 1 or i == max_iter:\n X_est = np.dot(A, Y)\n q = masktest*X_est - rawMatrixTest\n q_train = masktrain*X_est - rawMatrixTrain\n print \"Iteration %d, Err %.05f, Err train %.05f\"%( i, (q*q).sum()/ masktest.sum(), (q_train*q_train).sum()/ masktest.sum() )\n \n return A, Y\n\n%%time\nA,Y = nmf(rawMatrixTrain,100,eps = 1e-5,max_iter=5,printevery=1)\nresMatrix = A.dot(Y)",
"We see that it quickly get better than the baseline.\nHowever, we see below that it overfit after that:",
"%%time\nA,Y = nmf(rawMatrixTrain,50,eps = 1e-5,max_iter=500,printevery=100)\nresMatrix = A.dot(Y)",
"This is due to the high sparsity of the matrix.\nWe can of course reduce the features matrix size to avoid overfitting, but that will limit further improvments:",
"%%time\nA,Y = nmf(rawMatrixTrain,1,eps = 1e-5,max_iter=100,printevery=20)\nresMatrix = A.dot(Y)",
"Despite good results in few seconds on this dataset, this can only get us so far.\nWe then have to add regularization to the cost function.\nEvaluation",
"## This class is used to make predictions\nclass evalMF:\n def __init__(self,resMatrix,dicU,dicI):\n self.resMatrix=resMatrix\n self.dicU = dicU\n self.dicI = dicI\n def fit(self):\n pass\n \n def predict(self,user,movie):\n return self.resMatrix[int(user)-1][int(self.dicI[movie])-1]\n\nmf = evalMF(resMatrix,data,movies)\n\n# np.array([ (float(ra[2]) - mf.predict(ra[0],ra[1]))**2 for ra in evalArrayTest]).mean()\n# faster evaluation\nmasqueTest=np.sign(rawMatrixTest)\nq = masqueTest*resMatrix - rawMatrixTest\n(q*q).sum()/ masqueTest.sum()",
"Let's look at some predictions",
"print data[\"1\"][\"Akira (1988)\"]\nprint mf.predict(\"1\",\"Akira (1988)\")\nprint data[\"1\"][\"I.Q. (1994)\"]\nprint mf.predict(\"1\",\"I.Q. (1994)\")",
"We usualy see an important difference between users, so we need to take the bias into account.",
"user=\"1\"\nsumm=0\nfor i in data[user]:\n summ+=(float(data[user][i]) - mf.predict(user,i))**2\nsumm/len(data[user])\n\nuser=\"3\"\nsumm=0\nfor i in data[user]:\n summ+=(float(data[user][i]) - mf.predict(user,i))**2\nsumm/len(data[user])",
"We have not been very successful with incorporating the bias and L1 into that implementation...\nWe build a simpler model below, and then add the regularization and bias.",
"class mf():\n def __init__(self, k, eps=1e-3, nIter=2000, lambd=0.5):\n self.k = k\n self.lambd = lambd\n self.eps = eps\n self.nIter = nIter\n\n def fit(self, trainUsers, trainItems, rawArray):\n print \"MF with %d latent features, %d iterations.\"%(self.k, self.nIter)\n\n printevery = np.round(self.nIter/10,decimals=-4)\n\n self.p = {}\n self.q = {}\n self.bu = {}\n self.bi = {}\n self.mu = np.random.random() * 2 - 1\n\n for j in range(len(rawArray)):\n u = rawArray[j][0]\n i = rawArray[j][1]\n if not u in self.p:\n self.p[u] = np.random.rand(1,self.k)\n self.bu[u] = np.random.rand() * 2 - 1\n if not i in self.q:\n self.q[i] = np.random.rand(self.k,1)\n self.bi[i] = np.random.rand() * 2 - 1\n loss = []\n for it in range(self.nIter):\n ind = np.random.randint(len(rawArray))\n u = rawArray[ind][0]\n i = rawArray[ind][1]\n\n # thx to A & A !\n tmp = trainUsers[u][i] - (self.mu + self.bi[i] + self.bu[u] +self.p[u].dot(self.q[i])[0][0])\n self.p[u] = (1 - self.lambd * self.eps) * self.p[u] + self.eps * 2 * tmp * self.q[i].transpose()\n self.bu[u] = (1 - self.lambd * self.eps) * self.bu[u] + self.eps * 2 * tmp\n self.q[i] = (1 - self.lambd * self.eps) * self.q[i] + self.eps * 2 * tmp * self.p[u].transpose()\n self.bi[i] = (1 - self.lambd * self.eps) * self.bi[i] + self.eps * 2 * tmp\n self.mu = (1 - self.lambd * self.eps) * self.mu + self.eps * 2 * tmp\n\n loss.append(tmp*tmp)\n\n if it % printevery == 0 or it == self.nIter-1:\n print \"Iteration %s, Err %.05f, True %.05f\"%( str(it).ljust(8), np.mean(loss) , model.score(arrayTest))\n\n return self.p, self.q\n\n\n def predict(self, rawArray_test):\n pred = np.zeros(len(rawArray_test))\n for ind,t in enumerate(rawArray_test):\n pred[ind] = self.mu + self.bu[t[0]] + self.bi[t[1]] + self.p[t[0]].dot(self.q[t[1]])[0][0]\n return pred\n\n def score(self, rawArray_test) :\n return ((self.predict(rawArray_test) - np.array(rawArray_test[:,2], float)) ** 2).mean()\n\n%%time\nk = 10\neps = 8e-3\nnIter = int(2e6)\nlambd = 
0.2\nmodel = mf(k, eps=eps, nIter=nIter,lambd=lambd)\np,q = model.fit(trainUsers, trainItems, arrayTrain)\nprint \"\\nError on test: %.05f\" %(model.score(arrayTest))",
"thanks to Professeur Chen",
"class tSNE():\n def __init__(self,perp, nIter, lr, moment, dim=2):\n self.perp = perp # entre 5 et 50\n self.nIter = nIter\n self.lr = lr\n self.moment = moment\n self.dim = dim \n def fit(self,data):\n nEx = np.shape(data)[0]\n # Matrice des distances de ||xi - xj||² #\n normx = np.sum(data**2,1)\n normx = np.reshape(normx, (1, nEx))\n distancex = normx + normx.T - 2 * data.dot(data.T)\n # Calcul des sigma ---------------------------------------------------------------#\n lperp = np.log2(self.perp)\n # initialisation bornes pour la recherche dichotomique #\n sup = np.ones((nEx,1)) * np.max(distancex)\n inf = np.zeros((nEx,1))\n self.sigma = (sup + inf) / 2.\n # recherche dichotomique #\n stop = False\n while not stop:\n # Calculer la matrice des p(i|j)\n self.pcond = np.exp(-distancex / (2. * (self.sigma**2)))\n self.pcond = self.pcond / np.sum(self.pcond - np.eye(nEx),1).reshape(nEx,1)\n # Calculer l'entropie de p(i|j)\n entropy = - np.sum(self.pcond * np.log2(self.pcond), 0)\n # Mise a jour des bornes\n # Si il faut augmenter sigma\n up = entropy < lperp \n inf[up,0] = self.sigma[up,0]\n # Si il faut baisser sigma\n down = entropy > lperp \n sup[down,0] = self.sigma[down,0]\n # Mise a jour de sigma et condition d'arrêt\n old = self.sigma\n self.sigma = ((sup + inf) / 2.)\n if np.max(np.abs(old - self.sigma)) < 1e-5:\n stop = True\n #print np.exp(entropy)\n #print self.sigma.T \n #--------------------------------------------------------------------------#\n #initialiser y\n self.embeddings = np.zeros((self.nIter+2, nEx, self.dim))\n self.embeddings[1] = np.random.randn(nEx, self.dim) * 1e-4\n #--------------------------------------------------------------------------#\n # p(ij)\n self.pij = (self.pcond + self.pcond.T) / (2.*nEx)\n np.fill_diagonal(self.pij, 0)\n # Descente de Gradient\n for t in xrange(1,self.nIter+1):\n # Matrice des distances \n normy = np.sum((self.embeddings[t]**2),1)\n normy = np.reshape(normy, (1, nEx))\n distancey = normy + normy.T - 2 * 
self.embeddings[t].dot(self.embeddings[t].T)\n # q(ij)\n # self.qij = (distancey.sum() + nEx*(nEx-1)) / (1 + distancey)\n # np.fill_diagonal(self.qij, 0)\n self.qij = 1 / (1 + distancey)\n np.fill_diagonal(self.qij, 0)\n self.qij = self.qij / self.qij.sum()\n # Descente de gradient\n yt = self.embeddings[t]\n tmpgrad = 4 * ((self.pij - self.qij) / (1 + distancey)).reshape(nEx, nEx,1)\n for i in range(nEx):\n dy = (tmpgrad[i] * (yt[i]-yt)).sum(0)\n self.embeddings[t+1][i] = yt[i] - self.lr * dy + self.moment * (yt[i] - self.embeddings[t-1,i])\n\nfrom sklearn import datasets\nfrom scipy import stats\ndigits = datasets.load_digits()\n\nX_ini = np.vstack([digits.data[digits.target==i]\n for i in range(10)])\ncols = np.hstack([digits.target[digits.target==i]\n for i in range(10)])\n\n%%time\n\nmodel = tSNE(10,500,1000,0)\nmodel.fit(X_ini)\n\npalette = np.array(sns.color_palette(\"hls\", 10))\nt = np.shape(model.embeddings)[0] -1\n\n# We create a scatter plot.\nf = plt.figure(figsize=(8, 8))\nax = plt.subplot(aspect='equal')\nsc = ax.scatter(model.embeddings[t,:,0], model.embeddings[t,:,1], lw=0, s=40,\n c=palette[cols.astype(np.int)])\nplt.xlim(-25, 25)\nplt.ylim(-25, 25)\nax.axis('off')\nax.axis('tight')\n\n#plt.plot(mod.embedding_[12][0],mod.embedding_[12][1], 'bv')\n \nplt.show()",
"For reference, let's compare it with sklearn's TSNE",
"from sklearn.manifold import TSNE\n\nmod = TSNE(random_state=1337)\n\n%%time\nX = mod.fit_transform(X_ini)\n\npalette = np.array(sns.color_palette(\"hls\", 10))\n\n# We create a scatter plot.\nf = plt.figure(figsize=(8, 8))\nax = plt.subplot(aspect='equal')\nsc = ax.scatter(X[:,0], X[:,1], lw=0, s=40,\n c=palette[cols.astype(np.int)])\nplt.xlim(-25, 25)\nplt.ylim(-25, 25)\nax.axis('off')\nax.axis('tight')\n\n#plt.plot(mod.embedding_[12][0],mod.embedding_[12][1], 'bv')\n \nplt.show()",
"It produce similar results, albeit faster, as expected.",
"rawMatrix = np.zeros((943,1682))\nfor u in data:\n for m in data[u]:\n rawMatrix[int(u)-1][int(movies[m])-1] = data[u][m]",
"Predict unknown data and fill the others with the right number",
"%%time\nrawMatrix = np.zeros((943,1682))\nfor u in data:\n for m in movies.keys():\n try:\n rawMatrix[int(u)-1][int(movies[m])-1] = model.predict([[u,m]])\n except KeyError:\n rawMatrix[int(u)-1][int(movies[m])-1] = 3\nfor u in data:\n for m in data[u]:\n rawMatrix[int(u)-1][int(movies[m])-1] = data[u][m]\n\nrawMatrix[:5,:5]",
"0.0164939277392222 percent of unseen data",
"model.predict([[\"1\",\"101 Dalmatians (1996)\"]])\n\nrawMatrix = np.zeros((943,1682))\nfor u in data:\n for m in data[u]:\n rawMatrix[int(u)-1][int(movies[m])-1] = data[u][m]\n\nmod = TSNE(random_state=1337)\n\n%%time\nX = mod.fit_transform(rawMatrix)",
"Produced visualisation",
"palette = np.array(sns.color_palette(\"hls\", 4))\n\nf = plt.figure(figsize=(8, 8))\nax = plt.subplot(aspect='equal')\nsc = ax.scatter(X[:,0], X[:,1], lw=0, s=40)\nplt.xlim(-25, 25)\nplt.ylim(-25, 25)\nax.axis('off')\nax.axis('tight')\n \nplt.show()",
"But its not very informative...\nLet's add some colors by ratings",
"me = rawMatrix.mean(1)\nX_ini = np.vstack([rawMatrix[np.where(np.logical_and(me>i, me<=j))]\n for i,j in [(0,2),(2,3),(3,4),(4,5)]])\ncols = np.hstack( [ np.round( me[ np.where(np.logical_and(me>i, me<=j)) ] )\n for i,j in [(0,2),(2,3),(3,4),(4,5)] ] )\n\n%%time\nX = mod.fit_transform(X_ini)\n\npalette = np.array(sns.color_palette(\"hls\", 6))\n\nf = plt.figure(figsize=(8, 8))\nax = plt.subplot(aspect='equal')\nsc = ax.scatter(X[:,0], X[:,1], lw=0, s=40,\n c=palette[cols.astype(np.int)])\nplt.xlim(-25, 25)\nplt.ylim(-25, 25)\nax.axis('off')\nax.axis('tight')\n\ntxts = []\nfor i in range(2,6):\n xtext, ytext = np.median(X[cols == i, :], axis=0)\n txt = ax.text(xtext, ytext, str(i), fontsize=24)\n txts.append(txt)\n\n#plt.plot(mod.embedding_[12][0],mod.embedding_[12][1], 'bv')\n \nplt.show()",
"We see that the representation make sense, as movies with best ratings are opposed to movies with low ratings"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
anukarsh1/deep-learning-coursera
|
Neural Networks And Deep Learning/Deep Neural Network - Application.ipynb
|
mit
|
[
"Building your Deep Neural Network: Step by Step\nWelcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want!\n\nIn this notebook, you will implement all the functions required to build a deep neural network.\nIn the next assignment, you will use these functions to build a deep neural network for image classification.\n\nAfter this assignment you will be able to:\n- Use non-linear units like ReLU to improve your model\n- Build a deeper neural network (with more than 1 hidden layer)\n- Implement an easy-to-use neural network class\nNotation:\n- Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer. \n - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters.\n- Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example. \n - Example: $x^{(i)}$ is the $i^{th}$ training example.\n- Lowerscript $i$ denotes the $i^{th}$ entry of a vector.\n - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations).\nLet's get started!\n1 - Packages\nLet's first import all the packages that you will need during this assignment. \n- numpy is the main package for scientific computing with Python.\n- matplotlib is a library to plot graphs in Python.\n- dnn_utils provides some necessary functions for this notebook.\n- testCases provides some test cases to assess the correctness of your functions\n- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed.",
"import numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nfrom testCases import *\nfrom dnn_utils import sigmoid, sigmoid_backward, relu, relu_backward\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n%load_ext autoreload\n%autoreload 2\n\nnp.random.seed(1)",
"2 - Outline of the Assignment\nTo build your neural network, you will be implementing several \"helper functions\". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will:\n\nInitialize the parameters for a two-layer network and for an $L$-layer neural network.\nImplement the forward propagation module (shown in purple in the figure below).\nComplete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$).\nWe give you the ACTIVATION function (relu/sigmoid).\nCombine the previous two steps into a new [LINEAR->ACTIVATION] forward function.\nStack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function.\n\n\nCompute the loss.\nImplement the backward propagation module (denoted in red in the figure below).\nComplete the LINEAR part of a layer's backward propagation step.\nWe give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward) \nCombine the previous two steps into a new [LINEAR->ACTIVATION] backward function.\nStack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function\n\n\nFinally update the parameters.\n\n<img src=\"images/final outline.png\" style=\"width:800px;height:500px;\">\n<caption><center> Figure 1</center></caption><br>\nNote that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. 
This assignment will show you exactly how to carry out each of these steps. \n3 - Initialization\nYou will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers.\n3.1 - 2-layer Neural Network\nExercise: Create and initialize the parameters of the 2-layer neural network.\nInstructions:\n- The model's structure is: LINEAR -> RELU -> LINEAR -> SIGMOID. \n- Use random initialization for the weight matrices. Use np.random.randn(shape)*0.01 with the correct shape.\n- Use zero initialization for the biases. Use np.zeros(shape).",
"# GRADED FUNCTION: initialize_parameters\n\ndef initialize_parameters(n_x, n_h, n_y):\n \"\"\"\n Argument:\n n_x -- size of the input layer\n n_h -- size of the hidden layer\n n_y -- size of the output layer\n \n Returns:\n parameters -- python dictionary containing your parameters:\n W1 -- weight matrix of shape (n_h, n_x)\n b1 -- bias vector of shape (n_h, 1)\n W2 -- weight matrix of shape (n_y, n_h)\n b2 -- bias vector of shape (n_y, 1)\n \"\"\"\n \n np.random.seed(1)\n \n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = np.random.randn(n_h, n_x) * 0.01\n b1 = np.zeros(shape=(n_h, 1))\n W2 = np.random.randn(n_y, n_h) * 0.01\n b2 = np.zeros(shape=(n_y, 1))\n ### END CODE HERE ###\n \n assert(W1.shape == (n_h, n_x))\n assert(b1.shape == (n_h, 1))\n assert(W2.shape == (n_y, n_h))\n assert(b2.shape == (n_y, 1))\n \n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters \n\nparameters = initialize_parameters(2,2,1)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))",
"Expected output:\n<table style=\"width:80%\">\n <tr>\n <td> **W1** </td>\n <td> [[ 0.01624345 -0.00611756]\n [-0.00528172 -0.01072969]] </td> \n </tr>\n\n <tr>\n <td> **b1**</td>\n <td>[[ 0.]\n [ 0.]]</td> \n </tr>\n\n <tr>\n <td>**W2**</td>\n <td> [[ 0.00865408 -0.02301539]]</td>\n </tr>\n\n <tr>\n <td> **b2** </td>\n <td> [[ 0.]] </td> \n </tr>\n\n</table>\n\n3.2 - L-layer Neural Network\nThe initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the initialize_parameters_deep, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then:\n<table style=\"width:100%\">\n\n\n <tr>\n <td> </td> \n <td> **Shape of W** </td> \n <td> **Shape of b** </td> \n <td> **Activation** </td>\n <td> **Shape of Activation** </td> \n <tr>\n\n <tr>\n <td> **Layer 1** </td> \n <td> $(n^{[1]},12288)$ </td> \n <td> $(n^{[1]},1)$ </td> \n <td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td> \n\n <td> $(n^{[1]},209)$ </td> \n <tr>\n\n <tr>\n <td> **Layer 2** </td> \n <td> $(n^{[2]}, n^{[1]})$ </td> \n <td> $(n^{[2]},1)$ </td> \n <td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td> \n <td> $(n^{[2]}, 209)$ </td> \n <tr>\n\n <tr>\n <td> $\\vdots$ </td> \n <td> $\\vdots$ </td> \n <td> $\\vdots$ </td> \n <td> $\\vdots$</td> \n <td> $\\vdots$ </td> \n <tr>\n\n <tr>\n <td> **Layer L-1** </td> \n <td> $(n^{[L-1]}, n^{[L-2]})$ </td> \n <td> $(n^{[L-1]}, 1)$ </td> \n <td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td> \n <td> $(n^{[L-1]}, 209)$ </td> \n <tr>\n\n\n <tr>\n <td> **Layer L** </td> \n <td> $(n^{[L]}, n^{[L-1]})$ </td> \n <td> $(n^{[L]}, 1)$ </td>\n <td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td>\n <td> $(n^{[L]}, 209)$ </td> \n <tr>\n\n</table>\n\nRemember that when we compute $W X + b$ in python, it carries out broadcasting. 
For example, if: \n$$ W = \\begin{bmatrix}\n j & k & l\\\n m & n & o \\\n p & q & r \n\\end{bmatrix}\\;\\;\\; X = \\begin{bmatrix}\n a & b & c\\\n d & e & f \\\n g & h & i \n\\end{bmatrix} \\;\\;\\; b =\\begin{bmatrix}\n s \\\n t \\\n u\n\\end{bmatrix}\\tag{2}$$\nThen $WX + b$ will be:\n$$ WX + b = \\begin{bmatrix}\n (ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\\n (ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\\n (pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u\n\\end{bmatrix}\\tag{3} $$\nExercise: Implement initialization for an L-layer Neural Network. \nInstructions:\n- The model's structure is [LINEAR -> RELU] $ \\times$ (L-1) -> LINEAR -> SIGMOID. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function.\n- Use random initialization for the weight matrices. Use np.random.rand(shape) * 0.01.\n- Use zeros initialization for the biases. Use np.zeros(shape).\n- We will store $n^{[l]}$, the number of units in different layers, in a variable layer_dims. For example, the layer_dims for the \"Planar Data classification model\" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. Thus means W1's shape was (4,2), b1 was (4,1), W2 was (1,4) and b2 was (1,1). Now you will generalize this to $L$ layers! \n- Here is the implementation for $L=1$ (one layer neural network). It should inspire you to implement the general case (L-layer neural network).\npython\n if L == 1:\n parameters[\"W\" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01\n parameters[\"b\" + str(L)] = np.zeros((layer_dims[1], 1))",
"# GRADED FUNCTION: initialize_parameters_deep\n\ndef initialize_parameters_deep(layer_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the dimensions of each layer in our network\n \n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])\n bl -- bias vector of shape (layer_dims[l], 1)\n \"\"\"\n \n np.random.seed(3)\n parameters = {}\n L = len(layer_dims) # number of layers in the network\n\n for l in range(1, L):\n ### START CODE HERE ### (≈ 2 lines of code)\n parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01\n parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n ### END CODE HERE ###\n \n assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l - 1]))\n assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))\n\n \n return parameters\n\nparameters = initialize_parameters_deep([5,4,3])\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))",
"Expected output:\n<table style=\"width:80%\">\n <tr>\n <td> **W1** </td>\n <td>[[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388]\n [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218]\n [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034]\n [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]</td> \n </tr>\n\n <tr>\n <td>**b1** </td>\n <td>[[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]]</td> \n </tr>\n\n <tr>\n <td>**W2** </td>\n <td>[[-0.01185047 -0.0020565 0.01486148 0.00236716]\n [-0.01023785 -0.00712993 0.00625245 -0.00160513]\n [-0.00768836 -0.00230031 0.00745056 0.01976111]]</td> \n </tr>\n\n <tr>\n <td>**b2** </td>\n <td>[[ 0.]\n [ 0.]\n [ 0.]]</td> \n </tr>\n\n</table>\n\n4 - Forward propagation module\n4.1 - Linear Forward\nNow that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order:\n\nLINEAR\nLINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid. \n[LINEAR -> RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID (whole model)\n\nThe linear forward module (vectorized over all the examples) computes the following equations:\n$$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\\tag{4}$$\nwhere $A^{[0]} = X$. \nExercise: Build the linear part of forward propagation.\nReminder:\nThe mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find np.dot() useful. If your dimensions don't match, printing W.shape may help.",
"# GRADED FUNCTION: linear_forward\n\ndef linear_forward(A, W, b):\n \"\"\"\n Implement the linear part of a layer's forward propagation.\n\n Arguments:\n A -- activations from previous layer (or input data): (size of previous layer, number of examples)\n W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n b -- bias vector, numpy array of shape (size of the current layer, 1)\n\n Returns:\n Z -- the input of the activation function, also called pre-activation parameter \n cache -- a python dictionary containing \"A\", \"W\" and \"b\" ; stored for computing the backward pass efficiently\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line of code)\n Z = np.dot(W, A) + b\n ### END CODE HERE ###\n \n assert(Z.shape == (W.shape[0], A.shape[1]))\n cache = (A, W, b)\n \n return Z, cache\n\nA, W, b = linear_forward_test_case()\n\nZ, linear_cache = linear_forward(A, W, b)\nprint(\"Z = \" + str(Z))",
"Expected output:\n<table style=\"width:35%\">\n\n <tr>\n <td> **Z** </td>\n <td> [[ 3.1980455 7.85763489]] </td> \n </tr>\n\n</table>\n\n4.2 - Linear-Activation Forward\nIn this notebook, you will use two activation functions:\n\n\nSigmoid: $\\sigma(Z) = \\sigma(W A + b) = \\frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the sigmoid function. This function returns two items: the activation value \"a\" and a \"cache\" that contains \"Z\" (it's what we will feed in to the corresponding backward function). To use it you could just call: \npython\nA, activation_cache = sigmoid(Z)\n\n\nReLU: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the relu function. This function returns two items: the activation value \"A\" and a \"cache\" that contains \"Z\" (it's what we will feed in to the corresponding backward function). To use it you could just call:\npython\nA, activation_cache = relu(Z)\n\n\nFor more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step.\nExercise: Implement the forward propagation of the LINEAR->ACTIVATION layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation \"g\" can be sigmoid() or relu(). Use linear_forward() and the correct activation function.",
"# GRADED FUNCTION: linear_activation_forward\n\ndef linear_activation_forward(A_prev, W, b, activation):\n \"\"\"\n Implement the forward propagation for the LINEAR->ACTIVATION layer\n\n Arguments:\n A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)\n W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n b -- bias vector, numpy array of shape (size of the current layer, 1)\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n\n Returns:\n A -- the output of the activation function, also called the post-activation value \n cache -- a python dictionary containing \"linear_cache\" and \"activation_cache\";\n stored for computing the backward pass efficiently\n \"\"\"\n \n if activation == \"sigmoid\":\n # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n ### START CODE HERE ### (≈ 2 lines of code)\n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = sigmoid(Z)\n ### END CODE HERE ###\n \n elif activation == \"relu\":\n # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n ### START CODE HERE ### (≈ 2 lines of code)\n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = relu(Z)\n ### END CODE HERE ###\n \n assert (A.shape == (W.shape[0], A_prev.shape[1]))\n cache = (linear_cache, activation_cache)\n\n return A, cache\n\nA_prev, W, b = linear_activation_forward_test_case()\n\nA, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = \"sigmoid\")\nprint(\"With sigmoid: A = \" + str(A))\n\nA, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = \"relu\")\nprint(\"With ReLU: A = \" + str(A))",
"Expected output:\n<table style=\"width:35%\">\n <tr>\n <td> **With sigmoid: A ** </td>\n <td > [[ 0.96076066 0.99961336]]</td> \n </tr>\n <tr>\n <td> **With ReLU: A ** </td>\n <td > [[ 3.1980455 7.85763489]]</td> \n </tr>\n</table>\n\nNote: In deep learning, the \"[LINEAR->ACTIVATION]\" computation is counted as a single layer in the neural network, not two layers. \nd) L-Layer Model\nFor even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (linear_activation_forward with RELU) $L-1$ times, then follows that with one linear_activation_forward with SIGMOID.\n<img src=\"images/model_architecture_kiank.png\" style=\"width:600px;height:300px;\">\n<caption><center> Figure 2 : [LINEAR -> RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID model</center></caption><br>\nExercise: Implement the forward propagation of the above model.\nInstruction: In the code below, the variable AL will denote $A^{[L]} = \\sigma(Z^{[L]}) = \\sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called Yhat, i.e., this is $\\hat{Y}$.) \nTips:\n- Use the functions you had previously written \n- Use a for loop to replicate [LINEAR->RELU] (L-1) times\n- Don't forget to keep track of the caches in the \"caches\" list. To add a new value c to a list, you can use list.append(c).",
"# GRADED FUNCTION: L_model_forward\n\ndef L_model_forward(X, parameters):\n \"\"\"\n Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation\n \n Arguments:\n X -- data, numpy array of shape (input size, number of examples)\n parameters -- output of initialize_parameters_deep()\n \n Returns:\n AL -- last post-activation value\n caches -- list of caches containing:\n every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2)\n the cache of linear_sigmoid_forward() (there is one, indexed L-1)\n \"\"\"\n\n caches = []\n A = X\n L = len(parameters) // 2 # number of layers in the neural network\n \n # Implement [LINEAR -> RELU]*(L-1). Add \"cache\" to the \"caches\" list.\n for l in range(1, L):\n A_prev = A \n ### START CODE HERE ### (≈ 2 lines of code)\n A, cache = linear_activation_forward(A_prev, \n parameters['W' + str(l)], \n parameters['b' + str(l)], \n activation='relu')\n caches.append(cache)\n \n ### END CODE HERE ###\n \n # Implement LINEAR -> SIGMOID. Add \"cache\" to the \"caches\" list.\n ### START CODE HERE ### (≈ 2 lines of code)\n AL, cache = linear_activation_forward(A, \n parameters['W' + str(L)], \n parameters['b' + str(L)], \n activation='sigmoid')\n caches.append(cache)\n \n ### END CODE HERE ###\n \n assert(AL.shape == (1, X.shape[1]))\n \n return AL, caches\n\nX, parameters = L_model_forward_test_case()\nAL, caches = L_model_forward(X, parameters)\nprint(\"AL = \" + str(AL))\nprint(\"Length of caches list = \" + str(len(caches)))",
"<table style=\"width:40%\">\n <tr>\n <td> **AL** </td>\n <td > [[ 0.0844367 0.92356858]]</td> \n </tr>\n <tr>\n <td> **Length of caches list ** </td>\n <td > 2</td> \n </tr>\n</table>\n\nGreat! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in \"caches\". Using $A^{[L]}$, you can compute the cost of your predictions.\n5 - Cost function\nNow you will implement forward and backward propagation. You need to compute the cost, because you want to check if your model is actually learning.\nExercise: Compute the cross-entropy cost $J$, using the following formula: $$-\\frac{1}{m} \\sum\\limits_{i = 1}^{m} (y^{(i)}\\log\\left(a^{[L] (i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{L}\\right)) \\tag{7}$$",
"# GRADED FUNCTION: compute_cost\n\ndef compute_cost(AL, Y):\n \"\"\"\n Implement the cost function defined by equation (7).\n\n Arguments:\n AL -- probability vector corresponding to your label predictions, shape (1, number of examples)\n Y -- true \"label\" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)\n\n Returns:\n cost -- cross-entropy cost\n \"\"\"\n \n m = Y.shape[1]\n\n # Compute loss from aL and y.\n ### START CODE HERE ### (≈ 1 lines of code)\n cost = (-1 / m) * np.sum(np.multiply(Y, np.log(AL)) + np.multiply(1 - Y, np.log(1 - AL)))\n ### END CODE HERE ###\n \n cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).\n assert(cost.shape == ())\n \n return cost\n\nY, AL = compute_cost_test_case()\n\nprint(\"cost = \" + str(compute_cost(AL, Y)))",
"Expected Output:\n<table>\n\n <tr>\n <td>**cost** </td>\n <td> 0.41493159961539694</td> \n </tr>\n</table>\n\n6 - Backward propagation module\nJust like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters. \nReminder: \n<img src=\"images/backprop_kiank.png\" style=\"width:650px;height:250px;\">\n<caption><center> Figure 3 : Forward and Backward propagation for LINEAR->RELU->LINEAR->SIGMOID <br> The purple blocks represent the forward propagation, and the red blocks represent the backward propagation. </center></caption>\n<!-- \nFor those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows:\n\n$$\\frac{d \\mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \\frac{d\\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\\frac{{da^{[2]}}}{{dz^{[2]}}}\\frac{{dz^{[2]}}}{{da^{[1]}}}\\frac{{da^{[1]}}}{{dz^{[1]}}} \\tag{8} $$\n\nIn order to calculate the gradient $dW^{[1]} = \\frac{\\partial L}{\\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \\times \\frac{\\partial z^{[1]} }{\\partial W^{[1]}}$. 
During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted.\n\nEquivalently, in order to calculate the gradient $db^{[1]} = \\frac{\\partial L}{\\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \\times \\frac{\\partial z^{[1]} }{\\partial b^{[1]}}$.\n\nThis is why we talk about **backpropagation**.\n!-->\n\nNow, similar to forward propagation, you are going to build the backward propagation in three steps:\n- LINEAR backward\n- LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation\n- [LINEAR -> RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model)\n6.1 - Linear backward\nFor layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation).\nSuppose you have already calculated the derivative $dZ^{[l]} = \\frac{\\partial \\mathcal{L} }{\\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]} dA^{[l-1]})$.\n<img src=\"images/linearback_kiank.png\" style=\"width:250px;height:300px;\">\n<caption><center> Figure 4 </center></caption>\nThe three outputs $(dW^{[l]}, db^{[l]}, dA^{[l]})$ are computed using the input $dZ^{[l]}$.Here are the formulas you need:\n$$ dW^{[l]} = \\frac{\\partial \\mathcal{L} }{\\partial W^{[l]}} = \\frac{1}{m} dZ^{[l]} A^{[l-1] T} \\tag{8}$$\n$$ db^{[l]} = \\frac{\\partial \\mathcal{L} }{\\partial b^{[l]}} = \\frac{1}{m} \\sum_{i = 1}^{m} dZ^{l}\\tag{9}$$\n$$ dA^{[l-1]} = \\frac{\\partial \\mathcal{L} }{\\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \\tag{10}$$\nExercise: Use the 3 formulas above to implement linear_backward().",
"# GRADED FUNCTION: linear_backward\n\ndef linear_backward(dZ, cache):\n \"\"\"\n Implement the linear portion of backward propagation for a single layer (layer l)\n\n Arguments:\n dZ -- Gradient of the cost with respect to the linear output (of current layer l)\n cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer\n\n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \"\"\"\n A_prev, W, b = cache\n m = A_prev.shape[1]\n\n ### START CODE HERE ### (≈ 3 lines of code)\n dW = np.dot(dZ, cache[0].T) / m\n db = np.squeeze(np.sum(dZ, axis=1, keepdims=True)) / m\n dA_prev = np.dot(cache[1].T, dZ)\n ### END CODE HERE ###\n \n assert (dA_prev.shape == A_prev.shape)\n assert (dW.shape == W.shape)\n assert (isinstance(db, float))\n \n return dA_prev, dW, db\n\n# Set up some test inputs\ndZ, linear_cache = linear_backward_test_case()\n\ndA_prev, dW, db = linear_backward(dZ, linear_cache)\nprint (\"dA_prev = \"+ str(dA_prev))\nprint (\"dW = \" + str(dW))\nprint (\"db = \" + str(db))",
"Expected Output: \n<table style=\"width:90%\">\n <tr>\n <td> **dA_prev** </td>\n <td > [[ 2.38272385 5.85438014]\n [ 6.31969219 15.52755701]\n [ -3.97876302 -9.77586689]] </td> \n </tr> \n\n <tr>\n <td> **dW** </td>\n <td > [[ 2.77870358 -0.05500058 -5.13144969]] </td> \n </tr> \n\n <tr>\n <td> **db** </td>\n <td> 5.527840195 </td> \n </tr> \n\n</table>\n\n6.2 - Linear-Activation backward\nNext, you will create a function that merges the two helper functions: linear_backward and the backward step for the activation linear_activation_backward. \nTo help you implement linear_activation_backward, we provided two backward functions:\n- sigmoid_backward: Implements the backward propagation for SIGMOID unit. You can call it as follows:\npython\ndZ = sigmoid_backward(dA, activation_cache)\n\nrelu_backward: Implements the backward propagation for RELU unit. You can call it as follows:\n\npython\ndZ = relu_backward(dA, activation_cache)\nIf $g(.)$ is the activation function, \nsigmoid_backward and relu_backward compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \\tag{11}$$. \nExercise: Implement the backpropagation for the LINEAR->ACTIVATION layer.",
"# GRADED FUNCTION: linear_activation_backward\n\ndef linear_activation_backward(dA, cache, activation):\n \"\"\"\n Implement the backward propagation for the LINEAR->ACTIVATION layer.\n \n Arguments:\n dA -- post-activation gradient for current layer l \n cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n \n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \"\"\"\n linear_cache, activation_cache = cache\n \n if activation == \"relu\":\n ### START CODE HERE ### (≈ 2 lines of code)\n dZ = relu_backward(dA, activation_cache)\n ### END CODE HERE ###\n \n elif activation == \"sigmoid\":\n ### START CODE HERE ### (≈ 2 lines of code)\n dZ = sigmoid_backward(dA, activation_cache)\n ### END CODE HERE ###\n \n # Shorten the code\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n \n return dA_prev, dW, db\n\nAL, linear_activation_cache = linear_activation_backward_test_case()\n\ndA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = \"sigmoid\")\nprint (\"sigmoid:\")\nprint (\"dA_prev = \"+ str(dA_prev))\nprint (\"dW = \" + str(dW))\nprint (\"db = \" + str(db) + \"\\n\")\n\ndA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = \"relu\")\nprint (\"relu:\")\nprint (\"dA_prev = \"+ str(dA_prev))\nprint (\"dW = \" + str(dW))\nprint (\"db = \" + str(db))",
"Expected output with sigmoid:\n<table style=\"width:100%\">\n <tr>\n <td > dA_prev </td> \n <td >[[ 0.08982777 0.00226265]\n [ 0.23824996 0.00600122]\n [-0.14999783 -0.00377826]] </td> \n\n </tr> \n\n <tr>\n <td > dW </td> \n <td > [[-0.06001514 -0.09687383 -0.10598695]] </td> \n </tr> \n\n <tr>\n <td > db </td> \n <td > 0.061800984273 </td> \n </tr> \n</table>\n\nExpected output with relu\n<table style=\"width:100%\">\n <tr>\n <td > dA_prev </td> \n <td > [[ 2.38272385 5.85438014]\n [ 6.31969219 15.52755701]\n [ -3.97876302 -9.77586689]] </td> \n\n </tr> \n\n <tr>\n <td > dW </td> \n <td > [[ 2.77870358 -0.05500058 -5.13144969]] </td> \n </tr> \n\n <tr>\n <td > db </td> \n <td > 5.527840195 </td> \n </tr> \n</table>\n\n6.3 - L-Model Backward\nNow you will implement the backward function for the whole network. Recall that when you implemented the L_model_forward function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the L_model_backward function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass. \n<img src=\"images/mn_backward.png\" style=\"width:450px;height:300px;\">\n<caption><center> Figure 5 : Backward pass </center></caption>\n Initializing backpropagation:\nTo backpropagate through this network, we know that the output is, \n$A^{[L]} = \\sigma(Z^{[L]})$. Your code thus needs to compute dAL $= \\frac{\\partial \\mathcal{L}}{\\partial A^{[L]}}$.\nTo do so, use this formula (derived using calculus which you don't need in-depth knowledge of):\npython\ndAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL\nYou can then use this post-activation gradient dAL to keep going backward. 
As seen in Figure 5, you can now feed in dAL into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a for loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula : \n$$grads[\"dW\" + str(l)] = dW^{[l]}\\tag{15} $$\nFor example, for $l=3$ this would store $dW^{[l]}$ in grads[\"dW3\"].\nExercise: Implement backpropagation for the [LINEAR->RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID model.",
"# GRADED FUNCTION: L_model_backward\n\ndef L_model_backward(AL, Y, caches):\n \"\"\"\n Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group\n \n Arguments:\n AL -- probability vector, output of the forward propagation (L_model_forward())\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat)\n caches -- list of caches containing:\n every cache of linear_activation_forward() with \"relu\" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)\n the cache of linear_activation_forward() with \"sigmoid\" (it's caches[L-1])\n \n Returns:\n grads -- A dictionary with the gradients\n grads[\"dA\" + str(l)] = ... \n grads[\"dW\" + str(l)] = ...\n grads[\"db\" + str(l)] = ... \n \"\"\"\n grads = {}\n L = len(caches) # the number of layers\n m = AL.shape[1]\n Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n \n # Initializing the backpropagation\n ### START CODE HERE ### (1 line of code)\n dAL = dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))\n ### END CODE HERE ###\n \n # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: \"AL, Y, caches\". Outputs: \"grads[\"dAL\"], grads[\"dWL\"], grads[\"dbL\"]\n ### START CODE HERE ### (approx. 2 lines)\n current_cache = caches[-1]\n grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_backward(sigmoid_backward(dAL, \n current_cache[1]), \n current_cache[0])\n ### END CODE HERE ###\n \n for l in reversed(range(L-1)):\n # lth layer: (RELU -> LINEAR) gradients.\n # Inputs: \"grads[\"dA\" + str(l + 2)], caches\". Outputs: \"grads[\"dA\" + str(l + 1)] , grads[\"dW\" + str(l + 1)] , grads[\"db\" + str(l + 1)] \n ### START CODE HERE ### (approx. 
5 lines)\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_backward(sigmoid_backward(dAL, caches[1]), caches[0])\n grads[\"dA\" + str(l + 1)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n ### END CODE HERE ###\n\n return grads\n\nX_assess, Y_assess, AL, caches = L_model_backward_test_case()\ngrads = L_model_backward(AL, Y_assess, caches)\nprint (\"dW1 = \"+ str(grads[\"dW1\"]))\nprint (\"db1 = \"+ str(grads[\"db1\"]))\nprint (\"dA1 = \"+ str(grads[\"dA1\"]))",
"Expected Output\n<table style=\"width:60%\">\n\n <tr>\n <td > dW1 </td> \n <td > [[-0.09686122 -0.04840482 -0.11864308]] </td> \n </tr> \n\n <tr>\n <td > db1 </td> \n <td > -0.262594998379 </td> \n </tr> \n\n <tr>\n <td > dA1 </td> \n <td > [[-0.71011462 -0.22925516]\n [-0.17330152 -0.05594909]\n [-0.03831107 -0.01236844]] </td> \n\n </tr> \n</table>\n\n6.4 - Update Parameters\nIn this section you will update the parameters of the model, using gradient descent: \n$$ W^{[l]} = W^{[l]} - \\alpha \\text{ } dW^{[l]} \\tag{16}$$\n$$ b^{[l]} = b^{[l]} - \\alpha \\text{ } db^{[l]} \\tag{17}$$\nwhere $\\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary. \nExercise: Implement update_parameters() to update your parameters using gradient descent.\nInstructions:\nUpdate parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$.",
"# GRADED FUNCTION: update_parameters\n\ndef update_parameters(parameters, grads, learning_rate):\n \"\"\"\n Update parameters using gradient descent\n \n Arguments:\n parameters -- python dictionary containing your parameters \n grads -- python dictionary containing your gradients, output of L_model_backward\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n parameters[\"W\" + str(l)] = ... \n parameters[\"b\" + str(l)] = ...\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural network\n\n # Update rule for each parameter. Use a for loop.\n ### START CODE HERE ### (≈ 3 lines of code)\n for l in range(L):\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\n ### END CODE HERE ###\n \n return parameters\n\nparameters, grads = update_parameters_test_case()\nparameters = update_parameters(parameters, grads, 0.1)\n\nprint (\"W1 = \" + str(parameters[\"W1\"]))\nprint (\"b1 = \" + str(parameters[\"b1\"]))\nprint (\"W2 = \" + str(parameters[\"W2\"]))\nprint (\"b2 = \" + str(parameters[\"b2\"]))\nprint (\"W3 = \" + str(parameters[\"W3\"]))\nprint (\"b3 = \" + str(parameters[\"b3\"]))",
"Expected Output:\n<table style=\"width:100%\"> \n <tr>\n <td > W1 </td> \n <td > [[ 1.72555789 0.3700272 0.07818896]\n [-1.8634927 -0.2773882 -0.35475898]\n [-0.08274148 -0.62700068 -0.04381817]\n [-0.47721803 -1.31386475 0.88462238]] </td> \n </tr> \n\n <tr>\n <td > b1 </td> \n <td > [[-0.07593768]\n [-0.07593768]\n [-0.07593768]\n [-0.07593768]] </td> \n </tr> \n <tr>\n <td > W2 </td> \n <td > [[ 0.71838378 1.70957306 0.05003364 -0.40467741]\n [-0.54535995 -1.54647732 0.98236743 -1.10106763]\n [-1.18504653 -0.2056499 1.48614836 0.23671627]] </td> \n </tr> \n\n <tr>\n <td > b2 </td> \n <td > [[-0.08616376]\n [-0.08616376]\n [-0.08616376]] </td> \n </tr> \n <tr>\n <td > W3 </td> \n <td > [[-0.88352436 -0.7129932 0.62524497]\n [-0.02025258 -0.76883635 -0.23003072]] </td> \n </tr> \n\n <tr>\n <td > b3 </td> \n <td > [[ 0.08416196]\n [ 0.08416196]] </td> \n </tr> \n\n</table>\n\n7 - Conclusion\nCongrats on implementing all the functions required for building a deep neural network! \nWe know it was a long assignment but going forward it will only get better. The next part of the assignment is easier. \nIn the next assignment you will put all these together to build two models:\n- A two-layer neural network\n- An L-layer neural network\nYou will in fact use these models to classify cat vs non-cat images!"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
alberto-antonietti/nest-simulator
|
doc/model_details/IAF_neurons_singularity.ipynb
|
gpl-2.0
|
[
"IAF neurons singularity\nThis notebook describes how NEST handles the singularities appearing in the ODE's of integrate-and-fire model neurons with alpha- or exponentially-shaped current, when the membrane and the synaptic time-constants are identical.",
"import sympy as sp\nsp.init_printing(use_latex=True)\nfrom sympy.matrices import zeros\ntau_m, tau_s, C, h = sp.symbols('tau_m, tau_s, C, h')",
"For alpha-shaped currents we have:",
"A = sp.Matrix([[-1/tau_s,0,0],[1,-1/tau_s,0],[0,1/C,-1/tau_m]])",
"Non-singular case ($\\tau_m\\neq \\tau_s$)\nThe propagator is:",
"PA = sp.simplify(sp.exp(A*h))\nPA",
"Note that the entry in the third line and the second column $A_{32}$ would also appear in the propagator matrix in case of an exponentially shaped current\nSingular case ($\\tau_m = \\tau_s$)\nWe have",
"As = sp.Matrix([[-1/tau_m,0,0],[1,-1/tau_m,0],[0,1/C,-1/tau_m]])\nAs",
"The propagator is",
"PAs = sp.simplify(sp.exp(As*h))\nPAs",
"Numeric stability of propagator elements\nFor the lines $\\tau_s\\rightarrow\\tau_m$ the entry $PA_{32}$ becomes numerically unstable, since denominator and enumerator go to zero.\n$1.$ We show that $PAs_{32}$ is the limit of $PA_{32}(\\tau_s)$ for $\\tau_s\\rightarrow\\tau_m$.:",
"PA_32 = PA.row(2).col(1)[0]\nsp.limit(PA_32, tau_s, tau_m)",
"$2.$ The Taylor-series up to the second order of the function $PA_{32}(\\tau_s)$ is:",
"PA_32_series = PA_32.series(x=tau_s,x0=tau_m,n=2)\nPA_32_series ",
"Therefore we have \n$T(PA_{32}(\\tau_s,\\tau_m))=PAs_{32}+PA_{32}^{lin}+O(2)$ where $PA_{32}^{lin}=h^2(-\\tau_m + \\tau_s)*exp(-h/\\tau_m)/(2C\\tau_m^2)$\n$3.$ We define\n$dev:=|PA_{32}-PAs_{32}|$\nWe also define $PA_{32}^{real}$ which is the correct value of P32 without misscalculation (instability).\nIn the following we assume $0<|\\tau_s-\\tau_m|<0.1$. We consider two different cases\na) When $dev \\geq 2|PA_{32}^{lin}|$ we do not trust the numeric evaluation of $PA_{32}$, since it strongly deviates from the first order correction. In this case the error we make is\n$|PAs_{32}-PA_{32}^{real}|\\approx |P_{32}^{lin}|$\nb) When $dev \\le |2PA_{32}^{lin}|$ we trust the numeric evaluation of $PA_{32}$. In this case the maximal error occurs when $dev\\approx 2 PA_{32}^{lin}$ due to numeric instabilities. The order of the error is again\n$|PAs_{32}-PA_{32}^{real}|\\approx |P_{32}^{lin}|$\nThe entry $A_{31}$ is numerically unstable, too and we treat it analogously.\nTests and examples\nWe will now show that the stability criterion explained above leads to a reasonable behavior for $\\tau_s\\rightarrow\\tau_m$",
"import nest\nimport numpy as np\nimport pylab as pl",
"Neuron, simulation and plotting parameters",
"taum = 10.\nC_m = 250.\n# array of distances between tau_m and tau_ex\nepsilon_array = np.hstack(([0.],10.**(np.arange(-6.,1.,1.))))[::-1]\ndt = 0.1\nfig = pl.figure(1)\nNUM_COLORS = len(epsilon_array)\ncmap = pl.get_cmap('gist_ncar')\nmaxVs = []",
"Loop through epsilon array",
"for i,epsilon in enumerate(epsilon_array):\n nest.ResetKernel() # reset simulation kernel \n nest.SetKernelStatus({'resolution':dt})\n\n # Current based alpha neuron \n neuron = nest.Create('iaf_psc_alpha') \n nest.SetStatus(neuron,{'C_m':C_m,'tau_m':taum,'t_ref':0.,'V_reset':-70.,'V_th':1e32,\n 'tau_syn_ex':taum+epsilon,'tau_syn_in':taum+epsilon,'I_e':0.})\n \n # create a spike generator\n spikegenerator_ex=nest.Create('spike_generator')\n nest.SetStatus(spikegenerator_ex,{'spike_times': [50.]})\n \n # create a voltmeter\n vm = nest.Create('voltmeter',params={'interval':dt})\n\n ## connect spike generator and voltmeter to the neuron\n\n nest.Connect(spikegenerator_ex, neuron,'all_to_all',{'weight':100.})\n nest.Connect(vm, neuron)\n\n # run simulation for 200ms\n nest.Simulate(200.) \n\n # read out recording time and voltage from voltmeter\n times=nest.GetStatus(vm)[0]['events']['times']\n voltage=nest.GetStatus(vm)[0]['events']['V_m']\n \n # store maximum value of voltage trace in array\n maxVs.append(np.max(voltage))\n\n # plot voltage trace\n if epsilon == 0.:\n pl.plot(times,voltage,'--',color='black',label='singular')\n else:\n pl.plot(times,voltage,color = cmap(1.*i/NUM_COLORS),label=str(epsilon))\n\npl.legend()\npl.xlabel('time t (ms)')\npl.ylabel('voltage V (mV)')",
"Show maximum values of voltage traces",
"fig = pl.figure(2)\npl.semilogx(epsilon_array,maxVs,color='red',label='maxV')\n#show singular solution as horizontal line\npl.semilogx(epsilon_array,np.ones(len(epsilon_array))*maxVs[-1],color='black',label='singular')\npl.xlabel('epsilon')\npl.ylabel('max(voltage V) (mV)')\npl.legend()\n\npl.show()",
"The maximum of the voltage traces show that the non-singular case nicely converges to the singular one and no numeric instabilities occur."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
Automating-GIS-processes/2017
|
source/codes/Lesson3-point-in-polygon.ipynb
|
mit
|
[
"Spatial queries: Point in Polygon & Intersect\nFinding out if a certain point is located inside or outside of an area, or finding out if a line intersects with another line or polygon are fundamental geospatial operations that are often used e.g. to select data based on location. Such spatial queries are one of the typical first steps of the workflow when doing spatial analysis. Performing a spatial join (will be introduced later) between two spatial datasets is one of the most typical applications where Point in Polygon (PIP) query is used. \nHow to check if point is inside a polygon?\nComputationally, detecting if a point is inside a Polygon is most commonly done using a specific formula called Ray Casting algorithm. Luckily, we do not need to create such a function ourselves for conducting the Point in Polygon (PIP) query. Instead, we can take advantage of Shapely's binary predicates that can evaluate the topolocical relationships between geographical objects, such as the PIP as we're interested here. \nThere are basically two ways of conducting PIP in Shapely: \n\nusing a function called .within() that checks if a point is within a polygon\nusing a function called .contains() that checks if a polygon contains a point\n\nNotice: even though we are talking here about Point in Polygon operation, it is also possible to check if a LineString or Polygon is inside another Polygon. \n\nLet's first create a Polygon using a list of coordinate-tuples and a couple of Point objects",
"from shapely.geometry import Point, Polygon\n\n# Create Point objects\np1 = Point(24.952242, 60.1696017)\np2 = Point(24.976567, 60.1612500)\n\n\n# Create a Polygon\ncoords = [(24.950899, 60.169158), (24.953492, 60.169158), (24.953510, 60.170104), (24.950958, 60.169990)]\npoly = Polygon(coords)\n\n# Let's check what we have\nprint(p1)\nprint(p2)\nprint(poly)",
"Let's check if those points are within the polygon",
"# Check if p1 is within the polygon using the within function\np1_within = p1.within(poly)\n\n# Check if p2 is within the polygon\np2_within = p2.within(poly)\n\n# Print the results\nprint(\"Is p1 within the polygon?: \", p1_within)\nprint(\"Is p2 within the polygon?: \", p2_within)",
"Okey, so we can see that the first point seems to be inside that polygon and the other one doesn't. \n\nIn fact, the first point is close to the center of the polygon as we can see:",
"print(p1)\nprint(poly.centroid)",
"It is also possible to do PIP other way around, i.e. to check if polygon contains a point:",
"# Does polygon contain point 1\nprint(\"Does polygon contain p1?: \", poly.contains(p1))\n\n# What about the other point? \nprint(\"Does polygon contain p2?: \", poly.contains(p2))",
"Thus, both ways has the same results. \nWhich one should you use then? Well, it depends: \n\n\nif you have many points and just one polygon and you try to find out which one of them is inside the polygon:\n\nyou need to iterate over the points and check one at a time if it is within() the polygon specified\n\n\n\nif you have many polygons and just one point and you want to find out which polygon contains the point\n\n\nyou need to iterate over the polygons until you find a polygon that contains() the point specified (assuming there are no overlapping polygons)\n\n\nIntersect\nAnother typical geospatial operation is to see if a geometry intersect or touches another one. The difference between these two is that:\n\nif objects intersect, the boundary and interior of an object needs to intersect in any way with those of the other. \nIf an object touches the other one, it is only necessary to have (at least) a single point of their boundaries in common but their interiors shoud NOT intersect.\n\nLet's try these out.\n\nLet's create two LineStrings",
"from shapely.geometry import LineString, MultiLineString\n\n# Create two lines\nline_a = LineString([(0, 0), (1, 1)])\nline_b = LineString([(1, 1), (0, 2)])",
"Let's see if they intersect",
"line_a.intersects(line_b)",
"Do they also touch each other?",
"line_a.touches(line_b)",
"Indeed, they do and we can see this by plotting the features together",
"# Create a MultiLineString\nmulti_line = MultiLineString([line_a, line_b])\nmulti_line",
"Thus, the line_b continues from the same node ( (1,1) ) where line_a ends. \n\nHowever, if the lines overlap fully, they don't touch, as we can see:",
"# Check if line_a touches itself\nprint(\"Touches?: \", line_a.touches(line_a))\n\n# However, it does intersect\nprint(\"Intersects?: \", line_a.intersects(line_a))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
bkuczenski/lca-tools-datafiles
|
doc/Jaccard Index matching example.ipynb
|
gpl-3.0
|
[
"import re\nfrom gensim import models\nfrom scipy import spatial\nimport numpy as np\nimport os.path\nimport urllib\nimport gzip\nimport json\n\ndef search_tags(entity, search):\n \"\"\"\n This function searches through all the 'tags' (semantic content) of a data set\n and returns 'true' if the search expression is found. case insensitive.\n \"\"\"\n all_tags = '; '.join([str(x) for x in entity['tags'].values()])\n return bool(re.search(search, all_tags, flags=re.IGNORECASE))\n\ndef gunzipFile(inFileName, outFileName):\n inF = gzip.open(inFileName, 'rb')\n outF = open(outFileName, 'wb')\n outF.write( inF.read() )\n inF.close()\n outF.close() \n\ndef jaccardDistance(sent1, sent2, stoplist):\n sent1 = re.sub('[^0-9a-zA-Z]+', ' ', sent1)\n sent2 = re.sub('[^0-9a-zA-Z]+', ' ', sent2)\n tokens1 = [word for word in sent1.replace(\"…\", \" \").lower().split() if word not in stoplist]\n tokens2 = [word for word in sent2.replace(\"…\", \" \").lower().split() if word not in stoplist]\n \n # subtract from 1, so that 0 means all words in common and 1 means no words in common\n jaccardIndex = 1.0 - float(len(set.intersection(set(tokens1), set(tokens2)))) / float(len(set.union(set(tokens1), set(tokens2))))\n return(jaccardIndex)",
"Load in the stopwords file. These are common words which we wish to exclude when performing comparisons (a, an, the, etc). Every word is separated by a new line.",
"stopWordsFile = \"en.txt\"\nwith open(stopWordsFile) as f:\n stoplist = [x.strip('\\n') for x in f.readlines()]",
"Load in the data from the catalog",
"# http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-ones-from-json-in-python\n# need this to deal with unicode errors\ndef byteify(input):\n if isinstance(input, dict):\n return {byteify(key): byteify(value)\n for key, value in input.iteritems()}\n elif isinstance(input, list):\n return [byteify(element) for element in input]\n elif isinstance(input, unicode):\n return input.encode('utf-8')\n else:\n return input\n\ngunzipFile('../catalogs/gabi_2016_professional-database-2016.json.gz', \n '../catalogs/gabi_2016_professional-database-2016.json')\ngunzipFile('../catalogs/uslci_ecospold.json.gz', \n '../catalogs/uslci_ecospold.json')\n\nwith open('../catalogs/gabi_2016_professional-database-2016.json') as data_file: \n gabi = json.load(data_file, encoding='utf-8')\n\nwith open('../catalogs/uslci_ecospold.json') as data_file: \n uslci = json.load(data_file, encoding='utf-8')\n \ngabi = byteify(gabi)\nuslci = byteify(uslci)\n\nroundwood = [flow for flow in uslci['flows'] if search_tags(flow,'roundwood, softwood')]\nroundwoodExample = roundwood[0]\n\n# number of top scores to show\nnumTopScores = 10\n\nflowNames = []\ndistValues = []\nfor flow in gabi['archives'][0]['flows']:\n name = flow['tags']['Name']\n flowNames.append(name)\n dist = jaccardDistance(roundwoodExample['tags']['Name'], name, stoplist)\n distValues.append(dist)\n\nlen(flowNames)\n \n# figure out top scores\narr = np.array(distValues)\ntopIndices = arr.argsort()[0:numTopScores]\ntopScores = np.array(distValues)[topIndices]\n\nprint 'Process name to match:'\nprint roundwoodExample['tags']['Name']\n\nprint 'Matches using Jaccard Index:'\nfor i, s in zip(topIndices, topScores):\n if s < 9999:\n print(flowNames[i],s)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
mne-tools/mne-tools.github.io
|
0.12/_downloads/plot_artifacts_correction_rejection.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
".. _tut_artifacts_reject:\nRejecting bad data (channels and segments)",
"import numpy as np\nimport mne\nfrom mne.datasets import sample\n\ndata_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nraw = mne.io.read_raw_fif(raw_fname)",
".. _marking_bad_channels:\nMarking bad channels\nSometimes some MEG or EEG channels are not functioning properly\nfor various reasons. These channels should be excluded from\nanalysis by marking them bad as. This is done by setting the 'bads'\nin the measurement info of a data container object (e.g. Raw, Epochs,\nEvoked). The info['bads'] value is a Python string. Here is\nexample:",
"raw.info['bads'] = ['MEG 2443']",
"Why setting a channel bad?: If a channel does not show\na signal at all (flat) it is important to exclude it from the\nanalysis. If a channel as a noise level significantly higher than the\nother channels it should be marked as bad. Presence of bad channels\ncan have terribe consequences on down stream analysis. For a flat channel\nsome noise estimate will be unrealistically low and\nthus the current estimate calculations will give a strong weight\nto the zero signal on the flat channels and will essentially vanish.\nNoisy channels can also affect others when signal-space projections\nor EEG average electrode reference is employed. Noisy bad channels can\nalso adversely affect averaging and noise-covariance matrix estimation by\ncausing unnecessary rejections of epochs.\nRecommended ways to identify bad channels are:\n\n\nObserve the quality of data during data\n acquisition and make notes of observed malfunctioning channels to\n your measurement protocol sheet.\n\n\nView the on-line averages and check the condition of the channels.\n\n\nCompute preliminary off-line averages with artifact rejection,\n SSP/ICA, and EEG average electrode reference computation\n off and check the condition of the channels.\n\n\nView raw data with :func:mne.io.Raw.plot without SSP/ICA\n enabled and identify bad channels.\n\n\n.. note::\n Setting the bad channels should be done as early as possible in the\n analysis pipeline. That's why it's recommended to set bad channels\n the raw objects/files. 
If present in the raw data\n files, the bad channel selections will be automatically transferred\n to averaged files, noise-covariance matrices, forward solution\n files, and inverse operator decompositions.\nThe actual removal happens using :func:pick_types <mne.pick_types> with\nexclude='bads' option (see :ref:picking_channels).\nInstead of removing the bad channels, you can also try to repair them.\nThis is done by interpolation of the data from other channels.\nTo illustrate how to use channel interpolation let us load some data.",
"# Reading data with a bad channel marked as bad:\nfname = data_path + '/MEG/sample/sample_audvis-ave.fif'\nevoked = mne.read_evokeds(fname, condition='Left Auditory',\n baseline=(None, 0))\n\n# restrict the evoked to EEG and MEG channels\nevoked.pick_types(meg=True, eeg=True, exclude=[])\n\n# plot with bads\nevoked.plot(exclude=[])\n\nprint(evoked.info['bads'])",
"Let's now interpolate the bad channels (displayed in red above)",
"evoked.interpolate_bads(reset_bads=False)",
"Let's plot the cleaned data",
"evoked.plot(exclude=[])",
".. note::\n Interpolation is a linear operation that can be performed also on\n Raw and Epochs objects.\nFor more details on interpolation see the page :ref:channel_interpolation.\n.. _marking_bad_segments:\nMarking bad raw segments with annotations\nMNE provides an :class:mne.Annotations class that can be used to mark\nsegments of raw data and to reject epochs that overlap with bad segments\nof data. The annotations are automatically synchronized with raw data as\nlong as the timestamps of raw data and annotations are in sync.\nSee :ref:sphx_glr_auto_tutorials_plot_brainstorm_auditory.py\nfor a long example exploiting the annotations for artifact removal.\nThe instances of annotations are created by providing a list of onsets and\noffsets with descriptions for each segment. The onsets and offsets are marked\nas seconds. onset refers to time from start of the data. offset is\nthe duration of the annotation. The instance of :class:mne.Annotations\ncan be added as an attribute of :class:mne.io.Raw.",
"eog_events = mne.preprocessing.find_eog_events(raw)\nn_blinks = len(eog_events)\n# Center to cover the whole blink with full duration of 0.5s:\nonset = eog_events[:, 0] / raw.info['sfreq'] - 0.25\nduration = np.repeat(0.5, n_blinks)\nraw.annotations = mne.Annotations(onset, duration, ['bad blink'] * n_blinks)\nraw.plot(events=eog_events) # To see the annotated segments.",
"As the data is epoched, all the epochs overlapping with segments whose\ndescription starts with 'bad' are rejected by default. To turn rejection off,\nuse keyword argument reject_by_annotation=False when constructing\n:class:mne.Epochs. When working with neuromag data, the first_samp\noffset of raw acquisition is also taken into account the same way as with\nevent lists. For more see :class:mne.Epochs and :class:mne.Annotations.\n.. _rejecting_bad_epochs:\nRejecting bad epochs\nWhen working with segmented data (Epochs) MNE offers a quite simple approach\nto automatically reject/ignore bad epochs. This is done by defining\nthresholds for peak-to-peak amplitude and flat signal detection.\nIn the following code we build Epochs from Raw object. One of the provided\nparameter is named reject. It is a dictionary where every key is a\nchannel type as a sring and the corresponding values are peak-to-peak\nrejection parameters (amplitude ranges as floats). Below we define\nthe peak-to-peak rejection values for gradiometers,\nmagnetometers and EOG:",
"reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)",
".. note::\n The rejection values can be highly data dependent. You should be careful\n when adjusting these values. Make sure not too many epochs are rejected\n and look into the cause of the rejections. Maybe it's just a matter\n of marking a single channel as bad and you'll be able to save a lot\n of data.\nWe then construct the epochs",
"events = mne.find_events(raw, stim_channel='STI 014')\nevent_id = {\"auditory/left\": 1}\ntmin = -0.2 # start of each epoch (200ms before the trigger)\ntmax = 0.5 # end of each epoch (500ms after the trigger)\nbaseline = (None, 0) # means from the first instant to t = 0\npicks_meg = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,\n stim=False, exclude='bads')\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,\n picks=picks_meg, baseline=baseline, reject=reject,\n reject_by_annotation=True)",
"We then drop/reject the bad epochs",
"epochs.drop_bad()",
"And plot the so-called drop log that details the reason for which some\nepochs have been dropped.",
"print(epochs.drop_log[40:45]) # only a subset\nepochs.plot_drop_log()",
"What you see is that some drop log values are empty. It means event was kept.\nIf it says 'IGNORED' is means the event_id did not contain the associated\nevent. If it gives the name of channel such as 'EOG 061' it means the\nepoch was rejected because 'EOG 061' exceeded the peak-to-peak rejection\nlimit."
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
probml/pyprobml
|
notebooks/misc/logreg_ucb_admissions_numpyro.ipynb
|
mit
|
[
"<a href=\"https://colab.research.google.com/github/always-newbie161/pyprobml/blob/issue_hermes78/notebooks/logreg_ucb_admissions_numpyro.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\nBinomial logistic regression for UCB admissions\nWe illustrate binary logistic regression on 2 discrete inputs using the example in sec 11.1.4 of Statistical Rethinking ed 2. \nThe numpyro code is from Du Phan's site",
"!pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro\n!pip install -q arviz\n\nimport arviz as az\n\naz.__version__\n\n!pip install causalgraphicalmodels\n\n#!pip install -U daft\n\nimport numpy as np\n\nnp.set_printoptions(precision=3)\nimport matplotlib.pyplot as plt\nimport math\nimport os\nimport warnings\nimport pandas as pd\n\nimport jax\n\nprint(\"jax version {}\".format(jax.__version__))\nprint(\"jax backend {}\".format(jax.lib.xla_bridge.get_backend().platform))\n\nimport jax.numpy as jnp\nfrom jax import random, vmap\nfrom jax.scipy.special import expit\n\nrng_key = random.PRNGKey(0)\nrng_key, rng_key_ = random.split(rng_key)\n\nimport numpyro\nimport numpyro.distributions as dist\nfrom numpyro.distributions import constraints\nfrom numpyro.distributions.transforms import AffineTransform\nfrom numpyro.diagnostics import hpdi, print_summary\nfrom numpyro.infer import Predictive\nfrom numpyro.infer import MCMC, NUTS\nfrom numpyro.infer import SVI, Trace_ELBO, init_to_value\nfrom numpyro.infer.autoguide import AutoLaplaceApproximation\nimport numpyro.optim as optim\n\n\nimport daft\nfrom causalgraphicalmodels import CausalGraphicalModel\n\nfrom sklearn.preprocessing import StandardScaler\n\nn = jax.local_device_count()\nprint(n)",
"Data",
"url = \"https://raw.githubusercontent.com/fehiepsi/rethinking-numpyro/master/data/UCBadmit.csv\"\nUCBadmit = pd.read_csv(url, sep=\";\")\nd = UCBadmit\ndisplay(d)\n\nprint(d.to_latex(index=False))\n\ndat_list = dict(\n admit=d.admit.values,\n applications=d.applications.values,\n gid=(d[\"applicant.gender\"] != \"male\").astype(int).values,\n)\n\ndat_list[\"dept_id\"] = jnp.repeat(jnp.arange(6), 2)\n\nprint(dat_list)\n\n# extract number of applicaitons for dept 2 (C)\nd.applications[dat_list[\"dept_id\"].copy() == 2]\n\nd.applications[dat_list[\"dept_id\"].copy() == 2].sum()\n\n# application rate per department\npg = jnp.stack(\n list(\n map(\n lambda k: jnp.divide(\n d.applications[dat_list[\"dept_id\"].copy() == k].values,\n d.applications[dat_list[\"dept_id\"].copy() == k].sum(),\n ),\n range(6),\n )\n ),\n axis=0,\n).T\npg = pd.DataFrame(pg, index=[\"male\", \"female\"], columns=d.dept.unique())\ndisplay(pg.round(2))\nprint(pg.to_latex())\n\n# admisions rate per department\npg = jnp.stack(\n list(\n map(\n lambda k: jnp.divide(\n d.admit[dat_list[\"dept_id\"].copy() == k].values,\n d.applications[dat_list[\"dept_id\"].copy() == k].values,\n ),\n range(6),\n )\n ),\n axis=0,\n).T\npg = pd.DataFrame(pg, index=[\"male\", \"female\"], columns=d.dept.unique())\ndisplay(pg.round(2))\nprint(pg.to_latex())",
"Model 1",
"dat_list = dict(\n admit=d.admit.values,\n applications=d.applications.values,\n gid=(d[\"applicant.gender\"] != \"male\").astype(int).values,\n)\n\n\ndef model(gid, applications, admit=None):\n a = numpyro.sample(\"a\", dist.Normal(0, 1.5).expand([2]))\n logit_p = a[gid]\n numpyro.sample(\"admit\", dist.Binomial(applications, logits=logit_p), obs=admit)\n\n\nm11_7 = MCMC(NUTS(model), num_warmup=500, num_samples=500, num_chains=4)\nm11_7.run(random.PRNGKey(0), **dat_list)\nm11_7.print_summary(0.89)\n\npost = m11_7.get_samples()\ndiff_a = post[\"a\"][:, 0] - post[\"a\"][:, 1]\ndiff_p = expit(post[\"a\"][:, 0]) - expit(post[\"a\"][:, 1])\nprint_summary({\"diff_a\": diff_a, \"diff_p\": diff_p}, 0.89, False)",
"Posterior predictive check",
"def ppc(mcmc_run, model_args):\n post = mcmc_run.get_samples()\n pred = Predictive(mcmc_run.sampler.model, post)(random.PRNGKey(2), **model_args)\n admit_pred = pred[\"admit\"]\n admit_rate = admit_pred / d.applications.values\n plt.errorbar(\n range(1, 13),\n jnp.mean(admit_rate, 0),\n jnp.std(admit_rate, 0) / 2,\n fmt=\"o\",\n c=\"k\",\n mfc=\"none\",\n ms=7,\n elinewidth=1,\n )\n plt.plot(range(1, 13), jnp.percentile(admit_rate, 5.5, 0), \"k+\")\n plt.plot(range(1, 13), jnp.percentile(admit_rate, 94.5, 0), \"k+\")\n # draw lines connecting points from same dept\n for i in range(1, 7):\n x = 1 + 2 * (i - 1) # 1,3,5,7,9,11\n y1 = d.admit.iloc[x - 1] / d.applications.iloc[x - 1] # male\n y2 = d.admit.iloc[x] / d.applications.iloc[x] # female\n plt.plot((x, x + 1), (y1, y2), \"bo-\")\n plt.annotate(d.dept.iloc[x], (x + 0.5, (y1 + y2) / 2 + 0.05), ha=\"center\", color=\"royalblue\")\n plt.gca().set(ylim=(0, 1), xticks=range(1, 13), ylabel=\"admit\", xlabel=\"case\")\n\nppc(m11_7, {\"gid\": dat_list[\"gid\"], \"applications\": dat_list[\"applications\"]})\nplt.savefig(\"admissions_ppc.pdf\", dpi=300)\nplt.show()",
"Model 2 (departmental-specific offset)",
"dat_list[\"dept_id\"] = jnp.repeat(jnp.arange(6), 2)\n\n\ndef model(gid, dept_id, applications, admit=None):\n a = numpyro.sample(\"a\", dist.Normal(0, 1.5).expand([2]))\n delta = numpyro.sample(\"delta\", dist.Normal(0, 1.5).expand([6]))\n logit_p = a[gid] + delta[dept_id]\n numpyro.sample(\"admit\", dist.Binomial(applications, logits=logit_p), obs=admit)\n\n\nm11_8 = MCMC(NUTS(model), num_warmup=2000, num_samples=2000, num_chains=4)\nm11_8.run(random.PRNGKey(0), **dat_list)\nm11_8.print_summary(0.89)\n\npost = m11_8.get_samples()\ndiff_a = post[\"a\"][:, 0] - post[\"a\"][:, 1]\ndiff_p = expit(post[\"a\"][:, 0]) - expit(post[\"a\"][:, 1])\nprint_summary({\"diff_a\": diff_a, \"diff_p\": diff_p}, 0.89, False)\n\ndata_dict = {\"gid\": dat_list[\"gid\"], \"dept_id\": dat_list[\"dept_id\"], \"applications\": dat_list[\"applications\"]}\nppc(m11_8, data_dict)\n# ppc(m11_8, dat_list) # must exclude 'admit' for predictive distribution\nplt.savefig(\"admissions_ppc_per_dept.pdf\", dpi=300)\nplt.show()",
"Poisson regression\nWe now show we can emulate binomial regresison using 2 poisson regressions,\nfollowing sec 11.3.3 of rethinking. We use a simplified model that just predicts outcomes, and has no features (just an offset term).",
"# binomial model of overall admission probability\ndef model(applications, admit):\n a = numpyro.sample(\"a\", dist.Normal(0, 1.5))\n logit_p = a\n numpyro.sample(\"admit\", dist.Binomial(applications, logits=logit_p), obs=admit)\n\n\n\"\"\"\nm_binom = AutoLaplaceApproximation(model)\nsvi = SVI(\n model,\n m_binom,\n optim.Adam(1),\n Trace_ELBO(),\n applications=d.applications.values,\n admit=d.admit.values,\n)\np_binom, losses = svi.run(random.PRNGKey(0), 1000)\n\"\"\"\n\nm_binom = MCMC(NUTS(model), num_warmup=500, num_samples=500, num_chains=4)\nm_binom.run(random.PRNGKey(0), d.applications.values, d.admit.values)\nm_binom.print_summary(0.95)\n\nlogit = jnp.mean(m_binom.get_samples()[\"a\"])\nprint(expit(logit))\n\ndef model(rej, admit):\n a1, a2 = numpyro.sample(\"a\", dist.Normal(0, 1.5).expand([2]))\n lambda1 = jnp.exp(a1)\n lambda2 = jnp.exp(a2)\n numpyro.sample(\"rej\", dist.Poisson(lambda2), obs=rej)\n numpyro.sample(\"admit\", dist.Poisson(lambda1), obs=admit)\n\n\nm_pois = MCMC(NUTS(model), num_warmup=1000, num_samples=1000, num_chains=3)\nm_pois.run(random.PRNGKey(0), d.reject.values, d.admit.values)\nm_pois.print_summary(0.95)\n\nparams = jnp.mean(m_pois.get_samples()[\"a\"], 0)\na1 = params[0]\na2 = params[1]\nlam1 = jnp.exp(a1)\nlam2 = jnp.exp(a2)\nprint([lam1, lam2])\nprint(lam1 / (lam1 + lam2))",
"Beta-binomial regression\nSec 12.1.1 of rethinking.\nCode from snippet 12.2 of Du Phan's site",
"d = UCBadmit\nd[\"gid\"] = (d[\"applicant.gender\"] != \"male\").astype(int)\ndat = dict(A=d.admit.values, N=d.applications.values, gid=d.gid.values)\n\n\ndef model(gid, N, A=None):\n a = numpyro.sample(\"a\", dist.Normal(0, 1.5).expand([2]))\n phi = numpyro.sample(\"phi\", dist.Exponential(1))\n theta = numpyro.deterministic(\"theta\", phi + 2) # shape\n pbar = expit(a[gid]) # mean\n numpyro.sample(\"A\", dist.BetaBinomial(pbar * theta, (1 - pbar) * theta, N), obs=A)\n\n\nm12_1 = MCMC(NUTS(model), num_warmup=500, num_samples=500, num_chains=4)\nm12_1.run(random.PRNGKey(0), **dat)\n\npost = m12_1.get_samples()\npost[\"theta\"] = Predictive(m12_1.sampler.model, post)(random.PRNGKey(1), **dat)[\"theta\"]\npost[\"da\"] = post[\"a\"][:, 0] - post[\"a\"][:, 1]\nprint_summary(post, 0.89, False)\n\npost\n\ngid = 1\n# draw posterior mean beta distribution\nx = jnp.linspace(0, 1, 101)\npbar = jnp.mean(expit(post[\"a\"][:, gid]))\ntheta = jnp.mean(post[\"theta\"])\nplt.plot(x, jnp.exp(dist.Beta(pbar * theta, (1 - pbar) * theta).log_prob(x)))\nplt.gca().set(ylabel=\"Density\", xlabel=\"probability admit\", ylim=(0, 3))\n\n# draw 50 beta distributions sampled from posterior\nfor i in range(50):\n p = expit(post[\"a\"][i, gid])\n theta = post[\"theta\"][i]\n plt.plot(x, jnp.exp(dist.Beta(p * theta, (1 - p) * theta).log_prob(x)), \"k\", alpha=0.2)\nplt.title(\"distribution of female admission rates\")\nplt.savefig(\"admissions_betabinom_female_rate.pdf\")\nplt.show()\n\nfig, ax = plt.subplots()\nlabels = [\"male\", \"female\"]\ncolors = [\"b\", \"r\"]\nfor gid in [0, 1]:\n # draw posterior mean beta distribution\n x = jnp.linspace(0, 1, 101)\n pbar = jnp.mean(expit(post[\"a\"][:, gid]))\n theta = jnp.mean(post[\"theta\"])\n y = jnp.exp(dist.Beta(pbar * theta, (1 - pbar) * theta).log_prob(x))\n ax.plot(x, y, label=labels[gid], color=colors[gid])\n ax.set_ylabel(\"Density\")\n ax.set_xlabel(\"probability admit\")\n ax.set_ylim(0, 3)\n\n # draw some beta distributions sampled from 
posterior\n for i in range(10):\n p = expit(post[\"a\"][i, gid])\n theta = post[\"theta\"][i]\n y = jnp.exp(dist.Beta(p * theta, (1 - p) * theta).log_prob(x))\n plt.plot(x, y, colors[gid], alpha=0.2)\n\nplt.title(\"distribution of admission rates\")\nplt.legend()\nplt.savefig(\"admissions_betabinom_rates.pdf\")\nplt.show()\n\npost = m12_1.get_samples()\nadmit_pred = Predictive(m12_1.sampler.model, post)(random.PRNGKey(1), gid=dat[\"gid\"], N=dat[\"N\"])[\"A\"]\nadmit_rate = admit_pred / dat[\"N\"]\nplt.scatter(range(1, 13), dat[\"A\"] / dat[\"N\"])\nplt.errorbar(\n range(1, 13),\n jnp.mean(admit_rate, 0),\n jnp.std(admit_rate, 0) / 2,\n fmt=\"o\",\n c=\"k\",\n mfc=\"none\",\n ms=7,\n elinewidth=1,\n)\nplt.plot(range(1, 13), jnp.percentile(admit_rate, 5.5, 0), \"k+\")\nplt.plot(range(1, 13), jnp.percentile(admit_rate, 94.5, 0), \"k+\")\nplt.savefig(\"admissions_betabinom_post_pred.pdf\")\nplt.show()",
"Mixed effects model with joint prior\nThis code is from https://numpyro.readthedocs.io/en/latest/examples/ucbadmit.html.",
"from numpyro.examples.datasets import UCBADMIT, load_dataset\n\n\ndef glmm(dept, male, applications, admit=None):\n v_mu = numpyro.sample(\"v_mu\", dist.Normal(0, jnp.array([4.0, 1.0])))\n\n sigma = numpyro.sample(\"sigma\", dist.HalfNormal(jnp.ones(2)))\n L_Rho = numpyro.sample(\"L_Rho\", dist.LKJCholesky(2, concentration=2))\n scale_tril = sigma[..., jnp.newaxis] * L_Rho\n # non-centered parameterization\n num_dept = len(np.unique(dept))\n z = numpyro.sample(\"z\", dist.Normal(jnp.zeros((num_dept, 2)), 1))\n v = jnp.dot(scale_tril, z.T).T\n\n logits = v_mu[0] + v[dept, 0] + (v_mu[1] + v[dept, 1]) * male\n if admit is None:\n # we use a Delta site to record probs for predictive distribution\n probs = expit(logits)\n numpyro.sample(\"probs\", dist.Delta(probs), obs=probs)\n numpyro.sample(\"admit\", dist.Binomial(applications, logits=logits), obs=admit)\n\n\ndef run_inference(dept, male, applications, admit, rng_key):\n kernel = NUTS(glmm)\n mcmc = MCMC(kernel, num_warmup=500, num_samples=1000, num_chains=1)\n mcmc.run(rng_key, dept, male, applications, admit)\n return mcmc.get_samples()\n\n\ndef print_results(header, preds, dept, male, probs):\n columns = [\"Dept\", \"Male\", \"ActualProb\", \"Pred(p25)\", \"Pred(p50)\", \"Pred(p75)\"]\n header_format = \"{:>10} {:>10} {:>10} {:>10} {:>10} {:>10}\"\n row_format = \"{:>10.0f} {:>10.0f} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f}\"\n quantiles = jnp.quantile(preds, jnp.array([0.25, 0.5, 0.75]), axis=0)\n print(\"\\n\", header, \"\\n\")\n print(header_format.format(*columns))\n for i in range(len(dept)):\n print(row_format.format(dept[i], male[i], probs[i], *quantiles[:, i]), \"\\n\")\n\n_, fetch_train = load_dataset(UCBADMIT, split=\"train\", shuffle=False)\ndept, male, applications, admit = fetch_train()\nrng_key, rng_key_predict = random.split(random.PRNGKey(1))\nzs = run_inference(dept, male, applications, admit, rng_key)\npred_probs = Predictive(glmm, zs)(rng_key_predict, dept, male, 
applications)[\"probs\"]\nheader = \"=\" * 30 + \"glmm - TRAIN\" + \"=\" * 30\nprint_results(header, pred_probs, dept, male, admit / applications)\n\n# make plots\nfig, ax = plt.subplots(figsize=(8, 6), constrained_layout=True)\n\nax.plot(range(1, 13), admit / applications, \"o\", ms=7, label=\"actual rate\")\nax.errorbar(\n range(1, 13),\n jnp.mean(pred_probs, 0),\n jnp.std(pred_probs, 0),\n fmt=\"o\",\n c=\"k\",\n mfc=\"none\",\n ms=7,\n elinewidth=1,\n label=r\"mean $\\pm$ std\",\n)\nax.plot(range(1, 13), jnp.percentile(pred_probs, 5, 0), \"k+\")\nax.plot(range(1, 13), jnp.percentile(pred_probs, 95, 0), \"k+\")\nax.set(\n xlabel=\"cases\",\n ylabel=\"admit rate\",\n title=\"Posterior Predictive Check with 90% CI\",\n)\nax.legend()\n\nplt.savefig(\"ucbadmit_plot.pdf\")",
"PGMs",
"# p344\ndag = CausalGraphicalModel(nodes=[\"G\", \"D\", \"A\"], edges=[(\"G\", \"D\"), (\"G\", \"A\"), (\"D\", \"A\")])\nout = dag.draw()\ndisplay(out)\nout.render(filename=\"admissions_dag\", format=\"pdf\")\n\n# p345\ndag = CausalGraphicalModel(nodes=[\"G\", \"D\", \"A\"], edges=[(\"G\", \"D\"), (\"G\", \"A\"), (\"D\", \"A\")], latent_edges=[(\"D\", \"A\")])\nout = dag.draw()\ndisplay(out)\nout.render(filename=\"admissions_dag_hidden\", format=\"pdf\")",
"Causal inference with the latent DAG\nThis is based on sec 6.3 (collider bias) of the Rethinking book.\nCode is from Du Phan, code snippet 6.25. We change the names to match our current example: P (parents) -> D (department), C (child) -> A (admit).\nLinear regression version",
"N = 200 # number of samples\nb_GP = 1 # direct effect of G on P\nb_GC = 0 # direct effect of G on C\nb_PC = 1 # direct effect of P on C\nb_U = 2 # direct effect of U on P and C\n\nwith numpyro.handlers.seed(rng_seed=1):\n U = 2 * numpyro.sample(\"U\", dist.Bernoulli(0.5).expand([N])) - 1\n G = numpyro.sample(\"G\", dist.Normal().expand([N]))\n P = numpyro.sample(\"P\", dist.Normal(b_GP * G + b_U * U))\n C = numpyro.sample(\"C\", dist.Normal(b_PC * P + b_GC * G + b_U * U))\n df_gauss = pd.DataFrame({\"C\": C, \"P\": P, \"G\": G, \"U\": U})\n\ndef model_linreg(P, G, C):\n a = numpyro.sample(\"a\", dist.Normal(0, 1))\n b_PC = numpyro.sample(\"b_PC\", dist.Normal(0, 1))\n b_GC = numpyro.sample(\"b_GC\", dist.Normal(0, 1))\n sigma = numpyro.sample(\"sigma\", dist.Exponential(1))\n mu = a + b_PC * P + b_GC * G\n numpyro.sample(\"C\", dist.Normal(mu, sigma), obs=C)\n\n\ndata_gauss = {\"P\": df_gauss.P.values, \"G\": df_gauss.G.values, \"C\": df_gauss.C.values}\n\nm6_11 = AutoLaplaceApproximation(model_linreg)\nsvi = SVI(model_linreg, m6_11, optim.Adam(0.3), Trace_ELBO(), **data_gauss)\np6_11, losses = svi.run(random.PRNGKey(0), 1000)\npost = m6_11.sample_posterior(random.PRNGKey(1), p6_11, (1000,))\nprint_summary(post, 0.89, False)\n\nmcmc_run = MCMC(NUTS(model_linreg), num_warmup=200, num_samples=200, num_chains=4)\nmcmc_run.run(random.PRNGKey(0), **data)\nmcmc_run.print_summary(0.89)\n\ndef model_linreg_hidden(P, G, U, C):\n a = numpyro.sample(\"a\", dist.Normal(0, 1))\n b_PC = numpyro.sample(\"b_PC\", dist.Normal(0, 1))\n b_GC = numpyro.sample(\"b_GC\", dist.Normal(0, 1))\n b_U = numpyro.sample(\"U\", dist.Normal(0, 1))\n sigma = numpyro.sample(\"sigma\", dist.Exponential(1))\n mu = a + b_PC * P + b_GC * G + b_U * U\n numpyro.sample(\"C\", dist.Normal(mu, sigma), obs=C)\n\n\nm6_12 = AutoLaplaceApproximation(model_linreg_hidden)\nsvi = SVI(\n model_linreg_hidden,\n m6_12,\n optim.Adam(1),\n Trace_ELBO(),\n P=d.P.values,\n G=d.G.values,\n U=d.U.values,\n 
C=d.C.values,\n)\np6_12, losses = svi.run(random.PRNGKey(0), 1000)\npost = m6_12.sample_posterior(random.PRNGKey(1), p6_12, (1000,))\nprint_summary(post, 0.89, False)",
"Logistic regression version\nWe modify the scenario to match the UC Berkeley admissions scenario (with binary data) in sec 11.1.4.",
"N = 200 # number of samples\nb_GP = 1 # direct effect of G on P\nb_GC = 0 # direct effect of G on C\nb_PC = 1 # direct effect of P on C\nb_U = 2 # direct effect of U on P and C\n\nwith numpyro.handlers.seed(rng_seed=1):\n # U = 2 * numpyro.sample(\"U\", dist.Bernoulli(0.5).expand([N])) - 1\n U = numpyro.sample(\"U\", dist.Normal().expand([N]))\n # G = numpyro.sample(\"G\", dist.Normal().expand([N]))\n G = numpyro.sample(\"G\", dist.Bernoulli(0.5).expand([N]))\n P = numpyro.sample(\"P\", dist.Normal(b_GP * G + b_U * U))\n # C = numpyro.sample(\"C\", dist.Normal(b_PC * P + b_GC * G + b_U * U))\n logits = b_PC * P + b_GC * G + b_U * U\n probs = expit(logits)\n C = numpyro.sample(\"C\", dist.BernoulliProbs(probs))\n df_binary = pd.DataFrame({\"C\": C, \"G\": G, \"P\": P, \"U\": U, \"probs\": probs})\n\ndisplay(df_binary.head(10))\n\ndef model_causal(C=None, G=None, P=None, U=None):\n U = numpyro.sample(\"U\", dist.Normal(), obs=U)\n G = numpyro.sample(\"G\", dist.Bernoulli(0.5), obs=G)\n P = numpyro.sample(\"P\", dist.Normal(b_GP * G + b_U * U), obs=P)\n logits = b_PC * P + b_GC * G + b_U * U\n probs = expit(logits)\n C = numpyro.sample(\"C\", dist.BernoulliProbs(probs), obs=C)\n return np.array([C, G, P, U])\n\ndef make_samples(C=None, G=None, P=None, U=None, nsamples=200):\n data_list = []\n with numpyro.handlers.seed(rng_seed=0):\n for i in range(nsamples):\n out = model_causal(C, G, P, U)\n data_list.append(out)\n df = pd.DataFrame.from_records(data_list, columns=[\"C\", \"G\", \"P\", \"U\"])\n return df\n\n\ndf_binary = make_samples()\ndisplay(df_binary.head())\n\nCbar = df_binary[\"C\"].values.mean()\nGbar = df_binary[\"G\"].values.mean()\nPbar = df_binary[\"P\"].values.mean()\nUbar = df_binary[\"U\"].values.mean()\nprint([Cbar, Gbar, Pbar, Ubar])\nprint(b_GP * Gbar + b_U * Ubar) # expected Pbar\n\nN = len(df0)\nprob_admitted0 = np.sum(df0.C.values) / N\nprob_admitted1 = np.sum(df1.C.values) / N\nprint([prob_admitted0, prob_admitted1])\n\ndef 
model_logreg(C=None, G=None, P=None):\n a = numpyro.sample(\"a\", dist.Normal(0, 1))\n b_PC = numpyro.sample(\"b_PC\", dist.Normal(0, 0.1))\n b_GC = numpyro.sample(\"b_GC\", dist.Normal(0, 0.1))\n logits = a + b_PC * P + b_GC * G\n numpyro.sample(\"C\", dist.Bernoulli(logits=logits), obs=C)\n\n\ndata_binary = {\"P\": df_binary.P.values, \"G\": df_binary.G.values, \"C\": df_binary.C.values}\n\nwarmup = 1000\nsamples = 500\nmcmc_run = MCMC(NUTS(model_logreg), num_warmup=warmup, num_samples=samples, num_chains=4)\nmcmc_run.run(random.PRNGKey(0), **data)\nmcmc_run.print_summary(0.89)",
"Counterfactual plot\nSimilar to p140",
"# p(C | do(G), do(P))\nPfixed = 0\n\ndf0 = make_samples(G=0, P=Pfixed, nsamples=200)\ndisplay(df0.head())\nCbar0 = df0[\"C\"].values.mean()\n\ndf1 = make_samples(G=1, P=Pfixed, nsamples=200)\ndisplay(df1.head())\nCbar1 = df1[\"C\"].values.mean()\n\nprint([Cbar0, Cbar1])\n\nsim_dat = dict(G=jnp.array([0, 1]), P=jnp.array(Pfixed))\npost = mcmc_run.get_samples()\npred = Predictive(model_logreg, post)(random.PRNGKey(22), **sim_dat)\nprint(pred[\"C\"].shape)\nprint(np.mean(pred[\"C\"], axis=0))\n\na_est = post[\"a\"].mean()\nb_PC_est = post[\"b_PC\"].mean()\nb_GC_est = post[\"b_GC\"].mean()\nP = Pfixed\n\nG = np.array([0, 1])\nlogits = a_est + b_PC_est * P + b_GC_est * G\nnp.set_printoptions(formatter={\"float\": lambda x: \"{0:0.3f}\".format(x)})\nprint(expit(logits))\n\npred"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
sysid/nbs
|
Overfitting.ipynb
|
mit
|
[
"Training a better model",
"#from theano.sandbox import cuda\n\n%matplotlib inline\nimport utils\nimport importlib\nimportlib.reload(utils)\nfrom utils import *\nfrom __future__ import division, print_function\n\n#path = \"data/dogscats/sample/\"\npath = \"data/dogscats/\"\nmodel_path = path + 'models/'\nif not os.path.exists(model_path): os.mkdir(model_path)\n\nbatch_size=64",
"Are we underfitting?\nOur validation accuracy so far has generally been higher than our training accuracy. That leads to two obvious questions:\n\nHow is this possible?\nIs this desirable?\n\nThe answer to (1) is that this is happening because of dropout. Dropout refers to a layer that randomly deletes (i.e. sets to zero) each activation in the previous layer with probability p (generally 0.5). This only happens during training, not when calculating the accuracy on the validation set, which is why the validation set can show higher accuracy than the training set.\nThe purpose of dropout is to avoid overfitting. By deleting parts of the neural network at random during training, it ensures that no one part of the network can overfit to one part of the training set. The creation of dropout was one of the key developments in deep learning, and has allowed us to create rich models without overfitting. However, it can also result in underfitting if overused, and this is something we should be careful of with our model.\nSo the answer to (2) is: this is probably not desirable. It is likely that we can get better validation set results with less (or no) dropout, if we're seeing that validation accuracy is higher than training accuracy - a strong sign of underfitting. So let's try removing dropout entirely, and see what happens!\n(We had dropout in this model already because the VGG authors found it necessary for the imagenet competition. But that doesn't mean it's necessary for dogs v cats, so we will do our own analysis of regularization approaches from scratch.)\nRemoving dropout\nOur high level approach here will be to start with our fine-tuned cats vs dogs model (with dropout), then fine-tune all the dense layers, after removing dropout from them. The steps we will take are:\n- Re-create and load our modified VGG model with binary dependent (i.e. 
dogs v cats)\n- Split the model between the convolutional (conv) layers and the dense layers\n- Pre-calculate the output of the conv layers, so that we don't have to redundently re-calculate them on every epoch\n- Create a new model with just the dense layers, and dropout p set to zero\n- Train this new model using the output of the conv layers as training data.\nAs before we need to start with a working model, so let's bring in our working VGG 16 model and change it to predict our binary dependent...",
"model = vgg_ft(2)",
"...and load our fine-tuned weights.",
"model.load_weights(model_path+'finetune3.h5')",
"We're going to be training a number of iterations without dropout, so it would be best for us to pre-calculate the input to the fully connected layers - i.e. the Flatten() layer. We'll start by finding this layer in our model, and creating a new model that contains just the layers up to and including this layer:",
"layers = model.layers\n\nlast_conv_idx = [index for index,layer in enumerate(layers) \n if type(layer) is Convolution2D][-1]\n\nlast_conv_idx\n\nlayers[last_conv_idx]\n\nconv_layers = layers[:last_conv_idx+1]\nconv_model = Sequential(conv_layers)\n# Dense layers - also known as fully connected or 'FC' layers\nfc_layers = layers[last_conv_idx+1:]",
"Now we can use the exact same approach to creating features as we used when we created the linear model from the imagenet predictions in the last lesson - it's only the model that has changed. As you're seeing, there's a fairly small number of \"recipes\" that can get us a long way!",
"batches = get_batches(path+'train', shuffle=False, batch_size=batch_size)\nval_batches = get_batches(path+'valid', shuffle=False, batch_size=batch_size)\n\nval_classes = val_batches.classes\ntrn_classes = batches.classes\nval_labels = onehot(val_classes)\ntrn_labels = onehot(trn_classes)\n\nval_features = conv_model.predict_generator(val_batches, val_batches.nb_sample)\n\ntrn_features = conv_model.predict_generator(batches, batches.nb_sample)\n\nsave_array(model_path + 'train_convlayer_features.bc', trn_features)\nsave_array(model_path + 'valid_convlayer_features.bc', val_features)\n\ntrn_features = load_array(model_path+'train_convlayer_features.bc')\nval_features = load_array(model_path+'valid_convlayer_features.bc')\n\ntrn_features.shape",
"For our new fully connected model, we'll create it using the exact same architecture as the last layers of VGG 16, so that we can conveniently copy pre-trained weights over from that model. However, we'll set the dropout layer's p values to zero, so as to effectively remove dropout.",
"# Copy the weights from the pre-trained model.\n# NB: Since we're removing dropout, we want to half the weights\ndef proc_wgts(layer): return [o/2 for o in layer.get_weights()]\n\n# Such a finely tuned model needs to be updated very slowly!\nopt = RMSprop(lr=0.00001, rho=0.7)\n\ndef get_fc_model():\n model = Sequential([\n MaxPooling2D(input_shape=conv_layers[-1].output_shape[1:]),\n Flatten(),\n Dense(4096, activation='relu'),\n Dropout(0.),\n Dense(4096, activation='relu'),\n Dropout(0.),\n Dense(2, activation='softmax')\n ])\n\n for l1,l2 in zip(model.layers, fc_layers): l1.set_weights(proc_wgts(l2))\n\n model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n\nfc_model = get_fc_model()",
"And fit the model in the usual way:",
"fc_model.fit(trn_features, trn_labels, nb_epoch=8, \n batch_size=batch_size, validation_data=(val_features, val_labels))\n\nfc_model.save_weights(model_path+'no_dropout.h5')\n\nfc_model.load_weights(model_path+'no_dropout.h5')",
"Reducing overfitting\nNow that we've gotten the model to overfit, we can take a number of steps to reduce this.\nApproaches to reducing overfitting\nWe do not necessarily need to rely on dropout or other regularization approaches to reduce overfitting. There are other techniques we should try first, since regularlization, by definition, biases our model towards simplicity - which we only want to do if we know that's necessary. This is the order that we recommend using for reducing overfitting (more details about each in a moment):\n\nAdd more data\nUse data augmentation\nUse architectures that generalize well\nAdd regularization\nReduce architecture complexity.\n\nWe'll assume that you've already collected as much data as you can, so step (1) isn't relevant (this is true for most Kaggle competitions, for instance). So the next step (2) is data augmentation. This refers to creating additional synthetic data, based on reasonable modifications of your input data. For images, this is likely to involve one or more of: flipping, rotation, zooming, cropping, panning, minor color changes.\nWhich types of augmentation are appropriate depends on your data. For regular photos, for instance, you'll want to use horizontal flipping, but not vertical flipping (since an upside down car is much less common than a car the right way up, for instance!)\nWe recommend always using at least some light data augmentation, unless you have so much data that your model will never see the same input twice.\nAbout data augmentation\nKeras comes with very convenient features for automating data augmentation. You simply define what types and maximum amounts of augmentation you want, and keras ensures that every item of every batch randomly is changed according to these settings. Here's how to define a generator that includes data augmentation:",
"# dim_ordering='tf' uses tensorflow dimension ordering,\n# which is the same order as matplotlib uses for display.\n# Therefore when just using for display purposes, this is more convenient\ngen = image.ImageDataGenerator(rotation_range=10, width_shift_range=0.1, \n height_shift_range=0.1, width_zoom_range=0.2, shear_range=0.15, zoom_range=0.1, \n channel_shift_range=10., horizontal_flip=True, dim_ordering='tf')",
"Let's take a look at how this generator changes a single image (the details of this code don't matter much, but feel free to read the comments and keras docs to understand the details if you're interested).",
"# Create a 'batch' of a single image\nimg = np.expand_dims(ndimage.imread('cat.jpg'),0)\n# Request the generator to create batches from this image\naug_iter = gen.flow(img)\n\n# Get eight examples of these augmented images\naug_imgs = [next(aug_iter)[0].astype(np.uint8) for i in range(8)]\n\n# The original\nplt.imshow(img[0])",
"As you can see below, there's no magic to data augmentation - it's a very intuitive approach to generating richer input data. Generally speaking, your intuition should be a good guide to appropriate data augmentation, although it's a good idea to test your intuition by checking the results of different augmentation approaches.",
"# Augmented data\nplots(aug_imgs, (20,7), 2)\n\n# Ensure that we return to theano dimension ordering\nK.set_image_dim_ordering('th')",
"Adding data augmentation\nLet's try adding a small amount of data augmentation, and see if we reduce overfitting as a result. The approach will be identical to the method we used to finetune the dense layers in lesson 2, except that we will use a generator with augmentation configured. Here's how we set up the generator, and create batches from it:",
"gen = image.ImageDataGenerator(rotation_range=15, width_shift_range=0.1, \n height_shift_range=0.1, zoom_range=0.1, horizontal_flip=True)\n\nbatches = get_batches(path+'train', gen, batch_size=batch_size)\n# NB: We don't want to augment or shuffle the validation set\nval_batches = get_batches(path+'valid', shuffle=False, batch_size=batch_size)",
"When using data augmentation, we can't pre-compute our convolutional layer features, since randomized changes are being made to every input image. That is, even if the training process sees the same image multiple times, each time it will have undergone different data augmentation, so the results of the convolutional layers will be different.\nTherefore, in order to allow data to flow through all the conv layers and our new dense layers, we attach our fully connected model to the convolutional model--after ensuring that the convolutional layers are not trainable:",
"fc_model = get_fc_model()\n\nfor layer in conv_model.layers: layer.trainable = False\n# Look how easy it is to connect two models together!\nconv_model.add(fc_model)",
"Now we can compile, train, and save our model as usual - note that we use fit_generator() since we want to pull random images from the directories on every batch.",
"conv_model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\n\nconv_model.fit_generator(batches, samples_per_epoch=batches.nb_sample, nb_epoch=8, \n validation_data=val_batches, nb_val_samples=val_batches.nb_sample)\n\nconv_model.fit_generator(batches, samples_per_epoch=batches.nb_sample, nb_epoch=3, \n validation_data=val_batches, nb_val_samples=val_batches.nb_sample)\n\nconv_model.save_weights(model_path + 'aug1.h5')\n\nconv_model.load_weights(model_path + 'aug1.h5')",
"Batch normalization\nAbout batch normalization\nBatch normalization (batchnorm) is a way to ensure that activations don't become too high or too low at any point in the model. Adjusting activations so they are of similar scales is called normalization. Normalization is very helpful for fast training - if some activations are very high, they will saturate the model and create very large gradients, causing training to fail; if very low, they will cause training to proceed very slowly. Furthermore, large or small activations in one layer will tend to result in even larger or smaller activations in later layers, since the activations get multiplied repeatedly across the layers.\nPrior to the development of batchnorm in 2015, only the inputs to a model could be effectively normalized - by simply subtracting their mean and dividing by their standard deviation. However, weights in intermediate layers could easily become poorly scaled, due to problems in weight initialization, or a high learning rate combined with random fluctuations in weights.\nBatchnorm resolves this problem by normalizing each intermediate layer as well. The details of how it works are not terribly important (although I will outline them in a moment) - the important takeaway is that all modern networks should use batchnorm, or something equivalent. There are two reasons for this:\n1. Adding batchnorm to a model can result in 10x or more improvements in training speed\n2. Because normalization greatly reduces the ability of a small number of outlying inputs to over-influence the training, it also tends to reduce overfitting.\nAs promised, here's a brief outline of how batchnorm works. As a first step, it normalizes intermediate layers in the same way as input layers can be normalized. But this on its own would not be enough, since the model would then just push the weights up or down indefinitely to try to undo this normalization. Therefore, batchnorm takes two additional steps:\n1. 
Add two more trainable parameters to each layer - one to multiply all activations to set an arbitrary standard deviation, and one to add to all activations to set an arbitary mean\n2. Incorporate both the normalization, and the learnt multiply/add parameters, into the gradient calculations during backprop.\nThis ensures that the weights don't tend to push very high or very low (since the normalization is included in the gradient calculations, so the updates are aware of the normalization). But it also ensures that if a layer does need to change the overall mean or standard deviation in order to match the output scale, it can do so.\nAdding batchnorm to the model\nWe can use nearly the same approach as before - but this time we'll add batchnorm layers (and dropout layers):",
"conv_layers[-1].output_shape[1:]\n\ndef get_bn_layers(p):\n return [\n MaxPooling2D(input_shape=conv_layers[-1].output_shape[1:]),\n Flatten(),\n Dense(4096, activation='relu'),\n Dropout(p),\n BatchNormalization(),\n Dense(4096, activation='relu'),\n Dropout(p),\n BatchNormalization(),\n Dense(1000, activation='softmax')\n ]\n\np=0.6\n\nbn_model = Sequential(get_bn_layers(0.6))\n\nbn_model.load_weights('/data/jhoward/ILSVRC2012_img/bn_do3_1.h5')\n\ndef proc_wgts(layer, prev_p, new_p):\n scal = (1-prev_p)/(1-new_p)\n return [o*scal for o in layer.get_weights()]\n\nfor l in bn_model.layers: \n if type(l)==Dense: l.set_weights(proc_wgts(l, 0.3, 0.6))\n\nbn_model.pop()\nfor layer in bn_model.layers: layer.trainable=False\n\nbn_model.add(Dense(2,activation='softmax'))\n\nbn_model.compile(Adam(), 'categorical_crossentropy', metrics=['accuracy'])\n\nbn_model.fit(trn_features, trn_labels, nb_epoch=8, validation_data=(val_features, val_labels))\n\nbn_model.save_weights(model_path+'bn.h5')\n\nbn_model.load_weights(model_path+'bn.h5')\n\nbn_layers = get_bn_layers(0.6)\nbn_layers.pop()\nbn_layers.append(Dense(2,activation='softmax'))\n\nfinal_model = Sequential(conv_layers)\nfor layer in final_model.layers: layer.trainable = False\nfor layer in bn_layers: final_model.add(layer)\n\nfor l1,l2 in zip(bn_model.layers, bn_layers):\n l2.set_weights(l1.get_weights())\n\nfinal_model.compile(optimizer=Adam(), \n loss='categorical_crossentropy', metrics=['accuracy'])\n\nfinal_model.fit_generator(batches, samples_per_epoch=batches.nb_sample, nb_epoch=1, \n validation_data=val_batches, nb_val_samples=val_batches.nb_sample)\n\nfinal_model.save_weights(model_path + 'final1.h5')\n\nfinal_model.fit_generator(batches, samples_per_epoch=batches.nb_sample, nb_epoch=4, \n validation_data=val_batches, nb_val_samples=val_batches.nb_sample)\n\nfinal_model.save_weights(model_path + 'final2.h5')\n\nfinal_model.optimizer.lr=0.001\n\nfinal_model.fit_generator(batches, 
samples_per_epoch=batches.nb_sample, nb_epoch=4, \n validation_data=val_batches, nb_val_samples=val_batches.nb_sample)\n\nbn_model.save_weights(model_path + 'final3.h5')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
kongjy/hyperAFM
|
Notebooks/PCAonSyntheticData.ipynb
|
mit
|
[
"import numpy as np \nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import scale\nfrom sklearn.decomposition import PCA as sklearnPCA\nfrom matplotlib.mlab import PCA\n\nimport FindPeaks\nimport syntheticspectra\nimport PCAsynthetic\n\n%matplotlib inline\n\nspectralmatrix = syntheticspectra.spectralmatrix",
"1. Calculate an average spectrum to ID peaks",
"averagespectrum = PCAsynthetic.get_hyper_peaks(spectralmatrix, threshold = 0.01)\n\nplt.plot(averagespectrum)",
"2. Make a feature matrix, n x p, where n = number of samples, p = number of features",
"featurematrix = PCAsynthetic.makefeaturematrix(spectralmatrix, averagespectrum)\nfeaturematrix[10:13,:]",
"3. Standardize: zero mean, unit variance. (and check!)",
"featurematrix_std = PCAsynthetic.stdfeature(featurematrix, axis = 0)\n#along axis 0 = running vertically downwards, across rows; 1 = columns\nmean = featurematrix_std.mean(axis=0)\nvariance = featurematrix_std.std(axis=0)\nprint(mean, variance)",
"4. Sklearn PCA",
"#define number of principal components \nsklearn_pca = sklearnPCA(n_components=9)\n\n#matrix with each sample in terms of the PCs\nSkPC = sklearn_pca.fit_transform(featurematrix_std)\n\n#covariance matrix \nSkcov = sklearn_pca.get_covariance()\n\n#score matrix \n#Skscore = sklearn_pca.score_samples(featurematrix_std)\n\n#explained variance\nSkvariance = sklearn_pca.explained_variance_\nSkvarianceratio = sklearn_pca.explained_variance_ratio_\n\nSkvarianceratio\n\n\nSkvariance",
"5. Matrix decomposition (see A User's Guide to Principal Components by Jackson, 1991.)\nU’SU = L \nU = orthonormal matrix, characteristic vectors\nS = covariance matrix\nL = diagonal matrix, characteristic roots \nGet characteristic roots: |𝑺−𝒍𝑰|=𝟎\nGet characteristic vector: |𝑺−𝒍𝑰|𝒕𝒊=𝟎\nThe projection of sample n onto principal component i: z$i$ = u$^{’}{i}$[x${n}$-x${avg}$]",
"mean_vec = np.mean(featurematrix_std, axis=0)\n\n#need to take transpose, since rowvar = true by default\ncov_mat = np.cov(featurematrix_std.T)\n\n#solve for characteristic roots and vectors \neig_vals, eig_vecs = np.linalg.eig(cov_mat)\n\n#check that the loadings squared sum to 1: \nLsquared = sum(eig_vecs**2)",
"6. Matlab's PCA",
"mlPCA = PCA(featurematrix_std)\n#get projections of samples into PCA space\nmltrans = mlPCA.Y\n#reshape\nmltransreshape = mltrans.reshape((256,256,9))\nmlloadings = mlPCA.Wt\n#mltrans[513,:] should be the same as mltransreshape[2,1,:]\nmlloadings.shape",
"Check that all three give similar results",
"#projection of first sample, on to the first PC \nP11 = np.dot(eig_vecs[:,0], featurematrix_std[0,:]-mean_vec) \nmlP11 = mlPCA.Y[0,0]\nSkP11 = SkPC[0,0]\n\nP12 = np.dot(eig_vecs[:,1], featurematrix_std[0,:]-mean_vec) \nmlP12 = mlPCA.Y[0,1]\nSkP12 = SkPC[0,1]\n\nP152 = np.dot(eig_vecs[:,1], featurematrix_std[15,:]-mean_vec) \nmlP152 = mlPCA.Y[15,1]\nSkP152 = SkPC[15,1]\n\nprint(P11, mlP11, SkP11)\nprint(P12, mlP12, SkP12)\nprint(P152, mlP152, SkP152)\n\nprint(mlloadings[0,7])\nprint(eig_vecs[0,7])",
"PCA with the entire IR spectrum.",
"#Reshape spectral matrix \nIRmatrix=spectralmatrix.reshape(65536,559)\nprint(IRmatrix[1,:].shape)\n\n#make sure we've reshaped correctly \nplt.plot(reshapespect[555,:])",
"Standardize matrix",
"IRmatrix=np.concatenate((IRmatrix[:,20:60], IRmatrix[:,230:270], IRmatrix[:,420:460], IRmatrix[:,100:140],IRmatrix[:,305:345], IRmatrix[:,470:510], IRmatrix[:,158:198], IRmatrix[:,354:394], IRmatrix[:,512:552] ), axis=1)\n#IRmatrix=np.concatenate((IRmatrix[:,30:40], IRmatrix[:,240:260], IRmatrix[:,430:450], IRmatrix[:,90:130],IRmatrix[:,395:335], IRmatrix[:,460:500], IRmatrix[:,148:188], IRmatrix[:,364:384], IRmatrix[:,522:542] ), axis=1)\n\n\nIRmatrix_std = PCAsynthetic.stdfeature(IRmatrix, axis = 0)\nIRmean = IRmatrix_std.mean(axis=0)\nIRvariance = IRmatrix_std.std(axis=0)\nprint(IRvariance)\n\nIRmlPCA = PCA(IRmatrix_std)\n#get projections of samples into PCA space\nIRmltrans = IRmlPCA.Y\n#reshape\nIRmlloadings = IRmlPCA.Wt\nIRmltrans.shape\n\nIRmltransreshape=IRmltrans.reshape(256,256,360)\n\nscore1image = IRmltransreshape[:,:,0]\nscore2image = IRmltransreshape[:,:,1]\nscore3image = IRmltransreshape[:,:,2]\nscore4image = IRmltransreshape[:,:,3]\nscore5image = IRmltransreshape[:,:,4]\nscore6image = IRmltransreshape[:,:,5]\nscore7image = IRmltransreshape[:,:,6]\nscore8image = IRmltransreshape[:,:,7]\nscore9image = IRmltransreshape[:,:,8]\n\nplt.imshow(syntheticspectra.Cmatrix)\n\nplt.imshow(score1image)\n\nplt.imshow(score2image)\n\nplt.imshow(score3image)\n\nplt.imshow(score4image)\n\nplt.imshow(score5image)\n\nplt.imshow(score6image)\n\nplt.imshow(score7image)\n\nplt.imshow(score8image)\n\nplt.imshow(score9image)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Luke035/dlnd-lessons
|
gan_mnist/Intro_to_GANs_Exercises.ipynb
|
mit
|
[
"Generative Adversarial Network\nIn this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!\nGANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:\n\nPix2Pix \nCycleGAN\nA whole list\n\nThe idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator, it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistiguishable from real data to the discriminator.\n\nThe general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to contruct it's fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.\nThe output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates an real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.",
"%matplotlib inline\n\nimport pickle as pkl\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data')",
"Model Inputs\nFirst we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.\n\nExercise: Finish the model_inputs function below. Create the placeholders for inputs_real and inputs_z using the input sizes real_dim and z_dim respectively.",
"def model_inputs(real_dim, z_dim):\n #(Batch_size, Dim)\n inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real')\n inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')\n \n return inputs_real, inputs_z",
"Generator network\n\nHere we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.\nVariable Scope\nHere we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.\nWe could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.\nTo use tf.variable_scope, you use a with statement:\npython\nwith tf.variable_scope('scope_name', reuse=False):\n # code here\nHere's more from the TensorFlow documentation to get another look at using tf.variable_scope.\nLeaky ReLU\nTensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one . For this you can just take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:\n$$\nf(x) = max(\\alpha * x, x)\n$$\nTanh Output\nThe generator has been found to perform the best with $tanh$ for the generator output. 
This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.\n\nExercise: Implement the generator network in the function below. You'll need to return the tanh output. Make sure to wrap your code in a variable scope, with 'generator' as the scope name, and pass the reuse keyword argument from the function to tf.variable_scope.",
"def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):\n ''' Build the generator network.\n \n Arguments\n ---------\n z : Input tensor for the generator\n out_dim : Shape of the generator output\n n_units : Number of units in hidden layer\n reuse : Reuse the variables with tf.variable_scope\n alpha : leak parameter for leaky ReLU\n \n Returns\n -------\n out: \n '''\n with tf.variable_scope('generator', reuse=reuse): # Netowrk creating -> reuse set to false\n # Hidden layer\n #activation_fn is None given that it should be activated through the LRELU layer\n h1 = tf.layers.dense(inputs=z, units=n_units, \n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01), \n activation=None\n )\n # Leaky ReLU\n h1 = tf.maximum(h1 * alpha, h1)\n \n # Logits and tanh output\n #Read out layer\n logits = tf.layers.dense(inputs=h1, units=out_dim, \n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01), \n activation=None\n )\n out = tf.tanh(logits)\n \n return out",
"Discriminator\nThe discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.\n\nExercise: Implement the discriminator network in the function below. Same as above, you'll need to return both the logits and the sigmoid output. Make sure to wrap your code in a variable scope, with 'discriminator' as the scope name, and pass the reuse keyword argument from the function arguments to tf.variable_scope.",
"def discriminator(x, n_units=128, reuse=False, alpha=0.01):\n ''' Build the discriminator network.\n \n Arguments\n ---------\n x : Input tensor for the discriminator\n n_units: Number of units in hidden layer\n reuse : Reuse the variables with tf.variable_scope\n alpha : leak parameter for leaky ReLU\n \n Returns\n -------\n out, logits: \n '''\n with tf.variable_scope('discriminator', reuse=reuse): # Netowrk creating -> reuse set to false\n # Hidden layer\n h1 = tf.layers.dense(inputs=x, units=n_units, \n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01), \n activation=None\n )\n # Leaky ReLU\n h1 = tf.maximum(h1 * alpha, h1)\n \n #Out dim is 1, it should be simgmoided, return a 0 to 1 prob value after sigmoid\n logits = tf.layers.dense(inputs=h1, units=1, \n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01), \n activation=None\n )\n \n out = tf.sigmoid(logits)\n \n return out, logits",
"Hyperparameters",
"# Size of input image to discriminator\ninput_size = 784 # 28x28 MNIST images flattened\n# Size of latent vector to generator\nz_size = 100\n# Sizes of hidden layers in generator and discriminator\ng_hidden_size = 128\nd_hidden_size = 128\n# Leak factor for leaky ReLU\nalpha = 0.01\n# Label smoothing \nsmooth = 0.1",
"Build network\nNow we're building the network from the functions defined above.\nFirst is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.\nThen, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.\nThen the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).\n\nExercise: Build the network from the functions you defined earlier.",
"tf.reset_default_graph()\n# Create our input placeholders\ninput_real, input_z = model_inputs(real_dim=input_size, z_dim=z_size)\n\n# Generator network here\n#Output dim for generator is the same as real input dim\ng_model = generator(z=input_z, alpha=alpha, n_units=g_hidden_size, out_dim=input_size)\n# g_model is the generator output\n\n# Disriminator network here\nd_model_real, d_logits_real = discriminator(x=input_real, alpha=alpha, n_units=d_hidden_size)\n#Si passa il dato ottenuto dal generatore riutilizzando le variabili\nd_model_fake, d_logits_fake = discriminator(x=g_model, alpha=alpha, n_units=d_hidden_size, reuse=True)",
"Discriminator and Generator Losses\nNow we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will by sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like \npython\ntf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\nFor the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)\nThe discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.\nFinally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants to discriminator to output ones for fake images.\n\nExercise: Calculate the losses for the discriminator and the generator. There are two discriminator losses, one for real images and one for fake images. For the real image loss, use the real logits and (smoothed) labels of ones. For the fake image loss, use the fake logits with labels of all zeros. The total discriminator loss is the sum of those two losses. 
Finally, the generator loss again uses the fake logits from the discriminator, but this time the labels are all ones because the generator wants to fool the discriminator.",
"# Calculate losses\n#Cross entropy tra logits e label sempre a 1 (sono le immagini vere)\nreal_labels = tf.ones_like(d_logits_real) * (1 - smooth) #Smoothed labels\nd_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=real_labels))\n#Fake uguale ma con label a 0\nfake_labels = tf.zeros_like(d_logits_fake)\nd_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=fake_labels))\n\nd_loss = d_loss_real + d_loss_fake\n\n#G loss needs flipped labels, and needs all ones for all the generated fake images\n#La perdita parte dal risultato del discriminatore, non dall'output del generatore e deve essere girata!!\nflipped_fake_labels = tf.ones_like(d_logits_fake)\ng_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=flipped_fake_labels))",
"Optimizers\nWe want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.\nFor the generator optimizer, we only want to generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables that start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance). \nWe can do something similar with the discriminator. All the variables in the discriminator start with discriminator.\nThen, in the optimizer we pass the variable lists to the var_list keyword argument of the minimize method. This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.\n\nExercise: Below, implement the optimizers for the generator and discriminator. First you'll need to get a list of trainable variables, then split that list into two lists, one for the generator variables and another for the discriminator variables. Finally, using AdamOptimizer, create an optimizer for each network that update the network variables separately.",
"for var in tf.trainable_variables():\n if 'generator' in var.name:\n print(var.name)\n\n# Optimizers\nlearning_rate = 0.002\n\n# Get the trainable_variables, split into G and D parts\nt_vars = tf.trainable_variables()\ng_vars = [var for var in t_vars if var.name.startswith('generator')]\nd_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n\nd_train_opt = tf.train.AdamOptimizer().minimize(d_loss, var_list=d_vars)\ng_train_opt = tf.train.AdamOptimizer().minimize(g_loss, var_list=g_vars)",
"Training",
"batch_size = 100\nepochs = 100\nsamples = []\nlosses = []\nsaver = tf.train.Saver(var_list = g_vars)\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n \n # Get images, reshape and rescale to pass to D\n batch_images = batch[0].reshape((batch_size, 784))\n batch_images = batch_images*2 - 1\n \n # Sample random noise for G\n batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n \n # Run optimizers\n _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})\n _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})\n \n # At the end of each epoch, get the losses and print them out\n train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})\n train_loss_g = g_loss.eval({input_z: batch_z})\n \n print(\"Epoch {}/{}...\".format(e+1, epochs),\n \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n \"Generator Loss: {:.4f}\".format(train_loss_g)) \n # Save losses to view after training\n losses.append((train_loss_d, train_loss_g))\n \n # Sample from generator as we're training for viewing afterwards\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples = sess.run(\n generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),\n feed_dict={input_z: sample_z})\n samples.append(gen_samples)\n saver.save(sess, './checkpoints/generator.ckpt')\n\n# Save training generator samples\nwith open('train_samples.pkl', 'wb') as f:\n pkl.dump(samples, f)",
"Training loss\nHere we'll check out the training losses for the generator and discriminator.",
"%matplotlib inline\n\nimport matplotlib.pyplot as plt\n\nfig, ax = plt.subplots()\nlosses = np.array(losses)\nplt.plot(losses.T[0], label='Discriminator')\nplt.plot(losses.T[1], label='Generator')\nplt.title(\"Training Losses\")\nplt.legend()",
"Generator samples from training\nHere we can view samples of images from the generator. First we'll look at images taken while training.",
"def view_samples(epoch, samples):\n fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n \n return fig, axes\n\n# Load samples from generator taken while training\nwith open('train_samples.pkl', 'rb') as f:\n samples = pkl.load(f)",
"These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.",
"_ = view_samples(-1, samples)",
"Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!",
"rows, cols = 10, 6\nfig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)\n\nfor sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):\n for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):\n ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)",
"It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.\nSampling from the generator\nWe can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!",
"saver = tf.train.Saver(var_list=g_vars)\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples = sess.run(\n generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),\n feed_dict={input_z: sample_z})\nview_samples(0, [gen_samples])"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mdiaz236/DeepLearningFoundations
|
sentiment_network/Sentiment Classification - Mini Project 2.ipynb
|
mit
|
[
"Sentiment Classification & How To \"Frame Problems\" for a Neural Network\nby Andrew Trask\n\nTwitter: @iamtrask\nBlog: http://iamtrask.github.io\n\nWhat You Should Already Know\n\nneural networks, forward and back-propagation\nstochastic gradient descent\nmean squared error\nand train/test splits\n\nWhere to Get Help if You Need it\n\nRe-watch previous Udacity Lectures\nLeverage the recommended Course Reading Material - Grokking Deep Learning (40% Off: traskud17)\nShoot me a tweet @iamtrask\n\nTutorial Outline:\n\n\nIntro: The Importance of \"Framing a Problem\"\n\n\nCurate a Dataset\n\nDeveloping a \"Predictive Theory\"\n\nPROJECT 1: Quick Theory Validation\n\n\nTransforming Text to Numbers\n\n\nPROJECT 2: Creating the Input/Output Data\n\n\nPutting it all together in a Neural Network\n\n\nPROJECT 3: Building our Neural Network\n\n\nUnderstanding Neural Noise\n\n\nPROJECT 4: Making Learning Faster by Reducing Noise\n\n\nAnalyzing Inefficiencies in our Network\n\n\nPROJECT 5: Making our Network Train and Run Faster\n\n\nFurther Noise Reduction\n\n\nPROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary\n\n\nAnalysis: What's going on in the weights?\n\n\nLesson: Curate a Dataset",
"def pretty_print_review_and_label(i):\n print(labels[i] + \"\\t:\\t\" + reviews[i][:80] + \"...\")\n\ng = open('reviews.txt','r') # What we know!\nreviews = list(map(lambda x:x[:-1],g.readlines()))\ng.close()\n\ng = open('labels.txt','r') # What we WANT to know!\nlabels = list(map(lambda x:x[:-1].upper(),g.readlines()))\ng.close()\n\nlen(reviews)\n\nreviews[0]\n\nlabels[0]",
"Lesson: Develop a Predictive Theory",
"print(\"labels.txt \\t : \\t reviews.txt\\n\")\npretty_print_review_and_label(2137)\npretty_print_review_and_label(12816)\npretty_print_review_and_label(6267)\npretty_print_review_and_label(21934)\npretty_print_review_and_label(5297)\npretty_print_review_and_label(4998)",
"Project 1: Quick Theory Validation",
"from collections import Counter\nimport numpy as np\n\npositive_counts = Counter()\nnegative_counts = Counter()\ntotal_counts = Counter()\n\nfor i in range(len(reviews)):\n if(labels[i] == 'POSITIVE'):\n for word in reviews[i].split(\" \"):\n positive_counts[word] += 1\n total_counts[word] += 1\n else:\n for word in reviews[i].split(\" \"):\n negative_counts[word] += 1\n total_counts[word] += 1\n\npositive_counts.most_common()\n\npos_neg_ratios = Counter()\n\nfor term,cnt in list(total_counts.most_common()):\n if(cnt > 100):\n pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)\n pos_neg_ratios[term] = pos_neg_ratio\n\nfor word,ratio in pos_neg_ratios.most_common():\n if(ratio > 1):\n pos_neg_ratios[word] = np.log(ratio)\n else:\n pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))\n\n# words most frequently seen in a review with a \"POSITIVE\" label\npos_neg_ratios.most_common()\n\n# words most frequently seen in a review with a \"NEGATIVE\" label\nlist(reversed(pos_neg_ratios.most_common()))[0:30]",
"Transforming Text into Numbers",
"from IPython.display import Image\n\nreview = \"This was a horrible, terrible movie.\"\n\nImage(filename='sentiment_network.png')\n\nreview = \"The movie was excellent\"\n\nImage(filename='sentiment_network_pos.png')\n\nvocab = set(total_counts.keys())\nvocab_size = len(vocab)\nprint(vocab_size)\n\nlayer_0 = np.zeros((1, vocab_size))\nlayer_0\n\nword2Index = {}\n\nfor i, word in enumerate(vocab):\n word2Index[word] = i\nword2Index\n\ndef update_input_layer(review):\n \"\"\" Modify the global layer_0 to represent the vector form of review.\n The element at a given index of layer_0 should represent \\\n how many times the given word occurs in the review.\n Args:\n review(string) - the string of the review\n Returns:\n None\n \"\"\"\n global layer_0\n # clear out previous state, reset the layer to be all 0s\n layer_0 *= 0\n ## Your code here\n for word in review.split(\" \"):\n layer_0[0][word2Index[word]] += 1\n \nupdate_input_layer(reviews[0])\n\nlayer_0\n\ndef get_target_for_label(label):\n \"\"\"Convert a label to `0` or `1`.\n Args:\n label(string) - Either \"POSITIVE\" or \"NEGATIVE\".\n Returns:\n `0` or `1`.\n \"\"\"\n if label == 'POSITIVE':\n return 1\n else:\n return 0\n \n\nget_target_for_label(labels[0])\n\nlabels[1]\n\nget_target_for_label(labels[1])"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
trangel/Data-Science
|
reinforcement_learning/crossentropy_method.ipynb
|
gpl-3.0
|
[
"Crossentropy method\nThis notebook will teach you to solve reinforcement learning problems with crossentropy method. We'll follow-up by scaling everything up and using neural network policy.",
"# In Google Colab, uncomment this:\n# !wget https://bit.ly/2FMJP5K -O setup.py && bash setup.py\n\n# XVFB will be launched if you run on a server\nimport os\nif type(os.environ.get(\"DISPLAY\")) is not str or len(os.environ.get(\"DISPLAY\")) == 0:\n !bash ../xvfb start\n os.environ['DISPLAY'] = ':1'\n\nimport gym\nimport numpy as np\nimport pandas as pd\n\nenv = gym.make(\"Taxi-v2\")\nenv.reset()\nenv.render()\n\nn_states = env.observation_space.n\nn_actions = env.action_space.n\n\nprint(\"n_states=%i, n_actions=%i\" % (n_states, n_actions))",
"Create stochastic policy\nThis time our policy should be a probability distribution.\npolicy[s,a] = P(take action a | in state s)\nSince we still use integer state and action representations, you can use a 2-dimensional array to represent the policy.\nPlease initialize policy uniformly, that is, probabililities of all actions should be equal.",
"policy = np.ones(shape=(n_states, n_actions)) * 1 / n_actions\n\n\nassert type(policy) in (np.ndarray, np.matrix)\nassert np.allclose(policy, 1./n_actions)\nassert np.allclose(np.sum(policy, axis=1), 1)\n",
"Play the game\nJust like before, but we also record all states and actions we took.",
"def generate_session(policy, t_max=10**4):\n \"\"\"\n Play game until end or for t_max ticks.\n :param policy: an array of shape [n_states,n_actions] with action probabilities\n :returns: list of states, list of actions and sum of rewards\n \"\"\"\n states, actions = [], []\n total_reward = 0.\n\n s = env.reset()\n\n def sample_action(policy, s):\n action_p = policy[s, :].reshape(-1,)\n #highest_p_actions = np.argwhere(action_p == np.amax(action_p)).reshape(-1,)\n #non_zero_p_actions = np.argwhere(action_p > 0).reshape(-1,)\n #random_choice = np.random.choice(highest_p_actions)\n #random_choice = np.random.choice(non_zero_p_actions)\n random_choice = np.random.choice(np.arange(len(action_p)), p=action_p)\n return random_choice\n \n for t in range(t_max):\n\n a = sample_action(policy, s) #<sample action from policy(hint: use np.random.choice) >\n\n new_s, r, done, info = env.step(a)\n\n # Record state, action and add up reward to states,actions and total_reward accordingly.\n states.append(s)\n actions.append(a)\n total_reward += r\n\n s = new_s\n if done:\n break\n return states, actions, total_reward\n\ns, a, r = generate_session(policy)\nassert type(s) == type(a) == list\nassert len(s) == len(a)\nassert type(r) in [float, np.float]\n\n# let's see the initial reward distribution\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nsample_rewards = [generate_session(policy, t_max=1000)[-1] for _ in range(200)]\n\nplt.hist(sample_rewards, bins=20)\nplt.vlines([np.percentile(sample_rewards, 50)], [0], [100], label=\"50'th percentile\", color='green')\nplt.vlines([np.percentile(sample_rewards, 90)], [0], [100], label=\"90'th percentile\", color='red')\nplt.legend()",
"Crossentropy method steps",
"def select_elites(states_batch, actions_batch, rewards_batch, percentile=50):\n \"\"\"\n Select states and actions from games that have rewards >= percentile\n :param states_batch: list of lists of states, states_batch[session_i][t]\n :param actions_batch: list of lists of actions, actions_batch[session_i][t]\n :param rewards_batch: list of rewards, rewards_batch[session_i]\n\n :returns: elite_states,elite_actions, both 1D lists of states and respective actions from elite sessions\n\n Please return elite states and actions in their original order \n [i.e. sorted by session number and timestep within session]\n\n If you are confused, see examples below. Please don't assume that states are integers\n (they will become different later).\n \"\"\"\n #<Compute minimum reward for elite sessions. Hint: use np.percentile >\n reward_threshold = np.percentile(rewards_batch, percentile)\n\n #elite_states = <your code here >\n #elite_actions = <your code here >\n elite_states = []\n elite_actions = []\n for i, reward in enumerate(rewards_batch):\n if reward >= reward_threshold:\n elite_states = elite_states + states_batch[i]\n elite_actions = elite_actions + actions_batch[i]\n\n return elite_states, elite_actions\n\nstates_batch = [\n [1, 2, 3], # game1\n [4, 2, 0, 2], # game2\n [3, 1], # game3\n]\n\nactions_batch = [\n [0, 2, 4], # game1\n [3, 2, 0, 1], # game2\n [3, 3], # game3\n]\nrewards_batch = [\n 3, # game1\n 4, # game2\n 5, # game3\n]\n\ntest_result_0 = select_elites(\n states_batch, actions_batch, rewards_batch, percentile=0)\ntest_result_40 = select_elites(\n states_batch, actions_batch, rewards_batch, percentile=30)\ntest_result_90 = select_elites(\n states_batch, actions_batch, rewards_batch, percentile=90)\ntest_result_100 = select_elites(\n states_batch, actions_batch, rewards_batch, percentile=100)\n\nassert np.all(test_result_0[0] == [1, 2, 3, 4, 2, 0, 2, 3, 1]) \\\n and np.all(test_result_0[1] == [0, 2, 4, 3, 2, 0, 1, 3, 3]),\\\n \"For percentile 0 you should 
return all states and actions in chronological order\"\nassert np.all(test_result_40[0] == [4, 2, 0, 2, 3, 1]) and \\\n np.all(test_result_40[1] == [3, 2, 0, 1, 3, 3]),\\\n \"For percentile 30 you should only select states/actions from two first\"\nassert np.all(test_result_90[0] == [3, 1]) and \\\n np.all(test_result_90[1] == [3, 3]),\\\n \"For percentile 90 you should only select states/actions from one game\"\nassert np.all(test_result_100[0] == [3, 1]) and\\\n np.all(test_result_100[1] == [3, 3]),\\\n \"Please make sure you use >=, not >. Also double-check how you compute percentile.\"\nprint(\"Ok!\")\n\ndef update_policy(elite_states, elite_actions):\n \"\"\"\n Given old policy and a list of elite states/actions from select_elites,\n return new updated policy where each action probability is proportional to\n\n policy[s_i,a_i] ~ #[occurences of si and ai in elite states/actions]\n\n Don't forget to normalize policy to get valid probabilities and handle 0/0 case.\n In case you never visited a state, set probabilities for all actions to 1./n_actions\n\n :param elite_states: 1D list of states from elite sessions\n :param elite_actions: 1D list of actions from elite sessions\n\n \"\"\"\n\n new_policy = np.zeros([n_states, n_actions])\n\n \n #<Your code here: update probabilities for actions given elite states & actions >\n # Don't forget to set 1/n_actions for all actions in unvisited states.\n\n for state, action in zip(elite_states, elite_actions):\n new_policy[state, action] = new_policy[state, action] + 1\n for state in range(n_states):\n s = np.sum(new_policy[state, :])\n if s == 0:\n new_policy[state, :] = 1. / n_actions\n else:\n new_policy[state, :] = new_policy[state, :] / s\n \n return new_policy\n\nelite_states = [1, 2, 3, 4, 2, 0, 2, 3, 1]\nelite_actions = [0, 2, 4, 3, 2, 0, 1, 3, 3]\n\nnew_policy = update_policy(elite_states, elite_actions)\n\nassert np.isfinite(new_policy).all(\n), \"Your new policy contains NaNs or +-inf. 
Make sure you don't divide by zero.\"\nassert np.all(\n new_policy >= 0), \"Your new policy can't have negative action probabilities\"\nassert np.allclose(new_policy.sum(\n axis=-1), 1), \"Your new policy should be a valid probability distribution over actions\"\nreference_answer = np.array([\n [1., 0., 0., 0., 0.],\n [0.5, 0., 0., 0.5, 0.],\n [0., 0.33333333, 0.66666667, 0., 0.],\n [0., 0., 0., 0.5, 0.5]])\nassert np.allclose(new_policy[:4, :5], reference_answer)\nprint(\"Ok!\")",
"Training loop\nGenerate sessions, select N best and fit to those.",
"from IPython.display import clear_output\n\ndef show_progress(rewards_batch, log, percentile, reward_range=[-990, +10]):\n \"\"\"\n A convenience function that displays training progress. \n No cool math here, just charts.\n \"\"\"\n\n mean_reward = np.mean(rewards_batch)\n threshold = np.percentile(rewards_batch, percentile)\n log.append([mean_reward, threshold])\n\n clear_output(True)\n print(\"mean reward = %.3f, threshold=%.3f\" % (mean_reward, threshold))\n plt.figure(figsize=[8, 4])\n plt.subplot(1, 2, 1)\n plt.plot(list(zip(*log))[0], label='Mean rewards')\n plt.plot(list(zip(*log))[1], label='Reward thresholds')\n plt.legend()\n plt.grid()\n\n plt.subplot(1, 2, 2)\n plt.hist(rewards_batch, range=reward_range)\n plt.vlines([np.percentile(rewards_batch, percentile)],\n [0], [100], label=\"percentile\", color='red')\n plt.legend()\n plt.grid()\n\n plt.show()\n\n# reset policy just in case\npolicy = np.ones([n_states, n_actions]) / n_actions\n\nn_sessions = 250 # sample this many sessions\npercentile = 30 # take this percent of session with highest rewards\nlearning_rate = 0.5 # add this thing to all counts for stability\n\nlog = []\n\nfor i in range(100):\n\n %time sessions = [generate_session(policy) for x in range(n_sessions)] #[ < generate a list of n_sessions new sessions > ]\n \n states_batch, actions_batch, rewards_batch = zip(*sessions)\n\n elite_states, elite_actions = select_elites(states_batch, actions_batch, rewards_batch, percentile=percentile) #<select elite states/actions >\n\n new_policy = update_policy(elite_states, elite_actions) #<compute new policy >\n\n policy = learning_rate * new_policy + (1 - learning_rate) * policy\n\n # display results on chart\n show_progress(rewards_batch, log, percentile)",
"Reflecting on results\nYou may have noticed that the taxi problem quickly converges from <-1000 to a near-optimal score and then descends back into -50/-100. This is in part because the environment has some innate randomness. Namely, the starting points of passenger/driver change from episode to episode.\nIn case CEM failed to learn how to win from one distinct starting point, it will simply discard it because no sessions from that starting point will make it into the \"elites\".\nTo mitigate that problem, you can either reduce the threshold for elite sessions (duct tape way) or change the way you evaluate strategy (theoretically correct way). You can first sample an action for every possible state and then evaluate this choice of actions by running several games and averaging rewards.\nSubmit to coursera",
"from submit import submit_taxi\nsubmit_taxi(generate_session, policy, 'tonatiuh_rangel@hotmail.com', '7uvgN7bBzpJzVw9f')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
bicepjai/Deep-Survey-Text-Classification
|
data_prep/word_vectors.ipynb
|
mit
|
[
"Building word vectors\nSetup",
"import sys\nimport os\n\nimport re\nimport collections\nimport itertools\nimport bcolz\nimport pickle\nsys.path.append('../lib')\n\nimport gc\nimport random\nimport smart_open\nimport h5py\nimport csv\nimport tensorflow as tf\nimport gensim\n\nimport datetime as dt\nfrom tqdm import tqdm_notebook as tqdm\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nrandom_state_number = 967898\n\nfrom tensorflow.python.client import device_lib\ndef get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'GPU']\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\nsess = tf.Session(config=config)\nget_available_gpus()\n\n%pylab\n%matplotlib inline\n%load_ext autoreload\n%autoreload\n\npd.options.mode.chained_assignment = None\npd.options.display.max_columns = 999\ncolor = sns.color_palette()",
"Data\nload corpus vocab and wordidx",
"corpus_vocab_list, corpus_vocab_wordidx = None, None\nwith open('processed/stage1/vocab_words_wordidx.pkl', 'rb') as f:\n (corpus_vocab_list, corpus_wordidx) = pickle.load(f)\nprint(len(corpus_vocab_list), len(corpus_wordidx))",
"load data",
"store = pd.HDFStore('processed/stage1/data_frames.h5')\ntrain_df = store['train_df']\ntest_df = store['test_df']",
"Word Vectors Pre Trained\ncollecting biolab words",
"from gensim.models.keyedvectors import KeyedVectors\nbiolab_keyed_vectors_pubmed_pmc_wiki = KeyedVectors.load_word2vec_format('external/biolab_wvs/wikipedia-pubmed-and-PMC-w2v.bin', binary=True)\n\nbiolab_words_pubmed_pmc_wiki = biolab_keyed_vectors_pubmed_pmc_wiki.vocab.keys()\nbiolab_words = set(biolab_words_pubmed_pmc_wiki)\nlen(biolab_words)\n\nvocab_biolab = set(biolab_words) & set(vocab_words)\nprint (len(vocab_biolab))\nvocab_biolab\n\nvocab_not_in_biolab =set(vocab_words) - set(biolab_words)\nprint(len(vocab_not_in_biolab))\nvocab_not_in_biolab",
"dont need word to id dict since this is indexed with words\nusing biolab words for missing corpus words",
"undesirable_ascii_characters = list(range(32))\nundesirable_ascii_characters.remove(10) #keep new line since this might be used for sentence tokenizer\nundesirable_charmap = dict.fromkeys(undesirable_ascii_characters)\n\nfrom nltk import word_tokenize\nfrom utils import custom_word_tokenizer, apply_custom_regx\n\ncustom_tokenized_biolab_pubmed_pmc_wiki_wv = {}\nfor word in vocab_biolab:\n vector = biolab_keyed_vectors_pubmed_pmc_wiki.word_vec(word)\n custom_tokenized_biolab_pubmed_pmc_wiki_wv[word.lower()] = vector\n word = word.lower().encode('ascii', 'ignore').decode('utf-8', 'ignore')\n word = str(word).translate(undesirable_charmap)\n word = apply_custom_regx(word)\n word = word.replace('\\\\t', '')\n for part in word_tokenize(word):\n if part in custom_tokenized_biolab_pubmed_pmc_wiki_wv:\n custom_tokenized_biolab_pubmed_pmc_wiki_wv[part] += vector\n custom_tokenized_biolab_pubmed_pmc_wiki_wv[part] /= 2\n\nlen(custom_tokenized_biolab_pubmed_pmc_wiki_wv)",
"for tensorboard",
"tb_vocab_size=5000\n\ntb_vocab_biolab = list(vocab_biolab)[:tb_vocab_size]\nwith open(\"view_wvs_tb/tb_vocab.tsv\", \"w\") as fp:\n wr = csv.writer(fp, delimiter='\\n')\n wr.writerow(tb_vocab_biolab)\n\ntb_word_vectors = np.random.randn(tb_vocab_size, 200)\nfor i,word in enumerate(tb_vocab_biolab):\n tb_word_vectors[i] = custom_tokenized_biolab_pubmed_pmc_wiki_wv[word]\n\n%autoreload\nfrom utils import visualize_embeddings_in_tensorboard\nvisualize_this_embedding = tb_word_vectors\nprint(visualize_this_embedding.shape)\nmetadata_path = \"/home/bicepjai/Projects/dsotc/data_prep/view_wvs_tb/tb_vocab.tsv\"\nvisualize_embeddings_in_tensorboard(visualize_this_embedding, metadata_path, \"/home/bicepjai/Projects/dsotc/data_prep/view_wvs_tb\")\n\ndel tb_word_vectors",
"building word vectors of 200d for model",
"corpus_word_vectors = np.random.randn(len(vocab_words), 200)\ncorpus_word_vectors.shape",
"fill in biolab vectors available",
"for word in vocab_biolab:\n dataset_corpus_word_index = vocab_wordidx[word]\n corpus_word_vectors[dataset_corpus_word_index] = custom_tokenized_biolab_pubmed_pmc_wiki_wv[word]",
"total words not updated with training from biolab",
"words_not_updated = set(vocab_words) - vocab_biolab\nlen(words_not_updated)\n\nwords_not_updated\n\nnp.save(\"processed/stage1/biolab_updated_wvs.npy\", corpus_word_vectors)",
"gcloud tensorboard serving",
"dataset_corpus_words_list = np.load(\"dataset_corpus_words_list.npy\")\ncorpus_word_vectors = np.load(\"corpus_word_vectors.npy\")\n\ntb_vocab_size = 10000\n\nlocal_tb_dir = \"/home/bicepjai/Projects/ml-compete/kaggle/mskrct/data_prep_2_ft/model_wv_visualize/gcloud/\"\n\nwith open(local_tb_dir+\"/vocab.tsv\", \"wb\") as fp:\n wr = csv.writer(fp, delimiter='\\n')\n wr.writerow(dataset_corpus_words_list[:tb_vocab_size])",
"for http://projector.tensorflow.org/ vectors need to be in tsv form",
"# np.savetxt(\"model_wv_visualize/word_vectors.tsv\",corpus_word_vectors[:tb_vocab_size], delimiter='\\t')",
"write to checkpoint file",
"!rm $local_tb_dir/checkpoint\n!ls $local_tb_dir\n\nfrom word2vec import visualize_embeddings_in_tensorboard\nvisualize_this_embedding = corpus_word_vectors[:tb_vocab_size]\nprint visualize_this_embedding.shape\n# path for gcloud tensorboard\nmetadata_path = \"/home/bicepjai/projects/tb_visual/vocab.tsv\"\n# metadata_path = \"/home/bicepjai/Projects/ml-compete/kaggle/mskrct/data_prep_2_ft/model_wv_visualize/vocab.tsv\"\nvisualize_embeddings_in_tensorboard(visualize_this_embedding, metadata_path, local_tb_dir)\n\ncheckpoint_txt = \"model_checkpoint_path: \\\"/home/bicepjai/projects/tb_visual/visual_embed.ckpt-1\\\"\\n\\\nall_model_checkpoint_paths: \\\"/home/bicepjai/projects/tb_visual/visual_embed.ckpt-1\\\"\"\nwith open(local_tb_dir+\"/checkpoint\",\"w\") as f:\n f.seek(0)\n f.truncate()\n f.write(checkpoint_txt)",
"FastText Vectors\nfasttext commands used\nfasttext skipgram -minCount 1 -dim 200 -epoch 10 -input corpus_text_for_fast_text.txt -output ft_wvs_200d_10e\nfasttext cbow -minCount 1 -dim 200 -epoch 10 -input corpus_text_for_fast_text.txt -output ft_wvs_200d_10e\nreading ft vectors",
"fasttext_vec_file = \"processed/stage2/pretrained_word_vectors/ft_sg_200d_10e.vec\"\n\nft_lines = None\nwith open(fasttext_vec_file,\"r\") as f:\n ft_lines = f.readlines()\n\nprint(ft_lines[0])\nprint(type(ft_lines), len(ft_lines))\nft_shape = tuple([int(i.strip()) for i in ft_lines[0].split()])\nft_shape\n\nprint(len(ft_lines[1].split()))\nft_lines[1]\n\nft_vocab_size=ft_shape[0]\nft_vocab_size\n\nft_word_vectors = np.random.randn(ft_vocab_size, ft_shape[1])\nft_words = []\n\nfor i, line in enumerate(ft_lines[1:]):\n str_list =line.split()\n ft_words.append(str_list[0].strip())\n vec = np.array([np.float(f) for f in str_list[1:]])\n ft_word_vectors[i] = vec\n\nft_word_vectors.shape\n\na = list(ft_words)\na.sort(key=len, reverse=True)\nprint(a[:10])\ndel a\n\nft_wordidx = {w:i for i,w in enumerate(ft_words)}\nft_vocab_size, len(ft_wordidx)\n\nlen(set(vocab_words) - set(ft_words))\n\nset(vocab_words) - set(ft_words)\n\n%autoreload\nimport global_utils\nfasttext_vec_file=\"/home/bicepjai/Projects/dsotc/data_prep/processed/stage1/pretrained_word_vectors/ft_cbow_200d_20e.vec\"\nwvs = global_utils.get_corpus_wvs_from_ft(fasttext_vec_file, 200, vocab_words)\nwvs.shape",
"saving all trained fast text vectors",
"%ll /home/bicepjai/Projects/dsotc/data_prep/processed/stage1/pretrained_word_vectors\n\nlen(vocab_words)\n\n%autoreload\nimport global_utils\nft_vector_files = [\n (100,\"ft_cbow_100d_20e\"),(200,\"ft_cbow_200d_20e\"),(200,\"ft_cbow_300d_20e\"),\n (100,\"ft_sg_100d_20e\"),(200,\"ft_sg_200d_20e\"),(200,\"ft_sg_300d_20e\"),\n (100,\"ft_cbow_100d_50e\"),(200,\"ft_cbow_200d_50e\"),(200,\"ft_cbow_300d_50e\"),\n (100,\"ft_sg_100d_50e\"),(200,\"ft_sg_200d_50e\"),(200,\"ft_sg_300d_50e\"),\n (100,\"ft_cbow_100d_100e\"),(200,\"ft_cbow_200d_100e\"),(200,\"ft_cbow_300d_100e\"),\n (100,\"ft_sg_100d_100e\"),(200,\"ft_sg_200d_100e\"),(200,\"ft_sg_300d_100e\")\n ]\n\nfor dim_file_name in ft_vector_files:\n file_path = \"/home/bicepjai/Projects/dsotc/data_prep/processed/stage1/pretrained_word_vectors/\"+dim_file_name[1]+\".vec\"\n dim = dim_file_name[0]\n if not os.path.exists(file_path):\n print(\"file doesnt exist\",file_path)\n continue\n ft_vec = global_utils.get_corpus_wvs_from_ft(file_path, dim, vocab_words)\n print(ft_vector_file,ft_vec.shape)\n np.save(\"processed/stage1/pretrained_word_vectors/\"+dim_file_name[1]+\".npy\", ft_vec)",
"Viewing word vectors",
"%autoreload\nimport global_utils\n\nWORD_EMB_SIZE=200\nft_file_path = \"/home/bicepjai/Projects/Deep-Survey-Text-Classification/data_prep/processed/stage1/pretrained_word_vectors/ft_sg_200d_50e.vec\"\ntrained_embeddings = global_utils.get_embeddings_from_ft(ft_file_path, WORD_EMB_SIZE, corpus_vocab_list)\ntrained_embeddings.shape\n\ntb_vocab_size=5000\n\ntb_vocab_biolab = list(trained_embeddings)[:tb_vocab_size]\nwith open(\"view_wvs_tb/tb_vocab.tsv\", \"w\") as fp:\n wr = csv.writer(fp, delimiter='\\n')\n wr.writerow(corpus_vocab_list)\n\ntb_word_vectors = np.random.randn(tb_vocab_size, 200)\nfor i,word in enumerate(tb_vocab_biolab):\n tb_word_vectors[i] = trained_embeddings[i]\n\n%autoreload\nfrom utils import visualize_embeddings_in_tensorboard\nvisualize_this_embedding = tb_word_vectors\nprint(visualize_this_embedding.shape)\nmetadata_path = \"/home/bicepjai/Projects/Deep-Survey-Text-Classification/data_prep/view_wvs_tb/tb_vocab.tsv\"\nvisualize_embeddings_in_tensorboard(visualize_this_embedding, metadata_path, \"/home/bicepjai/Projects/Deep-Survey-Text-Classification/data_prep/view_wvs_tb\")"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mne-tools/mne-tools.github.io
|
0.12/_downloads/plot_resample.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"Resampling data\nWhen performing experiments where timing is critical, a signal with a high\nsampling rate is desired. However, having a signal with a much higher sampling\nrate than is necessary needlessly consumes memory and slows down computations\noperating on the data.\nThis example downsamples from 600 Hz to 100 Hz. This achieves a 6-fold\nreduction in data size, at the cost of an equal loss of temporal resolution.",
"# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>\n#\n# License: BSD (3-clause)\n#\nfrom __future__ import print_function\n\nfrom matplotlib import pyplot as plt\n\nimport mne\nfrom mne.datasets import sample",
"Setting up data paths and loading raw data (skip some data for speed)",
"data_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'\nraw = mne.io.read_raw_fif(raw_fname).crop(120, 240).load_data()",
"Since downsampling reduces the timing precision of events, we recommend\nfirst extracting epochs and downsampling the Epochs object:",
"events = mne.find_events(raw)\nepochs = mne.Epochs(raw, events, event_id=2, tmin=-0.1, tmax=0.8, preload=True)\n\n# Downsample to 100 Hz\nprint('Original sampling rate:', epochs.info['sfreq'], 'Hz')\nepochs_resampled = epochs.copy().resample(100, npad='auto')\nprint('New sampling rate:', epochs_resampled.info['sfreq'], 'Hz')\n\n# Plot a piece of data to see the effects of downsampling\nplt.figure(figsize=(7, 3))\n\nn_samples_to_plot = int(0.5 * epochs.info['sfreq']) # plot 0.5 seconds of data\nplt.plot(epochs.times[:n_samples_to_plot],\n epochs.get_data()[0, 0, :n_samples_to_plot], color='black')\n\nn_samples_to_plot = int(0.5 * epochs_resampled.info['sfreq'])\nplt.plot(epochs_resampled.times[:n_samples_to_plot],\n epochs_resampled.get_data()[0, 0, :n_samples_to_plot],\n '-o', color='red')\n\nplt.xlabel('time (s)')\nplt.legend(['original', 'downsampled'], loc='best')\nplt.title('Effect of downsampling')\nmne.viz.tight_layout()",
"When resampling epochs is unwanted or impossible, for example when the data\ndoesn't fit into memory or your analysis pipeline doesn't involve epochs at\nall, the alternative approach is to resample the continuous data. This\ncan also be done on non-preloaded data.",
"# Resample to 300 Hz\nraw_resampled = raw.copy().resample(300, npad='auto')",
"Because resampling also affects the stim channels, some trigger onsets might\nbe lost in this case. While MNE attempts to downsample the stim channels in\nan intelligent manner to avoid this, the recommended approach is to find\nevents on the original data before downsampling.",
"print('Number of events before resampling:', len(mne.find_events(raw)))\n\n# Resample to 100 Hz (generates warning)\nraw_resampled = raw.copy().resample(100, npad='auto')\nprint('Number of events after resampling:',\n len(mne.find_events(raw_resampled)))\n\n# To avoid losing events, jointly resample the data and event matrix\nevents = mne.find_events(raw)\nraw_resampled, events_resampled = raw.copy().resample(\n 100, npad='auto', events=events)\nprint('Number of events after resampling:', len(events_resampled))"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
geektoni/shogun
|
doc/ipython-notebooks/converter/Tapkee.ipynb
|
bsd-3-clause
|
[
"Dimensionality Reduction with the Shogun Machine Learning Toolbox\nBy Sergey Lisitsyn (lisitsyn) and Fernando J. Iglesias Garcia (iglesias).\nThis notebook illustrates <a href=\"http://en.wikipedia.org/wiki/Unsupervised_learning\">unsupervised learning</a> using the suite of dimensionality reduction algorithms available in Shogun. Shogun provides access to all these algorithms using Tapkee, a C++ library especialized in <a href=\"http://en.wikipedia.org/wiki/Dimensionality_reduction\">dimensionality reduction</a>.\nHands-on introduction to dimension reduction\nFirst of all, let us start right away by showing what the purpose of dimensionality reduction actually is. To this end, we will begin by creating a function that provides us with some data:",
"import numpy as np\nimport os\nSHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')\n\ndef generate_data(curve_type, num_points=1000):\n\tif curve_type=='swissroll':\n\t\ttt = np.array((3*np.pi/2)*(1+2*np.random.rand(num_points)))\n\t\theight = np.array((np.random.rand(num_points)-0.5))\n\t\tX = np.array([tt*np.cos(tt), 10*height, tt*np.sin(tt)])\n\t\treturn X,tt\n\tif curve_type=='scurve':\n\t\ttt = np.array((3*np.pi*(np.random.rand(num_points)-0.5)))\n\t\theight = np.array((np.random.rand(num_points)-0.5))\n\t\tX = np.array([np.sin(tt), 10*height, np.sign(tt)*(np.cos(tt)-1)])\n\t\treturn X,tt\n\tif curve_type=='helix':\n\t\ttt = np.linspace(1, num_points, num_points).T / num_points\n\t\ttt = tt*2*np.pi\n\t\tX = np.r_[[(2+np.cos(8*tt))*np.cos(tt)],\n\t\t [(2+np.cos(8*tt))*np.sin(tt)],\n\t\t [np.sin(8*tt)]]\n\t\treturn X,tt",
"The function above can be used to generate three-dimensional datasets with the shape of a Swiss roll, the letter S, or an helix. These are three examples of datasets which have been extensively used to compare different dimension reduction algorithms. As an illustrative exercise of what dimensionality reduction can do, we will use a few of the algorithms available in Shogun to embed this data into a two-dimensional space. This is essentially the dimension reduction process as we reduce the number of features from 3 to 2. The question that arises is: what principle should we use to keep some important relations between datapoints? In fact, different algorithms imply different criteria to answer this question.\nJust to start, lets pick some algorithm and one of the data sets, for example lets see what embedding of the Swissroll is produced by the Isomap algorithm. The Isomap algorithm is basically a slightly modified Multidimensional Scaling (MDS) algorithm which finds embedding as a solution of the following optimization problem:\n$$\n\\min_{x'_1, x'_2, \\dots} \\sum_i \\sum_j \\| d'(x'_i, x'_j) - d(x_i, x_j)\\|^2,\n$$\nwith defined $x_1, x_2, \\dots \\in X~~$ and unknown variables $x_1, x_2, \\dots \\in X'~~$ while $\\text{dim}(X') < \\text{dim}(X)~~~$,\n$d: X \\times X \\to \\mathbb{R}~~$ and $d': X' \\times X' \\to \\mathbb{R}~~$ are defined as arbitrary distance functions (for example Euclidean). \nSpeaking less math, the MDS algorithm finds an embedding that preserves pairwise distances between points as much as it is possible. The Isomap algorithm changes quite small detail: the distance - instead of using local pairwise relationships it takes global factor into the account with shortest path on the neighborhood graph (so-called geodesic distance). The neighborhood graph is defined as graph with datapoints as nodes and weighted edges (with weight equal to the distance between points). 
The edge between point $x_i~$ and $x_j~$ exists if and only if $x_j~$ is in $k~$ nearest neighbors of $x_i$. Later we will see that that 'global factor' changes the game for the swissroll dataset.\nHowever, first we prepare a small function to plot any of the original data sets together with its embedding.",
"%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n%matplotlib inline\n\ndef plot(data, embedded_data, colors='m'):\n\tfig = plt.figure()\n\tfig.set_facecolor('white')\n\tax = fig.add_subplot(121,projection='3d')\n\tax.scatter(data[0],data[1],data[2],c=colors,cmap=plt.cm.Spectral)\n\tplt.axis('tight'); plt.axis('off')\n\tax = fig.add_subplot(122)\n\tax.scatter(embedded_data[0],embedded_data[1],c=colors,cmap=plt.cm.Spectral)\n\tplt.axis('tight'); plt.axis('off')\n\tplt.show()\n\nimport shogun as sg\n\n# wrap data into Shogun features\ndata, colors = generate_data('swissroll')\nfeats = sg.create_features(data)\n\n# create instance of Isomap converter and set number of neighbours used in kNN search to 20\nisomap = sg.create_transformer('Isomap', target_dim=2, k=20)\n\n# create instance of Multidimensional Scaling converter and configure it\nmds = sg.create_transformer('MultidimensionalScaling', target_dim=2)\n\n# embed Swiss roll data\nembedded_data_mds = mds.transform(feats).get('feature_matrix')\nembedded_data_isomap = isomap.transform(feats).get('feature_matrix')\n\nplot(data, embedded_data_mds, colors)\nplot(data, embedded_data_isomap, colors)",
"As it can be seen from the figure above, Isomap has been able to \"unroll\" the data, reducing its dimension from three to two. At the same time, points with similar colours in the input space are close to points with similar colours in the output space. This is, a new representation of the data has been obtained; this new representation maintains the properties of the original data, while it reduces the amount of information required to represent it. Note that the fact the embedding of the Swiss roll looks good in two dimensions stems from the intrinsic dimension of the input data. Although the original data is in a three-dimensional space, its intrinsic dimension is lower, since the only degree of freedom are the polar angle and distance from the centre, or height. \nFinally, we use yet another method, Stochastic Proximity Embedding (SPE) to embed the helix:",
"# wrap data into Shogun features\ndata, colors = generate_data('helix')\nfeatures = sg.create_features(data)\n\n# create MDS instance\nconverter = sg.create_transformer('StochasticProximityEmbedding', target_dim=2)\n\n# embed helix data\nembedded_features = converter.transform(features)\nembedded_data = embedded_features.get('feature_matrix')\n\nplot(data, embedded_data, colors)",
"References\n\nLisitsyn, S., Widmer, C., Iglesias Garcia, F. J. Tapkee: An Efficient Dimension Reduction Library. (Link to paper in JMLR.)\nTenenbaum, J. B., de Silva, V. and Langford, J. B. A Global Geometric Framework for Nonlinear Dimensionality Reduction. (Link to Isomap's website.)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
Luke035/dlnd-lessons
|
into-to-tflearn/TFLearn_Sentiment_Analysis_Solution.ipynb
|
mit
|
[
"Sentiment analysis with TFLearn\nIn this notebook, we'll continue Andrew Trask's work by building a network for sentiment analysis on the movie review data. Instead of a network written with Numpy, we'll be using TFLearn, a high-level library built on top of TensorFlow. TFLearn makes it simpler to build networks just by defining the layers. It takes care of most of the details for you.\nWe'll start off by importing all the modules we'll need, then load and prepare the data.",
"import pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport tflearn\nfrom tflearn.data_utils import to_categorical",
"Preparing the data\nFollowing along with Andrew, our goal here is to convert our reviews into word vectors. The word vectors will have elements representing words in the total vocabulary. If the second position represents the word 'the', for each review we'll count up the number of times 'the' appears in the text and set the second position to that count. I'll show you examples as we build the input data from the reviews data. Check out Andrew's notebook and video for more about this.\nRead the data\nUse the pandas library to read the reviews and postive/negative labels from comma-separated files. The data we're using has already been preprocessed a bit and we know it uses only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like The, the, and THE, all the same way.",
"reviews = pd.read_csv('reviews.txt', header=None)\nlabels = pd.read_csv('labels.txt', header=None)",
"Counting word frequency\nTo start off we'll need to count how often each word appears in the data. We'll use this count to create a vocabulary we'll use to encode the review data. This resulting count is known as a bag of words. We'll use it to select our vocabulary and build the word vectors. You should have seen how to do this in Andrew's lesson. Try to implement it here using the Counter class.\n\nExercise: Create the bag of words from the reviews data and assign it to total_counts. The reviews are stores in the reviews Pandas DataFrame. If you want the reviews as a Numpy array, use reviews.values. You can iterate through the rows in the DataFrame with for idx, row in reviews.iterrows(): (documentation). When you break up the reviews into words, use .split(' ') instead of .split() so your results match ours.",
"from collections import Counter\ntotal_counts = Counter()\nfor _, row in reviews.iterrows():\n total_counts.update(row[0].split(' '))\nprint(\"Total words in data set: \", len(total_counts))",
"Let's keep the first 10000 most frequent words. As Andrew noted, most of the words in the vocabulary are rarely used so they will have little effect on our predictions. Below, we'll sort vocab by the count value and keep the 10000 most frequent words.",
"vocab = sorted(total_counts, key=total_counts.get, reverse=True)[:10000]\nprint(vocab[:60])",
"What's the last word in our vocabulary? We can use this to judge if 10000 is too few. If the last word is pretty common, we probably need to keep more words.",
"print(vocab[-1], ': ', total_counts[vocab[-1]])",
"The last word in our vocabulary shows up in 30 reviews out of 25000. I think it's fair to say this is a tiny proportion of reviews. We are probably fine with this number of words.\nNote: When you run, you may see a different word from the one shown above, but it will also have the value 30. That's because there are many words tied for that number of counts, and the Counter class does not guarantee which one will be returned in the case of a tie.\nNow for each review in the data, we'll make a word vector. First we need to make a mapping of word to index, pretty easy to do with a dictionary comprehension.\n\nExercise: Create a dictionary called word2idx that maps each word in the vocabulary to an index. The first word in vocab has index 0, the second word has index 1, and so on.",
"word2idx = {word: i for i, word in enumerate(vocab)}",
"Text to vector function\nNow we can write a function that converts a some text to a word vector. The function will take a string of words as input and return a vector with the words counted up. Here's the general algorithm to do this:\n\nInitialize the word vector with np.zeros, it should be the length of the vocabulary.\nSplit the input string of text into a list of words with .split(' '). Again, if you call .split() instead, you'll get slightly different results than what we show here.\nFor each word in that list, increment the element in the index associated with that word, which you get from word2idx.\n\nNote: Since all words aren't in the vocab dictionary, you'll get a key error if you run into one of those words. You can use the .get method of the word2idx dictionary to specify a default returned value when you make a key error. For example, word2idx.get(word, None) returns None if word doesn't exist in the dictionary.",
"def text_to_vector(text):\n word_vector = np.zeros(len(vocab), dtype=np.int_)\n for word in text.split(' '):\n idx = word2idx.get(word, None)\n if idx is None:\n continue\n else:\n word_vector[idx] += 1\n return np.array(word_vector)",
"If you do this right, the following code should return\n```\ntext_to_vector('The tea is for a party to celebrate '\n 'the movie so she has no time for a cake')[:65]\narray([0, 1, 0, 0, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0])\n```",
"text_to_vector('The tea is for a party to celebrate '\n 'the movie so she has no time for a cake')[:65]",
"Now, run through our entire review data set and convert each review to a word vector.",
"word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_)\nfor ii, (_, text) in enumerate(reviews.iterrows()):\n word_vectors[ii] = text_to_vector(text[0])\n\n# Printing out the first 5 word vectors\nword_vectors[:5, :23]",
"Train, Validation, Test sets\nNow that we have the word_vectors, we're ready to split our data into train, validation, and test sets. Remember that we train on the train data, use the validation data to set the hyperparameters, and at the very end measure the network performance on the test data. Here we're using the function to_categorical from TFLearn to reshape the target data so that we'll have two output units and can classify with a softmax activation function. We actually won't be creating the validation set here, TFLearn will do that for us later.",
"Y = (labels=='positive').astype(np.int_)\nrecords = len(labels)\n\nshuffle = np.arange(records)\nnp.random.shuffle(shuffle)\ntest_fraction = 0.9\n\ntrain_split, test_split = shuffle[:int(records*test_fraction)], shuffle[int(records*test_fraction):]\ntrainX, trainY = word_vectors[train_split,:], to_categorical(Y.values[train_split], 2)\ntestX, testY = word_vectors[test_split,:], to_categorical(Y.values[test_split], 2)\n\ntrainY",
"Building the network\nTFLearn lets you build the network by defining the layers. \nInput layer\nFor the input layer, you just need to tell it how many units you have. For example, \nnet = tflearn.input_data([None, 100])\nwould create a network with 100 input units. The first element in the list, None in this case, sets the batch size. Setting it to None here leaves it at the default batch size.\nThe number of inputs to your network needs to match the size of your data. For this example, we're using 10000 element long vectors to encode our input data, so we need 10000 input units.\nAdding layers\nTo add new hidden layers, you use \nnet = tflearn.fully_connected(net, n_units, activation='ReLU')\nThis adds a fully connected layer where every unit in the previous layer is connected to every unit in this layer. The first argument net is the network you created in the tflearn.input_data call. It's telling the network to use the output of the previous layer as the input to this layer. You can set the number of units in the layer with n_units, and set the activation function with the activation keyword. You can keep adding layers to your network by repeated calling net = tflearn.fully_connected(net, n_units).\nOutput layer\nThe last layer you add is used as the output layer. There for, you need to set the number of units to match the target data. In this case we are predicting two classes, positive or negative sentiment. You also need to set the activation function so it's appropriate for your model. Again, we're trying to predict if some input data belongs to one of two classes, so we should use softmax.\nnet = tflearn.fully_connected(net, 2, activation='softmax')\nTraining\nTo set how you train the network, use \nnet = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')\nAgain, this is passing in the network you've been building. 
The keywords: \n\noptimizer sets the training method, here stochastic gradient descent\nlearning_rate is the learning rate\nloss determines how the network error is calculated. In this example, with the categorical cross-entropy.\n\nFinally you put all this together to create the model with tflearn.DNN(net). So it ends up looking something like \nnet = tflearn.input_data([None, 10]) # Input\nnet = tflearn.fully_connected(net, 5, activation='ReLU') # Hidden\nnet = tflearn.fully_connected(net, 2, activation='softmax') # Output\nnet = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')\nmodel = tflearn.DNN(net)\n\nExercise: Below in the build_model() function, you'll put together the network using TFLearn. You get to choose how many layers to use, how many hidden units, etc.",
"# Network building\ndef build_model():\n # This resets all parameters and variables, leave this here\n tf.reset_default_graph()\n \n # Inputs\n net = tflearn.input_data([None, 10000])\n\n # Hidden layer(s)\n net = tflearn.fully_connected(net, 200, activation='ReLU')\n net = tflearn.fully_connected(net, 25, activation='ReLU')\n\n # Output layer\n net = tflearn.fully_connected(net, 2, activation='softmax')\n net = tflearn.regression(net, optimizer='sgd', \n learning_rate=0.1, \n loss='categorical_crossentropy')\n \n model = tflearn.DNN(net)\n return model",
"Intializing the model\nNext we need to call the build_model() function to actually build the model. In my solution I haven't included any arguments to the function, but you can add arguments so you can change parameters in the model if you want.\n\nNote: You might get a bunch of warnings here. TFLearn uses a lot of deprecated code in TensorFlow. Hopefully it gets updated to the new TensorFlow version soon.",
"model = build_model()",
"Training the network\nNow that we've constructed the network, saved as the variable model, we can fit it to the data. Here we use the model.fit method. You pass in the training features trainX and the training targets trainY. Below I set validation_set=0.1 which reserves 10% of the data set as the validation set. You can also set the batch size and number of epochs with the batch_size and n_epoch keywords, respectively. Below is the code to fit our the network to our word vectors.\nYou can rerun model.fit to train the network further if you think you can increase the validation accuracy. Remember, all hyperparameter adjustments must be done using the validation set. Only use the test set after you're completely done training the network.",
"# Training\nmodel.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128, n_epoch=100)",
"Testing\nAfter you're satisified with your hyperparameters, you can run the network on the test set to measure it's performance. Remember, only do this after finalizing the hyperparameters.",
"predictions = (np.array(model.predict(testX))[:,0] >= 0.5).astype(np.int_)\ntest_accuracy = np.mean(predictions == testY[:,0], axis=0)\nprint(\"Test accuracy: \", test_accuracy)",
"Try out your own text!",
"# Helper function that uses your model to predict sentiment\ndef test_sentence(sentence):\n positive_prob = model.predict([text_to_vector(sentence.lower())])[0][1]\n print('Sentence: {}'.format(sentence))\n print('P(positive) = {:.3f} :'.format(positive_prob), \n 'Positive' if positive_prob > 0.5 else 'Negative')\n\nsentence = \"Moonlight is by far the best movie of 2016.\"\ntest_sentence(sentence)\n\nsentence = \"It's amazing anyone could be talented enough to make something this spectacularly awful\"\ntest_sentence(sentence)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
PyLCARS/PythonUberHDL
|
myHDL_ComputerFundamentals/Memorys/FirstInFirstOutMemory.ipynb
|
bsd-3-clause
|
[
"\\title{First in First out (FIFO) memory in myHDL}\n\\author{Steven K Armour}\n\\maketitle\nThe FIFO memory, also called queue-ed (as in an English queue) memory is a common write-read scheme employed with sequential memory such as time measurements. The fundamental scheme is that the first data to be written into the memory storage(RAM, etc) is the first to be read out followed by the second data read and so on. \n<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#References\" data-toc-modified-id=\"References-1\"><span class=\"toc-item-num\">1 </span>References</a></span></li><li><span><a href=\"#Libarys-and-Helper-functions\" data-toc-modified-id=\"Libarys-and-Helper-functions-2\"><span class=\"toc-item-num\">2 </span>Libarys and Helper functions</a></span></li><li><span><a href=\"#Writer-Pointer\" data-toc-modified-id=\"Writer-Pointer-3\"><span class=\"toc-item-num\">3 </span>Writer Pointer</a></span><ul class=\"toc-item\"><li><span><a href=\"#myHDL-Testing\" data-toc-modified-id=\"myHDL-Testing-3.1\"><span class=\"toc-item-num\">3.1 </span>myHDL Testing</a></span></li><li><span><a href=\"#Verilog-Code\" data-toc-modified-id=\"Verilog-Code-3.2\"><span class=\"toc-item-num\">3.2 </span>Verilog Code</a></span></li><li><span><a href=\"#Verilog-Testbench\" data-toc-modified-id=\"Verilog-Testbench-3.3\"><span class=\"toc-item-num\">3.3 </span>Verilog Testbench</a></span></li></ul></li><li><span><a href=\"#Read-Pointer\" data-toc-modified-id=\"Read-Pointer-4\"><span class=\"toc-item-num\">4 </span>Read Pointer</a></span><ul class=\"toc-item\"><li><span><a href=\"#myHDL-testing\" data-toc-modified-id=\"myHDL-testing-4.1\"><span class=\"toc-item-num\">4.1 </span>myHDL testing</a></span></li><li><span><a href=\"#Verilog-Code\" data-toc-modified-id=\"Verilog-Code-4.2\"><span class=\"toc-item-num\">4.2 </span>Verilog Code</a></span></li><li><span><a href=\"#Verilog-Testbench\" 
data-toc-modified-id=\"Verilog-Testbench-4.3\"><span class=\"toc-item-num\">4.3 </span>Verilog Testbench</a></span></li></ul></li><li><span><a href=\"#Memory-Array\" data-toc-modified-id=\"Memory-Array-5\"><span class=\"toc-item-num\">5 </span>Memory Array</a></span><ul class=\"toc-item\"><li><span><a href=\"#myHDL-Testing\" data-toc-modified-id=\"myHDL-Testing-5.1\"><span class=\"toc-item-num\">5.1 </span>myHDL Testing</a></span></li><li><span><a href=\"#Verilog-Code\" data-toc-modified-id=\"Verilog-Code-5.2\"><span class=\"toc-item-num\">5.2 </span>Verilog Code</a></span></li><li><span><a href=\"#Verilog-Testbench\" data-toc-modified-id=\"Verilog-Testbench-5.3\"><span class=\"toc-item-num\">5.3 </span>Verilog Testbench</a></span><ul class=\"toc-item\"><li><span><a href=\"#Conversion-Issue-:\" data-toc-modified-id=\"Conversion-Issue-:-5.3.1\"><span class=\"toc-item-num\">5.3.1 </span>Conversion Issue :</a></span></li></ul></li></ul></li><li><span><a href=\"#Status-Signal\" data-toc-modified-id=\"Status-Signal-6\"><span class=\"toc-item-num\">6 </span>Status Signal</a></span><ul class=\"toc-item\"><li><span><a href=\"#myHDL-Testing\" data-toc-modified-id=\"myHDL-Testing-6.1\"><span class=\"toc-item-num\">6.1 </span>myHDL Testing</a></span></li><li><span><a href=\"#Verilog-Code\" data-toc-modified-id=\"Verilog-Code-6.2\"><span class=\"toc-item-num\">6.2 </span>Verilog Code</a></span></li><li><span><a href=\"#Verilog-Testbench\" data-toc-modified-id=\"Verilog-Testbench-6.3\"><span class=\"toc-item-num\">6.3 </span>Verilog Testbench</a></span></li></ul></li><li><span><a href=\"#FIFO\" data-toc-modified-id=\"FIFO-7\"><span class=\"toc-item-num\">7 </span>FIFO</a></span><ul class=\"toc-item\"><li><span><a href=\"#myHDL-Testing\" data-toc-modified-id=\"myHDL-Testing-7.1\"><span class=\"toc-item-num\">7.1 </span>myHDL Testing</a></span></li><li><span><a href=\"#Verilog-Code\" data-toc-modified-id=\"Verilog-Code-7.2\"><span class=\"toc-item-num\">7.2 </span>Verilog 
Code</a></span></li><li><span><a href=\"#Verilog-Testbench\" data-toc-modified-id=\"Verilog-Testbench-7.3\"><span class=\"toc-item-num\">7.3 </span>Verilog Testbench</a></span><ul class=\"toc-item\"><li><span><a href=\"#Conversion-Issue-:\" data-toc-modified-id=\"Conversion-Issue-:-7.3.1\"><span class=\"toc-item-num\">7.3.1 </span>Conversion Issue :</a></span></li></ul></li></ul></li></ul></div>\n\nReferences\n@misc{loi le_2017,\ntitle={Verilog code for FIFO memory},\nurl={http://www.fpga4student.com/2017/01/verilog-code-for-fifo-memory.html},\njournal={Fpga4student.com},\nauthor={Loi Le, Van},\nyear={2017}\n}\nLibarys and Helper functions",
"from myhdl import *\nfrom myhdlpeek import Peeker\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom sympy import *\ninit_printing()\n\nimport random\n\n#https://github.com/jrjohansson/version_information\n%load_ext version_information\n%version_information myhdl, myhdlpeek, numpy, pandas, matplotlib, sympy, random\n\n#helper functions to read in the .v and .vhd generated files into python\ndef VerilogTextReader(loc, printresult=True):\n with open(f'{loc}.v', 'r') as vText:\n VerilogText=vText.read()\n if printresult:\n print(f'***Verilog modual from {loc}.v***\\n\\n', VerilogText)\n return VerilogText\n\ndef VHDLTextReader(loc, printresult=True):\n with open(f'{loc}.vhd', 'r') as vText:\n VerilogText=vText.read()\n if printresult:\n print(f'***VHDL modual from {loc}.vhd***\\n\\n', VerilogText)\n return VerilogText",
"Writer Pointer\nIn order to use RAM memory, we must employ memory pointers which are values stored in the FIFO that tell the FIFO where the memory is stored. The write pointer (wptr) is an incremental counter that is increased for each data entry that is added to the memory. Thus the write_pointer is simply a counter with some extra controls\n\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{WriterPointer.png}}\n\\caption{\\label{fig:WP} Writer Pointer Functianl Digram }\n\\end{figure}",
"@block\ndef write_pointer(wr, fifo_full, wptr, fifo_we, clk, rst_n):\n \"\"\"\n Input:\n wr(bool):write signal\n fifo_full(bool): fifo full signal\n clk(bool): clock\n rst_n(bool): negtive reset signal\n \n Ouput:\n wptr(5bit): the write in memory pointer\n fifo_we(bool): the write enable indication signal\n \"\"\"\n \n fifo_we_i=Signal(bool(0))\n @always_comb\n def enableLogic():\n fifo_we_i.next= not fifo_full and wr\n \n #counter\n wptr_i=Signal(intbv(0)[5:0])\n @always(clk.posedge, rst_n.negedge)\n def pointerUpdate():\n if rst_n:\n wptr_i.next=0\n elif fifo_we_i:\n wptr_i.next=wptr_i+1\n else:\n wptr_i.next=wptr_i\n \n @always_comb\n def OuputBuffer():\n fifo_we.next=fifo_we_i\n wptr.next=wptr_i\n \n return instances()\n ",
"myHDL Testing",
"Peeker.clear()\nwr=Signal(bool(0)); Peeker(wr, 'wr')\nfifo_full=Signal(bool(0)); Peeker(fifo_full, 'fifo_full')\nwptr=Signal(intbv(0)[5:]); Peeker(wptr, 'wptr')\nfifo_we=Signal(bool(0)); Peeker(fifo_we, 'fifo_we')\nclk=Signal(bool(0)); Peeker(clk, 'clk')\nrst_n=Signal(bool(0)); Peeker(rst_n, 'rst_n')\n\nDUT=write_pointer(wr, fifo_full, wptr, fifo_we, clk, rst_n)\n\ndef write_pointerTB():\n \"\"\"\n myHDL only Testbench for `write_pointer module`\n \"\"\"\n \n @always(delay(1))\n def ClkGen():\n clk.next=not clk\n \n @instance\n def stimules():\n i=0\n while True:\n if i==0:\n wr.next=1\n elif i==10:\n wr.next=0\n elif i==12:\n wr.next=1\n elif i==14:\n fifo_full.next=1\n elif i==16:\n rst_n.next=1\n elif i==18:\n rst_n.next=0\n elif i==20:\n raise StopSimulation()\n \n i+=1\n yield clk.posedge\n \n \n return instances()\n \n \nsim=Simulation(DUT, write_pointerTB(), *Peeker.instances()).run()\n\nPeeker.to_wavedrom()\n\nwrite_pointerData=Peeker.to_dataframe()\nwrite_pointerData=write_pointerData[write_pointerData['clk']==1]\nwrite_pointerData.drop('clk', axis=1, inplace=True)\nwrite_pointerData.reset_index(drop=True, inplace=True)\nwrite_pointerData",
"Verilog Code",
"DUT.convert()\nVerilogTextReader('write_pointer');",
"\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{write_pointerRTL.png}}\n\\caption{\\label{fig:WPRTL} write_pointer RTL schematic; Xilinx Vivado 2017.4}\n\\end{figure}\n\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{write_pointerSyn.png}}\n\\caption{\\label{fig:WPSYN} write_pointer Synthesized schematic; Xilinx Vivado 2017.4}\n\\end{figure}\nVerilog Testbench",
"@block\ndef write_pointerTBV():\n \"\"\"\n myHDL->Verilog Testbench for `write_pointer module`\n \"\"\"\n wr=Signal(bool(0))\n fifo_full=Signal(bool(0))\n wptr=Signal(intbv(0)[5:])\n fifo_we=Signal(bool(0))\n clk=Signal(bool(0))\n rst_n=Signal(bool(0))\n \n @always_comb\n def print_data():\n print(wr, fifo_full, wptr, fifo_we, clk, rst_n)\n\n DUT=write_pointer(wr, fifo_full, wptr, fifo_we, clk, rst_n)\n\n\n \n @instance\n def clk_signal():\n while True:\n clk.next = not clk\n yield delay(1)\n \n @instance\n def stimules():\n i=0\n while True:\n if i==0:\n wr.next=1\n elif i==10:\n wr.next=0\n elif i==12:\n wr.next=1\n elif i==14:\n fifo_full.next=1\n elif i==16:\n rst_n.next=1\n elif i==18:\n rst_n.next=0\n elif i==20:\n raise StopSimulation()\n else:\n pass\n \n i+=1\n yield clk.posedge\n \n \n return instances()\n \n \nTB=write_pointerTBV()\nTB.convert(hdl=\"Verilog\", initial_values=True)\nVerilogTextReader('write_pointerTBV');",
"Read Pointer\nThe Read pointer serves the same function as the write_pointer but increments the read pointer that calls up sequentially the memory location to read from.\n\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{ReadPointer.png}}\n\\caption{\\label{fig:RP} Read Pointer Functianl Digram }\n\\end{figure}",
"@block\ndef read_pointer(rd, fifo_empty, rptr, fifo_rd, clk, rst_n):\n \"\"\"\n Input:\n rd(bool):write signal\n fifo_empty(bool): fifo empty signal\n clk(bool): clock\n rst_n(bool): negtive reset signal\n \n Ouput:\n rptr(5bit): the read out memory pointer\n fifo_rd(bool): the read enable indication signal\n \"\"\"\n \n fifo_rd_i=Signal(bool(0))\n @always_comb\n def enableLogic():\n fifo_rd_i.next=not fifo_empty and rd\n \n rptr_i=Signal(intbv(0)[5:0])\n @always(clk.posedge, rst_n.negedge)\n def pointerUpdate():\n if rst_n:\n rptr_i.next=0\n elif fifo_rd_i:\n rptr_i.next=rptr_i+1\n else:\n rptr_i.next=rptr_i\n \n \n @always_comb\n def output():\n fifo_rd.next=fifo_rd_i\n rptr.next=rptr_i\n \n return instances()",
"myHDL testing",
"Peeker.clear()\nrd=Signal(bool(0)); Peeker(rd, 'rd')\nfifo_empty=Signal(bool(0)); Peeker(fifo_empty, 'fifo_empty')\nrptr=Signal(intbv(0)[5:]); Peeker(rptr, 'rptr')\nfifo_rd=Signal(bool(0)); Peeker(fifo_rd, 'fifo_rd')\nclk=Signal(bool(0)); Peeker(clk, 'clk')\nrst_n=Signal(bool(0)); Peeker(rst_n, 'rst_n')\n\nDUT=read_pointer(rd, fifo_empty, rptr, fifo_rd, clk, rst_n)\n\ndef read_pointerTB():\n \"\"\"\n myHDL only Testbench for `read_pointer module`\n\n \"\"\"\n \n @always(delay(1))\n def ClkGen():\n clk.next=not clk\n \n @instance\n def stimules():\n i=0\n while True:\n if i==0:\n rd.next=1\n elif i==10:\n rd.next=0\n elif i==12:\n rd.next=1\n elif i==14:\n fifo_empty.next=1\n elif i==16:\n rst_n.next=1\n elif i==18:\n rst_n.next=0\n elif i==20:\n raise StopSimulation()\n \n i+=1\n yield clk.posedge\n \n \n return instances()\n \n \nsim=Simulation(DUT, read_pointerTB(), *Peeker.instances()).run()\n\nPeeker.to_wavedrom()\n\nread_pointerData=Peeker.to_dataframe()\nread_pointerData=read_pointerData[read_pointerData['clk']==1]\nread_pointerData.drop('clk', axis=1, inplace=True)\nread_pointerData.reset_index(drop=True, inplace=True)\nread_pointerData",
"Verilog Code",
"DUT.convert()\nVerilogTextReader('read_pointer');",
"\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{read_pointerRTL.png}}\n\\caption{\\label{fig:RPRTL} read_pointer RTL Schematic; Xilinx Vivado 2017.4}\n\\end{figure}\n\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{read_pointerSYN.png}}\n\\caption{\\label{fig:RPRTL} read_pointer Synthesized schematic; Xilinx Vivado 2017.4}\n\\end{figure}\nVerilog Testbench",
"@block\ndef read_pointerTBV():\n \"\"\"\n myHDL -> Verilog Testbench for `read_pointer` module\n\n \"\"\"\n rd=Signal(bool(0))\n fifo_empty=Signal(bool(0))\n rptr=Signal(intbv(0)[5:])\n fifo_rd=Signal(bool(0))\n clk=Signal(bool(0))\n rst_n=Signal(bool(0))\n \n @always_comb\n def print_data():\n print(rd, fifo_empty, rptr, fifo_rd, clk, rst_n)\n\n DUT=read_pointer(rd, fifo_empty, rptr, fifo_rd, clk, rst_n)\n\n\n \n @instance\n def clk_signal():\n while True:\n clk.next = not clk\n yield delay(1)\n \n @instance\n def stimules():\n i=0\n while True:\n if i==0:\n rd.next=1\n elif i==10:\n rd.next=0\n elif i==12:\n rd.next=1\n elif i==14:\n fifo_empty.next=1\n elif i==16:\n rst_n.next=1\n elif i==18:\n rst_n.next=0\n elif i==20:\n raise StopSimulation()\n else:\n pass\n \n i+=1\n yield clk.posedge\n \n \n return instances()\n \n \nTB=read_pointerTBV()\nTB.convert(hdl=\"Verilog\", initial_values=True)\nVerilogTextReader('read_pointerTBV');",
"Memory Array\nThe memory array is a simple RAM memory that uses the wptr to assign the data_in location in the RAM and the rptr to pull the memory to output to data_out\n\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{Memory_array.png}}\n\\caption{\\label{fig:WP} Memory Array Functional Diagram}\n\\end{figure}",
"@block\ndef memory_array(data_in, fifo_we, wptr, rptr, data_out, clk, clear):\n \"\"\"\n Input:\n data_in(8bit): data to be writen \n fifo_we(bool): write enable\n wptr(5bit): write memory address pointer\n rptr(5bit): read memory address pointer\n clk(bool): clock\n clear(bool): signal to clear clear memeory to 0 \n Ouput:\n data_out(8bit): data to be read out based on`rptr`\n \"\"\"\n data_out_i=[Signal(intbv(0)[8:]) for _ in range(16)]\n @always(clk.posedge)\n def uptake():\n if fifo_we:\n data_out_i[wptr[4:]].next=data_in\n \n @always_comb\n def output():\n data_out.next=data_out_i[rptr[4:]]\n \n @always(clear.negedge)\n def clearMem():\n for i in range(16):\n data_out_i[i].next=0\n \n \n \n return instances()",
"myHDL Testing",
"Peeker.clear()\ndata_in=Signal(intbv(0)[8:]); Peeker(data_in, 'data_in')\nfifo_we=Signal(bool(0)); Peeker(fifo_we, 'fifo_we') \nwptr=Signal(intbv(0)[5:]); Peeker(wptr, 'wptr')\nrptr=Signal(intbv(0)[5:]); Peeker(rptr, 'rptr')\ndata_out=Signal(intbv(0)[8:]); Peeker(data_out, 'data_out') \nclk=Signal(bool(0)); Peeker(clk, 'clk')\nclear=Signal(bool(0)); Peeker(clear, 'clear')\n\nTestData=np.random.randint(low=data_in.min, high=data_in.max, \n size=16)\nTestData=TestData.astype(int)\n\nDUT=memory_array(data_in, fifo_we, wptr, rptr, data_out, clk, clear)\n\n\ndef memory_arrayTB():\n \"\"\"\n myHDL only testbench for `memory_array` module\n \"\"\"\n @instance\n def clk_signal():\n while True:\n clk.next = not clk\n yield delay(1)\n \n @instance\n def stimules():\n i=0\n while True:\n if i==0:\n fifo_we.next=0\n elif i==2:\n fifo_we.next=1\n \n elif i==13:\n clear.next=1\n elif i==14:\n clear.next=0\n \n elif i==16:\n raise StopSimulation()\n data_in.next=int(TestData[wptr])\n wptr.next=wptr+1\n if i!=0:\n rptr.next=rptr+1\n i+=1\n yield clk.posedge\n \n return instances()\n\nsim=Simulation(DUT, memory_arrayTB(), *Peeker.instances()).run()\n\nPeeker.to_wavedrom()\n\nmemoryData=Peeker.to_dataframe()\nmemoryData=memoryData[memoryData['clk']==1]\nmemoryData.drop('clk', axis=1, inplace=True)\nmemoryData.reset_index(drop=True, inplace=True)\nmemoryData\n\nmemoryData.drop([0, 1], axis=0, inplace=True)\nmemoryData.drop(['fifo_we', 'rptr', 'wptr'], axis=1, inplace=True)\nmemoryData.reset_index(inplace=True, drop=True)\nmemoryData['data_out_shift-1']=np.array(memoryData.data_out.shift(-1)).astype(int)\nmemoryData.drop(12, axis=0, inplace=True)\nmemoryData\n\n(memoryData['data_in']==memoryData['data_out_shift-1']).all()",
"Verilog Code",
"DUT.convert()\nVerilogTextReader('memory_array');",
"\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{memory_arrayRTL.png}}\n\\caption{\\label{fig:MARTL} memory_array RTL Schematic; Xilinx Vivado 2017.4}\n\\end{figure}\n\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{memory_arraySYN.png}}\n\\caption{\\label{fig:MASYN} memory_array Synthesized schematic; Xilinx Vivado 2017.4}\n\\end{figure}\nVerilog Testbench\nConversion Issue :\nAt present I can not get the values stored in TestData numpy array to be transcribed to the output Verilog code memory_arrayTBV If someone can figure out how to, or make an improvement to the myHDL converter. The fix would be greatly appreciated by myself and the rest of the myHDL user base",
"@block\ndef memory_arrayTBV():\n \"\"\"\n myHDL -> verilog testbench for `memory_array` module\n \"\"\"\n data_in=Signal(intbv(0)[8:])\n fifo_we=Signal(bool(0))\n wptr=Signal(intbv(0)[5:])\n rptr=Signal(intbv(0)[5:])\n data_out=Signal(intbv(0)[8:])\n clk=Signal(bool(0))\n clear=Signal(bool(0))\n \n TestData_i=[Signal(intbv(int(i))[8:]) for i in TestData]\n\n\n @always_comb\n def print_data():\n print(data_in, fifo_we, wptr, rptr, data_out, clk, clear)\n \n\n DUT=memory_array(data_in, fifo_we, wptr, rptr, data_out, clk, clear)\n\n\n\n @instance\n def clk_signal():\n while True:\n clk.next = not clk\n yield delay(1)\n \n @instance\n def stimules():\n i=0\n while True:\n if i==0:\n fifo_we.next=0\n elif i==2:\n fifo_we.next=1\n \n elif i==13:\n clear.next=1\n elif i==14:\n clear.next=0\n \n elif i==16:\n raise StopSimulation()\n else:\n pass\n \n data_in.next=TestData_i[wptr]\n wptr.next=wptr+1\n if i!=0:\n rptr.next=rptr+1\n i+=1\n yield clk.posedge\n \n return instances()\n\nTB=memory_arrayTBV()\nTB.convert(hdl=\"Verilog\", initial_values=True)\nVerilogTextReader('memory_arrayTBV');",
"Status Signal\nThe status signal module is a internal check module that checks for impending overflow, overflow, and underflow of the FIFO memory\n\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{fifoStatus.png}}\n\\caption{\\label{fig:WP} fifoStatus Functional Diagram}\n\\end{figure}",
"@block\ndef fifoStatus(wr, rd, fifo_we, fifo_rd, wptr, rptr, \n fifo_full, fifo_empty, fifo_threshold, fifo_overflow, fifo_underflow, \n clk, rst_n):\n \"\"\"\n Input:\n wr(bool): write signal\n rd(bool): read signal \n fifo_we(bool): write enable signal\n fifo_rd(bool): read enable signal\n wptr(5bit): write pointer \n rptr(5bit): read pointer\n clk(bool): clock\n rst_n(bool): reset \n \n Ouput:\n fifo_full(bool): signal indicating the fifo memory is full\n fifo_empty(bool):signal indicating the fifo memory is empty\n fifo_threshold(bool): signal indicating that the fifo is about to overflow\n fifo_overflow(bool): signal indicating that the fifo rptr has overflowed\n fifo_underflow(bool): signal indicating that the fifo wptr has underflowed\n \"\"\"\n \n #interalStores\n fifo_full_i=Signal(bool(0))\n fifo_empty_i=Signal(bool(0))\n fifo_threshold_i=Signal(bool(0))\n fifo_overflow_i=Signal(bool(0))\n fifo_underflow_i=Signal(bool(0))\n \n \n #interal wires\n fbit_comp=Signal(bool(0))\n pointer_equal=Signal(bool(0))\n pointer_result=Signal(intbv(0)[5:].signed())\n overflow_set=Signal(bool(0))\n underflow_set=Signal(bool(0))\n \n \n \n \n @always_comb\n def logic1():\n fbit_comp.next=wptr[4]^rptr[4]\n \n if wptr[3:0]-rptr[3:0]:\n pointer_equal.next=0\n else:\n pointer_equal.next=1\n \n \n pointer_result.next=wptr[4:0]-rptr[4:0]\n \n overflow_set.next=fifo_full_i & wr\n underflow_set.next=fifo_empty_i & rd\n \n @always_comb\n def logic2():\n fifo_full_i.next=fbit_comp & pointer_equal\n fifo_empty_i.next=(not fbit_comp) & pointer_equal\n \n if pointer_result[4] or pointer_result[3]:\n fifo_threshold_i.next=1\n else:\n fifo_threshold_i.next=0\n \n \n \n @always(clk.posedge, rst_n.negedge)\n def overflowControl():\n if rst_n:\n fifo_overflow_i.next=0\n elif overflow_set==1 and fifo_rd==0:\n fifo_overflow_i.next=1\n elif fifo_rd:\n fifo_overflow_i.next=0\n else:\n fifo_overflow_i.next=fifo_overflow_i\n \n @always(clk.posedge, rst_n.negedge)\n def underflowControl():\n if 
rst_n:\n fifo_underflow_i.next=0\n elif underflow_set==1 and fifo_we==0:\n fifo_underflow_i.next=1\n elif fifo_we:\n fifo_underflow_i.next=0\n else:\n fifo_underflow_i.next=fifo_underflow_i\n \n @always_comb\n def outputBuffer():\n fifo_full.next=fifo_full_i\n fifo_empty.next=fifo_empty_i\n fifo_threshold.next=fifo_threshold_i\n fifo_overflow.next=fifo_overflow_i\n fifo_underflow.next=fifo_underflow_i\n \n \n \n \n return instances()\n\n",
"myHDL Testing",
"Peeker.clear()\nwr=Signal(bool(0)); Peeker(wr, 'wr')\nrd=Signal(bool(0)); Peeker(rd, 'rd')\n\nfifo_we=Signal(bool(0)); Peeker(fifo_we, 'fifo_we')\nfifo_rd=Signal(bool(0)); Peeker(fifo_rd, 'fifo_rd')\nwptr=Signal(intbv(0)[5:]); Peeker(wptr, 'wptr')\nrptr=Signal(intbv(0)[5:]); Peeker(rptr, 'rptr')\n\nfifo_full=Signal(bool(0)); Peeker(fifo_full, 'fifo_full')\nfifo_empty=Signal(bool(0)); Peeker(fifo_empty, 'fifo_empty')\nfifo_threshold=Signal(bool(0)); Peeker(fifo_threshold, 'fifo_threshold')\nfifo_overflow=Signal(bool(0)); Peeker(fifo_overflow, 'fifo_overflow')\nfifo_underflow=Signal(bool(0)); Peeker(fifo_underflow, 'fifo_underflow')\n\nclk=Signal(bool(0)); Peeker(clk, 'clk')\nrst_n=Signal(bool(0)); Peeker(rst_n, 'rst_n')\n\n\n\nDUT=fifoStatus(wr, rd, fifo_we, fifo_rd, wptr, rptr, \n fifo_full, fifo_empty, fifo_threshold, fifo_overflow, fifo_underflow, \n clk, rst_n)\n\n\ndef fifoStatusTB():\n \"\"\"\n myHDL only test bench for `fifoStatus` module\n Note:\n Not a complet testbench, could be better\n \"\"\"\n @always(delay(1))\n def ClkGen():\n clk.next=not clk\n \n @instance\n def stimules():\n i=0\n while True:\n if i==0:\n wr.next=1; rd.next=1\n fifo_we.next=0; fifo_rd.next=0\n \n elif i==2:\n wr.next=0; rd.next=0\n fifo_we.next=1; fifo_rd.next=1\n \n elif i==4:\n wr.next=1; rd.next=1\n fifo_we.next=1; fifo_rd.next=1\n \n if i>=6 and i<=20:\n wptr.next=wptr+1\n if i>=7 and i<=20:\n rptr.next=rptr+1\n \n if i==20:\n rst_n.next=1\n elif i==21:\n rst_n.next=0\n elif i==23:\n raise StopSimulation()\n \n i+=1\n yield clk.posedge\n \n return instances()\n\nsim=Simulation(DUT, fifoStatusTB(), *Peeker.instances()).run()\n\nPeeker.to_wavedrom()\n\nPeeker.to_dataframe()",
"Verilog Code",
"DUT.convert()\nVerilogTextReader('fifoStatus');",
"\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{fifoStatusRTL.png}}\n\\caption{\\label{fig:FIFOStatusRTL} fifoStatus RTL Schematic; Xilinx Vivado 2017.4}\n\\end{figure}\n\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{fifoStatusSYN.png}}\n\\caption{\\label{fig:MASYN} fifoStatus Synthesized Schematic; Xilinx Vivado 2017.4}\n\\end{figure}\nVerilog Testbench",
"@block\ndef fifoStatusTBV():\n \"\"\"\n myHDL -> verilog test bench for `fifoStatus` module\n Note:\n Not a complet testbench, could be better\n \"\"\"\n \n wr=Signal(bool(0)); Peeker(wr, 'wr')\n rd=Signal(bool(0)); Peeker(rd, 'rd')\n\n fifo_we=Signal(bool(0)); Peeker(fifo_we, 'fifo_we')\n fifo_rd=Signal(bool(0)); Peeker(fifo_rd, 'fifo_rd')\n wptr=Signal(intbv(0)[5:]); Peeker(wptr, 'wptr')\n rptr=Signal(intbv(0)[5:]); Peeker(rptr, 'rptr')\n\n fifo_full=Signal(bool(0)); Peeker(fifo_full, 'fifo_full')\n fifo_empty=Signal(bool(0)); Peeker(fifo_empty, 'fifo_empty')\n fifo_threshold=Signal(bool(0)); Peeker(fifo_threshold, 'fifo_threshold')\n fifo_overflow=Signal(bool(0)); Peeker(fifo_overflow, 'fifo_overflow')\n fifo_underflow=Signal(bool(0)); Peeker(fifo_underflow, 'fifo_underflow')\n\n clk=Signal(bool(0)); Peeker(clk, 'clk')\n rst_n=Signal(bool(0)); Peeker(rst_n, 'rst_n')\n \n @always_comb\n def print_data():\n print(wr, rd, \n fifo_we, fifo_rd, wptr, rptr,\n fifo_full, fifo_empty, fifo_threshold, fifo_overflow, fifo_underflow)\n\n\n\n DUT=fifoStatus(wr, rd, fifo_we, fifo_rd, wptr, rptr, \n fifo_full, fifo_empty, fifo_threshold, fifo_overflow, fifo_underflow, \n clk, rst_n)\n\n\n\n @instance\n def clk_signal():\n while True:\n clk.next = not clk\n yield delay(1)\n \n @instance\n def stimules():\n i=0\n while True:\n if i==0:\n wr.next=1; rd.next=1\n fifo_we.next=0; fifo_rd.next=0\n \n elif i==2:\n wr.next=0; rd.next=0\n fifo_we.next=1; fifo_rd.next=1\n \n elif i==4:\n wr.next=1; rd.next=1\n fifo_we.next=1; fifo_rd.next=1\n else:\n pass\n \n if i>=6 and i<=20:\n wptr.next=wptr+1\n if i>=7 and i<=20:\n rptr.next=rptr+1\n \n if i==20:\n rst_n.next=1\n elif i==21:\n rst_n.next=0\n elif i==23:\n raise StopSimulation()\n else:\n pass\n \n i+=1\n yield clk.posedge\n \n return instances()\n\nTB=fifoStatusTBV()\nTB.convert(hdl=\"Verilog\", initial_values=True)\nVerilogTextReader('fifoStatusTBV');",
"FIFO\n\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{FIFO.png}}\n\\caption{\\label{fig:WP} FIFO Functional Diagram}\n\\end{figure}",
"@block\ndef fifo_mem(wr, rd, data_in, \n fifo_full, fifo_empty, fifo_threshold, fifo_overflow, fifo_underflow,\n data_out, clk, rst_n, clear):\n \"\"\"\n Input:\n wr(bool):write signal\n rd(bool):write signal\n data_in(8bit): data to be writen\n clk(bool): clock\n rst_n(bool): negtive reset signal\n clear(bool): signal to clear clear memeory to 0 \n \n Output:\n fifo_full(bool): signal indicating the fifo memory is full\n fifo_empty(bool):signal indicating the fifo memory is empty\n fifo_threshold(bool): signal indicating that the fifo is about to overflow\n fifo_overflow(bool): signal indicating that the fifo has overflowed\n fifo_underflow(bool): signal indicating that the fifo has underflowed\n data_out(8bit): data to be read out \n \"\"\"\n \n wptr=Signal(intbv(0)[5:]); rptr=Signal(intbv(0)[5:])\n fifo_we=Signal(bool(0)); fifo_rd=Signal(bool(0))\n \n WPointerAcum=write_pointer(wr, fifo_full, wptr, fifo_we, clk, rst_n)\n RPointerAcum=read_pointer(rd, fifo_empty, rptr, fifo_rd, clk, rst_n)\n InternalMem=memory_array(data_in, fifo_we, wptr, rptr, data_out, clk, clear)\n FIFOControl=fifoStatus(wr, rd, fifo_we, fifo_rd, wptr, rptr, \n fifo_full, fifo_empty, fifo_threshold, fifo_overflow, fifo_underflow, \n clk, rst_n)\n return instances()\n\n",
"myHDL Testing",
"Peeker.clear()\nwr=Signal(bool(0)); Peeker(wr, 'wr')\nrd=Signal(bool(0)); Peeker(rd, 'rd')\ndata_in=Signal(intbv(0)[8:]); Peeker(data_in, 'data_in')\n\n\n\nfifo_full=Signal(bool(0)); Peeker(fifo_full, 'fifo_full')\nfifo_empty=Signal(bool(0)); Peeker(fifo_empty, 'fifo_empty')\nfifo_threshold=Signal(bool(0)); Peeker(fifo_threshold, 'fifo_threshold')\nfifo_overflow=Signal(bool(0)); Peeker(fifo_overflow, 'fifo_overflow')\nfifo_underflow=Signal(bool(0)); Peeker(fifo_underflow, 'fifo_underflow')\n\ndata_out=Signal(intbv(0)[8:]); Peeker(data_out, 'data_out') \n\n\nclk=Signal(bool(0)); Peeker(clk, 'clk')\nrst_n=Signal(bool(0)); Peeker(rst_n, 'rst_n')\nclear=Signal(bool(0)); Peeker(clear, 'clear')\n\nDUT=fifo_mem(wr, rd, data_in, \n fifo_full, fifo_empty, fifo_threshold, fifo_overflow, fifo_underflow,\n data_out, clk, rst_n, clear)\n\n\n\ndef fifo_memTB():\n \"\"\"\n myHDL only test bench for `fifo_mem` module\n Note:\n Not a complet testbench, could be better\n \"\"\"\n @always(delay(1))\n def ClkGen():\n clk.next=not clk\n \n @instance\n def stimules():\n i=0\n while True:\n if i==0:\n wr.next=1; rd.next=1\n elif i==16:\n wr.next=0; rd.next=1\n elif i==32:\n wr.next=0; rd.next=1\n elif i==48:\n rst_n.next=1\n elif i==49:\n rst_n.next=0\n elif i==50:\n wr.next=1; rd.next=1\n \n \n \n if i<16:\n data_in.next=int(TestData[i])\n elif i>=16 and i<32:\n data_in.next=int(TestData[i-16])\n elif i>=32 and i<48:\n data_in.next=int(TestData[i-32])\n elif i==48 or i==49:\n pass\n else:\n data_in.next=int(TestData[i-51])\n\n if i==66:\n raise StopSimulation()\n \n \n i+=1\n yield clk.posedge\n \n \n \n return instances()\n\n\nsim=Simulation(DUT, fifo_memTB(), *Peeker.instances()).run()\n\n \n \n\n\nPeeker.to_wavedrom()\n\nfifoData=Peeker.to_dataframe(); fifoData\n\nfifoData=fifoData[fifoData['clk']==1]\nfifoData.drop('clk', axis=1, inplace=True)\nfifoData.reset_index(drop=True, inplace=True)\nfifoData\n\nfifoData.tail(20)",
"Verilog Code",
"DUT.convert()\nVerilogTextReader('fifo_mem');",
"\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{fifo_memRTL.png}}\n\\caption{\\label{fig:FIFORTL} fifo_mem RTL Schematic; Xilinx Vivado 2017.4}\n\\end{figure}\n\\begin{figure}\n\\centerline{\\includegraphics[width=10cm]{fifo_memSYN.png}}\n\\caption{\\label{fig:fifo_memSYN} fifo_mem Synthesized schematic; Xilinx Vivado 2017.4}\n\\end{figure}\nVerilog Testbench\nConversion Issue :\nAt present I can not get the values stored in TestData numpy array to be transcribed to the output Verilog code memory_arrayTBV If someone can figure out how to, or make an improvement to the myHDL converter. The fix would be greatly appreciated by myself and the rest of the myHDL user base",
"@block\ndef fifo_memTBV():\n \"\"\"\n myHDL ->Verilog test bench for `fifo_mem` module\n Note:\n Not a complet testbench, could be better\n \"\"\"\n \n wr=Signal(bool(0))\n rd=Signal(bool(0))\n data_in=Signal(intbv(0)[8:])\n TestData_i=[Signal(intbv(int(i))[8:]) for i in TestData]\n\n\n\n\n fifo_full=Signal(bool(0))\n fifo_empty=Signal(bool(0))\n fifo_threshold=Signal(bool(0))\n fifo_overflow=Signal(bool(0))\n fifo_underflow=Signal(bool(0))\n\n data_out=Signal(intbv(0)[8:])\n\n\n clk=Signal(bool(0))\n rst_n=Signal(bool(0))\n clear=Signal(bool(0))\n \n @always_comb\n def print_data():\n print(wr, rd, data_in, \n fifo_full, fifo_empty, fifo_threshold, fifo_overflow, fifo_underflow,\n data_out, clk, rst_n, clear)\n\n DUT=fifo_mem(wr, rd, data_in, \n fifo_full, fifo_empty, fifo_threshold, fifo_overflow, fifo_underflow,\n data_out, clk, rst_n, clear)\n\n\n @instance\n def clk_signal():\n while True:\n clk.next = not clk\n yield delay(1)\n \n @instance\n def stimules():\n i=0\n while True:\n if i==0:\n wr.next=1; rd.next=1\n elif i==16:\n wr.next=0; rd.next=1\n elif i==32:\n wr.next=0; rd.next=1\n elif i==48:\n rst_n.next=1\n elif i==49:\n rst_n.next=0\n elif i==50:\n wr.next=1; rd.next=1\n else:\n pass\n \n \n \n if i<16:\n data_in.next=int(TestData_i[i])\n elif i>=16 and i<32:\n data_in.next=int(TestData_i[i-16])\n elif i>=32 and i<48:\n data_in.next=int(TestData_i[i-32])\n elif i==48 or i==49:\n pass\n else:\n data_in.next=int(TestData_i[i-51])\n\n if i==66:\n raise StopSimulation()\n \n \n i+=1\n yield clk.posedge\n \n \n \n return instances()\n\n\n \nTB=fifo_memTBV()\nTB.convert(hdl=\"Verilog\", initial_values=True)\nVerilogTextReader('fifo_memTBV');"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
infotranecon/IEtools
|
Dynamic Equilibrium Example-Bitcoin.ipynb
|
mit
|
[
"These were run in Python 3 using the Anaconda distribution.\nThe dynamic equilibrium model is described in more detail here:\nhttp://informationtransfereconomics.blogspot.com/2017/01/dynamic-equilibrium-presentation.html\nHave fun,\nJason Smith",
"import numpy as np\nimport pylab as pl\nfrom IEtools import *\nimport xlrd\n%pylab inline",
"Example: bitcoin\nHere we read in the bitcoin exchange rate data and plot it. We take only the most recent data because the version of python IEtools used here could only look at two shocks.",
"filename='C:/econdata/btcUSDtimeseries_recent.xls'\n\n\nbook = xlrd.open_workbook(filename)\nsheet = book.sheet_by_index(0)\noutputList = []\noutputName = 'USD/btc'\nfor rowIndex in range(sheet.nrows):\n outputList.append([sheet.cell(rowIndex,0).value, sheet.cell(rowIndex,1).value])\ndataOutput = np.array(outputList)\noutput = {'name':outputName,'data':dataOutput}\n\nplotData = output['data']\nplotType = pl.plot\nplotScale = 1\nplotLabel = output['name']\npl.figure(figsize=(12,6))\nplotType(plotData[:,0],plotData[:,1]*plotScale)\npl.ylabel(plotLabel)\npl.show()",
"Dynamic equilibrium model\nThis applies the entropy minimization method of finding the dynamic equilibrium.",
"forecastDuration = 0.25\nforecastResolution = 100\nforecastTime = np.array(list(range(int(output['data'][-1,0]*forecastResolution),int((output['data'][-1,0]+forecastDuration)*forecastResolution),1)))/forecastResolution\n\ntemp = dynamic_equilibrium_optimize(output['data'],alphaRange=(-2.9,-2.4),binWidth=0.5,alphaDelta=0.001,method='brute')\ntransform = log_linear_transform(output['data'], temp)\n\nfitFunction = two_shock\n\ntransform[:,1] = np.exp(transform[:,1])\nguess = [1.0,-0.01,2017.35,1.0,-0.01,2017.6,0.0]\nresult = dynamic_eq_fit(fitFunction, transform, guess)\n\nfitData = result['fit']\npopt = result['params']\npcov = result['cov'] \n\n\nforecastFit = np.array(list(map(lambda x:np.exp(fitFunction(x,*popt)),forecastTime)))\n\nplotData[:,1] = np.exp(np.log(transform[:,1])+temp*(plotData[:,0]-plotData[0,0]))\nplotScale = 1\nplotLabel = output['name']\nplotData2 = np.exp(np.log(fitData)+temp*(plotData[:,0]-plotData[0,0]))\nplotData3 = np.exp(np.log(forecastFit)+temp*(forecastTime-plotData[0,0]))\n\npl.figure(figsize=(12,6))\nfor index in range(len(result['transitions'])):\n pl.axvline(x=result['transitions'][index],color='0.5',linewidth=0.5) \npl.plot(plotData[:,0],plotData[:,1],plotData[:,0],plotData2,forecastTime,plotData3)\npl.ylabel(plotLabel)\npl.show()\n\nprint('Dynamic equilibrium growth rate (entropy min) = ',np.round(100*temp,decimals=2))\nprint('Shock centers = ',np.round(result['transitions'],decimals=1))\nprint('Shock widths = ',np.round(result['shock_widths'],decimals=2))\nprint('Shock magnitudes = ',np.round(result['shock_mags'],decimals=2))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
turbomanage/training-data-analyst
|
courses/machine_learning/feateng/feateng.ipynb
|
apache-2.0
|
[
"<h1> Feature Engineering </h1>\n\nIn this notebook, you will learn how to incorporate feature engineering into your pipeline.\n<ul>\n<li> Working with feature columns </li>\n<li> Adding feature crosses in TensorFlow </li>\n<li> Reading data from BigQuery </li>\n<li> Creating datasets using Dataflow </li>\n<li> Using a wide-and-deep model </li>\n</ul>\n\nNote: You may ignore specific errors related to \"papermill\", \"google-cloud-storage\", and \"datalab\". You may also ignore warnings related to '/home/jupyter/.local/bin'. These components and issues do not impact your ability to complete the lab.",
"!pip install --user apache-beam[gcp]==2.16.0 \n!pip install --user httplib2==0.12.0 ",
"After doing a pip install, restart your kernel by selecting kernel from the menu and clicking Restart Kernel before proceeding further",
"import tensorflow as tf\nimport apache_beam as beam\nimport shutil\nprint(tf.__version__)",
"<h2> 1. Environment variables for project and bucket </h2>\n\n<li> Your project id is the *unique* string that identifies your project (not the project name). You can find this from the GCP Console dashboard's Home page. My dashboard reads: <b>Project ID:</b> cloud-training-demos </li>\n<li> Cloud training often involves saving and restoring model files. Therefore, we should <b>create a single-region bucket</b>. If you don't have a bucket already, I suggest that you create one from the GCP console (because it will dynamically check whether the bucket name you want is available) </li>\n</ol>\n<b>Change the cell below</b> to reflect your Project ID and bucket name.",
"import os\nPROJECT = 'cloud-training-demos' # CHANGE THIS\nBUCKET = 'cloud-training-demos' # REPLACE WITH YOUR BUCKET NAME. Use a regional bucket in the region you selected.\nREGION = 'us-central1' # Choose an available region for Cloud AI Platform\n\n# for bash\nos.environ['PROJECT'] = PROJECT\nos.environ['BUCKET'] = BUCKET\nos.environ['REGION'] = REGION\nos.environ['TFVERSION'] = '1.15' \n\n## ensure we're using python3 env\nos.environ['CLOUDSDK_PYTHON'] = 'python3'\n\n%%bash\ngcloud config set project $PROJECT\ngcloud config set compute/region $REGION\n\n## ensure we predict locally with our current Python environment\ngcloud config set ml_engine/local_python `which python`",
"<h2> 2. Specifying query to pull the data </h2>\n\nLet's pull out a few extra columns from the timestamp.",
"def create_query(phase, EVERY_N):\n if EVERY_N == None:\n EVERY_N = 4 #use full dataset\n \n #select and pre-process fields\n base_query = \"\"\"\nSELECT\n (tolls_amount + fare_amount) AS fare_amount,\n DAYOFWEEK(pickup_datetime) AS dayofweek,\n HOUR(pickup_datetime) AS hourofday,\n pickup_longitude AS pickuplon,\n pickup_latitude AS pickuplat,\n dropoff_longitude AS dropofflon,\n dropoff_latitude AS dropofflat,\n passenger_count*1.0 AS passengers,\n CONCAT(STRING(pickup_datetime), STRING(pickup_longitude), STRING(pickup_latitude), STRING(dropoff_latitude), STRING(dropoff_longitude)) AS key\nFROM\n [nyc-tlc:yellow.trips]\nWHERE\n trip_distance > 0\n AND fare_amount >= 2.5\n AND pickup_longitude > -78\n AND pickup_longitude < -70\n AND dropoff_longitude > -78\n AND dropoff_longitude < -70\n AND pickup_latitude > 37\n AND pickup_latitude < 45\n AND dropoff_latitude > 37\n AND dropoff_latitude < 45\n AND passenger_count > 0\n \"\"\"\n \n #add subsampling criteria by modding with hashkey\n if phase == 'train': \n query = \"{} AND ABS(HASH(pickup_datetime)) % {} < 2\".format(base_query,EVERY_N)\n elif phase == 'valid': \n query = \"{} AND ABS(HASH(pickup_datetime)) % {} == 2\".format(base_query,EVERY_N)\n elif phase == 'test':\n query = \"{} AND ABS(HASH(pickup_datetime)) % {} == 3\".format(base_query,EVERY_N)\n return query\n \nprint(create_query('valid', 100)) #example query using 1% of data",
"Try the query above in https://bigquery.cloud.google.com/table/nyc-tlc:yellow.trips if you want to see what it does (ADD LIMIT 10 to the query!)\n<h2> 3. Preprocessing Dataflow job from BigQuery </h2>\n\nThis code reads from BigQuery and saves the data as-is on Google Cloud Storage. We can do additional preprocessing and cleanup inside Dataflow, but then we'll have to remember to repeat that prepreprocessing during inference. It is better to use tf.transform which will do this book-keeping for you, or to do preprocessing within your TensorFlow model. We will look at this in future notebooks. For now, we are simply moving data from BigQuery to CSV using Dataflow.\nWhile we could read from BQ directly from TensorFlow (See: https://www.tensorflow.org/api_docs/python/tf/contrib/cloud/BigQueryReader), it is quite convenient to export to CSV and do the training off CSV. Let's use Dataflow to do this at scale.\nBecause we are running this on the Cloud, you should go to the GCP Console (https://console.cloud.google.com/dataflow) to look at the status of the job. It will take several minutes for the preprocessing job to launch.",
"%%bash\nif gsutil ls | grep -q gs://${BUCKET}/taxifare/ch4/taxi_preproc/; then\n gsutil -m rm -rf gs://$BUCKET/taxifare/ch4/taxi_preproc/\nfi",
"First, let's define a function for preprocessing the data",
"import datetime\n\n####\n# Arguments:\n# -rowdict: Dictionary. The beam bigquery reader returns a PCollection in\n# which each row is represented as a python dictionary\n# Returns:\n# -rowstring: a comma separated string representation of the record with dayofweek\n# converted from int to string (e.g. 3 --> Tue)\n####\ndef to_csv(rowdict):\n days = ['null', 'Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']\n CSV_COLUMNS = 'fare_amount,dayofweek,hourofday,pickuplon,pickuplat,dropofflon,dropofflat,passengers,key'.split(',')\n rowdict['dayofweek'] = days[rowdict['dayofweek']]\n rowstring = ','.join([str(rowdict[k]) for k in CSV_COLUMNS])\n return rowstring\n\n\n####\n# Arguments:\n# -EVERY_N: Integer. Sample one out of every N rows from the full dataset.\n# Larger values will yield smaller sample\n# -RUNNER: 'DirectRunner' or 'DataflowRunner'. Specfy to run the pipeline\n# locally or on Google Cloud respectively. \n# Side-effects:\n# -Creates and executes dataflow pipeline. \n# See https://beam.apache.org/documentation/programming-guide/#creating-a-pipeline\n####\ndef preprocess(EVERY_N, RUNNER):\n job_name = 'preprocess-taxifeatures' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S')\n print('Launching Dataflow job {} ... 
hang on'.format(job_name))\n OUTPUT_DIR = 'gs://{0}/taxifare/ch4/taxi_preproc/'.format(BUCKET)\n\n #dictionary of pipeline options\n options = {\n 'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),\n 'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),\n 'job_name': 'preprocess-taxifeatures' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S'),\n 'project': PROJECT,\n 'runner': RUNNER,\n 'num_workers' : 4,\n 'max_num_workers' : 5\n }\n #instantiate PipelineOptions object using options dictionary\n opts = beam.pipeline.PipelineOptions(flags=[], **options)\n #instantantiate Pipeline object using PipelineOptions\n with beam.Pipeline(options=opts) as p:\n for phase in ['train', 'valid']:\n query = create_query(phase, EVERY_N) \n outfile = os.path.join(OUTPUT_DIR, '{}.csv'.format(phase))\n (\n p | 'read_{}'.format(phase) >> beam.io.Read(beam.io.BigQuerySource(query=query))\n | 'tocsv_{}'.format(phase) >> beam.Map(to_csv)\n | 'write_{}'.format(phase) >> beam.io.Write(beam.io.WriteToText(outfile))\n )\n print(\"Done\")",
"Now, let's run pipeline locally. This takes upto <b>5 minutes</b>. You will see a message \"Done\" when it is done.",
"preprocess(50*10000, 'DirectRunner') \n\n%%bash\ngsutil ls gs://$BUCKET/taxifare/ch4/taxi_preproc/",
"4. Run Beam pipeline on Cloud Dataflow\nRun pipeline on cloud on a larger sample size.",
"%%bash\nif gsutil ls | grep -q gs://${BUCKET}/taxifare/ch4/taxi_preproc/; then\n gsutil -m rm -rf gs://$BUCKET/taxifare/ch4/taxi_preproc/\nfi",
"The following step will take <b>15-20 minutes.</b> Monitor job progress on the Cloud Console, in the Dataflow section",
"preprocess(50*100, 'DataflowRunner') \n",
"Once the job completes, observe the files created in Google Cloud Storage",
"%%bash\ngsutil ls -l gs://$BUCKET/taxifare/ch4/taxi_preproc/\n\n%%bash\n#print first 10 lines of first shard of train.csv\ngsutil cat \"gs://$BUCKET/taxifare/ch4/taxi_preproc/train.csv-00000-of-*\" | head",
"5. Develop model with new inputs\nDownload the first shard of the preprocessed data to enable local development.",
"%%bash\nif [ -d sample ]; then\n rm -rf sample\nfi\nmkdir sample\ngsutil cat \"gs://$BUCKET/taxifare/ch4/taxi_preproc/train.csv-00000-of-*\" > sample/train.csv\ngsutil cat \"gs://$BUCKET/taxifare/ch4/taxi_preproc/valid.csv-00000-of-*\" > sample/valid.csv",
"We have two new inputs in the INPUT_COLUMNS, three engineered features, and the estimator involves bucketization and feature crosses.",
"%%bash\ngrep -A 20 \"INPUT_COLUMNS =\" taxifare/trainer/model.py\n\n%%bash\ngrep -A 50 \"build_estimator\" taxifare/trainer/model.py\n\n%%bash\ngrep -A 15 \"add_engineered(\" taxifare/trainer/model.py",
"Try out the new model on the local sample (this takes <b>5 minutes</b>) to make sure it works fine.",
"%%bash\nrm -rf taxifare.tar.gz taxi_trained\nexport PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare\npython -m trainer.task \\\n --train_data_paths=${PWD}/sample/train.csv \\\n --eval_data_paths=${PWD}/sample/valid.csv \\\n --output_dir=${PWD}/taxi_trained \\\n --train_steps=10 \\\n --job-dir=/tmp\n\n%%bash\nls taxi_trained/export/exporter/",
"You can use saved_model_cli to look at the exported signature. Note that the model doesn't need any of the engineered features as inputs. It will compute latdiff, londiff, euclidean from the provided inputs, thanks to the add_engineered call in the serving_input_fn.",
"%%bash\nmodel_dir=$(ls ${PWD}/taxi_trained/export/exporter | tail -1)\nsaved_model_cli show --dir ${PWD}/taxi_trained/export/exporter/${model_dir} --all\n\n%%writefile /tmp/test.json\n{\"dayofweek\": \"Sun\", \"hourofday\": 17, \"pickuplon\": -73.885262, \"pickuplat\": 40.773008, \"dropofflon\": -73.987232, \"dropofflat\": 40.732403, \"passengers\": 2}\n\n%%bash\nmodel_dir=$(ls ${PWD}/taxi_trained/export/exporter)\ngcloud ai-platform local predict \\\n --model-dir=${PWD}/taxi_trained/export/exporter/${model_dir} \\\n --json-instances=/tmp/test.json",
"6. Train on cloud\nThis will take <b> 10-15 minutes </b> even though the prompt immediately returns after the job is submitted. Monitor job progress on the Cloud Console, in the AI Platform section and wait for the training job to complete.",
"%%bash\nOUTDIR=gs://${BUCKET}/taxifare/ch4/taxi_trained\nJOBNAME=lab4a_$(date -u +%y%m%d_%H%M%S)\necho $OUTDIR $REGION $JOBNAME\ngsutil -m rm -rf $OUTDIR\ngcloud ai-platform jobs submit training $JOBNAME \\\n --region=$REGION \\\n --module-name=trainer.task \\\n --package-path=${PWD}/taxifare/trainer \\\n --job-dir=$OUTDIR \\\n --staging-bucket=gs://$BUCKET \\\n --scale-tier=BASIC \\\n --runtime-version=$TFVERSION \\\n -- \\\n --train_data_paths=\"gs://$BUCKET/taxifare/ch4/taxi_preproc/train*\" \\\n --eval_data_paths=\"gs://${BUCKET}/taxifare/ch4/taxi_preproc/valid*\" \\\n --train_steps=5000 \\\n --output_dir=$OUTDIR",
"The RMSE is now 8.33249, an improvement over the 9.3 that we were getting ... of course, we won't know until we train/validate on a larger dataset. Still, this is promising. But before we do that, let's do hyper-parameter tuning.\n<b>Use the Cloud Console link to monitor the job and do NOT proceed until the job is done.</b>",
"%%bash\ngsutil ls gs://${BUCKET}/taxifare/ch4/taxi_trained/export/exporter | tail -1\n\n%%bash\nmodel_dir=$(gsutil ls gs://${BUCKET}/taxifare/ch4/taxi_trained/export/exporter | tail -1)\nsaved_model_cli show --dir ${model_dir} --all\n\n%%bash\nmodel_dir=$(gsutil ls gs://${BUCKET}/taxifare/ch4/taxi_trained/export/exporter | tail -1)\ngcloud ai-platform local predict \\\n --model-dir=${model_dir} \\\n --json-instances=/tmp/test.json",
"Optional: deploy model to cloud",
"%%bash\nMODEL_NAME=\"feateng\"\nMODEL_VERSION=\"v1\"\nMODEL_LOCATION=$(gsutil ls gs://${BUCKET}/taxifare/ch4/taxi_trained/export/exporter | tail -1)\necho \"Run these commands one-by-one (the very first time, you'll create a model and then create a version)\"\n#gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME}\n#gcloud ai-platform delete ${MODEL_NAME}\ngcloud ai-platform models create ${MODEL_NAME} --regions $REGION\ngcloud ai-platform versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION\n\n%%bash\ngcloud ai-platform predict --model=feateng --version=v1 --json-instances=/tmp/test.json",
"Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
MaxPowerWasTaken/MaxPowerWasTaken.github.io
|
jupyter_notebooks/Pandas_View_vs_Copy.ipynb
|
gpl-3.0
|
[
"Pandas Data Munging: Avoiding that 'SettingWithCopyWarning'\nIf you use Python for data analysis, you probably use Pandas for Data Munging. And if you use Pandas, you've probably come across the warning below:\n```\nSettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n```\nThe Pandas documentation is great in general, but it's easy to read through the link above and still be confused. Or if you're like me, you'll read the documentation page, think \"Oh, I get it,\" and then get the same warning again.\nA Simple Reproducible Example of The Warning<sup>(tm)</sup>\nHere's where this issue pops up. Say you have some data:",
"import pandas as pd\ndf = pd.DataFrame({'Number' : [100,200,300,400,500], 'Letter' : ['a','b','c', 'd', 'e']})\ndf",
"...and you want to filter it on some criteria. Pandas makes that easy with Boolean Indexing",
"criteria = df['Number']>300\ncriteria\n\n#Keep only rows which correspond to 'Number'>300 ('True' in the 'criteria' vector above)\ndf[criteria]",
"This works great right? Unfortunately not, because once we:\n1. Use that filtering code to create a new Pandas DataFrame, and\n2. Assign a new column or change an existing column in that DataFrame\nlike so...",
"#Create a new DataFrame based on filtering criteria\ndf_2 = df[criteria]\n\n#Assign a new column and print output\ndf_2['new column'] = 'new value'\ndf_2",
"There's the warning.\nSo what should we have done differently? The warning suggests using \".loc[row_indexer, col_indexer]\". So let's try subsetting the DataFrame the same way as before, but this time using the df.loc[ ] method.\nRe-Creating Our New Dataframe Using .loc[]",
"df.loc[criteria, :]\n\n#Create New DataFrame Based on Filtering Criteria\ndf_2 = df.loc[criteria, :]\n\n#Add a New Column to the DataFrame\ndf_2.loc[:, 'new column'] = 'new value'\ndf_2",
"Two warnings this time!\nOK, So What's Going On?\nRecall that our \"criteria\" variable is a Pandas Series of Boolean True/False values, corresponding to whether a row of 'df' meets our Number>300 criteria.",
"criteria",
"The Pandas Docs say a \"common operation is the use of boolean vectors to filter the data\" as we've done here. But apparently a boolean vector is not the \"row_indexer\" the warning advises us to use with .loc[] for creating new dataframes. Instead, Pandas wants us to use .loc[] with a vector of row-numbers (technically, \"row labels\", which here are numbers).\nSolution\nWe can get to that \"row_indexer\" with one extra line of code. Building on what we had before. Instead of creating our new dataframe by filtering rows with a vector of True/False like below...",
"df_2 = df[criteria]",
"We first grab the indices of that filtered dataframe using .index...",
"criteria_row_indices = df[criteria].index\ncriteria_row_indices",
"And pass that list of indices to .loc[ ] to create our new dataframe",
"new_df = df.loc[criteria_row_indices, :]\nnew_df",
"Now we can add a new column without throwing The Warning <sup>(tm)</sup>",
"new_df['New Column'] = 'New Value'\nnew_df",
"Final Note - Did That Warning Even Mean Our Results Were Wrong?\nIn each of the instances above where we got a warning, you may have noticed that we also got the results we expected. Maybe the warning isn't such a big deal? It's not an error right? \nThe Pandas documentation page linked in the warning states that the results may be correct, but are not reliably correct, because of the unpredictable nature of when an underlying _getitem_ call returns a view vs a copy. After reading some StackOverflow discussions, at least one dev is confident that \"if you know what you are doing\", you can ignore these warnings (or suppress them) and rest assured your results are reliable. \nI'm sure that works for him, but even if I managed to convince myself when it's safe to ignore this warning, what happens in a year when I forget if some old code which throws the warning is reliable or not? Was this written before I figured it out? What happens when someone else is using my code, asks about the warning, and I say \"don't worry it's fine, but I forget why\" and wave my hands a lot.\nPlus, doesn't that warning just bother you? Either out of prudence or neuroticism, I'm not interested in peppering my logs with warnings from the Pandas devs, and I'm not cavalier enough to suppress the warning messages. \nTo me, the clean code solution requires using code that provides reliably correct results without these warnings."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
tensorflow/docs-l10n
|
site/en-snapshot/model_optimization/guide/pruning/pruning_with_sparsity_2_by_4.ipynb
|
apache-2.0
|
[
"Copyright 2021 The TensorFlow Authors.",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"Sparse weights using structural pruning\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/model_optimization/guide/pruning/pruning_with_sparsity_2_by_4\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/pruning/pruning_with_sparsity_2_by_4.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/pruning/pruning_with_sparsity_2_by_4.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/model-optimization/tensorflow_model_optimization/g3doc/guide/pruning/pruning_with_sparsity_2_by_4.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nStructural pruning weights from your model to make it sparse in specific pattern can accelerate model inference time with appropriate HW supports. \nThis tutorial shows you how to:\n* Define and train a model on the mnist dataset with a specific structural sparsity\n* Convert the pruned model to tflite format\n* Visualize structure of the pruned weights\nFor a general overview of the pruning technique for the model optimization, see the pruning overview. For tutorial on general weight pruning, see Pruning in Keras.\nStructural pruning of weights\nStructural pruning systematically zeroes out model weights at the beginning of the training process. 
You apply this pruning techniques to regular blocks of weights to speed up inference on supporting HWs, for example: grouping weights in the model by blocks of four and zeroing out two of those weights in each block, known as a 2 by 4 reduction. This technique applies only to the last dimension of the weight tensor for the model that is converted by TensorFlow Lite. For example, Conv2D layer weights in TensorFlow Lite have the structure [channel_out, height, width, channel_in] and Dense layer weights have the structure [channel_out, channel_in]. The sparsity pattern is applied to the weights in the last dimension: channel_in.\nCompare to the random sparsity, the structured sparsity generally has lower accuracy due to restrictive structure, however, it can reduce inference time significantly on the supported hardware.\nPruning can be applied to a model together with other model compression techniques for better compression rate. See quantization and clustering examples in collaborative optimization technique for more details.\nSetup\nPrepare your development environment and data.",
"! pip install -q tensorflow\n! pip install -q tensorflow-model-optimization\n! pip install -q matplotlib\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nimport tensorflow_model_optimization as tfmot\nprune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude",
"Download and normalize image data from the MNIST dataset",
"# Load MNIST dataset.\nmnist = keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# Normalize the input image so that each pixel value is between 0 and 1.\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0",
"Define structural pruning parameters\nDefine parameters for pruning and specify the type of structural pruning. Set the parameters for pruning to (2, 4).\nThese settings mean that in a block of four elements, at least two with the lowest magnitude are set to zero.\nYou don't have to set the pruning_schedule parameter. By default, the pruning mask is defined at the first step and it is not updated during the training.",
"pruning_params_2_by_4 = {\n 'sparsity_m_by_n': (2, 4),\n}",
"Define parameters for random pruning with the target sparsity of 50%.",
"pruning_params_sparsity_0_5 = {\n 'pruning_schedule': tfmot.sparsity.keras.ConstantSparsity(target_sparsity=0.5,\n begin_step=0,\n frequency=100)\n}",
"Define the model architecture and specify which layers to prune. Structural pruning is applied based on the layers of the model you select.\nIn the example below, we prune only some of the layers. We prune the second Conv2D layer and the first Dense layer.\nNotice that the first Conv2D layer cannot be pruned structurally. To be pruned structurally, it should have more than one input channels. Instead, we prune the first Conv2D layer with random pruning.",
"model = keras.Sequential([\n prune_low_magnitude(\n keras.layers.Conv2D(\n 32, 5, padding='same', activation='relu',\n input_shape=(28, 28, 1),\n name=\"pruning_sparsity_0_5\"),\n **pruning_params_sparsity_0_5),\n keras.layers.MaxPooling2D((2, 2), (2, 2), padding='same'),\n prune_low_magnitude(\n keras.layers.Conv2D(\n 64, 5, padding='same',\n name=\"structural_pruning\"),\n **pruning_params_2_by_4),\n keras.layers.BatchNormalization(),\n keras.layers.ReLU(),\n keras.layers.MaxPooling2D((2, 2), (2, 2), padding='same'),\n keras.layers.Flatten(),\n prune_low_magnitude(\n keras.layers.Dense(\n 1024, activation='relu',\n name=\"structural_pruning_dense\"),\n **pruning_params_2_by_4),\n keras.layers.Dropout(0.4),\n keras.layers.Dense(10)\n])\n\nmodel.compile(optimizer='adam',\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel.summary()",
"Train and evaluate the model.",
"batch_size = 128\nepochs = 2\n\nmodel.fit(\n train_images,\n train_labels,\n batch_size=batch_size,\n epochs=epochs,\n verbose=0,\n callbacks=tfmot.sparsity.keras.UpdatePruningStep(),\n validation_split=0.1)\n\n_, pruned_model_accuracy = model.evaluate(test_images, test_labels, verbose=0)\nprint('Pruned test accuracy:', pruned_model_accuracy)",
"Remove the pruning wrapper so that it is not included in the model when you convert it to TensorFlow Lite format.",
"model = tfmot.sparsity.keras.strip_pruning(model)",
"Convert model to tflite format",
"import tempfile\n\nconverter = tf.lite.TFLiteConverter.from_keras_model(model)\ntflite_model = converter.convert()\n\n_, tflite_file = tempfile.mkstemp('.tflite')\nprint('Saved converted pruned model to:', tflite_file)\nwith open(tflite_file, 'wb') as f:\n f.write(tflite_model)",
"Visualize and check weights\nNow visualize the structure of weights in the Dense layer pruned with 2 by 4 sparsity. Extract the weights from the tflite file.",
"# Load tflite file with the created pruned model\ninterpreter = tf.lite.Interpreter(model_path=tflite_file)\ninterpreter.allocate_tensors()\n\ndetails = interpreter.get_tensor_details()\n\n# Weights of the dense layer that has been pruned.\ntensor_name = 'structural_pruning_dense/MatMul'\ndetail = [x for x in details if tensor_name in x[\"name\"]]\n\n# We need the first layer.\ntensor_data = interpreter.tensor(detail[0][\"index\"])()",
"To verify that we selected the correct layer that has been pruned, print the shape of the weight tensor.",
"print(f\"Shape of Dense layer is {tensor_data.shape}\")",
"Now we visualize the structure for a small subset of the weight tensor. The structure of the weight tensor is sparse in the last dimension, using the (2,4) pattern: two elements out of four are zeros. To make the visualization more clear, we replace all non-zero values with ones.",
"import matplotlib.pyplot as plt\nimport numpy as np\n\n# The value 24 is chosen for convenience.\nwidth = height = 24\n\nsubset_values_to_display = tensor_data[0:height, 0:width]\n\nval_ones = np.ones([height, width])\nval_zeros = np.zeros([height, width])\nsubset_values_to_display = np.where(abs(subset_values_to_display) > 0, val_ones, val_zeros)",
"Define the auxiliary function to draw separation lines to see the structure clearly.",
"def plot_separation_lines(height, width):\n\n block_size = [1, 4]\n\n # Add separation lines to the figure.\n num_hlines = int((height - 1) / block_size[0])\n num_vlines = int((width - 1) / block_size[1])\n line_y_pos = [y * block_size[0] for y in range(1, num_hlines + 1)]\n line_x_pos = [x * block_size[1] for x in range(1, num_vlines + 1)]\n\n for y_pos in line_y_pos:\n plt.plot([-0.5, width], [y_pos - 0.5 , y_pos - 0.5], color='w')\n\n for x_pos in line_x_pos:\n plt.plot([x_pos - 0.5, x_pos - 0.5], [-0.5, height], color='w')",
"Now visualize the subset of the weight tensor.",
"plot_separation_lines(height, width)\n\nplt.axis('off')\nplt.imshow(subset_values_to_display)\nplt.colorbar()\nplt.title(\"Structural pruning for Dense layer\")\nplt.show()",
"Visualize weights for the Conv2D layer. The structural sparsity is applied in the last channel, similar to the Dense layer. Only the second Conv2D layer is structurally pruned as pointed out above.",
"# Get weights of the convolutional layer that has been pruned with 2 by 4 sparsity.\ntensor_name = 'structural_pruning/Conv2D'\ndetail = [x for x in details if tensor_name in x[\"name\"]]\ntensor_data = interpreter.tensor(detail[1][\"index\"])()\nprint(f\"Shape of the weight tensor is {tensor_data.shape}\")",
"Similar to the weights of Dense layer, the last dimension of the kernel has a (2, 4) structure.",
"weights_to_display = tf.reshape(tensor_data, [tf.reduce_prod(tensor_data.shape[:-1]), -1])\nweights_to_display = weights_to_display[0:width, 0:height]\n\nval_ones = np.ones([height, width])\nval_zeros = np.zeros([height, width])\nsubset_values_to_display = np.where(abs(weights_to_display) > 1e-9, val_ones, val_zeros)\n\nplot_separation_lines(height, width)\n\nplt.axis('off')\nplt.imshow(subset_values_to_display)\nplt.colorbar()\nplt.title(\"Structurally pruned weights for Conv2D layer\")\nplt.show()",
"Let's see how those randomly pruned weights look. We extract them and display a subset of the weight tensor.",
"# Get weights of the convolutional layer that has been pruned with random pruning.\ntensor_name = 'pruning_sparsity_0_5/Conv2D'\ndetail = [x for x in details if tensor_name in x[\"name\"]]\ntensor_data = interpreter.tensor(detail[0][\"index\"])()\nprint(f\"Shape of the weight tensor is {tensor_data.shape}\")\n\nweights_to_display = tf.reshape(tensor_data, [tensor_data.shape[0],tf.reduce_prod(tensor_data.shape[1:])])\nweights_to_display = weights_to_display[0:width, 0:height]\n\nval_ones = np.ones([height, width])\nval_zeros = np.zeros([height, width])\nsubset_values_to_display = np.where(abs(weights_to_display) > 0, val_ones, val_zeros)\n\nplot_separation_lines(height, width)\n\nplt.axis('off')\nplt.imshow(subset_values_to_display)\nplt.colorbar()\nplt.title(\"Unstructed pruned weights for Conv2D layer\")\nplt.show()",
"The TensorFlow Model Optimization Toolkit includes a python script that can be used to check whether which layers in the model from the given tflite file have the structurally pruned weights: check_sparsity_m_by_n.py. The following command demonstrates how to use this tool to check for 2 by 4 sparsity in a specific model.",
"! python3 ./tensorflow_model_optimization/python/core/sparsity/keras/tools/check_sparsity_m_by_n.py --model_tflite=pruned_model.tflite --m_by_n=2,4\n"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
RaoUmer/lightning-example-notebooks
|
plots/line.ipynb
|
mit
|
[
"<img style='float: left' src=\"http://lightning-viz.github.io/images/logo.png\"> <br> <br> Line plots in <a href='http://lightning-viz.github.io/'><font color='#9175f0'>Lightning</font></a>\n<hr> Setup",
"from lightning import Lightning\n\nfrom numpy import random, asarray, arange\nfrom sklearn import datasets\nfrom scipy.ndimage.filters import gaussian_filter\nfrom seaborn import color_palette",
"Connect to server",
"lgn = Lightning(ipython=True, host='http://public.lightning-viz.org')",
"<hr> One random line with default styles\nTo experience Lightning's custom zoom behaviors, try zooming and panning with the alt or command keys held down.\n<br>\nAlt will only zoom/pan in x (especially useful for time series), and command for y.",
"y = gaussian_filter(random.rand(100), 3)\nlgn.line(y)",
"<hr> Setting line width and color\nFor a single line you can pass one size and color.",
"y = gaussian_filter(random.rand(100), 3)\nlgn.line(y, thickness=10, color=[255,100,100])",
"<hr> Multiple lines\nColors for multiple lines will automatically be assigned. Try hovering over a line to highlight it!",
"y = gaussian_filter(random.rand(5,100), [0, 3])\ny = (y.T + arange(0,5)*0.2).T\nlgn.line(y, thickness=6)",
"You can also set colors and thicknesses yourself, providing one per line. Here we do so using a palette from seaborn.",
"y = gaussian_filter(random.rand(5,100), [0, 3])\ny = (y.T + arange(0,5)*0.2).T\nc = map(lambda x: list(asarray(x)*255), color_palette('Blues', 5))\ns = [8, 10, 12, 14, 16]\nlgn.line(y, thickness=s, color=c)",
"<hr> Staggered lines and indices\nIt's possible to show multiple lines of unequal length. \n<br>\nHere we also demonstrate passing an index to set the xaxis (we assume the index corresponds to the longest of the lines).",
"y1 = gaussian_filter(random.rand(50), 5).tolist()\ny2 = gaussian_filter(random.rand(75), 5).tolist()\ny3 = gaussian_filter(random.rand(100), 5).tolist()\nx = range(50,150)\nlgn.line([y1,y2,y3], thickness=6, index=x)",
"<hr> Clustered series with group labels\nInstead of specifying colors directly as rgb, you can specify group assignments.\n<br>\nHere we use scikitlearn to generate clusters and then color according to cluster label.",
"d, g = datasets.make_blobs(n_features=5, n_samples=20, centers=5, cluster_std=1.0, random_state=100)\nlgn.line(d, group=g)",
"<hr> Axis labels\nYou can also label the axes.",
"y = gaussian_filter(random.rand(100), 3)\nlgn.line(y, thickness=10, xaxis='variable #1', yaxis='variable #2')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tiagoft/curso_audio
|
estatisticas_de_timbre.ipynb
|
mit
|
[
"Estatísticas de Timbre\nObjetivo\nAo fim desta unidade, o aluno será capaz de utilizar estatísticas de descritores de timbre como um indicador do conteúdo de um arquivo de áudio\nPré-requisitos\nAntes de iniciar esta unidade, o aluno deve sentir-se confortável com a seguinte afirmação:\nÉ possível calcular características objetivas de um espectrograma de forma a caracterizar aspectos de seu conteúdo acústico.\nVetores\nUm vetor é um conjunto ordenado de medidas escalares. Por exemplo, podemos indicar um ponto na superfície terrestre através de um par ordenado $(a, b)$, onde $a$ é a longitude e $b$ é a latitude de um ponto. O mesmo vale para o plano Euclidiano: nele, qualquer ponto pode ser definido por um par $(x, y)$.\nUm vetor é, usualmente, denotado por uma letra minúscula em negrito, tal qual $\\boldsymbol x$. O $n$-ésmio elemento de um vetor é denotado $x_n$ (sem negrito). Para vetores, valem as seguintes propriedades:\nSoma de vetores\nA soma de dois vetores é um vetor composto pela soma de seus elementos correspondentes. Assim, se $\\boldsymbol z = \\boldsymbol y + \\boldsymbol x$, então $z_n = y_n + x_n$.\nMultiplicação por escalar\nA multiplicação de um vetor por um número escalar resulta num vetor no qual cada um dos elementos do vetor original foi multiplicado por esse número escalar. Assim, se $\\boldsymbol y = a \\boldsymbol x$, então $y_n = a x_n$.\nMódulo\nO módulo de um vetor é igual ao comprimento da reta que o liga à origem. Para vetores de dimensão arbitrária $N$, a norma $||\\boldsymbol x ||$ é dada por:\n$$|| \\boldsymbol x || = \\sqrt{\\sum_{n=1}^N x_n^2}.$$\nDistância Euclidiana\nA distância Euclidiana entre dois vetores é igual ao módulo de sua diferença, isto é:\n$$d(\\boldsymbol x, \\boldsymbol y) = || \\boldsymbol y - \\boldsymbol x || = || \\boldsymbol x - \\boldsymbol y || = \\sqrt{\\sum_{n=1}^N (x_n-y_n)^2}.$$",
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Demonstrando propriedades de vetores\n# Ideia: coloque mais dimensoes nos vetores e veja o que acontece!\nx = np.array([4, 3])\ny = np.array([3, 4])\n\nprint x\nprint y\nprint x + y # Soma de vetores\nprint 10 * x # Multiplicacao por escalar\nprint np.linalg.norm(y), np.linalg.norm(x) # Modulo de vetores\nprint np.linalg.norm(y-x), np.linalg.norm(x-y) # Distancia euclidiana",
"Estatística e vetores\nPodemos utilizar vetores para registrar uma série de medições de um mesmo fenômeno. Um sinal digitalizado, por exemplo, pode ser considerado um vetor. Assim, é possível tomar um certo conjunto de amostras de dois sinais e calcular sua distância Euclidiana, por exemplo. Na notação que aplicamos para sinais, teríamos:\n$$d(\\boldsymbol x, \\boldsymbol y) = \\sqrt{\\sum_{n=0}^{N-1} (x[n]-y[n])^2}.$$\nAssim, verificamos que vetores e sinais podem ser representados da mesma maneira, e, portanto, podemos aplicar propriedades de sinais em vetores e propriedades de vetores em sinais.\nEm vetores, tanto quanto em sinais ou em um conjunto qualquer de números, podemos calcular medidas estatísticas. Elas descrevem como um sinal se comporta ao longo de várias medições, sob um ponto de vista estatístico.\nMédia e Variância\nA média de um conjunto de amostras é dada por:\n$$\\mu_x = \\frac{\\sum_{n=0}^{N-1} x[n]}{N}.$$\nA média é o valor $a$ que minimiza a soma dos quadrados das distâncias $(a-x[n])^2$ ao longo de todas as amostras, isto é:\n$$\\mu_x = \\arg \\min_a \\sum_{n=0}^{N-1} (a-x[n])^2.$$\nAinda assim, sempre que houver qualquer tipo de variação em um sinal $x[n]$, é impossível escolher um valor que leva a soma de todas as distâncias a zero. A medida que indica qual é a dispersão residual é a variância, dada por:\n$$\\sigma^2_x = \\sum_{n=0} ^{N-1} (\\mu_x - x[n])^2.$$\nA media $\\sigma_x = \\sqrt{\\sigma_x^2}$ é chamada de desvio padrão.",
"# Demonstrando propriedades de vetores\n# Ideia: coloque mais dimensoes nos vetores e veja o que acontece!\nx = np.array([-40, -30, 30, 40])\ny = np.array([3, 4, -3, -4])\nz = np.array([0, 1, -6, -7])\n\nprint np.mean(x), np.mean(y), np.mean(z) # Medias\nprint np.var(x), np.var(y), np.var(z) # Variancias",
"Normalizando vetores\nUma operação matemática interessante que podemos fazer é a de normalizar vetores. Esta operação resulta em um vetor com média zero e variância unitária. O vetor $\\boldsymbol x_{\\mbox{normalizado}}$ pode ser calculado por:\n$$\\boldsymbol x_{\\mbox{normalizado}} = \\frac{\\boldsymbol x-\\mu_x}{\\sigma_x}.$$",
"x = np.array([4, 3, 2, 4, 3.2, 1, 2, 90, 1, 2, 3, 4])\nx_norm = (x - np.mean(x))/np.sqrt(np.var(x))\nprint np.mean(x), np.mean(x_norm)\nprint np.var(x), np.var(x_norm)",
"Análise estatística de descritores de áudio\nNesta análise, vamos verificar como as estatísticas de descritores de áudio podem ser utilizadas para identificar, de forma objetiva, o conteúdo de descritores.\nNo código que segue, calcularemos o flatness espectral ao longo de quadros de curta duração de três trechos de áudio, contendo, respectivamente uma percussão, uma guitarra e um coral. Após, mostraremos os valores calculados em um histograma.",
"import mir3.modules.tool.wav2spectrogram as spectrogram\nimport mir3.modules.features.flatness as flatness\n\nfnames = ['audio/tabla.wav', 'audio/bbking.wav', 'audio/chorus.wav']\nflat_samples = []\nfor fname in fnames:\n wav2spec = spectrogram.Wav2Spectrogram() # Objeto que converte arquivos wav para espectrogramas\n s = wav2spec.convert(open(fname, 'rb'), window_length=1024, window_step=512, spectrum_type='magnitude')\n\n fness = flatness.Flatness()\n f = fness.calc_track(s)\n flat_samples.append(f.data)\n \nplt.figure();\nplt.hist(flat_samples, 15, normed=1, histtype='bar',\n color=['red', 'blue', 'green'],\n label=['Tabla', 'Guitarra', 'Coral']);\nplt.xlabel('Flatness');\nplt.ylabel('Quantidade de quadros');\nplt.legend(loc=1);",
"Podemos, neste momento, detectar alguns comportamentos interessantes:\n* O flatness do coral permanece o tempo todo abaixo de 0.1\n* Embora o flatness da guitarra seja, em geral, baixo, também pode subir para valores mais altos\n* O flatness da tabla tem uma dispersão muito grande entre 0 e 0.45.\nVeja que essas observações indicam que a média (a tendência geral) e a variância (dispersão) podem ser boas formas de descrever esses histogramas em uma forma mais compacta",
"m = []\ns =[]\n\nfor a in xrange(3):\n m.append(np.mean(flat_samples[a]))\n s.append(np.var(flat_samples[a]))\n \n print fnames[a], np.mean(flat_samples[a]), np.var(flat_samples[a])",
"É importante perceber que este procedimento permite relacionar arquivos de áudio a um vetor, de tantas dimensões quanto se queira (poderíamos expandir este procedimento para incluir outros descritores, por exemplo).\nEsses vetores podem ser mostrados numa figura:",
"color=['red', 'blue', 'green']\nlabel=['Tabla', 'Guitarra', 'Coral']\nplt.figure();\nfor a in xrange(len(m)):\n plt.scatter(m[a], s[a], color=color[a], label=label[a])\nplt.xlabel('Media de flatness')\nplt.ylabel('Variancia de flatness')\nplt.legend(label, loc=4);\nplt.show()",
"Generalização\nAté o momento, mostramos como relacionar arquivos de áudio a vetores, mas não há nenhuma prova de que esses vetores são representativos quanto ao conteúdo acústico que utilizamos como base. Vamos começar esse processo de forma anedotal, utilizando o arquivo de áudio que foi usado como exercício na unidade anterior. Calcularemos a média e a variância de seu flatness espectral ao longo do tempo, e então mostraremos como o vetor resultante se relaciona com os anteriores.",
"wav2spec = spectrogram.Wav2Spectrogram() # Objeto que converte arquivos wav para espectrogramas\nspec = wav2spec.convert(open('audio/testing.wav', 'rb'), window_length=1024, window_step=512, spectrum_type='magnitude')\nfness = flatness.Flatness()\nf = fness.calc_track(spec)\nm.append(np.mean(f.data))\ns.append(np.var(f.data))\n\ncolor=['red', 'blue', 'green', 'black']\nlabel=['Tabla', 'Guitarra', 'Coral', 'Teste']\nplt.figure();\nfor a in xrange(len(m)):\n plt.scatter(m[a], s[a], color=color[a], label=label[a])\nplt.xlabel('Media de flatness')\nplt.ylabel('Variancia de flatness')\nplt.legend(label, loc=4);\nplt.show()",
"Podemos visualizar que o ponto de teste parece mais próximo do vetor da Tabla que dos demais. Podemos calcular essa distância:",
"for i in xrange(3):\n print label[i], np.linalg.norm( np.array([s[-1]-s[i], m[-1]-m[i]]))",
"Portanto, o ponto está mais próximo de nossa referência para o som de tabla que dos demais.\nExercícios\n\nModifique o código computacional fornecido de forma a utilizar outros descritores, já vistos em aula.\nQuais deles parecem mais eficazes para separar quais tipos de sons (por exemplo: há algum descritor de acordo com o qual a distância entre o a guitarra e a tabla seja significativamente maior que a distância entre esses dois elementos e o coral?). \n\nConsidere também os arquivos audio/chirp.wav e audio/piano.wav, que contém, respectivamente, um glissando e uma nota de piano. Utilizando descritores e medidas objetivas, defina se eles se parecem mais com um coral, uma guitarra ou uma tabla. Essa resposta muda, dependendo dos descritores que forem utilizados?\n\n\n(começar em sala, continuar em casa) Encontre por volta de 20 arquivos de áudio que podem ser agrupados em duas categorias diferentes, à sua escolha (sugestão: use o FreeSound). Mostre, através de histogramas, como as duas categorias podem ser diferenciadas por meio de descritores calculados automaticamente. Na aula seguinte, apresente seus resultados para a classe."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ioos/notebooks_demos
|
notebooks/2017-01-18-siphon-explore-thredds.ipynb
|
mit
|
[
"Exploring the THREDDS catalog with Unidata's Siphon\nSiphon is a Python module for accessing data hosted on a THREDDS data server.\nSiphon works by parsing the catalog XML and exposing it with higher level functions.\nIn this notebook we will explore data available on the Central & Northern California Ocean Observing System (CeNCOOS) THREDDS. The cell below extracts the catalog information",
"from siphon.catalog import TDSCatalog\n\ncatalog = TDSCatalog(\"https://thredds.cencoos.org/thredds/catalog.xml\")\n\n\ninfo = \"\"\"\nCatalog information\n-------------------\n\nBase THREDDS URL: {}\nCatalog name: {}\nCatalog URL: {}\nMetadata: {}\n\"\"\".format(\n catalog.base_tds_url, catalog.catalog_name, catalog.catalog_url, catalog.metadata\n)\n\nprint(info)",
"Unfortunately this catalog has no metadata. So let's check what kind of services are available.",
"for service in catalog.services:\n print(service.name)",
"And what datasets are there?",
"print(\"\\n\".join(catalog.datasets.keys()))",
"It looks like model runs as well as satellite and HFR data. One can also check the catalog refs for more information",
"print(\"\\n\".join(catalog.catalog_refs.keys()))\n\nref = catalog.catalog_refs[\"Global\"]\n\n[value for value in dir(ref) if not value.startswith(\"__\")]\n\ninfo = \"\"\"\nHref: {}\nName: {}\nTitle: {}\n\"\"\".format(\n ref.href, ref.name, ref.title\n)\n\nprint(info)",
"The follow method navigates to that catalog ref and returns a new siphon.catalog.TDSCatalog object for that part of the THREDDS catalog.",
"cat = ref.follow()\n\nprint(type(cat))",
"That makes it easier to explore a small subset of the datasets available in the catalog.\nHere are the data from the Global subset.",
"print(\"\\n\".join(cat.datasets.keys()))",
"Let's extract the Global 1-km Sea Surface Temperature dataset from the global ref.",
"dataset = \"Global 1-km Sea Surface Temperature (G1SST)\"\n\nds = cat.datasets[dataset]\n\nds.name, ds.url_path",
"Siphon has a ncss (NetCDF subset service) access, here is a quote from the documentation:\n\nThis module contains code to support making data requests to\nthe NetCDF subset service (NCSS) on a THREDDS Data Server (TDS). This includes\nforming proper queries as well as parsing the returned data.\n\nLet's check if the catalog offers the NetcdfSubset in the access_urls.",
"for name, ds in catalog.datasets.items():\n if ds.access_urls:\n print(name)",
"All access_urls returned empty.... Maybe that is just a metadata issue because there is NetcdfSubset access when navigating in the webpage.",
"from IPython.display import HTML\n\niframe = (\n '<iframe src=\"{src}\" width=\"800\" height=\"550\" style=\"border:none;\"></iframe>'.format\n)\n\nurl = \"https://thredds.cencoos.org/thredds/catalog.html?dataset=G1_SST_US_WEST_COAST\"\nHTML(iframe(src=url))",
"To finish the post let's check if there is any WMS service available and overlay the data in a slippy (interactive) map.",
"services = [service for service in catalog.services if service.name == \"wms\"]\n\nservices",
"Found only one, let's tease that out and check the URL.",
"service = services[0]\n\nurl = service.base\n\nurl",
"OWSLib helps to inspect the available layers before plotting. Here we will get the first layer that has G1_SST_US_WEST_COAST on it.\nNote, however, we are skipping the discovery step of the wms information and hard-coding it instead.\nThat is to save time because parsing the URL http://pdx.axiomalaska.com/ncWMS/wms takes ~ 10 minutes. See this issue for more information.",
"from owslib.wms import WebMapService\n\nif False:\n web_map_services = WebMapService(url)\n layer = [\n key for key in web_map_services.contents.keys() if \"G1_SST_US_WEST_COAST\" in key\n ][0]\n wms = web_map_services.contents[layer]\n\n title = wms.title\n lon = (wms.boundingBox[0] + wms.boundingBox[2]) / 2.0\n lat = (wms.boundingBox[1] + wms.boundingBox[3]) / 2.0\n time = wms.defaulttimeposition\nelse:\n layer = \"G1_SST_US_WEST_COAST/analysed_sst\"\n title = \"Sea Surface Temperature\"\n lon, lat = -122.50, 39.50\n time = \"undefined\"\n\nimport folium\n\nm = folium.Map(location=[lat, lon], zoom_start=4)\n\nfolium.WmsTileLayer(\n name=\"{} at {}\".format(title, time),\n url=url,\n layers=layer,\n fmt=\"image/png\",\n transparent=True,\n).add_to(m)\n\nfolium.LayerControl().add_to(m)\n\nm",
"Last but not least a static image for the page thumbnail.",
"from IPython.display import Image\n\nImage(m._to_png())"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mintcloud/deep-learning
|
tv-script-generation/.ipynb_checkpoints/dlnd_tv_script_generation-checkpoint.ipynb
|
mit
|
[
"TV Script Generation\nIn this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.\nGet the Data\nThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like \"Moe's Cavern\", \"Flaming Moe's\", \"Uncle Moe's Family Feed-Bag\", etc..",
"%pdb\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\n\ndata_dir = './data/simpsons/moes_tavern_lines.txt'\ntext = helper.load_data(data_dir)\n# Ignore notice, since we don't use it for analysing the data\ntext = text[81:]",
"Explore the Data\nPlay around with view_sentence_range to view different parts of the data.",
"view_sentence_range = (1000, 1010)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport numpy as np\n\nprint('Dataset Stats')\nprint('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))\nscenes = text.split('\\n\\n')\nprint('Number of scenes: {}'.format(len(scenes)))\nsentence_count_scene = [scene.count('\\n') for scene in scenes]\nprint('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))\n\nsentences = [sentence for scene in scenes for sentence in scene.split('\\n')]\nprint('Number of lines/sentences: {}'.format(len(sentences)))\nword_count_sentence = [len(sentence.split()) for sentence in sentences]\nprint('Average number of words in each line: {}'.format(np.average(word_count_sentence)))\n\nprint()\nprint('The sentences {} to {}:'.format(*view_sentence_range))\nprint('\\n'.join(text.split('\\n')[view_sentence_range[0]:view_sentence_range[1]]))",
"Implement Preprocessing Functions\nThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:\n- Lookup Table\n- Tokenize Punctuation\nLookup Table\nTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:\n- Dictionary to go from the words to an id, we'll call vocab_to_int\n- Dictionary to go from the id to word, we'll call int_to_vocab\nReturn these dictionaries in the following tuple (vocab_to_int, int_to_vocab)",
"import numpy as np\nimport problem_unittests as tests\nfrom collections import Counter\n\ndef create_lookup_tables(text):\n \"\"\"\n Create lookup tables for vocabulary\n :param text: The text of tv scripts split into words\n :return: A tuple of dicts (vocab_to_int, int_to_vocab)\n \"\"\"\n # TODO: Implement Function\n \n words_counter = Counter(text)\n words_sorted = sorted(words_counter, key=words_counter.get,reverse=True)\n \n int_to_vocab = dict([i,words_sorted[i]] for i in range(len(words_sorted)))\n vocab_to_int = dict([words_sorted[i],i] for i in range(len(words_sorted)))\n return vocab_to_int, int_to_vocab\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_create_lookup_tables(create_lookup_tables)",
"Tokenize Punctuation\nWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word \"bye\" and \"bye!\".\nImplement the function token_lookup to return a dict that will be used to tokenize symbols like \"!\" into \"||Exclamation_Mark||\". Create a dictionary for the following symbols where the symbol is the key and value is the token:\n- Period ( . )\n- Comma ( , )\n- Quotation Mark ( \" )\n- Semicolon ( ; )\n- Exclamation mark ( ! )\n- Question mark ( ? )\n- Left Parentheses ( ( )\n- Right Parentheses ( ) )\n- Dash ( -- )\n- Return ( \\n )\nThis dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token \"dash\", try using something like \"||dash||\".",
"def token_lookup():\n \"\"\"\n Generate a dict to turn punctuation into a token.\n :return: Tokenize dictionary where the key is the punctuation and the value is the token\n \"\"\"\n # TODO: Implement Function\n token_lookup_dict = {\n \".\" : \"||PERIOD||\",\n \",\" : \"||COMMA||\",\n \"\\\"\" : \"||QUOTATION||\",\n \";\" : \"||SEMICOLON||\",\n \"!\" : \"||EXLAMATIONMARK||\",\n \"?\" : \"||QUESTIONMARK||\",\n \"(\" : \"||LEFTPARENTHESIS||\",\n \")\" : \"||RIGHTPARENTHESIS||\",\n \"--\" : \"||DASH||\",\n \"\\n\" : \"||RETURN||\"\n }\n return token_lookup_dict\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_tokenize(token_lookup)",
"Preprocess all the data and save it\nRunning the code cell below will preprocess all the data and save it to file.",
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Preprocess Training, Validation, and Testing Data\nhelper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)",
"Check Point\nThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.",
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\nimport numpy as np\nimport problem_unittests as tests\n\nint_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()",
"Build the Neural Network\nYou'll build the components necessary to build a RNN by implementing the following functions below:\n- get_inputs\n- get_init_cell\n- get_embed\n- build_rnn\n- build_nn\n- get_batches\nCheck the Version of TensorFlow and Access to GPU",
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nfrom distutils.version import LooseVersion\nimport warnings\nimport tensorflow as tf\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))",
"Input\nImplement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:\n- Input text placeholder named \"input\" using the TF Placeholder name parameter.\n- Targets placeholder\n- Learning Rate placeholder\nReturn the placeholders in the following the tuple (Input, Targets, LearingRate)",
"def get_inputs():\n \"\"\"\n Create TF Placeholders for input, targets, and learning rate.\n :return: Tuple (input, targets, learning rate)\n \"\"\"\n input = tf.placeholder(tf.int32,[None,None], name='input')\n targets = tf.placeholder(tf.int32,[None,None], name='targets')\n learning_rate = tf.placeholder(tf.float32,shape=(), name = 'learning_rate')\n return input, targets, learning_rate\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_inputs(get_inputs)",
"Build RNN Cell and Initialize\nStack one or more BasicLSTMCells in a MultiRNNCell.\n- The Rnn size should be set using rnn_size\n- Initalize Cell State using the MultiRNNCell's zero_state() function\n - Apply the name \"initial_state\" to the initial state using tf.identity()\nReturn the cell and initial state in the following tuple (Cell, InitialState)",
"def get_init_cell(batch_size, rnn_size, keep_prob = 0.5, num_layers = 1):\n \"\"\"\n Create an RNN Cell and initialize it.\n :param batch_size: Size of batches\n :param rnn_size: Size of RNNs\n :return: Tuple (cell, initialize state)\n \"\"\"\n # TODO: Implement Function\n \n lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)\n drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)\n cell = tf.contrib.rnn.MultiRNNCell([lstm]*num_layers)\n \n initial_state = cell.zero_state(batch_size = batch_size, dtype=tf.float32)\n initial_state = tf.identity(initial_state, name=\"initial_state\")\n return cell, initial_state\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_init_cell(get_init_cell)",
"Word Embedding\nApply embedding to input_data using TensorFlow. Return the embedded sequence.",
"def get_embed(input_data, vocab_size, embed_dim):\n \"\"\"\n Create embedding for <input_data>.\n :param input_data: TF placeholder for text input.\n :param vocab_size: Number of words in vocabulary.\n :param embed_dim: Number of embedding dimensions\n :return: Embedded input.\n \"\"\"\n\n embedding = tf.Variable(tf.random_uniform([vocab_size,embed_dim],-0.5,0.5), name = 'embedding')\n embed = tf.nn.embedding_lookup(embedding,input_data) \n return embed\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_embed(get_embed)",
"Build RNN\nYou created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.\n- Build the RNN using the tf.nn.dynamic_rnn()\n - Apply the name \"final_state\" to the final state using tf.identity()\nReturn the outputs and final_state state in the following tuple (Outputs, FinalState)",
"def build_rnn(cell, inputs):\n \"\"\"\n Create a RNN using a RNN Cell\n :param cell: RNN Cell\n :param inputs: Input text data\n :return: Tuple (Outputs, Final State)\n \"\"\"\n output,final_state = tf.nn.dynamic_rnn(cell,inputs,dtype=tf.float32)\n final_state = tf.identity(final_state,\"final_state\")\n \n return output, final_state\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_build_rnn(build_rnn)",
"Build the Neural Network\nApply the functions you implemented above to:\n- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.\n- Build RNN using cell and your build_rnn(cell, inputs) function.\n- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.\nReturn the logits and final state in the following tuple (Logits, FinalState)",
"def build_nn(cell, rnn_size, input_data, vocab_size):\n \"\"\"\n Build part of the neural network\n :param cell: RNN cell\n :param rnn_size: Size of rnns\n :param input_data: Input data\n :param vocab_size: Vocabulary size\n :return: Tuple (Logits, FinalState)\n \"\"\"\n #get embeddings\n embed_layer = get_embed(input_data,vocab_size,300)\n #get the rnn\n output, final_state = build_rnn(cell,embed_layer)\n #fully connected layer\n logits_pre = tf.contrib.layers.fully_connected(output, \n 300,\n activation_fn=tf.nn.relu,\n weights_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.05),\n biases_initializer=tf.zeros_initializer()\n )\n #adding an extra layer\n logits = tf.contrib.layers.fully_connected(logits_pre, \n vocab_size,\n activation_fn=tf.nn.relu,\n weights_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.05),\n biases_initializer=tf.zeros_initializer()\n )\n return logits, final_state\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_build_nn(build_nn)",
"Batches\nImplement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:\n- The first element is a single batch of input with the shape [batch size, sequence length]\n- The second element is a single batch of targets with the shape [batch size, sequence length]\nIf you can't fill the last batch with enough data, drop the last batch.\nFor exmple, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3) would return a Numpy array of the following:\n```\n[\n # First Batch\n [\n # Batch of Input\n [[ 1 2 3], [ 7 8 9]],\n # Batch of targets\n [[ 2 3 4], [ 8 9 10]]\n ],\n# Second Batch\n [\n # Batch of Input\n [[ 4 5 6], [10 11 12]],\n # Batch of targets\n [[ 5 6 7], [11 12 13]]\n ]\n]\n```",
"#%pdb\ndef get_batches(int_text, batch_size, seq_length):\n \"\"\"\n Return batches of input and target\n :param int_text: Text with the words replaced by their ids\n :param batch_size: The size of batch\n :param seq_length: The length of sequence\n :return: Batches as a Numpy array\n \"\"\"\n # TODO: Implement Function\n #print(\"length int_text before: \",len(int_text))\n n_batches = len(int_text)//(batch_size * seq_length)\n print(\"Losing\",len(int_text)%(batch_size * seq_length),\"characters\")\n int_text = int_text[:n_batches*batch_size*seq_length +1]\n #print(\"length int_text after: \",len(int_text))\n\n #print(\"batch size = \",batch_size, \" , seq_length = \",seq_length , \" n_batches = \", n_)\n batches = np.zeros(shape=(n_batches,2,batch_size,seq_length),dtype=int)\n \n batch_served = 0\n for i in range(0,len(int_text)-2,seq_length):\n #print(\"i: \",i,\" batch served: \",batch_served)\n batches[batch_served,0,i//(seq_length*n_batches)]= int_text[i:i+seq_length]\n batches[batch_served,1,i//(seq_length*n_batches)] =int_text[i+1:i+seq_length+1]\n batch_served+=1\n if(batch_served == n_batches):\n batch_served =0 \n \n return batches\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_batches(get_batches)",
"Neural Network Training\nHyperparameters\nTune the following parameters:\n\nSet num_epochs to the number of epochs.\nSet batch_size to the batch size.\nSet rnn_size to the size of the RNNs.\nSet seq_length to the length of sequence.\nSet learning_rate to the learning rate.\nSet show_every_n_batches to the number of batches the neural network should print progress.",
"# Number of Epochs\nnum_epochs = 150\n# Batch Size\nbatch_size = 64\n# RNN Size\nrnn_size = 512\n# Sequence Length\nseq_length = 16\n# Learning Rate\nlearning_rate = 0.001\n# Show stats for every n number of batches\nshow_every_n_batches = 50\n#adding keep prob\nkeep_prob = 0.8\n#adding number of LSTM layers\nnum_layers = 1\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nsave_dir = './save'",
"Build the Graph\nBuild the graph using the neural network you implemented.",
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nfrom tensorflow.contrib import seq2seq\n\ntrain_graph = tf.Graph()\nwith train_graph.as_default():\n vocab_size = len(int_to_vocab)\n input_text, targets, lr = get_inputs()\n input_data_shape = tf.shape(input_text)\n cell, initial_state = get_init_cell(input_data_shape[0], rnn_size, keep_prob,num_layers)\n logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)\n\n # Probabilities for generating words\n probs = tf.nn.softmax(logits, name='probs')\n\n # Loss function\n cost = seq2seq.sequence_loss(\n logits,\n targets,\n tf.ones([input_data_shape[0], input_data_shape[1]]))\n\n # Optimizer\n optimizer = tf.train.AdamOptimizer(lr)\n\n # Gradient Clipping\n gradients = optimizer.compute_gradients(cost)\n capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]\n train_op = optimizer.apply_gradients(capped_gradients)",
"Train\nTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem.",
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nbatches = get_batches(int_text, batch_size, seq_length)\n\nwith tf.Session(graph=train_graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n for epoch_i in range(num_epochs):\n state = sess.run(initial_state, {input_text: batches[0][0]})\n\n for batch_i, (x, y) in enumerate(batches):\n feed = {\n input_text: x,\n targets: y,\n initial_state: state,\n lr: learning_rate}\n train_loss, state, _ = sess.run([cost, final_state, train_op], feed)\n\n # Show every <show_every_n_batches> batches\n if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:\n print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(\n epoch_i,\n batch_i,\n len(batches),\n train_loss))\n\n # Save Model\n saver = tf.train.Saver()\n saver.save(sess, save_dir)\n print('Model Trained and Saved')",
"Save Parameters\nSave seq_length and save_dir for generating a new TV script.",
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Save parameters for checkpoint\nhelper.save_params((seq_length, save_dir))",
"Checkpoint",
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport helper\nimport problem_unittests as tests\n\n_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()\nseq_length, load_dir = helper.load_params()",
"Implement Generate Functions\nGet Tensors\nGet tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:\n- \"input:0\"\n- \"initial_state:0\"\n- \"final_state:0\"\n- \"probs:0\"\nReturn the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)",
"def get_tensors(loaded_graph):\n \"\"\"\n Get input, initial state, final state, and probabilities tensor from <loaded_graph>\n :param loaded_graph: TensorFlow graph loaded from file\n :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)\n \"\"\"\n # TODO: Implement Function\n return loaded_graph.get_tensor_by_name(\"input:0\"), loaded_graph.get_tensor_by_name(\"initial_state:0\"), loaded_graph.get_tensor_by_name(\"final_state:0\"), loaded_graph.get_tensor_by_name(\"probs:0\")\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_tensors(get_tensors)",
"Choose Word\nImplement the pick_word() function to select the next word using probabilities.",
"def pick_word(probabilities, int_to_vocab):\n \"\"\"\n Pick the next word in the generated text\n :param probabilities: Probabilites of the next word\n :param int_to_vocab: Dictionary of word ids as the keys and words as the values\n :return: String of the predicted word\n \"\"\"\n # TODO: Implement Function\n return int_to_vocab[np.argmax(probabilities)]\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_pick_word(pick_word)",
"Generate TV Script\nThis will generate the TV script for you. Set gen_length to the length of TV script you want to generate.",
"gen_length = 500\n# homer_simpson, moe_szyslak, or Barney_Gumble\nprime_word = 'moe_szyslak'\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nloaded_graph = tf.Graph()\nwith tf.Session(graph=loaded_graph) as sess:\n # Load saved model\n loader = tf.train.import_meta_graph(load_dir + '.meta')\n loader.restore(sess, load_dir)\n\n # Get Tensors from loaded model\n input_text, initial_state, final_state, probs = get_tensors(loaded_graph)\n\n # Sentences generation setup\n gen_sentences = [prime_word + ':']\n prev_state = sess.run(initial_state, {input_text: np.array([[1]])})\n\n # Generate sentences\n for n in range(gen_length):\n # Dynamic Input\n dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]\n dyn_seq_length = len(dyn_input[0])\n\n # Get Prediction\n probabilities, prev_state = sess.run(\n [probs, final_state],\n {input_text: dyn_input, initial_state: prev_state})\n \n pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)\n\n gen_sentences.append(pred_word)\n \n # Remove tokens\n tv_script = ' '.join(gen_sentences)\n for key, token in token_dict.items():\n ending = ' ' if key in ['\\n', '(', '\"'] else ''\n tv_script = tv_script.replace(' ' + token.lower(), key)\n tv_script = tv_script.replace('\\n ', '\\n')\n tv_script = tv_script.replace('( ', '(')\n \n print(tv_script)",
"The TV Script is Nonsensical\nIt's ok if the TV script doesn't make any sense. We trained on less than a megabyte of text. In order to get good results, you'll have to use a smaller vocabulary or get more data. Luckly there's more data! As we mentioned in the begging of this project, this is a subset of another dataset. We didn't have you train on all the data, because that would take too long. However, you are free to train your neural network on all the data. After you complete the project, of course.\nSubmitting This Project\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \"dlnd_tv_script_generation.ipynb\" and save it as a HTML file under \"File\" -> \"Download as\". Include the \"helper.py\" and \"problem_unittests.py\" files in your submission."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
GoogleCloudPlatform/training-data-analyst
|
courses/ai-for-finance/solution/arima_model.ipynb
|
apache-2.0
|
[
"Building an ARIMA Model for a Financial Dataset\nIn this notebook, you will build an ARIMA model for AAPL stock closing prices. The lab objectives are:\n\nPull data from Google Cloud Storage into a Pandas dataframe\nLearn how to prepare raw stock closing data for an ARIMA model\nApply the Dickey-Fuller test \nBuild an ARIMA model using the statsmodels library\n\nMake sure you restart the Python kernel after executing the pip install command below! After you restart the kernel you don't have to execute the command again.",
"!pip install --user statsmodels\n\n%matplotlib inline\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport datetime\n\n%config InlineBackend.figure_format = 'retina'",
"Import data from Google Clod Storage\nIn this section we'll read some ten years' worth of AAPL stock data into a Pandas dataframe. We want to modify the dataframe such that it represents a time series. This is achieved by setting the date as the index.",
"df = pd.read_csv('gs://cloud-training/ai4f/AAPL10Y.csv')\n\ndf['date'] = pd.to_datetime(df['date'])\ndf.sort_values('date', inplace=True)\ndf.set_index('date', inplace=True)\n\nprint(df.shape)\n\ndf.head()",
"Prepare data for ARIMA\nThe first step in our preparation is to resample the data such that stock closing prices are aggregated on a weekly basis.",
"df_week = df.resample('w').mean()\ndf_week = df_week[['close']]\ndf_week.head()",
"Let's create a column for weekly returns. Take the log to of the returns to normalize large fluctuations.",
"df_week['weekly_ret'] = np.log(df_week['close']).diff()\ndf_week.head()\n\n# drop null rows\ndf_week.dropna(inplace=True)\n\ndf_week.weekly_ret.plot(kind='line', figsize=(12, 6));\n\nudiff = df_week.drop(['close'], axis=1)\nudiff.head()",
"Test for stationarity of the udiff series\nTime series are stationary if they do not contain trends or seasonal swings. The Dickey-Fuller test can be used to test for stationarity.",
"import statsmodels.api as sm\nfrom statsmodels.tsa.stattools import adfuller\n\nrolmean = udiff.rolling(20).mean()\nrolstd = udiff.rolling(20).std()\n\nplt.figure(figsize=(12, 6))\norig = plt.plot(udiff, color='blue', label='Original')\nmean = plt.plot(rolmean, color='red', label='Rolling Mean')\nstd = plt.plot(rolstd, color='black', label = 'Rolling Std Deviation')\nplt.title('Rolling Mean & Standard Deviation')\nplt.legend(loc='best')\nplt.show(block=False)\n\n# Perform Dickey-Fuller test\ndftest = sm.tsa.adfuller(udiff.weekly_ret, autolag='AIC')\ndfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])\nfor key, value in dftest[4].items():\n dfoutput['Critical Value ({0})'.format(key)] = value\n \ndfoutput",
"With a p-value < 0.05, we can reject the null hypotehsis. This data set is stationary.\nACF and PACF Charts\nMaking autocorrelation and partial autocorrelation charts help us choose hyperparameters for the ARIMA model.\nThe ACF gives us a measure of how much each \"y\" value is correlated to the previous n \"y\" values prior.\nThe PACF is the partial correlation function gives us (a sample of) the amount of correlation between two \"y\" values separated by n lags excluding the impact of all the \"y\" values in between them.",
"from statsmodels.graphics.tsaplots import plot_acf\n\n# the autocorrelation chart provides just the correlation at increasing lags\nfig, ax = plt.subplots(figsize=(12,5))\nplot_acf(udiff.values, lags=10, ax=ax)\nplt.show()\n\nfrom statsmodels.graphics.tsaplots import plot_pacf\n\nfig, ax = plt.subplots(figsize=(12,5))\nplot_pacf(udiff.values, lags=10, ax=ax)\nplt.show()",
"The table below summarizes the patterns of the ACF and PACF.\n<img src=\"../imgs/How_to_Read_PACF_ACF.jpg\" alt=\"drawing\" width=\"300\" height=\"300\"/>\nThe above chart shows that reading PACF gives us a lag \"p\" = 3 and reading ACF gives us a lag \"q\" of 1. Let's Use Statsmodel's ARMA with those parameters to build a model. The way to evaluate the model is to look at AIC - see if it reduces or increases. The lower the AIC (i.e. the more negative it is), the better the model.\nBuild ARIMA Model\nSince we differenced the weekly closing prices, we technically only need to build an ARMA model. The data has already been integrated and is stationary.",
"from statsmodels.tsa.arima.model import ARIMA\n\n# Notice that you have to use udiff - the differenced data rather than the original data.\nar1 = ARIMA(udiff.values, order = (3, 0,1)).fit()\nar1.summary()",
"Our model doesn't do a good job predicting variance in the original data (peaks and valleys).",
"plt.figure(figsize=(12, 8))\nplt.plot(udiff.values, color='blue')\npreds = ar1.fittedvalues\nplt.plot(preds, color='red')\nplt.show()",
"Let's make a forecast 2 weeks ahead:",
"steps = 2\n\nforecast = ar1.forecast(steps=steps)\n\nplt.figure(figsize=(12, 8))\nplt.plot(udiff.values, color='blue')\n\npreds = ar1.fittedvalues\nplt.plot(preds, color='red')\n\nplt.plot(pd.DataFrame(np.array([preds[-1],forecast[0]]).T,index=range(len(udiff.values)+1, len(udiff.values)+3)), color='green')\nplt.plot(pd.DataFrame(forecast,index=range(len(udiff.values)+1, len(udiff.values)+1+steps)), color='green')\nplt.title('Display the predictions with the ARIMA model')\nplt.show()",
"The forecast is not great but if you tune the hyper parameters some more, you might be able to reduce the errors."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
RainFool/Udacity_Anwser_RainFool
|
Project2/finding_donors.ipynb
|
mit
|
[
"机器学习纳米学位\n监督学习\n项目2: 为CharityML寻找捐献者\n欢迎来到机器学习工程师纳米学位的第二个项目!在此文件中,有些示例代码已经提供给你,但你还需要实现更多的功能让项目成功运行。除非有明确要求,你无须修改任何已给出的代码。以'练习'开始的标题表示接下来的代码部分中有你必须要实现的功能。每一部分都会有详细的指导,需要实现的部分也会在注释中以'TODO'标出。请仔细阅读所有的提示!\n除了实现代码外,你还必须回答一些与项目和你的实现有关的问题。每一个需要你回答的问题都会以'问题 X'为标题。请仔细阅读每个问题,并且在问题后的'回答'文字框中写出完整的答案。我们将根据你对问题的回答和撰写代码所实现的功能来对你提交的项目进行评分。\n\n提示:Code 和 Markdown 区域可通过Shift + Enter快捷键运行。此外,Markdown可以通过双击进入编辑模式。\n\n开始\n在这个项目中,你将使用1994年美国人口普查收集的数据,选用几个监督学习算法以准确地建模被调查者的收入。然后,你将根据初步结果从中选择出最佳的候选算法,并进一步优化该算法以最好地建模这些数据。你的目标是建立一个能够准确地预测被调查者年收入是否超过50000美元的模型。这种类型的任务会出现在那些依赖于捐款而存在的非营利性组织。了解人群的收入情况可以帮助一个非营利性的机构更好地了解他们要多大的捐赠,或是否他们应该接触这些人。虽然我们很难直接从公开的资源中推断出一个人的一般收入阶层,但是我们可以(也正是我们将要做的)从其他的一些公开的可获得的资源中获得一些特征从而推断出该值。\n这个项目的数据集来自UCI机器学习知识库。这个数据集是由Ron Kohavi和Barry Becker在发表文章_\"Scaling Up the Accuracy of Naive-Bayes Classifiers: A Decision-Tree Hybrid\"_之后捐赠的,你可以在Ron Kohavi提供的在线版本中找到这个文章。我们在这里探索的数据集相比于原有的数据集有一些小小的改变,比如说移除了特征'fnlwgt' 以及一些遗失的或者是格式不正确的记录。\n\n探索数据\n运行下面的代码单元以载入需要的Python库并导入人口普查数据。注意数据集的最后一列'income'将是我们需要预测的列(表示被调查者的年收入会大于或者是最多50,000美元),人口普查数据中的每一列都将是关于被调查者的特征。",
"# 检查你的Python版本\nfrom sys import version_info\nif version_info.major != 2 and version_info.minor != 7:\n raise Exception('请使用Python 2.7来完成此项目')\n\n# 为这个项目导入需要的库\nimport numpy as np\nimport pandas as pd\nfrom time import time\nfrom IPython.display import display # 允许为DataFrame使用display()\n\n# 导入附加的可视化代码visuals.py\nimport visuals as vs\n\n# 为notebook提供更加漂亮的可视化\n%matplotlib inline\n\n# 导入人口普查数据\ndata = pd.read_csv(\"census.csv\")\n\n# 成功 - 显示第一条记录\ndisplay(data.head(n=1))",
"练习:数据探索\n首先我们对数据集进行一个粗略的探索,我们将看看每一个类别里会有多少被调查者?并且告诉我们这些里面多大比例是年收入大于50,000美元的。在下面的代码单元中,你将需要计算以下量:\n\n总的记录数量,'n_records'\n年收入大于50,000美元的人数,'n_greater_50k'.\n年收入最多为50,000美元的人数 'n_at_most_50k'.\n年收入大于50,000美元的人所占的比例, 'greater_percent'.\n\n提示: 您可能需要查看上面的生成的表,以了解'income'条目的格式是什么样的。",
"# TODO:总的记录数\nn_records = len(data)\n\n# TODO:被调查者的收入大于$50,000的人数\nn_greater_50k = len(data[data['income']=='>50K'])\n\n# TODO:被调查者的收入最多为$50,000的人数\nn_at_most_50k = len(data[data['income']=='<=50K'])\n\n# TODO:被调查者收入大于$50,000所占的比例\ngreater_percent = n_greater_50k / float(n_records) * 100\n\n# 打印结果\nprint \"Total number of records: {}\".format(n_records)\nprint \"Individuals making more than $50,000: {}\".format(n_greater_50k)\nprint \"Individuals making at most $50,000: {}\".format(n_at_most_50k)\nprint \"Percentage of individuals making more than $50,000: {:.2f}%\".format(greater_percent)",
"准备数据\n在数据能够被作为输入提供给机器学习算法之前,它经常需要被清洗,格式化,和重新组织 - 这通常被叫做预处理。幸运的是,对于这个数据集,没有我们必须处理的无效或丢失的条目,然而,由于某一些特征存在的特性我们必须进行一定的调整。这个预处理都可以极大地帮助我们提升几乎所有的学习算法的结果和预测能力。\n获得特征和标签\nincome 列是我们需要的标签,记录一个人的年收入是否高于50K。 因此我们应该把他从数据中剥离出来,单独存放。",
"# 将数据切分成特征和对应的标签\nincome_raw = data['income']\nfeatures_raw = data.drop('income', axis = 1)",
"转换倾斜的连续特征\n一个数据集有时可能包含至少一个靠近某个数字的特征,但有时也会有一些相对来说存在极大值或者极小值的不平凡分布的的特征。算法对这种分布的数据会十分敏感,并且如果这种数据没有能够很好地规一化处理会使得算法表现不佳。在人口普查数据集的两个特征符合这个描述:'capital-gain'和'capital-loss'。\n运行下面的代码单元以创建一个关于这两个特征的条形图。请注意当前的值的范围和它们是如何分布的。",
"# 可视化 'capital-gain'和'capital-loss' 两个特征\nvs.distribution(features_raw)",
"对于高度倾斜分布的特征如'capital-gain'和'capital-loss',常见的做法是对数据施加一个<a href=\"https://en.wikipedia.org/wiki/Data_transformation_(statistics)\">对数转换</a>,将数据转换成对数,这样非常大和非常小的值不会对学习算法产生负面的影响。并且使用对数变换显著降低了由于异常值所造成的数据范围异常。但是在应用这个变换时必须小心:因为0的对数是没有定义的,所以我们必须先将数据处理成一个比0稍微大一点的数以成功完成对数转换。\n运行下面的代码单元来执行数据的转换和可视化结果。再次,注意值的范围和它们是如何分布的。",
"# 对于倾斜的数据使用Log转换\nskewed = ['capital-gain', 'capital-loss']\nfeatures_raw[skewed] = data[skewed].apply(lambda x: np.log(x + 1))\n\n# 可视化对数转换后 'capital-gain'和'capital-loss' 两个特征\nvs.distribution(features_raw, transformed = True)",
"规一化数字特征\n除了对于高度倾斜的特征施加转换,对数值特征施加一些形式的缩放通常会是一个好的习惯。在数据上面施加一个缩放并不会改变数据分布的形式(比如上面说的'capital-gain' or 'capital-loss');但是,规一化保证了每一个特征在使用监督学习器的时候能够被平等的对待。注意一旦使用了缩放,观察数据的原始形式不再具有它本来的意义了,就像下面的例子展示的。\n运行下面的代码单元来规一化每一个数字特征。我们将使用sklearn.preprocessing.MinMaxScaler来完成这个任务。",
"from sklearn.preprocessing import MinMaxScaler\n\n# 初始化一个 scaler,并将它施加到特征上\nscaler = MinMaxScaler()\nnumerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']\nfeatures_raw[numerical] = scaler.fit_transform(data[numerical])\n\n# 显示一个经过缩放的样例记录\ndisplay(features_raw.head(n = 1))",
"练习:数据预处理\n从上面的数据探索中的表中,我们可以看到有几个属性的每一条记录都是非数字的。通常情况下,学习算法期望输入是数字的,这要求非数字的特征(称为类别变量)被转换。转换类别变量的一种流行的方法是使用独热编码方案。独热编码为每一个非数字特征的每一个可能的类别创建一个_“虚拟”_变量。例如,假设someFeature有三个可能的取值A,B或者C,。我们将把这个特征编码成someFeature_A, someFeature_B和someFeature_C.\n| 特征X | | 特征X_A | 特征X_B | 特征X_C |\n| :-: | | :-: | :-: | :-: |\n| B | | 0 | 1 | 0 |\n| C | ----> 独热编码 ----> | 0 | 0 | 1 |\n| A | | 1 | 0 | 0 |\n此外,对于非数字的特征,我们需要将非数字的标签'income'转换成数值以保证学习算法能够正常工作。因为这个标签只有两种可能的类别(\"<=50K\"和\">50K\"),我们不必要使用独热编码,可以直接将他们编码分别成两个类0和1,在下面的代码单元中你将实现以下功能:\n - 使用pandas.get_dummies()对'features_raw'数据来施加一个独热编码。\n - 将目标标签'income_raw'转换成数字项。\n - 将\"<=50K\"转换成0;将\">50K\"转换成1。",
"# TODO:使用pandas.get_dummies()对'features_raw'数据进行独热编码\nfeatures = pd.get_dummies(features_raw)\n\n# TODO:将'income_raw'编码成数字值\nincome = income_raw.map(lambda x: 0 if x == '<=50K' else 1)\n\n# print income.head(n=9)\n\n# 打印经过独热编码之后的特征数量\nencoded = list(features.columns)\nprint \"{} total features after one-hot encoding.\".format(len(encoded))\n\n# 移除下面一行的注释以观察编码的特征名字\nprint encoded",
"混洗和切分数据\n现在所有的 类别变量 已被转换成数值特征,而且所有的数值特征已被规一化。和我们一般情况下做的一样,我们现在将数据(包括特征和它们的标签)切分成训练和测试集。其中80%的数据将用于训练和20%的数据用于测试。然后再进一步把训练数据分为训练集和验证集,用来选择和优化模型。\n运行下面的代码单元来完成切分。",
"# 导入 train_test_split\nfrom sklearn.model_selection import train_test_split\n\n# 将'features'和'income'数据切分成训练集和测试集\nX_train, X_test, y_train, y_test = train_test_split(features, income, test_size = 0.2, random_state = 0,\n stratify = income)\n# 将'X_train'和'y_train'进一步切分为训练集和验证集\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=0,\n stratify = y_train)\n# 显示切分的结果\nprint \"Training set has {} samples.\".format(X_train.shape[0])\nprint \"Validation set has {} samples.\".format(X_val.shape[0])\nprint \"Testing set has {} samples.\".format(X_test.shape[0])",
"评价模型性能\n在这一部分中,我们将尝试四种不同的算法,并确定哪一个能够最好地建模数据。四种算法包含一个天真的预测器 和三个你选择的监督学习器。\n评价方法和朴素的预测器\nCharityML通过他们的研究人员知道被调查者的年收入大于\\$50,000最有可能向他们捐款。因为这个原因CharityML对于准确预测谁能够获得\\$50,000以上收入尤其有兴趣。这样看起来使用准确率作为评价模型的标准是合适的。另外,把没有收入大于\\$50,000的人识别成年收入大于\\$50,000对于CharityML来说是有害的,因为他想要找到的是有意愿捐款的用户。这样,我们期望的模型具有准确预测那些能够年收入大于\\$50,000的能力比模型去查全这些被调查者更重要。我们能够使用F-beta score作为评价指标,这样能够同时考虑查准率和查全率:\n$$ F_{\\beta} = (1 + \\beta^2) \\cdot \\frac{precision \\cdot recall}{\\left( \\beta^2 \\cdot precision \\right) + recall} $$\n尤其是,当 $\\beta = 0.5$ 的时候更多的强调查准率,这叫做F$_{0.5}$ score (或者为了简单叫做F-score)。\n问题 1 - 天真的预测器的性能\n通过查看收入超过和不超过 \\$50,000 的人数,我们能发现多数被调查者年收入没有超过 \\$50,000。如果我们简单地预测说“这个人的收入没有超过 \\$50,000”,我们就可以得到一个 准确率超过 50% 的预测。这样我们甚至不用看数据就能做到一个准确率超过 50%。这样一个预测被称作是天真的。通常对数据使用一个天真的预测器是十分重要的,这样能够帮助建立一个模型表现是否好的基准。 使用下面的代码单元计算天真的预测器的相关性能。将你的计算结果赋值给'accuracy', ‘precision’, ‘recall’ 和 'fscore',这些值会在后面被使用,请注意这里不能使用scikit-learn,你需要根据公式自己实现相关计算。\n如果我们选择一个无论什么情况都预测被调查者年收入大于 \\$50,000 的模型,那么这个模型在验证集上的准确率,查准率,查全率和 F-score是多少?",
"#不能使用scikit-learn,你需要根据公式自己实现相关计算。\n\n# 不知道这里是不是将y_val传过来就可以了,请指导下\n\nincome_pred = y_val.apply(lambda x : 1)\n\nTP = sum(map(lambda x,y:1 if x==1 and y==1 else 0,y_val,income_pred))\nFN = sum(map(lambda x,y:1 if x==1 and y==0 else 0,y_val,income_pred))\nFP = sum(map(lambda x,y:1 if x==0 and y==1 else 0,y_val,income_pred))\nTN = sum(map(lambda x,y:1 if x==0 and y==0 else 0,y_val,income_pred))\n\nprint TP\nprint FN\nprint FP\nprint TN\n\n#TODO: 计算准确率\naccuracy = float(TP + TN)/len(y_val)\n\n# TODO: 计算查准率 Precision\nprecision = TP/float(TP + FP)\n\n# TODO: 计算查全率 Recall\nrecall = TP/float(TP + FN)\n\n# TODO: 使用上面的公式,设置beta=0.5,计算F-score\nfscore = (1 + 0.5*0.5)*(precision * recall)/(0.5*0.5*precision + recall)\n\n# 打印结果\nprint \"Naive Predictor on validation data: \\n \\\n Accuracy score: {:.4f} \\n \\\n Precision: {:.4f} \\n \\\n Recall: {:.4f} \\n \\\n F-score: {:.4f}\".format(accuracy, precision, recall, fscore)",
"监督学习模型\n问题 2 - 模型应用\n你能够在 scikit-learn 中选择以下监督学习模型\n- 高斯朴素贝叶斯 (GaussianNB)\n- 决策树 (DecisionTree)\n- 集成方法 (Bagging, AdaBoost, Random Forest, Gradient Boosting)\n- K近邻 (K Nearest Neighbors)\n- 随机梯度下降分类器 (SGDC)\n- 支撑向量机 (SVM)\n- Logistic回归(LogisticRegression)\n从上面的监督学习模型中选择三个适合我们这个问题的模型,并回答相应问题。\n模型1\n模型名称\n回答:决策树\n描述一个该模型在真实世界的一个应用场景。(你需要为此做点研究,并给出你的引用出处)\n回答:慢性胃炎中医辨证分型中的应用。(http://www.airitilibrary.com/Publication/alDetailedMesh?docid=0258879x-200409-25-9-1009-1012-a)\n雷电潜势预报中的应用。(http://www.airitilibrary.com/Publication/alDetailedMesh?docid=16742184-200812-28-4-55-58-a)\n这个模型的优势是什么?他什么情况下表现最好?\n回答:优势:\n1. 容易解释、算法简单,可以可视化\n2. 几乎不需要数据预处理\n3. 可以同时处理数值变量和输入变量\n适用于:数据拥有比较清晰的特征(较容易区分),每个可区分的特征都能分出部分数据,最终结果是布尔类型。\n这个模型的缺点是什么?什么条件下它表现很差?\n回答:缺点:\n1. 容易被攻击,只需要伪造很少的特征即可瞒过分类器。\n2. 数据中非常小的变异也会造成一颗完全不同的树\n3. \n当样本的数据特征不能或很难将整个样本分类的话\n根据我们当前数据集的特点,为什么这个模型适合这个问题。\n回答:决策树作为一个简单的模型,理论上任何数据拿到后都可以使用此模型进行一次尝试。当前数据集可以使用特征来进行分类,最终输出一个二元标签(收入是否大于50K)。\n模型2\n模型名称\n回答:SVM\n描述一个该模型在真实世界的一个应用场景。(你需要为此做点研究,并给出你的引用出处)\n回答:\n测试用例生成(http://www.arocmag.com/getarticle/?aid=cff7c760dfdd88ca)\n基因数据表达分类(http://d.wanfangdata.com.cn/periodical/jsjyyyhx200305004)\n这个模型的优势是什么?他什么情况下表现最好?\n回答:\n1. 的分类效果非常好。\n2. 可以有效地处理高维空间数据。\n3. 可以有效地处理变量个数大于样本个数的数据。\n4. 只利用一部分子集来训练模型,所以 SVM 模型不需要太大的内存。\n当数据比较完善,没有太多噪声,变量较多时表现较好。\n这个模型的缺点是什么?什么条件下它表现很差?\n回答:\n1. 无法很好地处理大规模数据集,因为此时它需要较长的训练时间。\n2. 
无法处理包含太多噪声的数据集。\n根据我们当前数据集的特点,为什么这个模型适合这个问题。\n回答:\n当前模型的feature非常多,SVM适合处理这种feature比较多的DataSet。\n输出Label为二元,符合SVM的分类输出特性\n模型3\n模型名称\n回答:\n神经网络\n描述一个该模型在真实世界的一个应用场景。(你需要为此做点研究,并给出你的引用出处)\n回答:\n神经网络应用于电力变压器故障诊断(http://aeps.alljournals.ac.cn/aeps/ch/reader/create_pdf.aspx?file_no=5586&flag=&journal_id=aeps&year_id=1996)\n这个模型的优势是什么?他什么情况下表现最好?\n回答:\n分类的准确度高,并行分布处理能力强,分布存储及学习能力强,对噪声神经有较强的鲁棒性和容错能力,能充分逼近复杂的非线性关系,具备联想记忆的功能等。\n数据量比较大,参数之间存在联系的时候,表现最好\n这个模型的缺点是什么?什么条件下它表现很差?\n回答:\n神经网络需要大量的参数,如网络拓扑结构、权值和阈值的初始值;不能观察之间的学习过程,输出结果难以解释,会影响到结果的可信度和可接受程度;学习时间过长,甚至可能达不到学习的目的。\n准确率依赖于庞大的训练集,原本受限于计算机的速度。因此在数据集比较小,计算机速度过低时表现较差。\n根据我们当前数据集的特点,为什么这个模型适合这个问题。\n回答:\n当前数据是没有那么大,而且训练会在我的个人电脑上进行,所以不太适合。但是可以将此算法作为其他两个的对比。\n练习 - 创建一个训练和预测的流水线\n为了正确评估你选择的每一个模型的性能,创建一个能够帮助你快速有效地使用不同大小的训练集并在验证集上做预测的训练和验证的流水线是十分重要的。\n你在这里实现的功能将会在接下来的部分中被用到。在下面的代码单元中,你将实现以下功能:\n\n从sklearn.metrics中导入fbeta_score和accuracy_score。\n用训练集拟合学习器,并记录训练时间。\n对训练集的前300个数据点和验证集进行预测并记录预测时间。\n计算预测训练集的前300个数据点的准确率和F-score。\n计算预测验证集的准确率和F-score。",
"# TODO:从sklearn中导入两个评价指标 - fbeta_score和accuracy_score\nfrom sklearn.metrics import fbeta_score, accuracy_score\n\ndef train_predict(learner, sample_size, X_train, y_train, X_val, y_val): \n '''\n inputs:\n - learner: the learning algorithm to be trained and predicted on\n - sample_size: the size of samples (number) to be drawn from training set\n - X_train: features training set\n - y_train: income training set\n - X_val: features validation set\n - y_val: income validation set\n '''\n \n results = {}\n \n # TODO:使用sample_size大小的训练数据来拟合学习器\n # TODO: Fit the learner to the training data using slicing with 'sample_size'\n start = time() # 获得程序开始时间\n learner.fit(X_train[:sample_size],y_train[:sample_size])\n end = time() # 获得程序结束时间\n \n # TODO:计算训练时间\n results['train_time'] = end - start\n \n # TODO: 得到在验证集上的预测值\n # 然后得到对前300个训练数据的预测结果\n start = time() # 获得程序开始时间\n predictions_val = learner.predict(X_val)\n predictions_train = learner.predict(X_train[:300])\n end = time() # 获得程序结束时间\n \n # TODO:计算预测用时\n results['pred_time'] = end - start\n \n # TODO:计算在最前面的300个训练数据的准确率\n results['acc_train'] = accuracy_score(y_train[:300],predictions_train)\n \n # TODO:计算在验证上的准确率\n results['acc_val'] = accuracy_score(y_val,predictions_val)\n \n # TODO:计算在最前面300个训练数据上的F-score\n results['f_train'] = fbeta_score(y_train[:300],predictions_train,beta=0.5)\n \n # TODO:计算验证集上的F-score\n results['f_val'] = fbeta_score(y_val,predictions_val,beta=0.5)\n \n # 成功\n print \"{} trained on {} samples.\".format(learner.__class__.__name__, sample_size)\n \n # 返回结果\n return results",
"练习:初始模型的评估\n在下面的代码单元中,您将需要实现以下功能: \n- 导入你在前面讨论的三个监督学习模型。 \n- 初始化三个模型并存储在'clf_A','clf_B'和'clf_C'中。\n - 使用模型的默认参数值,在接下来的部分中你将需要对某一个模型的参数进行调整。 \n - 设置random_state (如果有这个参数)。 \n- 计算1%, 10%, 100%的训练数据分别对应多少个数据点,并将这些值存储在'samples_1', 'samples_10', 'samples_100'中\n注意:取决于你选择的算法,下面实现的代码可能需要一些时间来运行!",
"# TODO:从sklearn中导入三个监督学习模型\nfrom sklearn import tree\nfrom sklearn import svm\nfrom sklearn.neural_network import MLPClassifier\n# TODO:初始化三个模型\nclf_A = tree.DecisionTreeClassifier(random_state=1)\nclf_B = svm.SVC(random_state=1)\nclf_C = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(5, 2), random_state=1)\n\n# TODO:计算1%, 10%, 100%的训练数据分别对应多少点\nsamples_1 = len(X_train)/100\nsamples_10 = len(X_train)/10\nsamples_100 = len(X_train)\n\n# 收集学习器的结果\nresults = {}\nfor clf in [clf_A, clf_B, clf_C]:\n clf_name = clf.__class__.__name__\n results[clf_name] = {}\n for i, samples in enumerate([samples_1, samples_10, samples_100]):\n results[clf_name][i] = train_predict(clf, samples, X_train, y_train, X_val, y_val)\n\n# 对选择的三个模型得到的评价结果进行可视化\nvs.evaluate(results, accuracy, fscore)",
"提高效果\n在这最后一节中,您将从三个有监督的学习模型中选择 最好的 模型来使用学生数据。你将在整个训练集(X_train和y_train)上使用网格搜索优化至少调节一个参数以获得一个比没有调节之前更好的 F-score。\n问题 3 - 选择最佳的模型\n基于你前面做的评价,用一到两段话向 CharityML 解释这三个模型中哪一个对于判断被调查者的年收入大于 \\$50,000 是最合适的。 \n提示:你的答案应该包括评价指标,预测/训练时间,以及该算法是否适合这里的数据。\n回答:\n出乎意料,神经网络的各项指标竟然是最好,训练时间短,在测试集上的准确率和FScrore都是三个算法中最高的。\n算法适用性这边理解比较浅,请助教解答下,应该从那几个方面选择算法,最好提供一些资料可以查阅。\n问题 4 - 用通俗的话解释模型\n用一到两段话,向 CharityML 用外行也听得懂的话来解释最终模型是如何工作的。你需要解释所选模型的主要特点。例如,这个模型是怎样被训练的,它又是如何做出预测的。避免使用高级的数学或技术术语,不要使用公式或特定的算法名词。\n回答: 我们使用了多层神经网络去预测捐款者,神经网络主要由一堆神经元构成,每个神经元都会负责一个很小的逻辑判断,接收几个输入参数,然后通过激活函数决定神经元最后的输出,而这个输出又可能作为输入传到下一个不同的神经元中。经过多层神经元的转换,会形成一套体系,这个体系可以接受我们的输入,最后的输出结果就是预测结果。\n多层神经网络中的反向传播算法,类似于一个自适应的反馈系统;\n就像一个公司要做一些决策,一级领导指示二级领导,二级领导布置任务给底层员工,这是一般的正向决策过程,反向传播就是,当底层员工发现一些问题后,报告给二级领导,二级领导又报告给一级领导,然后一、二级领导都会根据反馈调整自己的决策,以便下次取得更好的结果。\n反向传播这块确实还没理解深入,算法也看不懂,还请老师给些资料看看,我自己搜到的都是5000字以内的那种,很粗略,希望有点比较系统的知识。\n练习:模型调优\n调节选择的模型的参数。使用网格搜索(GridSearchCV)来至少调整模型的重要参数(至少调整一个),这个参数至少需尝试3个不同的值。你要使用整个训练集来完成这个过程。在接下来的代码单元中,你需要实现以下功能:\n\n导入sklearn.model_selection.GridSearchCV 和 sklearn.metrics.make_scorer.\n初始化你选择的分类器,并将其存储在clf中。\n设置random_state (如果有这个参数)。\n创建一个对于这个模型你希望调整参数的字典。\n例如: parameters = {'parameter' : [list of values]}。\n注意: 如果你的学习器有 max_features 参数,请不要调节它!\n使用make_scorer来创建一个fbeta_score评分对象(设置$\\beta = 0.5$)。\n在分类器clf上用'scorer'作为评价函数运行网格搜索,并将结果存储在grid_obj中。\n用训练集(X_train, y_train)训练grid search object,并将结果存储在grid_fit中。\n\n注意: 取决于你选择的参数列表,下面实现的代码可能需要花一些时间运行!",
"# TODO:导入'GridSearchCV', 'make_scorer'和其他一些需要的库\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.metrics import fbeta_score, make_scorer\nfrom sklearn.neural_network import MLPClassifier\n# TODO:初始化分类器\nclf = MLPClassifier(alpha=1e-5,hidden_layer_sizes=(5, 2), random_state=1)\n\n# TODO:创建你希望调节的参数列表\nparameters = {'solver':['lbfgs', 'sgd', 'adam'],'learning_rate_init':[0.1,0.01,0.001]}\n\n# TODO:创建一个fbeta_score打分对象\nscorer = make_scorer(fbeta_score, beta=0.5)\n\n# TODO:在分类器上使用网格搜索,使用'scorer'作为评价函数\ngrid_obj = GridSearchCV(clf, parameters,scoring=scorer)\n\n# TODO:用训练数据拟合网格搜索对象并找到最佳参数\ngrid_obj.fit(X_train, y_train)\n\n# 得到estimator\nbest_clf = grid_obj.best_estimator_\n\n# 使用没有调优的模型做预测\npredictions = (clf.fit(X_train, y_train)).predict(X_val)\nbest_predictions = best_clf.predict(X_val)\n\n# 汇报调参前和调参后的分数\nprint \"Unoptimized model\\n------\"\nprint \"Accuracy score on validation data: {:.4f}\".format(accuracy_score(y_val, predictions))\nprint \"F-score on validation data: {:.4f}\".format(fbeta_score(y_val, predictions, beta = 0.5))\nprint \"\\nOptimized Model\\n------\"\nprint \"Final accuracy score on the validation data: {:.4f}\".format(accuracy_score(y_val, best_predictions))\nprint \"Final F-score on the validation data: {:.4f}\".format(fbeta_score(y_val, best_predictions, beta = 0.5))",
"问题 5 - 最终模型评估\n你的最优模型在测试数据上的准确率和 F-score 是多少?这些分数比没有优化的模型好还是差?你优化的结果相比于你在问题 1中得到的天真预测器怎么样?\n注意:请在下面的表格中填写你的结果,然后在答案框中提供讨论。\n结果:\n| 评价指标 | 天真预测器 | 未优化的模型 | 优化的模型 |\n| :------------: | :-----------------: | :---------------: | :-------------: | \n| 准确率 | 0.2 | 0.8512 | 0.8512 |\n| F-score |0.2917 | 0.7028 | 0.7028 |\n回答:\n比起天真预测器的低分数,未优化的多层神经网络已经表现很好,优化后的分数没有变化,说明调节的几个参数对于多层神经网络来讲没有什么很大的影响\n\n特征的重要性\n在数据上(比如我们这里使用的人口普查的数据)使用监督学习算法的一个重要的任务是决定哪些特征能够提供最强的预测能力。专注于少量的有效特征和标签之间的关系,我们能够更加简单地理解这些现象,这在很多情况下都是十分有用的。在这个项目的情境下这表示我们希望选择一小部分特征,这些特征能够在预测被调查者是否年收入大于\\$50,000这个问题上有很强的预测能力。\n选择一个有 'feature_importance_' 属性的scikit学习分类器(例如 AdaBoost,随机森林)。'feature_importance_' 属性是对特征的重要性排序的函数。在下一个代码单元中用这个分类器拟合训练集数据并使用这个属性来决定人口普查数据中最重要的5个特征。\n问题 6 - 观察特征相关性\n当探索数据的时候,它显示在这个人口普查数据集中每一条记录我们有十三个可用的特征。 \n在这十三个记录中,你认为哪五个特征对于预测是最重要的,选择每个特征的理由是什么?你会怎样对他们排序?\n回答:\n- 特征1:age:年龄,年轻的用户经济还未独立,或资产还不充足,收入可能不足50K\n- 特征2:education-num:教育水平,受教育水平较高的收入可能将较高\n- 特征3:native-country:国籍,国籍很可能影响人的收入,并且本国居民更易捐款\n- 特征4:workclass:工作类别,在政府工作或在公益机构工作的人,收入可能大于50K\n- 特征5:income:收入高的人更易捐款\n练习 - 提取特征重要性\n选择一个scikit-learn中有feature_importance_属性的监督学习分类器,这个属性是一个在做预测的时候根据所选择的算法来对特征重要性进行排序的功能。\n在下面的代码单元中,你将要实现以下功能:\n - 如果这个模型和你前面使用的三个模型不一样的话从sklearn中导入一个监督学习模型。\n - 在整个训练集上训练一个监督学习模型。\n - 使用模型中的 'feature_importances_'提取特征的重要性。",
"# TODO:导入一个有'feature_importances_'的监督学习模型\nfrom sklearn.ensemble import AdaBoostClassifier\n# TODO:在训练集上训练一个监督学习模型\nmodel = AdaBoostClassifier(random_state=0,n_estimators=500).fit(X_train, y_train)\n\n# TODO: 提取特征重要性\nimportances = model.feature_importances_\n\n# 绘图\nvs.feature_plot(importances, X_train, y_train)",
"问题 7 - 提取特征重要性\n观察上面创建的展示五个用于预测被调查者年收入是否大于\\$50,000最相关的特征的可视化图像。\n这五个特征的权重加起来是否超过了0.5?<br>\n这五个特征和你在问题 6中讨论的特征比较怎么样?<br>\n如果说你的答案和这里的相近,那么这个可视化怎样佐证了你的想法?<br>\n如果你的选择不相近,那么为什么你觉得这些特征更加相关?\n回答:超过了\n有些相似,但是整体不准确\n我选取的特征,一个是一些基本属性,而且很有可能影响其收入或同理心,但是数据表现的却十分冷酷,是否捐款和赚钱花钱有最大的关系。\n特征选择\n如果我们只是用可用特征的一个子集的话模型表现会怎么样?通过使用更少的特征来训练,在评价指标的角度来看我们的期望是训练和预测的时间会更少。从上面的可视化来看,我们可以看到前五个最重要的特征贡献了数据中所有特征中超过一半的重要性。这提示我们可以尝试去减小特征空间,简化模型需要学习的信息。下面代码单元将使用你前面发现的优化模型,并只使用五个最重要的特征在相同的训练集上训练模型。",
"# 导入克隆模型的功能\nfrom sklearn.base import clone\n\n# 减小特征空间\nX_train_reduced = X_train[X_train.columns.values[(np.argsort(importances)[::-1])[:5]]]\nX_val_reduced = X_val[X_val.columns.values[(np.argsort(importances)[::-1])[:5]]]\n\n# 在前面的网格搜索的基础上训练一个“最好的”模型\nclf_on_reduced = (clone(best_clf)).fit(X_train_reduced, y_train)\n\n# 做一个新的预测\nreduced_predictions = clf_on_reduced.predict(X_val_reduced)\n\n# 对于每一个版本的数据汇报最终模型的分数\nprint \"Final Model trained on full data\\n------\"\nprint \"Accuracy on validation data: {:.4f}\".format(accuracy_score(y_val, best_predictions))\nprint \"F-score on validation data: {:.4f}\".format(fbeta_score(y_val, best_predictions, beta = 0.5))\nprint \"\\nFinal Model trained on reduced data\\n------\"\nprint \"Accuracy on validation data: {:.4f}\".format(accuracy_score(y_val, reduced_predictions))\nprint \"F-score on validation data: {:.4f}\".format(fbeta_score(y_val, reduced_predictions, beta = 0.5))",
"问题 8 - 特征选择的影响\n最终模型在只是用五个特征的数据上和使用所有的特征数据上的 F-score 和准确率相比怎么样?\n如果训练时间是一个要考虑的因素,你会考虑使用部分特征的数据作为你的训练集吗?\n回答:均有下降\n如果在数据比较大、硬件资源比较匮乏的时候,我会考虑使用,因为选取主要特征的方法会极大提高训练速度\n但是再中小型数据或者说硬件资源足够时,我会尽量保证其准确性,一个良好准确的模型的训练时间损耗是值得的\n问题 9 - 在测试集上测试你的模型\n终于到了测试的时候,记住,测试集只能用一次。\n使用你最有信心的模型,在测试集上测试,计算出准确率和 F-score。\n简述你选择这个模型的原因,并分析测试结果",
"#TODO test your model on testing data and report accuracy and F score\n\nfinal_predictions = best_clf.predict(X_test)\n\nprint \"最终准确率: {:.4f}\".format(accuracy_score(y_test, final_predictions))\nprint \"最终F-Score: {:.4f}\".format(fbeta_score(y_test, final_predictions, beta = 0.5))",
"还是选择了在上一步做调优时的训练的模型,使用多重神经网络的算法,这个模型训练时间短,而且在上面的实践中有最高的得分。\n测试结果还不错,准确率较高,F-Score得分下降了,但是还在接受范围内。\n\n注意: 当你写完了所有的代码,并且回答了所有的问题。你就可以把你的 iPython Notebook 导出成 HTML 文件。你可以在菜单栏,这样导出File -> Download as -> HTML (.html)把这个 HTML 和这个 iPython notebook 一起做为你的作业提交。"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ToqueWillot/M2DAC
|
FDMS/TME3/Kaggle_rain-Copy4_from2.ipynb
|
gpl-2.0
|
[
"## FDMS TME3 \n\nKaggle [How Much Did It Rain? II](https://www.kaggle.com/c/how-much-did-it-rain-ii)\n\nFlorian Toque & Paul Willot ",
"Dear professor Denoyer...\nWarning\nThis is an early version of our entry for the Kaggle challenge \nIt's still very messy and we send it because we forgot that we had to submit our progress step by step...\nTo summarize our goal, we plan to use a RNN to take advantage of the sequential data",
"# from __future__ import exam_success\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\n%matplotlib inline\nimport sklearn\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport random\nimport pandas as pd\nimport scipy.stats as stats\n\n# Sk cheats\nfrom sklearn.cross_validation import cross_val_score # cross val\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.preprocessing import Imputer # get rid of nan",
"13.765.202 lines in train.csv \n8.022.757 lines in test.csv \n\nReduced to\n\n10.000\n5.000",
"%%time\nfilename = \"data/reduced_train_100000.csv\"\n#filename = \"data/reduced_test_100000.csv\"\nraw = pd.read_csv(filename)\nraw = raw.set_index('Id')\n#train = train.dropna()\n\nl = float(len(raw[\"minutes_past\"]))\ncomp = []\nfor i in raw.columns:\n #print(raw\"%.03f, %s\"%(1-train[i].isnull().sum()/l , i) )\n comp.append([1-raw[i].isnull().sum()/l , i])\ncomp.sort(key=lambda x: x[0], reverse=True)\ncomp\n\nraw = raw.dropna()\n\nraw.columns\n\ni = raw[list([u'Ref', u'Ref_5x5_10th',\n u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',\n u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',\n u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',\n u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',\n u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',\n u'Kdp_5x5_50th', u'Kdp_5x5_90th'])].dropna(how='all').index\n\nfullna.difference(i)\n\nidWithNoNan = np.unique(i)\n\nidWithNoNan[:10]\n\nAllId = np.unique(raw.index)\n\nAllId[:10]\n\ntr = [burg not in idWithNoNan for burg in AllId]\n\ntmp = []\nfor idx,i in enumerate(tr):\n if i:\n tmp.append(idx+1)\n\ntmp[-10:]\n\ntttt = AllId*tr\n\nfullna = raw.drop(raw[list([u'Ref', u'Ref_5x5_10th',\n u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',\n u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',\n u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',\n u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',\n u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',\n u'Kdp_5x5_50th', u'Kdp_5x5_90th'])].dropna(how='all').index).index\n\nfullna.head(100)",
"verifier val aberantes sur labels",
"raw.head(20)\n\nraw[\"Expected\"].describe()",
"Get rid of Nan value for now",
"#train_clean = train[[not i for i in np.isnan(train[\"Ref_5x5_10th\"])]]",
"Forums indicate that a higher than 1m rainfall is probably an error. Which is quite understandable. We filter that out",
"raw = raw[raw['Expected'] < 1000]\n\nraw['Expected'].describe()\n\nsplit = 0.2\ntrain = raw.tail(int(len(raw)*1-split))\ntest = raw.tail(int(len(raw)*split))",
"",
"#columns = [u'minutes_past', u'radardist_km', u'Ref', u'Ref_5x5_10th',\n# u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',\n# u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',\n# u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',\n# u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',\n# u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',\n# u'Kdp_5x5_50th', u'Kdp_5x5_90th']\n#columns = [u'radardist_km', u'Ref', u'Ref_5x5_10th']\ncolumns = [ u'radardist_km', u'Ref', u'Ref_5x5_10th',\n u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',\n u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',\n u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',\n u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',\n u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',\n u'Kdp_5x5_50th', u'Kdp_5x5_90th']\nnb_features = len(columns)\ndata = raw[list(columns)]\ndata.head(5)\n\ndata.describe()\n\ndata.head(40)\n\n#%%time\n#max_padding = 20\ndocX, docY = [], []\nfor i in train.index.unique():\n if isinstance(train.loc[i],pd.core.series.Series):\n m = [data.loc[i].as_matrix()]\n #pad = np.pad(m, ((max_padding -len(m), 0),(0,0)), 'constant') # pre-padding\n docX.append(m)\n docY.append(float(train.loc[i][\"Expected\"]))\n else:\n m = data.loc[i].as_matrix()\n #pad = np.pad(m, ((max_padding -len(m), 0),(0,0)), 'constant')\n docX.append(m)\n docY.append(float(train.loc[i][:1][\"Expected\"]))\n #docY.append(train.loc[i][:1][\"Expected\"].as_matrix)\nX = np.array(docX)\ny = np.array(docY)\n\ntrain.index.unique()\n\nnp.shape(X)\n\nX[2]\n\ntmp = []\nfor i in X:\n tmp.append(len(i))\ntmp = np.array(tmp)\npd.DataFrame(tmp).describe()\n\nsns.countplot(tmp,order=range(tmp.min(),tmp.max()+1))\nplt.title(\"Number of ID per number of observations\")\nplt.plot()\n\ndicPerObs = {}\nfor idx,i in enumerate(X):\n l = len(i)\n try:\n dicPerObs[str(l)].append(y[idx])\n except KeyError:\n dicPerObs[str(l)]=[y[idx]]\n\nfor i in dicPerObs.keys():\n t = 
np.array(dicPerObs[i])\n print(t.mean())\n\ndicPerObs = {}\nfor idx,i in enumerate(X):\n l = len(i)\n try:\n dicPerObs[str(l)].append(np.count_nonzero(~np.isnan(i)) / float(i.size))\n except KeyError:\n dicPerObs[str(l)]=[np.count_nonzero(~np.isnan(i)) / float(i.size)]\n\n# percentage of data filled\n# the more the less sparse\nfor i in dicPerObs.keys():\n t = np.array(dicPerObs[i])\n print(t.mean())\n\nsns.countplot(tmp,order=range(tmp.min(),tmp.max()+1),zorder=1)\nsns.countplot(tmp2,order=range(tmp.min(),tmp.max()+1),zorder=10)\nplt.title(\"Number of ID per number of obesrvations\")\nplt.plot()\n\nXX = [np.array(t).mean(0) for t in X]\n\n#XX[0]\n\nnp.shape(XX)\n\nglobal_means = np.nanmean(data,0)\n##global_means = data.mean(0).values",
"t = []\nfor i in XX:\n t.append(np.count_nonzero(~np.isnan(i)) / float(i.size))\npd.DataFrame(np.array(t)).describe()",
"XX = []\nfor i in X:\n nm = np.nanmean(i,0)\n for idx,j in enumerate(nm):\n if np.isnan(j):\n nm[idx]=global_means[idx]\n XX.append(np.array(nm))\n\nXX = [np.array(t).mean(0) for t in X]\n\nsplit = 0.2\nps = int(len(XX) * (1-split))\nX_train = XX[:ps]\ny_train = y[:ps]\nX_test = XX[ps:]\ny_test = y[ps:]\n\netreg = ExtraTreesRegressor(n_estimators=100, max_depth=None, min_samples_split=1, random_state=0)\n\n%%time\netreg.fit(X_train,y_train)\n\n%%time\net_score = cross_val_score(etreg, XX, y, cv=5)\n \nprint(\"Features: %s\\nScore: %s\\tMean: %.03f\"%(columns, et_score,et_score.mean()))\n\nerr = (etreg.predict(X_test)-y_test)**2\nerr.sum()/len(err)",
"",
"def marshall_palmer(ref, minutes_past):\n #print(\"Estimating rainfall from {0} observations\".format(len(minutes_past)))\n # how long is each observation valid?\n valid_time = np.zeros_like(minutes_past)\n valid_time[0] = minutes_past.iloc[0]\n for n in xrange(1, len(minutes_past)):\n valid_time[n] = minutes_past.iloc[n] - minutes_past.iloc[n-1]\n valid_time[-1] = valid_time[-1] + 60 - np.sum(valid_time)\n valid_time = valid_time / 60.0\n\n # sum up rainrate * validtime\n sum = 0\n for dbz, hours in zip(ref, valid_time):\n # See: https://en.wikipedia.org/wiki/DBZ_(meteorology)\n if np.isfinite(dbz):\n mmperhr = pow(pow(10, dbz/10)/200, 0.625)\n sum = sum + mmperhr * hours\n return sum\n\n\ndef simplesum(ref,hour):\n hour.sum()\n\n# each unique Id is an hour of data at some gauge\ndef myfunc(hour):\n #rowid = hour['Id'].iloc[0]\n # sort hour by minutes_past\n hour = hour.sort('minutes_past', ascending=True)\n est = marshall_palmer(hour['Ref'], hour['minutes_past'])\n return est\n\nestimates = raw.groupby(raw.index).apply(myfunc)\nestimates.head(20)\n\nerr = (estimates-(np.hstack((y_train,y_test))))**2\nerr.sum()/len(err)",
"Memento (mauri)",
"etreg = ExtraTreesRegressor(n_estimators=100, max_depth=None, min_samples_split=1, random_state=0)\n\n\"\"\"\ncolumns = train_clean.columns\ncolumns = [\"minutes_past\",\"radardist_km\",\"Ref\",\"Ref_5x5_10th\", \"Ref_5x5_50th\"]\ncolumns = [u'Id', u'minutes_past', u'radardist_km', u'Ref', u'Ref_5x5_10th',\n u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',\n u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',\n u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',\n u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',\n u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',\n u'Kdp_5x5_50th', u'Kdp_5x5_90th', u'Expected']\n\"\"\"\ncolumns = [u'minutes_past', u'radardist_km', u'Ref', u'Ref_5x5_10th',\n u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',\n u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',\n u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',\n u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',\n u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',\n u'Kdp_5x5_50th', u'Kdp_5x5_90th']\n \nlabels = train[\"Expected\"].values\nfeatures = train[list(columns)].values\n\nimp = Imputer(missing_values='NaN', strategy='mean', axis=0)\nimp.fit(features)\nfeatures_trans = imp.transform(features)\n\nlen(features_trans)\n\nsplit = 0.2\nps = int(len(features_trans) * split)\nftrain = features_trans[:ps]\nltrain = labels[:ps]\nftest = features_trans[ps:]\nltest = labels[ps:]\n\n%%time\netreg.fit(ftrain,ltrain)\n\ndef scorer(estimator, X, y):\n return (estimator.predict(X[0])-y)**2\n\n%%time\net_score = cross_val_score(etreg, features_trans, labels, cv=3)\n \nprint(\"Features: %s\\nScore: %s\\tMean: %.03f\"%(columns, et_score,et_score.mean()))\n\nr = random.randrange(len(ltrain))\nprint(r)\nprint(etreg.predict(ftrain[r]))\nprint(ltrain[r])\n\nr = random.randrange(len(ltest))\nprint(r)\nprint(etreg.predict(ftest[r]))\nprint(ltest[r])\n\nerr = (etreg.predict(ftest)-ltest)**2\n\nerr.sum()/len(err)",
"Submit",
"filename = \"data/reduced_test_5000.csv\"\ntest = pd.read_csv(filename)\n\ncolumns = [u'minutes_past', u'radardist_km', u'Ref', u'Ref_5x5_10th',\n u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',\n u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',\n u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',\n u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',\n u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',\n u'Kdp_5x5_50th', u'Kdp_5x5_90th']\nfeatures = test[list(columns)].values\n\nimp = Imputer(missing_values='NaN', strategy='mean', axis=0)\nimp.fit(features)\nfeatures_trans = imp.transform(features)\n\nfall = test[test.columns].values\n\nfall[20]\n\nfeatures_trans[0]\n\ni = 1\npred = 0\nwhile fall[i][0] == 1:\n #print(fall[i])\n pred+=etreg.predict(features_trans[i])[0]\n #print(etreg.predict(features_trans[i])[0])\n i+=1\nprint(i)\n\nfall[-1][0]\n\n%%time\nres=[]\ni=0\nwhile i<len(fall) and i < 10000:\n pred = 0\n lenn = 0\n curr=fall[i][0]\n while i<len(fall) and fall[i][0] == curr:\n #print(fall[i])\n pred+=etreg.predict(features_trans[i])[0]\n #print(etreg.predict(features_trans[i])[0])\n i+=1\n lenn += 1\n res.append((curr,pred/lenn))\n #i+=1\n #print(i)\n\nlen(res)\n\nres[:10]\n\ndef myfunc(hour):\n #rowid = hour['Id'].iloc[0]\n # sort hour by minutes_past\n hour = hour.sort('minutes_past', ascending=True)\n #est = (hour['Id'],random.random())\n est = random.random()\n return est\n\ndef marshall_palmer(ref, minutes_past):\n #print(\"Estimating rainfall from {0} observations\".format(len(minutes_past)))\n # how long is each observation valid?\n valid_time = np.zeros_like(minutes_past)\n valid_time[0] = minutes_past.iloc[0]\n for n in xrange(1, len(minutes_past)):\n valid_time[n] = minutes_past.iloc[n] - minutes_past.iloc[n-1]\n valid_time[-1] = valid_time[-1] + 60 - np.sum(valid_time)\n valid_time = valid_time / 60.0\n\n # sum up rainrate * validtime\n sum = 0\n for dbz, hours in zip(ref, valid_time):\n # See: 
https://en.wikipedia.org/wiki/DBZ_(meteorology)\n if np.isfinite(dbz):\n mmperhr = pow(pow(10, dbz/10)/200, 0.625)\n sum = sum + mmperhr * hours\n return sum\n\n\ndef simplesum(ref,hour):\n hour.sum()\n\n# each unique Id is an hour of data at some gauge\ndef myfunc(hour):\n #rowid = hour['Id'].iloc[0]\n # sort hour by minutes_past\n hour = hour.sort('minutes_past', ascending=True)\n est = marshall_palmer(hour['Ref'], hour['minutes_past'])\n return est\n\nestimates = test.groupby(train.index).apply(myfunc)\nestimates.head(20)\n\nestimates = train.groupby(train.index).apply(myfunc)\nestimates.head(20)\n\ntrain[\"Expected\"].head(20)\n\nprint(features_trans[0])\nprint(etreg.predict(features_trans[0]))\n\ndef marshall_palmer(data):\n res=[]\n for n in data:\n res.append(etreg.predict(n)[0])\n return np.array(res).mean()\n\n\ndef simplesum(ref,hour):\n hour.sum()\n\ndef myfunc(hour):\n hour = hour.sort('minutes_past', ascending=True)\n est = marshall_palmer(hour[train.columns])\n return est\n\nestimates = train_clean.groupby(train_clean.index).apply(myfunc)\nestimates.head(20)",
"RNN",
"import pandas as pd \nfrom random import random\n\nflow = (list(range(1,10,1)) + list(range(10,1,-1)))*1000 \npdata = pd.DataFrame({\"a\":flow, \"b\":flow}) \npdata.b = pdata.b.shift(9) \ndata = pdata.iloc[10:] * random() # some noise \n\n\n#columns = [u'minutes_past', u'radardist_km', u'Ref', u'Ref_5x5_10th',\n# u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',\n# u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',\n# u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',\n# u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',\n# u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',\n# u'Kdp_5x5_50th', u'Kdp_5x5_90th']\ncolumns = [u'radardist_km', u'Ref', u'Ref_5x5_10th']\nnb_features = len(columns)\ndata = train[list(columns)]\ndata.head(10)\n\ndata.iloc[0].as_matrix()\n\ntrain.head(5)\n\ntrain.loc[11]\n\ntrain.loc[11][:1][\"Expected\"].as_matrix\n\n#train.index.unique()\n\ndef _load_data(data, n_prev = 100): \n \"\"\"\n data should be pd.DataFrame()\n \"\"\"\n\n docX, docY = [], []\n for i in range(len(data)-n_prev):\n docX.append(data.iloc[i:i+n_prev].as_matrix())\n docY.append(data.iloc[i+n_prev].as_matrix())\n alsX = np.array(docX)\n alsY = np.array(docY)\n\n return alsX, alsY\n\ndef train_test_split(df, test_size=0.1): \n ntrn = round(len(df) * (1 - test_size))\n\n X_train, y_train = _load_data(df.iloc[0:ntrn])\n X_test, y_test = _load_data(df.iloc[ntrn:])\n \n return (X_train, y_train), (X_test, y_test)\n\n(X_train, y_train), (X_test, y_test) = train_test_split(data)\n\nnp.shape(X_train)\n\nt = np.array([2,1])\nt.shape = (1,2)\nt.tolist()[0]\n\nnp.shape(t)\n\nX_train[:2,:2]\n\ntrain.index.unique()\n\nmax_padding = 20\n\n%%time\n\ndocX, docY = [], []\nfor i in train.index.unique():\n if isinstance(train.loc[i],pd.core.series.Series):\n m = [data.loc[i].as_matrix()]\n pad = np.pad(m, ((max_padding -len(m), 0),(0,0)), 'constant') # pre-padding\n docX.append(pad)\n docY.append(float(train.loc[i][\"Expected\"]))\n else:\n m = 
data.loc[i].as_matrix()\n pad = np.pad(m, ((max_padding -len(m), 0),(0,0)), 'constant')\n docX.append(pad)\n docY.append(float(train.loc[i][:1][\"Expected\"]))\n #docY.append(train.loc[i][:1][\"Expected\"].as_matrix)\nXX = np.array(docX)\nyy = np.array(docY)\n\nnp.shape(XX)\n\nXX[0].mean()\n\n#from keras.preprocessing import sequence\n#sequence.pad_sequences(X_train, maxlen=maxlen)\n\ndef _load_data(data): \n \"\"\"\n data should be pd.DataFrame()\n \"\"\"\n docX, docY = [], []\n for i in data.index.unique():\n #np.pad(tmp, ((0, max_padding -len(tmp) ),(0,0)), 'constant')\n m = data.loc[i].as_matrix()\n pad = np.pad(m, ((0, max_padding -len(m) ),(0,0)), 'constant')\n docX.append(pad)\n if isinstance(train.loc[i],pd.core.series.Series):\n docY.append(float(train.loc[i][\"Expected\"]))\n else:\n docY.append(float(train.loc[i][:1][\"Expected\"]))\n alsX = np.array(docX)\n alsY = np.array(docY)\n\n return alsX, alsY\n\ndef train_test_split(df, test_size=0.1): \n ntrn = round(len(df) * (1 - test_size))\n\n X_train, y_train = _load_data(df.iloc[0:ntrn])\n X_test, y_test = _load_data(df.iloc[ntrn:])\n \n return (X_train, y_train), (X_test, y_test)\n\n(X_train, y_train), (X_test, y_test) = train_test_split(train)\n\nlen(X_train[0])\n\ntrain.head()\n\nX_train[0][:10]\n\nyt = []\nfor i in y_train:\n yt.append([i[0]])\n\nyt[0]\n\nX_train.shape\n\nlen(fea[0])\n\nlen(X_train[0][0])\n\nf = np.array(fea)\n\nf.shape()\n\n#(X_train, y_train), (X_test, y_test) = train_test_split(data) # retrieve data\n\n# and now train the model\n# batch_size should be appropriate to your memory size\n# number of epochs should be higher for real world problems\nmodel.fit(X_train, yt, batch_size=450, nb_epoch=2, validation_split=0.05) \n\nfrom keras.models import Sequential \nfrom keras.layers.core import Dense, Activation, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers.embeddings import Embedding\n\n%%time\ninput_dim = nb_features\nout_dim = 1 \nhidden_dim = 200\n\nmodel = 
Sequential()\n#Embedding(input_dim, hidden_dim, mask_zero=True)\n#model.add(LSTM(hidden_dim, hidden_dim, return_sequences=False)) \nmodel.add(LSTM(input_dim, hidden_dim, return_sequences=False)) \nmodel.add(Dropout(0.5))\nmodel.add(Dense(hidden_dim, out_dim)) \nmodel.add(Activation(\"linear\")) \nmodel.compile(loss=\"mean_squared_error\", optimizer=\"rmsprop\") \n\nmodel.fit(XX, yy, batch_size=10, nb_epoch=10, validation_split=0.1) \n\ntest = random.randint(0,len(XX))\nprint(model.predict(XX[test:test+1])[0][0])\nprint(yy[test])"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
KMFleischer/PyEarthScience
|
Visualization/PyNGL/PyEarthScience_xy_PyNGL.ipynb
|
mit
|
[
"PyEarthScience: Python examples for Earth Scientists\nXY-plots\nUsing PyNGL\nLine plot with\n- marker\n- different colors\n- legend\n- title\n- x-axis label\n- y-axis label",
"import numpy as np\nimport Ngl, Nio",
"Generate x- and y-values.",
"x2 = np.arange(100)\n\ndata = np.arange(1,40,5)\nlinear = np.arange(100)\nsquare = [v * v for v in np.arange(0,10,0.1)]\n\n#-- retrieve maximum size of plotting data\nmaxdim = max(len(data),len(linear),len(square))\n\n#-- create 2D arrays to hold 1D arrays above\ny = -999.*np.ones((3,maxdim),'f') #-- assign y array containing missing values\ny[0,0:(len(data))] = data\ny[1,0:(len(linear))] = linear\ny[2,0:(len(square))] = square",
"Draw data, set title and axis labels.",
"#-- open a workstation\nwkres = Ngl.Resources() #-- generate an res object for workstation\nwks = Ngl.open_wks(\"png\",\"plot_xy_simple_PyNGL\",wkres)\n\n#-- set resources\nres = Ngl.Resources() #-- generate an res object for plot\nres.tiMainString = \"Title string\" #-- set x-axis label\nres.tiXAxisString = \"x-axis label\" #-- set x-axis label\nres.tiYAxisString = \"y-axis label\" #-- set y-axis label\n\nres.vpWidthF = 0.9 #-- viewport width\nres.vpHeightF = 0.6 #-- viewport height\n\nres.caXMissingV = -999. #-- indicate missing value\nres.caYMissingV = -999. #-- indicate missing value\n\n#-- marker and line settings\nres.xyLineColors = [\"blue\",\"green\",\"red\"] #-- set line colors\nres.xyLineThicknessF = 3.0 #-- define line thickness\nres.xyDashPatterns = [0,0,2] #-- ( none, solid, cross )\nres.xyMarkLineModes = [\"Markers\",\"Lines\",\"Markers\"] #-- marker mode for each line\nres.xyMarkers = [16,0,2] #-- marker type of each line\nres.xyMarkerSizeF = 0.01 #-- default is 0.01\nres.xyMarkerColors = [\"blue\",\"green\",\"red\"] #-- set marker colors\n\n#-- legend settings\nres.xyExplicitLegendLabels = [\" data\",\" linear\",\" square\"] #-- set explicit legend labels\nres.pmLegendDisplayMode = \"Always\" #-- turn on the drawing\nres.pmLegendOrthogonalPosF = -1.13 #-- move the legend upwards\nres.pmLegendParallelPosF = 0.15 #-- move the legend to the right\nres.pmLegendWidthF = 0.2 #-- change width\nres.pmLegendHeightF = 0.10 #-- change height\nres.lgBoxMinorExtentF = 0.16 #-- legend lines shorter\n\n#-- draw the plot\nplot = Ngl.xy(wks,x2,y,res)\n\n#-- the end\nNgl.delete_wks(wks) #-- this need to be done to close the graphics output file\nNgl.end()",
"Show the plot in this notebook.",
"from IPython.display import Image\nImage(filename='plot_xy_simple_PyNGL.png') "
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
whitead/numerical_stats
|
unit_11/lectures/lecture_1.ipynb
|
gpl-3.0
|
[
"Optimization in 1-D and N-D. Debugging Functions\nUnit 11, Lecture 1\nNumerical Methods and Statistics\n\nProf. Andrew White, April 19, 2020\nGoals:\n\nLearn the meaning of root-finding and minimization, the two types of optimization\nUnderstand the iterative nature of these methods\nDebug common problems when defining functions, which is essential for optimization\nBe able to identify convex problems and understand their complexties\nLearn the two standard methods for minimize and root-finding and how to call them in Python",
"import numpy as np\nimport matplotlib.pyplot as plt",
"Numerical Optimization - Root Finding\nWhat is $x$ in\n$$\\cos (x) = x$$\nTo use a root-finding method, we must make our equation have one side be 0.\n$$\\cos (x) - x = 0$$\nNewton's Method - For finding Roots",
"from scipy.optimize import newton\n\nroot = newton(lambda x: np.cos(x) - x, x0=0)\nprint(root, np.cos(root) - root)",
"$$x_{i+1} = x_i - \\frac{f(x_i)}{f'(x_i)}$$\nThis method, like all this unit are iterative. This is modifiable by you, either through choosing tolerance, or maximum iterations. The functions have sensible defaults",
"root = newton(lambda x: np.cos(x) - x, x0=0, tol=1e-3)\nprint(root, np.cos(root) - root)\n\nroot = newton(lambda x: np.cos(x) - x, x0=0, tol=1e-3, maxiter=5)\nprint(root, np.cos(root) - root)",
"Newton's Method\nUse root instead, this is just for instructional purposes. root is better\nType: Root Finding\nDiscrete/Continuous: Continuous\nDimensions: 1\nDerivative: optional\nNon-Convex: not recommended\nPython: newton\nLet's try to solve a more interesting equation with Newton's method!\n$$ \\int_0^x e^{-s^2}\\,ds = \\frac{1}{2}$$\nFind $x$",
"#Note: this code is intentionally really bad\nfrom scipy.integrate import quad\ndef equation(x):\n return quad(lambda s: np.exp(-s**2), 0, x)\nroot = newton(equation, x0=0)",
"Steps for Debugging Code\n\nCorrect any Python errors\nRestart the Kernel\nCorrect any Python errors\nPrint to discover logical errors\n\nMost Common Mistakes\n\nCopy-Pasta leaving in old variables\nMixing Numpy arrays and for loops\nForgetting order of function args and return values\n\nReview of Functions\nFor functions like quad, newton, and minimize the function should take in 1 argument and return 1 argument.\nCorrect Examples\n$$x^2 - 3x + 2$$",
"def version_1(x):\n return x**2 - 3 *x + 2\n\nversion_2 = lambda x: x ** 2 - 3 *x + 2\n\nnp_version = np.vectorize(version_1)\n\nprint(version_1(3.))\nprint(version_2(3.))\n\nsome_threes = np.zeros(10)\nsome_threes[:] = 3.0 # -> Notice how we don't replace the numpy array with 3, but instead make all elements in it equal to three\n#some_threes = 3 -> This would delete the numpy array and now some_threes would be a single 3\n\nprint(np_version(some_threes))",
"$$\\int_{\\pi}^x \\sin^2(s)\\,ds$$",
"from scipy.integrate import quad\nimport numpy as np\nfrom math import pi\n\ndef integrate_sin2(x):\n ans, err = quad(lambda s: np.sin(s) ** 2, pi, x)\n return ans\n \nprint(integrate_sin2(2 * pi))",
"2 Vectors, $\\vec{x}$ and $\\vec{y}$, where one component of $\\vec{x}$ is changing and we want to know the distance between the two vectors.",
"from math import sqrt\n\ndef distance(x, y):\n sum_sq = np.sum((x - y)**2)\n return sqrt(sum_sq)\n\ndef my_distance(s):\n x = np.zeros(3)\n y = np.zeros(3)\n x[0] = 2.0\n x[1] = s\n x[2] = -3.5\n y[0] = 1.0\n y[1] = -3.0\n y[2] = 0.0\n return distance(x, y)\n\nprint(my_distance(1))",
"Incorrect Examples\nNo Return Value",
"def version_1(x):\n x**2 - 3 *x + 2\n\nprint(version_1(3.))",
"Bad Return Value",
"def integrate_sin2(x):\n return quad(lambda s: np.sin(s) ** 2, pi, x)\n\nprint(integrate_sin2(2 * pi))",
"Too many arguments",
"def distance(x, y):\n sum_sq = np.sum((x - y)**2)\n return sqrt(sum_sq)\n\nprint(distance(1))",
"Let's return to our example from above:\n$$ \\int_0^x e^{-s^2}\\,ds = \\frac{1}{2}$$\nFind $x$",
"#note still wrong\ndef equation(x):\n ans, err = quad(lambda s: np.exp(-s**2), 0, x)\n return ans\nroot = newton(equation, x0=0)\nprint(root)\n\nequation(root)",
"We forgot to rearrange the equation to be equal to $0$",
"root = newton(lambda x: equation(x) - 0.5, x0=0)\nprint(root, equation(root))",
"Scope\nScope means the set of variables and functions which are defined and accessible in your code",
"x = 4\ny = 2\n\n#Right now, there is x,y and all other functions/variables defined or imported above in the scope",
"Scopes nest",
"x = 4\ny = 2\n#Here, I have x and y in scope\ndef scope_example():\n z = 2\n #Here, I have x,y and z in scope\n \n#Here I again have only x and y in scope\n\nx = 4 \ny = 2\nprint(y,\"Before function\")\ndef scope_example():\n y = 25 #This is a new version of y that exists only in this scope\n print(y, \"Inside function\")\nscope_example()\nprint(y, \"After Function\")\n\nx = 4 \ny = [2]\nprint(y,\"Before function\")\ndef scope_example():\n y[0] = 25 #Here I'm not creating a y, but modifying y\n print(y, \"Inside function\")\nscope_example()\nprint(y, \"After Function\")",
"Things to remember about scope:\n\nScopes nest, so that you can access things above your scope\nYou can modify variables from any scope you can see, but ones you create disappear outside of the scope\n\nReturning to Optimization\nApplications of Optimization\n\nSolving non-linear equations\nSolving systems of equations\nOptimizing equations with or without constraints\nFitting models to data\n\nChoosing which method to use\nThere are five things to consider when doing optimization:\n\nIs it 1 or N dimensions?\nAre you minimizing or root-finding?\nAre there constraints?\nAre there bounds?\nIs it convex or non-convex?\n\nIdentifying Convexity\nIf a problem has more than one minimum (derivative is 0), then the problem is non-convex. The opposite of convex is non-convex. A concave function can be made convex with a negative sign. A non-convex function cannot be made convex.\nKnowing about convexity can come from:\n\nPlots\nRunning convex optimization in two starting positions and getting different minimums\nKnowing something specific about the problem\n\nConsider a function with two minimums:",
"def two_well(x):\n if x < 0.125:\n return (x + 2) ** 2\n if x >= 0.125:\n return (x - 2) ** 2 + 1\n \nnp_two_well = np.vectorize(two_well)\n\nx = np.linspace(-4, 4, 1000)\nplt.plot(x, np_two_well(x))\nplt.show()",
"BFGS Optimization - Minimization\nBroyden–Fletcher–Goldfarb–Shanno\nBFGS\nType: Minimization\nDiscrete/Continuous: Continuous\nDimensions: N\nDerivative: optional\nNon-Convex: not recommended\nPython: minimize if no constraints or bounds are given\nNomenclature\nIn optimization, you have a function called the objective function. That's what you're minimizing. It always returns a single value. It is sometimes called the fit, the error, the residual, or the penalty.",
"def obj(x):\n return x**2\n\nx = np.linspace(-1,1,100)\nplt.plot(x, obj(x))\nplt.show()\n\nfrom scipy.optimize import minimize\n\nminimize(obj, x0=3)",
"Minimize Return value\n fun: The value of the function at the minimum\nhess_inv: The inverse of the the Hessian\n jac: The value of the Jacobian\n message: A string describing what happened\n nfev: Number of function evaluations\n nit: Number of iterations of the x point\n njev: Number of times it computed the Jacobian\n status: The single digit message (0 = success, != 0 some error)\n success: Boolean indicating success\n x: The minimum x\n\nObjective Functions\nBe careful that your objective function is convex and it's minimum isn't at $\\infty$ or $-\\infty$. \nA good objective function\nMinimize the following:\n$$f(x) = \\frac{(x - 4)^2}{2} + \\frac{(x - 2)^2}{4}$$",
"def f(x):\n return ((x-4)**2)/2+((x-2)**2)/4\nx = np.linspace(-10, 10, 100)\nplt.plot(x, f(x))\nplt.show()\n\nresult = minimize(f, x0=0)\nprint(f'the minimum value occurs at {result.x} and is {result.fun}')",
"A bad objective function\nMinimize the following:\n$$\nf(x) = \\frac{(x - 4)} { 2 }\n$$\nThe minimum is at $-\\infty$!",
"result = minimize(lambda x: (x - 4) / 2, x0=0)\nprint(f'the minimum value occurs at {result.x} and is {result.fun}')",
"A good objective function but bad x0\nMinimize the following function\n$$4 \\left[ r^{-12} - r^{-6}\\right]$$",
"result = minimize(lambda r: 4 * (r**(-12) - r**(-6)), x0=0)\nprint(f'the minimum is at {result.x}')",
"Our initial value was not in the domain!",
"r = np.linspace(0.9, 2, 100)\ny = 4 * (r**(-12) - r**(-6))\nplt.plot(r, y)\nplt.show()\n\nresult = minimize(lambda r: 4 * (r**(-12) - r**(-6)), x0=1)\nprint(f'the minimum is at {result.x} and its value is {result.fun}')",
"Maximizing\nIn order to maximzie a function, just add a minus sign\nMaximize the following function:\n$$\n-\\left[x - \\cos(x)\\right]^2\n$$",
"#place - sign to make it a maxmimization problem\nresult = minimize(lambda x: (x - np.cos(x))**2, x0=1)\nprint(f'the maximum is at {result.x}')",
"Multiple Dimensions\nJust indicate multiple dimensions by using a multidimensional x0. Note that your $x$ becomes a vector!\nMinimize the following:\n$$\nf(x, y) = \\frac{(x - 4)^2} { 2 } + \\frac{(y + 3)^2} { 5 }\n$$",
"result = minimize(lambda x: (x[0] - 4)**2 / 2 + (x[1] + 3)**2 / 5, x0=[0,0])\nprint(f'maximum occurs when x = {result.x[0]} and y = {result.x[1]}')",
"Identifying Convexity for Minimization - Example\nThe best ways to identify convexity are:\n\nplot it\ntry optimizing in multiple starting positions",
"x = np.linspace(-1, 1, 1000)\nplt.plot(x, 2 * x ** 3 - 0 * x **2 - x)\nplt.show()\n\nfrom scipy.optimize import minimize\nminimize(lambda x: 2 * x ** 3 - 0 * x **2 - x, x0=0.05)\n\nx = np.linspace(-1, 1, 1000)\nplt.plot(x, 2 * x ** 3 - 0 * x **2 - x)\nplt.show()\n\nfrom scipy.optimize import minimize\nminimize(lambda x: 2 * x ** 3 - 0 * x **2 - x, x0=-0.5)",
"Powell hybrid method\nType: Root finding\nDiscrete/Continuous: Continuous\nDimensions: N\nDerivative: optional\nNon-Convex: not recommended\nPython: root unless method argument specifies a different method\n1D Example\nThis is exactly like newton from above. Solve this equation:\n$$\n\\cos x + \\sin x = x\n$$",
"from scipy.optimize import root\n#rearranged equation so all terms on one side\nresult = root(lambda x: np.cos(x) + np.sin(x) - x, x0=1)\nprint(result)",
"The result type is like what we saw for minimize. Similar terms are here, including the root and the value of the function at the root. Notice it's not exactly $0$ at the root.",
"x = result.x \nprint(np.cos(x) + np.sin(x) - x)",
"Solve the following system of equations:\n$$ 3 x^2 - 2 y = 4$$\n$$ x - 4 y ^ 2 = -2$$",
"def sys(v):\n #I'm using v here to distinguish from the x in the equations\n #extract x and y\n x = v[0]\n y = v[1]\n #compute equations\n eq1 = 3 * x ** 2 - 2 * y - 4\n eq2 = x - 4 * y**2 + 2\n #pack into list\n sys_eq = [eq1, eq2]\n return sys_eq\nroot(sys, x0=[1,1])",
"So the answer is $x = 1.40,\\: y = 0.921$\nYou should not ignore the information in the output of root. Imagine this small modification:\n$$ 3 x^2 - 2 y^2 = 4$$\n$$ x^3 - 4 y = -2$$",
"def sys2(v):\n #I'm using v here to distinguish from the x in the equations\n #extract x and y\n x = v[0]\n y = v[1]\n #compute equations\n eq1 = 3 * x ** 2 - 2 * y**2 - 4\n eq2 = x**3 - 4 * y + 2\n #pack into list\n sys_eq = [eq1, eq2]\n return sys_eq\nroot(sys2, x0=[1,1])",
"If you did not read the message or status, you might believe the method succeeded. \nMore compact version:",
"root(lambda x: [3 * x[0] ** 2 - 2 * x[1] - 4, x[0] - 4 * x[1] ** 2 + 2], x0=[1,1])",
"Importance of starting position\nBy the way, there are two roots to this function!",
"root(lambda x: [3 * x[0] ** 2 - 2 * x[1] - 4, x[0] - 4 * x[1] ** 2 + 2], x0=[0, 0])"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
MaxPowerWasTaken/MaxPowerWasTaken.github.io
|
jupyter_notebooks/Process many find_replace rules in a corpus fast.ipynb
|
gpl-3.0
|
[
"Processing Many Find/Replace Rules Across A Text Corpus, in Constant Time\nIntroduction\nSay you'd like to pre-process some body of text data for a natural language processing (NLP) task. Preprocessing raw text data is important because there are many terms which mean the same thing, but may not contain exactly the same text. We preprocess text data so that terms which mean the same thing will appear the same to our NLP model.\n\"Basketball\" and \"basketball\" mean the same thing, for example; just because one occurred at the start of a sentence and so has a capital \"B\" doesn't change the meaning of the word. So standardizing case (e.g. converting all characters to lower-case) is important, as a trivial example. In a recent project, I needed to preprocess each document in a text corpus to:\n - Standardize between British and American spellings (e.g. colour -> color)\n - Standardize contractions (e.g. don't -> do not)\n - Correct spelling mistakes \nIt turns out a quick google can quickly provide quite good lists of mappings for each of the above rules. For example, after copy-pasting some tables from a quick google search into spreadsheets, I now have tables with find/replace rules for each:\nData\nFirst I'll load the tables I found on google, for the text standardization steps mentioned above.",
"import pandas as pd\nfrom datetime import datetime\n\n# Read in text-cleaning rules\nfolder = 'datasets/text_cleaning/'\nbrit_to_amer = pd.read_csv(folder + 'british to american spellings.csv', header=None)\nmisspellings = pd.read_csv(folder + 'common_misspellings.csv', header=None)\ncontractions = pd.read_csv(folder + 'contractions.csv', header=None)\n\n# Each df has terms in 1st column which should be replaced with terms in 2nd column \nfor df in [brit_to_amer, misspellings, contractions]:\n df.columns = ['Find', 'Replace']\n\n\nprint(brit_to_amer.head(3), '\\n')\nprint(misspellings.head(3), '\\n')\nprint(contractions.head(3))",
"For a text corpus, I'll load the Quora Duplicate Question dataset",
"quora = pd.read_csv('datasets/quora_kaggle.csv')\nquora.head(3)",
"Finally, I'll consolidate our Pandas DataFrames so that we have a single DataFrame of find/replace rules, and a single column/Series of questions text:",
"find_replace_rules = pd.concat([brit_to_amer, misspellings, contractions], axis=0)\nquestions = pd.concat([quora.question1, quora.question2], axis=0).str.lower()",
"The Naive Approach\nThe simplest way to approach this preprocessing task would be to:\n - for each row in find_replace_rules: \n - for each row in questions:\n - update the question text with the find/replace rule\nThe problem with this approach is that we expect it to take about O(m x n) time, where: \n - m is the number of text strings we have to process (i.e. rows in questions)\n - n is the number of find/replace rules to apply (i.e. rows in find_replace_rules). \nThis is impractical if we have a large number of texts to preprocess, or a large nubmer of find/replace rules we want to clean our text with.\nThe Vectorized Approach\nUsing a built-in, vectorized Pandas function (like Series.str.replace) to process our Series of texts should speed things up significantly, vs looping through each text in our corpus (meaning each row in our questions dataframe, in this example).\nBut we'd still expect to need to loop through each rule in our find_replace_rules dataframe. This means, if we have 10 find/replace rules for example, we'd still expect to pass through our entire questions dataset 10 times. In the Big O notation used earlier, we still expect this to take about O(n log(m)) time. \nEnter Compiled Regular Expressions\nIt turns out, a single regular expression can include all of our find/replace rules, compiled as a single pattern-matching expression. I owe this technique to this awesome Stack Overflow answer \nFirst we'll convert our find/replace dataframe into a dictionary:",
"find_repl_dict = {row[0]: row[1] for row in find_replace_rules.itertuples(index=False)}",
"We can use this dictionary, along with a compiled regular expression, to process our questions text series in one pass-through using Pandas' Series.str.replace.\nSeries.str.replace takes two required parameters:\n - pat: A string or regex pattern to search for, and\n - repl: A replacement string or callable. If a callable is used here, it's passed the regex match object from pat, and must return a replacement string for it",
"def mass_find_replace(text_corpus, find_replace_rules):\n '''thanks for the regex used here to https://stackoverflow.com/a/13824401/1870832'''\n \n # Texts in text corpus should be elements in a Pandas Series\n assert type(text_corpus) == pd.Series \n \n # convert find/replace rules from dataframe to dictionary, for generating Series.str.replace params\n fr_rules_dict = {row[0]: row[1] for row in find_replace_rules.itertuples(index=False)}\n \n # generate regex to match on any word which is in one of our 'find' dict keys \n pat = r'\\b' + '|'.join(fr_rules_dict.keys()) + r'\\b'\n \n # generate callable which returns the 'replace' value for any regex match object from 'pat' above\n repl= lambda m: find_repl_dict[m.group(0)]\n \n #return processed text\n return text_corpus.str.replace(pat, repl)\n\n\npat = r'\\b' + '|'.join(find_repl_dict.keys()) + r'\\b'\nrepl= lambda m: find_repl_dict[m.group(0)]\n\nquestions_processed = questions.str.replace(pat, repl)",
"Clocking Performance",
"def clock_performance(text_corpus, num_docs, num_fr_rules, preprocessing_fn):\n\n assert type(text_corpus) == pd.Series\n \n # take subset of questions, find/replace rules\n questions_subset = questions.sample(num_docs)\n fr_rules_subset = find_replace_rules.sample(num_fr_rules)\n \n # Process text, clock time\n time0 = datetime.now()\n processed_text = mass_find_replace(questions_subset, fr_rules_subset)\n processing_time = datetime.now() - time0\n \n return(processed_text, processing_time)\n \n ",
"Now let's see how performance of this function scales with the size of our text corpus, or our number of find/replace rules.\nFirst we'll hold the number of find/replace rules constant, and vary our number of text-documents:",
"fr_rules = 100\nfor num_texts in [1e5, 2e5, 4e5, 8e5]:\n clock_time = clock_performance(questions, int(num_texts), fr_rules, mass_find_replace)[1]\n print('Clocked time with {} find/replace rules and {} text documents to process: {}'.format(fr_rules, \n num_texts,\n clock_time))\n\nnum_texts = 1e5\nfor fr_rules in [200, 400, 800, 1600]:\n clock_time = clock_performance(questions, int(num_texts), fr_rules, mass_find_replace)[1]\n print('Clocked time with {} find/replace rules and {} text documents to process: {}'.format(fr_rules, \n num_texts,\n clock_time))\n\nquestions.shape\n\ndf_100_10, time100_10 = clock_performance(questions, 100, 10, mass_find_replace)\n\ndf_100_10.head(20)\n\nquestions.head(20)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
QuantEcon/QuantEcon.notebooks
|
ddp_ex_MF_7_6_3_py.ipynb
|
bsd-3-clause
|
[
"DiscreteDP Example: Asset Replacement with Maintenance\nDaisuke Oyama\nFaculty of Economics, University of Tokyo\nFrom Miranda and Fackler, <i>Applied Computational Economics and Finance</i>, 2002,\nSection 7.6.3",
"%matplotlib inline\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport quantecon as qe\nfrom quantecon.markov import DiscreteDP\n\nmaxage = 5 # Maximum asset age\nrepcost = 75 # Replacement cost\nmancost = 10 # Maintainance cost\nbeta = 0.9 # Discount factor\nm = 3 # Number of actions; 0: keep, 1: service, 2: replace\n\n# Construct the state space which is two-dimensional\ns0 = np.arange(1, maxage+1) # Possible ages\ns1 = np.arange(maxage) # Possible servicings\nS = qe.cartesian([s0, s1]) # State space\nn = len(S) # Number of states\n\nS",
"Here, in the state space we include states that are not reached\ndue to the constraint that the asset can be serviced at most one per year,\ni.e., those pairs of the age of asset $a$ and the number of services $s$\nsuch that $s \\geq a$.\nOne can alternatively define the state space excluding those states;\nsee the section Alternative formulation below.",
"# We need a routine to get the index of a age-serv pair\ndef getindex(age, serv, S):\n \"\"\"\n Get the index of [age, serv] in S.\n We know that elements in S are aligned in a lexicographic order.\n \"\"\"\n n = len(S)\n for i in range(n):\n if S[i, 0] == age:\n for k in range(n-i):\n if S[i+k, 1] == serv:\n return i+k\n\n# Profit function as a function of the age and the number of service\ndef p(age, serv):\n return (1 - (age - serv)/5) * (50 - 2.5 * age - 2.5 * age**2)\n\n# Reward array\nR = np.empty((n, m))\nR[:, 0] = p(S[:, 0], S[:, 1])\nR[:, 1] = p(S[:, 0], S[:, 1]+1) - mancost\nR[:, 2] = p(0, 0) - repcost\n\n# Infeasible actions\nfor serv in range(maxage):\n R[getindex(maxage, serv, S), [0, 1]] = -np.inf\n\nR\n\n# (Degenerate) transition probability array\nQ = np.zeros((n, m, n))\nfor i in range(n):\n Q[i, 0, getindex(min(S[i, 0]+1, maxage), S[i, 1], S)] = 1\n Q[i, 1, getindex(min(S[i, 0]+1, maxage), min(S[i, 1]+1, maxage-1), S)] = 1\n Q[i, 2, getindex(1, 0, S)] = 1\n\n# Create a DiscreteDP\nddp = DiscreteDP(R, Q, beta)\n\n# Solve the dynamic optimization problem (by policy iteration)\nres = ddp.solve()\n\n# Number of iterations\nres.num_iter\n\n# Optimal policy\nres.sigma\n\n# Optimal actions for reachable states\nfor i in range(n):\n if S[i, 0] > S[i, 1]:\n print(S[i], res.sigma[i])\n\n# Simulate the controlled Markov chain\nres.mc.state_values = S # Set the state values\ninitial_state_value = [1, 0]\nnyrs = 12\nspath = res.mc.simulate(nyrs+1, init=initial_state_value)\n\n# Plot sample paths of the age of asset (0th coordinate of `spath`)\n# and the number of services (1st coordinate of `spath`)\nfig, axes = plt.subplots(1, 2, figsize=(12, 4))\ncaptions = ['Age of Asset', 'Number of Services']\nfor i, caption in zip(range(2), captions):\n axes[i].plot(spath[:, i])\n axes[i].set_xlim(0, 12)\n axes[i].set_xlabel('Year')\n axes[i].set_ylabel(caption)\n axes[i].set_title('Optimal State Path: ' + caption)\naxes[0].set_yticks(np.linspace(1, 4, 4, 
endpoint=True))\naxes[0].set_ylim(1, 4)\naxes[1].set_yticks(np.linspace(0, 2, 3, endpoint=True))\naxes[1].set_ylim(0, 2.25)\nplt.show()",
"Alternative formulation\nDefine the state space excluding the age-serv pairs that do not realize:",
"# Construct the state space which is two-dimensional\ns0 = np.arange(1, maxage+1) # Possible ages\ns1 = np.arange(maxage) # Possible servicings\nS = qe.cartesian([s0, s1]) # Including infeasible pairs as previously\n\nS = S[S[:, 0] > S[:, 1]] # Exclude infeasible pairs\nn = len(S) # Number of states\n\nS",
"We follow the state-action pairs formulation approach.",
"# Reward array\nR = np.empty((n, m))\nfor i, (age, serv) in enumerate(S):\n R[i, 0] = p(age, serv) if age < maxage else -np.infty\n R[i, 1] = p(age, serv+1) - mancost if age < maxage else -np.infty\n R[i, 2] = p(0, 0) - repcost\n\nR\n\n# Remove the state-action pairs yielding a reward negative infinity\ns_indices, a_indices = np.where(R > -np.infty)\nR = R[s_indices, a_indices]\n\nR\n\n# Number of feasible state-action pairs\nL = len(R)\n\n# (Degenerate) transition probability array\nQ = np.zeros((L, n)) # One may use a scipy.sparse matrix for a larger problem\nit = np.nditer((s_indices, a_indices), flags=['c_index'])\nfor s, a in it:\n i = it.index\n if a == 0:\n Q[i, getindex(min(S[s, 0]+1, maxage), S[s, 1], S)] = 1\n elif a == 1:\n Q[i, getindex(min(S[s, 0]+1, maxage), min(S[s, 1]+1, maxage-1), S)] = 1\n else:\n Q[i, getindex(1, 0, S)] = 1\n\n# Create a DiscreteDP\nddp = DiscreteDP(R, Q, beta, s_indices, a_indices)\n\n# Solve the dynamic optimization problem (by policy iteration)\nres = ddp.solve()\n\n# Number of iterations\nres.num_iter\n\n# Optimal policy\nres.sigma\n\n# Simulate the controlled Markov chain\nres.mc.state_values = S # Set the state values\ninitial_state_value = [1, 0]\nnyrs = 12\nspath = res.mc.simulate(nyrs+1, init=initial_state_value)\n\n# Plot sample paths of the age of asset (0th coordinate of `spath`)\n# and the number of services (1st coordinate of `spath`)\nfig, axes = plt.subplots(1, 2, figsize=(12, 4))\ncaptions = ['Age of Asset', 'Number of Services']\nfor i, caption in zip(range(2), captions):\n axes[i].plot(spath[:, i])\n axes[i].set_xlim(0, 12)\n axes[i].set_xlabel('Year')\n axes[i].set_ylabel(caption)\n axes[i].set_title('Optimal State Path: ' + caption)\naxes[0].set_yticks(np.linspace(1, 4, 4, endpoint=True))\naxes[0].set_ylim(1, 4)\naxes[1].set_yticks(np.linspace(0, 2, 3, endpoint=True))\naxes[1].set_ylim(0, 2.25)\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ilogue/pyrsa
|
demos/example_dataset.ipynb
|
lgpl-3.0
|
[
"Dataset objects in pyrsa\nThese exercises show how to load and structure a dataset object.\nIn this demo, we will first provide a walkthrough for loading a single-subject dataset from a .mat file and arranging it into a pyRSA dataset object.\nWe then demonstrate how to create dataset objects using data from multiple subjects.",
"# relevant imports\nimport numpy as np\nfrom scipy import io\nimport matplotlib.pyplot as plt\nimport pyrsa\nimport pyrsa.data as rsd # abbreviation to deal with dataset",
"1. Single-subject dataset example\nGetting started\nWe will use a dataset where one subject was presented with 92 different visual stimuli while brain responses were measured in 100 voxels.\nThe different visual stimuli (each row) are the conditions, and the voxels (each column) are the measurement channels.",
"# import the measurements for the dataset\nmeasurements = io.matlab.loadmat('92imageData/simTruePatterns.mat')\nmeasurements = measurements['simTruePatterns']\nnCond = measurements.shape[0]\nnVox = measurements.shape[1]\n\n# plot the imported data\nplt.imshow(measurements,cmap='gray') \nplt.xlabel('Voxels')\nplt.ylabel('Conditions')\nplt.title('Measurements')",
"Creating the dataset object\nWe will now arrange the loaded data into a dataset object for use in pyrsa.\nA dataset object contains all the information needed to calculate a representational dissimilarity matrix (RDM). Therefore, the dataest must include:\n - measurements: [NxP] numpy.ndarray. These are the observations (N) from each measurement channel (P).\n - obs_descriptors: dict that defines the condition label associated with each observation in measurements\nBecause we also want to include helpful information about this dataset, we include the additional information:\n - descriptors: dict with metadata about this dataset object (e.g. experiment session #, subject #, experiment name). Basically general descriptions\n - channel_descriptors: dict that identifies each column (channel) in measurements\nTo start, we will note the session # (e.g. the first scanning session) and the subject # for this dataset. In addition, we will create labels for each of the 92 conditions and 100 voxels. Finally, we package this information into a pyrsa dataset object.",
"# now create a dataset object\ndes = {'session': 1, 'subj': 1}\nobs_des = {'conds': np.array(['cond_' + str(x) for x in np.arange(nCond)])}\nchn_des = {'voxels': np.array(['voxel_' + str(x) for x in np.arange(nVox)])}\n#obs_des = {'conds': np.array(['cond_' + str(x) for x in np.arange(1,nCond+1)])} # indices from 1\n#chn_des = {'conds': np.array(['voxel' + str(x) for x in np.arange(1,nVox+1)])} # indices from 1\ndata = rsd.Dataset(measurements=measurements,\n descriptors=des,\n obs_descriptors=obs_des,\n channel_descriptors=chn_des)\nprint(data)",
"Sometimes we wish to consider only a subset of data - either a subset of observations (conditions), or subset of measurement channels. This might be to only consider the measurement channels where all the subjects have data, or conditions which occur across all subjects / sessions. Using dataset functionality, we can subset the datasets according to a subset of the conditions or channels via 'subset_obs' and 'subset_channel', respectively.",
"# create an example dataset with random data, subset some conditions\nnChannel = 50\nnObs = 12\nrandomData = np.random.rand(nObs, nChannel)\ndes = {'session': 1, 'subj': 1}\nobs_des = {'conds': np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5])}\nchn_des = {'voxels': np.array(['voxel_' + str(x) for x in np.arange(nChannel)])}\ndata = rsd.Dataset(measurements=randomData,\n descriptors=des,\n obs_descriptors=obs_des,\n channel_descriptors=chn_des\n )\n# select a subset of the dataset: select data only from conditions 0:4\nsub_data = data.subset_obs(by='conds', value=[0,1,2,3,4])\nprint(sub_data)",
"Additionally, you might want to split the data in a certain way and analyze the splits as separate datasets. For instance, if your data is organized such that there are different ROIs, you might wish to perform the subsequent analyses separately for each ROI. Similarly, you could split the observations. This is supported with 'split_obs' and 'split_channel' options on the dataset object.",
"# Split by channels\nnChannel = 3 \nnChannelVox = 10 # three ROIs, each with 10 voxels\nnObs = 4\nrandomData = np.random.rand(nObs, nChannel*nChannelVox)\ndes = {'session': 1, 'subj': 1}\nobs_des = {'conds': np.array([0, 1, 2, 3])}\nchn_des = np.matlib.repmat(['ROI1','ROI2','ROI3'],1,nChannelVox)\nchn_des = {'ROIs': np.array(chn_des[0])}\ndata = rsd.Dataset(measurements=randomData,\n descriptors=des,\n obs_descriptors=obs_des,\n channel_descriptors=chn_des\n )\nsplit_data = data.split_channel(by='ROIs')\nprint(split_data)",
"2. Multi-subject dataset example\nFirst, we generate random data for a number of subjects. For simplicity, here we set each subject to have the same number of voxels and conditions.",
"# create a datasets with random data\nnVox = 50 # 50 voxels/electrodes/measurement channels\nnCond = 10 # 10 conditions\nnSubj = 5 # 5 different subjects\nrandomData = np.random.rand(nConds, nChannel, nSubj)",
"We can then create a list of dataset objects by appending each dataset for each subject.",
"obs_des = {'conds': np.array(['cond_' + str(x) for x in np.arange(nCond)])}\nchn_des = {'voxels': np.array(['voxel_' + str(x) for x in np.arange(nVox)])}\n\ndata = [] # list of dataset objects\nfor i in np.arange(nSubj):\n des = {'session': 1, 'subj': i+1}\n # append the dataset object to the data list\n data.append(rsd.Dataset(measurements=randomData[:,:,0],\n descriptors=des,\n obs_descriptors=obs_des,\n channel_descriptors=chn_des\n )\n )"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
McIntyre-Lab/papers
|
fear_sem_sd_2015/scripts/dspr_ril_allele_table.ipynb
|
lgpl-3.0
|
[
"DSPR RIL Allele Table\nI have made allele tables for the DSPR founder strains, but want to look at the \nRILS and see if there is any interesting patterns. The DSPR population has 15 \nfounder strains from around the world. These were mixed for 50 generations in \ntwo subpopulations, RILs were created by inbreeding for an additional 25 \ngnerations, for a total of 1700 RILs. In other words, each RIL is a combination \nof alleles from up to 8 founder strains. \nWe are arguing that the DSPR does not add any genes to the SD pathway, because \nit has less allelic variation than the CEGS population. As reviewer 1 points \nout, it is really the combination of alleles that matters. Are genes in the SD \nmade up of a few combinations of founder alleles? Or is each RIL a unique \ncombination of alleles?\nThis notebook is trying to get to the bottom of these question.",
"%run 'ipython_startup.py'",
"Figure out RIL make up\nFirst I want to count to estimate the number of haplotypes present by looking \nat SNPs in the coding sequence. I am guessing SNP density for the RILs is low \nbecause of the expense, so later I will use King's founder assignment of 10kb \nblocks.\nRIL SNP table\nKing et al. 2014 provide SNPs for each RIL.\nAllelic varaition in the coding sequence of genes in SD\nHere I import SNP information from King et al. and pull out genomic regions \nthat correspond to coding sequence for genes in the sex hierarchy. I then count \nthe number of unique haplotypes present across the RIL population.",
"import bed as mcbed\nfrom collections import defaultdict\n\n# Import bed with CDS for genes in sd\nbedName = os.path.join(PROJ, 'exported_data/sd_coding_sequence.bed')\nbed = mcbed.Bed(bedName)\n\n# Create data frame with positions of interest (poi)\npoi = list()\nfor row in bed:\n start = min(row[1], row[2])\n end = max(row[1], row[2])\n poi.append(pd.DataFrame([(row[3], row[0], x) for x in xrange(start, end)], columns=['gene', 'chrom', 'pos']))\n\ndfPoi = pd.concat(poi)\n\n# Create a list of RILs used in the F1-hybrid population\nhybName = os.path.join(MCLAB, 'dspr_data/mel_expression_head_F/FemaleHeadExpression.txt')\nRILs = list()\nwith open(hybName, 'r') as FH:\n for row in FH:\n cols = row.strip().split('\\t')\n RILs.append(cols[0])\n RILs.append(cols[1])\n \nhybRILs = set(RILs)\n\n# Import RIL SNP calls\nfname = '/home/jfear/storage/dspr_variants/RILSNP_R2.txt'\n\nout = list()\nwith open(fname, 'r') as FH:\n for row in FH:\n chrom, pos, RILID, minor, major, minorCnt, majorCnt = row.strip().split('\\t')\n \n if RILID in hybRILs:\n # Only keep the RILs that were in the Expression experiment\n pos = int(pos)\n minorCnt = int(minorCnt)\n majorCnt = int(majorCnt)\n\n if minorCnt > 0:\n out.append((chrom, pos, RILID, minor))\n else:\n out.append((chrom, pos, RILID, major))\ndf = pd.DataFrame(out, columns=['chrom', 'pos', 'RILID', 'base'])\n\n# Merge positions of interest to SNP data\ndfFilter = df.merge(dfPoi, how='inner', on=['chrom', 'pos'])\n\n# Make wide data set of filtered data\ndfFilter.set_index(['gene', 'chrom', 'pos', 'RILID'], inplace=True)\ndfWide = dfFilter.unstack(level='RILID')\n\n# Iterate over gene and count the number of haplotypes\ngrp = dfWide.groupby(level='gene')\nout = list()\nfor name, g in grp:\n uniq = set([tuple(x) for x in g.T.values])\n out.append([name, len(uniq)])\n \ndspr_ril_cds = pd.DataFrame(out, columns=['gene', 'dspr_ril_cds'])\ndspr_ril_cds.set_index('gene', inplace=True)",
"There are a lot of missing values present in this dataset. Unfortunately, I \ndon't have easy access to the reference base, instead I will set missing values \nto the most frequent base in that row. This has a lot of caveats, but will give \nme a better estimate of haplotype frequency.",
"# Fill missing values with most frequent value\ndfFull = dfWide.apply(lambda x: x.fillna(x.mode()[0]), axis=1)\n\n# Iterate over gene and count the number of haplotypes\ngrp = dfFull.groupby(level='gene')\nout = list()\nfor name, g in grp:\n uniq = set([tuple(x) for x in g.T.values])\n out.append([name, len(uniq)])\n \ndspr_ril_cds_no_miss = pd.DataFrame(out, columns=['gene', 'dspr_ril_cds_no_miss'])\ndspr_ril_cds_no_miss.set_index('gene', inplace=True)\n\n# merge results\ndspr_ril_cds.join(dspr_ril_cds_no_miss)",
"Most genes were not present in the SNP calls. The RIL SNP density is very low. \nFru is the only gene that appears to have a signal, and its haplotype count is \nsimilar to the CEGS data. I think the other counts are underestimated.\nLooking at the King et al. paper they use a HMM with a 10kb sliding windows to \ndetermine the parent of origin. This may be a better measure.\nParent of origin for each RIL\nHere I use King's HMM data to assign parent of origin to each gene in the RIL \npopulation. I first identify which 10kb window(s) each gene in the sex \nhierarchy is in, I then call the RIL genotype based on the HMM results.",
"# Import bed with Gene Region\nbedName = os.path.join(PROJ, 'exported_data/sd_gene_sequence.bed')\nbed = mcbed.Bed(bedName)\n\n# Create data frame with positions of interest (poi). The HMM results are done on 10kb\n# chunks. Here I round the start down to the nearest 10kb and round the end up to the nearest\n# 10kb. I then figure out how many 10kb chunks that gene overlapps and split them up.\npoi = defaultdict(lambda : defaultdict(list))\nfor row in bed:\n # If on minus strand, make sure start is the smallest.\n start = min(int(row['start']), int(row['end']))\n end = max(int(row['start']), int(row['end']))\n \n # Round to nearest 10kb\n coordLow = np.floor(start / 10000.0) * 10000\n coordHigh = np.ceil(end / 10000.0) * 10000\n\n # In a dictionary relate each 10kb chunk to its overlapping genes.\n for pos in range(coordLow.astype(int), coordHigh.astype(int) + 1, 10000):\n poi[row['chrom']][pos].append(row['name'])\n\n# Parental combos from http://wfitch.bio.uci.edu/~dspr/DatFILES/Release3README.txt\ncombosA = ['A1A1','A1A2','A1A3','A1A4','A1A5','A1A6','A1A7','A1A8','A2A2','A2A3','A2A4',\n 'A2A5','A2A6','A2A7','A2A8','A3A3','A3A4','A3A5','A3A6','A3A7','A3A8','A4A4',\n 'A4A5','A4A6','A4A7','A4A8','A5A5','A5A6','A5A7','A5A8','A6A6','A6A7','A6A8',\n 'A7A7','A7A8','A8A8']\n\ncombosB = ['B1B1', 'B1B2', 'B1B3', 'B1B4', 'B1B5', 'B1B6', 'B1B7', 'B1B8', 'B2B2', 'B2B3', \n 'B2B4', 'B2B5', 'B2B6', 'B2B7', 'B2B8', 'B3B3', 'B3B4', 'B3B5', 'B3B6', 'B3B7', \n 'B3B8', 'B4B4', 'B4B5', 'B4B6', 'B4B7', 'B4B8', 'B5B5', 'B5B6', 'B5B7', 'B5B8', \n 'B6B6', 'B6B7', 'B6B8', 'B7B7', 'B7B8', 'B8B8']\n\n# Create genotype dictionary from HMM results from flyrils.org\ndef genotypeDict(fname, genotypes, combos):\n \"\"\" Parse the HMM results.\n \n Assigns founder strain combo to each gene based \n on the 10kb chunks where it is located.\n \n \"\"\" \n with open(fname, 'r') as FH:\n # Iterate over HMM file\n for row in FH:\n cols = row.strip().split('\\t')\n chrom = cols[0]\n pos = 
int(cols[1])\n \n if chrom in poi and pos in poi[chrom]:\n gene = poi[chrom][pos]\n RILID = cols[2]\n\n # What is the most-likely genotype\n best = np.argmax([float(x) for x in cols[3:39]])\n geno = combos[best]\n\n # Iterate over genes in the region and assign the genotype\n for g in gene:\n genotypes[g][RILID].add(geno)\n\n# Import HMM results population A\ngenotypesA = defaultdict(lambda : defaultdict(set))\ngenotypeDict('/home/jfear/storage/dspr_variants/HMMregA_R2.txt', genotypesA, combosA)\n\n# Import HMM results population B\ngenotypesB = defaultdict(lambda : defaultdict(set))\ngenotypeDict('/home/jfear/storage/dspr_variants/HMMregB_R2.txt', genotypesB, combosB)\n\n# Count the number of times a gene had a specific genotype.\ndef genoCnt(genotypes, combos):\n \"\"\" Count the number of times a gene had a specific genotype.\n \n For each gene, look at all of the RILS and count the number of times\n a RIL had a given genotype. \n \n \"\"\"\n out = list()\n\n for gene in genotypes.keys():\n # Create counter Series\n cnt = pd.Series(data=[0]*len(combos), index=combos)\n cnt.name = gene\n for ril in genotypes[gene]:\n for geno in genotypes[gene][ril]:\n cnt[geno] += 1\n out.append(cnt)\n return pd.concat(out, axis=1)\n\n# Plot the number of times a founder strain is ID in SD genes\n\n# Count the number of RILS had a given founder strain at a location\ngenoCountsA = genoCnt(genotypesA, combosA)\ngenoCountsB = genoCnt(genotypesB, combosB)\n\n# Plot these counts\nfig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))\n\ngenoCountsA.plot(kind='bar', stacked=True, ax=ax1, rot=90, title='Pop A', sharex=False, cmap='Set1')\nlgd = ax1.get_legend_handles_labels()\nax1.legend(lgd[0], lgd[1], loc='center', bbox_to_anchor=(1.1, 0.5), fontsize=9)\n\ngenoCountsB.plot(kind='bar', stacked=True, ax=ax2, rot=90, title='Pop B', sharex=False, cmap='Set1')\nlgd = ax2.get_legend_handles_labels()\nax2.legend(lgd[0], lgd[1], loc='center', bbox_to_anchor=(1.1, 0.5), 
fontsize=9)\n\nplt.tight_layout()",
"It looks like most genes are homozygous for one of the parental lines, this is \nnot surprising in a RIL population. When only looking at the genes in SD, there \nis not an equal frequency of use of the different parental strains. (A3A3, \nA4A4, A7A7) are the most common in the A population, while (B1B1, B2B2, B3B3, \nB6B6, B7B7) are all frequent in the B population.\nGenes are also not evenly distributed, for example most RILs had ps from (A3; \nA4, B2; B6).\nF1-hybrid parent of origin make-up",
"# Iterate over each F1-hybrid and Pull genotypes.\n\n# Create list of genes in SD\ngenes = genotypesA.keys()\n\n# Create a dictionary with F1-hybrid genotypes\nhybName = os.path.join(MCLAB, 'dspr_data/mel_expression_head_F/FemaleHeadExpression.txt')\nF1Geno = defaultdict(dict)\nwith open(hybName, 'r') as FH:\n # skip header\n FH.next()\n \n for row in FH:\n cols = row.strip().split('\\t')\n F1ID = '_'.join([str(cols[0]), str(cols[1])])\n \n for gene in genes:\n g1 = list(genotypesA[gene][cols[1]])[0]\n g2 = list(genotypesB[gene][cols[0]])[0]\n F1Geno[gene][F1ID] = (g1, g2)\n\n# For each gene create a set of pA-pB unique combinations\n\noutGeno = defaultdict(set)\noutCnt = list()\nfor gene in genes:\n for key in F1Geno[gene]:\n outGeno[gene].add(F1Geno[gene][key])\n cnt = len(outGeno[gene])\n outCnt.append((gene, cnt))\ndfF1hap = pd.DataFrame(outCnt, columns=['gene', 'haplo_cnt'])\ndfF1hap.set_index('gene', inplace=True)\ndfF1hap"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
stinebuu/nest-simulator
|
doc/userdoc/model_details/IAF_neurons_singularity.ipynb
|
gpl-2.0
|
[
"IAF neurons singularity\nThis notebook describes how NEST handles the singularities appearing in the ODE's of integrate-and-fire model neurons with alpha- or exponentially-shaped current, when the membrane and the synaptic time-constants are identical.",
"import sympy as sp\nsp.init_printing(use_latex=True)\nfrom sympy.matrices import zeros\ntau_m, tau_s, C, h = sp.symbols('tau_m, tau_s, C, h')",
"For alpha-shaped currents we have:",
"A = sp.Matrix([[-1/tau_s,0,0],[1,-1/tau_s,0],[0,1/C,-1/tau_m]])",
"Non-singular case ($\\tau_m\\neq \\tau_s$)\nThe propagator is:",
"PA = sp.simplify(sp.exp(A*h))\nPA",
"Note that the entry in the third line and the second column $A_{32}$ would also appear in the propagator matrix in case of an exponentially shaped current\nSingular case ($\\tau_m = \\tau_s$)\nWe have",
"As = sp.Matrix([[-1/tau_m,0,0],[1,-1/tau_m,0],[0,1/C,-1/tau_m]])\nAs",
"The propagator is",
"PAs = sp.simplify(sp.exp(As*h))\nPAs",
"Numeric stability of propagator elements\nFor the lines $\\tau_s\\rightarrow\\tau_m$ the entry $PA_{32}$ becomes numerically unstable, since denominator and enumerator go to zero.\n1. We show that $PAs_{32}$ is the limit of $PA_{32}(\\tau_s)$ for $\\tau_s\\rightarrow\\tau_m$.:",
"PA_32 = PA.row(2).col(1)[0]\nsp.limit(PA_32, tau_s, tau_m)",
"2. The Taylor-series up to the second order of the function $PA_{32}(\\tau_s)$ is:",
"PA_32_series = PA_32.series(x=tau_s,x0=tau_m,n=2)\nPA_32_series ",
"Therefore we have \n$T(PA_{32}(\\tau_s,\\tau_m))=PAs_{32}+PA_{32}^{lin}+O(2)$ where $PA_{32}^{lin}=h^2(-\\tau_m + \\tau_s)*exp(-h/\\tau_m)/(2C\\tau_m^2)$\n3. We define\n$dev:=|PA_{32}-PAs_{32}|$\nWe also define $PA_{32}^{real}$ which is the correct value of P32 without misscalculation (instability).\nIn the following we assume $0<|\\tau_s-\\tau_m|<0.1$. We consider two different cases\na) When $dev \\geq 2|PA_{32}^{lin}|$ we do not trust the numeric evaluation of $PA_{32}$, since it strongly deviates from the first order correction. In this case the error we make is\n$|PAs_{32}-PA_{32}^{real}|\\approx |P_{32}^{lin}|$\nb) When $dev \\le |2PA_{32}^{lin}|$ we trust the numeric evaluation of $PA_{32}$. In this case the maximal error occurs when $dev\\approx 2 PA_{32}^{lin}$ due to numeric instabilities. The order of the error is again\n$|PAs_{32}-PA_{32}^{real}|\\approx |P_{32}^{lin}|$\nThe entry $A_{31}$ is numerically unstable, too and we treat it analogously.\nTests and examples\nWe will now show that the stability criterion explained above leads to a reasonable behavior for $\\tau_s\\rightarrow\\tau_m$",
"import nest\nimport numpy as np\nimport pylab as pl",
"Neuron, simulation and plotting parameters",
"taum = 10.\nC_m = 250.\n# array of distances between tau_m and tau_ex\nepsilon_array = np.hstack(([0.],10.**(np.arange(-6.,1.,1.))))[::-1]\ndt = 0.1\nfig = pl.figure(1)\nNUM_COLORS = len(epsilon_array)\ncmap = pl.get_cmap('gist_ncar')\nmaxVs = []",
"Loop through epsilon array",
"for i,epsilon in enumerate(epsilon_array):\n nest.ResetKernel() # reset simulation kernel \n nest.SetKernelStatus({'resolution':dt})\n\n # Current based alpha neuron \n neuron = nest.Create('iaf_psc_alpha') \n neuron.set(C_m=C_m, tau_m=taum, t_ref=0., V_reset=-70., V_th=1e32,\n tau_syn_ex=taum+epsilon, tau_syn_in=taum+epsilon, I_e=0.)\n \n # create a spike generator\n spikegenerator_ex = nest.Create('spike_generator')\n spikegenerator_ex.spike_times = [50.]\n \n # create a voltmeter\n vm = nest.Create('voltmeter', params={'interval':dt})\n\n ## connect spike generator and voltmeter to the neuron\n nest.Connect(spikegenerator_ex, neuron, 'all_to_all', {'weight':100.})\n nest.Connect(vm, neuron)\n\n # run simulation for 200ms\n nest.Simulate(200.) \n\n # read out recording time and voltage from voltmeter\n times = vm.get('events','times')\n voltage = vm.get('events', 'V_m')\n \n # store maximum value of voltage trace in array\n maxVs.append(np.max(voltage))\n\n # plot voltage trace\n if epsilon == 0.:\n pl.plot(times,voltage,'--',color='black',label='singular')\n else:\n pl.plot(times,voltage,color = cmap(1.*i/NUM_COLORS),label=str(epsilon))\n\npl.legend()\npl.xlabel('time t (ms)')\npl.ylabel('voltage V (mV)')",
"Show maximum values of voltage traces",
"fig = pl.figure(2)\npl.semilogx(epsilon_array,maxVs,color='red',label='maxV')\n#show singular solution as horizontal line\npl.semilogx(epsilon_array,np.ones(len(epsilon_array))*maxVs[-1],color='black',label='singular')\npl.xlabel('epsilon')\npl.ylabel('max(voltage V) (mV)')\npl.legend()\n\npl.show()",
"The maximum of the voltage traces show that the non-singular case nicely converges to the singular one and no numeric instabilities occur."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
Dans-labs/dariah
|
static/tools/country_compose/.ipynb_checkpoints/countries-checkpoint.ipynb
|
mit
|
[
"Building the country information files\nThe DARIAH app contains a visualization of the number of member country contribution on a map.\nWe show the map using Leaflet, which loads files containing the boundaries. These files are in geojson format.\nHere we bundle all the necessary information of all European countries in one file.\nPer country that is:\n\ncountry code (ISO 2 letter)\nlatitude and longitude (the place where to put markers or other features)\ngeojson polygons, representing the boundaries\n\nWe have obtained data from the github repo\nmledoze/countries. \nWe use these files:\n\ndist/countries_unescaped.json\ndata/ccc.geo.json (where ccc is the three letter code of a country)\n\nWe have compiled manually a selection of European countries from\n\ndist/countries.csv\n\nand transformed it to the file\n\neurope_countries.csv (with only the name, the 2 letter and 3 letter codes of the country)\n\nThe bundle we are producing will be a geojson file with as little information as needed.\nWe also will round the coordinates and weed out duplicate points, in order to reduce the file size.\nNB:\nFor Kosovo we have made manual adjustments:\n\nWe downloaded a geojson file from elsewhere\nused KOS as a temporary three letter code",
"EU_FILE = 'europe_countries.csv'\nGEO_DIR = 'geojson'\nCOUNTRIES = 'all_countries.json'\nOUTFILE = '../../../client/src/js/helpers/europe.geo.js'\nCENTER_PRECISION = 1\n\nimport sys, collections, json",
"Read the list of European countries",
"eu_countries = {}\nwith open(EU_FILE) as f:\n for line in f:\n if line[0] == '#': continue\n fields = line.strip().split(';')\n if len(fields) == 3:\n (name, iso2, iso3) = fields\n eu_countries[iso2] = dict(iso3=iso3, name=name)\nfor (i, (iso2, info)) in enumerate(sorted(eu_countries.items())):\n print('{:>2} {} {} {}'.format(i+1, iso2, info['iso3'], info['name']))",
"Read and filter the country file",
"with open(COUNTRIES) as f:\n countries = json.load(f)\nprint('Total number of countries: {}'.format(len(countries)))\ni = 0\ncoord_fmt = '{{:>{}.{}f}}'.format(4+CENTER_PRECISION, CENTER_PRECISION)\npair_fmt = '({}, {})'.format(coord_fmt, coord_fmt)\nline_fmt = '{{:>2}} {{}} {} {{}}'.format(pair_fmt)\n\nfor country in countries:\n iso2 = country['cca2']\n if iso2 in eu_countries:\n i += 1\n (lat, lng) = country['latlng']\n info = eu_countries[iso2]\n info['lat'] = round(lat, CENTER_PRECISION)\n info['lng'] = round(lng, CENTER_PRECISION)\nprint('Found info for {} European countries'.format(i))\nfor (i, (iso2, info)) in enumerate(sorted(eu_countries.items())):\n print(line_fmt.format(\n i+1, iso2,\n info['lat'], info['lng'],\n info['name'],\n )) ",
"Gather the boundary information",
"def n_points(tp, data):\n if tp == 'll': return len(data)\n if tp == 'Polygon': return sum(len(ll) for ll in data)\n if tp == 'MultiPolygon': return sum(sum(len(ll) for ll in poly) for poly in data)\n return -1\n\ndef n_ll(tp, data):\n if tp == 'Polygon': return len(data)\n if tp == 'MultiPolygon': return sum(len(poly) for poly in data)\n return -1\n\nfor iso2 in eu_countries:\n info = eu_countries[iso2]\n with open('{}/{}.geo.json'.format(GEO_DIR, info['iso3'])) as f:\n geoinfo = json.load(f)\n geometry = geoinfo['features'][0]['geometry']\n info['geometry'] = geometry\n\ntotal_ng = 0\ntotal_nl = 0\ntotal_np = 0\n\nfor (i, (iso2, info)) in enumerate(sorted(eu_countries.items())):\n geo = info['geometry']\n shape = geo['type']\n data = geo['coordinates']\n ng = 1 if shape == 'Polygon' else len(data)\n np = n_points(shape, data)\n nl = n_ll(shape, data)\n total_ng += ng\n total_nl += nl\n total_np += np\n\n print('{:>2} {} {:<25} {:<15} {:>2} poly, {:>3} linear ring, {:>5} point'.format(\n i+1, iso2,\n info['name'],\n shape,\n ng, nl, np,\n )) \nprint('{:<47}{:>2} poly, {:>3} linear ring, {:>5} point'.format(\n 'TOTAL', total_ng, total_nl, total_np,\n))",
"Condense coordinates\nWe are going to reduce the information in the boundaries in a number of ways.\nA shape is organized as follows:\nMultipolygon: a set of Polygons\nPolygon: a set of linear rings\nLinear rings: a list of coordinates, of which the last is equal to the first\nCoordinate: a longitude and a latitude\nGEO_PRECISION\nFor coordinates we use a resolution of GEO_PRECISION digits behind the decimal point.\nWe round the coordinates. This may cause repetition of identical points in a shape.\nWe weed those out. We must take care that we do not weed out the first and last points.\nMIN_POINTS\nIf a linear ring has too few points, we just ignore it.\nThat is, a linear ring must have at least MIN_POINTS in order to pass.\nMAX_POINTS\nIf a linear ring has too many points, we weed them out, until there are MAX_POINTS left.\nMAX_POLY\nIf a multipolygon has too many polygons, we retain only MAX_POLY of them. We order the polygons by the number of points they contain, and we retain the richest ones.",
"# maximal\nGEO_PRECISION = 3 # number of digits in coordinates of shapes\nMIN_POINTS = 1 # minimum number of points in a linear ring\nMAX_POINTS = 500 # maximum number of points in a linear ring\nMAX_POLY = 100 # maximum number of polygons in a multipolygon\n\n# minimal\nGEO_PRECISION = 1 # number of digits in coordinates of shapes\nMIN_POINTS = 10 # minimum number of points in a linear ring\nMAX_POINTS = 12 # maximum number of points in a linear ring\nMAX_POLY = 5 # maximum number of polygons in a multipolygon\n\n# medium\nGEO_PRECISION = 1 # number of digits in coordinates of shapes\nMIN_POINTS = 15 # minimum number of points in a linear ring\nMAX_POINTS = 60 # maximum number of points in a linear ring\nMAX_POLY = 7 # maximum number of polygons in a multipolygon\n\ndef weed_ll(ll):\n new_ll = tuple(collections.OrderedDict(\n ((round(lng, GEO_PRECISION), round(lat, GEO_PRECISION)), None) for (lng, lat) in ll\n ).keys())\n if len(new_ll) > MAX_POINTS:\n new_ll = new_ll[::(int(len(new_ll) / MAX_POINTS) + 1)] \n return new_ll + (new_ll[0],)\n\ndef weed_poly(poly):\n new_poly = tuple(weed_ll(ll) for ll in poly)\n return tuple(ll for ll in new_poly if len(ll) >= MIN_POINTS)\n\ndef weed_multi(multi):\n new_multi = tuple(weed_poly(poly) for poly in multi)\n return tuple(sorted(new_multi, key=lambda poly: -n_points('Polygon', poly))[0:MAX_POLY])\n\ndef weed(tp, data):\n if tp == 'll': return weed_ll(data)\n if tp == 'Polygon': return weed_poly(data)\n if tp == 'MultiPolygon': return weed_multi(data)\n\nll = [\n [8.710255,47.696808],\n [8.709721,47.70694],\n [8.708332,47.710548],\n [8.705,47.713051],\n [8.698889,47.713608],\n [8.675278,47.712494],\n [8.670555,47.711105],\n [8.670277,47.707497],\n [8.673298,47.701771],\n [8.675554,47.697495],\n [8.678595,47.693344],\n [8.710255,47.696808],\n]\nll2 = [\n [8.710255,47.696808],\n [9.709721,47.70694],\n [10.708332,47.710548],\n [11.705,47.713051],\n [12.698889,47.713608],\n [13.675278,47.712494],\n [14.670555,47.711105],\n 
[15.670277,47.707497],\n [16.673298,47.701771],\n [17.675554,47.697495],\n [18.678595,47.693344],\n [19.710255,47.696808],\n [20.710255,47.696808],\n [8.710255,47.696808],\n]\n\npoly = [ll, ll2]\n\nprint(weed_ll(ll))\nprint('=====')\nprint(weed_ll(ll2))\nprint('=====')\nprint(weed_poly(poly))\n\nwtotal_ng = 0\nwtotal_nl = 0\nwtotal_np = 0\n\nfor (i, (iso2, info)) in enumerate(sorted(eu_countries.items())):\n geo = info['geometry']\n shape = geo['type']\n data = geo['coordinates']\n new_data = weed(shape, data)\n geo['coordinates'] = new_data\n data = new_data\n ng = 1 if shape == 'Polygon' else len(data)\n np = n_points(shape, data)\n nl = n_ll(shape, data)\n wtotal_ng += ng\n wtotal_nl += nl\n wtotal_np += np\n\n print('{:>2} {} {:<25} {:<15} {:>2} poly, {:>3} linear ring, {:>5} point'.format(\n i+1, iso2,\n info['name'],\n shape,\n ng, nl, np,\n )) \nprint('{:<47}{:>2} poly, {:>3} linear ring, {:>5} point'.format(\n 'TOTAL after weeding', wtotal_ng, wtotal_nl, wtotal_np,\n))\nprint('{:<47}{:>2} poly, {:>3} linear ring, {:>5} point'.format(\n 'TOTAL', total_ng, total_nl, total_np,\n))\nprint('{:<47}{:>2} poly, {:>3} linear ring, {:>5} point'.format(\n 'IMPROVEMENT', total_ng - wtotal_ng, total_nl - wtotal_nl, total_np - wtotal_np,\n))",
"Produce geojson file",
"features = dict(\n type='FeatureCollection',\n features=[],\n)\nfor (iso2, info) in sorted(eu_countries.items()):\n feature = dict(\n type='Feature',\n properties=dict(\n iso2=iso2,\n lng=info['lng'],\n lat=info['lat'],\n ),\n geometry=info['geometry'],\n )\n features['features'].append(feature)\n\nwith open(OUTFILE, 'w') as f:\n f.write('''\n/**\n * European country borders\n *\n * @module europe_geo_js\n */\n/**\n * Contains low resulution geographical coordinates of borders of European countries.\n * These coordinates can be drawn on a map, e.g. by [Leaflet](http://leafletjs.com).\n * \n * More information, and the computation itself is in \n * [countries.ipynb](/api/file/tools/country_compose/countries.html)\n * a Jupyer notebook that you can run for yourself, if you want to tweak the\n * resolution and precision of the border coordinates.\n */\n''')\n f.write('export const countryBorders = ')\n json.dump(features, f)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
qutip/qutip-notebooks
|
examples/qasm.ipynb
|
lgpl-3.0
|
[
"Imports and Exports QASM circuit\nNotebook Author: Sidhant Saraogi(sid1397@gmail.com)\nThis notebook introduces the OpenQASM import and export functions. It can also serve as a short introduction to the QASM format. The Quantum Assembly Language(QASM) acts as an intermediate representation for Quantum Circuits. This is one way to export/import from/to with QuTiP. In this way, we can make the QIP module of QuTiP compatible with Qiskit and Cirq.",
"from qutip_qip.qasm import read_qasm\nfrom qutip import rand_ket, tensor, basis\nfrom qutip_qip.circuit import Measurement\nimport numpy as np",
"The process is quite simple and only requires the user to store the .qasm file in an appropriate location and maintain the absolute path of the file. This will make reading the file simpler. For this demonstration, we already saved a few qasm circuit examples in the directory qasm_files. You can find more examples at the OpenQASM repository. Let's start off by reading one of the examples:",
"path = \"qasm_files/swap.qasm\"\nqasm_file = open(path, \"r\")\nprint(qasm_file.read())",
"Qasm Import\nThis QASM file imitates the SWAP gate native to QuTiP in the QASM format. To import it, we use the read_qasm function with the arguments being the file path, the mode which defaults to \"qiskit\" and the version which defaults to \"2.0\". \nWe can check that the circuit indeed implements the swap gate by checking the unitary matrix corresponding\nto the circuit. This can be done by using the gate_sequence_product function and the propagators function of the \nQubitCircuit class.",
"from qutip_qip.operations.gates import gate_sequence_product\nfrom qutip import tensor, basis\n\nqc = read_qasm(path, mode=\"qiskit\", version=\"2.0\")\ngate_sequence_product(qc.propagators()) ",
"The mode refers to the internal way in which QuTiP processes the QASM files. \nWith \"qiskit\" mode, QASM skips the include command for the file qelib1.inc and maps all custom gates defined in it to QuTiP gates without parsing the gate definitions. \nNote: \"qelib1.inc\" is a \"header\" file that contains some QASM gate definitions. It is available in the OpenQASM repository (as a standard file) and is included with QASM exports by QuTiP (and also by Qiskit/Cirq).\nThe version refers to the version of the OpenQASM standard being processed. The documentation for the same can be found in the OpenQASM repository. Currently, only OpenQASM 2.0 is supported which is the most popular QASM standard. \nQASM Export\nWe can also convert a QubitCircuit to the QASM format. This can be particularly useful when we are trying to export quantum circuits to other quantum packages such as Qiskit and Cirq. There are three different ways to output QASM files, print_qasm, str_qasm and write_qasm.",
"from qutip_qip.qasm import print_qasm\n\nprint_qasm(qc)",
"Custom Gates\nQASM also offers the option to define custom gates in terms of already defined gates using the \"gate\" keyword. In \"qiskit\" mode, our QASM interpreter can be assumed to already allow for all the gates defined in the file qelib1.inc provided by the OpenQASM repository.\nIn the file swap_custom.qasm, we define the swap gate in terms of the pre-defined cx gates.",
"path = \"qasm_files/swap_custom.qasm\"\nqasm_file = open(path, \"r\")\nprint(qasm_file.read()) ",
"Furthermore, the circuit also measures the two qubits q[0] and q[1] and stores the results in the classical registers c[0] and c[1]",
"qc = read_qasm(path)",
"We can now run the circuit to confirm that the circuit is correctly loaded and performs the correct operations. To do this, we can use the QubitCircuit.run function with the appropriate input state. In our case, we can take the state |01⟩.",
"from qutip import tensor, basis\n\nqc.run(tensor(basis(2, 0), basis(2, 1)))",
"As predicted the output is the state after swapping which is |10⟩\nMeasurements and Classical Control\nThe QASM format also allows for other circuit features such as measurement and control of gates by classical bits. \nThis is also supported by QuTiP. For an example, we can refer to the example of quantum teleportation. A more complete explanation of teleportation can be found in the notebook on quantum teleportation.",
"path = \"qasm_files/teleportation.qasm\"\nqasm_file = open(path, \"r\")\nqasm_str = qasm_file.read()\nprint(qasm_str)",
"We can also read in a QASM file from a string by specifying strmode=True to read_qasm",
"teleportation = read_qasm(qasm_str, strmode=True)",
"Note: \nThe above warning is expected to inform the user that the import from QASM to QuTiP does not retain any information about the different qubit/classical bit register names. This could potentially be an issue when the circuit is exported if the user wants to maintain the consistency. \nWe can quickly check that the teleportation circuit works properly by teleporting the first qubit into the third qubit.",
"state = tensor(rand_ket(2), basis(2, 0), basis(2, 0))\n\ninitial_measurement = Measurement(\"start\", targets=[0])\n_, initial_probabilities = initial_measurement.measurement_comp_basis(state)\n\nstate_final = teleportation.run(state)\n\nfinal_measurement = Measurement(\"start\", targets=[2])\n_, final_probabilities = final_measurement.measurement_comp_basis(state_final)\n\nnp.testing.assert_allclose(initial_probabilities, final_probabilities)",
"Note: Custom gates imported in the QASM format cannot be easily exported. Currently, only gates that are defined native to QuTiP can be exported. QuTiP also produces custom gate definitions for gates not provided in the qelib1.inc \"header\" file. In these cases, QuTiP will add its own gate definitions directly to the exported .qasm file but this is restricted only to gates already native to QuTiP. \nExport from QuTiP handles both gates and measurements. However, it does not allow for export of controlled gates.",
"from qutip.ipynbtools import version_table\nversion_table()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ml4a/ml4a-guides
|
examples/fundamentals/fundamentals.ipynb
|
gpl-2.0
|
[
"Fundamentals, introduction to machine learning\nThe purpose of these guides is to go a bit deeper into the details behind common machine learning methods, assuming little math background, and teach you how to use popular machine learning Python packages. In particular, we'll focus on the Numpy and PyTorch libraries.\nI'll assume you have some experience programming with Python -- if not, check out the initial fundamentals of Python guide or for a longer, more comprehensive resource: Learn Python the Hard Way. It will really help to illustrate the concepts introduced here.\nNumpy underlies most Python machine learning packages and is great for performing quick sketches or working through calculations. PyTorch rivals alternative libraries, such as TensorFlow, for its flexibility and ease of use. Despite the high level appearance of PyTorch, it can be quite low-level, which is great for experimenting with novel algorithms. PyTorch can seamlessly be integrated with distributed computation libraries, like Ray, to make the Kessel Run in less than 12 parsecs (citation needed). \nThese guides will present the formal math for concepts alongside Python code examples since this often (for me at least) is a lot easier to develop an intuition for. Each guide is also available as an iPython notebook for your own experimentation.\nThe guides are not meant to exhaustively cover the field of machine learning but I hope they will instill you with the confidence and knowledge to explore further on your own.\nIf you do want more details, you might enjoy my artificial intelligence notes.\nModeling the world\nYou've probably seen various machine learning algorithms pop up -- linear regression, SVMs, neural networks, random forests, etc. How are they all related? What do they have in common? What is machine learning for anyways?\nFirst, let's consider the general, fundamental problem all machine learning is concerned with, leaving aside the algorithm name soup for now. 
The primary concern of machine learning is modeling the world.\nWe can model phenomena or systems -- both natural and artificial, if you want to make that distinction -- with mathematical functions. We see something out in the world and want to describe it in some way, we want to formalize how two or more things are related, and we can do that with a function. The problem is, for a given phenomenon, how do we figure out what function to use? There are infinitely many to choose from!\nBefore this gets too abstract, let's use an example to make things more concrete.\nSay we have a bunch of data about the heights and weights of a species of deer. We want to understand how these two variables are related -- in particular, given the weight of a deer, can we predict its height?\nYou might see where this is going. The data looks like a line, and lines in general are described by functions of the form $y = mx + b$.\nRemember that lines vary depending on what the values of $m$ and $b$ are:\n\nThus $m$ and $b$ uniquely define a function -- thus they are called the parameters of the function -- and when it comes to machine learning, these parameters are what we ultimately want to learn. So when I say there are infinitely many functions to choose from, it is because $m$ and $b$ can pretty much take on any value. Machine learning techniques essentially search through these possible functions to find parameters that best fit the data you have. One way machine learning algorithms are differentiated is by how exactly they conduct this search (i.e. how they learn parameters).\nIn this case we've (reasonably) assumed the function takes the form $y = mx + b$, but conceivably you may have data that doesn't take the form of a line. Real world data is typically a lot more convoluted-looking. 
Maybe the true function has a $sin$ in it, for example.\nThis is where another main distinction between machine learning algorithms comes in -- certain algorithms can model only certain forms of functions. Linear regression, for example, can only model linear functions, as indicated by its name. Neural networks, on the other hand, are universal function approximators, which mean they can (in theory) approximate any function, no matter how exotic. This doesn't necessarily make them a better method, just better suited for certain circumstances (there are many other considerations when choosing an algorithm).\nFor now, let's return to the line function. Now that we've looked at the $m$ and $b$ variables, let's consider the input variable $x$. A function takes a numerical input; that is $x$ must be a number of some kind. That's pretty straightforward here since the deer weights are already numbers. But this is not always the case! What if we want to predict the sales price of a house. A house is not a number. We have to find a way to represent it as a number (or as several numbers, i.e. a vector, which will be detailed in a moment), e.g. by its square footage. This challenge of representation is a major part of machine learning; the practice of building representations is known as feature engineering since each variable (e.g. square footage or zip code) used for the representation is called a feature.\nIf you think about it, representation is a practice we regularly engage in. The word \"house\" is not a house any more than an image of a house is -- there is no true \"house\" anyways, it is always a constellation of various physical and nonphysical components.\nThat's about it -- broadly speaking, machine learning is basically a bunch of algorithms that learn you a function, which is to say they learn the parameters that uniquely define a function.\nVectors\nIn the line example before I mentioned that we might have multiple numbers representing an input. 
For example, a house probably can't be solely represented by its square footage -- perhaps we also want to consider how many bedrooms it has, or how high the ceilings are, or its distance from local transportation. How do we group these numbers together?\nThat's what vectors are for (they come up for many other reasons too, but we'll focus on representation for now). Vectors, along with matrices and other tensors (which will be explained a bit further down), could be considered the \"primitives\" of machine learning.\nThe Numpy library is best for dealing with vectors (and other tensors) in Python. A more complete introduction to Numpy is provided in the numpy and basic mathematics guide.\nLet's import numpy with the alias np:",
"import numpy as np",
"You may have encountered vectors before in high school or college -- to use Python terms, a vector is like a list of numbers. The mathematical notation is quite similar to Python code, e.g. [5,4], but numpy has its own way of instantiating a vector:",
"v = np.array([5, 4])",
"$$\nv = \\begin{bmatrix} 5 \\ 4 \\end{bmatrix}\n$$\nVectors are usually represented with lowercase variables.\nNote that we never specified how many numbers (also called components) a vector has - because it can have any amount. The amount of components a vector has is called its dimensionality. The example vector above has two dimensions. The vector x = [8,1,3] has three dimensions, and so on. Components are usually indicated by their index (usually using 1-indexing), e.g. in the previous vector, $x_1$ refers to the value $8$.\n\"Dimensions\" in the context of vectors is just like the spatial dimensions you spend every day in. These dimensions define a space, so a two-dimensional vector, e.g. [5,4], can describe a point in 2D space and a three-dimensional vector, e.g. [8,1,3], can describe a point in 3D space. As mentioned before, there is no limit to the amount of dimensions a vector may have (technically, there must be one or more dimensions), so we could conceivably have space consisting of thousands or tens of thousands of dimensions. At that point we can't rely on the same human intuitions about space as we could when working with just two or three dimensions. In practice, most interesting applications of machine learning deal with many, many dimensions.\nWe can get a better sense of this by plotting a vector out. For instance, a 2D vector [5,0] would look like:\n\nSo in a sense vectors can be thought of lines that \"point\" to the position they specify - here the vector is a line \"pointing\" to [5,0]. If the vector were 3D, e.g. [8,1,3], then we would have to visualize it in 3D space, and so on.\nSo vectors are great - they allow us to form logical groupings of numbers. For instance, if we're talking about cities on a map we would want to group their latitude and longitude together. We'd represent Lagos with [6.455027, 3.384082] and Beijing separately with [39.9042, 116.4074]. 
If we have an inventory of books for sale, we could represent each book with its own vector consisting of its price, number of pages, and remaining stock.\nTo use vectors in functions, there are a few mathematical operations you need to know.\nBasic vector operations\nVectors can be added (and subtracted) easily:",
"np.array([6, 2]) + np.array([-4, 4])",
"$$\n\\begin{bmatrix} 6 \\ 2 \\end{bmatrix} + \\begin{bmatrix} -4 \\ 4 \\end{bmatrix} = \\begin{bmatrix} 6 + -4 \\ 2 + 4 \\end{bmatrix} = \\begin{bmatrix} 2 \\ 6 \\end{bmatrix}\n$$\nHowever, when it comes to vector multiplication there are many different kinds.\nThe simplest is vector-scalar multiplication:",
"3 * np.array([2, 1])",
"$$\n3\\begin{bmatrix} 2 \\ 1 \\end{bmatrix} = \\begin{bmatrix} 3 \\times 2 \\ 3 \\times 1\n\\end{bmatrix} = \\begin{bmatrix} 6 \\ 3 \\end{bmatrix}\n$$\nBut when you multiply two vectors together you have a few options. I'll cover the two most important ones here.\nThe one you might have thought of is the element-wise product, also called the pointwise product, component-wise product, or the Hadamard product, typically notated with $\\odot$. This just involves multiplying the corresponding elements of each vector together, resulting in another vector:",
"np.array([6, 2]) * np.array([-4, 4])",
"$$\n\\begin{bmatrix} 6 \\ 2 \\end{bmatrix} \\odot \\begin{bmatrix} -4 \\ 4 \\end{bmatrix} = \\begin{bmatrix} 6 \\times -4 \\ 2 \\times 4 \\end{bmatrix} = \\begin{bmatrix} -24 \\ 8 \\end{bmatrix}\n$$\nThe other vector product, which you'll encounter a lot, is the dot product, also called inner product, usually notated with $\\cdot$ (though when vectors are placed side-by-side this often implies dot multiplication). This involves multiplying corresponding elements of each vector and then summing the resulting vector's components (so this results in a scalar rather than another vector).",
"np.dot(np.array([6, 2]), np.array([-4, 4]))",
"$$\n\\begin{bmatrix} 6 \\ 2 \\end{bmatrix} \\cdot \\begin{bmatrix} -4 \\ 4 \\end{bmatrix} = (6 \\times -4) + (2 \\times 4) = -16\n$$\nThe more general formulation is:",
"# a slow pure-Python dot product\ndef dot(a, b):\n assert len(a) == len(b)\n return sum(a_i * b_i for a_i, b_i in zip(a,b))",
"$$\n\\begin{aligned}\n\\vec{a} \\cdot \\vec{b} &= \\begin{bmatrix} a_1 \\ a_2 \\ \\vdots \\ a_n \\end{bmatrix} \\cdot \\begin{bmatrix} b_1 \\ b_2 \\ \\vdots \\ b_n \\end{bmatrix} = a_1b_1 + a_2b_2 + \\dots + a_nb_n \\\n&= \\sum^n_{i=1} a_i b_i\n\\end{aligned}\n$$\nNote that the vectors in these operations must have the same dimensions!\nPerhaps the most important vector operation mentioned here is the dot product. We'll return to the house example to see why. Let's say want to represent a house with three variables: square footage, number of bedrooms, and the number of bathrooms. For convenience we'll notate the variables $x_1, x_2, x_3$, respectively. We're working in three dimensions now so instead of learning a line we're learning a hyperplane (if we were working with two dimensions we'd be learning a plane, \"hyperplane\" is the term for the equivalent of a plane in higher dimensions).\nAside from the different name, the function we're learning is essentially of the same form as before, just with more variables and thus more parameters. We'll notate each parameter as $\\theta_i$ as is the convention (you may see $\\beta_i$ used elsewhere), and for the intercept (what was the $b$ term in the original line), we'll add in a dummy variable $x_0 = 1$ as is the typical practice (thus $\\theta_0$ is equivalent to $b$):",
"# this is so clumsy in python;\n# this will become more concise in a bit\ndef f(x0, x1, x2, x3, theta0, theta1, theta2, theta3):\n return theta0 * x0\\\n + theta1 * x1\\\n + theta2 * x2\\\n + theta3 * x3",
"$$\ny = \\theta_0 x_0 + \\theta_1 x_1 + \\theta_2 x_2 + \\theta_3 x_3\n$$\nThis kind of looks like the dot product, doesn't it? In fact, we can re-write this entire function as a dot product. We define our feature vector $x = [x_0, x_1, x_2, x_3]$ and our parameter vector $\\theta = [\\theta_0, \\theta_1, \\theta_2, \\theta_3]$, then re-write the function:",
"def f(x, theta):\n return x.dot(theta)",
"$$\ny = \\theta x\n$$\nSo that's how we incorporate multiple features in a representation.\nThere's a whole lot more to vectors than what's presented here, but this is the ground-level knowledge you should have of them. Other aspects of vectors will be explained as they come up.\nLearning\nSo machine learning algorithms learn parameters - how do they do it?\nHere we're focusing on the most common kind of machine learning - supervised learning. In supervised learning, the algorithm learns parameters from data which includes both the inputs and the true outputs. This data is called training data.\nAlthough they vary on specifics, there is a general approach that supervised machine learning algorithms use to learn parameters. The idea is that the algorithm takes an input example, inputs it into the current guess at the function (called the hypothesis, notate $h_{\\theta}$), and then checks how wrong its output is against the true output. The algorithm then updates its hypothesis (that is, its guesses for the parameters), accordingly.\n\"How wrong\" an algorithm is, can vary depending on the loss function it is using. The loss function takes the algorithm's current guess for the output, $\\hat y$, and the true output, $y$, and returns some value quantifying its wrongness. Certain loss functions are more appropriate for certain tasks, which we'll get into later.\nWe'll get into the specifies of how the algorithm determines what kind of update to perform (i.e. how much each parameter changes), but before we do that we should consider how we manage batches of training examples (i.e. multiple training vectors) simultaneously.\nMatrices\nMatrices are in a sense a \"vector\" of vectors. That is, where a vector can be thought of as a logical grouping of numbers, a matrix can be thought of as a logical grouping of vectors. So if a vector represents a book in our catalog (id, price, number in stock), a matrix could represent the entire catalog (each row refers to a book). 
Or if we want to represent a grayscale image, the matrix can represent the brightness values of the pixels in the image.",
"A = np.array([\n [6, 8, 0],\n [8, 2, 7],\n [3, 3, 9],\n [3, 8, 6]\n])",
"$$\n\\mathbf A =\n\\begin{bmatrix}\n6 & 8 & 0 \\\n8 & 2 & 7 \\\n3 & 3 & 9 \\\n3 & 8 & 6\n\\end{bmatrix}\n$$\nMatrices are usually represented with uppercase variables.\nNote that the \"vectors\" in the matrix must have the same dimension. The matrix's dimensions are expressed in the form $m \\times n$, meaning that there are $m$ rows and $n$ columns. So the example matrix has dimensions of $4 \\times 3$. Numpy calls these dimensions a matrix's \"shape\".\nWe can access a particular element, $A_{i,j}$, in a matrix by its indices. Say we want to refer to the element in the 2nd row and the 3rd column (remember that python uses 0-indexing):",
"A[1,2]",
"Basic matrix operations\nLike vectors, matrix addition and subtraction is straightforward (again, they must be of the same dimensions):",
"B = np.array([\n [8, 3, 7],\n [2, 9, 6],\n [2, 5, 6],\n [5, 0, 6]\n])\n\nA + B",
"$$\n\\begin{aligned}\n\\mathbf B &=\n\\begin{bmatrix}\n8 & 3 & 7 \\\n2 & 9 & 6 \\\n2 & 5 & 6 \\\n5 & 0 & 6\n\\end{bmatrix} \\\nA + B &=\n\\begin{bmatrix}\n8+6 & 3+8 & 7+0 \\\n2+8 & 9+2 & 6+7 \\\n2+3 & 5+3 & 6+9 \\\n5+3 & 0+8 & 6+6\n\\end{bmatrix} \\\n&=\n\\begin{bmatrix}\n14 & 11 & 7 \\\n10 & 11 & 13 \\\n5 & 8 & 15 \\\n8 & 8 & 12\n\\end{bmatrix} \\\n\\end{aligned}\n$$\nMatrices also have a few different multiplication operations, like vectors.\nMatrix-scalar multiplication is similar to vector-scalar multiplication - you just distribute the scalar, multiplying it with each element in the matrix.\nMatrix-vector products require that the vector has the same dimension as the matrix has columns, i.e. for an $m \\times n$ matrix, the vector must be $n$-dimensional. The operation basically involves taking the dot product of each matrix row with the vector:",
"# a slow pure-Python matrix-vector product,\n# using our previous dot product implementation\ndef matrix_vector_product(M, v):\n return [np.dot(row, v) for row in M]\n\n# or, with numpy, you could use np.matmul(A,v)",
"$$\n\\mathbf M v =\n\\begin{bmatrix}\nM_{1} \\cdot v \\\n\\vdots \\\nM_{m} \\cdot v \\\n\\end{bmatrix}\n$$\nWe have a few options when it comes to multiplying matrices with matrices.\nHowever, before we go any further we should talk about the tranpose operation - this just involves switching the columns and rows of a matrix. The transpose of a matrix $A$ is notated $A^T$:",
"A = np.array([\n [1,2,3],\n [4,5,6]\n ])\n\nnp.transpose(A)",
"$$\n\\begin{aligned}\n\\mathbf A &=\n\\begin{bmatrix}\n1 & 2 & 3 \\\n4 & 5 & 6\n\\end{bmatrix} \\\n\\mathbf A^T &=\n\\begin{bmatrix}\n1 & 4 \\\n2 & 5 \\\n3 & 6\n\\end{bmatrix}\n\\end{aligned}\n$$\nFor matrix-matrix products, the matrix on the lefthand must have the same number of columns as the righthand's rows. To be more concrete, we'll represent a matrix-matrix product as $A B$ and we'll say that $A$ has $m \\times n$ dimensions. For this operation to work, $B$ must have $n \\times p$ dimensions. The resulting product will have $m \\times p$ dimensions.",
"# a slow pure-Python matrix-matrix product\ndef matrix_matrix_product(A, B):\n _, a_cols = np.shape(A)\n b_rows, _ = np.shape(B)\n assert a_cols == b_rows\n\n result = []\n # transpose B so we can iterate over its columns\n for col in np.transpose(B):\n # using our previous implementation\n result.append(\n matrix_vector_product(A, col))\n return np.transpose(result)",
"$$\n\\mathbf AB =\n\\begin{bmatrix}\nA B^T_1 \\\n\\vdots \\\nA B^T_p\n\\end{bmatrix}^T\n$$\nFinally, like with vectors, we also have Hadamard (element-wise) products:",
"# a slow pure-Python matrix Hadamard product\n# or, with numpy, you can use A * B\ndef matrix_matrix_hadamard(A, B):\n result = []\n for a_row, b_row in zip(A, B):\n result.append(\n zip(a_i * b_i for a_i, b_i in zip(a_row, b_row)))",
"$$\n\\mathbf A \\odot B =\n\\begin{bmatrix}\nA_{1,1} B_{1,1} & \\dots & A_{1,n} B_{1,n} \\\n\\vdots & \\dots & \\vdots \\\nA_{m,1} B_{m,1} & \\dots & A_{m,n} B_{m,n}\n\\end{bmatrix}\n$$\nLike vector Hadamard products, this requires that the two matrices share the same dimensions.\nTensors\nWe've seen vectors, which is like a list of numbers, and matrices, which is like a list of a list of numbers. We can generalize this concept even further, for instance, with a list of a list of a list of numbers and so on. What all of these structures are called are tensors (i.e. the \"tensor\" in \"TensorFlow\"). They are distinguished by their rank, which, if you're thinking in the \"list of lists\" way, refers to the number of nestings. So a vector has a rank of one (just a list of numbers) and a matrix has a rank of two (a list of a list of numbers).\nAnother way to think of rank is by number of indices necessary to access an element in the tensor. An element in a vector is accessed by one index, e.g. v[i], so it is of rank one. An element in a matrix is accessed by two indices, e.g. M[i,j], so it is of rank two.\nWhy is the concept of a tensor useful? Before we referred to vectors as a logical grouping of numbers and matrices as a logical grouping of vectors. What if we need a logical grouping of matrices? That's what 3rd-rank tensors are! A matrix can represent a grayscale image, but what about a color image with three color channels (red, green, blue)? With a 3rd-rank tensor, we could represent each channel as its own matrix and group them together.\nLearning continued\nWhen the current hypothesis is wrong, how does the algorithm know how to adjust the parameters?\nLet's take a step back and look at it another way. The loss function measures the wrongness of the hypothesis $h_{\\theta}$ - another way of saying this is the loss function is a function of the parameters $\\theta$. 
So we could notate it as $L(\\theta)$.\nThe minimum of $L(\\theta)$ is the point where the parameters guess $\\theta$ is least wrong (at best, $L(\\theta) = 0$, i.e. a perfect score, though this is not always good, as will be explained later); i.e. the best guess for the parameters.\nSo the algorithm learns the best-fitting function by minimizing its loss function. That is, we can frame this as an optimization problem.\nThere are many techniques to solve an optimization problem - sometimes they can be solved analytically (i.e. by moving around variables and isolating the one you want to solve for), but more often than not we must solve them numerically, i.e. by guessing a lot of different values - but not randomly!\nThe prevailing technique now is called gradient descent, and to understand how it works, we have to understand derivatives.\nDerivatives\nDerivatives are everywhere in machine learning, so it's worthwhile become a bit familiar with them. I won't go into specifics on differentiation (how to calculate derivatives) because now we're spoiled with automatic differentiation, but it's still good to have a solid intuition about derivatives themselves.\nA derivative expresses a rate of (instantaneous) change - they are always about how one variable quantity changes with respect to another variable quantity. That's basically all there is to it. For instance, velocity is a derivative which expresses how position changes with respect to time. Another interpretation, which is more relevant to machine learning, is that a derivative tells us how to change one variable to achieve a desired change in the other variable. Velocity, for instance, tells us how to change position by \"changing\" time.\nTo get a better understanding of instantaneous change, consider a cyclist, cycling on a line. We have data about their position over time. 
We could calculate an average velocity over the data's entire time period, but we typically prefer to know the velocity at any given moment (i.e. at any instant).\nLet's get more concrete first. Let's say we have data for $n$ seconds, i.e. from $t_0$ to $t_n$ seconds, and the position at any given second $i$ is $p_i$. If we wanted to get the rate of change in position over the entire time interval, we'd just do:",
"positions = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, # moving forward\n 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, # pausing\n 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] # moving backwards\nt_0 = 0\nt_n = 29\n(positions[t_n] - positions[t_0])/t_n",
"$$\nv = \\frac{p_n - p_0}{n}\n$$\nThis kind of makes it look like the cyclist didn't move at all. It would probably be more useful to identify the velocity at a given second $t$. Thus we want to come up with some function $v(t)$ which gives us the velocity at some second $t$. We can apply the same approach we just used to get the velocity over the entire time interval, but we focus on a shorter time interval instead. To get the instantaneous change at $t$ we just keep reducing the interval we look at until it is basically 0.\nDerivatives have a special notation. A derivative of a function $f(x)$ with respect to a variable $x$ is notated:\n$$\n\\frac{\\delta f(x)}{\\delta x}\n$$\nSo if position is a function of time, e.g. $p = f(t)$, then velocity can be represented as $\\frac{\\delta p}{\\delta t}$. To drive the point home, this derivative is also a function of time (derivatives are functions of what their \"with respect to\" variable is).\nSince we are often computing derivatives of a function with respect to its input, a shorthand for the derivative of a function $f(x)$ with respect to $x$ can also be notated $f'(x)$.\nThe Chain Rule\nA very important property of derivatives is the chain rule (there are other \"chain rules\" throughout mathematics, if we want to be specific, this is the \"chain rule of derivatives\"). The chain rule is important because it allows us to take complicated nested functions and more manageably differentiate them.\nLet's look at an example to make this concrete:",
"def g(x):\n return x**2\n\ndef h(x):\n return x**3\n\ndef f(x):\n return g(h(x))\n\n# derivatives\ndef g_(x):\n return 2*x\n\ndef h_(x):\n return 3*(x**2)",
"$$\n\\begin{aligned}\ng(x) &= x^2 \\\nh(x) &= x^3 \\\nf(x) &= g(h(x)) \\\ng'(x) &= 2x \\\nh'(x) &= 3x^2\n\\end{aligned}\n$$\nWe're interested in understanding how $f(x)$ changes with respect to $x$, so we want to compute the derivative of $f(x)$. The chain rule allows us to individually differentiate the component functions of $f(x)$ and multiply those to get $f'(x)$:",
"def f_(x):\n return g_(x) * h_(x)",
"$$\n\\frac{df}{dx} = \\frac{dg}{dh} \\frac{dh}{dx}\n$$\nThis example is a bit contrived (there is a very easy way to differentiate this particular example that doesn't involve the chain rule) but if $g(x)$ and $h(x)$ were really nasty functions, the chain rule makes them quite a lot easier to deal with.\nThe chain rule can be applied to nested functions ad nauseaum! You can apply it to something crazy like $f(g(h(u(q(p(x))))))$. In fact, with deep neural networks, you are typically dealing with function compositions even more gnarly than this, so the chain rule is cornerstone there.\nPartial derivatives and gradients\nThe functions we've looked at so far just have a single input, but you can imagine many scenarios where you'd want to work with functions with some arbitrary number of inputs (i.e. a multivariable function), like $f(x,y,z)$.\nHere's where partial deriatives come into play. Partial derivatives are just like regular derivatives except we use them for multivariable functions; it just means we only differentiate with respect to one variable at a time. So for $f(x,y,z)$, we'd have a partial derivative with respect to $x$, i.e. $\\frac{\\partial f}{\\partial x}$ (note the slightly different notation), one with respect to $y$, i.e. $\\frac{\\partial f}{\\partial y}$, and one with respect to $z$, i.e. $\\frac{\\partial f}{\\partial z}$.\nThat's pretty simple! But it would be useful to group these partial derivatives together in some way. If we put these partial derivatives together in a vector, the resulting vector is the gradient of $f$, notated $\\nabla f$ (the symbol is called \"nabla\").\nHigher-order derivatives\nWe saw that velocity is the derivative of position because it describes how position changes over time. Acceleration similarly describes how velocity changes over time, so we'd say that acceleration is the derivative of velocity. 
We can also say that acceleration is the second-order derivative of position (that is, it is the derivative of its derivative).\nThis is the general idea behind higher-order derivatives.\nGradient descent\nOnce you understand derivatives, gradient descent is really, really simple. The basic idea is that we use the derivative of the loss $L(\\theta)$ with respect to $\\theta$ and figure out which way the loss is decreasing, then \"move\" the parameter guess in that direction."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
mne-tools/mne-tools.github.io
|
0.13/_downloads/plot_movement_compensation.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"Maxwell filter data with movement compensation\nDemonstrate movement compensation on simulated data. The simulated data\ncontains bilateral activation of auditory cortices, repeated over 14\ndifferent head rotations (head center held fixed). See the following for\ndetails:\nhttps://github.com/mne-tools/mne-misc-data/blob/master/movement/simulate.py",
"# Authors: Eric Larson <larson.eric.d@gmail.com>\n#\n# License: BSD (3-clause)\n\nfrom os import path as op\n\nimport mne\nfrom mne.preprocessing import maxwell_filter\n\nprint(__doc__)\n\ndata_path = op.join(mne.datasets.misc.data_path(verbose=True), 'movement')\n\npos = mne.chpi.read_head_pos(op.join(data_path, 'simulated_quats.pos'))\nraw = mne.io.read_raw_fif(op.join(data_path, 'simulated_movement_raw.fif'))\nraw_stat = mne.io.read_raw_fif(op.join(data_path,\n 'simulated_stationary_raw.fif'))",
"Process our simulated raw data (taking into account head movements)",
"# extract our resulting events\nevents = mne.find_events(raw, stim_channel='STI 014')\nevents[:, 2] = 1\nraw.plot(events=events)\n\ntopo_kwargs = dict(times=[0, 0.1, 0.2], ch_type='mag', vmin=-500, vmax=500)\n\n# 0. Take average of stationary data (bilateral auditory patterns)\nevoked_stat = mne.Epochs(raw_stat, events, 1, -0.2, 0.8).average()\nevoked_stat.plot_topomap(title='Stationary', **topo_kwargs)\n\n# 1. Take a naive average (smears activity)\nevoked = mne.Epochs(raw, events, 1, -0.2, 0.8).average()\nevoked.plot_topomap(title='Moving: naive average', **topo_kwargs)\n\n# 2. Use raw movement compensation (restores pattern)\nraw_sss = maxwell_filter(raw, head_pos=pos)\nevoked_raw_mc = mne.Epochs(raw_sss, events, 1, -0.2, 0.8).average()\nevoked_raw_mc.plot_topomap(title='Moving: movement compensated', **topo_kwargs)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
NYUDataBootcamp/Materials
|
Code/notebooks/bootcamp_exam_practice_answerkey.ipynb
|
mit
|
[
"Data Bootcamp: Exam practice & review (answers)\nWe review the material we've covered to date: Python fundamentals, data input with Pandas, and graphics with Matplotlib. Questions marked Bonus are more difficult and are there to give the experts something to do. \nThis IPython notebook was created by Dave Backus, Chase Coleman, and Spencer Lyon for the NYU Stern course Data Bootcamp. \nThis version was modified by (add your name in bold here). And add your initials to the notebook's name at the top. \nPreliminaries\nImport packages and check the date.",
"# import packages \nimport pandas as pd # data management\nimport matplotlib.pyplot as plt # graphics \n\n# IPython command, puts plots in notebook \n%matplotlib inline\n\n# check Python version \nimport datetime as dt \nimport sys\nprint('Today is', dt.date.today())\nprint('What version of Python are we running? \\n', sys.version, sep='') ",
"IPython review\nWe review some of the basics of IPython. You won't be asked about IPython on the exam, but since the exam is an IPython notebook, it's essential for you to be able to work with one. \nQuestion 1. \n\nHow do you set/choose the current cell? \nHow do you edit the current cell? \nHow do you add a new cell below the current cell? \nHow do you specify the current cell as code or text? \nHow do you delete the current cell? \nHow do you move the current cell up or down? \nHow do you run the current cell? \nAdd your name in bold to the bottom of the first cell in this notebook. Bonus: Add a link to your LinkedIn or Facebook page. \nHow do you save the contents of your notebook? \n\nAnswers. Enter your answers below: \n\nclick on it\nclick again\nclick on the plus (+) at the top\nchoose the appropriate one in the menu below Help at the top\nchoose the cell and click on the scirrors at the top\nchoose the cell and click on the up or down arrow at the top\ntwo ways: click on the run cell icon at the top, or shift-enter\n**name** \ntwo ways: File, Save and Checkpoint, or cntl-S. \n\nPython fundamentals\nQuestion 2. Describe the type and content of these expressions: \n\nx = 2 \ny = 3.0\nz = \"3.0\"\nx/y\nletters = 'abcd'\nletters[-1]\nxyz = [x, y, z]\nxyz[1]\nabcd = list(letters)\nabcd[-2]\ncase = {'a': 'A', 'b': 'B', 'c': 'C'} \ncase['c'] \n2 >= 1\nx == 2 \n\nAnswers. Enter your answers below: \nBy content and type we mean the content of the variable or expression and its type as return by the type() function.\n\ncontent 2, type int\ncontent 3.0, type float\ncontent '3.0', type str\ncontent 0.66666, type float\ncontent 'abcd', type str\ncontent 'd', type str\ncontent [x, y, z], type list\ncontent y=3.0, type float \ncontent ['a', 'b', 'c', 'd'], type list \ncontent 'c', type str\ncontent as stated, type dictionary or dict\ncontent 'C', type str\ncontent True, type bool\ncontent True, type bool",
"# code cell for experimenting \n",
"Question 3. These get progressively more difficult: \n\nWhat type is dollars = '$1,234.5'?\nFind and apply a method that eliminates the dollar sign from dollars. \nFind and apply a method that eliminates the comma from dollars. \nEliminate both the dollar sign and comma from dollars and covert the result to a float.\nCombine the last three steps in one line. \n\nIn each case, create a code cell that delivers the answer. Please write the question number in a comment in each cell.",
"dollars = '$1,234.5'\ntype(dollars)\n\ndollars = dollars.replace('$','')\ndollars\n\ndollars = dollars.replace(',','')\ndollars\n\ndollars = float(dollars)\ndollars\n\n# we can glue the pieces together \ndollars = '$1,234.5'\ndollars = float(dollars.replace('$','').replace(',',''))\ndollars",
"Question 4. \nFor this problem we set letters = 'abcd' as in problem 2. \n\nFind and apply a method that converts the lower case letter 'a' to the upper case letter 'A'. \nWrite a loop that goes through the elements of letters and prints their upper case versions.\nBonus: Write a loop that goes through the elements of letters. On each interation, print a string consisting of the upper and lower case versions together; eg, 'Aa'. \n\nIn each case, create a code cell that delivers the answer. Please write the question number in a comment in each cell.",
"'a'.upper()\n\nletters = 'abcd'\nfor letter in letters:\n print(letter.upper())\n\nletters = 'abcd'\nfor letter in letters:\n print(letter.upper()+letter)",
"Question 5.\nFor this problem xyz is the same as defined in problem 2\n\nWrite a loop that goes through the elements of xyz and prints them.\nModify the loop to print both the elements of xyz and their type. \nModify the loop to print only those elements that are not strings. \n\nIn each case, create a code cell that delivers the answer. Please write the question number in a comment in each cell.",
"xyz = [2, 3.0, 2/3.0]\n\nfor item in xyz:\n print(item)\n\nfor item in xyz:\n print(item, type(item))\n\nfor item in xyz:\n if type(item) != str:\n print(item, type(item))",
"Data input with Pandas\nWe explore the public indebtedness of Argentina (country code ARG), Germany (DEU), and Greece (GRC). For each one, we provide the ratio of government debt to GDP for every second year starting in 2002. The data come from the IMF's World Economic Outlook.\nQuestion 6. Write code in the cell below that reads the csv file we posted at\nhttp://pages.stern.nyu.edu/~dbackus/Data/debt.csv \nAssign the contents of the file to the object debt. \nThe rest of the questions in this notebook will refer to the object debt you create below.",
"url = 'http://pages.stern.nyu.edu/~dbackus/Data/debt.csv'\ndebt = pd.read_csv(url)\ndebt.tail(3)\n\n# if that failed, you can generate the same data with \ndata = {'ARG': [137.5, 106.0, 61.8, 47.0, 39.1, 37.3, 48.6], \n 'DEU': [59.2, 64.6, 66.3, 64.9, 80.3, 79.0, 73.1], \n 'GRC': [98.1, 94.9, 102.9, 108.8, 145.7, 156.5, 177.2],\n 'Year': [2002, 2004, 2006, 2008, 2010, 2012, 2014]} \ndebt = pd.DataFrame(data)",
"Question 7. Let's describe the object debt: \n\nWhat type of object is debt?\nWhat are its dimensions?\nWhat are its column labels? Row labels?\nWhat dtypes are the columns? \n\nIn each case, create a code cell that delivers the answer. Please write the question number in a comment in each cell.",
"type(debt)\n\ndebt.shape\n\ndebt.columns\n\ndebt.index\n\ndebt.dtypes",
"Question 8. Do the following with debt: \n\nSet Year as the index. \nChange the column labels from country codes to country names. Do this using both a dictionary and a list.\nPrint the result to verify your changes. \n\nThe next three get progressively more difficult: \n\nCompute the mean (average) debt for each country.\nBonus: Compute the mean debt for each year.\nBonus: Compute the mean debt over both countries and years. \n\nSome simple plots: \n\nPlot each country's debt against Year using a plot method. \nChange the linewidth to 2. \n\nIn each case, create a code cell that delivers the answer. Please write the question number in a comment in each cell.",
"debt = debt.set_index('Year')\n\nrn = {\"ARG\": \"Argentina\", \"DEU\": \"Germany\", \"GRC\": \"Greece\"}\ndebt.rename(columns=rn)\n\ndebt.columns = ['Argentina', 'Germany', 'Greece']\ndebt\n\ndebt.mean()\n\ndebt.mean(axis=1)\n\ndebt.mean().mean()",
"Python graphics with Matplotlib\nWe'll continue to use the data in debt. Make sure the index is the year. \nQuestion 9. \n\nCreate figure and axis objects with plt.subplots(). \nGraph public indebtedness over time using our debt data and the axis object we just created. \nChange the line width to 2.\nChange the colors to ['red', 'green', 'blue']. \nChange the lower limit on the y axis to zero. \nAdd a title to the graph. \nAdd a label to the y axis -- something like \"Public Debt (% of GDP)\". \nBonus: Make the line for Argentina thicker than the others. Hint: Do this by plotting a separate line applied to the same axis object. \n\nIn each case, create a code cell that delivers the answer. Please write the question number in a comment in each cell.",
"fig, ax = plt.subplots()\ndebt.plot(ax=ax, \n linewidth=2, \n color=['red', 'green', 'blue'])\nax.set_ylim(0)\nax.set_title('Public debt')\nax.set_ylabel('Public Debt (% of GDP)')\ndebt['Argentina'].plot(ax=ax, linewidth=4, color='red')",
"Optional challenging questions\nGood practice, but more than you'll see on the exam. \nQuestion 10. In the figure of the previous question: \n\nAdd a title, 14-point font, right-justified. \nPut the legend in the lower left corner. \nChange the line style to dashed. (This will take some Googling, or a good guess.)\nEliminate the top and right \"spines,\" the lines that outline the figure. [This doesn't make sense with the 538 style, which eliminates all the spines.] \nSave the figure as a pdf file. \nChange the style to 538.",
"plt.style.use('fivethirtyeight')\nfig, ax = plt.subplots()\ndebt.plot(ax=ax, \n linewidth=2, \n linestyle='dashed',\n color=['red', 'green', 'blue'])\nax.set_title('Public debt', fontsize=14, loc='right')\nax.set_ylabel('Public Debt (% of GDP)')\nax.legend(loc='lower left')\nfig.savefig('debt.pdf')",
"Question 11. We ran across this one in the OECD healthcare data. The country names had numbers appended, which served as footnotes in the original spreadsheet but looked dumb when we used them as index labels. The question is how to eliminate them. A short version of the country names is \nnames = ['Australia 1', 'Canada 2', 'Chile 3', 'United States 1']\nDo each of these in a separate code cell: \n\nApply the rsplit() method to us = names[-1]. What do you get?\nConsult the documentation for rsplit to split us into two pieces, the country name and the number 1. How would you extract just the country name?\nUse a loop to strip the numbers from all of the elements of names.\nUse a list comprehension to strip the numbers from all of the elements of names. \n\nHints. rsplit means split from the right. One input is the number of splits.",
"names = ['Australia 1', 'Canada 2', 'Chile 3', 'United States 1']\n\nnames[-1].rsplit()\n\nnames[-1].rsplit(maxsplit=1)\n\n# apologies, this is harder than we thought\nfor n in range(len(names)):\n item = names[n]\n names[n] = item.rsplit(maxsplit=1)[0]\n \nprint(names) \n\n# this one's easier \nnames = ['Australia 1', 'Canada 2', 'Chile 3', 'United States 1']\n[item.rsplit(maxsplit=1)[0] for item in names] "
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ES-DOC/esdoc-jupyterhub
|
notebooks/cas/cmip6/models/sandbox-2/atmos.ipynb
|
gpl-3.0
|
[
"ES-DOC CMIP6 Model Properties - Atmos\nMIP Era: CMIP6\nInstitute: CAS\nSource ID: SANDBOX-2\nTopic: Atmos\nSub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos. \nProperties: 156 (127 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:45\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook",
"# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'cas', 'sandbox-2', 'atmos')",
"Document Authors\nSet document authors",
"# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Contributors\nSpecify document contributors",
"# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Publication\nSpecify document publication status",
"# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)",
"Document Table of Contents\n1. Key Properties --> Overview\n2. Key Properties --> Resolution\n3. Key Properties --> Timestepping\n4. Key Properties --> Orography\n5. Grid --> Discretisation\n6. Grid --> Discretisation --> Horizontal\n7. Grid --> Discretisation --> Vertical\n8. Dynamical Core\n9. Dynamical Core --> Top Boundary\n10. Dynamical Core --> Lateral Boundary\n11. Dynamical Core --> Diffusion Horizontal\n12. Dynamical Core --> Advection Tracers\n13. Dynamical Core --> Advection Momentum\n14. Radiation\n15. Radiation --> Shortwave Radiation\n16. Radiation --> Shortwave GHG\n17. Radiation --> Shortwave Cloud Ice\n18. Radiation --> Shortwave Cloud Liquid\n19. Radiation --> Shortwave Cloud Inhomogeneity\n20. Radiation --> Shortwave Aerosols\n21. Radiation --> Shortwave Gases\n22. Radiation --> Longwave Radiation\n23. Radiation --> Longwave GHG\n24. Radiation --> Longwave Cloud Ice\n25. Radiation --> Longwave Cloud Liquid\n26. Radiation --> Longwave Cloud Inhomogeneity\n27. Radiation --> Longwave Aerosols\n28. Radiation --> Longwave Gases\n29. Turbulence Convection\n30. Turbulence Convection --> Boundary Layer Turbulence\n31. Turbulence Convection --> Deep Convection\n32. Turbulence Convection --> Shallow Convection\n33. Microphysics Precipitation\n34. Microphysics Precipitation --> Large Scale Precipitation\n35. Microphysics Precipitation --> Large Scale Cloud Microphysics\n36. Cloud Scheme\n37. Cloud Scheme --> Optical Cloud Properties\n38. Cloud Scheme --> Sub Grid Scale Water Distribution\n39. Cloud Scheme --> Sub Grid Scale Ice Distribution\n40. Observation Simulation\n41. Observation Simulation --> Isscp Attributes\n42. Observation Simulation --> Cosp Attributes\n43. Observation Simulation --> Radar Inputs\n44. Observation Simulation --> Lidar Inputs\n45. Gravity Waves\n46. Gravity Waves --> Orographic Gravity Waves\n47. Gravity Waves --> Non Orographic Gravity Waves\n48. Solar\n49. Solar --> Solar Pathways\n50. Solar --> Solar Constant\n51. 
Solar --> Orbital Parameters\n52. Solar --> Insolation Ozone\n53. Volcanos\n54. Volcanos --> Volcanoes Treatment \n1. Key Properties --> Overview\nTop level key properties\n1.1. Model Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.2. Model Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.3. Model Family\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of atmospheric model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_family') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"AGCM\" \n# \"ARCM\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"1.4. Basic Approximations\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nBasic approximations made in the atmosphere.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"primitive equations\" \n# \"non-hydrostatic\" \n# \"anelastic\" \n# \"Boussinesq\" \n# \"hydrostatic\" \n# \"quasi-hydrostatic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"2. Key Properties --> Resolution\nCharacteristics of the model resolution\n2.1. Horizontal Resolution Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2.2. Canonical Horizontal Resolution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2.3. Range Horizontal Resolution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nRange of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2.4. Number Of Vertical Levels\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nNumber of vertical levels resolved on the computational grid.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"2.5. High Top\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.high_top') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"3. Key Properties --> Timestepping\nCharacteristics of the atmosphere model time stepping\n3.1. Timestep Dynamics\nIs Required: TRUE Type: STRING Cardinality: 1.1\nTimestep for the dynamics, e.g. 30 min.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.2. Timestep Shortwave Radiative Transfer\nIs Required: FALSE Type: STRING Cardinality: 0.1\nTimestep for the shortwave radiative transfer, e.g. 1.5 hours.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.3. Timestep Longwave Radiative Transfer\nIs Required: FALSE Type: STRING Cardinality: 0.1\nTimestep for the longwave radiative transfer, e.g. 3 hours.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4. Key Properties --> Orography\nCharacteristics of the model orography\n4.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTime adaptation of the orography.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.orography.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"present day\" \n# \"modified\" \n# TODO - please enter value(s)\n",
"4.2. Changes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nIf the orography type is modified describe the time adaptation changes.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.orography.changes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"related to ice sheets\" \n# \"related to tectonics\" \n# \"modified mean\" \n# \"modified variance if taken into account in model (cf gravity waves)\" \n# TODO - please enter value(s)\n",
"5. Grid --> Discretisation\nAtmosphere grid discretisation\n5.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of grid discretisation in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6. Grid --> Discretisation --> Horizontal\nAtmosphere discretisation in the horizontal\n6.1. Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal discretisation type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"spectral\" \n# \"fixed grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"6.2. Scheme Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal discretisation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"finite elements\" \n# \"finite volumes\" \n# \"finite difference\" \n# \"centered finite difference\" \n# TODO - please enter value(s)\n",
"6.3. Scheme Order\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal discretisation function order",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"second\" \n# \"third\" \n# \"fourth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"6.4. Horizontal Pole\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nHorizontal discretisation pole singularity treatment",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"filter\" \n# \"pole rotation\" \n# \"artificial island\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"6.5. Grid Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal grid type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Gaussian\" \n# \"Latitude-Longitude\" \n# \"Cubed-Sphere\" \n# \"Icosahedral\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"7. Grid --> Discretisation --> Vertical\nAtmosphere discretisation in the vertical\n7.1. Coordinate Type\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nType of vertical coordinate system",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"isobaric\" \n# \"sigma\" \n# \"hybrid sigma-pressure\" \n# \"hybrid pressure\" \n# \"vertically lagrangian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8. Dynamical Core\nCharacteristics of the dynamical core\n8.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of atmosphere dynamical core",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.2. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the dynamical core of the model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.3. Timestepping Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTimestepping framework type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.timestepping_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Adams-Bashforth\" \n# \"explicit\" \n# \"implicit\" \n# \"semi-implicit\" \n# \"leap frog\" \n# \"multi-step\" \n# \"Runge Kutta fifth order\" \n# \"Runge Kutta second order\" \n# \"Runge Kutta third order\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8.4. Prognostic Variables\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nList of the model prognostic variables",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"surface pressure\" \n# \"wind components\" \n# \"divergence/curl\" \n# \"temperature\" \n# \"potential temperature\" \n# \"total water\" \n# \"water vapour\" \n# \"water liquid\" \n# \"water ice\" \n# \"total water moments\" \n# \"clouds\" \n# \"radiation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9. Dynamical Core --> Top Boundary\nType of boundary layer at the top of the model\n9.1. Top Boundary Condition\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTop boundary condition",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sponge layer\" \n# \"radiation boundary condition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9.2. Top Heat\nIs Required: TRUE Type: STRING Cardinality: 1.1\nTop boundary heat treatment",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9.3. Top Wind\nIs Required: TRUE Type: STRING Cardinality: 1.1\nTop boundary wind treatment",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"10. Dynamical Core --> Lateral Boundary\nType of lateral boundary condition (if the model is a regional model)\n10.1. Condition\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nType of lateral boundary condition",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sponge layer\" \n# \"radiation boundary condition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"11. Dynamical Core --> Diffusion Horizontal\nHorizontal diffusion scheme\n11.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nHorizontal diffusion scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"11.2. Scheme Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal diffusion scheme method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"iterated Laplacian\" \n# \"bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12. Dynamical Core --> Advection Tracers\nTracer advection scheme\n12.1. Scheme Name\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nTracer advection scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heun\" \n# \"Roe and VanLeer\" \n# \"Roe and Superbee\" \n# \"Prather\" \n# \"UTOPIA\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12.2. Scheme Characteristics\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nTracer advection scheme characteristics",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Eulerian\" \n# \"modified Euler\" \n# \"Lagrangian\" \n# \"semi-Lagrangian\" \n# \"cubic semi-Lagrangian\" \n# \"quintic semi-Lagrangian\" \n# \"mass-conserving\" \n# \"finite volume\" \n# \"flux-corrected\" \n# \"linear\" \n# \"quadratic\" \n# \"quartic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12.3. Conserved Quantities\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nTracer advection scheme conserved quantities",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"dry mass\" \n# \"tracer mass\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12.4. Conservation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTracer advection scheme conservation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"conservation fixer\" \n# \"Priestley algorithm\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13. Dynamical Core --> Advection Momentum\nMomentum advection scheme\n13.1. Scheme Name\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nMomentum advection schemes name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"VanLeer\" \n# \"Janjic\" \n# \"SUPG (Streamline Upwind Petrov-Galerkin)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.2. Scheme Characteristics\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nMomentum advection scheme characteristics",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"2nd order\" \n# \"4th order\" \n# \"cell-centred\" \n# \"staggered grid\" \n# \"semi-staggered grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.3. Scheme Staggering Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMomentum advection scheme staggering type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Arakawa B-grid\" \n# \"Arakawa C-grid\" \n# \"Arakawa D-grid\" \n# \"Arakawa E-grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.4. Conserved Quantities\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nMomentum advection scheme conserved quantities",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Angular momentum\" \n# \"Horizontal momentum\" \n# \"Enstrophy\" \n# \"Mass\" \n# \"Total energy\" \n# \"Vorticity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.5. Conservation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMomentum advection scheme conservation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"conservation fixer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14. Radiation\nCharacteristics of the atmosphere radiation process\n14.1. Aerosols\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nAerosols whose radiative effect is taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.aerosols') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sulphate\" \n# \"nitrate\" \n# \"sea salt\" \n# \"dust\" \n# \"ice\" \n# \"organic\" \n# \"BC (black carbon / soot)\" \n# \"SOA (secondary organic aerosols)\" \n# \"POM (particulate organic matter)\" \n# \"polar stratospheric ice\" \n# \"NAT (nitric acid trihydrate)\" \n# \"NAD (nitric acid dihydrate)\" \n# \"STS (supercooled ternary solution aerosol particle)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15. Radiation --> Shortwave Radiation\nProperties of the shortwave radiation scheme\n15.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of shortwave radiation in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15.2. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15.3. Spectral Integration\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nShortwave radiation scheme spectral integration",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"wide-band model\" \n# \"correlated-k\" \n# \"exponential sum fitting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15.4. Transport Calculation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nShortwave radiation transport calculation methods",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"two-stream\" \n# \"layer interaction\" \n# \"bulk\" \n# \"adaptive\" \n# \"multi-stream\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15.5. Spectral Intervals\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nShortwave radiation scheme number of spectral intervals",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"16. Radiation --> Shortwave GHG\nRepresentation of greenhouse gases in the shortwave radiation scheme\n16.1. Greenhouse Gas Complexity\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nComplexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CO2\" \n# \"CH4\" \n# \"N2O\" \n# \"CFC-11 eq\" \n# \"CFC-12 eq\" \n# \"HFC-134a eq\" \n# \"Explicit ODSs\" \n# \"Explicit other fluorinated gases\" \n# \"O3\" \n# \"H2O\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"16.2. ODS\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nOzone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CFC-12\" \n# \"CFC-11\" \n# \"CFC-113\" \n# \"CFC-114\" \n# \"CFC-115\" \n# \"HCFC-22\" \n# \"HCFC-141b\" \n# \"HCFC-142b\" \n# \"Halon-1211\" \n# \"Halon-1301\" \n# \"Halon-2402\" \n# \"methyl chloroform\" \n# \"carbon tetrachloride\" \n# \"methyl chloride\" \n# \"methylene chloride\" \n# \"chloroform\" \n# \"methyl bromide\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"16.3. Other Fluorinated Gases\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nOther fluorinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"HFC-134a\" \n# \"HFC-23\" \n# \"HFC-32\" \n# \"HFC-125\" \n# \"HFC-143a\" \n# \"HFC-152a\" \n# \"HFC-227ea\" \n# \"HFC-236fa\" \n# \"HFC-245fa\" \n# \"HFC-365mfc\" \n# \"HFC-43-10mee\" \n# \"CF4\" \n# \"C2F6\" \n# \"C3F8\" \n# \"C4F10\" \n# \"C5F12\" \n# \"C6F14\" \n# \"C7F16\" \n# \"C8F18\" \n# \"c-C4F8\" \n# \"NF3\" \n# \"SF6\" \n# \"SO2F2\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17. Radiation --> Shortwave Cloud Ice\nShortwave radiative properties of ice crystals in clouds\n17.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral shortwave radiative interactions with cloud ice crystals",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of cloud ice crystals in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bi-modal size distribution\" \n# \"ensemble of ice crystals\" \n# \"mean projected area\" \n# \"ice water path\" \n# \"crystal asymmetry\" \n# \"crystal aspect ratio\" \n# \"effective crystal radius\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to cloud ice crystals in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18. Radiation --> Shortwave Cloud Liquid\nShortwave radiative properties of liquid droplets in clouds\n18.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral shortwave radiative interactions with cloud liquid droplets",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of cloud liquid droplets in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud droplet number concentration\" \n# \"effective cloud droplet radii\" \n# \"droplet size distribution\" \n# \"liquid water path\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to cloud liquid droplets in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"geometric optics\" \n# \"Mie theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"19. Radiation --> Shortwave Cloud Inhomogeneity\nCloud inhomogeneity in the shortwave radiation scheme\n19.1. Cloud Inhomogeneity\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod for taking into account horizontal cloud inhomogeneity",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Monte Carlo Independent Column Approximation\" \n# \"Triplecloud\" \n# \"analytic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20. Radiation --> Shortwave Aerosols\nShortwave radiative properties of aerosols\n20.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral shortwave radiative interactions with aerosols",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of aerosols in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"number concentration\" \n# \"effective radii\" \n# \"size distribution\" \n# \"asymmetry\" \n# \"aspect ratio\" \n# \"mixing state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to aerosols in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"21. Radiation --> Shortwave Gases\nShortwave radiative properties of gases\n21.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral shortwave radiative interactions with gases",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22. Radiation --> Longwave Radiation\nProperties of the longwave radiation scheme\n22.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of longwave radiation in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22.2. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the longwave radiation scheme.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22.3. Spectral Integration\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nLongwave radiation scheme spectral integration",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"wide-band model\" \n# \"correlated-k\" \n# \"exponential sum fitting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22.4. Transport Calculation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nLongwave radiation transport calculation methods",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"two-stream\" \n# \"layer interaction\" \n# \"bulk\" \n# \"adaptive\" \n# \"multi-stream\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22.5. Spectral Intervals\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nLongwave radiation scheme number of spectral intervals",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"23. Radiation --> Longwave GHG\nRepresentation of greenhouse gases in the longwave radiation scheme\n23.1. Greenhouse Gas Complexity\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nComplexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CO2\" \n# \"CH4\" \n# \"N2O\" \n# \"CFC-11 eq\" \n# \"CFC-12 eq\" \n# \"HFC-134a eq\" \n# \"Explicit ODSs\" \n# \"Explicit other fluorinated gases\" \n# \"O3\" \n# \"H2O\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23.2. ODS\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nOzone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CFC-12\" \n# \"CFC-11\" \n# \"CFC-113\" \n# \"CFC-114\" \n# \"CFC-115\" \n# \"HCFC-22\" \n# \"HCFC-141b\" \n# \"HCFC-142b\" \n# \"Halon-1211\" \n# \"Halon-1301\" \n# \"Halon-2402\" \n# \"methyl chloroform\" \n# \"carbon tetrachloride\" \n# \"methyl chloride\" \n# \"methylene chloride\" \n# \"chloroform\" \n# \"methyl bromide\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23.3. Other Fluorinated Gases\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nOther fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"HFC-134a\" \n# \"HFC-23\" \n# \"HFC-32\" \n# \"HFC-125\" \n# \"HFC-143a\" \n# \"HFC-152a\" \n# \"HFC-227ea\" \n# \"HFC-236fa\" \n# \"HFC-245fa\" \n# \"HFC-365mfc\" \n# \"HFC-43-10mee\" \n# \"CF4\" \n# \"C2F6\" \n# \"C3F8\" \n# \"C4F10\" \n# \"C5F12\" \n# \"C6F14\" \n# \"C7F16\" \n# \"C8F18\" \n# \"c-C4F8\" \n# \"NF3\" \n# \"SF6\" \n# \"SO2F2\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"24. Radiation --> Longwave Cloud Ice\nLongwave radiative properties of ice crystals in clouds\n24.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral longwave radiative interactions with cloud ice crystals",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"24.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of cloud ice crystals in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bi-modal size distribution\" \n# \"ensemble of ice crystals\" \n# \"mean projected area\" \n# \"ice water path\" \n# \"crystal asymmetry\" \n# \"crystal aspect ratio\" \n# \"effective crystal radius\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"24.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to cloud ice crystals in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25. Radiation --> Longwave Cloud Liquid\nLongwave radiative properties of liquid droplets in clouds\n25.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral longwave radiative interactions with cloud liquid droplets",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of cloud liquid droplets in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud droplet number concentration\" \n# \"effective cloud droplet radii\" \n# \"droplet size distribution\" \n# \"liquid water path\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to cloud liquid droplets in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"geometric optics\" \n# \"Mie theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26. Radiation --> Longwave Cloud Inhomogeneity\nCloud inhomogeneity in the longwave radiation scheme\n26.1. Cloud Inhomogeneity\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod for taking into account horizontal cloud inhomogeneity",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Monte Carlo Independent Column Approximation\" \n# \"Triplecloud\" \n# \"analytic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27. Radiation --> Longwave Aerosols\nLongwave radiative properties of aerosols\n27.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral longwave radiative interactions with aerosols",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of aerosols in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"number concentration\" \n# \"effective radii\" \n# \"size distribution\" \n# \"asymmetry\" \n# \"aspect ratio\" \n# \"mixing state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to aerosols in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"28. Radiation --> Longwave Gases\nLongwave radiative properties of gases\n28.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral longwave radiative interactions with gases",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"29. Turbulence Convection\nAtmosphere Convective Turbulence and Clouds\n29.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of atmosphere convection and turbulence",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"30. Turbulence Convection --> Boundary Layer Turbulence\nProperties of the boundary layer turbulence scheme\n30.1. Scheme Name\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nBoundary layer turbulence scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Mellor-Yamada\" \n# \"Holtslag-Boville\" \n# \"EDMF\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"30.2. Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nBoundary layer turbulence scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TKE prognostic\" \n# \"TKE diagnostic\" \n# \"TKE coupled with water\" \n# \"vertical profile of Kz\" \n# \"non-local diffusion\" \n# \"Monin-Obukhov similarity\" \n# \"Coastal Buddy Scheme\" \n# \"Coupled with convection\" \n# \"Coupled with gravity waves\" \n# \"Depth capped at cloud base\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"30.3. Closure Order\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nBoundary layer turbulence scheme closure order",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"30.4. Counter Gradient\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nUses boundary layer turbulence scheme counter gradient",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"31. Turbulence Convection --> Deep Convection\nProperties of the deep convection scheme\n31.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDeep convection scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"31.2. Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nDeep convection scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mass-flux\" \n# \"adjustment\" \n# \"plume ensemble\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"31.3. Scheme Method\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nDeep convection scheme method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CAPE\" \n# \"bulk\" \n# \"ensemble\" \n# \"CAPE/WFN based\" \n# \"TKE/CIN based\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"31.4. Processes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical processes taken into account in the parameterisation of deep convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vertical momentum transport\" \n# \"convective momentum transport\" \n# \"entrainment\" \n# \"detrainment\" \n# \"penetrative convection\" \n# \"updrafts\" \n# \"downdrafts\" \n# \"radiative effect of anvils\" \n# \"re-evaporation of convective precipitation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"31.5. Microphysics\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nMicrophysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeor and water vapor from updrafts",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"tuning parameter based\" \n# \"single moment\" \n# \"two moment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"32. Turbulence Convection --> Shallow Convection\nProperties of the shallow convection scheme\n32.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nShallow convection scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"32.2. Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nshallow convection scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mass-flux\" \n# \"cumulus-capped boundary layer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"32.3. Scheme Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nshallow convection scheme method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"same as deep (unified)\" \n# \"included in boundary layer turbulence\" \n# \"separate diagnosis\" \n# TODO - please enter value(s)\n",
"32.4. Processes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical processes taken into account in the parameterisation of shallow convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"convective momentum transport\" \n# \"entrainment\" \n# \"detrainment\" \n# \"penetrative convection\" \n# \"re-evaporation of convective precipitation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"32.5. Microphysics\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nMicrophysics scheme for shallow convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"tuning parameter based\" \n# \"single moment\" \n# \"two moment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"33. Microphysics Precipitation\nLarge Scale Cloud Microphysics and Precipitation\n33.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of large scale cloud microphysics and precipitation",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"34. Microphysics Precipitation --> Large Scale Precipitation\nProperties of the large scale precipitation scheme\n34.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name of the large scale precipitation parameterisation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"34.2. Hydrometeors\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPrecipitating hydrometeors taken into account in the large scale precipitation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"liquid rain\" \n# \"snow\" \n# \"hail\" \n# \"graupel\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"35. Microphysics Precipitation --> Large Scale Cloud Microphysics\nProperties of the large scale cloud microphysics scheme\n35.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name of the microphysics parameterisation scheme used for large scale clouds.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"35.2. Processes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nLarge scale cloud microphysics processes",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mixed phase\" \n# \"cloud droplets\" \n# \"cloud ice\" \n# \"ice nucleation\" \n# \"water vapour deposition\" \n# \"effect of raindrops\" \n# \"effect of snow\" \n# \"effect of graupel\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"36. Cloud Scheme\nCharacteristics of the cloud scheme\n36.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of the atmosphere cloud scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"36.2. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the cloud scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"36.3. Atmos Coupling\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nAtmosphere components that are linked to the cloud scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"atmosphere_radiation\" \n# \"atmosphere_microphysics_precipitation\" \n# \"atmosphere_turbulence_convection\" \n# \"atmosphere_gravity_waves\" \n# \"atmosphere_solar\" \n# \"atmosphere_volcano\" \n# \"atmosphere_cloud_simulator\" \n# TODO - please enter value(s)\n",
"36.4. Uses Separate Treatment\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDifferent cloud schemes for the different types of clouds (convective, stratiform and boundary layer)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"36.5. Processes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nProcesses included in the cloud scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"entrainment\" \n# \"detrainment\" \n# \"bulk cloud\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"36.6. Prognostic Scheme\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the cloud scheme a prognostic scheme?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"36.7. Diagnostic Scheme\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the cloud scheme a diagnostic scheme?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"36.8. Prognostic Variables\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nList the prognostic variables used by the cloud scheme, if applicable.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud amount\" \n# \"liquid\" \n# \"ice\" \n# \"rain\" \n# \"snow\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"37. Cloud Scheme --> Optical Cloud Properties\nOptical cloud properties\n37.1. Cloud Overlap Method\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nMethod for taking into account overlapping of cloud layers",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"random\" \n# \"maximum\" \n# \"maximum-random\" \n# \"exponential\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"37.2. Cloud Inhomogeneity\nIs Required: FALSE Type: STRING Cardinality: 0.1\nMethod for taking into account cloud inhomogeneity",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"38. Cloud Scheme --> Sub Grid Scale Water Distribution\nSub-grid scale water distribution\n38.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nSub-grid scale water distribution type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# TODO - please enter value(s)\n",
"38.2. Function Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nSub-grid scale water distribution function name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"38.3. Function Order\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nSub-grid scale water distribution function type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"38.4. Convection Coupling\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nSub-grid scale water distribution coupling with convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"coupled with deep\" \n# \"coupled with shallow\" \n# \"not coupled with convection\" \n# TODO - please enter value(s)\n",
"39. Cloud Scheme --> Sub Grid Scale Ice Distribution\nSub-grid scale ice distribution\n39.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nSub-grid scale ice distribution type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# TODO - please enter value(s)\n",
"39.2. Function Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nSub-grid scale ice distribution function name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"39.3. Function Order\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nSub-grid scale ice distribution function type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"39.4. Convection Coupling\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nSub-grid scale ice distribution coupling with convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"coupled with deep\" \n# \"coupled with shallow\" \n# \"not coupled with convection\" \n# TODO - please enter value(s)\n",
"40. Observation Simulation\nCharacteristics of observation simulation\n40.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of observation simulator characteristics",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"41. Observation Simulation --> Isscp Attributes\nISSCP Characteristics\n41.1. Top Height Estimation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nCloud simulator ISSCP top height estimation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"no adjustment\" \n# \"IR brightness\" \n# \"visible optical depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"41.2. Top Height Direction\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nCloud simulator ISSCP top height direction",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"lowest altitude level\" \n# \"highest altitude level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"42. Observation Simulation --> Cosp Attributes\nCFMIP Observational Simulator Package attributes\n42.1. Run Configuration\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nCloud simulator COSP run configuration",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Inline\" \n# \"Offline\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"42.2. Number Of Grid Points\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nCloud simulator COSP number of grid points",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"42.3. Number Of Sub Columns\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nCloud simulator COSP number of sub-columns used to simulate sub-grid variability",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"42.4. Number Of Levels\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nCloud simulator COSP number of levels",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"43. Observation Simulation --> Radar Inputs\nCharacteristics of the cloud radar simulator\n43.1. Frequency\nIs Required: TRUE Type: FLOAT Cardinality: 1.1\nCloud simulator radar frequency (Hz)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"43.2. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nCloud simulator radar type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"surface\" \n# \"space borne\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"43.3. Gas Absorption\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nCloud simulator radar uses gas absorption",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"43.4. Effective Radius\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nCloud simulator radar uses effective radius",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"44. Observation Simulation --> Lidar Inputs\nCharacteristics of the cloud lidar simulator\n44.1. Ice Types\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nCloud simulator lidar ice type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"ice spheres\" \n# \"ice non-spherical\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"44.2. Overlap\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nCloud simulator lidar overlap",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"max\" \n# \"random\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"45. Gravity Waves\nCharacteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.\n45.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of gravity wave parameterisation in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"45.2. Sponge Layer\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nSponge layer in the upper levels in order to avoid gravity wave reflection at the top.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.sponge_layer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rayleigh friction\" \n# \"Diffusive sponge layer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"45.3. Background\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nBackground wave distribution",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"continuous spectrum\" \n# \"discrete spectrum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"45.4. Subgrid Scale Orography\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nSubgrid scale orography effects taken into account.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"effect on drag\" \n# \"effect on lifting\" \n# \"enhanced topography\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"46. Gravity Waves --> Orographic Gravity Waves\nGravity waves generated due to the presence of orography\n46.1. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the orographic gravity wave scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"46.2. Source Mechanisms\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOrographic gravity wave source mechanisms",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear mountain waves\" \n# \"hydraulic jump\" \n# \"envelope orography\" \n# \"low level flow blocking\" \n# \"statistical sub-grid scale variance\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"46.3. Calculation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOrographic gravity wave calculation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"non-linear calculation\" \n# \"more than two cardinal directions\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"46.4. Propagation Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nOrographic gravity wave propagation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear theory\" \n# \"non-linear theory\" \n# \"includes boundary layer ducting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"46.5. Dissipation Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nOrographic gravity wave dissipation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"total wave\" \n# \"single wave\" \n# \"spectral\" \n# \"linear\" \n# \"wave saturation vs Richardson number\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"47. Gravity Waves --> Non Orographic Gravity Waves\nGravity waves generated by non-orographic processes.\n47.1. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the non-orographic gravity wave scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"47.2. Source Mechanisms\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nNon-orographic gravity wave source mechanisms",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"convection\" \n# \"precipitation\" \n# \"background spectrum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"47.3. Calculation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nNon-orographic gravity wave calculation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"spatially dependent\" \n# \"temporally dependent\" \n# TODO - please enter value(s)\n",
"47.4. Propagation Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nNon-orographic gravity wave propagation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear theory\" \n# \"non-linear theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"47.5. Dissipation Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nNon-orographic gravity wave dissipation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"total wave\" \n# \"single wave\" \n# \"spectral\" \n# \"linear\" \n# \"wave saturation vs Richardson number\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"48. Solar\nTop of atmosphere solar insolation characteristics\n48.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of solar insolation of the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"49. Solar --> Solar Pathways\nPathways for solar forcing of the atmosphere\n49.1. Pathways\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPathways for the solar forcing of the atmosphere model domain",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_pathways.pathways') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"SW radiation\" \n# \"precipitating energetic particles\" \n# \"cosmic rays\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"50. Solar --> Solar Constant\nSolar constant and top of atmosphere insolation characteristics\n50.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTime adaptation of the solar constant.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"transient\" \n# TODO - please enter value(s)\n",
"50.2. Fixed Value\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf the solar constant is fixed, enter the value of the solar constant (W m-2).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"50.3. Transient Characteristics\nIs Required: TRUE Type: STRING Cardinality: 1.1\nsolar constant transient characteristics (W m-2)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"51. Solar --> Orbital Parameters\nOrbital parameters and top of atmosphere insolation characteristics\n51.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTime adaptation of orbital parameters",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"transient\" \n# TODO - please enter value(s)\n",
"51.2. Fixed Reference Date\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nReference date for fixed orbital parameters (yyyy)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"51.3. Transient Method\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescription of transient orbital parameters",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"51.4. Computation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod used for computing orbital parameters.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Berger 1978\" \n# \"Laskar 2004\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"52. Solar --> Insolation Ozone\nImpact of solar insolation on stratospheric ozone\n52.1. Solar Ozone Impact\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes top of atmosphere insolation impact on stratospheric ozone?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"53. Volcanos\nCharacteristics of the implementation of volcanoes\n53.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of the implementation of volcanic effects in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.volcanos.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"54. Volcanos --> Volcanoes Treatment\nTreatment of volcanoes in the atmosphere\n54.1. Volcanoes Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow volcanic effects are modeled in the atmosphere.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"high frequency solar constant anomaly\" \n# \"stratospheric aerosols optical thickness\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"©2017 ES-DOC"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
erickpeirson/statistical-computing
|
.ipynb_checkpoints/Hamiltonian MCMC (HMC)-checkpoint.ipynb
|
cc0-1.0
|
[
"%pylab inline\n\nfrom scipy.stats import beta, multivariate_normal, uniform, norm\nfrom scipy.misc import derivative\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy\nimport pandas as pd",
"Use the (local) shape of the distribution to make smarter proposals.\nHamiltonian: quantity that is conserved regardless of position in space.\nMetaphor: hockey puck sliding on a (non-flat) surface. Want to be able to describe the state of the puck. The state has two quantities: \n\nCurrent position, $q$\nMomentum, $p$\n\nHamiltonian: $H(q, p) = U(q) + K(p)$\n\n$U(q)$ -- potential energy\n$K(p)$ -- kinetic energy",
"dtarget = lambda x: multivariate_normal.pdf(x, mean=(3, 10), cov=[[1, 0], [0, 1]])\nx1 = np.linspace(-6, 12, 101)\nx2 = np.linspace(-11, 31, 101)\nX, Y = np.meshgrid(x1, x2)\nZ = np.array(map(dtarget, zip(X.flat, Y.flat))).reshape(101, 101)\n\nplt.figure(figsize=(10,7))\nplt.contour(X, Y, Z)\nplt.xlim(0, 6)\nplt.ylim(7, 13)\nplt.show()",
"The surface of interest will be $U(q) = -\\log{f(q)}$\n$K(p) = \\frac{p^T p}{2m}$, where $m$ = mass of the puck.\nPosition over time is a function of momentum: \n$\\frac{dq_i}{dt} = \\frac{p_i}{m}$\nChange in momentum over time is a function of surface gradient:\n$\\frac{dp_i}{dt} = -\\frac{\\delta U}{\\delta q_i}$\nLeap-frog algorithm\n$ p_i(t + \\frac{\\epsilon}{2}) = p_i(t) - \\frac{\\epsilon}{2} \\frac{\\delta U}{\\delta q_i} U(q(t))$\n$ q_i(t + \\epsilon) = q_i(t) + \\frac{\\epsilon}{m}p_i(t+\\frac{\\epsilon}{2})$\n$ p_i(t + \\epsilon) = p_i(t + \\frac{\\epsilon}{2}) - \\frac{\\epsilon}{2} \\frac{\\delta U}{\\delta q_i}(q(t+\\epsilon))$\n$\\epsilon$ -- step size",
"def HMC_one_step(U, current_q, Eps, L, m=1):\n \"\"\"\n One step of the Hamiltonian Monte Carlo.\n \n Parameters\n ----------\n U : callable\n A function that takes a single argument, the position.\n q : array-like\n Current position.\n Eps : float\n The step size, epsilon.\n L : int\n Number of leapfrog stpes.\n m : float\n Mass of the particle.\n \n Returns\n -------\n q_out : array\n Path from ``q`` to the proposed position.\n \"\"\"\n\n q = copy.copy(current_q)\n Nq = len(q)\n p = multivariate_normal.rvs([0. for i in xrange(Nq)])\n current_p = copy.copy(p)\n\n out = {}\n \n out['p'] = np.zeros((L, Nq))\n out['p'][0,:] = copy.copy(p)\n out['q'] = np.zeros((L, Nq))\n out['q'][0,:] = copy.copy(q)\n \n for i in xrange(1, L):\n p -= Eps*derivative(U, q, 0.01)/2.\n q += (Eps/m)*p\n out['q'][i, :] = copy.copy(q)\n p -= Eps*derivative(U, q, 0.01)/2.\n out['p'][i, :] = copy.copy(p)\n \n current_U = U(current_q)\n current_K = (current_p**2).sum()/2.\n proposed_U = U(q)\n proposed_K = (p**2).sum()/2.\n \n if uniform.rvs() < exp(current_U - proposed_U + current_K - proposed_K):\n out['value'] = q\n else:\n out['value'] = current_q\n \n return out\n\nplt.figure(figsize=(10,7))\nplt.contour(X, Y, Z)\nU = lambda x: -1.*np.log(dtarget(x))\nchain = HMC_one_step(U, np.array([4., 10.]), Eps=0.2, L=10, m=2)['q']\nplt.plot(chain[:, 0], chain[:, 1], 'ro')\nplt.plot(chain[:, 0], chain[:, 1], 'r-')\nplt.plot(chain[0, 0], chain[0,1], 'bo')\nplt.xlim(0, 6)\nplt.ylim(7, 13)\nplt.xlabel('x1')\nplt.ylabel('x2')\nplt.show()\n\ndef HMC(dtarget, start, Eps=0.2, L=10, m=2, N=1000, num_chains=4):\n \"\"\"\n Perform an HMC simulation.\n \n Parameters\n ----------\n dtarget : callable\n Target PDF.\n \n \"\"\"\n \n # Invert the target PDF into a concave surface.\n neg_log_dtarget = lambda x: -1.*np.log(dtarget(x))\n \n # If only one starting position is provided, use it for all chains.\n if len(start.shape) == 1:\n start = np.array([np.array(start) for i in xrange(num_chains)])\n \n chains = 
[]\n for j in xrange(num_chains):\n chain = [start[j, :]]\n for i in xrange(N):\n proposal = HMC_one_step(neg_log_dtarget, \n copy.copy(chain[-1]), \n Eps, L, m)['value']\n chain.append(proposal)\n chains.append(np.array(chain))\n return np.array(chains) ",
"Tuning parameters: step size, number of steps, and \"mass\" \nHMC does not work discrete parameters. STAN is all HMC.\nGelman metric still applies -- we just have a better way of proposing values.",
"def Gelman(chains):\n if len(chains.shape) == 3:\n N_p = chains.shape[2]\n else:\n N_p = 1\n generate = lambda ptn: np.array([np.array([np.array([ptn(p, i, c) \n for p in xrange(N_p)\n for i in xrange(chains.shape[1])])\n for c in xrange(chains.shape[0])])])\n params = generate(lambda p, i, c: 'x{0}'.format(p))\n iters = generate(lambda p, i, c: i)\n labels = generate(lambda p, i, c: c)\n \n data = zip(chains.flat, params.flat, iters.flat, labels.flat)\n dataframe = pd.DataFrame(data, columns=('Value', 'Parameter', 'Iteration', 'Chain'))\n\n xbar = dataframe.groupby('Parameter').Value.mean()\n m = chains.shape[0]\n xbar_i = dataframe.groupby(('Parameter', 'Chain')).Value.mean()\n s2_i = dataframe.groupby(('Parameter', 'Chain')).Value.var()\n n = dataframe.groupby(('Parameter', 'Chain')).Value.count().mean()\n\n W = s2_i.mean()\n B = (n/(m-1.)) * ((xbar_i - xbar)**2).sum()\n sigma2_hat = W*(n-1.)/n + B/n\n R_hat = np.sqrt(sigma2_hat/W)\n n_eff = m*n*sigma2_hat/B # I missed what this was for.\n \n return R_hat, n_eff\n\nchains = HMC(dtarget, array([4., 10.]), Eps=0.2, L=5, N=1000)\n\nplt.figure(figsize=(10,7))\nplt.contour(X, Y, Z)\nplt.plot(chains[0][:, 0], chains[0][:, 1], alpha=0.5)\nplt.plot(chains[1][:, 0], chains[1][:, 1], alpha=0.5)\nplt.plot(chains[2][:, 0], chains[2][:, 1], alpha=0.5)\nplt.plot(chains[3][:, 0], chains[3][:, 1], alpha=0.5)\nplt.xlim(0, 6)\nplt.ylim(7, 13)\nplt.show()\n\nplt.subplot(211)\nfor i in xrange(chains.shape[0]):\n plt.plot(chains[i,:,0])\nplt.ylabel('x1')\n\nplt.subplot(212)\nfor i in xrange(chains.shape[0]):\n plt.plot(chains[i,:,1])\nplt.ylabel('x2')\n\nGelman(chains)",
"Banana-shaped target distribution",
"dtarget = lambda x: exp( (-x[0]**2)/200. - 0.5*(x[1]+(0.05*x[0]**2) - 100.*0.05)**2)\n\nx1 = np.linspace(-20, 20, 101)\nx2 = np.linspace(-15, 10, 101)\nX, Y = np.meshgrid(x1, x2)\nZ = np.array(map(dtarget, zip(X.flat, Y.flat))).reshape(101, 101)\n\nplt.figure(figsize=(10,7))\nplt.contour(X, Y, Z)\nplt.show()\n\nstart = np.array([[uniform.rvs(loc=-10., scale=15.), \n uniform.rvs(loc=0., scale=10)]\n for i in xrange(4)])\nchains = HMC(dtarget, start, Eps=0.7, L=12, m=2, N=10000)\n\nplt.figure(figsize=(10,7))\nplt.contour(X, Y, Z)\n\nplt.plot(chains[0][:, 0], chains[0][:, 1], alpha=0.5)\nplt.plot(chains[1][:, 0], chains[1][:, 1], alpha=0.5)\nplt.plot(chains[2][:, 0], chains[2][:, 1], alpha=0.5)\nplt.plot(chains[3][:, 0], chains[3][:, 1], alpha=0.5)\nplt.show()\n\nplt.subplot(211)\nplt.title(Gelman(chains)[0])\nfor i in xrange(chains.shape[0]):\n plt.plot(chains[i,:,0])\nplt.ylabel('x1')\n\nplt.subplot(212)\nfor i in xrange(chains.shape[0]):\n plt.plot(chains[i,:,1])\nplt.ylabel('x2')\n\nplt.tight_layout()\nplt.show()",
"NUTS Sampler\nToy implementation of No-U-Turn Sampler, described by Hoffman and Gelman (2011). Algorithm 3, page 14.",
"def Leapfrog(U, theta, r, Eps, m=1.):\n \"\"\"\n Slightly different update rules, since the negative log of the \n target PDF is not used.\n \"\"\"\n gradient = lambda U, theta: derivative(U, theta, 0.01)\n r += (Eps/2.)*gradient(U, theta)\n theta += (Eps/m)*r\n r += (Eps/2.)*gradient(U, theta)\n return copy.copy(theta), copy.copy(r)\n\ndef BuildTree(U, theta, r, u, v, j, Eps, m=1., delta_max=1000):\n \"\"\"\n Recursive tree-building.\n \n TODO: Make this less ugly.\n \"\"\"\n if j == 0:\n # Take one leapfrog step in the direction v.\n theta_p, r_p = Leapfrog(U, theta, r, v*Eps, m=m)\n n_p = float(u <= exp(U(theta_p) - np.dot(0.5*r_p, r_p)))\n s_p = float(u < exp(delta_max + U(theta_p) - np.dot(0.5*r_p, r_p)))\n return theta_p, r_p, theta_p, r_p, theta_p, n_p, s_p\n else:\n # Recursion -- implicitly build the left and right subtrees.\n rargs = (u, v, j-1., Eps)\n rkwargs = {'m':m}\n theta_n, r_n, theta_f, r_f, theta_p, n_p, s_p = BuildTree(U, theta, r, *rargs, **rkwargs)\n if s_p == 1:\n if v == -1:\n theta_n, r_n, null, null, theta_dp, n_dp, s_dp = BuildTree(U, theta_n, r_n, *rargs, **rkwargs)\n else:\n null, null, theta_f, r_f, theta_dp, n_dp, s_dp = BuildTree(U, theta_f, r_f, *rargs, **rkwargs)\n try:\n if uniform.rvs() <= (n_dp/(n_p + n_dp)):\n theta_p = copy.copy(theta_dp)\n except ZeroDivisionError:\n pass\n s_p = s_p*s_dp*int(np.dot((theta_f - theta_n), r_n) >= 0)*int( np.dot((theta_f - theta_n), r_f) >= 0)\n n_p += n_dp\n return theta_n, r_n, theta_f, r_f, theta_p, n_p, s_p\n\ndef NUTS_one_step(U, theta_last, Eps, m=1.):\n \"\"\"\n TODO: clean up all the copies -- stop being so paranoid.\n \"\"\"\n r_not = norm.rvs(0, 1., size=len(theta_last))\n u = uniform.rvs(0, exp(U(theta_last) - np.dot(0.5*r_not, r_not)))\n \n # Initialize.\n theta_m = copy.copy(theta_last)\n theta_n, theta_f = copy.copy(theta_last), copy.copy(theta_last)\n r_n, r_f = copy.copy(r_not), copy.copy(r_not)\n j = 0.\n s = 1.\n n = 1.\n\n while s == 1.:\n v_j = 
np.random.choice(np.array([-1., 1.])) # Choose a direction.\n if v_j == -1:\n theta_n, r_n, null, null, theta_p, n_p, s_p = BuildTree(U, theta_n, r_n, u, v_j, j, Eps, m=m)\n else:\n null, null, theta_f, r_f, theta_p, n_p, s_p = BuildTree(U, theta_f, r_f, u, v_j, j, Eps, m=m)\n\n if s_p == 1:\n try:\n if uniform.rvs() <= min(1., (n_p/n)):\n theta_m = copy.copy(theta_p)\n except ZeroDivisionError:\n pass\n s = s_p*int(np.dot((theta_f - theta_n), r_n) >= 0)*int( np.dot((theta_f - theta_n), r_f) >= 0)\n j += 1.\n\n return theta_m\n\nNUTS_one_step(lambda x: np.log(dtarget(x)), np.array([3.2, 9.1]), 0.02)\n\ndef NUTS(dtarget, theta_not, Eps, num_iters=1000, delta_max=1000, m=1.):\n U = lambda x: np.log(dtarget(x))\n \n theta = [theta_not]\n for i in xrange(num_iters):\n theta_i = NUTS_one_step(U, theta[-1], Eps, m=m)\n theta.append(theta_i)\n return theta",
"Testing on the banana",
"start = np.array([[uniform.rvs(loc=-10., scale=15.), \n uniform.rvs(loc=0., scale=10)]\n for i in xrange(4)])\n\nchains = np.array([ np.array(NUTS(dtarget, start[i, :], Eps=0.55, m=1.5, num_iters=10000)) for i in xrange(start.shape[0])])\n\nplt.figure(figsize=(10,7))\nplt.contour(X, Y, Z)\n\nfor i in xrange(chains.shape[0]):\n plt.scatter(chains[i, :, 0], chains[i, :, 1], alpha=0.5, s=0.02)\nplt.show()\n\nplt.subplot(211)\nplt.title(Gelman(chains)[0])\nfor i in xrange(chains.shape[0]):\n plt.plot(chains[i, :, 0])\nplt.ylabel('x1')\n\nplt.subplot(212)\nfor i in xrange(chains.shape[0]):\n plt.plot(chains[i, :, 1])\nplt.ylabel('x2')\n\nplt.tight_layout()\nplt.show()\n\nplt.hist(chains[0,:,0])"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tensorflow/docs-l10n
|
site/ja/guide/migrate.ipynb
|
apache-2.0
|
[
"Copyright 2018 The TensorFlow Authors.",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"TensorFlow 1 のコードを TensorFlow 2 に移行する\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://www.tensorflow.org/guide/migrate\"> <img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\">TensorFlow.org で表示</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/guide/migrate.ipynb\"> <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\"> Google Colab で実行</a></td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ja/guide/migrate.ipynb\"> <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">GitHub でソースを表示</a></td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/guide/migrate.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\">ノートブックをダウンロード</a></td>\n</table>\n\n本ドキュメントは、低レベル TensorFlow API のユーザーを対象としています。高レベル API(tf.keras)をご使用の場合は、コードを TensorFlow 2.x と完全互換にするためのアクションはほとんどまたはまったくありません。\n\nオプティマイザのデフォルトの学習率を確認してください。\nメトリクスが記録される「名前」が変更されている可能性があることに注意してください。\n\nTensorFlow 2.x で 1.X のコードを未修正で実行することは、(contrib を除き)依然として可能です。\npython\nimport tensorflow.compat.v1 as tf tf.disable_v2_behavior()\nしかし、これでは TensorFlow 2.0 で追加された改善の多くを活用できません。このガイドでは、コードのアップグレード、さらなる単純化、パフォーマンス向上、そしてより容易なメンテナンスについて説明します。\n自動変換スクリプト\nこのドキュメントで説明される変更を実装する前に行うべき最初のステップは、アップグレードスクリプトを実行してみることです。\nこれはコードを TensorFlow 2.x にアップグレードする際の初期パスとしては十分ですが、v2 特有のコードに変換するわけではありません。コードは依然として tf.compat.v1 エンドポイントを使用して、プレースホルダー、セッション、コレクション、その他 1.x スタイルの機能へのアクセスが可能です。\nトップレベルの動作の変更\ntf.compat.v1.disable_v2_behavior() を使用することで TensorFlow 2.x でコードが機能する場合でも、対処すべきグローバルな動作の変更があります。主な変更点は次のとおりです。\n\n\nEager execution、v1.enable_eager_execution(): 暗黙的に tf.Graph を使用するコードは失敗します。このコードは必ず with tf.Graph().as_default() コンテキストでラップしてください。\n\n\nリソース変数、v1.enable_resource_variables(): 一部のコードは、TensorFlow 参照変数によって有効化される非決定的な動作に依存する場合があります。 
リソース変数は書き込み中にロックされるため、より直感的な一貫性を保証します。\n\nこれによりエッジケースでの動作が変わる場合があります。\nこれにより余分なコピーが作成されるため、メモリ使用量が増える可能性があります。\nこれを無効にするには、use_resource=False を tf.Variable コンストラクタに渡します。\n\n\n\nテンソルの形状、v1.enable_v2_tensorshape(): TensorFlow 2.x は、テンソルの形状の動作を簡略化されており、t.shape[0].value の代わりに t.shape[0] とすることができます。簡単な変更なので、すぐに修正しておくことをお勧めします。例については TensorShape をご覧ください。\n\n\n制御フロー、v1.enable_control_flow_v2(): TensorFlow 2.x 制御フローの実装が簡略化されたため、さまざまなグラフ表現を生成します。問題が生じた場合には、バグを報告してください。\n\n\nTensorFlow 2.x のコードを作成する\nこのガイドでは、TensorFlow 1.x のコードを TensorFlow 2.x に変換するいくつかの例を確認します。これらの変更によって、コードがパフォーマンスの最適化および簡略化された API 呼び出しを活用できるようになります。\nそれぞれのケースのパターンは次のとおりです。\n1. v1.Session.run 呼び出しを置き換える\nすべての v1.Session.run 呼び出しは、Python 関数で置き換える必要があります。\n\nfeed_dictおよびv1.placeholderは関数の引数になります。\nfetch は関数の戻り値になります。\nEager execution では、pdb などの標準的な Python ツールを使用して、変換中に簡単にデバッグできます。\n\n次に、tf.function デコレータを追加して、グラフで効率的に実行できるようにします。 この機能についての詳細は、AutoGraph ガイドをご覧ください。\n注意点:\n\n\nv1.Session.run とは異なり、tf.function は固定のリターンシグネチャを持ち、常にすべての出力を返します。これによってパフォーマンスの問題が生じる場合は、2 つの個別の関数を作成します。\n\n\ntf.control_dependencies または同様の演算は必要ありません。tf.function は、記述された順序で実行されたかのように動作します。たとえば、tf.Variable 割り当てと tf.assert は自動的に実行されます。\n\n\n「モデルを変換する」セクションには、この変換プロセスの実際の例が含まれています。\n2. Python オブジェクトを変数と損失の追跡に使用する\nTensorFlow 2.x では、いかなる名前ベースの変数追跡もまったく推奨されていません。 変数の追跡には Python オブジェクトを使用します。\nv1.get_variable の代わりに tf.Variable を使用してください。\nすべてのv1.variable_scopeは Python オブジェクトに変換が可能です。通常は次のうちの 1 つになります。\n\ntf.keras.layers.Layer\ntf.keras.Model\ntf.Module\n\ntf.Graph.get_collection(tf.GraphKeys.VARIABLES) などの変数のリストを集める必要がある場合には、Layer および Model オブジェクトの .variables と .trainable_variables 属性を使用します。\nこれら Layer クラスと Model クラスは、グローバルコレクションの必要性を除去した別のプロパティを幾つか実装します。.losses プロパティは、tf.GraphKeys.LOSSES コレクション使用の置き換えとなります。\n詳細は Keras ガイドをご覧ください。\n警告 : 多くの tf.compat.v1 シンボルはグローバルコレクションを暗黙的に使用しています。\n3. 
トレーニングループをアップグレードする\nご利用のユースケースで動作する最高レベルの API を使用してください。独自のトレーニングループを構築するよりも tf.keras.Model.fit の選択を推奨します。\nこれらの高レベル関数は、独自のトレーニングループを書く場合に見落とされやすい多くの低レベル詳細を管理します。例えば、それらは自動的に正則化損失を集めて、モデルを呼び出す時にtraining=True引数を設定します。\n4. データ入力パイプラインをアップグレードする\nデータ入力には tf.data データセットを使用してください。それらのオブジェクトは効率的で、表現力があり、TensorFlow とうまく統合します。\n次のように、tf.keras.Model.fit メソッドに直接渡すことができます。\npython\nmodel.fit(dataset, epochs=5)\nまた、標準的な Python で直接にイテレートすることもできます。\npython\nfor example_batch, label_batch in dataset: break\n5. compat.v1シンボルを移行する\ntf.compat.v1モジュールには、元のセマンティクスを持つ完全な TensorFlow 1.x API が含まれています。\nTensorFlow 2 アップグレードスクリプトは、変換が安全な場合、つまり v2 バージョンの動作が完全に同等であると判断できる場合は、シンボルを 2.0 と同等のものに変換します。(たとえば、これらは同じ関数なので、v1.arg_max の名前を tf.argmax に変更します。)\nコードの一部を使用してアップグレードスクリプトを実行した後に、compat.v1 が頻出する可能性があります。 コードを調べ、それらを手動で同等の v2 のコードに変換する価値はあります。(該当するものがある場合には、ログに表示されているはずです。)\nモデルを変換する\n低レベル変数 & 演算子実行\n低レベル API の使用例を以下に示します。\n\n\n変数スコープを使用して再利用を制御する。\n\n\nv1.get_variableで変数を作成する。\n\n\nコレクションに明示的にアクセスする。\n\n\n次のようなメソッドでコレクションに暗黙的にアクセスする。\n\nv1.global_variables\nv1.losses.get_regularization_loss\n\n\n\nv1.placeholder を使用してグラフ入力のセットアップをする。\n\n\nSession.runでグラフを実行する。\n\n\n変数を手動で初期化する。\n\n\n変換前\nTensorFlow 1.x を使用したコードでは、これらのパターンは以下のように表示されます。",
"import tensorflow as tf\nimport tensorflow.compat.v1 as v1\n\nimport tensorflow_datasets as tfds\n\ng = v1.Graph()\n\nwith g.as_default():\n in_a = v1.placeholder(dtype=v1.float32, shape=(2))\n in_b = v1.placeholder(dtype=v1.float32, shape=(2))\n\n def forward(x):\n with v1.variable_scope(\"matmul\", reuse=v1.AUTO_REUSE):\n W = v1.get_variable(\"W\", initializer=v1.ones(shape=(2,2)),\n regularizer=lambda x:tf.reduce_mean(x**2))\n b = v1.get_variable(\"b\", initializer=v1.zeros(shape=(2)))\n return W * x + b\n\n out_a = forward(in_a)\n out_b = forward(in_b)\n reg_loss=v1.losses.get_regularization_loss(scope=\"matmul\")\n\nwith v1.Session(graph=g) as sess:\n sess.run(v1.global_variables_initializer())\n outs = sess.run([out_a, out_b, reg_loss],\n \t feed_dict={in_a: [1, 0], in_b: [0, 1]})\n\nprint(outs[0])\nprint()\nprint(outs[1])\nprint()\nprint(outs[2])",
"変換後\n変換されたコードでは :\n\n変数はローカル Python オブジェクトです。\nforward関数は依然として計算を定義します。\nSession.run呼び出しはforwardへの呼び出しに置き換えられます。\nパフォーマンス向上のためにオプションでtf.functionデコレータを追加可能です。\nどのグローバルコレクションも参照せず、正則化は手動で計算されます。\nセッションやプレースホルダーはありません。",
"W = tf.Variable(tf.ones(shape=(2,2)), name=\"W\")\nb = tf.Variable(tf.zeros(shape=(2)), name=\"b\")\n\n@tf.function\ndef forward(x):\n return W * x + b\n\nout_a = forward([1,0])\nprint(out_a)\n\nout_b = forward([0,1])\n\nregularizer = tf.keras.regularizers.l2(0.04)\nreg_loss=regularizer(W)",
"tf.layersベースのモデル\nv1.layersモジュールは、変数を定義および再利用するv1.variable_scopeに依存するレイヤー関数を含めるために使用されます。\n変換前",
"def model(x, training, scope='model'):\n with v1.variable_scope(scope, reuse=v1.AUTO_REUSE):\n x = v1.layers.conv2d(x, 32, 3, activation=v1.nn.relu,\n kernel_regularizer=lambda x:0.004*tf.reduce_mean(x**2))\n x = v1.layers.max_pooling2d(x, (2, 2), 1)\n x = v1.layers.flatten(x)\n x = v1.layers.dropout(x, 0.1, training=training)\n x = v1.layers.dense(x, 64, activation=v1.nn.relu)\n x = v1.layers.batch_normalization(x, training=training)\n x = v1.layers.dense(x, 10)\n return x\n\ntrain_data = tf.ones(shape=(1, 28, 28, 1))\ntest_data = tf.ones(shape=(1, 28, 28, 1))\n\ntrain_out = model(train_data, training=True)\ntest_out = model(test_data, training=False)\n\nprint(train_out)\nprint()\nprint(test_out)",
"変換後\n\nレイヤーの単純なスタックが tf.keras.Sequentialにぴったり収まります。(より複雑なモデルについてはカスタムレイヤーとモデルおよび Functional API をご覧ください。)\nモデルが変数と正則化損失を追跡します。\nv1.layersからtf.keras.layersへの直接的なマッピングがあるため、変換は一対一対応でした。\n\nほとんどの引数はそのままです。しかし、以下の点は異なります。\n\ntraining引数は、それが実行される時点でモデルによって各レイヤーに渡されます。\n元のmodel関数への最初の引数(入力 x)はなくなりました。これはオブジェクトレイヤーがモデルの呼び出しからモデルの構築を分離するためです。\n\nまた以下にも注意してください。\n\ntf.contribからの初期化子の正則化子を使用している場合は、他よりも多くの引数変更があります。\nコードはコレクションに書き込みを行わないため、v1.losses.get_regularization_lossなどの関数はそれらの値を返さなくなり、トレーニングループが壊れる可能性があります。",
"model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, 3, activation='relu',\n kernel_regularizer=tf.keras.regularizers.l2(0.04),\n input_shape=(28, 28, 1)),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Dense(10)\n])\n\ntrain_data = tf.ones(shape=(1, 28, 28, 1))\ntest_data = tf.ones(shape=(1, 28, 28, 1))\n\ntrain_out = model(train_data, training=True)\nprint(train_out)\n\ntest_out = model(test_data, training=False)\nprint(test_out)\n\n# Here are all the trainable variables.\nlen(model.trainable_variables)\n\n# Here is the regularization loss.\nmodel.losses",
"変数とv1.layersの混在\n既存のコードは低レベルの TensorFlow 1.x 変数と演算子に高レベルのv1.layersが混ざっていることがよくあります。\n変換前",
"def model(x, training, scope='model'):\n with v1.variable_scope(scope, reuse=v1.AUTO_REUSE):\n W = v1.get_variable(\n \"W\", dtype=v1.float32,\n initializer=v1.ones(shape=x.shape),\n regularizer=lambda x:0.004*tf.reduce_mean(x**2),\n trainable=True)\n if training:\n x = x + W\n else:\n x = x + W * 0.5\n x = v1.layers.conv2d(x, 32, 3, activation=tf.nn.relu)\n x = v1.layers.max_pooling2d(x, (2, 2), 1)\n x = v1.layers.flatten(x)\n return x\n\ntrain_out = model(train_data, training=True)\ntest_out = model(test_data, training=False)",
"変換後\nこのコードを変換するには、前の例で示したレイヤーからレイヤーへのマッピングのパターンに従います。\n一般的なパターンは次の通りです。\n\n__init__でレイヤーパラメータを収集する。\nbuildで変数を構築する。\ncallで計算を実行し、結果を返す。\n\nv1.variable_scopeは事実上それ自身のレイヤーです。従ってtf.keras.layers.Layerとして書き直します。詳細はガイドをご覧ください。",
"# Create a custom layer for part of the model\nclass CustomLayer(tf.keras.layers.Layer):\n def __init__(self, *args, **kwargs):\n super(CustomLayer, self).__init__(*args, **kwargs)\n\n def build(self, input_shape):\n self.w = self.add_weight(\n shape=input_shape[1:],\n dtype=tf.float32,\n initializer=tf.keras.initializers.ones(),\n regularizer=tf.keras.regularizers.l2(0.02),\n trainable=True)\n\n # Call method will sometimes get used in graph mode,\n # training will get turned into a tensor\n @tf.function\n def call(self, inputs, training=None):\n if training:\n return inputs + self.w\n else:\n return inputs + self.w * 0.5\n\ncustom_layer = CustomLayer()\nprint(custom_layer([1]).numpy())\nprint(custom_layer([1], training=True).numpy())\n\ntrain_data = tf.ones(shape=(1, 28, 28, 1))\ntest_data = tf.ones(shape=(1, 28, 28, 1))\n\n# Build the model including the custom layer\nmodel = tf.keras.Sequential([\n CustomLayer(input_shape=(28, 28, 1)),\n tf.keras.layers.Conv2D(32, 3, activation='relu'),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Flatten(),\n])\n\ntrain_out = model(train_data, training=True)\ntest_out = model(test_data, training=False)\n",
"注意点:\n\n\nサブクラス化された Keras モデルとレイヤーは v1 グラフ(自動制御依存性なし)と eager モードの両方で実行される必要があります。\n\ncall()をtf.function()にラップして、AutoGraph と自動制御依存性を得るようにします。\n\n\n\ntraining引数を受け取ってcallすることを忘れないようにしてください。\n\nそれはtf.Tensorである場合があります。\nそれは Python ブール型である場合があります。\n\n\n\nself.add_weight()を使用して、コンストラクタまたはModel.buildでモデル変数を作成します。\n\nModel.buildでは、入力形状にアクセスできるため、適合する形状で重みを作成できます。\ntf.keras.layers.Layer.add_weightを使用すると、Keras が変数と正則化損失を追跡できるようになります。\n\n\n\nオブジェクトにtf.Tensorsを保持してはいけません。\n\nそれらはtf.functionまたは eager コンテキスト内のいずれかで作成される可能性がありますが、それらのテンソルは異なる振る舞いをします。\n状態にはtf.Variableを使用してください。これは常に両方のコンテキストから使用可能です。\ntf.Tensorsは中間値専用です。\n\n\n\nSlim & contrib.layers に関する注意\n古い TensorFlow 1.x コードの大部分は Slim ライブラリを使用しており、これはtf.contrib.layersとして TensorFlow 1.x でパッケージ化されていました。 contribモジュールに関しては、TensorFlow 2.x ではtf.compat.v1内でも、あっても利用できなくなりました。Slim を使用したコードの TensorFlow 2.x への変換は、v1.layersを使用したレポジトリの変換よりも複雑です。現実的には、まず最初に Slim コードをv1.layersに変換してから Keras に変換するほうが賢明かもしれません。\n\narg_scopesを除去します。すべての引数は明示的である必要があります。\nそれらを使用する場合、 normalizer_fnとactivation_fnをそれら自身のレイヤーに分割します。\n分離可能な畳み込みレイヤーは 1 つまたはそれ以上の異なる Keras レイヤー(深さ的な、ポイント的な、分離可能な Keras レイヤー)にマップします。\nSlim とv1.layersには異なる引数名とデフォルト値があります。\n一部の引数には異なるスケールがあります。\nSlim 事前トレーニング済みモデルを使用する場合は、tf.keras.applicationsから Keras 事前トレーニング済みモデル、または元の Slim コードからエクスポートされた TensorFlow ハブの TensorFlow 2 SavedModel をお試しください。\n\n一部のtf.contribレイヤーはコアの TensorFlow に移動されていない可能性がありますが、代わりに TensorFlow アドオンパッケージに移動されています。\nトレーニング\ntf.kerasモデルにデータを供給する方法は沢山あります。それらは Python ジェネレータと Numpy 配列を入力として受け取ります。\nモデルへのデータ供給方法として推奨するのは、データ操作用の高パフォーマンスクラスのコレクションを含むtf.dataパッケージの使用です。\n依然としてtf.queueを使用している場合、これらは入力パイプラインとしてではなく、データ構造としてのみサポートされます。\nデータセットを使用する\nTensorFlow Dataset パッケージ(tfds)には、事前定義されたデータセットをtf.data.Datasetオブジェクトとして読み込むためのユーティリティが含まれています。\nこの例として、tfdsを使用して MNISTdataset を読み込んでみましょう。",
"datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True)\nmnist_train, mnist_test = datasets['train'], datasets['test']",
"次に、トレーニング用のデータを準備します。\n\n各画像をリスケールする。\n例の順序をシャッフルする。\n画像とラベルのバッチを集める。",
"BUFFER_SIZE = 10 # Use a much larger value for real code.\nBATCH_SIZE = 64\nNUM_EPOCHS = 5\n\n\ndef scale(image, label):\n image = tf.cast(image, tf.float32)\n image /= 255\n\n return image, label",
"例を短く保つために、データセットをトリミングして 5 バッチのみを返すようにします。",
"train_data = mnist_train.map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\ntest_data = mnist_test.map(scale).batch(BATCH_SIZE)\n\nSTEPS_PER_EPOCH = 5\n\ntrain_data = train_data.take(STEPS_PER_EPOCH)\ntest_data = test_data.take(STEPS_PER_EPOCH)\n\nimage_batch, label_batch = next(iter(train_data))",
"Keras トレーニングループを使用する\nトレーニングプロセスの低レベル制御が不要な場合は、Keras 組み込みのfit、evaluate、predictメソッドの使用が推奨されます。これらのメソッドは(シーケンシャル、関数型、またはサブクラス化)実装を問わず、モデルをトレーニングするための統一インターフェースを提供します。\nこれらのメソッドには次のような優位点があります。\n\nNumpy 配列、Python ジェネレータ、tf.data.Datasetsを受け取ります。\n正則化と活性化損失を自動的に適用します。\nマルチデバイストレーニングのためにtf.distributeをサポートします。\n任意の callable は損失とメトリクスとしてサポートします。\ntf.keras.callbacks.TensorBoardのようなコールバックとカスタムコールバックをサポートします。\n自動的に TensorFlow グラフを使用し、高性能です。\n\nここにDatasetを使用したモデルのトレーニング例を示します。(この機能ついての詳細はチュートリアルをご覧ください。)",
"model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, 3, activation='relu',\n kernel_regularizer=tf.keras.regularizers.l2(0.02),\n input_shape=(28, 28, 1)),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Dense(10)\n])\n\n# Model is the full model w/o custom layers\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel.fit(train_data, epochs=NUM_EPOCHS)\nloss, acc = model.evaluate(test_data)\n\nprint(\"Loss {}, Accuracy {}\".format(loss, acc))",
"ループを自分で書く\nKeras モデルのトレーニングステップは動作していても、そのステップの外でより制御が必要な場合は、データ イテレーション ループでtf.keras.Model.train_on_batchメソッドの使用を検討してみてください。\ntf.keras.callbacks.Callbackとして、多くのものが実装可能であることに留意してください。\nこのメソッドには前のセクションで言及したメソッドの優位点の多くがありますが、外側のループのユーザー制御も与えます。\ntf.keras.Model.test_on_batchまたはtf.keras.Model.evaluateを使用して、トレーニング中のパフォーマンスをチェックすることも可能です。\n注意: train_on_batchとtest_on_batchは、デフォルトで単一バッチの損失とメトリクスを返します。reset_metrics=Falseを渡すと累積メトリックを返しますが、必ずメトリックアキュムレータを適切にリセットすることを忘れないようにしてくだい。また、AUCのような一部のメトリクスは正しく計算するためにreset_metrics=Falseが必要なことも覚えておいてください。\n上のモデルのトレーニングを続けます。",
"# Model is the full model w/o custom layers\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nfor epoch in range(NUM_EPOCHS):\n #Reset the metric accumulators\n model.reset_metrics()\n\n for image_batch, label_batch in train_data:\n result = model.train_on_batch(image_batch, label_batch)\n metrics_names = model.metrics_names\n print(\"train: \",\n \"{}: {:.3f}\".format(metrics_names[0], result[0]),\n \"{}: {:.3f}\".format(metrics_names[1], result[1]))\n for image_batch, label_batch in test_data:\n result = model.test_on_batch(image_batch, label_batch,\n # return accumulated metrics\n reset_metrics=False)\n metrics_names = model.metrics_names\n print(\"\\neval: \",\n \"{}: {:.3f}\".format(metrics_names[0], result[0]),\n \"{}: {:.3f}\".format(metrics_names[1], result[1]))",
"<a name=\"custom_loop\"></a>\nトレーニングステップをカスタマイズする\nより多くの柔軟性と制御を必要とする場合、独自のトレーニングループを実装することでそれが可能になります。以下の 3 つのステップを踏みます。\n\nPython ジェネレータかtf.data.Datasetをイテレートして例のバッチを作成します。\ntf.GradientTapeを使用して勾配を集めます。\ntf.keras.optimizersの 1 つを使用して、モデルの変数に重み更新を適用します。\n\n留意点:\n\nサブクラス化されたレイヤーとモデルのcallメソッドには、常にtraining引数を含めます。\ntraining引数を確実に正しくセットしてモデルを呼び出します。\n使用方法によっては、モデルがデータのバッチ上で実行されるまでモデル変数は存在しないかもしれません。\nモデルの正則化損失などを手動で処理する必要があります。\n\nv1 と比べて簡略化されている点に注意してください :\n\n変数初期化子を実行する必要はありません。作成時に変数は初期化されます。\nたとえtf.function演算が eager モードで振る舞う場合でも、手動の制御依存性を追加する必要はありません。",
"model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, 3, activation='relu',\n kernel_regularizer=tf.keras.regularizers.l2(0.02),\n input_shape=(28, 28, 1)),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Dense(10)\n])\n\noptimizer = tf.keras.optimizers.Adam(0.001)\nloss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\n@tf.function\ndef train_step(inputs, labels):\n with tf.GradientTape() as tape:\n predictions = model(inputs, training=True)\n regularization_loss=tf.math.add_n(model.losses)\n pred_loss=loss_fn(labels, predictions)\n total_loss=pred_loss + regularization_loss\n\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\nfor epoch in range(NUM_EPOCHS):\n for inputs, labels in train_data:\n train_step(inputs, labels)\n print(\"Finished epoch\", epoch)\n",
"新しいスタイルのメトリクスと損失\nTensorFlow 2.x では、メトリクスと損失はオブジェクトです。Eager で実行的にtf.function内で動作します。\n損失オブジェクトは呼び出し可能で、(y_true, y_pred) を引数として期待します。",
"cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\ncce([[1, 0]], [[-1.0,3.0]]).numpy()",
"メトリックオブジェクトには次のメソッドがあります 。\n\nMetric.update_state() — 新しい観測を追加する\nMetric.result() — 観測値が与えられたとき、メトリックの現在の結果を得る\nMetric.reset_states() — すべての観測をクリアする\n\nオブジェクト自体は呼び出し可能です。呼び出しはupdate_stateと同様に新しい観測の状態を更新し、メトリクスの新しい結果を返します。\nメトリックの変数を手動で初期化する必要はありません。また、TensorFlow 2.x は自動制御依存性を持つため、それらについても気にする必要はありません。\n次のコードは、メトリックを使用してカスタムトレーニングループ内で観測される平均損失を追跡します。",
"# Create the metrics\nloss_metric = tf.keras.metrics.Mean(name='train_loss')\naccuracy_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n\n@tf.function\ndef train_step(inputs, labels):\n with tf.GradientTape() as tape:\n predictions = model(inputs, training=True)\n regularization_loss=tf.math.add_n(model.losses)\n pred_loss=loss_fn(labels, predictions)\n total_loss=pred_loss + regularization_loss\n\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n # Update the metrics\n loss_metric.update_state(total_loss)\n accuracy_metric.update_state(labels, predictions)\n\n\nfor epoch in range(NUM_EPOCHS):\n # Reset the metrics\n loss_metric.reset_states()\n accuracy_metric.reset_states()\n\n for inputs, labels in train_data:\n train_step(inputs, labels)\n # Get the metric results\n mean_loss=loss_metric.result()\n mean_accuracy = accuracy_metric.result()\n\n print('Epoch: ', epoch)\n print(' loss: {:.3f}'.format(mean_loss))\n print(' accuracy: {:.3f}'.format(mean_accuracy))\n",
"<a id=\"keras_metric_names\"></a>\nKeras メトリック名\nTensorFlow 2.x では、Keras モデルはメトリクス名の処理に関してより一貫性があります。\nメトリクスリストで文字列を渡すと、まさにその文字列がメトリクスのnameとして使用されます。これらの名前は<br>model.fitによって返される履歴オブジェクトと、keras.callbacksに渡されるログに表示されます。これはメトリクスリストで渡した文字列に設定されています。",
"model.compile(\n optimizer = tf.keras.optimizers.Adam(0.001),\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics = ['acc', 'accuracy', tf.keras.metrics.SparseCategoricalAccuracy(name=\"my_accuracy\")])\nhistory = model.fit(train_data)\n\nhistory.history.keys()",
"これはmetrics=[\"accuracy\"]を渡すとdict_keys(['loss', 'acc'])になっていた、以前のバージョンとは異なります。 \nKeras オプティマイザ\nv1.train.AdamOptimizerやv1.train.GradientDescentOptimizerなどのv1.train内のオプティマイザは、tf.keras.optimizers内に同等のものを持ちます。\nv1.trainをkeras.optimizersに変換する\nオプティマイザを変換する際の注意事項を次に示します。\n\nオプティマイザをアップグレードすると、古いチェックポイントとの互換性がなくなる可能性があります。\nepsilon のデフォルトはすべて1e-8ではなく1e-7になりました。(これはほとんどのユースケースで無視できます。)\nv1.train.GradientDescentOptimizerはtf.keras.optimizers.SGDで直接置き換えが可能です。\nv1.train.MomentumOptimizerはモメンタム引数(tf.keras.optimizers.SGD(..., momentum=...))を使用してSGDオプティマイザで直接置き換えが可能です。\nv1.train.AdamOptimizerを変換してtf.keras.optimizers.Adamを使用することが可能です。<code>beta1</code>引数とbeta2引数の名前は、beta_1とbeta_2に変更されています。\nv1.train.RMSPropOptimizerはtf.keras.optimizers.RMSpropに変換可能です。 decay引数の名前はrhoに変更されています。\nv1.train.AdadeltaOptimizerはtf.keras.optimizers.Adadeltaに直接変換が可能です。\ntf.train.AdagradOptimizerは tf.keras.optimizers.Adagradに直接変換が可能です。\ntf.train.FtrlOptimizerはtf.keras.optimizers.Ftrlに直接変換が可能です。accum_nameおよびlinear_name引数は削除されています。\ntf.contrib.AdamaxOptimizerとtf.contrib.NadamOptimizerは tf.keras.optimizers.Adamaxとtf.keras.optimizers.Nadamに直接変換が可能です。beta1引数とbeta2引数の名前は、beta_1とbeta_2に変更されています。\n\n一部のtf.keras.optimizersの新しいデフォルト\n<a id=\"keras_optimizer_lr\"></a>\n警告: モデルの収束挙動に変化が見られる場合には、デフォルトの学習率を確認してください。\noptimizers.SGD、optimizers.Adam、またはoptimizers.RMSpropに変更はありません。\n次のデフォルトの学習率が変更されました。\n\noptimizers.Adagrad 0.01 から 0.001 へ\noptimizers.Adadelta 1.0 から 0.001 へ\noptimizers.Adamax 0.002 から 0.001 へ\noptimizers.Nadam 0.002 から 0.001 へ\n\nTensorBoard\nTensorFlow 2 には、TensorBoard で視覚化するための要約データを記述するために使用されるtf.summary API の大幅な変更が含まれています。新しいtf.summaryの概要については、TensorFlow 2 API を使用した複数のチュートリアルがあります。これには、TensorBoard TensorFlow 2 移行ガイドも含まれています。\n保存と読み込み\n<a id=\"checkpoints\"></a>\nチェックポイントの互換性\nTensorFlow 2.x はオブジェクトベースのチェックポイントを使用します。\n古いスタイルの名前ベースのチェックポイントは、注意を払えば依然として読み込むことができます。コード変換プロセスは変数名変更という結果になるかもしれませんが、回避方法はあります。\n最も単純なアプローチは、チェックポイント内の名前と新しいモデルの名前を揃えて並べることです。\n\n変数にはすべて依然として設定が可能なname引数があります。\nKeras モデルはまた 
name引数を取り、それらの変数のためのプレフィックスとして設定されます。\nv1.name_scope関数は、変数名のプレフィックスの設定に使用できます。これはtf.variable_scopeとは大きく異なります。これは名前だけに影響するもので、変数と再利用の追跡はしません。\n\nご利用のユースケースで動作しない場合は、v1.train.init_from_checkpointを試してみてください。これはassignment_map引数を取り、古い名前から新しい名前へのマッピングを指定します。\n注意 : 読み込みを遅延できるオブジェクトベースのチェックポイントとは異なり、名前ベースのチェックポイントは関数が呼び出される時に全ての変数が構築されていることを要求します。一部のモデルは、buildを呼び出すかデータのバッチでモデルを実行するまで変数の構築を遅延します。\nTensorFlow Estimatorリポジトリには事前作成された Estimator のチェックポイントを TensorFlow 1.X から 2.0 にアップグレードするための変換ツールが含まれています。これは、同様のユースケースのツールを構築する方法の例として有用な場合があります。\n保存されたモデルの互換性\n保存されたモデルには、互換性に関する重要な考慮事項はありません。\n\nTensorFlow 1.x saved_models は TensorFlow 2.x で動作します。\nTensorFlow 2.x saved_models は全ての演算がサポートされていれば TensorFlow 1.x で動作します。\n\nGraph.pb または Graph.pbtxt\n未加工のGraph.pbファイルを TensorFlow 2.x にアップグレードする簡単な方法はありません。確実な方法は、ファイルを生成したコードをアップグレードすることです。\nただし、「凍結グラフ」(変数が定数に変換されたtf.Graph)がある場合、v1.wrap_functionを使用してconcrete_functionへの変換が可能です。",
"def wrap_frozen_graph(graph_def, inputs, outputs):\n def _imports_graph_def():\n tf.compat.v1.import_graph_def(graph_def, name=\"\")\n wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])\n import_graph = wrapped_import.graph\n return wrapped_import.prune(\n tf.nest.map_structure(import_graph.as_graph_element, inputs),\n tf.nest.map_structure(import_graph.as_graph_element, outputs))",
"たとえば、次のような凍結された Inception v1 グラフ(2016 年)があります。",
"path = tf.keras.utils.get_file(\n 'inception_v1_2016_08_28_frozen.pb',\n 'http://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz',\n untar=True)",
"tf.GraphDefを読み込みます。",
"graph_def = tf.compat.v1.GraphDef()\nloaded = graph_def.ParseFromString(open(path,'rb').read())",
"これをconcrete_functionにラップします。",
"inception_func = wrap_frozen_graph(\n graph_def, inputs='input:0',\n outputs='InceptionV1/InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/Relu:0')",
"入力としてテンソルを渡します。",
"input_img = tf.ones([1,224,224,3], dtype=tf.float32)\ninception_func(input_img).shape",
"Estimator\nEstimator でトレーニングする\nEstimator は TensorFlow 2.0 でサポートされています。\nEstimator を使用する際には、TensorFlow 1.x. からのinput_fn()、tf.estimator.TrainSpec、tf.estimator.EvalSpecを使用できます。\nここに train と evaluate specs を伴う input_fn を使用する例があります。\ninput_fn と train/eval specs を作成する",
"# Define the estimator's input_fn\ndef input_fn():\n datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True)\n mnist_train, mnist_test = datasets['train'], datasets['test']\n\n BUFFER_SIZE = 10000\n BATCH_SIZE = 64\n\n def scale(image, label):\n image = tf.cast(image, tf.float32)\n image /= 255\n\n return image, label[..., tf.newaxis]\n\n train_data = mnist_train.map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\n return train_data.repeat()\n\n# Define train & eval specs\ntrain_spec = tf.estimator.TrainSpec(input_fn=input_fn,\n max_steps=STEPS_PER_EPOCH * NUM_EPOCHS)\neval_spec = tf.estimator.EvalSpec(input_fn=input_fn,\n steps=STEPS_PER_EPOCH)\n",
"Keras モデル定義を使用する\nTensorFlow 2.x で Estimator を構築する方法には、いくつかの違いがあります。\nモデルは Keras を使用して定義することを推奨します。次にtf.keras.estimator.model_to_estimatorユーティリティを使用して、モデルを Estimator に変更します。次のコードは Estimator を作成してトレーニングする際に、このユーティリティをどのように使用するかを示します。",
"def make_model():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, 3, activation='relu',\n kernel_regularizer=tf.keras.regularizers.l2(0.02),\n input_shape=(28, 28, 1)),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Dense(10)\n ])\n\nmodel = make_model()\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nestimator = tf.keras.estimator.model_to_estimator(\n keras_model = model\n)\n\ntf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)",
"注意 : Keras で重み付きメトリクスを作成し、model_to_estimatorを使用してそれらを Estimator API で重み付きメトリクスを変換することはサポートされません。それらのメトリクスは、add_metrics関数を使用して Estimator 仕様で直接作成する必要があります。\nカスタム model_fn を使用する\n保守する必要がある既存のカスタム Estimator model_fn を持つ場合には、model_fnを変換して Keras モデルを使用できるようにすることが可能です。\nしかしながら、互換性の理由から、カスタムmodel_fnは依然として1.x スタイルのグラフモードで動作します。これは eager execution はなく自動制御依存性もないことも意味します。\n注意: 長期的には、特にカスタムの model_fn を使って、tf.estimator から移行することを計画する必要があります。代替の API は tf.keras と tf.distribute です。トレーニングの一部に Estimator を使用する必要がある場合は、tf.keras.estimator.model_to_estimator コンバータを使用して keras.Model から <code>Estimator</code> を作成する必要があります。\n<a name=\"minimal_changes\"></a>\n最小限の変更で model_fn をカスタマイズする\nTensorFlow 2.0 でカスタムmodel_fnを動作させるには、既存のコードの変更を最小限に留めたい場合、optimizersやmetricsなどのtf.compat.v1シンボルを使用することができます。\nカスタムmodel_fnで Keras モデルを使用することは、それをカスタムトレーニングループで使用することに類似しています。\n\nmode引数を基に、training段階を適切に設定します。\nモデルのtrainable_variablesをオプティマイザに明示的に渡します。\n\nしかし、カスタムループと比較して、重要な違いがあります。\n\nModel.lossesを使用する代わりにModel.get_losses_forを使用して損失を抽出します。\nModel.get_updates_forを使用してモデルの更新を抽出します。\n\n注意 : 「更新」は各バッチの後にモデルに適用される必要がある変更です。例えば、layers.BatchNormalizationレイヤーの平均と分散の移動平均などです。\n次のコードはカスタムmodel_fnから Estimator を作成し、これらの懸念事項をすべて示しています。",
"def my_model_fn(features, labels, mode):\n model = make_model()\n\n optimizer = tf.compat.v1.train.AdamOptimizer()\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\n training = (mode == tf.estimator.ModeKeys.TRAIN)\n predictions = model(features, training=training)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n reg_losses = model.get_losses_for(None) + model.get_losses_for(features)\n total_loss=loss_fn(labels, predictions) + tf.math.add_n(reg_losses)\n\n accuracy = tf.compat.v1.metrics.accuracy(labels=labels,\n predictions=tf.math.argmax(predictions, axis=1),\n name='acc_op')\n\n update_ops = model.get_updates_for(None) + model.get_updates_for(features)\n minimize_op = optimizer.minimize(\n total_loss,\n var_list=model.trainable_variables,\n global_step=tf.compat.v1.train.get_or_create_global_step())\n train_op = tf.group(minimize_op, update_ops)\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=total_loss,\n train_op=train_op, eval_metric_ops={'accuracy': accuracy})\n\n# Create the Estimator & Train\nestimator = tf.estimator.Estimator(model_fn=my_model_fn)\ntf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)",
"TensorFlow 2.x シンボルでmodel_fnをカスタマイズする\nTensorFlow 1.x シンボルをすべて削除し、カスタムmodel_fn をネイティブの TensorFlow 2.x にアップグレードする場合は、オプティマイザとメトリクスをtf.keras.optimizersとtf.keras.metricsにアップグレードする必要があります。\nカスタムmodel_fnでは、上記の変更に加えて、さらにアップグレードを行う必要があります。\n\nv1.train.Optimizer の代わりに tf.keras.optimizers を使用します。\n損失が呼び出し可能(関数など)な場合は、Optimizer.minimize()を使用してtrain_op/minimize_opを取得します。\ntrain_op/minimize_opを計算するには、\n損失がスカラー損失Tensor(呼び出し不可)の場合は、Optimizer.get_updates()を使用します。返されるリストの最初の要素は目的とするtrain_op/minimize_opです。\n損失が呼び出し可能(関数など)な場合は、Optimizer.minimize()を使用してtrain_op/minimize_opを取得します。\n\n\n評価にはtf.compat.v1.metricsの代わりにtf.keras.metricsを使用します。\n\n上記のmy_model_fnの例では、2.0 シンボルの移行されたコードは次のように表示されます。",
"def my_model_fn(features, labels, mode):\n model = make_model()\n\n training = (mode == tf.estimator.ModeKeys.TRAIN)\n loss_obj = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n predictions = model(features, training=training)\n\n # Get both the unconditional losses (the None part)\n # and the input-conditional losses (the features part).\n reg_losses = model.get_losses_for(None) + model.get_losses_for(features)\n total_loss=loss_obj(labels, predictions) + tf.math.add_n(reg_losses)\n\n # Upgrade to tf.keras.metrics.\n accuracy_obj = tf.keras.metrics.Accuracy(name='acc_obj')\n accuracy = accuracy_obj.update_state(\n y_true=labels, y_pred=tf.math.argmax(predictions, axis=1))\n\n train_op = None\n if training:\n # Upgrade to tf.keras.optimizers.\n optimizer = tf.keras.optimizers.Adam()\n # Manually assign tf.compat.v1.global_step variable to optimizer.iterations\n # to make tf.compat.v1.train.global_step increased correctly.\n # This assignment is a must for any `tf.train.SessionRunHook` specified in\n # estimator, as SessionRunHooks rely on global step.\n optimizer.iterations = tf.compat.v1.train.get_or_create_global_step()\n # Get both the unconditional updates (the None part)\n # and the input-conditional updates (the features part).\n update_ops = model.get_updates_for(None) + model.get_updates_for(features)\n # Compute the minimize_op.\n minimize_op = optimizer.get_updates(\n total_loss,\n model.trainable_variables)[0]\n train_op = tf.group(minimize_op, *update_ops)\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=total_loss,\n train_op=train_op,\n eval_metric_ops={'Accuracy': accuracy_obj})\n\n# Create the Estimator & Train.\nestimator = tf.estimator.Estimator(model_fn=my_model_fn)\ntf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)",
"事前作成された Estimator\ntf.estimator.DNN*、tf.estimator.Linear*、 tf.estimator.DNNLinearCombined*のファミリーに含まれる事前作成された Estimator は、依然として TensorFlow 2.0 API でもサポートされていますが、一部の引数が変更されています。\n\ninput_layer_partitioner: v2 で削除されました。\nloss_reduction: tf.compat.v1.losses.Reductionの代わりにtf.keras.losses.Reductionに更新されました。デフォルト値もtf.compat.v1.losses.Reduction.SUMからtf.keras.losses.Reduction.SUM_OVER_BATCH_SIZEに変更されています。\noptimizer、dnn_optimizer、linear_optimizer: これらの引数はtf.compat.v1.train.Optimizerの代わりにtf.keras.optimizersに更新されています。\n\n上記の変更を移行するには :\n\nTensorFlow 2.x では配布戦略が自動的に処理するため、input_layer_partitionerの移行は必要ありません。\nloss_reductionについてはtf.keras.losses.Reductionでサポートされるオプションを確認してください。\noptimizer 引数の場合:\n1) optimizer、dnn_optimizer、または linear_optimizer 引数を渡さない場合、または 2) optimizer 引数を string としてコードに指定しない場合、デフォルトで tf.keras.optimizers が使用されるため、何も変更する必要はありません。\noptimizer引数については、optimizer、dnn_optimizer、linear_optimizer引数を渡さない場合、またはoptimizer引数をコード内の内のstringとして指定する場合は、何も変更する必要はありません。デフォルトでtf.keras.optimizersを使用します。それ以外の場合は、tf.compat.v1.train.Optimizerから対応するtf.keras.optimizersに更新する必要があります。\n\n\n\nチェックポイントコンバータ\n<a id=\"checkpoint_converter\"></a>\ntf.keras.optimizersは異なる変数セットを生成してチェックポイントに保存するするため、keras.optimizersへの移行は TensorFlow 1.x を使用して保存されたチェックポイントを壊してしまいます。TensorFlow 2.x への移行後に古いチェックポイントを再利用できるようにするには、チェックポイントコンバータツールをお試しください。",
"! curl -O https://raw.githubusercontent.com/tensorflow/estimator/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py",
"ツールにはヘルプが組み込まれています。",
"! python checkpoint_converter.py -h",
"<a id=\"tensorshape\"></a>\nTensorShape\nこのクラスはtf.compat.v1.Dimensionオブジェクトの代わりにintを保持することにより単純化されました。従って、.value()を呼び出してintを取得する必要はありません。\n個々のtf.compat.v1.Dimensionオブジェクトは依然としてtf.TensorShape.dimsからアクセス可能です。\n以下に TensorFlow 1.x と TensorFlow 2.x 間の違いを示します。",
"# Create a shape and choose an index\ni = 0\nshape = tf.TensorShape([16, None, 256])\nshape",
"TensorFlow 1.x で次を使っていた場合:\npython\nvalue = shape[i].value\nThen do this in TensorFlow 2.x:",
"value = shape[i]\nvalue",
"TensorFlow 1.x で次を使っていた場合:\npython\nfor dim in shape: value = dim.value print(value)\nTensorFlow 2.0 では次のようにします:",
"for value in shape:\n print(value)",
"TensorFlow 1.x で次を使っていた場合(またはその他の次元のメソッドを使用していた場合):\npython\ndim = shape[i] dim.assert_is_compatible_with(other_dim)\nTensorFlow 2.0 では次のようにします:",
"other_dim = 16\nDimension = tf.compat.v1.Dimension\n\nif shape.rank is None:\n dim = Dimension(None)\nelse:\n dim = shape.dims[i]\ndim.is_compatible_with(other_dim) # or any other dimension method\n\nshape = tf.TensorShape(None)\n\nif shape:\n dim = shape.dims[i]\n dim.is_compatible_with(other_dim) # or any other dimension method",
"tf.TensorShape のブール型の値は、階数がわかっている場合は Trueで、そうでない場合はFalseです。",
"print(bool(tf.TensorShape([]))) # Scalar\nprint(bool(tf.TensorShape([0]))) # 0-length vector\nprint(bool(tf.TensorShape([1]))) # 1-length vector\nprint(bool(tf.TensorShape([None]))) # Unknown-length vector\nprint(bool(tf.TensorShape([1, 10, 100]))) # 3D tensor\nprint(bool(tf.TensorShape([None, None, None]))) # 3D tensor with no known dimensions\nprint()\nprint(bool(tf.TensorShape(None))) # A tensor with unknown rank.",
"その他の変更点\n\n\ntf.colocate_withを削除する : TensorFlow のデバイス配置アルゴリズムが大幅に改善されたため、これはもう必要ありません。削除したことによってパフォーマンスが低下する場合には、バグを報告してください。\n\n\nv1.ConfigProtoの使用をtf.configの同等の関数に置き換える。\n\n\nまとめ\n全体のプロセスは次のとおりです。\n\nアップグレードスクリプトを実行する。\ncontrib シンボルを除去する。\nモデルをオブジェクト指向スタイル (Keras) に切り替える。\n可能なところでは tf.kerasまたはtf.estimatorトレーニングと評価ループを使用する。\nそうでない場合はカスタムループを使用してよいが、セッションとコレクションを回避すること。\n\nコードを慣用的な TensorFlow 2.0 に変換するには少し作業を要しますが、変更するごとに次のような結果が得られます。\n\nコード行が減少する。\n明瞭さと簡略性が向上する。\nデバッグが容易になる。"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
INM-6/Python-Module-of-the-Week
|
session11_Tensorflow/TensorFlow by example.ipynb
|
mit
|
[
"TensorFlow by example\nhttps://github.com/INM-6/Python-Module-of-the-Week",
"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nplt.xkcd()\n# In case there are font warnings: \n# https://github.com/ipython/xkcd-font/blob/master/xkcd/build/xkcd.otf\n\n# Fixing the seeds to prevent things from breaking unexpectedly...\nnp.random.seed(42)\ntf.set_random_seed(42)",
"Part 1: A toy example\nSetup\nLet's consider a toy 'neural network' consisting of two 'neurons' that map an input $x$ to an output $z$:\n$$\ny = f(w_1 x + b_1)\n$$\n$$\nz = f(w_2 y + b_2)\n$$\nHere, $y$ is a vector (i.e. we have many 'hidden neurons'):\n<img src=\"img/sketch.png\" alt=\"sketch\" width=\"200\" align=\"middle\"/>\nFor the nonlinearity, we use a sigmoid:\n$$f(x) = 1 / (1 + e^{-x})$$\nWe want out network to approximate an arbitrary function $t(x)$. In principle, the above network should be able to do this with any precision according to the universal approximation theorem - if we use enough hidden neurons.\nFor the loss, we use a simple quadratic loss: \n$$L = [z - t(x)]^2$$\nComputational Graph\nIn TensorFlow, we have to define the computational graph before doing any calculations. For our setup, we have this graph (created using TensorBoard):\n<img src=\"img/graph.png\" alt=\"drawing\" width=\"700\" align=\"middle\"/>\nDisclaimer: With eager execution the above is not strictly true anymore.\nTensorFlow implementation",
"num_hidden = 5 # number of hidden neurons\nx_linspace = np.linspace(0, 1, 100) # some input values\n\n# A placeholder for the input (batch size unknown)\nx = tf.placeholder(tf.float32, [None, 1], name='x')\n\n# Our hidden neurons\nw1 = tf.Variable(tf.random_normal([1, num_hidden], stddev=.1), name='w1')\nb1 = tf.Variable(tf.random_normal([num_hidden], stddev=.1), name='b1')\ny = tf.sigmoid(tf.add(tf.matmul(x, w1), b1))\n\n# The output neuron\nw2 = tf.Variable(tf.random_normal([num_hidden, 1], stddev=.1), name='w2')\nb2 = tf.Variable(tf.random_normal([1], stddev=.1), name='b2')\nz = tf.sigmoid(tf.add(tf.matmul(y, w2), b2))\n\n# Execute the graph\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n z_out = sess.run(z, feed_dict={x: x_linspace[:, np.newaxis]})",
"Let's have a look",
"def target_funct(x):\n return 4*(x - 0.5)**2 # our target function\n # return 0.5*np.sin(4*np.pi*x) + 0.5 # another target function\n\n# Plot network output against target function\nplt.plot(x_linspace, z_out, label='NETWORK')\nplt.plot(x_linspace, target_funct(x_linspace), label='TARGET')\nplt.legend()",
"Backprop\nNow we want to optimize. To do so, we need the derivatives of the loss $L = [z - t(x)]^2$ wrt. the parameters $w_1, b_1, w_2, b_2$. Let's start with the bias of our output neuron $z = f(w_2 y + b_2)$:\n$$\n\\frac{dL}{db_2} = \\frac{\\partial L}{\\partial z}\\frac{dz}{db_2} = \\frac{\\partial L}{\\partial z} f^\\prime(w_2 y + b_2)\n$$\nwith $\\frac{\\partial L}{\\partial z} = 2[z - t(x)]$. From the forward pass we know $y$ and $z$ so the expression can be evaluated. Now the weights:\n$$\n\\frac{dL}{dw_2} = \\frac{\\partial L}{\\partial z}\\frac{dz}{dw_2} = \\frac{dL}{db_2} y\n$$\nNeat - we don't need to compute anything here because we know $\\frac{dL}{db_2}$ already! Finally:\n$$\n\\frac{dL}{db_1} = \\frac{\\partial L}{\\partial z}\\frac{\\partial z}{\\partial y}\\frac{dy}{db_1} = \\frac{dL}{db_2} w_2f^\\prime(w_1 x + b_1)\n$$\n$$\n\\frac{dL}{dw_1} = \\frac{\\partial L}{\\partial z}\\frac{\\partial z}{\\partial y}\\frac{dy}{dw_1} = \\frac{dL}{db_1} x\n$$\nOf course with more layers we could play this game ad infinitum. The nice part here is that many operations can be parallelized.\nOptimization\nThat's all there is - backprop is just the chain rule and a clever strategy to compute it. A few notes:\n* The name is pretty obvious: We go backwards along the computational graph here.\n* We see a problem on the horizon: Any vanishing term in the products above make them vanish entirely,\n * e.g. when $f^\\prime(\\dots) \\approx 0$ for the saturated sigmoid -> ReLu's, ELU's ...\n * e.g. when the weights are initialized too small or too big -> Xavier init, ...\n* The main purpose (afaik) of TensorFlow is to automate the calculation of the derivatives and distribute them optimally.\nThe dénouement is the update of the parameters along the gradient, e.g.\n$$\nw_1 = w_1 - \\lambda \\frac{dL}{dw_1}\n$$\nwhere $\\lambda$ is the learning rate. Of course, there plenty of better schemes than this vanilla gradient descent. 
Again, a few notes:\n* The loss function is not convex, i.e. there are in most cases plenty of local optima / saddle points\n* Because it is non-convex, a simple gradient descent would get stuck in a local minimum -> SGD, Adam, ...\n* The learning rate is crucial: If it is too large you get lost (could even diverge), if it is too small you never arrive -> Grad Student Descent\nTraining the model",
"learning_rate = 1e-2 # learning rate\nepochs = int(3e3) # number of epochs\nbatch_size = 128 # batch size\n\n# target and loss\ntarget = tf.placeholder(tf.float32, [None, 1], name='target')\nloss = tf.reduce_mean(tf.square(z - target))\n\n# optimizer\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain_op = optimizer.minimize(loss)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n loss_storage = np.empty(epochs)\n # iterate 'epochs'\n for epoch in range(epochs):\n # generate samples\n batch_x = np.random.rand(batch_size)\n batch_target = target_funct(batch_x)\n # calculate loss and execute train_op\n _, loss_storage[epoch] = sess.run(\n [train_op, loss], feed_dict={\n x: batch_x[:, np.newaxis], \n target: batch_target[:, np.newaxis]})\n # generate prediction\n z_out = sess.run(z, feed_dict={x: x_linspace[:, np.newaxis]})",
"Loss",
"plt.plot(loss_storage, label='LOSS')\nplt.legend(loc=0)",
"Let's have a look",
"plt.plot(x_linspace, z_out, label='NETWORK')\nplt.plot(x_linspace, target_funct(x_linspace), label='TARGET')\nplt.legend(loc=0)",
"Part 2: MNIST - what else?",
"# clear the tensorflow graph\ntf.reset_default_graph()",
"Setup\nEssentially, we use the same architecture as above. Differences:\n* $28 \\times 28 = 784$ inputs instead of a single one\n* $10$ output neurons for the ten digits\n* $300$ hidden neurons\n* relu (hidden) and softmax (output) nonlinearity\n* We use another optimizer\n* We use another loss function (cross entropy)\nBasically this is a concise code-only example without all the annoying text.\nLet's start by getting the data:",
"# tensorflow.examples.tutorials.mnist is deprecated\n# Because it is useful we just suppress the warnings...\nold_tf_verbosity = tf.logging.get_verbosity()\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\n# ... and restore the warnings again\ntf.logging.set_verbosity(old_tf_verbosity)",
"Building the model",
"# input placeholder - for 28 x 28 pixels = 784\nx = tf.placeholder(tf.float32, [None, 784])\n# label placeholder - 10 digits\ny = tf.placeholder(tf.float32, [None, 10])\n\n# hidden layer\nw1 = tf.Variable(tf.random_normal([784, 300], stddev=0.03), name='w1')\nb1 = tf.Variable(tf.random_normal([300]), name='b1')\nhidden_out = tf.nn.relu(tf.add(tf.matmul(x, w1), b1))\n\n# output layer\nw2 = tf.Variable(tf.random_normal([300, 10], stddev=0.03), name='w2')\nb2 = tf.Variable(tf.random_normal([10]), name='b2')\ny_ = tf.nn.softmax(tf.add(tf.matmul(hidden_out, w2), b2))",
"Setting up the optimization",
"learning_rate = 0.5\n\n# loss function\ny_clipped = tf.clip_by_value(y_, 1e-10, 0.9999999) # needed for logarithm\ncross_entropy = -tf.reduce_mean(tf.reduce_sum(\n y * tf.log(y_clipped) + (1 - y) * tf.log(1 - y_clipped), axis=1))\n\n# optimizer\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\ntrain_op = optimizer.minimize(cross_entropy)\n\n# accuracy\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))",
"Running it",
"epochs = 10\nbatch_size = 128\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n total_batch = int(len(mnist.train.labels) / batch_size)\n loss_storage = np.empty(epochs)\n for epoch in range(epochs):\n avg_loss = 0.\n for i in range(total_batch):\n batch_x, batch_y = mnist.train.next_batch(batch_size=batch_size)\n _, batch_loss = sess.run([train_op, cross_entropy], \n feed_dict={x: batch_x, y: batch_y})\n avg_loss += batch_loss / total_batch\n loss_storage[epoch] = avg_loss\n final_acc = sess.run(accuracy, feed_dict={\n x: mnist.test.images, y: mnist.test.labels})",
"Let's have a look",
"plt.plot(loss_storage, label='LOSS')\nplt.legend(loc=0)\n\nprint(\"Final accuracy: {:.1f}%\".format(100*final_acc))",
"References\n\nStrongly influenced by this Python TensorFlow Tutorial\nA high-bias, low-variance introduction to Machine Learning for physicists \nThe CS231n Stanford class\nThe Neural Networks and Deep Learning online book"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ES-DOC/esdoc-jupyterhub
|
notebooks/hammoz-consortium/cmip6/models/sandbox-3/ocnbgchem.ipynb
|
gpl-3.0
|
[
"ES-DOC CMIP6 Model Properties - Ocnbgchem\nMIP Era: CMIP6\nInstitute: HAMMOZ-CONSORTIUM\nSource ID: SANDBOX-3\nTopic: Ocnbgchem\nSub-Topics: Tracers. \nProperties: 65 (37 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:03\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook",
"# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'hammoz-consortium', 'sandbox-3', 'ocnbgchem')",
"Document Authors\nSet document authors",
"# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Contributors\nSpecify document contributors",
"# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Publication\nSpecify document publication status",
"# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)",
"Document Table of Contents\n1. Key Properties\n2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport\n3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks\n4. Key Properties --> Transport Scheme\n5. Key Properties --> Boundary Forcing\n6. Key Properties --> Gas Exchange\n7. Key Properties --> Carbon Chemistry\n8. Tracers\n9. Tracers --> Ecosystem\n10. Tracers --> Ecosystem --> Phytoplankton\n11. Tracers --> Ecosystem --> Zooplankton\n12. Tracers --> Disolved Organic Matter\n13. Tracers --> Particules\n14. Tracers --> Dic Alkalinity \n1. Key Properties\nOcean Biogeochemistry key properties\n1.1. Model Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of ocean biogeochemistry model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.2. Model Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of ocean biogeochemistry model code (PISCES 2.0,...)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.3. Model Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of ocean biogeochemistry model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.model_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Geochemical\" \n# \"NPZD\" \n# \"PFT\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"1.4. Elemental Stoichiometry\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDescribe elemental stoichiometry (fixed, variable, mix of the two)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Fixed\" \n# \"Variable\" \n# \"Mix of both\" \n# TODO - please enter value(s)\n",
"1.5. Elemental Stoichiometry Details\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe which elements have fixed/variable stoichiometry",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.6. Prognostic Variables\nIs Required: TRUE Type: STRING Cardinality: 1.N\nList of all prognostic tracer variables in the ocean biogeochemistry component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.7. Diagnostic Variables\nIs Required: TRUE Type: STRING Cardinality: 1.N\nList of all diagnotic tracer variables in the ocean biogeochemistry component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.8. Damping\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe any tracer damping used (such as artificial correction or relaxation to climatology,...)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.damping') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport\nTime stepping method for passive tracers transport in ocean biogeochemistry\n2.1. Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTime stepping framework for passive tracers",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"use ocean model transport time step\" \n# \"use specific time step\" \n# TODO - please enter value(s)\n",
"2.2. Timestep If Not From Ocean\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nTime step for passive tracers (if different from ocean)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks\nTime stepping framework for biology sources and sinks in ocean biogeochemistry\n3.1. Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTime stepping framework for biology sources and sinks",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"use ocean model transport time step\" \n# \"use specific time step\" \n# TODO - please enter value(s)\n",
"3.2. Timestep If Not From Ocean\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nTime step for biology sources and sinks (if different from ocean)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"4. Key Properties --> Transport Scheme\nTransport scheme in ocean biogeochemistry\n4.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of transport scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Offline\" \n# \"Online\" \n# TODO - please enter value(s)\n",
"4.2. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTransport scheme used",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Use that of ocean model\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"4.3. Use Different Scheme\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDecribe transport scheme if different than that of ocean model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5. Key Properties --> Boundary Forcing\nProperties of biogeochemistry boundary forcing\n5.1. Atmospheric Deposition\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDescribe how atmospheric deposition is modeled",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"from file (climatology)\" \n# \"from file (interannual variations)\" \n# \"from Atmospheric Chemistry model\" \n# TODO - please enter value(s)\n",
"5.2. River Input\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDescribe how river input is modeled",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"from file (climatology)\" \n# \"from file (interannual variations)\" \n# \"from Land Surface model\" \n# TODO - please enter value(s)\n",
"5.3. Sediments From Boundary Conditions\nIs Required: FALSE Type: STRING Cardinality: 0.1\nList which sediments are speficied from boundary condition",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.4. Sediments From Explicit Model\nIs Required: FALSE Type: STRING Cardinality: 0.1\nList which sediments are speficied from explicit sediment model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6. Key Properties --> Gas Exchange\n*Properties of gas exchange in ocean biogeochemistry *\n6.1. CO2 Exchange Present\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs CO2 gas exchange modeled ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6.2. CO2 Exchange Type\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nDescribe CO2 gas exchange",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OMIP protocol\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"6.3. O2 Exchange Present\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs O2 gas exchange modeled ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6.4. O2 Exchange Type\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nDescribe O2 gas exchange",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OMIP protocol\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"6.5. DMS Exchange Present\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs DMS gas exchange modeled ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6.6. DMS Exchange Type\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify DMS gas exchange scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.7. N2 Exchange Present\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs N2 gas exchange modeled ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6.8. N2 Exchange Type\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify N2 gas exchange scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.9. N2O Exchange Present\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs N2O gas exchange modeled ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6.10. N2O Exchange Type\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify N2O gas exchange scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.11. CFC11 Exchange Present\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs CFC11 gas exchange modeled ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6.12. CFC11 Exchange Type\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify CFC11 gas exchange scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.13. CFC12 Exchange Present\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs CFC12 gas exchange modeled ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6.14. CFC12 Exchange Type\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify CFC12 gas exchange scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.15. SF6 Exchange Present\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs SF6 gas exchange modeled ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6.16. SF6 Exchange Type\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify SF6 gas exchange scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.17. 13CO2 Exchange Present\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs 13CO2 gas exchange modeled ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6.18. 13CO2 Exchange Type\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify 13CO2 gas exchange scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.19. 14CO2 Exchange Present\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs 14CO2 gas exchange modeled ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6.20. 14CO2 Exchange Type\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify 14CO2 gas exchange scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.21. Other Gases\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify any other gas exchange",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7. Key Properties --> Carbon Chemistry\nProperties of carbon chemistry biogeochemistry\n7.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDescribe how carbon chemistry is modeled",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OMIP protocol\" \n# \"Other protocol\" \n# TODO - please enter value(s)\n",
"7.2. PH Scale\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nIf NOT OMIP protocol, describe pH scale.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sea water\" \n# \"Free\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"7.3. Constants If Not OMIP\nIs Required: FALSE Type: STRING Cardinality: 0.1\nIf NOT OMIP protocol, list carbon chemistry constants.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8. Tracers\nOcean biogeochemistry tracers\n8.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of tracers in ocean biogeochemistry",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.2. Sulfur Cycle Present\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs sulfur cycle modeled ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"8.3. Nutrients Present\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nList nutrient species present in ocean biogeochemistry model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Nitrogen (N)\" \n# \"Phosphorous (P)\" \n# \"Silicium (S)\" \n# \"Iron (Fe)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8.4. Nitrous Species If N\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nIf nitrogen present, list nitrous species.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Nitrates (NO3)\" \n# \"Amonium (NH4)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8.5. Nitrous Processes If N\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nIf nitrogen present, list nitrous processes.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Dentrification\" \n# \"N fixation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9. Tracers --> Ecosystem\nEcosystem properties in ocean biogeochemistry\n9.1. Upper Trophic Levels Definition\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDefinition of upper trophic level (e.g. based on size) ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9.2. Upper Trophic Levels Treatment\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDefine how upper trophic level are treated",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"10. Tracers --> Ecosystem --> Phytoplankton\nPhytoplankton properties in ocean biogeochemistry\n10.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of phytoplankton",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Generic\" \n# \"PFT including size based (specify both below)\" \n# \"Size based only (specify below)\" \n# \"PFT only (specify below)\" \n# TODO - please enter value(s)\n",
"10.2. Pft\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nPhytoplankton functional types (PFT) (if applicable)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Diatoms\" \n# \"Nfixers\" \n# \"Calcifiers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"10.3. Size Classes\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nPhytoplankton size classes (if applicable)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Microphytoplankton\" \n# \"Nanophytoplankton\" \n# \"Picophytoplankton\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"11. Tracers --> Ecosystem --> Zooplankton\nZooplankton properties in ocean biogeochemistry\n11.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of zooplankton",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Generic\" \n# \"Size based (specify below)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"11.2. Size Classes\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nZooplankton size classes (if applicable)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Microzooplankton\" \n# \"Mesozooplankton\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12. Tracers --> Disolved Organic Matter\nDisolved organic matter properties in ocean biogeochemistry\n12.1. Bacteria Present\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there bacteria representation ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"12.2. Lability\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDescribe treatment of lability in dissolved organic matter",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Labile\" \n# \"Semi-labile\" \n# \"Refractory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13. Tracers --> Particules\nParticulate carbon properties in ocean biogeochemistry\n13.1. Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow is particulate carbon represented in ocean biogeochemistry?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Diagnostic\" \n# \"Diagnostic (Martin profile)\" \n# \"Diagnostic (Balast)\" \n# \"Prognostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.2. Types If Prognostic\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nIf prognostic, type(s) of particulate matter taken into account",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"POC\" \n# \"PIC (calcite)\" \n# \"PIC (aragonite\" \n# \"BSi\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.3. Size If Prognostic\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nIf prognostic, describe if a particule size spectrum is used to represent distribution of particules in water volume",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"No size spectrum used\" \n# \"Full size spectrum\" \n# \"Discrete size classes (specify which below)\" \n# TODO - please enter value(s)\n",
"13.4. Size If Discrete\nIs Required: FALSE Type: STRING Cardinality: 0.1\nIf prognostic and discrete size, describe which size classes are used",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"13.5. Sinking Speed If Prognostic\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nIf prognostic, method for calculation of sinking speed of particules",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Function of particule size\" \n# \"Function of particule type (balast)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14. Tracers --> Dic Alkalinity\nDIC and alkalinity properties in ocean biogeochemistry\n14.1. Carbon Isotopes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nWhich carbon isotopes are modelled (C13, C14)?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"C13\" \n# \"C14)\" \n# TODO - please enter value(s)\n",
"14.2. Abiotic Carbon\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs abiotic carbon modelled ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"14.3. Alkalinity\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow is alkalinity modelled ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Prognostic\" \n# \"Diagnostic)\" \n# TODO - please enter value(s)\n",
"©2017 ES-DOC"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
mrustl/flopy
|
examples/Notebooks/flopy3_multi-component_SSM.ipynb
|
bsd-3-clause
|
[
"FloPy\nUsing FloPy to simplify the use of the MT3DMS SSM package\nA multi-component transport demonstration",
"import os\nimport numpy as np\nfrom flopy import modflow, mt3d, seawat",
"First, we will create a simple model structure",
"nlay, nrow, ncol = 10, 10, 10\nperlen = np.zeros((10), dtype=np.float) + 10\nnper = len(perlen)\n\nibound = np.ones((nlay,nrow,ncol), dtype=np.int)\n\nbotm = np.arange(-1,-11,-1)\ntop = 0.",
"Create the MODFLOW packages",
"model_ws = 'data'\nmodelname = 'ssmex'\nmf = modflow.Modflow(modelname, model_ws=model_ws)\ndis = modflow.ModflowDis(mf, nlay=nlay, nrow=nrow, ncol=ncol, \n perlen=perlen, nper=nper, botm=botm, top=top, \n steady=False)\nbas = modflow.ModflowBas(mf, ibound=ibound, strt=top)\nlpf = modflow.ModflowLpf(mf, hk=100, vka=100, ss=0.00001, sy=0.1)\noc = modflow.ModflowOc(mf)\npcg = modflow.ModflowPcg(mf)\nrch = modflow.ModflowRch(mf)",
"We'll track the cell locations for the SSM data using the MODFLOW boundary conditions.\nGet a dictionary (dict) that has the SSM itype for each of the boundary types.",
"itype = mt3d.Mt3dSsm.itype_dict()\nprint(itype)\nprint(mt3d.Mt3dSsm.get_default_dtype())\nssm_data = {}",
"Add a general head boundary (ghb). The general head boundary head (bhead) is 0.1 for the first 5 stress periods with a component 1 (comp_1) concentration of 1.0 and a component 2 (comp_2) concentration of 100.0. Then bhead is increased to 0.25 and comp_1 concentration is reduced to 0.5 and comp_2 concentration is increased to 200.0",
"ghb_data = {}\nprint(modflow.ModflowGhb.get_default_dtype())\nghb_data[0] = [(4, 4, 4, 0.1, 1.5)]\nssm_data[0] = [(4, 4, 4, 1.0, itype['GHB'], 1.0, 100.0)]\nghb_data[5] = [(4, 4, 4, 0.25, 1.5)]\nssm_data[5] = [(4, 4, 4, 0.5, itype['GHB'], 0.5, 200.0)]\n\nfor k in range(nlay):\n for i in range(nrow):\n ghb_data[0].append((k, i, 0, 0.0, 100.0))\n ssm_data[0].append((k, i, 0, 0.0, itype['GHB'], 0.0, 0.0))\n \nghb_data[5] = [(4, 4, 4, 0.25, 1.5)]\nssm_data[5] = [(4, 4, 4, 0.5, itype['GHB'], 0.5, 200.0)]\nfor k in range(nlay):\n for i in range(nrow):\n ghb_data[5].append((k, i, 0, -0.5, 100.0))\n ssm_data[5].append((k, i, 0, 0.0, itype['GHB'], 0.0, 0.0))",
"Add an injection well. The injection rate (flux) is 10.0 with a comp_1 concentration of 10.0 and a comp_2 concentration of 0.0 for all stress periods. WARNING: since we changed the SSM data in stress period 6, we need to add the well to the ssm_data for stress period 6.",
"wel_data = {}\nprint(modflow.ModflowWel.get_default_dtype())\nwel_data[0] = [(0, 4, 8, 10.0)]\nssm_data[0].append((0, 4, 8, 10.0, itype['WEL'], 10.0, 0.0))\nssm_data[5].append((0, 4, 8, 10.0, itype['WEL'], 10.0, 0.0))",
"Add the GHB and WEL packages to the mf MODFLOW object instance.",
"ghb = modflow.ModflowGhb(mf, stress_period_data=ghb_data)\nwel = modflow.ModflowWel(mf, stress_period_data=wel_data)",
"Create the MT3DMS packages",
"mt = mt3d.Mt3dms(modflowmodel=mf, modelname=modelname, model_ws=model_ws)\nbtn = mt3d.Mt3dBtn(mt, sconc=0, ncomp=2, sconc2=50.0)\nadv = mt3d.Mt3dAdv(mt)\nssm = mt3d.Mt3dSsm(mt, stress_period_data=ssm_data)\ngcg = mt3d.Mt3dGcg(mt)",
"Let's verify that stress_period_data has the right dtype",
"print(ssm.stress_period_data.dtype)",
"Create the SEAWAT packages",
"swt = seawat.Seawat(modflowmodel=mf, mt3dmodel=mt, \n modelname=modelname, namefile_ext='nam_swt', model_ws=model_ws)\nvdf = seawat.SeawatVdf(swt, mtdnconc=0, iwtable=0, indense=-1)\n\nmf.write_input()\nmt.write_input()\nswt.write_input()",
"And finally, modify the vdf package to fix indense.",
"fname = modelname + '.vdf'\nf = open(os.path.join(model_ws, fname),'r')\nlines = f.readlines()\nf.close()\nf = open(os.path.join(model_ws, fname),'w')\nfor line in lines:\n f.write(line)\nfor kper in range(nper):\n f.write(\"-1\\n\")\nf.close() \n"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
GoogleCloudPlatform/training-data-analyst
|
courses/machine_learning/deepdive2/production_ml/labs/tfdv_advanced_taxi.ipynb
|
apache-2.0
|
[
"TensorFlow Data Validation (Advanced)\nLearning Objectives\n\nInstall TFDV\nCompute and visualize statistics\nInfer a schema\nCheck evaluation data for errors\nCheck for evaluation anomalies and fix it\nCheck for drift and skew\nFreeze the schema\n\nIntroduction\nThis notebook illustrates how TensorFlow Data Validation (TFDV) can be used to investigate and visualize your dataset. That includes looking at descriptive statistics, inferring a schema, checking for and fixing anomalies, and checking for drift and skew in our dataset. It's important to understand your dataset's characteristics, including how it might change over time in your production pipeline. It's also important to look for anomalies in your data, and to compare your training, evaluation, and serving datasets to make sure that they're consistent.\nWe'll use data from the Taxi Trips dataset released by the City of Chicago.\nNote: This site provides applications using data that has been modified for use from its original source, www.cityofchicago.org, the official website of the City of Chicago. The City of Chicago makes no claims as to the content, accuracy, timeliness, or completeness of any of the data provided at this site. The data provided at this site is subject to change at any time. It is understood that the data provided at this site is being used at one’s own risk.\nRead more about the dataset in Google BigQuery. Explore the full dataset in the BigQuery UI.\nKey Point: As a modeler and developer, think about how this data is used and the potential benefits and harm a model's predictions can cause. A model like this could reinforce societal biases and disparities. Is a feature relevant to the problem you want to solve or will it introduce bias? For more information, read about ML fairness.\nEach learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the Solution Notebook for reference. 
\nThe columns in the dataset are:\n<table>\n<tr><td>pickup_community_area</td><td>fare</td><td>trip_start_month</td></tr>\n\n<tr><td>trip_start_hour</td><td>trip_start_day</td><td>trip_start_timestamp</td></tr>\n<tr><td>pickup_latitude</td><td>pickup_longitude</td><td>dropoff_latitude</td></tr>\n<tr><td>dropoff_longitude</td><td>trip_miles</td><td>pickup_census_tract</td></tr>\n<tr><td>dropoff_census_tract</td><td>payment_type</td><td>company</td></tr>\n<tr><td>trip_seconds</td><td>dropoff_community_area</td><td>tips</td></tr>\n</table>\n\nInstall Libraries",
"!pip install pyarrow==5.0.0\n!pip install numpy==1.19.2\n!pip install tensorflow-data-validation",
"Restart the kernel (Kernel > Restart kernel > Restart).\nRe-run the above cell and proceed further.\nNote: Please ignore any incompatibility warnings and errors.\nInstall TFDV\nThis will pull in all the dependencies, which will take a minute. Please ignore the warnings or errors regarding incompatible dependency versions.",
"import pandas as pd\nimport tensorflow_data_validation as tfdv\nimport sys\nimport warnings\nwarnings.filterwarnings('ignore')\n\nprint('Installing TensorFlow Data Validation')\n!pip install -q tensorflow_data_validation[visualization]",
"Load the Files\nWe will download our dataset from Google Cloud Storage.",
"import os\nimport tempfile, urllib, zipfile\n\n# Set up some globals for our file paths\nBASE_DIR = tempfile.mkdtemp()\nDATA_DIR = os.path.join(BASE_DIR, 'data')\nOUTPUT_DIR = os.path.join(BASE_DIR, 'chicago_taxi_output')\nTRAIN_DATA = os.path.join(DATA_DIR, 'train', 'data.csv')\nEVAL_DATA = os.path.join(DATA_DIR, 'eval', 'data.csv')\nSERVING_DATA = os.path.join(DATA_DIR, 'serving', 'data.csv')\n\n# Download the zip file from GCP and unzip it\nzip, headers = urllib.request.urlretrieve('https://storage.googleapis.com/artifacts.tfx-oss-public.appspot.com/datasets/chicago_data.zip')\nzipfile.ZipFile(zip).extractall(BASE_DIR)\nzipfile.ZipFile(zip).close()\n\nprint(\"Here's what we downloaded:\")\n!ls -R {os.path.join(BASE_DIR, 'data')}",
"Check the version",
"import tensorflow_data_validation as tfdv\nprint('TFDV version: {}'.format(tfdv.version.__version__))",
"Compute and visualize statistics\nFirst we'll use tfdv.generate_statistics_from_csv to compute statistics for our training data. (ignore the snappy warnings)\nTFDV can compute descriptive statistics that provide a quick overview of the data in terms of the features that are present and the shapes of their value distributions.\nInternally, TFDV uses Apache Beam's data-parallel processing framework to scale the computation of statistics over large datasets. For applications that wish to integrate deeper with TFDV (e.g., attach statistics generation at the end of a data-generation pipeline), the API also exposes a Beam PTransform for statistics generation.",
"# Compute data statistics from CSV files.\n# TODO: Your code goes here\n",
"Now let's use tfdv.visualize_statistics, which uses Facets to create a succinct visualization of our training data:\n\nNotice that numeric features and categorical features are visualized separately, and that charts are displayed showing the distributions for each feature.\nNotice that features with missing or zero values display a percentage in red as a visual indicator that there may be issues with examples in those features. The percentage is the percentage of examples that have missing or zero values for that feature.\nNotice that there are no examples with values for pickup_census_tract. This is an opportunity for dimensionality reduction!\nTry clicking \"expand\" above the charts to change the display\nTry hovering over bars in the charts to display bucket ranges and counts\nTry switching between the log and linear scales, and notice how the log scale reveals much more detail about the payment_type categorical feature\nTry selecting \"quantiles\" from the \"Chart to show\" menu, and hover over the markers to show the quantile percentages",
"# Visualize the input statistics using Facets.\n# TODO: Your code goes here\n",
"Infer a schema\nNow let's use tfdv.infer_schema to create a schema for our data. A schema defines constraints for the data that are relevant for ML. Example constraints include the data type of each feature, whether it's numerical or categorical, or the frequency of its presence in the data. For categorical features the schema also defines the domain - the list of acceptable values. Since writing a schema can be a tedious task, especially for datasets with lots of features, TFDV provides a method to generate an initial version of the schema based on the descriptive statistics.\nGetting the schema right is important because the rest of our production pipeline will be relying on the schema that TFDV generates to be correct. The schema also provides documentation for the data, and so is useful when different developers work on the same data. Let's use tfdv.display_schema to display the inferred schema so that we can review it.",
"# Infers schema from the input statistics.\n# TODO: Your code goes here\ntfdv.display_schema(schema=schema)",
"Check evaluation data for errors\nSo far we've only been looking at the training data. It's important that our evaluation data is consistent with our training data, including that it uses the same schema. It's also important that the evaluation data includes examples of roughly the same ranges of values for our numerical features as our training data, so that our coverage of the loss surface during evaluation is roughly the same as during training. The same is true for categorical features. Otherwise, we may have training issues that are not identified during evaluation, because we didn't evaluate part of our loss surface.\n\nNotice that each feature now includes statistics for both the training and evaluation datasets.\nNotice that the charts now have both the training and evaluation datasets overlaid, making it easy to compare them.\nNotice that the charts now include a percentages view, which can be combined with log or the default linear scales.\nNotice that the mean and median for trip_miles are different for the training versus the evaluation datasets. Will that cause problems?\nWow, the max tips is very different for the training versus the evaluation datasets. Will that cause problems?\nClick expand on the Numeric Features chart, and select the log scale. Review the trip_seconds feature, and notice the difference in the max. Will evaluation miss parts of the loss surface?",
"# Compute stats for evaluation data\neval_stats = tfdv.generate_statistics_from_csv(data_location=EVAL_DATA)\n\n# Compare evaluation data with training data\ntfdv.visualize_statistics(lhs_statistics=eval_stats, rhs_statistics=train_stats,\n lhs_name='EVAL_DATASET', rhs_name='TRAIN_DATASET')",
"Check for evaluation anomalies\nDoes our evaluation dataset match the schema from our training dataset? This is especially important for categorical features, where we want to identify the range of acceptable values.\nKey Point: What would happen if we tried to evaluate using data with categorical feature values that were not in our training dataset? What about numeric features that are outside the ranges in our training dataset?",
"# Check eval data for errors by validating the eval data stats using the previously inferred schema.\n# TODO: Your code goes here\ntfdv.display_anomalies(anomalies)",
"Fix evaluation anomalies in the schema\nOops! It looks like we have some new values for company in our evaluation data, that we didn't have in our training data. We also have a new value for payment_type. These should be considered anomalies, but what we decide to do about them depends on our domain knowledge of the data. If an anomaly truly indicates a data error, then the underlying data should be fixed. Otherwise, we can simply update the schema to include the values in the eval dataset.\nKey Point: How would our evaluation results be affected if we did not fix these problems?\nUnless we change our evaluation dataset we can't fix everything, but we can fix things in the schema that we're comfortable accepting. That includes relaxing our view of what is and what is not an anomaly for particular features, as well as updating our schema to include missing values for categorical features. TFDV has enabled us to discover what we need to fix.\nLet's make those fixes now, and then review one more time.",
"# Relax the minimum fraction of values that must come from the domain for feature company.\ncompany = tfdv.get_feature(schema, 'company')\ncompany.distribution_constraints.min_domain_mass = 0.9\n\n# Add new value to the domain of feature payment_type.\npayment_type_domain = tfdv.get_domain(schema, 'payment_type')\npayment_type_domain.value.append('Prcard')\n\n# Validate eval stats after updating the schema \n# TODO: Your code goes here\ntfdv.display_anomalies(updated_anomalies)",
"Hey, look at that! We verified that the training and evaluation data are now consistent! Thanks TFDV ;)\nSchema Environments\nWe also split off a 'serving' dataset for this example, so we should check that too. By default all datasets in a pipeline should use the same schema, but there are often exceptions. For example, in supervised learning we need to include labels in our dataset, but when we serve the model for inference the labels will not be included. In some cases introducing slight schema variations is necessary.\nEnvironments can be used to express such requirements. In particular, features in schema can be associated with a set of environments using default_environment, in_environment and not_in_environment.\nFor example, in this dataset the tips feature is included as the label for training, but it's missing in the serving data. Without environment specified, it will show up as an anomaly.",
"serving_stats = tfdv.generate_statistics_from_csv(SERVING_DATA)\nserving_anomalies = tfdv.validate_statistics(serving_stats, schema)\n\ntfdv.display_anomalies(serving_anomalies)",
"We'll deal with the tips feature below. We also have an INT value in our trip seconds, where our schema expected a FLOAT. By making us aware of that difference, TFDV helps uncover inconsistencies in the way the data is generated for training and serving. It's very easy to be unaware of problems like that until model performance suffers, sometimes catastrophically. It may or may not be a significant issue, but in any case this should be cause for further investigation.\nIn this case, we can safely convert INT values to FLOATs, so we want to tell TFDV to use our schema to infer the type. Let's do that now.",
"options = tfdv.StatsOptions(schema=schema, infer_type_from_schema=True)\nserving_stats = tfdv.generate_statistics_from_csv(SERVING_DATA, stats_options=options)\nserving_anomalies = tfdv.validate_statistics(serving_stats, schema)\n\ntfdv.display_anomalies(serving_anomalies)",
"Now we just have the tips feature (which is our label) showing up as an anomaly ('Column dropped'). Of course we don't expect to have labels in our serving data, so let's tell TFDV to ignore that.",
"# All features are by default in both TRAINING and SERVING environments.\nschema.default_environment.append('TRAINING')\nschema.default_environment.append('SERVING')\n\n# Specify that 'tips' feature is not in SERVING environment.\ntfdv.get_feature(schema, 'tips').not_in_environment.append('SERVING')\n\nserving_anomalies_with_env = tfdv.validate_statistics(\n serving_stats, schema, environment='SERVING')\n\ntfdv.display_anomalies(serving_anomalies_with_env)",
"Check for drift and skew\nIn addition to checking whether a dataset conforms to the expectations set in the schema, TFDV also provides functionalities to detect drift and skew. TFDV performs this check by comparing the statistics of the different datasets based on the drift/skew comparators specified in the schema.\nDrift\nDrift detection is supported for categorical features and between consecutive spans of data (i.e., between span N and span N+1), such as between different days of training data. We express drift in terms of L-infinity distance, and you can set the threshold distance so that you receive warnings when the drift is higher than is acceptable. Setting the correct distance is typically an iterative process requiring domain knowledge and experimentation.\nSkew\nTFDV can detect three different kinds of skew in your data - schema skew, feature skew, and distribution skew.\nSchema Skew\nSchema skew occurs when the training and serving data do not conform to the same schema. Both training and serving data are expected to adhere to the same schema. Any expected deviations between the two (such as the label feature being only present in the training data but not in serving) should be specified through environments field in the schema.\nFeature Skew\nFeature skew occurs when the feature values that a model trains on are different from the feature values that it sees at serving time. For example, this can happen when:\n\nA data source that provides some feature values is modified between training and serving time\nThere is different logic for generating features between training and serving. For example, if you apply some transformation only in one of the two code paths.\n\nDistribution Skew\nDistribution skew occurs when the distribution of the training dataset is significantly different from the distribution of the serving dataset. One of the key causes for distribution skew is using different code or different data sources to generate the training dataset. 
Another reason is a faulty sampling mechanism that chooses a non-representative subsample of the serving data to train on.",
"# Add skew comparator for 'payment_type' feature.\npayment_type = tfdv.get_feature(schema, 'payment_type')\npayment_type.skew_comparator.infinity_norm.threshold = 0.01\n\n# Add drift comparator for 'company' feature.\ncompany=tfdv.get_feature(schema, 'company')\ncompany.drift_comparator.infinity_norm.threshold = 0.001\n\n# TODO: Your code goes here\n\ntfdv.display_anomalies(skew_anomalies)",
"In this example we do see some drift, but it is well below the threshold that we've set.\nFreeze the schema\nNow that the schema has been reviewed and curated, we will store it in a file to reflect its \"frozen\" state.",
"from tensorflow.python.lib.io import file_io\nfrom google.protobuf import text_format\n\nfile_io.recursive_create_dir(OUTPUT_DIR)\nschema_file = os.path.join(OUTPUT_DIR, 'schema.pbtxt')\ntfdv.write_schema_text(schema, schema_file)\n\n!cat {schema_file}",
"When to use TFDV\nIt's easy to think of TFDV as only applying to the start of your training pipeline, as we did here, but in fact it has many uses. Here are a few more:\n\nValidating new data for inference to make sure that we haven't suddenly started receiving bad features\nValidating new data for inference to make sure that our model has trained on that part of the decision surface\nValidating our data after we've transformed it and done feature engineering (probably using TensorFlow Transform) to make sure we haven't done something wrong"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
opengeostat/pygslib
|
pygslib/Ipython_templates/qqplt_html.ipynb
|
mit
|
[
"PyGSLIB\nQQ and PP plots",
"#general imports \nimport pygslib\n",
"Getting the data ready for work\nIf the data is in GSLIB format you can use the function pygslib.gslib.read_gslib_file(filename) to import the data into a Pandas DataFrame.",
"#get the data in gslib format into a pandas Dataframe\ncluster= pygslib.gslib.read_gslib_file('../data/cluster.dat') \ntrue= pygslib.gslib.read_gslib_file('../data/true.dat') \n\ntrue['Declustering Weight'] = 1\n",
"QQ-Plot",
"npoints = len(cluster['Primary'])\ntrue['Declustering Weight'] = 1\n\n\n#using declustering wight\nparameters_qpplt = {\n # gslib parameters for qq-pp calculation \n 'qqorpp': 0, # integer (Optional, default 0, Q-Q plot). Q-Q plot (qqorpp=0); P-P plot (qqorpp=1)\n #'npts' : None, # integer (Optional, default min length of va1 and va2). Number of points to use on the Q-Q or P-P plot (should not exceed the smallest number of data in data1 / data2\n 'va1' : cluster['Primary'], # rank-1 array('d') with bounds (nd). Variable 1\n 'wt1' : cluster['Declustering Weight'], # rank-1 array('d') with bounds (nd) (Optional, set to array of ones). Declustering weight for variable 1. \n 'va2' : true['Primary'], # rank-1 array('d') with bounds (nd). Variable 2\n 'wt2' : true['Declustering Weight'], # rank-1 array('d') with bounds (nd) (Optional, set to array of ones). Declustering weight for variable 2.\n # visual parameters for figure (if a new figure is created)\n #'figure' : None, # a bokeh figure object (Optional: new figure created if None). Set none or undefined if creating a new figure. \n #'title' : None, # string (Optional, \"QQ plot\" or \"PP plot\"). Figure title\n #'xlabel' : 'Z1', # string (Optional, default \"Z1\" or \"P1\"). X axis label \n #'ylabel' : 'Z2', # string (Optional, default \"Z2\" or \"P2\"). Y axis label\n #'xlog' : True, # boolean (Optional, default True). If true plot X axis in log sale.\n #'ylog' : True, # boolean (Optional, default True). If true plot Y axis in log sale. \n # visual parameter for the probplt\n #'style' : None, # string with valid bokeh chart type \n 'color' : 'black', # string with valid CSS colour (https://www.w3schools.com/colors/colors_names.asp), or an RGB(A) hex value, or tuple of integers (r,g,b), or tuple of (r,g,b,a) (Optional, default \"navy\")\n 'legend': 'Declustered', # string (Optional, default \"NA\"). \n #'alpha' : None, # float [0-1] (Optional, default 0.5). 
Transparency of the fill colour \n #'lwidth': None, # float (Optional, default 1). Line width\n # leyend\n 'legendloc': None} # float (Optional, default 'bottom_right'). Any of top_left, top_center, top_right, center_right, bottom_right, bottom_center, bottom_left, center_left \n\n# Calculate the non declustered qq plot \nresults, fig = pygslib.plothtml.qpplt(parameters_qpplt)\n\n# Calculate declustered qqplot\n\n# a) get array of ones as weights \ncluster['naive']= cluster['Declustering Weight'].values*0 +1\n\n# update parameter dic\nparameters_qpplt['wt1'] = cluster['naive']\nparameters_qpplt['color'] = 'blue'\nparameters_qpplt['legend']='Clustered'\nresults, fig = pygslib.plothtml.qpplt(parameters_qpplt)\n\n# show the plot\npygslib.plothtml.show(fig)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
bureaucratic-labs/yargy
|
docs/cookbook.ipynb
|
mit
|
[
"Полезные практики, неочевидные моменты\nНеоднозная грамматика\nЕсть примитивная рекурсивная грамматика:\nA -> a | a a\nB -> A B | A\nЕсть строка \"a a a\". Парсер может разобрать её 3 способами:\n(a) (a) (a)\n(a) (a a)\n(a a) (a)\nYargy парсер перебирает все варианты разбора. Используем непубличный метод extract, посмотрим все варианты:",
"from yargy.parser import prepare_trees\nfrom yargy import Parser, or_, rule\n\nA = or_(\n rule('a'),\n rule('a', 'a') \n)\nB = A.repeatable()\ndisplay(B.normalized.as_bnf)\n\nparser = Parser(B)\nmatches = parser.extract('a a a')\nfor match in matches:\n # кроме 3-х полных разборов, парсёр найдёт ещё 7 частичных: (a) _ _, (a a) _, (a) (a) _, ...\n # не будем их показывать\n if len(match.tokens) == 3:\n display(match.tree.as_dot)",
"Число вариантов быстро растёт. Для строки \"a x 10\", парсер переберёт 89 разборов. Для \"a x 20\" — 979 и потратит заметное количество времени.\nПри работе с естественным русским языком, мы построянно сталкиваемся с неоднозначными грамматиками. Например, список из трёх взысканий по арбиражному делу: \"5 тыс. р. штраф пени 3 тыс. р. необоснованного обогащения\". Эскиз грамматики:\n```\nMONEY -> INT тыс. р.\nTYPE -> штраф | пени | необоснованное обогащение\n1. \"5 тыс. р. штраф\"\n2. \"штраф 5 тыс. р.\"\n3. \"3 тыс. р.\" — только сумма\n4. \"пени\" — только тип\nPENALTY -> MONEY TYPE | TYPE MONEY | MONEY | TYPE\nPENALTIES -> PENALTY+\n```\nПолучаем много вариантов разбора:\n(5 тыс. р. штраф) (пени) (3 тыс. р. необоснованного обогащения)\n(5 тыс. р. штраф) (пени) (3 тыс. р.) (необоснованного обогащения)\n(5 тыс. р.) (штраф) (пени) (3 тыс. р.) (необоснованного обогащения)\n(5 тыс. р. штраф) (пени 3 тыс. р.) (необоснованного обогащения)\n...\nСамый просто способ избежать комбинаторного взрыва числа разборов — ограничить repeatable. Вместо PENALTIES = PENALTY.repeatable(), напишем PENALTIES = PENALTY.repeatable(max=5). Такое правило отбросить 6-е и последующие взыскания, но завершится в ограниченное время.\nCappedParser\nЕсть ещё один способ избежать комбинаторного взрыва числа разборов: выключать парсер, когда число состояний превысило порог. CappedParser наследует Parser, оборачивает внутренние методы chart, predict, scan, complete — шаги алгоритма Earley-парсера:",
"class TooManyStates(Exception): pass\n\n\ndef capped(method):\n def wrap(self, column, *args):\n before = len(column.states)\n method(self, column, *args)\n after = len(column.states)\n\n self.states += (after - before)\n if self.cap and self.states > self.cap:\n raise TooManyStates\n\n return wrap\n\n\nclass CappedParser(Parser):\n def reset(self):\n self.states = 0\n\n def __init__(self, *args, cap=None, **kwargs):\n self.cap = cap\n self.reset()\n Parser.__init__(self, *args, **kwargs)\n\n def chart(self, *args, **kwargs):\n self.reset()\n return Parser.chart(self, *args, **kwargs)\n\n predict = capped(Parser.predict)\n scan = capped(Parser.scan)\n complete = capped(Parser.complete)\n \n\nparser = CappedParser(B, cap=100)\nfor size in range(3, 10):\n text = 'a ' * size\n print(text)\n try:\n parser.match(text)\n except TooManyStates:\n print('TooManyStates')\n else:\n print('OK')\n",
"Порядок аргументов в or_ имеет значение\nКогда разборов больше одного, парсер возвращает самый левый вариант:",
"A = or_(\n rule('a'),\n rule('a', 'a') \n)\nB = A.repeatable()\n\nparser = Parser(B)\nmatch = parser.match('a a a')\nmatch.tree.as_dot",
"Переставим местами a a и a, результат поменяется:",
"A = or_(\n rule('a', 'a'),\n rule('a')\n)\nB = A.repeatable()\n\nparser = Parser(B)\nmatch = parser.match('a a a')\nmatch.tree.as_dot",
"На практике это важно. В примере со взыскиниями грамматика:\nPENALTY -> MONEY TYPE | TYPE MONEY | MONEY | TYPE\nЛевый разбор, не то, что ожидалось:\n(5 тыс. р. штраф) (пени 3 тыс. р.) (необоснованного обогащения)`\nПереставим аргументы:\nPENALTY -> MONEY TYPE | TYPE | TYPE MONEY | MONEY\nПолучим:\n(5 тыс. р. штраф) (пени) (3 тыс. р. необоснованного обогащения)`\nIdTokenizer\nParser принимает на вход текст. Первым делом парсер разделяет текст на токены. Токенизатор передаётся необязательным аргументом tokenizer: Parser(RULE, tokenizer=Tokenizer()). Токенизатор по-умолчанию — yargy.tokenizer.MorphTokenizer.\nБывает нужно обработать уже токенизированный текст. Например, есть два парсера, нужно обоими обработать один текст. Хотим сэкономить время, не токенизировать текст дважды. Заведём парсер-обёртку, он ничего не делает, принимает и возращает токены:",
"from yargy.tokenizer import (\n Tokenizer,\n MorphTokenizer,\n EOL\n)\n\n\n# Стандартный токенизатор. Удаляем правило для переводом строк.\n# Обычно токены с '\\n' только мешаются.\nTOKENIZER = MorphTokenizer().remove_types(EOL)\n\n\nclass IdTokenizer(Tokenizer):\n def __init__(self, tokenizer):\n self.tokenizer = tokenizer\n\n # Используется при инициализации morph_pipeline, caseless_pipeline.\n # Строки-аргументы pipeline нужно разделить на слова. Как разделить,\n # например, \"кейс| |dvd-диска\" или \"кейс| |dvd|-|диска\"? Используем стандартный токенизатор.\n def split(self, text):\n return self.tokenizer.split(text)\n\n # Используется при инициализации предикатов. Например, есть предикат type('INT').\n # Поддерживает ли токенизатор тип INT?\n def check_type(self, type):\n return self.tokenizer.check_type(type)\n\n @property\n def morph(self):\n return self.tokenizer.morph\n\n def __call__(self, tokens):\n return tokens\n\n\nID_TOKENIZER = IdTokenizer(TOKENIZER)\n\ntokens = TOKENIZER('a a a a')\nparser = Parser(B, tokenizer=ID_TOKENIZER)\nparser.match(tokens);",
"ValueError: no .interpretation(...) for root rule\nЕсть два правила, хотим найти факты, где сработало одно из них:",
"from yargy.interpretation import fact\n\n\nF = fact('F', ['a'])\nG = fact('G', ['b'])\n\n\nA = rule('a').interpretation(F.a).interpretation(F)\nB = rule('b').interpretation(G.b).interpretation(G)\nC = or_(A, B)\nparser = Parser(C)\n\nmatch = parser.match('a')\n# match.fact ValueError",
"Ожидаем F(a='a'), получаем ValueError: no .interpretation(...) for root rule. На вершине-корне нет пометки контруктора факта:",
"C.as_dot",
"Создадим прокси-факт:",
"Proxy = fact('Proxy', ['value'])\n\nC = or_(A, B).interpretation(Proxy.value).interpretation(Proxy)\ndisplay(C.as_dot)\n\nparser = Parser(C)\nmatch = parser.match('a')\nmatch.fact.value\n",
"TypeError: mixed types\nНабор аргументов or_ бывает двух видов:\n1. Все предикаты, тогда результат — предикат\n2. Все rule, тогда результат — rule\nИногда правило состоит из одного предиката, передаём его в or_, получаем ошибку:",
"from yargy.predicates import caseless\n\nA = rule('a')\nB = caseless('b')\n# or_(A, B) # TypeError: mixed types: [<class 'yargy.rule.constructors.Rule'>, <class 'yargy.predicates.bank.eq'>]",
"Явно завернём предикат в rule:",
"B = rule(caseless('b'))\nC = or_(A, B)",
"Машинное обучение и Yargy\nЕсть текст размеченный BIO-тегами:",
"text = '15 апреля в Симферополе Леонид Рожков ...'\ntags = 'B I O B B I O'.split()",
"Parser принимает необязательный аргумент tagger: Parser(RULE, tagger=Tagger). Tagger принимает и возвращает список токенов. Добавим внешнюю разметку tags в токены. Используем предикат tag, извлечём сущности:",
"from yargy.tagger import Tagger\nfrom yargy.predicates import tag\n\n\nclass Tagger(Tagger):\n # Все возможные теги. Используется при инициализации предиката tag.\n # Если пользователь создаст tag('FOO'), будет ошибка\n tags = {'B', 'I', 'O'}\n\n def __call__(self, tokens):\n for token, tag in zip(tokens, tags):\n yield token.tagged(tag)\n\n\nRULE = rule(\n tag('B'),\n tag('I').repeatable().optional()\n)\nparser = Parser(RULE, tagger=Tagger())\n\nmatches = parser.findall(text)\nfor match in matches:\n print([_.value for _ in match.tokens])",
"Пропустить часть текста\nЕсть текст \"взыскать 5 тыс. р. штрафа, а также пени и неустойку\". Нужно выделить 3 взыскания \"5 тыс. р. штраф\", \"пени\", \"неустойка\", пропустить подстроки \", а также\", \"и\". Запустить парсер 2 раза: сначала выделим взыскания, удалим лишние токены, запустим парсер ещё раз:",
"from yargy.pipelines import morph_pipeline\n\ntext = 'взыскать 5 тыс. р. штрафа, а также пени и неустойку'\ntokens = list(TOKENIZER(text))\n\n\nPAYMENT = morph_pipeline([\n '5 тыс. р. штраф',\n 'пени',\n 'неустойка'\n])\nparser = Parser(PAYMENT, tokenizer=ID_TOKENIZER)\n\nmatches = parser.findall(tokens)\nspans = [_.span for _ in matches]\nprint(spans)\n\n\ndef is_inside_span(token, span):\n token_span = token.span\n return span.start <= token_span.start and token_span.stop <= span.stop\n\n\ndef select_span_tokens(tokens, spans):\n for token in tokens:\n if any(is_inside_span(token, _) for _ in spans):\n yield token\n\n\ntokens = list(select_span_tokens(tokens, spans))\nprint([_.value for _ in tokens])\n\nPAYMENTS = PAYMENT.repeatable()\nparser = Parser(PAYMENTS, tokenizer=ID_TOKENIZER)\nmatch = parser.match(tokens)",
"Генерация правил\nВ Yargy все правила описываются на языке Python. Создадим функцию, которая генерирует правило. Например, правило для текста в скобочка и кавычках:",
"from yargy import not_\nfrom yargy.predicates import eq\n\n\ndef bounded(start, stop):\n return rule(\n eq(start),\n not_(eq(stop)).repeatable(),\n eq(stop)\n )\n\n\nBOUNDED = or_(\n bounded('[', ']'),\n bounded('«', '»')\n)\nparser = Parser(BOUNDED)\nmatches = parser.findall('[a b] {c d} «e f»')\nfor match in matches:\n print([_.value for _ in match.tokens])",
"Правило — аналог join в Python:",
"from yargy.predicates import in_\n\n\ndef joined(ITEM, SEP):\n return rule(\n ITEM,\n rule(\n SEP,\n ITEM\n ).repeatable().optional()\n )\n\n\nSEP = in_(',;')\nJOINED = joined(BOUNDED, SEP)\nparser = Parser(JOINED)\nmatch = parser.match('[a b], [c d], [e f g]')",
"Правило для BIO-разметки:",
"def bio(type):\n return rule(\n tag('B-%s' % type),\n tag('I-%s' % type).repeatable().optional()\n )\n\n\nPER = bio('PER')\nLOC = bio('LOC')\n\n\ntext = '15 апреля в Симферополе Леонид Рожков ...'\ntags = 'B-DATE I-DATE O B-LOC B-PER I-PER O'.split()\n\n\nclass Tagger(Tagger):\n tags = {'B-PER', 'I-PER', 'B-LOC', 'I-LOC', 'O'}\n\n def __call__(self, tokens):\n for token, tag in zip(tokens, tags):\n yield token.tagged(tag)\n\n\nRULE = or_(PER, LOC)\nparser = Parser(RULE, tagger=Tagger())\nmatches = parser.findall(text)\nfor match in matches:\n print([_.value for _ in match.tokens])",
"Генерация pipeline\nСоздадим pipeline из словаря пар \"полное название\", \"сокращение\":",
"from yargy.pipelines import (\n morph_pipeline,\n pipeline\n)\nfrom yargy import interpretation as interp\n\n\nTYPES = [\n ('Общество с ограниченной ответственностью', 'ООО'),\n ('Акционерное общество', 'АО'),\n ('Страховая компания', 'СК'),\n ('Строительная компания', 'СК')\n]\n\nTYPE = or_(\n morph_pipeline([\n name for name, abbr in TYPES\n ]),\n pipeline([\n abbr for name, abbr in TYPES\n ])\n)\n\nRULE = TYPE.repeatable()\nparser = Parser(RULE)\nmatches = parser.findall('Акционерное общество, в Акционерном обществе; АО, СК')\nfor match in matches:\n print([_.value for _ in match.tokens])",
"Наследование fact\nfact создаёт Python-класс, отнаследуемся, добавим методы и атрибуты. Например, есть ссылка на статьи \"ст. 15-17 п.1\", результат список объектов Ref(art=15, punkt=1), Ref(art=16, punkt=1), ...:",
"from collections import namedtuple\n\nfrom yargy.predicates import type\n\n\nRef_ = namedtuple(\n 'Ref',\n ['art', 'punkt']\n)\n\n\nArt = fact(\n 'Art',\n ['start', 'stop']\n)\nclass Art(Art):\n def range(self):\n if self.stop:\n return range(self.start, self.stop + 1)\n else:\n return [self.start]\n\n\nPunkt = fact(\n 'Punkt',\n ['number']\n)\n\n\nRef = fact(\n 'Ref',\n ['art', 'punkt']\n)\nclass Ref(Ref):\n def range(self):\n for art in self.art.range():\n punkt = (\n self.punkt.number\n if self.punkt\n else None\n )\n yield Ref_(art, punkt)\n \n \nINT = type('INT')\n\nART = rule(\n 'ст', '.',\n INT.interpretation(Art.start.custom(int)),\n rule(\n '-',\n INT.interpretation(Art.stop.custom(int))\n ).optional()\n).interpretation(Art)\n\nPUNKT = rule(\n 'п', '.',\n INT.interpretation(Punkt.number.custom(int))\n).interpretation(Punkt)\n\nREF = rule(\n ART.interpretation(Ref.art),\n PUNKT.optional().interpretation(Ref.punkt)\n).interpretation(Ref)\n\nparser = Parser(REF)\nlines = [\n 'ст. 15-17 п.1',\n 'ст. 15 п.2',\n 'ст. 16'\n]\nfor line in lines:\n print(line)\n match = parser.match(line)\n print(list(match.fact.range()))",
"Есть периоды \"1917-1918г.\", \"21 век\", приведём их к единому формату: Period(1917, 1919), Period(2000, 2100).",
"Period_ = namedtuple('Period', ['start', 'stop'])\n\n\nYear = fact(\n 'Year',\n ['value']\n)\nclass Year(Year):\n @property\n def normalized(self):\n return Period_(self.value, self.value + 1)\n\n\nYearRange = fact(\n 'YearRange',\n ['start', 'stop']\n)\nclass YearRange(YearRange):\n @property\n def normalized(self):\n return Period_(self.start, self.stop + 1)\n \n \nCentury = fact(\n 'Century',\n ['value']\n)\nclass Century(Century):\n @property\n def normalized(self):\n start = (self.value - 1) * 100\n return Period_(start, start + 100)\n\n\nPeriod = fact(\n 'Period',\n ['value']\n)\nclass Period(Period):\n @property\n def normalized(self):\n return self.value.normalized\n\n\nYEAR = rule(\n INT.interpretation(Year.value.custom(int)),\n 'г', '.'\n).interpretation(Year)\n\nYEAR_RANGE = rule(\n INT.interpretation(YearRange.start.custom(int)),\n '-',\n INT.interpretation(YearRange.stop.custom(int)),\n 'г', '.'\n).interpretation(YearRange)\n\nCENTURY = rule(\n INT.interpretation(Century.value.custom(int)),\n 'век'\n).interpretation(Century)\n\nPERIOD = or_(\n YEAR,\n YEAR_RANGE,\n CENTURY\n).interpretation(Period.value).interpretation(Period)\n\nparser = Parser(PERIOD)\nlines = [\n '1917-1918г.',\n '21 век',\n '1990г.'\n]\nfor line in lines:\n match = parser.match(line)\n print(line)\n print(match.fact.normalized)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mohanprasath/Course-Work
|
numpy/numpy_exercises_from_kyubyong/Sorting_searching_and_counting_Solutions.ipynb
|
gpl-3.0
|
[
"Soring, searching, and counting",
"import numpy as np\n\nnp.__version__\n\nauthor = 'kyubyong. longinglove@nate.com'",
"Sorting\nQ1. Sort x along the second axis.",
"x = np.array([[1,4],[3,1]])\nout = np.sort(x, axis=1)\nx.sort(axis=1)\nassert np.array_equal(out, x)\nprint out",
"Q2. Sort pairs of surnames and first names and return their indices. (first by surname, then by name).",
"surnames = ('Hertz', 'Galilei', 'Hertz')\nfirst_names = ('Heinrich', 'Galileo', 'Gustav')\nprint np.lexsort((first_names, surnames))",
"Q3. Get the indices that would sort x along the second axis.",
"x = np.array([[1,4],[3,1]])\nout = np.argsort(x, axis=1)\nprint out",
"Q4. Create an array such that its fifth element would be the same as the element of sorted x, and it divide other elements by their value.",
"x = np.random.permutation(10)\nprint \"x =\", x\nprint \"\\nCheck the fifth element of this new array is 5, the first four elements are all smaller than 5, and 6th through the end are bigger than 5\\n\", \nout = np.partition(x, 5)\nx.partition(5) # in-place equivalent\nassert np.array_equal(x, out)\nprint out\n",
"Q5. Create the indices of an array such that its third element would be the same as the element of sorted x, and it divide other elements by their value.",
"x = np.random.permutation(10)\nprint \"x =\", x\npartitioned = np.partition(x, 3)\nindices = np.argpartition(x, 3)\nprint \"partitioned =\", partitioned\nprint \"indices =\", partitioned\nassert np.array_equiv(x[indices], partitioned)",
"Searching\nQ6. Get the maximum and minimum values and their indices of x along the second axis.",
"x = np.random.permutation(10).reshape(2, 5)\nprint \"x =\", x\nprint \"maximum values =\", np.max(x, 1)\nprint \"max indices =\", np.argmax(x, 1)\nprint \"minimum values =\", np.min(x, 1)\nprint \"min indices =\", np.argmin(x, 1)\n",
"Q7. Get the maximum and minimum values and their indices of x along the second axis, ignoring NaNs.",
"x = np.array([[np.nan, 4], [3, 2]])\nprint \"maximum values ignoring NaNs =\", np.nanmax(x, 1)\nprint \"max indices =\", np.nanargmax(x, 1)\nprint \"minimum values ignoring NaNs =\", np.nanmin(x, 1)\nprint \"min indices =\", np.nanargmin(x, 1)",
"Q8. Get the values and indices of the elements that are bigger than 2 in x.",
"x = np.array([[1, 2, 3], [1, 3, 5]])\nprint \"Values bigger than 2 =\", x[x>2]\nprint \"Their indices are \", np.nonzero(x > 2)\nassert np.array_equiv(x[x>2], x[np.nonzero(x > 2)])\nassert np.array_equiv(x[x>2], np.extract(x > 2, x))",
"Q9. Get the indices of the elements that are bigger than 2 in the flattend x.",
"x = np.array([[1, 2, 3], [1, 3, 5]])\nprint np.flatnonzero(x)\nassert np.array_equiv(np.flatnonzero(x), x.ravel().nonzero())",
"Q10. Check the elements of x and return 0 if it is less than 0, otherwise the element itself.",
"x = np.arange(-5, 4).reshape(3, 3)\nprint np.where(x <0, 0, x)",
"Q11. Get the indices where elements of y should be inserted to x to maintain order.",
"x = [1, 3, 5, 7, 9]\ny = [0, 4, 2, 6]\nnp.searchsorted(x, y)",
"Counting\nQ12. Get the number of nonzero elements in x.",
"x = [[0,1,7,0,0],[3,0,0,2,19]]\nprint np.count_nonzero(x)\nassert np.count_nonzero(x) == len(x[x!=0])"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
econ-ark/HARK
|
examples/GenIncProcessModel/GenIncProcessModel.ipynb
|
apache-2.0
|
[
"Permanent versus Persistent Income Shocks",
"# Initial imports and notebook setup\nimport matplotlib.pyplot as plt\n\nfrom copy import copy\nfrom HARK.utilities import plot_funcs\nimport numpy as np\n\nfrom HARK.ConsumptionSaving.ConsGenIncProcessModel import (\n IndShockExplicitPermIncConsumerType,\n PersistentShockConsumerType,\n)\nfrom HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType",
"ConsIndShockModel assumes that income has a permanent component $p$ which is subject to \"permanent\" shocks:\n$\\log p_{t+1} = \\log p_{t} + \\log \\psi_{t+1}$\nMany papers in the literature instead examine models in which shocks merely have some persistence,\n$\\log p_{t+1} = \\gamma \\log p_{t} + \\log \\psi_{t+1}$\nwhere if $0 < \\gamma < 1$ then $\\lim_{n \\uparrow \\infty} \\mathbb{E}{t}[\\log p{t+n}]=0$ (which means that the level of $p$ reverts to its mean of $p=1$. The two models become identical as $\\gamma$ approaches 1.\nThis notebook describes HARK's tools to solve models with persistent shocks.\n\nConsGenIncProcessModel extends ConsIndShockModel by explicitly tracking persistent income $p_t$ as a state variable.\nIndShockExplicitPermIncConsumerType is a type of consumer created for comparison and for whom we know for sure that their income process is one in which $\\gamma=1$\n\nGeneral Income Process model\nIn ConsGenIncProcessModel the user can define a generic function $G$ that translates current $p_{t}$ into expected next period persistent income $p_{t+1}$ (subject to shocks). \nThe agent's problem can be written in Bellman form as:\n\\begin{eqnarray}\nv_t(M_t,p_t) &=& \\max_{c_t} U(c_t) + \\beta (1-\\mathsf{D}{t+1}) \\mathbb{E}{t} [v_{t+1}(M_{t+1}, p_{t+1})] \\\na_t &=& M_t - c_t \\\na_t &\\geq& \\underline{a} \\\nM_{t+1} &=& R a_t + \\theta_{t+1} \\\np_{t+1} &=& G_{t+1}(p_t)\\psi_{t+1} \\\n\\psi_t \\sim F_{\\psi t} &\\qquad& \\theta_t \\sim F_{\\theta t} \\\n \\mathbb{E} [F_{\\psi t}] = 1 & & \\mathbb{E} [F_{\\psi t}] =1 \\\nU(c) &=& \\frac{c^{1-\\rho}}{1-\\rho}\n\\end{eqnarray}\nThe one-period problem for this model is solved by the function solveConsGenIncProcess, which creates an instance of the class ConsGenIncProcessSolver. The class GenIncProcessConsumerType extends IndShockConsumerType to represent agents in this model. 
To construct an instance of this class, several parameters must be passed to the constructor, as shown in the table below (parameters can be either \"primitive\" or \"constructed\" if they have already built-in capabilities from previous models).\nExample parameter values to solve GenIncProcess model\n| Param | Description | Code | Value | Constructed |\n| :---: | --- | --- | --- | :---: |\n| $\\beta$ |Intertemporal discount factor | $\\texttt{DiscFac}$ | 0.96 | |\n| $\\rho$ |Coefficient of relative risk aversion | $\\texttt{CRRA}$ | 2.0 | |\n| $R$ | Risk free interest factor | $\\texttt{Rfree}$ | 1.03 | |\n| $1 - \\mathsf{D}$ |Survival probability | $\\texttt{LivPrb}$ | [0.98] | |\n| $\\underline{a}$ |Artificial borrowing constraint | $\\texttt{BoroCnstArt}$ | 0.0 | | \n| $(none)$ |Indicator of whether $\\texttt{vFunc}$ should be computed | $\\texttt{vFuncBool}$ | 'True' | |\n| $(none)$ |Indicator of whether $\\texttt{cFunc}$ should use cubic lines | $\\texttt{CubicBool}$ | 'False' | |\n|$F$ |A list containing three arrays of floats, representing a discrete <br> approximation to the income process: <br>event probabilities, persistent shocks, transitory shocks | $\\texttt{IncomeDstn}$ | - |$\\surd$ |\n| $G$ |Expected persistent income next period | $\\texttt{pLvlNextFunc}$ | - | $\\surd$ |\n| $(none)$ |Array of time-varying persistent income levels | $\\texttt{pLvlGrid}$ | - |$\\surd$ |\n| $(none)$ | Array of \"extra\" end-of-period asset values | $\\texttt{aXtraGrid}$ | - |$\\surd$ |\nConstructed inputs to solve GenIncProcess\nThe \"constructed\" inputs above are using expected attributes and are drawn on various methods as explained below.\n\n\nThe input $\\texttt{IncomeDstn}$ is created by the method update_income_process which inherits from IndShockConsumerType.\n\n\nThe input $\\texttt{pLvlNextFunc}$ is created by the method updatepLvlNextFunc which uses the initial sequence of $\\texttt{pLvlNextFunc}$, the mean and standard deviation of the (log) initial 
permanent income, $\\texttt{pLvlInitMean}$ and $\\texttt{pLvlInitStd}$. \nIn this model, the method creates a trivial $\\texttt{pLvlNextFunc}$ attribute with no persistent income dynamics. But we can overwrite it by subclasses in order to make an AR1 income process for example. \n\n\nThe input $\\texttt{pLvlGrid}$ is created by the method updatepLvlGrid which updates the grid of persistent income levels for infinite horizon models (cycles=0) and lifecycle models (cycles=1). This method draws on the initial distribution of persistent income, the $\\texttt{pLvlNextFuncs}$, $\\texttt{pLvlInitMean}$, $\\texttt{pLvlInitStd}$ and the attribute $\\texttt{pLvlPctiles}$ (percentiles of the distribution of persistent income). It then uses a simulation approach to generate the $\\texttt{pLvlGrid}$ at each period of the cycle.\n\n\nThe input $\\texttt{aXtraGrid}$ is created by $\\texttt{updateAssetsGrid}$ which updates the agent's end-of-period assets grid by constructing a multi-exponentially spaced grid of aXtra values, based on $\\texttt{aNrmInitMean}$ and $\\texttt{aNrmInitStd}$. \n\n\n1. 
Consumer with Explicit Permanent Income\nLet's make an example of our generic model above with an \"explicit permanent income\" consumer who experiences idiosyncratic shocks to permanent and transitory, and faces permanent income growth.\nThe agent's problem can be written in Bellman form as:\n\\begin{eqnarray}\nv_t(M_t,p_t) &=& \\max_{c_t} U(c_t) + \\beta (1-\\mathsf{D}{t+1}) \\mathbb{E} [v{t+1}(M_{t+1}, p_{t+1}) ], \\\na_t &=& M_t - c_t, \\\na_t &\\geq& \\underline{a}, \\\nM_{t+1} &=& R/(\\Gamma_{t+1} \\psi_{t+1}) a_t + \\theta_{t+1}, \\\np_{t+1} &=& G_{t+1}(p_t)\\psi_{t+1}, \\\n\\psi \\sim F_{\\psi}, \\mathbb{E} [F_{\\psi t}] = 1 &\\qquad& \\theta_t \\sim F_{\\theta}, \\mathbb{E} [F_{\\psi}] = 1, \\\nU(c) &=& \\frac{c^{1-\\rho}}{1-\\rho}.\n\\end{eqnarray}\nThis agent type is identical to an IndShockConsumerType but for explicitly tracking $\\texttt{pLvl}$ as a state variable during solution as shown in the mathematical representation of GenIncProcess model. \nTo construct IndShockExplicitPermIncConsumerType as an instance of GenIncProcessConsumerType, we need to pass additional parameters to the constructor as shown in the table below.\nAdditional parameters to solve ExplicitPermInc model\n| Param | Description | Code | Value | Constructed |\n| :---: | --- | --- | --- | :---: |\n|(none)|percentiles of the distribution of persistent income|$\\texttt{pLvlPctiles}$|||\n| $G$ |Expected persistent income next period | $\\texttt{pLvlNextFunc}$ | - | $\\surd$ |\n|$\\Gamma$|Permanent income growth factor|$\\texttt{PermGroFac}$|[1.0]| |\nConstructed inputs to solve ExplicitPermInc\n\nIn this \"explicit permanent income\" model, we overwrite the method updatepLvlNextFunc to create $\\texttt{pLvlNextFunc}$ as a sequence of linear functions, indicating constant expected permanent income growth across permanent income levels. This method uses the attribute $\\texttt{PermGroFac}$, and installs a special retirement function when it exists.",
"# This cell defines a dictionary to make an instance of \"explicit permanent income\" consumer.\nGenIncDictionary = { \n \"CRRA\": 2.0, # Coefficient of relative risk aversion\n \"Rfree\": 1.03, # Interest factor on assets\n \"DiscFac\": 0.96, # Intertemporal discount factor\n \"LivPrb\" : [0.98], # Survival probability\n \"AgentCount\" : 10000, # Number of agents of this type (only matters for simulation)\n \"aNrmInitMean\" : 0.0, # Mean of log initial assets (only matters for simulation)\n \"aNrmInitStd\" : 1.0, # Standard deviation of log initial assets (only for simulation)\n \"pLvlInitMean\" : 0.0, # Mean of log initial permanent income (only matters for simulation)\n \"pLvlInitStd\" : 0.0, # Standard deviation of log initial permanent income (only matters for simulation)\n \"PermGroFacAgg\" : 1.0, # Aggregate permanent income growth factor (only matters for simulation)\n \"T_age\" : None, # Age after which simulated agents are automatically killed\n \"T_cycle\" : 1, # Number of periods in the cycle for this agent type\n# Parameters for constructing the \"assets above minimum\" grid\n \"aXtraMin\" : 0.001, # Minimum end-of-period \"assets above minimum\" value\n \"aXtraMax\" : 30, # Maximum end-of-period \"assets above minimum\" value \n \"aXtraExtra\" : [0.005,0.01], # Some other value of \"assets above minimum\" to add to the grid\n \"aXtraNestFac\" : 3, # Exponential nesting factor when constructing \"assets above minimum\" grid\n \"aXtraCount\" : 48, # Number of points in the grid of \"assets above minimum\"\n# Parameters describing the income process\n \"PermShkCount\" : 7, # Number of points in discrete approximation to permanent income shocks\n \"TranShkCount\" : 7, # Number of points in discrete approximation to transitory income shocks\n \"PermShkStd\" : [0.1], # Standard deviation of log permanent income shocks\n \"TranShkStd\" : [0.1], # Standard deviation of log transitory income shocks\n \"UnempPrb\" : 0.05, # Probability of unemployment while 
working\n \"UnempPrbRet\" : 0.005, # Probability of \"unemployment\" while retired\n \"IncUnemp\" : 0.3, # Unemployment benefits replacement rate\n \"IncUnempRet\" : 0.0, # \"Unemployment\" benefits when retired\n \"tax_rate\" : 0.0, # Flat income tax rate\n \"T_retire\" : 0, # Period of retirement (0 --> no retirement)\n \"BoroCnstArt\" : 0.0, # Artificial borrowing constraint; imposed minimum level of end-of period assets\n \"CubicBool\" : False, # Use cubic spline interpolation when True, linear interpolation when False\n \"vFuncBool\" : True, # Whether to calculate the value function during solution \n# More parameters specific to \"Explicit Permanent income\" shock model\n \"cycles\": 0,\n \"pLvlPctiles\" : np.concatenate(([0.001, 0.005, 0.01, 0.03], np.linspace(0.05, 0.95, num=19),[0.97, 0.99, 0.995, 0.999])),\n \"PermGroFac\" : [1.0], # Permanent income growth factor - long run permanent income growth doesn't work yet \n}",
"Let's now create an instance of the type of consumer we are interested in and solve this agent's problem with an infinite horizon (cycles=0).",
"# Make and solve an example \"explicit permanent income\" consumer with idiosyncratic shocks\nExplicitExample = IndShockExplicitPermIncConsumerType(**GenIncDictionary)\n \nprint('Here, the lowest percentile is ' + str(GenIncDictionary['pLvlPctiles'][0]*100))\nprint('and the highest percentile is ' + str(GenIncDictionary['pLvlPctiles'][-1]*100) + '.\\n')\n \nExplicitExample.solve()",
"In the cell below, we generate a plot of the consumption function for explicit permanent income consumer at different income levels.",
"# Plot the consumption function at various permanent income levels.\nprint('Consumption function by pLvl for explicit permanent income consumer:')\npLvlGrid = ExplicitExample.pLvlGrid[0]\nmLvlGrid = np.linspace(0,20,300)\nfor p in pLvlGrid:\n M_temp = mLvlGrid + ExplicitExample.solution[0].mLvlMin(p)\n C = ExplicitExample.solution[0].cFunc(M_temp,p*np.ones_like(M_temp))\n plt.plot(M_temp,C)\nplt.xlim(0.,20.)\nplt.xlabel('Market resource level mLvl')\nplt.ylabel('Consumption level cLvl')\nplt.show()",
"Permanent income normalized\nAn alternative model is to normalize it by dividing all variables by permanent income $p_t$ and solve the model again.",
"# Make and solve an example of normalized model\nNormalizedExample = IndShockConsumerType(**GenIncDictionary, verbose=0)\nNormalizedExample.solve()\n\n# Compare the normalized problem with and without explicit permanent income and plot the consumption functions\nprint('Normalized consumption function by pLvl for explicit permanent income consumer:')\npLvlGrid = ExplicitExample.pLvlGrid[0]\nmNrmGrid = np.linspace(0,20,300)\nfor p in pLvlGrid:\n M_temp = mNrmGrid*p + ExplicitExample.solution[0].mLvlMin(p)\n C = ExplicitExample.solution[0].cFunc(M_temp,p*np.ones_like(M_temp))\n plt.plot(M_temp/p,C/p)\n\nplt.xlim(0.,20.)\nplt.xlabel('Normalized market resources mNrm')\nplt.ylabel('Normalized consumption cNrm')\nplt.show()\n\nprint('Consumption function for normalized problem (without explicit permanent income):')\nmNrmMin = NormalizedExample.solution[0].mNrmMin\nplot_funcs(NormalizedExample.solution[0].cFunc,mNrmMin,mNrmMin+20.)",
"The figures above show that the normalized consumption function for the \"explicit permanent income\" consumer is almost identical for every permanent income level (and the same as the normalized problem's $\\texttt{cFunc}$), but is less accurate due to extrapolation outside the bounds of $\\texttt{pLvlGrid}$. \nThe \"explicit permanent income\" solution deviates from the solution to the normalized problem because of errors from extrapolating beyond the bounds of the $\\texttt{pLvlGrid}$. The error is largest for $\\texttt{pLvl}$ values near the upper and lower bounds, and propagates toward the center of the distribution.",
"# Plot the value function at various permanent income levels\nif ExplicitExample.vFuncBool:\n pGrid = np.linspace(0.1,3.0,24)\n M = np.linspace(0.001,5,300)\n for p in pGrid:\n M_temp = M+ExplicitExample.solution[0].mLvlMin(p)\n C = ExplicitExample.solution[0].vFunc(M_temp,p*np.ones_like(M_temp))\n plt.plot(M_temp,C)\n plt.ylim([-200,0])\n plt.xlabel('Market resource level mLvl')\n plt.ylabel('Value v')\n plt.show()\n\n# Simulate many periods to get to the stationary distribution\nExplicitExample.T_sim = 500\nExplicitExample.track_vars = ['mLvl','cLvl','pLvl']\nExplicitExample.initialize_sim()\nExplicitExample.simulate()\nplt.plot(np.mean(ExplicitExample.history['mLvl'],axis=1))\nplt.xlabel('Simulated time period')\nplt.ylabel('Average market resources mLvl')\nplt.show()",
"2. Persistent income shock consumer\nClass to solve consumption-saving models with idiosyncratic shocks to income in which shocks are persistent and transitory. This model extends ConsGenIndShockModel by allowing (log) persistent income to follow an AR(1) process.\nThe agent's problem can be written in Bellman form as:\n\\begin{eqnarray}\nv_t(M_t,p_t) &=& \\max_{c_t} U(c_t) + \\beta (1-\\mathsf{D}{t+1}) \\mathbb{E} [v{t+1}(M_{t+1}, p_{t+1}) ], \\\na_t &=& M_t - c_t, \\\na_t &\\geq& \\underline{a}, \\\nM_{t+1} &=& R a_t + \\theta_{t+1}, \\\nlog(p_{t+1}) &=& \\varphi log(p_t)+(1-\\varphi log(\\overline{p}{t+1} ) +log(\\Gamma{t+1})+log(\\psi_{t+1}), \\\n\\\n\\psi_t \\sim F_{\\psi t} &\\qquad& \\theta_t \\sim F_{\\theta t}, \\mathbb{E} [F_{\\psi t}] = 1 \\\n\\end{eqnarray}\nAdditional parameters to solve PersistentShock model\n| Param | Description | Code | Value | Constructed |\n| :---: | --- | --- | --- | :---: |\n|$\\varphi$|Serial correlation coefficient for permanent income|$\\texttt{PrstIncCorr}$|0.98||\n||||||\nConstructed inputs to solve PersistentShock\n\nFor this model, we overwrite the method $\\texttt{updatepLvlNextFunc}$ to create the input $\\texttt{pLvlNextFunc}$ as a sequence of AR1-style functions. The method uses now the attributes $\\texttt{PermGroFac}$ and $\\texttt{PrstIncCorr}$. If cycles=0, the product of $\\texttt{PermGroFac}$ across all periods must be 1.0, otherwise this method is invalid.",
"# Make a dictionary for the \"persistent idiosyncratic shocks\" model\nPrstIncCorr = 0.98 # Serial correlation coefficient for persistent income\n\npersistent_shocks = copy(GenIncDictionary)\npersistent_shocks['PrstIncCorr'] = PrstIncCorr",
"The PersistentShockConsumerType class solves the problem of a consumer facing idiosyncratic shocks to his persistent and transitory income, and for which the (log) persistent income follows an AR1 process rather than random walk.",
"# Make and solve an example of \"persistent idisyncratic shocks\" consumer\nPersistentExample = PersistentShockConsumerType(**persistent_shocks)\nPersistentExample.solve()\n\n# Plot the consumption function at various levels of persistent income pLvl\nprint('Consumption function by persistent income level pLvl for a consumer with AR1 coefficient of ' + str(PersistentExample.PrstIncCorr) + ':')\npLvlGrid = PersistentExample.pLvlGrid[0]\nmLvlGrid = np.linspace(0,20,300)\nfor p in pLvlGrid:\n M_temp = mLvlGrid + PersistentExample.solution[0].mLvlMin(p)\n C = PersistentExample.solution[0].cFunc(M_temp,p*np.ones_like(M_temp))\n plt.plot(M_temp,C)\nplt.xlim(0.,20.)\nplt.xlabel('Market resource level mLvl')\nplt.ylabel('Consumption level cLvl')\nplt.show()\n\n# Plot the value function at various persistent income levels\nif PersistentExample.vFuncBool:\n pGrid = PersistentExample.pLvlGrid[0]\n M = np.linspace(0.001,5,300)\n for p in pGrid:\n M_temp = M+PersistentExample.solution[0].mLvlMin(p)\n C = PersistentExample.solution[0].vFunc(M_temp,p*np.ones_like(M_temp))\n plt.plot(M_temp,C)\n plt.ylim([-200,0])\n plt.xlabel('Market resource level mLvl')\n plt.ylabel('Value v')\n plt.show()\n\n# Simulate some data\nPersistentExample.T_sim = 500\nPersistentExample.track_vars = ['mLvl','cLvl','pLvl']\nPersistentExample.initialize_sim()\nPersistentExample.simulate()\nplt.plot(np.mean(PersistentExample.history['mLvl'],axis=1))\nplt.xlabel('Simulated time period')\nplt.ylabel('Average market resources mLvl')\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tensorflow/docs-l10n
|
site/ja/addons/tutorials/average_optimizers_callback.ipynb
|
apache-2.0
|
[
"Copyright 2020 The TensorFlow Authors.",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"モデルの平均化\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://www.tensorflow.org/addons/tutorials/average_optimizers_callback\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\">TensorFlow.orgで 表示</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/addons/tutorials/average_optimizers_callback.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Run in Google Colab</a></td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ja/addons/tutorials/average_optimizers_callback.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">GitHub でソースを表示</a></td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/addons/tutorials/average_optimizers_callback.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\">ノートブックをダウンロード</a></td>\n</table>\n\n概要\nこのノートブックでは、TensorFlow アドオンパッケージから移動平均オプティマイザとモデル平均チェックポイントを使用する方法を紹介します。\n移動平均化\n\n移動平均化の利点は、最新のバッチで激しい損失の変化や不規則なデータ表現が発生しにくいことです。ある時点までモデルのトレーニングがスムーズになり、より一般的なアイデアを提供します。\n\n確率的平均化\n\n確率的重み平均化は、より広いオプティマイザに収束します。これは幾何学的なアンサンブルに似ています。確率的重み平均化は、他のオプティマイザのラッパーとして使用し、内側のオプティマイザのトラジェクトリの異なる点からの結果を平均化することでモデルの性能を向上させる、シンプルな方法です。\n\nモデル平均チェックポイント\n\ncallbacks.ModelCheckpointにはトレーニングの途中で移動平均の重みを保存するオプションがないため、モデル平均オプティマイザにはカスタムコールバックが必要でした。update_weightsパラメータを使用すると、ModelAverageCheckpointで以下が可能になります。\n\n\nモデルに移動平均重みを割り当てて保存する。\n古い平均化されていない重みはそのままにして、保存されたモデルは平均化された重みを使用する。\n\nセットアップ",
"!pip install -U tensorflow-addons\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa\n\nimport numpy as np\nimport os",
"モデルを構築する",
"def create_model(opt):\n model = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(), \n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n ])\n\n model.compile(optimizer=opt,\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n return model",
"データセットを準備する",
"#Load Fashion MNIST dataset\ntrain, test = tf.keras.datasets.fashion_mnist.load_data()\n\nimages, labels = train\nimages = images/255.0\nlabels = labels.astype(np.int32)\n\nfmnist_train_ds = tf.data.Dataset.from_tensor_slices((images, labels))\nfmnist_train_ds = fmnist_train_ds.shuffle(5000).batch(32)\n\ntest_images, test_labels = test",
"ここでは、次の 3 つのオプティマイザを比較してみます。\n\nラップされていない SGD\n移動平均を適用した SGD\n確率的重み平均を適用した SGD\n\n同じモデルを使用してパフォーマンスを見てみましょう。",
"#Optimizers \nsgd = tf.keras.optimizers.SGD(0.01)\nmoving_avg_sgd = tfa.optimizers.MovingAverage(sgd)\nstocastic_avg_sgd = tfa.optimizers.SWA(sgd)",
"MovingAverageオプティマイザとStocasticAverageオプティマイザは、どちらもModelAverageCheckpointを使用します。",
"#Callback \ncheckpoint_path = \"./training/cp-{epoch:04d}.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\ncp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_dir,\n save_weights_only=True,\n verbose=1)\navg_callback = tfa.callbacks.AverageModelCheckpoint(filepath=checkpoint_dir, \n update_weights=True)",
"モデルをトレーニングする\nVanilla SGD オプティマイザ",
"#Build Model\nmodel = create_model(sgd)\n\n#Train the network\nmodel.fit(fmnist_train_ds, epochs=5, callbacks=[cp_callback])\n\n#Evalute results\nmodel.load_weights(checkpoint_dir)\nloss, accuracy = model.evaluate(test_images, test_labels, batch_size=32, verbose=2)\nprint(\"Loss :\", loss)\nprint(\"Accuracy :\", accuracy)",
"移動平均 SGD",
"#Build Model\nmodel = create_model(moving_avg_sgd)\n\n#Train the network\nmodel.fit(fmnist_train_ds, epochs=5, callbacks=[avg_callback])\n\n#Evalute results\nmodel.load_weights(checkpoint_dir)\nloss, accuracy = model.evaluate(test_images, test_labels, batch_size=32, verbose=2)\nprint(\"Loss :\", loss)\nprint(\"Accuracy :\", accuracy)",
"確率的重み平均 SGD",
"#Build Model\nmodel = create_model(stocastic_avg_sgd)\n\n#Train the network\nmodel.fit(fmnist_train_ds, epochs=5, callbacks=[avg_callback])\n\n#Evalute results\nmodel.load_weights(checkpoint_dir)\nloss, accuracy = model.evaluate(test_images, test_labels, batch_size=32, verbose=2)\nprint(\"Loss :\", loss)\nprint(\"Accuracy :\", accuracy)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ga7g08/ga7g08.github.io
|
_notebooks/2015-12-03-Hierarchical-NonLinear-Regression-Models-In-PyMC3.ipynb
|
mit
|
[
"%matplotlib inline\n\nfrom pymc3 import Normal, Model\nimport pymc3 as pm\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.labelsize'] = 22\nimport seaborn",
"Hierarchical Non-Linear Regression Models in PyMC3: Part II\nThis is a follow up to a previous post, extending to the case where we have nonlinear responces.\nFirst, some data",
"colors = ['r', 'g', 'b']\nN_cars = 3\nN_sightings = 10\nidxs = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])\n\na_val = 45\nmu_b_val = 0.1\nsigma_b_val = 3\nmu_c_val = 0.5\nsigma_c_val = 0.5\n\nb = np.random.normal(mu_b_val, sigma_b_val, N_cars)\nc = np.random.normal(mu_c_val, sigma_c_val, N_cars)\nxobs = np.random.uniform(0, 10, N_sightings) + np.random.normal(0, 1, N_sightings)\nyobs = a_val + b[idxs]*xobs + c[idxs]*xobs**2\n\nfor i in range(N_cars):\n xobs_idv = xobs[idxs==i]\n yobs_idv = yobs[idxs==i]\n plt.plot(xobs_idv, yobs_idv, \"o\", color=colors[i])\n \nplt.ylabel(r\"$y_\\mathrm{obs}$\")\nplt.xlabel(r\"$x_\\mathrm{obs}$\")\nplt.show()",
"The clever bit:\nIn the following code we flatten the data, but create a set of indexes which maps the responces to the respondant. For example if our data data consisted of 2 repondants, with 3 responces from the first and 2 from the second, then the data above would be:\nxobs_stacked = [[1.1, 2.2, 4.5], \n [0.5, 0.4]]\nyobs_stacked = [[10.2, 10.3, 10.8], \n [12.5, 12.5]]\nThen we flatten these to get\nxobs = [1.1, 2.2, 4.5, 0.5, 0.4]\nand create an index as follows\nidxs = [0, 0, 0, 1, 1, ]\nwhich says the first, second, and thrid entries below to the 0th respondant, while the last two are from the second. The importance of this will become apparent in a moment. In this instance, we always have the same number of responces from each respondant, so we can use the following trick:\nThe hierachical model",
"with pm.Model() as hierarchical_model:\n # hyperparameters\n mu_b = pm.Normal('mu_b', mu=0., sd=100)\n sigma_b = pm.Uniform('sigma_b', lower=0, upper=100)\n mu_c = pm.Normal('mu_c', mu=0., sd=100)\n sigma_c = pm.Uniform('sigma_c', lower=0, upper=100)\n\n # Common effects\n a = pm.Normal('a', mu=45, sd=100, testval=10) \n eps = pm.Uniform('eps', lower=0, upper=10)\n \n # Group effects\n b = pm.Normal('b', mu=mu_b, sd=sigma_b, shape=N_cars)\n c = pm.Normal('c', mu=mu_b, sd=sigma_b, shape=N_cars)\n\n mu = a + b[idxs] * xobs + c[idxs] * xobs**2\n \n # Likelihood (sampling distribution) of observations\n Y_obs = Normal('Y_obs', mu=mu, sd=eps, observed=yobs)",
"Now we generate samples using the Metropolis algorithm. Note that in some of the linked examples they initiate the MCMC chains with a MLE. I found that this degraded the performance, but I don't have the time to figure out why at the moment.",
"nsamples = 40000\nnburn = 10000\nwith hierarchical_model:\n step = pm.Metropolis()\n hierarchical_trace = pm.sample(nsamples, step, progressbar=True)",
"Now let's use the handy traceplot to inspect the chains and the posteriors having discarded the first half of the samples.",
"pm.traceplot(hierarchical_trace[30000:], \n vars=['mu_b', 'mu_c', 'a', 'eps'],\n #lines={'mu_b': mu_b_val, 'sigma_b': sigma_b_val}\n )\nplt.show()",
"The posterior distributions (in blue) can be compared with vertical (red) lines indicating the \"true\" values used to generate the data. This shows that we have not fully captured the features of the model, but compared to the diffuse prior we have learnt a great deal. Note that in generating the data $\\epsilon$ was effectively zero: so the fact it's posterior is non-zero supports our understanding that we have not fully converged onto the idea solution.\nPosterior predictive check\nFinally we will plot a few of the data points along with straight lines from several draws of the posterior. We color code 5 random data points, then draw 100 realisations of the parameters from the posteriors and plot the corresponding straight lines. This shows that the posterior is doing an excellent job at inferring the individual $b_i$ values.",
"s['b']\n\nnpp = 5\nrepeats = 100\nfig, ax = plt.subplots(figsize=(10, 10))\nxfit = np.linspace(-1, 10, 100)\n\nfor i in range(N_cars):\n for j in range(repeats):\n s = hierarchical_trace[np.random.randint(nburn, nsamples)]\n yfit = s['a'] + s['b'][i] * xfit+ s['c'][i] * xfit**2\n ax.plot(xfit, yfit, \"-\", lw=0.1, color=colors[i])\n ax.plot(xobs[idxs == i], yobs[idxs == i], \"o\", color=colors[i], markersize=10)\n \nplt.show()"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
bikeviz/bikeviz.github.io
|
bikeshares.ipynb
|
apache-2.0
|
[
"A Look Into Affordances of Citi and Capital Bikeshare Stations\nPart 1: What are Bikeshares? What are routes?\nWith the rise of traffic congestion in big cities and the need for more environmentally friendly solutions to travel, cities and businesses are looking to a common pastime to solve the need for alternative means to travel. In two large cities, New York City and Washington DC, have been met so positively that stationwise they have grown to become the two largest bikeshare programs in America. \nWith the immense number of riders participating in the programs, different users will find different purposes for the system, whether they are tourists taking a bike out for a day as a convenient means of transportation between attractions or workers in New York City's financial district heading out from East Village in Manhattan in the morning for work. Given the locations of each station and their proximity to other locations in a city, people may use a station for different purposes. Let's begin by quantifying these \"uses\" in the form of bike trips. More specifically, we define a \"trip\" as one entry in our data having a \"start\" and \"end\" station.\nWe'll keep the libraries imported in a list here for convenience.",
"import glob\nimport csv\nfrom collections import Counter\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport re\n\n%matplotlib inline\n\ndef get_top_trips(path,N=10):\n \n #the headers on the CSV are slightly different depending on whether the data is from Citi or Capital\n if path==\"capital\":\n start_station = \"Start station\"\n end_station = \"End station\"\n if path==\"citi\":\n start_station = \"start station name\"\n end_station = \"end station name\"\n \n trips = []\n \n for filename in glob.glob('./'+path+'/*.csv'): \n with open(filename,'rU') as f:\n reader = csv.DictReader(f)\n for row in reader:\n trips.append((row[start_station],row[end_station]))\n\n return Counter(trips).most_common(N)\n\nget_top_trips(\"capital\")\n\nget_top_trips(\"citi\")",
"In looking at the top 10 trips for each station, we see some very interesting results. For Capital bikeshare, the top most common trip and the 4th most common trip share the same stations. This could imply some sort of round trip behavior. For example, \"Eastern Market Metro / Pennsylvania Ave & 7th St SE\" is right beside a train station, which could indicate that many people are taking a bike from the train station to Capitol Hil for the day, and when it is time to go home, they take a bike back to the train station. For New York, the most frequent trip begins and ends at Central Park, which might indicate that people are taking the bike for a leisure drive and returning to the station after a stroll around the park. Furthermore, this may indicate that stations are being used for different purposes: some might be used to commute, as seen in the DC example, and others might be used for leisure, as seen in the NYC example.\nBut once again, we need to think computationally. Our data is limited in that we have no access to information about the individual people who ride the bikes, and we definitely can't interview people to figure out why people are riding bikes. What can we look at then? Patterns! \nPart 2: A Look at Trip Duration as a Metric\nWe'll start by looking at the duration of the bike trips people take. What we're hoping to find from this is that people take different amounts of time biking if they use a station for a specific purpose. One way to reason through this is that people who take a bike to work do not want to come into work looking tired and sweaty. Additionally, due to work schedules, the time taken to go from one station to another would vary less due to the habitual commutes of workers. On the other hand, the amount of time taken to complete a round trip would vary greatly due to the less time-constrained nature of the trips.",
"def get_duration(path):\n #once again, the files are formatted slightly differently...\n if path==\"citi\":\n trip_duration = 'tripduration'\n start_station = 'start station id'\n end_station = 'end station id'\n if path==\"capital\":\n trip_duration = 'Duration'\n start_station = 'Start station ID'\n end_station = 'End station ID'\n duration = []\n for filename in glob.glob(\"./\"+path+\"/*.csv\"):\n with open(filename,'rU') as f:\n reader = csv.DictReader(f)\n for row in reader:\n duration.append((row[trip_duration],row[start_station],row[end_station]))\n return duration\n\ncitibike = get_duration('citi')\nprint len(citibike)\n\ncapitalbike = get_duration('capital')\nprint len(capitalbike)",
"We'll define a round trip by looking at the start and end station to see if they are the same. One small issue, though--Capital bikeshare's data is formatted slightly differently, as shown below:",
"capital[1][0]",
"Since NYC's data comes in seconds, let's convert DC's data to seconds as well.",
"def parse_time(time):\n hms = re.sub(\"\\s\\s\",\"\",re.sub(\"[a-z].\",\" \",time)).split(\" \")\n return int(hms[0])*60+60*int(hms[1])+int(hms[2])\n\nnyc_round_trip = [int(entry[0]) for entry in citibike if entry[1]==entry[2]]\nnyc_one_way = [int(entry[0]) for entry in citibike if entry[1]!=entry[2]]\n\ndc_round_trip = [parse_time(entry[0]) for entry in capitalbike if entry[1]==entry[2]]\ndc_one_way = [parse_time(entry[0]) for entry in capitalbike if entry[1]!=entry[2]]\n\nmin(nyc_round_trip), min(dc_round_trip)\n\nmax(nyc_round_trip), max(dc_round_trip)",
"Something isn't quite right. Looking at the data, the shortest time for NYC we're looking at is 60 seconds, and a staggering 0 seconds for DC. At a first glance, this is way too short for a \"round trip\". One possible explanation is that someone realized a bike they rented was malfunctioning and returned it immediately to the station. We'll need to filter out these low times before continuing so that our results aren't skewed. For now, let's say a round trip has to be at least five minutes to count. Likewise, for trip durations, both the Capital and Citi websites urge members to seek out a local bike rental shop if they need a bike for more than 24 hours. The longest trip for the NYC data is almost a week long, which will skew our results as well, so let's remove the long outliers too.",
"nyc_round_trip = [trip for trip in nyc_round_trip if trip>=300 and trip<20000]\ndc_round_trip = [trip for trip in dc_round_trip if trip>=300 and trip<20000]\n\nmin(nyc_round_trip), min(dc_round_trip)",
"Let's look at this visually:",
"def plot_freq(trip_data,city,nbins=500):\n\n plt.xlim([0, 15500])\n hist = plt.hist(trip_data,bins=np.arange(0,max(trip_data),len(trip_data)*1./nbins))\n plt.ylim([0,22500])\n plt.title(city + ' Round Trip Frequency')\n plt.xlabel('Trip Duration in Seconds')\n plt.ylabel('Counts')\n\n plt.show()\n\nplot_freq(nyc_round_trip,\"NYC\",500)\n\nplot_freq(dc_round_trip,\"DC\",100)",
"Now that we have a better idea of how round trips work, let's take a look at one way trips. First things first, we need to check our data integrity and remove anything that doesn't make sense.",
"min(nyc_one_way),min(dc_one_way),max(nyc_one_way),max(dc_one_way)",
"Some of these numbers are unbelievable. In some other world, a 27 bike ride <i>might</i> be justifiable, but the max one way trip length for NYC is over 62 days, which is completely unreasonable. Let's filter those out.",
"nyc_one_way = [trip for trip in nyc_one_way if trip>=300 and trip<20000]\ndc_one_way = [trip for trip in dc_one_way if trip>=300 and trip<20000]\n\nmin(nyc_one_way),min(dc_one_way),max(nyc_one_way),max(dc_one_way)\n\ndef plot_freq_one_way(trip_data,city,nbins=500):\n \n plt.xlim([0, 15000])\n hist = plt.hist(trip_data,bins=np.arange(0,max(trip_data),len(trip_data)*1./nbins))\n plt.ylim([0,1600000])\n plt.title(city + ' One Way Trip Frequency')\n plt.xlabel('Trip Duration in Seconds')\n plt.ylabel('Counts')\n\n plt.show()\n\nplot_freq_one_way(nyc_one_way,\"NYC\",45000)\n\nplot_freq_one_way(dc_one_way,\"DC\",3500)",
"Looking at the frequencies for each of the locations, there seems to be a general bin for trip duration that has the highest frequency across all the categories. Aside from that, it is difficult to say that frequency alone, if at all, can indicate whether a one way trip or a round trip occurs.\nPart 3: A Look at Usage Throughout the Day\nAnother idea is to look at how often people use bikes for various purposes throughout the day. One way of looking at this is that because people in the morning may use a bike to go to work, certain bike stations will be more likely to see activity in \"bursts\" at certain points throughout the day, whereas for people who exercise, those bursts may occur at a different time. To visualize this phenomenon, we'll turn to using heatmaps.\nFirst things first, we need to reformat the data in a usable format. This means we need to start by segmenting our data by hours and day of the week.",
"from dateutil import parser\n\n'''\nset param='depart' or param='arrive'\n'''\ndef count_trips(times,param,param2=None):\n trips = OrderedDict()\n for day in range(0,7):\n trips[str(day)] = OrderedDict()\n for hour in range(0,24):\n trips[str(day)][str(hour)] = 0\n\n counter = 0\n for time in times[param]:\n hour = parser.parse(time).strftime(\"%-H\")\n day = parser.parse(time).strftime(\"%w\")\n trips[day][hour] += 1\n counter += 1\n# if (counter % 20000 == 0):\n# print \"Counted {0} trips\".format(counter)\n \n if param2 is not None:\n for time in times[param2]:\n hour = parser.parse(time).strftime(\"%-H\")\n day = parser.parse(time).strftime(\"%w\")\n trips[day][hour] += 1\n counter += 1\n# if (counter % 20000 == 0):\n# print \"Counted {0} trips\".format(counter)\n \n return trips",
"To visualize the heatmap, we plan on using d3js, so we'll output the files to a .tsv format that our other code can read in.",
"def write_tsv(file_name, trips_dict):\n trips_array = list()\n for day in trips_dict:\n for hour in trips_dict[day]:\n trips_array.append({'day':day,'hour':hour,'count':trips_dict[day][hour]})\n\n with open(file_name, 'w') as f:\n dict_writer = csv.DictWriter(f, delimiter='\\t',fieldnames=['day','hour','count'])\n dict_writer.writeheader()\n dict_writer.writerows(trips_array)\n \n\n\ndef get_one_way_dc():\n times = dict()\n times['depart'] = []\n times['arrive'] = []\n\n for filename in glob.glob(\"./capital/*.csv\"): \n with open(filename,'rU') as f:\n reader = csv.DictReader(f)\n for row in reader:\n if row['Start station ID'] == row['End station ID']:\n times['depart'].append(row['Start date'])\n times['arrive'].append(row['End date'])\n return times\n\ndef get_one_way_nyc():\n times = dict()\n times['depart'] = []\n times['arrive'] = []\n\n for filename in glob.glob(\"./citi/*.csv\"): \n with open(filename,'rU') as f:\n reader = csv.DictReader(f)\n for row in reader:\n if row['start station id'] != row['end station id']:\n times['depart'].append(row['starttime'])\n times['arrive'].append(row['stoptime'])\n return times\n\ndef get_round_trip_dc():\n times = dict()\n times['depart'] = []\n times['arrive'] = []\n\n for filename in glob.glob(\"./capital/*.csv\"): \n with open(filename,'rU') as f:\n reader = csv.DictReader(f)\n for row in reader:\n if row['Start station ID'] != row['End station ID']:\n times['depart'].append(row['Start date'])\n times['arrive'].append(row['End date'])\n return times\n\ndef get_round_trip_nyc():\n times = dict()\n times['depart'] = []\n times['arrive'] = []\n\n for filename in glob.glob(\"./citi/*.csv\"): \n with open(filename,'rU') as f:\n reader = csv.DictReader(f)\n for row in reader:\n if row['start station id'] == row['end station id']:\n times['depart'].append(row['starttime'])\n times['arrive'].append(row['stoptime'])\n return times\n\ndc_one_way_data = get_one_way_dc()\ndc_one_way_trips = count_trips(dc_one_way_data, 
'arrive', 'depart');\nwrite_tsv('./data/dc_one_way.tsv',dc_one_way_trips)\n\nnyc_one_way_data = get_one_way_nyc()\nnyc_one_way_trips = count_trips(nyc_one_way_data, 'arrive', 'depart');\nwrite_tsv('./data/nyc_one_way.tsv',nyc_one_way_trips)\n\ndc_round_trip_data = get_round_trip_dc()\ndc_round_trips = count_trips(dc_round_trip_data, 'arrive', 'depart');\nwrite_tsv('./data/dc_round_trips.tsv',dc_round_trips)\n\nnyc_round_trip_data = get_round_trip_nyc()\nnyc_round_trips = count_trips(nyc_round_trip_data, 'arrive','depart');\nwrite_tsv('./data/nyc_round_trips.tsv',nyc_round_trips)",
"Thanks to the \"%%javascript\" magic word, we can embed JavaScript right into the cells! First thing we want to do is import d3js into the notebook.",
"%%javascript\nrequire.config({\n paths: {\n d3: \"http://d3js.org/d3.v3.min\"\n }\n});\n\nrequire([\"d3\"], function(d3) {\n console.log(d3.version);\n});",
"The \"%%html\" magic word allows us to style our HTML and SVG that we are about to create. Running this cell after the HTML and SVG is created will change the styling to match any changes made to the cell. It can also be run before running the HTML and SVG to predefine styles.",
"%%html\n<style>\n rect.bordered {\n stroke: #E6E6E6;\n stroke-width:2px; \n }\n text.mono {\n font-size: 9pt;\n font-family: Consolas, courier;\n fill: #aaa;\n }\n text.axis-workweek {\n fill: #000;\n }\n text.axis-worktime {\n fill: #000;\n }\n body {\n font-size: 9pt;\n font-family: Consolas, courier;\n fill: #aaa;\n }\n</style>",
"With our styles in place, we have to define our div elements so that d3 can populate them. The code to do that is located below these images, as once again, running a cell in a notebook can apply changes retroactively. Unforutnately, due to security reasons, iPython does not allow arbitrary execution of JavaScript code unless it's on your own machine, so the code you see below must be run on your own machine to display the graphs inline. Images of the graphs are embedded after the code for viewing, though!",
"%%html\n\nOne-way trip in DC\n<div id=\"dccommuter\"></div>\nRound trip in DC\n<div id=\"dcleisure\"></div>\nOne-way trip in NYC\n<div id=\"nyccommuter\"></div>\nRound trip in NYC\n<div id=\"nycleisure\"></div>\n\n%%javascript\n\n//$(document).ready(function(){\ndraw_heatmap(\"./data/dc_round_trips.tsv\", \"#dccommuter\");\ndraw_heatmap(\"./data/dc_one_way.tsv\", \"#dcleisure\");\ndraw_heatmap(\"./data/nyc_one_way.tsv\", \"#nyccommuter\");\ndraw_heatmap(\"./data/nyc_round_trips.tsv\", \"#nycleisure\");\n//});\nvar margin = { top: 50, right: 0, bottom: 100, left: 30 },\n width = 960 - margin.left - margin.right,\n height = 430 - margin.top - margin.bottom,\n gridSize = Math.floor(width / 24),\n legendElementWidth = gridSize*2,\n buckets = 9,\n colors = [\"#ffffd9\",\"#edf8b1\",\"#c7e9b4\",\"#7fcdbb\",\"#41b6c4\",\"#1d91c0\",\"#225ea8\",\"#253494\",\"#081d58\"], // alternatively colorbrewer.YlGnBu[9]\n // colors = [\"#ffffd9\",\"#edf8b1\",\"#c7e9b4\",\"#7fcdbb\",\"#41b6c4\",\"#1d91c0\",\"#225ea8\",\"#253494\",\"#081d58\"], // alternatively colorbrewer.YlGnBu[9]\n days = [\"Su\", \"Mo\", \"Tu\", \"We\", \"Th\", \"Fr\", \"Sa\"],\n times = [\"1a\", \"2a\", \"3a\", \"4a\", \"5a\", \"6a\", \"7a\", \"8a\", \"9a\", \"10a\", \"11a\", \"12a\", \"1p\", \"2p\", \"3p\", \"4p\", \"5p\", \"6p\", \"7p\", \"8p\", \"9p\", \"10p\", \"11p\", \"12p\"];\nfunction draw_heatmap(source, div) {\n d3.tsv(source,\n function(d) {\n return {\n day: +d.day,\n hour: +d.hour,\n value: +d.count\n };\n },\n function(error, data) {\n var maxCount = d3.max(data, function (d) { return d.value; });\n var colorScale = d3.scale.quantile()\n .domain([0, buckets - 1, 100])\n .range(colors);\n var svg = d3.select(div).append(\"svg\")\n .attr(\"width\", width + margin.left + margin.right)\n .attr(\"height\", height + margin.top + margin.bottom)\n .append(\"g\")\n .attr(\"transform\", \"translate(\" + margin.left + \",\" + margin.top + \")\");\n var dayLabels = svg.selectAll(\".dayLabel\")\n .data(days)\n 
.enter().append(\"text\")\n .text(function (d) { return d; })\n .attr(\"x\", 0)\n .attr(\"y\", function (d, i) { return i * gridSize; })\n .style(\"text-anchor\", \"end\")\n .attr(\"transform\", \"translate(-6,\" + gridSize / 1.5 + \")\")\n .attr(\"class\", function (d, i) { return ((i >= 1 && i <= 5) ? \"dayLabel mono axis axis-workweek\" : \"dayLabel mono axis\"); });\n var timeLabels = svg.selectAll(\".timeLabel\")\n .data(times)\n .enter().append(\"text\")\n .text(function(d) { return d; })\n .attr(\"x\", function(d, i) { return i * gridSize; })\n .attr(\"y\", 0)\n .style(\"text-anchor\", \"middle\")\n .attr(\"transform\", \"translate(\" + gridSize / 2 + \", -6)\")\n .attr(\"class\", function(d, i) { return ((i >= 7 && i <= 16) ? \"timeLabel mono axis axis-worktime\" : \"timeLabel mono axis\"); });\n var heatMap = svg.selectAll(\".hour\")\n .data(data)\n .enter().append(\"rect\")\n // .attr(\"x\", function(d) { return (d.hour - 1) * gridSize; })\n // .attr(\"y\", function(d) { return (d.day - 1) * gridSize; })\n .attr(\"x\", function(d) { return (d.hour) * gridSize; })\n .attr(\"y\", function(d) { return (d.day) * gridSize; })\n .attr(\"rx\", 4)\n .attr(\"ry\", 4)\n .attr(\"class\", \"hour bordered\")\n .attr(\"width\", gridSize)\n .attr(\"height\", gridSize)\n .style(\"fill\", colors[0]);\n heatMap.transition().duration(1000)\n .style(\"fill\", function(d) { return colorScale(d.value * 100 / maxCount); });\n heatMap.append(\"title\").text(function(d) { return Math.round(d.value * 100 / maxCount) + \" %\"; });\n\n var legend = svg.selectAll(\".legend\")\n .data([0].concat(colorScale.quantiles()), function(d) { return d; })\n .enter().append(\"g\")\n .attr(\"class\", \"legend\");\n legend.append(\"rect\")\n .attr(\"x\", function(d, i) { return legendElementWidth * i; })\n .attr(\"y\", height)\n .attr(\"width\", legendElementWidth)\n .attr(\"height\", gridSize / 2)\n .style(\"fill\", function(d, i) { return colors[i]; });\n legend.append(\"text\")\n 
.attr(\"class\", \"mono\")\n .text(function(d) { return \"≥ \" + (Math.round(d)) + \" %\"; })\n .attr(\"x\", function(d, i) { return legendElementWidth * i; })\n .attr(\"y\", height + gridSize);\n svg.append(\"text\")\n .text(\"percentage of riders\")\n .attr(\"x\", function(d) { return legendElementWidth * 9; })\n .attr(\"y\", height + gridSize)\n });\n}",
"With the code aside, let's focus on the heatmaps themselves. A couple things stand out. First of all, the darkest regions for the one-way trips are at exactly 9am and from 6-7pm. This happens to overlap with the rush-hour times, indicating that the majority of one-way trips in both DC and NYC are for the purpose of commuting on weekdays. On weekends, most trips occur in the afternoon and early evening. For the round trips, there are no real patterns discernable for the weekdays, although on the weekends, there is a very high frequency of riders, which might indicate that those riders choose to ride bikes for leisurely purposes.\n<img src='https://raw.githubusercontent.com/girikuncoro/bikeshare/master/viz/one_way_dc.png'/>\n<img src='https://raw.githubusercontent.com/girikuncoro/bikeshare/master/viz/round_trip_dc.png'/>\n<img src='https://raw.githubusercontent.com/girikuncoro/bikeshare/master/viz/one_way_nyc.png'/>\n<img src='https://raw.githubusercontent.com/girikuncoro/bikeshare/master/viz/round_trip_nyc.png'/>"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
hannorein/reboundx
|
ipython_examples/GettingStartedParameters.ipynb
|
gpl-3.0
|
[
"Adding Parameters With REBOUNDx\nWe start by creating a simulation, attaching REBOUNDx, and adding the effects of general relativity:",
"import rebound\nimport reboundx\n\nsim = rebound.Simulation()\nsim.add(m=1.)\nsim.add(a=1.)\nps = sim.particles\n\nrebx = reboundx.Extras(sim)\ngr = rebx.load_force('gr')\nrebx.add_force(gr)",
"The documentation page https://reboundx.readthedocs.io/en/latest/effects.html lists the various required and optional parameters that need to be set for each effect in REBOUNDx. Adding these parameters to particles, forces and operators is easy. We do it through the params attribute:",
"ps[1].params['primary'] = 1\ngr.params['c'] = 3.e8",
"We would now sim.integrate as usual. If we want, we can access these values later (e.g., some effects could update these values as the simulation progresses). Here they don't:",
"sim.integrate(10.)\ngr.params['c']",
"Details\nFor simples types (ints and floats), assigning variables to parameters makes a copy of the value. For example:",
"speed = 5\ngr.params['c'] = speed",
"If we now update speed, this will not be reflected in our 'c' parameter:",
"speed = 10\ngr.params['c']",
"More complicated objects are assigned as pointers. For example, adding REBOUNDx structures like forces works out of the box. As a simple example (with no meaning whatsoever):",
"ps[1].params['force'] = gr",
"Now if we update gr, the changes will be reflected in the 'force' parameter:",
"gr.params['c'] = 10\nnewgr = ps[1].params['force']\nnewgr.params['c']",
"If the parameter doesn't exist REBOUNDx will raise an exception, which we can catch and handle however we want",
"try:\n waterfrac = ps[1].params['waterfrac']\nexcept:\n print('No water on this planet')",
"Adding Your Own Parameters\nIn order to go back and forth between Python and C, REBOUNDx keeps a list of registered parameter names with their corresponding types. This list is compiled from all the parameters used by the various forces and operators in REBOUNDx listed here: https://reboundx.readthedocs.io/en/latest/effects.html.\nIf you try to add one that's not on the list, it will complain:",
"try:\n gr.params['q'] = 7\nexcept AttributeError as e:\n print(e)",
"You can register the name permanently on the C side, but can also do it from Python. You must pass a name along with one of the C types:",
"from reboundx.extras import REBX_C_PARAM_TYPES\nREBX_C_PARAM_TYPES",
"For example, say we want a double:",
"rebx.register_param(\"q\", \"REBX_TYPE_DOUBLE\")\ngr.params['q'] = 7\ngr.params['q']",
"Custom Parameters\nYou can also add your own more complicated custom types (for example from another library) straightfowardly, with a couple caveats. First, the object must be wrapped as a ctypes object in order to communicate with the REBOUNDx C library, e.g.",
"from ctypes import *\nclass SPH_sim(Structure):\n _fields_ = [(\"dt\", c_double),\n (\"Nparticles\", c_int)]\n\nmy_sph_sim = SPH_sim()\nmy_sph_sim.dt = 0.1\nmy_sph_sim.Nparticles = 10000",
"We also have to register it as a generic POINTER:",
"rebx.register_param(\"sph\", \"REBX_TYPE_POINTER\")\ngr.params['sph'] = my_sph_sim",
"Now when we get the parameter, REBOUNDx does not know how to cast it. You get a ctypes.c_void_p object back, which you have to manually cast to the Structure class we've created. See the ctypes library documentation for details:",
"mysph = gr.params['sph']\nmysph = cast(mysph, POINTER(SPH_sim)).contents\n\nmysph.dt",
"Caveats for Custom Parameters\nSince REBOUNDx does not know about the custom objects created in this way there are two main caveats.\n\n\nThe user is responsible for ensuring that the memory for their custom objects remains allocated. For example if the custom parameter is instantiated in a function and assigned as a parameter, but then leaves the function and gets garbage collected, this will obviously lead to undefined behavior! By contrast, REBOUNDx will retain memory for simple ints and floats, as well as any objects that it has created (e.g. forces or operators).\n\n\nCustom parameters will not be written or read from REBOUNDx binaries"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
empet/Plotly-plots
|
Plotly-Slice-in-volumetric-data.ipynb
|
gpl-3.0
|
[
"Slice in volumetric data, via Plotly\nA volume included in a parallelepiped is described by the values of a scalar field, $f(x,y,z)$, with $x\\in[a,b]$, $y\\in [c,d]$, $z\\in[e,f]$.\nA slice in this volume is visualized by coloring the surface of the slice, according to the values of the function f, restricted to that surface.\nIn order to plot a planar or a nonlinear slice of equation z=s(x,y) one proceeds as follows:\n\ndefine a meshgrid in x,y;\nevaluate z=s(x,y)\ndefine an instance of the Plotly Surface class, that represents the surface z=s(x,y)\nthis surface is colored according to the values, f(x,y,z), at its points. More precisely, the normalized values of the function f are mapped to a colormap/colorscale.\n\nWith obvious modications we get slices of equation $x=s(y,z), y=s(z,x)$.",
"import numpy as np\nimport plotly.graph_objects as go\nfrom IPython",
"Define a function that returns a slice as a Plotly Surface:",
"def get_the_slice(x,y,z, surfacecolor):\n return go.Surface(x=x,\n y=y,\n z=z,\n surfacecolor=surfacecolor,\n coloraxis='coloraxis')\n\ndef get_lims_colors(surfacecolor):# color limits for a slice\n return np.min(surfacecolor), np.max(surfacecolor)",
"Let us plot the slices z=0 and y=-0.5 in the volume defined by:",
"scalar_f = lambda x,y,z: x*np.exp(-x**2-y**2-z**2)\n\nx = np.linspace(-2,2, 50)\ny = np.linspace(-2,2, 50)\nx, y = np.meshgrid(x,y)\nz = np.zeros(x.shape)\nsurfcolor_z = scalar_f(x,y,z)\nsminz, smaxz = get_lims_colors(surfcolor_z)\n\nslice_z = get_the_slice(x, y, z, surfcolor_z)\n\nx = np.linspace(-2,2, 50)\nz = np.linspace(-2,2, 50)\nx, z = np.meshgrid(x,y)\ny = -0.5 * np.ones(x.shape)\nsurfcolor_y = scalar_f(x,y,z)\nsminy, smaxy = get_lims_colors(surfcolor_y)\nvmin = min([sminz, sminy])\nvmax = max([smaxz, smaxy])\nslice_y = get_the_slice(x, y, z, surfcolor_y)",
"In order to be able to compare the two slices, we choose a unique interval of values to be mapped to the colorscale:",
"def colorax(vmin, vmax):\n return dict(cmin=vmin,\n cmax=vmax)\n\nfig1 = go.Figure(data=[slice_z, slice_y])\nfig1.update_layout(\n title_text='Slices in volumetric data', \n title_x=0.5,\n width=700,\n height=700,\n scene_zaxis_range=[-2,2], \n coloraxis=dict(colorscale='BrBG',\n colorbar_thickness=25,\n colorbar_len=0.75,\n **colorax(vmin, vmax))) \n \n#fig1.show() \n\nfrom IPython.display import IFrame \nIFrame('https://chart-studio.plotly.com/~empet/13862', width=700, height=700)",
"Oblique slice in volumetric data\nAs an example we plot comparatively two slices: a slice through $z=0$ and an oblique planar slice, that is defined by rotating the plane z=0 by $\\alpha=\\pi/4$, about Oy.\nRotating the plane $z=c$ about Oy (from Oz towards Ox) with $\\alpha$ radians we get the plane of equation\n$z=c/\\cos(\\alpha)-x\\tan(\\alpha)$",
"alpha = np.pi/4\nx = np.linspace(-2, 2, 50)\ny = np.linspace(-2, 2, 50)\nx, y = np.meshgrid(x,y)\nz = -x * np.tan(alpha)\n\nsurfcolor_obl = scalar_f(x,y,z)\n\nsmino, smaxo = get_lims_colors(surfcolor_obl)\nvmin = min([sminz, smino])\nvmax = max([smaxz, smaxo])\n\nslice_obl = get_the_slice(x,y,z, surfcolor_obl)\n\n\nfig2 = go.Figure(data=[slice_z, slice_obl], layout=fig1.layout)\nfig2.update_layout( coloraxis=colorax(vmin, vmax))\n\n#fig2.show() \n\nIFrame('https://chart-studio.plotly.com/~empet/13864', width=700, height=700)\n\nfrom IPython.core.display import HTML\ndef css_styling():\n styles = open(\"./custom.css\", \"r\").read()\n return HTML(styles)\ncss_styling()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
hershaw/data-science-101
|
course/class1/pca/iris/PCA - Iris dataset.ipynb
|
mit
|
[
"Principal Component Analysis with Iris Dataset",
"from sklearn import datasets\nfrom sklearn.decomposition import PCA\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom mpl_toolkits.mplot3d import Axes3D\n# %matplotlib inline\n%matplotlib notebook",
"Load Iris dataset\nThe Iris Dataset here.\nThis data sets consists of 3 different types of irises’ (Setosa, Versicolour, and Virginica) petal and sepal length, stored in a 150x4 numpy.ndarray.\nThe rows being the samples and the columns being: Sepal Length, Sepal Width, Petal Length and Petal Width.",
"iris = datasets.load_iris()\nX = pd.DataFrame(iris.data, columns=iris.feature_names)\ny = pd.Series(iris.target, name='FlowerType')\nX.head()\n\nplt.figure(2, figsize=(8, 6))\nplt.clf()\n\n# Plot the training points\nplt.scatter(X['sepal length (cm)'], X['sepal width (cm)'], s=35, c=y, cmap=plt.cm.brg)\nplt.xlabel('Sepal length')\nplt.ylabel('Sepal width')\nplt.title('Sepal length vs. Sepal width')\nplt.show()",
"PCA\nCan we reduce the dimensionality of our dataset withour losing much information? PCA will help us decide.",
"pca_iris = PCA(n_components=3).fit(iris.data)\n\npca_iris.explained_variance_ratio_\n\npca_iris.transform(iris.data)",
"The P.C. #0 explained variance is one order of magnitude higher than P.C. #1 and #2, and two orders of magnitude higher than P.C. #3. We can us use this knowledge to reduce our dataset from 4D to 3D.\nWe could have done everything in one line by setting the number of components we want (3), fitting the PCA and transforming it to 3D:",
"iris_reduced = PCA(n_components=3).fit(iris.data)\n\niris_reduced.components_\n\niris_reduced = PCA(n_components=3).fit_transform(iris.data)\n\nfig = plt.figure(1, figsize=(8, 6))\nax = Axes3D(fig, elev=-150, azim=110)\nX_reduced = PCA(n_components=3).fit_transform(iris.data)\nax.scatter(iris_reduced[:, 0], iris_reduced[:, 1], iris_reduced[:, 2],\n cmap=plt.cm.Paired, c=iris.target)\nfor k in range(3):\n ax.scatter(iris_reduced[y==k, 0], iris_reduced[y==k, 1], iris_reduced[y==k, 2], label=iris.target_names[k])\nax.set_title(\"First three P.C.\")\nax.set_xlabel(\"P.C. 1\")\nax.w_xaxis.set_ticklabels([])\nax.set_ylabel(\"P.C. 2\")\nax.w_yaxis.set_ticklabels([])\nax.set_zlabel(\"P.C. 3\")\nax.w_zaxis.set_ticklabels([])\nplt.legend(numpoints=1)\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
kscottz/PythonFromSpace
|
OpenStreetMapsExample.ipynb
|
bsd-3-clause
|
[
"Let's start with our crazy stock list of imports and setup our environment",
"# See requirements.txt to set up your dev environment.\nimport os\nimport sys\nimport utm\nimport json\nimport scipy\nimport overpy\nimport urllib\nimport datetime \nimport urllib3\nimport rasterio\nimport subprocess\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom osgeo import gdal\nfrom planet import api\nfrom planet.api import filters\nfrom traitlets import link\nimport rasterio.tools.mask as rio_mask\nfrom shapely.geometry import mapping, shape\nfrom IPython.display import display, Image, HTML\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n#from scipy import ndimage\nimport warnings\nfrom osgeo import gdal\n\n\nfrom osmapi import OsmApi\nfrom geopy.geocoders import Nominatim\n\nurllib3.disable_warnings()\nfrom ipyleaflet import (\n Map,\n Marker,\n TileLayer, ImageOverlay,\n Polyline, Polygon, Rectangle, Circle, CircleMarker,\n GeoJSON,\n DrawControl\n)\n\n%matplotlib inline\n# will pick up api_key via environment variable PL_API_KEY\n# but can be specified using `api_key` named argument\napi_keys = json.load(open(\"apikeys.json\",'r'))\nclient = api.ClientV1(api_key=api_keys[\"PLANET_API_KEY\"])\ngdal.UseExceptions()\napi = overpy.Overpass()",
"Let's bring up our slippy map once again.",
"# Basemap Mosaic (v1 API)\nmosaicsSeries = 'global_quarterly_2017q1_mosaic'\n# Planet tile server base URL (Planet Explorer Mosaics Tiles)\nmosaicsTilesURL_base = 'https://tiles0.planet.com/experimental/mosaics/planet-tiles/' + mosaicsSeries + '/gmap/{z}/{x}/{y}.png'\n# Planet tile server url\nmosaicsTilesURL = mosaicsTilesURL_base + '?api_key=' + api_keys[\"PLANET_API_KEY\"]\n# Map Settings \n# Define colors\ncolors = {'blue': \"#009da5\"}\n# Define initial map center lat/long\ncenter = [45.5231, -122.6765]\n# Define initial map zoom level\nzoom = 13\n# Set Map Tiles URL\nplanetMapTiles = TileLayer(url= mosaicsTilesURL)\n# Create the map\nm = Map(\n center=center, \n zoom=zoom,\n default_tiles = planetMapTiles # Uncomment to use Planet.com basemap\n)\n# Define the draw tool type options\npolygon = {'shapeOptions': {'color': colors['blue']}}\nrectangle = {'shapeOptions': {'color': colors['blue']}} \n\n# Create the draw controls\n# @see https://github.com/ellisonbg/ipyleaflet/blob/master/ipyleaflet/leaflet.py#L293\ndc = DrawControl(\n polygon = polygon,\n rectangle = rectangle\n)\n# Initialize an action counter variable\nactionCount = 0\nAOIs = {}\n\n# Register the draw controls handler\ndef handle_draw(self, action, geo_json):\n # Increment the action counter\n global actionCount\n actionCount += 1\n # Remove the `style` property from the GeoJSON\n geo_json['properties'] = {}\n # Convert geo_json output to a string and prettify (indent & replace ' with \")\n geojsonStr = json.dumps(geo_json, indent=2).replace(\"'\", '\"')\n AOIs[actionCount] = json.loads(geojsonStr)\n \n# Attach the draw handler to the draw controls `on_draw` event\ndc.on_draw(handle_draw)\nm.add_control(dc)\nm",
"Let's review from last time.\n\nWe'll query the Planet API and get a list of scenes.\nWe'll then use pandas and shapely to clean up and filter the results\nWe'll then render the footprints of the good scenes over our AOI",
"print AOIs[1]\nmyAOI = AOIs[1][\"geometry\"]\n\n# build a query using the AOI and\n# a cloud_cover filter that excludes 'cloud free' scenes\n\nold = datetime.datetime(year=2017,month=1,day=1)\n\nquery = filters.and_filter(\n filters.geom_filter(myAOI),\n filters.range_filter('cloud_cover', lt=5),\n filters.date_range('acquired', gt=old)\n)\n\n# build a request for only PlanetScope imagery\nrequest = filters.build_search_request(\n query, item_types=['PSScene3Band']\n)\n\n# if you don't have an API key configured, this will raise an exception\nresult = client.quick_search(request)\nscenes = []\nplanet_map = {}\nfor item in result.items_iter(limit=500):\n planet_map[item['id']]=item\n props = item['properties']\n props[\"id\"] = item['id']\n props[\"geometry\"] = item[\"geometry\"]\n props[\"thumbnail\"] = item[\"_links\"][\"thumbnail\"]\n scenes.append(props)\nscenes = pd.DataFrame(data=scenes)\n# now let's clean up the datetime stuff\n# make a shapely shape from our aoi\nportland = shape(myAOI)\nfootprints = []\noverlaps = []\n# go through the geometry from our api call, convert to a shape and calculate overlap area.\n# also save the shape for safe keeping\nfor footprint in scenes[\"geometry\"].tolist():\n s = shape(footprint)\n footprints.append(s)\n overlap = 100.0*(portland.intersection(s).area / portland.area)\n overlaps.append(overlap)\n# take our lists and add them back to our dataframe\nscenes['overlap'] = pd.Series(overlaps, index=scenes.index)\nscenes['footprint'] = pd.Series(footprints, index=scenes.index)\n# now make sure pandas knows about our date/time columns.\nscenes[\"acquired\"] = pd.to_datetime(scenes[\"acquired\"])\nscenes[\"published\"] = pd.to_datetime(scenes[\"published\"])\nscenes[\"updated\"] = pd.to_datetime(scenes[\"updated\"])\n\nscenes = scenes[scenes['overlap']>0.9]\n\n\nprint len(scenes)\n# now let's clean up the datetime stuff\n# make a shapely shape from our aoi\nportland = shape(myAOI)\nfootprints = []\noverlaps = []\n# go through 
the geometry from our api call, convert to a shape and calculate overlap area.\n# also save the shape for safe keeping\nfor footprint in scenes[\"geometry\"].tolist():\n s = shape(footprint)\n footprints.append(s)\n overlap = 100.0*(portland.intersection(s).area / portland.area)\n overlaps.append(overlap)\n# take our lists and add them back to our dataframe\nscenes['overlap'] = pd.Series(overlaps, index=scenes.index)\nscenes['footprint'] = pd.Series(footprints, index=scenes.index)\n# now make sure pandas knows about our date/time columns.\nscenes[\"acquired\"] = pd.to_datetime(scenes[\"acquired\"])\nscenes[\"published\"] = pd.to_datetime(scenes[\"published\"])\nscenes[\"updated\"] = pd.to_datetime(scenes[\"updated\"])\n\n# first create a list of colors\ncolors = [\"#ff0000\",\"#00ff00\",\"#0000ff\",\"#ffff00\",\"#ff00ff\",\"#00ffff\"]\n# grab our scenes from the geometry/footprint geojson\nfootprints = scenes[\"geometry\"].tolist()\n# for each footprint/color combo\n\nfor footprint,color in zip(footprints,colors):\n # create the leaflet object\n feat = {'geometry':footprint,\"properties\":{\n 'style':{'color': color,'fillColor': color,'fillOpacity': 0.1,'weight': 1}},\n 'type':u\"Feature\"}\n # convert to geojson\n gjson = GeoJSON(data=feat)\n # add it our map\n m.add_layer(gjson)\n# now we will draw our original AOI on top \nfeat = {'geometry':myAOI,\"properties\":{\n 'style':{'color': \"#FFFFFF\",'fillColor': \"#FFFFFF\",'fillOpacity': 0.1,'weight': 2}},\n 'type':u\"Feature\"}\ngjson = GeoJSON(data=feat)\nm.add_layer(gjson) \nm ",
"Now we'll add in our boiler plate activation code for reference.",
"def get_products(client, scene_id, asset_type='PSScene3Band'): \n \"\"\"\n Ask the client to return the available products for a \n given scene and asset type. Returns a list of product \n strings\n \"\"\"\n out = client.get_assets_by_id(asset_type,scene_id)\n temp = out.get()\n return temp.keys()\n\ndef activate_product(client, scene_id, asset_type=\"PSScene3Band\",product=\"analytic\"):\n \"\"\"\n Activate a product given a scene, an asset type, and a product.\n \n On success return the return value of the API call and an activation object\n \"\"\"\n temp = client.get_assets_by_id(asset_type,scene_id) \n products = temp.get()\n if( product in products.keys() ):\n return client.activate(products[product]),products[product]\n else:\n return None \n\ndef download_and_save(client,product):\n \"\"\"\n Given a client and a product activation object download the asset. \n This will save the tiff file in the local directory and return its \n file name. \n \"\"\"\n out = client.download(product)\n fp = out.get_body()\n fp.write()\n return fp.name\n\ndef scenes_are_active(scene_list):\n \"\"\"\n Check if all of the resources in a given list of\n scene activation objects is read for downloading.\n \"\"\"\n retVal = True\n for scene in scene_list:\n if scene[\"status\"] != \"active\":\n print \"{} is not ready.\".format(scene)\n return False\n return True",
"Now we'll activate our scenes",
"to_get = scenes[\"id\"][0:7].tolist()\nactivated = []\n# for each scene to get\nfor scene in to_get:\n # get the product \n product_types = get_products(client,scene)\n for p in product_types:\n # if there is a visual product\n if p == \"visual\": # p == \"basic_analytic_dn\"\n print \"Activating {0} for scene {1}\".format(p,scene)\n # activate the product\n _,product = activate_product(client,scene,product=p)\n activated.append(product)",
"And then download them.",
"tiff_files = []\nasset_type = \"_3B_Visual\"\n# check if our scenes have been activated\nif True:#scenes_are_active(activated):\n for to_download,name in zip(activated,to_get):\n # create the product name\n name = name + asset_type + \".tif\"\n # if the product exists locally\n if( os.path.isfile(name) ):\n # do nothing \n print \"We have scene {0} already, skipping...\".format(name)\n tiff_files.append(name)\n elif to_download[\"status\"] == \"active\":\n # otherwise download the product\n print \"Downloading {0}....\".format(name)\n fname = download_and_save(client,to_download)\n tiff_files.append(fname)\n print \"Download done.\"\n else:\n print \"Could not download, still activating\"\nelse:\n print \"Scenes aren't ready yet\"\n\nsorted(tiff_files)\nprint tiff_files ",
"Let's get going with Open Street Maps.\n\nOpen Street Maps is a huge and open collection of data about the Earth. \nOSM is free to query. The interfaces are powerful, but hella cryptic. \nLet's say we had a pixel in an image and we wanted to know what in the world was at that pixel. \nWe can use the Open Street Maps Nominatim function to look up what is there, like Google maps.\nWe can also use the OSM interface to find the 'nodes' near our pixel. \nOSM Nominatim works through Lat Long values. To get these lat long values we are going to through UTM coordinates.\nTo get correct the UTM values we'll need to ask GDAL what our UDM zone is.",
"infile = tiff_files[0]\n# Open the file\ngtif = gdal.Open(infile)\n# Get the project reference object this knows the UTM zone\nreff = gtif.GetProjectionRef()\n# arr is the actual image data.\narr = gtif.ReadAsArray()\n# Trans is our geo transfrom array. \ntrans = gtif.GetGeoTransform()\n# print the ref object\nprint reff\n# find our UTM zone\ni = reff.find(\"UTM\")\nprint reff[i:i+12]",
"Now we are going to write a function to convert pixels to UTM\n\nAlso a quick function to plot a point",
"def pixel2utm(ds, x, y):\n \"\"\"\n Returns utm coordinates from pixel x, y coords\n \"\"\"\n xoff, a, b, yoff, d, e = ds.GetGeoTransform()\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n return(xp, yp)\n\ndef draw_point(x,y,img):\n t = 20\n # a cloud_cover filter that ex\n img[y-t:y+t,x-t:x+t,:] = [255,0,0]",
"Now let's query a point on our scene and see what OSM tells us.\n\nFirst we'll define a pixel position\nWe'll use GDAL to open the scene and then map a pixel to UTM\nWe'll then convert the UTM value to Lat / Lon using the UTM region we found before. \nThen we'll instantiate a Nominatim object and perform a revers lookup and print the results. \nWe'll then use the OSM Api to get node at this place.",
"pos = [3000,1400] # this is the pixel we want info abou\nds = gdal.Open(infile)\n# take the GDAL info and make it into UTM\nmy_utm = pixel2utm(ds,pos[0],pos[1])\n# convert UTM into Lat Long\n# need to figure out how to get zone info\nmy_lla = utm.to_latlon(my_utm[0],my_utm[1],10,\"N\")\n# do the lat long look up from OSM\ngeolocator = Nominatim()\n# reverse look up the are based on lat lon\nlocation = geolocator.reverse(\"{0},{1}\".format(my_lla[0],my_lla[1]))\n# print location info\nprint location.address\nprint location.raw\n# get the OSM ID info\nosm_id = int(location.raw[\"place_id\"])\nprint osm_id\n# create an interface to the OSM API\nMyApi = OsmApi()\n# Look up our position \nprint MyApi.NodeGet(osm_id)\n",
"Now for completeness we'll plot our scene and add the annotation about the spot we found.",
"from matplotlib.patches import Circle\nfig,ax = plt.subplots(1)\n\n# create our plot\nplt.imshow(arr[:3,:,:].transpose((1, 2, 0)))#, extent=extent)\nfig = plt.gcf()\n# add our annotation\nplt.annotate(location.address, xy=pos, xycoords='data',\n xytext=(0.25, 0.5), textcoords='figure fraction',color=\"red\",\n arrowprops=dict(arrowstyle=\"->\"))\nax.set_aspect('equal')\n# Set a point\ncirc = Circle((pos[0],pos[1]),60,color=\"red\")\nax.add_patch(circ)\nfig.set_size_inches(18.5, 10.5)\nplt.show()",
"Now, well, OSM is hard.\n\nThis is I wanted to show where to programaticaly query OSM for all sorts of data.\nTurns out that it is a lot harder than it should be, especially if you want to work with GeoJson.\nOut of scope for this talk, but let's punt.\nOSM has a feature called Overpass. It is like the most convoluted Google maps ever using a very complex query language that I still don't grok. \nWe're going to use it to get all of the parks in Portland as GeoJSON using the web interface called Overpass Turbo.\nLet's take a look at that.\nHere's the query to run. Then export as GeoJSON\n[bbox:{{bbox}}][timeout:1800];\nway[\"leisure\"=\"park\"];map_to_area->.a;\nforeach(\n (._;>;);\n is_in;\n way(pivot)[\"leisure\"=\"park\"];\n out geom;\n);\n\nLet's load up our park data\n\nLoad the file using GeoPandas (some syntactic sugar on Pandas). \nAlso load the raw json, and chunk out each park.\nUpdate the area value because there is no value, not really useful except as a proxy measurement.\nUpdate and sort our data frame.",
"import geopandas as gpd\nfname = \"./portland_parks_small.geojson\"\npark_df = gpd.read_file(fname)\nportland_parks = json.load(open(fname,'r'))\n# raw geojson works better with GDAL\ngeojson = [p for p in portland_parks[\"features\"]]\n# no area out of the box\np = [p.area for p in park_df[\"geometry\"].tolist()]\npark_df[\"area\"] = pd.Series(p)\npark_df[\"geojson\"] = pd.Series(geojson)\npark_df.sort_values(['area',], ascending=[1])\npark_df.head()\n#len(park_df)\n#print park_df[\"wikipedia\"].dropna()",
"Now we'll update our slippy map.\n\nJust toss the aois in, just like our scene footprints.",
"for p in portland_parks[\"features\"]:\n feat = {'geometry':p[\"geometry\"],\"properties\":{\n 'style':{'color': \"#00FF00\",'fillColor': \"#00FF00\",'fillOpacity': 0.0,'weight': 1}},\n 'type':u\"Feature\"}\n # convert to geojson\n gjson = GeoJSON(data=feat)\n # add it our map\n m.add_layer(gjson)\nm",
"Now let's find the big parks.\n\nThe pandas dataframe can have multiple enteries per park.\nWe can use the group by command to sum up these disparate areas. \nFinally we'll output the results",
"park_sz = park_df.groupby(\"name\").sum() \npark_sz = park_sz.sort_values(by='area',ascending=[0])\ndisplay(park_sz)",
"Now to the meat of the problem.\n\nOur goal is to get each park as a small image so we can analyze it.\nWe'll write a function to create a geojson file from our big geojson file\nWe'll also write a function that takes in our scene list, an input geojson file, and calls gdal warp to generate our small park image.",
"def write_geojson_by_name(df,name,outfile):\n \"\"\"\n Take in a dataframe, a park name, and an output file name \n Save the park's geojson to the specified file.\n \"\"\"\n temp = df[df[\"name\"]==name]\n to_write = {\"type\": \"FeatureCollection\",\n \"features\": temp[\"geojson\"].tolist()}\n with open(outfile,'w') as fp:\n fp.write(json.dumps(to_write))\ndef crop_scenes_to_geojson(geojson,scenes,out_name):\n \"\"\"\n Take in a geojson file, a list of scenes, and an output name\n Call gdal and warp the scenes to match the geojson file and save the results to outname.\n \"\"\"\n commands = [\"gdalwarp\", # t\n \"-t_srs\",\"EPSG:3857\",\n \"-cutline\",geojson,\n \"-crop_to_cutline\",\n \"-tap\",\n \"-tr\", \"3\", \"3\"\n \"-overwrite\"]\n for tiff in scenes:\n commands.append(tiff)\n commands.append(out_name)\n print \" \".join(commands)\n subprocess.call(commands)",
"Let's put it all together\n\nWe're going to use the scenes we downloaded earlier as our input and build a little image for every park in Portland!\nWe just have to make a few file names and call the functions above.\nIf we really wanted to get fancy we could do this for every image that has our park and make a sick movie or lots of different types of a analysis.",
"geo_json_files = []\ntif_file_names = []\nunique_park_names = list(set(park_df[\"name\"].tolist()))\nfor name in list(unique_park_names):\n # Generate our file names \n geojson_name = \"./parks/\"+name.replace(\" \",\"_\")+\".geojson\"\n tif_name = \"./parks/\"+name.replace(\" \",\"_\")+\".tif\"\n # write geojson\n write_geojson_by_name(park_df,name,geojson_name)\n # write to park file\n crop_scenes_to_geojson(geojson_name,tiff_files,tif_name)\n # Save the results to lists\n geo_json_files.append(geojson_name)\n tif_file_names.append(tif_name)\n",
"Let's take a look at the first few parks!\n\nmatplotlib and tifs can be a bit heavy handed. \nimma teach you a protip use image magick and the built in image display.\nUse subprocess to tell imagemagick to convert tifs to jpg.\nThen load and display the images. \nWARNING: do not use imagemagick to modify geotiffs!",
"magic = [\"mogrify\",\"-format\", \"jpg\", \"./parks/*.tif\"]\nsubprocess.call(magic)\nfor p in tif_file_names[0:20]:\n print p\n display(Image(p.replace('tif','jpg')))\n",
"Not let's do some quick analytics -- your code goes here.\n\nFor completeness let's do some basic image processing.\nFor each of parks we are going to calculate the average \"greeness\" per pixel over the other two channels.\nWe do this as it controls for white pixels, like clouds.\nSince there are black pixels we'll have to controll for that by only using only the non-black pixels. \nWe'll use numpy here, but scikit image and OpenCV have many more features. \nIt is also worth noting that the visual product is probably only useful for calculating areas. If you want to do real science use the Analytics products.\nThe real way to do this is to calculate a Normalized Difference Vegetation Index (NDVI) using the analytic product.\nHere is an example of NDVI calculations.",
"def load_image3(filename):\n \"\"\"Return a 3D (r, g, b) numpy array with the data in the specified TIFF filename.\"\"\"\n path = os.path.abspath(os.path.join('./', filename))\n if os.path.exists(path):\n with rasterio.open(path) as src:\n b,g,r,mask = src.read()\n return np.dstack([b, g, r])\n \n\ndef get_avg_greeness(filename):\n retVal = -1.0\n try:\n # load the image\n img = load_image3(filename)\n if img is not None:\n # add all the channels together, black pixels will still be zero\n # this isn't a perfect method but there are very few truly black spots \n # on eart\n black_like_my_soul = np.add(np.add(img[:,:,0],img[:,:,1]),img[:,:,2])\n # sum up the not black pixels\n not_black = np.count_nonzero(black_like_my_soul)\n # sum up all the green\n img = np.array(img,dtype='int16')\n total_green = np.sum(img[:,:,1]-((np.add(img[:,:,0],img[:,:,2])/2)))\n # calculate our metric\n if total_green != 0 and not_black > 0:\n retVal = total_green / float(not_black)\n return retVal\n except Exception as e:\n print e\n return -1.0\n\ngreens = [get_avg_greeness(f) for f in tif_file_names]\nprint greens\n\npaired = zip(tif_file_names,greens)\npaired.sort(key=(lambda tup: tup[1]))\npaired.reverse()\nlabels = [p[0][8:-4].replace(\"_\",\" \") for p in paired]\ndata = [p[1] for p in paired]\nplt.figure(figsize=(20,6))\nxlocations = np.array(range(len(paired)))+0.5\nwidth = 1\nplt.bar(xlocations, data, width=width)\nplt.yticks(range(-1,25,1))\nplt.xticks(xlocations+ width/2, labels)\nplt.xlim(0, xlocations[-1]+width*2)\nplt.ylim(-2,np.max(data)+1)\nplt.title(\"Greeness over Average Red and Blue Per Park\")\nplt.gca().get_xaxis().tick_bottom()\nplt.gca().get_yaxis().tick_left()\nxa = plt.gca()\nxa.set_xticklabels(xa.xaxis.get_majorticklabels(), rotation=90)\nplt.show()\n",
"Let's take a look at what this looks like in terms of images.",
"imgs = [p[0] for p in paired]\nfor p in imgs[0:35]:\n print p[8:-4].replace(\"_\",\" \")\n display(Image(p.replace('tif','jpg')))",
"Now let's plot over our slippy map.\n\nWe'll calculate a non-linear opacity per park and then use that for plotting.",
"opacity_map = {}\ngmax = np.max(greens)\ngmin = np.min(greens)\n# this is a nonlinear mapping\nopacity = [np.clip((float(g**2)-gmin)/float(gmax-gmin),0,1) for g in greens]\nfor op,name in zip(opacity,imgs):\n opacity_map[name]=op\n\nm = Map(\n center=center, \n zoom=zoom,\n default_tiles = planetMapTiles # Uncomment to use Planet.com basemap\n)\ndc = DrawControl(\n polygon = polygon,\n rectangle = rectangle\n)\n# Initialize an action counter variable\nactionCount = 0\nAOIs = {}\n\n# Register the draw controls handler\ndef handle_draw(self, action, geo_json):\n # Increment the action counter\n global actionCount\n actionCount += 1\n # Remove the `style` property from the GeoJSON\n geo_json['properties'] = {}\n # Convert geo_json output to a string and prettify (indent & replace ' with \")\n geojsonStr = json.dumps(geo_json, indent=2).replace(\"'\", '\"')\n AOIs[actionCount] = json.loads(geojsonStr)\n \n# Attach the draw handler to the draw controls `on_draw` event\ndc.on_draw(handle_draw)\nm.add_control(dc)\nm\n\nfor p in portland_parks[\"features\"]:\n t = \"./parks/\"+p[\"properties\"][\"name\"].replace(\" \",\"_\") + \".tif\"\n feat = {'geometry':p[\"geometry\"],\"properties\":{\n 'style':{'color': \"#00FF00\",'fillColor': \"#00FF00\",'fillOpacity': opacity_map[t],'weight': 1}},\n 'type':u\"Feature\"}\n # convert to geojson\n gjson = GeoJSON(data=feat)\n # add it our map\n m.add_layer(gjson)\nm"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.