repo_name
stringlengths 6
77
| path
stringlengths 8
215
| license
stringclasses 15
values | cells
list | types
list |
|---|---|---|---|---|
AllenDowney/ProbablyOverthinkingIt
|
inspection2.ipynb
|
mit
|
[
"The Inspection Paradox is Everywhere\nAllen Downey 2019\nMIT License",
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom empiricaldist import Pmf\n\nfrom utils import decorate\n\n# set the random seed so we get the same results every time\nnp.random.seed(17)\n\n# make the directory for the figures\nimport os\n\nif not os.path.exists('inspection'):\n !mkdir inspection",
"Class size\nHere's the data summarizing the distribution of undergraduate class sizes at Purdue University in 2013-14.",
"# Class size data originally from\n# https://www.purdue.edu/datadigest/2013-14/InstrStuLIfe/DistUGClasses.html\n# now available from\n# https://web.archive.org/web/20160415011613/https://www.purdue.edu/datadigest/2013-14/InstrStuLIfe/DistUGClasses.html\n\nsizes = [(1, 1), \n (2, 9),\n (10, 19),\n (20, 29),\n (30, 39),\n (40, 49),\n (50, 99),\n (100, 300)]\n \ncounts = [138, 635, 1788, 1979, 796, 354, 487, 333]",
"I generate a sample from this distribution, assuming a uniform distribution in each range and an upper bound of 300.",
"def generate_sample(sizes, counts):\n \"\"\"Generate a sample from a distribution.\n \n sizes: sequence of (low, high) pairs\n counts: sequence of integers\n \n returns: NumPy array\n \"\"\"\n t = []\n for (low, high), count in zip(sizes, counts):\n print(count, low, high)\n sample = np.random.randint(low, high+1, count)\n t.extend(sample)\n return np.array(t)",
"The \"unbiased\" sample is as seen by the college, with each class equally likely to be in the sample.",
"unbiased = generate_sample(sizes, counts)",
"To generate a biased sample, we use the values themselves as weights and resample with replacement.",
"def resample_weighted(sample, weights):\n \"\"\"Resample values from `sample` with the given weights. \n \n sample: NumPy array\n weights: NumPy array\n \n returns: NumPy array\n \"\"\"\n n = len(sample)\n p = weights / np.sum(weights)\n return np.random.choice(sample, n, p=p)\n\nbiased = resample_weighted(unbiased, unbiased)",
"To plot the distribution, I use KDE to estimate the density function, then evaluate it over the given sequence of xs.",
"from scipy.stats import gaussian_kde\n\ndef kdeplot(sample, xs, label=None, **options):\n \"\"\"Use KDE to plot the density function.\n \n sample: NumPy array\n xs: NumPy array\n label: string\n \"\"\"\n density = gaussian_kde(sample, **options).evaluate(xs)\n plt.plot(xs, density, label=label)\n decorate(ylabel='Relative likelihood')",
"The following plot shows the distribution of class size as seen by the Dean, and as seen by a sample of students.",
"xs = np.arange(1, 300)\nkdeplot(unbiased, xs, 'Reported by the Dean')\nkdeplot(biased, xs, 'Reported by students')\n\ndecorate(xlabel='Class size',\n title='Distribution of class sizes')\n\nplt.savefig('inspection/class_size.png', dpi=150)",
"Here are the means of the unbiased and biased distributions.",
"np.mean(unbiased)\n\nnp.mean(biased)\n\nfrom empiricaldist import Cdf\n\ndef cdfplot(sample, xs, label=None, **options):\n \"\"\"Plot the CDF of the sample.\n \n sample: NumPy array\n xs: NumPy array (ignored)\n label: string\n \"\"\"\n cdf = Cdf.from_seq(sample, **options)\n cdf.plot(label=label)\n decorate(ylabel='CDF')\n\nxs = np.arange(1, 300)\ncdfplot(unbiased, xs, 'Reported by the Dean')\ncdfplot(biased, xs, 'Reported by students')\n\ndecorate(xlabel='Class size',\n title='Distribution of class sizes')\n\nplt.savefig('inspection/class_size.png', dpi=150)",
"Red Line\nHere are times between trains in seconds.",
"unbiased = [\n 428.0, 705.0, 407.0, 465.0, 433.0, 425.0, 204.0, 506.0, 143.0, 351.0, \n 450.0, 598.0, 464.0, 749.0, 341.0, 586.0, 754.0, 256.0, 378.0, 435.0, \n 176.0, 405.0, 360.0, 519.0, 648.0, 374.0, 483.0, 537.0, 578.0, 534.0, \n 577.0, 619.0, 538.0, 331.0, 186.0, 629.0, 193.0, 360.0, 660.0, 484.0, \n 512.0, 315.0, 457.0, 404.0, 740.0, 388.0, 357.0, 485.0, 567.0, 160.0, \n 428.0, 387.0, 901.0, 187.0, 622.0, 616.0, 585.0, 474.0, 442.0, 499.0, \n 437.0, 620.0, 351.0, 286.0, 373.0, 232.0, 393.0, 745.0, 636.0, 758.0,\n]",
"Here's the same data in minutes.",
"unbiased = np.array(unbiased) / 60",
"We can use the same function to generate a biased sample.",
"biased = resample_weighted(unbiased, unbiased)",
"And plot the results.",
"xs = np.linspace(1, 16.5, 101)\nkdeplot(unbiased, xs, 'Seen by MBTA')\nkdeplot(biased, xs, 'Seen by passengers')\n\ndecorate(xlabel='Time between trains (min)',\n title='Distribution of time between trains')\n\nplt.savefig('inspection/red_line.png', dpi=150)\n\nxs = np.linspace(1, 16.5, 101)\ncdfplot(unbiased, xs, 'Seen by MBTA')\ncdfplot(biased, xs, 'Seen by passengers')\n\ndecorate(xlabel='Time between trains (min)',\n title='Distribution of time between trains')\n\nplt.savefig('inspection/red_line.png', dpi=150)",
"Here are the means of the distributions and the percentage difference.",
"np.mean(biased), np.mean(unbiased)\n\n(np.mean(biased) - np.mean(unbiased)) / np.mean(unbiased) * 100",
"Social network\nThe following function reads the Facebook data.",
"import networkx as nx\n\ndef read_graph(filename):\n \"\"\"Read a graph from a file.\n \n filename: string\n \n return: nx.Graph\n \"\"\"\n G = nx.Graph()\n array = np.loadtxt(filename, dtype=int)\n G.add_edges_from(array)\n return G\n\n# https://snap.stanford.edu/data/facebook_combined.txt.gz\n\nfb = read_graph('facebook_combined.txt.gz')\nn = len(fb)\nm = len(fb.edges())\nn, m",
"The unbiased sample is the number of friends for each user.",
"unbiased = [fb.degree(node) for node in fb]\nlen(unbiased)\n\nnp.max(unbiased)",
"We can use the same function to generate a biased sample.",
"biased = resample_weighted(unbiased, unbiased)",
"And generate the plot.",
"xs = np.linspace(0, 300, 101)\nkdeplot(unbiased, xs, 'Random sample of people')\nkdeplot(biased, xs, 'Random sample of friends')\n\ndecorate(xlabel='Number of friends in social network',\n title='Distribution of social network size')\n\nplt.savefig('inspection/social.png', dpi=150)\n\nxs = np.linspace(0, 300, 101)\ncdfplot(unbiased, xs, 'Random sample of people')\ncdfplot(biased, xs, 'Random sample of friends')\n\ndecorate(xlabel='Number of friends in social network',\n title='Distribution of social network size',\n xlim=[-10, 310])\n\nplt.savefig('inspection/social.png', dpi=150)",
"Here are the means of the distributions.",
"np.mean(biased), np.mean(unbiased)",
"And the probability that the friend of a user has more friends than the user.",
"np.mean(biased > unbiased)",
"Relay race\nThe following function read the data from the 2010 James Joyce Ramble 10K, where I ran my personal record time.",
"import relay\nresults = relay.ReadResults()\nunbiased = relay.GetSpeeds(results)",
"In this case, the weights are related to the difference between each element of the sample and the hypothetical speed of the observer.",
"weights = np.abs(np.array(unbiased) - 7)\nbiased = resample_weighted(unbiased, weights)",
"And here's the plot.",
"xs = np.linspace(3, 11, 101)\nkdeplot(unbiased, xs, 'Seen by spectator')\nkdeplot(biased, xs, 'Seen by runner at 7 mph', bw_method=0.2)\n\ndecorate(xlabel='Running speed (mph)',\n title='Distribution of running speed')\n\nplt.savefig('inspection/relay.png', dpi=150)\n\nxs = np.linspace(3, 11, 101)\ncdfplot(unbiased, xs, 'Seen by spectator')\ncdfplot(biased, xs, 'Seen by runner at 7 mph')\n\ndecorate(xlabel='Running speed (mph)',\n title='Distribution of running speed')\n\nplt.savefig('inspection/relay.png', dpi=150)",
"Prison sentences\nFirst we read the data from the Bureau of Prisons web page.",
"tables = pd.read_html('BOP Statistics_ Sentences Imposed.html')\ndf = tables[0]\ndf",
"Here are the low and I sentences for each range. I assume that the minimum sentence is about a week, that sentences \"less than life\" are 40 years, and that a life sentence is between 40 and 60 years.",
"sentences = [(0.02, 1),\n (1, 3),\n (3, 5),\n (5, 10),\n (10, 15),\n (15, 20),\n (20, 40),\n (40, 60)]",
"We can get the counts from the table.",
"counts = df['# of Inmates']",
"Here's a different version of generate_sample for a continuous quantity.",
"def generate_sample(sizes, counts):\n \"\"\"Generate a sample from a distribution.\n \n sizes: sequence of (low, high) pairs\n counts: sequence of integers\n \n returns: NumPy array\n \"\"\"\n t = []\n for (low, high), count in zip(sizes, counts):\n print(count, low, high)\n sample = np.random.uniform(low, high, count)\n t.extend(sample)\n return np.array(t)",
"In this case, the data are biased.",
"biased = generate_sample(sentences, counts)",
"So we have to unbias them with weights inversely proportional to the values.\nPrisoners in federal prison typically serve 85% of their nominal sentence. We can take that into account in the weights.",
"weights = 1 / (0.85 * np.array(biased))",
"Here's the unbiased sample.",
"unbiased = resample_weighted(biased, weights)",
"And the plotted distributions.",
"xs = np.linspace(0, 60, 101)\nkdeplot(unbiased, xs, 'Seen by judge', bw_method=0.5)\nkdeplot(biased, xs, 'Seen by prison visitor', bw_method=0.5)\n\ndecorate(xlabel='Prison sentence (years)',\n title='Distribution of federal prison sentences')\n\nplt.savefig('inspection/orange.png', dpi=150)\n\nxs = np.linspace(0, 60, 101)\ncdfplot(unbiased, xs, 'Seen by judge')\ncdfplot(biased, xs, 'Seen by prison visitor')\n\ndecorate(xlabel='Prison sentence (years)',\n title='Distribution of federal prison sentences')\n\nplt.savefig('inspection/orange.png', dpi=150)",
"We can also compute the distribution of sentences as seen by someone at the prison for 13 months.",
"x = 0.85 * unbiased\ny = 13 / 12\n\nweights = x + y",
"Here's the sample.",
"kerman = resample_weighted(unbiased, weights)",
"And here's what it looks like.",
"xs = np.linspace(0, 60, 101)\nkdeplot(unbiased, xs, 'Seen by judge', bw_method=0.5)\nkdeplot(kerman, xs, 'Seen by Kerman', bw_method=0.5)\nkdeplot(biased, xs, 'Seen by visitor', bw_method=0.5)\n\ndecorate(xlabel='Prison sentence (years)',\n title='Distribution of federal prison sentences')\n\nplt.savefig('inspection/orange.png', dpi=150)\n\nxs = np.linspace(0, 60, 101)\ncdfplot(unbiased, xs, 'Seen by judge')\ncdfplot(kerman, xs, 'Seen by Kerman')\ncdfplot(biased, xs, 'Seen by visitor')\n\ndecorate(xlabel='Prison sentence (years)',\n title='Distribution of federal prison sentences')\n\nplt.savefig('inspection/orange.png', dpi=150)",
"In the unbiased distribution, almost half of prisoners serve less than one year.",
"np.mean(unbiased<1)",
"But if we sample the prison population, barely 3% are short timers.",
"np.mean(biased<1)",
"Here are the means of the distributions.",
"np.mean(unbiased)\n\nnp.mean(biased)\n\nnp.mean(kerman)",
"The dartboard problem",
"from matplotlib.patches import Circle\n\ndef draw_dartboard():\n ax = plt.gca()\n\n c1 = Circle((0, 0), 170, color='C3', alpha=0.3)\n c2 = Circle((0, 0), 160, color='white')\n c3 = Circle((0, 0), 107, color='C3', alpha=0.3)\n c4 = Circle((0, 0), 97, color='white')\n c5 = Circle((0, 0), 16, color='C3', alpha=0.3)\n c6 = Circle((0, 0), 6, color='white')\n for circle in [c1, c2, c3, c4, c5, c6]:\n ax.add_patch(circle)\n plt.axis('equal')\n \n\ndraw_dartboard()\nplt.text(0, 10, '25 ring')\nplt.text(0, 110, 'triple ring')\nplt.text(0, 170, 'double ring')\n\nplt.savefig('inspection/darts0.png', dpi=150)\n\nsigma = 50\nn = 100\nerror_x = np.random.normal(0, sigma, size=(n))\nerror_y = np.random.normal(0, sigma, size=(n))\n\ndraw_dartboard()\nplt.plot(error_x, error_y, '.')\nplt.savefig('inspection/darts1.png', dpi=150)\n\nsigma = 50\nn = 10000\nerror_x = np.random.normal(0, sigma, size=(n))\nerror_y = np.random.normal(0, sigma, size=(n))\n\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as pl\n\nax = sns.kdeplot(error_x, error_y, shade=True, cmap=\"PuBu\")\nax.collections[0].set_alpha(0)\nplt.axis([-240, 240, -175, 175])\n\ndecorate(xlabel='x distance from center (mm)',\n ylabel='y distance from center (mm)',\n title='Estimated density')\n\n\nplt.savefig('inspection/darts2.png', dpi=150)\n\nrs = np.hypot(error_x, error_y)\n\nnp.random.seed(18)\nsigma = 50\nn = 10000\nerror_x = np.random.normal(0, sigma, size=(n))\nerror_y = np.random.normal(0, sigma, size=(n))\n\nxs = np.linspace(-200, 200, 101)\n\n#ys = np.exp(-(xs/sigma)**2/2)\n#pmf = Pmf(ys, index=xs)\n#pmf.normalize()\n#pmf.plot(color='gray')\n\nunbiased = error_x\nbiased = resample_weighted(unbiased, np.abs(unbiased))\n\nkdeplot(unbiased, xs, 'Density at a point')\nkdeplot(biased, xs, 'Total density in a ring')\n#kdeplot(rs, xs, 'Total density in a ring')\n\ndecorate(xlabel='Distance from center (mm)',\n ylabel='Density',\n xlim=[0, 210])\n\nplt.savefig('inspection/darts3.png', dpi=150)\n\nxs = 
np.linspace(0, 200, 101)\nunbiased = np.abs(error_x)\nbiased = resample_weighted(unbiased, unbiased)\n\ncdfplot(unbiased, xs, 'Density at a point')\ncdfplot(biased, xs, 'Total density in a ring')\n\ndecorate(xlabel='Distance from center (mm)',\n ylabel='Density')\n\nplt.savefig('inspection/darts4.png', dpi=150)\n\ntriple = (biased > 97) & (biased < 107)\ntriple.mean() * 100\n\nring50 = (biased > 6) & (biased < 16)\nring50.mean() * 100\n\ndouble = (biased > 160) & (biased < 170)\ndouble.mean() * 100\n\nbull = (biased < 6)\nbull.mean() * 100"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
bmcmenamin/fa_kit
|
examples/Tutorial_Episode2.ipynb
|
mit
|
[
"Tutorial Episode 2: Ignoring noise\nIn this notebook, I show you how to use some experimental features that help you ignore the contributions of noise.",
"import os\nimport sys\nsys.path.append(os.path.pardir)\n\n%matplotlib inline\nimport numpy as np\n\nfrom fa_kit import FactorAnalysis\nfrom fa_kit import plotting as fa_plotting",
"Synthesizing fake data\nMaking fake data and putting it into a DataFrame, df_data, just like Episode 1.",
"import pandas as pd\nimport string\n\ndef make_random_data(n_samp=10000, n_feat=100):\n \"\"\"\n make some random data with correlated features\n \"\"\"\n data = np.random.randn(n_samp, n_feat)\n \n signal_width = 10\n signal_overlap = 2\n step_size = signal_width - signal_overlap\n for i in range(0, data.shape[1], step_size):\n shared_signal = 0.3*np.random.randn(n_samp, 1)\n data[:, i:(i+signal_width)] += shared_signal\n return data\n\ndata = make_random_data()\n\ncolumn_names = [\n \"{}{}\".format(i, string.lowercase[i % len(string.lowercase)])\n for i in range(data.shape[1])]\n\ndf_data = pd.DataFrame(\n data,\n columns=column_names\n )\n",
"Setting up a factor analysis pipeline\nThe function run_pipeline will take a set of data and run through each of the steps in a factor analysis.\nExcept now, the extra argument is whether you pass in a second matrix that describes the covariance structure of noise in your dataset. If you leave is as None, you'll just do a normal analysis by extracting the eigenvectors from a correlation/covariance matrix. But if you pass in a second n_features-by-n_features noise matrix, we'll switch over to an solving the Generalized Eigenvalue Problem.\nIn the normal eigendecomposition, the components you extract are constructed to maximize the total amount of variation they can explain in the data. In the Generalized Eigenvalue Problem, they are constructed to maximize the ratio of (variation explained in A) / (variation explained in B). This can be useful if B is a 'noise' matrix that explains patterns you'd like to ignore (e.g., within-group variability, or measurements of intrumentation noise).\nSo in this example, we show how you can steer your solution away from 'noise' by passing in a noise covariance matrix.\nWarning: this feature is still under development. it might get a little weird.\nAlso Note: If you try to find the correct number of components to keep using the broken stick distribution, it won't work right. I've decided to clamp the retention at the vales corresponding to the natual dropoffs in the scree plots rather than cherry-pick a noise regime where everything just-so-happens to look good with my broken-stick.",
"def run_pipeline(data, noise_cov=None, num_to_keep=12, **kwargs):\n\n fa = FactorAnalysis.load_data_samples(\n data,\n preproc_demean=True,\n preproc_scale=True,\n **kwargs\n )\n\n if noise_cov is not None:\n fa.add_noise_cov(noise_cov)\n \n fa.extract_components()\n\n fa.find_comps_to_retain(\n 'top_n',\n num_keep=num_to_keep,\n **kwargs\n )\n\n fa.reextract_using_paf()\n\n # Apply factor rotation\n # Right now there are both 'varimax' and 'quartimax'\n fa.rotate_components(\n method='varimax'\n )\n\n return fa",
"Demo: Normal, not steering away from noise\nYou've seen this in the previous two episodes. It's just here to make comparisons easier",
"fa_1 = run_pipeline(df_data, num_to_keep=13)\nfig = fa_plotting.graph_summary(fa_1)",
"Demo: Steering components away from explaining noise.\nWe make a sample noise matrix, noise_cov that assumes every dimension has independent noise, and the first 50 features has low-noise and the second 50 features have high noise, so we just put those low/high noise values along the diagonal.\nThen we pass that noise matrix into the analysis, and we see that the solution is steered away (quite dramatically) from explaining variability in the high-noise components.",
"noise = np.std(data, axis=0)\nfor i in range(len(noise)):\n if i < 50:\n noise[i] = 0.01\n else:\n noise[i] = 0.02\nnoise_cov = np.diag(noise)\n\nfa_2 = run_pipeline(df_data, noise_cov=noise_cov, num_to_keep=7)\nfig = fa_plotting.graph_summary(fa_2)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
jtwhite79/pyemu
|
examples/pstfrom_mf6.ipynb
|
bsd-3-clause
|
[
"Setting up a PEST interface from MODFLOW6 using the PstFrom class\nThe PstFrom class is a generalization of the prototype PstFromFlopy class. The generalization in PstFrom means users need to explicitly define what files are to be parameterized and what files contain model outputs to treat as observations. Two primary types of files are supported: arrays and lists. Array files contain a data type (usually floating points) while list files will have a few columns that contain index information and then columns of floating point values.",
"import os\nimport shutil\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pyemu\nimport flopy",
"An existing MODFLOW6 model is in the directory freyberg_mf6. Lets check it out:",
"org_model_ws = os.path.join('freyberg_mf6')\nos.listdir(org_model_ws)",
"You can see that all the input array and list data for this model have been written \"externally\" - this is key to using the PstFrom class. \nLet's quickly viz the model top just to remind us of what we are dealing with:",
"id_arr = np.loadtxt(os.path.join(org_model_ws,\"freyberg6.dis_idomain_layer3.txt\"))\ntop_arr = np.loadtxt(os.path.join(org_model_ws,\"freyberg6.dis_top.txt\"))\ntop_arr[id_arr==0] = np.nan\nplt.imshow(top_arr)",
"Now let's copy those files to a temporary location just to make sure we don't goof up those original files:",
"tmp_model_ws = \"temp_pst_from\"\nif os.path.exists(tmp_model_ws):\n shutil.rmtree(tmp_model_ws)\nshutil.copytree(org_model_ws,tmp_model_ws)\nos.listdir(tmp_model_ws)",
"Now we need just a tiny bit of info about the spatial discretization of the model - this is needed to work out separation distances between parameters for build a geostatistical prior covariance matrix later.\nHere we will load the flopy sim and model instance just to help us define some quantities later - flopy is not required to use the PstFrom class.",
"sim = flopy.mf6.MFSimulation.load(sim_ws=tmp_model_ws)\nm = sim.get_model(\"freyberg6\")\n",
"Here we use the simple SpatialReference pyemu implements to help us spatially locate parameters",
"sr = pyemu.helpers.SpatialReference.from_namfile(\n os.path.join(tmp_model_ws, \"freyberg6.nam\"),\n delr=m.dis.delr.array, delc=m.dis.delc.array)\nsr",
"Now we can instantiate a PstFrom class instance",
"template_ws = \"freyberg6_template\"\npf = pyemu.utils.PstFrom(original_d=tmp_model_ws, new_d=template_ws,\n remove_existing=True,\n longnames=True, spatial_reference=sr,\n zero_based=False,start_datetime=\"1-1-2018\")",
"Observations\nSo now that we have a PstFrom instance, but its just an empty container at this point, so we need to add some PEST interface \"observations\" and \"parameters\". Let's start with observations using MODFLOW6 head. These are stored in heads.csv:",
"df = pd.read_csv(os.path.join(tmp_model_ws,\"heads.csv\"),index_col=0)\ndf",
"The main entry point for adding observations is (surprise) PstFrom.add_observations(). This method works on the list-type observation output file. We need to tell it what column is the index column (can be string if there is a header or int if no header) and then what columns contain quantities we want to monitor (e.g. \"observe\") in the control file - in this case we want to monitor all columns except the index column:",
"hds_df = pf.add_observations(\"heads.csv\",insfile=\"heads.csv.ins\",index_cols=\"time\",\n use_cols=list(df.columns.values),prefix=\"hds\",)\nhds_df",
"We can see that it returned a dataframe with lots of useful info: the observation names that were formed (obsnme), the values that were read from heads.csv (obsval) and also some generic weights and group names. At this point, no control file has been created, we have simply prepared to add this observations to the control file later.",
"[f for f in os.listdir(template_ws) if f.endswith(\".ins\")]",
"Nice! We also have a PEST-style instruction file for those obs.\nNow lets do the same for SFR observations:",
"df = pd.read_csv(os.path.join(tmp_model_ws, \"sfr.csv\"), index_col=0)\nsfr_df = pf.add_observations(\"sfr.csv\", insfile=\"sfr.csv.ins\", index_cols=\"time\", use_cols=list(df.columns.values))\nsfr_df",
"Sweet as! Now that we have some observations, let's add parameters!\nParameters\nIn the PstFrom realm, all parameters are setup as multipliers against existing array and list files. This is a good thing because it lets us preserve the existing model inputs and treat them as the mean of the prior parameter distribution. It also let's us use mixtures of spatial and temporal scales in the parameters to account for varying scale of uncertainty. \nSince we are all sophisticated and recognize the importance of expressing spatial and temporal uncertainty (e.g. heterogeneity) in the model inputs (and the corresponding spatial correlation in those uncertain inputs), let's use geostatistics to express uncertainty. To do that we need to define \"geostatistical structures\". As we will see, defining parameter correlation is optional and only matters for the prior parameter covariance matrix and prior parameter ensemble:",
"v = pyemu.geostats.ExpVario(contribution=1.0,a=1000)\ngrid_gs = pyemu.geostats.GeoStruct(variograms=v, transform='log')\ntemporal_gs = pyemu.geostats.GeoStruct(variograms=pyemu.geostats.ExpVario(contribution=1.0,a=60))\n\ngrid_gs.plot()\nprint(\"spatial variogram\")\n\ntemporal_gs.plot()\n\"temporal variogram (x axis in days)\"",
"Now let's get the idomain array to use as a zone array - this keeps us from setting up parameters in inactive model cells:",
"ib = m.dis.idomain[0].array",
"First, let's setup parameters for static properties - HK, VK, SS, SY. Do that, we need to find all the external array files that contain these static arrays. Let's do just HK slowly so as to explain what is happening:",
"hk_arr_files = [f for f in os.listdir(tmp_model_ws) if \"npf_k_\" in f and f.endswith(\".txt\")]\nhk_arr_files",
"So those are the existing model input arrays for HK. Notice we found the files in the temporary model workspace - PstFrom will copy all those files to the new model workspace for us in a bit...\nLet's setup grid-scale multiplier parameter for HK in layer 1:",
"pf.add_parameters(filenames=\"freyberg6.npf_k_layer1.txt\",par_type=\"grid\",\n par_name_base=\"hk_layer_1\",pargp=\"hk_layer_1\",zone_array=ib,\n upper_bound=10.,lower_bound=0.1,ult_ubound=100,ult_lbound=0.01)\n",
"What just happened there? Well, we told our PstFrom instance to setup a set of grid-scale multiplier parameters (par_type=\"grid\") for the array file \"freyberg6.npf_k_layer1.txt\". We told it to prefix the parameter names with \"hk_layer_1\" and also to make the parameter group \"hk_layer_1\" (pargp=\"hk_layer_1\"). When specified two sets of bound information: upper_bound and lower_bound are the standard control file bounds, while ult_ubound and ult_lbound are bounds that are applied at runtime to the resulting (multiplied out) model input array - since we are using multipliers (and potentially, sets of multipliers - stay tuned), it is important to make sure we keep the resulting model input arrays within the range of realistic values.\nIf you inspect the contents of the working directory, we will see a new template file:",
"[f for f in os.listdir(template_ws) if f.endswith(\".tpl\")]\n\nwith open(os.path.join(template_ws,\"hk_layer_1_inst0_grid.csv.tpl\"),'r') as f:\n for _ in range(2):\n print(f.readline().strip())\n ",
"So those might look like pretty redic parameter names, but they contain heaps of metadata to help you post process things later...\nPilot points in PstFrom\nYou can add pilot points in two ways PstFrom can generate them for you on a regular grid or you can supply PstFrom with existing pilot point location information. First lets looks at the regular simple stuf - when you change par_type to \"pilotpoints\", by default, a regular grid of pilot points is setup using a default pp_space value of 10, which is every 10th row and column. We can override this default like:",
"pf.add_parameters(filenames=\"freyberg6.npf_k_layer3.txt\",par_type=\"pilotpoints\",\n par_name_base=\"hk_layer_1\",pargp=\"hk_layer_1\",zone_array=ib,\n upper_bound=10.,lower_bound=0.1,ult_ubound=100,ult_lbound=0.01,\n pp_space=5)",
"Now lets look at how to supply existing pilot locations - to do this, we simply change the pp_space arg to a filename or a dataframe. The dataframe must have \"name\", \"x\", and \"y\" as columns - it can have more, but must have those. If you supply pp_space as an str it is assumed to be a filename the extension is the guide: \".csv\" for dataframe, \".shp\" for shapefile (point-type) and everything else is assumed to be a pilot points file type. For example, here it is with a shapefile - first we will just make up some random pilot point locations and write those to a shapefile:",
"xmn = m.modelgrid.xvertices.min()\nxmx = m.modelgrid.xvertices.max()\nymn = m.modelgrid.yvertices.min()\nymx = m.modelgrid.yvertices.max()\n\nnumpp = 20\nxvals = np.random.uniform(xmn,xmx,numpp)\nyvals = np.random.uniform(ymn, ymx, numpp)\npp_locs = pd.DataFrame({\"x\":xvals,\"y\":yvals})\npp_locs.loc[:,\"zone\"] = 1\npp_locs.loc[:,\"name\"] = [\"pp_{0}\".format(i) for i in range(numpp)]\npp_locs.loc[:,\"parval1\"] = 1.0\n\npyemu.pp_utils.write_pp_shapfile(pp_locs,os.path.join(template_ws,\"pp_locs.shp\"))",
"Normally, you would probably put more thought in to pilot point locations, or maybe not! Now we call add_parameters and just pass the shapefile name for pp_space:",
"pf.add_parameters(filenames=\"freyberg6.npf_k_layer2.txt\",par_type=\"pilotpoints\",\n par_name_base=\"hk_layer_1\",pargp=\"hk_layer_1\",zone_array=ib,\n upper_bound=10.,lower_bound=0.1,ult_ubound=100,ult_lbound=0.01,\n pp_space=\"pp_locs.shp\")",
"Extra pre- and post-processing functions\nYou will also certainly need to include some additional processing steps. These are supported thru the PstFrom.pre_py cmds and PstFrom.post_py_cmds, which are lists for pre and post model run python commands and PstFrom.pre_sys_cmds and PstFrom.post_sys_cmds, which are lists for pre and post model run system commands (these are wrapped in pyemu.os_utils.run(). But what if your additional steps are actually an entire python function? Well, we got that too! PstFrom.add_py_function(). For example, let's say you have a post processing function called process_model_outputs() in a python source file called helpers.py:",
"_ = [print(line.rstrip()) for line in open(\"helpers.py\",'r').readlines()]",
"We see that the file helpers.py contains two functions (could be more..). We want to call process_model_outputs() each time pest(++) runs the model as a post processing function. This function will yield some quantities that we want to record with an instruction. So, first, we can call the function write_ins_file() in helpers.py to build the instruction file for the special processed outputs that process_model_outputs() will produce (in this trivial example, process_model_outputs() just generates random numbers...). Note that the instruction file needs to be in the template_ws directory since it is a pest interface file.\nLets make sure our new instruction file exists...",
"assert os.path.exists(\"special_outputs.dat.ins\")\nspecial_ins_filename = os.path.join(template_ws,\"special_outputs.dat.ins\")\nshutil.copy2(\"special_outputs.dat.ins\",special_ins_filename)",
"First, we can add the function process_model_outputs() to the forward run script like this:",
"pf.add_py_function(\"helpers.py\",\"process_model_outputs()\",is_pre_cmd=False)",
"This will copy the function process_model_outputs() from helpers.py into the forward run script that PstFrom will write. But we still need to add the instruction file into the mix - lets do that!",
"out_file = special_ins_filename.replace(\".ins\",\"\")\npf.add_observations_from_ins(ins_file=special_ins_filename,out_file=out_file,pst_path=\".\")",
"that pst_path argument tells PstFrom that the instruction file will be in the directory where pest(++) is running\nbuild the control file, pest interface files, and forward run script\nAt this point, we have some parameters and some observations, so we can create a control file:",
"pst = pf.build_pst()",
"Oh snap! we did it! thanks for playing...\nWell, there is a little more to the story. Like how do we run this thing? Lucky for you, PstFrom writes a forward run script for you! Say Wat?!",
"[f for f in os.listdir(template_ws) if f.endswith(\".py\")]\n\n_ = [print(line.rstrip()) for line in open(os.path.join(template_ws,\"forward_run.py\"))]",
"Not bad! We have everything we need, including our special post processing function...except we didnt set a command to run the model! Doh! \nLet's add that:",
"# only execute this block once!\npf.mod_sys_cmds.append(\"mf6\")\npst = pf.build_pst()\n\n\n_ = [print(line.rstrip()) for line in open(os.path.join(template_ws,\"forward_run.py\"))]",
"That's better! See the pyemu.os_utils.run(r'mf6') line in main()? \nWe also see that we now have a function called process_model_outputs() added to the forward run script and the function is being called after the model run call.\nGenerating geostatistical prior covariance matrices and ensembles\nSo that's nice, but how do we include spatial correlation in these parameters? It simple: just pass the geostruct arg to PstFrom.add_parameters()",
"pf.add_parameters(filenames=\"freyberg6.npf_k_layer3.txt\",par_type=\"grid\",\n par_name_base=\"hk_layer_3\",pargp=\"hk_layer_3\",zone_array=ib,\n upper_bound=10.,lower_bound=0.1,ult_ubound=100,ult_lbound=0.01,\n geostruct=grid_gs)\n",
"let's also check out the super awesome prior parameter covariance matrix and prior parameter ensemble helpers in PstFrom:",
"pst = pf.build_pst()\ncov = pf.build_prior()\nx = cov.x.copy()\nx[x<0.00001] = np.NaN\nplt.imshow(x)",
"Da-um! that's sweet ez! We can see the first block of HK parameters in the upper left as \"uncorrelated\" (diagonal only) entries, then the second block of HK parameters (lower right) that are spatially correlated.\nList file parameterization\nLet's add parameters for well extraction rates (always uncertain, rarely estimated!)",
"wel_files = [f for f in os.listdir(tmp_model_ws) if \"wel_stress_period\" in f and f.endswith(\".txt\")]\nwel_files\n\npd.read_csv(os.path.join(tmp_model_ws,wel_files[0]),header=None)",
"There are several ways to approach wel file parameterization. One way is to add a constant multiplier parameter for each stress period (that is, one scaling parameter that is applied all active wells for each stress period). Let's see how that looks, but first one important point: If you use the same parameter group name (pargp) and same geostruct, the PstFrom will treat parameters setup across different calls to add_parameters() as correlated. In this case, we want to express temporal correlation in the well multiplier pars, so we use the same parameter group names, specify the datetime and geostruct args.",
"# build up a container of stress period start datetimes - this will\n# be used to specify the datetime of each multipler parameter\ndts = pd.to_datetime(pf.start_datetime) + pd.to_timedelta(np.cumsum(sim.tdis.perioddata.array[\"perlen\"]),unit='d')\n\nfor wel_file in wel_files:\n # get the stress period number from the file name\n kper = int(wel_file.split('.')[1].split('_')[-1]) - 1 \n pf.add_parameters(filenames=wel_file,par_type=\"constant\",par_name_base=\"wel_cn\",\n pargp=\"wel_cn\", upper_bound = 1.5, lower_bound=0.5,\n datetime=dts[kper],geostruct=temporal_gs)\n\npst = pf.build_pst()\ncov = pf.build_prior(fmt=\"none\") # skip saving to a file...\nx = cov.x.copy()\nx[x==0] = np.NaN\nplt.imshow(x)",
"See the little offset in the lower right? there are a few parameters there in a small block:",
"plt.imshow(x[-25:,-25:])",
"Those are our constant-in-space but correlated in time wel rate parameters - snap!\nTo compliment those stress period level constant multipliers, lets add a set of multipliers, one for each pumping well, that is broadcast across all stress periods (and let's add spatial correlation for these):",
"pf.add_parameters(filenames=wel_files,par_type=\"grid\",par_name_base=\"wel_gr\",\n pargp=\"wel_gr\", upper_bound = 1.5, lower_bound=0.5,\n geostruct=grid_gs)\n\npst = pf.build_pst()\ncov = pf.build_prior(fmt=\"none\")\nx = cov.x.copy()\nx[x==0] = np.NaN\nplt.imshow(x[-49:,-49:])",
"The upper left block is the constant-in-space but correlated-in-time wel rate multiplier parameters, while the lower right block is the constant-in-time but correlated-in-space wel rate multiplier parameters. Boom!\nAfter building the control file\nAt this point, we can do some additional modifications that would typically be done that are problem specific. Note that any modifications made after calling PstFrom.build_pst() will only exist in memory - you need to call pf.pst.write() to record these changes to the control file on disk. Also note that if you call PstFrom.build_pst() after making some changes, these changes will be lost. \nAdditional parameters in existing template files\nIn many cases, you will have additional odd-ball parameters that arent in list or array file format that you want to include in the pest control. To demonstrate how this works, lets make up a template file:",
"tpl_filename = os.path.join(template_ws,\"special_pars.dat.tpl\")\nwith open(tpl_filename,'w') as f:\n f.write(\"ptf ~\\n\")\n f.write(\"special_par1 ~ special_par1 ~\\n\")\n f.write(\"special_par2 ~ special_par2 ~\\n\")\n\npf.pst.add_parameters(tpl_filename,pst_path=\".\")",
"Tying parameters\nLet's say you want to tie some parameters in the control file. This happens through the Pst.parameter_data dataframe. Here let's tie the first parameter in the control file to the second:",
"par = pf.pst.parameter_data\npar.loc[pf.pst.par_names[0],\"partrans\"] = \"tied\"\npar.loc[pf.pst.par_names[0],\"partied\"] = pf.pst.par_names[1]",
"Manipulating parameter bounds\nWhile you can pass parameter bound information to PstFrom.add_parameters(), in many cases, you may want to change the bounds for individual parameters before build the prior parameter covariance matrix and/or generating the prior parameter ensemble. This can be done through the PstFrom.pst.parameter_data dataframe:",
"par.loc[pf.pst.par_names[5:10],\"parlbnd\"]\n\npar.loc[pf.pst.par_names[5:10],\"parlbnd\"] = 0.25\npar.loc[pf.pst.par_names[5:10],\"parlbnd\"]",
"Setting observation values and weights\nSo far, we have automated the setup for pest(++). But one critical task remains and there is not an easy way to automate it: setting the actual observed values and weights in the * observation data information. PstFrom and Pst will both try to read existing model output files that correspond to instruction files and put those simulated values into the * observation data section for the observed values (the obsval quantity). However, if you have actual observation data and you want to use pest(++) to try to match these data, then you need to get these values into the * observation data section and you will probably also need to adjust the weight quantities as well. You can do this operation with pandas or you can save the control file in \"version 2\" format, which will write the * observation data section (along with the sections) as a CSV file, which can be imported into any number of spreadsheet programs. \nGenerating a prior parameter ensemble\nThis is crazy easy - using the previous defined correlation structures, we can draw from the block diagonal covariance matrix (and use spectral simulation for the grid-scale parameters):",
"pe = pf.draw(num_reals=100,use_specsim=True)\n\npe.to_csv(os.path.join(template_ws,\"prior.csv\"))\n\nprint(pe.loc[:,pst.adj_par_names[0]])\npe.loc[:,pst.adj_par_names[0]]._df.hist()",
"Industrial strength control file setup\nThis functionality mimics the demonstration the PstFrom manuscript",
"# load the mf6 model with flopy to get the spatial reference\nsim = flopy.mf6.MFSimulation.load(sim_ws=tmp_model_ws)\nm = sim.get_model(\"freyberg6\")\n\n# work out the spatial rediscretization factor\nredis_fac = m.dis.nrow.data / 40\n\n# where the pest interface will be constructed\ntemplate_ws = tmp_model_ws.split('_')[1] + \"_template\"\n\n\n# instantiate PstFrom object\npf = pyemu.utils.PstFrom(original_d=tmp_model_ws, new_d=template_ws,\n remove_existing=True,\n longnames=True, spatial_reference=m.modelgrid,\n zero_based=False,start_datetime=\"1-1-2018\")\n\n# add observations from the sfr observation output file\ndf = pd.read_csv(os.path.join(tmp_model_ws, \"sfr.csv\"), index_col=0)\npf.add_observations(\"sfr.csv\", insfile=\"sfr.csv.ins\", index_cols=\"time\", \n use_cols=list(df.columns.values),\n prefix=\"sfr\")\n\n# add observations for the heads observation output file\ndf = pd.read_csv(os.path.join(tmp_model_ws, \"heads.csv\"), index_col=0)\npf.add_observations(\"heads.csv\", insfile=\"heads.csv.ins\", \n index_cols=\"time\", use_cols=list(df.columns.values),\n prefix=\"hds\")\n\n# the geostruct object for grid-scale parameters\ngrid_v = pyemu.geostats.ExpVario(contribution=1.0,a=500)\ngrid_gs = pyemu.geostats.GeoStruct(variograms=grid_v)\n\n# the geostruct object for pilot-point-scale parameters\npp_v = pyemu.geostats.ExpVario(contribution=1.0, a=2000)\npp_gs = pyemu.geostats.GeoStruct(variograms=pp_v)\n\n# the geostruct for recharge grid-scale parameters\nrch_v = pyemu.geostats.ExpVario(contribution=1.0, a=1000)\nrch_gs = pyemu.geostats.GeoStruct(variograms=rch_v)\n\n# the geostruct for temporal correlation\ntemporal_v = pyemu.geostats.ExpVario(contribution=1.0,a=60)\ntemporal_gs = pyemu.geostats.GeoStruct(variograms=temporal_v)\n\n# import flopy as part of the forward run process\npf.extra_py_imports.append('flopy')\n\n# use the idomain array for masking parameter locations\nib = m.dis.idomain[0].array\n\n# define a dict that contains file name 
tags and lower/upper bound information\ntags = {\"npf_k_\":[0.1,10.],\"npf_k33_\":[.1,10],\"sto_ss\":[.1,10],\n\t\t\"sto_sy\":[.9,1.1],\"rch_recharge\":[.5,1.5]}\ndts = pd.to_datetime(\"1-1-2018\") + \\\n pd.to_timedelta(np.cumsum(sim.tdis.perioddata.array[\"perlen\"]),unit=\"d\")\n\n# loop over each tag, bound info pair\nfor tag,bnd in tags.items():\n lb,ub = bnd[0],bnd[1]\n # find all array based files that have the tag in the name\n arr_files = [f for f in os.listdir(template_ws) if tag in f \n\t\t\t\t and f.endswith(\".txt\")]\n\n if len(arr_files) == 0:\n print(\"warning: no array files found for \",tag)\n continue\n \n # make sure each array file in nrow X ncol dimensions (not wrapped, sigh)\n for arr_file in arr_files:\n arr = np.loadtxt(os.path.join(template_ws,arr_file)).reshape(ib.shape)\n np.savetxt(os.path.join(template_ws,arr_file),arr,fmt=\"%15.6E\")\n \n # if this is the recharge tag\n if \"rch\" in tag:\n # add one set of grid-scale parameters for all files\n pf.add_parameters(filenames=arr_files, par_type=\"grid\", \n \t\t\t\t par_name_base=\"rch_gr\",pargp=\"rch_gr\", \n \t\t\t\t zone_array=ib, upper_bound=ub, \n \t\t\t\t lower_bound=lb,geostruct=rch_gs)\n\n # add one constant parameter for each array, and \n # assign it a datetime so we can work out the \n # temporal correlation\n for arr_file in arr_files:\n kper = int(arr_file.split('.')[1].split('_')[-1]) - 1\n pf.add_parameters(filenames=arr_file,par_type=\"constant\",\n \t\t\t par_name_base=arr_file.split('.')[1]+\"_cn\",\n pargp=\"rch_const\",zone_array=ib,upper_bound=ub,\n lower_bound=lb,geostruct=temporal_gs,\n datetime=dts[kper])\n # otherwise...\n else:\n # for each array add both grid-scale and pilot-point scale parameters\n for arr_file in arr_files:\n pf.add_parameters(filenames=arr_file,par_type=\"grid\",\n par_name_base=arr_file.split('.')[1]+\"_gr\",\n pargp=arr_file.split('.')[1]+\"_gr\",zone_array=ib,\n upper_bound=ub,lower_bound=lb,\n geostruct=grid_gs)\n 
pf.add_parameters(filenames=arr_file, par_type=\"pilotpoints\", \n par_name_base=arr_file.split('.')[1]+\"_pp\",\n pargp=arr_file.split('.')[1]+\"_pp\", \n zone_array=ib,upper_bound=ub,lower_bound=lb,\n pp_space=int(5 * redis_fac),geostruct=pp_gs)\n\n\n# get all the list-type files associated with the wel package\nlist_files = [f for f in os.listdir(tmp_model_ws) if \n\t\t\t \"freyberg6.wel_stress_period_data_\" \n in f and f.endswith(\".txt\")]\n# for each wel-package list-type file \nfor list_file in list_files:\n kper = int(list_file.split(\".\")[1].split('_')[-1]) - 1\n # add spatially constant, but temporally correlated parameter\n pf.add_parameters(filenames=list_file,par_type=\"constant\",\n \t\t\t\t par_name_base=\"twel_mlt_{0}\".format(kper),\n pargp=\"twel_mlt\".format(kper),index_cols=[0,1,2],\n use_cols=[3],upper_bound=1.5,lower_bound=0.5, \n datetime=dts[kper], geostruct=temporal_gs)\n\n # add temporally indep, but spatially correlated grid-scale \n # parameters, one per well\n pf.add_parameters(filenames=list_file, par_type=\"grid\", \n par_name_base=\"wel_grid_{0}\".format(kper),\n pargp=\"wel_{0}\".format(kper), index_cols=[0, 1, 2], \n use_cols=[3],upper_bound=1.5, lower_bound=0.5)\n\n# add grid-scale parameters for SFR reach conductance. 
\n# Use layer, row, col and reach number in the \n# parameter names\npf.add_parameters(filenames=\"freyberg6.sfr_packagedata.txt\", \n par_name_base=\"sfr_rhk\",\n pargp=\"sfr_rhk\", index_cols=[0,1,2,3], \n use_cols=[9], upper_bound=10.,\n lower_bound=0.1,\n par_type=\"grid\")\n\n# add model run command\npf.mod_sys_cmds.append(\"mf6\")\n\n# build pest control file\npst = pf.build_pst('freyberg.pst')\n\n# draw from the prior and save the ensemble in binary format\npe = pf.draw(100, use_specsim=True)\npe.to_binary(os.path.join(template_ws, \"prior.jcb\"))\n\n# set some algorithmic controls\npst.control_data.noptmax = 0\npst.pestpp_options[\"additional_ins_delimiters\"] = \",\"\n\n# write the control file\npst.write(os.path.join(pf.new_d, \"freyberg.pst\"))\n\n# run with noptmax = 0\npyemu.os_utils.run(\"{0} freyberg.pst\".format(\n os.path.join(\"pestpp-ies\")), cwd=pf.new_d)\n\n# make sure it ran\nres_file = os.path.join(pf.new_d, \"freyberg.base.rei\")\nassert os.path.exists(res_file), res_file\npst.set_res(res_file)\nprint(pst.phi)\n\n# if successful, set noptmax = -1 for prior-based Monte Carlo\npst.control_data.noptmax = -1\n\n# define what file has the prior parameter ensemble\npst.pestpp_options[\"ies_par_en\"] = \"prior.jcb\"\n\n# write the updated pest control file\npst.write(os.path.join(pf.new_d, \"freyberg.pst\"))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ES-DOC/esdoc-jupyterhub
|
notebooks/cas/cmip6/models/sandbox-2/seaice.ipynb
|
gpl-3.0
|
[
"ES-DOC CMIP6 Model Properties - Seaice\nMIP Era: CMIP6\nInstitute: CAS\nSource ID: SANDBOX-2\nTopic: Seaice\nSub-Topics: Dynamics, Thermodynamics, Radiative Processes. \nProperties: 80 (63 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:45\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook",
"# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'cas', 'sandbox-2', 'seaice')",
"Document Authors\nSet document authors",
"# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Contributors\nSpecify document contributors",
"# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Publication\nSpecify document publication status",
"# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)",
"Document Table of Contents\n1. Key Properties --> Model\n2. Key Properties --> Variables\n3. Key Properties --> Seawater Properties\n4. Key Properties --> Resolution\n5. Key Properties --> Tuning Applied\n6. Key Properties --> Key Parameter Values\n7. Key Properties --> Assumptions\n8. Key Properties --> Conservation\n9. Grid --> Discretisation --> Horizontal\n10. Grid --> Discretisation --> Vertical\n11. Grid --> Seaice Categories\n12. Grid --> Snow On Seaice\n13. Dynamics\n14. Thermodynamics --> Energy\n15. Thermodynamics --> Mass\n16. Thermodynamics --> Salt\n17. Thermodynamics --> Salt --> Mass Transport\n18. Thermodynamics --> Salt --> Thermodynamics\n19. Thermodynamics --> Ice Thickness Distribution\n20. Thermodynamics --> Ice Floe Size Distribution\n21. Thermodynamics --> Melt Ponds\n22. Thermodynamics --> Snow Processes\n23. Radiative Processes \n1. Key Properties --> Model\nName of seaice model used.\n1.1. Model Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of sea ice model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.2. Model Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2. Key Properties --> Variables\nList of prognostic variable in the sea ice model.\n2.1. Prognostic\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nList of prognostic variables in the sea ice component.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.variables.prognostic') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sea ice temperature\" \n# \"Sea ice concentration\" \n# \"Sea ice thickness\" \n# \"Sea ice volume per grid cell area\" \n# \"Sea ice u-velocity\" \n# \"Sea ice v-velocity\" \n# \"Sea ice enthalpy\" \n# \"Internal ice stress\" \n# \"Salinity\" \n# \"Snow temperature\" \n# \"Snow depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"3. Key Properties --> Seawater Properties\nProperties of seawater relevant to sea ice\n3.1. Ocean Freezing Point\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS-10\" \n# \"Constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"3.2. Ocean Freezing Point Value\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf using a constant seawater freezing point, specify this value.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"4. Key Properties --> Resolution\nResolution of the sea ice grid\n4.1. Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.2. Canonical Horizontal Resolution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.3. Number Of Horizontal Gridpoints\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"5. Key Properties --> Tuning Applied\nTuning applied to sea ice model component\n5.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.2. Target\nIs Required: TRUE Type: STRING Cardinality: 1.1\nWhat was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.3. Simulations\nIs Required: TRUE Type: STRING Cardinality: 1.1\n*Which simulations had tuning applied, e.g. all, not historical, only pi-control? *",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.4. Metrics Used\nIs Required: TRUE Type: STRING Cardinality: 1.1\nList any observed metrics used in tuning model/parameters",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.5. Variables\nIs Required: FALSE Type: STRING Cardinality: 0.1\nWhich variables were changed during the tuning process?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6. Key Properties --> Key Parameter Values\nValues of key parameters\n6.1. Typical Parameters\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nWhat values were specificed for the following parameters if used?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ice strength (P*) in units of N m{-2}\" \n# \"Snow conductivity (ks) in units of W m{-1} K{-1} \" \n# \"Minimum thickness of ice created in leads (h0) in units of m\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"6.2. Additional Parameters\nIs Required: FALSE Type: STRING Cardinality: 0.N\nIf you have any additional paramterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7. Key Properties --> Assumptions\nAssumptions made in the sea ice model\n7.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.N\nGeneral overview description of any key assumptions made in this model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.description') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.2. On Diagnostic Variables\nIs Required: TRUE Type: STRING Cardinality: 1.N\nNote any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.3. Missing Processes\nIs Required: TRUE Type: STRING Cardinality: 1.N\nList any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8. Key Properties --> Conservation\nConservation in the sea ice component\n8.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nProvide a general description of conservation methodology.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.2. Properties\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nProperties conserved in sea ice by the numerical schemes.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.properties') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Mass\" \n# \"Salt\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8.3. Budget\nIs Required: TRUE Type: STRING Cardinality: 1.1\nFor each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.4. Was Flux Correction Used\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes conservation involved flux correction?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"8.5. Corrected Conserved Prognostic Variables\nIs Required: TRUE Type: STRING Cardinality: 1.1\nList any variables which are conserved by more than the numerical scheme alone.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9. Grid --> Discretisation --> Horizontal\nSea ice discretisation in the horizontal\n9.1. Grid\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nGrid on which sea ice is horizontal discretised?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ocean grid\" \n# \"Atmosphere Grid\" \n# \"Own Grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9.2. Grid Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the type of sea ice grid?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Structured grid\" \n# \"Unstructured grid\" \n# \"Adaptive grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9.3. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the advection scheme?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite differences\" \n# \"Finite elements\" \n# \"Finite volumes\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9.4. Thermodynamics Time Step\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nWhat is the time step in the sea ice model thermodynamic component in seconds.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"9.5. Dynamics Time Step\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nWhat is the time step in the sea ice model dynamic component in seconds.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"9.6. Additional Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify any additional horizontal discretisation details.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"10. Grid --> Discretisation --> Vertical\nSea ice vertical properties\n10.1. Layering\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nWhat type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Zero-layer\" \n# \"Two-layers\" \n# \"Multi-layers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"10.2. Number Of Layers\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nIf using multi-layers specify how many.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"10.3. Additional Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify any additional vertical grid details.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"11. Grid --> Seaice Categories\nWhat method is used to represent sea ice categories ?\n11.1. Has Mulitple Categories\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nSet to true if the sea ice model has multiple sea ice categories.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"11.2. Number Of Categories\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nIf using sea ice categories specify how many.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"11.3. Category Limits\nIs Required: TRUE Type: STRING Cardinality: 1.1\nIf using sea ice categories specify each of the category limits.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"11.4. Ice Thickness Distribution Scheme\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe the sea ice thickness distribution scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"11.5. Other\nIs Required: FALSE Type: STRING Cardinality: 0.1\nIf the sea ice model does not use sea ice categories specify any additional details. For example models that paramterise the ice thickness distribution ITD (i.e there is no explicit ITD) but there is assumed distribution and fluxes are computed accordingly.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.other') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"12. Grid --> Snow On Seaice\nSnow on sea ice details\n12.1. Has Snow On Ice\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs snow on ice represented in this model?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"12.2. Number Of Snow Levels\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nNumber of vertical levels of snow on ice?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"12.3. Snow Fraction\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how the snow fraction on sea ice is determined",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"12.4. Additional Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify any additional details related to snow on ice.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"13. Dynamics\nSea Ice Dynamics\n13.1. Horizontal Transport\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the method of horizontal advection of sea ice?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.horizontal_transport') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.2. Transport In Thickness Space\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the method of sea ice transport in thickness space (i.e. in thickness categories)?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.3. Ice Strength Formulation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhich method of sea ice strength formulation is used?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Hibler 1979\" \n# \"Rothrock 1975\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.4. Redistribution\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nWhich processes can redistribute sea ice (including thickness)?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.redistribution') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rafting\" \n# \"Ridging\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.5. Rheology\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nRheology, what is the ice deformation formulation?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.rheology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Free-drift\" \n# \"Mohr-Coloumb\" \n# \"Visco-plastic\" \n# \"Elastic-visco-plastic\" \n# \"Elastic-anisotropic-plastic\" \n# \"Granular\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14. Thermodynamics --> Energy\nProcesses related to energy in sea ice thermodynamics\n14.1. Enthalpy Formulation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the energy formulation?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice latent heat (Semtner 0-layer)\" \n# \"Pure ice latent and sensible heat\" \n# \"Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)\" \n# \"Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.2. Thermal Conductivity\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat type of thermal conductivity is used?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice\" \n# \"Saline ice\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.3. Heat Diffusion\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the method of heat diffusion?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Conduction fluxes\" \n# \"Conduction and radiation heat fluxes\" \n# \"Conduction, radiation and latent heat transport\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.4. Basal Heat Flux\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod by which basal ocean heat flux is handled?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heat Reservoir\" \n# \"Thermal Fixed Salinity\" \n# \"Thermal Varying Salinity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.5. Fixed Salinity Value\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"14.6. Heat Content Of Precipitation\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe the method by which the heat content of precipitation is handled.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"14.7. Precipitation Effects On Salinity\nIs Required: FALSE Type: STRING Cardinality: 0.1\nIf precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15. Thermodynamics --> Mass\nProcesses related to mass in sea ice thermodynamics\n15.1. New Ice Formation\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe the method by which new sea ice is formed in open water.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15.2. Ice Vertical Growth And Melt\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe the method that governs the vertical growth and melt of sea ice.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15.3. Ice Lateral Melting\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the method of sea ice lateral melting?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Floe-size dependent (Bitz et al 2001)\" \n# \"Virtual thin ice melting (for single-category)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15.4. Ice Surface Sublimation\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe the method that governs sea ice surface sublimation.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15.5. Frazil Ice\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe the method of frazil ice formation.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"16. Thermodynamics --> Salt\nProcesses related to salt in sea ice thermodynamics.\n16.1. Has Multiple Sea Ice Salinities\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"16.2. Sea Ice Salinity Thermal Impacts\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes sea ice salinity impact the thermal properties of sea ice?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"17. Thermodynamics --> Salt --> Mass Transport\nMass transport of salt\n17.1. Salinity Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow is salinity determined in the mass transport of salt calculation?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.2. Constant Salinity Value\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"17.3. Additional Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe the salinity profile used.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"18. Thermodynamics --> Salt --> Thermodynamics\nSalt thermodynamics\n18.1. Salinity Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow is salinity determined in the thermodynamic calculation?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18.2. Constant Salinity Value\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"18.3. Additional Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe the salinity profile used.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"19. Thermodynamics --> Ice Thickness Distribution\nIce thickness distribution details.\n19.1. Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow is the sea ice thickness distribution represented?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Virtual (enhancement of thermal conductivity, thin ice melting)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20. Thermodynamics --> Ice Floe Size Distribution\nIce floe-size distribution details.\n20.1. Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow is the sea ice floe-size represented?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Parameterised\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20.2. Additional Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nPlease provide further details on any parameterisation of floe-size.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"21. Thermodynamics --> Melt Ponds\nCharacteristics of melt ponds.\n21.1. Are Included\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nAre melt ponds included in the sea ice model?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"21.2. Formulation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat method of melt pond formulation is used?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flocco and Feltham (2010)\" \n# \"Level-ice melt ponds\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"21.3. Impacts\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nWhat do melt ponds have an impact on?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Albedo\" \n# \"Freshwater\" \n# \"Heat\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22. Thermodynamics --> Snow Processes\nThermodynamic processes in snow on sea ice\n22.1. Has Snow Aging\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.N\nSet to True if the sea ice model has a snow aging scheme.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"22.2. Snow Aging Scheme\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe the snow aging scheme.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22.3. Has Snow Ice Formation\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.N\nSet to True if the sea ice model has snow ice formation.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"22.4. Snow Ice Formation Scheme\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe the snow ice formation scheme.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22.5. Redistribution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nWhat is the impact of ridging on snow cover?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22.6. Heat Diffusion\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the heat diffusion through snow methodology in sea ice thermodynamics?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Single-layered heat diffusion\" \n# \"Multi-layered heat diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23. Radiative Processes\nSea Ice Radiative Processes\n23.1. Surface Albedo\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod used to handle surface albedo.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Parameterized\" \n# \"Multi-band albedo\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23.2. Ice Radiation Transmission\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nMethod by which solar radiation through sea ice is handled.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Exponential attenuation\" \n# \"Ice radiation transmission per category\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"©2017 ES-DOC"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
landlab/landlab
|
notebooks/tutorials/lithology/lithology_and_litholayers.ipynb
|
mit
|
[
"<a href=\"http://landlab.github.io\"><img style=\"float: left\" src=\"../../landlab_header.png\"></a>\nIntroduction to the Lithology and LithoLayers objects\nLithology and LithoLayers are two Landlab components meant to make it easier to work with spatially variable lithology that produces spatially variable parameter values (e.g. stream power erodability or diffusivity). \nThis tutorial is meant for users who have some experience using Landlab components.\nIn this tutorial we will explore the creation of spatially variable lithology and its impact on the evolution of topography. After an introductory example that will let you see how LithoLayers works, we will work through two more complicated examples. In the first example, we use the LithoLayers to erode either dipping layeres or an anticline. Then we will use Lithology to create inverted topography. \nWe will use xarray to store and annotate our model output. While we won't extensively discuss the use of xarray, some background will be provided. \nTo start, we will import the necessary modules. A note: this tutorial uses the HoloViews package for visualization. This package is a great tool for dealing with multidimentional annotated data (e.g. an xarray dataset). If you get an error on import, consider updating dask (this is what the author needed to do in April 2018). You will also need to have the Bokeh and Matplotlib packages installed.\nIn testing we've seen some users have a warning raised related to the Matplotlib backend. In our testing it was OK to ignore these errors.",
"import warnings\n\nwarnings.filterwarnings(\"ignore\")\n\nimport os\nimport numpy as np\nimport xarray as xr\nimport dask\n\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\nimport holoviews as hv\n\nhv.notebook_extension(\"matplotlib\")\n\nfrom landlab import RasterModelGrid\nfrom landlab.components import (\n FlowAccumulator,\n FastscapeEroder,\n LinearDiffuser,\n Lithology,\n LithoLayers,\n)",
"Part 1: Creating layered rock\nFirst we will create an instance of a LithoLayers to learn how this component works. Both LithoLayers and Lithology work closely with a Landlab ModelGrid, storing information about rock type at each grid node. \nTo create LithoLayers you need the following information:\n\nA model grid that has the field 'topographic__elevation' already created. \nA list of elevations, called 'layer_elevations' that the bottom of your layers will go through at specified plan-view anchor point (default value for the anchor point is (x, y) = (0, 0)), and a list of rock type IDs that indicate the rock type of that layer. When 'layer_elevations' is negative that means that the layer goes through the anchor point above the topographic surface. These layers will be created where they extend below the topographic surface.\nA dictionary of rock property attributes that maps a rock ID type to property values.\nA functional form in x and y that defines the shape of your surface. \n\nThe use of this function form makes it possible for any function of x and y to be passed to LithoLayers.\nBoth the Lithology and LithoLayers components then know the rock type ID of all the material in the 'block of rock' you have specified. This can be used to continuously know the value of specified rock properties at the topographic surface, even as the rock is eroded, uplifted, or new rock is deposited. \nIn this tutorial we will first make an example to help build intuition and then do two more complex examples. Most of the functionality of Lithology and LithoLayers is shown in this tutorial, but if you want to read the full component documentation for LithoLayers, it can be found here. Links to both components documentation can be found at the bottom of the tutorial.\nFirst, we create a small RasterModelGrid with topography.",
"mg = RasterModelGrid((10, 15))\nz = mg.add_zeros(\"topographic__elevation\", at=\"node\")",
"Next we make our layer elevations. We will make 20 layers that are 5 meters thick. Note that here, as with most Landlab components, there are no default units. At the anchor point, half of the layers will be above the ground ('layer_elevations' will have negative values) and half will be below the ground ('layer_elevations' have positive values). \nWe will make this with the np.arange function. We will also make the bottom layer really really thick so that we won't be able to erode through through it.",
"layer_elevations = 5.0 * np.arange(-10, 10)\n\n# we create a bottom layer that is very thick.\nlayer_elevations[-1] = layer_elevations[-2] + 100",
"Next we create an array that represents our rock type ID values. We will create alternating layers of four types of rock by making an array with alternating 0s 1s 2s and 3s with the np.tile function.",
"layer_ids = np.tile([0, 1, 2, 3], 5)",
"Our dictionary containing rock property attributes has the following form:",
"attrs = {\"K_sp\": {0: 0.0003, 1: 0.0001, 2: 0.0002, 3: 0.0004}}",
"'K_sp' is the property that we want to track through the layered rock, 0, 1, 2, 3 are the rock type IDs, and 0.0003 and 0.0001 are the values for 'K_sp' for the rock types 0 and 1. \nThe rock type IDs are unique identifiers for each type of rock. A particular rock type may have many properties (e.g. 'K_sp', 'diffusivity', and more). You can either specify all the possible rock types and attributes when you instantiate the LithoLayers component, or you can add new ones with the lith.add_rock_type or lith.add_property built in functions.\nFinally, we define our function. Here we will use a lambda expression to create a small anonymous function. In this case we define a function of x and y that returns the value x + (2. * y). The LithoLayers component will check that this function is a function of two variables and that when passed two arrays of size number-of-nodes it returns an array of size number-of-nodes.\nThis means that planar rock layers will dip into the ground to the North-North-East. By changing this functional form, we can make more complicated rock layers.",
"func = lambda x, y: x + (2.0 * y)",
"Finally we construct our LithoLayers component by passing the correct arguments.",
"lith = LithoLayers(mg, layer_elevations, layer_ids, function=func, attrs=attrs)",
"LithoLayers will make sure that the model grid has at-node grid fields with the layer attribute names. In this case, this means that the model grid will now include a grid field called 'K_sp' and a field called 'rock_type__id'. We can plot these with the Landlab imshow function.",
"mg.imshow(\"rock_type__id\", cmap=\"viridis\")",
"As you can see, we have layers that strike East-South-East. Since we can only see the surface expression of the layers, we can't infer the dip direction or magnitude from the plot alone. \nIf the topographic surface erodes, then you will want to update LithoLayers. Like most Landlab components, LithoLayers uses a run_one_step method to update. \nNext we will erode the topography by decrementing the variable z, which points to the topographic elevation of our model grid, by an amount 1. In a landscape evolution model, this would typically be done by running the run_one_step method for each of the process components in the model. If the rock mass is being advected up or down by an external force (e.g. tectonic rock uplift), then then advection must be specified. The dz_advection argument can be a single value or an array of size number-of-nodes.",
"z -= 1.0\ndz_ad = 0.0\nlith.dz_advection = dz_ad\nlith.run_one_step()",
"We can re-plot the value of 'K_sp'. We will see that the location of the surface expression of the rock layers has changed. As we expect, the location has changed in a way that is consistent with layers dipping to the NNE.",
"mg.imshow(\"rock_type__id\", cmap=\"viridis\")",
"Anytime material is added, LithoLayers or Lithology needs to know the type of rock that has been added. LithoLayers and Lithology do not assume to know the correct rock type ID and thus require that the user specify it with the rock_id keyword argument. In the run_one_step function, both components will check to see if any deposition has occured. If deposition occurs and this argument is not passed, then an error will be raised. \nFor example here we add 1 m of topographic elevation and do not advect the block of rock up or down. When we run lith.run_one_step we specify that the type of rock has id 0.",
"z += 1.0\ndz_ad = 0.0\n\nlith.dz_advection = dz_ad\nlith.rock_id = 0\n\nlith.run_one_step()",
"When we plot the value of the rock type ID at the surface, we find that it is now all purple, the color of rock type zero.",
"mg.imshow(\"rock_type__id\", cmap=\"viridis\", vmin=0, vmax=3)",
"The value passed to the rock_id keyword argument can be either a single value (as in the second to last example) or an array of length number-of-nodes. This option permits a user to indicate that more than one type of rock is deposited in a single time step. \nNext we will add a 2 m thick layer that is type 1 for x values less than or equal to 6 and type 2 for all other locations.",
"z += 2.0\ndz_ad = 0.0\nspatially_variable_rock_id = mg.ones(\"node\")\nspatially_variable_rock_id[mg.x_of_node > 6] = 2\n\nlith.dz_advection = dz_ad\nlith.rock_id = spatially_variable_rock_id\n\nlith.run_one_step()\nmg.imshow(\"rock_type__id\", cmap=\"viridis\", vmin=0, vmax=3)",
"As you can see this results in the value of rock type at the surface being about half rock type 1 and about half rock type 2. Next we will create an xarray dataset that has 3D information about our Lithology to help visualize the layers in space. We will use the rock_cube_to_xarray method of the LithoLayers component. \nWe will then convert this xarray dataset into a HoloViews dataset so we can visualize the result. \nAs you can see the LithoLayers has a value of rock types 1 and 2 at the surface, then a layer of 0 below, and finally changes to alternating layers.",
"ds = lith.rock_cube_to_xarray(np.arange(30))\nhvds_rock = hv.Dataset(ds.rock_type__id)\n\n%opts Image style(cmap='viridis') plot[colorbar=True]\nhvds_rock.to(hv.Image, [\"x\", \"y\"])",
"The slider allows us to change the depth below the topographic surface.\nWe can also plot the cube of rock created with LithoLayers as a cross section. In the cross section we can see the top two layers we made by depositing rock and then dipping layers of alternating rock types.",
"%opts Image style(cmap='viridis') plot[colorbar=True, invert_yaxis=True]\nhvds_rock.to(hv.Image, [\"x\", \"z\"])",
"Hopefuly this gives you a sense of how LithoLayers works. The next two blocks of code have all the steps we just worked through in one place. \nTry modifying the layer thicknesses, the size of the grid, the function used to create the form of the layers, the layers deposited and eroded, and the location of the anchor point to gain intuition for how you can use LithoLayers to create different types of layered rock.",
"# Parameters that control the size and shape of the model grid\nnumber_of_rows = 50\nnumber_of_columns = 50\ndx = 1\n\n# Parameters that control the LithoLayers\n\n# the layer shape function\nfunc = lambda x, y: (0.5 * x) ** 2 + (0.5 * y) ** 2\n\n# the layer thicknesses\nlayer_thickness = 50.0\n\n# the location of the anchor point\nx0 = 25\ny0 = 25\n\n# the resolution at which you sample to create the plan view and cros-section view figures.\nsample_depths = np.arange(0, 30, 1)\n\n# create the model grid\nmg = RasterModelGrid((number_of_rows, number_of_columns), dx)\nz = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n\n# set up LithoLayers inputs\nlayer_ids = np.tile([0, 1, 2, 3], 5)\nlayer_elevations = layer_thickness * np.arange(-10, 10)\nlayer_elevations[-1] = layer_elevations[-2] + 100\nattrs = {\"K_sp\": {0: 0.0003, 1: 0.0001, 2: 0.0002, 3: 0.0004}}\n\n# create LithoLayers\nlith = LithoLayers(\n mg, layer_elevations, layer_ids, x0=x0, y0=y0, function=func, attrs=attrs\n)\n\n# deposity and erode\ndz_ad = 0.0\n\nz -= 1.0\nlith.dz_advection = dz_ad\nlith.run_one_step()\n\nz += 1.0\nlith.dz_advection = dz_ad\nlith.rock_id = 0\nlith.run_one_step()\n\nz += 2.0\nspatially_variable_rock_id = mg.ones(\"node\")\nspatially_variable_rock_id[mg.x_of_node > 6] = 2\nlith.dz_advection = dz_ad\nlith.rock_id = spatially_variable_rock_id\nlith.run_one_step()\n\n# get the rock-cube data structure and plot\nds = lith.rock_cube_to_xarray(sample_depths)\nhvds_rock = hv.Dataset(ds.rock_type__id)\n\n# make a plan view image\n%opts Image style(cmap='viridis') plot[colorbar=True]\nhvds_rock.to(hv.Image, [\"x\", \"y\"])",
"You can also make a cross section of this new LithoLayers component.",
"%opts Image style(cmap='viridis') plot[colorbar=True, invert_yaxis=True]\nhvds_rock.to(hv.Image, [\"x\", \"z\"])",
"Part 2: Creation of a landscape evolution model with LithoLayers\nIn this next section, we will run LithoLayers with components used for a simple Landscape Evolution Model. \nWe will start by creating the grid.",
"mg = RasterModelGrid((50, 30), 400)\nz = mg.add_zeros(\"topographic__elevation\", at=\"node\")\nrandom_field = 0.01 * np.random.randn(mg.size(\"node\"))\nz += random_field - random_field.min()",
"Next we set all the parameters for LithoLayers. Here we have two types of rock with different erodabilities.",
"attrs = {\"K_sp\": {0: 0.0003, 1: 0.0001}}\n\nz0s = 50 * np.arange(-20, 20)\nz0s[-1] = z0s[-2] + 10000\n\nids = np.tile([0, 1], 20)",
"There are three functional forms that you can choose between. Here we define each of them.",
"# Anticline\nanticline_func = lambda x, y: ((0.002 * x) ** 2 + (0.001 * y) ** 2)\n\n# Shallow dips\nshallow_func = lambda x, y: ((0.001 * x) + (0.003 * y))\n\n# Steeper dips\nsteep_func = lambda x, y: ((0.01 * x) + (0.01 * y))",
"The default option is to make an anticline, but you can comment/uncomment lines to choose a different functional form.",
"# Anticline\nlith = LithoLayers(\n mg, z0s, ids, x0=6000, y0=10000, function=anticline_func, attrs=attrs\n)\n\n# Shallow dips\n# lith = LithoLayers(mg, z0s, ids, function=shallow_func, attrs=attrs)\n\n# Steeper dips\n# lith = LithoLayers(mg, z0s, ids, function=steep_func, attrs=attrs)",
"Now that we've created LithoLayers, model grid fields for each of the LithoLayers attributes exist and have been set to the values of the rock exposed at the surface. \nHere we plot the value of 'K_sp' as a function of the model grid.",
"mg.imshow(\"K_sp\")",
"As you can see (in the default anticline option) we have concentric elipses of stronger and weaker rock. \nNext, lets instantiate a FlowAccumulator and a FastscapeEroder to create a simple landscape evolution model. \nWe will point the FastscapeEroder to the model grid field 'K_sp' so that it will respond to the spatially variable erodabilities created by LithoLayers.",
"nts = 300\nU = 0.001\ndt = 1000\n\nfa = FlowAccumulator(mg)\nsp = FastscapeEroder(mg, K_sp=\"K_sp\")",
"Before we run the model we will also instatiate an xarray dataset used to store the output of our model through time for visualization. \nThe next block may look intimidating, but I'll try and walk you through what it does. \nxarray allows us to create a container for our data and label it with information like units, dimensions, short and long names, etc. xarray gives all the tools for dealing with N-dimentional data provided by python packages such as numpy, the labeling and named indexing power of the pandas package, and the data-model of the NetCDF file.\nThis means that we can use xarray to make a \"self-referential\" dataset that contains all of the variables and attributes that describe what each part is and how it was made. In this application, we won't make a fully self-referential dataset, but if you are interested in this, check out the NetCDF best practices. \nImportant for our application is that later on we will use the HoloViews package for visualization. This package is a great tool for dealing with multidimentional annotated data and will do things like automatically create nice axis labels with units. However, in order for it to work, we must first annotate our data to include this information.\nHere we create an xarray Dataset with two variables 'topographic__elevation' and 'rock_type__id' and three dimensions 'x', 'y', and 'time'. \nWe pass xarray two dictionaries, one with information about the data variabiables (data_vars) and one with information about the coordinate system (coords). For each data variable or coordinate, we pass a tuple of three items: (dims, data, atts). The first element is a tuple of the name of the dimensions, the second element is the data, an the third is a dictionary of attributes.",
"ds = xr.Dataset(\n data_vars={\n \"topographic__elevation\": (\n (\"time\", \"y\", \"x\"), # tuple of dimensions\n np.empty((nts, mg.shape[0], mg.shape[1])), # n-d array of data\n {\n \"units\": \"meters\", # dictionary with data attributes\n \"long_name\": \"Topographic Elevation\",\n },\n ),\n \"rock_type__id\": (\n (\"time\", \"y\", \"x\"),\n np.empty((nts, mg.shape[0], mg.shape[1])),\n {\"units\": \"-\", \"long_name\": \"Rock Type ID Code\"},\n ),\n },\n coords={\n \"x\": (\n (\"x\"), # tuple of dimensions\n mg.x_of_node.reshape(mg.shape)[0, :], # 1-d array of coordinate data\n {\"units\": \"meters\"},\n ), # dictionary with data attributes\n \"y\": ((\"y\"), mg.y_of_node.reshape(mg.shape)[:, 1], {\"units\": \"meters\"}),\n \"time\": (\n (\"time\"),\n dt * np.arange(nts) / 1e6,\n {\"units\": \"millions of years since model start\", \"standard_name\": \"time\"},\n ),\n },\n)",
"We can print the data set to get some basic information about it.",
"print(ds)",
"We can also print a single variable to get more detailed information about it. \nSince we initialized the datset with empty arrays for the two data variables, we just see zeros for the data values.",
"ds.topographic__elevation",
"Next, we run the model. In each time step we first run the FlowAccumulator to direct flow and accumulatate drainage area. Then the FastscapeEroder erodes the topography based on the stream power equation using the erodability value in the field 'K_sp'. We create an uplift field that uplifts only the model grid's core nodes. After uplifting these core nodes, we update LithoLayers. Importantly, we must tell the LithoLayers how it has been advected upward by uplift using the dz_advection keyword argument. \nAs we discussed in the introductory example, the built-in function lith.run_one_step has an optional keyword argument rock_id to use when some material may be deposited. The LithoLayers component needs to know what type of rock exists everywhere and it will raise an error if material is deposited and no rock type is specified. However, here we are using the FastscapeEroder which is fully detachment limited, and thus we know that no material will be deposited at any time. Thus we can ignore this keyword argument. Later in the tutorial we will use the LinearDiffuser which can deposit sediment and we will need to set this keyword argument correctly. \nWithin each timestep we save information about the model for plotting.",
"out_fields = [\"topographic__elevation\", \"rock_type__id\"]\n\nfor i in range(nts):\n fa.run_one_step()\n sp.run_one_step(dt=dt)\n dz_ad = np.zeros(mg.size(\"node\"))\n dz_ad[mg.core_nodes] = U * dt\n z += dz_ad\n lith.dz_advection = dz_ad\n lith.run_one_step()\n\n for of in out_fields:\n ds[of][i, :, :] = mg[\"node\"][of].reshape(mg.shape)",
"Now that the model has run, lets start by plotting the resulting topography.",
"mg.imshow(\"topographic__elevation\", cmap=\"viridis\")",
"The layers of rock clearly influence the form of topography. \nNext we will use HoloViews to visualize the topography and rock type together. \nTo start, we create a HoloViewDataset from our xarray datastructure.",
"hvds_topo = hv.Dataset(ds.topographic__elevation)\nhvds_rock = hv.Dataset(ds.rock_type__id)\nhvds_topo",
"Next we specify that we want two images, one showing rock type and one showing topographic elevation. A slider bar shows us model time in millions of years. \nBe patient. Running this next block may take a moment. HoloViews is rendering an image of all time slices so you can see an animated slider. This is pretty magical (but not instantaneous).",
"%opts Image style(interpolation='bilinear', cmap='viridis') plot[colorbar=True]\ntopo = hvds_topo.to(hv.Image, [\"x\", \"y\"])\nrock = hvds_rock.to(hv.Image, [\"x\", \"y\"])\n\ntopo + rock",
"We can see the form of the anticline advecting through the topography. Cool!\nPart 3: Creation of Inverted Topography\nHere we will explore making inverted topography by eroding Lithology with constant properties for half of the model evaluation time, and then filling Lithology in with resistant material only where the drainage area is large. This is meant as a simple example of filling in valleys with volcanic material. \nAll of the details of the options for creating a Lithology can be found here. \nIn the next code block we make a new model and run it. There are a few important differences between this next example and the one we just worked through in Part 2. \nHere we will have two rock types. Type 0 that represents non-volcanic material. It will have a higher diffusivity and erodability than the volcanic material, which is type 1. \nRecall that in Part 2 we did not specify a rock_id keyword argument to the lith.run_one_step method. This was because we used only the FastscapeEroder component which is fully detachment limited and thus never deposits material. In this example we will also use the LinearDiffuser component, which may deposity material. The Lithology component needs to know the rock type everywhere and thus we must indicate the rock type of the newly deposited rock. This is done by passing a single value or number-of-nodes sized array rock type values to the run_one_step method. \nWe also are handling the model grid boundary conditions differently than in the last example, setting the boundaries on the top and bottom to closed.",
"mg2 = RasterModelGrid((30, 30), 200)\nmg2.set_closed_boundaries_at_grid_edges(False, True, False, True)\nz2 = mg2.add_zeros(\"topographic__elevation\", at=\"node\")\nrandom_field = 0.01 * np.random.randn(mg2.size(\"node\"))\nz2 += random_field - random_field.min()\n\nthicknesses2 = [10000]\nids2 = [0]\n\nattrs2 = {\"K_sp\": {0: 0.0001, 1: 0.00001}, \"D\": {0: 0.4, 1: 0.001}}\n\nlith2 = Lithology(mg2, thicknesses2, ids2, attrs=attrs2)\n\nnts = 500\nU = 0.005\ndt = 1000\n\nfa2 = FlowAccumulator(mg2)\nsp2 = FastscapeEroder(mg2, K_sp=\"K_sp\")\nld2 = LinearDiffuser(mg2, linear_diffusivity=\"D\")\n\nout_fields = [\"topographic__elevation\", \"rock_type__id\"]\n\nout_fields = [\"topographic__elevation\", \"rock_type__id\"]\n\nnts = 200\nU = 0.001\ndt = 1000\n\nds2 = xr.Dataset(\n data_vars={\n \"topographic__elevation\": (\n (\"time\", \"y\", \"x\"),\n np.empty((nts, mg2.shape[0], mg2.shape[1])),\n {\"units\": \"meters\", \"long_name\": \"Topographic Elevation\"},\n ),\n \"rock_type__id\": (\n (\"time\", \"y\", \"x\"),\n np.empty((nts, mg2.shape[0], mg2.shape[1])),\n {\"units\": \"-\", \"long_name\": \"Rock Type ID Code\"},\n ),\n },\n coords={\n \"x\": ((\"x\"), mg2.x_of_node.reshape(mg2.shape)[0, :], {\"units\": \"meters\"}),\n \"y\": ((\"y\"), mg2.y_of_node.reshape(mg2.shape)[:, 1], {\"units\": \"meters\"}),\n \"time\": (\n (\"time\"),\n dt * np.arange(nts) / 1e6,\n {\"units\": \"millions of years since model start\", \"standard_name\": \"time\"},\n ),\n },\n)\n\nhalf_nts = int(nts / 2)\n\ndz_ad2 = np.zeros(mg2.size(\"node\"))\ndz_ad2[mg2.core_nodes] = U * dt\nlith2.dz_advection = dz_ad2\nlith2.rock_id = 0\n\nfor i in range(half_nts):\n fa2.run_one_step()\n sp2.run_one_step(dt=dt)\n ld2.run_one_step(dt=dt)\n\n z2 += dz_ad2\n lith2.run_one_step()\n\n for of in out_fields:\n ds2[of][i, :, :] = mg2[\"node\"][of].reshape(mg2.shape)",
"After the first half of run time, let's look at the topography.",
"mg2.imshow(\"topographic__elevation\", cmap=\"viridis\")",
"We can see that we have developed ridges and valleys as we'd expect from a model with stream power erosion and linear diffusion. \nNext we will create some volcanic deposits that fill the channels in our model.",
"volcanic_deposits = np.zeros(mg2.size(\"node\"))\nda_big_enough = mg2[\"node\"][\"drainage_area\"] > 5e4\n\ntopo_difference_from_top = (\n mg2[\"node\"][\"topographic__elevation\"].max() - mg2[\"node\"][\"topographic__elevation\"]\n)\n\nvolcanic_deposits[da_big_enough] = 0.25 * topo_difference_from_top[da_big_enough]\nvolcanic_deposits[mg2.boundary_nodes] = 0.0\n\nz2 += volcanic_deposits\nlith2.rock_id = 1\nlith2.run_one_step()\n\nmg2.imshow(volcanic_deposits)",
"We should expect that the locations of our valleys and ridges change as the river system encouters the much stronger volcanic rock.",
"for i in range(half_nts, nts):\n fa2.run_one_step()\n sp2.run_one_step(dt=dt)\n ld2.run_one_step(dt=dt)\n dz_ad2 = np.zeros(mg2.size(\"node\"))\n dz_ad2[mg2.core_nodes] = U * dt\n z2 += dz_ad2\n lith2.dz_advection = dz_ad2\n lith2.rock_id = 0\n lith2.run_one_step()\n\n for of in out_fields:\n ds2[of][i, :, :] = mg2[\"node\"][of].reshape(mg2.shape)",
"Now that the model has run, let's plot the final elevation",
"mg2.imshow(\"topographic__elevation\", cmap=\"viridis\")",
"And now a HoloView Plot that lets us explore the time evolution of the topography",
"hvds_topo2 = hv.Dataset(ds2.topographic__elevation)\nhvds_rock2 = hv.Dataset(ds2.rock_type__id)\n\n%opts Image style(interpolation='bilinear', cmap='viridis') plot[colorbar=True]\ntopo2 = hvds_topo2.to(hv.Image, [\"x\", \"y\"])\nrock2 = hvds_rock2.to(hv.Image, [\"x\", \"y\"])\n\ntopo2 + rock2\n\n# if you wanted to output to visualize in something like ParaView, the following commands can be used\n# ds.to_netcdf('anticline.nc')\n# ds2.to_netcdf('inversion.nc')",
"Sure enough, the volcanic deposits impact the location of the ridges and valleys. The old valleys become ridges because it takes so much time for them to be eroded. \nYou can explore how this changes as the thickness of the deposit changes and as the relative erodabilities change. \nThe end.\nNice work getting to the end of the tutorial!\nFor more detailed information about the Lithology and LithoLayers objects, check out their detailed documentation. \nClick here for more Landlab tutorials"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
Quasimondo/RasterFairy
|
examples/Raster Fairy Warp Demo.ipynb
|
bsd-3-clause
|
[
"<h1>Raster Fairy - Warping</h1>\n<p>Version 1.0<br/>Author: Mario Klingemann | @Quasimondo</p>\n<p>Raster Fairy also allows to warp a 2D point cloud by means of a Coons Patch. This warp will not align the points to a grid, but allow to reshape the cloud into a different form whilst still preserving the neighborhood relations.</p>\n<p>Let's start by creating an example dataset consisting of 2000 random RGB points and calculate a 2D t-sne embedding from it. Of course you can use any other clustering method or whatever 2D points you have lying around. </p>",
"# just some basic setup for the purpose of this demo:\n%matplotlib inline \nfrom IPython.display import display\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.manifold import TSNE\n#alternative you can use bh_sne:\n#from tsne import bh_sne\n\n\n# generate a set of 4900 random 3D points\ntotalDataPoints = 2000 \ndataPoints = np.random.uniform(low=0.0, high=1.0, size=(totalDataPoints,3))\n\n# create a t-sne embedding in 2D (we have to clone the array since bh_sne changes the original data)\n# The bh_sne repository can be found here: https://github.com/danielfrg/tsne\n#xy = bh_sne(dataPoints.copy())\n\nxy = TSNE().fit_transform(dataPoints)\n\n\nfig = plt.figure(figsize=(10.0,10.0))\nax = fig.add_subplot(1, 1, 1)\nax.set_facecolor('black')\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_visible(False)\nax.spines['left'].set_visible(False)\nax.axes.get_xaxis().set_visible(False)\nax.axes.get_yaxis().set_visible(False)\nax.autoscale_view(True,True,True)\nax.invert_yaxis()\nax.scatter(xy[:,0],xy[:,1], c = dataPoints, edgecolors='none',marker='s',s=7.5) \nplt.show()\n",
"The default method tries to warp the cloud towards a square shape. It does that by calculating the outer hull of the cloud and remapping it to a rectangle. The perimeterSubdivisionSteps parameter controls how concave the hull is. A value of 0 is the equivalent of a convex hull, the higher the value becomes the more the hull tries to \"creep\" into gaps. You have to experiment with this value depending on the shape of the hull - too high values will often result in most of the points being drawn to the edges. The autoPerimeterOffset will look for the grid that best matches the density distribution of the cloud, but will also often result in a rotation of the cloud. The width and height parameters control the resolution of the Coons Patch and will influence how continouus the mapping is. The default values of 64 should give good results in most cases. The paddingScale parameter controls the offset of the patch's hull from the cloud - the bigger the value the less pronounced the warping effect will be.",
"from rasterfairy import coonswarp\n\nwarped_xy = coonswarp.rectifyCloud(xy,perimeterSubdivisionSteps=4,autoPerimeterOffset=False, paddingScale=1.05)\n\nfig = plt.figure(figsize=(10.0,10.0))\nax = fig.add_subplot(1, 1, 1)\nax.set_facecolor('black')\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_visible(False)\nax.spines['left'].set_visible(False)\nax.axes.get_xaxis().set_visible(False)\nax.axes.get_yaxis().set_visible(False)\nax.autoscale_view(True,True,True)\nax.invert_yaxis()\nax.scatter(warped_xy[:,0],warped_xy[:,1], c = dataPoints, edgecolors='none',marker='s',s=7.5) \nplt.show()",
"Here is another example that shows how to use a different target grid, in this case a circle, to warp the cloud. Also The source and target grids are shown in the rendering:",
"sourceGrid = coonswarp.getCloudGrid( xy, perimeterSubdivisionSteps=2,autoPerimeterOffset=False, perimeterOffset=64,paddingScale=1.05, smoothing = 0.5)\ntargetGrid = coonswarp.getCircularGrid(xy)\nwarped_xy = coonswarp.warpCloud(xy,sourceGrid,targetGrid)\n\n\nfig = plt.figure(figsize=(10.0,10.0))\nax = fig.add_subplot(1, 1, 1)\nax.set_facecolor('black')\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_visible(False)\nax.spines['left'].set_visible(False)\nax.axes.get_xaxis().set_visible(False)\nax.axes.get_yaxis().set_visible(False)\nax.autoscale_view(True,True,True)\nax.invert_yaxis()\nax.scatter(xy[:,0],xy[:,1], c = dataPoints, edgecolors='none',s=7.5) \nax.scatter(sourceGrid[:,0],sourceGrid[:,1], c = 'w', edgecolors='none',s=4,alpha=0.6) \n\n\nfig = plt.figure(figsize=(10.0,10.0))\nax = fig.add_subplot(1, 1, 1)\nax.set_facecolor('black')\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_visible(False)\nax.spines['left'].set_visible(False)\nax.axes.get_xaxis().set_visible(False)\nax.axes.get_yaxis().set_visible(False)\nax.autoscale_view(True,True,True)\nax.invert_yaxis()\nax.scatter(warped_xy[:,0],warped_xy[:,1], c = dataPoints, edgecolors='none',s=7.5) \nax.scatter(targetGrid[:,0],targetGrid[:,1], c = 'w', edgecolors='none',s=4,alpha=0.6) \n\nplt.show()",
"Here is a snippet that renders an animation of the warp to an animated gif:",
"import io\nfrom moviepy.editor import *\nimport PIL.Image as PImage\nfrom IPython.display import clear_output, Image\nfrom sklearn.cluster import KMeans\n\npointCount = len(xy)\ngridPointCount = len(sourceGrid)\n# some color reduction since the NeuQuant color reduction of images2gif does a bad job in this case:\nkmeans = KMeans(n_clusters=min(255,pointCount), random_state=0).fit(dataPoints)\nreducedColors = kmeans.cluster_centers_[kmeans.predict(dataPoints)]\n\nwidth = 64\nheight = 64\n\nimages = []\n\n# a bit of coordinate normalization so the coordinates are in the same range:\npx_from = xy[:,0] - min(xy[:,0])\npy_from = xy[:,1] - min(xy[:,1])\n\npx_from /= max(px_from)\npy_from /= max(py_from)\n\npx_to = warped_xy[:,0] - min(warped_xy[:,0])\npy_to = warped_xy[:,1] - min(warped_xy[:,1])\n\npx_to /= max(px_to)\npy_to /= max(py_to)\n\n \ngx_from = sourceGrid[:,0] - min(sourceGrid[:,0])\ngy_from = sourceGrid[:,1] - min(sourceGrid[:,1])\n\ngx_from /= max(gx_from)\ngy_from /= max(gy_from)\n\ngx_to = targetGrid[:,0] - min(targetGrid[:,0])\ngy_to = targetGrid[:,1] - min(targetGrid[:,1])\n\ngx_to /= max(gx_to)\ngy_to /= max(gy_to)\n\n\nsteps = 50\n\nfor i in range(steps*2):\n \n t = 1.0- abs(1.0 * i / float(steps-1)-1.0) #tsteps[steps]\n #t= 1.0 * i / (steps-1)\n dx = []\n dy = []\n for j in range( pointCount ):\n dx.append( (1.0-t)*px_from[j] + t*px_to[j])\n dy.append( (1.0-t)*py_from[j] + t*py_to[j] )\n \n gx = []\n gy = []\n for j in range( gridPointCount ):\n gx.append( (1.0-t)*gx_from[j] + t*gx_to[j])\n gy.append( (1.0-t)*gy_from[j] + t*gy_to[j] )\n \n fig2 = plt.figure(figsize=(10.0,10.0))\n ax = fig2.add_subplot(1, 1, 1)\n ax.set_facecolor('black')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.scatter(dx,dy, c = reducedColors, edgecolors='none',s=10.0) #,marker='s'\n 
ax.scatter(gx,gy, c = 'w', alpha=0.6, edgecolors='none',s=5.0) #,marker='s'\n ax.axes.set_xlim([-0.01,1.01])\n ax.axes.set_ylim([-0.01,1.01])\n \n clear_output(wait=True)\n \n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n buf.seek(0)\n images.append( np.array(PImage.open(buf)) )\n \n \n #in case you want to rather export a series of PNGs:\n #plt.savefig('clusters/cluster'+\"%03d\" % (i+steps,)+'.png')\n plt.show()\n \nclip = ImageSequenceClip(images,fps=25)\nclip.write_gif(\"../warp_demo.gif\") \nImage(url=\"../warp_demo.gif\")\n"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
johnnycakes79/pyops
|
dashboard/pandas-highcharts-examples.ipynb
|
bsd-3-clause
|
[
"Pandas DataFrame plotting with Highcharts\npandas_highcharts is a Python library to turn your pandas DataFrame into a suited JSON for Highcharts, a Javascript library for interactive charts.\nBefore introducing Highcharts, I build an arbitrary DataFrame with a timeseries (a Brownian motion), plot it with matplotlib, then seaborn and Highcharts.\nImport",
"%matplotlib inline\nimport string\n\nimport numpy as np\nimport pandas as pd\n\nprint pd.__version__",
"Build a DataFrame with a timeseries",
"# Dimensions\nnb_rand_var = 8\nnb_dates = 220\nnp.random.seed(4321)\n# Random choice letters\npickme = lambda x: np.random.choice(26, x, replace=False)\nlabels = np.array(list(string.ascii_uppercase))[pickme(nb_rand_var)]\n\nlabels\n\n# Timeseries\nts = pd.date_range(\"2015-03-23\", periods=nb_dates, freq=\"B\")\nts\n\n# A Brownian Motion\nnoise = np.random.randn(nb_dates, nb_rand_var)\ndf = pd.DataFrame(noise.cumsum(axis=0),\n index=ts,\n columns=labels)\n\ndf.head()",
"Plotting with matplotlib",
"import matplotlib.pyplot as plt\nplt.style.use(\"ggplot\") # only for matplotlib >= 1.4\n\ndf.plot();",
"Even if the figure is nicer than the matplotlib default style, I think it's to small, and legends don't fit when the number of variables is higher than 5 or 6.\nPlotting with seaborn\nJust a try with seaborn.",
"import seaborn as sns\n\nwith sns.axes_style(\"darkgrid\"):\n df.plot()",
"The colors palette, the figure size and the legends position are OK for me.",
"with sns.axes_style(\"ticks\"):\n df.plot()",
"Let's go with Highcharts",
"from pandas_highcharts.display import display_charts\n\ndisplay_charts(df, title=\"Brownian Motion\")",
"You can also retrieve the JSON data generated by pandas_highcharts thanks to the function serialize.",
"from pandas_highcharts.core import serialize",
"Serialize the previous DataFrame with the pandas_highcharts function serialize.",
"json_data = serialize(df, render_to=\"brownian\", title=\"Brownian Motion\")\n\ntype(json_data)",
"And display the charts thanks to the HTML renderer and the function display of IPython",
"from IPython.core.display import display, HTML\n\ndisplay(HTML(\"\"\"<div id=\"{chart_id}\"</div>\n<script type=\"text/javascript\">{data}</script>\"\"\".format(chart_id=\"brownian\", data=json_data)))",
"Try to change some parameters\nYou can also retrive the related Python dict before getting the JSON version. You'll have the same structure as Highcharts options and can change some parameters.",
"data = serialize(df, render_to=\"brownian\", output_type=\"dict\")\n\ndata.keys()\n\ndata[\"chart\"]\n\ndata[\"legend\"]",
"Add a subtitle",
"data[\"subtitle\"] = {\"text\": \"a subtitle here...\"}",
"Change the chart type",
"data[\"chart\"][\"type\"] = \"spline\"\n\ndata[\"plotOptions\"] = {\"spline\": {\n \"lineWidth\": 2,\n \"states\": {\n \"hover\": {\n \"lineWidth\": 3}\n }}}\n\ndata['chart']['type'] = 'line'\ndata['chart']['zoomType'] = 'x'\ndata['chart']['panning'] = True\ndata['chart']['panKey'] = 'shift'\n\ndata[\"chart\"][\"renderTo\"] = \"new_brownian\"\n\nprint data[\"chart\"]\nprint data[\"subtitle\"]\nprint data[\"yAxis\"]",
"Serialize this new dict.",
"from pandas_highcharts.core import json_encode\n\njson_data_2 = \"new Highcharts.StockChart(%s);\" % json_encode(data)\n\ndisplay(HTML(\"\"\"<div id=\"{chart_id}\"</div>\n<script type=\"text/javascript\">{data}</script>\"\"\".format(chart_id=\"new_brownian\", data=json_data_2)))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
fastai/course-v3
|
zh-nbs/Lesson4_collab.ipynb
|
apache-2.0
|
[
"Practical Deep Learning for Coders, v3\nLesson4_collab",
"from fastai.collab import *\nfrom fastai.tabular import *",
"Collaborative filtering example\n协同过滤案例\ncollab models use data in a DataFrame of user, items, and ratings.\ncollab模型使用的是DataFrame中的一个(包含)用户、电影和评分的数据集。",
"user,item,title = 'userId','movieId','title'\n\npath = untar_data(URLs.ML_SAMPLE)\npath\n\nratings = pd.read_csv(path/'ratings.csv')\nratings.head()",
"That's all we need to create and train a model:\n以上就是我们用来训练模型的全部(数据):",
"data = CollabDataBunch.from_df(ratings, seed=42)\n\ny_range = [0,5.5]\n\nlearn = collab_learner(data, n_factors=50, y_range=y_range)\n\nlearn.fit_one_cycle(3, 5e-3)",
"Movielens 100k\nLet's try with the full Movielens 100k data dataset, available from http://files.grouplens.org/datasets/movielens/ml-100k.zip\n让我们尝试一下用Movielens的全部数据进行建模。",
"path=Config.data_path()/'ml-100k'\n\nratings = pd.read_csv(path/'u.data', delimiter='\\t', header=None,\n names=[user,item,'rating','timestamp'])\nratings.head()\n\nmovies = pd.read_csv(path/'u.item', delimiter='|', encoding='latin-1', header=None,\n names=[item, 'title', 'date', 'N', 'url', *[f'g{i}' for i in range(19)]])\nmovies.head()\n\nlen(ratings)\n\nrating_movie = ratings.merge(movies[[item, title]])\nrating_movie.head()\n\ndata = CollabDataBunch.from_df(rating_movie, seed=42, valid_pct=0.1, item_name=title)\n\ndata.show_batch()\n\ny_range = [0,5.5]\n\nlearn = collab_learner(data, n_factors=40, y_range=y_range, wd=1e-1)\n\nlearn.lr_find()\nlearn.recorder.plot(skip_end=15)\n\nlearn.fit_one_cycle(5, 5e-3)\n\nlearn.save('dotprod')",
"Here's some benchmarks on the same dataset for the popular Librec system for collaborative filtering. They show best results based on RMSE of 0.91, which corresponds to an MSE of 0.91**2 = 0.83.\n这里 是一些在同一数据集上建模的基准数据。在表格中我们可以看到最好的模型的RMSE是0.91,对应的MSE是0.91**2 = 0.83。\nInterpretation\n模型释义\nSetup 调用",
"learn.load('dotprod');\n\nlearn.model\n\ng = rating_movie.groupby(title)['rating'].count()\ntop_movies = g.sort_values(ascending=False).index.values[:1000]\ntop_movies[:10]",
"Movie bias\n电影模型的偏差",
"movie_bias = learn.bias(top_movies, is_item=True)\nmovie_bias.shape\n\nmean_ratings = rating_movie.groupby(title)['rating'].mean()\nmovie_ratings = [(b, i, mean_ratings.loc[i]) for i,b in zip(top_movies,movie_bias)]\n\nitem0 = lambda o:o[0]\n\nsorted(movie_ratings, key=item0)[:15]\n\nsorted(movie_ratings, key=lambda o: o[0], reverse=True)[:15]",
"Movie weights\n电影模型权重",
"movie_w = learn.weight(top_movies, is_item=True)\nmovie_w.shape\n\nmovie_pca = movie_w.pca(3)\nmovie_pca.shape\n\nfac0,fac1,fac2 = movie_pca.t()\nmovie_comp = [(f, i) for f,i in zip(fac0, top_movies)]\n\nsorted(movie_comp, key=itemgetter(0), reverse=True)[:10]\n\nsorted(movie_comp, key=itemgetter(0))[:10]\n\nmovie_comp = [(f, i) for f,i in zip(fac1, top_movies)]\n\nsorted(movie_comp, key=itemgetter(0), reverse=True)[:10]\n\nsorted(movie_comp, key=itemgetter(0))[:10]\n\nidxs = np.random.choice(len(top_movies), 50, replace=False)\nidxs = list(range(50))\nX = fac0[idxs]\nY = fac2[idxs]\nplt.figure(figsize=(15,15))\nplt.scatter(X, Y)\nfor i, x, y in zip(top_movies[idxs], X, Y):\n plt.text(x,y,i, color=np.random.rand(3)*0.7, fontsize=11)\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
eshlykov/mipt-day-after-day
|
labs/term-4/lab-1-4.ipynb
|
unlicense
|
[
"Работа 1.4. Исследование вынужденной прецессии гироскопа\nЦель работы: исследовать вынужденную прецессию уравновешенного симметричного гироскопа; установить зависимость угловой скорости вынужденной прецессии от величины момента сил, действующих на ось гироскопа; по угловой скорости прецессии определить\nугловую скорость вращения ротора гироскопа.\nВ работе используются: гироскоп в кардановом подвесе, секундомер, набор грузов, отдельный ротор гироскопа, цилиндр известной\nмассы, крутильный маятник, штангенциркуль, линейка.",
"import numpy as np\nimport scipy as ps\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"Параметры установки\n$f = 440$ Гц - резонансная частота.\n$l = 12,1$ см - расстояние до крайней риски.\n$T_{э} = 9 $ c - период эталона.\n$M_{э} = 1618.9 \\pm 0.5 $ г - масса эталона.\n$R_{э} = 4$ см - радиус эталона.\n$T_{г} = 7$ с - период гироскопа. \nТеоретические формулы\n$$\\Omega = \\frac{mgl}{J_0\\omega_0}$$\n$$J_э = \\frac{M_эR_э^2}{2}$$\n$$\\frac{J_г}{J_э} = \\left(\\frac{T_г}{T_ц}\\right)^2$$\nПостроение графика",
"data = pd.read_excel('lab-1-4.xlsx', 'table-1')\ndata.head(len(data))\n\nx = data.values[:, 1]\ny = data.values[:, 5]\ndx = data.values[:, 2]\ndy = data.values[:, 6]\n\nk, b = np.polyfit(x, y, deg=1)\n\ngrid = np.linspace(0.0, np.max(x), 300)\n\nplt.figure(figsize=(12, 8))\n\nplt.grid(linestyle='--')\n\nplt.title('Зависимость $\\Omega$ от $M$', fontweight='bold', fontsize=20)\nplt.xlabel('$M$, $\\\\frac{кг\\\\cdot м^2}{с^2}$', fontsize=16)\nplt.ylabel('$\\Omega$, $\\\\frac{рад}{с^2}$', fontsize=16)\n\nplt.plot(grid, k * grid + b)\nplt.errorbar(x, y, xerr=dx, yerr=dy, fmt='o')\n\nplt.show()",
"Вычисление момента инерции",
"J_0 = 1.6189 * 0.04 ** 2.0 / 2.0\nT_0 = 9.0\nT_1 = 7.0\nJ_1 = J_0 * (T_1 / T_0) ** 2\nprint(J_1 * 10 ** 6)",
"Таким образом, $J_г = 783.5 \\cdot 10^{-6}$ $кг \\cdot \\frac{м^2}{с^2}$."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
markroxor/gensim
|
docs/notebooks/Poincare Evaluation.ipynb
|
lgpl-2.1
|
[
"Evaluation of Poincare Embeddings\nThis notebook demonstrates how well Poincare embeddings perform on the tasks detailed in the original paper about the embeddings.\nThe following two external, open-source implementations are used - \n1. C++\n2. Numpy\nThis is the list of tasks - \n1. WordNet reconstruction\n2. WordNet link prediction\n3. Link prediction in collaboration networks (evaluation incomplete)\n4. Lexical entailment on HyperLex\nA more detailed explanation of the tasks and the evaluation methodology is present in the individual evaluation subsections.\n1. Setup\nThe following section performs the following - \n1. Imports required python libraries and downloads the wordnet data\n2. Clones the repositories containing the C++ and Numpy implementations of the Poincare embeddings\n3. Applies patches containing minor changes to the implementations.\n4. Compiles the C++ sources to create a binary",
"% cd ../..\n\n% cd docs/notebooks/\n\nimport csv\nfrom collections import OrderedDict\nimport logging\nimport os\nimport pickle\nimport random\nimport re\n\nimport click\nfrom gensim.models.poincare import PoincareModel, PoincareRelations, \\\n ReconstructionEvaluation, LinkPredictionEvaluation, \\\n LexicalEntailmentEvaluation, PoincareKeyedVectors\nfrom gensim.utils import check_output\nimport nltk\nfrom prettytable import PrettyTable\nfrom smart_open import smart_open\n\nlogging.basicConfig(level=logging.INFO)\nnltk.download('wordnet')",
"Note that not all the above libraries are part of the gensim dependencies, so they might need to be installed separately. These requirements are listed in the poincare requirements.txt\nPlease set the variable parent_directory below to change the directory to which the repositories are cloned.",
"current_directory = os.getcwd()\n\n# Change this variable to `False` to not remove and re-download repos for external implementations\nforce_setup = False\n\n# The poincare datasets, models and source code for external models are downloaded to this directory\nparent_directory = os.path.join(current_directory, 'poincare')\n! mkdir -p {parent_directory}\n\n% cd {parent_directory}\n\n# Clone repos\nnp_repo_name = 'poincare-np-embedding'\nif force_setup and os.path.exists(np_repo_name):\n ! rm -rf {np_repo_name}\nclone_np_repo = not os.path.exists(np_repo_name)\nif clone_np_repo:\n ! git clone https://github.com/nishnik/poincare_embeddings.git {np_repo_name}\n\ncpp_repo_name = 'poincare-cpp-embedding'\nif force_setup and os.path.exists(cpp_repo_name):\n ! rm -rf {cpp_repo_name}\nclone_cpp_repo = not os.path.exists(cpp_repo_name)\nif clone_cpp_repo:\n ! git clone https://github.com/TatsuyaShirakawa/poincare-embedding.git {cpp_repo_name}\n\npatches_applied = False\n\n# Apply patches\nif clone_cpp_repo and not patches_applied:\n % cd {cpp_repo_name}\n ! git apply ../poincare_burn_in_eps.patch\n\nif clone_np_repo and not patches_applied:\n % cd ../{np_repo_name}\n ! git apply ../poincare_numpy.patch\n \npatches_applied = True\n\n# Compile the code for the external c++ implementation into a binary\n% cd {parent_directory}/{cpp_repo_name}\n! mkdir -p work\n% cd work\n! cmake ..\n! make\n% cd {current_directory}",
"You might need to install an updated version of cmake to be able to compile the source code. Please make sure that the binary poincare_embedding has been created before proceeding by verifying the above cell does not raise an error.",
"cpp_binary_path = os.path.join(parent_directory, cpp_repo_name, 'work', 'poincare_embedding')\nassert(os.path.exists(cpp_binary_path)), 'Binary file doesnt exist at %s' % cpp_binary_path",
"2. Training\n2.1 Create the data",
"# These directories are auto created in the current directory for storing poincare datasets and models\ndata_directory = os.path.join(parent_directory, 'data')\nmodels_directory = os.path.join(parent_directory, 'models')\n\n# Create directories\n! mkdir -p {data_directory}\n! mkdir -p {models_directory}\n\n# Prepare the WordNet data\nwordnet_file = os.path.join(data_directory, 'wordnet_noun_hypernyms.tsv')\nif not os.path.exists(wordnet_file):\n ! python {parent_directory}/{cpp_repo_name}/scripts/create_wordnet_noun_hierarchy.py {wordnet_file}\n\n# Prepare the HyperLex data\nhyperlex_url = \"http://people.ds.cam.ac.uk/iv250/paper/hyperlex/hyperlex-data.zip\"\n! wget {hyperlex_url} -O {data_directory}/hyperlex-data.zip\nif os.path.exists(os.path.join(data_directory, 'hyperlex')):\n ! rm -r {data_directory}/hyperlex\n! unzip {data_directory}/hyperlex-data.zip -d {data_directory}/hyperlex/\nhyperlex_file = os.path.join(data_directory, 'hyperlex', 'nouns-verbs', 'hyperlex-nouns.txt')",
"2.2 Training C++ embeddings",
"def train_cpp_model(\n binary_path, data_file, output_file, dim, epochs, neg,\n num_threads, epsilon, burn_in, seed=0):\n \"\"\"Train a poincare embedding using the c++ implementation\n \n Args:\n binary_path (str): Path to the compiled c++ implementation binary\n data_file (str): Path to tsv file containing relation pairs\n output_file (str): Path to output file containing model\n dim (int): Number of dimensions of the trained model\n epochs (int): Number of epochs to use\n neg (int): Number of negative samples to use\n num_threads (int): Number of threads to use for training the model\n epsilon (float): Constant used for clipping below a norm of one\n burn_in (int): Number of epochs to use for burn-in init (0 means no burn-in)\n \n Notes: \n If `output_file` already exists, skips training\n \"\"\"\n if os.path.exists(output_file):\n print('File %s exists, skipping' % output_file)\n return\n args = {\n 'dim': dim,\n 'max_epoch': epochs,\n 'neg_size': neg,\n 'num_thread': num_threads,\n 'epsilon': epsilon,\n 'burn_in': burn_in,\n 'learning_rate_init': 0.1,\n 'learning_rate_final': 0.0001,\n }\n cmd = [binary_path, data_file, output_file]\n for option, value in args.items():\n cmd.append(\"--%s\" % option)\n cmd.append(str(value))\n \n return check_output(args=cmd)\n\nmodel_sizes = [5, 10, 20, 50, 100, 200]\ndefault_params = {\n 'neg': 20,\n 'epochs': 50,\n 'threads': 8,\n 'eps': 1e-6,\n 'burn_in': 0,\n 'batch_size': 10,\n}\n\nnon_default_params = {\n 'neg': [10],\n 'epochs': [200],\n 'burn_in': [10]\n}\n\ndef cpp_model_name_from_params(params, prefix):\n param_keys = ['burn_in', 'epochs', 'neg', 'eps', 'threads']\n name = ['%s_%s' % (key, params[key]) for key in sorted(param_keys)]\n return '%s_%s' % (prefix, '_'.join(name))\n\ndef train_model_with_params(params, train_file, model_sizes, prefix, implementation):\n \"\"\"Trains models with given params for multiple model sizes using the given implementation\n \n Args:\n params (dict): parameters to train the model 
with\n train_file (str): Path to tsv file containing relation pairs\n model_sizes (list): list of dimension sizes (integer) to train the model with\n prefix (str): prefix to use for the saved model filenames\n implementation (str): whether to use the numpy or c++ implementation,\n allowed values: 'numpy', 'c++'\n \n Returns:\n tuple (model_name, model_files)\n model_files is a dict of (size, filename) pairs\n Example: ('cpp_model_epochs_50', {5: 'models/cpp_model_epochs_50_dim_5'})\n \"\"\"\n files = {}\n if implementation == 'c++':\n model_name = cpp_model_name_from_params(params, prefix)\n elif implementation == 'numpy':\n model_name = np_model_name_from_params(params, prefix)\n elif implementation == 'gensim':\n model_name = gensim_model_name_from_params(params, prefix)\n else:\n raise ValueError('Given implementation %s not found' % implementation)\n for model_size in model_sizes:\n output_file_name = '%s_dim_%d' % (model_name, model_size)\n output_file = os.path.join(models_directory, output_file_name)\n print('Training model %s of size %d' % (model_name, model_size))\n if implementation == 'c++':\n out = train_cpp_model(\n cpp_binary_path, train_file, output_file, model_size,\n params['epochs'], params['neg'], params['threads'],\n params['eps'], params['burn_in'], seed=0)\n elif implementation == 'numpy':\n train_external_numpy_model(\n python_script_path, train_file, output_file, model_size,\n params['epochs'], params['neg'], seed=0)\n elif implementation == 'gensim':\n train_gensim_model(\n train_file, output_file, model_size, params['epochs'],\n params['neg'], params['burn_in'], params['batch_size'], seed=0)\n else:\n raise ValueError('Given implementation %s not found' % implementation)\n files[model_size] = output_file\n return (model_name, files)\n\nmodel_files = {}\n\nmodel_files['c++'] = {}\n# Train c++ models with default params\nmodel_name, files = train_model_with_params(default_params, wordnet_file, model_sizes, 'cpp_model', 
'c++')\nmodel_files['c++'][model_name] = {}\nfor dim, filepath in files.items():\n model_files['c++'][model_name][dim] = filepath\n# Train c++ models with non-default params\nfor param, values in non_default_params.items():\n params = default_params.copy()\n for value in values:\n params[param] = value\n model_name, files = train_model_with_params(params, wordnet_file, model_sizes, 'cpp_model', 'c++')\n model_files['c++'][model_name] = {}\n for dim, filepath in files.items():\n model_files['c++'][model_name][dim] = filepath",
"2.3 Training numpy embeddings (non-gensim)",
"python_script_path = os.path.join(parent_directory, np_repo_name, 'poincare.py')\n\ndef np_model_name_from_params(params, prefix):\n param_keys = ['neg', 'epochs']\n name = ['%s_%s' % (key, params[key]) for key in sorted(param_keys)]\n return '%s_%s' % (prefix, '_'.join(name))\n\ndef train_external_numpy_model(\n script_path, data_file, output_file, dim, epochs, neg, seed=0):\n \"\"\"Train a poincare embedding using an external numpy implementation\n \n Args:\n script_path (str): Path to the Python training script\n data_file (str): Path to tsv file containing relation pairs\n output_file (str): Path to output file containing model\n dim (int): Number of dimensions of the trained model\n epochs (int): Number of epochs to use\n neg (int): Number of negative samples to use\n \n Notes: \n If `output_file` already exists, skips training\n \"\"\"\n if os.path.exists(output_file):\n print('File %s exists, skipping' % output_file)\n return\n args = {\n 'input-file': data_file,\n 'output-file': output_file,\n 'dimensions': dim,\n 'epochs': epochs,\n 'learning-rate': 0.01,\n 'num-negative': neg,\n }\n cmd = ['python', script_path]\n for option, value in args.items():\n cmd.append(\"--%s\" % option)\n cmd.append(str(value))\n \n return check_output(args=cmd)\n\nmodel_files['numpy'] = {}\n# Train models with default params\nmodel_name, files = train_model_with_params(default_params, wordnet_file, model_sizes, 'np_model', 'numpy')\nmodel_files['numpy'][model_name] = {}\nfor dim, filepath in files.items():\n model_files['numpy'][model_name][dim] = filepath",
"2.4 Training gensim embeddings",
"def gensim_model_name_from_params(params, prefix):\n param_keys = ['neg', 'epochs', 'burn_in', 'batch_size']\n name = ['%s_%s' % (key, params[key]) for key in sorted(param_keys)]\n return '%s_%s' % (prefix, '_'.join(name))\n\ndef train_gensim_model(\n data_file, output_file, dim, epochs, neg, burn_in, batch_size, seed=0):\n \"\"\"Train a poincare embedding using gensim implementation\n \n Args:\n data_file (str): Path to tsv file containing relation pairs\n output_file (str): Path to output file containing model\n dim (int): Number of dimensions of the trained model\n epochs (int): Number of epochs to use\n neg (int): Number of negative samples to use\n burn_in (int): Number of epochs to use for burn-in initialization\n batch_size (int): Size of batch to use for training\n \n Notes: \n If `output_file` already exists, skips training\n \"\"\"\n if os.path.exists(output_file):\n print('File %s exists, skipping' % output_file)\n return\n train_data = PoincareRelations(data_file)\n model = PoincareModel(train_data, size=dim, negative=neg, burn_in=burn_in)\n model.train(epochs=epochs, batch_size=batch_size)\n model.save(output_file)\n\nnon_default_params_gensim = {\n 'neg': [10],\n 'burn_in': [10],\n 'batch_size': [50]\n}\n\nmodel_files['gensim'] = {}\n# Train models with default params\nmodel_name, files = train_model_with_params(default_params, wordnet_file, model_sizes, 'gensim_model', 'gensim')\nmodel_files['gensim'][model_name] = {}\nfor dim, filepath in files.items():\n model_files['gensim'][model_name][dim] = filepath\n# Train models with non-default params\nfor param, values in non_default_params_gensim.items():\n params = default_params.copy()\n for value in values:\n params[param] = value\n model_name, files = train_model_with_params(params, wordnet_file, model_sizes, 'gensim_model', 'gensim')\n model_files['gensim'][model_name] = {}\n for dim, filepath in files.items():\n model_files['gensim'][model_name][dim] = filepath",
"3. Loading the embeddings",
"def transform_cpp_embedding_to_kv(input_file, output_file, encoding='utf8'):\n \"\"\"Given a C++ embedding tsv filepath, converts it to a KeyedVector-supported file\"\"\"\n with smart_open(input_file, 'rb') as f:\n lines = [line.decode(encoding) for line in f]\n if not len(lines):\n raise ValueError(\"file is empty\")\n first_line = lines[0]\n parts = first_line.rstrip().split(\"\\t\")\n model_size = len(parts) - 1\n vocab_size = len(lines)\n with open(output_file, 'w') as f:\n f.write('%d %d\\n' % (vocab_size, model_size))\n for line in lines:\n f.write(line.replace('\\t', ' '))\n\ndef transform_numpy_embedding_to_kv(input_file, output_file, encoding='utf8'):\n \"\"\"Given a numpy poincare embedding pkl filepath, converts it to a KeyedVector-supported file\"\"\"\n np_embeddings = pickle.load(open(input_file, 'rb'))\n random_embedding = np_embeddings[list(np_embeddings.keys())[0]]\n \n model_size = random_embedding.shape[0]\n vocab_size = len(np_embeddings)\n with open(output_file, 'w') as f:\n f.write('%d %d\\n' % (vocab_size, model_size))\n for key, vector in np_embeddings.items():\n vector_string = ' '.join('%.6f' % value for value in vector)\n f.write('%s %s\\n' % (key, vector_string))\n\ndef load_poincare_cpp(input_filename):\n \"\"\"Load embedding trained via C++ Poincare model.\n\n Parameters\n ----------\n filepath : str\n Path to tsv file containing embedding.\n\n Returns\n -------\n PoincareKeyedVectors instance.\n\n \"\"\"\n keyed_vectors_filename = input_filename + '.kv'\n transform_cpp_embedding_to_kv(input_filename, keyed_vectors_filename)\n embedding = PoincareKeyedVectors.load_word2vec_format(keyed_vectors_filename)\n os.unlink(keyed_vectors_filename)\n return embedding\n\ndef load_poincare_numpy(input_filename):\n \"\"\"Load embedding trained via Python numpy Poincare model.\n\n Parameters\n ----------\n filepath : str\n Path to pkl file containing embedding.\n\n Returns:\n PoincareKeyedVectors instance.\n\n \"\"\"\n keyed_vectors_filename = 
input_filename + '.kv'\n transform_numpy_embedding_to_kv(input_filename, keyed_vectors_filename)\n embedding = PoincareKeyedVectors.load_word2vec_format(keyed_vectors_filename)\n os.unlink(keyed_vectors_filename)\n return embedding\n\ndef load_poincare_gensim(input_filename):\n \"\"\"Load embedding trained via Gensim PoincareModel.\n\n Parameters\n ----------\n filepath : str\n Path to model file.\n\n Returns:\n PoincareKeyedVectors instance.\n\n \"\"\"\n model = PoincareModel.load(input_filename)\n return model.kv\n\ndef load_model(implementation, model_file):\n \"\"\"Convenience function over functions to load models from different implementations.\n \n Parameters\n ----------\n implementation : str\n Implementation used to create model file ('c++'/'numpy'/'gensim').\n model_file : str\n Path to model file.\n \n Returns\n -------\n PoincareKeyedVectors instance\n \n Notes\n -----\n Raises ValueError in case of invalid value for `implementation`\n\n \"\"\"\n if implementation == 'c++':\n return load_poincare_cpp(model_file)\n elif implementation == 'numpy':\n return load_poincare_numpy(model_file)\n elif implementation == 'gensim':\n return load_poincare_gensim(model_file)\n else:\n raise ValueError('Invalid implementation %s' % implementation)",
"4. Evaluation",
"def display_results(task_name, results):\n \"\"\"Display evaluation results of multiple embeddings on a single task in a tabular format\n \n Args:\n task_name (str): name the task being evaluated\n results (dict): mapping between embeddings and corresponding results\n \n \"\"\"\n data = PrettyTable()\n data.field_names = [\"Model Description\", \"Metric\"] + [str(dim) for dim in sorted(model_sizes)]\n for model_name, model_results in results.items():\n metrics = [metric for metric in model_results.keys()]\n dims = sorted([dim for dim in model_results[metrics[0]].keys()])\n row = [model_name, '\\n'.join(metrics) + '\\n']\n for dim in dims:\n scores = ['%.2f' % model_results[metric][dim] for metric in metrics]\n row.append('\\n'.join(scores))\n data.add_row(row)\n data.align = 'r'\n data_cols = data.get_string().split('\\n')[0].split('+')[1:-1]\n col_lengths = [len(col) for col in data_cols]\n header_col_1_length = col_lengths[0] + col_lengths[1] - 1\n header_col_2_length = sum(col_lengths[2:]) + len(col_lengths[2:-1]) - 2\n \n header_col_2_content = \"Model Dimensions\"\n header_col_2_left_margin = (header_col_2_length - len(header_col_2_content)) // 2\n header_col_2_right_margin = header_col_2_length - len(header_col_2_content) - header_col_2_left_margin\n header_col_2_string = \"%s%s%s\" % (\n \" \" * header_col_2_left_margin, header_col_2_content, \" \" * header_col_2_right_margin)\n header = PrettyTable()\n header.field_names = [\" \" * header_col_1_length, header_col_2_string]\n header_lines = header.get_string(start=0, end=0).split(\"\\n\")[:2]\n print('Results for %s task' % task_name)\n print(\"\\n\".join(header_lines))\n print(data) ",
"4.1 WordNet reconstruction",
"reconstruction_results = OrderedDict()\nmetrics = ['mean_rank', 'MAP']\n\nfor implementation, models in sorted(model_files.items()):\n for model_name, files in models.items():\n if model_name in reconstruction_results:\n continue\n reconstruction_results[model_name] = OrderedDict()\n for metric in metrics:\n reconstruction_results[model_name][metric] = {}\n for model_size, model_file in files.items():\n print('Evaluating model %s of size %d' % (model_name, model_size))\n embedding = load_model(implementation, model_file)\n eval_instance = ReconstructionEvaluation(wordnet_file, embedding)\n eval_result = eval_instance.evaluate(max_n=1000)\n for metric in metrics:\n reconstruction_results[model_name][metric][model_size] = eval_result[metric]\n\ndisplay_results('WordNet Reconstruction', reconstruction_results)",
"4.2 WordNet link prediction\n4.2.1 Preparing data",
"def train_test_split(data_file, test_ratio=0.1):\n \"\"\"Creates train and test files from given data file, returns train/test file names\n \n Args:\n data_file (str): path to data file for which train/test split is to be created\n test_ratio (float): fraction of lines to be used for test data\n \n Returns\n (train_file, test_file): tuple of strings with train file and test file paths\n \"\"\"\n train_filename = data_file + '.train'\n test_filename = data_file + '.test'\n if os.path.exists(train_filename) and os.path.exists(test_filename):\n print('Train and test files already exist, skipping')\n return (train_filename, test_filename)\n root_nodes, leaf_nodes = get_root_and_leaf_nodes(data_file)\n test_line_candidates = []\n line_count = 0\n all_nodes = set()\n with open(data_file, 'rb') as f:\n for i, line in enumerate(f):\n node_1, node_2 = line.split()\n all_nodes.update([node_1, node_2])\n if (\n node_1 not in leaf_nodes\n and node_2 not in leaf_nodes\n and node_1 not in root_nodes\n and node_2 not in root_nodes\n and node_1 != node_2\n ):\n test_line_candidates.append(i)\n line_count += 1\n\n num_test_lines = int(test_ratio * line_count)\n if num_test_lines > len(test_line_candidates):\n raise ValueError('Not enough candidate relations for test set')\n print('Choosing %d test lines from %d candidates' % (num_test_lines, len(test_line_candidates)))\n test_line_indices = set(random.sample(test_line_candidates, num_test_lines))\n train_line_indices = set(l for l in range(line_count) if l not in test_line_indices)\n \n train_set_nodes = set()\n with open(data_file, 'rb') as f:\n train_file = open(train_filename, 'wb')\n test_file = open(test_filename, 'wb')\n for i, line in enumerate(f):\n if i in train_line_indices:\n train_set_nodes.update(line.split())\n train_file.write(line)\n elif i in test_line_indices:\n test_file.write(line)\n else:\n raise AssertionError('Line %d not present in either train or test line indices' % i)\n train_file.close()\n 
test_file.close()\n assert len(train_set_nodes) == len(all_nodes), 'Not all nodes from dataset present in train set relations'\n return (train_filename, test_filename)\n\ndef get_root_and_leaf_nodes(data_file):\n \"\"\"Return keys of root and leaf nodes from a file with transitive closure relations\n \n Args:\n data_file(str): file path containing transitive closure relations\n \n Returns:\n (root_nodes, leaf_nodes) - tuple containing keys of root and leaf nodes\n \"\"\"\n root_candidates = set()\n leaf_candidates = set()\n with open(data_file, 'rb') as f:\n for line in f:\n nodes = line.split()\n root_candidates.update(nodes)\n leaf_candidates.update(nodes)\n \n with open(data_file, 'rb') as f:\n for line in f:\n node_1, node_2 = line.split()\n if node_1 == node_2:\n continue\n leaf_candidates.discard(node_1)\n root_candidates.discard(node_2)\n \n return (leaf_candidates, root_candidates)\n\nwordnet_train_file, wordnet_test_file = train_test_split(wordnet_file)",
"4.2.2 Training models",
"# Training models for link prediction\nlp_model_files = {}\n\nlp_model_files['c++'] = {}\n# Train c++ models with default params\nmodel_name, files = train_model_with_params(default_params, wordnet_train_file, model_sizes, 'cpp_lp_model', 'c++')\nlp_model_files['c++'][model_name] = {}\nfor dim, filepath in files.items():\n lp_model_files['c++'][model_name][dim] = filepath\n# Train c++ models with non-default params\nfor param, values in non_default_params.items():\n params = default_params.copy()\n for value in values:\n params[param] = value\n model_name, files = train_model_with_params(params, wordnet_train_file, model_sizes, 'cpp_lp_model', 'c++')\n lp_model_files['c++'][model_name] = {}\n for dim, filepath in files.items():\n lp_model_files['c++'][model_name][dim] = filepath\n\nlp_model_files['numpy'] = {}\n# Train numpy models with default params\nmodel_name, files = train_model_with_params(default_params, wordnet_train_file, model_sizes, 'np_lp_model', 'numpy')\nlp_model_files['numpy'][model_name] = {}\nfor dim, filepath in files.items():\n lp_model_files['numpy'][model_name][dim] = filepath\n\nlp_model_files['gensim'] = {}\n# Train models with default params\nmodel_name, files = train_model_with_params(default_params, wordnet_train_file, model_sizes, 'gensim_lp_model', 'gensim')\nlp_model_files['gensim'][model_name] = {}\nfor dim, filepath in files.items():\n lp_model_files['gensim'][model_name][dim] = filepath\n# Train models with non-default params\nfor param, values in non_default_params_gensim.items():\n params = default_params.copy()\n for value in values:\n params[param] = value\n model_name, files = train_model_with_params(params, wordnet_train_file, model_sizes, 'gensim_lp_model', 'gensim')\n lp_model_files['gensim'][model_name] = {}\n for dim, filepath in files.items():\n lp_model_files['gensim'][model_name][dim] = filepath",
"4.2.3 Evaluating models",
"lp_results = OrderedDict()\nmetrics = ['mean_rank', 'MAP']\n\nfor implementation, models in sorted(lp_model_files.items()):\n for model_name, files in models.items():\n lp_results[model_name] = OrderedDict()\n for metric in metrics:\n lp_results[model_name][metric] = {}\n for model_size, model_file in files.items():\n print('Evaluating model %s of size %d' % (model_name, model_size))\n embedding = load_model(implementation, model_file)\n eval_instance = LinkPredictionEvaluation(wordnet_train_file, wordnet_test_file, embedding)\n eval_result = eval_instance.evaluate(max_n=1000)\n for metric in metrics:\n lp_results[model_name][metric][model_size] = eval_result[metric]\n\ndisplay_results('WordNet Link Prediction', lp_results)",
"4.3 HyperLex Lexical Entailment",
"entailment_results = OrderedDict()\neval_instance = LexicalEntailmentEvaluation(hyperlex_file)\n\nfor implementation, models in sorted(model_files.items()):\n for model_name, files in models.items():\n if model_name in entailment_results:\n continue\n entailment_results[model_name] = OrderedDict()\n entailment_results[model_name]['spearman'] = {}\n for model_size, model_file in files.items():\n print('Evaluating model %s of size %d' % (model_name, model_size))\n embedding = load_model(implementation, model_file)\n entailment_results[model_name]['spearman'][model_size] = eval_instance.evaluate_spearman(embedding)\n\ndisplay_results('Lexical Entailment (HyperLex)', entailment_results)",
"4.4 Link Prediction for collaboration networks",
"# TODO - quite tricky, since the loss function used for training the model on this network is different\n# Will require changes to how gradients are calculated in C++ code"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Lstyle1/Deep_learning_projects
|
autoencoder/Convolutional_Autoencoder.ipynb
|
mit
|
[
"Convolutional Autoencoder\nSticking with the MNIST dataset, let's improve our autoencoder's performance using convolutional layers. Again, loading modules and the data.",
"%matplotlib inline\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', validation_size=0)\n\nimg = mnist.train.images[2]\nplt.imshow(img.reshape((28, 28)), cmap='Greys_r')",
"Network Architecture\nThe encoder part of the network will be a typical convolutional pyramid. Each convolutional layer will be followed by a max-pooling layer to reduce the dimensions of the layers. The decoder though might be something new to you. The decoder needs to convert from a narrow representation to a wide reconstructed image. For example, the representation could be a 4x4x8 max-pool layer. This is the output of the encoder, but also the input to the decoder. We want to get a 28x28x1 image out from the decoder so we need to work our way back up from the narrow decoder input layer. A schematic of the network is shown below.\n<img src='assets/convolutional_autoencoder.png' width=500px>\nHere our final encoder layer has size 4x4x8 = 128. The original images have size 28x28 = 784, so the encoded vector is roughly 16% the size of the original image. These are just suggested sizes for each of the layers. Feel free to change the depths and sizes, but remember our goal here is to find a small representation of the input data.\nWhat's going on with the decoder\nOkay, so the decoder has these \"Upsample\" layers that you might not have seen before. First off, I'll discuss a bit what these layers aren't. Usually, you'll see transposed convolution layers used to increase the width and height of the layers. They work almost exactly the same as convolutional layers, but in reverse. A stride in the input layer results in a larger stride in the transposed convolution layer. For example, if you have a 3x3 kernel, a 3x3 patch in the input layer will be reduced to one unit in a convolutional layer. Comparatively, one unit in the input layer will be expanded to a 3x3 path in a transposed convolution layer. The TensorFlow API provides us with an easy way to create the layers, tf.nn.conv2d_transpose. \nHowever, transposed convolution layers can lead to artifacts in the final images, such as checkerboard patterns. 
This is due to overlap in the kernels which can be avoided by setting the stride and kernel size equal. In this Distill article from Augustus Odena, et al, the authors show that these checkerboard artifacts can be avoided by resizing the layers using nearest neighbor or bilinear interpolation (upsampling) followed by a convolutional layer. In TensorFlow, this is easily done with tf.image.resize_images, followed by a convolution. Be sure to read the Distill article to get a better understanding of deconvolutional layers and why we're using upsampling.\n\nExercise: Build the network shown above. Remember that a convolutional layer with strides of 1 and 'same' padding won't reduce the height and width. That is, if the input is 28x28 and the convolution layer has stride = 1 and 'same' padding, the convolutional layer will also be 28x28. The max-pool layers are used the reduce the width and height. A stride of 2 will reduce the size by 2. Odena et al claim that nearest neighbor interpolation works best for the upsampling, so make sure to include that as a parameter in tf.image.resize_images or use tf.image.resize_nearest_neighbor.",
"learning_rate = 0.001\nimg_size = mnist.train.images.shape[1]\n\n# Input and target placeholders\ninputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1))\ntargets_ = tf.placeholder(tf.float32, (None, 28, 28, 1))\n\n### Encoder\nconv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding='same', activation=tf.nn.relu, \n kernel_initializer=tf.truncated_normal_initializer(stddev=0.1))\n# Now 28x28x16\nmaxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')\n# Now 14x14x16\nconv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding='same', activation=tf.nn.relu)\n# Now 14x14x8\nmaxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')\n# Now 7x7x8\nconv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='same', activation=tf.nn.relu)\n# Now 7x7x8\nencoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')\n# Now 4x4x8\n\n### Decoder\nupsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))\n# Now 7x7x8\nconv4 = tf.layers.conv2d(upsample1, 8, (3,3), padding='same', activation=tf.nn.relu)\n# Now 7x7x8\nupsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))\n# Now 14x14x8\nconv5 = tf.layers.conv2d(upsample2, 8, (3,3), padding='same', activation=tf.nn.relu)\n# Now 14x14x8\nupsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28))\n# Now 28x28x8\nconv6 = tf.layers.conv2d(upsample3, 16, (3,3), padding='same', activation=tf.nn.relu)\n# Now 28x28x16\n\nlogits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)\n#Now 28x28x1\n\n# Pass logits through sigmoid to get reconstructed image\ndecoded =tf.nn.sigmoid(logits)\n\n# Pass logits through sigmoid and calculate the cross-entropy loss\nloss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)\n\n# Get cost and define the optimizer\ncost = tf.reduce_mean(loss)\nopt = tf.train.AdamOptimizer(learning_rate).minimize(cost)",
"Training\nAs before, here we'll train the network. Instead of flattening the images though, we can pass them in as 28x28x1 arrays.",
"sess = tf.Session()\n\nepochs = 10\nbatch_size = 200\nsess.run(tf.global_variables_initializer())\nfor e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n imgs = batch[0].reshape((-1, 28, 28, 1))\n batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,\n targets_: imgs})\n\n print(\"Epoch: {}/{}...\".format(e+1, epochs),\n \"Training loss: {:.4f}\".format(batch_cost))\n\nfig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))\nin_imgs = mnist.test.images[:10]\nreconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})\n\nfor images, row in zip([in_imgs, reconstructed], axes):\n for img, ax in zip(images, row):\n ax.imshow(img.reshape((28, 28)), cmap='Greys_r')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n\nfig.tight_layout(pad=0.1)\n\nsess.close()",
"Denoising\nAs I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practive. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. We'll use noisy images as input and the original, clean images as targets. Here's an example of the noisy images I generated and the denoised images.\n\nSince this is a harder problem for the network, we'll want to use deeper convolutional layers here, more feature maps. I suggest something like 32-32-16 for the depths of the convolutional layers in the encoder, and the same depths going backward through the decoder. Otherwise the architecture is the same as before.\n\nExercise: Build the network for the denoising autoencoder. It's the same as before, but with deeper layers. I suggest 32-32-16 for the depths, but you can play with these numbers, or add more layers.",
"learning_rate = 0.001\ninputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')\ntargets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')\n\n### Encoder\nconv1 = tf.layers.conv2d(inputs_, 32, (3,3), padding='same', activation=tf.nn.relu)\n# Now 28x28x32\nmaxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')\n# Now 14x14x32\nconv2 = tf.layers.conv2d(maxpool1, 32, (3,3), padding='same', activation=tf.nn.relu)\n# Now 14x14x32\nmaxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')\n# Now 7x7x32\nconv3 = tf.layers.conv2d(maxpool2, 16, (3,3), padding='same', activation=tf.nn.relu)\n# Now 7x7x16\nencoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')\n# Now 4x4x16\n\n### Decoder\nupsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))\n# Now 7x7x16\nconv4 = tf.layers.conv2d(upsample1, 16, (3,3), padding='same', activation=tf.nn.relu)\n# Now 7x7x16\nupsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))\n# Now 14x14x16\nconv5 = tf.layers.conv2d(upsample2, 32, (3,3), padding='same', activation=tf.nn.relu)\n# Now 14x14x32\nupsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28))\n# Now 28x28x32\nconv6 = tf.layers.conv2d(upsample3, 32, (3,3), padding='same', activation=tf.nn.relu)\n# Now 28x28x32\n\nlogits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)\n#Now 28x28x1\n\n# Pass logits through sigmoid to get reconstructed image\ndecoded = tf.nn.sigmoid(logits, name='decoded')\n\n# Pass logits through sigmoid and calculate the cross-entropy loss\nloss = tf.nn.sigmoid_\n\n# Get cost and define the optimizer\ncost = tf.reduce_mean(loss)\nopt = tf.train.AdamOptimizer(learning_rate).minimize(cost)\n\nsess = tf.Session()\n\nepochs = 100\nbatch_size = 200\n# Set's how much noise we're adding to the MNIST images\nnoise_factor = 0.5\nsess.run(tf.global_variables_initializer())\nfor e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = 
mnist.train.next_batch(batch_size)\n # Get images from the batch\n imgs = batch[0].reshape((-1, 28, 28, 1))\n \n # Add random noise to the input images\n noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)\n # Clip the images to be between 0 and 1\n noisy_imgs = np.clip(noisy_imgs, 0., 1.)\n \n # Noisy images as inputs, original images as targets\n batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,\n targets_: imgs})\n\n print(\"Epoch: {}/{}...\".format(e+1, epochs),\n \"Training loss: {:.4f}\".format(batch_cost))",
"Checking out the performance\nHere I'm adding noise to the test images and passing them through the autoencoder. It does a suprisingly great job of removing the noise, even though it's sometimes difficult to tell what the original number is.",
"fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))\nin_imgs = mnist.test.images[:10]\nnoisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)\nnoisy_imgs = np.clip(noisy_imgs, 0., 1.)\n\nreconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})\n\nfor images, row in zip([noisy_imgs, reconstructed], axes):\n for img, ax in zip(images, row):\n ax.imshow(img.reshape((28, 28)), cmap='Greys_r')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\nfig.tight_layout(pad=0.1)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mne-tools/mne-tools.github.io
|
0.23/_downloads/91078106f2c04f1e09c01a2fa07e9d27/10_raw_overview.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"The Raw data structure: continuous data\nThis tutorial covers the basics of working with raw EEG/MEG data in Python. It\nintroduces the :class:~mne.io.Raw data structure in detail, including how to\nload, query, subselect, export, and plot data from a :class:~mne.io.Raw\nobject. For more info on visualization of :class:~mne.io.Raw objects, see\ntut-visualize-raw. For info on creating a :class:~mne.io.Raw object\nfrom simulated data in a :class:NumPy array <numpy.ndarray>, see\ntut_creating_data_structures.\nAs usual we'll start by importing the modules we need:",
"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mne",
"Loading continuous data\n.. sidebar:: Datasets in MNE-Python\nThere are ``data_path`` functions for several example datasets in\nMNE-Python (e.g., :func:`mne.datasets.kiloword.data_path`,\n:func:`mne.datasets.spm_face.data_path`, etc). All of them will check the\ndefault download location first to see if the dataset is already on your\ncomputer, and only download it if necessary. The default download\nlocation is also configurable; see the documentation of any of the\n``data_path`` functions for more information.\n\nAs mentioned in the introductory tutorial <tut-overview>,\nMNE-Python data structures are based around\nthe :file:.fif file format from Neuromag. This tutorial uses an\nexample dataset <sample-dataset> in :file:.fif format, so here we'll\nuse the function :func:mne.io.read_raw_fif to load the raw data; there are\nreader functions for a wide variety of other data formats\n<data-formats> as well.\nThere are also several other example datasets\n<datasets> that can be downloaded with just a few lines\nof code. Functions for downloading example datasets are in the\n:mod:mne.datasets submodule; here we'll use\n:func:mne.datasets.sample.data_path to download the \"sample-dataset\"\ndataset, which contains EEG, MEG, and structural MRI data from one subject\nperforming an audiovisual experiment. When it's done downloading,\n:func:~mne.datasets.sample.data_path will return the folder location where\nit put the files; you can navigate there with your file browser if you want\nto examine the files yourself. Once we have the file path, we can load the\ndata with :func:~mne.io.read_raw_fif. This will return a\n:class:~mne.io.Raw object, which we'll store in a variable called raw.",
"sample_data_folder = mne.datasets.sample.data_path()\nsample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',\n 'sample_audvis_raw.fif')\nraw = mne.io.read_raw_fif(sample_data_raw_file)",
"As you can see above, :func:~mne.io.read_raw_fif automatically displays\nsome information about the file it's loading. For example, here it tells us\nthat there are three \"projection items\" in the file along with the recorded\ndata; those are :term:SSP projectors <projector> calculated to remove\nenvironmental noise from the MEG signals, and are discussed in a the tutorial\ntut-projectors-background.\nIn addition to the information displayed during loading, you can\nget a glimpse of the basic details of a :class:~mne.io.Raw object by\nprinting it:",
"print(raw)",
"By default, the :samp:mne.io.read_raw_{*} family of functions will not\nload the data into memory (instead the data on disk are memory-mapped_,\nmeaning the data are only read from disk as-needed). Some operations (such as\nfiltering) require that the data be copied into RAM; to do that we could have\npassed the preload=True parameter to :func:~mne.io.read_raw_fif, but we\ncan also copy the data into RAM at any time using the\n:meth:~mne.io.Raw.load_data method. However, since this particular tutorial\ndoesn't do any serious analysis of the data, we'll first\n:meth:~mne.io.Raw.crop the :class:~mne.io.Raw object to 60 seconds so it\nuses less memory and runs more smoothly on our documentation server.",
"raw.crop(tmax=60)",
"Querying the Raw object\n.. sidebar:: Attributes vs. Methods\n**Attributes** are usually static properties of Python objects — things\nthat are pre-computed and stored as part of the object's representation\nin memory. Attributes are accessed with the ``.`` operator and do not\nrequire parentheses after the attribute name (example: ``raw.ch_names``).\n\n**Methods** are like specialized functions attached to an object.\nUsually they require additional user input and/or need some computation\nto yield a result. Methods always have parentheses at the end; additional\narguments (if any) go inside those parentheses (examples:\n``raw.estimate_rank()``, ``raw.drop_channels(['EEG 030', 'MEG 2242'])``).\n\nWe saw above that printing the :class:~mne.io.Raw object displays some\nbasic information like the total number of channels, the number of time\npoints at which the data were sampled, total duration, and the approximate\nsize in memory. Much more information is available through the various\nattributes and methods of the :class:~mne.io.Raw class. Some useful\nattributes of :class:~mne.io.Raw objects include a list of the channel\nnames (:attr:~mne.io.Raw.ch_names), an array of the sample times in seconds\n(:attr:~mne.io.Raw.times), and the total number of samples\n(:attr:~mne.io.Raw.n_times); a list of all attributes and methods is given\nin the documentation of the :class:~mne.io.Raw class.\nThe Raw.info attribute\nThere is also quite a lot of information stored in the raw.info\nattribute, which stores an :class:~mne.Info object that is similar to a\n:class:Python dictionary <dict> (in that it has fields accessed via named\nkeys). Like Python dictionaries, raw.info has a .keys() method that\nshows all the available field names; unlike Python dictionaries, printing\nraw.info will print a nicely-formatted glimpse of each field's data. See\ntut-info-class for more on what is stored in :class:~mne.Info\nobjects, and how to interact with them.",
"n_time_samps = raw.n_times\ntime_secs = raw.times\nch_names = raw.ch_names\nn_chan = len(ch_names) # note: there is no raw.n_channels attribute\nprint('the (cropped) sample data object has {} time samples and {} channels.'\n ''.format(n_time_samps, n_chan))\nprint('The last time sample is at {} seconds.'.format(time_secs[-1]))\nprint('The first few channel names are {}.'.format(', '.join(ch_names[:3])))\nprint() # insert a blank line in the output\n\n# some examples of raw.info:\nprint('bad channels:', raw.info['bads']) # chs marked \"bad\" during acquisition\nprint(raw.info['sfreq'], 'Hz') # sampling frequency\nprint(raw.info['description'], '\\n') # miscellaneous acquisition info\n\nprint(raw.info)",
"<div class=\"alert alert-info\"><h4>Note</h4><p>Most of the fields of ``raw.info`` reflect metadata recorded at\n acquisition time, and should not be changed by the user. There are a few\n exceptions (such as ``raw.info['bads']`` and ``raw.info['projs']``), but\n in most cases there are dedicated MNE-Python functions or methods to\n update the :class:`~mne.Info` object safely (such as\n :meth:`~mne.io.Raw.add_proj` to update ``raw.info['projs']``).</p></div>\n\nTime, sample number, and sample index\n.. sidebar:: Sample numbering in VectorView data\nFor data from VectorView systems, it is important to distinguish *sample\nnumber* from *sample index*. See :term:`first_samp` for more information.\n\nOne method of :class:~mne.io.Raw objects that is frequently useful is\n:meth:~mne.io.Raw.time_as_index, which converts a time (in seconds) into\nthe integer index of the sample occurring closest to that time. The method\ncan also take a list or array of times, and will return an array of indices.\nIt is important to remember that there may not be a data sample at exactly\nthe time requested, so the number of samples between time = 1 second and\ntime = 2 seconds may be different than the number of samples between\ntime = 2 and time = 3:",
"print(raw.time_as_index(20))\nprint(raw.time_as_index([20, 30, 40]), '\\n')\n\nprint(np.diff(raw.time_as_index([1, 2, 3])))",
"Modifying Raw objects\n.. sidebar:: len(raw)\nAlthough the :class:`~mne.io.Raw` object underlyingly stores data samples\nin a :class:`NumPy array <numpy.ndarray>` of shape (n_channels,\nn_timepoints), the :class:`~mne.io.Raw` object behaves differently from\n:class:`NumPy arrays <numpy.ndarray>` with respect to the :func:`len`\nfunction. ``len(raw)`` will return the number of timepoints (length along\ndata axis 1), not the number of channels (length along data axis 0).\nHence in this section you'll see ``len(raw.ch_names)`` to get the number\nof channels.\n\n:class:~mne.io.Raw objects have a number of methods that modify the\n:class:~mne.io.Raw instance in-place and return a reference to the modified\ninstance. This can be useful for method chaining_\n(e.g., raw.crop(...).pick_channels(...).filter(...).plot())\nbut it also poses a problem during interactive analysis: if you modify your\n:class:~mne.io.Raw object for an exploratory plot or analysis (say, by\ndropping some channels), you will then need to re-load the data (and repeat\nany earlier processing steps) to undo the channel-dropping and try something\nelse. For that reason, the examples in this section frequently use the\n:meth:~mne.io.Raw.copy method before the other methods being demonstrated,\nso that the original :class:~mne.io.Raw object is still available in the\nvariable raw for use in later examples.\nSelecting, dropping, and reordering channels\nAltering the channels of a :class:~mne.io.Raw object can be done in several\nways. As a first example, we'll use the :meth:~mne.io.Raw.pick_types method\nto restrict the :class:~mne.io.Raw object to just the EEG and EOG channels:",
"eeg_and_eog = raw.copy().pick_types(meg=False, eeg=True, eog=True)\nprint(len(raw.ch_names), '→', len(eeg_and_eog.ch_names))",
"Similar to the :meth:~mne.io.Raw.pick_types method, there is also the\n:meth:~mne.io.Raw.pick_channels method to pick channels by name, and a\ncorresponding :meth:~mne.io.Raw.drop_channels method to remove channels by\nname:",
"raw_temp = raw.copy()\nprint('Number of channels in raw_temp:')\nprint(len(raw_temp.ch_names), end=' → drop two → ')\nraw_temp.drop_channels(['EEG 037', 'EEG 059'])\nprint(len(raw_temp.ch_names), end=' → pick three → ')\nraw_temp.pick_channels(['MEG 1811', 'EEG 017', 'EOG 061'])\nprint(len(raw_temp.ch_names))",
"If you want the channels in a specific order (e.g., for plotting),\n:meth:~mne.io.Raw.reorder_channels works just like\n:meth:~mne.io.Raw.pick_channels but also reorders the channels; for\nexample, here we pick the EOG and frontal EEG channels, putting the EOG\nfirst and the EEG in reverse order:",
"channel_names = ['EOG 061', 'EEG 003', 'EEG 002', 'EEG 001']\neog_and_frontal_eeg = raw.copy().reorder_channels(channel_names)\nprint(eog_and_frontal_eeg.ch_names)",
"Changing channel name and type\n.. sidebar:: Long channel names\nDue to limitations in the :file:`.fif` file format (which MNE-Python uses\nto save :class:`~mne.io.Raw` objects), channel names are limited to a\nmaximum of 15 characters.\n\nYou may have noticed that the EEG channel names in the sample data are\nnumbered rather than labelled according to a standard nomenclature such as\nthe 10-20 <ten_twenty_> or 10-05 <ten_oh_five_> systems, or perhaps it\nbothers you that the channel names contain spaces. It is possible to rename\nchannels using the :meth:~mne.io.Raw.rename_channels method, which takes a\nPython dictionary to map old names to new names. You need not rename all\nchannels at once; provide only the dictionary entries for the channels you\nwant to rename. Here's a frivolous example:",
"raw.rename_channels({'EOG 061': 'blink detector'})",
"This next example replaces spaces in the channel names with underscores,\nusing a Python dict comprehension_:",
"print(raw.ch_names[-3:])\nchannel_renaming_dict = {name: name.replace(' ', '_') for name in raw.ch_names}\nraw.rename_channels(channel_renaming_dict)\nprint(raw.ch_names[-3:])",
"If for some reason the channel types in your :class:~mne.io.Raw object are\ninaccurate, you can change the type of any channel with the\n:meth:~mne.io.Raw.set_channel_types method. The method takes a\n:class:dictionary <dict> mapping channel names to types; allowed types are\necg, eeg, emg, eog, exci, ias, misc, resp, seeg, dbs, stim, syst, ecog,\nhbo, hbr. A common use case for changing channel type is when using frontal\nEEG electrodes as makeshift EOG channels:",
"raw.set_channel_types({'EEG_001': 'eog'})\nprint(raw.copy().pick_types(meg=False, eog=True).ch_names)",
"Selection in the time domain\nIf you want to limit the time domain of a :class:~mne.io.Raw object, you\ncan use the :meth:~mne.io.Raw.crop method, which modifies the\n:class:~mne.io.Raw object in place (we've seen this already at the start of\nthis tutorial, when we cropped the :class:~mne.io.Raw object to 60 seconds\nto reduce memory demands). :meth:~mne.io.Raw.crop takes parameters tmin\nand tmax, both in seconds (here we'll again use :meth:~mne.io.Raw.copy\nfirst to avoid changing the original :class:~mne.io.Raw object):",
"raw_selection = raw.copy().crop(tmin=10, tmax=12.5)\nprint(raw_selection)",
":meth:~mne.io.Raw.crop also modifies the :attr:~mne.io.Raw.first_samp and\n:attr:~mne.io.Raw.times attributes, so that the first sample of the cropped\nobject now corresponds to time = 0. Accordingly, if you wanted to re-crop\nraw_selection from 11 to 12.5 seconds (instead of 10 to 12.5 as above)\nthen the subsequent call to :meth:~mne.io.Raw.crop should get tmin=1\n(not tmin=11), and leave tmax unspecified to keep everything from\ntmin up to the end of the object:",
"print(raw_selection.times.min(), raw_selection.times.max())\nraw_selection.crop(tmin=1)\nprint(raw_selection.times.min(), raw_selection.times.max())",
"Remember that sample times don't always align exactly with requested tmin\nor tmax values (due to sampling), which is why the max values of the\ncropped files don't exactly match the requested tmax (see\ntime-as-index for further details).\nIf you need to select discontinuous spans of a :class:~mne.io.Raw object —\nor combine two or more separate :class:~mne.io.Raw objects — you can use\nthe :meth:~mne.io.Raw.append method:",
"raw_selection1 = raw.copy().crop(tmin=30, tmax=30.1) # 0.1 seconds\nraw_selection2 = raw.copy().crop(tmin=40, tmax=41.1) # 1.1 seconds\nraw_selection3 = raw.copy().crop(tmin=50, tmax=51.3) # 1.3 seconds\nraw_selection1.append([raw_selection2, raw_selection3]) # 2.5 seconds total\nprint(raw_selection1.times.min(), raw_selection1.times.max())",
"<div class=\"alert alert-danger\"><h4>Warning</h4><p>Be careful when concatenating :class:`~mne.io.Raw` objects from different\n recordings, especially when saving: :meth:`~mne.io.Raw.append` only\n preserves the ``info`` attribute of the initial :class:`~mne.io.Raw`\n object (the one outside the :meth:`~mne.io.Raw.append` method call).</p></div>\n\nExtracting data from Raw objects\nSo far we've been looking at ways to modify a :class:~mne.io.Raw object.\nThis section shows how to extract the data from a :class:~mne.io.Raw object\ninto a :class:NumPy array <numpy.ndarray>, for analysis or plotting using\nfunctions outside of MNE-Python. To select portions of the data,\n:class:~mne.io.Raw objects can be indexed using square brackets. However,\nindexing :class:~mne.io.Raw works differently than indexing a :class:NumPy\narray <numpy.ndarray> in two ways:\n\n\nAlong with the requested sample value(s) MNE-Python also returns an array\n of times (in seconds) corresponding to the requested samples. The data\n array and the times array are returned together as elements of a tuple.\n\n\nThe data array will always be 2-dimensional even if you request only a\n single time sample or a single channel.\n\n\nExtracting data by index\nTo illustrate the above two points, let's select a couple seconds of data\nfrom the first channel:",
"sampling_freq = raw.info['sfreq']\nstart_stop_seconds = np.array([11, 13])\nstart_sample, stop_sample = (start_stop_seconds * sampling_freq).astype(int)\nchannel_index = 0\nraw_selection = raw[channel_index, start_sample:stop_sample]\nprint(raw_selection)",
"You can see that it contains 2 arrays. This combination of data and times\nmakes it easy to plot selections of raw data (although note that we're\ntransposing the data array so that each channel is a column instead of a row,\nto match what matplotlib expects when plotting 2-dimensional y against\n1-dimensional x):",
"x = raw_selection[1]\ny = raw_selection[0].T\nplt.plot(x, y)",
"Extracting channels by name\nThe :class:~mne.io.Raw object can also be indexed with the names of\nchannels instead of their index numbers. You can pass a single string to get\njust one channel, or a list of strings to select multiple channels. As with\ninteger indexing, this will return a tuple of (data_array, times_array)\nthat can be easily plotted. Since we're plotting 2 channels this time, we'll\nadd a vertical offset to one channel so it's not plotted right on top\nof the other one:",
"channel_names = ['MEG_0712', 'MEG_1022']\ntwo_meg_chans = raw[channel_names, start_sample:stop_sample]\ny_offset = np.array([5e-11, 0]) # just enough to separate the channel traces\nx = two_meg_chans[1]\ny = two_meg_chans[0].T + y_offset\nlines = plt.plot(x, y)\nplt.legend(lines, channel_names)",
"Extracting channels by type\nThere are several ways to select all channels of a given type from a\n:class:~mne.io.Raw object. The safest method is to use\n:func:mne.pick_types to obtain the integer indices of the channels you\nwant, then use those indices with the square-bracket indexing method shown\nabove. The :func:~mne.pick_types function uses the :class:~mne.Info\nattribute of the :class:~mne.io.Raw object to determine channel types, and\ntakes boolean or string parameters to indicate which type(s) to retain. The\nmeg parameter defaults to True, and all others default to False,\nso to get just the EEG channels, we pass eeg=True and meg=False:",
"eeg_channel_indices = mne.pick_types(raw.info, meg=False, eeg=True)\neeg_data, times = raw[eeg_channel_indices]\nprint(eeg_data.shape)",
"Some of the parameters of :func:mne.pick_types accept string arguments as\nwell as booleans. For example, the meg parameter can take values\n'mag', 'grad', 'planar1', or 'planar2' to select only\nmagnetometers, all gradiometers, or a specific type of gradiometer. See the\ndocstring of :meth:mne.pick_types for full details.\nThe Raw.get_data() method\nIf you only want the data (not the corresponding array of times),\n:class:~mne.io.Raw objects have a :meth:~mne.io.Raw.get_data method. Used\nwith no parameters specified, it will extract all data from all channels, in\na (n_channels, n_timepoints) :class:NumPy array <numpy.ndarray>:",
"data = raw.get_data()\nprint(data.shape)",
"If you want the array of times, :meth:~mne.io.Raw.get_data has an optional\nreturn_times parameter:",
"data, times = raw.get_data(return_times=True)\nprint(data.shape)\nprint(times.shape)",
"The :meth:~mne.io.Raw.get_data method can also be used to extract specific\nchannel(s) and sample ranges, via its picks, start, and stop\nparameters. The picks parameter accepts integer channel indices, channel\nnames, or channel types, and preserves the requested channel order given as\nits picks parameter.",
"first_channel_data = raw.get_data(picks=0)\neeg_and_eog_data = raw.get_data(picks=['eeg', 'eog'])\ntwo_meg_chans_data = raw.get_data(picks=['MEG_0712', 'MEG_1022'],\n start=1000, stop=2000)\n\nprint(first_channel_data.shape)\nprint(eeg_and_eog_data.shape)\nprint(two_meg_chans_data.shape)",
"Summary of ways to extract data from Raw objects\nThe following table summarizes the various ways of extracting data from a\n:class:~mne.io.Raw object.\n.. cssclass:: table-bordered\n.. rst-class:: midvalign\n+-------------------------------------+-------------------------+\n| Python code | Result |\n| | |\n| | |\n+=====================================+=========================+\n| raw.get_data() | :class:NumPy array |\n| | <numpy.ndarray> |\n| | (n_chans × n_samps) |\n+-------------------------------------+-------------------------+\n| raw[:] | :class:tuple of (data |\n+-------------------------------------+ (n_chans × n_samps), |\n| raw.get_data(return_times=True) | times (1 × n_samps)) |\n+-------------------------------------+-------------------------+\n| raw[0, 1000:2000] | |\n+-------------------------------------+ |\n| raw['MEG 0113', 1000:2000] | |\n+-------------------------------------+ |\n| raw.get_data(picks=0, | :class:`tuple` of |\n| start=1000, stop=2000, | (data (1 × 1000), |\n| return_times=True) | times (1 × 1000)) |\n+-------------------------------------+ |\n| raw.get_data(picks='MEG 0113', | |\n| start=1000, stop=2000, | |\n| return_times=True) | |\n+-------------------------------------+-------------------------+\n| raw[7:9, 1000:2000] | |\n+-------------------------------------+ |\n| raw[[2, 5], 1000:2000] | :class:tuple of |\n+-------------------------------------+ (data (2 × 1000), |\n| raw[['EEG 030', 'EOG 061'], | times (1 × 1000)) |\n| 1000:2000] | |\n+-------------------------------------+-------------------------+\nExporting and saving Raw objects\n:class:~mne.io.Raw objects have a built-in :meth:~mne.io.Raw.save method,\nwhich can be used to write a partially processed :class:~mne.io.Raw object\nto disk as a :file:.fif file, such that it can be re-loaded later with its\nvarious attributes intact (but see precision for an important\nnote about numerical precision when saving).\nThere are a few other ways to export just the sensor data 
from a\n:class:~mne.io.Raw object. One is to use indexing or the\n:meth:~mne.io.Raw.get_data method to extract the data, and use\n:func:numpy.save to save the data array:",
"data = raw.get_data()\nnp.save(file='my_data.npy', arr=data)",
"It is also possible to export the data to a :class:Pandas DataFrame\n<pandas.DataFrame> object, and use the saving methods that :mod:Pandas\n<pandas> affords. The :class:~mne.io.Raw object's\n:meth:~mne.io.Raw.to_data_frame method is similar to\n:meth:~mne.io.Raw.get_data in that it has a picks parameter for\nrestricting which channels are exported, and start and stop\nparameters for restricting the time domain. Note that, by default, times will\nbe converted to milliseconds, rounded to the nearest millisecond, and used as\nthe DataFrame index; see the scaling_time parameter in the documentation\nof :meth:~mne.io.Raw.to_data_frame for more details.",
"sampling_freq = raw.info['sfreq']\nstart_end_secs = np.array([10, 13])\nstart_sample, stop_sample = (start_end_secs * sampling_freq).astype(int)\ndf = raw.to_data_frame(picks=['eeg'], start=start_sample, stop=stop_sample)\n# then save using df.to_csv(...), df.to_hdf(...), etc\nprint(df.head())",
"<div class=\"alert alert-info\"><h4>Note</h4><p>When exporting data as a :class:`NumPy array <numpy.ndarray>` or\n :class:`Pandas DataFrame <pandas.DataFrame>`, be sure to properly account\n for the `unit of representation <units>` in your subsequent\n analyses.</p></div>\n\n.. LINKS\nhttps://docs.python.org/3/tutorial/datastructures.html#dictionaries"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ES-DOC/esdoc-jupyterhub
|
notebooks/test-institute-1/cmip6/models/sandbox-1/toplevel.ipynb
|
gpl-3.0
|
[
"ES-DOC CMIP6 Model Properties - Toplevel\nMIP Era: CMIP6\nInstitute: TEST-INSTITUTE-1\nSource ID: SANDBOX-1\nSub-Topics: Radiative Forcings. \nProperties: 85 (42 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:43\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook",
"# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'test-institute-1', 'sandbox-1', 'toplevel')",
"Document Authors\nSet document authors",
"# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Contributors\nSpecify document contributors",
"# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Publication\nSpecify document publication status",
"# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)",
"Document Table of Contents\n1. Key Properties\n2. Key Properties --> Flux Correction\n3. Key Properties --> Genealogy\n4. Key Properties --> Software Properties\n5. Key Properties --> Coupling\n6. Key Properties --> Tuning Applied\n7. Key Properties --> Conservation --> Heat\n8. Key Properties --> Conservation --> Fresh Water\n9. Key Properties --> Conservation --> Salt\n10. Key Properties --> Conservation --> Momentum\n11. Radiative Forcings\n12. Radiative Forcings --> Greenhouse Gases --> CO2\n13. Radiative Forcings --> Greenhouse Gases --> CH4\n14. Radiative Forcings --> Greenhouse Gases --> N2O\n15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3\n16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3\n17. Radiative Forcings --> Greenhouse Gases --> CFC\n18. Radiative Forcings --> Aerosols --> SO4\n19. Radiative Forcings --> Aerosols --> Black Carbon\n20. Radiative Forcings --> Aerosols --> Organic Carbon\n21. Radiative Forcings --> Aerosols --> Nitrate\n22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect\n23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect\n24. Radiative Forcings --> Aerosols --> Dust\n25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic\n26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic\n27. Radiative Forcings --> Aerosols --> Sea Salt\n28. Radiative Forcings --> Other --> Land Use\n29. Radiative Forcings --> Other --> Solar \n1. Key Properties\nKey properties of the model\n1.1. Model Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nTop level overview of coupled model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.2. Model Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of coupled model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2. Key Properties --> Flux Correction\nFlux correction properties of the model\n2.1. Details\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how flux corrections are applied in the model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3. Key Properties --> Genealogy\nGenealogy and history of the model\n3.1. Year Released\nIs Required: TRUE Type: STRING Cardinality: 1.1\nYear the model was released",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.2. CMIP3 Parent\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCMIP3 parent if any",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.3. CMIP5 Parent\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCMIP5 parent if any",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.4. Previous Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nPreviously known as",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4. Key Properties --> Software Properties\nSoftware properties of model\n4.1. Repository\nIs Required: FALSE Type: STRING Cardinality: 0.1\nLocation of code for this component.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.2. Code Version\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCode version identifier.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.3. Code Languages\nIs Required: FALSE Type: STRING Cardinality: 0.N\nCode language(s).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.4. Components Structure\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe how model realms are structured into independent software components (coupled via a coupler) and internal software components.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.5. Coupler\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nOverarching coupling framework for model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OASIS\" \n# \"OASIS3-MCT\" \n# \"ESMF\" \n# \"NUOPC\" \n# \"Bespoke\" \n# \"Unknown\" \n# \"None\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"5. Key Properties --> Coupling\n**\n5.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of coupling in the model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.2. Atmosphere Double Flux\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"5.3. Atmosphere Fluxes Calculation Grid\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nWhere are the air-sea fluxes calculated",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Atmosphere grid\" \n# \"Ocean grid\" \n# \"Specific coupler grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"5.4. Atmosphere Relative Winds\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nAre relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6. Key Properties --> Tuning Applied\nTuning methodology for model\n6.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.2. Global Mean Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList set of metrics/diagnostics of the global mean state used in tuning model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.3. Regional Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.4. Trend Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList observed trend metrics/diagnostics used in tuning model/component (such as 20th century)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.5. Energy Balance\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.6. Fresh Water Balance\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7. Key Properties --> Conservation --> Heat\nGlobal heat conservation properties of the model\n7.1. Global\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how heat is conserved globally",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.2. Atmos Ocean Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how heat is conserved at the atmosphere/ocean coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.3. Atmos Land Interface\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how heat is conserved at the atmosphere/land coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.4. Atmos Sea-ice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how heat is conserved at the atmosphere/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.5. Ocean Seaice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how heat is conserved at the ocean/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.6. Land Ocean Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how heat is conserved at the land/ocean coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8. Key Properties --> Conservation --> Fresh Water\nGlobal fresh water conservation properties of the model\n8.1. Global\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how fresh_water is conserved globally",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.2. Atmos Ocean Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how fresh_water is conserved at the atmosphere/ocean coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.3. Atmos Land Interface\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how fresh water is conserved at the atmosphere/land coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.4. Atmos Sea-ice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.5. Ocean Seaice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how fresh water is conserved at the ocean/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.6. Runoff\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe how runoff is distributed and conserved",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.7. Iceberg Calving\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how iceberg calving is modeled and conserved",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.8. Endoreic Basins\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how endoreic basins (no ocean access) are treated",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.9. Snow Accumulation\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe how snow accumulation over land and over sea-ice is treated",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9. Key Properties --> Conservation --> Salt\nGlobal salt conservation properties of the model\n9.1. Ocean Seaice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how salt is conserved at the ocean/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"10. Key Properties --> Conservation --> Momentum\nGlobal momentum conservation properties of the model\n10.1. Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how momentum is conserved in the model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"11. Radiative Forcings\nRadiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5)\n11.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of radiative forcings (GHG and aerosols) implementation in model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"12. Radiative Forcings --> Greenhouse Gases --> CO2\nCarbon dioxide forcing\n12.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"13. Radiative Forcings --> Greenhouse Gases --> CH4\nMethane forcing\n13.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"14. Radiative Forcings --> Greenhouse Gases --> N2O\nNitrous oxide forcing\n14.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3\nTropospheric ozone forcing\n15.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3\nStratospheric ozone forcing\n16.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"16.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"17. Radiative Forcings --> Greenhouse Gases --> CFC\nOzone-depleting and non-ozone-depleting fluorinated gases forcing\n17.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.2. Equivalence Concentration\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDetails of any equivalence concentrations used",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"Option 1\" \n# \"Option 2\" \n# \"Option 3\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.3. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"18. Radiative Forcings --> Aerosols --> SO4\nSO4 aerosol forcing\n18.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"19. Radiative Forcings --> Aerosols --> Black Carbon\nBlack carbon aerosol forcing\n19.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"19.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"20. Radiative Forcings --> Aerosols --> Organic Carbon\nOrganic carbon aerosol forcing\n20.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"21. Radiative Forcings --> Aerosols --> Nitrate\nNitrate forcing\n21.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"21.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect\nCloud albedo effect forcing (RFaci)\n22.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22.2. Aerosol Effect On Ice Clouds\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nRadiative effects of aerosols on ice clouds are represented?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"22.3. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect\nCloud lifetime effect forcing (ERFaci)\n23.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23.2. Aerosol Effect On Ice Clouds\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nRadiative effects of aerosols on ice clouds are represented?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"23.3. RFaci From Sulfate Only\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nRadiative forcing from aerosol cloud interactions from sulfate aerosol only?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"23.4. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"24. Radiative Forcings --> Aerosols --> Dust\nDust forcing\n24.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"24.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic\nTropospheric volcanic forcing\n25.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.2. Historical Explosive Volcanic Aerosol Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in historical simulations",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.3. Future Explosive Volcanic Aerosol Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in future simulations",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.4. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic\nStratospheric volcanic forcing\n26.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26.2. Historical Explosive Volcanic Aerosol Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in historical simulations",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26.3. Future Explosive Volcanic Aerosol Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in future simulations",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26.4. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"27. Radiative Forcings --> Aerosols --> Sea Salt\nSea salt forcing\n27.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"28. Radiative Forcings --> Other --> Land Use\nLand use forcing\n28.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"28.2. Crop Change Only\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nLand use change represented via crop change only?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"28.3. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"29. Radiative Forcings --> Other --> Solar\nSolar forcing\n29.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow solar forcing is provided",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"irradiance\" \n# \"proton\" \n# \"electron\" \n# \"cosmic ray\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"29.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"©2017 ES-DOC"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
mromanello/SunoikisisDC_NER
|
Sunoikisis - Named Entity Extraction 2a-FM.ipynb
|
gpl-3.0
|
[
"Table of Contents\n<p><div class=\"lev1 toc-item\"><a href=\"#Summary-of-the-previous-lecture\" data-toc-modified-id=\"Summary-of-the-previous-lecture-1\"><span class=\"toc-item-num\">1 </span>Summary of the previous lecture</a></div><div class=\"lev2 toc-item\"><a href=\"#Libraries-and-import-statements\" data-toc-modified-id=\"Libraries-and-import-statements-11\"><span class=\"toc-item-num\">1.1 </span>Libraries and import statements</a></div><div class=\"lev2 toc-item\"><a href=\"#Data-types\" data-toc-modified-id=\"Data-types-12\"><span class=\"toc-item-num\">1.2 </span>Data types</a></div><div class=\"lev2 toc-item\"><a href=\"#Data-collections-(and-variables)\" data-toc-modified-id=\"Data-collections-(and-variables)-13\"><span class=\"toc-item-num\">1.3 </span>Data collections (and variables)</a></div><div class=\"lev2 toc-item\"><a href=\"#For-loops-and-if-statements\" data-toc-modified-id=\"For-loops-and-if-statements-14\"><span class=\"toc-item-num\">1.4 </span>For loops and if statements</a></div><div class=\"lev2 toc-item\"><a href=\"#Functions\" data-toc-modified-id=\"Functions-15\"><span class=\"toc-item-num\">1.5 </span>Functions</a></div><div class=\"lev2 toc-item\"><a href=\"#Handling-exceptions\" data-toc-modified-id=\"Handling-exceptions-16\"><span class=\"toc-item-num\">1.6 </span>Handling exceptions</a></div><div class=\"lev2 toc-item\"><a href=\"#A-bonus:-objects\" data-toc-modified-id=\"A-bonus:-objects-17\"><span class=\"toc-item-num\">1.7 </span>A bonus: objects</a></div><div class=\"lev1 toc-item\"><a href=\"#Regular-expressions\" data-toc-modified-id=\"Regular-expressions-2\"><span class=\"toc-item-num\">2 </span>Regular expressions</a></div><div class=\"lev1 toc-item\"><a href=\"#Extracting-dates-and-persons-from-texts\" data-toc-modified-id=\"Extracting-dates-and-persons-from-texts-3\"><span class=\"toc-item-num\">3 </span>Extracting dates and persons from texts</a></div><div class=\"lev2 toc-item\"><a href=\"#A-modern-text-in-English\" 
data-toc-modified-id=\"A-modern-text-in-English-31\"><span class=\"toc-item-num\">3.1 </span>A modern text in English</a></div><div class=\"lev2 toc-item\"><a href=\"#Part-Of-Speech-(POS)-and-Named-Entity-(NE)-Tagging\" data-toc-modified-id=\"Part-Of-Speech-(POS)-and-Named-Entity-(NE)-Tagging-32\"><span class=\"toc-item-num\">3.2 </span>Part-Of-Speech (POS) and Named-Entity (NE) Tagging</a></div><div class=\"lev2 toc-item\"><a href=\"#Chunking\" data-toc-modified-id=\"Chunking-33\"><span class=\"toc-item-num\">3.3 </span>Chunking</a></div><div class=\"lev2 toc-item\"><a href=\"#Export-to-IOB-notation\" data-toc-modified-id=\"Export-to-IOB-notation-34\"><span class=\"toc-item-num\">3.4 </span>Export to IOB notation</a></div><div class=\"lev1 toc-item\"><a href=\"#Regex-tagger\" data-toc-modified-id=\"Regex-tagger-4\"><span class=\"toc-item-num\">4 </span>Regex tagger</a></div><div class=\"lev1 toc-item\"><a href=\"#Exercise\" data-toc-modified-id=\"Exercise-5\"><span class=\"toc-item-num\">5 </span>Exercise</a></div>\n\n# Summary of the previous lecture \n\nIn our previous common session, we have introduced some fundamental notions of the Python language. Let's review some of them!\n\n## Libraries and import statements",
"from idai_journals import nlp as dainlp\nimport re\nfrom treetagger import TreeTagger\n\nfrom nltk.tag import StanfordNERTagger\nfrom nltk.chunk.util import tree2conlltags\nfrom nltk.chunk import RegexpParser\nfrom nltk.tree import Tree\nfrom nltk.tag import StanfordNERTaggelr",
"Data types",
"#interges and floats\n3 + 0.5\n#strings\n\"hello\"\n#Booleans\nTrue",
"Data collections (and variables)",
"#lists (can also contain multiple different data types)\nli = [\"Leipzig\", \"London\", \"Berlin\", \"Boston\", 4, False]\n#tuples (like lists, but immutable)\ntu = (\"tuple\", \"list\", \"dictionary\")\n#dictionaries (key : value pairs)\ndi = {\"key\" : \"value\", \"other-key\" : \"second value\"}",
"For loops and if statements",
"#home assignment: try to figure out what the if statement (line 2) does\nfor l in li:\n if isinstance(l, str):\n print(l)",
"Functions",
"def printMe(message):\n print(message)\n \nprintMe(\"Hello, world!\")\nprintMe(\"goodbye...\")",
"Handling exceptions",
"l = [\"zero\", \"one\", \"two\", \"three\"]\nl[10]\n\ntry:\n l[10]\nexcept IndexError:\n print(\"hey, your index is way too high!\")",
"A bonus: objects\nObjects might be a bit complicated, but they're very important to understand the code written by other people, while most of the programs that you'll find around is written using classes and objects. Oh, and the good news is... you've already met them!\nWhat are \"objects\" in a programming language like Python? Well, I like to think about them as... magical, animated tools!\n\nSay that you want to fetch water from a well (and maybe clean some of the mess...). Well, the object-oriented approach to this tak consists in creating one or more magic brooms that go and fetch the water for you! In order to create them, you have to conceptualize the broom in terms of:\n\nthe special features it has (e.g. number of buckets carried, speed...)\nthe actions that it can execute (fetch water, clean the floor)\n\nThat's it! In programming parlance, the features are called properties of the object; the actions are called methods.\nWhen you want to build your own magic brooms you first create a sort of prototype for each of them (which is called the class of magic brooms); then you can go on and create as many brooms as you want...\nHere's how to do it! (very simplified)",
"class MagicBroom():\n #this is called \"constructor\"; it's a special method\n def __init__(self, name, speed=20):\n self.name = name\n self.buckets = 2\n self.speed = speed\n\n def greet(self):\n print(\"Hello, my name is %s! What can I do for you?\" % self.name)\n \n def fetchWater(self):\n if self.speed >= 20:\n print(\"Yes, sir! I'll be back in a sec!\")\n else:\n print(\"Allright, but I am taking my time!\")\n\nmickey = MagicBroom(\"Mickey\")\nmickey.greet()\n\npeter = MagicBroom(\"Peter\", speed=5)\n\nmickey.speed\n\nmickey.fetchWater()\n\npeter.fetchWater()",
"Regular expressions\nHow would you find all the numbers in this sentence?\n\nThe set of integers consists of zero (0), the positive natural numbers (1, 2, 3, …), also called whole numbers or counting numbers,[1][2] and their additive inverses (the negative integers, i.e., −1, −2, −3, …). This is often denoted by a boldface Z (\"Z\") or blackboard bold Z {\\displaystyle \\mathbb {Z} } \\mathbb {Z} (Unicode U+2124 ℤ) standing for the German word Zahlen ([ˈtsaːlən], \"numbers\").[3][4] ℤ is a subset of the sets of rational and real numbers and, like the natural numbers, is countably infinite.",
"wiki = 'The set of integers consists of zero (0), the positive natural numbers (1, 2, 3, …), also called whole numbers or counting numbers,[1][2] and their additive inverses (the negative integers, i.e., −1, −2, −3, …). This is often denoted by a boldface Z (\"Z\") or blackboard bold Z {\\displaystyle \\mathbb {Z} } \\mathbb {Z} (Unicode U+2124 ℤ) standing for the German word Zahlen ([ˈtsaːlən], \"numbers\").[3][4] ℤ is a subset of the sets of rational and real numbers and, like the natural numbers, is countably infinite.'",
"We'd need a way to tell our machine not to look for specific strings, bur rather for classes of strings, i.e. using some sort of meta-character to catch a whole group of signs (e.g. the numbers); then we'd need to tell to optionally include/exclude some other signs, or to catch the numbers only if they're not preceeded/followed by other signs...\nThat's precisely what Regular Expressions do! They allow you to express a query as a string of metacharacters (or groups of metacharacters).\nHow do we use them in Python? First, we need to import a module from the Standard Library (i.e. you already have them with Python: no need to install external libraries)",
"import re",
"A cool feature of RegExp in Python is that you can create your complicated patterns as objects (and assign them to variables)! That's right, RegExp patterns are your magic brooms...",
"#here is one to catch all numbers\nreg = re.compile(r'[0-9]+') #or: r'\\d+'\ntype(reg)",
"The Pattern object has a number of interesting methods to search and replace the pattern. Generally, you use them with the text that must be searched as an argument. For instance, findall returns all matches as a list",
"reg.findall(wiki)",
"Kind of a sloppy job we did! The negative numbers are not captured as negative; the footnote reference (e.g. [1], [4]) are also captured and we don't want them... We can do better. Let's improve our pattern so that we include the '-' signs (if present) and we get rid of the footnotes",
"reg = re.compile(r'(?<!\\[)−?\\d+(?!\\])') # the 'r' is there to make sure that we don't have to \"escape the escape\" sing (\\)\nreg.findall(wiki)",
"Now it's time to go back to our task of (Named) Entity recognition and extraction task. But we're going to use RegExp patterns and syntax quite a few times now...\nExtracting dates and persons from texts\nAs Matteo said last time, the concept of \"named entity\" is domain- and task- specific. While a person's or a place's name will more or less always fall under the definition, in some contexts of information extraction people might be interested in other kinds of real-life \"entities\", such as time references (months, days, dates) or museum objects, which are not relevant to others.\nIn this exercise, we are going to expand on what Matteo did last time with proper names in Latin and look at two specific classes of \"entities\" mentioned in a modern scientific text about ancient history: dates and persons.\nA modern text in English\nFirst, let's grab a text.\nWe will be working with an English article on Roman history. The article is: Frederik Juliaan Vervaet, The Praetorian Proconsuls of the Roman Republic (211–52 BCE). A Constitutional Survey, Chiron 42(2012): 45-96.\nLet's start by loading the text and inspect the first 10.000 characters (we'll be working with just the first 10k words)",
"with open(\"data/txt/article446_10k.txt\") as f:\n txt = f.read()\n\ntxt[:1000]",
"Part-Of-Speech (POS) and Named-Entity (NE) Tagging\nMost of the time POS tagging is the precondition before you can perform any other advanced operation on a text\nAs we did with Matteo last time, by \"tagging\" we mean the coupling of each word with a tag that describe some property of the word itself. Part-of-speech tags define what word class (e.g. \"verb\", or \"proper noun\") a text token belongs to.\nThere are several tagset used for each language, and several software (pos taggers) who can tag your text automatically. One of the most used is TreeTagger, which has pretrained classifiers for many languages.\nLet's run it from Python, using one of the few \"wrappers\" available",
"#first we load the library\nfrom treetagger import TreeTagger\n\n#That's right! we start by creating a Tagger \"magic broom\" (a Tagger object)\ntt = TreeTagger(language=\"english\")\n\n#then we tag our text\ntagged = tt.tag(txt)\n\ntagged[:20]",
"Named Entity Recognition (using a tool like the Stanford NER that we saw in our last lecture) is also a way of tagging the text, this time using information not on the word class but on a different level of classification (place, person, organization or none of the above).\nLet's do this too",
"#first, we define the path to the English classifier for Stanford NER\nenglish_classifier = 'english.all.3class.distsim.crf.ser.gz'\ntwords = [w[0] for w in tagged]\n\n#then... guess what? Yes, we create a NER-tagger Magic Broom ;-)\nfrom nltk.tag import StanfordNERTagger\n\nner_tagger = StanfordNERTagger(english_classifier)\nners = ner_tagger.tag(twords)\n\n#not very pretty...\nners[:20]",
"Chunking\nAs we saw, when we analyze a text we proceed word by word (more exactly: token by token). However, Named Entities (now including dates) often span over more than one token. The task of sub-dividing a section of text into phrases and/or meaningful constituents (which may include 1 or more text tokens) is called chunking\n\nIn the image above, the tokens are [We, saw, the, yellow, dog]. Two Noun Phrases (NP) can be chunked:\n* \"we\" (1 token)\n* \"the yellow dog\" (3 tokens)\nThe IOB notation that Matteo introduced last time is a popular way to store the information about chunks in a word-by-word format. In the case of \"the yellow dog\", we will have:\n* saw = not in a chunk --> O\n* the = beginning of the chunk --> B-NP\n* yellow = internal part of the chunk --> I-NP\n* dog = internal part of the chunk --> I-NP\nThe easiest method for chunking a sentence in Python is to use the information in the Tag and a regexp syntax.\nFor example, if we have:\nin O\nNew LOCATION\nYork LOCATION\nCity LOCATION\nWe easily see that the 3 tokens tagged as LOCATION go together. We may thus write a grammar rule that chunks the LOC together:\nLOC:\n {<LOCATION><LOCATION>*}\nWhich means group in a chunk named LOC every token tagged as LOCATION, including any token tagged as LOCATION that might optionally come after.\nAnd the same goes also for PERSONS and ORGANIZATIONS. We may even use RegExp syntax to be more tollerant and make room for annotation errors, in case e.g. the two tokens Geore Washington are wrongly tagged as PERSON and LOCATION.\nHere's how I'd do it (it's not perfect at all but it should work in most cases)...",
"from nltk.chunk import RegexpParser\n\nenglish_chunker = RegexpParser(r'''\nLOC:\n {<LOCATION><(PERSON|LOCATION|MISC|ORGANIZATION)>*}\n''')",
"Let's see it in action with the first few words",
"tree = english_chunker.parse(ners[:20])\nprint(tree)",
"Well... OK, \"Roman Republic\" is not a location, but at least the chunking is exactly what we wanted to have, right?\nExport to IOB notation\nOK, but now how do we convert this to the IOB notation?\nLuckily, there's a ready-made function in a module from the NLTK library! Let's load and use it\n(just in case, there is also a function that does the reverse: from IOB to tree)",
"from nltk.chunk.util import tree2conlltags\n\niobs = tree2conlltags(tree)\n\niobs",
"Regex tagger\nNow, to go back to our original task, how do we use all this to annotate the dates and export them to IOB?\nDates are often just numbers (e.g. \"2017\"); sometimes they come in more complex formats like: \"14 September 2017\" or \"14-09-2017\". \nOne very simple solutions to find them and annotate them with a chunking notation might be to tag the tokens of our text with a very simple custom tagset that we design for dates. We assign \"O\" to all tokens, save the numbers (that we tag \"CD\") and some selected time formats or expressions, like the months of the year or the sequence number-number. We use the tag \"Date\" for them.\nIn order to do this, we need:\n\nregular expression syntax\na tagger that works with RegExp patterns\n\nA module of NLTK provides with exactly that tagger that can work with RegExp syntax",
"from nltk.tag import RegexpTagger\n\n#here is our list of patterns\npatterns = [\n (r'\\d+$', 'CD'),\n (r'\\d+[-–]\\d+$', \"Date\"),\n (r'\\d{1,2}[-\\.\\/]\\d{1,2}[-\\.\\/]\\d{2,4}', \"Date\"),\n (r'January|February|March|April|May|June|July|August|September|October|November|December', \"Date\"),\n (r'\\d{4}$', \"Date\"),\n (r'BCE|BC|AD', \"Date\"),\n (r'.*', \"O\")\n]\n\n#Our RegexpTagger magic broom! We initialize it with our pattern list\ntagger = RegexpTagger(patterns)\n\n#let's test it with a trivial example\ntagger.tag(\"I was born on September 14 , or 14-09\".split(\" \"))",
"Now let's see it in action on the real stuff",
"reg_tag = tagger.tag(twords)\n\nreg_tag[:50]",
"Now we just need to chunk it and export it to IOB. Then we are ready to evaluate the manual annotation...\nFirst, we have to define a chunker",
"date_chunker = RegexpParser(r'''\nDATE:\n {<CD>*<Date><Date|CD>*}\nDATE:\n {<CD>+}\n''')\n\nt = date_chunker.parse(reg_tag)\n\n#we use that function to make sure that the tree is not too complex to be converted\nflat = dainlp.flatten_tree(t)\n\niob_list = tree2conlltags(flat)\n\niob_list[:50]\n\n#then we can write it on an output file\nwith open(\"data/iob/article_446_date_aut.iob\", \"w\") as out:\n for i in iob_list:\n out.write(\"\\t\".join(i)+\"\\n\")",
"Exercise\nIn the practical exercise, you are requested to extract the person names from the same article that we used for dates. You will annotate them using the Stanford NER with the pre-trained classifier for English that come with the software; extract the Person chunks; evaluate the results against a golden standard.\nHere is a summary of the steps that you will have to execute in order to solve the exercise:\n\nload the file: data/txt/article446_10k.txt and read its content\nannotate the Named Entities using Stanford NER\ndefine an appropriate chunker for Persons\nchunk the extracted Named Entities\nconvert the chunked Tree into IOB format\nevaluate the IOB annotation using the appropriate functions\nuse the file: data/iob/article_446_person_GOLD.iob as gold standard\n\n\nreport the final evaluation metrics (precision, recall, F-score)",
"#just remember that the path to the English pre-trained classifier for Stanfor NER is\nenglish_classifier = 'english.all.3class.distsim.crf.ser.gz'",
"For the evaluation of the accuracy of your classifier, you can adapt the following lines of code:\n```python\nfrom sklearn.metrics import precision_recall_fscore_support\nprecision, recall, fscore, support = precision_recall_fscore_support(gold_labels\n , auto_labels\n , average=\"micro\"\n , labels=[\"B-DATE\",\"I-DATE\"])\nprint(\"Precision: {0:.2f}\".format(precision))\nprint(\"Recall: {0:.2f}\".format(recall))\nprint(\"F1-score: {0:.2f}\".format(fscore))\n```\nThings you'll need to change/provide:\n- list of positive labels (variable labels)\n- gold_labels: a list with the correct labels\n- auto_labels: a similar list with the labels output by your classifier.\nNB: make sure that gold_labels and auto_labels are of the same lenght, i.e. that both labels at position n in both lists refer to the same token."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
julienchastang/unidata-python-workshop
|
notebooks/Primer/Numpy and Matplotlib Basics.ipynb
|
mit
|
[
"<div style=\"width:1000 px\">\n\n<div style=\"float:right; width:98 px; height:98px;\">\n<img src=\"https://raw.githubusercontent.com/Unidata/MetPy/master/metpy/plots/_static/unidata_150x150.png\" alt=\"Unidata Logo\" style=\"height: 98px;\">\n</div>\n\n<h1>Primer</h1>\n<h3>Unidata Python Workshop</h3>\n\n<div style=\"clear:both\"></div>\n</div>\n\n<hr style=\"height:2px;\">\n\n<div style=\"float:right; width:250 px\"><img src=\"http://www.contribute.geeksforgeeks.org/wp-content/uploads/numpy-logo1.jpg\" alt=\"NumPy Logo\" style=\"height: 250px;\"></div>\n\nOverview:\n\nTeaching: 20 minutes\nExercises: 10 minutes\n\nQuestions\n\nWhat are arrays?\nHow can arrays be manipulated effectively in Python?\n\nObjectives\n\nCreate an array of ‘data’.\nPerform basic calculations on this data using python math functions.\nSlice and index the array\n\nNumPy is the fundamental package for scientific computing with Python. It contains among other things:\n- a powerful N-dimensional array object\n- sophisticated (broadcasting) functions\n- useful linear algebra, Fourier transform, and random number capabilities\nThe NumPy array object is the common interface for working with typed arrays of data across a wide-variety of scientific Python packages. NumPy also features a C-API, which enables interfacing existing Fortran/C/C++ libraries with Python and NumPy.\nCreate an array of 'data'\nThe NumPy array represents a contiguous block of memory, holding entries of a given type (and hence fixed size). The entries are laid out in memory according to the shape, or list of dimension sizes.",
"# Convention for import to get shortened namespace\nimport numpy as np\n\n# Create a simple array from a list of integers\na = np.array([1, 2, 3])\na\n\n# See how many dimensions the array has\na.ndim\n\n# Print out the shape attribute\na.shape\n\n# Print out the data type attribute\na.dtype\n\n# This time use a nested list of floats\na = np.array([[1., 2., 3., 4., 5.]])\na\n\n# See how many dimensions the array has\na.ndim\n\n# Print out the shape attribute\na.shape\n\n# Print out the data type attribute\na.dtype",
"NumPy also provides helper functions for generating arrays of data to save you typing for regularly spaced data. \n\narange(start, stop, interval) creates a range of values in the interval [start,stop) with step spacing.\nlinspace(start, stop, num) creates a range of num evenly spaced values over the range [start,stop].\n\narange",
"a = np.arange(5)\nprint(a)\n\na = np.arange(3, 11)\nprint(a)\n\na = np.arange(1, 10, 2)\nprint(a)",
"linspace",
"b = np.linspace(5, 15, 5)\nprint(b)\n\nb = np.linspace(2.5, 10.25, 11)\nprint(b)",
"Perform basic calculations with Python\nBasic math\nIn core Python, that is without NumPy, creating sequences of values and adding them together requires writing a lot of manual loops, just like one would do in C/C++:",
"a = range(5, 10)\nb = [3 + i * 1.5/4 for i in range(5)]\n\nresult = []\nfor x, y in zip(a, b):\n result.append(x + y)\nprint(result)",
"That is very verbose and not very intuitive. Using NumPy this becomes:",
"a = np.arange(5, 10)\nb = np.linspace(3, 4.5, 5)\n\na + b",
"The four major mathematical operations operate in the same way. They perform an element-by-element calculation of the two arrays. The two must be the same shape though!",
"a * b",
"Constants\nNumPy proves us access to some useful constants as well - remember you should never be typing these in manually! Other libraries such as SciPy and MetPy have their own set of constants that are more domain specific.",
"np.pi\n\nnp.e\n\n# This makes working with radians effortless!\nt = np.arange(0, 2 * np.pi + np.pi / 4, np.pi / 4)\nt",
"Array math functions\nNumPy also has math functions that can operate on arrays. Similar to the math operations, these greatly simplify and speed up these operations. Be sure to checkout the listing of mathematical functions in the NumPy documentation.",
"# Calculate the sine function\nsin_t = np.sin(t)\nprint(sin_t)\n\n# Round to three decimal places\nprint(np.round(sin_t, 3))\n\n# Calculate the cosine function\ncos_t = np.cos(t)\nprint(cos_t)\n\n# Convert radians to degrees\ndegrees = np.rad2deg(t)\nprint(degrees)\n\n# Integrate the sine function with the trapezoidal rule\nsine_integral = np.trapz(sin_t, t)\nprint(np.round(sine_integral, 3))\n\n# Sum the values of the cosine\ncos_sum = np.sum(cos_t)\nprint(cos_sum)\n\n# Calculate the cumulative sum of the cosine\ncos_csum = np.cumsum(cos_t)\nprint(cos_csum)",
"Index and slice arrays\nIndexing is how we pull individual data items out of an array. Slicing extends this process to pulling out a regular set of the items.",
"# Convention for import to get shortened namespace\nimport numpy as np\n\n# Create an array for testing\na = np.arange(12).reshape(3, 4)\n\na",
"Indexing in Python is 0-based, so the command below looks for the 2nd item along the first dimension (row) and the 3rd along the second dimension (column).",
"a[1, 2]",
"Can also just index on one dimension",
"a[2]",
"Negative indices are also allowed, which permit indexing relative to the end of the array.",
"a[0, -1]",
"Slicing syntax is written as start:stop[:step], where all numbers are optional.\n- defaults: \n - start = 0\n - end = len(dim)\n - step = 1\n- The second colon is also optional if no step is used.\nIt should be noted that end represents one past the last item; one can also think of it as a half open interval: [start, end)",
"# Get the 2nd and 3rd rows\na[1:3]\n\n# All rows and 3rd column\na[:, 2]\n\n# ... can be used to replace one or more full slices\na[..., 2]\n\n# Slice every other row\na[::2]\n\n# Slice out every other column\na[:, ::2]\n\n# Slice every other item along each dimension -- how would we do this\n",
"Plotting with Matplotlib\nMatplotlib is a python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms.\nThe first step is to set up our notebook environment so that matplotlib plots appear inline as images:",
"%matplotlib inline",
"Next we import the matplotlib library's pyplot interface. This is a MATLAB-like interface that makes generating plots relatively simple. To shorten this long name, we import it as plt to keep things short but clear.",
"import matplotlib.pyplot as plt",
"Now we generate some data to use while experimenting with plotting:",
"times = np.array([ 93., 96., 99., 102., 105., 108., 111., 114., 117.,\n 120., 123., 126., 129., 132., 135., 138., 141., 144.,\n 147., 150., 153., 156., 159., 162.])\ntemps = np.array([310.7, 308.0, 296.4, 289.5, 288.5, 287.1, 301.1, 308.3,\n 311.5, 305.1, 295.6, 292.4, 290.4, 289.1, 299.4, 307.9,\n 316.6, 293.9, 291.2, 289.8, 287.1, 285.8, 303.3, 310.])",
"Now we come to two quick lines to create a plot. Matplotlib has two core objects: the Figure and the Axes. The Axes is an individual plot with an x-axis, a y-axis, labels, etc; it has all of the various plotting methods we use. A Figure holds one or more Axes on which we draw.\nBelow the first line asks for a Figure 10 inches by 6 inches; matplotlib takes care of creating an Axes on it for us. After that, we call plot, with times as the data along the x-axis (independant values) and temps as the data along the y-axis (the dependant values).",
"# Create a figure and an axes\nfig, ax = plt.subplots(figsize=(10, 6))\n\n# Plot times as x-variable and temperatures as y-variable\nax.plot(times, temps)",
"From there, we can do things like ask the axis to add labels for x and y:",
"# Add some labels to the plot\nax.set_xlabel('Time')\nax.set_ylabel('Temperature')\n\n# Prompt the notebook to re-display the figure after we modify it\nfig",
"We can also add a title to the plot:",
"ax.set_title('GFS Temperature Forecast', fontdict={'size':16})\n\nfig",
"Of course, we can do so much more...",
"# Set up more temperature data\ntemps_1000 = np.array([316.0, 316.3, 308.9, 304.0, 302.0, 300.8, 306.2, 309.8,\n 313.5, 313.3, 308.3, 304.9, 301.0, 299.2, 302.6, 309.0,\n 311.8, 304.7, 304.6, 301.8, 300.6, 299.9, 306.3, 311.3])",
"Here we call plot more than once to plot multiple series of temperature on the same plot; when plotting we pass label to plot to facilitate automatic creation. This is added with the legend call. We also add gridlines to the plot using the grid() call.",
"fig, ax = plt.subplots(figsize=(10, 6))\n\n# Plot two series of data\n# The label argument is used when generating a legend.\nax.plot(times, temps, label='Temperature (surface)')\nax.plot(times, temps_1000, label='Temperature (1000 mb)')\n\n# Add labels and title\nax.set_xlabel('Time')\nax.set_ylabel('Temperature')\nax.set_title('Temperature Forecast')\n\n# Add gridlines\nax.grid(True)\n\n# Add a legend to the upper left corner of the plot\nax.legend(loc='upper left')",
"We're not restricted to the default look of the plots, but rather we can override style attributes, such as linestyle and color. color can accept a wide array of options for color, such as red or blue or HTML color codes. Here we use some different shades of red taken from the Tableau color set in matplotlib, by using tab:red for color.",
"fig, ax = plt.subplots(figsize=(10, 6))\n\n# Specify how our lines should look\nax.plot(times, temps, color='tab:red', label='Temperature (surface)')\nax.plot(times, temps_1000, color='tab:red', linestyle='--',\n label='Temperature (isobaric level)')\n\n# Same as above\nax.set_xlabel('Time')\nax.set_ylabel('Temperature')\nax.set_title('Temperature Forecast')\nax.grid(True)\nax.legend(loc='upper left')",
"Resources\nThe goal of this tutorial is to provide an overview of the use of the NumPy library. It tries to hit all of the important parts, but it is by no means comprehensive. For more information, try looking at the:\n- Tentative NumPy Tutorial\n- NumPy User Guide\n- Introduction to NumPy from SAM\n- Matplotlib Documentation\n- Matplotlib plot documentation"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
jiarong/SSUsearch
|
notebooks-pc-linux/ssu-search-Copy3.ipynb
|
bsd-3-clause
|
[
"Set up working directory",
"cd ~/Desktop/SSUsearch/\n\nmkdir -p ./workdir\n\n#check seqfile files to process in data directory (make sure you still remember the data directory)\n!ls ./data/test/data",
"README\nThis part of pipeline search for the SSU rRNA gene fragments, classify them, and extract reads aligned specific region. It is also heavy lifting part of the whole pipeline (more cpu will help).\nThis part works with one seqfile a time. You just need to change the \"Seqfile\" and maybe other parameters in the two cells bellow.\nTo run commands, click \"Cell\" then \"Run All\". After it finishes, you will see \"*** pipeline runs successsfully :)\" at bottom of this pape.\nIf your computer has many processors, there are two ways to make use of the resource:\n\n\nSet \"Cpu\" higher number.\n\n\nmake more copies of this notebook (click \"File\" then \"Make a copy\" in menu bar), so you can run the step on multiple files at the same time.\n\n\n(Again we assume the \"Seqfile\" is quality trimmed.)\nHere we will process one file at a time; set the \"Seqfile\" variable to the seqfile name to be be processed\nFirst part of seqfile basename (separated by \".\") will be the label of this sample, so named it properly.\ne.g. for \"/usr/local/notebooks/data/test/data/1c.fa\", \"1c\" will the label of this sample.",
"Seqfile='./data/test/data/2c.fa'",
"Other parameters to set",
"Cpu='1' # number of maxixum threads for search and alignment\nHmm='./data/SSUsearch_db/Hmm.ssu.hmm' # hmm model for ssu\nGene='ssu'\nScript_dir='./scripts'\nGene_model_org='./data/SSUsearch_db/Gene_model_org.16s_ecoli_J01695.fasta'\nAli_template='./data/SSUsearch_db/Ali_template.silva_ssu.fasta'\n\nStart='577' #pick regions for de novo clustering\nEnd='727'\nLen_cutoff='100' # min length for reads picked for the region\n\nGene_tax='./data/SSUsearch_db/Gene_tax.silva_taxa_family.tax' # silva 108 ref\nGene_db='./data/SSUsearch_db/Gene_db.silva_108_rep_set.fasta'\n\nGene_tax_cc='./data/SSUsearch_db/Gene_tax_cc.greengene_97_otus.tax' # greengene 2012.10 ref for copy correction\nGene_db_cc='./data/SSUsearch_db/Gene_db_cc.greengene_97_otus.fasta'\n\n# first part of file basename will the label of this sample\nimport os\nFilename=os.path.basename(Seqfile)\nTag=Filename.split('.')[0]\n\nimport os\n\nNew_path = '{}:{}'.format('~/Desktop/SSUsearch/external_tools/bin/', os.environ['PATH'])\nHmm=os.path.abspath(Hmm)\nSeqfile=os.path.abspath(Seqfile)\nScript_dir=os.path.abspath(Script_dir)\nGene_model_org=os.path.abspath(Gene_model_org)\nAli_template=os.path.abspath(Ali_template)\nGene_tax=os.path.abspath(Gene_tax)\nGene_db=os.path.abspath(Gene_db)\nGene_tax_cc=os.path.abspath(Gene_tax_cc)\nGene_db_cc=os.path.abspath(Gene_db_cc)\n\nos.environ.update(\n {'PATH':New_path,\n 'Cpu':Cpu, \n 'Hmm':os.path.abspath(Hmm), \n 'Gene':Gene, \n 'Seqfile':os.path.abspath(Seqfile), \n 'Filename':Filename, \n 'Tag':Tag, \n 'Script_dir':os.path.abspath(Script_dir), \n 'Gene_model_org':os.path.abspath(Gene_model_org), \n 'Ali_template':os.path.abspath(Ali_template), \n 'Start':Start, \n 'End':End,\n 'Len_cutoff':Len_cutoff,\n 'Gene_tax':os.path.abspath(Gene_tax), \n 'Gene_db':os.path.abspath(Gene_db), \n 'Gene_tax_cc':os.path.abspath(Gene_tax_cc), \n 'Gene_db_cc':os.path.abspath(Gene_db_cc)})\n\n!echo \"*** make sure: parameters are right\"\n!echo \"Seqfile: $Seqfile\\nCpu: $Cpu\\nFilename: 
$Filename\\nTag: $Tag\"\n\ncd workdir\n\nmkdir -p $Tag.ssu.out\n\n### start hmmsearch\n\n%%bash\necho \"*** hmmsearch starting\"\ntime hmmsearch --incE 10 --incdomE 10 --cpu $Cpu \\\n --domtblout $Tag.ssu.out/$Tag.qc.$Gene.hmmdomtblout \\\n -o /dev/null -A $Tag.ssu.out/$Tag.qc.$Gene.sto \\\n $Hmm $Seqfile\necho \"*** hmmsearch finished\"\n\n!python $Script_dir/get-seq-from-hmmout.py \\\n $Tag.ssu.out/$Tag.qc.$Gene.hmmdomtblout \\\n $Tag.ssu.out/$Tag.qc.$Gene.sto \\\n $Tag.ssu.out/$Tag.qc.$Gene",
"Pass hits to mothur aligner",
"%%bash\necho \"*** Starting mothur align\"\ncat $Gene_model_org $Tag.ssu.out/$Tag.qc.$Gene > $Tag.ssu.out/$Tag.qc.$Gene.RFadded\n\n# mothur does not allow tab between its flags, thus no indents here\ntime mothur \"#align.seqs(candidate=$Tag.ssu.out/$Tag.qc.$Gene.RFadded, template=$Ali_template, threshold=0.5, flip=t, processors=$Cpu)\"\n\nrm -f mothur.*.logfile",
"Get aligned seqs that have > 50% matched to references",
"!python $Script_dir/mothur-align-report-parser-cutoff.py \\\n $Tag.ssu.out/$Tag.qc.$Gene.align.report \\\n $Tag.ssu.out/$Tag.qc.$Gene.align \\\n $Tag.ssu.out/$Tag.qc.$Gene.align.filter \\\n 0.5\n \n\n!python $Script_dir/remove-gap.py $Tag.ssu.out/$Tag.qc.$Gene.align.filter $Tag.ssu.out/$Tag.qc.$Gene.align.filter.fa",
"Search is done here (the computational intensive part). Hooray!\n\n\n\\$Tag.ssu.out/\\$Tag.qc.\\$Gene.align.filter: \n aligned SSU rRNA gene fragments\n\n\n\\$Tag.ssu.out/\\$Tag.qc.\\$Gene.align.filter.fa: \n unaligned SSU rRNA gene fragments\n\n\nExtract the reads mapped 150bp region in V4 (577-727 in E.coli SSU rRNA gene position) for unsupervised clustering",
"!python $Script_dir/region-cut.py $Tag.ssu.out/$Tag.qc.$Gene.align.filter $Start $End $Len_cutoff\n\n!mv $Tag.ssu.out/$Tag.qc.$Gene.align.filter.\"$Start\"to\"$End\".cut.lenscreen $Tag.ssu.out/$Tag.forclust",
"Classify SSU rRNA gene seqs using SILVA",
"%%bash\nrm -f $Tag.ssu.out/$Tag.qc.$Gene.align.filter.silva_taxa_family*.taxonomy\nmothur \"#classify.seqs(fasta=$Tag.ssu.out/$Tag.qc.$Gene.align.filter.fa, template=$Gene_db, taxonomy=$Gene_tax, cutoff=50, processors=$Cpu)\"\nmv $Tag.ssu.out/$Tag.qc.$Gene.align.filter.silva_taxa_family*.taxonomy \\\n $Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.silva.taxonomy\n\n!python $Script_dir/count-taxon.py \\\n $Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.silva.taxonomy \\\n $Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.silva.taxonomy.count\n!rm -f mothur.*.logfile",
"Classify SSU rRNA gene seqs with Greengene for copy correction later",
"%%bash\nrm -f $Tag.ssu.out/$Tag.qc.$Gene.align.filter.greengene_97_otus*.taxonomy\nmothur \"#classify.seqs(fasta=$Tag.ssu.out/$Tag.qc.$Gene.align.filter.fa, template=$Gene_db_cc, taxonomy=$Gene_tax_cc, cutoff=50, processors=$Cpu)\"\nmv $Tag.ssu.out/$Tag.qc.$Gene.align.filter.greengene_97_otus*.taxonomy \\\n $Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.gg.taxonomy\n\n!python $Script_dir/count-taxon.py \\\n $Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.gg.taxonomy \\\n $Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.gg.taxonomy.count\n!rm -f mothur.*.logfile\n\n# check the output directory\n!ls $Tag.ssu.out",
"This part of pipeline (working with one sequence file) finishes here. Next we will combine samples for community analysis (see unsupervised analysis).\nFollowing are files useful for community analysis:\n\n1c.577to727: aligned fasta file of seqs mapped to target region for de novo clustering\n1c.qc.ssu.align.filter: aligned fasta file of all SSU rRNA gene fragments\n1c.qc.ssu.align.filter.wang.gg.taxonomy: Greengene taxonomy (for copy correction)\n1c.qc.ssu.align.filter.wang.silva.taxonomy: SILVA taxonomy",
"!echo \"*** pipeline runs successsfully :)\""
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
h-mayorquin/time_series_basic
|
presentations/.ipynb_checkpoints/2015-august-checkpoint.ipynb
|
bsd-3-clause
|
[
"Nexa And Time Series\nSo this is a brief show on the work with time series so far. The first thing that we have to do is to import the classical libraries. We also do a little trick to work with the librarires in the directory above.\nMain Libraries",
"# Scientific Python libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mpld3\nimport seaborn as sn\nmpld3.enable_notebook()\nimport sys\nsys.path.append(\"../\")\n# Nexa in-house libraries\nfrom signals.time_series_class import MixAr\nfrom signals.aux_functions import sidekick\nfrom input.sensors import PerceptualSpace, Sensor\nfrom nexa.nexa import Nexa",
"Now we have a couple of imports here. I will explain what al libraries does:\nSignals\nThis is the module to put time series. In this case I am importing a class that allows us to build an autoregressive process (MixAr) that can be mix in space with a simpler series (sidekick) as Pawell suggested.\nInput\nThis module takes care of the input sides to Nexa, that is a group of sensors. Here I created some classes that allow easy organization and implementation of data with a time dimension. In particular the class Sensor represents a single receptive unit that reads a time series from the exterior whereas Perceptual Space allows us to deal with a group of them and their interactions.\nNexa\nFinally, Nexa. Building in Benjaminsson's previous work I implemented (in far more simpler terms, there is still of testing and optimization to be done) a Nexa framework. The Nexa object contains here a perceptual space which represents a -as stated before- a group of sensors with information on time. The Object contains all the operations that allow the creation of vector codes from the ground up:\n\nFormation of a Spatio Temporal Distance Matrix (STDM) that captures the cross-correlations of a percpetual space.\nClustering / Vector quantization in the vector space\nIndex creation. That is, utilities to transform the data from the whole preceptual space to the particular set of indexes of a cluster and the other way around.\nClustering / Vector quantization in the data / time space. \nCode creation. \n\nProgram Execution and Workflow\nSo first we declare and discuss the parameters and setup requiered for a run of Nexa. We declare the time resoultion of the system and the total amount of time that our system will be simulated. In a real data analysis task this will be determined from the domain of the problem but given that we are in the development, toy example phase we determine those quantites by ourselves.",
"dt = 0.1\nTmax = 100",
"Time series to analyze\nNow we input the necessary setup for our time series. We present the code here and explain it bellow together with a visualization of both of them.",
"# Let's get the sideckick function\namplitude = 1\nw1 = 1\nw2 = 5\nbeta = sidekick(w1, w2, dt, Tmax, amplitude)\n\n# Now we will get the AR proccess mixed with the sidekick\n\n# First we need the phi's vector\nphi0 = 0.0\nphi1 = -0.8\nphi2 = 0.3\n\nphi = np.array((phi0, phi1, phi2))\n\n# Now we need the initial conditions\nx0 = 1\nx1 = 1\nx2 = 0\n\ninitial_conditions = np.array((x0, x1, x2))\n\n# Second we construct the series with the mix\nA = MixAr(phi, dt=dt, Tmax=Tmax, beta=beta)\nA.initial_conditions(initial_conditions)\nmix_series = A.construct_series()\n# mix_series = beta\n\ntime = A.time",
"First we describe the sideckick function, it is specified by two frquencies and the amplitude. Under the hood it is simple a the mix of two sine waves with the given frequency. We visualize it bellow.",
"%matplotlib inline\nplt.plot(time, beta)\nplt.show()",
"Now we will visualiza the Auto Regresive process which is a little bit more complicated. In order to specify an autoregresive process we need as many initial conditions as the order of the process. In concrete our AR is:\n$$x(t) = \\phi_0 + x(t - 1) * \\phi_1 + x(t - 2) * \\phi_2 $$ \nIt is easy to imagine how to generalize this to any order. Now, the particularity that we introduce is to add also an spatial term to this equation.\n$$x(t) = \\phi_0 + x(t - 1) * \\phi_1 + x(t - 2) * \\phi_2 + \\beta(t)$$\nWhere beta is our sidekick function. \nOur AR class therefore takes in its constructor three initial conditions and the corresponding values of phi. We show the plot below and we see the characteristic plot of an AR process.",
"plt.plot(time, mix_series)",
"Nexa worflow\nNow we present here the nexa worflow but first we need to initialize a couple of parameters and the setup",
"# Here we will calculate correlations\nNlags = 100\nNspatial_clusters = 2 # Number of spatial clusters\nNtime_clusters = 2 # Number of time clusters\nNembedding = 3 # Dimension of the embedding space\n\n# We create the here perceptual space\naux_sensors = [Sensor(mix_series, dt), Sensor(beta, dt)]\nperceptual_space = PerceptualSpace(aux_sensors, Nlags)\n\n# Now the Nexa object\nnexa_object = Nexa(perceptual_space, Nlags, Nspatial_clusters,\n Ntime_clusters, Nembedding)",
"We execute the whole nexa workflow with a single routine",
"# Calculate all the quantities\nnexa_object.calculate_all()",
"I decided to implement the routine to calculate the code vectors separte however (discuss this!)",
"# Build the code vectors\ncode_vectors = nexa_object.build_code_vectors()",
"Visualization\nNow in order to discuss this with more detail and show how the whole process looks in at the graph level I present the plots.\nFirst we import all the required libraries.",
"from visualization.sensor_clustering import visualize_cluster_matrix\nfrom visualization.sensors import visualize_SLM\nfrom visualization.sensors import visualize_STDM_seaborn\nfrom visualization.time_cluster import visualize_time_cluster_matrix\nfrom visualization.code_vectors import visualize_code_vectors\n",
"Visualize SLM\nFirst we present the plot of the Sensor Lagged Matrix, which just represents the sensors in our system and all the possible lags until the klags quantity in order to show the overall structure of the time series",
"%matplotlib inline\nfig = visualize_SLM(nexa_object)\nplt.show(fig)",
"Visualize STDM (Spatio Temporal Distance Matrix)\nNow we get the usual correlation matrix between the data with the novelty that we also calculate the correlation between all the possible pairs of laggins and sensors.",
"%matplotlib qt\n# fig = visualize_STDM(nexa_object)\nfig = visualize_STDM_seaborn(nexa_object)\nplt.show(fig)",
"Visualize of Sensor Clusterings\nNow we show how the lagged sensors cluster",
"%matplotlib inline\nfig = visualize_cluster_matrix(nexa_object)",
"Visualize the time cluster\nThis one is a little bit more tricky. Here we take on of the centers (in this case the second center of the first cluster) and show how the center (code vector) of that and show how it looks. So here we have a center of the first cluster in other words.",
"%matplotlib inline\ncluster = 0\ntime_center = 1\nfig = visualize_time_cluster_matrix(nexa_object, cluster, time_center,\n cmap='coolwarm', inter='none',\n origin='upper', fontsize=16)\n\n",
"Visualize the Code Vectors\nHere we visualize the code vectors. We show as different cells the different cluster in the sensor space and as different colors the particular code vector that encode the signal at each particular moment in time.",
"%matplotlib inline\nfig = visualize_code_vectors(code_vectors)",
"Statistics of the Code Vectors\nNow we calculate the correlation between the two clusters. We expect them to have a very low correlation coefficient.",
"np.corrcoef(code_vectors, rowvar=0)",
"Discussion and Works To Do\n\nNew functions instead of an AR or autoregresive process (Anders suggested a square function with variable period)\nWhat to do, predict? identify structure? recover the generating patterns?\nCheck more statistics of the code vectors (non all-or-none competition), check distribution of the data in the clusters.\nDifferent clusternig algorithms using (Japanese guy paper's)\n\nNon-Academic Points to Discuss\n\nGot funding for the BrainDisc conference at Freiburg (http://www.bcf.uni-freiburg.de/events/conferences-workshops/20151001-phd-conference). Good point to know other students and interested in some of the talks.\nNIPS - Funding.\nJeannette or Alex, insurance.\n\nThings to do\n\nPlay with Phis\nPlay with other periodical functions\nPlay with spatial correlaiton that days exponentially -or with another time function-\nCreate lags with arbitrary distribution and distance instead of with linear one."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
robertoalotufo/ia898
|
master/gengaussian.ipynb
|
mit
|
[
"Demo Gaussian Generation\nIllustrate the generation of a d-dimensional Gaussian image\nDescription\nThe sequence below shows a technique to a d-dimensional Gaussian image,\nunderstanding the difficulties in computing an equation with vector and\nmatrix notation.\nOne dimensional case\nThe Gaussian function is a symmetric bell shaped function that is characterized by\ntwo parameters: mean and variance. The one-dimensional Gaussian function at point\n$x$ is given by the following equation, with mean $\\mu$ and variance \n$\\sigma^2$. The function is maximum at point $x=\\mu$ and it falls by the\nfactor $e^{-\\frac{1}{2}}$ (approx. 0.6) at point $x=\\sigma$ away from the mean.\nEquation\n$$ f(x) = \\frac{1}{\\sqrt{2 \\pi} \\sigma} exp\\left[ -\\frac{1}{2} \\frac{\\left(x - \\mu\\right)^2}{\\sigma^2} \\right] $$\nAs this function is scalar, it is possible to compute this function on N samples represented\nas a N x 1 vector ${\\mathbf x} = [x_0, x_1, x_2, \\ldots x_{N-1}]^\\mathrm{T}$:\n$$ f({\\mathbf x}) = \\frac{1}{\\sqrt{2 \\pi} \\sigma} exp\\left[ -\\frac{1}{2} \\frac{\\left({\\mathbf x} - \\mu\\right)^2}{\\sigma^2} \\right]$$",
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport sys,os\nia898path = os.path.abspath('/etc/jupyterhub/ia898_1s2017/')\nif ia898path not in sys.path:\n sys.path.append(ia898path)\nimport ia898.src as ia\n\n# First case: unidimensional\n# x: single value (single sample) or a row of values (many samples)\n# mu and sigma are scalar\ndef fun1(x, mu, sigma):\n return (1./(np.sqrt(2 * np.pi) * sigma)) * np.exp(-1./2 * ((x-mu)/ sigma)**2)\n\nprint('Computing the Gaussian function at a single point') \nex1 = \"fun1( 10, 10, 5)\"\nprint(ex1,\"=>\", eval(ex1))\n\nex2 = \"fun1( 15, 10, 5)\"\nprint(ex2,\"=>\", eval(ex2))",
"Computing the Gaussian function at many points, using the same code",
"ex3 = \"fun1( np.array([[10,15,20]]).T, 10, 5)\"\nprint(ex3,\"=>\\n\", eval(ex3))\n\nx = np.arange(-5,26).reshape(-1,1)\ny = fun1(x, 10, 5)\nplt.plot(x,y)",
"d-dimensional Case\nIf a sample point is a vector of dimension d: ${\\mathbf x} = [x_0, x_1, \\ldots x_{d-1}]^T$, \nthe d-dimensional Gaussian function is characterized by the mean \nvector: ${\\mathbf \\mu} = [\\mu_0, \\mu_1, \\ldots \\mu_{d-1}]^T$ and the symmetric square \ncovariance matrix:\n$$ \\Sigma_d = \\left( \n \\begin{array}{cccc} \n \\sigma_0^2 & \\sigma_0\\sigma_1 & \\ldots & \\sigma_0\\sigma_{d-1} \\\n \\sigma_1\\sigma_0 & \\sigma_1^2 & \\ldots & \\sigma_1\\sigma_{d-1} \\\n \\vdots & \\vdots & \\vdots & \\vdots \\\n \\sigma_{d-1}\\sigma_0 & \\sigma_{d-1}\\sigma_1 & \\ldots & \\sigma_{d-1}^2\n \\end{array}\n \\right) $$\nEquation\n$$ f({\\mathbf x}) = \\frac{1}{(2 \\pi)^{d/2}|\\Sigma|^{1/2}} exp\\left[ -\\frac{1}{2}\\left({\\mathbf x} - {\\mathbf \\mu} \\right)^\\mathrm{T}\\Sigma^{-1}\\left({\\mathbf x} - {\\mathbf \\mu} \\right)\\right] $$",
"# Second case: d-dimensional, single sample\n# x: single column vector (single sample with d characteristics)\n# mu: column vector, 1 x d\n# sigma: covariance matrix, square and symmetric, d x d\ndef funn(X, MU, COV):\n d = len(X)\n Xc = X - MU\n aux = np.linalg.inv(COV).dot(Xc)\n k = 1. * (Xc.T).dot(aux)\n return (1./((2 * np.pi)**(d/2.) * np.sqrt(np.linalg.det(COV)))) * np.exp(-1./2 * k)\n\nprint('\\ncomputing the Gaussian function at a single 3-D sample')\nX1 = np.array([[10],\n [5],\n [3]])\nMU = X1\nCOV = np.array([[10*10, 0, 0],\n [0, 5*5, 0],\n [0, 0, 3*3]])\nprint('X1=',X1)\nprint('MU=',MU)\nprint('COV=',COV)\nex4 = \"funn( X1, MU, COV)\"\nprint(ex4,\"=>\", eval(ex4))\n\nprint('\\nComputing the Gaussian function at two 3-D samples')\nprint('\\nNote that it does not work')\nX2 = 1. * X1/2\nX = np.hstack([X1,X2])\nprint('X=',X)\nex5 = \"funn( X, MU, COV)\"\nprint(ex5,\"=>\", eval(ex5))",
"Computing d-dimensional Gaussian function on n sample points directly\nThe exponent part of the d-dimensional equation is an inner product with the covariance\nmatrix in the center. When the data is arranged as a block matrix of n d-dimensional points,\nwe need to apply the inner product to each d-dimensional point. This is equivalent to\nuse only the diagonal results of the matrix product. More information can be seen at\nhttp://en.wikipedia.org/wiki/Matrix_multiplication Matrix Mulplication wikipedia page.\n.. equation:: latex\n :align: left\n{\\mathbf X_{n \\times d}} = [ {\\mathbf x_0}, {\\mathbf x_1}, \\ldots {\\mathbf x_{n-1}} ]\n\n.. equation:: latex\n :align: left\nf({\\mathbf X}) = \\frac{1}{(2 \\pi)^{d/2}|\\Sigma|^{1/2}} exp\\left[ diag ( -\\frac{1}{2} \\left({\\mathbf x} - {\\mathbf \\mu} \\right)^\\mathrm{T}\\Sigma^{-1}\\left({\\mathbf x} - {\\mathbf \\mu} \\right) ) \\right]\n\n.. code:: python\n# Third case: m n-dimensional computing\n# X: n column vectors (n samples with d characteristics)\n# MU: column vector, 1 x M\n# COV: covariance matrix, square and symmetric, d x d\ndef funm(X, MU, COV):\n d = len(MU)\n Xc = X - MU\n k = 1. * diagonal(dot(transpose(Xc), dot(inv(COV), Xc)))\n return (1./((2 * pi)**(d/2.) * sqrt(det(COV)))) * exp(-1./2 * k)\n\nprint '\\ncomputing the Gaussian function on two 3-D samples'\nX = array([[10, 5],\n [ 5, 2.5],\n [ 3, 1.5]])\nMU = transpose(array([[10, 5, 3]]))\nCOV = array([[10*10, 0, 0],\n [0, 5*5, 0],\n [0, 0, 3*3]])\nprint 'X=',X\nprint 'MU=',MU\nprint 'COV=',COV\nex6 = \"funm( X, MU, COV)\"\nprint ex6,\"=>\", eval(ex6)\n\n.. code:: python\n :show_code: yes\n :show_output: yes\n :show_images: yes\nfrom ia636 import ianormalize\n# Forth case: optimized m n-dimensional computing\n# X: n column vectors (n samples with d characteristics)\n# MU: column vector, 1 x M\n# COV: covariance matrix, square and symmetric, d x d\ndef funm1(X, MU, COV):\n d = len(MU)\n Xc = X - MU\n k = 1. 
* Xc * dot(inv(COV), Xc)\n k = sum(k,axis=0) #the sum is only applied to the rows\n return (1./((2 * pi)**(d/2.) * sqrt(det(COV)))) * exp(-1./2 * k)\n\nprint '\\ncomputing the Gaussian function on two 3-D samples'\nX = array([[10, 5],\n [ 5, 2.5],\n [ 3, 1.5]])\nMU = transpose(array([[10, 5, 3]]))\nCOV = array([[10*10, 0, 0],\n [0, 5*5, 0],\n [0, 0, 3*3]])\nprint 'X=',X\nprint 'MU=',MU\nprint 'COV=',COV\nex6 = \"funm1( X, MU, COV)\"\nprint ex6,\"=>\", eval(ex6)\n\ni,j = indices((50,100))\nx = vstack((ravel(i),ravel(j)))\nMU = transpose(array([[25, 40]]))\nCOV = array([[15*15, 0, ],\n [0, 10*10]])\ny = funm1(x, MU, COV).reshape((50,100))\nadshow(ianormalize(y).astype(uint8))\n\nSuggested Exercises\n\nCompute the 2 dimensional Gaussian image using the equation of a decomposible Gaussian\n below. You can use the meshgrid style programming where the image (coordinates and\n value) can be stored in an image like format.\n\n.. equation:: latex\nf(i,j) = \\frac{1}{2 \\pi \\sigma_i \\sigma_j} \n exp ( -\\frac{1}{2} \\left( \\frac{\\left(i - \\mu_i\\right)^2}{\\sigma_i^2} +\n \\frac{\\left(j - \\mu_j\\right)^2}{\\sigma_j^2} \\right) )\n\n\n\nShow that the previous 2 dimensional Gaussian image can be composed as an outer product \n of two one dimensional signals\n\n\nFind the equation of a 2 dimensional Gaussian image when the covariance matrix is \n of the form:\n\n\n.. equation:: latex\nCov(i,j) = \\left(\n \\begin{array}{cc}\n \\sigma_i^2 & \\sigma_i \\sigma_j \\\n \\sigma_i \\sigma_j & \\sigma_j^2\n \\end{array}\n \\right)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
zzsza/TIL
|
Tensorflow/Dataset.ipynb
|
mit
|
[
"import tensorflow as tf",
"참고 자료\n\n이찬우님 유튜브\n\n(1) 보편적 Case\n\nGenerator를 사용\npython api를 의존하기 때문에 병목이 있을 수 있음",
"def gen():\n for i in range(10):\n yield i\n\ndataset = tf.data.Dataset.from_generator(gen, tf.float32)\\\n .make_one_shot_iterator()\\\n .get_next()\n\nwith tf.Session() as sess:\n _data = sess.run(dataset)\n print(_data)\n\nwith tf.Session() as sess:\n for _ in range(10):\n _data = sess.run(dataset)\n print(_data)\n\n# End of sequence Error 발생\nwith tf.Session() as sess:\n for _ in range(12):\n _data = sess.run(dataset)\n print(_data)",
"generator로 label, feature까지 출력하고 싶다면",
"def gen():\n for i, j in zip(range(10, 20), range(10)):\n yield (i, j)\n\ndataset = tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32))\\\n .make_one_shot_iterator()\\\n .get_next()\n\nwith tf.Session() as sess:\n for _ in range(10):\n _label, _feature = sess.run(dataset)\n print(_label, _feature)",
"Minibatch를 하고 싶다면\n\nshuffle한 후, batch 설정",
"def gen():\n for i, j in zip(range(10, 1010), range(1000)):\n yield (i, j)\n\ndataset = tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32))\\\n .shuffle(7777)\\\n .batch(20)\\\n .make_one_shot_iterator()\\\n .get_next()\n\nwith tf.Session() as sess:\n for _ in range(10):\n _label, _feature = sess.run(dataset)\n print(_label, _feature)",
"(2) TextLineDataset\n\n병목을 해결 가능",
"dataset = tf.data.TextLineDataset(\"./test_data.csv\")\\\n .make_one_shot_iterator()\\\n .get_next()\n\nwith tf.Session() as sess:\n _data = sess.run(dataset)\n print(_data)",
"b'1,1,2,3,4,5,6,7,8,9' : decoding 필요",
"dataset = tf.data.TextLineDataset(\"./test_data.csv\")\\\n .make_one_shot_iterator()\\\n .get_next()\n\nlines = tf.decode_csv(dataset, record_defaults=[[0]]*10)\n\nfeature = tf.stack(lines[1:]) #, axis=1)\n\nlabel = lines[0]\n\nwith tf.Session() as sess:\n _fea, _lab = sess.run([feature, label])\n print(_lab, _fea)\n\ndataset = tf.data.TextLineDataset(\"./test_data.csv\")\\\n .batch(2)\\\n .repeat(999999)\\\n .make_one_shot_iterator()\\\n .get_next()\n\nlines = tf.decode_csv(dataset, record_defaults=[[0]]*10)\nfeature = tf.stack(lines[1:], axis=1)\nlabel = tf.expand_dims(lines[0], axis=-1)\n\nfeature = tf.cast(feature, tf.float32)\nlabel = tf.cast(label, tf.float32)\n# float형으로 정의해야 이상없이 연산이 됨\n\nwith tf.Session() as sess:\n _fea, _lab = sess.run([feature, label])\n for f, l in zip(_fea, _lab):\n print(f, l)",
"Modeling",
"layer1 = tf.layers.dense(feature, units=9, activation=tf.nn.relu)\nlayer2 = tf.layers.dense(layer1, units=9, activation=tf.nn.relu)\nlayer3 = tf.layers.dense(layer2, units=9, activation=tf.nn.relu)\nlayer4 = tf.layers.dense(layer3, units=9, activation=tf.nn.relu)\nout = tf.layers.dense(layer4, units=1)\n\nprint(\"label's shape {}\".format(label))\n# label's shape (?,) : [1, 2, 3, 4, 5, 6]\n# int면 계산이 안됨\n\nprint(\"out's shape {}\".format(out))\n# [[1], [2], [3], [4], [5], [6]]",
"loss, Optimizer 정의",
"loss = tf.losses.sigmoid_cross_entropy(label, out)",
"Shapes (?, 1) and (?,) are incompatible error\nshape를 맞춰주기 : tf.expand_dims 사용\n\n\nValue passed to parameter 'x' has DataType int32 not in list of allowed values error\nvalue의 type을 float32로 바꾸기 : tf.cast 사용\n\n\nAttempting to use uninitialized value accuracy/total error\naccuracy 관련 tf.local_variables_initializer() 실행",
"train_op = tf.train.GradientDescentOptimizer(1e-2).minimize(loss)\n\npred = tf.nn.sigmoid(out)\naccuracy = tf.metrics.accuracy(label, tf.round(pred))\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n for i in range(30):\n _, _loss, _acc = sess.run([train_op, loss, accuracy])\n print(\"step: {}, loss: {}, accuracy: {}\".format(i, _loss, _acc))",
"Accuracy\nTFRecord\n\nread, write 속도가 빠르게!"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
google-research/recsim
|
recsim/colab/RecSim_Overview.ipynb
|
apache-2.0
|
[
"Copyright 2019 The RecSim Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\nRunning RecSim\nIn this Colab we explore how to train and evaulate an agent within RecSim using the provided environments and clarify some basic concepts along the way. \nRecSim at a Glance\nRecSim is a configurable platform for simulating a recommendation system environment in which a recommender agent interacts with a corpus of documents (or recommendable items) and a set of users, in a natural but abstract fashion, to support the development of new recommendation algorithms.\nAt its core, a RecSim simulation consists of running the following event loop for some fixed number of sessions (episodes):\n\nfor episode in [1,...,number_of_episodes]:\n user = sample_user()\n recommended_slate = null\n while session_not_over:\n user_response = user_responds_to_recommendation(recommended_slate)\n available_documents = sample_documents_from_database()\n recommended_slate = agent_step(available_documents, user_response)\nThe document database (document model), user model, and recommender agent each have various internal components, and we will discuss how to design and implement them in later colabs (Developing an Environment, Developing an Agent). For now, we will see how to set up one of the ready-made environments that ship with RecSim in order to run a simulation.",
"# @title Install\n!pip install --upgrade --no-cache-dir recsim\n!pip install -q tf-nightly-2.0-preview\n# Load the TensorBoard notebook extension\n%load_ext tensorboard\n\n#@title Importing generics\nimport numpy as np\nimport tensorflow as tf",
"In RecSim, a user model and a document model are packaged together within an OpenAI Gym-style environment. In this tutorial, we will use the \"Interest Evolution\" environment used in Ie et al., as well as a full Slate-Q agent also described therein. Both come ready to use with RecSim. We import the environment from recsim.environments. Agents are found in recsim.agents. Finally, we need to import runner_lib from recsim.simulator, which executes the loop outlined above.",
"#@title Importing RecSim components \nfrom recsim.environments import interest_evolution\nfrom recsim.agents import full_slate_q_agent\nfrom recsim.simulator import runner_lib",
"Creating an Agent\nSimilarly to Dopamine, a RecSim experiment runner (simulator) consumes an environment creation function and an agent creation function. These functions are responsible for setting up the environment/agent based on external parameters. The interest evolution environment already comes with a creation function, so we will limit our attention to the agent.\nA create_agent function takes a tensorflow session, environment object, a training/eval flag and (optionally) a Tensorflow summary writer, which are passed to the agent for in-agent training statistics in Tensorboard (more on that below). In the case of full Slate-Q, we just need to extract the action and observation spaces from the environment and pass them to the agent constructor.",
"def create_agent(sess, environment, eval_mode, summary_writer=None):\n kwargs = {\n 'observation_space': environment.observation_space,\n 'action_space': environment.action_space,\n 'summary_writer': summary_writer,\n 'eval_mode': eval_mode,\n }\n return full_slate_q_agent.FullSlateQAgent(sess, **kwargs)",
"Training and Evaluating the Agent in a Simulation Loop\nBefore we run the agent, we need to set up a few environment parameters. These are the bare minimum:\n* slate_size sets the size of the set of elements presented to the user;\n* num_candidates specifies the number of documents present in the document database at any given time;\n* resample_documents specifies whether the set of candidates should be resampled between time steps according to the document distribution (more on this in later notebooks).\n* finally, we set the random seed.",
"seed = 0\nnp.random.seed(seed)\nenv_config = {\n 'num_candidates': 10,\n 'slate_size': 2,\n 'resample_documents': True,\n 'seed': seed,\n }",
"Once we've created a dictionary of these, we can run training, specifying additionally the number of training steps, number of iterations and a directory in which to checkpoint the agent.",
"tmp_base_dir = '/tmp/recsim/'\nrunner = runner_lib.TrainRunner(\n base_dir=tmp_base_dir,\n create_agent_fn=create_agent,\n env=interest_evolution.create_environment(env_config),\n episode_log_file=\"\",\n max_training_steps=50,\n num_iterations=10)\nrunner.run_experiment()",
"After training is finished, we can run a separate simulation to evaluate the agent's performance.",
" runner = runner_lib.EvalRunner(\n base_dir=tmp_base_dir,\n create_agent_fn=create_agent,\n env=interest_evolution.create_environment(env_config),\n max_eval_episodes=5,\n test_mode=True)\n runner.run_experiment()",
"The cumulative reward across the training episodes will be stored in base_dir/eval/. However, RecSim also exports a more detailed set of summaries, including environment specific ones, that can be visualized in a Tensorboard.",
"#@title Tensorboard\n%tensorboard --logdir=/tmp/recsim/\n",
"References\nSlateQ: A Tractable Decomposition for Reinforcement Learning with Recommendation Sets. IJCAI 2019: 2592-2599"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
jkeung/yellowbrick
|
examples/examples.ipynb
|
apache-2.0
|
[
"%matplotlib inline",
"Yellowbrick Examples\nThs notebook is a sample of the examples that yellowbrick provides.",
"import os\nimport sys \n\n# Modify the path \nsys.path.append(\"..\")\n\nimport pandas as pd\nimport yellowbrick as yb \nimport matplotlib.pyplot as plt ",
"Anscombe's Quartet\nYellowbrick has learned Anscombe's lesson - which is why we believe that visual diagnostics are vital to machine learning.",
"g = yb.anscombe()",
"Load Datasets for Example Code\nYellowbrick has provided several datasets wrangled from the UCI Machine Learning Repository to present the following examples. If you haven't downloaded the data, you can do so by running:\n$ python download.py\nIn the same directory as the example notebook. Note that this will create a directory called data that contains subdirectories with the given data.",
"from download import download_all \n\n## The path to the test data sets\nFIXTURES = os.path.join(os.getcwd(), \"data\")\n\n## Dataset loading mechanisms\ndatasets = {\n \"credit\": os.path.join(FIXTURES, \"credit\", \"credit.csv\"),\n \"concrete\": os.path.join(FIXTURES, \"concrete\", \"concrete.csv\"),\n \"occupancy\": os.path.join(FIXTURES, \"occupancy\", \"occupancy.csv\"),\n}\n\n\ndef load_data(name, download=True):\n \"\"\"\n Loads and wrangles the passed in dataset by name.\n If download is specified, this method will download any missing files. \n \"\"\"\n \n # Get the path from the datasets \n path = datasets[name]\n \n # Check if the data exists, otherwise download or raise \n if not os.path.exists(path):\n if download:\n download_all() \n else:\n raise ValueError((\n \"'{}' dataset has not been downloaded, \"\n \"use the download.py module to fetch datasets\"\n ).format(name))\n \n \n # Return the data frame\n return pd.read_csv(path)",
"Feature Analysis\nFeature analysis visualizers are designed to visualize instances in data space in order to detect features or targets that might impact downstream fitting. Because ML operates on high-dimensional data sets (usually at least 35), the visualizers focus on aggregation, optimization, and other techniques to give overviews of the data. It is our intent that the steering process will allow the data scientist to zoom and filter and explore the relationships between their instances and between dimensions.\nAt the moment we have three feature analysis visualizers implemented:\n\nRank2D: rank pairs of features to detect covariance \nRadViz: plot data points along axes ordered around a circle to detect separability \nParallel Coordinates: plot instances as lines along vertical axes to detect clusters \n\nFeature analysis visualizers implement the Transformer API from Scikit-Learn, meaning they can be used as intermediate transform steps in a Pipeline (particularly a VisualPipeline). They are instantiated in the same way, and then fit and transform are called on them, which draws the instances correctly. Finally poof or show is called which displays the image.",
"# Feature Analysis Imports \n# NOTE that all these are available for import from the `yellowbrick.features` module \nfrom yellowbrick.features.rankd import Rank2D \nfrom yellowbrick.features.radviz import RadViz \nfrom yellowbrick.features.pcoords import ParallelCoordinates ",
"Rank2D\nRank1D and Rank2D evaluate single features or pairs of features using a variety of metrics that score the features on the scale [-1, 1] or [0, 1] allowing them to be ranked. A similar concept to SPLOMs, the scores are visualized on a lower-left triangle heatmap so that patterns between pairs of features can be easily discerned for downstream analysis.",
"# Load the classification data set\ndata = load_data('credit') \n\n# Specify the features of interest\nfeatures = [\n 'limit', 'sex', 'edu', 'married', 'age', 'apr_delay', 'may_delay',\n 'jun_delay', 'jul_delay', 'aug_delay', 'sep_delay', 'apr_bill', 'may_bill',\n 'jun_bill', 'jul_bill', 'aug_bill', 'sep_bill', 'apr_pay', 'may_pay', 'jun_pay',\n 'jul_pay', 'aug_pay', 'sep_pay',\n ]\n\n# Extract the numpy arrays from the data frame \nX = data[features].as_matrix()\ny = data.default.as_matrix()\n\n# Instantiate the visualizer with the Covariance ranking algorithm \nvisualizer = Rank2D(features=features, algorithm='covariance')\n\nvisualizer.fit(X, y) # Fit the data to the visualizer\nvisualizer.transform(X) # Transform the data\nvisualizer.poof() # Draw/show/poof the data\n\n# Instantiate the visualizer with the Pearson ranking algorithm \nvisualizer = Rank2D(features=features, algorithm='pearson')\n\nvisualizer.fit(X, y) # Fit the data to the visualizer\nvisualizer.transform(X) # Transform the data\nvisualizer.poof() # Draw/show/poof the data",
"RadViz\nRadViz is a multivariate data visualization algorithm that plots each feature dimension uniformely around the circumference of a circle then plots points on the interior of the circle such that the point normalizes its values on the axes from the center to each arc. This meachanism allows as many dimensions as will easily fit on a circle, greatly expanding the dimensionality of the visualization. \nData scientists use this method to dect separability between classes. E.g. is there an opportunity to learn from the feature set or is there just too much noise?",
"# Load the classification data set\ndata = load_data('occupancy') \n\n# Specify the features of interest and the classes of the target \nfeatures = [\"temperature\", \"relative humidity\", \"light\", \"C02\", \"humidity\"]\nclasses = ['unoccupied', 'occupied']\n\n# Extract the numpy arrays from the data frame \nX = data[features].as_matrix()\ny = data.occupancy.as_matrix()\n\n# Instantiate the visualizer\nvisualizer = visualizer = RadViz(classes=classes, features=features)\n\nvisualizer.fit(X, y) # Fit the data to the visualizer\nvisualizer.transform(X) # Transform the data\nvisualizer.poof() # Draw/show/poof the data",
"For regression, the RadViz visualizer should use a color sequence to display the target information, as opposed to discrete colors.\nParallel Coordinates\nParallel coordinates displays each feature as a vertical axis spaced evenly along the horizontal, and each instance as a line drawn between each individual axis. This allows many dimensions; in fact given infinite horizontal space (e.g. a scrollbar) an infinite number of dimensions can be displayed! \nData scientists use this method to detect clusters of instances that have similar classes, and to note features that have high varaince or different distributions.",
"# Load the classification data set\ndata = load_data('occupancy') \n\n# Specify the features of interest and the classes of the target \nfeatures = [\"temperature\", \"relative humidity\", \"light\", \"C02\", \"humidity\"]\nclasses = ['unoccupied', 'occupied']\n\n# Extract the numpy arrays from the data frame \nX = data[features].as_matrix()\ny = data.occupancy.as_matrix()\n\n# Instantiate the visualizer\nvisualizer = visualizer = ParallelCoordinates(classes=classes, features=features)\n\nvisualizer.fit(X, y) # Fit the data to the visualizer\nvisualizer.transform(X) # Transform the data\nvisualizer.poof() # Draw/show/poof the data",
"Regressor Evaluation\nRegression models attempt to predict a target in a continuous space. Regressor score visualizers display the instances in model space to better understand how the model is making predictions. We currently have implemented two regressor evaluations:\n\nResiduals Plot: plot the difference between the expected and actual values \nPrediction Error: plot expected vs. the actual values in model space \n\nEstimator score visualizers wrap Scikit-Learn estimators and expose the Estimator API such that they have fit(), predict(), and score() methods that call the appropriate estimator methods under the hood. Score visualizers can wrap an estimator and be passed in as the final step in a Pipeline or VisualPipeline.",
"# Regression Evaluation Imports \n\nfrom sklearn.linear_model import Ridge, Lasso \nfrom sklearn.cross_validation import train_test_split\n\nfrom yellowbrick.regressor import PredictionError, ResidualsPlot",
"Residuals Plot\nA residual plot shows the residuals on the vertical axis and the independent variable on the horizontal axis. If the points are randomly dispersed around the horizontal axis, a linear regression model is appropriate for the data; otherwise, a non-linear model is more appropriate.",
"# Load the data\ndf = load_data('concrete')\nfeature_names = ['cement', 'slag', 'ash', 'water', 'splast', 'coarse', 'fine', 'age']\ntarget_name = 'strength'\n\n# Get the X and y data from the DataFrame \nX = df[feature_names].as_matrix()\ny = df[target_name].as_matrix() \n\n# Create the train and test data \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n# Instantiate the linear model and visualizer \nridge = Ridge()\nvisualizer = ResidualsPlot(ridge)\n\nvisualizer.fit(X_train, y_train) # Fit the training data to the visualizer\nvisualizer.score(X_test, y_test) # Evaluate the model on the test data \ng = visualizer.poof() # Draw/show/poof the data",
"Prediction Error Plot\nPlots the actual targets from the dataset against the predicted values generated by our model. This allows us to see how much variance is in the model. Data scientists diagnose this plot by comparing against the 45 degree line, where the prediction exactly matches the model.",
"# Load the data\ndf = load_data('concrete')\nfeature_names = ['cement', 'slag', 'ash', 'water', 'splast', 'coarse', 'fine', 'age']\ntarget_name = 'strength'\n\n# Get the X and y data from the DataFrame \nX = df[feature_names].as_matrix()\ny = df[target_name].as_matrix() \n\n# Create the train and test data \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n# Instantiate the linear model and visualizer \nlasso = Lasso()\nvisualizer = PredictionError(lasso)\n\nvisualizer.fit(X_train, y_train) # Fit the training data to the visualizer\nvisualizer.score(X_test, y_test) # Evaluate the model on the test data \ng = visualizer.poof() # Draw/show/poof the data",
"Classifier Evaluation\nClassification models attempt to predict a target in a discrete space, that is assign an instance of dependent variables one or more categories. Classification score visualizers display the differences between classes as well as a number of classifier-specific visual evaluations. We currently have implemented three classifier evaluations:\n\nClassificationReport: Presents the confusion matrix of the classifier as a heatmap \nROCAUC: Presents the graph of receiver operating characteristics along with area under the curve\nClassBalance: Displays the difference between the class balances and support \n\nEstimator score visualizers wrap Scikit-Learn estimators and expose the Estimator API such that they have fit(), predict(), and score() methods that call the appropriate estimator methods under the hood. Score visualizers can wrap an estimator and be passed in as the final step in a Pipeline or VisualPipeline.",
"# Classifier Evaluation Imports \n\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import LogisticRegression \nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.cross_validation import train_test_split\n\nfrom yellowbrick.classifier import ClassificationReport, ROCAUC, ClassBalance",
"Classification Report\nThe classification report visualizer displays the precision, recall, and F1 scores for the model. Integrates numerical scores as well color-coded heatmap in order for easy interpretation and detection.",
"# Load the classification data set\ndata = load_data('occupancy') \n\n# Specify the features of interest and the classes of the target \nfeatures = [\"temperature\", \"relative humidity\", \"light\", \"C02\", \"humidity\"]\nclasses = ['unoccupied', 'occupied']\n\n# Extract the numpy arrays from the data frame \nX = data[features].as_matrix()\ny = data.occupancy.as_matrix()\n\n# Create the train and test data \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n# Instantiate the classification model and visualizer \nbayes = GaussianNB()\nvisualizer = ClassificationReport(bayes, classes=classes)\n\nvisualizer.fit(X_train, y_train) # Fit the training data to the visualizer\nvisualizer.score(X_test, y_test) # Evaluate the model on the test data \ng = visualizer.poof() # Draw/show/poof the data",
"ROCAUC\nPlot the ROC to visualize the tradeoff between the classifier's sensitivity and specificity.",
"# Load the classification data set\ndata = load_data('occupancy') \n\n# Specify the features of interest and the classes of the target \nfeatures = [\"temperature\", \"relative humidity\", \"light\", \"C02\", \"humidity\"]\nclasses = ['unoccupied', 'occupied']\n\n# Extract the numpy arrays from the data frame \nX = data[features].as_matrix()\ny = data.occupancy.as_matrix()\n\n# Create the train and test data \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n# Instantiate the classification model and visualizer \nlogistic = LogisticRegression()\nvisualizer = ROCAUC(logistic)\n\nvisualizer.fit(X_train, y_train) # Fit the training data to the visualizer\nvisualizer.score(X_test, y_test) # Evaluate the model on the test data \ng = visualizer.poof() # Draw/show/poof the data",
"ClassBalance\nClass balance chart that shows the support for each class in the fitted classification model.",
"# Load the classification data set\ndata = load_data('occupancy') \n\n# Specify the features of interest and the classes of the target \nfeatures = [\"temperature\", \"relative humidity\", \"light\", \"C02\", \"humidity\"]\nclasses = ['unoccupied', 'occupied']\n\n# Extract the numpy arrays from the data frame \nX = data[features].as_matrix()\ny = data.occupancy.as_matrix()\n\n# Create the train and test data \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n# Instantiate the classification model and visualizer \nforest = RandomForestClassifier()\nvisualizer = ClassBalance(forest, classes=classes)\n\nvisualizer.fit(X_train, y_train) # Fit the training data to the visualizer\nvisualizer.score(X_test, y_test) # Evaluate the model on the test data \ng = visualizer.poof() # Draw/show/poof the data"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
manifoldai/merf
|
notebooks/MERF Gain Experiment.ipynb
|
mit
|
[
"%load_ext autoreload\n%autoreload 2\nimport os, sys\nsys.path.append('..')\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_context(\"poster\")\nimport numpy as np\nfrom sklearn.ensemble import RandomForestRegressor\nimport pandas as pd\nimport matplotlib as mpl\nmpl.rcParams['figure.figsize'] = (11,8)\nfrom merf.utils import MERFDataGenerator\nfrom merf.merf import MERF",
"Experimental Setup\nThere are some global parameters for all the experiments. Each experiment is run N_per_experiment times. The experiment itself is parametrized by three parameters of the generative model. We collect up the results of the experiments in a big list of dictinaries. This is then used to compute certain summary statistics after all the experiments are over.",
"# Globals\nnum_clusters_each_size = 20\ntrain_sizes = [1, 3, 5, 7, 9]\nknown_sizes = [9, 27, 45, 63, 81]\nnew_sizes = [10, 30, 50, 70, 90]\nn_estimators = 300\nmax_iterations = 100\ntrain_cluster_sizes = MERFDataGenerator.create_cluster_sizes_array(train_sizes, num_clusters_each_size)\nknown_cluster_sizes = MERFDataGenerator.create_cluster_sizes_array(known_sizes, num_clusters_each_size)\nnew_cluster_sizes = MERFDataGenerator.create_cluster_sizes_array(new_sizes, num_clusters_each_size)\n\n# Number of times to run each experiemnts \nN_per_experiment = 10\n\n# Defining the experiments to run\nexperiments = [{'id': 0, 'm': .8, 'sigma_b_sq': 0.9, 'sigma_e': 1},\n {'id': 1, 'm': .7, 'sigma_b_sq': 2.7, 'sigma_e': 1},\n {'id': 2, 'm': .6, 'sigma_b_sq': 4.5, 'sigma_e': 1},\n {'id': 3, 'm': .3, 'sigma_b_sq': 0.2, 'sigma_e': 1},\n {'id': 4, 'm': .3, 'sigma_b_sq': 0.5, 'sigma_e': 1},\n {'id': 5, 'm': .2, 'sigma_b_sq': 0.8, 'sigma_e': 1}]",
"Run Experiments",
"# Creating a dictionary to hold the results of the experiments\nresults = []\nfor experiment in experiments:\n results.append({'id': experiment['id'], 'ptev': [], 'prev': [],\n 'mse_known_rf_fixed': [], 'mse_known_rf_ohe': [], 'mse_known_merf': [], \n 'mse_new_rf_fixed': [], 'mse_new_rf_ohe': [], 'mse_new_merf': []})\n \nfor experiment, result in zip(experiments, results): \n for experiment_iteration in range(0, N_per_experiment):\n print(\"Experiment iteration: {}\".format(experiment_iteration))\n # Generate data for experiment\n dgm = MERFDataGenerator(m=experiment['m'], sigma_b=np.sqrt(experiment['sigma_b_sq']), sigma_e=experiment['sigma_e'])\n train, test_known, test_new, train_cluster_ids, ptev, prev = dgm.generate_split_samples(train_cluster_sizes, known_cluster_sizes, new_cluster_sizes)\n \n # Store off PTEV and PREV\n result['ptev'].append(ptev)\n result['prev'].append(prev)\n \n # Training Data Extract\n X_train = train[['X_0', 'X_1', 'X_2']]\n Z_train = train[['Z']]\n clusters_train = train['cluster']\n y_train = train['y']\n\n # Known Cluster Data Extract\n X_known = test_known[['X_0', 'X_1', 'X_2']]\n Z_known = test_known[['Z']]\n clusters_known = test_known['cluster']\n y_known = test_known['y']\n\n # New Cluster Data Extract\n X_new = test_new[['X_0', 'X_1', 'X_2']]\n Z_new = test_new[['Z']]\n clusters_new = test_new['cluster']\n y_new = test_new['y']\n\n # MERF\n print(\"---------------------MERF----------------------\")\n mrf = MERF(n_estimators=n_estimators, max_iterations=max_iterations)\n mrf.fit(X_train, Z_train, clusters_train, y_train)\n y_hat_known_merf = mrf.predict(X_known, Z_known, clusters_known)\n y_hat_new_merf = mrf.predict(X_new, Z_new, clusters_new)\n mse_known_merf = np.mean((y_known - y_hat_known_merf) ** 2)\n mse_new_merf = np.mean((y_new - y_hat_new_merf) ** 2)\n result['mse_known_merf'].append(mse_known_merf)\n result['mse_new_merf'].append(mse_new_merf)\n\n # Random Forest Fixed Only \n print(\"---------------------Random 
Forest Fixed Effect Only----------------------\")\n rf = RandomForestRegressor(n_estimators=n_estimators, n_jobs=-1)\n rf.fit(X_train, y_train)\n y_hat_known_rf_fixed = rf.predict(X_known)\n y_hat_new_rf_fixed = rf.predict(X_new)\n mse_known_rf_fixed = np.mean((y_known - y_hat_known_rf_fixed) ** 2)\n mse_new_rf_fixed = np.mean((y_new - y_hat_new_rf_fixed) ** 2)\n result['mse_known_rf_fixed'].append(mse_known_rf_fixed)\n result['mse_new_rf_fixed'].append(mse_new_rf_fixed)\n \n # Random Forest with OHE Cluster\n print(\"---------------------Random Forest w OHE Cluster----------------------\")\n X_train_w_ohe = MERFDataGenerator.create_X_with_ohe_clusters(X_train, clusters_train, train_cluster_ids)\n X_known_w_ohe = MERFDataGenerator.create_X_with_ohe_clusters(X_known, clusters_known, train_cluster_ids)\n X_new_w_ohe = MERFDataGenerator.create_X_with_ohe_clusters(X_new, clusters_new, train_cluster_ids)\n rf_ohe = RandomForestRegressor(n_estimators=n_estimators, n_jobs=-1)\n rf_ohe.fit(X_train_w_ohe, y_train)\n y_hat_known_rf_ohe = rf_ohe.predict(X_known_w_ohe)\n y_hat_new_rf_ohe = rf_ohe.predict(X_new_w_ohe)\n mse_known_rf_ohe = np.mean((y_known - y_hat_known_rf_ohe) ** 2)\n mse_new_rf_ohe = np.mean((y_new - y_hat_new_rf_ohe) ** 2)\n result['mse_known_rf_ohe'].append(mse_known_rf_ohe)\n result['mse_new_rf_ohe'].append(mse_new_rf_ohe)\n",
"Save and Load Results",
"import pickle\n# pickle.dump(results, open(\"results_merf100_n10.pkl\", \"wb\" ))\n\nresults = pickle.load(open(\"results_merf100_n10.pkl\", \"rb\"))",
"Summarize Results",
"def merf_gain(merf_mse, non_merf_mse):\n return 100 * np.mean((np.array(non_merf_mse) - np.array(merf_mse)) / np.array(non_merf_mse))\n\nsummary_results = pd.DataFrame()\nfor experiment, result in zip(experiments, results): \n summary_results.loc[result['id'], 'm'] = experiment['m']\n summary_results.loc[result['id'], 'sigma_b2'] = experiment['sigma_b_sq']\n summary_results.loc[result['id'], 'sigma_e2'] = experiment['sigma_e']\n summary_results.loc[result['id'], 'PTEV'] = np.round(np.mean(np.array(result['ptev'])), 2)\n summary_results.loc[result['id'], 'PREV'] = np.round(np.mean(np.array(result['prev'])), 2)\n summary_results.loc[result['id'], 'Gain RF (Known)'] = np.round(merf_gain(result['mse_known_merf'], result['mse_known_rf_fixed']), 2)\n summary_results.loc[result['id'], 'Gain RF (New)']= np.round(merf_gain(result['mse_new_merf'], result['mse_new_rf_fixed']), 2)\n summary_results.loc[result['id'], 'Gain RFOHE (Known)'] = np.round(merf_gain(result['mse_known_merf'], result['mse_known_rf_ohe']), 2)\n summary_results.loc[result['id'], 'Gain RFOHE (New)'] = np.round(merf_gain(result['mse_new_merf'], result['mse_new_rf_ohe']), 2)\n\nsummary_results\n\nplt.figure(figsize=[16, 8])\nplt.subplot(121)\nplt.plot(summary_results.loc[0:2, 'PREV'], \n summary_results.loc[0:2, 'Gain RF (Known)'], 'bs-', label='RF, PTEV=90')\nplt.plot(summary_results.loc[3:5, 'PREV'], \n summary_results.loc[3:5, 'Gain RF (Known)'], 'rs-', label='RF, PTEV=60')\nplt.plot(summary_results.loc[0:2, 'PREV'], \n summary_results.loc[0:2, 'Gain RFOHE (Known)'], 'b^--', label='RFOHE, PTEV=90')\nplt.plot(summary_results.loc[3:5, 'PREV'], \n summary_results.loc[3:5, 'Gain RFOHE (Known)'], 'r^--', label='RFOHE, PTEV=60')\nplt.grid('on')\nplt.xlabel('PREV')\nplt.ylabel('MERF Gain over Compared Algorithm')\n#plt.legend()\nplt.title('Known Clusters')\nplt.ylim([-5, 75])\nplt.xlim([0, 65])\nplt.subplot(122)\nplt.plot(summary_results.loc[0:2, 'PREV'], \n summary_results.loc[0:2, 'Gain RF (New)'], 'bs-', 
label='RF, PTEV=90')\nplt.plot(summary_results.loc[3:5, 'PREV'], \n summary_results.loc[3:5, 'Gain RF (New)'], 'rs-', label='RF, PTEV=60')\nplt.plot(summary_results.loc[0:2, 'PREV'], \n summary_results.loc[0:2, 'Gain RFOHE (New)'], 'b^--', label='RFOHE, PTEV=90')\nplt.plot(summary_results.loc[3:5, 'PREV'], \n summary_results.loc[3:5, 'Gain RFOHE (New)'], 'r^--', label='RFOHE, PTEV=60')\nplt.grid('on')\nplt.xlabel('PREV')\n#plt.ylabel('MERF %-gain over Compared Algorithm')\nplt.legend()\nplt.title('New Clusters')\nplt.ylim([-5, 75])\nplt.xlim([0, 65])"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
trolldbois/python-haystack-reverse
|
docs/Haystack_reverse_CLI.ipynb
|
gpl-3.0
|
[
"Usage reference guide for haystack-reverse\nthis is an example of every haystack-reverse commands.\nThe zeus.vmem.856.dump is there https://dl.dropboxusercontent.com/u/10222931/HAYSTACK/zeus.vmem.856.dump.tgz\nIt was extracted from pid 856 from the zeus.img image from http://malwarecookbook.googlecode.com/svn-history/r26/trunk/17/1/zeus.vmem.zip",
"!haystack-reverse --help",
"First we need to generate the analysis for the process memory dump.",
"!haystack-reverse ../test/dumps/vol/zeus.vmem.856.dump",
"Then we can start to use some of the cli",
"!ls -al ../test/dumps/vol/zeus.vmem.856.dump/cache/\n\n!cat ../test/dumps/vol/zeus.vmem.856.dump/cache/*.strings| grep -a http",
"Mhh interesting string... I wonder what memory chunk was that allocated in.",
"!haystack-reverse-show ../test/dumps/vol/zeus.vmem.856.dump 0xc64e8",
"Ah, that make sense.. It's a classic utf16 string. The whole allocated memory chunk is being used for a string.\nLets look at the bytes behind the scene.",
"!haystack-reverse-hex ../test/dumps/vol/zeus.vmem.856.dump 0xc64e8",
"I wonder if this record was referenced in some other record...\nMaybe we can find a parent record that points to this string...",
"!haystack-reverse-parents ../test/dumps/vol/zeus.vmem.856.dump 0xc64e8",
"Tough luck... What about the others ?",
"!haystack-reverse-parents ../test/dumps/vol/zeus.vmem.856.dump 0xc32d98\n\n!haystack-reverse-parents ../test/dumps/vol/zeus.vmem.856.dump 0xc329f8",
"That looks interesting. A record made of 82x 4-bytes pointers and some trailings zeroes/padding.\nLet's see if we can check that out with haystack CLI.",
"!cat ../test/structures/zeus/records.py",
"So, due to a little monkey patching, there is a CString ctypes types available in the haystack ctypes module.",
"!haystack-show ../test/dumps/vol/zeus.vmem.856.dump test.structures.zeus.records.array_of_pointers 0xc31e90",
"Oh, that is pretty good...\nbut it seems the first few strings are not quite right..",
"!haystack-reverse-hex ../test/dumps/vol/zeus.vmem.856.dump 0x00c32000\n!haystack-reverse-show ../test/dumps/vol/zeus.vmem.856.dump 0x00c32000",
"Mhh, it seems the first few strings are utf16 strings. Lets try with a Wide char string type.",
"!haystack-show ../test/dumps/vol/zeus.vmem.856.dump test.structures.zeus.records.array_of_wcharp 0xc31e90 ",
"Well, that more or less works.\nUltimately, you might want to clean the cache ( REMOVES ALL ANALYSIS FILES )"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ES-DOC/esdoc-jupyterhub
|
notebooks/nims-kma/cmip6/models/sandbox-2/toplevel.ipynb
|
gpl-3.0
|
[
"ES-DOC CMIP6 Model Properties - Toplevel\nMIP Era: CMIP6\nInstitute: NIMS-KMA\nSource ID: SANDBOX-2\nSub-Topics: Radiative Forcings. \nProperties: 85 (42 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:29\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook",
"# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'nims-kma', 'sandbox-2', 'toplevel')",
"Document Authors\nSet document authors",
"# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Contributors\nSpecify document contributors",
"# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Publication\nSpecify document publication status",
"# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)",
"Document Table of Contents\n1. Key Properties\n2. Key Properties --> Flux Correction\n3. Key Properties --> Genealogy\n4. Key Properties --> Software Properties\n5. Key Properties --> Coupling\n6. Key Properties --> Tuning Applied\n7. Key Properties --> Conservation --> Heat\n8. Key Properties --> Conservation --> Fresh Water\n9. Key Properties --> Conservation --> Salt\n10. Key Properties --> Conservation --> Momentum\n11. Radiative Forcings\n12. Radiative Forcings --> Greenhouse Gases --> CO2\n13. Radiative Forcings --> Greenhouse Gases --> CH4\n14. Radiative Forcings --> Greenhouse Gases --> N2O\n15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3\n16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3\n17. Radiative Forcings --> Greenhouse Gases --> CFC\n18. Radiative Forcings --> Aerosols --> SO4\n19. Radiative Forcings --> Aerosols --> Black Carbon\n20. Radiative Forcings --> Aerosols --> Organic Carbon\n21. Radiative Forcings --> Aerosols --> Nitrate\n22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect\n23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect\n24. Radiative Forcings --> Aerosols --> Dust\n25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic\n26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic\n27. Radiative Forcings --> Aerosols --> Sea Salt\n28. Radiative Forcings --> Other --> Land Use\n29. Radiative Forcings --> Other --> Solar \n1. Key Properties\nKey properties of the model\n1.1. Model Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nTop level overview of coupled model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.2. Model Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of coupled model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2. Key Properties --> Flux Correction\nFlux correction properties of the model\n2.1. Details\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how flux corrections are applied in the model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3. Key Properties --> Genealogy\nGenealogy and history of the model\n3.1. Year Released\nIs Required: TRUE Type: STRING Cardinality: 1.1\nYear the model was released",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.2. CMIP3 Parent\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCMIP3 parent if any",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.3. CMIP5 Parent\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCMIP5 parent if any",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.4. Previous Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nPreviously known as",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4. Key Properties --> Software Properties\nSoftware properties of model\n4.1. Repository\nIs Required: FALSE Type: STRING Cardinality: 0.1\nLocation of code for this component.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.2. Code Version\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCode version identifier.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.3. Code Languages\nIs Required: FALSE Type: STRING Cardinality: 0.N\nCode language(s).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.4. Components Structure\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe how model realms are structured into independent software components (coupled via a coupler) and internal software components.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.5. Coupler\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nOverarching coupling framework for model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OASIS\" \n# \"OASIS3-MCT\" \n# \"ESMF\" \n# \"NUOPC\" \n# \"Bespoke\" \n# \"Unknown\" \n# \"None\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"5. Key Properties --> Coupling\n**\n5.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of coupling in the model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.2. Atmosphere Double Flux\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"5.3. Atmosphere Fluxes Calculation Grid\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nWhere are the air-sea fluxes calculated",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Atmosphere grid\" \n# \"Ocean grid\" \n# \"Specific coupler grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"5.4. Atmosphere Relative Winds\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nAre relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6. Key Properties --> Tuning Applied\nTuning methodology for model\n6.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.2. Global Mean Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList set of metrics/diagnostics of the global mean state used in tuning model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.3. Regional Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.4. Trend Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList observed trend metrics/diagnostics used in tuning model/component (such as 20th century)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.5. Energy Balance\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.6. Fresh Water Balance\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7. Key Properties --> Conservation --> Heat\nGlobal heat convervation properties of the model\n7.1. Global\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how heat is conserved globally",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.2. Atmos Ocean Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how heat is conserved at the atmosphere/ocean coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.3. Atmos Land Interface\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how heat is conserved at the atmosphere/land coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.4. Atmos Sea-ice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how heat is conserved at the atmosphere/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.5. Ocean Seaice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how heat is conserved at the ocean/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.6. Land Ocean Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how heat is conserved at the land/ocean coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8. Key Properties --> Conservation --> Fresh Water\nGlobal fresh water convervation properties of the model\n8.1. Global\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how fresh_water is conserved globally",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.2. Atmos Ocean Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how fresh_water is conserved at the atmosphere/ocean coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.3. Atmos Land Interface\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how fresh water is conserved at the atmosphere/land coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.4. Atmos Sea-ice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.5. Ocean Seaice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how fresh water is conserved at the ocean/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.6. Runoff\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe how runoff is distributed and conserved",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.7. Iceberg Calving\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how iceberg calving is modeled and conserved",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.8. Endoreic Basins\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how endoreic basins (no ocean access) are treated",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.9. Snow Accumulation\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe how snow accumulation over land and over sea-ice is treated",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9. Key Properties --> Conservation --> Salt\nGlobal salt convervation properties of the model\n9.1. Ocean Seaice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how salt is conserved at the ocean/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"10. Key Properties --> Conservation --> Momentum\nGlobal momentum convervation properties of the model\n10.1. Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how momentum is conserved in the model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"11. Radiative Forcings\nRadiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5)\n11.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of radiative forcings (GHG and aerosols) implementation in model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"12. Radiative Forcings --> Greenhouse Gases --> CO2\nCarbon dioxide forcing\n12.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"13. Radiative Forcings --> Greenhouse Gases --> CH4\nMethane forcing\n13.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"14. Radiative Forcings --> Greenhouse Gases --> N2O\nNitrous oxide forcing\n14.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3\nTroposheric ozone forcing\n15.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3\nStratospheric ozone forcing\n16.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"16.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"17. Radiative Forcings --> Greenhouse Gases --> CFC\nOzone-depleting and non-ozone-depleting fluorinated gases forcing\n17.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.2. Equivalence Concentration\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDetails of any equivalence concentrations used",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"Option 1\" \n# \"Option 2\" \n# \"Option 3\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.3. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"18. Radiative Forcings --> Aerosols --> SO4\nSO4 aerosol forcing\n18.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"19. Radiative Forcings --> Aerosols --> Black Carbon\nBlack carbon aerosol forcing\n19.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"19.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"20. Radiative Forcings --> Aerosols --> Organic Carbon\nOrganic carbon aerosol forcing\n20.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"21. Radiative Forcings --> Aerosols --> Nitrate\nNitrate forcing\n21.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"21.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect\nCloud albedo effect forcing (RFaci)\n22.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22.2. Aerosol Effect On Ice Clouds\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nRadiative effects of aerosols on ice clouds are represented?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"22.3. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect\nCloud lifetime effect forcing (ERFaci)\n23.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23.2. Aerosol Effect On Ice Clouds\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nRadiative effects of aerosols on ice clouds are represented?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"23.3. RFaci From Sulfate Only\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nRadiative forcing from aerosol cloud interactions from sulfate aerosol only?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"23.4. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"24. Radiative Forcings --> Aerosols --> Dust\nDust forcing\n24.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"24.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic\nTropospheric volcanic forcing\n25.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.2. Historical Explosive Volcanic Aerosol Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in historical simulations",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.3. Future Explosive Volcanic Aerosol Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in future simulations",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.4. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic\nStratospheric volcanic forcing\n26.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26.2. Historical Explosive Volcanic Aerosol Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in historical simulations",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26.3. Future Explosive Volcanic Aerosol Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in future simulations",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26.4. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"27. Radiative Forcings --> Aerosols --> Sea Salt\nSea salt forcing\n27.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"28. Radiative Forcings --> Other --> Land Use\nLand use forcing\n28.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"28.2. Crop Change Only\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nLand use change represented via crop change only?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"28.3. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"29. Radiative Forcings --> Other --> Solar\nSolar forcing\n29.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow solar forcing is provided",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"irradiance\" \n# \"proton\" \n# \"electron\" \n# \"cosmic ray\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"29.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"©2017 ES-DOC"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
tensorflow/docs-l10n
|
site/ko/federated/tutorials/federated_learning_for_image_classification.ipynb
|
apache-2.0
|
[
"Copyright 2019 The TensorFlow Authors.",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"이미지 분류를 위한 Federated Learning\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://www.tensorflow.org/federated/tutorials/federated_learning_for_image_classification\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\">TensorFlow.org에서 보기</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/federated/tutorials/federated_learning_for_image_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Google Colab에서 실행하기</a></td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ko/federated/tutorials/federated_learning_for_image_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">GitHub에서소스 보기</a></td>\n</table>\n\n참고: 이 colab은 <code>tensorflow_federated</code> pip 패키지의 <a>최신 릴리즈 버전</a>에서 동작하는 것으로 확인되었지만, Tensorflow Federated 프로젝트는 아직 릴리즈 전 개발 중이며 master에서 동작하지 않을 수 있습니다.\n이 튜토리얼에서는 고전적인 MNIST 훈련 예제를 사용하여 TFF의 Federated Learning(FL) API 레이어(tff.learning - TensorFlow에서 구현된 사용자 제공 모델에 대해 페더레이션 훈련과 같은 일반적인 유형의 페더레이션 학습 작업을 수행하는 데 사용할 수 있는 상위 수준의 인터페이스 세트)를 소개합니다.\n이 튜토리얼과 Federated Learning API는 주로 자신의 TensorFlow 모델을 TFF에 연결하여 후자를 대부분 블랙 박스로 취급하려는 사용자를 대상으로 합니다. TFF에 대한 심층적인 이해와 자신의 페더레이션 학습 알고리즘을 구현하는 방법은 FC Core API 튜토리얼 - 사용자 정의 페더레이션 알고리즘 1부 및 2부를 참조하세요.\n<code>tff.learning</code>에 대한 자세한 내용은 <a>Text Generation용 Federated Learning</a> 튜토리얼에서 계속하세요. 반복 모델을 다루는 것 외에도 Keras를 사용한 평가와 결합된 페더레이션 학습을 통해 구체화를 위한 사전 훈련되고 직렬화된 Keras 모델을 로드하는 방법을 보여줍니다.\n시작하기 전에\n시작하기 전에 다음을 실행하여 환경이 올바르게 설정되었는지 확인합니다. 인사말이 표시되지 않으면 설치 가이드에서 지침을 참조하세요.",
"#@test {\"skip\": true}\n!pip install --quiet --upgrade tensorflow_federated_nightly\n!pip install --quiet --upgrade nest_asyncio\n\nimport nest_asyncio\nnest_asyncio.apply()\n\n%load_ext tensorboard\n\nimport collections\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nnp.random.seed(0)\n\ntff.federated_computation(lambda: 'Hello, World!')()",
"입력 데이터 준비하기\n데이터부터 시작하겠습니다. 페더레이션 학습에는 페더레이션 데이터세트, 즉 여러 사용자의 데이터 모음이 필요합니다. 페더레이션 데이터는 일반적으로 고유한 문제를 제기하는 비 i.i.d.입니다.\n실험을 용이하게 하기 위해 Leaf를 사용하여 재처리된 원래 NIST 데이터세트의 버전이 포함된 MNIST의 페더레이션 버전을 포함하여 몇 개의 데이터세트로 TFF 리포지토리를 시드하여 데이터가 원래 숫자 작성자에 의해 입력되도록 합니다. 작성자마다 고유한 스타일이 있기 때문에 이 데이터세트는 페더레이션 데이터세트에서 예상되는 non-i.i.d. 동작의 종류를 보여줍니다.\n로드하는 방법은 다음과 같습니다.",
"emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()",
"load_data()가 반환하는 데이터세트는 사용자 세트를 열거하고 특정 사용자의 데이터를 나타내는 tf.data.Dataset를 구성하고 개별 요소의 구조를 쿼리할 수 있는 인터페이스인 tff.simulation.ClientData의 인스턴스입니다. 이 인터페이스를 사용하여 데이터세트의 내용을 탐색하는 방법은 다음과 같습니다. 이 인터페이스를 사용하면 클라이언트 ID를 반복할 수 있지만, 이는 시뮬레이션 데이터의 특성일 뿐입니다. 곧 살펴보겠지만, 클라이언트 ID는 페더레이션 학습 프레임워크에서 사용되지 않습니다. 클라이언트 ID의 유일한 목적은 시뮬레이션을 위해 데이터의 하위 집합을 선택할 수 있도록 하는 것입니다.",
"len(emnist_train.client_ids)\n\nemnist_train.element_type_structure\n\nexample_dataset = emnist_train.create_tf_dataset_for_client(\n emnist_train.client_ids[0])\n\nexample_element = next(iter(example_dataset))\n\nexample_element['label'].numpy()\n\nfrom matplotlib import pyplot as plt\nplt.imshow(example_element['pixels'].numpy(), cmap='gray', aspect='equal')\nplt.grid(False)\n_ = plt.show()",
"페더레이션 데이터의 이질성 탐색하기\n페더레이션 데이터는 일반적으로 비 i.i.d.이며, 사용자는 일반적으로 사용 패턴에 따라 데이터 분포가 다릅니다. 일부 클라이언트는 기기에 훈련 예제가 적어 로컬에서 데이터가 부족할 수 있지만, 일부 클라이언트는 충분한 훈련 예제를 가지고 있습니다. 사용 가능한 EMNIST 데이터를 사용하여 페더레이션 시스템의 일반적인 데이터 이질성 개념을 살펴보겠습니다. 고객 데이터에 대한 이 심층 분석은 모든 데이터를 로컬에서 사용할 수 있는 시뮬레이션 환경이기 때문에 당사만 사용할 수 있다는 점에 유의하는 것이 중요합니다. 실제 운영 페더레이션 환경에서는 단일 클라이언트의 데이터를 검사할 수 없습니다.\n먼저, 하나의 시뮬레이션 기기에서 예제에 대한 느낌을 얻기 위해 한 클라이언트의 데이터를 샘플링해 보겠습니다. 당사가 사용하는 데이터세트는 고유한 작성자가 입력했기 때문에 한 클라이언트의 데이터는 한 사용자의 고유한 \"사용 패턴\"을 시뮬레이션하여 0부터 9까지의 숫자 샘플에 대한 한 사람의 손글씨를 나타냅니다.",
"## Example MNIST digits for one client\nfigure = plt.figure(figsize=(20, 4))\nj = 0\n\nfor example in example_dataset.take(40):\n plt.subplot(4, 10, j+1)\n plt.imshow(example['pixels'].numpy(), cmap='gray', aspect='equal')\n plt.axis('off')\n j += 1",
"이제 각 MNIST 숫자 레이블에 대한 각 클라이언트의 예제 수를 시각화해 보겠습니다. 페더레이션 환경에서 각 클라이언트의 예제 수는 사용자 동작에 따라 상당히 다를 수 있습니다.",
"# Number of examples per layer for a sample of clients\nf = plt.figure(figsize=(12, 7))\nf.suptitle('Label Counts for a Sample of Clients')\nfor i in range(6):\n client_dataset = emnist_train.create_tf_dataset_for_client(\n emnist_train.client_ids[i])\n plot_data = collections.defaultdict(list)\n for example in client_dataset:\n # Append counts individually per label to make plots\n # more colorful instead of one color per plot.\n label = example['label'].numpy()\n plot_data[label].append(label)\n plt.subplot(2, 3, i+1)\n plt.title('Client {}'.format(i))\n for j in range(10):\n plt.hist(\n plot_data[j],\n density=False,\n bins=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])",
"이제 각 MNIST 레이블에 대한 클라이언트별 평균 이미지를 시각화해 보겠습니다. 이 코드는 하나의 레이블에 대한 사용자의 모든 예제에 대한 각 픽셀 값의 평균을 생성합니다. 한 고객의 숫자에 대한 평균 이미지는 각 개인의 고유한 필기 스타일로 인해 같은 숫자에 대한 다른 고객의 평균 이미지와 다르게 보일 것입니다. 해당 로컬 라운드에서 해당 사용자의 고유 한 데이터에서 학습하므로 각 로컬 훈련 라운드가 각 클라이언트에서 다른 방향으로 모델을 어떻게 움직일지 뮤즈할 수 있습니다. 튜토리얼의 뒷부분에서 모든 클라이언트의 모델에 대한 각 업데이트를 가져와서 각 클라이언트의 고유한 데이터에서 학습한 새로운 글로벌 모델로 통합하는 방법을 살펴보겠습니다.",
"# Each client has different mean images, meaning each client will be nudging\n# the model in their own directions locally.\n\nfor i in range(5):\n client_dataset = emnist_train.create_tf_dataset_for_client(\n emnist_train.client_ids[i])\n plot_data = collections.defaultdict(list)\n for example in client_dataset:\n plot_data[example['label'].numpy()].append(example['pixels'].numpy())\n f = plt.figure(i, figsize=(12, 5))\n f.suptitle(\"Client #{}'s Mean Image Per Label\".format(i))\n for j in range(10):\n mean_img = np.mean(plot_data[j], 0)\n plt.subplot(2, 5, j+1)\n plt.imshow(mean_img.reshape((28, 28)))\n plt.axis('off')",
"사용자 데이터는 노이즈가 많고 레이블이 안정적이지 않을 수 있습니다. 예를 들어, 위의 클라이언트 #2의 데이터를 살펴보면 레이블 2의 경우, 노이즈가 더 많은 평균 이미지를 생성하는 레이블이 잘못 지정된 예가 있을 수 있습니다.\n입력 데이터 전처리\n데이터가 이미 tf.data.Dataset이므로 데이터세트 변환을 사용하여 전처리를 수행할 수 있습니다. 여기에서는 28x28 이미지를 784개 요소 배열로 병합하고, 개별 예를 셔플하고, 배치로 구성하고, Keras와 함께 사용할 수 있도록 특성의 이름을 pixels 및 label에서 x 및 y로 바꿉니다. 또한, 데이터세트를 repeat하여 여러 epoch를 실행합니다.",
"NUM_CLIENTS = 10\nNUM_EPOCHS = 5\nBATCH_SIZE = 20\nSHUFFLE_BUFFER = 100\nPREFETCH_BUFFER=10\n\ndef preprocess(dataset):\n\n def batch_format_fn(element):\n \"\"\"Flatten a batch `pixels` and return the features as an `OrderedDict`.\"\"\"\n return collections.OrderedDict(\n x=tf.reshape(element['pixels'], [-1, 784]),\n y=tf.reshape(element['label'], [-1, 1]))\n\n return dataset.repeat(NUM_EPOCHS).shuffle(SHUFFLE_BUFFER).batch(\n BATCH_SIZE).map(batch_format_fn).prefetch(PREFETCH_BUFFER)",
"이것이 동작하는지 확인합니다.",
"preprocessed_example_dataset = preprocess(example_dataset)\n\nsample_batch = tf.nest.map_structure(lambda x: x.numpy(),\n next(iter(preprocessed_example_dataset)))\n\nsample_batch",
"당사는 페더레이션 데이터세트를 구성하기 위한 거의 모든 구성 요소를 갖추고 있습니다.\n시뮬레이션에서 페더레이션 데이터를 TFF에 공급하는 방법 중 하나는 목록의 각 요소가 목록이든 tf.data.Dataset이든 상관없이 개별 사용자의 데이터를 보유하는 목록의 각 요소를 사용하여 간단히 Python 목록으로 만드는 것입니다. 후자를 제공하는 인터페이스가 이미 있으므로 사용해 보겠습니다.\n다음은 훈련 또는 평가 라운드에 대한 입력으로 주어진 사용자 세트의 데이터세트 목록을 구성하는 간단한 도우미 함수입니다.",
"def make_federated_data(client_data, client_ids):\n return [\n preprocess(client_data.create_tf_dataset_for_client(x))\n for x in client_ids\n ]",
"이제 클라이언트를 어떻게 선택할까요?\n일반적인 페더레이션 훈련 시나리오에서는 잠재적으로 매우 많은 수의 사용자 기기를 다루고 있으며, 이 중 일부만 주어진 시점에서 훈련에 사용할 수 있습니다. 예를 들어, 클라이언트 기기가 전원에 연결되어 있고 데이터 통신 연결 네트워크가 꺼져 있거나 유휴 상태일 때만 훈련에 참여하는 휴대폰인 경우입니다.\n물론, 시뮬레이션 환경에서는 모든 데이터를 로컬에서 사용할 수 있습니다. 통상적으로, 시뮬레이션을 실행할 때 일반적으로 각 라운드마다 다른 각 훈련 라운드에 참여할 클라이언트의 무작위 하위 집합을 샘플링합니다.\n즉, Federated Averaging 알고리즘에 대한 논문을 연구하면 알 수 있듯이, 각 라운드에 무작위로 샘플링된 클라이언트 하위 집합이 있는 시스템에서 수렴을 달성하는 데는 시간이 걸릴 수 있으며, 이 대화형 튜토리얼에서 수백 번의 라운드를 실행해야 하는 것은 비현실적입니다.\n대신 클라이언트 세트를 한 번 샘플링하고 수렴 속도를 높이기 위해 라운드에서 같은 세트를 재사용할 것입니다(의도적으로 이들 소수의 사용자 데이터에 과대적합임). 독자가 이 튜토리얼을 수정하여 무작위 샘플링을 시뮬레이션하는 것은 연습으로 남겨 둡니다. 매우 쉽습니다(한 번 수행하면 모델을 수렴하는 데 시간이 걸릴 수 있음을 명심하세요).",
"sample_clients = emnist_train.client_ids[0:NUM_CLIENTS]\n\nfederated_train_data = make_federated_data(emnist_train, sample_clients)\n\nprint('Number of client datasets: {l}'.format(l=len(federated_train_data)))\nprint('First dataset: {d}'.format(d=federated_train_data[0]))",
"Keras로 모델 만들기\nKeras를 사용하는 경우, Keras 모델을 구성하는 코드가 이미 있을 수 있습니다. 다음은 요구 사항에 맞는 간단한 모델의 예제입니다.",
"def create_keras_model():\n return tf.keras.models.Sequential([\n tf.keras.layers.Input(shape=(784,)),\n tf.keras.layers.Dense(10, kernel_initializer='zeros'),\n tf.keras.layers.Softmax(),\n ])",
"참고: 아직 모델을 컴파일하지 않습니다. 손실, 메트릭 및 옵티마이저는 나중에 소개됩니다.\nTFF와 함께 모델을 사용하려면, Keras와 유사하게 모델의 순방향 전달, 메타데이터 속성 등을 스탬핑하는 메서드를 노출하는 tff.learning.Model 인터페이스의 인스턴스로 모델을 래핑해야 하지만, 페더레이션 메트릭의 계산 프로세스를 제어하는 방법과 같은 추가 요소도 도입합니다. 지금은 이것에 대해 걱정하지 마세요. 위에서 정의한 것과 같은 Keras 모델이 있는 경우, 아래와 같이 <code>tff.learning.from_keras_model</code>를 호출하고 모델과 샘플 데이터 배치를 인수로 전달하여 TFF를 래핑할 수 있습니다.",
"def model_fn():\n # We _must_ create a new model here, and _not_ capture it from an external\n # scope. TFF will call this within different graph contexts.\n keras_model = create_keras_model()\n return tff.learning.from_keras_model(\n keras_model,\n input_spec=preprocessed_example_dataset.element_spec,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])",
"페더레이션 데이터에 대해 모델 훈련하기\nTFF와 함께 사용하기 위해 tff.learning.Model로 래핑한 모델이 있으므로 다음과 같이 도우미 함수 tff.learning.build_federated_averaging_process를 호출하여 TFF에서 Federated Averaging 알고리즘을 구성하도록 할 수 있습니다.\n인수는 이미 생성된 인스턴스가 아닌 생성자(예: 위의 model_fn)여야 하므로 모델 생성은 TFF에 의해 제어되는 컨텍스트에서 발생할 수 있습니다(그 이유가 궁금하다면, 사용자 정의 알고리즘에 대한 후속 튜토리얼을 읽어 보시기 바랍니다).\n아래의 Federated Averaging 알고리즘에 대한 중요한 참고 사항 중 하나는 client_optimizer 및 server_optimizer의 두 가지 옵티마이저입니다. client_optimizer는 각 클라이언트에서 로컬 모델 업데이트를 계산하는 데만 사용됩니다. server_optimizer는 평균 업데이트를 서버의 글로벌 모델에 적용합니다. 특히, 이는 사용되는 옵티마이저 및 학습률의 선택이 표준 iid 데이터세트에 대해 모델을 훈련하는 데 사용한 것과 달라야 할 수 있음을 의미합니다. 정규 SGD부터 시작하는 것이 좋습니다. 학습률이 평소보다 낮을 수 있습니다. 여기서 사용하는 학습률은 신중하게 조정되지 않았으므로 자유롭게 실험해 보세요.",
"iterative_process = tff.learning.build_federated_averaging_process(\n model_fn,\n client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),\n server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))",
"방금 무슨 일이 있었나요? TFF에서 한 쌍의 페더레이션 계산을 구성하고 tff.templates.IterativeProcess로 패키징하여 이들 계산을 한 쌍의 속성 initialize 및 next로 사용할 수 있습니다.\n간단히 말해서, 페더레이션 계산은 다양한 페더레이션 알고리즘을 표현할 수 있는 TFF의 내부 언어로 된 프로그램입니다(사용자 정의 알고리즘 튜토리얼에서 자세한 내용을 찾을 수 있음). 이 경우, 생성되고 iterative_process로 패키징된 두 가지 계산은 Federated Averaging을 구현합니다.\n실제 페더레이션 학습 설정에서 실행될 수 있는 방식으로 계산을 정의하는 것이 TFF의 목표이지만, 현재는 로컬 실행 시뮬레이션 런타임만 구현됩니다. 시뮬레이터에서 계산을 실행하려면 Python 함수처럼 간단히 호출하면 됩니다. 이 기본 해석 환경은 고성능을 위해 설계되지 않았지만, 이 튜토리얼에는 충분합니다. 향후 릴리스에서 대규모 연구를 용이하게 하기 위해 고성능 시뮬레이션 런타임을 제공할 것으로 기대합니다.\ninitialize 계산부터 시작하겠습니다. 모든 페더레이션 계산의 경우와 마찬가지로 이를 함수로 생각할 수 있습니다. 계산은 인수를 사용하지 않고 하나의 결과를 반환합니다. 즉, 서버에서 Federated Averaging 프로세스의 상태를 나타냅니다. TFF의 세부 사항에 대해 자세히 알아보고 싶지는 않지만, 이 상태가 어떻게 생겼는지 확인하는 것이 도움이 될 수 있습니다. 다음과 같이 시각화할 수 있습니다.",
"str(iterative_process.initialize.type_signature)",
"위의 형식 서명이 처음에는 다소 모호해 보일 수 있지만, 서버 상태는 model(모든 기기에 배포될 MNIST의 초기 모델 매개변수)과 optimizer_state(서버에서 유지 관리하는 추가 정보, 하이퍼 매개변수 일정 등에 사용할 라운드 수 등)로 구성됩니다..\ninitialize 계산을 호출하여 서버 상태를 구성해 보겠습니다.",
"state = iterative_process.initialize()",
"두 번째 페더레이션 계산 쌍인 next는 서버 상태(모델 매개변수 포함)를 클라이언트에 푸시, 로컬 데이터에 대한 기기 내 훈련, 모델 업데이트 수집 및 평균화로 구성된 단일 라운드의 페더레이션 평균화를 나타내며, 서버에서 업데이트된 새 모델을 생성합니다.\n개념적으로, next과 같은 함수형 형식 서명을 갖는 것으로 생각할 수 있습니다.\nSERVER_STATE, FEDERATED_DATA -> SERVER_STATE, TRAINING_METRICS\n특히, next()는 서버에서 실행되는 함수가 아니라 전체 분산 계산의 선언적 함수형 표현으로 생각해야 합니다. 일부 입력은 서버( SERVER_STATE)에서 제공하지만, 참여하는 각 기기는 자체 로컬 데이터트를 제공합니다.\n라운드 한 번 훈련을 실행하고 결과를 시각화해 보겠습니다. 사용자 샘플을 위해 위에서 이미 생성한 페더레이션 데이터를 사용할 수 있습니다.",
"state, metrics = iterative_process.next(state, federated_train_data)\nprint('round 1, metrics={}'.format(metrics))",
"몇 라운드를 더 실행해 봅시다. 앞서 언급했듯이, 일반적으로 이 시점에서 사용자가 지속적으로 오고가는 현실적인 배포를 시뮬레이션하기 위해 각 라운드에서 무작위로 선택한 새로운 사용자 샘플에서 시뮬레이션 데이터의 하위 집합을 선택하지만, 이 대화형 노트북에서는 데모를 위해 같은 사용자를 재사용하여 시스템이 빠르게 수렴되도록 합니다.",
"NUM_ROUNDS = 11\nfor round_num in range(2, NUM_ROUNDS):\n state, metrics = iterative_process.next(state, federated_train_data)\n print('round {:2d}, metrics={}'.format(round_num, metrics))",
"페더레이션 훈련의 각 라운드 후에 훈련 손실이 감소하여 모델이 수렴되고 있음을 나타냅니다. 이러한 훈련 메트릭에는 몇 가지 중요한 주의 사항이 있지만, 이 튜토리얼 뒷부분의 평가 섹션을 참조하세요.\nTensorBoard에 모델 메트릭 표시, 다음으로 Tensorboard를 사용하여 이들 페더레이션 계산의 메트릭을 시각화해 보겠습니다.\n메트릭을 기록할 디렉터리와 해당 요약 작성기를 만드는 것으로 시작하겠습니다.",
"#@test {\"skip\": true}\nlogdir = \"/tmp/logs/scalars/training/\"\nsummary_writer = tf.summary.create_file_writer(logdir)\nstate = iterative_process.initialize()",
"같은 요약 작성기를 사용하여 관련 스칼라 메트릭을 플롯합니다.",
"#@test {\"skip\": true}\nwith summary_writer.as_default():\n for round_num in range(1, NUM_ROUNDS):\n state, metrics = iterative_process.next(state, federated_train_data)\n for name, value in metrics.train._asdict().items():\n tf.summary.scalar(name, value, step=round_num)",
"위에 지정된 루트 로그 디렉터리로 TensorBoard를 시작합니다. 데이터를 로드하는 데 몇 초 정도 걸릴 수 있습니다.",
"#@test {\"skip\": true}\n%tensorboard --logdir /tmp/logs/scalars/ --port=0\n\n#@test {\"skip\": true}\n# Run this this cell to clean your directory of old output for future graphs from this directory.\n!rm -R /tmp/logs/scalars/*",
"같은 방식으로 평가 메트릭을 보려면 \"logs/scalars/eval\"과 같은 별도의 eval 폴더를 만들어 TensorBoard에 쓸 수 있습니다.\n모델 구현 사용자 정의하기\nKeras는 TensorFlow용으로 권장되는 상위 수준 모델 API이며, 가능하면 TFF에서 Keras 모델(tff.learning.from_keras_model를 통해)을 사용하는 것이 좋습니다.\n그러나 tff.learning은 페더레이션 학습을 위해 모델을 사용하는 데 필요한 최소한의 기능을 노출하는 하위 수준 모델 인터페이스 인 tff.learning.Model을 제공합니다. 이 인터페이스(아마도 tf.keras.layers와 같은 구성 요소를 계속 사용)를 직접 구현하면 페더레이션 학습 알고리즘의 내부를 수정하지 않고도 최대한으로 사용자 정의가 가능합니다.\n처음부터 다시 한번 해봅시다.\n모델 변수, 순방향 전달 및 메트릭 정의하기\n첫 번째 단계는 작업할 TensorFlow 변수를 식별하는 것입니다. 다음 코드를 더 읽기 쉽게 만들기 위해 전체 집합을 나타내는 데이터 구조를 정의하겠습니다. 여기에는 훈련할 weights와 bias와 같은 변수와 함께 loss_sum, accuracy_sum 및 num_examples와 같은 훈련 중에 업데이트할 다양한 누적 통계 및 카운터를 보유하는 변수도 포함됩니다.",
"MnistVariables = collections.namedtuple(\n 'MnistVariables', 'weights bias num_examples loss_sum accuracy_sum')",
"다음은 변수를 생성하는 메서드입니다. 간단하게 하기 위해 모든 통계를 tf.float32로 표시합니다. 그러면 이후 단계에서 유형 변환이 필요하지 않습니다. 변수 이니셜라이저를 람다로 래핑하는 것은 리소스 변수에서 요구하는 사항입니다.",
"def create_mnist_variables():\n return MnistVariables(\n weights=tf.Variable(\n lambda: tf.zeros(dtype=tf.float32, shape=(784, 10)),\n name='weights',\n trainable=True),\n bias=tf.Variable(\n lambda: tf.zeros(dtype=tf.float32, shape=(10)),\n name='bias',\n trainable=True),\n num_examples=tf.Variable(0.0, name='num_examples', trainable=False),\n loss_sum=tf.Variable(0.0, name='loss_sum', trainable=False),\n accuracy_sum=tf.Variable(0.0, name='accuracy_sum', trainable=False))",
"모델 매개변수 및 누적 통계에 대한 변수를 사용하여 다음과 같이 손실을 계산하고, 예측값을 내보내고, 단일 배치의 입력 데이터에 대한 누적 통계를 업데이트하는 순방향 전달 메서드를 정의할 수 있습니다.",
"def mnist_forward_pass(variables, batch):\n y = tf.nn.softmax(tf.matmul(batch['x'], variables.weights) + variables.bias)\n predictions = tf.cast(tf.argmax(y, 1), tf.int32)\n\n flat_labels = tf.reshape(batch['y'], [-1])\n loss = -tf.reduce_mean(\n tf.reduce_sum(tf.one_hot(flat_labels, 10) * tf.math.log(y), axis=[1]))\n accuracy = tf.reduce_mean(\n tf.cast(tf.equal(predictions, flat_labels), tf.float32))\n\n num_examples = tf.cast(tf.size(batch['y']), tf.float32)\n\n variables.num_examples.assign_add(num_examples)\n variables.loss_sum.assign_add(loss * num_examples)\n variables.accuracy_sum.assign_add(accuracy * num_examples)\n\n return loss, predictions",
"다음으로 다시 TensorFlow를 사용하여 로컬 메트릭 세트를 반환하는 함수를 정의합니다. 로컬 메트릭 세트는 페더레이션 학습 또는 평가 프로세스에서 서버로 집계할 수 있는 값(자동으로 처리되는 모델 업데이트에 추가)입니다.\n여기서는 단순히 평균 loss 및 accuracy와 num_examples를 반환하며, 페더레이션 집계를 계산할 때 다른 사용자의 기여도에 올바르게 가중치를 적용해야 합니다.",
"def get_local_mnist_metrics(variables):\n return collections.OrderedDict(\n num_examples=variables.num_examples,\n loss=variables.loss_sum / variables.num_examples,\n accuracy=variables.accuracy_sum / variables.num_examples)",
"마지막으로, get_local_mnist_metrics를 통해 각 기기에서 내보낸 로컬 메트릭을 집계하는 방법을 결정해야 합니다. 이것은 TensorFlow로 작성되지 않은 코드의 유일한 부분입니다. TFF로 표현된 페더레이션 계산입니다. 더 자세히 알고 싶다면, 사용자 정의 알고리즘 튜토리얼을 살펴보지만, 대부분의 애플리케이션에서는 그럴 필요가 없습니다. 아래 표시된 패턴의 변형으로 충분합니다. 다음과 같습니다.",
"@tff.federated_computation\ndef aggregate_mnist_metrics_across_clients(metrics):\n return collections.OrderedDict(\n num_examples=tff.federated_sum(metrics.num_examples),\n loss=tff.federated_mean(metrics.loss, metrics.num_examples),\n accuracy=tff.federated_mean(metrics.accuracy, metrics.num_examples)) ",
"입력 metrics 인수는 위의 get_local_mnist_metrics에서 반환한 OrderedDict에 해당하지만, 결정적으로 해당 값은 더 이상 tf.Tensors가 아닙니다. tff.Value로 \"박스화\"되어 있으므로 더 이상 TensorFlow를 사용하여 조작할 수 없지만, tff.federated_mean 및 tff.federated_sum과 같은 TFF의 페더레이션 연산자만 사용할 수 있습니다. 반환된 전역 집계 사전은 서버에서 사용할 수 있는 메트릭 세트를 정의합니다.\ntff.learning.Model의 인스턴스 생성하기\n위의 모든 항목이 준비되었으므로 TFF가 Keras 모델을 수집하도록 할 때 생성되는 것과 유사한 TFF와 함께 사용할 모델 표현을 구성할 준비가 되었습니다.",
"class MnistModel(tff.learning.Model):\n\n def __init__(self):\n self._variables = create_mnist_variables()\n\n @property\n def trainable_variables(self):\n return [self._variables.weights, self._variables.bias]\n\n @property\n def non_trainable_variables(self):\n return []\n\n @property\n def local_variables(self):\n return [\n self._variables.num_examples, self._variables.loss_sum,\n self._variables.accuracy_sum\n ]\n\n @property\n def input_spec(self):\n return collections.OrderedDict(\n x=tf.TensorSpec([None, 784], tf.float32),\n y=tf.TensorSpec([None, 1], tf.int32))\n\n @tf.function\n def forward_pass(self, batch, training=True):\n del training\n loss, predictions = mnist_forward_pass(self._variables, batch)\n num_exmaples = tf.shape(batch['x'])[0]\n return tff.learning.BatchOutput(\n loss=loss, predictions=predictions, num_examples=num_exmaples)\n\n @tf.function\n def report_local_outputs(self):\n return get_local_mnist_metrics(self._variables)\n\n @property\n def federated_output_computation(self):\n return aggregate_mnist_metrics_across_clients",
"보시다시피, tff.learning.Model에서 정의한 추상 메서드 및 속성은 변수를 도입하고 손실 및 통계를 정의한 이전 섹션의 코드 조각에 해당합니다.\n다음은 강조할 만한 몇 가지 사항입니다.\n\nTFF는 런타임에 Python을 사용하지 않으므로 모델에서 사용할 모든 상태를 TensorFlow 변수로 캡처해야 합니다(코드는 모바일 기기에 배포할 수 있도록 작성되어야 합니다. 그 이유에 대한 자세한 내용은 사용자 정의 알고리즘 튜토리얼을 참조하세요).\n일반적으로 TFF는 강력한 형식의 환경이며 모든 구성 요소에 대한 형식 서명을 결정하려고 하기 때문에 모델은 허용하는 데이터 형식(input_spec)을 설명해야 합니다. 모델의 입력 형식을 선언하는 것은 필수입니다.\n기술적으로는 필요하지 않지만, 모든 TensorFlow 로직(순방향 전달, 메트릭 계산 등)을 tf.function로 래핑하는 것이 좋습니다. 이렇게 하면 TensorFlow가 직렬화될 수 있고 명시적인 제어 종속성이 필요하지 않습니다.\n\n위의 내용은 Federated SGD와 같은 평가 및 알고리즘에 충분합니다. 그러나 Federated Averaging의 경우 모델이 각 배치에서 로컬로 훈련하는 방법을 지정해야 합니다. Federated Averaging 알고리즘을 빌드할 때 로컬 옵티마이저를 지정합니다.\n새 모델로 페더레이션 훈련 시뮬레이션하기\n위의 모든 사항이 준빈되면, 프로세스의 나머지 부분은 이미 본 것과 같이 보입니다. 모델 생성자를 새 모델 클래스의 생성자로 교체하고 생성한 반복 프로세스에서 두 개의 페더레이션 계산을 사용하여 훈련 라운드를 순환합니다.",
"iterative_process = tff.learning.build_federated_averaging_process(\n MnistModel,\n client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02))\n\nstate = iterative_process.initialize()\n\nstate, metrics = iterative_process.next(state, federated_train_data)\nprint('round 1, metrics={}'.format(metrics))\n\nfor round_num in range(2, 11):\n state, metrics = iterative_process.next(state, federated_train_data)\n print('round {:2d}, metrics={}'.format(round_num, metrics))",
"TensorBoard 내에서 이들 메트릭을 보려면, 위의 \"TensorBoard에서 모델 메트릭 표시하기\"에 나열된 단계를 참조하세요.\n평가\n지금까지의 모든 실험은 페더레이션 훈련 메트릭(라운드의 모든 클라이언트에 걸쳐 훈련된 모든 데이터 배치에 대한 평균 메트릭)만 제시했습니다. 이는 특히 단순성을 위해 각 라운드에서 같은 클라이언트 세트를 사용했기 때문에 과대적합에 대한 일반적인 우려가 있지만, Federated Averaging 알고리즘에 특정한 훈련 메트릭에는 과대적합이라는 추가 개념이 있습니다. 이것은 각 클라이언트가 단일 데이터 배치를 가지고 있다고 상상하고 많은 반복(epoch) 동안 해당 배치에 대해 훈련하는 경우 가장 쉽게 확인할 수 있습니다. 이 경우 로컬 모델은 해당 배치 하나에 빠르게 정확히 맞으므로 평균적인 로컬 정확성 메트릭은 1.0에 접근합니다. 따라서 이들 훈련 메트릭은 훈련이 진행되고 있다는 신호로 간주될 수 있지만, 그 이상은 아닙니다.\n페더레이션 데이터에 대한 평가를 수행하려면, <code>tff.learning.build_federated_evaluation</code> 함수를 사용하고 모델 생성자에 인수로 전달하는, 이 용도로 설계된 또 다른 <em>페더레이션 계산</em>을 구성할 수 있습니다. MnistTrainableModel를 사용했던 Federated Averaging과는 달리, MnistMode을 전달하면 충분합니다. 평가는 경사 하강을 수행하지 않으며 옵티마이저를 구성할 필요가 없습니다.\n실험과 연구를 위해 중앙 집중식 테스트 데이터세트를 사용할 수 있는 경우, 텍스트 생성을 위한 페더레이션 학습은 다른 평가 옵션을 보여줍니다. 페더레이션 학습에서 훈련된 가중치를 가져와 표준 Keras 모델에 적용한 다음 중앙 집중식 데이터세트에서 tf.keras.models.Model.evaluate()를 호출하면 됩니다.",
"evaluation = tff.learning.build_federated_evaluation(MnistModel)",
"다음과 같이 평가 함수의 추상 형식 서명을 검사할 수 있습니다.",
"str(evaluation.type_signature)",
"이 시점에서 세부 사항에 대해 걱정할 필요는 없습니다. tff.templates.IterativeProcess.next와 비슷하지만, 두 가지 중요한 차이점이 있는 다음과 같은 일반적인 형식을 취한다는 점만 알아 두십시오. 첫째, 평가에서는 모델이나 상태의 다른 측면을 수정하지 않기 때문에 서버 상태를 반환하지 않습니다. 상태 비저장으로 생각할 수 있습니다. 둘째, 평가에는 모델만 필요하며 옵티마이저 변수와 같이 훈련과 관련될 수 있는 서버 상태의 다른 부분이 필요하지 않습니다.\nSERVER_MODEL, FEDERATED_DATA -> TRAINING_METRICS\n훈련 중에 도달한 최신 상태에 대한 평가를 호출해 보겠습니다. 서버 상태에서 훈련된 최신 모델을 추출하려면 다음과 같이 .model 멤버에 액세스하기만 하면 됩니다.",
"train_metrics = evaluation(state.model, federated_train_data)",
"평가 결과는 다음과 같습니다. 위의 마지막 훈련 라운드에서 보고된 것보다 수치가 약간 더 좋아 보입니다. 일반적으로, 반복 훈련 프로세스에서 보고된 훈련 메트릭은 일반적으로 훈련 라운드 시작 시 모델의 성능을 반영하므로 평가 메트릭은 항상 한 단계 앞서 있습니다.",
"str(train_metrics)",
"이제 페더레이션 데이터의 테스트 샘플을 컴파일하고 테스트 데이터에 대한 평가를 다시 실행해 보겠습니다. 데이터는 실제 사용자의 같은 샘플에서 제공되지만, 별개의 보류된 데이터세트에서 제공됩니다.",
"federated_test_data = make_federated_data(emnist_test, sample_clients)\n\nlen(federated_test_data), federated_test_data[0]\n\ntest_metrics = evaluation(state.model, federated_test_data)\n\nstr(test_metrics)",
"이것으로 튜토리얼을 마칩니다. 매개변수(예: 배치 크기, 사용자 수, epoch, 학습률 등)를 사용하여 위의 코드를 수정하여 각 라운드에서 사용자의 무작위 샘플에 대한 훈련을 시뮬레이션하고 당사의 다른 튜토리얼을 탐색하는 것이 좋습니다."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
SheffieldML/notebook
|
compbio/SpatioTemporalGeneProteinDrosophila.ipynb
|
bsd-3-clause
|
[
"Partial Differential Equations and Gaussian Process with GPy\npresented at the EBI BioPreDyn Course 'The Systems Biology Modelling Cycle'\nMu Niu, Neil Lawrence, 12th May 20014, University of Sheffield\nThe Spatio-Temporal Model\nIn this notebook we consider the latent force model paradigm (<a href=\"http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6514873\">Alvarez et al, 2013</a>, <a href=\"http://arxiv.org/abs/1107.2699\">Alvarez et al, 2011</a>, <a href=\"http://jmlr.org/proceedings/papers/v5/alvarez09a/alvarez09a.pdf\">Alvarez et al, 2009</a>) to drive spatio-temporal differential equation with a Gaussian process. Latent force models are differential equations whose initial conditions or driving forces are given by a stochastic process. Linear latent force models are linear (partial or ordinary) differential equations. If a linear latent force model is driven by a Gaussian process latent force, this describes a joint Gaussian process distribution across all the variables of interest. The Gaussian process covariance function encodes the relationships between the variables, as proscribed by the differential equation, through the covariance function. \nThis direction of research is part of our ideas as to how to merge mechanistic and statistical models of data. It also maps onto our ideas about 'Gaussian processes over everything'. The covariance function interelates the protein and mRNA concentrations in the same model.\nDifferential Equation Model\nA model of post-transcriptional processing is formulated to describe the spatio-temporal Drosophila\nprotein expression data (<a href=\"http://www.ploscompbiol.org/article/info%3Adoi%2F10.1371%2Fjournal.pcbi.1003281\">Becker et al, 2013</a>, <a href=\"http://arxiv.org/abs/1107.2699\">Alvarez et al, 2011</a>). Protein production is considered to be linearly dependent on the concentration of mRNA at an earlier time point. The model also allows for diffusion of protein between nuclei and linear protein decay. 
These processes are dependent on the diffusion parameter and the degradation rate of protein respectively.\n\\begin{equation}\n a \\frac{\\partial ^2 y_{x,t}}{\\partial x^2} + b \\frac{\\partial y_{x,t}}{\\partial t} + c y_{x,t}= f_{x,t} \n\\end{equation}\nThe coefficients $a$, $b$ and $c$ are unknown. In this study, we use Gaussian process with an exponentiated quadratic kernel as a prior over $y_{x,t}$ (protein). The kernel of $f_{x,t}$ (mRNA) is derived by applying the partial differential operator on the spatial-temporal kernel of protein. The multi-output Gaussian process are developed by combining the covariance matrix of mRNA and protein and their cross covariance.",
"%matplotlib inline\n%config InlineBackend.figure_format = 'svg'\nimport numpy as np\nimport pylab as pb\nimport GPy\nimport pandas\nfrom pandas import read_csv",
"The spatio-temporal multi-output partial differential equation covariance functions have been developed with the kernel name GPy.kern.ODE_st in GPy. The inputs are one-dimensional spatial data, one-dimensional temporal data and a one-dimensional index which is used to indicate $f$ and $y$.",
"data = GPy.util.datasets.drosophila_knirps()",
"The next thing to do is to compose the data ready for presentation to GPy. Here we need to use the time field as the input and a concatenation of the expression levels as the output. We need to provide a corresponding index for each input to describe what output is being represented.\nNow we set up the covariance function with the relevant parameters.",
"kern = GPy.kern.ODE_st(input_dim=3,\n a=1., b=1., c=1.,\n variance_Yx=1., variance_Yt=1.,\n lengthscale_Yx=15.,\n lengthscale_Yt=15.) ",
"With the data correctly presented and the covariance function defined, we are ready to proceed with the Gaussian process regression.",
"data['X']\ndata['Y']\n\nm = GPy.models.GPRegression(data['X'],data['Y'],kern) ",
"Initial Fit\nThe initial value of $a$, $b$ and $c$ are 1. For these choices of covariance function parameters, we can plot the random field of $f$ and $y$ separately.",
"leng = data['X'].shape[0]\nm.plot(fixed_inputs=[(2,0)], which_data_rows = slice(0,leng*2/2))\nm.plot(fixed_inputs=[(2,1)], which_data_rows = slice(leng*2/2,leng*2))",
"Now we optimize the model; this will take a few minutes.",
"m.optimize(messages=True)",
"After optimization, the estimated values of $a$, $b$ and $c$ can be printed.",
"print m",
"The plots of the random fields are shown below.",
"m.plot(fixed_inputs=[(2,0)], which_data_rows = slice(0,leng*2/2))\npb.savefig(\"gene.pdf\")\nGPy.plotting.show(m.plot(fixed_inputs=[(2,1)], which_data_rows = slice(leng*2/2,leng*2)), filename='test')\npb.savefig(\"protein.pdf\")",
"In his diploma thesis Kolja Becker estimated the parameters of the partial differential equation where $a = 0.159$, $b =12.77$ and $c =0.983$. The results from the GP approach is $a =0.439$, $b= 10.06$ and $c=0.62$. The GP results are different but they are still within the range of the confidence interval defined in Becker's paper. Two further issues may lead to the difference. One reason could be that the original partial differential equation had a delay parameter $\\tau$ for the mRNA ($f$). In our GP model, we did not include this parameter. However, since the Protein-mRNA partial differential equation is linear and Becker's estimate of $\\tau$ is small comparing with the time step of the data. The delay impact should not be too big. The estimation algorithm used in Becker's thesis is based on least squares optimization. The GP approach considers protein and mRNA as a nonlinear function of space and time. And treat the PDE as a linear function to link them. The different modelling methods could also lead to different estimation of the parameters.\nFinally, we didn't perform a sensitivity analysis in the above notebook. One thing we can do next is to run a Hamiltonian Monte-Carlo sampler on the model to form error bars for our own analysis. It may be that they are not very well determined given the data.\nwork funded by the BioPreDyn project, it is a collaboration with Nicolas Durrande, Johannes Jaeger."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
jonathanmorgan/msu_phd_work
|
methods/data_creation/2016.12.10-work_log-prelim_month-single_name_match_error.ipynb
|
lgpl-3.0
|
[
"2016.12.10 - work log - prelim_month - single name match error\n<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Table-of-Contents\" data-toc-modified-id=\"Table-of-Contents-1\"><span class=\"toc-item-num\">1 </span>Table of Contents</a></span></li><li><span><a href=\"#Setup\" data-toc-modified-id=\"Setup-2\"><span class=\"toc-item-num\">2 </span>Setup</a></span><ul class=\"toc-item\"><li><span><a href=\"#Setup---Imports\" data-toc-modified-id=\"Setup---Imports-2.1\"><span class=\"toc-item-num\">2.1 </span>Setup - Imports</a></span></li><li><span><a href=\"#Setup---Initialize-Django\" data-toc-modified-id=\"Setup---Initialize-Django-2.2\"><span class=\"toc-item-num\">2.2 </span>Setup - Initialize Django</a></span></li></ul></li><li><span><a href=\"#Coding-to-look-into\" data-toc-modified-id=\"Coding-to-look-into-3\"><span class=\"toc-item-num\">3 </span>Coding to look into</a></span><ul class=\"toc-item\"><li><span><a href=\"#Match-for-just-first-name?---DONE\" data-toc-modified-id=\"Match-for-just-first-name?---DONE-3.1\"><span class=\"toc-item-num\">3.1 </span>Match for just first name? - DONE</a></span></li></ul></li></ul></div>\n\nSetup\n\nBack to Table of Contents\n\nSetup - Imports\n\nBack to Table of Contents",
"import datetime\n\nprint( \"packages imported at \" + str( datetime.datetime.now() ) )\n\n%pwd",
"Setup - Initialize Django\n\nBack to Table of Contents\n\nFirst, initialize my dev django project, so I can run code in this notebook that references my django models and can talk to the database using my project's settings.",
"%run django_init.py",
"Coding to look into\n\nBack to Table of Contents\n\nCoding decisions to look at more closely:\nMatch for just first name? - DONE\n\nBack to Table of Contents\n\nFirst name \"Kate\" was matched to \"Kate Gosselin\" but \"Gosselin\" is nowhere in the article.\n\n\nArticle Data 2980, article 20739 - 11003 (AS) - Gosselin, Kate ( id = 1608; capture_method = OpenCalais_REST_API_v2 ) (mentioned; individual) ==> name: Kate\n\nNot sure where \"Gosselin\" came from - need to look into the lookup for \"Kate\".\narticle 20739 - https://research.local/research/context/text/article/article_data/view_with_text/?article_id=20739\narticle data 2980 - https://research.local/research/context/text/article/article_data/view/?article_id=20739&article_data_id_select=2980",
"# imports\nfrom context_text.article_coding.manual_coding.manual_article_coder import ManualArticleCoder\nfrom context_text.models import Article_Subject\n\n# declare variables\nmy_coder = None\nsubject = None\nperson_name = \"\"\nperson_instance = None\nperson_match_list = None\n\n# create ManualArticleCoder and Article_Subject instance\nmy_coder = ManualArticleCoder()\nsubject = Article_Subject()\n\n# set up look up of \"Kate\"\nperson_name = \"Kate\"\n\n# lookup person - returns person and confidence score inside\n# Article_Person descendent instance.\nsubject = my_coder.lookup_person( subject, \n person_name,\n create_if_no_match_IN = False,\n update_person_IN = False )\n\n# retrieve information from Article_Person\nperson_instance = subject.person\nperson_match_list = subject.person_match_list # list of Person instances\n\nif ( person_instance is not None ):\n\n # Found person for \"Kate\":\n print( \"Found person for \\\"\" + str( person_name ) + \"\\\": \" + str( person_instance ) )\n \nelse:\n \n # no person instance found.\n print( \"No person instance found for \\\"\" + str( person_name ) + \"\\\"\" )\n \n#-- END check to see if person_instance --#\n\nif ( ( person_match_list is not None ) and ( len( person_match_list ) > 0 ) ):\n\n print( \"match list:\" )\n for match_person in person_match_list:\n \n # output each person for now.\n print( \"- \" + str( match_person ) )\n \n #-- END loop over person_match_list --#\n\nelse:\n \n print( \"match list is None or empty.\" )\n\n#-- END check to see if there is a match list.",
"Update - Well, it no longer returns anything for \"Kate\". Need to run unit tests.\nFor more precise detail, looked at the underlying way the lookup method finds a person for a name:",
"# imports\nfrom context_text.models import Person\n\n# declare variables\nperson_name = \"\"\nperson_instance = None\nlookup_status = \"\"\n\n# lookup Person - this is the way it works down in Article_Coder.lookup_person().\nperson_name = \"Kate\"\nperson_instance = Person.get_person_for_name( person_name, create_if_no_match_IN = False )\nlookup_status = Person.get_person_lookup_status( person_instance )\n\n# what we got?\nprint( \"Person instance: \" + str( person_instance ) + \"; lookup_status: \" + str( lookup_status ) )\n\n# String lookup?\nperson_instance = Person.get_person_for_name( person_name,\n create_if_no_match_IN = False,\n do_strict_match_IN = True,\n do_partial_match_IN = False )\n\n# what we got?\nprint( \"Person instance strict lookup: \" + str( person_instance ) )\n\n",
"Is there only one person with first name Kate?",
"# imports\nfrom context_text.models import Person\n\n# declare variables\nname_string = \"\"\ntest_person_qs = None\ntest_person = None\ntest_person_count = -1\n\n# do a lookup, filtering on first name of \"Kate\".\nname_string = \"Kate\"\ntest_person_qs = Person.objects.filter( first_name = name_string )\n\n# got anything at all?\nif ( test_person_qs is not None ):\n\n # process results - count...\n test_person_count = test_person_qs.count()\n print( \"Found \" + str( test_person_count ) + \" matches:\" )\n\n # ...and loop.\n for test_person in test_person_qs:\n\n # output person\n print( \"- \" + str( test_person ) )\n \n #-- END loop over matching persons. --#\n \n#-- END check to see if None --#",
"So... If there is a single match in the database for a single name part (first name or last name), but the match contains more than just the first name, I don't want to call that a match unless there is some sort of associated ID that also matches.\nMake sure that single names are always considered the first name:",
"# functions\ndef output_person_name_details( person_IN ):\n\n if ( person_IN is not None ):\n\n print( \"Person \" + str( person_IN ) + \":\" )\n print( \"- first_name: \" + str( person_IN.first_name ) )\n print( \"- middle_name: \" + str( person_IN.middle_name ) )\n print( \"- last_name: \" + str( person_IN.last_name ) )\n print( \"- name_prefix: \" + str( person_IN.name_prefix ) )\n print( \"- name_suffix: \" + str( person_IN.name_suffix ) )\n print( \"- nickname: \" + str( person_IN.nickname ) )\n print( \"- full_name_string: \" + str( person_IN.full_name_string ) )\n print( \"- original_name_string: \" + str( person_IN.original_name_string ) )\n \n else:\n \n print( \"Can't output details since nothing (None) passed in.\" )\n \n #-- END Check to see if Person is None --# \n \n#-- END function output_person_name_details() --#\n \n# imports\nfrom context_text.models import Person\n\n# declare variables\nperson_instance = None\nname_string = \"\"\nparsed_name = None\n\n# see if HumanName consistently places single name in first name.\nperson_instance = Person()\nname_string = \"Smith\"\nperson_instance.set_name( name_string )\noutput_person_name_details( person_instance )\n\n# see if HumanName consistently places single name in first name.\nperson_instance = Person()\nname_string = \"Kevin\"\nperson_instance.set_name( name_string )\noutput_person_name_details( person_instance )\n\n# see if HumanName consistently places single name in first name.\nperson_instance = Person()\nname_string = \"Huxtable\"\nperson_instance.set_name( name_string )\noutput_person_name_details( person_instance )\n\n# see if HumanName consistently places single name in first name.\nperson_instance = Person()\nname_string = \"Kate\"\nperson_instance.set_name( name_string )\noutput_person_name_details( person_instance )\n\n# see if HumanName consistently places single name in first name.\nperson_instance = Person()\nname_string = \"Von Williams\"\nperson_instance.set_name( name_string 
)\noutput_person_name_details( person_instance )",
"Sure looks like single name part always goes into first name. So, if I do a straight up lookup, and the original name parses out to only have a first name, and the result has more than a first name, then we call it not a match, store the match as an alternate.\nTest out the check to see if there is more than a first name.",
"# imports\nfrom context_text.models import Person\n\n# declare variables\nperson_name = \"\"\nperson_instance = None\nis_just_first_name = False\n\n# is single name part?\nperson_name = \"Kate\"\nis_just_first_name = Person.is_single_name_part( person_name )\nprint( str( person_name ) + \" - \" + str( is_just_first_name ) )\n\n# is single name part?\nperson_name = \"Kate Smith\"\nis_just_first_name = Person.is_single_name_part( person_name )\nprint( str( person_name ) + \" - \" + str( is_just_first_name ) )\n\n# is single name part?\nperson_name = \"Michael W. Smith\"\nis_just_first_name = Person.is_single_name_part( person_name )\nprint( str( person_name ) + \" - \" + str( is_just_first_name ) )\n\n# is single name part?\nperson_name = \"Michael \"\nis_just_first_name = Person.is_single_name_part( person_name )\nprint( str( person_name ) + \" - \" + str( is_just_first_name ) )\n\n# is single name part?\nperson_name = \"Mr. Michael\"\nis_just_first_name = Person.is_single_name_part( person_name )\nprint( str( person_name ) + \" - \" + str( is_just_first_name ) )",
"Here is the fix I came up with (article_coder.py lines 1308 to 1365):",
"# 1307...\n # check to see if the name passed in is a single name.\n is_single_name_part = Person.is_single_name_part( full_name_IN )\n if ( is_single_name_part == True ):\n\n # yes. Do strict lookup with no partial match to see if\n # this is an exact match.\n temp_person = Person.get_person_for_name( full_name_IN,\n create_if_no_match_IN = False,\n do_strict_match_IN = True,\n do_partial_match_IN = False )\n temp_lookup_status = Person.get_person_lookup_status( temp_person )\n \n # exact match?\n if ( ( temp_person is None ) or ( person_instance.id != temp_person.id ) ):\n \n # we have a single word name, lookup and strict\n # lookup result in different matches.\n \n # further verify by checking if just one match for\n # the name passed in and first_name, ignoring\n # other name fields.\n test_person_qs = Person.objects.filter( first_name__iexact = full_name_IN )\n test_person_count = test_person_qs.count()\n if ( test_person_count == 1 ):\n\n # This is a relatively rare scenario - a single\n # name part matches to the only person in\n # the database that contains that name part\n # in their first name. For our purposes,\n # this is not a match. Make a new person\n # for the single name part, set match status\n # to None.\n match_status = self.MATCH_STATUS_NONE\n person_instance = Person.create_person_for_name( full_name_IN )\n self.output_debug( \"In \" + me + \": name \" + str( full_name_IN ) + \" is first name of one person ( \" + str( person_instance ) + \" ) who has more name information. This is not a reliable match, so creating new Person with just name passed in.\" )\n \n elif ( test_person_count > 1 ):\n \n # make list of IDs of multiple matches.\n multiple_list = []\n for test_person in test_person_qs:\n \n # add ID of each person to list.\n multiple_list.append( test_person )\n \n #-- END loop over multiple matches. 
--#\n \n self.output_debug( \"In \" + me + \": name \" + str( full_name_IN ) + \" is first name of more than one person ( \" + str( multiple_list ) + \" ) who just has that first name. But we found an exact match. This makes no sense.\" )\n \n else:\n \n self.output_debug( \"In \" + me + \": name \" + str( full_name_IN ) + \" is not first name of any person, and yet it was matched to person: \" + str( person_instance ) + \". This makes no sense.\" ) \n\n #-- END check to see if one person with same first name --#\n \n #-- END check to see if our match is an exact match --#\n \n #-- END check to see if single name part --#\n# ...1365",
"Now, to run unit tests. Command is:\npython manage.py test sourcenet.tests\n\nmust be in Django project folder and have activated your virtualenv.\nTests passed. Plus little tests here. We'll call that good for now, moving back to assessing each single-word name."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
BDannowitz/polymath-progression-blog
|
jlab-ml-lunch-2/notebooks/03-Recurrent-Network-Model.ipynb
|
gpl-2.0
|
[
"03 - Sequence Model Approach\n\nThe more 'classical' approach to solving this problem\nTrain a model that can take any number of 'steps'\nMakes a prediction on next step based on previous steps\nLearn from full tracks\nFor test tracks, predict what the next step's values will be",
"%matplotlib inline\n\nimport pandas as pd\nimport numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, Dense, LeakyReLU, Dropout, ReLU, GRU, TimeDistributed, Conv2D, MaxPooling2D, Flatten\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom jlab import load_test_data, get_test_detector_plane",
"Load up and prep the datasets",
"X_train = pd.read_csv('MLchallenge2_training.csv')\nX_test = load_test_data('test_in.csv')\neval_planes = get_test_detector_plane(X_test)\n\n# Also, load our truth values\ny_true = pd.read_csv('test_prediction.csv', names=['x', 'y', 'px', 'py', 'pz'],\n header=None)\n\nX_test.head()\n\ny_true.head()",
"Construct the training data and targets\n\nFor each track\nChoose a number N between 8 and 24\nThat track will have 6 kinematics for N blocks\nThe target variable will be the 6 kinematic variables for the N+1th detector block\nThis will cause variable length sequences\nApply pad_sequences to prepend with zeros appropriately\n\nTraining Dataset",
"N_SAMPLES = len(X_train)\nN_DETECTORS = 25\nN_KINEMATICS = 6\nSHAPE = (N_SAMPLES, N_DETECTORS-1, N_KINEMATICS)\n\nX_train_list = []\ny_train_array = np.ndarray(shape=(N_SAMPLES, N_KINEMATICS-1))\nfor ix in range(N_SAMPLES):\n seq_len = np.random.choice(range(8, 25))\n track = X_train.iloc[ix].values.reshape(N_DETECTORS, N_KINEMATICS)\n X_train_list.append(track[0:seq_len])\n # Store the kinematics of the next in the sequence\n # Ignore the 3rd one, which is z\n y_train_array[ix] = track[seq_len][[0,1,3,4,5]]\n\nfor track in X_train_list[:10]:\n print(len(track))\n\nX_train_list = pad_sequences(X_train_list, dtype=float)\n\nfor track in X_train_list[:10]:\n print(len(track))\n\nX_train_array = np.array(X_train_list)\nX_train_array.shape\n\ny_train_array.shape",
"Validation Dataset",
"N_TEST_SAMPLES = len(X_test)\n\ny_test_array = y_true.values\n\nX_test_list = []\nfor ix in range(N_TEST_SAMPLES):\n seq_len = get_test_detector_plane(X_test.iloc[ix])\n track = X_test.iloc[ix].values.reshape(N_DETECTORS, N_KINEMATICS)\n X_test_list.append(track[0:seq_len])\n\nX_test_list = pad_sequences(X_test_list, dtype=float)\nX_test_array = np.array(X_test_list)\n\nX_test_array.shape\n\ny_test_array.shape\n\ny_true.values.shape\n\nimport pandas as pd\nimport numpy as np\nfrom math import floor\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom sklearn.model_selection import train_test_split\n\ndata = pd.read_csv('MLchallenge2_training.csv')\n\n# Z values are constant -- what are they?\nZ_VALS = data[['z'] + [f'z{i}' for i in range(1, 25)]].loc[0].values\n# Z-distance from one timestep to another is set; calculate it\nZ_DIST = [Z_VALS[i+1] - Z_VALS[i] for i in range(0, 24)] + [0.0]\n\n# Number of timesteps\nN_DETECTORS = 25\n# Provided number of kinematics\nN_KINEMATICS = 6\n# Number of features after engineering them all\nN_FEATURES = 13\n\ndef get_detector_meta(kin_array, det_id):\n \n # Is there a large gap after this detector?\n # 0 is for padded timesteps\n # 1 is for No, 2 is for Yes\n mind_the_gap = int(det_id % 6 == 0) + 1\n \n # Detector group: 1 (origin), 2, 3, 4, or 5\n det_grp = floor((det_id-1) / 6) + 2\n \n # Detectors numbered 1-6 (origin is 6)\n # (Which one in the group of six is it?)\n det_rank = ((det_id-1) % 6) + 1\n \n # Distance to the next detector?\n z_dist = Z_DIST[det_id]\n \n # Transverse momentum (x-y component)\n pt = np.sqrt(np.square(kin_array[3]) + np.square(kin_array[4]))\n \n # Total momentum\n p_tot = np.sqrt(np.square(kin_array[3])\n + np.square(kin_array[4])\n + np.square(kin_array[5]))\n\n # Put all the calculated features together\n det_meta = np.array([det_id, mind_the_gap, det_grp, det_rank,\n z_dist, pt, p_tot])\n \n # Return detector data plus calculated features\n return 
np.concatenate([kin_array, det_meta], axis=None)\n\ndef tracks_to_time_series(X):\n \"\"\"Convert training dataframe to multivariate time series training set\n \n Pivots each track to a series ot timesteps. Then randomly truncates them\n to be identical to the provided test set. The step after the truncated\n step is saved as the target.\n \n Truncated sequence are front-padded with zeros.\n \n Parameters\n ----------\n X : pandas.DataFrame\n \n Returns\n -------\n (numpy.ndarray, numpy.ndarray)\n Tuple of the training data and labels\n \"\"\"\n \n X_ts_list = []\n n_samples = len(X)\n y_array = np.ndarray(shape=(n_samples, N_KINEMATICS-1))\n for ix in range(n_samples):\n # Randomly choose how many detectors the track went through\n track_len = np.random.choice(range(8, 25))\n # Reshape into ts-like\n track = X.iloc[ix].values.reshape(N_DETECTORS, N_KINEMATICS)\n #eng_track = np.zeros(shape=(N_DETECTORS, N_FEATURES))\n #for i in range(0, N_DETECTORS):\n # eng_track[i] = get_detector_meta(track[i], i)\n # Truncate the track to only N detectors\n X_ts_list.append(track[0:track_len])\n # Store the kinematics of the next in the sequence\n # Ignore the 3rd one, which is z\n y_array[ix] = track[track_len][[0,1,3,4,5]]\n \n # Pad the training sequence\n X_ts_list = pad_sequences(X_ts_list, dtype=float)\n X_ts_array = np.array(X_ts_list)\n \n return X_ts_array, y_array\n\nX, y = tracks_to_time_series(data)\n\nX[3]\n\ny[3]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\nlen(X_train), len(X_test)",
"Multi-layer GRU Model with LReLU",
"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import GRU, Dense, LeakyReLU, Dropout\nfrom tensorflow.keras.callbacks import EarlyStopping\nimport joblib\n\ndef lrelu(x):\n return LeakyReLU()(x)\n\n\ndef gru_model(gru_units=35, dense_units=100,\n dropout_rate=0.25):\n \"\"\"Model definition.\n \n Three layers of Gated Recurrent Units (GRUs), utilizing\n LeakyReLU activations, finally passing GRU block output\n to a dense layer, passing its output to the final output\n layer, with a touch of dropout in between.\n \n Bon apetit.\n \n Parameters\n ----------\n gru_units : int\n dense_units : int\n dropout_rate : float\n \n Returns\n -------\n tensorflow.keras.models.Sequential\n \n \"\"\"\n \n model = Sequential()\n \n model.add(GRU(gru_units, activation=lrelu,\n input_shape=(N_DETECTORS-1, N_KINEMATICS),\n return_sequences=True))\n model.add(GRU(gru_units, activation=lrelu,\n return_sequences=True))\n model.add(GRU(gru_units, activation=lrelu))\n \n model.add(Dense(dense_units, activation=lrelu))\n model.add(Dropout(dropout_rate))\n model.add(Dense(N_KINEMATICS-1))\n \n model.compile(loss='mse', optimizer='adam')\n \n return model\n\nmodel = gru_model()\n\nmodel.summary()\n\nfrom tensorflow.keras.utils import plot_model\nplot_model(model, to_file='gru_model.png', show_shapes=True)\n\nes = EarlyStopping(monitor='val_loss', mode='min',\n patience=5, restore_best_weights=True)\nhistory = model.fit(\n x=X_train,\n y=y_train,\n validation_data=(X_test, y_test),\n callbacks=[es],\n epochs=50,\n)\n\nmodel.save(\"gru_model.h5\")\njoblib.dump(history.history, \"gru_model.history\")\n\nhistory = joblib.load(\"dannowitz_jlab2_model_20191031.history\")\n\nimport matplotlib.pyplot as plt\n\n# Plot training & validation loss values\nplt.plot(history['loss'])\nplt.plot(history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()",
"Calculate the score on my predictions\n\nScoring code provided by Thomas Britton\nEach kinematic has different weight",
"pred = pd.read_csv('data/submission/dannowitz_jlab2_submission_20191112.csv', header=None)\ntruth = pd.read_csv('data/ANSWERS.csv', header=None)\n\n# Calculate square root of the mean squared error\n# Then apply weights and sum them all up\nsq_error = (truth - pred).applymap(np.square)\nmse = sq_error.sum() / len(truth)\nrmse = np.sqrt(mse)\nrms_weighted = rmse / [0.03, 0.03, 0.01, 0.01, 0.011]\nscore = rms_weighted.sum()\nscore",
"Visualize the predictions vs true\nYou can slice and dice the stats however you want, but it helps to be able to see your predictions at work.\nRunning history of me tinkering around\n\nI didn't arrive at this construction from the start.\nMany different changes and tweaks",
"def lstm_model():\n \n model = Sequential()\n model.add(LSTM(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))\n model.add(Dense(100, activation=LeakyReLU()))\n model.add(Dropout(0.25))\n model.add(Dense(N_KINEMATICS-1, activation='linear'))\n model.compile(loss='mse', optimizer='adam')\n \n return model\n\nmodel = lstm_model()\nmodel.summary()\n\nhistory = model.fit(x=X_train_array, y=y_train_array, validation_data=(X_test_array, y_test_array), epochs=5)\n\nhistory = model.fit(x=X_train_array, y=y_train_array,\n validation_data=(X_test_array, y_test_array),\n epochs=50, use_multiprocessing=True)\n\nmodel = lstm_model()\nes = EarlyStopping(monitor='val_loss', mode='min')\nhistory = model.fit(x=X_train_array, y=y_train_array,\n validation_data=(X_test_array, y_test_array),\n callbacks=[es], epochs=20, use_multiprocessing=True)\n\nmodel.save(\"lstm100-dense100-dropout025-epochs20-early-stopping.h5\")\n\ndef lstm_model_lin():\n \n model = Sequential()\n model.add(LSTM(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))\n model.add(Dense(100, activation=LeakyReLU()))\n model.add(Dropout(0.25))\n model.add(Dense(N_KINEMATICS-1, activation='linear'))\n model.compile(loss='mse', optimizer='adam')\n \n return model\n\nlin_act_model = lstm_model_lin()\nes = EarlyStopping(monitor='val_loss', mode='min')\nhistory = lin_act_model.fit(x=X_train_array[:10000], y=y_train_array[:10000],\n validation_data=(X_test_array, y_test_array),\n callbacks=[es], epochs=20, use_multiprocessing=True)\n\ndef lstm_model_adam():\n \n model = Sequential()\n model.add(LSTM(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))\n model.add(Dense(100, activation=LeakyReLU()))\n model.add(Dropout(0.25))\n model.add(Dense(N_KINEMATICS-1))\n model.compile(loss='mse', optimizer='adam')\n \n return model\n\nadam_model = lstm_model_adam()\nes = EarlyStopping(monitor='val_loss', mode='min')\nhistory = adam_model.fit(x=X_train_array[:10000], 
y=y_train_array[:10000],\n validation_data=(X_test_array, y_test_array),\n callbacks=[es], epochs=20, use_multiprocessing=True)\n\ndef lstm_model_dropout50():\n \n model = Sequential()\n model.add(LSTM(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))\n model.add(Dense(100, activation=LeakyReLU()))\n model.add(Dropout(0.50))\n model.add(Dense(N_KINEMATICS-1))\n model.compile(loss='mse', optimizer='adam')\n \n return model\n\ndropout50_model = lstm_model_dropout50()\nes = EarlyStopping(monitor='val_loss', mode='min')\nhistory = dropout50_model.fit(x=X_train_array[:10000], y=y_train_array[:10000],\n validation_data=(X_test_array, y_test_array),\n callbacks=[es], epochs=20, use_multiprocessing=True)\n\ndef lstm_model_nodropout():\n \n model = Sequential()\n model.add(LSTM(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))\n model.add(Dense(100, activation=LeakyReLU()))\n model.add(Dense(N_KINEMATICS-1))\n model.compile(loss='mse', optimizer='adam')\n \n return model\n\nnodropout_model = lstm_model_nodropout()\nes = EarlyStopping(monitor='val_loss', mode='min')\nhistory = nodropout_model.fit(x=X_train_array[:10000], y=y_train_array[:10000],\n validation_data=(X_test_array, y_test_array),\n callbacks=[es], epochs=20, use_multiprocessing=True)\n\ndef lstm_model_relu():\n \n model = Sequential()\n model.add(LSTM(200, activation='relu', input_shape=(N_DETECTORS-1, N_KINEMATICS)))\n model.add(Dense(100, activation='relu'))\n model.add(Dropout(0.25))\n model.add(Dense(N_KINEMATICS-1))\n model.compile(loss='mse', optimizer='adam')\n \n return model\n\nrelu_model = lstm_model_relu()\nes = EarlyStopping(monitor='val_loss', mode='min')\nhistory = relu_model.fit(x=X_train_array[:10000], y=y_train_array[:10000],\n validation_data=(X_test_array, y_test_array),\n callbacks=[es], epochs=20, use_multiprocessing=True)\n\ndef model_gru():\n \n model = Sequential()\n model.add(GRU(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, 
N_KINEMATICS)))\n model.add(Dense(100, activation=LeakyReLU()))\n model.add(Dropout(0.25))\n model.add(Dense(N_KINEMATICS-1))\n model.compile(loss='mse', optimizer='adam')\n \n return model\n\ngru_model = model_gru()\nes = EarlyStopping(monitor='val_loss', mode='min')\nhistory = gru_model.fit(x=X_train_array[:10000], y=y_train_array[:10000],\n validation_data=(X_test_array, y_test_array),\n callbacks=[es], epochs=20, use_multiprocessing=True)",
"Early Conclusions\n\nGRU > LSTM\nLeakyReLU > ReLU\nadam > rmsprop\ndropout 0.25 > dropout 0.5 > no dropout",
"def model_v2():\n \n model = Sequential()\n model.add(GRU(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))\n model.add(Dense(100, activation=LeakyReLU()))\n model.add(Dropout(0.25))\n model.add(Dense(N_KINEMATICS-1))\n model.compile(loss='mse', optimizer='adam')\n \n return model\n\nv2_model = model_v2()\nes = EarlyStopping(monitor='val_loss', mode='min')\nhistory = v2_model.fit(x=X_train_array, y=y_train_array,\n validation_data=(X_test_array, y_test_array),\n callbacks=[es], epochs=8, use_multiprocessing=True)\n\nfrom tensorflow.keras.back\n\ndef model_v2_deep():\n \n model = Sequential()\n model.add(GRU(30, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS),\n return_sequences=True))\n model.add(GRU(30, activation=LeakyReLU(), return_sequences=True))\n model.add(GRU(30, activation=LeakyReLU()))\n model.add(Dense(100, activation=LeakyReLU()))\n model.add(Dropout(0.25))\n model.add(Dense(N_KINEMATICS-1))\n model.compile(loss='mse', optimizer='adam')\n \n return model\n\nv2_model_deep = model_v2_deep()\nv2_model_deep.summary()\n\nes = EarlyStopping(monitor='val_loss', mode='min', patience=2, restore_best_weights=True)\nhistory = v2_model_deep.fit(x=X_train_array, y=y_train_array,\n validation_data=(X_test_array, y_test_array),\n callbacks=[es],\n epochs=8, use_multiprocessing=True)\n\ndef model_v2_dbl_gru():\n \n model = Sequential()\n model.add(GRU(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS),\n return_sequences=True))\n model.add(GRU(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))\n model.add(Dense(100, activation=LeakyReLU()))\n model.add(Dropout(0.25))\n model.add(Dense(N_KINEMATICS-1))\n model.compile(loss='mse', optimizer='adam')\n \n return model\n\nv2_model_dbl_gru = model_v2_dbl_gru()\nes = EarlyStopping(monitor='val_loss', mode='min')\nhistory = v2_model_dbl_gru.fit(x=X_train_array[:20000], y=y_train_array[:20000],\n validation_data=(X_test_array, y_test_array),\n 
#callbacks=[es],\n epochs=10, use_multiprocessing=True)\n\ndef model_v2_2x_dropout():\n \n model = Sequential()\n model.add(GRU(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))\n model.add(Dropout(0.25))\n model.add(Dense(100, activation=LeakyReLU()))\n model.add(Dropout(0.25))\n model.add(Dense(N_KINEMATICS-1))\n model.compile(loss='mse', optimizer='adam')\n \n return model\n\nv2_model_dbl_dropout = model_v2_2x_dropout()\nes = EarlyStopping(monitor='val_loss', mode='min')\nhistory = v2_model_dbl_dropout.fit(x=X_train_array[:20000], y=y_train_array[:20000],\n validation_data=(X_test_array, y_test_array),\n callbacks=[es], epochs=20, use_multiprocessing=True)\n\ndef model_v2_big_gru():\n \n model = Sequential()\n model.add(GRU(400, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))\n model.add(Dense(100, activation=LeakyReLU()))\n model.add(Dropout(0.25))\n model.add(Dense(N_KINEMATICS-1))\n model.compile(loss='mse', optimizer='adam')\n \n return model\n\nv2_model_big_gru = model_v2_big_gru()\nes = EarlyStopping(monitor='val_loss', mode='min')\nhistory = v2_model_big_gru.fit(x=X_train_array[:20000], y=y_train_array[:20000],\n validation_data=(X_test_array, y_test_array),\n #callbacks=[es],\n epochs=10, use_multiprocessing=True)\n\nv2_model_big_gru.fit(x=X_train_array[:20000], y=y_train_array[:20000],\n validation_data=(X_test_array, y_test_array),\n #callbacks=[es],\n epochs=15, use_multiprocessing=True, initial_epoch=10)",
"Try CNN LSTM",
"X_train_array.shape\n\ndef cnn_gru():\n \n model = Sequential()\n model.add(Conv1D(filters=5, kernel_size=2, strides=1, input_shape=(N_DETECTORS-1, N_KINEMATICS)))\n #model.add(MaxPooling1D())\n model.add(GRU(200, activation=LeakyReLU()))\n model.add(Dense(100, activation=LeakyReLU()))\n model.add(Dropout(0.25))\n model.add(Dense(N_KINEMATICS-1))\n model.compile(loss='mse', optimizer='adam')\n \n return model\n\ncnn_model = cnn_gru()\ncnn_model.summary()\n\n#es = EarlyStopping(monitor='val_loss', mode='min')\nhistory = cnn_model.fit(x=X_train_array[:20000], y=y_train_array[:20000],\n validation_data=(X_test_array, y_test_array),\n epochs=10, use_multiprocessing=True)\n\nhistory.history",
"Enough tinkering around\n\nFormalize this into some scripts\nMake predictions on competition test data",
"from train import train\nfrom predict import predict\n\nmodel = train(frac=1.00, filename=\"dannowitz_jlab2_model\", epochs=100, ret_model=True)\n\npreds = predict(model_filename=\"dannowitz_jlab2_model.h5\",\n data_filename=\"test_in (1).csv\",\n output_filename=\"danowitz_jlab2_submission.csv\")"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
phuongxuanpham/SelfDrivingCar
|
CarND-LeNet-Lab/LeNet-Lab.ipynb
|
gpl-3.0
|
[
"LeNet Lab\n\nSource: Yan LeCun\nLoad Data\nLoad the MNIST data, which comes pre-loaded with TensorFlow.\nYou do not need to modify this section.",
"from tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", reshape=False)\nX_train, y_train = mnist.train.images, mnist.train.labels\nX_validation, y_validation = mnist.validation.images, mnist.validation.labels\nX_test, y_test = mnist.test.images, mnist.test.labels\n\nassert(len(X_train) == len(y_train))\nassert(len(X_validation) == len(y_validation))\nassert(len(X_test) == len(y_test))\n\nprint()\nprint(\"Image Shape: {}\".format(X_train[0].shape))\nprint()\nprint(\"Training Set: {} samples\".format(len(X_train)))\nprint(\"Validation Set: {} samples\".format(len(X_validation)))\nprint(\"Test Set: {} samples\".format(len(X_test)))",
"The MNIST data that TensorFlow pre-loads comes as 28x28x1 images.\nHowever, the LeNet architecture only accepts 32x32xC images, where C is the number of color channels.\nIn order to reformat the MNIST data into a shape that LeNet will accept, we pad the data with two rows of zeros on the top and bottom, and two columns of zeros on the left and right (28+2+2 = 32).\nYou do not need to modify this section.",
"import numpy as np\n\n# Pad images with 0s\nX_train = np.pad(X_train, ((0,0),(2,2),(2,2),(0,0)), 'constant')\nX_validation = np.pad(X_validation, ((0,0),(2,2),(2,2),(0,0)), 'constant')\nX_test = np.pad(X_test, ((0,0),(2,2),(2,2),(0,0)), 'constant')\n \nprint(\"Updated Image Shape: {}\".format(X_train[0].shape))",
"Visualize Data\nView a sample from the dataset.\nYou do not need to modify this section.",
"import random\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nindex = random.randint(0, len(X_train))\nimage = X_train[index].squeeze()\n\nplt.figure(figsize=(1,1))\nplt.imshow(image, cmap=\"gray\")\nprint(y_train[index])",
"Preprocess Data\nShuffle the training data.\nYou do not need to modify this section.",
"from sklearn.utils import shuffle\n\nX_train, y_train = shuffle(X_train, y_train)",
"Setup TensorFlow\nThe EPOCH and BATCH_SIZE values affect the training speed and model accuracy.\nYou do not need to modify this section.",
"import tensorflow as tf\n\nEPOCHS = 10\nBATCH_SIZE = 128",
"TODO: Implement LeNet-5\nImplement the LeNet-5 neural network architecture.\nThis is the only cell you need to edit.\nInput\nThe LeNet architecture accepts a 32x32xC image as input, where C is the number of color channels. Since MNIST images are grayscale, C is 1 in this case.\nArchitecture\nLayer 1: Convolutional. The output shape should be 28x28x6.\nActivation. Your choice of activation function.\nPooling. The output shape should be 14x14x6.\nLayer 2: Convolutional. The output shape should be 10x10x16.\nActivation. Your choice of activation function.\nPooling. The output shape should be 5x5x16.\nFlatten. Flatten the output shape of the final pooling layer such that it's 1D instead of 3D. The easiest way to do is by using tf.contrib.layers.flatten, which is already imported for you.\nLayer 3: Fully Connected. This should have 120 outputs.\nActivation. Your choice of activation function.\nLayer 4: Fully Connected. This should have 84 outputs.\nActivation. Your choice of activation function.\nLayer 5: Fully Connected (Logits). This should have 10 outputs.\nOutput\nReturn the result of the 2nd fully connected layer.",
"from tensorflow.contrib.layers import flatten\n\ndef LeNet(x): \n # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer\n mu = 0\n sigma = 0.1\n \n # TODO: Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.\n conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean = mu, stddev = sigma))\n conv1_b = tf.Variable(tf.zeros(6))\n conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b\n\n # TODO: Activation.\n conv1 = tf.nn.relu(conv1)\n\n # TODO: Pooling. Input = 28x28x6. Output = 14x14x6.\n conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n # TODO: Layer 2: Convolutional. Output = 10x10x16.\n conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))\n conv2_b = tf.Variable(tf.zeros(16))\n conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b\n \n # TODO: Activation.\n conv2 = tf.nn.relu(conv2)\n\n # TODO: Pooling. Input = 10x10x16. Output = 5x5x16.\n conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n # TODO: Flatten. Input = 5x5x16. Output = 400.\n fc0 = tf.contrib.layers.flatten(conv2)\n \n # TODO: Layer 3: Fully Connected. Input = 400. Output = 120.\n fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))\n fc1_b = tf.Variable(tf.zeros(120))\n fc1 = tf.matmul(fc0, fc1_W) + fc1_b\n \n # TODO: Activation.\n fc1 = tf.nn.relu(fc1)\n\n # TODO: Layer 4: Fully Connected. Input = 120. Output = 84.\n fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))\n fc2_b = tf.Variable(tf.zeros(84))\n fc2 = tf.matmul(fc1, fc2_W) + fc2_b\n \n # TODO: Activation.\n fc2 = tf.nn.relu(fc2)\n\n # TODO: Layer 5: Fully Connected. Input = 84. 
Output = 10.\n fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 10), mean = mu, stddev = sigma))\n fc3_b = tf.Variable(tf.zeros(10))\n logits = tf.matmul(fc2, fc3_W) + fc3_b\n \n return logits",
"Features and Labels\nTrain LeNet to classify MNIST data.\nx is a placeholder for a batch of input images.\ny is a placeholder for a batch of output labels.\nYou do not need to modify this section.",
"x = tf.placeholder(tf.float32, (None, 32, 32, 1))\ny = tf.placeholder(tf.int32, (None))\none_hot_y = tf.one_hot(y, 10)",
"Training Pipeline\nCreate a training pipeline that uses the model to classify MNIST data.\nYou do not need to modify this section.",
"rate = 0.001\n\nlogits = LeNet(x)\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)\nloss_operation = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer(learning_rate = rate)\ntraining_operation = optimizer.minimize(loss_operation)",
"Model Evaluation\nEvaluate how well the loss and accuracy of the model for a given dataset.\nYou do not need to modify this section.",
"correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsaver = tf.train.Saver()\n\ndef evaluate(X_data, y_data):\n num_examples = len(X_data)\n total_accuracy = 0\n sess = tf.get_default_session()\n for offset in range(0, num_examples, BATCH_SIZE):\n batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]\n accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})\n total_accuracy += (accuracy * len(batch_x))\n return total_accuracy / num_examples",
"Train the Model\nRun the training data through the training pipeline to train the model.\nBefore each epoch, shuffle the training set.\nAfter each epoch, measure the loss and accuracy of the validation set.\nSave the model after training.\nYou do not need to modify this section.",
"with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n num_examples = len(X_train)\n \n print(\"Training...\")\n print()\n for i in range(EPOCHS):\n X_train, y_train = shuffle(X_train, y_train)\n for offset in range(0, num_examples, BATCH_SIZE):\n end = offset + BATCH_SIZE\n batch_x, batch_y = X_train[offset:end], y_train[offset:end]\n sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})\n \n validation_accuracy = evaluate(X_validation, y_validation)\n print(\"EPOCH {} ...\".format(i+1))\n print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n print()\n \n saver.save(sess, './lenet')\n print(\"Model saved\")",
"Evaluate the Model\nOnce you are completely satisfied with your model, evaluate the performance of the model on the test set.\nBe sure to only do this once!\nIf you were to measure the performance of your trained model on the test set, then improve your model, and then measure the performance of your model on the test set again, that would invalidate your test results. You wouldn't get a true measure of how well your model would perform against real data.\nYou do not need to modify this section.",
"with tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('.'))\n\n test_accuracy = evaluate(X_test, y_test)\n print(\"Test Accuracy = {:.3f}\".format(test_accuracy))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
IanHawke/ET-NumericalMethods-2016
|
solutions/01-finite-differencing.ipynb
|
mit
|
[
"Numerical Methods\nFor Numerical Relativity, we need to\n\nevolve the spacetime (hyperbolic PDEs with \"smooth\" fields);\nevolve the matter (hyperbolic PDEs with discontinuous fields);\nsolve initial data (elliptic PDEs);\nextract gravitational waves (interpolation and integration);\nfind and analyse horizons (interpolation, BVPs).\n\nThese can be built on some simple foundations. \nThe general concepts that underpin most numerical methods are\n\nthe solution of linear systems $A {\\bf x} = {\\bf b}$;\nthe solution of nonlinear root-finding problems ${\\bf f} ( {\\bf x} ) = {\\bf 0}$;\nthe representation of a function or field $f(x)$ by discrete data $f_i$, by interpolation or other means;\nthe (discrete) Fast Fourier Transform;\nstochastic concepts and methods.\n\nFor Numerical Relativity, there has been little need (yet!) for stochastic methods, and the use of FFTs is mostly restricted to analysis. All of these points can be found in standard numerical packages and libraries: the question, however, is\n\nwhat do we need to understand about these methods before implementing or using them?\nwhen is it faster or better to implement our own version rather than using a library?\n\nFinite differencing\nAs a first step we'll quickly cover finite differencing: the approximation of derivatives of a function $f$ when the only information about $f$ is its value at a set of points, or nodes, ${x_i}$, denoted ${f_i}$.\nHere we have the \"representation of a function\" problem. We represent the function $f$ using a piecewise polynomial function $g$. This polynomial must interpolate $f$: that is, $g(x_i) \\equiv f(x_i) = f_i$. We then approximate derivatives of $f$ by derivatives of $g$.\nAs simple examples, let's assume we know three points, ${f_{i-1}, f_i, f_{i+1}}$. 
Then we have the linear polynomial approximations\n$$\n g_{FD} = \\frac{x - x_{i+1}}{x_i - x_{i+1}} f_i + \\frac{x - x_{i}}{x_{i+1} - x_{i}} f_{i+1}\n$$\nand\n$$\n g_{BD} = \\frac{x - x_{i}}{x_{i-1} - x_{i}} f_{i-1} + \\frac{x - x_{i-1}}{x_i - x_{i-1}} f_i\n$$\nor the quadratic polynomial approximation\n$$\n g_{CD} = \\frac{(x - x_{i})(x - x_{i+1})}{(x_{i-1} - x_{i})(x_{i-1} - x_{i+1})} f_{i-1} + \\frac{(x - x_{i-1})(x - x_{i+1})}{(x_{i} - x_{i-1})(x_{i} - x_{i+1})} f_{i} + \\frac{(x - x_{i-1})(x - x_{i})}{(x_{i+1} - x_{i-1})(x_{i+1} - x_{i})} f_{i+1}.\n$$\nNote how this Lagrange form is built out of indicator polynomials that take the value $1$ at one node and vanish at all others.\nBy differentiating these polynomial interpolating functions we get approximations to the derivatives of $f$. Each approximation is different, with different errors.\nWe'll assume that the nodes are equally spaced, with grid spacing $\\Delta x = x_{i+1} - x_i$. The approximations above give the standard forward difference\n$$\n \\left. \\frac{\\partial g_{FD}}{\\partial x} \\right|{x = x_i} \\to \\left. \\frac{\\partial f}{\\partial x} \\right|{x = x_i} = \\frac{1}{\\Delta x} \\left( f_{i+1} - f_i \\right) + {\\cal O} \\left( \\Delta x \\right),\n$$\nthe standard backward difference\n$$\n \\left. \\frac{\\partial g_{BD}}{\\partial x} \\right|{x = x_i} \\to \\left. \\frac{\\partial f}{\\partial x} \\right|{x = x_i} = \\frac{1}{\\Delta x} \\left( f_{i} - f_{i-1} \\right) + {\\cal O} \\left( \\Delta x \\right),\n$$\nand the standard central difference approximations\n\\begin{align}\n \\left. \\frac{\\partial g_{CD}}{\\partial x} \\right|{x = x_i} & \\to \\left. \\frac{\\partial f}{\\partial x} \\right|{x = x_i} \\ & = \\frac{1}{2 \\, \\Delta x} \\left( f_{i+1} - f_{i-1} \\right) + {\\cal O} \\left( \\Delta x^2 \\right), \\\n \\left. \\frac{\\partial^2 g_{CD}}{\\partial x^2} \\right|{x = x_i} & \\to \\left. 
\\frac{\\partial^2 f}{\\partial x^2} \\right|{x = x_i} \\ & = \\frac{1}{\\left( \\Delta x \\right)^2} \\left( f_{i-1} - 2 f_i + f_{i+1} \\right) + {\\cal O} \\left( \\Delta x^2 \\right).\n\\end{align}\nTesting this in code\nWe'll use finite differencing repeatedly. To test our code we'll be testing the differencing. Let's check the above approximations applied to a simple function,\n$$\n f(x) = \\exp \\left[ x \\right].\n$$\nAll derivatives match the original function, which evaluated at $x=0$ gives $1$.\nFirst we write the functions, then we test them.",
"def backward_differencing(f, x_i, dx):\n \"\"\"\n Backward differencing of f at x_i with grid spacing dx.\n \"\"\"\n f_i = f(x_i)\n f_i_minus_1 = f(x_i - dx)\n \n return (f_i - f_i_minus_1) / dx\n\ndef forward_differencing(f, x_i, dx):\n \"\"\"\n Forward differencing of f at x_i with grid spacing dx.\n \"\"\"\n f_i = f(x_i)\n f_i_plus_1 = f(x_i + dx)\n \n return (f_i_plus_1 - f_i) / dx\n\ndef central_differencing(f, x_i, dx):\n \"\"\"\n Second order central differencing of f at x_i with grid spacing dx.\n \"\"\"\n f_i = f(x_i)\n f_i_minus_1 = f(x_i - dx)\n f_i_plus_1 = f(x_i + dx)\n \n first_derivative = (f_i_plus_1 - f_i_minus_1) / (2.0 * dx)\n second_derivative = (f_i_minus_1 - 2.0 * f_i + f_i_plus_1) / (dx**2)\n \n return first_derivative, second_derivative\n\nimport numpy\n\nbd = backward_differencing(numpy.exp, 0.0, dx=1.0)\nfd = forward_differencing(numpy.exp, 0.0, dx=1.0)\ncd1, cd2 = central_differencing(numpy.exp, 0.0, dx=1.0)\n\nprint(\"Backward difference should be 1, is {}, error {}\".format(bd, abs(bd - 1.0)))\nprint(\"Forward difference should be 1, is {}, error {}\".format(fd, abs(fd - 1.0)))\nprint(\"Central difference (1st derivative) should be 1, is {}, error {}\".format(cd1, abs(cd1 - 1.0)))\nprint(\"Central difference (2nd derivative) should be 1, is {}, error {}\".format(cd2, abs(cd2 - 1.0)))",
"The errors here are significant. What matters is how fast the errors reduce as we change the grid spacing. Try changing from $\\Delta x = 1$ to $\\Delta x = 0.1$:",
"bd = backward_differencing(numpy.exp, 0.0, dx=0.1)\nfd = forward_differencing(numpy.exp, 0.0, dx=0.1)\ncd1, cd2 = central_differencing(numpy.exp, 0.0, dx=0.1)\n\nprint(\"Backward difference should be 1, is {}, error {}\".format(bd, abs(bd - 1.0)))\nprint(\"Forward difference should be 1, is {}, error {}\".format(fd, abs(fd - 1.0)))\nprint(\"Central difference (1st derivative) should be 1, is {}, error {}\".format(cd1, abs(cd1 - 1.0)))\nprint(\"Central difference (2nd derivative) should be 1, is {}, error {}\".format(cd2, abs(cd2 - 1.0)))",
"We see roughly the expected scaling, with forward and backward differencing errors reducing by roughly $10$, and central differencing errors reducing by roughly $10^2$.\nConvergence\nThe feature that we always want to show is that the error $\\cal E$ reduces with the grid spacing $\\Delta x$. In particular, for most methods in Numerical Relativity, we expect a power law relationship:\n$$\n {\\cal E} \\propto \\left( \\Delta x \\right)^p.\n$$\nIf we can measure the error (by knowing the exact solution) then we can measure the convergence rate $p$, by using\n$$\n \\log \\left( {\\cal E} \\right) = p \\, \\log \\left( \\Delta x \\right) + \\text{constant}.\n$$\nThis is the slope of the best-fit straight line through the plot of the error against the grid spacing, on a logarithmic scale.\nIf we do not know the exact solution (the usual case), we can use self convergence to do the same measurement.\nWe check this for our finite differencing above.",
"from matplotlib import pyplot\n%matplotlib notebook\n\ndxs = numpy.logspace(-5, 0, 10)\nbd_errors = numpy.zeros_like(dxs)\nfd_errors = numpy.zeros_like(dxs)\ncd1_errors = numpy.zeros_like(dxs)\ncd2_errors = numpy.zeros_like(dxs)\n\nfor i, dx in enumerate(dxs):\n bd_errors[i] = abs(backward_differencing(numpy.exp, 0.0, dx) - 1.0)\n fd_errors[i] = abs(forward_differencing(numpy.exp, 0.0, dx) - 1.0)\n cd1, cd2 = central_differencing(numpy.exp, 0.0, dx)\n cd1_errors[i] = abs(cd1 - 1.0)\n cd2_errors[i] = abs(cd2 - 1.0)\n\npyplot.figure()\npyplot.loglog(dxs, bd_errors, 'kx', label='Backwards')\npyplot.loglog(dxs, fd_errors, 'b+', label='Forwards')\npyplot.loglog(dxs, cd1_errors, 'go', label='Central (1st)')\npyplot.loglog(dxs, cd2_errors, 'r^', label='Central (2nd)')\npyplot.loglog(dxs, dxs*(bd_errors[0]/dxs[0]), 'k-', label=r\"$p=1$\")\npyplot.loglog(dxs, dxs**2*(cd1_errors[0]/dxs[0]**2), 'k--', label=r\"$p=2$\")\npyplot.xlabel(r\"$\\Delta x$\")\npyplot.ylabel(\"Error\")\npyplot.legend(loc=\"lower right\")\npyplot.show()",
"Forwards and backwards differencing are converging at first order ($p=1$). Central differencing is converging at second order ($p=2$) until floating point effects start causing problems at small $\\Delta x$.\nExtension exercises\nHigher order\nShow, either by Taylor expansion, or by constructing the interpolating polynomial, that the fourth order central differencing approximations are\n\\begin{align}\n \\left. \\frac{\\partial f}{\\partial x} \\right|{x = x_i} & = \\frac{1}{12 \\, \\Delta x} \\left( -f{i+2} + 8 f_{i+1} - 8 f_{i-1} + f_{i-2} \\right) + {\\cal O} \\left( \\Delta x^4 \\right), \\\n \\left. \\frac{\\partial^2 f}{\\partial x^2} \\right|{x = x_i} & = \\frac{1}{12 \\left( \\Delta x \\right)^2} \\left( -f{i-2} + 16 f_{i-1} - 30 f_i + 16 f_{i+1} - f_{i+2} \\right) + {\\cal O} \\left( \\Delta x^4 \\right).\n\\end{align}\nMeasure the convergence rate\nUsing numpy.polyfit, directly measure the convergence rate for the algorithms above. Be careful to exclude points where finite differencing effects cause problems. Repeat the test for the fourth order formulas above.\nSelf convergence\nBy definition, the error ${\\cal E}(\\Delta x)$ is a function of the grid spacing, as is our numerical approximation of the thing we're trying to compute $F(\\Delta x)$ (above $F$ was the derivative of $f$, evaluated at $0$). This gives\n$$\n {\\cal E}(\\Delta x) = F \\left( \\Delta x \\right) - F \\left( 0 \\right)\n$$\nor\n$$\n F \\left( \\Delta x \\right) = F \\left( 0 \\right) + {\\cal E}(\\Delta x).\n$$\nOf course, $F(0)$ is the exact solution we're trying to compute. However, by subtracting any two approximations we can eliminate the exact solution. Using the power law dependence\n$$\n {\\cal E}(\\Delta x) = C \\left( \\Delta x \\right)^p\n$$\nthis gives\n$$\n F \\left( 2 \\Delta x \\right) - F \\left( \\Delta x \\right) = C \\left( \\Delta x \\right)^p \\left( 2^p - 1 \\right).\n$$\nWe still do not know the value of the constant $C$. 
However, we can use three approximations to eliminate it:\n$$\n \\frac{F \\left( 4 \\Delta x \\right) - F \\left( 2 \\Delta x \\right)}{F \\left( 2 \\Delta x \\right) - F \\left( \\Delta x \\right)} = \\frac{\\left( 4^p - 2^p \\right)}{\\left( 2^p - 1 \\right)} = 2^p.\n$$\nSo the self-convergence rate is\n$$\n p = \\log_2 \\left| \\frac{F \\left( 4 \\Delta x \\right) - F \\left( 2 \\Delta x \\right)}{F \\left( 2 \\Delta x \\right) - F \\left( \\Delta x \\right)} \\right|.\n$$\nCompute this self-convergence rate for all the cases above."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
kgrodzicki/machine-learning-specialization
|
course-2-regression/notebooks/week-3-polynomial-regression-assignment-blank.ipynb
|
mit
|
[
"Regression Week 3: Assessing Fit (polynomial regression)\nIn this notebook you will compare different regression models in order to assess which model fits best. We will be using polynomial regression as a means to examine this topic. In particular you will:\n* Write a function to take an SArray and a degree and return an SFrame where each column is the SArray to a polynomial value up to the total degree e.g. degree = 3 then column 1 is the SArray column 2 is the SArray squared and column 3 is the SArray cubed\n* Use matplotlib to visualize polynomial regressions\n* Use matplotlib to visualize the same polynomial degree on different subsets of the data\n* Use a validation set to select a polynomial degree\n* Assess the final fit using test data\nWe will continue to use the House data from previous notebooks.\nFire up graphlab create",
"import graphlab",
"Next we're going to write a polynomial function that takes an SArray and a maximal degree and returns an SFrame with columns containing the SArray to all the powers up to the maximal degree.\nThe easiest way to apply a power to an SArray is to use the .apply() and lambda x: functions. \nFor example to take the example array and compute the third power we can do as follows: (note running this cell the first time may take longer than expected since it loads graphlab)",
"tmp = graphlab.SArray([1., 2., 3.])\ntmp_cubed = tmp.apply(lambda x: x**3)\nprint tmp\nprint tmp_cubed",
"We can create an empty SFrame using graphlab.SFrame() and then add any columns to it with ex_sframe['column_name'] = value. For example we create an empty SFrame and make the column 'power_1' to be the first power of tmp (i.e. tmp itself).",
"ex_sframe = graphlab.SFrame()\nex_sframe['power_1'] = tmp\nprint ex_sframe",
"Polynomial_sframe function\nUsing the hints above complete the following function to create an SFrame consisting of the powers of an SArray up to a specific degree:",
"def polynomial_sframe(feature, degree):\n # assume that degree >= 1\n # initialize the SFrame:\n poly_sframe = graphlab.SFrame()\n # and set poly_sframe['power_1'] equal to the passed feature\n poly_sframe['power_1'] = feature\n\n # first check if degree > 1\n if degree > 1:\n # then loop over the remaining degrees:\n # range usually starts at 0 and stops at the endpoint-1. We want it to start at 2 and stop at degree\n for power in range(2, degree + 1): \n # first we'll give the column a name:\n name = 'power_' + str(power)\n # then assign poly_sframe[name] to the appropriate power of feature\n poly_sframe[name] = feature.apply(lambda x: x**power)\n return poly_sframe",
"To test your function consider the smaller tmp variable and what you would expect the outcome of the following call:",
"print polynomial_sframe(tmp, 3)",
"Visualizing polynomial regression\nLet's use matplotlib to visualize what a polynomial regression looks like on some real data.",
"sales = graphlab.SFrame('kc_house_data.gl/')",
"As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices.",
"sales = sales.sort(['sqft_living', 'price'])",
"Let's start with a degree 1 polynomial using 'sqft_living' (i.e. a line) to predict 'price' and plot what it looks like.",
"poly1_data = polynomial_sframe(sales['sqft_living'], 1)\npoly1_data['price'] = sales['price'] # add price to the data since it's the target",
"NOTE: for all the models in this notebook use validation_set = None to ensure that all results are consistent across users.",
"model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = ['power_1'], validation_set = None)\n\n#let's take a look at the weights before we plot\nmodel1.get(\"coefficients\")\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nplt.plot(poly1_data['power_1'], poly1_data['price'], '.', poly1_data['power_1'], model1.predict(poly1_data), '-')",
"Let's unpack that plt.plot() command. The first pair of SArrays we passed are the 1st power of sqft and the actual price we then ask it to print these as dots '.'. The next pair we pass is the 1st power of sqft and the predicted values from the linear model. We ask these to be plotted as a line '-'. \nWe can see, not surprisingly, that the predicted values all fall on a line, specifically the one with slope 280 and intercept -43579. What if we wanted to plot a second degree polynomial?",
"poly2_data = polynomial_sframe(sales['sqft_living'], 2)\nmy_features = poly2_data.column_names() # get the name of the features\npoly2_data['price'] = sales['price'] # add price to the data since it's the target\nmodel2 = graphlab.linear_regression.create(poly2_data, target = 'price', features = my_features, validation_set = None)\n\nmodel2.get(\"coefficients\")\n\nplt.plot(poly2_data['power_1'], poly2_data['price'], '.', poly2_data['power_1'], model2.predict(poly2_data), '-')",
"The resulting model looks like half a parabola. Try on your own to see what the cubic looks like:",
"poly3_data = polynomial_sframe(sales['sqft_living'], 3)\nmy_features = poly3_data.column_names()\npoly3_data['price'] = sales['price']\nmodel3 = graphlab.linear_regression.create(poly3_data, target = 'price', features = my_features, validation_set = None)\n\nplt.plot(poly3_data['power_1'], poly3_data['price'], '.', poly3_data['power_1'], model3.predict(poly3_data), '-')",
"Now try a 15th degree polynomial:",
"poly15_data = polynomial_sframe(sales['sqft_living'], 15)\nmy_features = poly15_data.column_names()\npoly15_data['price'] = sales['price']\nmodel15 = graphlab.linear_regression.create(poly15_data, target = 'price', features = my_features, validation_set = None)\n\nmodel15.get(\"coefficients\")\n\nplt.plot(poly15_data['power_1'], poly15_data['price'], '.', poly15_data['power_1'], model5.predict(poly15_data), '-')",
"What do you think of the 15th degree polynomial? Do you think this is appropriate? If we were to change the data do you think you'd get pretty much the same curve? Let's take a look.\nChanging the data and re-learning\nWe're going to split the sales data into four subsets of roughly equal size. Then you will estimate a 15th degree polynomial model on all four subsets of the data. Print the coefficients (you should use .print_rows(num_rows = 16) to view all of them) and plot the resulting fit (as we did above). The quiz will ask you some questions about these results.\nTo split the sales data into four subsets, we perform the following steps:\n* First split sales into 2 subsets with .random_split(0.5, seed=0). \n* Next split the resulting subsets into 2 more subsets each. Use .random_split(0.5, seed=0).\nWe set seed=0 in these steps so that different users get consistent results.\nYou should end up with 4 subsets (set_1, set_2, set_3, set_4) of approximately equal size.",
"dtmp0,dtmp1 = sales.random_split(.5,seed=0)\nset_1,set_2 = dtmp0.random_split(.5,seed=0)\nset_3,set_4 = dtmp1.random_split(.5,seed=0)",
"Fit a 15th degree polynomial on set_1, set_2, set_3, and set_4 using sqft_living to predict prices. Print the coefficients and make a plot of the resulting model.",
"poly15_set_1_data = polynomial_sframe(set_1['sqft_living'], 15)\nmy_features_set_1 = poly15_set_1_data.column_names()\npoly15_set_1_data['price'] = set_1['price']\nset_1_model15 = graphlab.linear_regression.create(poly15_set_1_data, target = 'price', features = my_features_set_1, validation_set = None)\nset_1_model15.get(\"coefficients\").print_rows(num_rows = 16)\n\nplt.plot(poly15_set_1_data['power_1'], poly15_set_1_data['price'], '.', poly15_set_1_data['power_1'], set_1_model15.predict(poly15_set_1_data), '-')\n\npoly15_set_2_data = polynomial_sframe(set_2['sqft_living'], 15)\nmy_features_set_2 = poly15_set_2_data.column_names()\npoly15_set_2_data['price'] = set_2['price']\nset_2_model15 = graphlab.linear_regression.create(poly15_set_2_data, target = 'price', features = my_features_set_2, validation_set = None)\nset_2_model15.get(\"coefficients\").print_rows(num_rows = 16)\n\nplt.plot(poly15_set_2_data['power_1'], poly15_set_2_data['price'], '.', poly15_set_2_data['power_1'], set_2_model15.predict(poly15_set_2_data), '-')\n\npoly15_set_3_data = polynomial_sframe(set_3['sqft_living'], 15)\nmy_features_set_3 = poly15_set_3_data.column_names()\npoly15_set_3_data['price'] = set_3['price']\nset_3_model15 = graphlab.linear_regression.create(poly15_set_3_data, target = 'price', features = my_features_set_3, validation_set = None)\nset_3_model15.get(\"coefficients\").print_rows(num_rows = 16)\n\nplt.plot(poly15_set_3_data['power_1'], poly15_set_3_data['price'], '.', poly15_set_3_data['power_1'], set_3_model15.predict(poly15_set_3_data), '-')\n\npoly15_set_4_data = polynomial_sframe(set_4['sqft_living'], 15)\nmy_features_set_4 = poly15_set_4_data.column_names()\npoly15_set_4_data['price'] = set_4['price']\nset_4_model15 = graphlab.linear_regression.create(poly15_set_4_data, target = 'price', features = my_features_set_4, validation_set = None)\nset_4_model15.get(\"coefficients\").print_rows(num_rows = 16)\n\nplt.plot(poly15_set_4_data['power_1'], poly15_set_4_data['price'], '.', poly15_set_4_data['power_1'], set_4_model15.predict(poly15_set_4_data), '-')",
"Some questions you will be asked on your quiz:\nQuiz Question: Is the sign (positive or negative) for power_15 the same in all four models?\nQuiz Question: (True/False) the plotted fitted lines look the same in all four plots\nSelecting a Polynomial Degree\nWhenever we have a \"magic\" parameter like the degree of the polynomial there is one well-known way to select these parameters: validation set. (We will explore another approach in week 4).\nWe split the sales dataset 3-way into training set, test set, and validation set as follows:\n\nSplit our sales data into 2 sets: training_and_validation and testing. Use random_split(0.9, seed=1).\nFurther split our training data into two sets: training and validation. Use random_split(0.5, seed=1).\n\nAgain, we set seed=1 to obtain consistent results for different users.",
"training_and_validation, test_data = sales.random_split(0.9, seed=1)\ntrain_data, validation_data = training_and_validation.random_split(0.5, seed=1)",
"Next you should write a loop that does the following:\n* For degree in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] (to get this in python type range(1, 15+1))\n * Build an SFrame of polynomial data of train_data['sqft_living'] at the current degree\n * hint: my_features = poly_data.column_names() gives you a list e.g. ['power_1', 'power_2', 'power_3'] which you might find useful for graphlab.linear_regression.create( features = my_features)\n * Add train_data['price'] to the polynomial SFrame\n * Learn a polynomial regression model to sqft vs price with that degree on TRAIN data\n * Compute the RSS on VALIDATION data (here you will want to use .predict()) for that degree and you will need to make a polynmial SFrame using validation data.\n* Report which degree had the lowest RSS on validation data (remember python indexes from 0)\n(Note you can turn off the print out of linear_regression.create() with verbose = False)",
"result = dict()\nfor degree in range(1, 15 + 1):\n    ps = polynomial_sframe(train_data['sqft_living'], degree)\n    my_features = ps.column_names()\n    ps['price'] = train_data['price']\n    model = graphlab.linear_regression.create(ps, target = 'price', features = my_features, validation_set = None, verbose = False)\n    validation_pol = polynomial_sframe(validation_data['sqft_living'], degree)\n    predictions = model.predict(validation_pol)\n    residuals = validation_data['price'] - predictions\n    result[degree] = sum(residuals**2)\nprint \"Degree of polynomial:\", min(result, key=result.get)",
"Quiz Question: Which degree (1, 2, …, 15) had the lowest RSS on Validation data?\nNow that you have chosen the degree of your polynomial using validation data, compute the RSS of this model on TEST data. Report the RSS on your quiz.",
"ps = polynomial_sframe(train_data['sqft_living'], 6)\nmy_features = ps.column_names()\nps['price'] = train_data['price']\nmodel = graphlab.linear_regression.create(ps, target = 'price', features = my_features, validation_set = None, verbose = False)\ntest_pol = polynomial_sframe(test_data['sqft_living'], 6)\npredictions = model.predict(test_pol)\nresiduals = test_data['price'] - predictions\nRSS = sum(residuals**2)",
"Quiz Question: what is the RSS on TEST data for the model with the degree selected from Validation data? (Make sure you got the correct degree from the previous question)",
"print RSS"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
phobson/statsmodels
|
examples/notebooks/statespace_dfm_coincident.ipynb
|
bsd-3-clause
|
[
"Dynamic factors and coincident indices\nFactor models generally try to find a small number of unobserved \"factors\" that influence a substantial portion of the variation in a larger number of observed variables, and they are related to dimension-reduction techniques such as principal components analysis. Dynamic factor models explicitly model the transition dynamics of the unobserved factors, and so are often applied to time-series data.\nMacroeconomic coincident indices are designed to capture the common component of the \"business cycle\"; such a component is assumed to simultaneously affect many macroeconomic variables. Although the estimation and use of coincident indices (for example the Index of Coincident Economic Indicators) pre-dates dynamic factor models, in several influential papers Stock and Watson (1989, 1991) used a dynamic factor model to provide a theoretical foundation for them.\nBelow, we follow the treatment found in Kim and Nelson (1999), of the Stock and Watson (1991) model, to formulate a dynamic factor model, estimate its parameters via maximum likelihood, and create a coincident index.\nMacroeconomic data\nThe coincident index is created by considering the comovements in four macroeconomic variables (versions of these variables are available on FRED; the ID of the series used below is given in parentheses):\n\nIndustrial production (IPMAN)\nReal aggregate income (excluding transfer payments) (W875RX1)\nManufacturing and trade sales (CMRMTSPL)\nEmployees on non-farm payrolls (PAYEMS)\n\nIn all cases, the data is at the monthly frequency and has been seasonally adjusted; the time-frame considered is 1972 - 2005.",
"%matplotlib inline\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\n\nnp.set_printoptions(precision=4, suppress=True, linewidth=120)\n\nfrom pandas.io.data import DataReader\n\n# Get the datasets from FRED\nstart = '1979-01-01'\nend = '2014-12-01'\nindprod = DataReader('IPMAN', 'fred', start=start, end=end)\nincome = DataReader('W875RX1', 'fred', start=start, end=end)\nsales = DataReader('CMRMTSPL', 'fred', start=start, end=end)\nemp = DataReader('PAYEMS', 'fred', start=start, end=end)\n# dta = pd.concat((indprod, income, sales, emp), axis=1)\n# dta.columns = ['indprod', 'income', 'sales', 'emp']",
"Note: in a recent update on FRED (8/12/15) the time series CMRMTSPL was truncated to begin in 1997; this is probably a mistake due to the fact that CMRMTSPL is a spliced series, so the earlier period is from the series HMRMT and the latter period is defined by CMRMT.\nThis has since (02/11/16) been corrected, however the series could also be constructed by hand from HMRMT and CMRMT, as shown below (process taken from the notes in the Alfred xls file).",
"# HMRMT = DataReader('HMRMT', 'fred', start='1967-01-01', end=end)\n# CMRMT = DataReader('CMRMT', 'fred', start='1997-01-01', end=end)\n\n# HMRMT_growth = HMRMT.diff() / HMRMT.shift()\n# sales = pd.Series(np.zeros(emp.shape[0]), index=emp.index)\n\n# # Fill in the recent entries (1997 onwards)\n# sales[CMRMT.index] = CMRMT\n\n# # Backfill the previous entries (pre 1997)\n# idx = sales.ix[:'1997-01-01'].index\n# for t in range(len(idx)-1, 0, -1):\n# month = idx[t]\n# prev_month = idx[t-1]\n# sales.ix[prev_month] = sales.ix[month] / (1 + HMRMT_growth.ix[prev_month].values)\n\ndta = pd.concat((indprod, income, sales, emp), axis=1)\ndta.columns = ['indprod', 'income', 'sales', 'emp']\n\ndta.ix[:, 'indprod':'emp'].plot(subplots=True, layout=(2, 2), figsize=(15, 6));",
"Stock and Watson (1991) report that for their datasets, they could not reject the null hypothesis of a unit root in each series (so the series are integrated), but they did not find strong evidence that the series were co-integrated.\nAs a result, they suggest estimating the model using the first differences (of the logs) of the variables, demeaned and standardized.",
"# Create log-differenced series\ndta['dln_indprod'] = (np.log(dta.indprod)).diff() * 100\ndta['dln_income'] = (np.log(dta.income)).diff() * 100\ndta['dln_sales'] = (np.log(dta.sales)).diff() * 100\ndta['dln_emp'] = (np.log(dta.emp)).diff() * 100\n\n# De-mean and standardize\ndta['std_indprod'] = (dta['dln_indprod'] - dta['dln_indprod'].mean()) / dta['dln_indprod'].std()\ndta['std_income'] = (dta['dln_income'] - dta['dln_income'].mean()) / dta['dln_income'].std()\ndta['std_sales'] = (dta['dln_sales'] - dta['dln_sales'].mean()) / dta['dln_sales'].std()\ndta['std_emp'] = (dta['dln_emp'] - dta['dln_emp'].mean()) / dta['dln_emp'].std()",
"Dynamic factors\nA general dynamic factor model is written as:\n$$\n\\begin{align}\ny_t & = \\Lambda f_t + B x_t + u_t \\\nf_t & = A_1 f_{t-1} + \\dots + A_p f_{t-p} + \\eta_t \\qquad \\eta_t \\sim N(0, I)\\\nu_t & = C_1 u_{t-1} + \\dots + C_1 f_{t-q} + \\varepsilon_t \\qquad \\varepsilon_t \\sim N(0, \\Sigma)\n\\end{align}\n$$\nwhere $y_t$ are observed data, $f_t$ are the unobserved factors (evolving as a vector autoregression), $x_t$ are (optional) exogenous variables, and $u_t$ is the error, or \"idiosyncratic\", process ($u_t$ is also optionally allowed to be autocorrelated). The $\\Lambda$ matrix is often referred to as the matrix of \"factor loadings\". The variance of the factor error term is set to the identity matrix to ensure identification of the unobserved factors.\nThis model can be cast into state space form, and the unobserved factor estimated via the Kalman filter. The likelihood can be evaluated as a byproduct of the filtering recursions, and maximum likelihood estimation used to estimate the parameters.\nModel specification\nThe specific dynamic factor model in this application has 1 unobserved factor which is assumed to follow an AR(2) proces. The innovations $\\varepsilon_t$ are assumed to be independent (so that $\\Sigma$ is a diagonal matrix) and the error term associated with each equation, $u_{i,t}$ is assumed to follow an independent AR(2) process.\nThus the specification considered here is:\n$$\n\\begin{align}\ny_{i,t} & = \\lambda_i f_t + u_{i,t} \\\nu_{i,t} & = c_{i,1} u_{1,t-1} + c_{i,2} u_{i,t-2} + \\varepsilon_{i,t} \\qquad & \\varepsilon_{i,t} \\sim N(0, \\sigma_i^2) \\\nf_t & = a_1 f_{t-1} + a_2 f_{t-2} + \\eta_t \\qquad & \\eta_t \\sim N(0, I)\\\n\\end{align}\n$$\nwhere $i$ is one of: [indprod, income, sales, emp ].\nThis model can be formulated using the DynamicFactor model built-in to Statsmodels. 
In particular, we have the following specification:\n\nk_factors = 1 - (there is 1 unobserved factor)\nfactor_order = 2 - (it follows an AR(2) process)\nerror_var = False - (the errors evolve as independent AR processes rather than jointly as a VAR - note that this is the default option, so it is not specified below)\nerror_order = 2 - (the errors are autocorrelated of order 2: i.e. AR(2) processes)\nerror_cov_type = 'diagonal' - (the innovations are uncorrelated; this is again the default)\n\nOnce the model is created, the parameters can be estimated via maximum likelihood; this is done using the fit() method.\nNote: recall that we have de-meaned and standardized the data; this will be important in interpreting the results that follow.\nAside: in their empirical example, Kim and Nelson (1999) actually consider a slightly different model in which the employment variable is allowed to also depend on lagged values of the factor - this model does not fit into the built-in DynamicFactor class, but can be accomodated by using a subclass to implement the required new parameters and restrictions - see Appendix A, below.\nParameter estimation\nMultivariate models can have a relatively large number of parameters, and it may be difficult to escape from local minima to find the maximized likelihood. In an attempt to mitigate this problem, I perform an initial maximization step (from the model-defined starting paramters) using the modified Powell method available in Scipy (see the minimize documentation for more information). The resulting parameters are then used as starting parameters in the standard LBFGS optimization method.",
"# Get the endogenous data\nendog = dta.ix['1979-02-01':, 'std_indprod':'std_emp']\n\n# Create the model\nmod = sm.tsa.DynamicFactor(endog, k_factors=1, factor_order=2, error_order=2)\ninitial_res = mod.fit(method='powell', disp=False)\nres = mod.fit(initial_res.params)",
"Estimates\nOnce the model has been estimated, there are two components that we can use for analysis or inference:\n\nThe estimated parameters\nThe estimated factor\n\nParameters\nThe estimated parameters can be helpful in understanding the implications of the model, although in models with a larger number of observed variables and / or unobserved factors they can be difficult to interpret.\nOne reason for this difficulty is due to identification issues between the factor loadings and the unobserved factors. One easy-to-see identification issue is the sign of the loadings and the factors: an equivalent model to the one displayed below would result from reversing the signs of all factor loadings and the unobserved factor.\nHere, one of the easy-to-interpret implications in this model is the persistence of the unobserved factor: we find that it exhibits substantial persistence.",
"print(res.summary(separate_params=False))",
"Estimated factors\nWhile it can be useful to plot the unobserved factors, it is less useful here than one might think for two reasons:\n\nThe sign-related identification issue described above.\nSince the data was differenced, the estimated factor explains the variation in the differenced data, not the original data.\n\nIt is for these reasons that the coincident index is created (see below).\nWith these reservations, the unobserved factor is plotted below, along with the NBER indicators for US recessions. It appears that the factor is successful at picking up some degree of business cycle activity.",
"fig, ax = plt.subplots(figsize=(13,3))\n\n# Plot the factor\ndates = endog.index._mpl_repr()\nax.plot(dates, res.factors.filtered[0], label='Factor')\nax.legend()\n\n# Retrieve and also plot the NBER recession indicators\nrec = DataReader('USREC', 'fred', start=start, end=end)\nylim = ax.get_ylim()\nax.fill_between(dates[:-3], ylim[0], ylim[1], rec.values[:,0], facecolor='k', alpha=0.1);",
"Post-estimation\nAlthough here we will be able to interpret the results of the model by constructing the coincident index, there is a useful and generic approach for getting a sense for what is being captured by the estimated factor. By taking the estimated factors as given, regressing them (and a constant) each (one at a time) on each of the observed variables, and recording the coefficients of determination ($R^2$ values), we can get a sense of the variables for which each factor explains a substantial portion of the variance and the variables for which it does not.\nIn models with more variables and more factors, this can sometimes lend interpretation to the factors (for example sometimes one factor will load primarily on real variables and another on nominal variables).\nIn this model, with only four endogenous variables and one factor, it is easy to digest a simple table of the $R^2$ values, but in larger models it is not. For this reason, a bar plot is often employed; from the plot we can easily see that the factor explains most of the variation in industrial production index and a large portion of the variation in sales and employment, it is less helpful in explaining income.",
"res.plot_coefficients_of_determination(figsize=(8,2));",
"Coincident Index\nAs described above, the goal of this model was to create an interpretable series which could be used to understand the current status of the macroeconomy. This is what the coincident index is designed to do. It is constructed below. For readers interested in an explanation of the construction, see Kim and Nelson (1999) or Stock and Watson (1991).\nIn essence, what is done is to reconstruct the mean of the (differenced) factor. We will compare it to the coincident index published by the Federal Reserve Bank of Philadelphia (USPHCI on FRED).",
"usphci = DataReader('USPHCI', 'fred', start='1979-01-01', end='2014-12-01')['USPHCI']\nusphci.plot(figsize=(13,3));\n\ndusphci = usphci.diff()[1:].values\ndef compute_coincident_index(mod, res):\n # Estimate W(1)\n spec = res.specification\n design = mod.ssm['design']\n transition = mod.ssm['transition']\n ss_kalman_gain = res.filter_results.kalman_gain[:,:,-1]\n k_states = ss_kalman_gain.shape[0]\n\n W1 = np.linalg.inv(np.eye(k_states) - np.dot(\n np.eye(k_states) - np.dot(ss_kalman_gain, design),\n transition\n )).dot(ss_kalman_gain)[0]\n\n # Compute the factor mean vector\n factor_mean = np.dot(W1, dta.ix['1972-02-01':, 'dln_indprod':'dln_emp'].mean())\n \n # Normalize the factors\n factor = res.factors.filtered[0]\n factor *= np.std(usphci.diff()[1:]) / np.std(factor)\n\n # Compute the coincident index\n coincident_index = np.zeros(mod.nobs+1)\n # The initial value is arbitrary; here it is set to\n # facilitate comparison\n coincident_index[0] = usphci.iloc[0] * factor_mean / dusphci.mean()\n for t in range(0, mod.nobs):\n coincident_index[t+1] = coincident_index[t] + factor[t] + factor_mean\n \n # Attach dates\n coincident_index = pd.Series(coincident_index, index=dta.index).iloc[1:]\n \n # Normalize to use the same base year as USPHCI\n coincident_index *= (usphci.ix['1992-07-01'] / coincident_index.ix['1992-07-01'])\n \n return coincident_index",
"Below we plot the calculated coincident index along with the US recessions and the comparison coincident index USPHCI.",
"fig, ax = plt.subplots(figsize=(13,3))\n\n# Compute the index\ncoincident_index = compute_coincident_index(mod, res)\n\n# Plot the factor\ndates = endog.index._mpl_repr()\nax.plot(dates, coincident_index, label='Coincident index')\nax.plot(usphci.index._mpl_repr(), usphci, label='USPHCI')\nax.legend(loc='lower right')\n\n# Retrieve and also plot the NBER recession indicators\nylim = ax.get_ylim()\nax.fill_between(dates[:-3], ylim[0], ylim[1], rec.values[:,0], facecolor='k', alpha=0.1);",
"Appendix 1: Extending the dynamic factor model\nRecall that the previous specification was described by:\n$$\n\\begin{align}\ny_{i,t} & = \\lambda_i f_t + u_{i,t} \\\nu_{i,t} & = c_{i,1} u_{1,t-1} + c_{i,2} u_{i,t-2} + \\varepsilon_{i,t} \\qquad & \\varepsilon_{i,t} \\sim N(0, \\sigma_i^2) \\\nf_t & = a_1 f_{t-1} + a_2 f_{t-2} + \\eta_t \\qquad & \\eta_t \\sim N(0, I)\\\n\\end{align}\n$$\nWritten in state space form, the previous specification of the model had the following observation equation:\n$$\n\\begin{bmatrix}\ny_{\\text{indprod}, t} \\\ny_{\\text{income}, t} \\\ny_{\\text{sales}, t} \\\ny_{\\text{emp}, t} \\\n\\end{bmatrix} = \\begin{bmatrix}\n\\lambda_\\text{indprod} & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\n\\lambda_\\text{income} & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\\n\\lambda_\\text{sales} & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\\n\\lambda_\\text{emp} & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\\n\\end{bmatrix}\n\\begin{bmatrix}\nf_t \\\nf_{t-1} \\\nu_{\\text{indprod}, t} \\\nu_{\\text{income}, t} \\\nu_{\\text{sales}, t} \\\nu_{\\text{emp}, t} \\\nu_{\\text{indprod}, t-1} \\\nu_{\\text{income}, t-1} \\\nu_{\\text{sales}, t-1} \\\nu_{\\text{emp}, t-1} \\\n\\end{bmatrix}\n$$\nand transition equation:\n$$\n\\begin{bmatrix}\nf_t \\\nf_{t-1} \\\nu_{\\text{indprod}, t} \\\nu_{\\text{income}, t} \\\nu_{\\text{sales}, t} \\\nu_{\\text{emp}, t} \\\nu_{\\text{indprod}, t-1} \\\nu_{\\text{income}, t-1} \\\nu_{\\text{sales}, t-1} \\\nu_{\\text{emp}, t-1} \\\n\\end{bmatrix} = \\begin{bmatrix}\na_1 & a_2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\n1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\n0 & 0 & c_{\\text{indprod}, 1} & 0 & 0 & 0 & c_{\\text{indprod}, 2} & 0 & 0 & 0 \\\n0 & 0 & 0 & c_{\\text{income}, 1} & 0 & 0 & 0 & c_{\\text{income}, 2} & 0 & 0 \\\n0 & 0 & 0 & 0 & c_{\\text{sales}, 1} & 0 & 0 & 0 & c_{\\text{sales}, 2} & 0 \\\n0 & 0 & 0 & 0 & 0 & c_{\\text{emp}, 1} & 0 & 0 & 0 & c_{\\text{emp}, 2} \\\n0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\n0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 
0 \\\n0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\\n0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\\n\\end{bmatrix} \n\\begin{bmatrix}\nf_{t-1} \\\nf_{t-2} \\\nu_{\\text{indprod}, t-1} \\\nu_{\\text{income}, t-1} \\\nu_{\\text{sales}, t-1} \\\nu_{\\text{emp}, t-1} \\\nu_{\\text{indprod}, t-2} \\\nu_{\\text{income}, t-2} \\\nu_{\\text{sales}, t-2} \\\nu_{\\text{emp}, t-2} \\\n\\end{bmatrix}\n+ R \\begin{bmatrix}\n\\eta_t \\\n\\varepsilon_{t}\n\\end{bmatrix}\n$$\nthe DynamicFactor model handles setting up the state space representation and, in the DynamicFactor.update method, it fills in the fitted parameter values into the appropriate locations.\nThe extended specification is the same as in the previous example, except that we also want to allow employment to depend on lagged values of the factor. This creates a change to the $y_{\\text{emp},t}$ equation. Now we have:\n$$\n\\begin{align}\ny_{i,t} & = \\lambda_i f_t + u_{i,t} \\qquad & i \\in {\\text{indprod}, \\text{income}, \\text{sales} }\\\ny_{i,t} & = \\lambda_{i,0} f_t + \\lambda_{i,1} f_{t-1} + \\lambda_{i,2} f_{t-2} + \\lambda_{i,2} f_{t-3} + u_{i,t} \\qquad & i = \\text{emp} \\\nu_{i,t} & = c_{i,1} u_{i,t-1} + c_{i,2} u_{i,t-2} + \\varepsilon_{i,t} \\qquad & \\varepsilon_{i,t} \\sim N(0, \\sigma_i^2) \\\nf_t & = a_1 f_{t-1} + a_2 f_{t-2} + \\eta_t \\qquad & \\eta_t \\sim N(0, I)\\\n\\end{align}\n$$\nNow, the corresponding observation equation should look like the following:\n$$\n\\begin{bmatrix}\ny_{\\text{indprod}, t} \\\ny_{\\text{income}, t} \\\ny_{\\text{sales}, t} \\\ny_{\\text{emp}, t} \\\n\\end{bmatrix} = \\begin{bmatrix}\n\\lambda_\\text{indprod} & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\n\\lambda_\\text{income} & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\\n\\lambda_\\text{sales} & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\\n\\lambda_\\text{emp,1} & \\lambda_\\text{emp,2} & \\lambda_\\text{emp,3} & \\lambda_\\text{emp,4} & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\\n\\end{bmatrix}\n\\begin{bmatrix}\nf_t 
\\\nf_{t-1} \\\nf_{t-2} \\\nf_{t-3} \\\nu_{\\text{indprod}, t} \\\nu_{\\text{income}, t} \\\nu_{\\text{sales}, t} \\\nu_{\\text{emp}, t} \\\nu_{\\text{indprod}, t-1} \\\nu_{\\text{income}, t-1} \\\nu_{\\text{sales}, t-1} \\\nu_{\\text{emp}, t-1} \\\n\\end{bmatrix}\n$$\nNotice that we have introduced two new state variables, $f_{t-2}$ and $f_{t-3}$, which means we need to update the transition equation:\n$$\n\\begin{bmatrix}\nf_t \\\nf_{t-1} \\\nf_{t-2} \\\nf_{t-3} \\\nu_{\\text{indprod}, t} \\\nu_{\\text{income}, t} \\\nu_{\\text{sales}, t} \\\nu_{\\text{emp}, t} \\\nu_{\\text{indprod}, t-1} \\\nu_{\\text{income}, t-1} \\\nu_{\\text{sales}, t-1} \\\nu_{\\text{emp}, t-1} \\\n\\end{bmatrix} = \\begin{bmatrix}\na_1 & a_2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\n1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\n0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\n0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\n0 & 0 & 0 & 0 & c_{\\text{indprod}, 1} & 0 & 0 & 0 & c_{\\text{indprod}, 2} & 0 & 0 & 0 \\\n0 & 0 & 0 & 0 & 0 & c_{\\text{income}, 1} & 0 & 0 & 0 & c_{\\text{income}, 2} & 0 & 0 \\\n0 & 0 & 0 & 0 & 0 & 0 & c_{\\text{sales}, 1} & 0 & 0 & 0 & c_{\\text{sales}, 2} & 0 \\\n0 & 0 & 0 & 0 & 0 & 0 & 0 & c_{\\text{emp}, 1} & 0 & 0 & 0 & c_{\\text{emp}, 2} \\\n0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\n0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\\n0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\\n0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\\n\\end{bmatrix} \n\\begin{bmatrix}\nf_{t-1} \\\nf_{t-2} \\\nf_{t-3} \\\nf_{t-4} \\\nu_{\\text{indprod}, t-1} \\\nu_{\\text{income}, t-1} \\\nu_{\\text{sales}, t-1} \\\nu_{\\text{emp}, t-1} \\\nu_{\\text{indprod}, t-2} \\\nu_{\\text{income}, t-2} \\\nu_{\\text{sales}, t-2} \\\nu_{\\text{emp}, t-2} \\\n\\end{bmatrix}\n+ R \\begin{bmatrix}\n\\eta_t \\\n\\varepsilon_{t}\n\\end{bmatrix}\n$$\nThis model cannot be handled out-of-the-box by the DynamicFactor class, but it can be handled by creating a subclass when alters the 
state space representation in the appropriate way.\nFirst, notice that if we had set factor_order = 4, we would almost have what we wanted. In that case, the last line of the observation equation would be:\n$$\n\\begin{bmatrix}\n\\vdots \\\ny_{\\text{emp}, t} \\\n\\end{bmatrix} = \\begin{bmatrix}\n\\vdots & & & & & & & & & & & \\vdots \\\n\\lambda_\\text{emp,1} & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\\n\\end{bmatrix}\n\\begin{bmatrix}\nf_t \\\nf_{t-1} \\\nf_{t-2} \\\nf_{t-3} \\\n\\vdots\n\\end{bmatrix}\n$$\nand the first line of the transition equation would be:\n$$\n\\begin{bmatrix}\nf_t \\\n\\vdots\n\\end{bmatrix} = \\begin{bmatrix}\na_1 & a_2 & a_3 & a_4 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\n\\vdots & & & & & & & & & & & \\vdots \\\n\\end{bmatrix} \n\\begin{bmatrix}\nf_{t-1} \\\nf_{t-2} \\\nf_{t-3} \\\nf_{t-4} \\\n\\vdots\n\\end{bmatrix}\n+ R \\begin{bmatrix}\n\\eta_t \\\n\\varepsilon_{t}\n\\end{bmatrix}\n$$\nRelative to what we want, we have the following differences:\n\nIn the above situation, the $\\lambda_{\\text{emp}, j}$ are forced to be zero for $j > 0$, and we want them to be estimated as parameters.\nWe only want the factor to transition according to an AR(2), but under the above situation it is an AR(4).\n\nOur strategy will be to subclass DynamicFactor, and let it do most of the work (setting up the state space representation, etc.) where it assumes that factor_order = 4. The only things we will actually do in the subclass will be to fix those two issues.\nFirst, here is the full code of the subclass; it is discussed below. It is important to note at the outset that none of the methods defined below could have been omitted. In fact, the methods __init__, start_params, param_names, transform_params, untransform_params, and update form the core of all state space models in Statsmodels, not just the DynamicFactor class.",
"from statsmodels.tsa.statespace import tools\nclass ExtendedDFM(sm.tsa.DynamicFactor):\n def __init__(self, endog, **kwargs):\n # Setup the model as if we had a factor order of 4\n super(ExtendedDFM, self).__init__(\n endog, k_factors=1, factor_order=4, error_order=2,\n **kwargs)\n\n # Note: `self.parameters` is an ordered dict with the\n # keys corresponding to parameter types, and the values\n # the number of parameters of that type.\n # Add the new parameters\n self.parameters['new_loadings'] = 3\n\n # Cache a slice for the location of the 4 factor AR\n # parameters (a_1, ..., a_4) in the full parameter vector\n offset = (self.parameters['factor_loadings'] +\n self.parameters['exog'] +\n self.parameters['error_cov'])\n self._params_factor_ar = np.s_[offset:offset+2]\n self._params_factor_zero = np.s_[offset+2:offset+4]\n\n @property\n def start_params(self):\n # Add three new loading parameters to the end of the parameter\n # vector, initialized to zeros (for simplicity; they could\n # be initialized any way you like)\n return np.r_[super(ExtendedDFM, self).start_params, 0, 0, 0]\n \n @property\n def param_names(self):\n # Add the corresponding names for the new loading parameters\n # (the name can be anything you like)\n return super(ExtendedDFM, self).param_names + [\n 'loading.L%d.f1.%s' % (i, self.endog_names[3]) for i in range(1,4)]\n\n def transform_params(self, unconstrained):\n # Perform the typical DFM transformation (w/o the new parameters)\n constrained = super(ExtendedDFM, self).transform_params(\n unconstrained[:-3])\n\n # Redo the factor AR constraint, since we only want an AR(2),\n # and the previous constraint was for an AR(4)\n ar_params = unconstrained[self._params_factor_ar]\n constrained[self._params_factor_ar] = (\n tools.constrain_stationary_univariate(ar_params))\n\n # Return all the parameters\n return np.r_[constrained, unconstrained[-3:]]\n\n def untransform_params(self, constrained):\n # Perform the typical DFM untransformation (w/o 
the new parameters)\n unconstrained = super(ExtendedDFM, self).untransform_params(\n constrained[:-3])\n\n # Redo the factor AR unconstraint, since we only want an AR(2),\n # and the previous unconstraint was for an AR(4)\n ar_params = constrained[self._params_factor_ar]\n unconstrained[self._params_factor_ar] = (\n tools.unconstrain_stationary_univariate(ar_params))\n\n # Return all the parameters\n return np.r_[unconstrained, constrained[-3:]]\n\n def update(self, params, transformed=True):\n # Peform the transformation, if required\n if not transformed:\n params = self.transform_params(params)\n params[self._params_factor_zero] = 0\n \n # Now perform the usual DFM update, but exclude our new parameters\n super(ExtendedDFM, self).update(params[:-3], transformed=True)\n\n # Finally, set our new parameters in the design matrix\n self.ssm['design', 3, 1:4] = params[-3:]\n ",
"So what did we just do?\n__init__\nThe important step here was specifying the base dynamic factor model which we were operating with. In particular, as described above, we initialize with factor_order=4, even though we will only end up with an AR(2) model for the factor. We also performed some general setup-related tasks.\nstart_params\nstart_params are used as initial values in the optimizer. Since we are adding three new parameters, we need to pass those in. If we hadn't done this, the optimizer would use the default starting values, which would be three elements short.\nparam_names\nparam_names are used in a variety of places, but especially in the results class. Below we get a full result summary, which is only possible when all the parameters have associated names.\ntransform_params and untransform_params\nThe optimizer selects possibly parameter values in an unconstrained way. That's not usually desired (since variances can't be negative, for example), and transform_params is used to transform the unconstrained values used by the optimizer to constrained values appropriate to the model. Variances terms are typically squared (to force them to be positive), and AR lag coefficients are often constrained to lead to a stationary model. untransform_params is used for the reverse operation (and is important because starting parameters are usually specified in terms of values appropriate to the model, and we need to convert them to parameters appropriate to the optimizer before we can begin the optimization routine).\nEven though we don't need to transform or untransform our new parameters (the loadings can in theory take on any values), we still need to modify this function for two reasons:\n\nThe version in the DynamicFactor class is expecting 3 fewer parameters than we have now. At a minimum, we need to handle the three new parameters.\nThe version in the DynamicFactor class constrains the factor lag coefficients to be stationary as though it was an AR(4) model. 
Since we actually have an AR(2) model, we need to re-do the constraint. We also set the last two autoregressive coefficients to be zero here.\n\nupdate\nThe most important reason we need to specify a new update method is because we have three new parameters that we need to place into the state space formulation. In particular we let the parent DynamicFactor.update class handle placing all the parameters except the three new ones in to the state space representation, and then we put the last three in manually.",
"# Create the model\nextended_mod = ExtendedDFM(endog)\ninitial_extended_res = extended_mod.fit(maxiter=1000, disp=False)\nextended_res = extended_mod.fit(initial_extended_res.params, method='nm', maxiter=1000)\nprint(extended_res.summary(separate_params=False))",
"Although this model increases the likelihood, it is not preferred by the AIC and BIC mesaures which penalize the additional three parameters.\nFurthermore, the qualitative results are unchanged, as we can see from the updated $R^2$ chart and the new coincident index, both of which are practically identical to the previous results.",
"extended_res.plot_coefficients_of_determination(figsize=(8,2));\n\nfig, ax = plt.subplots(figsize=(13,3))\n\n# Compute the index\nextended_coincident_index = compute_coincident_index(extended_mod, extended_res)\n\n# Plot the factor\ndates = endog.index._mpl_repr()\nax.plot(dates, coincident_index, '-', linewidth=1, label='Basic model')\nax.plot(dates, extended_coincident_index, '--', linewidth=3, label='Extended model')\nax.plot(usphci.index._mpl_repr(), usphci, label='USPHCI')\nax.legend(loc='lower right')\nax.set(title='Coincident indices, comparison')\n\n# Retrieve and also plot the NBER recession indicators\nylim = ax.get_ylim()\nax.fill_between(dates[:-3], ylim[0], ylim[1], rec.values[:,0], facecolor='k', alpha=0.1);"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
wzxiong/DAVIS-Machine-Learning
|
labs/lab3-soln.ipynb
|
mit
|
[
"The Lasso\nModified from the github repo: https://github.com/JWarmenhoven/ISLR-python which is based on the book by James et al. Intro to Statistical Learning.",
"# %load ../standard_import.txt\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import scale \nfrom sklearn.model_selection import LeaveOneOut\nfrom sklearn.linear_model import LinearRegression, lars_path, Lasso, LassoCV\n\n%matplotlib inline\n\nn=100\np=1000\nX = np.random.randn(n,p)\nX = scale(X)\n\nsprob = 0.02\nSbool = np.random.rand(p) < sprob\ns = np.sum(Sbool)\nprint(\"Number of non-zero's: {}\".format(s))\n\nmu = 100.\nbeta = np.zeros(p)\nbeta[Sbool] = mu * np.random.randn(s)\n\neps = np.random.randn(n)\ny = X.dot(beta) + eps\n\nlarper = lars_path(X,y,method=\"lasso\")\n\nS = set(np.where(Sbool)[0])\n\nfor j in S:\n _ = plt.plot(larper[0],larper[2][j,:],'r')\nfor j in set(range(p)) - S:\n _ = plt.plot(larper[0],larper[2][j,:],'k',linewidth=.5)\n_ = plt.title('Lasso path for simulated data')\n_ = plt.xlabel('lambda')\n_ = plt.ylabel('Coef')",
"Hitters dataset\nLet's load the dataset from the previous lab.",
"# In R, I exported the dataset from package 'ISLR' to a csv file.\ndf = pd.read_csv('../data/Hitters.csv', index_col=0).dropna()\ndf.index.name = 'Player'\ndf.info()\n\ndf.head()\n\ndummies = pd.get_dummies(df[['League', 'Division', 'NewLeague']])\ndummies.info()\nprint(dummies.head())\n\ny = df.Salary\n\n# Drop the column with the independent variable (Salary), and columns for which we created dummy variables\nX_ = df.drop(['Salary', 'League', 'Division', 'NewLeague'], axis=1).astype('float64')\n# Define the feature set X.\nX = pd.concat([X_, dummies[['League_N', 'Division_W', 'NewLeague_N']]], axis=1)\nX.info()\n\nX.head(5)",
"Exercise Compare the previous methods to the Lasso on this dataset. Tune $\\lambda$ and compare the LOO risk to other methods (ridge, forward selection, etc.)\nThe following is a fast implementation of the lasso path cross-validated using LOO.",
"loo = LeaveOneOut()\nlooiter = loo.split(X)\nhitlasso = LassoCV(cv=looiter)\nhitlasso.fit(X,y)\n\nprint(\"The selected lambda value is {:.2f}\".format(hitlasso.alpha_))",
"The following is the fitted coefficient vector for this chosen lambda.",
"hitlasso.coef_\n\nnp.mean(hitlasso.mse_path_[hitlasso.alphas_ == hitlasso.alpha_])",
"The above is the MSE for the selected model. The best performance for ridge regression was roughly 120,000, so this does not outperform ridge. We can also compare this to the selected model from forward stagewise regression:\n[-0.21830515, 0.38154135, 0. , 0. , 0. ,\n 0.16139123, 0. , 0. , 0. , 0. ,\n 0.09994524, 0.56696569, -0.16872682, 0.16924078, 0. ,\n 0. , 0. , -0.19429699, 0. ]\nThis is not exactly the same model with differences in the inclusion or exclusion of AtBat, HmRun, Runs, RBI, Years, CHmRun, Errors, League_N, Division_W, NewLeague_N",
"bforw = [-0.21830515, 0.38154135, 0. , 0. , 0. ,\n 0.16139123, 0. , 0. , 0. , 0. ,\n 0.09994524, 0.56696569, -0.16872682, 0.16924078, 0. ,\n 0. , 0. , -0.19429699, 0. ]\n\nprint(\", \".join(X.columns[(hitlasso.coef_ != 0.) != (bforw != 0.)]))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
JoaoRodrigues/pypdb
|
demos/demos.ipynb
|
mit
|
[
"pypdb demos\nThis is a set of basic examples of the usage and outputs of the various individual functions included in. There are generally two types of functions:\n\nFunctions that perform searches and return lists of PDB IDs\nFunctions that get information about specific PDB IDs\n\nThe list of supported search types, as well as the different types of information that can be returned for a given PDB ID, is large (and growing) and is enumerated completely in the docstrings of pypdb.py. The PDB allows a very wide range of different types of queries, and so any option that is not currently available can likely be implemented pretty easily based on the structure of the query types that have already been implemented. I appreciate any feedback and pull requests.\nAnother notebook in this directory, advanced_demos.ipynb, includes more in-depth usages of multiple functions, including the tutorial on graphing the popularity of CRISPR that was originally included in this notebook\nPreamble",
"%pylab inline\nfrom IPython.display import HTML\n\nfrom pypdb.pypdb import *\n\nimport pprint",
"1. Search functions that return lists of PDB IDs\nGet a list of PDBs for a specific search term",
"search_dict = make_query('actin network')\nfound_pdbs = do_search(search_dict)\nprint(found_pdbs)",
"Search by a specific modified structure",
"search_dict = make_query('3W3D',querytype='ModifiedStructuresQuery')\nfound_pdbs = do_search(search_dict)\nprint(found_pdbs)",
"Search by Author",
"search_dict = make_query('Perutz, M.F.',querytype='AdvancedAuthorQuery')\nfound_pdbs = do_search(search_dict)\nprint(found_pdbs)",
"Search by Motif",
"search_dict = make_query('T[AG]AGGY',querytype='MotifQuery')\nfound_pdbs = do_search(search_dict)\nprint(found_pdbs)",
"Search by a specific experimental method",
"search_dict = make_query('SOLID-STATE NMR',querytype='ExpTypeQuery')\nfound_pdbs = do_search(search_dict)\nprint(found_pdbs)",
"Search by whether it has free ligands",
"search_dict = make_query('', querytype='NoLigandQuery')\nfound_pdbs = do_search(search_dict)\nprint(found_pdbs[:10])",
"Search by protein symmetry group",
"kk = do_protsym_search('C9', min_rmsd=0.0, max_rmsd=1.0)\nprint(kk[:5])",
"Information Search functions\nWhile the basic functions described in the previous section are useful for looking up and manipulating individual unique entries, these functions are intended to be more user-facing: they take search keywords and return lists of authors or dates\nFind most common authors for a given keyword",
"top_authors = find_authors('crispr',max_results=100)\npprint.pprint(top_authors[:5])",
"Find papers for a given keyword",
"matching_papers = find_papers('crispr',max_results=3)\npprint.pprint(matching_papers)",
"2. Functions that return information about single PDB entries\nGet the full PDB file",
"pdb_file = get_pdb_file('4lza', filetype='cif', compression=True)\nprint(pdb_file[:200])",
"Get a general description of the entry's metadata",
"describe_pdb('4lza')",
"Get all of the information deposited in a PDB entry",
"all_info = get_all_info('4lza')\nprint(all_info)\n\nresults = get_all_info('2F5N')\nfirst_polymer = results['polymer'][0]\nfirst_polymer['polymerDescription']",
"Run a BLAST search on an entry\nThere are several options here: One function, get_blast(), returns a dict() just like every other function. However, all the metadata associated with this function leads to deeply-nested dictionaries. A simpler function, get_blast2(), uses text parsing on the raw output page, and it returns a tuple consisting of 1. a ranked list of other PDB IDs that were hits, and 2. A list of the actual BLAST alignments and similarity scores.",
"blast_results = get_blast('2F5N', chain_id='A')\njust_hits = blast_results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']\nprint(just_hits[50]['Hit_hsps']['Hsp']['Hsp_hseq'])\n\nblast_results = get_blast2('2F5N', chain_id='A', output_form='HTML')\nprint('Total Results: ' + str(len(blast_results[0])) +'\\n')\npprint.pprint(blast_results[1][0])",
"Get PFAM information about an entry",
"pfam_info = get_pfam('2LME')\nprint(pfam_info)",
"Get chemical info\nThis function takes the name of the chemical, not a PDB ID",
"chem_desc = describe_chemical('NAG')\npprint.pprint(chem_desc)",
"Get ligand info if present",
"ligand_dict = get_ligands('100D')\npprint.pprint(ligand_dict)",
"Get gene ontology info",
"gene_info = get_gene_onto('4Z0L ')\npprint.pprint(gene_info['term'][0])",
"Get sequence clusters by chain",
"sclust = get_seq_cluster('2F5N.A')\npprint.pprint(sclust['pdbChain'][:10]) # Just look at the top 10",
"Get the representative for a chain",
"clusts = get_clusters('4hhb.A')\nprint(clusts)",
"List all taxa associated with a list of IDs",
"crispr_query = make_query('crispr')\ncrispr_results = do_search(crispr_query)\npprint.pprint(list_taxa(crispr_results[:10]))",
"List data types with a list of IDs",
"crispr_query = make_query('crispr')\ncrispr_results = do_search(crispr_query)\npprint.pprint(list_types(crispr_results[:5]))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ray-project/ray
|
doc/source/_templates/template.ipynb
|
apache-2.0
|
[
"(document-tag-to-refer-to)=\nCreating an Example\nThis is an example template file for writing Jupyter Notebooks in markdown, using MyST.\nFor more information on MyST notebooks, see the\nMyST-NB documentation.\nIf you want to learn more about the MyST parser, see the\nMyST documentation.\nMyST is common markdown compliant, so if you can use plain markdown here.\nIn case you need to execute restructured text (rSt) directives, you can use {eval-rst} to execute the code.\nFor instance, a here's a note written in rSt:\n```{eval-rst}\n.. note::\nA note written in reStructuredText.\n```\n{margin}\nYou can create margins with this syntax for smaller notes that don't make it into the main\ntext.\nYou can also easily define footnotes.[^example]\n[^example]: This is a footnote.\nAdding code cells",
"import ray\nimport ray.rllib.agents.ppo as ppo\nfrom ray import serve\n\ndef train_ppo_model():\n trainer = ppo.PPOTrainer(\n config={\"framework\": \"torch\", \"num_workers\": 0},\n env=\"CartPole-v0\",\n )\n # Train for one iteration\n trainer.train()\n trainer.save(\"/tmp/rllib_checkpoint\")\n return \"/tmp/rllib_checkpoint/checkpoint_000001/checkpoint-1\"\n\n\ncheckpoint_path = train_ppo_model()",
"Hiding and removing cells\nYou can hide cells, so that they will toggle when you click on the cell header.\nYou can use different :tags: like hide-cell, hide-input, or hide-output to hide cell content,\nand you can use remove-cell, remove-input, or remove-output to remove the cell completely when rendered.\nThose cells will still show up in the notebook itself, e.g. when you launch it in binder.",
"# This can be useful if you don't want to clutter the page with details.\n\nimport ray\nimport ray.rllib.agents.ppo as ppo\nfrom ray import serve",
":::{tip}\nHere's a quick tip.\n:::\n:::{note}\nAnd this is a note.\n:::\nThe following cell will be removed and not render:",
"ray.shutdown()",
"Equations\n\\begin{equation}\n\\frac {\\partial u}{\\partial x} + \\frac{\\partial v}{\\partial y} = - \\, \\frac{\\partial w}{\\partial z}\n\\end{equation}\n\\begin{align}\n2x - 5y &= 8 \\\n3x + 9y &= -12\n\\end{align}"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
AssembleSoftware/IoTPy
|
examples/ExamplesOfIncrementalPCA.ipynb
|
bsd-3-clause
|
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom sklearn.datasets import load_iris\nfrom sklearn.decomposition import IncrementalPCA\n\niris = load_iris()\niris_data = iris.data\niris_target = iris.target\niris_names = iris.target_names",
"Information about sklearn datasets iris\n<b>iris_data</b> is a numpy.ndarray with N rows and 4 columns.\nEach row represents a different instance of a type of Iris flower. The instances are drawn from three types of Irises: Setosa, Versicolour, and Virginica.\nThe four columns are features of each flower such as sepal length and width and petal length and width.\nSee:\nhttps://scikitlearn.org/stable/auto_examples/datasets/plot_iris_dataset.html\n<br>\n<br>\n<b>iris_target</b> is a 1D numpy.ndarray. It has N elements where iris_target[j] is the type of Iris for the flower described in iris_data[j]. In this example N is 150.\n<br>\n<br>\n<b>iris_names</b> is a list of the names of the irises.\n<b>We first take a random permutation</b> of the data and target because the given data is aranged in order of target.",
"permutation = np.random.permutation(len(iris_target))\niris_data = np.take(iris_data, permutation, axis=0)\niris_target = np.take(iris_target, permutation)\n\n# Function to plot data\ndef plot_data(colors, names, data, target):\n plt.figure(figsize=(8, 8))\n N = len(names)\n for color, i, target_name in zip(colors, list(range(N)), names):\n plt.scatter(data[target == i, 0], \n data[target == i, 1],\n color=color, lw=2, label=target_name)\n plt.title(\"Incremental PCA of iris dataset\")\n plt.show()",
"Create an incremental PCA object\n<b>n_components</b> is the number of components of the transformed data. In this case, with <i>n_components</i> = 2, the 4-dimensional given data is projected onto a 2-dimensional plane.",
"ipca = IncrementalPCA(n_components=2)\n# Colors used in the plot. Each color represents a target.\ncolors = ['navy', 'turquoise', 'darkorange']\nWINDOW_SIZE = 50\n\nfor step_number in range(3):\n data = iris_data[\n WINDOW_SIZE*step_number: WINDOW_SIZE*(step_number+1), :]\n iris_data_principal_components = ipca.partial_fit(data)\n transformed_data = iris_data_principal_components.transform(\n iris_data[:WINDOW_SIZE*(step_number+1)])\n target = iris_target[:WINDOW_SIZE*(step_number+1)]\n plot_data(colors, iris_names, transformed_data, target)",
"IoTPy\nUse a <b>sliding window</b> to see the effect of incremental data.",
"import sys\nsys.path.append(\"../\")\nfrom IoTPy.core.stream import Stream, StreamArray, run\nfrom IoTPy.agent_types.merge import merge_window, zip_stream\nfrom IoTPy.core.helper_control import _multivalue\nfrom IoTPy.helper_functions.recent_values import recent_values\n\nipca = IncrementalPCA(n_components=2)\ndata_stream = StreamArray(name='data_stream', dimension=4, dtype=float)\ntarget_stream = StreamArray(name='target_stream', dtype=int)\ntransformed_stream = StreamArray(name='transformed_stream', \n dimension=2, dtype=float)\n\ndef f(windows, state):\n data, target = windows\n saved_data, saved_target = state\n iris_data_principal_components = ipca.partial_fit(data)\n saved_data = (data if saved_data is None\n else np.concatenate((saved_data, data), axis=0))\n saved_target = (target if saved_target is None\n else np.concatenate((saved_target, target), axis=0))\n next_state = saved_data, saved_target\n transformed_data = iris_data_principal_components.transform(\n saved_data)\n output_data = iris_data_principal_components.transform(data)\n plot_data(colors, iris_names, transformed_data, saved_target)\n return _multivalue(output_data), next_state\n \n\nmerge_window(func=f, in_streams=[data_stream, target_stream],\n out_stream=transformed_stream, state=(None, None),\n window_size=50, step_size=50)\n#file_stream = StreamArray(name='file_stream', dimension=3, dtype=float)\nfile_stream = Stream(name='file_stream')\nzip_stream([transformed_stream, target_stream], file_stream)\ndata_stream.extend(iris_data)\ntarget_stream.extend(iris_target)\nrun()\nprint (recent_values(file_stream))"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
jastarex/DeepLearningCourseCodes
|
01_TF_basics_and_linear_regression/mnist_data_introduction_tf.ipynb
|
apache-2.0
|
[
"MNIST 数据集 简介\n作为最经典的深度学习数据集,MNISIT包含65,000个灰度书写数字图片,尺寸均为28x28,其中55,000个用于训练,10,000个用于测试\n所有图片已归一化与中心化,像素值从0到1。\n使用\n我们使用TensorFlow的input_data函数进行数据准备与输入,它能帮助:\n* 自动下载数据\n* 将数据集load成numpy array的形式",
"from __future__ import print_function\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"./mnist/\", one_hot=True)\n\n# Load data\nx_train = mnist.train.images\ny_train = mnist.train.labels\nx_test = mnist.test.images\ny_test = mnist.test.labels",
"数据维度\n每张图片维度为784 (28x28x1)",
"print(\"x_train: \", x_train.shape)\nprint(\"y_train: \", y_train.shape)\nprint(\"x_test: \", x_test.shape)\nprint(\"y_test: \", y_test.shape)",
"数据可视化\n使用matplotlib可视化MNIST:",
"import matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\n\ndef plot_mnist(data, classes):\n \n for i in range(10):\n idxs = (classes == i)\n \n # get 10 images for class i\n images = data[idxs][0:10]\n \n for j in range(5): \n plt.subplot(5, 10, i + j*10 + 1)\n plt.imshow(images[j].reshape(28, 28), cmap='gray')\n # print a title only once for each class\n if j == 0:\n plt.title(i)\n plt.axis('off')\n plt.show()\n\nclasses = np.argmax(y_train, 1)\nplot_mnist(x_train, classes)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
benbovy/cosmogenic_dating
|
emcee_test_3params_b.ipynb
|
mit
|
[
"Bayesian approach with emcee - Test case - 3 free parameters\nAn example of applying the Bayesian approach with 3 free parameters (erosion rate, time exposure and inheritance), using the emcee package.\nFor more info about the method used, see the notebook Inference_Notes.\nThis example (a test case) is based on a generic dataset of 10Be concentration vs. depth, which is drawn from a distribution with given \"true\" parameters.\nThis notebook has the following external dependencies:",
"import math\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom scipy import optimize\nimport emcee\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n%matplotlib inline\nclr_plt = sns.color_palette()",
"The mathematical (deterministic, forward) model\nAn implementation of the mathematical model used for predicting profiles of 10Be concentrations is available in the models Python module (see the notebook Models). The 10Be model assumes that the soil density is constant along the depth profile and that the inheritance is the same for the whole sample of 10Be concentration vs. depth.",
"import models",
"The data\nThe dataset is generated using the following parameter values. eps is the erosion rate, t is the exposure time, rho is the soil density and inh is the inheritance.",
"# the true parameters \neps_true = 5e-4\nt_true = 3e5\nrho_true = 2.\ninh_true = 5e4\n\n# depths and sample size\ndepth_minmax = [50, 500]\nN = 8\n\n# perturbations\nerr_magnitude = 20.\nerr_variability = 5.",
"The gendata Python module is used to generate the dataset (see the notebook Datasets).",
"import gendata\n\nprofile_data = gendata.generate_dataset(\n models.C_10Be,\n (eps_true, t_true, rho_true, inh_true),\n zlimits=depth_minmax,\n n=N,\n err=(err_magnitude, err_variability)\n)",
"Make a plot of the dataset",
"sns.set_context('notebook')\n\nfig, ax = plt.subplots()\n\nprofile_data.plot(\n y='depth', x='C', xerr='std',\n kind=\"scatter\", ax=ax, rot=45\n)\n\nax.invert_yaxis()",
"The statistical model used for computing the posterior probability density PPD\nHere below we define a data model by the tuple m = (eps, t, inh). It correspond to a given location in the 3-d parameter space. The soil density is assumed known.\n\nDefine the parameter names. It is important to use the same order to further define the priors and bounds tuples!",
"param_names = 'erosion rate', 'time exposure', 'inheritance'",
"Create a pd.Series with the true parameter values. It will be used for plotting purpose.",
"param_true = pd.Series((eps_true, t_true, inh_true), index=param_names)",
"Define the prior probability distribution for each free parameter. Here the uniform distribution is used, with given bounds (loc and scale arguments of scipy.stats.uniform are the lower bound and the range, respectively)",
"eps_prior = stats.uniform(loc=0., scale=1e-3)\nt_prior = stats.uniform(loc=0., scale=8e5)\ninh_prior = stats.uniform(loc=0., scale=1.5e5)\n\npriors = eps_prior, t_prior, inh_prior\nparam_priors = pd.Series(priors, index=param_names)",
"Define (min, max) bounds for each free parameter. It should be given by lower and upper quantiles (lower_qtl, upper_qtl) of the prior distribution. Choose the extreme quantiles (0, 1) if the distribution is uniform. It will be used for plotting purpose and also for constrained optimization (see below).",
"def get_bounds(f, lower_qtl=0., upper_qtl=1.):\n return f.ppf(lower_qtl), f.ppf(upper_qtl)\n\neps_bounds = get_bounds(eps_prior, 0, 1)\nt_bounds = get_bounds(t_prior, 0, 1)\ninh_bounds = get_bounds(inh_prior, 0, 1)\n\nbounds = eps_bounds, t_bounds, inh_bounds\nparam_bounds = pd.DataFrame(\n np.array(bounds), columns=('min', 'max'), index=param_names\n)\n\nparam_bounds",
"Plot the prior probability density for each parameter.",
"fig, axes = plt.subplots(1, 3, figsize=(13, 3))\n\nfor ax, p, b, name in zip(axes.flatten(),\n param_priors.values,\n param_bounds.values,\n param_names):\n xmin, xmax = b\n eps = 0.1 * (xmax - xmin)\n x = np.linspace(xmin - eps, xmax + eps, 200)\n d = p.pdf(x)\n ax.plot(x, d)\n ax.fill(x, d, alpha=0.4)\n plt.setp(ax.xaxis.get_majorticklabels(), rotation=45)\n plt.setp(ax, ylim=(0, None), yticklabels=[],\n xlabel=name)\n\nplt.subplots_adjust()",
"Define a function that returns the (logarithm of the) prior probability density for a given data model m.",
"def lnprior(m):\n lps = [p.logpdf(v) for (p, v) in zip(priors, m)]\n if not np.all(np.isfinite(lps)):\n return -np.inf\n return np.sum(lps)",
"Define a function that returns the log-likelihood. It is a $n$-dimensional Gaussian ($n$ nucleide concentrations sampled along the depth profile) with the mean given by the formard model and the variance given by the error estimated from the measurements of the nucleide concentration of each sample. This Gaussian implies that (1) the error on each measurement is random, (2) the sampled nucleide concentrations are measured independently of each other, (3) the forward model - i.e., the deterministic model that predicts the nucleide concentration profile - represents the real physics and (4) the values of the non-free parameters of the forward model - e.g., nucleide surface production rate, attenuation lengths... - are exactly known.",
"def lnlike(m):\n eps, t, inh = m\n \n mean = models.C_10Be(profile_data['depth'].values,\n eps, t, rho_true, inh)\n var = profile_data['std']**2\n \n lngauss = -0.5 * np.sum(\n np.log(2. * np.pi * var) +\n (profile_data['C'] - mean)**2 / var\n ) \n \n return lngauss",
"Define a function that returns the log-posterior probability density, according to the Bayes's theorem.",
"def lnprob(m):\n lp = lnprior(m)\n if not np.isfinite(lp):\n return -np.inf\n return lp + lnlike(m)",
"Sampling the posterior probablility density using MCMC\nIn our case, the from of the PPD may be highly anisotropic ; it may present high (negative or positive) correlations between its parameters (erosion rate, exposure time, soil density, inheritance). Usually, these relationships are even non-linear.\nIt is therefore important to use a robust algorithm to sample this complex PPD. The Affine Invariant Markov chain Monte Carlo (MCMC) Ensemble sampler implemented in the emcee package will be more efficient in our case than the standard MCMC algorithms such as the Metropolis-Hasting method.\nThe emcee sampler allows to define multiple, independent walkers. This requires to first set the initial position of each walker in the parameter space. As shown in the emcee documentation, the author suggests initializing the walkers in a tiny Gaussian ball around the maximum likelihood result. We can obtain the maximum likelihood estimate by applying an optimization algorithm such as one of those implemented in the scipy.optimize module. Note that non-linear optimization usually requires to provide an initial guess.\nGiven our complex, non-linear, and potentially flat form of the PDD in some areas of the parameter space, we prefer to set the initial positions of the walkers as the maximum likelihood estimates resulting from randomly chosing initial guesses in the parameter space according to the prior probability density. Note that we use a constrained optimization algorithm to ensure that the initial positions are within the bounds defined above.",
"n_params, n_walkers = len(param_names), 100\n\n# randomly choose initial guesses according to the prior\ninit_guesses = np.array(\n [p.rvs(size=n_walkers) for p in priors]\n).T\n\n# perform bounded non-linear optimization from each initial guess\nop_lnlike = lambda *args: -lnlike(*args)\ninit_walkers = np.empty_like(init_guesses)\n\nfor i, g in enumerate(init_guesses):\n res = optimize.minimize(op_lnlike, g,\n method='TNC',\n bounds=bounds)\n init_walkers[i] = res['x']\n",
"We show below the initial guesses and the initial positions of the walkers in a scatter plot.",
"df_init_guesses = pd.DataFrame(init_guesses, columns=param_names)\ndf_init_walkers = pd.DataFrame(init_walkers, columns=param_names)\n\ndef scatter_pos(xcol, ycol, ax):\n df_init_guesses.plot(\n kind='scatter', x=xcol, y=ycol,\n alpha=0.5, ax=ax, color=clr_plt[0], label='init guesses'\n )\n df_init_walkers.plot(\n kind='scatter', x=xcol, y=ycol,\n alpha=0.8, ax=ax, color=clr_plt[1], label='init walkers'\n )\n legend = ax.legend(frameon=True, loc='lower right')\n legend.get_frame().set_facecolor('w')\n plt.setp(ax, xlim=param_bounds.loc[xcol],\n ylim=param_bounds.loc[ycol])\n\nfig, ax = plt.subplots(2, 2, figsize=(12,12))\nscatter_pos('erosion rate', 'time exposure', ax[0][0])\nscatter_pos('inheritance', 'time exposure', ax[0][1])\nscatter_pos('erosion rate', 'inheritance', ax[1][0])",
"We can then setup the emcee sampler and run the MCMC for n_steps iterations starting from the initial positions defined above.",
"sampler = emcee.EnsembleSampler(n_walkers, n_params, lnprob)\n\nn_steps = 500\nsampler.run_mcmc(init_walkers, n_steps)\n\nmcmc_samples = pd.DataFrame(sampler.flatchain,\n columns=param_names)",
"Let's plot the trace of the MCMC iterations. The red lines show the true values.",
"sample_plot_range = slice(None)\n\naxes = mcmc_samples[sample_plot_range].plot(\n kind='line', subplots=True,\n figsize=(10, 8), color=clr_plt[0]\n)\n\nfor i, ax in enumerate(axes):\n ax.axhline(param_true.iloc[i], color='r')",
"Try plotting only the firsts samples (e.g., sample_plot_range = slice(0, 1000)). We see that thanks to the initial positions of the walkers, the emcee sampler quickly starts exploring the full posterior distribution. The “burn-in” period is small and we can therefore set a small value for nburn below.",
"nburn = 100\n\nmcmc_kept_samples = pd.DataFrame(\n sampler.chain[:, nburn:, :].reshape((-1, n_params)),\n columns=param_names\n)",
"We can visualize the sampled posterior propbability density by joint plots of the MCMC samples. The red lines show the true values.",
"def jointplot_density(xcol, ycol):\n p = sns.jointplot(\n xcol, ycol,\n data=mcmc_kept_samples,\n xlim=(mcmc_kept_samples[xcol].min(),\n mcmc_kept_samples[xcol].max()),\n ylim=(mcmc_kept_samples[ycol].min(),\n mcmc_kept_samples[ycol].max()),\n joint_kws={'alpha': 0.02}\n )\n p.ax_joint.axhline(param_true.loc[ycol], color='r')\n p.ax_joint.axvline(param_true.loc[xcol], color='r')\n\njointplot_density('erosion rate', 'time exposure')\njointplot_density('inheritance', 'time exposure')\njointplot_density('erosion rate', 'inheritance')",
"Given the samples, it is straightforward to characterize the posterior porbability density and estimate its moments.\n\nthe PPD mean (if the PPD distribution is strictly gaussian, it also correspond to the MAP (Maximum A-Posterori) and therefore the most probable model)",
"mcmc_kept_samples.mean()",
"the sample which have the max PPD value (i.e., the most probable sampled model)",
"max_ppd = sampler.lnprobability[:, nburn:].reshape((-1)).argmax()\nmcmc_kept_samples.iloc[max_ppd]",
"the PPD quantiles (useful for delineating the Bayesian confidence intervals or credible intervals for each free parameter)",
"percentiles = np.array([2.5, 5, 25, 50, 75, 95, 97.5])\nmcmc_kept_samples.quantile(percentiles * 0.01)",
"We finally plot the nucleide concentration profiles (blue dots: data w/ error bars, red line: true profile, grey lines: randomly chosen profiles from MCMC samples).",
"fig, ax = plt.subplots()\n\n# plot the profile data with error bars\nprofile_data.plot(\n y='depth', x='C', xerr='std',\n kind=\"scatter\", ax=ax, rot=45\n)\n\n# plot 50 randomly chosen profiles from MCMC samples\ndepths = np.linspace(profile_data['depth'].min(),\n profile_data['depth'].max(),\n 100)\n\nfor i in np.random.randint(len(mcmc_kept_samples), size=100):\n eps, t, inh = mcmc_kept_samples.iloc[i]\n c = models.C_10Be(depths, eps, t, rho_true, inh)\n ax.plot(c, depths, color='grey', alpha=0.1)\n\n# plot the true profile\nc_true = models.C_10Be(depths, eps_true, t_true,\n rho_true, inh_true)\nax.plot(c_true, depths, color='r', label='true model')\n\nax.invert_yaxis()",
"The plot shows here that the uncertainty on the fitted model parameters has only a small influence on the shape of the profile of nucleide concentration vs. depth. This illustrates the non-linearity of that dependence. \nInformation about this notebook\nAuthor: B. Bovy, Ulg\n<a href=\"http://creativecommons.org/licenses/by/4.0/\"><img alt=\"Creative Commons License\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by/4.0/88x31.png\" /></a><br />This work is licensed under a <a rel=\"license\" href=\"http://creativecommons.org/licenses/by/4.0/\">Creative Commons Attribution 4.0 International License</a>."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
linglaiyao1314/maths-with-python
|
02-programs.ipynb
|
mit
|
[
"Programs\nUsing the Python console to type in commands works fine, but has serious drawbacks. It doesn't save the work for the future. It doesn't allow the work to be re-used. It's frustrating to edit when you make a mistake, or want to make a small change. Instead, we want to write a program.\nA program is a text file containing Python commands. It can be written in any text editor. Something like the editor in spyder is ideal: it has additional features for helping you write code. However, any plain text editor will work. A program like Microsoft Word will not work, as it will try and save additional information with the file.\nLet us use a simple pair of Python commands:",
"import math\nx = math.sin(1.2)",
"Go to the editor in spyder and enter those commands in a file:\npython\nimport math\nx = math.sin(1.2)\nSave this file in a suitable location and with a suitable name, such as lab1_basic.py (the rules and conventions for filenames are similar to those for variable names laid out above: descriptive, lower case names without spaces). The file extension should be .py: spyder should add this automatically.\nTo run this program, either\n\npress the green \"play\" button in the toolbar;\npress the function key F5;\nselect \"Run\" from the \"Run\" menu.\n\nIn the console you should see a line like\nrunfile('/Users/ih3/PythonLabs/lab1_basic.py', wdir='/Users/ih3/PythonLabs')\nappear, and nothing else. To check that the program has worked, check the value of x. In the console just type x:",
"x",
"Also, in the top right of the spyder window, select the \"Variable explorer\" tab. It shows the variables that it currently knows, which should include x, its type (float) and its value.\nIf there are many variables known, you may worry that your earlier tests had already set the value for x and that the program did not actually do anything. To get back to a clean state, type %reset in the console to delete all variables - you will need to confirm that you want to do this. You can then re-run the program to test that it worked.\nUsing programs and modules\nIn previous sections we have imported and used standard Python libraries, packages or modules, such as math. This is one way of using a program, or code, that someone else has written. To do this for ourselves, we use exactly the same syntax.\nSuppose we have the file lab1_basic.py exactly as above. Write a second file containing the lines\npython\nimport lab1_basic\nprint(lab1_basic.x)\nSave this file, in the same directory as lab1_basic.py, say as lab1_import.py. When we run this program, the console should show something like\nrunfile('/Users/ih3/PythonLabs/lab1_import.py', wdir='/Users/ih3/PythonLabs')\n0.9320390859672263\nThis shows what the import statement is doing. All the library imports, definitions and operations in the imported program (lab1_basic) are performed. The results are then available to us, using the dot notation, via lab1_basic.<variable>, or lab1_basic.<function>.\nTo build up a program, we write Python commands into plain text files. When we want to use, or re-use, those definitions or results, we use import on the name of the file to recover their values.\nNote\nWe saved both files - the original lab1_basic.py, and the program that imported lab1_basic.py, in the same directory. If they were in different directories then Python would not know where to find the file it was trying to import, and would give an error. 
The solution to this is to create a package, which is rather more work.\nFunctions\nWe have already seen and used some functions, such as the log and sin functions from the math package. However, in programming, a function is more general; it is any set of commands that acts on some input parameters and returns some output.\nFunctions are central to effective programming, as they stop you from having to repeat yourself and reduce the chances of making a mistake. Defining and using your own functions is the next step.\nLet us write a function that converts angles from degrees to radians. The formula is\n\\begin{equation}\n \\theta_r = \\frac{\\pi}{180} \\theta_d,\n\\end{equation}\nwhere $\\theta_r$ is the angle in radians, and $\\theta_d$ is the angle in degrees. If we wanted to do this for, eg, $\\theta_d = 30^{\\circ}$, we could use the commands",
"from math import pi\ntheta_d = 30.0\ntheta_r = pi / 180.0 * theta_d\n\nprint(theta_r)",
"This is effective for a single angle. If we want to repeat this for many angles, we could copy and paste the code. However, this is dangerous. We could make a mistake in editing the code. We could find a mistake in our original code, and then have to remember to modify every location where we copied it to. Instead we want to have a single piece of code that performs an action, and use that piece of code without modification whenever needed.\nThis is summarized in the \"DRY\" principle: do not repeat yourself. Instead, convert the code into a function and use the function.\nWe will define the function and show that it works, then discuss how:",
"from math import pi\n\ndef degrees_to_radians(theta_d):\n \"\"\"\n Convert an angle from degrees to radians.\n \n Parameters\n ----------\n \n theta_d : float\n The angle in degrees.\n \n Returns\n -------\n \n theta_r : float\n The angle in radians.\n \"\"\"\n theta_r = pi / 180.0 * theta_d\n return theta_r",
"We check that it works by printing the result for multiple angles:",
"print(degrees_to_radians(30.0))\nprint(degrees_to_radians(60.0))\nprint(degrees_to_radians(90.0))",
"How does the function definition work? \nFirst we need to use the def command:\npython\ndef degrees_to_radians(theta_d):\nThis command effectively says \"what follows is a function\". The first word after def will be the name of the function, which can be used to call it later. This follows similar rules and conventions to variables and files (no spaces, lower case, words separated by underscores, etc.).\nAfter the function name, inside brackets, is the list of input parameters. If there are no input parameters the brackets still need to be there. If there is more than one parameter, they should be separated by commas.\nAfter the bracket there is a colon :. The use of colons to denote special \"blocks\" of code happens frequently in Python code, and we will see it again later.\nAfter the colon, all the code is indented by four spaces or one tab. Most helpful text editors, such as the spyder editor, will automatically indent the code after a function is defined. If not, use the tab key to ensure the indentation is correct. In Python, whitespace and indentation is essential: it defines where blocks of code (such as functions) start and end. In other languages special keywords or characters may be used, but in Python the indentation of the code statements is the key.\nThe statement on the next few lines is the function documentation, or docstring. \n```python\n \"\"\"\n Convert an angle from degrees to radians.\n...\n\"\"\"\n\n```\nThis is in principle optional: it's not needed to make the code run. However, documentation is extremely useful for the next user of the code. As the next user is likely to be you in a week (or a month), when you'll have forgotten the details of what you did, documentation helps you first. In reality, you should always include documentation.\nThe docstring can be any string within quotes. Using \"triple quotes\" allows the string to go across multiple lines. The docstring can be rapidly printed using the help function:",
"help(degrees_to_radians)",
"This allows you to quickly use code correctly without having to look at the code. We can do the same with functions from packages, such as",
"help(math.sin)",
"You can put whatever you like in the docstring. The format used above in the degrees_to_radians function follows the numpydoc convention, but there are other conventions that work well. One reason for following this convention can be seen in spyder. Copy the function degrees_to_radians into the console, if you have not done so already. Then, in the top right part of the window, select the \"Object inspector\" tab. Ensure that the \"Source\" is \"Console\". Type degrees_to_radians into the \"Object\" box. You should see the help above displayed, but nicely formatted.\nGoing back to the function itself. After the comment, the code to convert from degrees to radians starts. Compare it to the original code typed directly into the console. In the console we had\npython\nfrom math import pi\ntheta_d = 30.0\ntheta_r = pi / 180.0 * theta_d\nIn the function we have\npython\n theta_r = pi / 180.0 * theta_d\n return theta_r\nThe line\npython\n from math import pi\nis in the function file, but outside the definition of the function itself.\nThere are four differences.\n\nThe function code is indented by four spaces, or one tab.\nThe input parameter theta_d must be defined in the console, but not in the function. When the function is called the value of theta_d is given, but inside the function itself it is not: the function knows that the specific value of theta_d will be given as input.\nThe output of the function theta_r is explicitly returned, using the return statement.\nThe import statement is moved outside the function definition - this is the convention recommended by PEP8.\n\nAside from these points, the code is identical. A function, like a program, is a collection of Python statements exactly as you would type into a console. 
The first three differences above are the essential differences to keep in mind: the first is specific to Python (other programming languages have something similar), whilst the other differences are common to most programming languages.\nScope\nNames used internally by the function are not visible externally. Also, the name used for the output of the function need not be used externally. To see an example of this, start with a clean slate by typing %reset into the console.",
"%reset",
"Then copy and paste the function definition again:",
"from math import pi\n\ndef degrees_to_radians(theta_d):\n \"\"\"\n Convert an angle from degrees to radians.\n \n Parameters\n ----------\n \n theta_d : float\n The angle in degrees.\n \n Returns\n -------\n \n theta_r : float\n The angle in radians.\n \"\"\"\n theta_r = pi / 180.0 * theta_d\n return theta_r",
"(Alternatively you can use the history in the console by pressing the up arrow until the definition of the function you previously entered appears. Then click at the end of the function and press Return). Now call the function as",
"angle = degrees_to_radians(45.0)\n\nprint(angle)",
"But the variables used internally, theta_d and theta_r, are not known outside the function:",
"theta_d",
"This is an example of scope: the existence of variables, and their values, is restricted inside functions (and files).\nYou may note that above, we had a value of theta_d outside the function (from when we were working in the console), and a value of theta_d inside the function (as the input parameter). These do not have to match. If a variable is assigned a value inside the function then Python will take this \"local\" value. If not, Python will look outside the function. Two examples will illustrate this:",
"x1 = 1.1\n\ndef print_x1():\n print(x1)\n\nprint(x1)\nprint_x1()\n\nx2 = 1.2\n\ndef print_x2():\n x2 = 2.3\n print(x2)\n\nprint(x2)\nprint_x2()",
"In the first (x1) example, the variable x1 was not defined within the function, but it was used. When x1 is printed, Python has to look for the definition outside of the scope of the function, which it does successfully.\nIn the second (x2) example, the variable x2 is defined within the function. The value of x2 does not match the value of the variable with the same name defined outside the function, but that does not matter: within the function, its local value is used. When printed outside the function, the value of x2 uses the external definition, as the value defined inside the function is not known (it is \"not in scope\").\nSome care is needed with using scope in this way, as Python reads the whole function at the time it is defined when deciding scope. As an example:",
"x3 = 1.3\n\ndef print_x3():\n print(x3)\n x3 = 2.4\n\nprint(x3)\nprint_x3()",
"The only significant change from the second example is the order of the print statement and the assignment to x3 inside the function. Because x3 is assigned inside the function, Python wants to use the local value within the function, and will ignore the value defined outside the function. However, the print function is called before x3 has been set within the function, leading to an error.\nKeyword and default arguments\nOur original function degrees_to_radians only had one argument, the angle to be converted theta_d. Many functions will take more than one argument, and sometimes the function will take arguments that we don't always want to set. Python can make life easier in these cases.\nSuppose we wanted to know how long it takes an object released from a height $h$, in a gravitational field of strength $g$, with initial vertical speed $v$, to hit the ground. The answer is\n\\begin{equation}\n t = \\frac{1}{g} \\left( v + \\sqrt{v^2 + 2 h g} \\right).\n\\end{equation}\nWe can write this as a function:",
"from math import sqrt\n\ndef drop_time(height, speed, gravity):\n \"\"\"\n Return how long it takes an object released from a height h, \n in a gravitational field of strength g, with initial vertical speed v, \n to hit the ground.\n \n Parameters\n ----------\n \n height : float\n Initial height h\n speed : float\n Initial vertical speed v\n gravity : float\n Gravitional field strength g\n \n Returns\n -------\n \n t : float\n Time the object hits the ground\n \"\"\"\n \n return (speed + sqrt(speed**2 + 2.0*height*gravity)) / gravity",
"But when we start using it, it can be a bit confusing:",
"print(drop_time(10.0, 0.0, 9.8))\nprint(drop_time(10.0, 1.0, 9.8))\nprint(drop_time(100.0, 9.8, 15.0))",
"Is that last case correct? Did we really want to change the gravitational field, whilst at the same time using an initial velocity of exactly the value we expect for $g$?\nA far clearer use of the function comes from using keyword arguments. This is where we explicitly use the name of the function arguments. For example:",
"print(drop_time(height=10.0, speed=0.0, gravity=9.8))",
"The result is exactly the same, but now it's explicitly clear what we're doing. \nEven more useful: when using keyword arguments, we don't have to ensure that the order we use matches the order of the function definition:",
"print(drop_time(height=100.0, gravity=9.8, speed=15.0))",
"This is the same as the confusing case above, but now there is no ambiguity. Whilst it is good practice to match the order of the arguments to the function definition, it is only needed when you don't use the keywords. Using the keywords is always useful.\nWhat if we said that we were going to assume that the gravitational field strength $g$ is nearly always going to be that of Earth, $9.8$ms${}^{-2}$? We can re-define our function using a default argument:",
"def drop_time(height, speed, gravity=9.8):\n \"\"\"\n Return how long it takes an object released from a height h, \n in a gravitational field of strength g, with initial vertical speed v, \n to hit the ground.\n \n Parameters\n ----------\n \n height : float\n Initial height h\n speed : float\n Initial vertical speed v\n gravity : float\n Gravitional field strength g\n \n Returns\n -------\n \n t : float\n Time the object hits the ground\n \"\"\"\n \n return (speed + sqrt(speed**2 + 2.0*height*gravity)) / gravity",
"Note that there is only one difference here, in the very first line: we state that gravity=9.8. What this means is that if this function is called and the value of gravity is not specified, then it takes the value 9.8.\nFor example:",
"print(drop_time(10.0, 0.0))\nprint(drop_time(height=50.0, speed=1.0))\nprint(drop_time(gravity=15.0, height=50.0, speed=1.0))",
"So, we can still give a specific value for gravity when we don't want to use the value 9.8, but it isn't needed if we're happy for it to take the default value of 9.8. This works both if we use keyword arguments and if not, with certain restrictions.\nSome things to keep in mind. \n\nDefault arguments can only be used without specifying the keyword if they come after arguments without defaults. It is a very strong convention that arguments with a default come at the end of the argument list.\nThe value of default arguments can be pretty much anything, but care should be taken to get the behaviour you expect. In particular, it is strongly discouraged to allow the default value to be anything that might change, as this can lead to odd behaviour that is hard to find. In particular, allowing a default value to be a container such as a list (seen below) can lead to unexpected behaviour. See, for example, this discussion, pointing out why, and that the value of the default argument is fixed when the function is defined, not when it's called.\n\nPrinting and strings\nWe have already seen the print function used multiple times. It displays its argument(s) to the screen when called, either from the console or from within a program. It prints some representation of what it is given in the form of a string: it converts simple numbers and other objects to strings that can be shown on the screen. For example:",
"import math\nx = 1.2\nname = \"Alice\"\nprint(\"Hello\")\nprint(6)\nprint(name)\nprint(x)\nprint(math.pi)\nprint(math.sin(x))\nprint(math.sin)\nprint(math)",
"We see that variables are converted to their values (such as name and math.pi) and functions are called to get values (such as math.sin(x)), which are then converted to strings displayed on screen. However, functions (math.sin) and modules (math) are also \"printed\", in that a string saying what they are, and where they come from, is displayed.\nOften we want to display useful information to the screen, which means building a message that is readable and printing that. There are many ways of doing this: here we will just look at the format command. Here is an example:",
"print(\"Hello {}. We set x={}.\".format(name, x))",
"The format command takes the string (here \"Hello {}. We set x={}.\") and replaces the {} with the values of the variables (here name and x in order).\nWe can use the format command in this way for anything that has a string representation. For example:",
"print (\"The function {} applied to x={} gives {}\".format(math.sin, x, math.sin(x)))",
"There are many more ways to use the format command which can be helpful.\nWe note that format is a function, but a function applied to the string before the dot. This type of function is called a method, and we shall return to them later.\nStrings\nWe have just printed a lot of strings out, but it is useful to briefly talk about what a string is.\nIn Python a string is not just a sequence of characters. It is a Python object that contains additional information that \"lives on it\". If this information is a constant property it is called an attribute. If it is a function it is called a method. We can access this information to tell us things about the string, and to manipulate it.\nHere are some basic string methods:",
"name = \"Alice\"\nnumber = \"13\"\nsentence = \" a b c d e \"\nprint(name.upper())\nprint(name.lower())\nprint(name.isdigit())\nprint(number.isdigit())\nprint(sentence.strip())\nprint(sentence.split())",
"The use of the \"dot\" notation appears here. We saw this with accessing functions in modules and packages above; now we see it with accessing attributes and methods. It appears repeatedly in Python. The format method used above is particularly important for our purposes, but there are a lot of methods available.\nThere are other ways of manipulating strings.\nWe can join two strings using the + operator.",
"print(\"Hello\" + \"Alice\")",
"We can repeat strings using the * operator.",
"print(\"Hello\" * 3)",
"We can convert numbers to strings using the str function.",
"print(str(3.4))",
"We can also access individual characters (starting from 0!), or a range of characters:",
"print(\"Hello\"[0])\nprint(\"Hello\"[2])\nprint(\"Hello\"[1:3])",
"We will come back to this notation when discussing lists and slicing.\nNote\nThere are big differences between how Python deals with strings in Python 2.X and Python 3.X. Whilst most of the commands above will produce identical output, string handling is one of the major reasons why Python 2.X doesn't always work in Python 3.X. The ways strings are handled in Python 3.X is much better than in 2.X.\nPutting it together\nWe can now combine the introduction of programs with functions. First, create a file called lab1_function.py containing the code\n```python\nfrom math import pi\ndef degrees_to_radians(theta_d):\n \"\"\"\n Convert an angle from degrees to radians.\nParameters\n----------\n\ntheta_d : float\n The angle in degrees.\n\nReturns\n-------\n\ntheta_r : float\n The angle in radians.\n\"\"\"\ntheta_r = pi / 180.0 * theta_d\nreturn theta_r\n\n```\nThis is almost exactly the function as defined above.\nNext, write a second file lab1_use_function.py containing\n```python\nfrom lab1_function import degrees_to_radians\nprint(degrees_to_radians(15.0))\nprint(degrees_to_radians(30.0))\nprint(degrees_to_radians(45.0))\nprint(degrees_to_radians(60.0))\nprint(degrees_to_radians(75.0))\nprint(degrees_to_radians(90.0))\n```\nThis function uses our own function to convert from degrees to radians. To save typing we have used the from <module> import <function> notation. We could have instead written import lab1_function, but then every function call would need to use lab1_function.degrees_to_radians.\nThis program, when run, will print to the screen the angles $(n \\pi)/ 12$ for $n = 1, 2, \\dots, 6$.\nExercise: basic functions\nExercise 1\nWrite a function to calculate the volume of a cuboid with edge lengths $a, b, c$. 
Test your code on sample values such as\n\n$a=1, b=1, c=1$ (result should be $1$);\n$a=1, b=2, c=3.5$ (result should be $7.0$);\n$a=0, b=1, c=1$ (result should be $0$);\n$a=2, b=-1, c=1$ (what do you think the result should be?).\n\nExercise 2\nWrite a function to compute the time (in seconds) taken for an object to fall from a height $H$ (in metres) to the ground, using the formula\n\\begin{equation}\n h(t) = \\frac{1}{2} g t^2.\n\\end{equation}\nUse the value of the acceleration due to gravity $g$ from scipy.constants.g. Test your code on sample values such as\n\n$H = 1$m (result should be $\\approx 0.452$s);\n$H = 10$m (result should be $\\approx 1.428$s);\n$H = 0$m (result should be $0$s);\n$H = -1$m (what do you think the result should be?).\n\nExercise 3\nWrite a function that computes the area of a triangle with edge lengths $a, b, c$. You may use the formula\n\\begin{equation}\n A = \\sqrt{s (s - a) (s - b) (s - c)}, \\qquad s = \\frac{a + b + c}{2}.\n\\end{equation}\nConstruct your own test cases to cover a range of possibilities.\nExercise: Floating point numbers\nExercise 1\nComputers cannot, in principle, represent real numbers perfectly. This can lead to problems of accuracy. 
For example, if\n\\begin{equation}\n x = 1, \\qquad y = 1 + 10^{-14} \\sqrt{3}\n\\end{equation}\nthen it should be true that\n\\begin{equation}\n 10^{14} (y - x) = \\sqrt{3}.\n\\end{equation}\nCheck how accurately this equation holds in Python and see what this implies about the accuracy of subtracting two numbers that are close together.\nNote\nThe standard floating point number holds the first 16 significant digits of a real.\nExercise 2\nThe standard quadratic formula gives the solutions to\n\\begin{equation}\n a x^2 + b x + c = 0\n\\end{equation}\nas\n\\begin{equation}\n x = \\frac{-b \\pm \\sqrt{b^2 - 4 a c}}{2 a}.\n\\end{equation}\nShow that, if $a = 10^{-n} = c$ and $b = 10^n$ then\n\\begin{equation}\n x = \\frac{10^{2 n}}{2} \\left( -1 \\pm \\sqrt{1 - 10^{-4n}} \\right).\n\\end{equation}\nUsing the expansion (from Taylor's theorem)\n\\begin{equation}\n \\sqrt{1 - 10^{-4 n}} \\simeq 1 - \\frac{10^{-4 n}}{2} + \\dots, \\qquad n \\gg 1,\n\\end{equation}\nshow that\n\\begin{equation}\n x \\simeq -10^{2 n} + \\frac{10^{-2 n}}{4} \\quad \\text{and} \\quad -\\frac{10^{-2n}}{4}, \\qquad n \\gg 1.\n\\end{equation}\nExercise 3\nBy multiplying and dividing by $-b \\mp \\sqrt{b^2 - 4 a c}$, check that we can also write the solutions to the quadratic equation as\n\\begin{equation}\n x = \\frac{2 c}{-b \\mp \\sqrt{b^2 - 4 a c}}.\n\\end{equation}\nExercise 4\nUsing Python, calculate both solutions to the quadratic equation\n\\begin{equation}\n 10^{-n} x^2 + 10^n x + 10^{-n} = 0\n\\end{equation}\nfor $n = 3$ and $n = 4$ using both formulas. What do you see? How has floating point accuracy caused problems here?\nExercise 5\nThe standard definition of the derivative of a function is\n\\begin{equation}\n \\left. 
\\frac{\\text{d} f}{\\text{d} x} \\right|{x=X} = \\lim{\\delta \\to 0} \\frac{f(X + \\delta) - f(X)}{\\delta}.\n\\end{equation}\nWe can approximate this by computing the result for a finite value of $\\delta$:\n\\begin{equation}\n g(x, \\delta) = \\frac{f(x + \\delta) - f(x)}{\\delta}.\n\\end{equation}\nWrite a function that takes as inputs a function of one variable, $f(x)$, a location $X$, and a step length $\\delta$, and returns the approximation to the derivative given by $g$.\nExercise 6\nThe function $f_1(x) = e^x$ has derivative with the exact value $1$ at $x=0$. Compute the approximate derivative using your function above, for $\\delta = 10^{-2 n}$ with $n = 1, \\dots, 7$. You should see the results initially improve, then get worse. Why is this?"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
bmeaut/python_nlp_2017_fall
|
course_material/12_Semantics_1/12_Semantics_1_lab_solutions.ipynb
|
mit
|
[
"11. Semantics 1: words - Lab excercises\n11.E1 Accessing WordNet using NLTK\n11.E2 Using word embeddings\n11.E3 Comparing WordNet and word embeddings\n11.E1 Accessing WordNet using NLTK\n<a id='11.E1'></a>\nNLTK (Natural Language Toolkit) is a python library for accessing many NLP tools and resources. The NLTK WordNet interface is described here: http://www.nltk.org/howto/wordnet.html\nThe NLTK python package can be installed using pip:",
"!pip install nltk",
"Import nltk and use its internal download tool to get WordNet:",
"import nltk\nnltk.download('wordnet')",
"Import the wordnet module:",
"from nltk.corpus import wordnet as wn",
"Access synsets of a word using the synsets function:",
"club_synsets = wn.synsets('club')\nprint(club_synsets)",
"Each synset has a definition function:",
"for synset in club_synsets:\n print(\"{0}\\t{1}\".format(synset.name(), synset.definition()))\n\ndog = wn.synsets('dog')[0]\ndog.definition()",
"List lemmas of a synset:",
"dog.lemmas()",
"List hypernyms and hyponyms of a synset",
"dog.hypernyms()\n\ndog.hyponyms()",
"The closure method of synsets allows us to retrieve the transitive closure of the hypernym, hyponym, etc. relations:",
"list(dog.closure(lambda s: s.hypernyms()))",
"common_hypernyms and lowest_common_hypernyms work in relation to another synset:",
"cat = wn.synsets('cat')[0]\ndog.lowest_common_hypernyms(cat)\n\ndog.common_hypernyms(cat)\n\ndog.path_similarity(cat)",
"To iterate through all synsets, possibly by POS-tag, use all_synsets, which returns a generator:",
"wn.all_synsets(pos='n')\n\nfor c, noun in enumerate(wn.all_synsets(pos='n')):\n if c > 5:\n break\n print(noun.name())",
"Excercise (optional): use WordNet to implement the \"Guess the category\" game: the program lists lemmas that all share a hypernym, which the user has to guess.\n11.E2 Using word embeddings\n<a id='11.E2'></a>\n\nDownload and extract the word embedding glove.6B, which was trained on 6 billion words of English text using the GloVe algorithm.",
"!wget http://sandbox.hlt.bme.hu/~recski/stuff/glove.6B.50d.txt.gz\n!gunzip -f glove.6B.50d.txt.gz",
"Read the embedding into a 2D numpy array. Word forms should be stored in a separate 1D array. Also create a word index, a dictionary that returns the index of each word in the embedding. Vectors should be normalized to a length of 1",
"import numpy as np\n\ndef read_embedding(fn):\n words = []\n emb = []\n word_index = {}\n c = 0\n with open(fn, encoding='utf-8') as f:\n for line in f:\n fields = line.strip().split()\n emb.append(np.array([float(i) for i in fields[1:]], dtype='float32'))\n words.append(fields[0])\n word_index[fields[0]] = c\n c += 1\n\n print(\"read {0} lines\".format(c))\n return np.array(words), word_index, np.array(emb) \n\ndef normalize_embedding(emb):\n return emb / np.linalg.norm(emb, axis=1)[:,None]\n\nwords, word_index, emb = read_embedding('glove.6B.50d.txt')\nemb = normalize_embedding(emb)",
"write a function that takes two words and the embedding as input and returns their cosine similarity",
"def vec_sim(w1, w2, word_index, emb):\n if w1 not in word_index or w2 not in word_index:\n return None\n return np.dot(emb[word_index[w1]], emb[word_index[w2]])\n\nvec_sim('cat', 'dog', word_index, emb)",
"Implement a function that takes a word as a parameter and returns the 5 words that are closest to it in the embedding space",
"def nearest_n(word, words, word_index, emb, n=5):\n try:\n w_index = word_index[word]\n except KeyError:\n return None\n w_vec = emb[w_index]\n\n distances = np.dot(emb, w_vec)\n indices = np.argsort(distances)[-n:][::-1]\n return [words[i] for i in indices] \n\nprint(nearest_n('dog', words, word_index, emb))\nprint(nearest_n('king', words, word_index, emb))",
"11.E3 Vector similarity in WordNet\n<a id='11.E3'></a>\nUse the code written in 11.E2 to analyze word groups in WordNet:\n\nCreate an embedding of WordNet synsets by mapping each of them to the mean of their lemmas' vectors.",
"def embed_synset(synset, words, word_index, emb):\n word_set = [lemma.name() for lemma in synset.lemmas()]\n indices = filter(None, map(word_index.get, word_set))\n vecs = np.array([emb[i] for i in indices])\n if len(vecs) == 0:\n return None\n return np.mean(vecs, axis=0)\n\ndef embed_synsets(words, word_index, emb):\n return {synset: embed_synset(synset, words, word_index, emb) for synset in wn.all_synsets()}\n\nsynset_emb = embed_synsets(words, word_index, emb)",
"write a function that measures the similarity of two synsets based on the cosine similarity of their vectors",
"def synset_sim(ss1, ss2, synset_emb):\n vec1 = synset_emb[ss1]\n vec2 = synset_emb[ss2]\n if vec1 is None or vec2 is None:\n return None\n return np.dot(vec1, vec2)\n\nsynset_sim(dog, cat, synset_emb)",
"Write a function that takes a synset as input and retrieves the n most similar synsets, using the above embedding",
"def nearest_n_synsets(synset, synset_emb, n=5):\n distances = [(synset_sim(synset, other, synset_emb), other) for other in wn.all_synsets() if synset != other]\n distances = [(sim, synset) for sim, synset in distances if not sim is None]\n return sorted(distances, reverse=True)[:n]\n\n%%time\nnearest_n_synsets(wn.synsets('penguin')[0], synset_emb, 10)",
"Build the list of all words that are both in wordnet and the GloVe embedding. On a sample of 100 such words, measure Spearman correlation of synset similarity and vector similarity (use scipy.stats.spearmanr)",
"words_in_both = [word for word in wn.all_lemma_names() if word in word_index]\n\nlen(words_in_both)\n\nimport random\nsample = random.sample(words_in_both, 100)\n\nfrom scipy.stats import spearmanr\n\ndef compare_sims(sample, synset_emb, word_index, emb):\n vec_sims, ss_sims = [], []\n for w1 in sample:\n for w2 in sample:\n ss_sim = synset_sim(wn.synsets(w1)[0], wn.synsets(w2)[0], synset_emb)\n if ss_sim is None:\n continue\n v_sim = vec_sim(w1, w2, word_index, emb)\n vec_sims.append(v_sim)\n ss_sims.append(ss_sim)\n return spearmanr(vec_sims, ss_sims)\n\ncompare_sims(sample, synset_emb, word_index, emb)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
esa-as/2016-ml-contest
|
dagrha/KNN_submission_1_dagrha.ipynb
|
apache-2.0
|
[
"Facies classification using KNearestNeighbors\n<a rel=\"license\" href=\"https://creativecommons.org/licenses/by-sa/4.0/\">\n <img alt=\"Creative Commons License BY-SA\" align=\"left\" src=\"https://i.creativecommons.org/l/by-sa/4.0/88x31.png\">\n</a>\n<br>\nDan Hallau\nA lot of sophisticated models have been submitted for the contest so far (deep neural nets, random forests, etc.) so I thought I'd try submitting a simpler model to see how it stacks up. In that spirit here's a KNearestNeighbors classifier.\nI spend a few cells back-calculating some more standard logging curves (RHOB, NPHI, etc) then create a log-based lithology model from a Umaa-Rhomaa plot. After training, I finish it up with a LeaveOneGroupOut test.",
"import pandas as pd\nimport numpy as np\n\nfrom sklearn import neighbors\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import LeaveOneGroupOut\n\nimport inversion\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline",
"Load training data",
"df = pd.read_csv('../facies_vectors.csv')\ndf.dropna(inplace=True)",
"Build features\nIn the real world it would be unusual to have neutron-density cross-plot porosity (i.e. PHIND) without the corresponding raw input curves, namely bulk density and neutron porosity, as we have in this contest dataset. So as part of the feature engineering process, I back-calculate estimates of those raw curves from the provided DeltaPHI and PHIND curves. One issue with this approach though is that cross-plot porosity differs between vendors, toolstrings, and software packages, and it is not known exactly how the PHIND in this dataset was computed. So I make the assumption here that PHIND ≈ sum of squares porosity, which is usually an adequate approximation of neutron-density crossplot porosity. That equation looks like this: \n$$PHIND ≈ \\sqrt{\\frac{NPHI^2 + DPHI^2}{2}}$$\nand it is assumed here that DeltaPHI is:\n$$DeltaPHI = NPHI - DPHI$$\nThe functions below use the relationships from the above equations (...two equations, two unknowns...) to estimate NPHI and DPHI (and consequently RHOB).\nOnce we have RHOB, we can use it combined with PE to estimate apparent grain density (RHOMAA) and apparent photoelectric capture cross-section (UMAA), which are useful in lithology estimations from well logs.",
"def estimate_dphi(df):\n return ((4*(df['PHIND']**2) - (df['DeltaPHI']**2))**0.5 - df['DeltaPHI']) / 2\n\ndef estimate_rhob(df):\n return (2.71 - (df['DPHI_EST']/100) * 1.71)\n\ndef estimate_nphi(df):\n return df['DPHI_EST'] + df['DeltaPHI']\n\ndef compute_rhomaa(df):\n return (df['RHOB_EST'] - (df['PHIND'] / 100)) / (1 - df['PHIND'] / 100)\n \ndef compute_umaa(df):\n return ((df['PE'] * df['RHOB_EST']) - (df['PHIND']/100 * 0.398)) / (1 - df['PHIND'] / 100)",
"Because solving the sum of squares equation involved the quadratic formula, in some cases imaginary numbers result due to porosities being negative, which is what the warning below is about.",
"df['DPHI_EST'] = df.apply(lambda x: estimate_dphi(x), axis=1).astype(float)\ndf['RHOB_EST'] = df.apply(lambda x: estimate_rhob(x), axis=1)\ndf['NPHI_EST'] = df.apply(lambda x: estimate_nphi(x), axis=1)\ndf['RHOMAA_EST'] = df.apply(lambda x: compute_rhomaa(x), axis=1)\ndf['UMAA_EST'] = df.apply(lambda x: compute_umaa(x), axis=1)",
"Just for fun, below is a basic Umaa-Rhomaa plot to view relative abundances of quartz, calcite, dolomite, and clay. The red triangle represents a ternary solution for QTZ, CAL, and DOL, while the green triangle represents a solution for QTZ, CAL, and CLAY (illite).",
"df[df.GR < 125].plot(kind='scatter', x='UMAA_EST', y='RHOMAA_EST', c='GR', figsize=(8,6))\nplt.ylim(3.1, 2.2)\nplt.xlim(0.0, 17.0)\nplt.plot([4.8, 9.0, 13.8, 4.8], [2.65, 2.87, 2.71, 2.65], c='r')\nplt.plot([4.8, 11.9, 13.8, 4.8], [2.65, 3.06, 2.71, 2.65], c='g')\nplt.scatter([4.8], [2.65], s=50, c='r')\nplt.scatter([9.0], [2.87], s=50, c='r')\nplt.scatter([13.8], [2.71], s=50, c='r')\nplt.scatter([11.9], [3.06], s=50, c='g')\nplt.text(2.8, 2.65, 'Quartz', backgroundcolor='w')\nplt.text(14.4, 2.71, 'Calcite', backgroundcolor='w')\nplt.text(9.6, 2.87, 'Dolomite', backgroundcolor='w')\nplt.text(12.5, 3.06, 'Illite', backgroundcolor='w')\nplt.text(7.0, 2.55, \"gas effect\", ha=\"center\", va=\"center\", rotation=-55,\n size=8, bbox=dict(boxstyle=\"larrow,pad=0.3\", fc=\"pink\", ec=\"red\", lw=2))\nplt.text(15.0, 2.78, \"heavies?\", ha=\"center\", va=\"center\", rotation=0,\n size=8, bbox=dict(boxstyle=\"rarrow,pad=0.3\", fc=\"yellow\", ec=\"orange\", lw=2))",
"Here I use matrix inversion to \"solve\" the ternary plot for each lithologic component. Essentially each datapoint is a mix of the three components defined by the ternary diagram, with abundances of each defined by the relative distances from each endpoint. I use a GR cutoff of 40 API to determine when to use either the QTZ-CAL-DOL or QTZ-CAL-CLAY ternary solutions. In other words, it is assumed that below 40 API, there is 0% clay, and above 40 API there is 0% dolomite, and also that these four lithologic components are the only components in these rocks. Admittedly it's not a great assumption, especially since the ternary plot indicates other stuff is going on. For example the high Umaa datapoints near the Calcite endpoint may indicate some heavy minerals (e.g., pyrite) or even barite-weighted mud. The \"pull\" of datapoints to the northwest quadrant probably reflects some gas effect, so my lithologies in those gassy zones will be skewed.",
"# QTZ-CAL-CLAY\nur = inversion.UmaaRhomaa()\nur.set_dol_uma(11.9)\nur.set_dol_rhoma(3.06)\n# QTZ-CAL-DOL\nur2 = inversion.UmaaRhomaa()\n\ndf['UR_QTZ'] = np.nan\ndf['UR_CLY'] = np.nan\ndf['UR_CAL'] = np.nan\ndf['UR_DOL'] = np.nan\n\ndf.ix[df.GR >= 40, 'UR_QTZ'] = df.ix[df.GR >= 40].apply(lambda x: ur.get_qtz(x.UMAA_EST, x.RHOMAA_EST), axis=1)\ndf.ix[df.GR >= 40, 'UR_CLY'] = df.ix[df.GR >= 40].apply(lambda x: ur.get_dol(x.UMAA_EST, x.RHOMAA_EST), axis=1) \ndf.ix[df.GR >= 40, 'UR_CAL'] = df.ix[df.GR >= 40].apply(lambda x: ur.get_cal(x.UMAA_EST, x.RHOMAA_EST), axis=1)\ndf.ix[df.GR >= 40, 'UR_DOL'] = 0\n\ndf.ix[df.GR < 40, 'UR_QTZ'] = df.ix[df.GR < 40].apply(lambda x: ur2.get_qtz(x.UMAA_EST, x.RHOMAA_EST), axis=1)\ndf.ix[df.GR < 40, 'UR_DOL'] = df[df.GR < 40].apply(lambda x: ur2.get_dol(x.UMAA_EST, x.RHOMAA_EST), axis=1) \ndf.ix[df.GR < 40, 'UR_CAL'] = df[df.GR < 40].apply(lambda x: ur2.get_cal(x.UMAA_EST, x.RHOMAA_EST), axis=1)\ndf.ix[df.GR < 40, 'UR_CLY'] = 0",
"Fit KNearestNeighbors model and apply LeaveOneGroupOut test\nThere is some bad log data in this dataset which I'd guess is due to rugose hole. PHIND gets as high at 80%, which is certainly spurious. For the purposes of this exercise, I'll just remove records where neutron-density crossplot porosity is super high.\nAlso the CROSS H CATTLE well consistently returns anomalously low F1 scores, so I'm going to omit it from the training set.",
"df1 = df.dropna()\ndf1 = df1[(df1.PHIND <= 40) & (df1['Well Name'] != 'CROSS H CATTLE')]\n\nfacies = df1['Facies'].values\nwells = df1['Well Name'].values\n\ndrop_list = ['Formation', 'Well Name', 'Facies', 'Depth', 'DPHI_EST', 'NPHI_EST', 'DeltaPHI',\n 'RHOMAA_EST', 'UMAA_EST', 'UR_QTZ', 'UR_DOL'] \n\nfv = df1.drop(drop_list, axis=1).values\n\nclf = neighbors.KNeighborsClassifier(n_neighbors=56, weights='distance') \n\nX = preprocessing.StandardScaler().fit(fv).transform(fv)\ny = facies\n\nlogo = LeaveOneGroupOut()\nf1knn = []\n\nfor train, test in logo.split(X, y, groups=wells):\n well_name = wells[test[0]]\n clf.fit(X[train], y[train])\n predicted_labels = clf.predict(X[test])\n score = clf.fit(X[train], y[train]).score(X[test], y[test])\n print(\"{:>20s} {:.3f}\".format(well_name, score))\n f1knn.append(score)\n \nprint(\"-Average leave-one-well-out F1 Score: %6f\" % (np.mean(f1knn)))\nf1knn.pop(4)\nprint(\"-Average leave-one-well-out F1 Score, no Recruit F1: %6f\" % (np.mean(f1knn)))",
"Apply model to validation dataset\nLoad validation data (vd), build features, and use the classfier from above to predict facies.",
"vd = pd.read_csv('../validation_data_nofacies.csv', index_col=0)\nvd.dropna(inplace=True)\n\nvd['DPHI_EST'] = vd.apply(lambda x: estimate_dphi(x), axis=1).astype(float)\nvd['RHOB_EST'] = vd.apply(lambda x: estimate_rhob(x), axis=1)\nvd['NPHI_EST'] = vd.apply(lambda x: estimate_nphi(x), axis=1)\nvd['RHOMAA_EST'] = vd.apply(lambda x: compute_rhomaa(x), axis=1)\nvd['UMAA_EST'] = vd.apply(lambda x: compute_umaa(x), axis=1)\n\nvd['UR_QTZ'] = np.nan\nvd['UR_CLY'] = np.nan\nvd['UR_CAL'] = np.nan\nvd['UR_DOL'] = np.nan\n\nvd.ix[vd.GR >= 40, 'UR_QTZ'] = vd.ix[vd.GR >= 40].apply(lambda x: ur.get_qtz(x.UMAA_EST, x.RHOMAA_EST), axis=1)\nvd.ix[vd.GR >= 40, 'UR_CLY'] = vd.ix[vd.GR >= 40].apply(lambda x: ur.get_dol(x.UMAA_EST, x.RHOMAA_EST), axis=1) \nvd.ix[vd.GR >= 40, 'UR_CAL'] = vd.ix[vd.GR >= 40].apply(lambda x: ur.get_cal(x.UMAA_EST, x.RHOMAA_EST), axis=1)\nvd.ix[vd.GR >= 40, 'UR_DOL'] = 0\n\nvd.ix[vd.GR < 40, 'UR_QTZ'] = vd.ix[vd.GR < 40].apply(lambda x: ur2.get_qtz(x.UMAA_EST, x.RHOMAA_EST), axis=1)\nvd.ix[vd.GR < 40, 'UR_DOL'] = vd[vd.GR < 40].apply(lambda x: ur2.get_dol(x.UMAA_EST, x.RHOMAA_EST), axis=1) \nvd.ix[vd.GR < 40, 'UR_CAL'] = vd[vd.GR < 40].apply(lambda x: ur2.get_cal(x.UMAA_EST, x.RHOMAA_EST), axis=1)\nvd.ix[vd.GR < 40, 'UR_CLY'] = 0\n\nvd1 = vd.dropna()\nvd1 = vd1[(vd1.PHIND <= 40)]\n\ndrop_list1 = ['Well Name', 'Depth', 'DPHI_EST', 'NPHI_EST', 'DeltaPHI',\n 'RHOMAA_EST', 'UMAA_EST', 'UR_QTZ', 'UR_DOL'] \n\nfv1 = vd1.drop(drop_list1, axis=1).values\n\nX1 = preprocessing.StandardScaler().fit(fv1).transform(fv1)\n\nvd_predicted_facies = clf.predict(X1)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
rdlester/meap-analysis
|
meap.ipynb
|
mit
|
[
"meap analysis\nMEAP data obtained from\nhttps://www.mischooldata.org/DistrictSchoolProfiles/EntitySummary/SchoolDataFile.aspx\nStaffing info from\nhttps://www.mischooldata.org/Other/DataFiles/StaffingInformation/HistoricalStaffingSummary.aspx",
"%matplotlib inline\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy.stats as stats\nfrom sklearn.linear_model import LinearRegression, Ridge, LogisticRegressionCV\nfrom sklearn.preprocessing import PolynomialFeatures, StandardScaler\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn_pandas import DataFrameMapper\nfrom os.path import exists\nimport qgrid as qgrid\nqgrid.nbinstall()\n\n# consts for column names\nscore_code_col = 'DistrictCode'\nstaff_code_col = 'DCODE'\nsubject_col = 'Subject Name'\ngrade_col = 'Grade'\nsubgroup_col = 'Subgroup'\nnum_students_col = 'Number Tested'\nproficient_col = 'Percent Proficient'\navg_score_col = 'Average Scaled Score'\nstddev_col = 'Standard Deviation'\nnum_teachers_col = 'TEACHER'\nnum_librarians_col = 'LIB_SPEC'\nnum_library_support_col = 'LIB_SUPP'\n\n# basic database creation and loading funcs\n# hdf is very fast but requires extra installs\n# hdf5 from homebrew/science tap, tables from pip\n\n# clean junk here\ndef cleanData(combined):\n combined.drop(staff_code_col, 1, inplace=True)\n \n # look at scores on district level only\n combined = combined[combined['BuildingName'] == 'All Buildings']\n combined.drop('BuildingName', 1, inplace=True)\n \n # Old format listed small number of students as '< 10' and lists no results,\n # drop these\n if combined.dtypes[num_students_col] == 'O':\n combined = combined[combined[num_students_col] != '< 10']\n\n # New formats list small proficiencies as '< 5%'\n # Round to 5 to convert to numeric.\n if combined.dtypes[proficient_col] == 'O':\n percent_idx = combined[proficient_col] == '< 5%'\n combined.loc[percent_idx, proficient_col] = 5\n \n combined.loc[:, num_students_col] = pd.to_numeric(combined[num_students_col])\n combined.loc[:, proficient_col] = pd.to_numeric(combined[proficient_col])\n combined.loc[:, avg_score_col] = pd.to_numeric(combined[avg_score_col])\n combined.loc[:, stddev_col] = pd.to_numeric(combined[stddev_col])\n \n # 
replace NAN librarian cells with 0\n combined.loc[:, num_teachers_col].fillna(0, inplace=True)\n combined.loc[:, num_librarians_col].fillna(0, inplace=True)\n combined.loc[:, num_library_support_col].fillna(0, inplace=True)\n \n return combined\n\n# I/O here\nhdf_key = 'table'\ndef buildAndSaveCombinedSet(meap_csv, staff_csv, hdf_file):\n scores = pd.read_csv(meap_csv)\n scores.drop('BuildingCode', 1, inplace=True)\n\n staff = pd.read_csv(staff_csv)\n staff.drop('DNAME', 1, inplace=True)\n\n combined = pd.merge(scores, staff, left_on=score_code_col, right_on=staff_code_col)\n combined = cleanData(combined)\n combined.to_hdf(hdf_file, hdf_key, mode='w')",
"2012-2013.",
"# build and load hdf5 for 12-13.\ncombined_12_hdf = 'all_data-12-13.hdf'\nif not exists(combined_12_hdf):\n buildAndSaveCombinedSet('csv/meap-12-13.csv', 'csv/staff-12-13.csv', combined_12_hdf)\n\ncombined_12 = pd.read_hdf(combined_12_hdf, hdf_key)\n# print list of columns\ncombined_12.columns",
"2013-2014",
"# build and load hdf5 for 13-14.\ncombined_13_hdf = 'all_data-13-14.hdf'\nif not exists(combined_13_hdf):\n buildAndSaveCombinedSet('csv/meap-13-14.csv', 'csv/staff-13-14.csv', combined_13_hdf)\n\ncombined_13 = pd.read_hdf(combined_13_hdf, hdf_key)\n# print list of columns\ncombined_13.columns\n\n# print valid grades + subjects\n# look at both together, as not all subjects are tested at each grade level\nvalid_grade_subject = list(combined_12.groupby([grade_col, subject_col], sort=True).groups.keys())\nvalid_grade_subject.sort()\nvalid_grade_subject\n\ncombined_12.groupby(subgroup_col).groups.keys()",
"There are some weird schools. Detroit has way more teachers than every other school (2323.7), and one school has 0 teachers (but 3 career teachers).",
"all_subgroup = 'All Students'\npoor_subgroup = 'Economically Disadvantaged'\n\n# compare scores only within a single grade + subject combination\ndef examineFor(database, grade, subject, subgroup):\n grouped = database.groupby([grade_col, subject_col, subgroup_col])\n\n data = grouped.get_group((grade, subject, subgroup))\n data = data.drop(grade_col, 1).drop(subject_col, 1).drop(subgroup_col, 1)\n \n return data\n\n# test with 8th grade reading.\ndata = examineFor(combined_12, 8, 'Reading', 'All Students')\n\n# summary statistics\ntotal = data.shape[0]\ndesc_data = data[[num_students_col, proficient_col, avg_score_col, \n stddev_col, num_teachers_col, num_librarians_col, num_library_support_col]]\ndesc_data.describe()\n\n# Librarian specific stats\nidx_w_teachers = data[num_librarians_col] > 0\npercent_w_teachers = np.sum(idx_w_teachers) / total\nprint(\"Percent of schools with librarians: \" + str(percent_w_teachers))\nprint(\"\")\nscores_w = data[idx_w_teachers][avg_score_col]\nscores_wo = data[idx_w_teachers == 0][avg_score_col]\nprint(\"Average score w librarians: \" + str(np.average(scores_w)))\nprint(\"Average score wo librarians: \" + str(np.average(scores_wo)))\nprint(\"\")\npercent_prof_w = data[idx_w_teachers][proficient_col]\npercent_prof_wo = data[idx_w_teachers == 0][proficient_col]\navg_prof_w = np.average(percent_prof_w)\navg_prof_wo = np.average(percent_prof_wo)\nprint(\"Average proficiency w librarians: \" + str(avg_prof_w))\nprint(\"Std: \" + str(np.std(percent_prof_w)))\nprint(\"Average proficiency wo librarians: \" + str(avg_prof_wo))\nprint(\"Std: \" + str(np.std(percent_prof_wo)))\nprint(\"Diff: \" + str((avg_prof_w / avg_prof_wo - 1) * 100) + \"%\")\nprint(\"\")\nprint(\"Norm test on with: \" + str(stats.normaltest(percent_prof_w)))\nprint(\"Norm test on without: \" + str(stats.normaltest(percent_prof_wo)))\nprint(\"T-test: \" + str(stats.ttest_ind(percent_prof_w, percent_prof_wo)))\n\n# examining avg_score_col asks: do kids score better on 
average?\ndata.plot(x=avg_score_col, y=num_teachers_col, kind='scatter')\ndata.plot(x=avg_score_col, y=num_librarians_col, kind='scatter')\n\n# examining proficient_col asks: do more kids pass?\ndata.plot(x=proficient_col, y=num_teachers_col, kind='scatter')\ndata.plot(x=proficient_col, y=num_librarians_col, kind='scatter')\n\nstudents_per_teacher_col = 'Students per teacher'\ndata.loc[:,students_per_teacher_col] = data[num_students_col].values / data[num_teachers_col].values\ndata.plot(x=avg_score_col, y=students_per_teacher_col, kind='scatter')\n\n# since many districts have no librarians and dividing by zero is out\n# use librarians / student instead of students / librarian\nlibrarian_per_student_col = 'Librarians per student'\ndata.loc[:,librarian_per_student_col] = data[num_librarians_col].values / data[num_students_col].values\ndata.plot(x=avg_score_col, y=librarian_per_student_col, kind='scatter')\n\n# add in library support\nall_library_per_student_col = 'All Library Staff per student'\ndata.loc[:,all_library_per_student_col] = (data[num_librarians_col].values + data[num_library_support_col].values) / data[num_students_col].values\ndata.plot(x=avg_score_col, y=all_library_per_student_col, kind='scatter')\n\ndef runRegression(data, x_col, y_col, basis_degree=2):\n ''' From librarians / student predict the percent proficient.\n '''\n scaler = StandardScaler()\n model = make_pipeline(scaler,\n PolynomialFeatures(basis_degree),\n LinearRegression())\n x = data[librarian_per_student_col].values\n X = x[:, np.newaxis]\n y = data[y_col].values\n model.fit(X, y)\n x_truth = scaler.fit_transform(X)\n x_plot = np.linspace(x_truth.min(),x_truth.max(),1000)\n X_plot = x_plot[:, np.newaxis]\n y_plot = model.predict(X_plot)\n plt.plot(x_plot, y_plot, label=\"prediction\")\n plt.scatter(x_truth, y, label=\"truth\")\n plt.legend(loc='lower right')\n plt.show()\n # ideally we'd get a second data set to score on\n # different year, maybe\n print('R^2: ' + str(model.score(X, 
y)))\n\nrunRegression(data, librarian_per_student_col, proficient_col, 1)\nrunRegression(data, librarian_per_student_col, avg_score_col, 1)\nrunRegression(data, all_library_per_student_col, proficient_col, 1)\nrunRegression(data, all_library_per_student_col, avg_score_col, 1)",
"Nope.\nLogistic regression",
"def logit(x):\n return 1 / (1 + np.exp(-x))\n\ndef runLogistic(data, x_col, y_col, basis_degree=1, threshold=50):\n ''' From librarians / student predict the percent proficient.\n '''\n scaler = StandardScaler()\n regression = LogisticRegressionCV(Cs=5, penalty='l2', solver='liblinear')\n model = make_pipeline(scaler,\n# PolynomialFeatures(basis_degree),\n regression)\n x = data[librarian_per_student_col].values\n X = x[:, np.newaxis]\n y = (data[y_col].values > threshold).astype('int')\n model.fit(X, y)\n # plot\n x_truth = scaler.transform(X)\n x_plot = np.linspace(x_truth.min(),x_truth.max(),1000)\n y_plot = logit(x_plot * regression.coef_ + regression.intercept_).ravel()\n plt.plot(x_plot, y_plot, label=\"prediction\")\n plt.scatter(x_truth, y, label=\"truth\")\n plt.legend(loc='lower right')\n plt.show()\n # ideally we'd get a second data set to score on\n # different year, maybe\n print('Accuracy: ' + str(model.score(X, y)))\n\nrunLogistic(data, librarian_per_student_col, proficient_col, 1, 65)\n#runLogistic(data, librarian_per_student_col, avg_score_col, 1)\n#runLogistic(data, all_library_per_student_col, proficient_col, 1)\n#runLogistic(data, all_library_per_student_col, avg_score_col, 1)\n\n# For a given proficiency percentage: what percent of schools beat it?\nproscr = data[proficient_col].values\nabove_fifty = np.sum((proscr > 65).astype('int'))\n\nprint(above_fifty / proscr.shape[0])",
"4th grade reading.",
"# test with 4th grade reading.\ndata4 = examineFor(combined_12, 4, 'Reading', 'All Students')\n\n# summary statistics\ntotal = data4.shape[0]\ndesc_data = data4[[num_students_col, proficient_col, avg_score_col, \n stddev_col, num_teachers_col, num_librarians_col, num_library_support_col]]\ndesc_data.describe()\n\n# Librarian specific stats\nidx_w_teachers = data4[num_librarians_col] > 0\npercent_w_teachers = np.sum(idx_w_teachers) / total\nprint(\"Percent of schools with librarians: \" + str(percent_w_teachers))\n\nscores_w = data4[idx_w_teachers][avg_score_col]\nscores_wo = data4[idx_w_teachers == 0][avg_score_col]\nprint(\"Average score w librarians: \" + str(np.average(scores_w)))\nprint(\"Average score wo librarians: \" + str(np.average(scores_wo)))\n\npercent_prof_w = data4[idx_w_teachers][proficient_col]\npercent_prof_wo = data4[idx_w_teachers == 0][proficient_col]\navg_prof_w = np.average(percent_prof_w)\navg_prof_wo = np.average(percent_prof_wo)\nprint(\"Average proficiency w librarians: \" + str(avg_prof_w))\nprint(\"Average proficiency wo librarians: \" + str(avg_prof_wo))\nprint(\"Diff: \" + str((avg_prof_w / avg_prof_wo - 1) * 100) + \"%\")\n\nttest_ind(percent_prof_w, percent_prof_wo)\n\n# correlation b/w SES and librarians\ndata4Dis = examineFor(combined_12, 4, 'Reading', poor_subgroup)\ndesc_data4Dis = data4Dis[[num_students_col, proficient_col, avg_score_col, \n stddev_col, num_teachers_col, num_librarians_col, num_library_support_col]]\ndesc_data4Dis.describe()\n\nprint(data4.shape)\nprint(data4Dis.shape)\n\nnum_rich_schools = data4.shape[0] - data4Dis.shape[0]\n\nnot_rich_ids = data4Dis[score_code_col]\ndata4AllNotRich = data4[numpy.in1d(data4[score_code_col], not_rich_ids)]\n\nprint(data4AllNotRich.shape)\n\nprint(numpy.all(data4AllNotRich[score_code_col] == data4Dis[score_code_col]))\n\npercent_dis = np.divide(data4Dis[num_students_col], data4AllNotRich[num_students_col])\npercent_dis.describe()\ndata4DisPlot = np.array([percent_dis, 
data4[num_librarians_col]])\n\nplt.scatter(percent_dis, data4AllNotRich[num_librarians_col])"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ES-DOC/esdoc-jupyterhub
|
notebooks/nuist/cmip6/models/sandbox-2/ocean.ipynb
|
gpl-3.0
|
[
"ES-DOC CMIP6 Model Properties - Ocean\nMIP Era: CMIP6\nInstitute: NUIST\nSource ID: SANDBOX-2\nTopic: Ocean\nSub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing. \nProperties: 133 (101 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:34\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook",
"# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'nuist', 'sandbox-2', 'ocean')",
"Document Authors\nSet document authors",
"# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Contributors\nSpecify document contributors",
"# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Publication\nSpecify document publication status",
"# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)",
"Document Table of Contents\n1. Key Properties\n2. Key Properties --> Seawater Properties\n3. Key Properties --> Bathymetry\n4. Key Properties --> Nonoceanic Waters\n5. Key Properties --> Software Properties\n6. Key Properties --> Resolution\n7. Key Properties --> Tuning Applied\n8. Key Properties --> Conservation\n9. Grid\n10. Grid --> Discretisation --> Vertical\n11. Grid --> Discretisation --> Horizontal\n12. Timestepping Framework\n13. Timestepping Framework --> Tracers\n14. Timestepping Framework --> Baroclinic Dynamics\n15. Timestepping Framework --> Barotropic\n16. Timestepping Framework --> Vertical Physics\n17. Advection\n18. Advection --> Momentum\n19. Advection --> Lateral Tracers\n20. Advection --> Vertical Tracers\n21. Lateral Physics\n22. Lateral Physics --> Momentum --> Operator\n23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff\n24. Lateral Physics --> Tracers\n25. Lateral Physics --> Tracers --> Operator\n26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff\n27. Lateral Physics --> Tracers --> Eddy Induced Velocity\n28. Vertical Physics\n29. Vertical Physics --> Boundary Layer Mixing --> Details\n30. Vertical Physics --> Boundary Layer Mixing --> Tracers\n31. Vertical Physics --> Boundary Layer Mixing --> Momentum\n32. Vertical Physics --> Interior Mixing --> Details\n33. Vertical Physics --> Interior Mixing --> Tracers\n34. Vertical Physics --> Interior Mixing --> Momentum\n35. Uplow Boundaries --> Free Surface\n36. Uplow Boundaries --> Bottom Boundary Layer\n37. Boundary Forcing\n38. Boundary Forcing --> Momentum --> Bottom Friction\n39. Boundary Forcing --> Momentum --> Lateral Friction\n40. Boundary Forcing --> Tracers --> Sunlight Penetration\n41. Boundary Forcing --> Tracers --> Fresh Water Forcing \n1. Key Properties\nOcean key properties\n1.1. Model Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of ocean model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.2. Model Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of ocean model code (NEMO 3.6, MOM 5.0,...)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.3. Model Family\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of ocean model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_family') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OGCM\" \n# \"slab ocean\" \n# \"mixed layer ocean\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"1.4. Basic Approximations\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nBasic approximations made in the ocean.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.basic_approximations') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Primitive equations\" \n# \"Non-hydrostatic\" \n# \"Boussinesq\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"1.5. Prognostic Variables\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nList of prognostic variables in the ocean component.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Potential temperature\" \n# \"Conservative temperature\" \n# \"Salinity\" \n# \"U-velocity\" \n# \"V-velocity\" \n# \"W-velocity\" \n# \"SSH\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"2. Key Properties --> Seawater Properties\nPhysical properties of seawater in ocean\n2.1. Eos Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of EOS for sea water",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear\" \n# \"Wright, 1997\" \n# \"Mc Dougall et al.\" \n# \"Jackett et al. 2006\" \n# \"TEOS 2010\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"2.2. Eos Functional Temp\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTemperature used in EOS for sea water",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Potential temperature\" \n# \"Conservative temperature\" \n# TODO - please enter value(s)\n",
"2.3. Eos Functional Salt\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nSalinity used in EOS for sea water",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Practical salinity Sp\" \n# \"Absolute salinity Sa\" \n# TODO - please enter value(s)\n",
"2.4. Eos Functional Depth\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDepth or pressure used in EOS for sea water ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pressure (dbars)\" \n# \"Depth (meters)\" \n# TODO - please enter value(s)\n",
"2.5. Ocean Freezing Point\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS 2010\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"2.6. Ocean Specific Heat\nIs Required: TRUE Type: FLOAT Cardinality: 1.1\nSpecific heat in ocean (cpocean) in J/(kg K)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"2.7. Ocean Reference Density\nIs Required: TRUE Type: FLOAT Cardinality: 1.1\nBoussinesq reference density (rhozero) in kg / m3",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"3. Key Properties --> Bathymetry\nProperties of bathymetry in ocean\n3.1. Reference Dates\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nReference date of bathymetry",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Present day\" \n# \"21000 years BP\" \n# \"6000 years BP\" \n# \"LGM\" \n# \"Pliocene\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"3.2. Type\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the bathymetry fixed in time in the ocean ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"3.3. Ocean Smoothing\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe any smoothing or hand editing of bathymetry in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.4. Source\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe source of bathymetry in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.source') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4. Key Properties --> Nonoceanic Waters\nNon oceanic waters treatement in ocean\n4.1. Isolated Seas\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how isolated seas is performed",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.2. River Mouth\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how river mouth mixing or estuaries specific treatment is performed",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5. Key Properties --> Software Properties\nSoftware properties of ocean code\n5.1. Repository\nIs Required: FALSE Type: STRING Cardinality: 0.1\nLocation of code for this component.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.2. Code Version\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCode version identifier.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.3. Code Languages\nIs Required: FALSE Type: STRING Cardinality: 0.N\nCode language(s).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6. Key Properties --> Resolution\nResolution in the ocean grid\n6.1. Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.2. Canonical Horizontal Resolution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.3. Range Horizontal Resolution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nRange of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.4. Number Of Horizontal Gridpoints\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"6.5. Number Of Vertical Levels\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nNumber of vertical levels resolved on computational grid.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"6.6. Is Adaptive Grid\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDefault is False. Set true if grid resolution changes during execution.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6.7. Thickness Level 1\nIs Required: TRUE Type: FLOAT Cardinality: 1.1\nThickness of first surface ocean level (in meters)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"7. Key Properties --> Tuning Applied\nTuning methodology for ocean component\n7.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. &Document the relative weight given to climate performance metrics versus process oriented metrics, &and on the possible conflicts with parameterization level tuning. In particular describe any struggle &with a parameter value that required pushing it to its limits to solve a particular model deficiency.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.2. Global Mean Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList set of metrics of the global mean state used in tuning model/component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.3. Regional Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.4. Trend Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList observed trend metrics used in tuning model/component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8. Key Properties --> Conservation\nConservation in the ocean component\n8.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nBrief description of conservation methodology",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.2. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nProperties conserved in the ocean by the numerical schemes",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.scheme') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Enstrophy\" \n# \"Salt\" \n# \"Volume of ocean\" \n# \"Momentum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8.3. Consistency Properties\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAny additional consistency properties (energy conversion, pressure gradient discretisation, ...)?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.4. Corrected Conserved Prognostic Variables\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSet of variables which are conserved by more than the numerical scheme alone.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.5. Was Flux Correction Used\nIs Required: FALSE Type: BOOLEAN Cardinality: 0.1\nDoes conservation involve flux correction ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"9. Grid\nOcean grid\n9.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of grid in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"10. Grid --> Discretisation --> Vertical\nProperties of vertical discretisation in ocean\n10.1. Coordinates\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of vertical coordinates in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Z-coordinate\" \n# \"Z*-coordinate\" \n# \"S-coordinate\" \n# \"Isopycnic - sigma 0\" \n# \"Isopycnic - sigma 2\" \n# \"Isopycnic - sigma 4\" \n# \"Isopycnic - other\" \n# \"Hybrid / Z+S\" \n# \"Hybrid / Z+isopycnic\" \n# \"Hybrid / other\" \n# \"Pressure referenced (P)\" \n# \"P*\" \n# \"Z**\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"10.2. Partial Steps\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nUsing partial steps with Z or Z vertical coordinate in ocean ?*",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"11. Grid --> Discretisation --> Horizontal\nType of horizontal discretisation scheme in ocean\n11.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal grid type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Lat-lon\" \n# \"Rotated north pole\" \n# \"Two north poles (ORCA-style)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"11.2. Staggering\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nHorizontal grid staggering type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Arakawa B-grid\" \n# \"Arakawa C-grid\" \n# \"Arakawa E-grid\" \n# \"N/a\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"11.3. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal discretisation scheme in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite difference\" \n# \"Finite volumes\" \n# \"Finite elements\" \n# \"Unstructured grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12. Timestepping Framework\nOcean Timestepping Framework\n12.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of time stepping in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"12.2. Diurnal Cycle\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDiurnal cycle type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Via coupling\" \n# \"Specific treatment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13. Timestepping Framework --> Tracers\nProperties of tracers time stepping in ocean\n13.1. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTracers time stepping scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Leap-frog + Asselin filter\" \n# \"Leap-frog + Periodic Euler\" \n# \"Predictor-corrector\" \n# \"Runge-Kutta 2\" \n# \"AM3-LF\" \n# \"Forward-backward\" \n# \"Forward operator\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.2. Time Step\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nTracers time step (in seconds)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"14. Timestepping Framework --> Baroclinic Dynamics\nBaroclinic dynamics in ocean\n14.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nBaroclinic dynamics type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Preconditioned conjugate gradient\" \n# \"Sub cyling\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.2. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nBaroclinic dynamics scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Leap-frog + Asselin filter\" \n# \"Leap-frog + Periodic Euler\" \n# \"Predictor-corrector\" \n# \"Runge-Kutta 2\" \n# \"AM3-LF\" \n# \"Forward-backward\" \n# \"Forward operator\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.3. Time Step\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nBaroclinic time step (in seconds)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"15. Timestepping Framework --> Barotropic\nBarotropic time stepping in ocean\n15.1. Splitting\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTime splitting method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"split explicit\" \n# \"implicit\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15.2. Time Step\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nBarotropic time step (in seconds)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"16. Timestepping Framework --> Vertical Physics\nVertical physics time stepping in ocean\n16.1. Method\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDetails of vertical time stepping in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"17. Advection\nOcean advection\n17.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of advection in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"18. Advection --> Momentum\nProperties of lateral momemtum advection scheme in ocean\n18.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of lateral momemtum advection scheme in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flux form\" \n# \"Vector form\" \n# TODO - please enter value(s)\n",
"18.2. Scheme Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of ocean momemtum advection scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"18.3. ALE\nIs Required: FALSE Type: BOOLEAN Cardinality: 0.1\nUsing ALE for vertical advection ? (if vertical coordinates are sigma)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.ALE') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"19. Advection --> Lateral Tracers\nProperties of lateral tracer advection scheme in ocean\n19.1. Order\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nOrder of lateral tracer advection scheme in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"19.2. Flux Limiter\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nMonotonic flux limiter for lateral tracer advection scheme in ocean ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"19.3. Effective Order\nIs Required: TRUE Type: FLOAT Cardinality: 1.1\nEffective order of limited lateral tracer advection scheme in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"19.4. Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"19.5. Passive Tracers\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nPassive tracers advected",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ideal age\" \n# \"CFC 11\" \n# \"CFC 12\" \n# \"SF6\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"19.6. Passive Tracers Advection\nIs Required: FALSE Type: STRING Cardinality: 0.1\nIs advection of passive tracers different than active ? if so, describe.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"20. Advection --> Vertical Tracers\nProperties of vertical tracer advection scheme in ocean\n20.1. Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.vertical_tracers.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"20.2. Flux Limiter\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nMonotonic flux limiter for vertical tracer advection scheme in ocean ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"21. Lateral Physics\nOcean lateral physics\n21.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of lateral physics in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"21.2. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of transient eddy representation in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Eddy active\" \n# \"Eddy admitting\" \n# TODO - please enter value(s)\n",
"22. Lateral Physics --> Momentum --> Operator\nProperties of lateral physics operator for momentum in ocean\n22.1. Direction\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDirection of lateral physics momemtum scheme in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Horizontal\" \n# \"Isopycnal\" \n# \"Isoneutral\" \n# \"Geopotential\" \n# \"Iso-level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22.2. Order\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nOrder of lateral physics momemtum scheme in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Harmonic\" \n# \"Bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22.3. Discretisation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDiscretisation of lateral physics momemtum scheme in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Second order\" \n# \"Higher order\" \n# \"Flux limiter\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff\nProperties of eddy viscosity coeff in lateral physics momemtum scheme in the ocean\n23.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nLateral physics momemtum eddy viscosity coeff type in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Space varying\" \n# \"Time + space varying (Smagorinsky)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23.2. Constant Coefficient\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf constant, value of eddy viscosity coeff in lateral physics momemtum scheme (in m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"23.3. Variable Coefficient\nIs Required: FALSE Type: STRING Cardinality: 0.1\nIf space-varying, describe variations of eddy viscosity coeff in lateral physics momemtum scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"23.4. Coeff Background\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe background eddy viscosity coeff in lateral physics momemtum scheme (give values in m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"23.5. Coeff Backscatter\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there backscatter in eddy viscosity coeff in lateral physics momemtum scheme ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"24. Lateral Physics --> Tracers\nProperties of lateral physics for tracers in ocean\n24.1. Mesoscale Closure\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there a mesoscale closure in the lateral physics tracers scheme ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"24.2. Submesoscale Mixing\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"25. Lateral Physics --> Tracers --> Operator\nProperties of lateral physics operator for tracers in ocean\n25.1. Direction\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDirection of lateral physics tracers scheme in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Horizontal\" \n# \"Isopycnal\" \n# \"Isoneutral\" \n# \"Geopotential\" \n# \"Iso-level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.2. Order\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nOrder of lateral physics tracers scheme in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Harmonic\" \n# \"Bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.3. Discretisation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDiscretisation of lateral physics tracers scheme in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Second order\" \n# \"Higher order\" \n# \"Flux limiter\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff\nProperties of eddy diffusity coeff in lateral physics tracers scheme in the ocean\n26.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nLateral physics tracers eddy diffusity coeff type in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Space varying\" \n# \"Time + space varying (Smagorinsky)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26.2. Constant Coefficient\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"26.3. Variable Coefficient\nIs Required: FALSE Type: STRING Cardinality: 0.1\nIf space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"26.4. Coeff Background\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nDescribe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"26.5. Coeff Backscatter\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there backscatter in eddy diffusity coeff in lateral physics tracers scheme ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"27. Lateral Physics --> Tracers --> Eddy Induced Velocity\nProperties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean\n27.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of EIV in lateral physics tracers in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"GM\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27.2. Constant Val\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf EIV scheme for tracers is constant, specify coefficient value (M2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"27.3. Flux Type\nIs Required: TRUE Type: STRING Cardinality: 1.1\nType of EIV flux (advective or skew)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"27.4. Added Diffusivity\nIs Required: TRUE Type: STRING Cardinality: 1.1\nType of EIV added diffusivity (constant, flow dependent or none)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"28. Vertical Physics\nOcean Vertical Physics\n28.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of vertical physics in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"29. Vertical Physics --> Boundary Layer Mixing --> Details\nProperties of vertical physics in ocean\n29.1. Langmuir Cells Mixing\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there Langmuir cells mixing in upper ocean ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"30. Vertical Physics --> Boundary Layer Mixing --> Tracers\n*Properties of boundary layer (BL) mixing on tracers in the ocean *\n30.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of boundary layer mixing for tracers in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure - TKE\" \n# \"Turbulent closure - KPP\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Turbulent closure - Bulk Mixed Layer\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"30.2. Closure Order\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"30.3. Constant\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf constant BL mixing of tracers, specific coefficient (m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"30.4. Background\nIs Required: TRUE Type: STRING Cardinality: 1.1\nBackground BL mixing of tracers coefficient, (schema and value in m2/s - may by none)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"31. Vertical Physics --> Boundary Layer Mixing --> Momentum\n*Properties of boundary layer (BL) mixing on momentum in the ocean *\n31.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of boundary layer mixing for momentum in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure - TKE\" \n# \"Turbulent closure - KPP\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Turbulent closure - Bulk Mixed Layer\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"31.2. Closure Order\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"31.3. Constant\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf constant BL mixing of momentum, specific coefficient (m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"31.4. Background\nIs Required: TRUE Type: STRING Cardinality: 1.1\nBackground BL mixing of momentum coefficient, (schema and value in m2/s - may by none)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"32. Vertical Physics --> Interior Mixing --> Details\n*Properties of interior mixing in the ocean *\n32.1. Convection Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of vertical convection in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Non-penetrative convective adjustment\" \n# \"Enhanced vertical diffusion\" \n# \"Included in turbulence closure\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"32.2. Tide Induced Mixing\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how tide induced mixing is modelled (barotropic, baroclinic, none)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"32.3. Double Diffusion\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there double diffusion",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"32.4. Shear Mixing\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there interior shear mixing",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"33. Vertical Physics --> Interior Mixing --> Tracers\n*Properties of interior mixing on tracers in the ocean *\n33.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of interior mixing for tracers in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure / TKE\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"33.2. Constant\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf constant interior mixing of tracers, specific coefficient (m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"33.3. Profile\nIs Required: TRUE Type: STRING Cardinality: 1.1\nIs the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"33.4. Background\nIs Required: TRUE Type: STRING Cardinality: 1.1\nBackground interior mixing of tracers coefficient, (schema and value in m2/s - may by none)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"34. Vertical Physics --> Interior Mixing --> Momentum\n*Properties of interior mixing on momentum in the ocean *\n34.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of interior mixing for momentum in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure / TKE\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"34.2. Constant\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf constant interior mixing of momentum, specific coefficient (m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"34.3. Profile\nIs Required: TRUE Type: STRING Cardinality: 1.1\nIs the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"34.4. Background\nIs Required: TRUE Type: STRING Cardinality: 1.1\nBackground interior mixing of momentum coefficient, (schema and value in m2/s - may by none)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"35. Uplow Boundaries --> Free Surface\nProperties of free surface in ocean\n35.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of free surface in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"35.2. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nFree surface scheme in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear implicit\" \n# \"Linear filtered\" \n# \"Linear semi-explicit\" \n# \"Non-linear implicit\" \n# \"Non-linear filtered\" \n# \"Non-linear semi-explicit\" \n# \"Fully explicit\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"35.3. Embeded Seaice\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the sea-ice embeded in the ocean model (instead of levitating) ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"36. Uplow Boundaries --> Bottom Boundary Layer\nProperties of bottom boundary layer in ocean\n36.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of bottom boundary layer in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"36.2. Type Of Bbl\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of bottom boundary layer in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Diffusive\" \n# \"Acvective\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"36.3. Lateral Mixing Coef\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"36.4. Sill Overflow\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe any specific treatment of sill overflows",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37. Boundary Forcing\nOcean boundary forcing\n37.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of boundary forcing in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37.2. Surface Pressure\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37.3. Momentum Flux Correction\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37.4. Tracers Flux Correction\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37.5. Wave Effects\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how wave effects are modelled at ocean surface.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.wave_effects') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37.6. River Runoff Budget\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how river runoff from land surface is routed to ocean and any global adjustment done.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37.7. Geothermal Heating\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how geothermal heating is present at ocean bottom.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"38. Boundary Forcing --> Momentum --> Bottom Friction\nProperties of momentum bottom friction in ocean\n38.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of momentum bottom friction in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear\" \n# \"Non-linear\" \n# \"Non-linear (drag function of speed of tides)\" \n# \"Constant drag coefficient\" \n# \"None\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"39. Boundary Forcing --> Momentum --> Lateral Friction\nProperties of momentum lateral friction in ocean\n39.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of momentum lateral friction in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Free-slip\" \n# \"No-slip\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"40. Boundary Forcing --> Tracers --> Sunlight Penetration\nProperties of sunlight penetration scheme in ocean\n40.1. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of sunlight penetration scheme in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"1 extinction depth\" \n# \"2 extinction depth\" \n# \"3 extinction depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"40.2. Ocean Colour\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the ocean sunlight penetration scheme ocean colour dependent ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"40.3. Extinction Depth\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe and list extinctions depths for sunlight penetration scheme (if applicable).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"41. Boundary Forcing --> Tracers --> Fresh Water Forcing\nProperties of surface fresh water forcing in ocean\n41.1. From Atmopshere\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of surface fresh water forcing from atmos in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Freshwater flux\" \n# \"Virtual salt flux\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"41.2. From Sea Ice\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of surface fresh water forcing from sea-ice in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Freshwater flux\" \n# \"Virtual salt flux\" \n# \"Real salt flux\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"41.3. Forced Mode Restoring\nIs Required: TRUE Type: STRING Cardinality: 1.1\nType of surface salinity restoring in forced mode (OMIP)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"©2017 ES-DOC"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
phobson/statsmodels
|
examples/notebooks/tsa_arma_1.ipynb
|
bsd-3-clause
|
[
"Autoregressive Moving Average (ARMA): Artificial data",
"%matplotlib inline\n\nfrom __future__ import print_function\nimport numpy as np\nimport statsmodels.api as sm\nimport pandas as pd\nfrom statsmodels.tsa.arima_process import arma_generate_sample\nnp.random.seed(12345)",
"Generate some data from an ARMA process:",
"arparams = np.array([.75, -.25])\nmaparams = np.array([.65, .35])",
"The conventions of the arma_generate function require that we specify a 1 for the zero-lag of the AR and MA parameters and that the AR parameters be negated.",
"arparams = np.r_[1, -arparams]\nmaparams = np.r_[1, maparams]\nnobs = 250\ny = arma_generate_sample(arparams, maparams, nobs)",
"Now, optionally, we can add some dates information. For this example, we'll use a pandas time series.",
"dates = sm.tsa.datetools.dates_from_range('1980m1', length=nobs)\ny = pd.TimeSeries(y, index=dates)\narma_mod = sm.tsa.ARMA(y, order=(2,2))\narma_res = arma_mod.fit(trend='nc', disp=-1)\n\nprint(arma_res.summary())\n\ny.tail()\n\nimport matplotlib.pyplot as plt\nfig, ax = plt.subplots(figsize=(10,8))\nfig = arma_res.plot_predict(start='1999m6', end='2001m5', ax=ax)\nlegend = ax.legend(loc='upper left')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
steinam/teacher
|
jup_notebooks/data-science-ipython-notebooks-master/deep-learning/tensor-flow-exercises/4_convolutions.ipynb
|
mit
|
[
"Deep Learning with TensorFlow\nCredits: Forked from TensorFlow by Google\nSetup\nRefer to the setup instructions.\nExercise 4\nPreviously in 2_fullyconnected.ipynb and 3_regularization.ipynb, we trained fully connected networks to classify notMNIST characters.\nThe goal of this exercise is make the neural network convolutional.",
"# These are all the modules we'll be using later. Make sure you can import them\n# before proceeding further.\nimport cPickle as pickle\nimport numpy as np\nimport tensorflow as tf\n\npickle_file = 'notMNIST.pickle'\n\nwith open(pickle_file, 'rb') as f:\n save = pickle.load(f)\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n test_dataset = save['test_dataset']\n test_labels = save['test_labels']\n del save # hint to help gc free up memory\n print 'Training set', train_dataset.shape, train_labels.shape\n print 'Validation set', valid_dataset.shape, valid_labels.shape\n print 'Test set', test_dataset.shape, test_labels.shape",
"Reformat into a TensorFlow-friendly shape:\n- convolutions need the image data formatted as a cube (width by height by #channels)\n- labels as float 1-hot encodings.",
"image_size = 28\nnum_labels = 10\nnum_channels = 1 # grayscale\n\nimport numpy as np\n\ndef reformat(dataset, labels):\n dataset = dataset.reshape(\n (-1, image_size, image_size, num_channels)).astype(np.float32)\n labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n return dataset, labels\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint 'Training set', train_dataset.shape, train_labels.shape\nprint 'Validation set', valid_dataset.shape, valid_labels.shape\nprint 'Test set', test_dataset.shape, test_labels.shape\n\ndef accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n / predictions.shape[0])",
"Let's build a small network with two convolutional layers, followed by one fully connected layer. Convolutional networks are more expensive computationally, so we'll limit its depth and number of fully connected nodes.",
"batch_size = 16\npatch_size = 5\ndepth = 16\nnum_hidden = 64\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n\n # Input data.\n tf_train_dataset = tf.placeholder(\n tf.float32, shape=(batch_size, image_size, image_size, num_channels))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n \n # Variables.\n layer1_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, num_channels, depth], stddev=0.1))\n layer1_biases = tf.Variable(tf.zeros([depth]))\n layer2_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, depth, depth], stddev=0.1))\n layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))\n layer3_weights = tf.Variable(tf.truncated_normal(\n [image_size / 4 * image_size / 4 * depth, num_hidden], stddev=0.1))\n layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))\n layer4_weights = tf.Variable(tf.truncated_normal(\n [num_hidden, num_labels], stddev=0.1))\n layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))\n \n # Model.\n def model(data):\n conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer1_biases)\n conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer2_biases)\n shape = hidden.get_shape().as_list()\n reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])\n hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)\n return tf.matmul(hidden, layer4_weights) + layer4_biases\n \n # Training computation.\n logits = model(tf_train_dataset)\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n \n # Optimizer.\n optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)\n \n # Predictions for the training, validation, and test data.\n train_prediction = tf.nn.softmax(logits)\n 
valid_prediction = tf.nn.softmax(model(tf_valid_dataset))\n test_prediction = tf.nn.softmax(model(tf_test_dataset))\n\nnum_steps = 1001\n\nwith tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n print \"Initialized\"\n for step in xrange(num_steps):\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n batch_data = train_dataset[offset:(offset + batch_size), :, :, :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n _, l, predictions = session.run(\n [optimizer, loss, train_prediction], feed_dict=feed_dict)\n if (step % 50 == 0):\n print \"Minibatch loss at step\", step, \":\", l\n print \"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels)\n print \"Validation accuracy: %.1f%%\" % accuracy(\n valid_prediction.eval(), valid_labels)\n print \"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels)",
"Problem 1\nThe convolutional model above uses convolutions with stride 2 to reduce the dimensionality. Replace the strides a max pooling operation (nn.max_pool()) of stride 2 and kernel size 2.\n\n\nProblem 2\nTry to get the best performance you can using a convolutional net. Look for example at the classic LeNet5 architecture, adding Dropout, and/or adding learning rate decay."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
amitkaps/hackermath
|
Module_3b_principal_component_analysis.ipynb
|
mit
|
[
"Principle Component Analysis (PCA)\nKey Equation: $Ax = \\lambda b ~~ \\text{for} ~~ n \\times n $\nPCA is an orthogonal linear transformation that transforms the data to a new coordinate system such that the greatest variance by some projection of the data comes to lie on the first coordinate (called the first principal component), the second greatest variance on the second coordinate, and so on. This is an Unsupervised Learning Techniques - Which means we don't have a target variable.",
"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\nplt.style.use('fivethirtyeight')\nplt.rcParams['figure.figsize'] = (10, 6)",
"From 2 Dimension to 1 Dimension\nLet us generate a two variable data set - $a,b$\n$$ b = 50 + 3a + \\epsilon$$",
"np.random.seed(123)\n\na = np.arange(12, 56, 0.5)\ne = np.random.normal(0, 100, a.size)\nb = 500 + 20*a + e\n\nX = np.c_[a,b]\n\ndef plot2var (m, xlabel, ylabel):\n x = m[:,0]\n y = m[:,1]\n fig, ax = plt.subplots(figsize=(6, 6))\n plt.scatter(x, y, s = 40, alpha = 0.8)\n sns.rugplot(x, color=\"m\", ax=ax)\n sns.rugplot(y, color=\"m\", vertical=True, ax=ax)\n \n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n\nplot2var(X, 'a', 'b')",
"Standardizing the Variables\nCentering the Variables (Remove mean and divide by std dev)",
"X_mean = np.mean(X, axis=0)\n\nX_mean\n\nX_sd = np.std(X, axis=0)\n\nX_sd\n\nX_std = np.subtract(X, X_mean) / X_sd\n\ndef plot2var_std (m, xlabel, ylabel):\n x = m[:,0]\n y = m[:,1]\n fig, ax = plt.subplots(figsize=(6, 6))\n plt.scatter(x, y, s = 40, alpha = 0.8)\n sns.rugplot(x, color=\"m\", ax=ax)\n sns.rugplot(y, color=\"m\", vertical=True, ax=ax)\n \n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n \n plt.xlim([-3,3])\n plt.ylim([-3,3])\n\nplot2var_std(X_std, \"a\", \"b\")",
"Calculate the Covariance Matrix",
"cov_mat_2var = np.cov(X_std.T)\n\ncov_mat_2var",
"So now this is the symetric $A$ matrix we are trying to solve\n$$ Ax = \\lambda x $$\nwhere \n$$ A = \\begin{bmatrix} 1.01 & -0.92 \\ -0.92 & 1.01 \\end{bmatrix} $$\nGet Eigen-vectors and Eigen-values\nLets get the eigen-vectors for this matrix",
"eigen_val_2var, eigen_vec_2var = np.linalg.eig(cov_mat_2var)\n\neigen_val_2var\n\neigen_vec_2var\n\neigen_vec_2var[1].dot(eigen_vec_2var[0])",
"So our eigen vectors and eigen values are:\n$$ \\lambda_1 = 1.93, \\lambda_2 = 0.09 $$\n$$ \\vec{v_1} = \\begin{bmatrix} 0.707 \\ -0.707\\end{bmatrix} $$\n$$ \\vec{v_2} = \\begin{bmatrix} 0.707 \\ 0.707\\end{bmatrix} $$\nThese are orthogonal to each other. Let us plots to see these eigen vectors",
"def plot2var_eigen (m, xlabel, ylabel):\n x = m[:,0]\n y = m[:,1]\n fig, ax = plt.subplots(figsize=(6, 6))\n plt.scatter(x, y, s = 40, alpha = 0.8)\n sns.rugplot(x, color=\"m\", ax=ax)\n sns.rugplot(y, color=\"m\", vertical=True, ax=ax)\n \n cov_mat = np.cov(m.T)\n eigen_val, eigen_vec = np.linalg.eig(cov_mat)\n \n plt.quiver(eigen_vec[0, 0], eigen_vec[0, 1], angles='xy', scale_units='xy', scale=1, color='brown')\n plt.quiver(eigen_vec[1, 0], eigen_vec[1, 1], angles='xy', scale_units='xy', scale=1, color='brown')\n\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n \n plt.xlim(-3,3)\n plt.ylim(-3,3)\n\nplot2var_eigen(X_std, 'a' ,'b')",
"Projection Matrix\nLet us project our orginal values to see the new results",
"eigen_vec_2var\n\nX_std.T.shape\n\neigen_vec_2var.shape\n\nX_proj = eigen_vec_2var.dot(X_std.T)\n\nplot2var_eigen(X_proj.T, 'pca1' ,'pca2')",
"Using PCA from SKlearn",
"from sklearn.decomposition import PCA\n\npca = PCA(n_components=2)\n\npca.fit(X_std)\n\nX_pca_proj = pca.transform(X_std)\n\nplot2var_std(X_pca_proj, 'pca1', 'pca2')\n\npca.explained_variance_",
"From 4 Dimensions to 2 Dimensions\nRun PCA with 2 dimensions on the cars dataset",
"pop = pd.read_csv('data/cars_small.csv')\n\npop.head()",
"Preprocessing - brand, price, kmpl, bhp",
"pop = pop.drop(['model'], axis = 1)\n\nfrom sklearn import preprocessing\nle = preprocessing.LabelEncoder()\ndf = pop.apply(le.fit_transform)\n\ndf.head()\n\ng = sns.PairGrid(df, hue = 'type')\ng.map_diag(plt.hist)\ng.map_offdiag(plt.scatter, alpha = 0.8)",
"Standardizing",
"X = df.iloc[:,:4]\n\nfrom sklearn.preprocessing import StandardScaler\nX_std = StandardScaler().fit_transform(X)",
"Eigendecomposition - Computing Eigenvectors and Eigenvalues",
"mean_vec = np.mean(X_std, axis=0)\ncov_mat = (X_std - mean_vec).T.dot((X_std - mean_vec)) / (X_std.shape[0]-1)\nprint('Covariance matrix \\n%s' %cov_mat)\n\n# Doing this directly using np.cov\nprint('NumPy covariance matrix: \\n%s' %np.cov(X_std.T))\n\ncov_mat = np.cov(X_std.T)\neig_vals, eig_vecs = np.linalg.eig(cov_mat)\n\nprint('Eigenvectors \\n%s' %eig_vecs)\nprint('\\nEigenvalues \\n%s' %eig_vals)",
"How do you select which 2 axis to choose?\nSorting the Eigenvalues and Eigenvectors\nIn order to decide which eigenvector(s) can dropped without losing too much information for the construction of lower-dimensional subspace, we need to inspect the corresponding eigenvalues: The eigenvectors with the lowest eigenvalues bear the least information about the distribution of the data; those are the ones can be dropped.",
"# Make a list of (eigenvalue, eigenvector) tuples\neig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:,i]) for i in range(len(eig_vals))]\n\n# Sort the (eigenvalue, eigenvector) tuples from high to low\neig_pairs.sort(key=lambda x: x[0], reverse=True)\n\n# Visually confirm that the list is correctly sorted by decreasing eigenvalues\nprint('Eigenvalues in descending order:')\nfor i in eig_pairs:\n print(i[0])",
"Explained Variance\nThe explained variance tells us how much information (variance) can be attributed to each of the principal components.",
"tot = sum(eig_vals)\nvar_exp = [(i / tot)*100 for i in sorted(eig_vals, reverse=True)]\ncum_var_exp = np.cumsum(var_exp)\n\nplt.bar(range(4), var_exp, alpha=0.5, align='center',\n label='individual explained variance')\nplt.step(range(4), cum_var_exp, where='mid',\n label='cumulative explained variance')\nplt.ylabel('Explained variance ratio')\nplt.xlabel('Principal components')\nplt.legend(loc='best')\nplt.tight_layout()",
"Projection Matrix\nThe “projection matrix” is just a matrix of our concatenated top k eigenvectors. Here, we are reducing the 4-dimensional feature space to a 2-dimensional feature subspace, by choosing the “top 2” eigenvectors with the highest eigenvalues to construct our $n×k$-dimensional eigenvector matrix $W$.",
"matrix_w = np.hstack((eig_pairs[0][1].reshape(4,1),\n eig_pairs[1][1].reshape(4,1)))\n\nprint('Matrix W:\\n', matrix_w)\n\nX_proj = X_std.dot(matrix_w)\n\nfig, ax = plt.subplots(figsize=(6, 6))\nplt.scatter(X_proj[:,0], X_proj[:,1], c = df.type, s = 100, cmap = plt.cm.viridis)\nplt.xlabel('Principal Component 1')\nplt.ylabel('Principal Component 2')",
"PCA using sklearn",
"from sklearn.decomposition import PCA\n\npca = PCA(n_components=2)\n\npca.fit(X_std)\n\nX_proj_sklearn = pca.transform(X_std)\n\nfig, ax = plt.subplots(figsize=(6, 6))\nplt.scatter(X_proj_sklearn[:,0], X_proj_sklearn[:,1], c = df.type, \n s = 100, cmap = plt.cm.viridis)\nplt.xlabel('Principal Component 1')\nplt.ylabel('Principal Component 2')\n\npca.explained_variance_",
"From 784 Dimensions to 2 Dimensions",
"digits = pd.read_csv('data/digits.csv')\n\ndigits.head()\n\ndigits.shape\n\ndigitsX = digits.iloc[:,1:785]\n\ndigitsX.head()\n\npca = PCA(n_components=2)\n\npca.fit(digitsX)\n\ndigits_trans = pca.transform(digitsX)\n\ndigits_trans\n\nplt.scatter(digits_trans[:,0], digits_trans[:,1], c = digits.num, \n s = 20, alpha = 0.8, cmap = plt.cm.viridis)\nplt.xlabel('Principal Component 1')\nplt.ylabel('Principal Component 2')",
"Exercise - Run PCA on the Bank Data"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
mne-tools/mne-tools.github.io
|
stable/_downloads/fcc5782db3e2930fc79f31bc745495ed/60_ctf_bst_auditory.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"Working with CTF data: the Brainstorm auditory dataset\nHere we compute the evoked from raw for the auditory Brainstorm\ntutorial dataset. For comparison, see :footcite:TadelEtAl2011 and the\nassociated brainstorm site.\nExperiment:\n- One subject, 2 acquisition runs 6 minutes each.\n- Each run contains 200 regular beeps and 40 easy deviant beeps.\n- Random ISI: between 0.7s and 1.7s seconds, uniformly distributed.\n- Button pressed when detecting a deviant with the right index finger.\n\nThe specifications of this dataset were discussed initially on the\nFieldTrip bug tracker_.",
"# Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>\n# Eric Larson <larson.eric.d@gmail.com>\n# Jaakko Leppakangas <jaeilepp@student.jyu.fi>\n#\n# License: BSD-3-Clause\n\nimport os.path as op\nimport pandas as pd\nimport numpy as np\n\nimport mne\nfrom mne import combine_evoked\nfrom mne.minimum_norm import apply_inverse\nfrom mne.datasets.brainstorm import bst_auditory\nfrom mne.io import read_raw_ctf\n\nprint(__doc__)",
"To reduce memory consumption and running time, some of the steps are\nprecomputed. To run everything from scratch change use_precomputed to\nFalse. With use_precomputed = False running time of this script can\nbe several minutes even on a fast computer.",
"use_precomputed = True",
"The data was collected with a CTF 275 system at 2400 Hz and low-pass\nfiltered at 600 Hz. Here the data and empty room data files are read to\nconstruct instances of :class:mne.io.Raw.",
"data_path = bst_auditory.data_path()\n\nsubject = 'bst_auditory'\nsubjects_dir = op.join(data_path, 'subjects')\n\nraw_fname1 = op.join(data_path, 'MEG', subject, 'S01_AEF_20131218_01.ds')\nraw_fname2 = op.join(data_path, 'MEG', subject, 'S01_AEF_20131218_02.ds')\nerm_fname = op.join(data_path, 'MEG', subject, 'S01_Noise_20131218_01.ds')",
"In the memory saving mode we use preload=False and use the memory\nefficient IO which loads the data on demand. However, filtering and some\nother functions require the data to be preloaded into memory.",
"raw = read_raw_ctf(raw_fname1)\nn_times_run1 = raw.n_times\n\n# Here we ignore that these have different device<->head transforms\nmne.io.concatenate_raws(\n [raw, read_raw_ctf(raw_fname2)], on_mismatch='ignore')\nraw_erm = read_raw_ctf(erm_fname)",
"The data array consists of 274 MEG axial gradiometers, 26 MEG reference\nsensors and 2 EEG electrodes (Cz and Pz). In addition:\n\n1 stim channel for marking presentation times for the stimuli\n1 audio channel for the sent signal\n1 response channel for recording the button presses\n1 ECG bipolar\n2 EOG bipolar (vertical and horizontal)\n12 head tracking channels\n20 unused channels\n\nNotice also that the digitized electrode positions (stored in a .pos file)\nwere automatically loaded and added to the ~mne.io.Raw object.\nThe head tracking channels and the unused channels are marked as misc\nchannels. Here we define the EOG and ECG channels.",
"raw.set_channel_types({'HEOG': 'eog', 'VEOG': 'eog', 'ECG': 'ecg'})\nif not use_precomputed:\n # Leave out the two EEG channels for easier computation of forward.\n raw.pick(['meg', 'stim', 'misc', 'eog', 'ecg']).load_data()",
"For noise reduction, a set of bad segments have been identified and stored\nin csv files. The bad segments are later used to reject epochs that overlap\nwith them.\nThe file for the second run also contains some saccades. The saccades are\nremoved by using SSP. We use pandas to read the data from the csv files. You\ncan also view the files with your favorite text editor.",
"annotations_df = pd.DataFrame()\noffset = n_times_run1\nfor idx in [1, 2]:\n csv_fname = op.join(data_path, 'MEG', 'bst_auditory',\n 'events_bad_0%s.csv' % idx)\n df = pd.read_csv(csv_fname, header=None,\n names=['onset', 'duration', 'id', 'label'])\n print('Events from run {0}:'.format(idx))\n print(df)\n\n df['onset'] += offset * (idx - 1)\n annotations_df = pd.concat([annotations_df, df], axis=0)\n\nsaccades_events = df[df['label'] == 'saccade'].values[:, :3].astype(int)\n\n# Conversion from samples to times:\nonsets = annotations_df['onset'].values / raw.info['sfreq']\ndurations = annotations_df['duration'].values / raw.info['sfreq']\ndescriptions = annotations_df['label'].values\n\nannotations = mne.Annotations(onsets, durations, descriptions)\nraw.set_annotations(annotations)\ndel onsets, durations, descriptions",
"Here we compute the saccade and EOG projectors for magnetometers and add\nthem to the raw data. The projectors are added to both runs.",
"saccade_epochs = mne.Epochs(raw, saccades_events, 1, 0., 0.5, preload=True,\n baseline=(None, None),\n reject_by_annotation=False)\n\nprojs_saccade = mne.compute_proj_epochs(saccade_epochs, n_mag=1, n_eeg=0,\n desc_prefix='saccade')\nif use_precomputed:\n proj_fname = op.join(data_path, 'MEG', 'bst_auditory',\n 'bst_auditory-eog-proj.fif')\n projs_eog = mne.read_proj(proj_fname)[0]\nelse:\n projs_eog, _ = mne.preprocessing.compute_proj_eog(raw.load_data(),\n n_mag=1, n_eeg=0)\nraw.add_proj(projs_saccade)\nraw.add_proj(projs_eog)\ndel saccade_epochs, saccades_events, projs_eog, projs_saccade # To save memory",
"Visually inspect the effects of projections. Click on 'proj' button at the\nbottom right corner to toggle the projectors on/off. EOG events can be\nplotted by adding the event list as a keyword argument. As the bad segments\nand saccades were added as annotations to the raw data, they are plotted as\nwell.",
"raw.plot(block=True)",
"Typical preprocessing step is the removal of power line artifact (50 Hz or\n60 Hz). Here we notch filter the data at 60, 120 and 180 to remove the\noriginal 60 Hz artifact and the harmonics. The power spectra are plotted\nbefore and after the filtering to show the effect. The drop after 600 Hz\nappears because the data was filtered during the acquisition. In memory\nsaving mode we do the filtering at evoked stage, which is not something you\nusually would do.",
"if not use_precomputed:\n raw.plot_psd(tmax=np.inf, picks='meg')\n notches = np.arange(60, 181, 60)\n raw.notch_filter(notches, phase='zero-double', fir_design='firwin2')\n raw.plot_psd(tmax=np.inf, picks='meg')",
"We also lowpass filter the data at 100 Hz to remove the hf components.",
"if not use_precomputed:\n raw.filter(None, 100., h_trans_bandwidth=0.5, filter_length='10s',\n phase='zero-double', fir_design='firwin2')",
"Epoching and averaging.\nFirst some parameters are defined and events extracted from the stimulus\nchannel (UPPT001). The rejection thresholds are defined as peak-to-peak\nvalues and are in T / m for gradiometers, T for magnetometers and\nV for EOG and EEG channels.",
"tmin, tmax = -0.1, 0.5\nevent_id = dict(standard=1, deviant=2)\nreject = dict(mag=4e-12, eog=250e-6)\n# find events\nevents = mne.find_events(raw, stim_channel='UPPT001')",
"The event timing is adjusted by comparing the trigger times on detected\nsound onsets on channel UADC001-4408.",
"sound_data = raw[raw.ch_names.index('UADC001-4408')][0][0]\nonsets = np.where(np.abs(sound_data) > 2. * np.std(sound_data))[0]\nmin_diff = int(0.5 * raw.info['sfreq'])\ndiffs = np.concatenate([[min_diff + 1], np.diff(onsets)])\nonsets = onsets[diffs > min_diff]\nassert len(onsets) == len(events)\ndiffs = 1000. * (events[:, 0] - onsets) / raw.info['sfreq']\nprint('Trigger delay removed (μ ± σ): %0.1f ± %0.1f ms'\n % (np.mean(diffs), np.std(diffs)))\nevents[:, 0] = onsets\ndel sound_data, diffs",
"We mark a set of bad channels that seem noisier than others. This can also\nbe done interactively with raw.plot by clicking the channel name\n(or the line). The marked channels are added as bad when the browser window\nis closed.",
"raw.info['bads'] = ['MLO52-4408', 'MRT51-4408', 'MLO42-4408', 'MLO43-4408']",
"The epochs (trials) are created for MEG channels. First we find the picks\nfor MEG and EOG channels. Then the epochs are constructed using these picks.\nThe epochs overlapping with annotated bad segments are also rejected by\ndefault. To turn off rejection by bad segments (as was done earlier with\nsaccades) you can use keyword reject_by_annotation=False.",
"epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=['meg', 'eog'],\n baseline=(None, 0), reject=reject, preload=False,\n proj=True)",
"We only use first 40 good epochs from each run. Since we first drop the bad\nepochs, the indices of the epochs are no longer same as in the original\nepochs collection. Investigation of the event timings reveals that first\nepoch from the second run corresponds to index 182.",
"epochs.drop_bad()\n\n# avoid warning about concatenating with annotations\nepochs.set_annotations(None)\n\nepochs_standard = mne.concatenate_epochs([epochs['standard'][range(40)],\n epochs['standard'][182:222]])\nepochs_standard.load_data() # Resampling to save memory.\nepochs_standard.resample(600, npad='auto')\nepochs_deviant = epochs['deviant'].load_data()\nepochs_deviant.resample(600, npad='auto')\ndel epochs",
"The averages for each conditions are computed.",
"evoked_std = epochs_standard.average()\nevoked_dev = epochs_deviant.average()\ndel epochs_standard, epochs_deviant",
"Typical preprocessing step is the removal of power line artifact (50 Hz or\n60 Hz). Here we lowpass filter the data at 40 Hz, which will remove all\nline artifacts (and high frequency information). Normally this would be done\nto raw data (with :func:mne.io.Raw.filter), but to reduce memory\nconsumption of this tutorial, we do it at evoked stage. (At the raw stage,\nyou could alternatively notch filter with :func:mne.io.Raw.notch_filter.)",
"for evoked in (evoked_std, evoked_dev):\n evoked.filter(l_freq=None, h_freq=40., fir_design='firwin')",
"Here we plot the ERF of standard and deviant conditions. In both conditions\nwe can see the P50 and N100 responses. The mismatch negativity is visible\nonly in the deviant condition around 100-200 ms. P200 is also visible around\n170 ms in both conditions but much stronger in the standard condition. P300\nis visible in deviant condition only (decision making in preparation of the\nbutton press). You can view the topographies from a certain time span by\npainting an area with clicking and holding the left mouse button.",
"evoked_std.plot(window_title='Standard', gfp=True, time_unit='s')\nevoked_dev.plot(window_title='Deviant', gfp=True, time_unit='s')",
"Show activations as topography figures.",
"times = np.arange(0.05, 0.301, 0.025)\nevoked_std.plot_topomap(times=times, title='Standard', time_unit='s')\n\nevoked_dev.plot_topomap(times=times, title='Deviant', time_unit='s')",
"We can see the MMN effect more clearly by looking at the difference between\nthe two conditions. P50 and N100 are no longer visible, but MMN/P200 and\nP300 are emphasised.",
"evoked_difference = combine_evoked([evoked_dev, evoked_std], weights=[1, -1])\nevoked_difference.plot(window_title='Difference', gfp=True, time_unit='s')",
"Source estimation.\nWe compute the noise covariance matrix from the empty room measurement\nand use it for the other runs.",
"reject = dict(mag=4e-12)\ncov = mne.compute_raw_covariance(raw_erm, reject=reject)\ncov.plot(raw_erm.info)\ndel raw_erm",
"The transformation is read from a file:",
"trans_fname = op.join(data_path, 'MEG', 'bst_auditory',\n 'bst_auditory-trans.fif')\ntrans = mne.read_trans(trans_fname)",
"To save time and memory, the forward solution is read from a file. Set\nuse_precomputed=False in the beginning of this script to build the\nforward solution from scratch. The head surfaces for constructing a BEM\nsolution are read from a file. Since the data only contains MEG channels, we\nonly need the inner skull surface for making the forward solution. For more\ninformation: CHDBBCEJ, :func:mne.setup_source_space,\nbem-model, :func:mne.bem.make_watershed_bem.",
"if use_precomputed:\n fwd_fname = op.join(data_path, 'MEG', 'bst_auditory',\n 'bst_auditory-meg-oct-6-fwd.fif')\n fwd = mne.read_forward_solution(fwd_fname)\nelse:\n src = mne.setup_source_space(subject, spacing='ico4',\n subjects_dir=subjects_dir, overwrite=True)\n model = mne.make_bem_model(subject=subject, ico=4, conductivity=[0.3],\n subjects_dir=subjects_dir)\n bem = mne.make_bem_solution(model)\n fwd = mne.make_forward_solution(evoked_std.info, trans=trans, src=src,\n bem=bem)\n\ninv = mne.minimum_norm.make_inverse_operator(evoked_std.info, fwd, cov)\nsnr = 3.0\nlambda2 = 1.0 / snr ** 2\ndel fwd",
"The sources are computed using dSPM method and plotted on an inflated brain\nsurface. For interactive controls over the image, use keyword\ntime_viewer=True.\nStandard condition.",
"stc_standard = mne.minimum_norm.apply_inverse(evoked_std, inv, lambda2, 'dSPM')\nbrain = stc_standard.plot(subjects_dir=subjects_dir, subject=subject,\n surface='inflated', time_viewer=False, hemi='lh',\n initial_time=0.1, time_unit='s')\ndel stc_standard, brain",
"Deviant condition.",
"stc_deviant = mne.minimum_norm.apply_inverse(evoked_dev, inv, lambda2, 'dSPM')\nbrain = stc_deviant.plot(subjects_dir=subjects_dir, subject=subject,\n surface='inflated', time_viewer=False, hemi='lh',\n initial_time=0.1, time_unit='s')\ndel stc_deviant, brain",
"Difference.",
"stc_difference = apply_inverse(evoked_difference, inv, lambda2, 'dSPM')\nbrain = stc_difference.plot(subjects_dir=subjects_dir, subject=subject,\n surface='inflated', time_viewer=False, hemi='lh',\n initial_time=0.15, time_unit='s')",
"References\n.. footbibliography::"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
matt-graham/auxiliary-pm-mcmc
|
experiment_notebooks/Analyse results.ipynb
|
mit
|
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport glob\nimport os\nfrom collections import OrderedDict\nimport scipy.io\n%matplotlib inline\n%load_ext rpy2.ipython",
"Load python R interface and import coda for computing chain statistics",
"import rpy2.interactive as r\nimport rpy2.interactive.packages\nr.packages.importr(\"coda\")\nrlib = r.packages.packages",
"Function for correct printing of values to specified number of significant figures",
"def to_precision(x, p):\n p_str = str(p)\n fmt_string = '{0:.' + p_str + 'g}'\n return fmt_string.format(x)\n# alternative method which properly deals with trailing zeros can be got by uncommenting below\n# to load function by Randle Taylor from git URL\n# %load https://raw.githubusercontent.com/randlet/to-precision/master/to_precision.py\n\nexp_dir = os.path.join(os.environ['EXP_DIR'], 'apm_mcmc')",
"Specify file pattern for saved results for different data set and method combination. Ordered dict used so that order is maintained in printed LaTeX table",
"file_name_pattern_map = OrderedDict([\n (('Pima', 'PM MH'), '*pima_pmmh_chain_*_results.npz'),\n (('Pima', 'APM MI+MH'), '*pima_apm(mi+mh)_chain_*_results.npz'),\n (('Pima', 'APM SS+MH'), '*pima_apm(ss+mh)_chain_*_results.npz'),\n (('Pima', 'APM SS+SS'), '*pima_apm(ess+rdss)_chain_*_results.npz'),\n (('Breast', 'PM MH'), '*breast_pmmh_chain_*_results.npz'),\n (('Breast', 'APM MI+MH'), '*breast_apm(mi+mh)_chain_*_results.npz'),\n (('Breast', 'APM SS+MH'), '*breast_apm(ss+mh)_chain_*_results.npz'),\n (('Breast', 'APM SS+SS'), '*breast_apm(ess+rdss)_chain_*_results.npz'),\n])",
"Load up saved chains and run stats and store in another ordered dict. Also compute effective sample size and Gelman-Rubin R stat for chains at this point using R-coda interface",
"results_map = OrderedDict()\nfor (data_set, method), file_name_pattern in file_name_pattern_map.items():\n file_list = glob.glob(os.path.join(exp_dir, file_name_pattern))\n chains = []\n chains_stats = []\n for file_path in file_list:\n results = np.load(file_path)\n chains.append(results['thetas'])\n chains_stats.append(results['n_reject_n_cubic_ops_comp_time'])\n chains = np.array(chains)\n chains_stats = np.array(chains_stats)\n n_effs = np.empty((chains.shape[0], 2))\n for i, chain in enumerate(chains):\n n_effs[i, 0] = rlib.coda.effectiveSize(chain[:, 0])[0]\n n_effs[i, 1] = rlib.coda.effectiveSize(chain[:, 1])[0]\n r_chains_list = rlib.coda.as_mcmc_list([rlib.coda.as_mcmc(chain) for chain in chains[:, :, :]])\n gelman_rubin = rlib.coda.gelman_diag(r_chains_list, autoburnin=False)\n results_map[(data_set, method)] = (chains, chains_stats, n_effs, gelman_rubin)\n\nprc_mn = 3 # precision to report means with\nprc_se = 2 # precision to report standard errors with\nmax_n_chains = 0 # will be populated with max n chains to allow proper \n # formatting of autocorr plots later for cases when\n # plotting intermediate results with differing number\n # of chains completed per method / data set\n# header for LaTeX table of results\nlatex_table = ''\nlatex_table += ' & Method & $N_\\\\text{cub.cop}$ & Acc. rate '\nlatex_table += '& $N_\\\\text{eff}$ & $\\\\frac{N_\\\\text{eff}}{N_\\\\text{cub.op}}$ & $\\\\hat{R}$ '\nlatex_table += '& $N_\\\\text{eff}$ & $\\\\frac{N_\\\\text{eff}}{N_\\\\text{cub.op}}$ & $\\\\hat{R}$ '\nlatex_table += '\\\\\\\\ \\n \\hline \\n'\nfor (data_set, method), (chains, chains_stats, n_effs, gelman_rubin) in results_map.items():\n n_chains, n_samples, n_param = chains.shape\n max_n_chains = max(max_n_chains, n_chains) # update record of maximum no. 
chains\n # second last column of chain stats is number of cubic operations for a run\n # for display purposes, divide by 1000 as easier to visually compare without\n # scientific notation\n # possibly two reject rates (for u|theta and theta|u updates) present so index\n # chain_stats from end rather than start to make consistent\n n_kcops = chains_stats[:, -2] / 1000. \n # calculate various mean stats over chains and their associated statndard errors\n mean_n_k_cub_ops = n_kcops.mean()\n ster_n_k_cub_ops = n_kcops.std(ddof=1) / n_chains**0.5\n mean_n_eff_samps = n_effs.mean(0)\n ster_n_eff_samps = n_effs.std(0, ddof=1) / n_chains**0.5\n mean_es_per_kcop = (n_effs / n_kcops[:, None]).mean(0)\n ster_es_per_kcop = (n_effs / n_kcops[:, None]).std(0, ddof=1) / n_chains**0.5\n # third column from end contains reject rate for theta|u updates\n # often will be first column however sometimes reject rate for u|theta updates\n # present as first column\n acc_rates = 1. - chains_stats[:, -3] * 1. / n_samples\n mean_accept_rate = acc_rates.mean()\n ster_accept_rate = acc_rates.std(0, ddof=1) / n_chains**0.5\n # add row for current results to LaTeX table\n latex_table += ' & \\sc {0} & {1} ({2}) & {3} ({4})\\n'.format(\n method.lower(), \n to_precision(mean_n_k_cub_ops, prc_mn), \n to_precision(ster_n_k_cub_ops, prc_se),\n to_precision(mean_accept_rate, prc_mn), \n to_precision(ster_accept_rate, prc_se)\n )\n latex_table += ' & {0} ({1}) & {2} ({3}) & {4}\\n'.format(\n to_precision(mean_n_eff_samps[0], prc_mn), \n to_precision(ster_n_eff_samps[0], prc_se),\n to_precision(mean_es_per_kcop[0], prc_mn), \n to_precision(ster_es_per_kcop[0], prc_se),\n to_precision(gelman_rubin[0][0], prc_mn),\n )\n latex_table += ' & {0} ({1}) & {2} ({3}) & {4}'.format(\n to_precision(mean_n_eff_samps[1], prc_mn), \n to_precision(ster_n_eff_samps[1], prc_se),\n to_precision(mean_es_per_kcop[1], prc_mn), \n to_precision(ster_es_per_kcop[1], prc_se),\n to_precision(gelman_rubin[0][1], prc_mn),\n )\n 
latex_table += ' \\\\\\\\ \\n'\n # Print space delimited table of results for quick checking\n print('-' * 55)\n print('Data set: {0: <8} Method: {1: <10} # chains: {2}'\n .format(data_set, method, n_chains))\n print('-' * 55)\n print(' mean num. k cubic op. {0: <6} ({1})'\n .format(to_precision(mean_n_k_cub_ops, prc_mn), \n to_precision(ster_n_k_cub_ops, prc_se)))\n print(' effective sample size (sigma) {0: <6} ({1})'\n .format(to_precision(mean_n_eff_samps[0], prc_mn), \n to_precision(ster_n_eff_samps[0], prc_se)))\n print(' effective sample size (tau) {0: <6} ({1})'\n .format(to_precision(mean_n_eff_samps[1], prc_mn), \n to_precision(ster_n_eff_samps[1], prc_se)))\n print(' eff. samp. / cubic op. (sigma) {0: <6} ({1})'\n .format(to_precision(mean_es_per_kcop[0], prc_mn), \n to_precision(ster_es_per_kcop[0], prc_se)))\n print(' eff. samp. / cubic op. (tau) {0: <6} ({1})'\n .format(to_precision(mean_es_per_kcop[1], prc_mn), \n to_precision(ster_es_per_kcop[1], prc_se)))\n print(' Gelman-Rubin statistic (sigma) {0}'\n .format(to_precision(gelman_rubin[0][0], prc_mn)))\n print(' Gelman-Rubin statistic (tau) {0}'\n .format(to_precision(gelman_rubin[0][1], prc_mn)))\n print(' n acc rates off-target {0}'\n .format(np.sum((acc_rates < 0.15) + (acc_rates > 0.30))))",
"Print LaTeX table rows for inclusion in paper",
"print(latex_table)",
"Save all chains for different method / dataset / variate combinations to a MATLAB readable file to allow loading results there to plot autocorrelations in same style as other figures",
"n_chains = 10\nn_samples = 10000\nn_methods = len(file_name_pattern_map) / 2\npima_sigma_chains = np.empty((n_chains, n_samples, n_methods))\npima_tau_chains = np.empty((n_chains, n_samples, n_methods))\nbreast_sigma_chains = np.empty((n_chains, n_samples, n_methods))\nbreast_tau_chains = np.empty((n_chains, n_samples, n_methods))\npima_comp_costs = np.empty(n_methods)\nbreast_comp_costs = np.empty(n_methods)\npima_method_names = []\nbreast_method_names = []\nm, n = 0, 0\nfor (data_set, method), (chains, chains_stats, n_effs, gelman_rubin) in results_map.items():\n if data_set.lower() == 'pima':\n pima_sigma_chains[:, :, m] = chains[:, -n_samples:, 0]\n pima_tau_chains[:, :, m] = chains[:, -n_samples:, 1]\n pima_method_names.append(method)\n pima_comp_costs[m] = chains_stats[:, -2].mean()\n m += 1\n elif data_set.lower() == 'breast':\n breast_sigma_chains[:, :, n] = chains[:, -n_samples:, 0]\n breast_tau_chains[:, :, n] = chains[:, -n_samples:, 1]\n breast_method_names.append(method)\n breast_comp_costs[n] = chains_stats[:, -2].mean()\n n += 1\npima_rel_comp_costs = pima_comp_costs / pima_comp_costs[0]\nbreast_rel_comp_costs = breast_comp_costs / breast_comp_costs[0]\ntime_stamp = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S_')\nscipy.io.savemat(os.path.join(exp_dir, time_stamp + 'chains_matlab_dump.mat'),\n {\n 'pima_sigma_chains' : pima_sigma_chains,\n 'pima_tau_chains' : pima_tau_chains,\n 'breast_sigma_chains' : breast_sigma_chains,\n 'breast_tau_chains' : breast_tau_chains,\n 'pima_rel_comp_costs' : pima_rel_comp_costs,\n 'breast_rel_comp_costs' : breast_rel_comp_costs,\n 'pima_method_names' : pima_method_names,\n 'breast_method_names' : breast_method_names\n }\n)",
"Plot autocorrelation plots for all chains - if lots of chains loaded will be a large figure so best viewed externally",
"thin_factor = 10\nmax_lag = 30\nfig = plt.figure(figsize=(40, 16))\nn_dm = len(results_map)\nfor i, ((data_set, method), (chains, chains_stats, n_effective, gelman_rubin)) in enumerate(results_map.items()):\n for j, chain in enumerate(chains):\n ax_tau = fig.add_subplot(max_n_chains, 2 * n_dm, j * 2 * n_dm + 1 + 2 * i % 60)\n ax_sig = fig.add_subplot(max_n_chains, 2 * n_dm, j * 2 * n_dm + 2 + 2 * i % 60)\n x_tau = chain[::thin_factor, 0].copy()\n x_tau -= x_tau.mean()\n autocorr_tau = np.correlate(x_tau, x_tau, mode=2)[x_tau.size:]\n autocorr_tau /= autocorr_tau[0]\n x_sig = chain[::thin_factor, 1].copy()\n x_sig -= x_sig.mean()\n autocorr_sig = np.correlate(x_sig, x_sig, mode=2)[x_sig.size:]\n autocorr_sig /= autocorr_sig[0]\n ax_tau.vlines(np.arange(max_lag) + 1, 0., autocorr_tau[:max_lag])\n ax_tau.axhline()\n ax_tau.set_yticks(np.linspace(-0.4, 0.8, 4))\n #ax_tau.set_xticks(np.arange(0, 31, 10))\n ax_sig.vlines(np.arange(max_lag) + 1, 0., autocorr_sig[:max_lag])\n ax_sig.axhline()\n #ax_sig.set_xticks(np.arange(0, 31, 10))\n ax_sig.set_yticks(np.linspace(-0.4, 0.8, 4))\n if j == 0:\n ax_tau.set_title('{0} $\\\\tau$'.format(data_set + ', ' + method))\n ax_sig.set_title('{0} $\\\\sigma$'.format(data_set + ', ' + method))\nfig.tight_layout()",
"Calculate mean compute time across 10 chains for PM MH method and APM SS+MH method (for runs on the same machine) to verify that extra quadratic operations for APM approaches here are a negligible overhead",
"for (data_set, method), (chains, chains_stats, n_effs, gelman_rubin) in results_map.items():\n if data_set == 'Pima':\n if method == 'PM MH' or method == 'APM SS+MH':\n print('{0} {1} mean compute time: {2} +/- {3}'.format(\n data_set, method,\n to_precision(chains_stats[:, -1].mean(), 3),\n to_precision(chains_stats[:, -1].std(ddof=1) / chains.shape[0]**0.5, 2))"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
flothesof/presidentielles2017
|
goodies/galeries d'image.ipynb
|
mit
|
[
"Emmanuel Macron\nC'est marrant à quel point l'iconographie du site d'Emmanuel est soignée. Peut-on en faire une gallerie d'images?\nOn part de cette url : https://en-marche.fr/emmanuel-macron/le-programme",
"from bs4 import BeautifulSoup\nimport requests\n\nr = requests.get('https://en-marche.fr/emmanuel-macron/le-programme')\n\nsoup = BeautifulSoup(r.text, 'html.parser')\n\nproposals = soup.find_all(class_='programme__proposal')\n\nproposals = [p for p in proposals if 'programme__proposal--category' not in p.attrs['class']]\n\nlen(proposals)\n\np = proposals[0]\n\nfull_url = 'https://en-marche.fr' + p.find('a').attrs['href']\nfull_url\n\nfull_urls = ['https://en-marche.fr' + p.find('a').attrs['href'] for p in proposals]\n\nfull_urls[:10]\n\nr = requests.get(full_url)\nsoup = BeautifulSoup(r.text, 'html.parser')\n\nfigure_tag = soup.find('figure', class_='fullscreen')\nfigure_tag",
"On peut maintenant extraire le lien vers l'image.",
"src_url = 'https://en-marche.fr' + figure_tag('img')[0].attrs['src']\nsrc_url",
"On peut afficher ceci dans le notebook.",
"from IPython.display import Image\n\nImage(url=src_url)\n\ndef extract_img_src(url):\n \"Extracts image src url from linked page.\"\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n figure_tag = soup.find('figure', class_='fullscreen')\n if figure_tag is not None and figure_tag('img') is not None:\n src_url = 'https://en-marche.fr' + figure_tag('img')[0].attrs['src']\n return src_url\n else:\n print(\"no image for url: {}\".format(url))\n return None",
"On peut répeter ce processus et faire une gallerie avec toutes ces images.",
"srcs = [extract_img_src(url) for url in full_urls]\n\nsrcs = [_ for _ in srcs if _ is not None]\n\nheader = \"\"\"<!doctype html>\n<html lang=\"fr\">\n<head>\n <meta charset=\"utf-8\">\n <title>Gallerie des photos du site d'Emmanuel Macron</title>\n <style>\n img {width: 100%;}\n </style>\n</head>\"\"\"\n\ndef format_as_img_tag(src):\n return \"<img src={} />\".format(src)\n\nformat_as_img_tag(srcs[2])\n\nwith open('galerie_macron.html', 'w') as f:\n body = \"\"\"<body>\n{0}\n</body>\"\"\".format(\"\\n\".join(format_as_img_tag(url) for url in srcs))\n html = header + body + \"</html>\"\n f.write(html)",
"Ce sont des belles photos...\nFrançois Fillon\nDepuis la sortie du programme de François Fillon, on peut répéter la démarche.",
"r = requests.get('https://www.fillon2017.fr/projet/')\nsoup = BeautifulSoup(r.text, 'html.parser')\n\ntags = soup.find_all('a', class_='projectItem__inner')\n\nsublinks = [tag.attrs['href'] for tag in tags]",
"On s'attaque aux pages individuelles.",
"sublinks[0]\n\nr = requests.get(sublinks[0])\nsoup = BeautifulSoup(r.text, 'html.parser')\n\nsrc = soup.find('div', class_='singleProject__banner bannerWithMask backgroundCover').attrs['style'].split(\"background-image: url(\")[1][1:-3]\n\ndef extract_img_src(url):\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser') \n src = soup.find('div', class_='singleProject__banner bannerWithMask backgroundCover').attrs['style'].split(\"background-image: url(\")[1][1:-3]\n return src\n\nsrcs = [extract_img_src(url) for url in sublinks]\n\nsrcs\n\nwith open('galerie_fillon.html', 'w') as f:\n body = \"\"\"<body>\n{0}\n</body>\"\"\".format(\"\\n\".join(format_as_img_tag(url) for url in srcs))\n html = header + body + \"</html>\"\n f.write(html)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
wkuling/spark-FM-parallelSGD
|
FMonSpark_demo_a9a.ipynb
|
apache-2.0
|
[
"FM on Spark demo\nInitialisation\nLoad the file.",
"sc.addPyFile(\"./fm/fm_parallel_sgd.py\")\n\nimport fm_parallel_sgd as fm\nfrom fm_parallel_sgd import *",
"Set some matplotlib parameters for plotting figures directly in the notebook:",
"%matplotlib inline\nimport matplotlib.pylab as pylab\npylab.rcParams['figure.figsize']=(16.0, 12.0)",
"Loading the dataset\nThe dataset should be a RDD of LabeledPoints.\nLabels should be -1 or 1.\nFeatures should be either SparseVector or DenseVector from mllib.linalg library\nThe Adult dataset (a9a) is used to predict who has a salary over $50.000, based on various information (Platt, 1998).\nYou can download it here : a9a\n123 features - 11% sparse",
"nrPartitions = 5\ntrainPath = \"/path/to/a9a_train_dataset/a9a\"\ntrainAll = MLUtils.loadLibSVMFile(sc, trainPath, numFeatures=123).repartition(nrPartitions)\ntestPath = \"/path/to/a9a_test_dataset/a9a.t\"\ntest = MLUtils.loadLibSVMFile(sc, testPath, numFeatures=123)\n\nprint trainAll.count()\nprint test.count()\nprint trainAll.first()",
"Training\ntrainFM_parallel_sgd\nTrain a Factorization Machine model using parallel stochastic gradient descent.",
"?trainFM_parallel_sgd\n\ntemp = time.time()\nmodel = trainFM_parallel_sgd (sc, trainAll, iterations=1, iter_sgd= 1, alpha=0.01, regParam=0.01, factorLength=4,\\\n verbose=False, savingFilename = None, evalTraining=None)\nprint 'time :'; print time.time()-temp;",
"evaluate(test, model)\nEvaluate your model on a test set.",
"?evaluate\n\nprint evaluate(test, model)",
"verbose=True\nSplit the rdd into a training set and a validation set.\nPrint the evaluation after each iteration.\nsavingFilename (string)\nSaves the model in a pickle file after each iteration. The files are saved in the current directory.\nThe files are named 'savingFilename_iteration_#'",
"temp = time.time()\ntrainFM_parallel_sgd (sc, trainAll, iterations=5, iter_sgd= 3, alpha=0.01, regParam=0.01, factorLength=4,\\\n verbose=True, savingFilename = 'a9a', evalTraining=None)\nprint 'total time :'; print time.time()-temp;",
"Load one of the saved model",
"model = loadModel('a9a_iteration_5')\n\nevaluate(test, model)",
"evalTraining\nUsed to plot the evaluation (train+validation) during the training\nYou need to create an instance of the class evaluation first.\nYou can set evalTraining.modulo = 5 to evaluate the model after each 5 iterations for example (default is 1)",
"temp = time.time()\n\nevalTraining = evaluation(trainAll)\nevalTraining.modulo = 1\ntrainFM_parallel_sgd(sc, trainAll, iterations=10, iter_sgd=1, verbose = True, evalTraining=evalTraining)\nprint 'total time'\nprint time.time()-temp",
"Plotting\nLet's plot the different parameters using a sample of the training set",
"trainSample = trainAll.sample(False, 0.1)",
"plotAlpha(sc, data, alpha_list, iterations, iter_sgd, regParam, factorLength)\nSpecify the list of alpha you want to plot in the alpha_list",
"model = plotAlpha(sc, trainSample, iterations=10, iter_sgd=1, alpha_list = [0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1])",
"plotFactorLength(sc, data, factorLength_list, iterations, iter_sgd, alpha, regParam)\nSpecify the list of factor length you want to plot in the factorLength_list",
"model = plotFactorLength(sc, trainSample, factorLength_list = [1,5, 10, 15,20, 30, 40],\\\n iterations=5, iter_sgd=1, alpha=0.01, regParam=0.)",
"plotRegParam(sc, data, regParam_list, iterations, iter_sgd, alpha, factorLength)\nSpecify the list of factor length you want to plot in the factorLength_list",
"model = plotRegParam(sc, trainSample, regParam_list = [0, 0.0001, 0.001, 0.01], iterations=5, iter_sgd=1, alpha=0.01, factorLength=4)",
"plotAlpha_RegParam(sc, trainAll, alpha_list, regParam_list, iterations, iter_sgd)\nSpecify the alpha_list and the factorLength_list to plot a color map of the best parameters. The brighter is the lower logloss.",
"bestModel = plotAlpha_RegParam(sc, trainSample, alpha_list = [0.01, 0.03, 0.06, 0.1],\\\n regParam_list = [0, 0.0001, 0.001, 0.01],\\\n iterations=5, iter_sgd=1)\n\nevaluate(test, bestModel)",
"Predictions\nTo calculate the probabilities according to the model for a test set, call predict(data, model). This return a RDD with probability scores.",
"prediction = predictFM(test, bestModel)\nprediction.take(5)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ktaneishi/deepchem
|
examples/notebooks/protein_ligand_complex_notebook.ipynb
|
mit
|
[
"Basic Protein-Ligand Affinity Models\nTutorial: Use machine learning to model protein-ligand affinity.\nWritten by Evan Feinberg and Bharath Ramsundar\nCopyright 2016, Stanford University\nThis DeepChem tutorial demonstrates how to use mach.ine learning for modeling protein-ligand binding affinity\nOverview:\nIn this tutorial, you will trace an arc from loading a raw dataset to fitting a cutting edge ML technique for predicting binding affinities. This will be accomplished by writing simple commands to access the deepchem Python API, encompassing the following broad steps:\n\nLoading a chemical dataset, consisting of a series of protein-ligand complexes.\nFeaturizing each protein-ligand complexes with various featurization schemes. \nFitting a series of models with these featurized protein-ligand complexes.\nVisualizing the results.\n\nFirst, let's point to a \"dataset\" file. This can come in the format of a CSV file or Pandas DataFrame. Regardless\nof file format, it must be columnar data, where each row is a molecular system, and each column represents\na different piece of information about that system. For instance, in this example, every row reflects a \nprotein-ligand complex, and the following columns are present: a unique complex identifier; the SMILES string\nof the ligand; the binding affinity (Ki) of the ligand to the protein in the complex; a Python list of all lines\nin a PDB file for the protein alone; and a Python list of all lines in a ligand file for the ligand alone.\nThis should become clearer with the example. (Make sure to set DISPLAY = True)",
"%load_ext autoreload\n%autoreload 2\n%pdb off\n# set DISPLAY = True when running tutorial\nDISPLAY = False\n# set PARALLELIZE to true if you want to use ipyparallel\nPARALLELIZE = False\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport deepchem as dc\nfrom deepchem.utils import download_url\n\nimport os\n\ndata_dir = dc.utils.get_data_dir()\ndataset_file = os.path.join(data_dir, \"pdbbind_core_df.csv.gz\")\n\nif not os.path.exists(dataset_file):\n print('File does not exist. Downloading file...')\n download_url(\"https://s3-us-west-1.amazonaws.com/deepchem.io/datasets/pdbbind_core_df.csv.gz\")\n print('File downloaded...')\n\nraw_dataset = dc.utils.save.load_from_disk(dataset_file)",
"Let's see what dataset looks like:",
"print(\"Type of dataset is: %s\" % str(type(raw_dataset)))\nprint(raw_dataset[:5])\nprint(\"Shape of dataset is: %s\" % str(raw_dataset.shape))",
"One of the missions of deepchem is to form a synapse between the chemical and the algorithmic worlds: to be able to leverage the powerful and diverse array of tools available in Python to analyze molecules. This ethos applies to visual as much as quantitative examination:",
"import nglview\nimport tempfile\nimport os\nimport mdtraj as md\nimport numpy as np\nimport deepchem.utils.visualization\n#from deepchem.utils.visualization import combine_mdtraj, visualize_complex, convert_lines_to_mdtraj\n\ndef combine_mdtraj(protein, ligand):\n chain = protein.topology.add_chain()\n residue = protein.topology.add_residue(\"LIG\", chain, resSeq=1)\n for atom in ligand.topology.atoms:\n protein.topology.add_atom(atom.name, atom.element, residue)\n protein.xyz = np.hstack([protein.xyz, ligand.xyz])\n protein.topology.create_standard_bonds()\n return protein\n\ndef visualize_complex(complex_mdtraj):\n ligand_atoms = [a.index for a in complex_mdtraj.topology.atoms if \"LIG\" in str(a.residue)]\n binding_pocket_atoms = md.compute_neighbors(complex_mdtraj, 0.5, ligand_atoms)[0]\n binding_pocket_residues = list(set([complex_mdtraj.topology.atom(a).residue.resSeq for a in binding_pocket_atoms]))\n binding_pocket_residues = [str(r) for r in binding_pocket_residues]\n binding_pocket_residues = \" or \".join(binding_pocket_residues)\n\n traj = nglview.MDTrajTrajectory( complex_mdtraj ) # load file from RCSB PDB\n ngltraj = nglview.NGLWidget( traj )\n ngltraj.representations = [\n { \"type\": \"cartoon\", \"params\": {\n \"sele\": \"protein\", \"color\": \"residueindex\"\n } },\n { \"type\": \"licorice\", \"params\": {\n \"sele\": \"(not hydrogen) and (%s)\" % binding_pocket_residues\n } },\n { \"type\": \"ball+stick\", \"params\": {\n \"sele\": \"LIG\"\n } }\n ]\n return ngltraj\n\ndef visualize_ligand(ligand_mdtraj):\n traj = nglview.MDTrajTrajectory( ligand_mdtraj ) # load file from RCSB PDB\n ngltraj = nglview.NGLWidget( traj )\n ngltraj.representations = [\n { \"type\": \"ball+stick\", \"params\": {\"sele\": \"all\" } } ]\n return ngltraj\n\ndef convert_lines_to_mdtraj(molecule_lines):\n molecule_lines = molecule_lines.strip('[').strip(']').replace(\"'\",\"\").replace(\"\\\\n\", \"\").split(\", \")\n tempdir = tempfile.mkdtemp()\n molecule_file = 
os.path.join(tempdir, \"molecule.pdb\")\n with open(molecule_file, \"w\") as f:\n for line in molecule_lines:\n f.write(\"%s\\n\" % line)\n molecule_mdtraj = md.load(molecule_file)\n return molecule_mdtraj\n\nfirst_protein, first_ligand = raw_dataset.iloc[0][\"protein_pdb\"], raw_dataset.iloc[0][\"ligand_pdb\"]\nprotein_mdtraj = convert_lines_to_mdtraj(first_protein)\nligand_mdtraj = convert_lines_to_mdtraj(first_ligand)\ncomplex_mdtraj = combine_mdtraj(protein_mdtraj, ligand_mdtraj)\n\nngltraj = visualize_complex(complex_mdtraj)\nngltraj",
"Now that we're oriented, let's use ML to do some chemistry. \nSo, step (2) will entail featurizing the dataset.\nThe available featurizations that come standard with deepchem are ECFP4 fingerprints, RDKit descriptors, NNScore-style bdescriptors, and hybrid binding pocket descriptors. Details can be found on deepchem.io.",
"grid_featurizer = dc.feat.RdkitGridFeaturizer(\n voxel_width=16.0, feature_types=\"voxel_combined\", \n voxel_feature_types=[\"ecfp\", \"splif\", \"hbond\", \"pi_stack\", \"cation_pi\", \"salt_bridge\"], \n ecfp_power=5, splif_power=5, parallel=True, flatten=True)\ncompound_featurizer = dc.feat.CircularFingerprint(size=128)",
"Note how we separate our featurizers into those that featurize individual chemical compounds, compound_featurizers, and those that featurize molecular complexes, complex_featurizers.\nNow, let's perform the actual featurization. Calling loader.featurize() will return an instance of class Dataset. Internally, loader.featurize() (a) computes the specified features on the data, (b) transforms the inputs into X and y NumPy arrays suitable for ML algorithms, and (c) constructs a Dataset() instance that has useful methods, such as an iterator, over the featurized data. This is a little complicated, so we will use MoleculeNet to featurize the PDBBind core set for us.",
"seed = 23\nnp.random.seed(seed)\nPDBBIND_tasks, (train_dataset, valid_dataset, test_dataset), transformers = dc.molnet.load_pdbbind_grid()",
"Now, we conduct a train-test split. If you'd like, you can choose splittype=\"scaffold\" instead to perform a train-test split based on Bemis-Murcko scaffolds.\nWe generate separate instances of the Dataset() object to hermetically seal the train dataset from the test dataset. This style lends itself easily to validation-set type hyperparameter searches, which we will illustate in a separate section of this tutorial. \nThe performance of many ML algorithms hinges greatly on careful data preprocessing. Deepchem comes standard with a few options for such preprocessing.\nNow, we're ready to do some learning! \nTo fit a deepchem model, first we instantiate one of the provided (or user-written) model classes. In this case, we have a created a convenience class to wrap around any ML model available in Sci-Kit Learn that can in turn be used to interoperate with deepchem. To instantiate an SklearnModel, you will need (a) task_types, (b) model_params, another dict as illustrated below, and (c) a model_instance defining the type of model you would like to fit, in this case a RandomForestRegressor.",
"from sklearn.ensemble import RandomForestRegressor\n\nsklearn_model = RandomForestRegressor(n_estimators=10, max_features='sqrt')\nsklearn_model.random_state = seed\nmodel = dc.models.SklearnModel(sklearn_model)\nmodel.fit(train_dataset)\n\nfrom deepchem.utils.evaluate import Evaluator\nimport pandas as pd\n\nmetric = dc.metrics.Metric(dc.metrics.r2_score)\n\nevaluator = Evaluator(model, train_dataset, transformers)\ntrain_r2score = evaluator.compute_model_performance([metric])\nprint(\"RF Train set R^2 %f\" % (train_r2score[\"r2_score\"]))\n\nevaluator = Evaluator(model, valid_dataset, transformers)\nvalid_r2score = evaluator.compute_model_performance([metric])\nprint(\"RF Valid set R^2 %f\" % (valid_r2score[\"r2_score\"]))",
"In this simple example, in few yet intuitive lines of code, we traced the machine learning arc from featurizing a raw dataset to fitting and evaluating a model. \nHere, we featurized only the ligand. The signal we observed in R^2 reflects the ability of circular fingerprints and random forests to learn general features that make ligands \"drug-like.\"",
"predictions = model.predict(test_dataset)\nprint(predictions)\n\n# TODO(rbharath): This cell visualizes the ligand with highest predicted activity. Commenting it out for now. Fix this later\n#from deepchem.utils.visualization import visualize_ligand\n\n#top_ligand = predictions.iloc[0]['ids']\n#ligand1 = convert_lines_to_mdtraj(dataset.loc[dataset['complex_id']==top_ligand]['ligand_pdb'].values[0])\n#if DISPLAY:\n# ngltraj = visualize_ligand(ligand1)\n# ngltraj\n\n# TODO(rbharath): This cell visualizes the ligand with lowest predicted activity. Commenting it out for now. Fix this later\n#worst_ligand = predictions.iloc[predictions.shape[0]-2]['ids']\n#ligand1 = convert_lines_to_mdtraj(dataset.loc[dataset['complex_id']==worst_ligand]['ligand_pdb'].values[0])\n#if DISPLAY:\n# ngltraj = visualize_ligand(ligand1)\n# ngltraj",
"The protein-ligand complex view.\nThe preceding simple example, in few yet intuitive lines of code, traces the machine learning arc from featurizing a raw dataset to fitting and evaluating a model. \nIn this next section, we illustrate deepchem's modularity, and thereby the ease with which one can explore different featurization schemes, different models, and combinations thereof, to achieve the best performance on a given dataset. We will demonstrate this by examining protein-ligand interactions. \nIn the previous section, we featurized only the ligand. The signal we observed in R^2 reflects the ability of grid fingerprints and random forests to learn general features that make ligands \"drug-like.\" In this section, we demonstrate how to use hyperparameter searching to find a higher scoring ligands.",
"def rf_model_builder(model_params, model_dir):\n sklearn_model = RandomForestRegressor(**model_params)\n sklearn_model.random_state = seed\n return dc.models.SklearnModel(sklearn_model, model_dir)\n\nparams_dict = {\n \"n_estimators\": [10, 50, 100],\n \"max_features\": [\"auto\", \"sqrt\", \"log2\", None],\n}\n\nmetric = dc.metrics.Metric(dc.metrics.r2_score)\noptimizer = dc.hyper.HyperparamOpt(rf_model_builder)\nbest_rf, best_rf_hyperparams, all_rf_results = optimizer.hyperparam_search(\n params_dict, train_dataset, valid_dataset, transformers,\n metric=metric)\n\n%matplotlib inline\n\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nrf_predicted_test = best_rf.predict(test_dataset)\nrf_true_test = test_dataset.y\nplt.scatter(rf_predicted_test, rf_true_test)\nplt.xlabel('Predicted pIC50s')\nplt.ylabel('True IC50')\nplt.title(r'RF predicted IC50 vs. True pIC50')\nplt.xlim([2, 11])\nplt.ylim([2, 11])\nplt.plot([2, 11], [2, 11], color='k')\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
intel-analytics/analytics-zoo
|
apps/sentiment-analysis/sentiment.ipynb
|
apache-2.0
|
[
"Sentiment Classification on Large Movie Reviews\nSentiment Analysis is understood as a classic natural language processing problem. In this example, a large moview review dataset was chosen from IMDB to do a sentiment classification task with some deep learning approaches. The labeled data set consists of 50,000 IMDB movie reviews (good or bad), in which 25000 highly polar movie reviews for training, and 25,000 for testing. The dataset is originally collected by Stanford researchers and was used in a 2011 paper, and the highest accuray of 88.33% was achieved without using the unbalanced data. This example illustrates some deep learning approaches to do the sentiment classification with BigDL python API.\nLoad the IMDB Dataset\nThe IMDB dataset need to be loaded into BigDL, note that the dataset has been pre-processed, and each review was encoded as a sequence of integers. Each integer represents the index of the overall frequency of dataset, for instance, '5' means the 5-th most frequent words occured in the data. It is very convinient to filter the words by some conditions, for example, to filter only the top 5,000 most common word and/or eliminate the top 30 most common words. Let's define functions to load the pre-processed data.",
"from bigdl.dataset import base\nimport numpy as np\n\ndef download_imdb(dest_dir):\n \"\"\"Download pre-processed IMDB movie review data\n\n :argument\n dest_dir: destination directory to store the data\n\n :return\n The absolute path of the stored data\n \"\"\"\n file_name = \"imdb.npz\"\n file_abs_path = base.maybe_download(file_name,\n dest_dir,\n 'https://s3.amazonaws.com/text-datasets/imdb.npz')\n return file_abs_path\n\ndef load_imdb(dest_dir='/tmp/.bigdl/dataset'):\n \"\"\"Load IMDB dataset.\n\n :argument\n dest_dir: where to cache the data (relative to `~/.bigdl/dataset`).\n\n :return\n the train, test separated IMDB dataset.\n \"\"\"\n path = download_imdb(dest_dir)\n f = np.load(path, allow_pickle=True)\n x_train = f['x_train']\n y_train = f['y_train']\n x_test = f['x_test']\n y_test = f['y_test']\n f.close()\n\n return (x_train, y_train), (x_test, y_test)\n\nprint('Processing text dataset')\n(x_train, y_train), (x_test, y_test) = load_imdb()\nprint('finished processing text')",
"In order to set a proper max sequence length, we need to go througth the property of the data and see the length distribution of each sentence in the dataset. A box and whisker plot is shown below for reviewing the length distribution in words.",
"import matplotlib\nmatplotlib.use('Agg')\n%pylab inline \n# Summarize review length\nfrom matplotlib import pyplot\n\nprint(\"Review length: \")\nX = np.concatenate((x_train, x_test), axis=0)\nresult = [len(x) for x in X]\nprint(\"Mean %.2f words (%f)\" % (np.mean(result), np.std(result)))\n# plot review length\n# Create a figure instance\nfig = pyplot.figure(1, figsize=(6, 6))\npyplot.boxplot(result)\npyplot.show()",
"Looking the box and whisker plot, the max length of a sample in words is 500, and the mean and median are below 250. According to the plot, we can probably cover the mass of the distribution with a clipped length of 400 to 500. Here we set the max sequence length of each sample as 500.\nThe corresponding vocabulary sorted by frequency is also required, for further embedding the words with pre-trained vectors. The downloaded vocabulary is in {word: index}, where each word as a key and the index as a value. It needs to be transformed into {index: word} format.\nLet's define a function to obtain the vocabulary.",
"import json\n\ndef get_word_index(dest_dir='/tmp/.bigdl/dataset', ):\n \"\"\"Retrieves the dictionary mapping word indices back to words.\n\n :argument\n path: where to cache the data (relative to `~/.bigdl/dataset`).\n\n :return\n The word index dictionary.\n \"\"\"\n file_name = \"imdb_word_index.json\"\n path = base.maybe_download(file_name,\n dest_dir,\n source_url='https://s3.amazonaws.com/text-datasets/imdb_word_index.json')\n f = open(path)\n data = json.load(f)\n f.close()\n return data\n\nprint('Processing vocabulary')\nword_idx = get_word_index()\nidx_word = {v:k for k,v in word_idx.items()}\nprint('finished processing vocabulary')",
"Text pre-processing\nBefore we train the network, some pre-processing steps need to be applied to the dataset. \nNext let's go through the mechanisms that used to be applied to the data.\n\n\nWe insert a start_char at the beginning of each sentence to mark the start point. We set it as 2 here, and each other word index will plus a constant index_from to differentiate some 'helper index' (eg. start_char, oov_char, etc.).\n\n\nA max_words variable is defined as the maximum index number (the least frequent word) included in the sequence. If the word index number is larger than max_words, it will be replaced by a out-of-vocabulary number oov_char, which is 3 here.\n\n\nEach word index sequence is restricted to the same length. We used left-padding here, which means the right (end) of the sequence will be keep as many as possible and drop the left (head) of the sequence if its length is more than pre-defined sequence_len, or padding the left (head) of the sequence with padding_value.",
"def replace_oov(x, oov_char, max_words):\n \"\"\"\n Replace the words out of vocabulary with `oov_char`\n :param x: a sequence\n :param max_words: the max number of words to include\n :param oov_char: words out of vocabulary because of exceeding the `max_words`\n limit will be replaced by this character\n\n :return: The replaced sequence\n \"\"\"\n return [oov_char if w >= max_words else w for w in x]\n\ndef pad_sequence(x, fill_value, length):\n \"\"\"\n Pads each sequence to the same length\n :param x: a sequence\n :param fill_value: pad the sequence with this value\n :param length: pad sequence to the length\n\n :return: the padded sequence\n \"\"\"\n if len(x) >= length:\n return x[(len(x) - length):]\n else:\n return [fill_value] * (length - len(x)) + x\n\ndef to_sample(features, label):\n \"\"\"\n Wrap the `features` and `label` to a training sample object\n :param features: features of a sample\n :param label: label of a sample\n \n :return: a sample object including features and label\n \"\"\"\n return Sample.from_ndarray(np.array(features, dtype='float'), np.array(label))\n\npadding_value = 1\nstart_char = 2\noov_char = 3\nindex_from = 3\nmax_words = 5000\nsequence_len = 500\n\nprint('start transformation')\n\nfrom zoo.common.nncontext import *\nsc = init_nncontext(\"Sentiment Analysis Example\")\n\n\ntrain_rdd = sc.parallelize(zip(x_train, y_train), 2) \\\n .map(lambda record: ([start_char] + [w + index_from for w in record[0]], record[1])) \\\n .map(lambda record: (replace_oov(record[0], oov_char, max_words), record[1])) \\\n .map(lambda record: (pad_sequence(record[0], padding_value, sequence_len), record[1])) \\\n .map(lambda record: to_sample(record[0], record[1]))\ntest_rdd = sc.parallelize(zip(x_test, y_test), 2) \\\n .map(lambda record: ([start_char] + [w + index_from for w in record[0]], record[1])) \\\n .map(lambda record: (replace_oov(record[0], oov_char, max_words), record[1])) \\\n .map(lambda record: (pad_sequence(record[0], padding_value, 
sequence_len), record[1])) \\\n .map(lambda record: to_sample(record[0], record[1]))\n \nprint('finish transformation')",
"Word Embedding\nWord embedding is a recent breakthrough in natural language field. The key idea is to encode words and phrases into distributed representations in the format of word vectors, which means each word is represented as a vector. There are two widely used word vector training alogirhms, one is published by Google called word to vector, the other is published by Standford called Glove. In this example, pre-trained glove is loaded into a lookup table and will be fine-tuned during the training process. BigDL provides a method to download and load glove in news20 package.",
"from bigdl.dataset import news20\nimport itertools\n\nembedding_dim = 100\n\nprint('loading glove')\nglove = news20.get_glove_w2v(source_dir='/tmp/.bigdl/dataset', dim=embedding_dim)\nprint('finish loading glove')",
"For each word whose index less than the max_word should try to match its embedding and store in an array.\nWith regard to those words which can not be found in glove, we randomly sample it from a [-0.05, 0.05] uniform distribution.\nBigDL usually use a LookupTable layer to do word embedding, so the matrix will be loaded to the LookupTable by seting the weight.",
"print('processing glove')\nw2v = [glove.get(idx_word.get(i - index_from), np.random.uniform(-0.05, 0.05, embedding_dim))\n for i in range(1, max_words + 1)]\nw2v = np.array(list(itertools.chain(*np.array(w2v, dtype='float'))), dtype='float') \\\n .reshape([max_words, embedding_dim])\nprint('finish processing glove')",
"Build models\nNext, let's build some deep learning models for the sentiment classification. \nAs an example, several deep learning models are illustrated for tutorial, comparison and demonstration.\nLSTM, GRU, Bi-LSTM, CNN and CNN + LSTM models are implemented as options. To decide which model to use, just assign model_type the corresponding string.",
"from bigdl.nn.layer import *\n\np = 0.2\n\ndef build_model(w2v):\n model = Sequential()\n\n embedding = LookupTable(max_words, embedding_dim)\n embedding.set_weights([w2v])\n model.add(embedding)\n if model_type.lower() == \"gru\":\n model.add(Recurrent()\n .add(GRU(embedding_dim, 128, p))) \\\n .add(Select(2, -1))\n elif model_type.lower() == \"lstm\":\n model.add(Recurrent()\n .add(LSTM(embedding_dim, 128, p)))\\\n .add(Select(2, -1))\n elif model_type.lower() == \"bi_lstm\":\n model.add(BiRecurrent(CAddTable())\n .add(LSTM(embedding_dim, 128, p)))\\\n .add(Select(2, -1))\n elif model_type.lower() == \"cnn\":\n model.add(Transpose([(2, 3)]))\\\n .add(Dropout(p))\\\n .add(Reshape([embedding_dim, 1, sequence_len]))\\\n .add(SpatialConvolution(embedding_dim, 128, 5, 1))\\\n .add(ReLU())\\\n .add(SpatialMaxPooling(sequence_len - 5 + 1, 1, 1, 1))\\\n .add(Reshape([128]))\n elif model_type.lower() == \"cnn_lstm\":\n model.add(Transpose([(2, 3)]))\\\n .add(Dropout(p))\\\n .add(Reshape([embedding_dim, 1, sequence_len])) \\\n .add(SpatialConvolution(embedding_dim, 64, 5, 1)) \\\n .add(ReLU()) \\\n .add(SpatialMaxPooling(4, 1, 1, 1)) \\\n .add(Squeeze(3)) \\\n .add(Transpose([(2, 3)])) \\\n .add(Recurrent()\n .add(LSTM(64, 128, p))) \\\n .add(Select(2, -1))\n\n model.add(Linear(128, 100))\\\n .add(Dropout(0.2))\\\n .add(ReLU())\\\n .add(Linear(100, 1))\\\n .add(Sigmoid())\n\n return model",
"Optimization\nOptimizer need to be created to optimise the model.\nHere we use the CNN model.",
"from bigdl.optim.optimizer import *\nfrom bigdl.nn.criterion import *\n\n# max_epoch = 4\nmax_epoch = 1\nbatch_size = 64\nmodel_type = 'gru'\n\n\noptimizer = Optimizer(\n model=build_model(w2v),\n training_rdd=train_rdd,\n criterion=BCECriterion(),\n end_trigger=MaxEpoch(max_epoch),\n batch_size=batch_size,\n optim_method=Adam())\n\noptimizer.set_validation(\n batch_size=batch_size,\n val_rdd=test_rdd,\n trigger=EveryEpoch(),\n val_method=Top1Accuracy())",
"To make the training process be visualized by TensorBoard, training summaries should be saved as a format of logs.",
"import datetime as dt\n\nlogdir = '/tmp/.bigdl/'\napp_name = 'adam-' + dt.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\ntrain_summary = TrainSummary(log_dir=logdir, app_name=app_name)\ntrain_summary.set_summary_trigger(\"Parameters\", SeveralIteration(50))\nval_summary = ValidationSummary(log_dir=logdir, app_name=app_name)\noptimizer.set_train_summary(train_summary)\noptimizer.set_val_summary(val_summary)",
"Now, let's start training!",
"%%time\ntrain_model = optimizer.optimize()\nprint (\"Optimization Done.\")",
"Test\nValidation accuracy is shown in the training log, here let's get the accuracy on validation set by hand.\nPredict the test_rdd (validation set data), and obtain the predicted label and ground truth label in the list.",
"predictions = train_model.predict(test_rdd)\n\ndef map_predict_label(l):\n if l > 0.5:\n return 1\n else:\n return 0\ndef map_groundtruth_label(l):\n return l.to_ndarray()[0]\n\ny_pred = np.array([ map_predict_label(s) for s in predictions.collect()])\n\ny_true = np.array([map_groundtruth_label(s.label) for s in test_rdd.collect()])",
"Then let's see the prediction accuracy on validation set.",
"correct = 0\nfor i in range(0, y_pred.size):\n if (y_pred[i] == y_true[i]):\n correct += 1\n\naccuracy = float(correct) / y_pred.size\nprint ('Prediction accuracy on validation set is: ', accuracy)",
"Show the confusion matrix",
"matplotlib.use('Agg')\n%pylab inline \nimport matplotlib.pyplot as plt\nimport seaborn as sn\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\n\ncm = confusion_matrix(y_true, y_pred)\ncm.shape\n\ndf_cm = pd.DataFrame(cm)\nplt.figure(figsize = (5,4))\nsn.heatmap(df_cm, annot=True,fmt='d')",
"Because of the limitation of ariticle length, not all the results of optional models can be shown respectively. Please try other provided optional models to see the results. If you are interested in optimizing the results, try different training parameters which may make inpacts on the result, such as the max sequence length, batch size, training epochs, preprocessing schemes, optimization methods and so on. Among the models, CNN training would be much quicker. Note that the LSTM and it variants (eg. GRU) are difficult to train, even a unsuitable batch size may cause the model not converge. In addition it is prone to overfitting, please try different dropout threshold and/or add regularizers."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
CELMA-project/CELMA
|
MES/arakawaBracket/ArakawaOfDDXf2/calculations/exactSolutions.ipynb
|
lgpl-3.0
|
[
"Exact solution used in MES runs\nWe would like to MES the operation\n$$\n\\left{\\left(\\partial_\\rho \\phi\\right)^2, n\\right}\n$$\nUsing cylindrical geometry.",
"%matplotlib notebook\n\nfrom sympy import init_printing\nfrom sympy import S\nfrom sympy import sin, cos, tanh, exp, pi, sqrt\n\nfrom boutdata.mms import x, y, z, t\nfrom boutdata.mms import DDX, DDZ\n\nimport os, sys\n# If we add to sys.path, then it must be an absolute path\ncommon_dir = os.path.abspath('./../../../../common/')\n# Sys path is a list of system paths\nsys.path.append(common_dir)\nfrom CELMAPy.MES import get_metric, make_plot, BOUT_print\n\ninit_printing()\n\ndef poisson(f, g, metric):\n return\\\n DDZ(f, metric=metric)*DDX(g, metric=metric)\\\n -\\\n DDX(f, metric=metric)*DDZ(g, metric=metric)\\",
"Initialize",
"folder = '../mixModeAndGaussian/'\nmetric = get_metric()",
"Define the variables",
"# Initialization\nthe_vars = {}",
"Define the function to take the derivative of\nNOTE:\n\nz must be periodic\nThe field $f(\\rho, \\theta)$ must be of class infinity in $z=0$ and $z=2\\pi$\nThe field $f(\\rho, \\theta)$ must be single valued when $\\rho\\to0$\nThe field $f(\\rho, \\theta)$ must be continuous in the $\\rho$ direction with $f(\\rho, \\theta + \\pi)$\nEventual BC in $\\rho$ must be satisfied",
"# We need Lx\nfrom boututils.options import BOUTOptions\nmyOpts = BOUTOptions(folder)\nLx = eval(myOpts.geom['Lx'])\n\n# Two gaussians\n\n# The skew sinus\n# In cartesian coordinates we would like a sinus with with a wave-vector in the direction\n# 45 degrees with respect to the first quadrant. This can be achieved with a wave vector\n# k = [1/sqrt(2), 1/sqrt(2)]\n# sin((1/sqrt(2))*(x + y))\n# We would like 2 nodes, so we may write\n# sin((1/sqrt(2))*(x + y)*(2*pi/(2*Lx)))\n# Rewriting this to cylindrical coordinates, gives\n# sin((1/sqrt(2))*(x*(cos(z)+sin(z)))*(2*pi/(2*Lx)))\n\n# The gaussian\n# In cartesian coordinates we would like\n# f = exp(-(1/(2*w^2))*((x-x0)^2 + (y-y0)^2))\n# In cylindrical coordinates, this translates to\n# f = exp(-(1/(2*w^2))*(x^2 + y^2 + x0^2 + y0^2 - 2*(x*x0+y*y0) ))\n# = exp(-(1/(2*w^2))*(rho^2 + rho0^2 - 2*rho*rho0*(cos(theta)*cos(theta0)+sin(theta)*sin(theta0)) ))\n# = exp(-(1/(2*w^2))*(rho^2 + rho0^2 - 2*rho*rho0*(cos(theta - theta0)) ))\n\n# A parabola\n# In cartesian coordinates, we have\n# ((x-x0)/Lx)^2\n# Chosing this function to have a zero value at the edge yields in cylindrical coordinates\n# ((x*cos(z)+Lx)/(2*Lx))^2\n\n# Scaling with 40 to get S in order of unity\n\nw = 0.8*Lx\nrho0 = 0.3*Lx\ntheta0 = 5*pi/4\nthe_vars['n'] = 40*sin((1/sqrt(2))*(x*(cos(z)+sin(z)))*(2*pi/(2*Lx)))*\\\n exp(-(1/(2*w**2))*(x**2 + rho0**2 - 2*x*rho0*(cos(z - theta0)) ))*\\\n ((x*cos(z)+Lx)/(2*Lx))**2\n\n# Mixmode\n\n# Need the x^3 in order to let the second derivative of the field go towards one value when rho -> 0\n# (needed in Arakawa brackets)\n# Mutliply with a mix of modes\n# Multiply with a tanh in order to make the variation in x more homogeneous\n\n# Scaling with 10 to make variations in phi comparable to those of n\n\nthe_vars['phi'] = 10*(6+((x/(Lx))**3)*\\\n cos(2*z)*\\\n (\n cos(2*pi*(x/Lx)) + sin(2*pi*(x/Lx))\n + cos(3*2*pi*(x/Lx)) + cos(2*2*pi*(x/Lx)) \n )\\\n *(1/2)*(1-tanh((1/8)*(x))))",
"Calculating the solution",
"the_vars['S'] = poisson(\n DDX(the_vars['phi'], metric=metric)**2.0,\n the_vars['n'],\n metric=metric\n )",
"Plot",
"make_plot(folder=folder, the_vars=the_vars, plot2d=True, include_aux=False, save=False)",
"Print the variables in BOUT++ format",
"BOUT_print(the_vars, rational=False)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ohmtrivedi/Dustin-O
|
Assignment1/Assignment1.ipynb
|
gpl-2.0
|
[
"This program will print a table and plot a bar-chart.\nThis script depends on the following packages:\n\npandas\nnumpy\nseaborn\nmatplotlib\ntabulate\n\nYou may install each using pip/PyPI (or using whatever IDE function is available to search and install packages). You may also install the complete set of requirements using the requirements.txt file included as part of this project:\nsudo pip install -r requirements.txt\n\nThe following is a walkthrough of the code.\nDo the imports. Note that we add \"inline\" to the matplotlib import so that the graphics end-up in the notebook:",
"# Tell ipython to load the matplotlib environment.\n%matplotlib inline\n\nimport itertools\n\nimport pandas\nimport numpy\nimport seaborn\nimport matplotlib.pyplot\nimport tabulate",
"Set configurables:",
"_DATA_FILEPATH = 'datagovdatasetsviewmetrics.csv'\n_ROTATION_DEGREES = 90\n_BOTTOM_MARGIN = 0.35\n_COLOR_THEME = 'coolwarm'\n_LABEL_X = 'Organizations'\n_LABEL_Y = 'Views'\n_TITLE = 'Organizations with Most Views'\n_ORGANIZATION_COUNT = 10\n_MAX_LABEL_LENGTH = 20",
"We use pandas to read, group, and sort our data:",
"def read_data():\n d = pandas.read_csv(_DATA_FILEPATH)\n\n return d\n \ndef process_data(d):\n # Group by organization.\n\n def sum_views(df):\n return sum(df['Views per Month'])\n\n g = d.groupby('Organization Name').apply(sum_views)\n\n # Sort by views (descendingly).\n\n g.sort(ascending=False)\n\n # Grab the first N to plot.\n\n items = g.iteritems()\n s = itertools.islice(items, 0, _ORGANIZATION_COUNT)\n\n s = list(s)\n\n # Sort them in ascending order, this time, so that the larger ones are on \n # the right (in red) in the chart. This has a side-effect of flattening the \n # generator while we're at it.\n s = sorted(s, key=lambda (n, v): v)\n\n # Truncate the names (otherwise they're unwieldy).\n\n distilled = []\n for (name, views) in s:\n if len(name) > (_MAX_LABEL_LENGTH - 3):\n name = name[:17] + '...'\n\n distilled.append((name, views))\n\n return distilled",
"Use the tabulate library to render a nice table. This is one of my most favorite Python tools:",
"def print_table(distilled):\n headings = ['Organization', 'Views']\n print(tabulate.tabulate(distilled, headers=headings))",
"Use seaborn (one of the nicest chart-libraries for Python) to make a bar-chart. Notice that pandas is just a layer around numpy.",
"def plot_chart(distilled):\n # Split the series into separate vectors of labels and values.\n\n labels_raw = []\n values_raw = []\n for (name, views) in distilled:\n labels_raw.append(name)\n values_raw.append(views)\n\n labels = numpy.array(labels_raw)\n values = numpy.array(values_raw)\n\n # Create one plot.\n\n seaborn.set(style=\"white\", context=\"talk\")\n\n (f, ax) = matplotlib.pyplot.subplots(1)\n\n b = seaborn.barplot(\n labels, \n values,\n ci=None, \n palette=_COLOR_THEME, \n hline=0, \n ax=ax,\n x_order=labels)\n\n # Set labels.\n\n ax.set_title(_TITLE)\n ax.set_xlabel(_LABEL_X)\n ax.set_ylabel(_LABEL_Y)\n\n # Rotate the x-labels (otherwise they'll overlap). Seaborn also doesn't do \n # very well with diagonal labels so we'll go vertical.\n b.set_xticklabels(labels, rotation=_ROTATION_DEGREES)\n\n # Add some margin to the bottom so the labels aren't cut-off.\n matplotlib.pyplot.subplots_adjust(bottom=_BOTTOM_MARGIN)",
"Run the program:",
"df = read_data()\ndistilled = process_data(df)\n\nprint_table(distilled)\nplot_chart(distilled)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ES-DOC/esdoc-jupyterhub
|
notebooks/test-institute-2/cmip6/models/sandbox-1/landice.ipynb
|
gpl-3.0
|
[
"ES-DOC CMIP6 Model Properties - Landice\nMIP Era: CMIP6\nInstitute: TEST-INSTITUTE-2\nSource ID: SANDBOX-1\nTopic: Landice\nSub-Topics: Glaciers, Ice. \nProperties: 30 (21 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:44\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook",
"# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'test-institute-2', 'sandbox-1', 'landice')",
"Document Authors\nSet document authors",
"# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Contributors\nSpecify document contributors",
"# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Publication\nSpecify document publication status",
"# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)",
"Document Table of Contents\n1. Key Properties\n2. Key Properties --> Software Properties\n3. Grid\n4. Glaciers\n5. Ice\n6. Ice --> Mass Balance\n7. Ice --> Mass Balance --> Basal\n8. Ice --> Mass Balance --> Frontal\n9. Ice --> Dynamics \n1. Key Properties\nLand ice key properties\n1.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of land surface model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.2. Model Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of land surface model code",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.3. Ice Albedo\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nSpecify how ice albedo is modelled",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.ice_albedo') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prescribed\" \n# \"function of ice age\" \n# \"function of ice density\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"1.4. Atmospheric Coupling Variables\nIs Required: TRUE Type: STRING Cardinality: 1.1\nWhich variables are passed between the atmosphere and ice (e.g. orography, ice mass)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.5. Oceanic Coupling Variables\nIs Required: TRUE Type: STRING Cardinality: 1.1\nWhich variables are passed between the ocean and ice",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.6. Prognostic Variables\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nWhich variables are prognostically calculated in the ice model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"ice velocity\" \n# \"ice thickness\" \n# \"ice temperature\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"2. Key Properties --> Software Properties\nSoftware properties of land ice code\n2.1. Repository\nIs Required: FALSE Type: STRING Cardinality: 0.1\nLocation of code for this component.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2.2. Code Version\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCode version identifier.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2.3. Code Languages\nIs Required: FALSE Type: STRING Cardinality: 0.N\nCode language(s).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3. Grid\nLand ice grid\n3.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of the grid in the land ice scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.2. Adaptive Grid\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs an adative grid being used?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"3.3. Base Resolution\nIs Required: TRUE Type: FLOAT Cardinality: 1.1\nThe base resolution (in metres), before any adaption",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.base_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"3.4. Resolution Limit\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf an adaptive grid is being used, what is the limit of the resolution (in metres)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.resolution_limit') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"3.5. Projection\nIs Required: TRUE Type: STRING Cardinality: 1.1\nThe projection of the land ice grid (e.g. albers_equal_area)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.grid.projection') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4. Glaciers\nLand ice glaciers\n4.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of glaciers in the land ice scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.glaciers.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.2. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe the treatment of glaciers, if any",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.glaciers.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.3. Dynamic Areal Extent\nIs Required: FALSE Type: BOOLEAN Cardinality: 0.1\nDoes the model include a dynamic glacial extent?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"5. Ice\nIce sheet and ice shelf\n5.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of the ice sheet and ice shelf in the land ice scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.2. Grounding Line Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nSpecify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.grounding_line_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"grounding line prescribed\" \n# \"flux prescribed (Schoof)\" \n# \"fixed grid size\" \n# \"moving grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"5.3. Ice Sheet\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nAre ice sheets simulated?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.ice_sheet') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"5.4. Ice Shelf\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nAre ice shelves simulated?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.ice_shelf') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6. Ice --> Mass Balance\nDescription of the surface mass balance treatment\n6.1. Surface Mass Balance\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how and where the surface mass balance (SMB) is calulated. Include the temporal coupling frequeny from the atmosphere, whether or not a seperate SMB model is used, and if so details of this model, such as its resolution",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7. Ice --> Mass Balance --> Basal\nDescription of basal melting\n7.1. Bedrock\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe the implementation of basal melting over bedrock",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.2. Ocean\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe the implementation of basal melting over the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8. Ice --> Mass Balance --> Frontal\nDescription of claving/melting from the ice shelf front\n8.1. Calving\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe the implementation of calving from the front of the ice shelf",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.2. Melting\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe the implementation of melting from the front of the ice shelf",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9. Ice --> Dynamics\n**\n9.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nGeneral description if ice sheet and ice shelf dynamics",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.dynamics.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9.2. Approximation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nApproximation type used in modelling ice dynamics",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.dynamics.approximation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"SIA\" \n# \"SAA\" \n# \"full stokes\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9.3. Adaptive Timestep\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there an adaptive time scheme for the ice scheme?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"9.4. Timestep\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nTimestep (in seconds) of the ice scheme. If the timestep is adaptive, then state a representative timestep.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.landice.ice.dynamics.timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"©2017 ES-DOC"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
mohanprasath/BigDataExercises
|
week4/Spark_Streaming.ipynb
|
mit
|
[
"Apache Spark Streaming\nhttp://spark.apache.org/streaming/\nDocumentation URL:\nhttp://spark.apache.org/docs/latest/streaming-programming-guide.html\nPython reference:\nhttp://spark.apache.org/docs/latest/api/python/index.html\nScala reference:\nhttp://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.package\n1\n You need this : a StreamingContext object to do any streaming task, similar to a SparkContext.\n Scala Example :",
"import org.apache.spark._\nimport org.apache.spark.streaming._\n\nval conf = new SparkConf().setMaster(\"local[*]\").setAppName(\"Example\")\nval ssc = new StreamingContext(conf, Seconds(1))",
"Python Example:",
"from pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\n\nsc = SparkContext(\"local[*]\", \"Example\") # Created a SparkContext object and is being passed to the StreamingContext\nssc = StreamingContext(sc, batchDuration=1) # batchDuration accepts value in seconds",
"2\n You need this : a DStream object, its a sequence of RDDs.\nhttp://spark.apache.org/docs/latest/streaming-programming-guide.html#discretized-streams-dstreams\nInput Sources: The following examples, use a TCP Socket as an input sources. We can group the input types as,\n\n\nBasic sources : Sockets, File systems\n http://spark.apache.org/docs/latest/streaming-programming-guide.html#basic-sources\n\n\nAdvanced sources : Kafka, Flume, etc\n http://spark.apache.org/docs/latest/streaming-programming-guide.html#advanced-sources\n\n\n Scala Example :",
"// Create a DStream that will connect to hostname:port, like localhost:9999\nval lines = ssc.socketTextStream(\"localhost\", 9999)",
"Python Example:",
"# Create a DStream that will connect to hostname:port, like localhost:9999\nlines = ssc.socketTextStream(\"localhost\", 9999)",
"Note:\nReceiver(Scala and Java documentation) is available. Receivers are responsible for handling the data from the input source and store it for spark streaming access. For the example, we are using a single data source. When you use multiple data sources, the cores allocated to the workers n, should be greater than the number of input sources. \n3\nStart the stream: Like \"actions\", the real computation start after the start command being issued. Only one StreamingContext can be allowed in a Spark Session. But you can use multiple input streams, and don't forget to allocate enough workers for processing those input streams. \nssc.start()\nNote: Once the streaming context is started no new code can be added. Stopping the context is similar to Spark Context, you can use\nssc.stop()\n4\nAll action performed on the DStreams are done in parallel, so to collect the final result, or update it periodically, use the following,\nUpdateStateByKey\nTransofrmations on RDD-to-RDD are still allowed in DStream. \nhttp://spark.apache.org/docs/latest/streaming-programming-guide.html#transformations-on-dstreams\n5\nhttp://spark.apache.org/docs/latest/streaming-programming-guide.html#window-operations\nWindowed Operations are performed over a DStream containing a discrete data. Here a window represents a collection of such discrete data. DStream considered data in a discrete fashion and uses a single DStream till the end. While using window operation, the data is grouped into multiple windows. Windows may overlap to each other but the final results produce a discrete window too. \nTwo properties to specify :-\n1. window length\n2. 
sliding interval\nThe above properties are required as argument for any window operations.\nA Databricks example,\nhttps://docs.cloud.databricks.com/docs/latest/databricks_guide/07%20Spark%20Streaming/10%20Window%20Aggregations.html\nReferences:\n Steps to deploy a spark streaming application\nhttp://spark.apache.org/docs/latest/streaming-programming-guide.html#deploying-applications\n Window and Sliding window \nhttps://groups.google.com/forum/#!topic/spark-users/GQoxJHAAtX4\nhttp://www.michael-noll.com/blog/2013/01/18/implementing-real-time-trending-topics-in-storm/"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
Abjad/intensive
|
day-3/3-class-encapsulation.ipynb
|
mit
|
[
"%reload_ext abjadext.ipython\nimport abjad\nfrom abjadext import rmakers",
"Encapsulation, part 2: classes\nIn the previous notebook we encapsulated our code in functions. Functions model programming tasks as a collection of verbs (actions): data flows into and out of a series of functions until the desired result has been achieved. Classes, on the other hand, model programming tasks as a collection of nouns (objects). Objects have data (attributes) and implement methods to modify the data they contain. In this notebook we'll encapsulate our music-generating functions in a class.\n1. The class definition\nThe code below defines a class. An object-oriented class is like a template that tells a programming language how to construct instances of itself. (\"Class instance\" and \"object\" mean the same thing in an object-oriented context.) This means that after Python reads our music-maker class definition, we can instantiate as many music-maker objects as we want. (More on this below.) The four functions we defined in the previous notebook correspond to the four methods defined here. Functions and methods are both introduced with Python's def keyword. The primary difference between functions and methods is that functions can be defined at the top level of a module while methods are always defined within (and \"bound to\") a class. Classes provide an even higher level of encapsulation than functions, because classes encapsulate methods.",
"class MusicMaker:\n\n def __init__(\n self, \n counts, \n denominator, \n pitches,\n clef,\n ):\n self.counts = counts\n self.denominator = denominator\n self.pitches = pitches\n self.clef = clef\n\n def make_notes_and_rests(self, counts, denominator, time_signatures, clef):\n \"\"\"\n Makes notes and rests.\n \"\"\"\n durations = [_.duration for _ in time_signatures]\n total_duration = sum(durations)\n talea = rmakers.Talea(counts, denominator)\n talea_index = 0\n leaves = []\n current_duration = abjad.Duration(0)\n while current_duration < total_duration:\n leaf_duration = talea[talea_index]\n if 0 < leaf_duration: \n pitch = abjad.NamedPitch(\"c'\")\n else:\n pitch = None\n leaf_duration = abs(leaf_duration)\n if total_duration < (leaf_duration + current_duration):\n leaf_duration = total_duration - current_duration\n leaves_ = abjad.LeafMaker()([pitch], [leaf_duration])\n leaves.extend(leaves_)\n current_duration += leaf_duration\n talea_index += 1\n staff = abjad.Staff(leaves)\n clef = abjad.Clef(clef)\n abjad.attach(clef, staff[0])\n return staff\n\n def impose_time_signatures(self, staff, time_signatues):\n \"\"\"\n Imposes time signatures.\n \"\"\"\n selections = abjad.mutate.split(staff[:], time_signatures, cyclic=True)\n for time_signature, selection in zip(time_signatures, selections):\n abjad.attach(time_signature, selection[0])\n measure_selections = abjad.select(staff).leaves().group_by_measure()\n for time_signature, measure_selection in zip(time_signatures, measure_selections):\n abjad.Meter.rewrite_meter(measure_selection, time_signature)\n\n def pitch_notes(self, staff, pitches):\n \"\"\"\n Pitches notes.\n \"\"\"\n pitches = abjad.CyclicTuple(pitches)\n plts = abjad.select(staff).logical_ties(pitched=True)\n for i, plt in enumerate(plts):\n pitch = pitches[i]\n for note in plt:\n note.written_pitch = pitch\n\n def attach_indicators(self, staff):\n \"\"\"\n Attaches indicators to runs.\n \"\"\"\n for selection in abjad.select(staff).runs():\n 
articulation = abjad.Articulation(\"accent\")\n abjad.attach(articulation, selection[0])\n if 3 <= len(selection):\n abjad.hairpin(\"p < f\", selection)\n\n else:\n dynamic = abjad.Dynamic(\"ppp\")\n abjad.attach(dynamic, selection[0])\n abjad.override(staff).dynamic_line_spanner.staff_padding = 4\n\n def make_staff(self, time_signatures):\n \"\"\"\n Makes staff.\n \"\"\"\n staff = self.make_notes_and_rests(\n self.counts,\n self.denominator,\n time_signatures,\n self.clef\n )\n self.impose_time_signatures(staff, time_signatures)\n self.pitch_notes(staff, self.pitches)\n self.attach_indicators(staff)\n return staff",
"2. Instantiating objects\nWe can now instantiate a music-maker object. We do this by calling the music-maker's initializer, to which we pass counts, denominators and pitches:",
"pairs = [(3, 4), (5, 16), (3, 8), (4, 4)]\ntime_signatures = [abjad.TimeSignature(_) for _ in pairs]\ncounts = [1, 2, -3, 4]\ndenominator = 16\nstring = \"d' fs' a' d'' g' ef'\"\npitches = abjad.CyclicTuple(string.split())\nclef = \"treble\"\nmaker = MusicMaker(counts, denominator, pitches, clef)",
"Finally pass in time signatures and ask our music-maker to make a staff:",
"staff = maker.make_staff(time_signatures)\nabjad.show(staff)",
"3. Making musical texture with multiple instances of a single class\nBecause we can create multiple, variously initialized instances of the same class, it's possible to create both minimal and varied a polyphonic textures with just a single class definition. First we initialize four different makers:",
"fast_music_maker = MusicMaker(\n counts=[1, 1, 1, 1, 1, -1],\n denominator=16,\n pitches=[0, 1],\n clef=\"treble\"\n)\n\nslow_music_maker = MusicMaker(\n counts=[3, 4, 5, -1],\n denominator=4,\n pitches=[\"b,\", \"bf,\", \"gf,\"],\n clef=\"bass\",\n)\n\nstuttering_music_maker = MusicMaker(\n counts=[1, 1, -7],\n denominator=16,\n pitches=[23],\n clef=\"treble\"\n)\n\nsparkling_music_maker = MusicMaker(\n counts=[1, -5, 1, -9, 1, -5],\n denominator=16,\n pitches=[38, 39, 40],\n clef=\"treble^8\",\n)",
"Let's use these four music-makers to create a duo. We can set up a score with two staves and generate the music according to a single set of time signatures:",
"upper_staff = abjad.Staff()\nlower_staff = abjad.Staff()\npairs = [(3, 4), (5, 16), (3, 8), (4, 4)]\ntime_signatures = [abjad.TimeSignature(_) for _ in pairs]",
"Next, we loop through four makers, appending each maker's music to our staves as we go. We'll generate music for the top and bottom staff independently:",
"makers = (\n fast_music_maker,\n slow_music_maker,\n stuttering_music_maker,\n sparkling_music_maker,\n)\n\nfor maker in makers:\n staff = maker.make_staff(time_signatures)\n selection = staff[:]\n staff[:] = []\n upper_staff.extend(selection)\n\nmakers = (\n slow_music_maker,\n slow_music_maker,\n stuttering_music_maker,\n fast_music_maker,\n)\n \nfor maker in makers:\n staff = maker.make_staff(time_signatures)\n selection = staff[:]\n staff[:] = []\n lower_staff.extend(selection)",
"4. Making the score\nNow we can make our final score and add some formatting:",
"piano_staff = abjad.StaffGroup(\n [upper_staff, lower_staff],\n lilypond_type=\"PianoStaff\",\n)\nabjad.override(upper_staff).dynamic_line_spanner.staff_padding = 4\nabjad.override(lower_staff).dynamic_line_spanner.staff_padding = 4\nscore = abjad.Score([piano_staff])\nbar_line = abjad.BarLine(\"|.\")\nlast_leaf = abjad.select(lower_staff).leaf(-1)\nabjad.attach(bar_line, last_leaf)\n\nlilypond_file = abjad.LilyPondFile.new(score)\nlilypond_file.header_block.composer = \"Abjad Summer Course\"\nstring = r\"\\markup \\fontsize #3 \\bold ENCAPSULATION\"\ntitle_markup = abjad.Markup(string, literal=True)\nlilypond_file.header_block.title = title_markup\nlilypond_file.header_block.subtitle = \"working with classes\"\nabjad.show(lilypond_file)",
"In the next notebook we'll make a command class and explore collaboration between classes.\n\nContributed: Treviño (2.21); revised: Bača (3.2)."
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
MelroLeandro/Matematica-Discreta-para-Hackers-ipnyb
|
Chapter2_Calculo_Proposicional/Chapter2_Calculo_Proposicional.ipynb
|
mit
|
[
"Chapter 2\nLógica proposicional\n\n\"Poder-se-á definir a Lógica como a ciência das regras que legitimam\na utilização da palavra portanto.\" B. Ruyer in Logique. \n\nProposição\nNo caso das instruções if e while, a execução dum bloco de código está dependente da avaliação duma função proposicional (condição). Com o objectivo de estudar estas instruções e formalizar a noção de função proposicional começa-se por rever algumas noções de lógica proposicional e do cálculo de predicados.\n\nOs elementos básicos da lógica são as proposições ou sentenças que se entendem como afirmações precisas. Na lógica clássica, que abordamos, a avaliação duma proposição é regida por dois princípios fundamentais:\n- Princípio da não contradição - Uma proposição não pode ser simultaneamente verdadeira e falsa;\n- Princípio do terceiro excluído - Uma proposição ou é verdadeira ou é falsa;\nPor exemplo \"1 é maior que 3\" é uma proposição cujo valor lógico é o de\n\"falsidade\" enquanto que \"todos os triângulos têm três lados e três ângulos\" é uma proposição cujo valor lógico é o de \"verdade\". \nPor outro lado \"x < 3\" não é uma proposição (depende do valor que venha a ser atribuído à variável x) sendo denominada função proposicional. \nRepresentam-se por letras (geralmente minúsculas) as proposições genéricas (ou variáveis proposicionais) e por 1 (ou V) e 0 (ou F) os valores lógicos de \"verdade\" e \"falsidade\", respectivamente. \nA área da lógica que trata as proposições neste contexto é designada por cálculo proposicional ou lógica proposicional.\nProposição simples e proposição composta\nPor vezes combinam-se várias proposições para obter proposições mais expressivas. 
Neste sentido, classificamos as proposições como simples (também denominada atómica) ou\ncomposta (também denominada molecular).\nAs proposições simples apresentam apenas uma afirmação:\n\n\n$p:$ $\\sqrt{2}$ não é um número racional.\n\n\n$q:$ existem mais números reais que inteiros.\n\n\n$v:$ $1=2$.\n\n\n$r:2+3>4$.\n\n\nAs proposições compostas são definidas por uma ou por mais do que uma\nproposição, usando na sua formação operadores lógicos\n(também designados de conectivas lógicas ou operadores para formação de proposições):\n\n\n$x = 2$ e $y = 1$.\n\n\nse $x > y$ então $y < x$.\n\n\nnão é verdade que $2+3>4$.\n\n\nConectivas lógicas\nEm cálculo proposicional as proposições são geradas a partir de proposições simples, usando operadores para formação de proposições. Vamos tomar como sintacticamente válidas proposições compostas da forma:\n\nnão $p$,\n$p$ e $q$,\n$p$ ou $q$,\nou $p$ ou (exclusivo) $q$,\nse $p$ então $q$,\n$p$ se e só se $q$.\n\nonde $p$ e $q$ são proposições (simples ou compostas). Neste casos, em geral, pretende-se obter os valores lógicos das proposições compostas em função dos valores lógicos conhecidos das proposições mais simples que as compõem. Por forma a podermos formalizar a lógica e a avaliação de proposições, convencionamos a seguinte representação para os operadores sintácticos usados na formação de proposições:\nOperações Lógicas | Símbolos | Notação | Significado\n------------------|----------|---------|------------\nNegação | $\\neg$ ou $\\sim$ | $\\neg p$ | não p\nConjunção | $\\wedge$ | $p \\wedge q$ | p e q\nDisjunção | $\\vee$ | $p \\vee q$ | p ou q \nDisjunção exclusiva | $\\oplus$ ou $\\dot{\\vee}$ | $p\\oplus q$ | ou p ou (exclusivo) q \nImplicação | $\\rightarrow$ | $p\\rightarrow q$ | se p então q \nBi-implicação | $\\leftrightarrow$ | $p\\leftrightarrow q$ | p se só se q\nNegação\nSeja $p$ uma proposição. A afirmação \"não se verifica que\np\" é uma nova proposição, designada de negação de $p$. 
A\nnegação de $p$ é denotada por $\\neg p$ ou $\\sim p$. A proposição\n$\\neg p$ deve ler-se \"não p\" e é verdadeira se p é falsa. A proposição $\\neg p$ é falsa se p é verdadeira.\nÉ usual definir a interpretação dum operador lógico através de\ntabelas do tipo:\n$p$ | $\\neg p$\n:----:|:-------:\n T | F \n F | T\nou \n$p$ | $\\neg p$ \n:----:|:--------:\n 1 | 0 \n 0 | 1\nstas tabelas são designadas por tabelas de verdade. Neste\ncaso define completamente o operador negação, relacionando os\nvalores lógicos de p e $\\neg p$.\nNote que, em linguagem corrente nem sempre se pode negar logicamente uma proposição,\nantepondo o advérbio \"não\" ao verbo da proposição, isto apenas se verifica nos casos mais simples.\nPor exemplo: negar \"Hoje é sábado.\" é afirmar \"Hoje não é sábado\".\nMas negar que \"Todas as aves voam\" é o mesmo que afirmar \"não se verifica que todas as aves voam\" o que é equivalente a afirmar que \"Nem todas as aves voam\" mas não é afirmar que \"Todas as aves não voam\".\nEm linguagem Matemática, dado o rigor da interpretação das\ndesignações usadas, o processo de negação fica simplificado. Por\nexemplo, negar \"5>2\" é o mesmo que afirmar \"$\\neg$(5>2)\" que é equivalente, por definição da relação >, a escrever \"5$\\leq$2\". Assim como \"5>2\" é verdade, temos pela interpretação da negação que \"$\\neg$(5>2)\" é falso.",
"#\n# Tabela da Negação\n#\nfor p in [True,False]:\n print('not',p,\"=\", not p)",
"Conjunção\nSejam $p$ e $q$ proposições. A proposição \"$p$ e $q$\", denotada\n$p\\wedge q$, é a proposição que é verdadeira apenas quando $p$ e $q$\nsão ambas verdadeiras, caso contrário é falsa. A proposição $p\\wedge q$\ndiz-se a \\textbf{conjunção} de $p$ e $q$.\nAssim, os valores lógicos das três proposições $p$, $q$, e $p\\wedge\nq$ estão relacionados pela tabela de verdade:\n$p$ | $q$ | $p$ $\\wedge$ $q$ \n:-----:|:----:|:--------:\n V | V | V \n V | F | F \n F | V | F \n F | F | F\nNote que a tabela tem quatro linhas, uma por cada combinação\npossível de valores de verdade para as proposições $p$ e $q$.",
"#\n# Tabela da conjunção\n#\nfor p in [True,False]:\n for q in [True,False]:\n print(p,'and',q,'=', p and q)",
"Disjunção\nSejam p e q proposições. A proposição \"$p$ ou $q$\", denotada\np$\\vee$q, é a proposição que é falsa apenas quando $p$ e $q$ são\nambas falsas, caso contrário é verdade. A proposição p$\\vee$q\ndiz-se a disjunção de p e q.\nA tabela de verdade de p $\\vee$q toma assim a forma:\n$p$ | $q$ | $p$ $\\vee$ $q$ \n:------:|:-----:|:---------:\n V | V | V \n V | F | V \n F | V | V \n F | F | F\nA conectiva ou é interpretada na versão inclusiva da\npalavra \"ou\" em linguagem corrente. Note que, nas proposições seguintes ou tem ou significado inclusivo ou significado\nexclusivo consoante o contexto de interpretação:\n- O João pratica futebol ou natação.[ou ambas as coisas]\n- Ele é do Sporting ou do Porto.[mas não as duas coisas]",
"#\n# Tabela da disjunção\n#\nfor p in [True,False]:\n for q in [True,False]:\n print(p,'or',q,'=', p or q)",
"Disjunção exclusiva\nPara tornar a interpretação da disjunção independente do contexto definimos: A disjunção exclusiva\nde p e q, denotada p$\\oplus$q ou p$\\dot{\\vee}$q, é a\nproposição que é verdade apenas quando, ou p é verdadeira ou q é\nverdadeira, caso contrário é falsa.\nA tabela de verdade de p$\\oplus$q toma assim a forma:\n$p$ | $q$ | $p$ $\\oplus$ $q$ \n:------:|:-----:|:--------:\n V | V | F \n V | F | V \n F | V | V \n F | F | F",
"#\n# Tabela da disjunção exclusiva\n#\nfor p in [True,False]:\n for q in [True,False]:\n if p!=q:\n print(p,'xor',q,'=', True)\n else:\n print(p,'xor',q,'=', False)",
"Exercício:\nRelacione o valor lógico das proposições $p$, $q$, $r$ e\n$(p\\wedge (\\neg q))\\oplus (r\\vee p)$.\nExercício:\nIndique os valores (de verdade ou falsidade) das seguintes afirmações:\n- $3\\leq 7$ e 4 é um número inteiro ímpar.\n- $3\\leq 7$ ou 4 é um número inteiro ímpar.\n- 5 é ímpar ou divisível por 4.\nImplicação\nSejam p e q proposições. A implicação p$\\rightarrow$q é\na proposição que é falsa quando p é verdadeira e q é falsa, nos\noutros casos é verdadeira.\nA tabela de verdade de p$\\rightarrow$q toma assim a forma:\n$p$ | $q$ | $p$ $\\rightarrow$ $q$ \n:------:|:-----:|:----------:\n V | V | V \n V | F | F \n F | V | V \n F | F | V\nNuma proposição do tipo p$\\rightarrow$q a proposição p recebe o\nnome de hipótese (antecedente ou premissa) e a q chama-se\ntese (conclusão ou consequente). A proposição p$\\rightarrow$q também é muitas vezes designada por declaração\ncondicional. Estas designações são compatíveis com o uso da implicação em linguagem corrente, devemos no entanto notar que a tabela entra em conflito com a interpretação que fazemos da implicação: neste caso não se dirá \"p implica q\" quando se sabe à priori que p é falsa. Na interpretação que apresentamos para a implicação ela é verdade sempre que \"p\" é falsa independentemente do valor lógico de \"q\". Esta situação pode ilustrar-se com a implicação \"se 1+1=1 então 2=3\" que é verdadeira, uma vez que o antecedente é falso.",
"#\n# Tabela da implicação\n#\n\nfor p in [True,False]:\n for q in [True,False]:\n if p and not q:\n print(p,'-->',q,'=',False)\n else:\n print(p,'-->',q,'=',True)",
"Bi-implicação\nSejam p e q proposições. A bi-condicional ou bi-implicação de p e q é a proposição p$\\leftrightarrow$q que é verdadeira\nquando p e q têm o mesmo valor lógico.\nA tabela de verdade de p$\\leftrightarrow$q toma assim a forma:\n$p$ | $q$ | $p$ $\\leftrightarrow$ $q$ \n:------:|:-----:|:----------:\n V | V | V \n V | F | F \n F | V | F \n F | F | V\nA proposição p$\\leftrightarrow$q deve ler-se \"p se e só se q\"\n(abreviado por \"p sse q\") ou \"p é condição necessária e\nsuficiente para q\".",
"#\n# Tabela da disjunção exclusiva\n#\nfor p in [True,False]:\n for q in [True,False]:\n if p==q:\n print(p,'<->',q,'=', True)\n else:\n print(p,'<->',q,'=', False)",
"Facilmente podemos mostrar que as proposições p$\\leftrightarrow$q\ne $(p\\rightarrow q)\\wedge(q\\rightarrow p)$ têm os mesmos valores\nlógicos, ou seja a proposição $(p\\leftrightarrow q)\\leftrightarrow\n((p\\rightarrow q)\\wedge(q\\rightarrow p))$ é sempre verdadeira.\n(p | $\\leftrightarrow$ | q) | $\\leftrightarrow$ | ((p | $\\rightarrow$ | q) | $\\wedge$ | (q | $\\rightarrow$ | p)) \n:------:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----: \n V | V | V | V | V | V | V | V | V | V | V \n V | F | F | V | V | F | F | F | F | V | V \n F | F | V | V | F | V | V | F | V | F | F \n F | V | F | V | F | V | F | V | F | V | F \n------|----|----|----|----|----|----|----|----|----|---- \n1 | 2 | 1 | 4 | 1 | 2 | 1 | 3 | 1 | 2 | 1 \nExercício:\nSuponhamos que p,q,r representam as seguintes sentenças:\n\n$p:$\"7 é um número inteiro par\"\n$q:3+1=4$\n$r:$\"24 é divisível por 8\"\n\n\n\nEscreva em linguagem simbólica as proposições\n\n\n$3+1\\neq 4$ e 24 é divisível por 8\n\n\nnão é verdade que 7 seja ímpar ou 3+1=4\n\n\nse 3+1=4 então 24 não é divisível por 8\n\n\n\n\nEscreva por palavras as sentenças\n\n\n$p\\vee(\\neg q)$\n\n\n$\\neg(p\\wedge q)$\n\n\n$(\\neg r)\\vee (\\neg q)$\n\n\n\n\nExercício:\nConstruir as tabelas de verdade das seguintes proposições:\n1. $((p\\rightarrow q)\\wedge p)\\rightarrow q$\n1. $p\\leftrightarrow(q\\rightarrow r)$\n1. $(p\\wedge(\\neg p))\\rightarrow q$\n1. $((p\\vee r)\\wedge(q\\vee r))\\wedge((\\neg p)\\vee (\\neg r))$\n1. $(p\\wedge(q\\vee r))\\wedge (q\\wedge (p\\vee r))$\nExercício:\nQuantas linhas tem a tabela de verdade de uma proposição com $n$ variáveis proposicionais?\nOrdem de precedência das conectivas lógicas\nAté aqui, temos usado parêntesis para definir a ordem de\naplicação dos operadores lógicos numa proposição composta. 
Por forma\na reduzir o número de parêntesis adoptamos a seguinte convenção: Sempre que numa expressão estiverem presentes várias operações lógicas, convenciona-se, na ausência de parêntesis, que as operações se efectuem na ordem seguinte:\n1. a negação;\n1. a conjunção e a disjunção;\n1. a implicação e a bi-implicação.\nAssim,\n1. $p\\rightarrow ((\\neg p)\\vee r)$ pode escrever-se $p\\rightarrow \\neg p\\vee r$;\n1. $(p\\wedge (\\neg q))\\leftrightarrow c$ pode escrever-se $p\\wedge \\neg q\\leftrightarrow c$;\n1. $p\\vee q\\wedge \\neg r \\rightarrow p \\rightarrow\\neg q$ deve ser entendida como\n$(((p\\vee q)\\wedge(\\neg r))\\rightarrow p) \\rightarrow(\\neg q)$.\nTautologia\nChama-se tautologia (ou fórmula logicamente\nverdadeira) a uma proposição que é verdadeira, para quaisquer que sejam os valores lógicos atribuídos às variáveis proposicionais que a compõem. Dito de outra forma, chama-se tautologia a uma proposição cuja coluna correspondente na tabela de verdade possui apenas Vs ou 1s. Exemplo duma tautologia é a proposição $p\\vee(\\neg p)$, designada de \"Princípio do terceiro excluído\",\n\nA negação duma tautologia, ou seja uma proposição que é sempre falsa, diz-se uma contra-tautologia ou contradição. Se uma proposição não é nem uma tautologia nem uma contradição denomina-se por contingência.\nNão deve confundir-se contradição com proposição falsa, assim como não deve confundir-se tautologia com proposição verdadeira. O facto de uma tautologia ser sempre verdadeira e uma contradição ser sempre falsa deve-se à sua forma lógica (sintaxe) e não ao significado que se lhes pode atribuir (semântica).\nA tabela de verdade\n\nmostra que $p\\rightarrow(p\\vee q)$ é uma tautologia, enquanto que $(p\\rightarrow q)\\wedge (p\\wedge (\\neg q))$ é uma contradição.\nExercício\nMostre que são tautologias:\n1. $(\\neg q\\rightarrow \\neg p)\\leftrightarrow(p\\rightarrow q)$\n1. 
$(p\\leftrightarrow q)\\leftrightarrow ((p\\rightarrow q)\\wedge(q\\rightarrow p))$\nExemplos de outras tautologias são apresentadas abaixo:\n\nEquivalências proposicionais\nAs proposições $p$ e $q$ dizem-se logicamente\nequivalentes se $p\\leftrightarrow q$ é uma tautologia. Por $p\\equiv\nq$ ou $p\\Leftrightarrow q$ denotamos que $p$ e $q$ são logicamente\nequivalentes.\nDiz-se que a proposição $p$ implica logicamente a proposição $q$ se a veracidade da primeira arrastar necessariamente a veracidade da segunda, ou seja, se a proposição p$\\rightarrow$q for uma tautologia.\n\n$\\neg q\\rightarrow \\neg p \\Leftrightarrow p\\rightarrow q$\n\n$\\neg$ | $q$ | $\\rightarrow$ | $\\neg$ | $p$ \n:-----------:|:-------:|:---------------:|:--------:|:-----:\nF | V | V | F | V \nV | F | F | F | V \nF | V | V | V | F \nV | F | V | V | F \n-----------|-------|---------------|--------|-----\n2 | 1 | 3 | 2 | 1 \ne\n$p$ | $\\rightarrow$ | $q$ \n:-----:|:-------------:|:----:\nV | V | V \nV | F | F \nF | V | V \nF | V | F \n-------|---------------|------ \n1 | 2 | 1 \n\n$p\\leftrightarrow q\\Leftrightarrow (p\\rightarrow q)\\wedge(q\\rightarrow p)$\n\n($p$ | $\\leftrightarrow$ | q) | $\\leftrightarrow$ | (($p$ | $\\rightarrow$ | $q$) | $\\wedge$ | ($q$ | $\\rightarrow$ | $p$)) \n:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:\nV | V | V | V | V | V | V | V | V | V | V \nV | F | F | V | V | F | F | F | F | V | V \nF | F | V | V | F | V | V | F | V | F | F \nF | V | F | V | F | V | F | V | F | V | F \n----|----|----|----|----|----|----|----|----|----|----\n 1 | 2 | 1 | 4 | 1 | 2 | 1 | 3 | 1 | 2 | 1 \nDeste modo, a equivalência proposicional pode ser sempre verificada através duma tabela de verdade. Em particular, as proposições $p$ e $q$ são equivalentes se e só se as colunas, na tabela de verdade, que determinam os seu valores lógicos coincidirem.\nExercício\nMostre que são exemplos de equivalências proposicionais:\n1. 
$\\neg(p\\vee \\neg p) \\Leftrightarrow p \\wedge \\neg p$\n1. $\\neg (p\\vee q)\\Leftrightarrow \\neg p \\wedge \\neg q$\n1. $\\neg p\\vee q \\Leftrightarrow p \\rightarrow q$\n1. $p\\vee(q\\wedge r)\\Leftrightarrow(p\\vee q)\\wedge(p\\vee r)$\nExercício\nIndique quais das sentenças seguintes são equivalentes:\n1. $p\\wedge(\\neg q)$\n1. $p\\rightarrow q$\n1. $\\neg((\\neg p)\\vee q)$\n1. $q\\rightarrow(\\neg q)$\n1. $(\\neg p)\\vee q$\n1. $\\neg(p\\rightarrow q)$\n1. $p\\rightarrow(\\neg q)$\n1. $(\\neg p)\\rightarrow (\\neg q)$\nExercício\nMostre que cada uma das proposições que se seguem:\n1. $(\\neg p)\\vee q$\n1. $(\\neg q)\\rightarrow (\\neg p)$\n1. $\\neg(p\\wedge (\\neg q))$\né equivalente a $p\\rightarrow q$.\nExercício\nMostre que:\n1. $p\\vee(q\\wedge r)$ não é logicamente equivalente a $(p\\vee q)\\wedge r$.\n1. $p\\vee (q\\wedge r)$ é logicamente equivalente a $(p\\vee q)\\wedge (p\\vee r)$.\n1. $p\\vee(\\neg (q \\vee r))$ é logicamente equivalente a $(p\\vee(\\neg q))\\vee(\\neg r)$\nDe seguida apresentamos exemplos de equivalências úteis para o que se segue (que podem ser verificadas através de tabelas de verdade):\nNome | Propriedade | Propriedade \n-------------|----------------------|-------------------\nComutatividade | $p \\wedge q \\Leftrightarrow q \\wedge p$ | $p \\vee q \\Leftrightarrow q \\vee p$ \nAssociativa| $(p\\wedge q)\\wedge r \\Leftrightarrow p \\wedge (q \\wedge r)$ | $(p\\vee q)\\vee r \\Leftrightarrow p \\vee (q \\vee r)$ \nIdempotência | $p\\wedge p \\Leftrightarrow p$ | $p\\vee p \\Leftrightarrow p$ \nIdentidade | $p\\wedge V\\Leftrightarrow p$ | $p\\vee F\\Leftrightarrow p$ \nDominância | $p\\wedge F\\Leftrightarrow F$ | $p\\vee V\\Leftrightarrow V$ \nAbsorção | $p\\wedge(p\\vee r)\\Leftrightarrow p$ |$p\\vee(p\\wedge r)\\Leftrightarrow p$\nDistributivas | $p\\wedge(q\\vee r)\\Leftrightarrow(p\\wedge q)\\vee(p\\wedge r)$ | $p\\vee(q\\wedge r)\\Leftrightarrow(p\\vee q)\\wedge(p\\vee r)$ \nDistributivas | $p\\rightarrow(q\\vee 
r)\\Leftrightarrow(p\\rightarrow q)\\vee(p\\rightarrow r)$ | $p\\rightarrow(q\\wedge r)\\Leftrightarrow (p\\rightarrow q)\\wedge(p\\rightarrow r)$ \nLeis de De Morgan | $\\neg (p\\wedge q)\\Leftrightarrow \\neg p \\vee \\neg q$ | $\\neg (p\\vee q)\\Leftrightarrow \\neg p \\wedge \\neg q$\nDef. Implicação | $p\\rightarrow q \\Leftrightarrow \\neg p \\vee q$ | $p\\rightarrow q\\Leftrightarrow \\neg(p\\wedge\\neg q)$ \nDef. Bi-condicional | $p\\leftrightarrow q \\Leftrightarrow (p\\rightarrow q) \\wedge (q \\rightarrow p)$ | $p\\leftrightarrow q \\Leftrightarrow (\\neg p \\vee q) \\wedge (\\neg q \\vee p)$ \nNegação | $\\neg(\\neg p)\\Leftrightarrow p$ | \nContraposição | $p\\rightarrow q \\Leftrightarrow \\neg q \\rightarrow \\neg p$| \nTroca de premissas | $p\\rightarrow (q\\rightarrow r)\\Leftrightarrow q\\rightarrow (p\\rightarrow r)$ | \nAs equivalências lógicas apresentadas na tabela anterior, podem ser usadas na determinação de equivalências lógicas adicionais. Isso porque, podemos numa proposição composta, substituir\nproposições por proposições que lhes sejam equivalentes\nsem que isso altere os valores de verdade da proposição original.\nPor exemplo:\n$$\n\\begin{array}{rcll}\n \\neg(p\\vee(\\neg p \\wedge q)) & \\Leftrightarrow & \\neg p \\wedge \\neg(\\neg p \\wedge q) & \\text{da segunda lei de De Morgan} \\\n & \\Leftrightarrow & \\neg p \\wedge [\\neg(\\neg p) \\vee \\neg q] & \\text{da primeira lei de De Morgan} \\\n & \\Leftrightarrow & \\neg p \\wedge (p\\vee \\neg q) & \\text{da lei da dupla negação} \\\n & \\Leftrightarrow & (\\neg p \\wedge p) \\vee (\\neg p \\wedge \\neg q) & \\text{da segunda distributividade} \\\n & \\Leftrightarrow & F \\vee (\\neg p \\wedge \\neg q) & \\text{já que } \\neg p \\wedge p \\Leftrightarrow F \\\n & \\Leftrightarrow & \\neg p \\wedge \\neg q & \\text{da lei identidade}\n\\end{array}\n$$\nDonde podemos concluir que $\\neg(p\\vee(\\neg p \\wedge q))$ e $\\neg p\n\\wedge \\neg q$ são proposições logicamente 
equivalentes:\n$$\n \\neg(p\\vee(\\neg p \\wedge q)) \\Leftrightarrow \\neg p \\wedge \\neg q\n$$\nExercício\nSimplifique as seguintes proposições:\n1. $p\\vee(q\\wedge (\\neg p))$\n1. $\\neg(p\\vee(q\\wedge(\\neg r)))\\wedge q$\n1. $\\neg((\\neg p)\\wedge(\\neg q))$\n1. $\\neg((\\neg p)\\vee q)\\vee(p\\wedge(\\neg r))$\n1. $(p\\wedge q)\\vee (p\\wedge (\\neg q))$\n1. $(p\\wedge r)\\vee((\\neg r)\\wedge (p\\vee q))$\nExercício\nPor vezes usa-se o símbolo $\\downarrow$ para construir proposições compostas $p\\downarrow q$ definidas por duas proposições $p$ e $q$, que é verdadeira quando e só quando $p$ e $q$ são simultaneamente falsas, e é falsa em todos os outros casos. A proposição $p\\downarrow q$ lê-se \"nem $p$ nem $q$\".\n1. Apresente a tabela de verdade de $p\\downarrow q$.\n1. Expresse $p\\downarrow q$ em termos das conectivas $\\wedge,\\vee$ e $\\neg$.\n1. Determine proposições apenas definidas pela conectiva $\\downarrow$ que sejam equivalentes a $\\neg p$, $p\\wedge q$ e $p\\vee q$.\nExercício\nExpresse a proposição $p\\leftrightarrow q$ usando apenas os símbolos $\\wedge,\\vee$ e $\\neg$.\nConsiderações sobre a implicação\nAs duas primeiras linhas da tabela da implicação\n$p$ | $q$ | $p\\rightarrow q$ \n:-------:|:-------:|:------------:\n V | V | V \n V | F | F \n F | V | V \n F | F | V\nnão apresentam qualquer problema sob o ponto de vista intuitivo do senso comum. 
Quanto às duas últimas, qualquer outra escolha possível apresenta desvantagens sob o ponto de vista lógico, o que levou à escolha das soluções apresentadas, já que:\n\nfazendo F na 3º linha e F na 4º linha, obtém-se a tabela da conjunção\nfazendo F na 3º linha e V na 4º linha, obtém-se a tabela da bi-implicação\nresta a possibilidade de fazer V na 3º linha e F na 4º linha que também não é, pois isso equivaleria a recusar a equivalência\n$$\n(p\\rightarrow q)\\Leftrightarrow(\\neg q\\rightarrow\\neg p)\n$$\nque é uma equivalência aconselhável, já que a proposição \"se o Pedro fala, existe\" é (intuitivamente) equivalente à proposição \"se o Pedro não existe, não fala\". A aceitação desta equivalência impõe a tabela considerada para a implicação.\n\n$\\neg$ | $q$ | $\\rightarrow$ | $\\neg$ | $p$ \n:-------:|:-----:|:---------------:|:--------:|:-------:\n F | V | V | F | V\n V | F | F | F | V \n F | V | V | V | F \n V | F | V | V | F \n-------|-----|---------------|--------|-------\n2 | 1 | 3 | 2 | 1 \ne \n$p$ | $\\rightarrow$ | $q$ \n:----:|:---------------:|:-------:\nV | V | V \nV | F | F \nF | V | V \nF | V | F \n----|---------------|-------\n1 | 2 | 1 \nA partir duma implicação $r$ dada por $p\\rightarrow q$ define-se as\nproposições:\n1. $q\\rightarrow p$, designada de recíproca da implicação $r$;\n1. $\\neg q\\rightarrow \\neg p$, designada por contra-recíproca de $r$;\n1. $\\neg p\\rightarrow \\neg q$, designada por inversa de $r$.\nObserve-se que, embora a contra-recíproca seja equivalente à proposição original, o mesmo não acontece com a recíproca (e a inversa, que lhe é equivalente) o que se pode verificar através das respectivas tabelas de verdade.\nExercício\nDetermine:\n1. a contra-recíproca de $(\\neg p)\\rightarrow q$\n1. a inversa de $(\\neg q)\\rightarrow p$\n1. a recíproca da inversa de $q\\rightarrow (\\neg p)$\n1. 
a negação de $p\\rightarrow (\\neg q)$\nVoltando ao Python\nPython: de volta às cláusulas if\nO mecanismo que mais temos usado para controlo de fluxo da execução são cláusulas if. Por exemplo:",
"x = int(input(\"Escreva um inteiro: \"))\n\nif x < 0:\n x = 0\n print('É negativo... vou transforma-lo em zero!')\nelif x == 0:\n print('É zero')\nelif x == 1:\n print('É a unidade.')\nelse:\n print('É num número grande!')",
"Já vimos que podem existir um ou mais blocos elif, e o bloco else é opcional. O comando elif é uma abreviação para ``else if'', sendo útil para reduzir a quantidade de indentações. Uma sequência if ... elif ... elif ... é o substituto para os comandos switch ou case disponíveis noutras linguagens de programação.\nPython: de volta ao comando for\nComo vimos no Python o comando for permite iterar sobre objectos de qualquer sequência (uma lista ou uma string) ou um conjunto, nas sequências o ciclo for segue a ordem pela qual os objectos aparecem na sequência. Por examplo:",
"# Medindo strings\nwords = ['Platão', 'Sócrates', 'Eu']\nfor w in words:\n print(w, len(w))",
"Caso tenha de modificar a sequência durante o ciclo for (por exemplo para duplicar elementos seleccionados), é conveniente fazer primeiro uma cópia. A noção de slice torna isso possível:",
"for w in words[:]:\n if len(w) > 6:\n words.insert(0, w)\n\nwords",
"Python: A função range()\nQuando temos de iterar numa sequência de números, a função built-in range() trata do assunto. Permitindo gerar progressões aritméticas",
"L=range(10)\n\nfor i in L: print(i,' ',end='')",
"O ponto final nunca é parte da lista gerada; range(10) gera uma sequência de 10 valores, os índices de uma lista com 10 objectos. É possível fazer o domínio ter inicio noutro número, ou indicar um incremento diferente (mesmo negativo; este incremento é usualmente designado de 'passo'):",
"for i in range(5, 10): print(i,' ',end='')\n\nfor i in range(0, 10, 3): print(i,' ',end='')\n\nfor i in range(-10, -100, -30): print(i,' ',end='')",
"Para iterar nos índices de uma sequência, pode combinar range() com a função len() como por exemplo:",
"a = ['Euler', 'Decarte', 'Pascal', 'Newton', 'Eu']\nfor i in range(len(a)):\n print( i, a[i])",
"Na maioria dos casos, é conveniente usar a função enumerate().\nPython:Comando break e continue, e cláusulas else nos ciclos\nO comando break, permite encurtar os ciclos for ou while.\nOs ciclos podem ter uma cláusula else; que é executado após ter percorrido todo o domínio do ciclo for ou quando a condição dum ciclo while se torna falsa, mas nunca quando o ciclo é interrompido com um comando break. Exemplificamos isto no ciclo seguinte, que tem por objectivo determinar números primos (recorde quando é um número natural primo):",
"#\n# O crivo de Eratóstenes\n#\nfor n in range(2, 10):\n for m in range(2, n):\n if n % m == 0:\n print(n, '=', m, '*', n//m, \"=>\",n,'não é primo')\n break\n else:\n print(n,' é um primo')",
"O que faz a operador binário %?\nO que faz a operador binário //? Onde está a diferênça entre / e //\nO comando continue pára a iteração corrente, saltando para a iteração seguinte do loop:",
"for num in range(2, 10):\n if num % 2 == 0:\n print(\"É par o número \", num)\n continue\n print(\"O número \", num, \"é ímpar\")",
"Exercícios de python\nExercício:\nImplemente os operadores de implicação e bi-implicação, através de funções\nimp(bool,bool)->bool e biimp(bool,bool)->bool.",
"def imp(p,q):\n u''' imp(bool,bool)->bool\n Operador de implicação '''\n return not p or q\n\ndef biimp(p,q):\n u''' biimp(bool,bool)->bool\n Operador de bi-implicação'''\n return imp(p,q) and imp(q,p)\n\nimp(False,True)\n\nbiimp(False,True)",
"Exercício:\nApresente as tabelas de verdade da implicação da bi-implicação e da proposição $P4:(p\\rightarrow q)\\vee h$. Por exemplo, tal que\n >>> TabelaP4()\n -----------------------------\n p | q | h | (p->q)|h\n -----------------------------\n False|False|False| True\n False|False| True| True\n False| True|False| True\n False| True| True| True\n True|False|False| False\n True|False| True| True\n True| True|False| True\n True| True| True| True",
"def TabelaP4():\n u''' TabelaP4()->\n\n tabela de (p->q)|h'''\n print('p'.center(5)+'|'+'q'.center(5)+'|'+'h'.center(5)+'| (p->q)|h')\n print('-'*27)\n for p in [False,True]:\n for q in [False,True]:\n for h in [False,True]:\n aval = imp(p,q) or h\n print(str(p).center(5)+'|'+str(q).center(5)+'|'+str(h).center(5)+'|'+str(aval).center(10))\n\nTabelaP4()",
"Exercício:\nDefina a função\n cab(list)->\nem que dado uma lista de strings ['p1','p2','p3',...,'pn'], imprima o cabeçalho duma tabela de verdade. Por exemplo, tal que\n >>> cab(['p1','p2','imp(p1,p2)'])\n -------------------------\n p1 | p2 | imp(p1,p2)\n -------------------------",
"def cab(lista):\n u''' cab(list)->\n\n Imprime cabeçalho de tabela'''\n print('-'*5*(len(lista)+1))\n for prop in lista[:-1]:\n print(prop.center(5)+'|', end='')\n print(lista[-1]) # imprime último elemento\n print('-'*5*(len(lista)+1))\n\ncab(['p1','p2','imp(p1,p2)'])",
"Exercício:\nDefina a função\n linha(list)->\nem que dada uma lista de valores lógicos ['p1','p2','p3',...,'pn'], imprima uma linha 'p1|p2|p3|...|pn' duma tabela de verdade, onde cada valor lógico está numa string com 5 posições. Por exemplo, tal que\n >>> linha([True,False,True])\n True|False| True",
"def linha(lista):\n u''' linha(list)->\n\n Imprime linha de tabela'''\n for prop in lista[:-1]:\n print(str(prop).center(5)+'|', end='')\n print(str(lista[-1])) # imprime último elemento\n\nlinha([True,False,True])",
"Exercício:\nDefina uma função trad(string)->string que faça a tradução duma expressão proposicional codificada, usando os símbolos 0,1,\\&,$|$ e $\\sim$, numa expressão proposicional no Python usando False, True, and, or e not. Por exemplo, tal que\n >>> trad('(p&~(q|w))')\n '(p and not (q or w))'",
"def trad(exp):\n u''' trans(str)->str \n \n Tradução duma expressão proposicional codificada,\n usando os símbolos 0,1,\\&,$|$ e $\\sim$, numa expressão\n proposicional no Python usando False, True, and, or e not.\n '''\n exp = exp.replace('0','False')\n exp = exp.replace('1','True')\n exp = exp.replace('&',' and ')\n exp = exp.replace('|',' or ')\n exp = exp.replace('~',' not ')\n return exp\n\ntrad('(p&~(q|w))')",
"Exercício:\nDefina a função\n Eval(string,list)->bool\n\nque avalia a expressão proposicional, na sintaxe do Python, associando a cada variável usada <var> o valor lógico <bool>. A associação entre variáveis e valores lógicos deve ser descrita por pares (<var>,<bool>) na lista que serve de argumento.\nEval('(p1 and not (p2 or p3))',[('p1',True),('p2',False),('p3',True)])} avalia '(True and not (False or True))'.\n\nPor exemplo, tal que\n>>> Eval('not(p1 and p2) or p1',[('p1',True),('p2',False)])\nTrue",
"def Eval(exp, atrib):\n u''' Eval(string,list)->bool\n\n Avalia a expressão proposicional, na sintaxe do Python,\n associando a cada variável usada <var> o valor lógico <bool>.\n A associação entre variáveis e valores lógicos deve ser descrita\n por pares (<var>,<bool>) na lista que serve de argumento.\n '''\n for var in atrib:\n exp = exp.replace(var[0],str(var[1]))\n return eval(exp)\n\nEval('not(p1 and p2) or p1',[('p1',True),('p2',False)])",
"Exercício:\nRepresente em representação binário os números de $2^n-1$ até zero. Exemplo:\n>>> binlist(3)\n111\n110\n101\n100\n011\n010\n001\n000",
"def binlist(nvar):\n u''' binlist(int)->\n\n lista em representação binária os números de 2**n-1 até 0\n '''\n for n in range(2**nvar-1,-1,-1):\n print(bin(n)[2:].rjust(nvar,'0'))\n\nbinlist(3)",
"Exercício:\nUsando as funções anteriores, defina uma função\n tabela(string, list)->\nque imprima a tabela de verdade da proposição $q$, descrita pela string, assumindo que as suas variáveis estão na lista $[p1,p2,...,pn]$. (USANDO: a linguagem proposicional de símbolos 0,1,\\&,$|$ e $\\sim$, mais as funções imp(bool,bool)->bool e biimp(bool,bool)->bool))\n Por exemplo, tal que\n >>> tabela('imp(u,q)|w',['u','q','w'])\n -------------------------\n u | q | w |imp(u,q)|w\n -------------------------\n True| True| True|True\n True| True|False|True\n True|False| True|True\n True|False|False|False\n False| True| True|True\n False| True|False|True\n False|False| True|True\n False|False|False|True",
"def tabela(exp,var):\n u''' tabela(str,list)->\n\n Imprime a tabela de verdade da proposição descrita pela string,\n assumindo que as suas variáveis estão na lista.\n USANDO: a linguagem proposicional de símbolos 0,1,\\&,$|$ e $\\sim$,\n mais as funções imp(bool,bool)->bool e biimp(bool,bool)->bool)\n '''\n cab(var+[exp])\n nvar = len(var)\n for n in range(2**nvar-1,-1,-1):\n l=bin(n)[2:].rjust(nvar,'0')\n cont=0\n lista = []\n vlog = []\n for v in var:\n lista.append((v,bool(int(l[cont]))))\n vlog.append(bool(int(l[cont])))\n cont = cont + 1\n linha(vlog+ [Eval(trad(exp),lista)])\n\ntabela('imp(u,q)|w',['u','q','w'])",
"Exercício:\nUsando as funções anteriores, defina uma função\ntautologia(string, list)->bool\n\nque verifica se a proposição $q$, descrita pela string, é uma tautologia e assumindo que as suas variáveis estão descritas na lista $[p1,p2,...p_n]$. (USANDO: a linguagem proposicional de símbolos 0,1,\\&,$|$ e $\\sim$, mais as funções imp(bool,bool)->bool e biimp(bool,bool)->bool)\n >>> tautologia('biimp(~q | w, imp(q,w))',['q','w'])\n False",
"def tautologia(exp,var):\n u''' tautologia(str,list)->bool\n\n Verifica se a proposição descrita pela string é uma tautologia,\n assumindo que as suas variáveis estão descritas na lista.\n USANDO: a linguagem proposicional de símbolos 0,1,\\&,$|$ e $\\sim$,\n mais as funções imp(bool,bool)->bool e biimp(bool,bool)->bool\n '''\n sai = True\n nvar = len(var)\n for n in range(2**nvar-1,-1,-1):\n l=str(bin(n))[2:].rjust(nvar,'0')\n cont=0\n lista = []\n for v in var:\n lista.append((v,bool(int(l[cont]))))\n cont = cont + 1\n sai = sai and bool(Eval(exp,lista))\n return sai\n\ntautologia('biimp(~q | w, imp(q,w))',['q','w'])"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
oyamad/theory16
|
lemke_howson/mclennan_tourky_py.ipynb
|
mit
|
[
"Implementing McLennan-Tourky in Python\nDaisuke Oyama\nFaculty of Economics, University of Tokyo",
"%matplotlib inline\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport quantecon as qe\nfrom quantecon.compute_fp import _print_after_skip\nfrom quantecon.game_theory import Player, NormalFormGame, lemke_howson\n\ndef compute_fixed_point_ig(f, x_init, error_tol=1e-3, max_iter=50, verbose=1,\n *args, **kwargs):\n _skip = 1\n if verbose:\n start_time = time.time()\n _print_after_skip(_skip, it=None)\n \n x_new = x_init\n iterate = 0\n y_new = f(x_new, *args, **kwargs)\n error = np.max(np.abs(y_new - x_new))\n \n if error <= error_tol or iterate >= max_iter:\n if verbose:\n etime = time.time() - start_time\n _print_after_skip(_skip, iterate, error, etime)\n return x_new\n \n X = np.array([x_new])\n Y = np.array([y_new])\n x_new = Y[0]\n iterate += 1\n \n while True:\n y_new = f(x_new, *args, **kwargs)\n error = np.max(np.abs(y_new - x_new))\n if error <= error_tol or iterate >= max_iter:\n break\n \n X = np.append(X, np.expand_dims(x_new, axis=0), axis=0)\n Y = np.append(Y, np.expand_dims(y_new, axis=0), axis=0)\n \n m = len(X)\n D = np.expand_dims(X, axis=1) - Y\n D *= D\n A = np.add.reduce(np.atleast_3d(D), axis=-1) * (-1)\n B = np.identity(m)\n g = NormalFormGame((Player(A), Player(B)))\n _, rho = lemke_howson(g, init_pivot=m-1)\n \n x_new = rho.dot(Y)\n iterate += 1\n \n if verbose:\n etime = time.time() - start_time\n _print_after_skip(_skip, iterate, error, etime)\n \n return x_new",
"Univariate example",
"# Just a warmup\ncompute_fixed_point_ig(lambda x: 0.5*x, 1)",
"Let us try the logistic function which is well known to generate chaotic behavior.",
"def logistic(x, r):\n return r * x * (1 - x)\n\nx = np.linspace(0, 1, 100)\ny = logistic(x, r=4)\nfig, ax = plt.subplots()\nax.plot(x, y)\nax.plot([0, 1], [0, 1], ':', color='k')\nax.set_aspect(1)\nplt.show()\n\ntol = 1e-5\nx_init = 0.99\ncompute_fixed_point_ig(logistic, x_init, error_tol=tol, r=4)",
"Comare compute_fixed_point from quantecon:",
"qe.compute_fixed_point(logistic, x_init, error_tol=tol, r=4)",
"Example 4.6: 500-variable example",
"def f(x, M, c):\n return -np.arctan(np.dot(M, (x - c)**3)) + c\n\nx_min, x_max = -np.pi/2, np.pi/2\nx = np.linspace(x_min, x_max, 100)\nM = np.abs(np.random.randn())\nc = 0\ny = f(x, M, c)\n\nfig, ax = plt.subplots()\nax.plot(x, y)\nax.set_xlim(x_min, x_max)\nax.set_ylim(x_min, x_max)\nax.plot([x_min, x_max], [x_min, x_max], ':', color='k')\nax.set_aspect(1)\nplt.show()\n\nn = 500\ntol = 1e-5\nmax_iter = 200\n\nnum_trials = 3\n\nfor i in range(num_trials):\n print(\"===== Experiment {} =====\\n\".format(i))\n c = np.random.standard_normal(n)\n M = np.abs(np.random.standard_normal(size=(n, n)))\n x_init = (np.random.rand(n)-1/2)*np.pi + c\n \n print(\"***Imitation game***\")\n x0 = compute_fixed_point_ig(f, x_init, tol, max_iter, M=M, c=c)\n print(\"\")\n \n print(\"***Function iteration***\")\n x1 = qe.compute_fixed_point(f, x_init, tol, max_iter, verbose=1, print_skip=200, M=M, c=c)\n print(\"\")\n\nnum_trials = 3\n\nfor i in range(num_trials):\n print(\"===== Experiment {} =====\\n\".format(i))\n c = np.random.standard_normal(n)\n M = np.random.normal(0, 1/13, size=(n, n))\n x_init = (np.random.rand(n)-1/2)*np.pi + c\n \n print(\"***Imitation game***\")\n x0 = compute_fixed_point_ig(f, x_init, tol, max_iter, M=M, c=c)\n print(\"\")\n \n print(\"***Function iteration***\")\n x1 = qe.compute_fixed_point(f, x_init, tol, max_iter, verbose=1, print_skip=200, M=M, c=c)\n print(\"\")"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
MachinesWhoLearn/lectures
|
2016-2017.Meetings/spring/01.keras_tutorial_duplicate_questions/02.index_processed_data.ipynb
|
mit
|
[
"from __future__ import print_function\nfrom collections import Counter\nfrom tqdm import tqdm\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\n# Load the processed data we created in the previous notebook.\nraw_train_lines = pickle.load(open(\"./data/processed/01.processed_train.pkl\", \"rb\"))\n\n# Print the first example\nraw_train_lines[0]",
"Right now, each data point consists of two strings and an integer label. Computers don't like dealing with strings directly very much, so we need to convert these strings to lists of integers.\nThe way we do this is: we will assign each string a unique integer ID, and then replace all occurences of the string with that integer ID. In this way, we can encode to the model what the various input strings are. This is called \"indexing\" the data.",
"padding_token = \"@@PADDING@@\"\noov_token = \"@@UNKOWN@@\"\nword_indices = {padding_token: 0, oov_token: 1}\n\nfor train_instance in tqdm(raw_train_lines):\n # unpack the tuple into 3 variables\n question_1, question_2, label = train_instance\n\n # iterate over the tokens in each question, and add them to the word\n # indices if they aren't in there already\n for word in question_1:\n if word not in word_indices:\n # by taking the current length of the dictionary\n # to be the index, we can guarantee that each unique word\n # will get a unique index.\n index = len(word_indices)\n word_indices[word] = index\n\n for word in question_2:\n if word not in word_indices:\n # by taking the current length of the dictionary\n # to be the index, we can guarantee that each unique word\n # will get a unique index.\n index = len(word_indices)\n word_indices[word] = index\n\n# The number of unique tokens in our corpus\nlen(word_indices)",
"Now we will convert the raw_train_lines, which are string representations, to integers.",
"indexed_train_lines = []\nfor train_instance in tqdm(raw_train_lines):\n # unpack the tuple into 3 variables\n question_1, question_2, label = train_instance\n \n # for each token in question_1 and question_2, replace it with its index\n indexed_question_1 = [word_indices[word] for word in question_1]\n indexed_question_2 = [word_indices[word] for word in question_2]\n\n indexed_train_lines.append((indexed_question_1, indexed_question_2, label))\n\n# Print the first indexed example, which is the indexed version of \n# the raw example we printed above.\nindexed_train_lines[0]",
"If you compare the output of the first indexed example with the first raw example, you will see that each word has been assigned a unique index and words that are the same across sentences have the same index.\nNow, we'll repackage the lists into a slightly more digestible format for the model. We will have one list of lists (note that each \"question\" now is a list of integers) for all of the question_1's, and one list of lists for all of the question_2's. Then, we'll have a list of labels.\nThese lists should correspond index-wise, so that label[i] should correspond to the correct label of the data point with indexed_question_1s[i] and indexed_question_2s[i].",
"indexed_question_1s = []\nindexed_question_2s = []\nlabels = []\n\nfor indexed_train_line in tqdm(indexed_train_lines):\n # Unpack the tuple into 3 variables\n indexed_question_1, indexed_question_2, label = indexed_train_line\n \n # Now add each of the individual elements of one train instance to their\n # separate lists.\n indexed_question_1s.append(indexed_question_1)\n indexed_question_2s.append(indexed_question_2)\n labels.append(label)\n\n# Print the first element from each of the lists, it should be the same as the\n# first element of the combined dataset above.\nprint(\"First indexed_question_1s: {}\".format(indexed_question_1s[0]))\nprint(\"First indexed_question_2s: {}\".format(indexed_question_2s[0]))\nprint(\"First label: {}\".format(labels[0]))",
"Looks like everything matches up! We'll pickle these indexed instances for use when actually training the model.",
"# Pickle the data lists.\npickle.dump(indexed_question_1s, open(\"./data/processed/02.indexed_question_1s_train.pkl\", \"wb\"))\npickle.dump(indexed_question_2s, open(\"./data/processed/02.indexed_question_2s_train.pkl\", \"wb\"))\npickle.dump(labels, open(\"./data/processed/02.labels_train.pkl\", \"wb\"))\n\n# Also pickle the word indices\npickle.dump(word_indices, open(\"./data/processed/02.word_indices.pkl\", \"wb\"))"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Jonestj1/mbuild
|
docs/tutorials/tutorial_simple_LJ.ipynb
|
mit
|
[
"Point Particles: Basic system initialization\nThis tutorial focuses on the usage of basic system initialization operations, as applied to simple point particle systems (i.e., generic Lennard-Jones particles rather than specific atoms). \nThe code below defines several point particles in a cubic arrangement. Note, the color and radius associated with a Particle name can be set and passed to the visualize command. Colors are passed in hex format (see http://www.color-hex.com/color/bfbfbf).",
"import mbuild as mb\n\nclass MonoLJ(mb.Compound):\n def __init__(self):\n super(MonoLJ, self).__init__()\n lj_particle1 = mb.Particle(name='LJ', pos=[0, 0, 0])\n self.add(lj_particle1)\n\n lj_particle2 = mb.Particle(name='LJ', pos=[1, 0, 0])\n self.add(lj_particle2)\n\n lj_particle3 = mb.Particle(name='LJ', pos=[0, 1, 0])\n self.add(lj_particle3)\n\n lj_particle4 = mb.Particle(name='LJ', pos=[0, 0, 1])\n self.add(lj_particle4)\n\n lj_particle5 = mb.Particle(name='LJ', pos=[1, 0, 1])\n self.add(lj_particle5)\n\n lj_particle6 = mb.Particle(name='LJ', pos=[1, 1, 0])\n self.add(lj_particle6)\n\n lj_particle7 = mb.Particle(name='LJ', pos=[0, 1, 1])\n self.add(lj_particle7)\n \n lj_particle8 = mb.Particle(name='LJ', pos=[1, 1, 1])\n self.add(lj_particle8)\n\n\nmonoLJ = MonoLJ()\ncolors = {'LJ': {'color': 0xbfbfbf, 'radius': 5}}\nmonoLJ.visualize(element_properties=colors)",
"While this would work for defining a single molecule or very small system, this would not be efficient for large systems. Instead, the clone and translate operator can be used to facilitate automation. Below, we simply define a single prototype particle (lj_proto), which we then copy and translate about the system. \nNote, mBuild provides two different translate operations, \"translate\" and \"translate_to\". \"translate\" moves a particle by adding the vector the original position, whereas \"translate_to\" move a particle to the specified location in space. Note, \"translate_to\" maintains the internal spatial relationships of a collection of particles by first shifting the center of mass of the collection of particles to the origin, then translating to the specified location. Since the lj_proto particle in this example starts at the origin, these two commands produce identical behavior.",
"import mbuild as mb\n\nclass MonoLJ(mb.Compound):\n def __init__(self):\n super(MonoLJ, self).__init__()\n lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])\n\n for i in range(0,2):\n for j in range(0,2):\n for k in range(0,2):\n lj_particle = mb.clone(lj_proto)\n pos = [i,j,k]\n mb.translate(lj_particle, pos)\n self.add(lj_particle)\n\nmonoLJ = MonoLJ()\ncolors = {'LJ': {'color': 0xbfbfbf, 'radius': 5}}\nmonoLJ.visualize(element_properties=colors)",
"To simplify this process, mBuild provides several build-in patterning tools, where for example, Grid3DPattern can be used to perform this same operation. Grid3DPattern generates a set of points, from 0 to 1, which get stored in the variable \"pattern\". We need only loop over the points in pattern, cloning, translating, and adding to the system. Note, because Grid3DPattern defines points between 0 and 1, they must be scaled based on the desired system size, i.e., pattern.scale(2).",
"import mbuild as mb\n\nclass MonoLJ(mb.Compound):\n def __init__(self):\n super(MonoLJ, self).__init__()\n lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])\n\n pattern = mb.Grid3DPattern(2, 2, 2)\n pattern.scale(2)\n\n for pos in pattern:\n lj_particle = mb.clone(lj_proto)\n mb.translate(lj_particle, pos)\n self.add(lj_particle)\n\nmonoLJ = MonoLJ()\ncolors = {'LJ': {'color': 0xbfbfbf, 'radius': 5}}\nmonoLJ.visualize(element_properties=colors)",
"Larger systems can therefore be easily generated by toggling the values given to Grid3DPattern. Other patterns can also be generated using the same basic code, such as a 2D grid pattern:",
"import mbuild as mb\n\nclass MonoLJ(mb.Compound):\n def __init__(self):\n super(MonoLJ, self).__init__()\n lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])\n\n pattern = mb.Grid2DPattern(5, 5)\n pattern.scale(5)\n\n for pos in pattern:\n lj_particle = mb.clone(lj_proto)\n mb.translate(lj_particle, pos)\n self.add(lj_particle)\n\nmonoLJ = MonoLJ()\ncolors = {'LJ': {'color': 0xbfbfbf, 'radius': 5}}\nmonoLJ.visualize(element_properties=colors)",
"Points on a sphere can be generated using SpherePattern. Points on a disk using DisKPattern, etc. \nNote to show both simultaneously, we shift the x-coordinate of Particles in the sphere by -1 (i.e., pos[0]-=1.0) and +1 for the disk (i.e, pos[0]+=1.0).",
"import mbuild as mb\n\nclass MonoLJ(mb.Compound):\n def __init__(self):\n super(MonoLJ, self).__init__()\n lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])\n \n pattern_sphere = mb.SpherePattern(200)\n pattern_sphere.scale(0.5)\n \n for pos in pattern_sphere:\n lj_particle = mb.clone(lj_proto)\n pos[0]-=1.0\n mb.translate(lj_particle, pos)\n self.add(lj_particle)\n \n pattern_disk = mb.DiskPattern(200)\n pattern_disk.scale(0.5)\n for pos in pattern_disk:\n lj_particle = mb.clone(lj_proto)\n pos[0]+=1.0\n mb.translate(lj_particle, pos)\n self.add(lj_particle)\n\nmonoLJ = MonoLJ()\ncolors = {'LJ': {'color': 0xbfbfbf, 'radius': 1.0}}\nmonoLJ.visualize(element_properties=colors)\n",
"We can also take advantage of the hierachical nature of mBuild to accomplish the same task more cleanly. Below we create a component that corresponds to the sphere (class SphereLJ), and one that corresponds to the disk (class DiskLJ), and then instantiate and shift each of these individually in the MonoLJ component.",
"import mbuild as mb\n\nclass SphereLJ(mb.Compound):\n def __init__(self):\n super(SphereLJ, self).__init__()\n lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])\n \n pattern_sphere = mb.SpherePattern(200)\n pattern_sphere.scale(0.5)\n \n for pos in pattern_sphere:\n lj_particle = mb.clone(lj_proto)\n mb.translate(lj_particle, pos)\n self.add(lj_particle)\n\nclass DiskLJ(mb.Compound):\n def __init__(self):\n super(DiskLJ, self).__init__()\n lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])\n\n pattern_disk = mb.DiskPattern(200)\n pattern_disk.scale(0.5)\n for pos in pattern_disk:\n lj_particle = mb.clone(lj_proto)\n mb.translate(lj_particle, pos)\n self.add(lj_particle)\n\n\nclass MonoLJ(mb.Compound):\n def __init__(self):\n super(MonoLJ, self).__init__()\n \n sphere = SphereLJ();\n pos=[-1, 0, 0]\n mb.translate(sphere, pos)\n self.add(sphere)\n\n disk = DiskLJ();\n pos=[1, 0, 0]\n mb.translate(disk, pos)\n self.add(disk)\n\n\nmonoLJ = MonoLJ()\ncolors = {'LJ': {'color': 0xbfbfbf, 'radius': 1.0}}\nmonoLJ.visualize(element_properties=colors)\n\n",
"Again, since mBuild is hierarchical, the pattern functions can be used to generate large systems of any arbitary component. For example, we can replicate the SphereLJ component on a regular array.",
"import mbuild as mb\n\nclass SphereLJ(mb.Compound):\n def __init__(self):\n super(SphereLJ, self).__init__()\n lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])\n \n pattern_sphere = mb.SpherePattern(13)\n pattern_sphere.scale(0.5)\n \n for pos in pattern_sphere:\n lj_particle = mb.clone(lj_proto)\n mb.translate(lj_particle, pos)\n self.add(lj_particle)\nclass MonoLJ(mb.Compound):\n def __init__(self):\n super(MonoLJ, self).__init__()\n sphere = SphereLJ();\n\n pattern = mb.Grid3DPattern(3, 3, 3)\n pattern.scale(10)\n\n for pos in pattern:\n lj_sphere = mb.clone(sphere)\n mb.translate_to(lj_sphere, pos)\n #shift the particle so the center of mass\n #of the system is at the origin\n mb.translate(lj_sphere, [-5,-5,-5])\n\n self.add(lj_sphere)\n\nmonoLJ = MonoLJ()\ncolors = {'LJ': {'color': 0xbfbfbf, 'radius': 5}}\nmonoLJ.visualize(element_properties=colors)",
"Several functions exist for rotating compounds. For example, the spin command allows a compound to be rotated, in place, about a specific axis (i.e., it considers the origin for the rotation to lie at the compound's center of mass).",
"import mbuild as mb\nimport random\nfrom numpy import pi\n\n\nclass CubeLJ(mb.Compound):\n def __init__(self):\n super(CubeLJ, self).__init__()\n lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])\n \n pattern = mb.Grid3DPattern(2, 2, 2)\n pattern.scale(1)\n\n for pos in pattern:\n lj_particle = mb.clone(lj_proto)\n mb.translate(lj_particle, pos)\n self.add(lj_particle)\n \nclass MonoLJ(mb.Compound):\n def __init__(self):\n super(MonoLJ, self).__init__()\n cube_proto = CubeLJ();\n\n pattern = mb.Grid3DPattern(3, 3, 3)\n pattern.scale(10)\n rnd = random.Random()\n rnd.seed(123)\n \n for pos in pattern:\n lj_cube = mb.clone(cube_proto)\n mb.translate_to(lj_cube, pos)\n #shift the particle so the center of mass\n #of the system is at the origin\n mb.translate(lj_cube, [-5,-5,-5])\n mb.spin_x(lj_cube, rnd.uniform(0, 2 * pi))\n mb.spin_y(lj_cube, rnd.uniform(0, 2 * pi))\n mb.spin_z(lj_cube, rnd.uniform(0, 2 * pi))\n\n self.add(lj_cube)\n\nmonoLJ = MonoLJ()\ncolors = {'LJ': {'color': 0xbfbfbb, 'radius': 5}}\nmonoLJ.visualize(element_properties=colors)",
"Configurations can be dumped to file using the save command; this takes advantage of MDTraj and supports a range of file formats (see http://MDTraj.org).",
"#save as xyz file\nmonoLJ.save('output.xyz')\n#save as mol2\nmonoLJ.save('output.mol2')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
brian-rose/ClimateModeling_courseware
|
Lectures/Lecture13 -- TransientWarming.ipynb
|
mit
|
[
"ATM 623: Climate Modeling\nBrian E. J. Rose, University at Albany\nLecture 13: Toy models of transient warming\nExploring the rate of climate change\nWarning: content out of date and not maintained\nYou really should be looking at The Climate Laboratory book by Brian Rose, where all the same content (and more!) is kept up to date.\nHere you are likely to find broken links and broken code.\nIn the last set of notes and homework we discussed differences between equilibrium warming and transient warming due to the slowly-evolving effects of ocean heat uptake.\nHere we are going to look at some toy climate models and think about relationships between climate sensitivity, ocean heat uptake, and timescales.\nTwo versions of Radiative-Convective Equilibrium with different climate sensitivities\nWe are going set up two different single-column model with different lapse rate feedbacks.\nWe begin by repeating the same setup we have done several times before, building a single-column RCM with prescribed water vapor profile.",
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xarray as xr\nimport climlab\n\n# Get the water vapor data\n#datapath = \"http://ramadda.atmos.albany.edu:8080/repository/opendap/latest/Top/Users/BrianRose/CESM_runs/\"\ndatapath = \"http://thredds.atmos.albany.edu:8080/thredds/dodsC/cesm/\"\n#endstr = \"/entry.das\"\natm_control = xr.open_dataset( datapath + 'som_1850_f19/som_1850_f19.cam.h0.clim.nc', decode_times=False)\nQglobal = ((atm_control.Q * atm_control.gw)/atm_control.gw.mean(dim='lat')).mean(dim=('lat','lon','time'))\n\n# Make a model on same vertical domain as the GCM\nstate = climlab.column_state(lev=Qglobal.lev, water_depth=2.5)\nsteps_per_year = 90\ndeltat = climlab.constants.seconds_per_year/steps_per_year\nrad = climlab.radiation.RRTMG(name='Radiation',\n state=state, \n specific_humidity=Qglobal.values,\n timestep = deltat,\n albedo = 0.25, # tuned to give reasonable ASR for reference cloud-free model\n )\nconv = climlab.convection.ConvectiveAdjustment(name='Convection',\n state=state,\n adj_lapse_rate=6.5,\n timestep=rad.timestep,)\nrcm_control = rad + conv\nrcm_control.name = 'Radiative-Convective Model'",
"Integrate the control model out to equilibrium.",
"rcm_control.integrate_years(5)\nrcm_control.ASR - rcm_control.OLR",
"Now let's make two copies of this model and keep them in a list:",
"slab_control = []\nslab_control.append(rcm_control)\nslab_control.append(climlab.process_like(rcm_control))",
"We are going to double CO2 in both models and label them as high and low sensitivity. We will build in different feedbacks into our two columns.",
"slab_2x = []\nfor n in range(len(slab_control)):\n rcm_2xCO2 = climlab.process_like(rcm_control)\n rcm_2xCO2.subprocess['Radiation'].absorber_vmr['CO2'] *= 2.\n if n == 0:\n rcm_2xCO2.name = 'High-sensitivity RCM'\n elif n == 1:\n rcm_2xCO2.name = 'Low-sensitivity RCM'\n slab_2x.append(rcm_2xCO2)",
"We will implement a water vapor feedback as we have done before: by recomputing the specific humidity at every timestep using the current temperatures so that the relative humidity stays fixed.\nWe begin by computing the relative humidity profile from the control climate.",
"# actual specific humidity\nq = rcm_control.subprocess['Radiation'].specific_humidity\n# saturation specific humidity (a function of temperature and pressure)\nqsat = climlab.utils.thermo.qsat(rcm_control.Tatm, rcm_control.lev)\n# Relative humidity\nrh = q/qsat",
"Now here is where our two models will differ:\nWe are going to assign them different lapse rate feedbacks.\nSimilar to the exercise in Assignment 3, we are going to assume \n$$ \\Gamma = \\Gamma_{ref} + \\gamma * \\Delta T_s $$\nwhere $\\Gamma_{ref} = 6.5 K/km$ is the critical lapse rate in our control climate, and $\\gamma$ is a number in units of km$^{-1}$ that determines how much the critical lapse rate should change per degree warming.\nWe are going to investigate two different assumptions:\n\ntemperatures decrease more with height under global warming, $\\gamma = +0.3$ km$^{-1}$\ntemperature decrease less with height under global warming, $\\gamma = -0.3$ km$^{-1}$",
"lapse_change_factor = [+0.3, -0.3]\n\nfor n in range(len(slab_2x)):\n rcm_2xCO2 = slab_2x[n]\n print('Integrating ' + rcm_2xCO2.name)\n for m in range(5 * steps_per_year):\n # At every timestep\n # we calculate the new saturation specific humidity for the new temperature\n # and change the water vapor in the radiation model\n # so that relative humidity is always the same\n qsat = climlab.utils.thermo.qsat(rcm_2xCO2.Tatm, rcm_2xCO2.lev)\n rcm_2xCO2.subprocess['Radiation'].specific_humidity[:] = rh * qsat\n # We also adjust the critical lapse rate in our convection model\n DeltaTs = rcm_2xCO2.Ts - rcm_control.Ts\n rcm_2xCO2.subprocess['Convection'].adj_lapse_rate = 6.5 + lapse_change_factor[n]*DeltaTs\n rcm_2xCO2.step_forward()\n print('The TOA imbalance is %0.5f W/m2' %(rcm_2xCO2.ASR-rcm_2xCO2.OLR))\n print('The ECS is %0.3f K' %(rcm_2xCO2.Ts - rcm_control.Ts))\n print('')",
"So Model 0 (in which the lapse rates have gotten larger) is more sensitive than Model 1 (smaller lapse rates). It has a larger system gain, or a more positive overall climate feedback. \nAlthough this is not the main topic of today's lesson, it's still interesting to think about why the lapse rates affect the climate sensivitity in this way...\nTime to reach equilibrium\nThese models reached their new equilibria in just a few years. Why is that? Because they have very little heat capacity:",
"slab_control[0].depth_bounds",
"The \"ocean\" in these models is just a \"slab\" of water 2.5 meter deep.\nThat's all we need to calculate the equilibrium temperatures, but it tells us nothing about the timescales for climate change in the real world.\nFor this, we need a deep ocean that can exchange heat with the surface.\nTransient warming scenarios in column models with ocean heat uptake\nWe are now going to build two new models. The atmosphere (radiative-convective model) will be identical to the two \"slab\" models we just used. But these will be coupled to a column of ocean water 2000 m deep!\nWe will parameterize the ocean heat uptake as a diffusive mixing process. Much like when we discussed the diffusive parameterization for atmospheric heat transport -- we are assuming that ocean dynamics result in a vertical mixing of heat from warm to cold temperatures.\nThe following code will set this up for us.\nWe will make one more assumption, just for the sake of illustration:\nThe more sensitive model (Model 0) is also more efficent at taking up heat into the deep ocean",
"# Create the domains\nocean_bounds = np.arange(0., 2010., 100.)\ndepthax = climlab.Axis(axis_type='depth', bounds=ocean_bounds)\nocean = climlab.domain.domain.Ocean(axes=depthax)\natm = slab_control[0].Tatm.domain\n\n# Model 0 has a higher ocean heat diffusion coefficient -- \n# a more efficent deep ocean heat sink\nocean_diff = [5.E-4, 3.5E-4]\n\n# List of deep ocean models\ndeep = []\nfor n in range(len(slab_control)):\n rcm_control = slab_control[n]\n # Create the state variables\n Tinitial_ocean = rcm_control.Ts * np.ones(ocean.shape)\n Tocean = climlab.Field(Tinitial_ocean.copy(), domain=ocean)\n Tatm = climlab.Field(rcm_control.Tatm.copy(), domain=atm)\n\n # Surface temperature Ts is the upper-most grid box of the ocean\n Ts = Tocean[0:1]\n atm_state = {'Tatm': Tatm, 'Ts': Ts}\n \n rad = climlab.radiation.RRTMG(name='Radiation',\n state=atm_state, \n specific_humidity=Qglobal.values,\n timestep = deltat,\n albedo = 0.25, \n )\n conv = climlab.convection.ConvectiveAdjustment(name='Convection',\n state=atm_state,\n adj_lapse_rate=6.5,\n timestep=rad.timestep,)\n\n model = rad + conv\n if n == 0:\n model.name = 'RCM with high sensitivity and efficient heat uptake'\n elif n == 1:\n model.name = 'RCM with low sensitivity and inefficient heat uptake'\n model.set_state('Tocean', Tocean)\n diff = climlab.dynamics.Diffusion(state={'Tocean': model.Tocean}, \n K=ocean_diff[n], \n diffusion_axis='depth', \n timestep=deltat * 10,)\n model.add_subprocess('Ocean Heat Uptake', diff)\n print('')\n print(model)\n print('')\n deep.append(model)",
"An idealized transient global warming scenario: CO2 increases by 1%/year to doubling.\nNow consider the CO2 increase. In the real world, CO2 has been increasing every year since the beginning of industrialization. Future CO2 concentrations depend on collective choices made by human societies about how much fossil fuel to extract and burn.\nWe will set up a simple scenario. Suppose that CO2 increases by 1% of its existing concentration every year until it reaches 2x its initial concentration. This takes about 70 years.\nAfter 70 years, we assume that all anthropogenic emissions, and CO2 concentration is stabilized at the 2x level.\nWhat happens to the surface temperature?\nHow do the histories of surface and deep ocean temperature compare in our two models?\nWe are going to simulation 400 years of transient global warming in the two models.\n<div class=\"alert alert-success\">\nThis code will take a long time to run! While it's running, we'll think about what the result might look like\n</div>",
"num_years = 400\nyears = np.arange(num_years+1)\n\nTsarray = []\nTocean = []\nnetrad = []\nfor n in range(len(deep)):\n thisTs = np.nan * np.zeros(num_years+1)\n thisnetrad = np.nan * np.zeros(num_years+1)\n thisTocean = np.nan * np.zeros((deep[n].Tocean.size, num_years+1))\n thisTs[0] = deep[n].Ts\n thisnetrad[0] = deep[n].ASR - deep[n].OLR\n thisTocean[:, 0] = deep[n].Tocean\n Tsarray.append(thisTs)\n Tocean.append(thisTocean)\n netrad.append(thisnetrad)\n \nCO2initial = deep[0].subprocess['Radiation'].absorber_vmr['CO2']\nCO2array = np.nan * np.zeros(num_years+1)\nCO2array[0] = CO2initial * 1E6\n\n# Increase CO2 by 1% / year for 70 years (until doubled), and then hold constant\nfor y in range(num_years):\n if deep[0].subprocess['Radiation'].absorber_vmr['CO2'] < 2 * CO2initial:\n for model in deep:\n model.subprocess['Radiation'].absorber_vmr['CO2'] *= 1.01\n CO2array[y+1] = deep[0].subprocess['Radiation'].absorber_vmr['CO2'] * 1E6\n print('Year ', y+1, ', CO2 mixing ratio is ', CO2array[y+1],' ppm.')\n\n for n, model in enumerate(deep):\n for m in range(steps_per_year): \n qsat = climlab.utils.thermo.qsat(model.Tatm, model.lev)\n model.subprocess['Radiation'].specific_humidity[:] = rh * qsat\n DeltaTs = model.Ts - slab_control[n].Ts\n model.subprocess['Convection'].adj_lapse_rate = 6.5 + lapse_change_factor[n]*DeltaTs\n model.step_forward()\n \n Tsarray[n][y+1] = model.Ts\n Tocean[n][:, y+1] = model.Tocean\n netrad[n][y+1] = model.ASR - model.OLR\n\ncolorlist = ['b', 'r']\nco2color = 'k'\n\nnum_axes = len(deep) + 1\nfig, ax = plt.subplots(num_axes, figsize=(12,14))\n\n# Twin the x-axis twice to make independent y-axes.\ntopaxes = [ax[0], ax[0].twinx(), ax[0].twinx()]\n\n# Make some space on the right side for the extra y-axis.\nfig.subplots_adjust(right=0.85)\n\n# Move the last y-axis spine over to the right by 10% of the width of the axes\ntopaxes[-1].spines['right'].set_position(('axes', 1.1))\n\n# To make the border of the right-most axis visible, we need 
to turn the frame\n# on. This hides the other plots, however, so we need to turn its fill off.\ntopaxes[-1].set_frame_on(True)\ntopaxes[-1].patch.set_visible(False)\n\nfor n, model in enumerate(slab_2x):\n topaxes[0].plot(model.Ts*np.ones_like(Tsarray[n]), '--', color=colorlist[n])\ntopaxes[0].set_ylabel('Surface temperature (K)')\ntopaxes[0].set_xlabel('Years')\ntopaxes[0].set_title('Transient warming scenario: 1%/year CO2 increase to doubling, followed by CO2 stabilization', fontsize=14)\ntopaxes[0].legend(['Model 0', 'Model 1'], loc='lower right')\n\ntopaxes[1].plot(CO2array, color=co2color)\ntopaxes[1].set_ylabel('CO2 (ppm)', color=co2color)\nfor tl in topaxes[1].get_yticklabels():\n tl.set_color(co2color)\ntopaxes[1].set_ylim(300., 1000.)\n\ntopaxes[2].set_ylabel('TOA imbalance (W/m2)', color='b')\nfor tl in topaxes[2].get_yticklabels():\n tl.set_color('b')\ntopaxes[2].set_ylim(0, 3)\n\n\ncontour_levels = np.arange(-0.25, 3.25, 0.25)\nfor n in range(len(deep)):\n cax = ax[n+1].contourf(years, deep[n].depth, Tocean[n] - Tsarray[n][0], levels=contour_levels)\n ax[n+1].invert_yaxis()\n ax[n+1].set_ylabel('Depth (m)')\n ax[n+1].set_xlabel('Years')\n\n\nfor n, model in enumerate(deep):\n topaxes[0].plot(Tsarray[n], color=colorlist[n])\n topaxes[2].plot(netrad[n], ':', color=colorlist[n])\n for n in range(len(deep)):\n cax = ax[n+1].contourf(years, deep[n].depth, Tocean[n] - Tsarray[n][0], levels=contour_levels) \ntopaxes[1].plot(CO2array, color=co2color)\n\nfig.subplots_adjust(bottom=0.12)\ncbar_ax = fig.add_axes([0.25, 0.02, 0.5, 0.03])\nfig.colorbar(cax, cax=cbar_ax, orientation='horizontal');",
"Transient vs. equilibrium warming: key points\n\nDuring the first 70 years, the radiative forcing goes up every year\nThe warming in the two models is almost identical during this phase\nAfter year 70, the CO2 levels are stable and so the radiative forcing is no longer increasing\nBoth models continue to warm for hundreds of years\nThe difference between the two models become larger over time\nIn either case, at the time of CO2 doubling the model has achieved only a fraction of its equilibrium surface warming.\nThe difference between the warming at year 70 and the equilibrium warming is called the committed warming. It is the global warming associated with CO2 emissions that are already in the atmosphere.\nHow do we know at year 70 what the committed warming is? Are we on the blue or the red path? At year 70, have we achieved half or only a third of the eventual equilibrium warming?\nIn our example, the more sensitive model also has more efficient ocean heat uptake, so the initial warming looks identical. \nUncertainties in both climate feedback processes and ocean heat uptake processes contribute to uncertainty in the rate of global warming\n\nResults from comprehensive coupled GCMs\nFast and slow components of the warming\nThis figure shows how a comprehensive coupled GCM responds to the same kind of idealized CO2 increase we have looked at above: CO2 increases at 1%/year for 70 years and is then held constant at twice the pre-industrial level (blue curve), or at 4x the pre-industrial level (red curve).\nThe dashed curve show the ocean heat content continuing to rise slowly over thousands of years.\n<img src=\"http://www.atmos.albany.edu/facstaff/brose/classes/ENV415_Spring2018/images/Yoshimori_transientwarming.png\" width=\"400\">\n\nM. Yoshimori, M. Watanabe, H. Shiogama, A. Oka, A. Abe-Ouchi, R. Ohgaito, and Y. Kamae. A review of progress towards understanding the transient global mean surface temperature response to radiative perturbation. Prog. 
Earth Planet. Sic., 3, 2016.\n\nWhat happens if CO2 levels are abruptly returned to pre-industrial levels?\nHere, in a different model, we see the surface temperature change through the historical period (black) followed by a typical future global warming scenario (blue).\nThe red curves show the effects of suddenly returning greenhouse gases to their preindustrial levels at various times in the future.\nThe temperatures very quickly drop, but not back to the preindustrial values. Over time, the build-up of heat content in the deep ocean means that, even if CO2 levels revert to what they used to be, the climate remain quite a bit warmer for thousands of years.\nThis has been referred to as the recalcitrant component of global warming, in analogy with stubborn medical conditions that are difficult to treat.\n<img src=\"http://www.atmos.albany.edu/facstaff/brose/classes/ENV415_Spring2018/images/Held_recalcitrant.png\" width=\"400\">\n\nI. M. Held, M. Winton, K. Takahashi, T. Delworth, F. Zeng, and G. K. Vallis. Probing the fast and slow components of global warming by returning abruptly to preindustrial forcing. J. Climate, 23:2418–2427, 2010.\n\n\nVersion information",
"%load_ext version_information\n%version_information numpy, matplotlib, xarray, climlab",
"Credits\nThe author of this notebook is Brian E. J. Rose, University at Albany.\nIt was developed in support of ATM 623: Climate Modeling, a graduate-level course in the Department of Atmospheric and Envionmental Sciences\nDevelopment of these notes and the climlab software is partially supported by the National Science Foundation under award AGS-1455071 to Brian Rose. Any opinions, findings, conclusions or recommendations expressed here are mine and do not necessarily reflect the views of the National Science Foundation."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
tensorflow/docs-l10n
|
site/en-snapshot/quantum/tutorials/quantum_data.ipynb
|
apache-2.0
|
[
"Copyright 2020 The TensorFlow Authors.",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"Quantum data\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/quantum/tutorials/quantum_data\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/quantum_data.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/quantum/blob/master/docs/tutorials/quantum_data.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/quantum/docs/tutorials/quantum_data.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nBuilding off of the comparisons made in the MNIST tutorial, this tutorial explores the recent work of Huang et al. that shows how different datasets affect performance comparisons. In the work, the authors seek to understand how and when classical machine learning models can learn as well as (or better than) quantum models. The work also showcases an empirical performance separation between classical and quantum machine learning model via a carefully crafted dataset. You will:\n\nPrepare a reduced dimension Fashion-MNIST dataset.\nUse quantum circuits to re-label the dataset and compute Projected Quantum Kernel features (PQK).\nTrain a classical neural network on the re-labeled dataset and compare the performance with a model that has access to the PQK features.\n\nSetup",
"!pip install tensorflow==2.7.0 tensorflow-quantum\n\n# Update package resources to account for version changes.\nimport importlib, pkg_resources\nimportlib.reload(pkg_resources)\n\nimport cirq\nimport sympy\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_quantum as tfq\n\n# visualization tools\n%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom cirq.contrib.svg import SVGCircuit\nnp.random.seed(1234)",
"1. Data preparation\nYou will begin by preparing the fashion-MNIST dataset for running on a quantum computer.\n1.1 Download fashion-MNIST\nThe first step is to get the traditional fashion-mnist dataset. This can be done using the tf.keras.datasets module.",
"(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n\n# Rescale the images from [0,255] to the [0.0,1.0] range.\nx_train, x_test = x_train/255.0, x_test/255.0\n\nprint(\"Number of original training examples:\", len(x_train))\nprint(\"Number of original test examples:\", len(x_test))",
"Filter the dataset to keep just the T-shirts/tops and dresses, remove the other classes. At the same time convert the label, y, to boolean: True for 0 and False for 3.",
"def filter_03(x, y):\n keep = (y == 0) | (y == 3)\n x, y = x[keep], y[keep]\n y = y == 0\n return x,y\n\nx_train, y_train = filter_03(x_train, y_train)\nx_test, y_test = filter_03(x_test, y_test)\n\nprint(\"Number of filtered training examples:\", len(x_train))\nprint(\"Number of filtered test examples:\", len(x_test))\n\nprint(y_train[0])\n\nplt.imshow(x_train[0, :, :])\nplt.colorbar()",
"1.2 Downscale the images\nJust like the MNIST example, you will need to downscale these images in order to be within the boundaries for current quantum computers. This time however you will use a PCA transformation to reduce the dimensions instead of a tf.image.resize operation.",
"def truncate_x(x_train, x_test, n_components=10):\n \"\"\"Perform PCA on image dataset keeping the top `n_components` components.\"\"\"\n n_points_train = tf.gather(tf.shape(x_train), 0)\n n_points_test = tf.gather(tf.shape(x_test), 0)\n\n # Flatten to 1D\n x_train = tf.reshape(x_train, [n_points_train, -1])\n x_test = tf.reshape(x_test, [n_points_test, -1])\n\n # Normalize.\n feature_mean = tf.reduce_mean(x_train, axis=0)\n x_train_normalized = x_train - feature_mean\n x_test_normalized = x_test - feature_mean\n\n # Truncate.\n e_values, e_vectors = tf.linalg.eigh(\n tf.einsum('ji,jk->ik', x_train_normalized, x_train_normalized))\n return tf.einsum('ij,jk->ik', x_train_normalized, e_vectors[:,-n_components:]), \\\n tf.einsum('ij,jk->ik', x_test_normalized, e_vectors[:, -n_components:])\n\nDATASET_DIM = 10\nx_train, x_test = truncate_x(x_train, x_test, n_components=DATASET_DIM)\nprint(f'New datapoint dimension:', len(x_train[0]))",
"The last step is to reduce the size of the dataset to just 1000 training datapoints and 200 testing datapoints.",
"N_TRAIN = 1000\nN_TEST = 200\nx_train, x_test = x_train[:N_TRAIN], x_test[:N_TEST]\ny_train, y_test = y_train[:N_TRAIN], y_test[:N_TEST]\n\nprint(\"New number of training examples:\", len(x_train))\nprint(\"New number of test examples:\", len(x_test))",
"2. Relabeling and computing PQK features\nYou will now prepare a \"stilted\" quantum dataset by incorporating quantum components and re-labeling the truncated fashion-MNIST dataset you've created above. In order to get the most seperation between quantum and classical methods, you will first prepare the PQK features and then relabel outputs based on their values. \n2.1 Quantum encoding and PQK features\nYou will create a new set of features, based on x_train, y_train, x_test and y_test that is defined to be the 1-RDM on all qubits of: \n$V(x_{\\text{train}} / n_{\\text{trotter}}) ^ {n_{\\text{trotter}}} U_{\\text{1qb}} | 0 \\rangle$\nWhere $U_\\text{1qb}$ is a wall of single qubit rotations and $V(\\hat{\\theta}) = e^{-i\\sum_i \\hat{\\theta_i} (X_i X_{i+1} + Y_i Y_{i+1} + Z_i Z_{i+1})}$\nFirst, you can generate the wall of single qubit rotations:",
"def single_qubit_wall(qubits, rotations):\n \"\"\"Prepare a single qubit X,Y,Z rotation wall on `qubits`.\"\"\"\n wall_circuit = cirq.Circuit()\n for i, qubit in enumerate(qubits):\n for j, gate in enumerate([cirq.X, cirq.Y, cirq.Z]):\n wall_circuit.append(gate(qubit) ** rotations[i][j])\n\n return wall_circuit",
"You can quickly verify this works by looking at the circuit:",
"SVGCircuit(single_qubit_wall(\n cirq.GridQubit.rect(1,4), np.random.uniform(size=(4, 3))))",
"Next you can prepare $V(\\hat{\\theta})$ with the help of tfq.util.exponential which can exponentiate any commuting cirq.PauliSum objects:",
"def v_theta(qubits):\n \"\"\"Prepares a circuit that generates V(\\theta).\"\"\"\n ref_paulis = [\n cirq.X(q0) * cirq.X(q1) + \\\n cirq.Y(q0) * cirq.Y(q1) + \\\n cirq.Z(q0) * cirq.Z(q1) for q0, q1 in zip(qubits, qubits[1:])\n ]\n exp_symbols = list(sympy.symbols('ref_0:'+str(len(ref_paulis))))\n return tfq.util.exponential(ref_paulis, exp_symbols), exp_symbols",
"This circuit might be a little bit harder to verify by looking at, but you can still examine a two qubit case to see what is happening:",
"test_circuit, test_symbols = v_theta(cirq.GridQubit.rect(1, 2))\nprint(f'Symbols found in circuit:{test_symbols}')\nSVGCircuit(test_circuit)",
"Now you have all the building blocks you need to put your full encoding circuits together:",
"def prepare_pqk_circuits(qubits, classical_source, n_trotter=10):\n \"\"\"Prepare the pqk feature circuits around a dataset.\"\"\"\n n_qubits = len(qubits)\n n_points = len(classical_source)\n\n # Prepare random single qubit rotation wall.\n random_rots = np.random.uniform(-2, 2, size=(n_qubits, 3))\n initial_U = single_qubit_wall(qubits, random_rots)\n\n # Prepare parametrized V\n V_circuit, symbols = v_theta(qubits)\n exp_circuit = cirq.Circuit(V_circuit for t in range(n_trotter))\n \n # Convert to `tf.Tensor`\n initial_U_tensor = tfq.convert_to_tensor([initial_U])\n initial_U_splat = tf.tile(initial_U_tensor, [n_points])\n\n full_circuits = tfq.layers.AddCircuit()(\n initial_U_splat, append=exp_circuit)\n # Replace placeholders in circuits with values from `classical_source`.\n return tfq.resolve_parameters(\n full_circuits, tf.convert_to_tensor([str(x) for x in symbols]),\n tf.convert_to_tensor(classical_source*(n_qubits/3)/n_trotter))",
"Choose some qubits and prepare the data encoding circuits:",
"qubits = cirq.GridQubit.rect(1, DATASET_DIM + 1)\nq_x_train_circuits = prepare_pqk_circuits(qubits, x_train)\nq_x_test_circuits = prepare_pqk_circuits(qubits, x_test)",
"Next, compute the PQK features based on the 1-RDM of the dataset circuits above and store the results in rdm, a tf.Tensor with shape [n_points, n_qubits, 3]. The entries in rdm[i][j][k] = $\\langle \\psi_i | OP^k_j | \\psi_i \\rangle$ where i indexes over datapoints, j indexes over qubits and k indexes over $\\lbrace \\hat{X}, \\hat{Y}, \\hat{Z} \\rbrace$ .",
"def get_pqk_features(qubits, data_batch):\n \"\"\"Get PQK features based on above construction.\"\"\"\n ops = [[cirq.X(q), cirq.Y(q), cirq.Z(q)] for q in qubits]\n ops_tensor = tf.expand_dims(tf.reshape(tfq.convert_to_tensor(ops), -1), 0)\n batch_dim = tf.gather(tf.shape(data_batch), 0)\n ops_splat = tf.tile(ops_tensor, [batch_dim, 1])\n exp_vals = tfq.layers.Expectation()(data_batch, operators=ops_splat)\n rdm = tf.reshape(exp_vals, [batch_dim, len(qubits), -1])\n return rdm\n\nx_train_pqk = get_pqk_features(qubits, q_x_train_circuits)\nx_test_pqk = get_pqk_features(qubits, q_x_test_circuits)\nprint('New PQK training dataset has shape:', x_train_pqk.shape)\nprint('New PQK testing dataset has shape:', x_test_pqk.shape)",
"2.2 Re-labeling based on PQK features\nNow that you have these quantum generated features in x_train_pqk and x_test_pqk, it is time to re-label the dataset. To achieve maximum seperation between quantum and classical performance you can re-label the dataset based on the spectrum information found in x_train_pqk and x_test_pqk.\nNote: This preparation of your dataset to explicitly maximize the seperation in performance between the classical and quantum models might feel like cheating, but it provides a very important proof of existance for datasets that are hard for classical computers and easy for quantum computers to model. There would be no point in searching for quantum advantage in QML if you couldn't first create something like this to demonstrate advantage.",
"def compute_kernel_matrix(vecs, gamma):\n \"\"\"Computes d[i][j] = e^ -gamma * (vecs[i] - vecs[j]) ** 2 \"\"\"\n scaled_gamma = gamma / (\n tf.cast(tf.gather(tf.shape(vecs), 1), tf.float32) * tf.math.reduce_std(vecs))\n return scaled_gamma * tf.einsum('ijk->ij',(vecs[:,None,:] - vecs) ** 2)\n\ndef get_spectrum(datapoints, gamma=1.0):\n \"\"\"Compute the eigenvalues and eigenvectors of the kernel of datapoints.\"\"\"\n KC_qs = compute_kernel_matrix(datapoints, gamma)\n S, V = tf.linalg.eigh(KC_qs)\n S = tf.math.abs(S)\n return S, V\n\nS_pqk, V_pqk = get_spectrum(\n tf.reshape(tf.concat([x_train_pqk, x_test_pqk], 0), [-1, len(qubits) * 3]))\n\nS_original, V_original = get_spectrum(\n tf.cast(tf.concat([x_train, x_test], 0), tf.float32), gamma=0.005)\n\nprint('Eigenvectors of pqk kernel matrix:', V_pqk)\nprint('Eigenvectors of original kernel matrix:', V_original)",
"Now you have everything you need to re-label the dataset! Now you can consult with the flowchart to better understand how to maximize performance seperation when re-labeling the dataset:\n<img src=\"./images/quantum_data_1.png\">\nIn order to maximize the seperation between quantum and classical models, you will attempt to maximize the geometric difference between the original dataset and the PQK features kernel matrices $g(K_1 || K_2) = \\sqrt{ || \\sqrt{K_2} K_1^{-1} \\sqrt{K_2} || _\\infty}$ using S_pqk, V_pqk and S_original, V_original. A large value of $g$ ensures that you initially move to the right in the flowchart down towards a prediction advantage in the quantum case.\nNote: Computing quantities for $s$ and $d$ are also very useful when looking to better understand performance seperations. In this case ensuring a large $g$ value is enough to see performance seperation.",
"def get_stilted_dataset(S, V, S_2, V_2, lambdav=1.1):\n \"\"\"Prepare new labels that maximize geometric distance between kernels.\"\"\"\n S_diag = tf.linalg.diag(S ** 0.5)\n S_2_diag = tf.linalg.diag(S_2 / (S_2 + lambdav) ** 2)\n scaling = S_diag @ tf.transpose(V) @ \\\n V_2 @ S_2_diag @ tf.transpose(V_2) @ \\\n V @ S_diag\n\n # Generate new lables using the largest eigenvector.\n _, vecs = tf.linalg.eig(scaling)\n new_labels = tf.math.real(\n tf.einsum('ij,j->i', tf.cast(V @ S_diag, tf.complex64), vecs[-1])).numpy()\n # Create new labels and add some small amount of noise.\n final_y = new_labels > np.median(new_labels)\n noisy_y = (final_y ^ (np.random.uniform(size=final_y.shape) > 0.95))\n return noisy_y\n\ny_relabel = get_stilted_dataset(S_pqk, V_pqk, S_original, V_original)\ny_train_new, y_test_new = y_relabel[:N_TRAIN], y_relabel[N_TRAIN:]",
"3. Comparing models\nNow that you have prepared your dataset it is time to compare model performance. You will create two small feedforward neural networks and compare performance when they are given access to the PQK features found in x_train_pqk.\n3.1 Create PQK enhanced model\nUsing standard tf.keras library features you can now create and a train a model on the x_train_pqk and y_train_new datapoints:",
"#docs_infra: no_execute\ndef create_pqk_model():\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(32, activation='sigmoid', input_shape=[len(qubits) * 3,]))\n model.add(tf.keras.layers.Dense(16, activation='sigmoid'))\n model.add(tf.keras.layers.Dense(1))\n return model\n\npqk_model = create_pqk_model()\npqk_model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.003),\n metrics=['accuracy'])\n\npqk_model.summary()\n\n#docs_infra: no_execute\npqk_history = pqk_model.fit(tf.reshape(x_train_pqk, [N_TRAIN, -1]),\n y_train_new,\n batch_size=32,\n epochs=1000,\n verbose=0,\n validation_data=(tf.reshape(x_test_pqk, [N_TEST, -1]), y_test_new))",
"3.2 Create a classical model\nSimilar to the code above you can now also create a classical model that doesn't have access to the PQK features in your stilted dataset. This model can be trained using x_train and y_label_new.",
"#docs_infra: no_execute\ndef create_fair_classical_model():\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(32, activation='sigmoid', input_shape=[DATASET_DIM,]))\n model.add(tf.keras.layers.Dense(16, activation='sigmoid'))\n model.add(tf.keras.layers.Dense(1))\n return model\n\nmodel = create_fair_classical_model()\nmodel.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.03),\n metrics=['accuracy'])\n\nmodel.summary()\n\n#docs_infra: no_execute\nclassical_history = model.fit(x_train,\n y_train_new,\n batch_size=32,\n epochs=1000,\n verbose=0,\n validation_data=(x_test, y_test_new))",
"3.3 Compare performance\nNow that you have trained the two models you can quickly plot the performance gaps in the validation data between the two. Typically both models will achieve > 0.9 accuaracy on the training data. However on the validation data it becomes clear that only the information found in the PQK features is enough to make the model generalize well to unseen instances.",
"#docs_infra: no_execute\nplt.figure(figsize=(10,5))\nplt.plot(classical_history.history['accuracy'], label='accuracy_classical')\nplt.plot(classical_history.history['val_accuracy'], label='val_accuracy_classical')\nplt.plot(pqk_history.history['accuracy'], label='accuracy_quantum')\nplt.plot(pqk_history.history['val_accuracy'], label='val_accuracy_quantum')\nplt.xlabel('Epoch')\nplt.ylabel('Accuracy')\nplt.legend()",
"Success: You have engineered a stilted quantum dataset that can intentionally defeat classical models in a fair (but contrived) setting. Try comparing results using other types of classical models. The next step is to try and see if you can find new and interesting datasets that can defeat classical models without needing to engineer them yourself!\n4. Important conclusions\nThere are several important conclusions you can draw from this and the MNIST experiments:\n\n\nIt's very unlikely that the quantum models of today will beat classical model performance on classical data. Especially on today's classical datasets that can have upwards of a million datapoints.\n\n\nJust because the data might come from a hard to classically simulate quantum circuit, doesn't necessarily make the data hard to learn for a classical model.\n\n\nDatasets (ultimately quantum in nature) that are easy for quantum models to learn and hard for classical models to learn do exist, regardless of model architecture or training algorithms used."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
kit-cel/wt
|
mloc/ch5_Algorithm_Unfolding/Deep_MIMO_Detection_ResNet.ipynb
|
gpl-2.0
|
[
"Deep MIMO Detection with ResNet Features\nThis code is provided as supplementary material of the lecture Machine Learning and Optimization in Communications (MLOC).<br>\nThis code illustrates:\n* Implementation and optimization of the deep MIMO detector DetNet introduced by [1]. This code implements DetNet closely to [1].\n* This code adds a ResNet feature to improve learning of initial layers in a deep architecture\n[1] N. Samuel, T. Diskin and A. Wiesel, \"Deep MIMO detection,\" 2017 IEEE 18th International Workshop on Signal Processing Advances in Wireless Communications (SPAWC), Sapporo, 2017, pp. 1-5.",
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import cm\n%matplotlib inline \n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nprint(\"We are using the following device for learning:\",device)",
"Our goal is to do a binary MIMO detection with a deep neural network as it is proposed in [1]. \nGiven is a $K \\times N$ MIMO channel. The channel matrix $\\mathbf{H}$ is varying for each sample and is known to the detector. After applying AWGN, the detector receives the signal and detects the data bits.\nThe optimal maximum likelihood detector has a too high worst case computational complexity and is therefore impractical for many applications [1]. Existing suboptimal MIMO detectors are e.g. zero forcing, minimum mean squared error or the matched filter. The idea is, that a deep neural network gains better performance than the so far known suboptimal detectors without getting too computationally complex.\nLets first define our MIMO channel and generate data.",
"# MIMO channel config\nK = 30 # Tranmit antennas.\nN = 60 # Receive antennas.\nsnr = 10.0 # SNR of channel in dB for the training.\nsnr_lin = 10.0 ** (snr/10.0) # SNR of channel in lin.\n\n# case VC: Varying channel (randomly generated channel matrix for each new sample).\n\n# Function for training and test data generation according to the FC model of the paper.\ndef data_generation_VC(K, N, size, snr, snr_var, device):\n t = np.random.randint(2, size=(size, K))*2.0-1 # Random binary TX symbols.\n n = np.random.randn(size, N) # Random noise for each RX antenna.\n r = np.empty(shape=[size, N]) # RX dummy vector\n Hr = np.empty(shape=[size, K])\n HH = np.zeros([size, K, K])\n # Randomly vary the SNR\n current_snr = 10.0 ** (np.random.uniform(low=snr-snr_var,high=snr+snr_var, size=size)/10.0)\n for i in range(size):\n H = np.random.randn(N, K)*1.0/np.sqrt(N) # Generate random and normalized channel.\n HH[i,:,:] = H.T.dot(H) # Calc (H^T H) as it is used as an input for the neural net.\n # Calc the output vector with additive noise which depends on the current SNR.\n r[i,:] = (H.dot(t[i,:])+n[i,:]*np.sqrt((HH[i,:,:]).trace()/K)/np.sqrt(current_snr[i]))\n Hr[i,:] = H.T.dot(r[i,:]) # Calc (H^T y) as it is used as an input for the neural net.\n t = torch.Tensor(t).to(device)\n Hr = torch.Tensor(Hr).to(device)\n HH = torch.Tensor(HH).to(device)\n return r, Hr, HH, current_snr, t",
"Configure and build the neural network.\nEach layer has the 4 inputs $\\mathbf{H}^{T}\\mathbf{r}$, $\\mathbf{H}^{T}\\mathbf{H}$, $\\mathbf{t}_k$ and $\\mathbf{v}_k$. The index $k$ denotes the layer. The layers can also be interpreted as iterations of an optimization algorithm [1].\nThe nonlinear operation\n$\n\\begin{align}\n&\\quad z_{k} = \\rho\\left(\\text{W}{1k}\\begin{bmatrix}\n\\mathbf{H}^{T}\\mathbf{r}\\\n\\hat{\\mathbf{t}}{k}\\\n\\mathbf{H}^{T}\\mathbf{H}\\hat{\\text{t}}{k}\\\n\\mathbf{v}{k}\n\\end{bmatrix}+\\mathbf{b}{1k}\\right)\\\n&\\tilde{\\mathbf{t}}{k+1} = \\tilde{\\mathbf{t}}k + \\mathbf{W}{2k}\\mathbf{z}{k}+\\mathbf{b}{2k}\\\n&\\hat{\\mathbf{t}}{k+1} = \\psi{t_{k}}(\\tilde{\\mathbf{t}}{k+1})\\\n&\\hat{\\mathbf{v}}{k+1} = \\hat{\\mathbf{v}}{k} + \\mathbf{W}{3k}\\mathbf{z}{k}+\\mathbf{b}{3k}\\\n&\\qquad\\hat{\\mathbf{t}}_{1} = \\mathbf{0}\\tag{10}\n\\end{align}\n$\nis applied to the input. $\\mathbf{t}_0$ is the received data vector.\nSummarized, each layer does roughly the following steps:\n* Concatenate the inputs.\n* Linear transformation.\n* Apply ReLU function.\n* Calculate $\\mathbf{v}{k+1}$ as a linear trafo of the ReLU output and use ResNet feature.\n* Calculate $\\hat{\\mathbf{t}}{k+1}$ as a linear trafo of the ReLU output which is then fed to the linear soft sign function together with the ResNet feature.",
"# DetNet config\nlayers = 3*K\nv_len = 2*K\nz_len = 8*K\n\n# Training params\ntraining_steps = 10000\nbatch_size_train = 5000\nsnr_var_train = 3.0 # Maximum absolute deviation of the SNR from its mean in logarithmic scale.\n\n# Test params\ntest_steps= 1000\nbatch_size_test = 5000\nsnr_range = np.arange(8, 14, 1)\n\n# Definition of the Loss function\ndef own_loss(t, t_train, t_ZF):\n loss_l = torch.zeros(len(t), 1, device=device) # Denotes the loss in Layer L\n for layer in range(1,len(t)+1):\n loss_l[layer-1] = torch.log(torch.Tensor([layer+1]).to(device))*torch.mean(torch.mean(torch.square(t_train - t[layer-1]),1)/torch.mean(torch.square(t_train - t_ZF),1))\n return loss_l\n \n\n# Definition of the DetNet\nclass DetNet(nn.Module):\n # Build DetNet\n def __init__(self, layers, K, v_len, z_len):\n # Here we define the trainable parameter (Net)\n super(DetNet, self).__init__()\n # We have to use here nn.ModuleList instead of a PythonList. (Otherwise you’ll get an error saying\n # that your model has no parameters, because PyTorch does not see the parameters of the layers stored\n # in a Python list)\n # Furtheremore, we initialize the linear trafo with normailzed weights\n # Linear Traffos W_1l, W_2l, W_3l\n self.linear_trafo_1_l = nn.ModuleList()\n self.linear_trafo_1_l.extend([nn.Linear(3*K + v_len, z_len) for i in range(1, layers+1)])\n for i in range(0, layers):\n nn.init.normal_(self.linear_trafo_1_l[i].weight, std = 0.01)\n nn.init.normal_(self.linear_trafo_1_l[i].bias, std = 0.01)\n self.linear_trafo_2_l = nn.ModuleList()\n self.linear_trafo_2_l.extend([nn.Linear(z_len, K) for i in range(1, layers+1)])\n for i in range(0, layers):\n nn.init.normal_(self.linear_trafo_2_l[i].weight, std = 0.01)\n nn.init.normal_(self.linear_trafo_2_l[i].bias, std = 0.01)\n self.linear_trafo_3_l = nn.ModuleList()\n self.linear_trafo_3_l.extend([nn.Linear(z_len , v_len) for i in range(1, layers+1)])\n for i in range(0, layers):\n nn.init.normal_(self.linear_trafo_3_l[i].weight, 
std = 0.01)\n nn.init.normal_(self.linear_trafo_3_l[i].bias, std = 0.01)\n # For Linear Soft Sign function \n self.kappa_l = nn.ParameterList()\n self.kappa_l.extend([nn.Parameter(torch.rand(1, requires_grad=True, device=device)) for i in range(1, layers+1)])\n # ReLU as activation faunction\n self.relu = nn.ReLU()\n \n def forward(self, Hr, HH):\n v = torch.zeros(len(Hr), v_len, device=device) # Internal Memory (state), that is passed to the next layer\n t = torch.zeros(1, len(Hr), K, device=device) # Transmit vector we want to estimate -> Initalizied as zero\n t_tilde = torch.zeros(len(Hr), K, device=device) # Transmit vector we want to estimate -> Initalizied as zero\n \n # Send Data through the staced DetNet\n for l in range(1,layers+1):\n # Concatenate the 4 inputs Hy, v, x and HH.\n concat = torch.cat((Hr, v, t[-1,:,:], torch.squeeze(torch.matmul(torch.unsqueeze(t[-1,:,:], 1), HH))), 1)\n \n # Apply linear transformation and rectified linear unit (ReLU).\n z = self.relu(self.linear_trafo_1_l[l-1](concat)) \n \n # Generate new t iterate with a final linear trafo.\n t_tilde = t_tilde + self.linear_trafo_2_l[l-1](z)\n t = torch.cat((t, torch.unsqueeze(-1+self.relu(t_tilde+self.kappa_l[l-1])/torch.abs(self.kappa_l[l-1])-self.relu(t_tilde-self.kappa_l[l-1])/torch.abs(self.kappa_l[l-1]),0)), 0)\n \n # Generate new v iterate with a final linear trafo.\n v = v + self.linear_trafo_3_l[l-1](z)\n del concat, z\n torch.cuda.empty_cache()\n del v, t_tilde\n torch.cuda.empty_cache()\n return t[1:,:,:]",
"Training of the network.\nThe loss function takes into account the output of all the layers $\\mathcal{L}$ and is normalized to the loss of a zero forcing equalizer $\\Vert \\mathbf{t}-\\tilde{\\mathbf{t}}\\Vert^{2}$. The loss function is defined as:\n\\begin{align}\n&L(\\mathbf{t};\\hat{\\mathbf{t}}{\\theta}(\\mathbf{H}, \\mathbf{r}))=\\sum{k=l}^{\\mathcal{L}}\\log(l)\\frac{\\Vert \\mathbf{t}^{[train]}-\\hat{\\mathbf{t}}{l}\\Vert ^{2}}{\\Vert \\mathbf{t}^{[train]}-\\hat{\\mathbf{t}}{ZF}\\Vert^{2}},\\tag{13}\\\n\\text{where}:\\\n&\\qquad\\qquad \\hat{\\mathbf{t}}_{ZF}=(\\mathbf{H}^{T}\\mathbf{H})^{-1}\\mathbf{H}^{T}\\mathbf{r}.\n\\tag{14}\n\\end{align}\nis the estimated transmit vector of the ZF decoder",
"model = DetNet(layers, K, v_len, z_len)\nmodel.to(device)\n\n# Adam Optimizer\noptimizer = optim.Adam(model.parameters(), eps=1e-07)\n\nresults = []\nber = []\n\n\nfor i in range(training_steps):\n # Generate a batch of training data.\n r_train, Hr_train, HH_train, snr_train, t_train = data_generation_VC(K, N, batch_size_train, snr, snr_var_train, device)\n \n # Feed the training data to network and update weights. \n t = model(Hr_train, HH_train)\n \n # compute loss\n # Calculate optimal decorrelation decoder to normalize the loss function later on.\n t_ZF = torch.squeeze(torch.matmul(torch.unsqueeze(Hr_train,1),torch.inverse(HH_train)), 1)\n loss = torch.sum(own_loss(t, t_train, t_ZF))\n \n # compute gradients\n loss.backward()\n\n # Adapt weights\n optimizer.step()\n\n # reset gradients\n optimizer.zero_grad()\n \n # Print the current progress of the training (Loss and BER).\n # Pay attention that we are print the Loss/BER on the Trainings-Dataset.\n # For a real evaulation of the model we should test the model on the test dataset\n if i%500 == 0: \n results.append(own_loss(t, t_train, t_ZF).detach().cpu().numpy())\n ber.append(1 - torch.mean(t_train.eq(torch.sign(t)).float(),[1,2]).detach().cpu().numpy())\n print('Train step ', i, ', current loss: ', results[-1][-1], ', current ber: ', ber[-1][-1])\n del r_train, Hr_train, HH_train, snr_train, t_train, t\n torch.cuda.empty_cache()\n \n# Save the trained model\ntorch.save(model.state_dict(), 'Det_Net_10')",
"Visualize",
"fig = plt.figure(1,figsize=(15,15))\nplt.rcParams.update({'font.size': 18})\ncolor=iter(cm.viridis_r(np.linspace(0,1,len(results))))\n# Plot loss.\nplt.subplot(211)\nfor i in range(0, len(results)):\n c=next(color)\n plt.semilogy(range(0, len(results[0])-1), results[i][1:], color=c)\nplt.grid(True)\nplt.title(\"Loss Function of DetNet over Layers and Iterations\")\nplt.xlabel(\"Layer\")\nplt.ylabel(r\"$l(\\mathbf{x};\\hat{\\mathbf{x}}(\\mathbf{H}, \\mathbf{y}))$\")\n# Plot BER.\nplt.subplot(212)\ncolor=iter(cm.viridis_r(np.linspace(0,1,len(results))))\nfor i in range(0, len(results)):\n c=next(color)\n plt.semilogy(range(0, len(results[0])), ber[i], color=c)\nplt.grid(True)\nplt.title(\"BER at 13 dB of DetNet over Layers and Iterations\")\nplt.xlabel(\"Layer\")\nplt.ylabel(\"BER\")\nplt.show()\nfig.savefig(\"DetNet_layers_ResNet.pdf\", format='pdf',bbox_inches='tight')",
"Test the network with different SNR scenarios",
"ber_test = np.zeros((len(snr_range), test_steps))\nber_zf_test = np.zeros((len(snr_range), test_steps))\nfor c, tmp_snr in enumerate(snr_range):\n for i in range(test_steps):\n r_test, Hr_test, HH_test, snr_test, t_test= data_generation_VC(K, N, batch_size_test, tmp_snr, 0.0, device)\n\n # Calculate the BER of the ZF-Equlizer as a reference\n t_DetNet = model(Hr_test, HH_test)\n t_ZF = torch.squeeze(torch.matmul(torch.unsqueeze(Hr_test,1),torch.inverse(HH_test)), 1).to(device)\n ber_test[c][i] = 1 - torch.mean(t_test.eq(torch.sign(t_DetNet[-1,:,:])).float()).detach().cpu().numpy()\n ber_zf_test[c][i] = 1 - torch.mean(t_test.eq(torch.sign(t_ZF)).float()).detach().cpu().numpy()\n print('SNR ', tmp_snr, 'BER = ', np.mean(ber_test[c]), ' ZF = ', np.mean(ber_zf_test[c]))\n\n# Plot BER curve.\nfig = plt.figure(1,figsize=(15,8))\nber_res = np.mean(ber_test, axis=1)\nber_zf_res = np.mean(ber_zf_test, axis=1)\nplt.figure(1,figsize=(15,8))\nplt.rcParams.update({'font.size': 18})\nplt.semilogy(snr_range, ber_res, marker='D')\nplt.semilogy(snr_range, ber_zf_res, marker='o')\nplt.legend(['DetNet', 'ZF'])\nplt.grid(True)\nplt.title(\"BER of DetNet for a Varying Channel (VC)\")\nplt.xlabel(\"SNR\")\nplt.ylabel(\"BER\")\nplt.show()\nfig.savefig(\"DetNet_BER_ResNet.pdf\", format='pdf',bbox_inches='tight')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
igabr/Metis_Projects_Chicago_2017
|
03-Project-McNulty/Cleaning_and_dummies.ipynb
|
mit
|
[
"Cleaning and Dummification Notebook\nThis notebook contains all the steps I took to clean my data and make it viable for all types of classificaton algo's.\nAt the end of this notebook, I load the dataframe into a PostgreSQL database. There isnt really a need for this, but it's a matter of demonstrating the skillset.",
"%matplotlib inline\nimport pickle\n%run helper_functions.py\npd.options.display.max_columns = 1000\nplt.rcParams[\"figure.figsize\"] = (15,10)\nfrom datetime import datetime\n# from sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\n\ndf = unpickle_object(\"non_current_df.pkl\") #loans that are 'complete'\n\ndf.shape",
"I will manipulate my dataset in order for it to be compatible with both GLM's and classification algorithms.\nAs such, I will create dummies and scale all of data. Scaling is incredibly important for KNN and will improve model performance for Logisitc Regression.\nI am not particularly concerned with coeff interpretability as the purpose is the assign a class.\nBy manipulating my data in this way, I will be ready to be used by any ML model.\nI will use 3 in particular:\n\n\nDummy Classifier. This will the global baseline I have to beat.\n\n\nKNN (My most constrained model)\n\n\nLogistic Regression\n\n\nRandom Forests\n\n\nNote that Multi-collinearity does NOT matter for models like DT's and RF's - however, it will matter for Logistic regression. I will first throw all of my data at LGR, and remove variables (multi-collinear) accordingly (this creates a baselinne model for lgr).\nMy project will be concerned with classifying whether an individual will re-pay their loan on time. I will change the 'loan status' feature in this dataset into a binary form of \"Fully Paid\" or \"Late\"",
"df['loan_status'].unique()\n\nmask = df['loan_status'] != \"Fully Paid\"\nrows_to_change = df[mask]\nrows_to_change.loc[:, 'loan_status'] = 'Late'\ndf.update(rows_to_change)\n\ndf['loan_status'].unique() #sweet!\n\ndf.shape # no dimensionality lost\n\nplot_corr_matrix(df)",
"Let's have a quick look at all of our columns, their descriptions and associated datatype.\nPerhaps we can reduce the dimension of our dataset off the bat by dropping columns that are not pertinent",
"no_desc = []\nfor column in df.columns:\n try:\n print(column+\":\",lookup_description(column),\" DataType:\", df[column].dtype)\n print()\n except KeyError:\n no_desc.append(column)\n\ncolumns_to_drop = [\"id\", \"member_id\", \"emp_title\",\"desc\",\"title\",\"out_prncp\",\"out_prncp_inv\",\"total_pymnt\",\"total_pymnt_inv\", \"total_rec_prncp\", \"total_rec_int\", \"total_rec_late_fee\", \"recoveries\", \"collection_recovery_fee\",\"last_pymnt_d\", \"last_pymnt_amnt\",\"next_pymnt_d\", \"last_credit_pull_d\", \"collections_12_mths_ex_med\",\"mths_since_last_major_derog\", \"all_util\", ]\n\n# df.loc[:, [\"loan_amnt\",\"funded_amnt\",\"out_prncp\",\"out_prncp_inv\",\"total_pymnt\",\"total_pymnt_inv\",\"total_rec_prncp\",\"last_credit_pull_d\"]]\n\nno_desc\n\ndf['verification_status_joint'].unique()\n\ndf['total_rev_hi_lim'].unique()\n\ndf['verification_status_joint'].dtype\n\ndf['total_rev_hi_lim'].dtype",
"After going through the list, I have decided to drop 5 columns!\nThese will not be relevant to the task at hand. Although, I could use some natural language processig via NLTK to parse job descriptions and loan descriptions. I will leave this for another day.\nIt is also important to note that I will be dropping variables that hint (i.e. information leakage) at what the final result will be.",
"df.drop(columns_to_drop, axis=1, inplace=True)\n\ndf.shape #just what we expected",
"After reviewing the above, the following columns need to be changed to categorical datatypes from float64.\n- policy_code\nI will first make it an object datatype as later I will write a function that changed all object datatypes into categorical datatypes.",
"df[\"policy_code\"] = df[\"policy_code\"].astype('object')",
"I will have to transform the following columns as they are currently in percentages. I will take the natural log of these columns before proceeding:\n\n\npct_tl_nvr_dlq\n\n\npercent_bc_gt_75\n\n\nThis will ensure better model performance for logistic regression as % may not follow a linear relationship.",
"df['pct_tl_nvr_dlq'] = df['pct_tl_nvr_dlq'].apply(lambda x: x/100)\ndf['percent_bc_gt_75'] = df['percent_bc_gt_75'].apply(lambda x: x/100)",
"My categorical features (those of type Object) have np.nan values, I will change these to something more meaningful like \"Missing Data\".\nI will then create dummies for all of my categorical features. This will lead to an explosion in the number of columns - this will be more computationally expensive, however, this is NOT an explosion in the 'feature space' as our dataframe contains the same amount of information.",
"object_columns = df.select_dtypes(include=['object']).columns\n\nfor c in object_columns:\n df.loc[df[df[c].isnull()].index, c] = \"missing\"",
"So, our dataset is comprised of features which are categorical and features that are numeric. We need to ensure that the object datatypes are converted to categorical datatypes.\nAlso, whether we use a GLM or classifier, we need to ensure that these datatypes stay consistent.\nNOTE: changing columns to categorical datatypes will NOT change how a machine learning model interprets the data. i.e. The algorithm will still think that 5 > 4. As such, one hot encoding (i.e. making dummies) is the only way to ensure that a Machine Learning Model can detect the presence of a particular attribute.\nI will be changing the object datatypes to categorical purely for data consistency within the dataframe.",
"obj_df = df.select_dtypes(include=['object'])\n\nobj_df_cols = obj_df.columns\n\nfor col in obj_df_cols:\n df[col] = df[col].astype(\"category\")\n \ndf.dtypes.unique() #This is what we wanted!\n\ndf.shape\n\ndf.head()\n\nunique_val_dict = {}\nfor col in df.columns:\n if col not in unique_val_dict:\n unique_val_dict[col] = df[col].unique()\n\nunique_val_dict #will use this later when making flask app.\n\ncategory_columns = df.select_dtypes(include=['category']).columns\ndf = pd.get_dummies(df, columns=category_columns, drop_first=True)\n\ndf.shape",
"Let's ensure that all of our missing values in float columns be nan values via the numpy library. I am doing this because Numpy is a highly optimized library.",
"float_columns = df.select_dtypes(include=['float64']).columns\n\nfor c in float_columns:\n df.loc[df[df[c].isnull()].index, c] = np.nan\n\npickle_object(unique_val_dict, \"unique_values_for_columns\")\n\npickle_object(df, \"dummied_dataset\")\n\ndf = unpickle_object(\"dummied_dataset.pkl\")\n\ndf.head()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
farr/emcee
|
docs/_static/notebooks/autocorr.ipynb
|
mit
|
[
"%matplotlib inline\n%config InlineBackend.figure_format = \"retina\"\n\nfrom matplotlib import rcParams\nrcParams[\"savefig.dpi\"] = 100\nrcParams[\"figure.dpi\"] = 100\nrcParams[\"font.size\"] = 20",
"Autocorrelation analysis & convergence\nIn this tutorial, we will discuss a method for convincing yourself that your chains are sufficiently converged.\nThis can be a difficult subject to discuss because it isn't formally possible to guarantee convergence for any but the simplest models, and therefore any argument that you make will be circular and heuristic.\nHowever, some discussion of autocorrelation analysis is (or should be!) a necessary part of any publication using MCMC.\nWith emcee, we follow Goodman & Weare (2010) and recommend using the integrated autocorrelation time to quantify the effects of sampling error on your results.\nThe basic idea is that the samples in your chain are not independent and you must estimate the effective number of independent samples.\nThere are other convergence diagnostics like the Gelman–Rubin statistic (Note: you should not compute the G–R statistic using multiple chains in the same emcee ensemble because the chains are not independent!) but, since the integrated autocorrelation time directly quantifies the Monte Carlo error (and hence the efficiency of the sampler) on any integrals computed using the MCMC results, it is the natural quantity of interest when judging the robustness of an MCMC analysis.\nMonte Carlo error\nThe goal of every MCMC analysis is to evaluate integrals of the form\n$$\n\\mathrm{E}_{p(\\theta)}[f(\\theta)] = \\int f(\\theta)\\,p(\\theta)\\,\\mathrm{d}\\theta \\quad.\n$$\nIf you had some way of generating $N$ samples $\\theta^{(n)}$ from the probability density $p(\\theta)$, then you could approximate this integral as\n$$\n\\mathrm{E}{p(\\theta)}[f(\\theta)] \\approx \\frac{1}{N} \\sum{n=1}^N f(\\theta^{(n)})\n$$\nwhere the sum is over the samples from $p(\\theta)$.\nIf these samples are independent, then the sampling variance on this estimator is\n$$\n\\sigma^2 = \\frac{1}{N}\\,\\mathrm{Var}_{p(\\theta)}[f(\\theta)]\n$$\nand the error decreses as $1/\\sqrt{N}$ as you generate more samples.\nIn the 
case of MCMC, the samples are not independent and the error is actually given by\n$$\n\\sigma^2 = \\frac{\\tau_f}{N}\\,\\mathrm{Var}_{p(\\theta)}[f(\\theta)]\n$$\nwhere $\\tau_f$ is the integrated autocorrelation time for the chain $f(\\theta^{(n)})$.\nIn other words, $N/\\tau_f$ is the effective number of samples and $\\tau_f$ is the number of steps that are needed before the chain \"forgets\" where it started.\nThis means that, if you can estimate $\\tau_f$, then you can estimate the number of samples that you need to generate to reduce the relative error on your target integral to (say) a few percent.\nNote: It is important to remember that $\\tau_f$ depends on the specific function $f(\\theta)$.\nThis means that there isn't just one integrated autocorrelation time for a given Markov chain.\nInstead, you must compute a different $\\tau_f$ for any integral you estimate using the samples.\nComputing autocorrelation times\nThere is a great discussion of methods for autocorrelation estimation in a set of lecture notes by Alan Sokal and the interested reader should take a look at that for a more formal discussion, but I'll include a summary of some of the relevant points here.\nThe integrated autocorrelation time is defined as\n$$\n\\tau_f = \\sum_{\\tau=-\\infty}^\\infty \\rho_f(\\tau)\n$$\nwhere $\\rho_f(\\tau)$ is the normalized autocorrelation function of the stochastic process that generated the chain for $f$.\nYou can estimate $\\rho_f(\\tau)$ using a finite chain ${f_n}_{n=1}^N$ as\n$$\n\\hat{\\rho}_f(\\tau) = \\hat{c}_f(\\tau) / \\hat{c}_f(0)\n$$\nwhere\n$$\n\\hat{c}f(\\tau) = \\frac{1}{N - \\tau} \\sum{n=1}^{N-\\tau} (f_n - \\mu_f)\\,(f_{n+\\tau}-\\mu_f)\n$$\nand\n$$\n\\mu_f = \\frac{1}{N}\\sum_{n=1}^N f_n \\quad.\n$$\n(Note: In practice, it is actually more computationally efficient to compute $\\hat{c}_f(\\tau)$ using a fast Fourier transform than summing it directly.)\nNow, you might expect that you can estimate $\\tau_f$ using this estimator for 
$\\rho_f(\\tau)$ as\n$$\n\\hat{\\tau}f \\stackrel{?}{=} \\sum{\\tau=-N}^{N} \\hat{\\rho}f(\\tau) = 1 + 2\\,\\sum{\\tau=1}^N \\hat{\\rho}_f(\\tau)\n$$\nbut this isn't actually a very good idea.\nAt longer lags, $\\hat{\\rho}_f(\\tau)$ starts to contain more noise than signal and summing all the way out to $N$ will result in a very noisy estimate of $\\tau_f$.\nInstead, we want to estimate $\\tau_f$ as\n$$\n\\hat{\\tau}f (M) = 1 + 2\\,\\sum{\\tau=1}^M \\hat{\\rho}_f(\\tau)\n$$\nfor some $M \\ll N$.\nAs discussed by Sokal in the notes linked above, the introduction of $M$ decreases the variance of the estimator at the cost of some added bias and he suggests choosing the smallest value of $M$ where $M \\ge C\\,\\hat{\\tau}_f (M)$ for a constant $C \\sim 5$.\nSokal says that he finds this procedure to work well for chains longer than $1000\\,\\tau_f$, but the situation is a bit better with emcee because we can use the parallel chains to reduce the variance and we've found that chains longer than about $50\\,\\tau$ are often sufficient.\nA toy problem\nTo demonstrate this method, we'll start by generating a set of \"chains\" from a process with known autocorrelation structure.\nTo generate a large enough dataset, we'll use celerite:",
"import numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(1234)\n\n# Build the celerite model:\nimport celerite\nfrom celerite import terms\nkernel = terms.RealTerm(log_a=0.0, log_c=-6.0)\nkernel += terms.RealTerm(log_a=0.0, log_c=-2.0)\n\n# The true autocorrelation time can be calculated analytically:\ntrue_tau = sum(2*np.exp(t.log_a-t.log_c) for t in kernel.terms)\ntrue_tau /= sum(np.exp(t.log_a) for t in kernel.terms)\ntrue_tau\n\n# Simulate a set of chains:\ngp = celerite.GP(kernel)\nt = np.arange(2000000)\ngp.compute(t)\ny = gp.sample(size=32)\n\n# Let's plot a little segment with a few samples:\nplt.plot(y[:3, :300].T)\nplt.xlim(0, 300)\nplt.xlabel(\"step number\")\nplt.ylabel(\"$f$\")\nplt.title(\"$\\\\tau_\\mathrm{{true}} = {0:.0f}$\".format(true_tau), fontsize=14);",
"Now we'll estimate the empirical autocorrelation function for each of these parallel chains and compare this to the true function.",
"def next_pow_two(n):\n i = 1\n while i < n:\n i = i << 1\n return i\n\ndef autocorr_func_1d(x, norm=True):\n x = np.atleast_1d(x)\n if len(x.shape) != 1:\n raise ValueError(\"invalid dimensions for 1D autocorrelation function\")\n n = next_pow_two(len(x))\n\n # Compute the FFT and then (from that) the auto-correlation function\n f = np.fft.fft(x - np.mean(x), n=2*n)\n acf = np.fft.ifft(f * np.conjugate(f))[:len(x)].real\n acf /= 4*n\n \n # Optionally normalize\n if norm:\n acf /= acf[0]\n\n return acf\n\n# Make plots of ACF estimate for a few different chain lengths\nwindow = int(2*true_tau)\ntau = np.arange(window+1)\nf0 = kernel.get_value(tau) / kernel.get_value(0.0)\n\n# Loop over chain lengths:\nfig, axes = plt.subplots(1, 3, figsize=(12, 4), sharex=True, sharey=True)\nfor n, ax in zip([10, 100, 1000], axes):\n nn = int(true_tau * n)\n ax.plot(tau / true_tau, f0, \"k\", label=\"true\")\n ax.plot(tau / true_tau, autocorr_func_1d(y[0, :nn])[:window+1], label=\"estimate\")\n ax.set_title(r\"$N = {0}\\,\\tau_\\mathrm{{true}}$\".format(n), fontsize=14)\n ax.set_xlabel(r\"$\\tau / \\tau_\\mathrm{true}$\")\n\naxes[0].set_ylabel(r\"$\\rho_f(\\tau)$\")\naxes[-1].set_xlim(0, window / true_tau)\naxes[-1].set_ylim(-0.05, 1.05)\naxes[-1].legend(fontsize=14);",
"This figure shows how the empirical estimate of the normalized autocorrelation function changes as more samples are generated.\nIn each panel, the true autocorrelation function is shown as a black curve and the empricial estimator is shown as a blue line.\nInstead of estimating the autocorrelation function using a single chain, we can assume that each chain is sampled from the same stochastic process and average the estimate over ensemble members to reduce the variance.\nIt turns out that we'll actually do this averaging later in the process below, but it can be useful to show the mean autocorrelation function for visualization purposes.",
"fig, axes = plt.subplots(1, 3, figsize=(12, 4), sharex=True, sharey=True)\nfor n, ax in zip([10, 100, 1000], axes):\n nn = int(true_tau * n)\n ax.plot(tau / true_tau, f0, \"k\", label=\"true\")\n f = np.mean([autocorr_func_1d(y[i, :nn], norm=False)[:window+1]\n for i in range(len(y))], axis=0)\n f /= f[0]\n ax.plot(tau / true_tau, f, label=\"estimate\")\n ax.set_title(r\"$N = {0}\\,\\tau_\\mathrm{{true}}$\".format(n), fontsize=14)\n ax.set_xlabel(r\"$\\tau / \\tau_\\mathrm{true}$\")\n\naxes[0].set_ylabel(r\"$\\rho_f(\\tau)$\")\naxes[-1].set_xlim(0, window / true_tau)\naxes[-1].set_ylim(-0.05, 1.05)\naxes[-1].legend(fontsize=14);",
"Now let's estimate the autocorrelation time using these estimated autocorrelation functions.\nGoodman & Weare (2010) suggested averaging the ensemble over walkers and computing the autocorrelation function of the mean chain to lower the variance of the estimator and that was what was originally implemented in emcee.\nSince then, @fardal on GitHub suggested that other estimators might have lower variance.\nThis is absolutely correct and, instead of the Goodman & Weare method, we now recommend computing the autocorrelation time for each walker (it's actually possible to still use the ensemble to choose the appropriate window) and then average these estimates.\nHere is an implementation of each of these methods and a plot showing the convergence as a function of the chain length:",
"# Automated windowing procedure following Sokal (1989)\ndef auto_window(taus, c):\n m = np.arange(len(taus)) < c * taus\n if np.any(m):\n return np.argmin(m)\n return len(taus) - 1\n\n# Following the suggestion from Goodman & Weare (2010)\ndef autocorr_gw2010(y, c=5.0):\n f = autocorr_func_1d(np.mean(y, axis=0))\n taus = 2.0*np.cumsum(f)-1.0\n window = auto_window(taus, c)\n return taus[window]\n\ndef autocorr_new(y, c=5.0):\n f = np.zeros(y.shape[1])\n for yy in y:\n f += autocorr_func_1d(yy)\n f /= len(y)\n taus = 2.0*np.cumsum(f)-1.0\n window = auto_window(taus, c)\n return taus[window]\n\n# Compute the estimators for a few different chain lengths\nN = np.exp(np.linspace(np.log(100), np.log(y.shape[1]), 10)).astype(int)\ngw2010 = np.empty(len(N))\nnew = np.empty(len(N))\nfor i, n in enumerate(N):\n gw2010[i] = autocorr_gw2010(y[:, :n])\n new[i] = autocorr_new(y[:, :n])\n\n# Plot the comparisons\nplt.loglog(N, gw2010, \"o-\", label=\"G\\&W 2010\")\nplt.loglog(N, new, \"o-\", label=\"new\")\nylim = plt.gca().get_ylim()\nplt.plot(N, N / 50.0, \"--k\", label=r\"$\\tau = N/50$\")\nplt.axhline(true_tau, color=\"k\", label=\"truth\", zorder=-100)\nplt.ylim(ylim)\nplt.xlabel(\"number of samples, $N$\")\nplt.ylabel(r\"$\\tau$ estimates\")\nplt.legend(fontsize=14);",
"In this figure, the true autocorrelation time is shown as a horizontal line and it should be clear that both estimators give outrageous results for the short chains.\nIt should also be clear that the new algorithm has lower variance than the original method based on Goodman & Weare.\nIn fact, even for moderately long chains, the old method can give dangerously over-confident estimates.\nFor comparison, we have also plotted the $\\tau = N/50$ line to show that, once the estimate crosses that line, The estimates are starting to get more reasonable.\nThis suggests that you probably shouldn't trust any estimate of $\\tau$ unless you have more than $F\\times\\tau$ samples for some $F \\ge 50$.\nLarger values of $F$ will be more conservative, but they will also (obviously) require longer chains.\nA more realistic example\nNow, let's run an actual Markov chain and test these methods using those samples.\nSo that the sampling isn't completely trivial, we'll sample a multimodal density in three dimensions.",
"import emcee\n\ndef log_prob(p):\n return np.logaddexp(-0.5*np.sum(p**2), -0.5*np.sum((p-4.0)**2))\n\nsampler = emcee.EnsembleSampler(32, 3, log_prob)\nsampler.run_mcmc(np.concatenate((np.random.randn(16, 3),\n 4.0+np.random.randn(16, 3)), axis=0),\n 500000, progress=True);",
"Here's the marginalized density in the first dimension.",
"chain = sampler.get_chain()[:, :, 0].T\n\nplt.hist(chain.flatten(), 100)\nplt.gca().set_yticks([])\nplt.xlabel(r\"$\\theta$\")\nplt.ylabel(r\"$p(\\theta)$\");",
"And here's the comparison plot showing how the autocorrelation time estimates converge with longer chains.",
"# Compute the estimators for a few different chain lengths\nN = np.exp(np.linspace(np.log(100), np.log(chain.shape[1]), 10)).astype(int)\ngw2010 = np.empty(len(N))\nnew = np.empty(len(N))\nfor i, n in enumerate(N):\n gw2010[i] = autocorr_gw2010(chain[:, :n])\n new[i] = autocorr_new(chain[:, :n])\n\n# Plot the comparisons\nplt.loglog(N, gw2010, \"o-\", label=\"G\\&W 2010\")\nplt.loglog(N, new, \"o-\", label=\"new\")\nylim = plt.gca().get_ylim()\nplt.plot(N, N / 50.0, \"--k\", label=r\"$\\tau = N/50$\")\nplt.ylim(ylim)\nplt.xlabel(\"number of samples, $N$\")\nplt.ylabel(r\"$\\tau$ estimates\")\nplt.legend(fontsize=14);",
"As before, the short chains give absurd estimates of $\\tau$, but the new method converges faster and with lower variance than the old method.\nThe $\\tau = N/50$ line is also included as above as an indication of where we might start trusting the estimates.\nWhat about shorter chains?\nSometimes it just might not be possible to run chains that are long enough to get a reliable estimate of $\\tau$ using the methods described above.\nIn these cases, you might be able to get an estimate using parametric models for the autocorrelation.\nOne example would be to fit an autoregressive model to the chain and using that to estimate the autocorrelation time.\nAs an example, we'll use celerite to fit for the maximum likelihood autocorrelation function and then compute an estimate of $\\tau$ based on that model.\nThe celerite model that we're using is equivalent to a second-order ARMA model and it appears to be a good choice for this example, but we're not going to promise anything here about the general applicability and we caution care whenever estimating autocorrelation times using short chains.",
"from scipy.optimize import minimize\n\ndef autocorr_ml(y, thin=1, c=5.0):\n # Compute the initial estimate of tau using the standard method\n init = autocorr_new(y, c=c)\n z = y[:, ::thin]\n N = z.shape[1]\n \n # Build the GP model\n tau = max(1.0, init/thin)\n kernel = terms.RealTerm(np.log(0.9*np.var(z)), -np.log(tau),\n bounds=[(-5.0, 5.0), (-np.log(N), 0.0)])\n kernel += terms.RealTerm(np.log(0.1*np.var(z)), -np.log(0.5*tau),\n bounds=[(-5.0, 5.0), (-np.log(N), 0.0)])\n gp = celerite.GP(kernel, mean=np.mean(z))\n gp.compute(np.arange(z.shape[1]))\n\n # Define the objective\n def nll(p):\n # Update the GP model\n gp.set_parameter_vector(p)\n \n # Loop over the chains and compute likelihoods\n v, g = zip(*(\n gp.grad_log_likelihood(z0, quiet=True)\n for z0 in z\n ))\n \n # Combine the datasets\n return -np.sum(v), -np.sum(g, axis=0)\n\n # Optimize the model\n p0 = gp.get_parameter_vector()\n bounds = gp.get_parameter_bounds()\n soln = minimize(nll, p0, jac=True, bounds=bounds)\n gp.set_parameter_vector(soln.x)\n \n # Compute the maximum likelihood tau\n a, c = kernel.coefficients[:2]\n tau = thin * 2*np.sum(a / c) / np.sum(a)\n return tau\n\n# Calculate the estimate for a set of different chain lengths\nml = np.empty(len(N))\nml[:] = np.nan\nfor j, n in enumerate(N[1:8]):\n i = j+1\n thin = max(1, int(0.05*new[i]))\n ml[i] = autocorr_ml(chain[:, :n], thin=thin)\n\n# Plot the comparisons\nplt.loglog(N, gw2010, \"o-\", label=\"G\\&W 2010\")\nplt.loglog(N, new, \"o-\", label=\"new\")\nplt.loglog(N, ml, \"o-\", label=\"ML\")\nylim = plt.gca().get_ylim()\nplt.plot(N, N / 50.0, \"--k\", label=r\"$\\tau = N/50$\")\nplt.ylim(ylim)\nplt.xlabel(\"number of samples, $N$\")\nplt.ylabel(r\"$\\tau$ estimates\")\nplt.legend(fontsize=14);",
"This figure is the same as the previous one, but we've added the maximum likelihood estimates for $\\tau$ in green.\nIn this case, this estimate seems to be robust even for very short chains with $N \\sim \\tau$."
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
chengjun/iching
|
iching.ipynb
|
mit
|
[
"蓍草卜卦\n大衍之数五十,其用四十有九。分而为二以象两,挂一以象三,揲之以四以象四时,归奇于扐以象闰。五岁再闰,故再扐而后挂。天一,地二;天三,地四;天五,地六;天七,地八;天九,地十。天数五,地数五。五位相得而各有合,天数二十有五,地数三十,凡天地之数五十有五,此所以成变化而行鬼神也。乾之策二百一十有六,坤之策百四十有四,凡三百六十,当期之日。二篇之策,万有一千五百二十,当万物之数也。是故四营而成《易》,十有八变而成卦,八卦而小成。引而伸之,触类而长之,天下之能事毕矣。显道神德行,是故可与酬酢,可与祐神矣。子曰:“知变化之道者,其知神之所为乎。”\n大衍之数五十,存一不用,构造天地人三者,历经三变,第一次的余数是5或9,第二次的是4或8,第三次的是4或8,剩下的数量除以4就是结果。即为一爻,算六爻要一个小时。古人构造随机数的方法太费时间啦。用Python写个程序来搞吧!",
"import random\n\ndef sepSkyEarth(data):\n sky = random.randint(1, data-2) \n earth = data - sky\n earth -= 1\n return sky , earth\n\n\ndef getRemainder(num):\n rm = num % 4\n if rm == 0:\n rm = 4\n return rm\n\ndef getChange(data):\n sky, earth = sepSkyEarth(data)\n skyRemainder = getRemainder(sky)\n earthRemainder = getRemainder(earth)\n change = skyRemainder + earthRemainder + 1\n data = data - change\n return sky, earth, change, data\n\ndef getYao(data):\n sky, earth, firstChange, data = getChange(data)\n sky, earth, secondChange, data = getChange(data)\n sky, earth, thirdChange, data = getChange(data)\n yao = data/4\n return yao, firstChange, secondChange, thirdChange\n\ndef sixYao():\n yao1 = getYao(data = 50 - 1)[0]\n yao2 = getYao(data = 50 - 1)[0]\n yao3 = getYao(data = 50 - 1)[0]\n yao4 = getYao(data = 50 - 1)[0]\n yao5 = getYao(data = 50 - 1)[0]\n yao6 = getYao(data = 50 - 1)[0]\n return[yao1, yao2, yao3, yao4, yao5, yao6]\n\ndef fixYao(num):\n if num == 6 or num == 9:\n print \"there is a changing predict! 
Also run changePredict()\"\n return num % 2\n \n\ndef changeYao(num):\n if num == 6:\n num = 1\n elif num == 9:\n num = 2\n num = num % 2\n return(num)\n\ndef fixPredict(pred):\n fixprd = [fixYao(i) for i in pred]\n fixprd = list2str(fixprd)\n return fixprd\n\ndef list2str(l):\n si = ''\n for i in l:\n si = si + str(i)\n return si\n\ndef changePredict(pred):\n changeprd = [changeYao(i) for i in pred]\n changeprd = list2str(changeprd)\n return changeprd\n\ndef getPredict():\n pred = sixYao()\n fixPred = fixPredict(pred)\n if 6 in pred or 9 in pred:\n changePred = changePredict(pred)\n else:\n changePred = None\n return fixPred, changePred \n\ndef interpretPredict(now, future):\n dt = {'111111':'乾','011111':'夬','000000':'坤','010001':'屯','100010':'蒙','010111':'需','111010':'讼','000010':'师',\n'010000':'比','110111':'小畜','111011':'履','000111':'泰','111000':'否','111101':'同人','101111':'大有','000100':'谦',\n'001000':'豫','011001':'随','100110':'蛊','000011':'临','110000':'观','101001':'噬嗑','100101':'贲','100000':'剥',\n'000001':'复','111001':'无妄','100111':'大畜','100001':'颐','011110':'大过','010010':'坎','101101':'离','011100':'咸',\n'001110':'恒','111100':'遁','001111':'大壮','101000':'晋','000101':'明夷','110101':'家人','101011':'睽','010100':'蹇',\n'001010':'解','100011':'损','110001':'益','111110':'姤','011000':'萃','000110':'升','011010':'困','010110':'井',\n'011101':'革','101110':'鼎','001001':'震','100100':'艮','110100':'渐','001011':'归妹','001101':'丰','101100':'旅',\n'110110':'巽','011011':'兑','110010':'涣','010011':'节','110011':'中孚','001100':'小过','010101':'既济','101010':'未济'}\n if future:\n name = dt[now] + ' & ' + dt[future]\n else:\n name = dt[now]\n print name\n \n\ndef plotTransitionRemainder(N, w):\n import matplotlib.cm as cm\n import matplotlib.pyplot as plt\n from collections import defaultdict\n \n changes = {}\n for i in range(N):\n sky, earth, firstChange, data = getChange(data = 50 -1)\n sky, earth, secondChange, data = getChange(data)\n sky, earth, thirdChange, data = getChange(data)\n 
changes[i]=[firstChange, secondChange, thirdChange, data/4]\n\n ichanges = changes.values()\n\n firstTransition = defaultdict(int)\n for i in ichanges:\n firstTransition[i[0], i[1]]+=1\n\n secondTransition = defaultdict(int)\n for i in ichanges:\n secondTransition[i[1], i[2]]+=1\n\n thirdTransition = defaultdict(int)\n for i in ichanges:\n thirdTransition[i[2], i[3]]+=1 \n \n cmap = cm.get_cmap('Accent_r', len(ichanges))\n\n for k, v in firstTransition.iteritems(): \n plt.plot([1, 2], k, linewidth = v*w/N)\n for k, v in secondTransition.iteritems(): \n plt.plot([2, 3], k, linewidth = v*w/N)\n for k, v in thirdTransition.iteritems(): \n plt.plot([3, 4], k, linewidth = v*w/N)\n plt.xlabel(u'Time')\n plt.ylabel(u'Changes')\n",
"大衍之数五十,存一不用",
"data = 50 - 1",
"一变",
"sky, earth, firstChange, data = getChange(data)\nprint sky, '\\n', earth, '\\n',firstChange, '\\n', data",
"二变",
"sky, earth, secondChange, data = getChange(data)\nprint sky, '\\n', earth, '\\n',secondChange, '\\n', data",
"三变",
"sky, earth, thirdChange, data = getChange(data)\nprint sky, '\\n', earth, '\\n',thirdChange, '\\n', data",
"得到六爻及变卦",
"getPredict()\n\ngetPredict()\n\ngetPredict()",
"得到卦名",
"fixPred, changePred = getPredict()\ninterpretPredict(fixPred, changePred )",
"添加卦辞",
"\n#http://baike.fututa.com/zhouyi64gua/\n\nimport urllib2\nfrom bs4 import BeautifulSoup\nimport os\n\n# set work directory\nos.chdir('/Users/chengjun/github/iching/')\n\ndt = {'111111':'乾','011111':'夬','000000':'坤','010001':'屯','100010':'蒙','010111':'需','111010':'讼','000010':'师',\n '010000':'比','110111':'小畜','111011':'履','000111':'泰','111000':'否','111101':'同人','10111':'大有','000100':'谦',\n '001000':'豫','011001':'随','100110':'蛊','000011':'临','110000':'观','101001':'噬嗑','100101':'贲','100000':'剥',\n '000001':'复','111001':'无妄','100111':'大畜','100001':'颐','011110':'大过','010010':'坎','101101':'离','011100':'咸',\n '001110':'恒','111100':'遁','001111':'大壮','101000':'晋','000101':'明夷','110101':'家人','101011':'睽','010100':'蹇',\n '001010':'解','100011':'损','110001':'益','111110':'姤','011000':'萃','000110':'升','011010':'困','010110':'井',\n '011101':'革','101110':'鼎','001001':'震','100100':'艮','110100':'渐','001011':'归妹','001101':'丰','101100':'旅',\n '110110':'巽','011011':'兑','110010':'涣','010011':'节','110011':'中孚','001100':'小过','010101':'既济','101010':'未济'}\n\n\ndr = {}\nfor i, j in dt.iteritems():\n dr[unicode(j, 'utf8')]= i\n\nurl = \"http://baike.fututa.com/zhouyi64gua/\"\ncontent = urllib2.urlopen(url).read() #获取网页的html文本\nsoup = BeautifulSoup(content) \narticles = soup.find_all('div', {'class', 'gualist'})[0].find_all('a')\nlinks = [i['href'] for i in articles]\n\nlinks[:2]\n\ndtext = {}\n\nfrom time import sleep\n\nnum = 0\nfor j in links:\n sleep(0.1)\n num += 1\n ghtml = urllib2.urlopen(j).read() #获取网页的html文本\n print j, num\n gua = BeautifulSoup(ghtml, from_encoding = 'gb18030') \n guaName = gua.title.text.split('_')[1].split(u'卦')[0]\n guaId = dr[guaName]\n guawen = gua.find_all('div', {'class', 'gua_wen'})\n guaText = []\n for i in guawen:\n guaText.append(i.get_text() + '\\n\\n')\n guaText = ''.join(guaText)\n dtext[guaId] = guaText\n\ndtextu = {}\nfor i, j in dtext.iteritems():\n dtextu[i]= j.encode('utf-8')\n\ndtext.values()[0]\n\nimport json\nwith 
open(\"/Users/chengjun/github/iching/package_data.dat\",'w') as outfile:\n json.dump(dtextu, outfile, ensure_ascii=False) #, encoding = 'utf-8')\n\ndat = json.load(open('package_data.dat'), encoding='utf-8')\n\nprint dat.values()[1]\n\nnow, future = getPredict()\n\ndef ichingText(k):\n import json\n dat = json.load(open('iching/package_data.dat'))\n print dat[k]\n\nichingText(future)\n\n%matplotlib inline\nplotTransitionRemainder(10000, w = 50)\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\nfig = plt.figure(figsize=(15, 10),facecolor='white')\nplt.subplot(2, 2, 1)\nplotTransitionRemainder(1000, w = 50)\nplt.subplot(2, 2, 2)\nplotTransitionRemainder(1000, w = 50)\nplt.subplot(2, 2, 3)\nplotTransitionRemainder(1000, w = 50)\nplt.subplot(2, 2, 4)\nplotTransitionRemainder(1000, w = 50)\n\ndt = {'111111':u'乾','011111':u'夬','000000':u'坤','010001':u'屯','100010':u'蒙','010111':u'需','111010':u'讼','000010':'师',\n'010000':u'比','110111':u'小畜','111011':u'履','000111':u'泰','111000':u'否','111101':u'同人','101111':u'大有','000100':u'谦',\n'001000':u'豫','011001':u'随','100110':u'蛊','000011':u'临','110000':u'观','101001':u'噬嗑','100101':u'贲','100000':'u剥',\n'000001':u'复','111001':u'无妄','100111':u'大畜','100001':u'颐','011110':u'大过','010010':u'坎','101101':u'离','011100':u'咸',\n'001110':u'恒','111100':u'遁','001111':u'大壮','101000':u'晋','000101':u'明夷','110101':u'家人','101011':u'睽','010100':u'蹇',\n'001010':u'解','100011':u'损','110001':u'益','111110':u'姤','011000':u'萃','000110':u'升','011010':u'困','010110':u'井',\n'011101':u'革','101110':u'鼎','001001':u'震','100100':u'艮','110100':u'渐','001011':u'归妹','001101':u'丰','101100':u'旅',\n'110110':u'巽','011011':u'兑','110010':u'涣','010011':u'节','110011':u'中孚','001100':u'小过','010101':u'既济','101010':u'未济'\n }\n\nfor i in dt.values():\n print i\n\ndtu = {}\nfor i, j in dt.iteritems():\n dtu[i] = unicode(j, 'utf-8')\n \n\ndef ichingDate(d):\n import random\n random.seed(d)\n try:\n print 'Your birthday & your prediction time:', str(d)\n except:\n print('Your 
birthday & your prediction time:', str(d))\n "
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
basp/aya
|
.ipynb_checkpoints/noise-checkpoint.ipynb
|
mit
|
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"plotting images\nWe can easily plot images by using the imshow function. Conveniently, an image can just be a 2-dimensional numpy array of floats in the range of 0 to 1. We can easily create such an array with the ranf function. Below we create a 128x128 image of random values and display that using the ocean colormap from the pyplot.cm module.",
"img = np.random.ranf((128,128))\nplt.imshow(img, cmap=plt.cm.ocean)",
"value noise\nThe image above is an example of random noise. Although useful we need something a bit more controllable. For this we need a function that produces value noise instead of just random noise. Essentially this is just a function that given a vector of inputs, produces an float in the range of 0 to 1. For now we'll focus on one-dimensional noise but we'll deal with noise in more dimensions later.\nThere's various ways to produce value noise but we're gonna use a method that utilizes a seeded vector of random values. For no particular reason we're going to call this vector r and we'll use a numpy array to hold it. For convenience we will also write a function seed that we can use to initialize the table.",
"def seed(n, shape=(8,)):\n global r\n np.random.seed(n)\n r = np.random.ranf(shape)",
"We can visualize r by plotting it.",
"seed(0)\nx = np.arange(0, len(r), 1)\nplt.plot(x, r[x], 'bo')\nplt.axis('tight')",
"We can use this table to define a function noise that given an integer value x will return a value in the range of 0 to 1. The r vector only has a limited amount of values. In order to make it work for any value of x we can perform a modulo operation with the length of the r vector.",
"def noise(x):\n x = int(x % len(r))\n return r[x]",
"By default we only have 8 values in the r vector so if we plot the noise function over a range 0 to 16 we can see that it repeats itself halfway.",
"x = np.arange(0, 16, 1)\ny = [noise(x) for x in x]\nplt.plot(x, y, 'bo')",
"Our noise function works for integer values of x ideally it works for real values of x as well. We can do this by interpolation.",
"def noise(x):\n xi = int(x)\n x0 = xi % len(r)\n x1 = 0 if x0 == (len(r) - 1) else (x0 + 1)\n v0, v1 = r[x0], r[x1]\n t = x - xi\n return np.interp(t, [0, 1], [v0, v1])\n\nx = np.arange(0, 16, 1/10)\ny = [noise(x) for x in x]\nplt.plot(x, y)",
"Now let's create some noise in two-dimensions. We'll implement a naive solution at first and revisit it later. Let's start by re-seeding the r table in two dimensions.",
"seed(0, (4,4))",
"And now we redefine our noise function so it will interpolate between the x and y value as well.",
"def noise(x, y):\n xi, yi = int(x), int(y)\n tx, ty = x - xi, y - yi\n x0 = xi % len(r)\n x1 = 0 if x0 == (len(r) - 1) else x0 + 1\n y0 = yi % len(r[0])\n y1 = 0 if y0 == (len(r) - 1) else y0 + 1\n c00 = r[x0][y0]\n c01 = r[x1][y0]\n c10 = r[x0][y1]\n c11 = r[x1][y1]\n n0 = np.interp(tx, [0, 1], [c00, c01])\n n1 = np.interp(tx, [0, 1], [c10, c11])\n return np.interp(ty, [0, 1], [n0, n1])\n\nimg = np.array([[noise(x, y) for x in range(16)] for y in range(16)])\nplt.imshow(img, plt.cm.ocean)",
"We can see clearly see a pattern here. That's because the dimensions of our r table are very small, only 4 in each axis. We'll get much better noise if we make it bigger.",
"seed(0, (16,16))\nimg = np.array([[noise(x, y) for x in range(32)] for y in range(32)])\nplt.imshow(img, cmap=plt.cm.ocean)",
"There's still a pattern but that is to be expected with our still reasonably small r vector. It does like a lot better already but another thing that is noticeable is that it appears to be a bit blocky. This is because of the linear interpolation we're using to calculate the final noise values. We can fix this by introducing the smoothstep function.\nsmoothstep",
"def smoothstep(t):\n return 3 * t**2 - 2 * t**3\n\nx = np.linspace(0, 1, 1000)\ny = smoothstep(x)\nplt.plot(x, y)",
"And now we have to adjust our noise function to incorporate the smoothstep function.",
"def noise(x, y, c = lambda t: t):\n xi, yi = int(x), int(y)\n tx, ty = x - xi, y - yi\n x0 = xi % len(r)\n x1 = 0 if x0 == (len(r) - 1) else x0 + 1\n y0 = yi % len(r[0])\n y1 = 0 if y0 == (len(r) - 1) else y0 + 1\n c00 = r[x0][y0]\n c01 = r[x1][y0]\n c10 = r[x0][y1]\n c11 = r[x1][y1]\n sx = c(tx)\n sy = c(ty)\n n0 = np.interp(sx, [0, 1], [c00, c01])\n n1 = np.interp(sx, [0, 1], [c10, c11])\n return np.interp(sy, [0, 1], [n0, n1])",
"Note that instead of interpolating on tx and ty directly we are using the smoothstep-ed values of sx and sy instead.",
"def gen_im(size=(64,64), c = lambda x: x):\n w, h = size\n pixels = [[noise(x, y, c) for x in range(w)] for y in range(h)]\n return np.array(pixels)\n\nseed(1, (32, 32))\nimg = gen_im(c = smoothstep)\nplt.imshow(img, cmap=plt.cm.ocean)\n\nMAX_VERTICES = 32\nMAX_VERTICES_MASK = MAX_VERTICES - 1\n"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
edosedgar/xs-pkg
|
deep_learning/hw3/homework03_part3a_gan_advanced.ipynb
|
gpl-2.0
|
[
"Generating human faces with Adversarial Networks (5 points)\n<img src=\"https://www.strangerdimensions.com/wp-content/uploads/2013/11/reception-robot.jpg\" width=320>\nThis time we'll train a neural net to generate plausible human faces in all their subtlty: appearance, expression, accessories, etc. 'Cuz when us machines gonna take over Earth, there won't be any more faces left. We want to preserve this data for future iterations. Yikes...\nBased on Based on https://github.com/Lasagne/Recipes/pull/94 .",
"from torchvision import utils\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport numpy as np\nimport torch, torch.nn as nn\nimport torch.nn.functional as F\nfrom itertools import count\nfrom IPython import display\nimport warnings\nimport time\n\nplt.rcParams.update({'axes.titlesize': 'small'})\n\nfrom sklearn.datasets import load_digits\n#The following line fetches you two datasets: images, usable for autoencoder training and attributes.\n#Those attributes will be required for the final part of the assignment (applying smiles), so please keep them in mind\nfrom lfw_dataset import fetch_lfw_dataset\ndata,attrs = fetch_lfw_dataset(dimx=36, dimy=36)\n\n#preprocess faces\ndata = np.float32(data).transpose([0,3,1,2]) / 255.\n\nIMG_SHAPE = data.shape[1:]\n\n#print random image\nplt.imshow(data[np.random.randint(data.shape[0])].transpose([1,2,0]),\n cmap=\"gray\", interpolation=\"none\")",
"Generative adversarial nets 101\n<img src=\"https://raw.githubusercontent.com/torch/torch.github.io/master/blog/_posts/images/model.png\" width=320px height=240px>\nDeep learning is simple, isn't it? \n* build some network that generates the face (small image)\n* make up a measure of how good that face is\n* optimize with gradient descent :)\nThe only problem is: how can we engineers tell well-generated faces from bad? And i bet you we won't ask a designer for help. \nIf we can't tell good faces from bad, we delegate it to yet another neural network!\nThat makes the two of them:\n* G__enerator - takes random noize for inspiration and tries to generate a face sample. \n * Let's call him __G(z), where z is a gaussian noize.\n* D__iscriminator - takes a face sample and tries to tell if it's great or fake. \n * Predicts the probability of input image being a __real face\n * Let's call him D(x), x being an image.\n * D(x) is a predition for real image and D(G(z)) is prediction for the face made by generator.\nBefore we dive into training them, let's construct the two networks.",
"use_cuda = torch.cuda.is_available()\n\nprint(\"Torch version:\", torch.__version__)\nif use_cuda:\n print(\"Using GPU\")\nelse:\n print(\"Not using GPU\")\n\ndef sample_noise_batch(batch_size):\n noise = torch.randn(batch_size, CODE_SIZE)\n #print(noise.shape)\n return noise.cuda() if use_cuda else noise.cpu()\n \nclass Reshape(nn.Module):\n def __init__(self, shape):\n nn.Module.__init__(self)\n self.shape=shape\n def forward(self,input):\n return input.view(self.shape)\n \ndef save_checkpoint(state, filename):\n torch.save(state, filename)\n\nCODE_SIZE = 256\n\n# automatic layer name maker. Don't do this in production :)\nix = ('layer_%i'%i for i in count())\n\ngenerator = nn.Sequential()\n\ngenerator.add_module(next(ix), nn.Linear(CODE_SIZE, 10*8*8)) #output 10*8*8\ngenerator.add_module(next(ix), nn.ELU())\ngenerator.add_module(next(ix), Reshape([-1, 10, 8, 8])) #output 10x8x8\n\ngenerator.add_module(next(ix), nn.ConvTranspose2d(10, 64, kernel_size=(5,5))) #output 64x12x12\ngenerator.add_module(next(ix), nn.ELU())\ngenerator.add_module(next(ix), nn.ConvTranspose2d(64, 64, kernel_size=(5,5))) #output 64x16x16\ngenerator.add_module(next(ix), nn.ELU())\ngenerator.add_module(next(ix), nn.Upsample(scale_factor=2)) #output 64x32x32\n\ngenerator.add_module(next(ix), nn.ConvTranspose2d(64, 32, kernel_size=(5,5))) #output 32x36x36\ngenerator.add_module(next(ix), nn.ELU())\ngenerator.add_module(next(ix), nn.ConvTranspose2d(32, 32, kernel_size=(5,5))) #output 32x40x40\ngenerator.add_module(next(ix), nn.ELU())\n\ngenerator.add_module(next(ix), nn.Conv2d(32, 3, kernel_size=(5,5))) #output 3x36x36\n#generator.add_module(next(ix), nn.Sigmoid())\n\nif use_cuda: generator.cuda()\n\ngenerated_data = generator(sample_noise_batch(5))\nassert tuple(generated_data.shape)[1:] == IMG_SHAPE, \\\n\"generator must output an image of shape %s, but instead it produces 
%s\"%(IMG_SHAPE,generated_data.shape)\n\nplt.figure(figsize=(16,10))\nplt.axis('off')\nplt.imshow(utils.make_grid(generated_data).cpu().detach().numpy().transpose((1,2,0)).clip(0,1)*10)\nplt.show()",
"Discriminator\n\nDiscriminator is your usual convolutional network with interlooping convolution and pooling layers\nThe network does not include dropout/batchnorm to avoid learning complications.\nWe also regularize the pre-output layer to prevent discriminator from being too certain.",
"def sample_data_batch(batch_size):\n idxs = np.random.choice(np.arange(data.shape[0]), size=batch_size)\n batch = torch.tensor(data[idxs], dtype=torch.float32)\n return batch.cuda() if use_cuda else batch.cpu()\n\n# a special module that converts [batch, channel, w, h] to [batch, units]\nclass Flatten(nn.Module):\n def forward(self, input):\n return input.view(input.shape[0], -1)\n\ndiscriminator = nn.Sequential()\n\n## Got mediocre result\n### YOUR CODE - create convolutional architecture for discriminator\n### Note: please start simple. A few convolutions & poolings would do, inception/resnet is an overkill\n\ndiscriminator.add_module(\"conv1\", nn.Conv2d(3, 32, 5)) #output 32x32x32\ndiscriminator.add_module(\"elu1\", nn.ELU())\n#discriminator.add_module(\"pool2d\", nn.MaxPool2d(2, stride=2)) #output 32x16x16\ndiscriminator.add_module('avgpool1', nn.AdaptiveAvgPool2d((16,16)))\ndiscriminator.add_module(\"conv2\", nn.Conv2d(32, 64, 5)) #output 64x12x12\ndiscriminator.add_module(\"elu2\", nn.ELU())\ndiscriminator.add_module(\"conv3\", nn.Conv2d(64, 10, 5)) #output 10x8x8\ndiscriminator.add_module(\"elu3\", nn.ELU())\ndiscriminator.add_module(\"reshape\", Reshape([-1, 10*8*8]))\ndiscriminator.add_module(\"linear1\", nn.Linear(10*8*8, CODE_SIZE)) #output 256\ndiscriminator.add_module(\"elu4\", nn.ELU())\ndiscriminator.add_module(\"linear1\", nn.Linear(CODE_SIZE, 1))\n\nif use_cuda: discriminator.cuda()\n\ndiscriminator = nn.Sequential()\n\n# Got bad results\n### YOUR CODE - create convolutional architecture for discriminator\n### Note: please start simple. 
A few convolutions & poolings would do, inception/resnet is an overkill\n\ndiscriminator.add_module(\"conv1\", nn.Conv2d(3, 32, 5)) #output 32x32x32\ndiscriminator.add_module(\"lrelu1\", nn.LeakyReLU(0.2))\ndiscriminator.add_module(\"conv2\", nn.Conv2d(32, 64, 3)) #output 64x30x30\ndiscriminator.add_module(\"bn1\", nn.BatchNorm2d(64))\ndiscriminator.add_module(\"lrelu2\", nn.LeakyReLU(0.2))\ndiscriminator.add_module('avgpool1', nn.AdaptiveAvgPool2d((15,15)))\ndiscriminator.add_module(\"conv3\", nn.Conv2d(64, 128, 4)) #output 128x12x12\ndiscriminator.add_module(\"bn2\", nn.BatchNorm2d(128))\ndiscriminator.add_module(\"lrelu3\", nn.LeakyReLU(0.2))\ndiscriminator.add_module('avgpool2', nn.AdaptiveAvgPool2d((6,6))) #output 128x6x6\ndiscriminator.add_module(\"conv4\", nn.Conv2d(128, 256, 4)) #output 256x3x3\ndiscriminator.add_module(\"bn3\", nn.BatchNorm2d(256))\ndiscriminator.add_module(\"lrelu4\", nn.LeakyReLU(0.2))\ndiscriminator.add_module(\"reshape\", Reshape([-1, 256*3*3]))\ndiscriminator.add_module(\"linear1\", nn.Linear(256*3*3, 1)) #output 256\n\nif use_cuda: discriminator.cuda()\n\ndiscriminator = nn.Sequential()\n\n# Moreless fine\n### YOUR CODE - create convolutional architecture for discriminator\n### Note: please start simple. 
A few convolutions & poolings would do, inception/resnet is an overkill\n\ndiscriminator.add_module(\"conv1\", nn.Conv2d(3, 32, 5)) #output 32x32x32\ndiscriminator.add_module(\"lrelu1\", nn.LeakyReLU(0.2))\ndiscriminator.add_module('avgpool1', nn.AdaptiveAvgPool2d((16,16))) #output 32x16x16\n\ndiscriminator.add_module(\"conv2\", nn.Conv2d(32, 64, 5, 1, 2)) #output 64x16x16\ndiscriminator.add_module(\"bn1\", nn.BatchNorm2d(64))\ndiscriminator.add_module(\"lrelu2\", nn.LeakyReLU(0.2))\ndiscriminator.add_module('avgpool2', nn.AdaptiveAvgPool2d((8,8))) #output 64x8x8\n\ndiscriminator.add_module(\"conv3\", nn.Conv2d(64, 128, 5, 1, 2)) #output 128x8x8\ndiscriminator.add_module(\"bn2\", nn.BatchNorm2d(128))\ndiscriminator.add_module(\"lrelu3\", nn.LeakyReLU(0.2))\ndiscriminator.add_module('avgpool2', nn.AdaptiveAvgPool2d((4,4))) #output 128x4x4\n\ndiscriminator.add_module(\"conv4\", nn.Dropout(0.5))\ndiscriminator.add_module(\"reshape\", Reshape([-1, 128*4*4]))\ndiscriminator.add_module(\"linear1\", nn.Linear(128*4*4, 1)) #output 1\n\nif use_cuda: discriminator.cuda()\n\nsample = sample_data_batch(5)\nplt.figure(figsize=(16,10))\nplt.axis('off')\nplt.imshow(utils.make_grid(sample).cpu().detach().numpy().transpose((1,2,0)).clip(0,1))\nplt.show()\ndiscriminator(sample).shape",
"Training\nWe train the two networks concurrently:\n* Train discriminator to better distinguish real data from current generator\n* Train generator to make discriminator think generator is real\n* Since discriminator is a differentiable neural network, we train both with gradient descent.\n\nTraining is done iteratively until discriminator is no longer able to find the difference (or until you run out of patience).\nTricks:\n\nRegularize discriminator output weights to prevent explosion\nTrain generator with adam to speed up training. Discriminator trains with SGD to avoid problems with momentum.\nMore: https://github.com/soumith/ganhacks",
"def generator_loss(noise):\n \"\"\"\n 1. generate data given noise\n 2. compute log P(real | gen noise)\n 3. return generator loss (should be scalar)\n \"\"\"\n generated_data = generator(noise)\n \n disc_on_generated_data = discriminator(generated_data)\n \n logp_gen_is_real = F.logsigmoid(disc_on_generated_data)\n \n loss = -1 * torch.mean(logp_gen_is_real)\n \n return loss\n\nloss = generator_loss(sample_noise_batch(32))\n\nprint(loss)\n\nassert len(loss.shape) == 0, \"loss must be scalar\"\n\ndef discriminator_loss(real_data, generated_data):\n \"\"\"\n 1. compute discriminator's output on real & generated data\n 2. compute log-probabilities of real data being real, generated data being fake\n 3. return discriminator loss (scalar)\n \"\"\"\n disc_on_real_data = discriminator(real_data)\n disc_on_fake_data = discriminator(generated_data)\n \n logp_real_is_real = F.logsigmoid(disc_on_real_data)\n logp_gen_is_fake = F.logsigmoid(1 - disc_on_fake_data)\n \n loss = -1 * torch.mean(logp_real_is_real + logp_gen_is_fake)\n return loss\n\nloss = discriminator_loss(sample_data_batch(32), \n generator(sample_noise_batch(32)))\n\nprint(loss)\n\nassert len(loss.shape) == 0, \"loss must be scalar\"",
"Auxilary functions\nHere we define a few helper functions that draw current data distributions and sample training batches.",
"def sample_images(nrow, ncol, sharp=False):\n with torch.no_grad():\n images = generator(sample_noise_batch(batch_size=nrow*ncol))\n images = images.data.cpu().numpy().transpose([0, 2, 3, 1])\n if np.var(images)!=0:\n images = images.clip(np.min(data),np.max(data))\n for i in range(nrow*ncol):\n plt.subplot(nrow,ncol,i+1)\n plt.axis('off')\n if sharp:\n plt.imshow(images[i], cmap=\"gray\", interpolation=\"none\")\n else:\n plt.imshow(images[i], cmap=\"gray\")\n plt.show()\n\ndef sample_probas(batch_size):\n plt.title('Generated vs real data')\n D_real = F.sigmoid(discriminator(sample_data_batch(batch_size)))\n generated_data_batch = generator(sample_noise_batch(batch_size))\n D_fake = F.sigmoid(discriminator(generated_data_batch))\n \n plt.hist(D_real.data.cpu().numpy(),\n label='D(x)', alpha=0.5, range=[0,1])\n plt.hist(D_fake.data.cpu().numpy(),\n label='D(G(z))', alpha=0.5, range=[0,1])\n plt.legend(loc='best')\n plt.show()",
"Training\nMain loop.\nWe just train generator and discriminator in a loop and draw results once every N iterations.",
"#optimizers\ndisc_opt = torch.optim.SGD(discriminator.parameters(), weight_decay=1e-4, lr=5e-3)\ngen_opt = torch.optim.Adam(generator.parameters(), lr=1e-4)\nlast_epoch = 0\n\nWEIGHTS_PATH = './weights/dcgan.pth.tar'\n\nif (torch.cuda.is_available()):\n checkpoint = torch.load(f=WEIGHTS_PATH)\nelse:\n net = nn.DataParallel(net)\n checkpoint = torch.load(map_location='cpu', f=WEIGHTS_PATH)\n \ngenerator.load_state_dict(checkpoint['gen_weights'])\ndiscriminator.load_state_dict(checkpoint['disc_weights'])\nlast_epoch = checkpoint['last_epoch']\ndisc_opt.load_state_dict(checkpoint['disc_optim'])\ngen_opt.load_state_dict(checkpoint['gen_optim'])\n\ndef gaussian(ins, mean=0, stddev=0.05):\n noise = torch.autograd.Variable(ins.data.new(ins.size()).normal_(mean, stddev))\n return ins + noise\n\nwarnings.simplefilter('ignore')\n\nbatch_size = 100\ndisc_loss = 0\ngen_loss = 0\nstart = time.time()\n\nfor epoch in range(last_epoch, 50000):\n # Train discriminator\n for i in range(5):\n real_data = sample_data_batch(batch_size)\n fake_data = generator(sample_noise_batch(batch_size))\n loss = discriminator_loss(gaussian(real_data), gaussian(fake_data))\n disc_opt.zero_grad()\n loss.backward()\n disc_opt.step()\n disc_loss = loss.item()\n \n # Train generator\n for j in range(1):\n noise = sample_noise_batch(batch_size)\n loss = generator_loss(noise)\n gen_opt.zero_grad()\n loss.backward()\n gen_opt.step()\n gen_loss = loss.item()\n \n if epoch %100==0:\n end = time.time()\n display.clear_output(wait=True)\n print(\"epoch %d, Generator loss %.7f, discriminator loss %.7f\" % (epoch, gen_loss, disc_loss))\n print(\"time taken (100 epochs) %.0f sec\" % (end - start))\n sample_images(2,3,True)\n sample_probas(1000)\n start = time.time()\n last_epoch = epoch\n\nprint(epoch)\nsave_checkpoint({\n 'gen_weights': generator.state_dict(),\n 'disc_weights' : discriminator.state_dict(),\n 'gen_optim' : gen_opt.state_dict(),\n 'disc_optim' : disc_opt.state_dict(),\n 'last_epoch' : 
last_epoch\n}, \"./weights/dcgan.pth.tar\")\n\nplt.figure(figsize=[16, 24])\nsample_images(16, 8)\n\n# Note: a no-nonsense neural network should be able to produce reasonably good images after 15k iterations\n# By \"reasonably good\" we mean \"resembling a car crash victim\" or better",
"Evaluation\nThe code below dumps a batch of images so that you could use them for precision/recall evaluation.\nPlease generate the same number of images as for autoencoders for a fair comparison.",
"num_images = len(data)\nbatch_size = 100\n\nall_images = []\n\nfor batch_i in range(int((num_images - 1) / batch_size + 1)):\n with torch.no_grad():\n images = generator(sample_noise_batch(batch_size=batch_size))\n images = images.data.cpu().numpy().transpose([0, 2, 3, 1])\n if np.var(images)!=0:\n images = images.clip(np.min(data), np.max(data))\n \n all_images.append(images)\n \nall_images = np.concatenate(all_images, axis=0)[:num_images]\n\nnp.savez(\"./gan.npz\", Pictures=all_images)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
junhwanjang/DataSchool
|
Lecture/16. 로지스틱 회귀분석/1) 로지스틱 회귀 분석.ipynb
|
mit
|
[
"로지스틱 회귀 분석\n로지스틱 회귀(Logistic Regression) 분석은 회귀 분석이라는 명칭을 가지고 있지만 분류(classsification) 방법의 일종이다.\n로지스틱 회귀 모형에서는 베르누이 확률 변수(Bernoilli random variable)의 모수(parameter) $\\theta$가 독립 변수 $x$에 의존한다고 가정한다.\n$$ p(y \\mid x, \\theta) = \\text{Ber} (y \\mid \\theta(x) )$$\n여기에서 모수 $\\theta$ 는 0과 1사이의 실수이며 다음과 같이 $x$의 값에 의존하는 함수이다.\n$$\n\\theta = f(w^Tx)\n$$\n시그모이드 함수\n모수 $\\theta$는 일반적인 회귀 분석의 종속 변수와 달리 0 부터 1까지의 실수값만 가질 수 있기 때문에 시그모이드 함수(sigmoid function)이라 불리는 특별한 형태의 함수 $f$를 사용해야 한다.\n시그모이드 함수는 종속 변수의 모든 실수 값에 대해 유한한 구간 $(a,b)$ 사이의 한정된(bounded) 값과 양의 기울기를 가지는 함수를 말하며 다음과 같은 함수들이 주로 사용된다.\n\n로지스틱 함수 (Logistic Function) \n\n$$ \\text{logitstic}(z) = \\dfrac{1}{1+\\exp{(-z)}} $$\n\n오차 함수 (Error Function) \n\n$$ \\text{erf}(z) = \\frac{2}{\\sqrt\\pi}\\int_0^z e^{-t^2}\\,dt $$\n\n하이퍼볼릭 탄젠트 함수 (Hyperbolic tangent) \n\n$$ \\tanh(z) = \\frac{\\sinh z}{\\cosh z} = \\frac {e^z - e^{-z}} {e^z + e^{-z}} $$\n\n역 탄젠트 함수 (Arc-tangent) \n\n$$ \\arctan(z) = \\tan^{-1}(z) $$",
"xx = np.linspace(-10, 10, 1000)\nplt.plot(xx, (1/(1+np.exp(-xx)))*2-1, label=\"logistic (scaled)\")\nplt.plot(xx, sp.special.erf(0.5*np.sqrt(np.pi)*xx), label=\"erf (scaled)\")\nplt.plot(xx, np.tanh(xx), label=\"tanh\")\nplt.ylim([-1.1, 1.1])\nplt.legend(loc=2)\nplt.show()",
"로지스틱 함수\n여러가지 시그모이드 중 로지스틱 함수는 다음과 같은 물리적인 의미를 부여할 수 있기 때문에 많이 사용된다.\n우선 Bernoulli 시도에서 1이 나올 확률 $\\theta$ 과 0이 나올 확률 $1-\\theta$ 의 비(ratio)는 다음과 같은 수식이 되며 odds ratio 라고 한다.\n$$ \\text{odds ratio} = \\dfrac{\\theta}{1-\\theta} $$\n이 odds ratio 를 로그 변환한 것이 로지트 함수(Logit function)이다.\n$$ z = \\text{logit}(\\text{odds ratio}) = \\log \\left(\\dfrac{\\theta}{1-\\theta}\\right) $$ \n로지스틱 함수(Logistic function) 는 이 로지트 함수의 역함수이다.\n$$ \\text{logitstic}(z) = \\theta(z) = \\dfrac{1}{1+\\exp{(-z)}} $$ \n로지스틱 모형의 모수 추정\n로지스틱 모형은 일종의 비선형 회귀 모형이지만 다음과 같이 MLE(Maximum Likelihood Estimation) 방법으로 모수 $w$를 추정할 수 있다.\n여기에서는 종속 변수 $y$가 베르누이 확률 변수라고 가정한다.\n$$ p(y \\mid x, \\theta) = \\text{Ber} (y \\mid \\theta(x) )$$\n데이터 표본이 ${ x_i, y_i }$일 경우 Log Likelihood $\\text{LL}$ 를 구하면 다음과 같다.\n$$\n\\begin{eqnarray}\n\\text{LL} \n&=& \\log \\prod_{i=1}^N \\theta_i(x_i)^{y_i} (1-\\theta_i(x_i))^{1-y_i} \\\n&=& \\sum_{i=1}^N \\left( y_i \\log\\theta_i(x_i) + (1-y_i)\\log(1-\\theta_i(x_i)) \\right) \\\n\\end{eqnarray}\n$$\n$\\theta$가 로지스틱 함수 형태로 표현된다면\n$$\n\\log \\left(\\dfrac{\\theta(x)}{1-\\theta(x)}\\right) = w^T x\n$$\n$$\n\\theta(x) = \\dfrac{1}{1 + \\exp{(-w^Tx)}}\n$$\n가 되고 이를 Log Likelihood 에 적용하면 다음과 같다.\n$$\n\\begin{eqnarray}\n\\text{LL} \n&=& \\sum_{i=1}^N \\left( y_i \\log\\theta_i(x_i) + (1-y_i)\\log(1-\\theta_i(x_i)) \\right) \\\n&=& \\sum_{i=1}^N \\left( y_i \\log\\left(\\dfrac{1}{1 + \\exp{(-w^Tx_i)}}\\right) - (1-y_i)\\log\\left(\\dfrac{\\exp{(-w^Tx_i)}}{1 + \\exp{(-w^Tx_i)}}\\right) \\right) \\\n\\end{eqnarray}\n$$\n이 값의 최대화하는 값을 구하기 위해 chain rule를 사용하여 $w$로 미분해야 한다.\n우선 $\\theta$를 $w$로 미분하면\n$$ \\dfrac{\\partial \\theta}{\\partial w} \n= \\dfrac{\\partial}{\\partial w} \\dfrac{1}{1 + \\exp{(-w^Tx)}} \\ \n= \\dfrac{\\exp{(-w^Tx)}}{(1 + \\exp{(-w^Tx)})^2} x \\ \n= \\theta(1-\\theta) x $$\nchain rule를 적용하면 \n$$ \n\\begin{eqnarray}\n\\dfrac{\\partial \\text{LL}}{\\partial w} \n&=& \\sum_{i=1}^N \\left( y_i \\dfrac{1}{\\theta_i(x_i;w)} - (1-y_i)\\dfrac{1}{1-\\theta_i(x_i;w)} \\right) 
\\dfrac{\\partial \\theta}{\\partial w} \\\n&=& \\sum_{i=1}^N \\big( y_i (1-\\theta_i(x_i;w)) - (1-y_i)\\theta_i(x_i;w) \\big) x_i \\\n&=& \\sum_{i=1}^N \\big( y_i - \\theta_i(x_i;w) \\big) x_i \\\n\\end{eqnarray}\n$$\n이 값은 $w$에 대한 비선형 함수이므로 선형 모형과 같이 간단하게 그레디언트가 0이 되는 모수 $w$ 값에 대한 수식을 구할 수 없으며 수치적인 최적화 방법(numerical optimization)을 통해 최적 모수 $w$의 값을 구해야 한다.\n수치적 최적화\n단순한 Steepest Gradient 방법을 사용한다면 최적화 알고리즘은 다음과 같다.\n그레디언트 벡터는\n$$\ng_k = \\dfrac{d}{dw}(-LL)\n$$\n이 방향으로 step size $\\eta_k$ 만큼 움직이면 다음과 같이 반복적으로 최적 모수값을 구할 수 있다.\n$$\n\\begin{eqnarray}\nw_{k+1} \n&=& w_{k} - \\eta_k g_k \\\n&=& w_{k} + \\eta_k \\sum_{i=1}^N \\big( y_i - \\theta_i(x_i) \\big) x_i\\\n\\end{eqnarray}\n$$\nScikit-Learn 패키지의 로지스틱 회귀\nScikit-Learn 패키지는 로지스틱 회귀 모형 LogisticRegression 를 제공한다.",
"from sklearn.datasets import make_classification\nX0, y = make_classification(n_features=1, n_redundant=0, n_informative=1, n_clusters_per_class=1, random_state=4)\nX = sm.add_constant(X0)\n\nfrom sklearn.linear_model import LogisticRegression\nmodel = LogisticRegression().fit(X0, y)\n\nxx = np.linspace(-3, 3, 100)\nsigm = 1.0/(1 + np.exp(-model.coef_[0][0]*xx - model.intercept_[0]))\nplt.plot(xx, sigm)\nplt.scatter(X0, y, marker='o', c=y, s=100)\nplt.scatter(X0, model.predict(X0), marker='x', c=y, s=200, lw=2, alpha=0.5, cmap=mpl.cm.jet)\nplt.xlim(-3, 3)\nplt.show()",
"statsmodels 패키지의 로지스틱 회귀\nstatsmodels 패키지는 로지스틱 회귀 모형 Logit 를 제공한다. 사용방법은 OLS 와 동일하다. Scikit-Learn 패키지와 달리 Logit 클래스는 classification 되기 전의 값을 출력한다",
"logit_mod = sm.Logit(y, X)\nlogit_res = logit_mod.fit(disp=0)\nprint(logit_res.summary())\n\nxx = np.linspace(-3, 3, 100)\nsigmoid = logit_res.predict(sm.add_constant(xx))\nplt.plot(xx, sigmoid, lw=5, alpha=0.5)\nplt.scatter(X0, y, marker='o', c=y, s=100)\nplt.scatter(X0, logit_res.predict(X), marker='x', c=y, s=200, lw=2, alpha=0.5, cmap=mpl.cm.jet)\nplt.xlim(-3, 3)\nplt.show()",
"예제 1: Michelin and Zagat 가이드 비교\n다음 데이터는 뉴욕시의 레스토랑에 대한 두 개의 가이드북에서 발취한 것이다.\n\nFood: Zagat Survey 2006 의 고객 평가 점수\nInMichelin: 해당 고객 평가 점수를 받은 레스토랑 중 2006 Michelin Guide New York City 에 실린 레스토랑의 수\nNotInMichelin: 해당 고객 평가 점수를 받은 레스토랑 중 2006 Michelin Guide New York City 에 실리지 않은 레스토랑의 수\nmi: 해당 고객 평가 점수를 받은 레스토랑의 수\nproportion: 해당 고객 평가 점수를 받은 레스토랑 중 2006 Michelin Guide New York City 에 실린 레스토랑의 비율",
"df = pd.read_table(\"~/data/sheather/MichelinFood.txt\")\ndf\n\ndf.plot(kind=\"scatter\", x=\"Food\", y=\"proportion\", s=100)\nplt.show()\n\nX = sm.add_constant(df.Food)\ny = df.proportion\nmodel = sm.Logit(y, X)\nresult = model.fit()\nprint(result.summary())\n\ndf.plot(kind=\"scatter\", x=\"Food\", y=\"proportion\", s=50, alpha=0.5)\nxx = np.linspace(10, 35, 100)\nplt.plot(xx, result.predict(sm.add_constant(xx)), \"r\", lw=4)\nplt.xlim(10, 35)\nplt.show()",
"예제 2: Michelin 가이드 예측\n다음 데이터는 뉴욕시의 개별 레스토랑의 고객 평가 점수와 Michelin 가이드 수록 여부를 보인 것이다.\n\nInMichelin: Michelin 가이드 수록 여부\nRestaurant Name: 레스토랑 이름\nFood: 식사에 대한 고객 평가 점수 (1~30)\nDecor: 인테리어에 대한 고객 평가 점수 (1~30)\nService: 서비스에 대한 고객 평가 점수 (1~30)\nPrice: 저녁 식사 가격 (US$)",
"df = pd.read_csv(\"~/data/sheather/MichelinNY.csv\")\ndf.tail()\n\nsns.stripplot(x=\"Food\", y=\"InMichelin\", data=df, jitter=True, orient='h', order=[1, 0])\nplt.grid(True)\nplt.show()\n\nX = sm.add_constant(df.Food)\ny = df.InMichelin\nmodel = sm.Logit(y, X)\nresult = model.fit()\nprint(result.summary())\n\nxx = np.linspace(10, 35, 100)\npred = result.predict(sm.add_constant(xx))\ndecision_value = xx[np.argmax(pred > 0.5)]\nprint(decision_value)\nplt.plot(xx, pred, \"r\", lw=4)\nplt.axvline(decision_value)\nplt.xlim(10, 35)\nplt.show()",
"예제 3: Fair's Affair Dataset",
"print(sm.datasets.fair.SOURCE)\nprint(sm.datasets.fair.NOTE)\n\ndf = sm.datasets.fair.load_pandas().data\ndf.head()\n\nsns.factorplot(x=\"affairs\", y=\"children\", row=\"yrs_married\", data=df,\n orient=\"h\", size=2, aspect=5, kind=\"box\")\nplt.show()\n\ndf['affair'] = (df['affairs'] > 0).astype(float)\nmodoel = smf.logit(\"affair ~ rate_marriage + religious + yrs_married + age + educ + children\", df).fit()\nprint(modoel.summary())"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tensorflow/docs-l10n
|
site/en-snapshot/lite/tutorials/pose_classification.ipynb
|
apache-2.0
|
[
"Copyright 2021 The TensorFlow Authors.",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"Human Pose Classification with MoveNet and TensorFlow Lite\nThis notebook teaches you how to train a pose classification model using MoveNet and TensorFlow Lite. The result is a new TensorFlow Lite model that accepts the output from the MoveNet model as its input, and outputs a pose classification, such as the name of a yoga pose.\nThe procedure in this notebook consists of 3 parts:\n* Part 1: Preprocess the pose classification training data into a CSV file that specifies the landmarks (body keypoints) detected by the MoveNet model, along with the ground truth pose labels.\n* Part 2: Build and train a pose classification model that takes the landmark coordinates from the CSV file as input, and outputs the predicted labels.\n* Part 3: Convert the pose classification model to TFLite.\nBy default, this notebook uses an image dataset with labeled yoga poses, but we've also included a section in Part 1 where you can upload your own image dataset of poses.\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/lite/tutorials/pose_classification\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/pose_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/pose_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/tutorials/pose_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n <td>\n 
<a href=\"https://tfhub.dev/s?q=movenet\"><img src=\"https://www.tensorflow.org/images/hub_logo_32px.png\" />See TF Hub model</a>\n </td>\n</table>\n\nPreparation\nIn this section, you'll import the necessary libraries and define several functions to preprocess the training images into a CSV file that contains the landmark coordinates and ground truth labels.\nNothing observable happens here, but you can expand the hidden code cells to see the implementation for some of the functions we'll be calling later on.\nIf you only want to create the CSV file without knowing all the details, just run this section and proceed to Part 1.",
"!pip install -q opencv-python\n\nimport csv\nimport cv2\nimport itertools\nimport numpy as np\nimport pandas as pd\nimport os\nimport sys\nimport tempfile\nimport tqdm\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.collections import LineCollection\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom tensorflow import keras\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, classification_report, confusion_matrix",
"Code to run pose estimation using MoveNet",
"#@title Functions to run pose estimation with MoveNet\n\n#@markdown You'll download the MoveNet Thunder model from [TensorFlow Hub](https://www.google.com/url?sa=D&q=https%3A%2F%2Ftfhub.dev%2Fs%3Fq%3Dmovenet), and reuse some inference and visualization logic from the [MoveNet Raspberry Pi (Python)](https://github.com/tensorflow/examples/tree/master/lite/examples/pose_estimation/raspberry_pi) sample app to detect landmarks (ear, nose, wrist etc.) from the input images.\n\n#@markdown *Note: You should use the most accurate pose estimation model (i.e. MoveNet Thunder) to detect the keypoints and use them to train the pose classification model to achieve the best accuracy. When running inference, you can use a pose estimation model of your choice (e.g. either MoveNet Lightning or Thunder).*\n\n# Download model from TF Hub and check out inference code from GitHub\n!wget -q -O movenet_thunder.tflite https://tfhub.dev/google/lite-model/movenet/singlepose/thunder/tflite/float16/4?lite-format=tflite\n!git clone https://github.com/tensorflow/examples.git\npose_sample_rpi_path = os.path.join(os.getcwd(), 'examples/lite/examples/pose_estimation/raspberry_pi')\nsys.path.append(pose_sample_rpi_path)\n\n# Load MoveNet Thunder model\nimport utils\nfrom data import BodyPart\nfrom ml import Movenet\nmovenet = Movenet('movenet_thunder')\n\n# Define function to run pose estimation using MoveNet Thunder.\n# You'll apply MoveNet's cropping algorithm and run inference multiple times on\n# the input image to improve pose estimation accuracy.\ndef detect(input_tensor, inference_count=3):\n \"\"\"Runs detection on an input image.\n \n Args:\n input_tensor: A [height, width, 3] Tensor of type tf.float32.\n Note that height and width can be anything since the image will be\n immediately resized according to the needs of the model within this\n function.\n inference_count: Number of times the model should run repeatly on the\n same input image to improve detection accuracy.\n \n Returns:\n A 
Person entity detected by the MoveNet.SinglePose.\n \"\"\"\n image_height, image_width, channel = input_tensor.shape\n \n # Detect pose using the full input image\n movenet.detect(input_tensor.numpy(), reset_crop_region=True)\n \n # Repeatedly using previous detection result to identify the region of\n # interest and only croping that region to improve detection accuracy\n for _ in range(inference_count - 1):\n person = movenet.detect(input_tensor.numpy(), \n reset_crop_region=False)\n\n return person\n\n#@title Functions to visualize the pose estimation results.\n\ndef draw_prediction_on_image(\n image, person, crop_region=None, close_figure=True,\n keep_input_size=False):\n \"\"\"Draws the keypoint predictions on image.\n \n Args:\n image: An numpy array with shape [height, width, channel] representing the\n pixel values of the input image.\n person: A person entity returned from the MoveNet.SinglePose model.\n close_figure: Whether to close the plt figure after the function returns.\n keep_input_size: Whether to keep the size of the input image.\n \n Returns:\n An numpy array with shape [out_height, out_width, channel] representing the\n image overlaid with keypoint predictions.\n \"\"\"\n # Draw the detection result on top of the image.\n image_np = utils.visualize(image, [person])\n \n # Plot the image with detection results.\n height, width, channel = image.shape\n aspect_ratio = float(width) / height\n fig, ax = plt.subplots(figsize=(12 * aspect_ratio, 12))\n im = ax.imshow(image_np)\n \n if close_figure:\n plt.close(fig)\n \n if not keep_input_size:\n image_np = utils.keep_aspect_ratio_resizer(image_np, (512, 512))\n\n return image_np\n\n#@title Code to load the images, detect pose landmarks and save them into a CSV file\n\nclass MoveNetPreprocessor(object):\n \"\"\"Helper class to preprocess pose sample images for classification.\"\"\"\n \n def __init__(self,\n images_in_folder,\n images_out_folder,\n csvs_out_path):\n \"\"\"Creates a preprocessor to 
detection pose from images and save as CSV.\n\n Args:\n images_in_folder: Path to the folder with the input images. It should\n follow this structure:\n yoga_poses\n |__ downdog\n |______ 00000128.jpg\n |______ 00000181.bmp\n |______ ...\n |__ goddess\n |______ 00000243.jpg\n |______ 00000306.jpg\n |______ ...\n ...\n images_out_folder: Path to write the images overlay with detected\n landmarks. These images are useful when you need to debug accuracy\n issues.\n csvs_out_path: Path to write the CSV containing the detected landmark\n coordinates and label of each image that can be used to train a pose\n classification model.\n \"\"\"\n self._images_in_folder = images_in_folder\n self._images_out_folder = images_out_folder\n self._csvs_out_path = csvs_out_path\n self._messages = []\n\n # Create a temp dir to store the pose CSVs per class\n self._csvs_out_folder_per_class = tempfile.mkdtemp()\n \n # Get list of pose classes and print image statistics\n self._pose_class_names = sorted(\n [n for n in os.listdir(self._images_in_folder) if not n.startswith('.')]\n )\n \n def process(self, per_pose_class_limit=None, detection_threshold=0.1):\n \"\"\"Preprocesses images in the given folder.\n Args:\n per_pose_class_limit: Number of images to load. 
As preprocessing usually\n takes time, this parameter can be specified to make the reduce of the\n dataset for testing.\n detection_threshold: Only keep images with all landmark confidence score\n above this threshold.\n \"\"\"\n # Loop through the classes and preprocess its images\n for pose_class_name in self._pose_class_names:\n print('Preprocessing', pose_class_name, file=sys.stderr)\n\n # Paths for the pose class.\n images_in_folder = os.path.join(self._images_in_folder, pose_class_name)\n images_out_folder = os.path.join(self._images_out_folder, pose_class_name)\n csv_out_path = os.path.join(self._csvs_out_folder_per_class,\n pose_class_name + '.csv')\n if not os.path.exists(images_out_folder):\n os.makedirs(images_out_folder)\n \n # Detect landmarks in each image and write it to a CSV file\n with open(csv_out_path, 'w') as csv_out_file:\n csv_out_writer = csv.writer(csv_out_file, \n delimiter=',', \n quoting=csv.QUOTE_MINIMAL)\n # Get list of images\n image_names = sorted(\n [n for n in os.listdir(images_in_folder) if not n.startswith('.')])\n if per_pose_class_limit is not None:\n image_names = image_names[:per_pose_class_limit]\n\n valid_image_count = 0\n \n # Detect pose landmarks from each image\n for image_name in tqdm.tqdm(image_names):\n image_path = os.path.join(images_in_folder, image_name)\n\n try:\n image = tf.io.read_file(image_path)\n image = tf.io.decode_jpeg(image)\n except:\n self._messages.append('Skipped ' + image_path + '. Invalid image.')\n continue\n else:\n image = tf.io.read_file(image_path)\n image = tf.io.decode_jpeg(image)\n image_height, image_width, channel = image.shape\n \n # Skip images that isn't RGB because Movenet requires RGB images\n if channel != 3:\n self._messages.append('Skipped ' + image_path +\n '. 
Image isn\\'t in RGB format.')\n continue\n person = detect(image)\n \n # Save landmarks if all landmarks were detected\n min_landmark_score = min(\n [keypoint.score for keypoint in person.keypoints])\n should_keep_image = min_landmark_score >= detection_threshold\n if not should_keep_image:\n self._messages.append('Skipped ' + image_path +\n '. No pose was confidentlly detected.')\n continue\n\n valid_image_count += 1\n\n # Draw the prediction result on top of the image for debugging later\n output_overlay = draw_prediction_on_image(\n image.numpy().astype(np.uint8), person, \n close_figure=True, keep_input_size=True)\n \n # Write detection result into an image file\n output_frame = cv2.cvtColor(output_overlay, cv2.COLOR_RGB2BGR)\n cv2.imwrite(os.path.join(images_out_folder, image_name), output_frame)\n \n # Get landmarks and scale it to the same size as the input image\n pose_landmarks = np.array(\n [[keypoint.coordinate.x, keypoint.coordinate.y, keypoint.score]\n for keypoint in person.keypoints],\n dtype=np.float32)\n\n # Write the landmark coordinates to its per-class CSV file\n coordinates = pose_landmarks.flatten().astype(np.str).tolist()\n csv_out_writer.writerow([image_name] + coordinates)\n\n if not valid_image_count:\n raise RuntimeError(\n 'No valid images found for the \"{}\" class.'\n .format(pose_class_name))\n \n # Print the error message collected during preprocessing.\n print('\\n'.join(self._messages))\n\n # Combine all per-class CSVs into a single output file\n all_landmarks_df = self._all_landmarks_as_dataframe()\n all_landmarks_df.to_csv(self._csvs_out_path, index=False)\n\n def class_names(self):\n \"\"\"List of classes found in the training dataset.\"\"\"\n return self._pose_class_names\n \n def _all_landmarks_as_dataframe(self):\n \"\"\"Merge all per-class CSVs into a single dataframe.\"\"\"\n total_df = None\n for class_index, class_name in enumerate(self._pose_class_names):\n csv_out_path = os.path.join(self._csvs_out_folder_per_class,\n 
class_name + '.csv')\n per_class_df = pd.read_csv(csv_out_path, header=None)\n \n # Add the labels\n per_class_df['class_no'] = [class_index]*len(per_class_df)\n per_class_df['class_name'] = [class_name]*len(per_class_df)\n\n # Append the folder name to the filename column (first column)\n per_class_df[per_class_df.columns[0]] = (os.path.join(class_name, '') \n + per_class_df[per_class_df.columns[0]].astype(str))\n\n if total_df is None:\n # For the first class, assign its data to the total dataframe\n total_df = per_class_df\n else:\n # Concatenate each class's data into the total dataframe\n total_df = pd.concat([total_df, per_class_df], axis=0)\n \n list_name = [[bodypart.name + '_x', bodypart.name + '_y', \n bodypart.name + '_score'] for bodypart in BodyPart] \n header_name = []\n for columns_name in list_name:\n header_name += columns_name\n header_name = ['file_name'] + header_name\n header_map = {total_df.columns[i]: header_name[i] \n for i in range(len(header_name))}\n \n total_df.rename(header_map, axis=1, inplace=True)\n\n return total_df\n\n#@title (Optional) Code snippet to try out the Movenet pose estimation logic\n\n#@markdown You can download an image from the internet, run the pose estimation logic on it and plot the detected landmarks on top of the input image. \n\n#@markdown *Note: This code snippet is also useful for debugging when you encounter an image with bad pose classification accuracy. You can run pose estimation on the image and see if the detected landmarks look correct or not before investigating the pose classification logic.*\n\ntest_image_url = \"https://cdn.pixabay.com/photo/2017/03/03/17/30/yoga-2114512_960_720.jpg\" #@param {type:\"string\"}\n!wget -O /tmp/image.jpeg {test_image_url}\n\nif len(test_image_url):\n image = tf.io.read_file('/tmp/image.jpeg')\n image = tf.io.decode_jpeg(image)\n person = detect(image)\n _ = draw_prediction_on_image(image.numpy(), person, crop_region=None, \n close_figure=False, keep_input_size=True)",
"Part 1: Preprocess the input images\nBecause the input for our pose classifier is the output landmarks from the MoveNet model, we need to generate our training dataset by running labeled images through MoveNet and then capturing all the landmark data and ground truth labels into a CSV file.\nThe dataset we've provided for this tutorial is a CG-generated yoga pose dataset. It contains images of multiple CG-generated models doing 5 different yoga poses. The directory is already split into a train dataset and a test dataset.\nSo in this section, we'll download the yoga dataset and run it through MoveNet so we can capture all the landmarks into a CSV file... However, it takes about 15 minutes to feed our yoga dataset to MoveNet and generate this CSV file. So as an alternative, you can download a pre-existing CSV file for the yoga dataset by setting is_skip_step_1 parameter below to True. That way, you'll skip this step and instead download the same CSV file that will be created in this preprocessing step.\nOn the other hand, if you want to train the pose classifier with your own image dataset, you need to upload your images and run this preprocessing step (leave is_skip_step_1 False)—follow the instructions below to upload your own pose dataset.",
"is_skip_step_1 = False #@param [\"False\", \"True\"] {type:\"raw\"}",
"(Optional) Upload your own pose dataset",
"use_custom_dataset = False #@param [\"False\", \"True\"] {type:\"raw\"}\n\ndataset_is_split = False #@param [\"False\", \"True\"] {type:\"raw\"}",
"If you want to train the pose classifier with your own labeled poses (they can be any poses, not just yoga poses), follow these steps:\n\n\nSet the above use_custom_dataset option to True.\n\n\nPrepare an archive file (ZIP, TAR, or other) that includes a folder with your images dataset. The folder must include sorted images of your poses as follows.\n\n\nIf you've already split your dataset into train and test sets, then set dataset_is_split to True. That is, your images folder must include \"train\" and \"test\" directories like this:\n```\nyoga_poses/\n|__ train/\n |__ downdog/\n |______ 00000128.jpg\n |______ ...\n|__ test/\n |__ downdog/\n |______ 00000181.jpg\n |______ ...\n```\n\nOr, if your dataset is NOT split yet, then set\n`dataset_is_split` to **False** and we'll split it up based\non a specified split fraction. That is, your uploaded images\nfolder should look like this:\n\n```\nyoga_poses/\n|__ downdog/\n |______ 00000128.jpg\n |______ 00000181.jpg\n |______ ...\n|__ goddess/\n |______ 00000243.jpg\n |______ 00000306.jpg\n |______ ...\n```\n\n\nClick the Files tab on the left (folder icon) and then click Upload to session storage (file icon).\nSelect your archive file and wait until it finishes uploading before you proceed.\nEdit the following code block to specify the name of your archive file and images directory. (By default, we expect a ZIP file, so you'll need to also modify that part if your archive is another format.)\nNow run the rest of the notebook.",
"#@markdown Be sure you run this cell. It's hiding the `split_into_train_test()` function that's called in the next code block.\n\nimport os\nimport random\nimport shutil\n\ndef split_into_train_test(images_origin, images_dest, test_split):\n \"\"\"Splits a directory of sorted images into training and test sets.\n\n Args:\n images_origin: Path to the directory with your images. This directory\n must include subdirectories for each of your labeled classes. For example:\n yoga_poses/\n |__ downdog/\n |______ 00000128.jpg\n |______ 00000181.jpg\n |______ ...\n |__ goddess/\n |______ 00000243.jpg\n |______ 00000306.jpg\n |______ ...\n ...\n images_dest: Path to a directory where you want the split dataset to be\n saved. The results looks like this:\n split_yoga_poses/\n |__ train/\n |__ downdog/\n |______ 00000128.jpg\n |______ ...\n |__ test/\n |__ downdog/\n |______ 00000181.jpg\n |______ ...\n test_split: Fraction of data to reserve for test (float between 0 and 1).\n \"\"\"\n _, dirs, _ = next(os.walk(images_origin))\n\n TRAIN_DIR = os.path.join(images_dest, 'train')\n TEST_DIR = os.path.join(images_dest, 'test')\n os.makedirs(TRAIN_DIR, exist_ok=True)\n os.makedirs(TEST_DIR, exist_ok=True)\n\n for dir in dirs:\n # Get all filenames for this dir, filtered by filetype\n filenames = os.listdir(os.path.join(images_origin, dir))\n filenames = [os.path.join(images_origin, dir, f) for f in filenames if (\n f.endswith('.png') or f.endswith('.jpg') or f.endswith('.jpeg') or f.endswith('.bmp'))]\n # Shuffle the files, deterministically\n filenames.sort()\n random.seed(42)\n random.shuffle(filenames)\n # Divide them into train/test dirs\n os.makedirs(os.path.join(TEST_DIR, dir), exist_ok=True)\n os.makedirs(os.path.join(TRAIN_DIR, dir), exist_ok=True)\n test_count = int(len(filenames) * test_split)\n for i, file in enumerate(filenames):\n if i < test_count:\n destination = os.path.join(TEST_DIR, dir, os.path.split(file)[1])\n else:\n destination = os.path.join(TRAIN_DIR, 
dir, os.path.split(file)[1])\n shutil.copyfile(file, destination)\n print(f'Moved {test_count} of {len(filenames)} from class \"{dir}\" into test.')\n print(f'Your split dataset is in \"{images_dest}\"')\n\nif use_custom_dataset:\n # ATTENTION:\n # You must edit these two lines to match your archive and images folder name:\n # !tar -xf YOUR_DATASET_ARCHIVE_NAME.tar\n !unzip -q YOUR_DATASET_ARCHIVE_NAME.zip\n dataset_in = 'YOUR_DATASET_DIR_NAME'\n\n # You can leave the rest alone:\n if not os.path.isdir(dataset_in):\n raise Exception(\"dataset_in is not a valid directory\")\n if dataset_is_split:\n IMAGES_ROOT = dataset_in\n else:\n dataset_out = 'split_' + dataset_in\n split_into_train_test(dataset_in, dataset_out, test_split=0.2)\n IMAGES_ROOT = dataset_out",
"Note: If you're using split_into_train_test() to split the dataset, it expects all images to be PNG, JPEG, or BMP—it ignores other file types.\nDownload the yoga dataset",
"if not is_skip_step_1 and not use_custom_dataset:\n !wget -O yoga_poses.zip http://download.tensorflow.org/data/pose_classification/yoga_poses.zip\n !unzip -q yoga_poses.zip -d yoga_cg\n IMAGES_ROOT = \"yoga_cg\"",
"Preprocess the TRAIN dataset",
"if not is_skip_step_1:\n images_in_train_folder = os.path.join(IMAGES_ROOT, 'train')\n images_out_train_folder = 'poses_images_out_train'\n csvs_out_train_path = 'train_data.csv'\n\n preprocessor = MoveNetPreprocessor(\n images_in_folder=images_in_train_folder,\n images_out_folder=images_out_train_folder,\n csvs_out_path=csvs_out_train_path,\n )\n\n preprocessor.process(per_pose_class_limit=None)",
"Preprocess the TEST dataset",
"if not is_skip_step_1:\n images_in_test_folder = os.path.join(IMAGES_ROOT, 'test')\n images_out_test_folder = 'poses_images_out_test'\n csvs_out_test_path = 'test_data.csv'\n\n preprocessor = MoveNetPreprocessor(\n images_in_folder=images_in_test_folder,\n images_out_folder=images_out_test_folder,\n csvs_out_path=csvs_out_test_path,\n )\n\n preprocessor.process(per_pose_class_limit=None)",
"Part 2: Train a pose classification model that takes the landmark coordinates as input, and output the predicted labels.\nYou'll build a TensorFlow model that takes the landmark coordinates and predicts the pose class that the person in the input image performs. The model consists of two submodels:\n\nSubmodel 1 calculates a pose embedding (a.k.a feature vector) from the detected landmark coordinates.\nSubmodel 2 feeds pose embedding through several Dense layer to predict the pose class.\n\nYou'll then train the model based on the dataset that were preprocessed in part 1.\n(Optional) Download the preprocessed dataset if you didn't run part 1",
"# Download the preprocessed CSV files which are the same as the output of step 1\nif is_skip_step_1:\n !wget -O train_data.csv http://download.tensorflow.org/data/pose_classification/yoga_train_data.csv\n !wget -O test_data.csv http://download.tensorflow.org/data/pose_classification/yoga_test_data.csv\n\n csvs_out_train_path = 'train_data.csv'\n csvs_out_test_path = 'test_data.csv'\n is_skipped_step_1 = True",
"Load the preprocessed CSVs into TRAIN and TEST datasets.",
"def load_pose_landmarks(csv_path):\n \"\"\"Loads a CSV created by MoveNetPreprocessor.\n \n Returns:\n X: Detected landmark coordinates and scores of shape (N, 17 * 3)\n y: Ground truth labels of shape (N, label_count)\n classes: The list of all class names found in the dataset\n dataframe: The CSV loaded as a Pandas dataframe features (X) and ground\n truth labels (y) to use later to train a pose classification model.\n \"\"\"\n\n # Load the CSV file\n dataframe = pd.read_csv(csv_path)\n df_to_process = dataframe.copy()\n\n # Drop the file_name columns as you don't need it during training.\n df_to_process.drop(columns=['file_name'], inplace=True)\n\n # Extract the list of class names\n classes = df_to_process.pop('class_name').unique()\n\n # Extract the labels\n y = df_to_process.pop('class_no')\n\n # Convert the input features and labels into the correct format for training.\n X = df_to_process.astype('float64')\n y = keras.utils.to_categorical(y)\n\n return X, y, classes, dataframe",
"Load and split the original TRAIN dataset into TRAIN (85% of the data) and VALIDATE (the remaining 15%).",
"# Load the train data\nX, y, class_names, _ = load_pose_landmarks(csvs_out_train_path)\n\n# Split training data (X, y) into (X_train, y_train) and (X_val, y_val)\nX_train, X_val, y_train, y_val = train_test_split(X, y,\n test_size=0.15)\n\n# Load the test data\nX_test, y_test, _, df_test = load_pose_landmarks(csvs_out_test_path)",
"Define functions to convert the pose landmarks to a pose embedding (a.k.a. feature vector) for pose classification\nNext, convert the landmark coordinates to a feature vector by:\n1. Moving the pose center to the origin.\n2. Scaling the pose so that the pose size becomes 1\n3. Flattening these coordinates into a feature vector\nThen use this feature vector to train a neural-network based pose classifier.",
"def get_center_point(landmarks, left_bodypart, right_bodypart):\n \"\"\"Calculates the center point of the two given landmarks.\"\"\"\n\n left = tf.gather(landmarks, left_bodypart.value, axis=1)\n right = tf.gather(landmarks, right_bodypart.value, axis=1)\n center = left * 0.5 + right * 0.5\n return center\n\n\ndef get_pose_size(landmarks, torso_size_multiplier=2.5):\n \"\"\"Calculates pose size.\n\n It is the maximum of two values:\n * Torso size multiplied by `torso_size_multiplier`\n * Maximum distance from pose center to any pose landmark\n \"\"\"\n # Hips center\n hips_center = get_center_point(landmarks, BodyPart.LEFT_HIP, \n BodyPart.RIGHT_HIP)\n\n # Shoulders center\n shoulders_center = get_center_point(landmarks, BodyPart.LEFT_SHOULDER,\n BodyPart.RIGHT_SHOULDER)\n\n # Torso size as the minimum body size\n torso_size = tf.linalg.norm(shoulders_center - hips_center)\n\n # Pose center\n pose_center_new = get_center_point(landmarks, BodyPart.LEFT_HIP, \n BodyPart.RIGHT_HIP)\n pose_center_new = tf.expand_dims(pose_center_new, axis=1)\n # Broadcast the pose center to the same size as the landmark vector to\n # perform substraction\n pose_center_new = tf.broadcast_to(pose_center_new,\n [tf.size(landmarks) // (17*2), 17, 2])\n\n # Dist to pose center\n d = tf.gather(landmarks - pose_center_new, 0, axis=0,\n name=\"dist_to_pose_center\")\n # Max dist to pose center\n max_dist = tf.reduce_max(tf.linalg.norm(d, axis=0))\n\n # Normalize scale\n pose_size = tf.maximum(torso_size * torso_size_multiplier, max_dist)\n\n return pose_size\n\n\ndef normalize_pose_landmarks(landmarks):\n \"\"\"Normalizes the landmarks translation by moving the pose center to (0,0) and\n scaling it to a constant pose size.\n \"\"\"\n # Move landmarks so that the pose center becomes (0,0)\n pose_center = get_center_point(landmarks, BodyPart.LEFT_HIP, \n BodyPart.RIGHT_HIP)\n pose_center = tf.expand_dims(pose_center, axis=1)\n # Broadcast the pose center to the same size as the landmark vector 
to perform\n # substraction\n pose_center = tf.broadcast_to(pose_center, \n [tf.size(landmarks) // (17*2), 17, 2])\n landmarks = landmarks - pose_center\n\n # Scale the landmarks to a constant pose size\n pose_size = get_pose_size(landmarks)\n landmarks /= pose_size\n\n return landmarks\n\n\ndef landmarks_to_embedding(landmarks_and_scores):\n \"\"\"Converts the input landmarks into a pose embedding.\"\"\"\n # Reshape the flat input into a matrix with shape=(17, 3)\n reshaped_inputs = keras.layers.Reshape((17, 3))(landmarks_and_scores)\n\n # Normalize landmarks 2D\n landmarks = normalize_pose_landmarks(reshaped_inputs[:, :, :2])\n\n # Flatten the normalized landmark coordinates into a vector\n embedding = keras.layers.Flatten()(landmarks)\n\n return embedding",
"Define a Keras model for pose classification\nOur Keras model takes the detected pose landmarks, then calculates the pose embedding and predicts the pose class.",
"# Define the model\ninputs = tf.keras.Input(shape=(51))\nembedding = landmarks_to_embedding(inputs)\n\nlayer = keras.layers.Dense(128, activation=tf.nn.relu6)(embedding)\nlayer = keras.layers.Dropout(0.5)(layer)\nlayer = keras.layers.Dense(64, activation=tf.nn.relu6)(layer)\nlayer = keras.layers.Dropout(0.5)(layer)\noutputs = keras.layers.Dense(len(class_names), activation=\"softmax\")(layer)\n\nmodel = keras.Model(inputs, outputs)\nmodel.summary()\n\nmodel.compile(\n optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy']\n)\n\n# Add a checkpoint callback to store the checkpoint that has the highest\n# validation accuracy.\ncheckpoint_path = \"weights.best.hdf5\"\ncheckpoint = keras.callbacks.ModelCheckpoint(checkpoint_path,\n monitor='val_accuracy',\n verbose=1,\n save_best_only=True,\n mode='max')\nearlystopping = keras.callbacks.EarlyStopping(monitor='val_accuracy', \n patience=20)\n\n# Start training\nhistory = model.fit(X_train, y_train,\n epochs=200,\n batch_size=16,\n validation_data=(X_val, y_val),\n callbacks=[checkpoint, earlystopping])\n\n# Visualize the training history to see whether you're overfitting.\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.title('Model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['TRAIN', 'VAL'], loc='lower right')\nplt.show()\n\n# Evaluate the model using the TEST dataset\nloss, accuracy = model.evaluate(X_test, y_test)",
"Draw the confusion matrix to better understand the model performance",
"def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"Plots the confusion matrix.\"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=55)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n\n# Classify pose in the TEST dataset using the trained model\ny_pred = model.predict(X_test)\n\n# Convert the prediction result to class name\ny_pred_label = [class_names[i] for i in np.argmax(y_pred, axis=1)]\ny_true_label = [class_names[i] for i in np.argmax(y_test, axis=1)]\n\n# Plot the confusion matrix\ncm = confusion_matrix(np.argmax(y_test, axis=1), np.argmax(y_pred, axis=1))\nplot_confusion_matrix(cm,\n class_names,\n title ='Confusion Matrix of Pose Classification Model')\n\n# Print the classification report\nprint('\\nClassification Report:\\n', classification_report(y_true_label,\n y_pred_label))",
"(Optional) Investigate incorrect predictions\nYou can look at the poses from the TEST dataset that were incorrectly predicted to see whether the model accuracy can be improved.\nNote: This only works if you have run step 1 because you need the pose image files on your local machine to display them.",
"if is_skip_step_1:\n raise RuntimeError('You must have run step 1 to run this cell.')\n\n# If step 1 was skipped, skip this step.\nIMAGE_PER_ROW = 3\nMAX_NO_OF_IMAGE_TO_PLOT = 30\n\n# Extract the list of incorrectly predicted poses\nfalse_predict = [id_in_df for id_in_df in range(len(y_test)) \\\n if y_pred_label[id_in_df] != y_true_label[id_in_df]]\nif len(false_predict) > MAX_NO_OF_IMAGE_TO_PLOT:\n false_predict = false_predict[:MAX_NO_OF_IMAGE_TO_PLOT]\n\n# Plot the incorrectly predicted images\nrow_count = len(false_predict) // IMAGE_PER_ROW + 1\nfig = plt.figure(figsize=(10 * IMAGE_PER_ROW, 10 * row_count))\nfor i, id_in_df in enumerate(false_predict):\n ax = fig.add_subplot(row_count, IMAGE_PER_ROW, i + 1)\n image_path = os.path.join(images_out_test_folder,\n df_test.iloc[id_in_df]['file_name'])\n\n image = cv2.imread(image_path)\n plt.title(\"Predict: %s; Actual: %s\"\n % (y_pred_label[id_in_df], y_true_label[id_in_df]))\n plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\nplt.show()",
"Part 3: Convert the pose classification model to TensorFlow Lite\nYou'll convert the Keras pose classification model to the TensorFlow Lite format so that you can deploy it to mobile apps, web browsers and edge devices. When converting the model, you'll apply dynamic range quantization to reduce the pose classification TensorFlow Lite model size by about 4 times with insignificant accuracy loss.\nNote: TensorFlow Lite supports multiple quantization schemes. See the documentation if you are interested to learn more.",
"converter = tf.lite.TFLiteConverter.from_keras_model(model)\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\ntflite_model = converter.convert()\n\nprint('Model size: %dKB' % (len(tflite_model) / 1024))\n\nwith open('pose_classifier.tflite', 'wb') as f:\n f.write(tflite_model)",
"Then you'll write the label file which contains mapping from the class indexes to the human readable class names.",
"with open('pose_labels.txt', 'w') as f:\n f.write('\\n'.join(class_names))",
"As you've applied quantization to reduce the model size, let's evaluate the quantized TFLite model to check whether the accuracy drop is acceptable.",
"def evaluate_model(interpreter, X, y_true):\n \"\"\"Evaluates the given TFLite model and return its accuracy.\"\"\"\n input_index = interpreter.get_input_details()[0][\"index\"]\n output_index = interpreter.get_output_details()[0][\"index\"]\n\n # Run predictions on all given poses.\n y_pred = []\n for i in range(len(y_true)):\n # Pre-processing: add batch dimension and convert to float32 to match with\n # the model's input data format.\n test_image = X[i: i + 1].astype('float32')\n interpreter.set_tensor(input_index, test_image)\n\n # Run inference.\n interpreter.invoke()\n\n # Post-processing: remove batch dimension and find the class with highest\n # probability.\n output = interpreter.tensor(output_index)\n predicted_label = np.argmax(output()[0])\n y_pred.append(predicted_label)\n\n # Compare prediction results with ground truth labels to calculate accuracy.\n y_pred = keras.utils.to_categorical(y_pred)\n return accuracy_score(y_true, y_pred)\n\n# Evaluate the accuracy of the converted TFLite model\nclassifier_interpreter = tf.lite.Interpreter(model_content=tflite_model)\nclassifier_interpreter.allocate_tensors()\nprint('Accuracy of TFLite model: %s' %\n evaluate_model(classifier_interpreter, X_test, y_test))",
"Now you can download the TFLite model (pose_classifier.tflite) and the label file (pose_labels.txt) to classify custom poses. See the Android and Python/Raspberry Pi sample app for an end-to-end example of how to use the TFLite pose classification model.",
"!zip pose_classifier.zip pose_labels.txt pose_classifier.tflite\n\n# Download the zip archive if running on Colab.\ntry:\n from google.colab import files\n files.download('pose_classifier.zip')\nexcept:\n pass"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
bekbote/project_repository
|
0420_PytorchIntro_1555587538086.ipynb
|
apache-2.0
|
[
"<a href=\"https://colab.research.google.com/github/bekbote/project_repository/blob/master/0420_PytorchIntro_1555587538086.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\nOutline\n\nPyTorch\nWhat are tensors\nInitialising, slicing, reshaping tensors\nNumpy and PyTorch interfacing\nGPU support for PyTorch + Enabling GPUs on Google Colab\nSpeed comparisons, Numpy -- PyTorch -- PyTorch on GPU\nAutodiff concepts and application\nWriting a basic learning loop using autograd\nExercises",
"import torch\nimport numpy as np\nimport matplotlib.pyplot as plt",
"Initialise tensors",
"x = torch.ones(3, 2)\nprint(x)\nx = torch.zeros(3, 2)\nprint(x)\nx = torch.rand(3, 2)\nprint(x)\n\nx = torch.empty(3, 2)\nprint(x)\ny = torch.zeros_like(x)\nprint(y)\n\nx = torch.linspace(0, 1, steps=5)\nprint(x)\n\nx = torch.tensor([[1, 2], \n [3, 4], \n [5, 6]])\nprint(x)",
"Slicing tensors",
"print(x.size())\nprint(x[:, 1]) \nprint(x[0, :]) \n\ny = x[1, 1]\nprint(y)\nprint(y.item())",
"Reshaping tensors",
"print(x)\ny = x.view(2, 3)\nprint(y)\n\ny = x.view(6,-1) \nprint(y)",
"Simple Tensor Operations",
"x = torch.ones([3, 2])\ny = torch.ones([3, 2])\nz = x + y\nprint(z)\nz = x - y\nprint(z)\nz = x * y\nprint(z)\n\nz = y.add(x)\nprint(z)\nprint(y)\n\nz = y.add_(x)\nprint(z)\nprint(y)",
"Numpy <> PyTorch",
"x_np = x.numpy()\nprint(type(x), type(x_np))\nprint(x_np)\n\na = np.random.randn(5)\nprint(a)\na_pt = torch.from_numpy(a)\nprint(type(a), type(a_pt))\nprint(a_pt)\n\nnp.add(a, 1, out=a)\nprint(a)\nprint(a_pt) \n\n%%time\nfor i in range(100):\n a = np.random.randn(100,100)\n b = np.random.randn(100,100)\n c = np.matmul(a, b)\n\n%%time\nfor i in range(100):\n a = torch.randn([100, 100])\n b = torch.randn([100, 100])\n c = torch.matmul(a, b)\n\n%%time\nfor i in range(10):\n a = np.random.randn(10000,10000)\n b = np.random.randn(10000,10000)\n c = a + b\n\n%%time\nfor i in range(10):\n a = torch.randn([10000, 10000])\n b = torch.randn([10000, 10000])\n c = a + b",
"CUDA support",
"print(torch.cuda.device_count())\n\nprint(torch.cuda.device(0))\nprint(torch.cuda.get_device_name(0))\n\ncuda0 = torch.device('cuda:0')\n\na = torch.ones(3, 2, device=cuda0)\nb = torch.ones(3, 2, device=cuda0)\nc = a + b\nprint(c)\n\nprint(a)\n\n%%time\nfor i in range(10):\n a = np.random.randn(10000,10000)\n b = np.random.randn(10000,10000)\n np.add(b, a)\n\n%%time\nfor i in range(10):\n a_cpu = torch.randn([10000, 10000])\n b_cpu = torch.randn([10000, 10000])\n b_cpu.add_(a_cpu)\n\n%%time\nfor i in range(10):\n a = torch.randn([10000, 10000], device=cuda0)\n b = torch.randn([10000, 10000], device=cuda0)\n b.add_(a)\n\n%%time\nfor i in range(10):\n a = np.random.randn(10000,10000)\n b = np.random.randn(10000,10000)\n np.matmul(b, a)\n\n%%time\nfor i in range(10):\n a_cpu = torch.randn([10000, 10000])\n b_cpu = torch.randn([10000, 10000])\n torch.matmul(a_cpu, b_cpu)\n\n%%time\nfor i in range(10):\n a = torch.randn([10000, 10000], device=cuda0)\n b = torch.randn([10000, 10000], device=cuda0)\n torch.matmul(a, b)",
"Autodiff",
"x = torch.ones([3, 2], requires_grad=True)\nprint(x)\n\ny = x + 5\nprint(y)\n\nz = y*y + 1\nprint(z)\n\nt = torch.sum(z)\nprint(t)\n\nt.backward()\n\nprint(x.grad)",
"$t = \\sum_i z_i, z_i = y_i^2 + 1, y_i = x_i + 5$\n$\\frac{\\partial t}{\\partial x_i} = \\frac{\\partial z_i}{\\partial x_i} = \\frac{\\partial z_i}{\\partial y_i} \\frac{\\partial y_i}{\\partial x_i} = 2y_i \\times 1$\nAt x = 1, y = 6, $\\frac{\\partial t}{\\partial x_i} = 12$",
"x = torch.ones([3, 2], requires_grad=True)\ny = x + 5\nr = 1/(1 + torch.exp(-y))\nprint(r)\ns = torch.sum(r)\ns.backward()\nprint(x.grad)\n\nx = torch.ones([3, 2], requires_grad=True)\ny = x + 5\nr = 1/(1 + torch.exp(-y))\na = torch.ones([3, 2])\nr.backward(a)\nprint(x.grad)",
"$\\frac{\\partial{s}}{\\partial{x}} = \\frac{\\partial{s}}{\\partial{r}} \\cdot \\frac{\\partial{r}}{\\partial{x}}$\nFor the above code $a$ represents $\\frac{\\partial{s}}{\\partial{r}}$ and then $x.grad$ gives directly $\\frac{\\partial{s}}{\\partial{x}}$\nAutodiff example that looks like what we have been doing",
"x = torch.randn([20, 1], requires_grad=True)\ny = 3*x - 2\n\nw = torch.tensor([1.], requires_grad=True)\nb = torch.tensor([1.], requires_grad=True)\n\ny_hat = w*x + b\n\nloss = torch.sum((y_hat - y)**2)\n\nprint(loss)\n\nloss.backward()\n\nprint(w.grad, b.grad)",
"Do it in a loop",
"learning_rate = 0.01\n\nw = torch.tensor([1.], requires_grad=True)\nb = torch.tensor([1.], requires_grad=True)\n\nprint(w.item(), b.item())\n\nfor i in range(10):\n \n x = torch.randn([20, 1])\n y = 3*x - 2\n \n y_hat = w*x + b\n loss = torch.sum((y_hat - y)**2)\n \n loss.backward()\n \n with torch.no_grad():\n w -= learning_rate * w.grad\n b -= learning_rate * b.grad\n \n w.grad.zero_()\n b.grad.zero_()\n\n print(w.item(), b.item())\n ",
"Do it for a large problem",
"%%time\nlearning_rate = 0.001\nN = 10000000\nepochs = 200\n\nw = torch.rand([N], requires_grad=True)\nb = torch.ones([1], requires_grad=True)\n\n# print(torch.mean(w).item(), b.item())\n\nfor i in range(epochs):\n \n x = torch.randn([N])\n y = torch.dot(3*torch.ones([N]), x) - 2\n \n y_hat = torch.dot(w, x) + b\n loss = torch.sum((y_hat - y)**2)\n \n loss.backward()\n \n with torch.no_grad():\n w -= learning_rate * w.grad\n b -= learning_rate * b.grad\n \n w.grad.zero_()\n b.grad.zero_()\n\n# print(torch.mean(w).item(), b.item())\n \n\n%%time\nlearning_rate = 0.001\nN = 10000000\nepochs = 200\n\nw = torch.rand([N], requires_grad=True, device=cuda0)\nb = torch.ones([1], requires_grad=True, device=cuda0)\n\n# print(torch.mean(w).item(), b.item())\n\nfor i in range(epochs):\n \n x = torch.randn([N], device=cuda0)\n y = torch.dot(3*torch.ones([N], device=cuda0), x) - 2\n \n y_hat = torch.dot(w, x) + b\n loss = torch.sum((y_hat - y)**2)\n \n loss.backward()\n \n with torch.no_grad():\n w -= learning_rate * w.grad\n b -= learning_rate * b.grad\n \n w.grad.zero_()\n b.grad.zero_()\n\n #print(torch.mean(w).item(), b.item())\n "
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
goerlitz/text-mining
|
python/REST-API Content Retriever.ipynb
|
apache-2.0
|
[
"About\nRetrieve JSON documents which are accessible via REST API and store them in mongodb.\nPrerequesites\n\nA running mongodb instance to store the JSON documents (see docker container).\nPython libraries:\npymongo - python bindings for mongodb.\njsonpath_rw - path expressions for matching parts of a JSON document.\n\n\nA configuration file (config.yaml) with setting for\nmongodb instance and collection name\nREST API url and parameters\nJSON document format",
"from pymongo import MongoClient\nfrom urllib import urlopen\nfrom jsonpath_rw import jsonpath, parse\nfrom datetime import datetime\nimport json\nimport yaml",
"Prepare configuration settings",
"with open('config.yaml') as yaml_file:\n cfg = yaml.load(yaml_file)\n\nrest_api = cfg['rest-api']\njson_cfg = cfg['json-path']\nmongo_db = cfg['mongo']\n\napi_list_items = rest_api['url'] + rest_api['get_list']\napi_get_item = rest_api['url'] + rest_api['get_detail']\n\nitem_id_field = cfg['json-path']['item_id']\nitem_list_path = parse(cfg['json-path']['item_list'])",
"Prepare database connection",
"client = MongoClient(mongo_db['url'])\n\ndb = client[mongo_db['database']][mongo_db['collection']]\n\nprint \"%d entries in database.\" % db.find().count()",
"Fetching documents via REST API",
"# functions for REST API calls\n\ndef get_item_list(offset = 0, limit = 100, url = api_list_items):\n request = urlopen(url % (offset, limit))\n return json.loads(request.read())\n\ndef get_item(id, url = api_get_item):\n request = urlopen(url % id)\n return json.loads(request.read())\n\nmax_items = 10**6\nlimit=100\n\nfor offset in xrange(0, max_items, limit):\n print (\"%s - fetching items %s - %s\" % (datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), offset, offset+limit))\n \n # download item list\n result = get_item_list(offset=offset, limit=limit)\n item_list = [ item.value for item in item_list_path.find(result) ]\n \n # stop if result list is empty\n if len(item_list) == 0:\n print \"no more results returned\"\n break\n \n # extract IDs and compare with items already in database\n item_ids = [ item[item_id_field] for item in item_list ]\n known_ids = [ item[item_id_field] for item in db.find( {item_id_field: { \"$in\": item_ids }} ) ]\n new_ids = [ x for x in item_ids if x not in known_ids ]\n \n print \"-> got %d ids (%d known, %d new)\" % (len(item_ids), len(known_ids), len(new_ids))\n \n # fetch new items from REST API\n items = []\n for id in new_ids:\n item = get_item(id)\n items.append(item)\n \n # insert new items in database\n if len(items) != 0:\n result = db.insert_many(items)\n"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
PythonBootCampIAG-USP/NASA_PBC2015
|
Day_00/03_Functions/Functions.ipynb
|
mit
|
[
"Fun with Functions!\nReference:\nCode academy's Functions unit\nOur objective is to learn how to write and use functions.\nFunctions allow us to abstract a task, write code to perform it, and then use it in various situations.\nExample:\nA calculator takes two numbers and an operator as input and then performs the operator on the two numbers. <br>\nEx: 1 + 2 = 3 . <br>\nInputs: '1', '2', '+'<br>\nOutput: '3'<br>\nCheck out what happens when you run these in the ipython notebook:",
"1+2\n\nprint 1+2",
"ipython has some built in functions, like the addition operator, symbolized as the plus sign, and 'print'. \nMuch of the rest of these lectures will be focused on learning to use the many libraries of code developed by others to do some pretty spectacular things. \nBefore then, let's explore how we actually write functions!\nHere's an example of a function:",
"def spam() :\n \"\"\"print eggs!\"\"\"\n print \"Eggs!\"\n return 0",
"It's comprised of 3 parts: \n - the header, which defines the function\n - the comment, which lets users know what the function does\n - and the body, which actually does the task required. \nWhat does spam do when we call it?",
"spam()",
"Notice the different parts of the header in spam. <br>What are the minimum parts necessary to define the function? \nClearly we typically want to do more than just print out something predefined though!\nLet's return to our intial addition example.<br>\nWrite a function add1() that takes one input parameter, adds 1 to it, and returns the result.",
"def add1(param):\n '''add 1 to the input param'''\n y=param+1\n return y",
"Now try executing it:",
"add1(2)",
"What output did you get?\nCompare to your neighbor's add1 function.\nWhat does the second part of your function (ie the comment) say? \nWhy do we care about commenting our code? \nNow make a function called 'add' that takes two numbers as input parameters, adds them, and returns that number.",
"def add(a,b) :\n '''how to add to numbers! ... or two stings'''\n return a+b",
"And call it with arguments 1 and 2:",
"add(1,2)",
"A function can take any number of parameters as input. <br>\nThe number of arguments passed to the function through the parameters generally matches the number of parameters. \nTry passing fewer or more than 2 arguments to add(). <br> What happens?",
"add(1,2,3)",
"Functions can also call other functions!<br>\nRecall the equation for a line: y=mx+b <br>\nWrite a function called line() that calls add() and returns y, given x.",
"def line(x):\n '''return the y value of a line, given the x value'''\n m = 2\n b = 0\n y=add(m*x,b)\n return y\n\nline(1)",
"What if we want to be able to change the slope (m) and intercept (b) on the fly? \nScope:<br> What variables (aka parameters) exist outside the scope of the functions we have written? <br> Hint: try asking for their values (by naming them and then executing that cell)!",
"m",
"Redefine line() with user-defined slope and intercept.",
"def line(x, m, b):\n '''return the y value of a line, given the x value'''\n y=add(m*x,b)\n return y\n\nline(1, 2, 0)",
"What if we the slope and intercept to be default values of 2 and 0, respectively, but changable sometimes? \nHint: try setting the parameters to equal the default values!",
"def line(x, m=2, b=0):\n '''return the y value of a line, given the x value'''\n y=add(m*x,b) \n return y",
"What happens when you call line with only one, two, or three parameters? <br>\nWhat's the minimum number of parameters required?",
"line(1, b=3)",
"What if you only want to allow the function to work on slopes that are even? <br>\nWrite evenSlopedLine() such that a naieve user can call it and learn something useful about how to use the function if they give it an odd slope value.",
"def evenSlopedLine(x, m=0, b=0) :\n '''return the y value for a line with input x and optinal m and b'''\n if m%2==0 : \n y=add(m*x, b)\n return y\n else :\n return \"Sorry, only even slopes are allowed.\"\n\nevenSlopedLine(1, m=3)",
"Test it with m = 2 and m = 3:",
"def test(x, vals):\n \"\"\"Return the values of the function evenSlopedLine given x and an array of slopes.\"\"\"\n result=[]\n for n in vals : \n result.append(evenSlopedLine(x, n))\n return result\n \nx=1\nm=[2,3]\ntest(x, m)",
"Suppose you want evenSlopedLine() to just work. Use add1() to make it so. \nWhy might this be good? <br>\nWhat are some potential drawbacks?",
"def evenSlopedLine(x, m=0, b=0) :\n '''return the y value for a line with input x and optinal m and b'''\n if m%2==0 : \n y=add(m*x, b)\n return y\n else :\n m=add1(m)\n return add(m*x, b)\n",
"Rewrite test() as evenSlopesYs, taking an optional x value and returning the array of y values for slopes in the array: m = [1,2,3,4,5,6,7,8,9,10] .<br>\nHint: try different orders for the optional and required parameters!",
"def evenSlopesYs(vals, x=1):\n \"\"\"Return the values of the function evenSlopedLine given x and an array of slopes.\"\"\"\n result=[]\n for n in vals : \n result.append(evenSlopedLine(x, n))\n return result\nm=[1,2,3,4,5,6,7,8,9,10]\nevenSlopesYs(m)",
"Compare your working result with your neighbor's. <br> How did you each treat the odd slopes? <br> What might be advantages or disadvantages to your various solutions? \nBreakout session\nYou want to calculate the overlap of two circles given the position of their centers (x1,y1) and (x2,y2) and their radii r1 and r2. \nYou know that the overlap is 0 if the distance between the two circles is greater than the sum of their radii. <br>\nYou also know the overlap is the area of the smaller circle if one circle is contained within the other. \nYour colleague wrote some code to calculate the area of the intersection, if the two circles overlap but don't fall in one of the special cases just mentioned (no overlap or fully contained). This is great! Unfortunately your colleague wasn't a fan of functions, so you know only that the area of intersection is:",
"rr1=r1**2\nrr2=r2**2\nphi = (math.acos((rr1 + (d ** 2) - rr2) / (2 * r1 * d))) * 2\ntheta = (math.acos((rr2 + (d ** 2) - rr1) / (2 * r2 * d))) * 2\narea1 = 0.5 * theta * rr2 - 0.5 * rr2 * math.sin(theta)\narea2 = 0.5 * phi * rr1 - 0.5 * rr1 * math.sin(phi)\narea= area1 + area2",
"Part 1: Interarea()\nDefine a function called interarea() which takes as input the positions and radii of the two circles and returns the area of the intersection.\nFeel free to rename your colleague's variables if a different convention makes more sense to you.\nWrite at least 3 test cases to be sure your functions work for all scenarios!\n(There are hints at the end of the exercise's parts.)",
"import math as math\ndef interarea(x1,y1,r1,x2,y2,r2) :\n '''return the overlap area of two circles given the center points (x1, y1) and (x2, y2) and their radii: r1 and r2'''\n d = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)\n rr1=r1**2\n rr2=r2**2\n if d<abs(r2-r1) :\n print \"100% overlap!\"\n return math.pi*min(r1,r2)**2\n elif d>=r1+r2:\n print \"No overlap!\"\n return 0\n else :\n phi = (math.acos((rr1 + (d ** 2) - rr2) / (2 * r1 * d))) * 2\n theta = (math.acos((rr2 + (d ** 2) - rr1) / (2 * r2 * d))) * 2\n area1 = 0.5 * theta * rr2 - 0.5 * rr2 * math.sin(theta)\n area2 = 0.5 * phi * rr1 - 0.5 * rr1 * math.sin(phi)\n area= area1 + area2\n print \"partial overlap: area = \", area\n return area",
"Part 2: Overlap fraction: location\nYou want to know the area of the intersection because you're interested in associating a point source, defined as a position (x1,y1) with an error in the measurement of (x1Err, y1Err), with a known source of position (x2,y2) with a given error on the position measurement of r2Err.\nTo do that, you've decided to define the \"location overlap\" fraction as the intersection area divided by the maximum possible area of intersection allowed by the errors in the position measurements. \nWrite a function overlapFracLoc() which returns the location overlap fraction. <br> Hint! The overlap fraction should range between 0 and 1. \nYou can either create your own data set (eg w random.random()) or use the data provided for candidates (source 1) and flares (source 2) at the bottom of this notebook and equivalently in candidates.txt and flare.txt.\nPart 3: Extension!\nA: Extension fraction:\nThe first point source could also actually be an extended source. How exciting! <br>\nSo in addition to having a radius and thus circle defined by the position error measurement (x1Err, y1Err), it can also have an actual extension measured (r1Ext). \nWrite another function which calculates the fractional extension overlap, overlapFracExt(), defined to be the ratio of [the intersection of the circle defined by the actual extension (r1Ext) and the position error circle (x2,y2,r2Err)] to the maximum area of either the first or second source. \nB: Point source \"extension\" fraction\nOf course, not all the sources are extended, but we would still like to define this parameter. It's sensible if one considers the minimum resolvable radius, i.e. the boundary between when we can detect that a source is actually extended. \nIf the source is a point source, use an optionally settable parameter for the extension (==minimum resolvable radius), and return the ratio relative to the second source's area (defined by r2Err).\nHint! 
Part 1:\nAssuming cartesian coordinates, the distance between two points is:",
"d = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)",
"Note the error message when you first try to evaluate d: python doesn't know what math is. <br>\nMath turns out to be one of those useful libraries of functions (and variables like pi). You can import them within the scope of a variable you call (as) math with:",
"import math as math",
"Typically we import modules like math globally. <br> \nIt's also possible to import from the math module just the particular function desired:",
"from math import sqrt\n\nsqrt(25)",
"We can also rename it as:",
"from math import sqrt as squareroot\n\nsquareroot(25)",
"Note which version of import your colleague used in their code. \nHint! Part 2:\nA. By definition in Part 1, the maximum area of overlap is when one circle is fully contained within the other. Thus the fractional overlap is defined as the intersection area divided by the minimum of the radii. \nB. There are several ways to convert the given error measurement (x1Err, y1Err) into a radius. Consider the relative sizes and their impact on the final fraction calculated. Optionally, allow the user to select which option to use!\nUncompleted parts make good homework!\nMore useful exercises:\nTry writing your functions in a file and running the code from the terminal.<br>\nHint: Calling \"python filename.py\" will execute the code in filename.py.\nWrite functions to read in the data from files.\nWrite a wrapper function that takes the data read in from the files and calls the fractional overlap functions. <br>\ncandidates.txt contains the first sources' information, including position, position error, extension, and extension error (all in degrees, and incidentally in the RA & Dec coordinate system; just assume cartesian geometry!).<br>\nflare.txt contains the second sources' information, including position and position error radius (also in degrees and RA,Dec). \nWrite the results to the terminal and then to a file.\nHow else might you display the results? \nUse matplotlib to display the interesting results!\nUse the following data* within the notebook to test your functions with:\n*(extracted from candidates.txt and flare.txt)",
"candidatesData = {'SNR357.0-1.0': {'raErr': 0.271981431953, 'decErr': 0.234572055248, 'radius': 0.0, 'ra': 265.612194957, 'radiusErr': 0.0, 'dec': -32.045488282}, 'SNR305.2+0.4': {'raErr': 0.230708064968, 'decErr': 0.224605379155, 'radius': 0.0, 'ra': 197.711507061, 'radiusErr': 0.0, 'dec': -62.4200203869}, 'SNR34.5-1.8': {'raErr': 0.243113651368, 'decErr': 0.235113062829, 'radius': 0.0, 'ra': 285.178490429, 'radiusErr': 0.0, 'dec': 0.597646829113}, 'SNR179.0+3.4': {'raErr': 0.231809181189, 'decErr': 0.231489134935, 'radius': 0.0, 'ra': 89.1713292269, 'radiusErr': 0.0, 'dec': 31.4951149482}, 'SNR0.9-3.2': {'raErr': 0.287716937271, 'decErr': 0.287716937271, 'radius': 2.68576864989, 'ra': 270.078473699, 'radiusErr': 0.0745858407624, 'dec': -29.7964411324}, 'SNR6.7-0.8': {'raErr': 0.24706826093, 'decErr': 0.24706826093, 'radius': 1.61811862853, 'ra': 270.946260674, 'radiusErr': 0.0362000163985, 'dec': -23.5765462593}, 'SNR37.4-2.6': {'raErr': 0.223437466251, 'decErr': 0.218477218501, 'radius': 0.0, 'ra': 287.193642574, 'radiusErr': 0.0, 'dec': 2.79930826528}, 'SNR206.0-0.4': {'raErr': 0.311777416739, 'decErr': 0.311777416739, 'radius': 3.16518333722, 'ra': 99.3480499716, 'radiusErr': 0.0829090932598, 'dec': 6.04291711301}, 'SNR260.0-1.2': {'raErr': 0.219684652942, 'decErr': 0.219684652942, 'radius': 1.05326352149, 'ra': 127.619604964, 'radiusErr': 0.0241890082999, 'dec': -41.3686479476}, 'SNR4.5+3.1': {'raErr': 0.36342658877, 'decErr': 0.36342658877, 'radius': 3.63289679912, 'ra': 266.081085134, 'radiusErr': 0.0403179887527, 'dec': -23.5162043303}, 'SNR350.8+0.6': {'raErr': 0.226943837147, 'decErr': 0.218266375394, 'radius': 0.0, 'ra': 259.821259092, 'radiusErr': 0.0, 'dec': -36.3657425647}, 'SNR36.6-2.7': {'raErr': 0.213787209434, 'decErr': 0.218556786321, 'radius': 0.698134038916, 'ra': 286.888157707, 'radiusErr': 0.0193633172924, 'dec': 2.03796233627}, 'SNR133.1+2.0': {'raErr': 0.256251963544, 'decErr': 0.256251963544, 'radius': 1.4571202293, 'ra': 35.8303573643, 
'radiusErr': 0.0589524317201, 'dec': 63.016863932}, 'SNR0.5-1.0': {'raErr': 0.213520647551, 'decErr': 0.491818560314, 'radius': 3.64836077677, 'ra': 267.714692466, 'radiusErr': 0.0246423475888, 'dec': -29.056555018}, 'SNR21.7-4.6': {'raErr': 0.261238806812, 'decErr': 0.228564707775, 'radius': 0.0, 'ra': 281.834000499, 'radiusErr': 0.0, 'dec': -12.0421945577}, 'SNR187.5+4.3': {'raErr': 0.207130074927, 'decErr': 0.205781790933, 'radius': 1.28816870319, 'ra': 94.7693692753, 'radiusErr': 0.00875163251335, 'dec': 24.5142843902}, 'SNR307.9+1.1': {'raErr': 0.268809096198, 'decErr': 0.268809096198, 'radius': 0.948980341874, 'ra': 203.294861341, 'radiusErr': 0.0688023427086, 'dec': -61.3380483636}, 'SNR26.7-2.9': {'raErr': 0.238906235566, 'decErr': 0.238906235566, 'radius': 0.666441980216, 'ra': 282.554062879, 'radiusErr': 0.0378130702637, 'dec': -6.8920362723}, 'SNR266.6+1.1': {'raErr': 0.253406814737, 'decErr': 0.253406814737, 'radius': 1.71293639428, 'ra': 135.870977376, 'radiusErr': 0.0445868105039, 'dec': -45.1219937402}, 'SNR192.6+1.5': {'raErr': 0.29267141516, 'decErr': 0.257917683615, 'radius': 0.0, 'ra': 94.6639732117, 'radiusErr': 0.0, 'dec': 18.6874706276}, 'SNR292.7+0.6': {'raErr': 0.211669864532, 'decErr': 0.211529641793, 'radius': 0.0, 'ra': 171.769078323, 'radiusErr': 0.0, 'dec': -60.5705794929}, 'SNR348.9-0.4': {'raErr': 0.241375544076, 'decErr': 0.241375544076, 'radius': 1.37703943137, 'ra': 259.425427937, 'radiusErr': 0.0346313902607, 'dec': -38.4677640832}, 'SNR1.3-2.9': {'raErr': 0.276048520226, 'decErr': 0.276048520226, 'radius': 2.89045818583, 'ra': 270.040273019, 'radiusErr': 0.0580412932745, 'dec': -29.2575899789}, 'SNR19.6-0.2': {'raErr': 0.218599592668, 'decErr': 0.215109650286, 'radius': 0.0, 'ra': 276.873652703, 'radiusErr': 0.0, 'dec': -11.8917300079}, 'SNR288.0+0.8': {'raErr': 0.233711496338, 'decErr': 0.233711496338, 'radius': 1.86996638915, 'ra': 163.294239207, 'radiusErr': 0.0508525104263, 'dec': -58.6315983807}, 'SNR313.2+0.8': {'raErr': 
0.394070303943, 'decErr': 0.394070303943, 'radius': 1.70579742563, 'ra': 213.932796558, 'radiusErr': 0.0528298804548, 'dec': -60.4095049085}, 'SNR34.9-3.6': {'raErr': 0.298655659691, 'decErr': 0.268422313615, 'radius': 0.0, 'ra': 286.982102109, 'radiusErr': 0.0, 'dec': 0.0681712044775}, 'SNR77.8-10.4': {'raErr': 0.255675770164, 'decErr': 0.255675770164, 'radius': 2.54424199628, 'ra': 317.286698163, 'radiusErr': 0.0432313048963, 'dec': 32.3791351336}, 'SNR356.9-2.2': {'raErr': 0.262974308106, 'decErr': 0.257591367918, 'radius': 0.0, 'ra': 266.748578364, 'radiusErr': 0.0, 'dec': -32.7404998269}, 'SNR33.6-2.0': {'raErr': 0.379497447662, 'decErr': 0.379497447662, 'radius': 2.69385048285, 'ra': 284.957646547, 'radiusErr': 0.102425262102, 'dec': -0.316655458163}, 'SNR353.7+0.4': {'raErr': 0.246005751271, 'decErr': 0.246005751271, 'radius': 1.23307152431, 'ra': 261.999939154, 'radiusErr': 0.0819007090426, 'dec': -33.9988614089}, 'SNR338.4+0.7': {'raErr': 0.251629117163, 'decErr': 0.251629117163, 'radius': 2.8293996792, 'ra': 249.479447695, 'radiusErr': 0.100211831445, 'dec': -45.9863390198}, 'SNR338.5-1.2': {'raErr': 0.238002174149, 'decErr': 0.238002174149, 'radius': 0.935759126861, 'ra': 251.704491372, 'radiusErr': 0.0241741004183, 'dec': -47.181164597}, 'SNR264.4-0.8': {'raErr': 0.22305862071, 'decErr': 0.219152934832, 'radius': 0.0, 'ra': 131.758850198, 'radiusErr': 0.0, 'dec': -44.6702066761}, 'SNR321.8-1.9': {'raErr': 0.218281076948, 'decErr': 0.216582158807, 'radius': 0.0, 'ra': 231.615371743, 'radiusErr': 0.0, 'dec': -58.9345087487}, 'SNR292.3-0.0': {'raErr': 0.225969979162, 'decErr': 0.225969979162, 'radius': 0.927298739579, 'ra': 170.511752225, 'radiusErr': 0.0395919631572, 'dec': -61.0622576573}, 'SNR349.9+0.9': {'raErr': 0.216179967762, 'decErr': 0.213377009531, 'radius': 0.0, 'ra': 258.886658595, 'radiusErr': 0.0, 'dec': -36.8597486164}, 'SNR19.6-3.3': {'raErr': 0.344205084869, 'decErr': 0.344205084869, 'radius': 3.04704002084, 'ra': 279.659282116, 
'radiusErr': 0.0545954052338, 'dec': -13.3957517685}, 'SNR3.2-2.2': {'raErr': 0.227442192502, 'decErr': 0.221265865521, 'radius': 0.0, 'ra': 270.352408502, 'radiusErr': 0.0, 'dec': -27.2731176644}, 'SNR326.9-2.1': {'raErr': 0.217548504499, 'decErr': 0.217548504499, 'radius': 0.938827953143, 'ra': 239.409968227, 'radiusErr': 0.0254444041526, 'dec': -56.0366371638}, 'SNR318.7-0.9': {'raErr': 0.235692743113, 'decErr': 0.224930318533, 'radius': 0.0, 'ra': 225.390216638, 'radiusErr': 0.0, 'dec': -59.7206378884}, 'SNR298.8+1.8': {'raErr': 0.225842286172, 'decErr': 0.221696652515, 'radius': 0.0, 'ra': 184.342648857, 'radiusErr': 0.0, 'dec': -60.7649405423}, 'SNR28.3-3.0': {'raErr': 0.228599044991, 'decErr': 0.228599044991, 'radius': 0.840031316494, 'ra': 283.438429381, 'radiusErr': 0.0356132924753, 'dec': -5.50680845713}, 'SNR349.9-0.6': {'raErr': 0.216028596904, 'decErr': 0.213251133676, 'radius': 0.0, 'ra': 260.422638137, 'radiusErr': 0.0, 'dec': -37.7348753207}, 'SNR110.2-0.5': {'raErr': 0.236849515328, 'decErr': 0.230260437312, 'radius': 0.0, 'ra': 346.95051627, 'radiusErr': 0.0, 'dec': 59.81091111}, 'SNR349.0-2.8': {'raErr': 0.29074521768, 'decErr': 0.384293598602, 'radius': 2.83871530095, 'ra': 262.10978964, 'radiusErr': 0.0871497833017, 'dec': -39.7314877386}, 'SNR38.0-1.2': {'raErr': 0.283037547031, 'decErr': 0.283037547031, 'radius': 1.34579950505, 'ra': 286.233309323, 'radiusErr': 0.0532988018448, 'dec': 3.94808854222}, 'SNR19.6-1.5': {'raErr': 0.257374355229, 'decErr': 0.257374355229, 'radius': 1.74060124067, 'ra': 278.047525227, 'radiusErr': 0.0919806094943, 'dec': -12.5523966455}, 'SNR91.5+4.2': {'raErr': 0.263638985711, 'decErr': 0.263638985711, 'radius': 1.47211097459, 'ra': 314.685294819, 'radiusErr': 0.059201947235, 'dec': 52.313575239}, 'SNR21.3-2.3': {'raErr': 0.294407339889, 'decErr': 0.246261917561, 'radius': 0.0, 'ra': 279.548261093, 'radiusErr': 0.0, 'dec': -11.4031668834}, 'SNR75.2+1.0': {'raErr': 0.215353155464, 'decErr': 0.213779870664, 'radius': 
0.0, 'ra': 304.349927117, 'radiusErr': 0.0, 'dec': 37.3362577335}, 'SNR185.6-3.4': {'raErr': 0.202794333677, 'decErr': 0.20271688318, 'radius': 0.0, 'ra': 86.4210131902, 'radiusErr': 0.0, 'dec': 22.4377998483}, 'SNR31.0+0.3': {'raErr': 0.248025266891, 'decErr': 0.248025266891, 'radius': 0.607140537348, 'ra': 281.743487302, 'radiusErr': 0.0669140104212, 'dec': -1.55237255283}, 'SNR338.9-0.7': {'raErr': 0.225481752104, 'decErr': 0.225481752104, 'radius': 1.05623055203, 'ra': 251.521101135, 'radiusErr': 0.0446777776714, 'dec': -46.5753551249}, 'SNR330.6-0.0': {'raErr': 0.234954432574, 'decErr': 0.234954432574, 'radius': 0.966485195736, 'ra': 241.813325782, 'radiusErr': 0.0348944403116, 'dec': -52.0696464985}, 'SNR179.7-1.6': {'raErr': 0.295735580209, 'decErr': 0.295735580209, 'radius': 2.2746520516, 'ra': 84.6514062551, 'radiusErr': 0.0627806184125, 'dec': 28.3537157867}, 'SNR314.3+1.0': {'raErr': 0.274438303389, 'decErr': 0.274438303389, 'radius': 1.29691980645, 'ra': 215.909934841, 'radiusErr': 0.0371154850299, 'dec': -59.8191299263}, 'SNR298.4+11.8': {'raErr': 0.313375760068, 'decErr': 0.313375760068, 'radius': 1.76035220138, 'ra': 185.757884938, 'radiusErr': 0.0768070296844, 'dec': -50.8660030744}, 'SNR27.9-0.7': {'raErr': 0.258584479144, 'decErr': 0.258584479144, 'radius': 1.67659641636, 'ra': 281.196489622, 'radiusErr': 0.0440245156953, 'dec': -4.7862936741}, 'SNR24.4-0.1': {'raErr': 0.234521546046, 'decErr': 0.234521546046, 'radius': 1.12957934733, 'ra': 279.072202377, 'radiusErr': 0.0370680188077, 'dec': -7.60274129272}, 'SNR15.4-3.2': {'raErr': 0.236252794145, 'decErr': 0.236252794145, 'radius': 0.904611875572, 'ra': 277.632251388, 'radiusErr': 0.0661079886355, 'dec': -17.1098893637}, 'SNR1.5-1.8': {'raErr': 0.301136570075, 'decErr': 0.350579204193, 'radius': 3.97194529144, 'ra': 269.083870298, 'radiusErr': 0.0106048222889, 'dec': -28.5346640059}, 'SNR27.7-2.1': {'raErr': 0.251936191748, 'decErr': 0.251936191748, 'radius': 1.36738670143, 'ra': 282.350910245, 
'radiusErr': 0.0378777867137, 'dec': -5.58498279785}, 'SNR47.3+0.1': {'raErr': 0.232245266658, 'decErr': 0.22595209949, 'radius': 0.0, 'ra': 289.433674631, 'radiusErr': 0.0, 'dec': 12.8215567861}, 'SNR340.0+1.2': {'raErr': 0.222737932148, 'decErr': 0.222737932148, 'radius': 0.511918833626, 'ra': 250.42984654, 'radiusErr': 0.0273043571285, 'dec': -44.4949793168}, 'SNR30.0-1.9': {'raErr': 0.232168094995, 'decErr': 0.232168094995, 'radius': 1.69218827621, 'ra': 283.220120639, 'radiusErr': 0.0330618850719, 'dec': -3.48120201817}, 'SNR7.0-2.0': {'raErr': 0.312222919137, 'decErr': 0.267941316416, 'radius': 0.0, 'ra': 272.286431813, 'radiusErr': 0.0, 'dec': -23.9070487126}, 'SNR44.2-1.5': {'raErr': 0.208068049359, 'decErr': 0.20768641136, 'radius': 0.0, 'ra': 289.366755954, 'radiusErr': 0.0, 'dec': 9.27952062116}, 'SNR340.5-1.4': {'raErr': 0.217865253515, 'decErr': 0.217865253515, 'radius': 1.1099562952, 'ra': 253.750178228, 'radiusErr': 0.026096770521, 'dec': -45.728520612}, 'SNR212.0-0.4': {'raErr': 0.236335938673, 'decErr': 0.233634411825, 'radius': 0.0, 'ra': 102.064573789, 'radiusErr': 0.0, 'dec': 0.585185159102}, 'SNR33.8-0.6': {'raErr': 0.282578061319, 'decErr': 0.282578061319, 'radius': 1.71854086244, 'ra': 283.818949902, 'radiusErr': 0.093282108915, 'dec': 0.518769676872}, 'SNR292.0+3.3': {'raErr': 0.216935056034, 'decErr': 0.215769395535, 'radius': 0.0, 'ra': 172.093936919, 'radiusErr': 0.0, 'dec': -57.8300773879}, 'SNR80.6+0.6': {'raErr': 0.229616117211, 'decErr': 0.229616117211, 'radius': 1.67791494779, 'ra': 308.825443364, 'radiusErr': 0.0342199404282, 'dec': 41.4608292626}, 'SNR113.6-1.9': {'raErr': 0.210205443684, 'decErr': 0.209880072488, 'radius': 0.0, 'ra': 354.133277338, 'radiusErr': 0.0, 'dec': 59.6193946617}, 'SNR31.7-1.4': {'raErr': 0.380821634236, 'decErr': 0.380821634236, 'radius': 3.28938499231, 'ra': 283.57307866, 'radiusErr': 0.0966106582556, 'dec': -1.72169220629}, 'SNR76.5+0.5': {'raErr': 0.239194370141, 'decErr': 0.237042031629, 'radius': 
0.0, 'ra': 305.79604969, 'radiusErr': 0.0, 'dec': 38.0899242377}, 'SNR359.9-3.9': {'raErr': 0.370403396666, 'decErr': 0.370403396666, 'radius': 3.40579876002, 'ra': 270.248088067, 'radiusErr': 0.0322331856041, 'dec': -31.0404354684}, 'SNR13.9-2.5': {'raErr': 0.246910688874, 'decErr': 0.230218282388, 'radius': 0.0, 'ra': 276.166824682, 'radiusErr': 0.0, 'dec': -18.0610935207}, 'SNR1.8-4.3': {'raErr': 0.289021434669, 'decErr': 0.289021434669, 'radius': 3.81421375835, 'ra': 271.728824097, 'radiusErr': 0.0379135371627, 'dec': -29.5968366794}, 'SNR356.2+0.3': {'raErr': 0.235796937237, 'decErr': 0.231081834107, 'radius': 0.0, 'ra': 263.780658892, 'radiusErr': 0.0, 'dec': -31.9487883966}, 'SNR284.4-1.1': {'raErr': 0.208806630737, 'decErr': 0.208371410621, 'radius': 0.0, 'ra': 155.495025217, 'radiusErr': 0.0, 'dec': -58.5085968979}, 'SNR10.6+0.2': {'raErr': 0.225499913609, 'decErr': 0.225499913609, 'radius': 1.10449743091, 'ra': 272.032883863, 'radiusErr': 0.0341393645176, 'dec': -19.7032873662}, 'SNR7.2-1.0': {'raErr': 0.212435277411, 'decErr': 0.212435277411, 'radius': 1.15182527401, 'ra': 271.458009475, 'radiusErr': 0.0181587333311, 'dec': -23.2450887576}, 'SNR8.4-3.3': {'raErr': 0.24715969135, 'decErr': 0.244745829424, 'radius': 0.0, 'ra': 274.287183754, 'radiusErr': 0.0, 'dec': -23.2733938363}, 'SNR10.6-0.0': {'raErr': 0.216131293557, 'decErr': 0.216131293557, 'radius': 1.1415765588, 'ra': 272.248039653, 'radiusErr': 0.02081870657, 'dec': -19.7838255334}, 'SNR19.5-0.5': {'raErr': 0.329306643087, 'decErr': 0.329306643087, 'radius': 3.33374746458, 'ra': 277.035347808, 'radiusErr': 0.0638087205202, 'dec': -12.1823823697}, 'SNR29.9-0.1': {'raErr': 0.226943543794, 'decErr': 0.220524036188, 'radius': 0.0, 'ra': 281.586115676, 'radiusErr': 0.0, 'dec': -2.74345446505}, 'SNR333.5+1.0': {'raErr': 0.225792647797, 'decErr': 0.22178170716, 'radius': 0.0, 'ra': 244.032656947, 'radiusErr': 0.0, 'dec': -49.305185678}, 'SNR19.0-3.8': {'raErr': 0.240504129745, 'decErr': 0.240504129745, 
'radius': 2.97308953392, 'ra': 279.888971769, 'radiusErr': 0.0297750778352, 'dec': -14.1878114482}, 'SNR19.1-4.5': {'raErr': 0.258761006736, 'decErr': 0.258761006736, 'radius': 1.43509359229, 'ra': 280.599105519, 'radiusErr': 0.0528473369628, 'dec': -14.3515550345}, 'SNR25.1-1.8': {'raErr': 0.229996217496, 'decErr': 0.229996217496, 'radius': 1.03107244152, 'ra': 280.858348847, 'radiusErr': 0.0380107405473, 'dec': -7.82248442903}, 'SNR13.1-1.2': {'raErr': 0.234350036095, 'decErr': 0.226731586895, 'radius': 0.0, 'ra': 274.602218968, 'radiusErr': 0.0, 'dec': -18.1865742766}, 'SNR338.9+0.1': {'raErr': 0.222766347396, 'decErr': 0.218998825863, 'radius': 0.0, 'ra': 250.52920877, 'radiusErr': 0.0, 'dec': -46.0119304027}, 'SNR32.2-0.1': {'raErr': 0.224030013612, 'decErr': 0.224030013612, 'radius': 1.28305718136, 'ra': 282.658087263, 'radiusErr': 0.0542669367717, 'dec': -0.688323988425}, 'SNR33.6+0.4': {'raErr': 0.274322951956, 'decErr': 0.245689538065, 'radius': 0.0, 'ra': 282.788714843, 'radiusErr': 0.0, 'dec': 0.823801501246}, 'SNR0.2-1.1': {'raErr': 0.231727942038, 'decErr': 0.229055997984, 'radius': 0.0, 'ra': 267.577987688, 'radiusErr': 0.0, 'dec': -29.3043856157}, 'SNR50.6-0.8': {'raErr': 0.209175932656, 'decErr': 0.209175932656, 'radius': 1.05319549445, 'ra': 291.900664176, 'radiusErr': 0.0159060774408, 'dec': 15.2813222061}, 'SNR21.1-2.8': {'raErr': 0.243908532658, 'decErr': 0.243908532658, 'radius': 2.39797220885, 'ra': 279.965228189, 'radiusErr': 0.0543193832304, 'dec': -11.7788597994}, 'SNR324.4-0.0': {'raErr': 0.219817336197, 'decErr': 0.218253702216, 'radius': 0.0, 'ra': 233.713747774, 'radiusErr': 0.0, 'dec': -55.9346857956}, 'SNR334.2-1.9': {'raErr': 0.233771109082, 'decErr': 0.233771109082, 'radius': 1.38014246042, 'ra': 248.115723197, 'radiusErr': 0.0490037345498, 'dec': -50.8741175307}, 'SNR338.2-0.9': {'raErr': 0.254174197169, 'decErr': 0.254174197169, 'radius': 1.26483497373, 'ra': 251.025366585, 'radiusErr': 0.0286707253048, 'dec': 
-47.2488966627}}\n\nflaresData = {'Flare54': {'dec': -6.21, 'radius': 1.8, 'ra': 174.2}, 'Flare55': {'dec': 13.27, 'radius': 1.01, 'ra': 238.56}, 'Flare56': {'dec': 52.7, 'radius': 1.39, 'ra': 121.5}, 'Flare57': {'dec': 78.56, 'radius': 0.68, 'ra': 271.96}, 'Flare50': {'dec': 50.57, 'radius': 0.63, 'ra': 132.32}, 'Flare51': {'dec': 32.07, 'radius': 1.01, 'ra': 350.45}, 'Flare52': {'dec': 4.84, 'radius': 1.39, 'ra': 153.94}, 'Flare53': {'dec': 11.11, 'radius': 1.01, 'ra': 72.07}, 'Flare58': {'dec': -46.67, 'radius': 1.01, 'ra': 41.7}, 'Flare59': {'dec': -1.96, 'radius': 1.39, 'ra': 323.52}, 'Flare138': {'dec': 0.91, 'radius': 1.8, 'ra': 165.18}, 'Flare139': {'dec': -27.99, 'radius': 1.39, 'ra': 34.97}, 'Flare134': {'dec': 35.73, 'radius': 1.39, 'ra': 34.79}, 'Flare135': {'dec': -48.83, 'radius': 1.39, 'ra': 269.92}, 'Flare136': {'dec': 38.28, 'radius': 0.63, 'ra': 249.16}, 'Flare137': {'dec': -29.32, 'radius': 1.39, 'ra': 161.57}, 'Flare130': {'dec': 42.92, 'radius': 0.84, 'ra': 36.29}, 'Flare131': {'dec': -50.17, 'radius': 1.39, 'ra': 137.24}, 'Flare132': {'dec': -21.06, 'radius': 1.8, 'ra': 57.75}, 'Flare133': {'dec': -2.43, 'radius': 1.8, 'ra': 55.26}, 'Flare166': {'dec': -31.93, 'radius': 1.39, 'ra': 155.78}, 'Flare61': {'dec': 45.71, 'radius': 1.01, 'ra': 315.87}, 'Flare60': {'dec': -26.66, 'radius': 1.39, 'ra': 333.6}, 'Flare63': {'dec': -33.7, 'radius': 1.01, 'ra': 258.88}, 'Flare62': {'dec': -12.98, 'radius': 0.84, 'ra': 263.2}, 'Flare65': {'dec': 60.67, 'radius': 0.95, 'ra': 39.28}, 'Flare64': {'dec': -4.97, 'radius': 0.59, 'ra': 203.18}, 'Flare67': {'dec': -25.88, 'radius': 0.63, 'ra': 191.56}, 'Flare66': {'dec': 18.02, 'radius': 1.01, 'ra': 259.88}, 'Flare69': {'dec': -1.49, 'radius': 1.39, 'ra': 65.66}, 'Flare68': {'dec': 45.49, 'radius': 1.39, 'ra': 79.32}, 'Flare161': {'dec': 28.58, 'radius': 0.59, 'ra': 39.5}, 'Flare160': {'dec': -11.54, 'radius': 0.68, 'ra': 18.94}, 'Flare129': {'dec': -53.72, 'radius': 1.39, 'ra': 332.38}, 'Flare128': {'dec': 32.35, 
'radius': 0.63, 'ra': 282.58}, 'Flare127': {'dec': 15.5, 'radius': 1.39, 'ra': 30.95}, 'Flare126': {'dec': 2.16, 'radius': 0.63, 'ra': 187.43}, 'Flare125': {'dec': -20.05, 'radius': 1.3, 'ra': 287.56}, 'Flare124': {'dec': 52.1, 'radius': 1.01, 'ra': 265.8}, 'Flare123': {'dec': 4.69, 'radius': 0.74, 'ra': 76.12}, 'Flare122': {'dec': 61.53, 'radius': 1.39, 'ra': 121.02}, 'Flare121': {'dec': -1.77, 'radius': 1.39, 'ra': 75.92}, 'Flare120': {'dec': -83.96, 'radius': 1.39, 'ra': 328.85}, 'Flare165': {'dec': 19.5, 'radius': 1.01, 'ra': 108.05}, 'Flare173': {'dec': -29.58, 'radius': 1.39, 'ra': 207.21}, 'Flare164': {'dec': 67.09, 'radius': 0.49, 'ra': 283.29}, 'Flare78': {'dec': 5.78, 'radius': 1.39, 'ra': 100.11}, 'Flare79': {'dec': -39.31, 'radius': 0.63, 'ra': 270.92}, 'Flare76': {'dec': 16.01, 'radius': 1.8, 'ra': 81.3}, 'Flare77': {'dec': 47.51, 'radius': 1.8, 'ra': 24.81}, 'Flare74': {'dec': -30.49, 'radius': 0.86, 'ra': 328.63}, 'Flare75': {'dec': 37.19, 'radius': 1.39, 'ra': 303.22}, 'Flare72': {'dec': -15.89, 'radius': 0.41, 'ra': 356.35}, 'Flare73': {'dec': -51.19, 'radius': 0.68, 'ra': 32.42}, 'Flare70': {'dec': 48.85, 'radius': 0.59, 'ra': 198.29}, 'Flare71': {'dec': -56.45, 'radius': 1.39, 'ra': 36.99}, 'Flare112': {'dec': 33.76, 'radius': 1.39, 'ra': 305.99}, 'Flare113': {'dec': 49.73, 'radius': 0.68, 'ra': 178.24}, 'Flare110': {'dec': -35.87, 'radius': 1.39, 'ra': 136.3}, 'Flare111': {'dec': 4.69, 'radius': 1.39, 'ra': 162.82}, 'Flare116': {'dec': 31.62, 'radius': 0.59, 'ra': 230.75}, 'Flare117': {'dec': 11.8, 'radius': 0.84, 'ra': 338.35}, 'Flare114': {'dec': -13.9, 'radius': 1.39, 'ra': 149.43}, 'Flare115': {'dec': 50.11, 'radius': 1.39, 'ra': 265.32}, 'Flare118': {'dec': -66.29, 'radius': 1.39, 'ra': 353.25}, 'Flare119': {'dec': -49.79, 'radius': 0.84, 'ra': 352.17}, 'Flare198': {'dec': 34.83, 'radius': 1.8, 'ra': 167.58}, 'Flare199': {'dec': -22.74, 'radius': 1.39, 'ra': 195.04}, 'Flare192': {'dec': 54.96, 'radius': 0.68, 'ra': 115.18}, 'Flare193': 
{'dec': -47.35, 'radius': 0.74, 'ra': 314.16}, 'Flare190': {'dec': -54.96, 'radius': 1.39, 'ra': 84.37}, 'Flare191': {'dec': -60.37, 'radius': 1.39, 'ra': 66.94}, 'Flare196': {'dec': -39.36, 'radius': 1.39, 'ra': 339.75}, 'Flare197': {'dec': -38.05, 'radius': 0.49, 'ra': 67.02}, 'Flare194': {'dec': 24.37, 'radius': 1.39, 'ra': 149.38}, 'Flare195': {'dec': 50.77, 'radius': 0.74, 'ra': 330.62}, 'Flare167': {'dec': -21.19, 'radius': 0.84, 'ra': 291.03}, 'Flare170': {'dec': -40.44, 'radius': 1.01, 'ra': 353.29}, 'Flare171': {'dec': -31.63, 'radius': 1.8, 'ra': 229.26}, 'Flare172': {'dec': -24.46, 'radius': 0.84, 'ra': 45.65}, 'Flare105': {'dec': -12.27, 'radius': 0.84, 'ra': 132.41}, 'Flare104': {'dec': -36.49, 'radius': 1.39, 'ra': 37.14}, 'Flare107': {'dec': -13.39, 'radius': 0.53, 'ra': 233.37}, 'Flare106': {'dec': 1.77, 'radius': 1.39, 'ra': 33.84}, 'Flare101': {'dec': -55.85, 'radius': 1.8, 'ra': 202.86}, 'Flare100': {'dec': -64.92, 'radius': 0.86, 'ra': 195.99}, 'Flare103': {'dec': 4.69, 'radius': 1.39, 'ra': 203.41}, 'Flare102': {'dec': 1.35, 'radius': 1.8, 'ra': 48.14}, 'Flare174': {'dec': 44.19, 'radius': 1.39, 'ra': 300.26}, 'Flare109': {'dec': -3.85, 'radius': 1.39, 'ra': 350.73}, 'Flare108': {'dec': 30.21, 'radius': 1.39, 'ra': 208.34}, 'Flare175': {'dec': -56.45, 'radius': 1.39, 'ra': 119.83}, 'Flare189': {'dec': -5.5, 'radius': 0.84, 'ra': 170.21}, 'Flare176': {'dec': 45.07, 'radius': 0.84, 'ra': 103.04}, 'Flare200': {'dec': 24.32, 'radius': 1.39, 'ra': 153.47}, 'Flare185': {'dec': 7.23, 'radius': 0.84, 'ra': 83.26}, 'Flare184': {'dec': 16.52, 'radius': 0.44, 'ra': 39.72}, 'Flare187': {'dec': 32.49, 'radius': 0.84, 'ra': 198.25}, 'Flare177': {'dec': 65.88, 'radius': 1.39, 'ra': 148.76}, 'Flare181': {'dec': 1.59, 'radius': 1.39, 'ra': 137.67}, 'Flare180': {'dec': -36.43, 'radius': 1.39, 'ra': 80.08}, 'Flare183': {'dec': -36.21, 'radius': 0.59, 'ra': 60.65}, 'Flare182': {'dec': -0.74, 'radius': 1.01, 'ra': 70.45}, 'Flare18': {'dec': 54.9, 'radius': 1.39, 
'ra': 181.36}, 'Flare19': {'dec': 28.17, 'radius': 1.39, 'ra': 141.14}, 'Flare201': {'dec': 1.13, 'radius': 1.08, 'ra': 147.17}, 'Flare10': {'dec': -52.35, 'radius': 1.01, 'ra': 259.7}, 'Flare11': {'dec': -6.03, 'radius': 1.8, 'ra': 341.82}, 'Flare12': {'dec': -30.2, 'radius': 1.8, 'ra': 101.75}, 'Flare13': {'dec': 42.23, 'radius': 0.56, 'ra': 330.56}, 'Flare14': {'dec': 41.0, 'radius': 1.39, 'ra': 250.49}, 'Flare15': {'dec': 14.14, 'radius': 0.56, 'ra': 111.15}, 'Flare16': {'dec': -62.38, 'radius': 1.8, 'ra': 256.22}, 'Flare17': {'dec': 33.08, 'radius': 0.84, 'ra': 110.14}, 'Flare83': {'dec': 10.1, 'radius': 0.68, 'ra': 33.18}, 'Flare82': {'dec': -9.12, 'radius': 0.5, 'ra': 228.34}, 'Flare81': {'dec': 17.72, 'radius': 1.8, 'ra': 326.07}, 'Flare80': {'dec': -70.28, 'radius': 1.39, 'ra': 91.08}, 'Flare87': {'dec': -35.96, 'radius': 1.39, 'ra': 288.58}, 'Flare86': {'dec': -35.57, 'radius': 0.63, 'ra': 224.76}, 'Flare85': {'dec': 24.17, 'radius': 1.39, 'ra': 116.92}, 'Flare84': {'dec': 32.55, 'radius': 1.01, 'ra': 268.36}, 'Flare178': {'dec': 70.23, 'radius': 0.84, 'ra': 266.67}, 'Flare179': {'dec': 36.98, 'radius': 1.39, 'ra': 112.62}, 'Flare89': {'dec': 33.63, 'radius': 0.46, 'ra': 95.65}, 'Flare88': {'dec': 40.93, 'radius': 1.08, 'ra': 308.59}, 'Flare163': {'dec': 1.56, 'radius': 1.8, 'ra': 116.55}, 'Flare2': {'dec': 9.38, 'radius': 1.39, 'ra': 267.78}, 'Flare3': {'dec': -44.26, 'radius': 0.51, 'ra': 84.07}, 'Flare0': {'dec': 39.12, 'radius': 0.74, 'ra': 263.99}, 'Flare1': {'dec': 61.27, 'radius': 0.84, 'ra': 18.12}, 'Flare6': {'dec': 44.87, 'radius': 0.56, 'ra': 206.64}, 'Flare7': {'dec': -7.55, 'radius': 0.56, 'ra': 306.65}, 'Flare4': {'dec': -38.36, 'radius': 1.39, 'ra': 299.07}, 'Flare5': {'dec': -27.82, 'radius': 0.84, 'ra': 343.02}, 'Flare8': {'dec': 48.59, 'radius': 1.39, 'ra': 283.36}, 'Flare9': {'dec': -55.05, 'radius': 1.8, 'ra': 7.28}, 'Flare188': {'dec': 70.77, 'radius': 1.39, 'ra': 131.43}, 'Flare90': {'dec': -46.03, 'radius': 1.8, 'ra': 320.5}, 
'Flare91': {'dec': -11.71, 'radius': 0.95, 'ra': 112.24}, 'Flare92': {'dec': 2.49, 'radius': 1.39, 'ra': 122.8}, 'Flare93': {'dec': -40.37, 'radius': 1.39, 'ra': 53.43}, 'Flare94': {'dec': -42.84, 'radius': 1.39, 'ra': 299.19}, 'Flare95': {'dec': -53.17, 'radius': 0.84, 'ra': 159.57}, 'Flare96': {'dec': 29.94, 'radius': 1.39, 'ra': 184.36}, 'Flare97': {'dec': 47.56, 'radius': 1.39, 'ra': 250.29}, 'Flare98': {'dec': -32.97, 'radius': 1.39, 'ra': 268.0}, 'Flare99': {'dec': 10.49, 'radius': 0.35, 'ra': 226.28}, 'Flare169': {'dec': -34.21, 'radius': 1.8, 'ra': 84.89}, 'Flare168': {'dec': 31.46, 'radius': 1.39, 'ra': 329.9}, 'Flare186': {'dec': 41.63, 'radius': 0.84, 'ra': 50.15}, 'Flare156': {'dec': -64.43, 'radius': 1.8, 'ra': 171.21}, 'Flare157': {'dec': 68.58, 'radius': 0.84, 'ra': 255.11}, 'Flare154': {'dec': 4.41, 'radius': 1.01, 'ra': 190.12}, 'Flare155': {'dec': 79.94, 'radius': 1.39, 'ra': 57.74}, 'Flare152': {'dec': -21.06, 'radius': 0.51, 'ra': 278.63}, 'Flare153': {'dec': 60.94, 'radius': 0.63, 'ra': 158.35}, 'Flare150': {'dec': -70.17, 'radius': 1.01, 'ra': 202.32}, 'Flare151': {'dec': 6.21, 'radius': 1.39, 'ra': 160.17}, 'Flare206': {'dec': 40.39, 'radius': 1.39, 'ra': 351.55}, 'Flare207': {'dec': -22.6, 'radius': 0.74, 'ra': 42.88}, 'Flare204': {'dec': 30.37, 'radius': 0.68, 'ra': 30.87}, 'Flare205': {'dec': 11.16, 'radius': 1.39, 'ra': 309.08}, 'Flare202': {'dec': -48.41, 'radius': 0.59, 'ra': 83.24}, 'Flare203': {'dec': -75.45, 'radius': 0.68, 'ra': 327.06}, 'Flare158': {'dec': 43.38, 'radius': 0.51, 'ra': 257.7}, 'Flare159': {'dec': 22.39, 'radius': 1.8, 'ra': 52.48}, 'Flare25': {'dec': -2.74, 'radius': 1.8, 'ra': 136.96}, 'Flare24': {'dec': -2.3, 'radius': 1.39, 'ra': 7.85}, 'Flare27': {'dec': 0.84, 'radius': 1.39, 'ra': 129.98}, 'Flare26': {'dec': 13.97, 'radius': 1.01, 'ra': 84.5}, 'Flare21': {'dec': -8.21, 'radius': 0.63, 'ra': 122.14}, 'Flare20': {'dec': 71.14, 'radius': 0.56, 'ra': 109.46}, 'Flare23': {'dec': 58.34, 'radius': 0.68, 'ra': 16.04}, 
'Flare22': {'dec': 39.92, 'radius': 0.84, 'ra': 176.37}, 'Flare29': {'dec': 44.56, 'radius': 0.68, 'ra': 140.16}, 'Flare28': {'dec': -5.35, 'radius': 1.8, 'ra': 4.77}, 'Flare162': {'dec': 9.36, 'radius': 1.39, 'ra': 352.57}, 'Flare47': {'dec': 23.04, 'radius': 1.39, 'ra': 18.03}, 'Flare46': {'dec': -7.87, 'radius': 1.3, 'ra': 337.38}, 'Flare45': {'dec': -33.63, 'radius': 1.39, 'ra': 199.72}, 'Flare44': {'dec': -11.17, 'radius': 0.74, 'ra': 207.78}, 'Flare43': {'dec': -21.46, 'radius': 1.39, 'ra': 352.31}, 'Flare42': {'dec': -20.18, 'radius': 0.84, 'ra': 97.19}, 'Flare41': {'dec': 32.52, 'radius': 1.8, 'ra': 53.44}, 'Flare40': {'dec': 16.21, 'radius': 0.53, 'ra': 343.57}, 'Flare49': {'dec': -19.23, 'radius': 0.68, 'ra': 172.33}, 'Flare48': {'dec': -23.56, 'radius': 0.49, 'ra': 74.06}, 'Flare149': {'dec': 21.37, 'radius': 0.51, 'ra': 186.22}, 'Flare148': {'dec': 27.29, 'radius': 1.39, 'ra': 265.0}, 'Flare141': {'dec': 81.38, 'radius': 1.39, 'ra': 161.49}, 'Flare140': {'dec': 32.2, 'radius': 1.01, 'ra': 18.41}, 'Flare143': {'dec': -5.82, 'radius': 0.51, 'ra': 194.27}, 'Flare142': {'dec': -46.74, 'radius': 1.39, 'ra': 105.88}, 'Flare145': {'dec': 56.84, 'radius': 1.01, 'ra': 276.32}, 'Flare144': {'dec': 37.89, 'radius': 1.39, 'ra': 165.75}, 'Flare147': {'dec': -61.6, 'radius': 0.84, 'ra': 38.73}, 'Flare146': {'dec': 20.45, 'radius': 0.74, 'ra': 133.77}, 'Flare211': {'dec': -17.3, 'radius': 1.8, 'ra': 30.96}, 'Flare210': {'dec': 29.51, 'radius': 0.63, 'ra': 180.23}, 'Flare213': {'dec': -25.73, 'radius': 0.74, 'ra': 246.58}, 'Flare212': {'dec': 4.21, 'radius': 1.39, 'ra': 127.78}, 'Flare208': {'dec': 48.34, 'radius': 1.39, 'ra': 254.18}, 'Flare209': {'dec': 34.34, 'radius': 0.68, 'ra': 347.93}, 'Flare32': {'dec': 10.4, 'radius': 0.74, 'ra': 47.18}, 'Flare33': {'dec': -52.33, 'radius': 1.39, 'ra': 276.98}, 'Flare30': {'dec': -25.2, 'radius': 1.39, 'ra': 55.7}, 'Flare31': {'dec': 77.17, 'radius': 1.8, 'ra': 256.91}, 'Flare36': {'dec': 55.3, 'radius': 1.39, 'ra': 198.03}, 
'Flare37': {'dec': 35.99, 'radius': 0.74, 'ra': 214.86}, 'Flare34': {'dec': -80.23, 'radius': 1.39, 'ra': 287.49}, 'Flare35': {'dec': 21.6, 'radius': 1.39, 'ra': 83.23}, 'Flare38': {'dec': -14.4, 'radius': 0.68, 'ra': 339.35}, 'Flare39': {'dec': -41.83, 'radius': 0.63, 'ra': 217.09}}",
"NB: candidatesData and flaresData are dictionaries. The data in dicts is accessible via the keys, eg candidatesData[candidateName][variableName] :",
"candidatesData['SNR357.0-1.0']['raErr']"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
quantopian/research_public
|
videos/miscellaneous/dfs/dfs_quant_finance.ipynb
|
apache-2.0
|
[
"Before running this notebook, run the 2 cells containing supporting functions at the bottom. \nDaily Fantasy Sports and Quantitative Finance\nA case study on using quantitative finance tools to build daily fantasy sports lineups.\nDaily fantasy sports (DFS) have emerged in recent years as a popular skill game. Usually, games challenge participants to pick a team of players (called a 'lineup') in a particular sport on a given day with the objective of getting the most game points on that day. In the classic DFS format, players are assigned a fictional 'salary' or cost and the DFS participant has to pick a roster of players who fit under a total cost constraint, commonly referred to as a 'salary cap'. Players are ranked at the end of the day based on a sport-dependent scoring function.\nIn this notebook, we will take a look at how quantitative finance tools can be used to study daily fantasy sports data and build an optimal lineup using convex optimization. Specifically, we will take a look at the NBA (basketball) game on DraftKings. The data used in this notebooks is obtained from https://stats.nba.com and DraftKings (DK). The tools used are a mix of tools from the Quantopian API as well as other Python libraries.\nLoad and Format Data\nThe first step to most quantitative problems is to get data. In this study, the data was collected and uploaded to the Quantopian research environment as a .csv file. Let's load it into a pandas DataFrame using local_csv (a Quantopian-specific function).",
"import pandas as pd\n\ndf = local_csv('nba_data.csv')\n\ndf['game_datetime'] = pd.to_datetime(df['game_date'])\n\ndf = df.set_index(['game_datetime', 'player_id'])",
"It is important to know what our data looks like, so here is a preview of the first few rows of our DataFrame.",
"df.head(3)",
"And here is a look at a single row where we can see all of the columns.",
"df.iloc[10]",
"Alphalens (Factor Analysis)\nAlphalens is a tool on Quantopian for analyzing the predictve ability of a factor. In this notebook, we will use Alphalens to analyze the predictive ability of a fantasy points per game factor.\nThe first step to analzing our factor is to define it. Here, we will define the fantasy points per game factor, trailing_20_dk_score to be the rolling mean fantasy points per game (using DK scoring rules) over the last 20 days.",
"# Rolling 20 day mean fantasy score per game.\ntrailing_20_dk_score = df['dk_score'].unstack().shift(1).rolling(20, min_periods=5).mean().stack()\n\ntrailing_20_dk_score.head()",
"And then we need to define our objective metric, or 'what we want to maximize'. In finance, we want to maximize portfolio returns. In DraftKings DFS, we want to maximize our DraftKings score for the current day.",
"# DK scores.\nscores = df['dk_score'].unstack()",
"Next, we need to import Alphalens so that we can use it to analyze our factors.",
"import alphalens as al",
"Before using Alphalens, we need to format our data in a way that it expects. Let's start with our points factor. The below function creates a DataFrame using our points factor and our DK scores. It also specifies periods to be 1, meaning we only care about the performance of the player on one day. The data will be categorized into 5 'quantiles' to remove some of the noise from the relationship.\nNow, let's plot the relationship between each of our points per game factor quantiles and see how strongly they predict DK points on a given day.",
"# Format rolling fantasy points per game data for use in Alphalens.\nfactor_data = get_clean_factor_and_forward_scores(\n factor=trailing_20_dk_score,\n scores=scores.loc['2017-11-09':],\n periods=[1],\n)\n\nmean_by_q_daily, std_err_by_q_daily = al.performance.mean_return_by_quantile(factor_data,\n by_date=True,\n demeaned=False)\n\nal.plotting.plot_quantile_returns_violin(mean_by_q_daily/10000);",
"It looks like our fantasy points per game factor is a strong predictor. We see a narrow and distinct distribution around each quantile's mean DK points (y axis).\nLet's look at another visualization of the points per game factor and plot out the daily mean of each quantile.",
"quant_spread, std_err_spread = al.performance.compute_mean_returns_spread(mean_by_q_daily,\n upper_quant=5,\n lower_quant=1,\n std_err=std_err_by_q_daily)\n\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\n\nplt.figure(figsize=(14,8))\nquantiles = mean_by_q_daily.index.get_level_values(0)\ncolors = cm.rainbow(np.linspace(0, 1, len(quantiles.unique())))\nfor quantile in quantiles.unique():\n x = mean_by_q_daily[quantiles == quantile].index.get_level_values(1)\n y = mean_by_q_daily[quantiles == quantile]\n plt.plot(x, y, color=colors[quantile-1])\n \nax = plt.gca()\nax.legend(quantiles.unique())\nax.set_ylabel('DK Points')\nax.set_xlabel('Date')\n\nplt.title('Daily Mean DK Points Per Quantile (Points Per Game Factor)')",
"Optimize (Portfolio Optimization)\nNow that we have identified some strong predictors of fantasy points, let's use the data to build an optimal portfolio each day. Specifically, we will pick a team of players (construct a portfolio) based on their mean points per game over the last 20 days.\nOn Quantopian, we can use the Optimize API to help us calculate an optimal 'portfolio' each day. In finance, this means picking the stocks that we should hold each day based on an objective function and a set of constraints. In daily fantasy sports, the problem is the same: we want to pick a lineup based on an objective function (maximize DK points) subject to a series of constraints (based on the rules of the game).\nGenerally speaking, constraints are the easy part. The rules are set before every game and we know that our lineup has to follow these rules. Specifically, the NBA game at DraftKings has the following rules:\n- The lineup must be composed of exactly 8 players.\n- The sum of the fictional player salaries in a lineup must be less than 50,000.\n- A lineup must include players from at least two NBA games.\n- A lineup must include at least one player classified in each of the following positions:\n - PG\n - SG\n - PF\n - SF\n - C\n - G (PG or SG, in addition to the other PG and SG slots)\n - F (PF or SF, in addition to the other PF and SF slots)\n - U (the last spot can be occupied by a player of any position).\nThe challenging part is creating an objective function. We know that we want to maximize the number of DK points that we get on a given day, but without time travel, it's difficult to know exactly what the function looks like. In practice, this means we need to define an 'expected value' objective that represents our expected performance of each player. 
Earlier, we saw that our trailing points per game factor was a good predictor of DK points, so let's use that as our objective function.\nAs an optimization problem, this means that we want to pick a team of players subject to the DK rules (salary cap, positions, etc.) that maximizes the total trailing points per game factor.\nLet's define our problem using the Optimize API.",
"# Import the Optimize namespace.\nimport quantopian.optimize as opt",
"Let's start by assembling and formatting the player salaries and positions so that we can use them as constraints in our optimization problem.",
"# Player costs.\nplayer_costs = pd.DataFrame(df['dk_salary']).dropna().sort_index(level=['game_datetime', 'player_id'])\n\n# Player positions.\nplayer_positions = df['dk_position_codes'].dropna().apply((lambda x: [int(y) for y in x[1:-1].split(',')]))",
"Then, let's restructure our (trailing_20_dk_score) so that we can use it as an objective function in Optimize. We'll define it to be parametric on the date so that we can play around with the date later on.",
"# Define the game date that we want to test. Change this and re-run \n# cells below to build a lineup for another date.\nTEST_DATE = '2017-12-01'\n\n# Format our expected value factor (trailing_20_dk_score) for use in Optimize.\nexpected_value = trailing_20_dk_score[player_costs.index].dropna()\nexpected_value = expected_value[expected_value.index.get_level_values(0) == TEST_DATE]",
"Objective\nNext, let's define our objective function using optimize.MaximizeAlpha, which will try to maximize the value of our lineup based on our expected_value factor.",
"# Objective function to be fed into Optimize later on.\nobj = opt.MaximizeAlpha(expected_value)",
"Constraints\nMany of the built-in constraints are centered around finance problems, so we will have to make some simplifying assumptions in order to use them to build our lineup. Later in the notebook, we will build our own optimizer to build a lineup that strictly follows the DK rules.\nLet's start by defining our salary cap constraint:",
"# Salary cap constraint. The total cost of our lineup has to be less than 50,000.\n# We will define this as a FactorExposure constraint.\ncost_limit = opt.FactorExposure(\n player_costs, \n min_exposures={'dk_salary': 0}, \n max_exposures={'dk_salary': 50000}\n)",
"Next, we'll define a position constraint. To simplify things, we'll use more generic positions, and limit ourselves to a maximum of 4 forwards (F), 4 guards (G), and 2 centers (C), with a minimum of 3 F, 3 G, and 1 C.",
"# Map from each player to its position.\nlabels = df['position'].apply(lambda x: x[0])\n\n# # Maps from each position to its min/max exposure.\nmin_exposures = {'F': 3, 'G': 3, 'C': 1}\nmax_exposures = {'F': 4, 'G': 4, 'C': 2}\n\nplayer_position_constraint = opt.NetGroupExposure(labels, min_exposures, max_exposures)",
"At this point, it's worth noting that Optimize operates in weight space, meaning it chooses a percentage (not bounded by a total of 100%, can also be positive or negative) of the overall lineup to allocate to each asset/player. In an attempt to get around that, the next constraint we define will specify that we can hold no more than 1 of each player, and no fewer than 0 of each player (you can't short sell a player... yet).",
"# This constraints tells the Optimizer than we can hold at most 1 of each player\ndiscretize_players = opt.PositionConcentration(\n pd.Series([]),\n pd.Series([]),\n default_min_weight=0,\n default_max_weight=1,\n)",
"Lastly, let's define a constraint for our total number of players.",
"max_players = opt.MaxGrossExposure(8)",
"Calculate Optimal Portfolio (Build Lineup)\nThe next step is to calculate our optimal portfolio for the day using our objective and constraints.",
"result = opt.calculate_optimal_portfolio(\n objective=obj,\n constraints=[\n discretize_players,\n player_position_constraint,\n max_players,\n cost_limit,\n ]\n)",
"And here are the resulting weights:",
"resulting_picks = result[(result.index.get_level_values(0) == TEST_DATE)]\nplayer_weights = resulting_picks[resulting_picks>0]\n\nlineup_info = df.loc[player_weights.index][['matchup', 'player', 'position', 'dk_salary']]\n\nlineup_info['weight'] = player_weights\n\nlineup_info",
"Something that immediately pops out is the non-binary weight assigned to Kemba Walker. Obviously, it's not possible to pick 0.55% of a player in DFS, so this lineup won't be eligible to enter. Unfortunately, it's not currently possible to binarize the outputs of Optimize on Quantopian (since it's not a helpful feature in quantitative finance).\nCVXPY\nTo get around the limits of Optimize, we can take a similar approach and use cvxpy, a Python library for solving convex optimization problems (and the library on top of which Optimize was built), to build our own objective and constraints. In this version, we will follow the exact rules of the DraftKings NBA game (8 players, \\$50k salary cap, players in at least two games, and one player in each of the following positions: ['PG', 'SG', 'PF', 'SF', 'C', 'G', 'F', 'U']. The following cells build constraints to enforce these rules.",
"# List of DK positions.\nDK_POSITION_LIST = ['PG', 'SG', 'PF', 'SF', 'C', 'G', 'F', 'U']\n\n# Get the average play time for all players over the last 20 games. We will use this to\n# filter out players with skewed stats due to playing very little.\ntrailing_20_mins = df['min'].unstack().shift(1).rolling(20, min_periods=5).mean().stack()\n\n# Players with non-null salary on TEST_DATE.\nhave_salary = player_costs[player_costs.index.get_level_values(0) == TEST_DATE].dropna().index\n\n# Values from our factor for TEST_DATE.\ntrailing_dk_score_today = trailing_20_dk_score[\n (trailing_20_dk_score.index.get_level_values(0) == TEST_DATE)\n]\n\n# Players with at least 5 minutes per game on average over the last 20 days.\ntrailing_mins_today = trailing_20_mins[(trailing_20_mins.index.get_level_values(0) == TEST_DATE)]\nhave_play_time = trailing_mins_today[trailing_mins_today >= 5].index\n\n# Eligible players for us to pick have a non-null salary and at least 5 minutes\n# played per game over the last 20 days.\neligible_players_today = have_salary.intersection(have_play_time)\n\n# The game ID in which each player is playing.\nplayer_games = df.loc[eligible_players_today].game_id\n\n# The set of all game IDs on TEST_DATE\ntodays_games = df.loc[eligible_players_today].game_id.unique().tolist()",
"Next, we will import cvxpy and define our objective and constraints.",
"import cvxpy as cvx\n\n# Player salaries and expected values.\nsalaries = np.squeeze(np.array(player_costs.loc[eligible_players_today]))\nvalues = np.array(expected_value[eligible_players_today])\n\n# The variable we are solving for. We define our output variable as a Bool\n# since we have to make a binary decision on each player (pick or don't pick).\nselection = cvx.Bool(len(salaries))\nselection.is_positive = new_is_positive\n\n# Our lineup's total salary must be less than 50,000.\nsalary_cap = 50000\ncost_constraint = salaries * selection <= salary_cap\n\n# Our lineup must be composed of exactly 8 players.\nplayer_constraint = np.ones(len(salaries)) * selection == 8\n\n# Our total expected value is the sum of the value of each player in\n# the lineup. We define our objective to maximize the total expected\n# value.\ntotal_expected_value = values * selection\nobjective = cvx.Maximize(total_expected_value)\n\n# Put our cost and player count constraints in a list.\nconstraints = [cost_constraint, player_constraint]\n\n# Define our position constraints. Positions are represented along an 8-element\n# array corresponding to the positions in DK_POSITION_LIST.\nposition_min = np.array([1, 1, 1, 1, 1, 3, 3, 8])\npos_limits = {}\ni = 0\nfor pos in DK_POSITION_LIST:\n pos_limits[pos] = np.array(player_positions[eligible_players_today].apply(lambda x: x[i]))\n constraints.append((pos_limits[pos] * selection) >= position_min[i])\n i += 1\n\n# Define our game constraints. We rephrase the rule as 'you cannot pick more than\n# 7 players from any one game'.\nfor gid in todays_games:\n game_limit = np.array(player_games == gid)\n constraints.append((game_limit * selection) <= 7)\n \n \n\n# We tell cvxpy that we want maximize our expected value, subject to all\n# of our constraints.\noptimization_problem = cvx.Problem(objective, constraints)\n\nprint \"Our total expected value from today's lineup is:\"\n\n# Solving the problem.\noptimization_problem.solve(solver=cvx.ECOS_BB)",
"Based on our player-by-player expected values and the constraints we supplied to cvxpy, our optimal lineup has a total expected value of 273.37.\nLet's take a look at who is in this lineup to make sure that we implemented the rules properly:",
"# Format output and get relevant player info for display.\ndk_team = pd.Series(np.squeeze(selection.value).tolist()[0])\nplayer_info = df.loc[player_costs.loc[eligible_players_today].iloc[dk_team[dk_team > 0.1].index.values].index][['matchup', 'player', 'dk_position', 'dk_salary', 'dk_score']]\n\nplayer_info\n\nprint \"Total lineup salary: %d\" % player_info['dk_salary'].sum()\nprint \"Total actual score: %d\" % player_info['dk_score'].sum()",
"Our roster appears to satisfy the rules!\nBacktest (a.k.a. run the above code over several consecutive days)",
"def backtest(factor, dates, filters, df, player_costs, player_positions):\n historical_results = {}\n \n daily_expected_value = factor[filters].dropna()\n \n for _date in dates:\n try:\n print _date\n daily_filter = filters[filters.get_level_values(0) == _date]\n expected_value_today = daily_expected_value[daily_expected_value.index.get_level_values(0) == _date]\n\n # The game ID in which each player is playing.\n player_games = df.loc[daily_filter].game_id\n\n # The set of all game IDs on the current date.\n todays_games = df.loc[daily_filter].game_id.unique().tolist()\n\n # Player salaries and expected values.\n salaries = np.squeeze(np.array(player_costs.loc[daily_filter]))\n values = np.array(expected_value_today[daily_filter])\n\n # The variable we are solving for. We define our output variable as a Bool\n # since we have to make a binary decision on each player (pick or don't pick).\n selection = cvx.Bool(len(salaries))\n selection.is_positive = new_is_positive\n\n # Our lineup's total salary must be less than 50,000.\n salary_cap = 50000\n cost_constraint = salaries * selection <= salary_cap\n\n # Our lineup must be composed of exactly 8 players.\n player_constraint = np.ones(len(salaries)) * selection == 8\n\n # Our total expected value is the sum of the value of each player in\n # the lineup. We define our objective to maximize the total expected\n # value.\n total_expected_value = values * selection\n objective = cvx.Maximize(total_expected_value)\n\n # Put our cost and player count constraints in a list.\n constraints = [cost_constraint, player_constraint]\n\n # Define our position constraints. 
Positions are represented along an 8-element\n # array corresponding to the positions in DK_POSITION_LIST.\n position_min = np.array([1, 1, 1, 1, 1, 3, 3, 8])\n pos_limits = {}\n i = 0\n for pos in DK_POSITION_LIST:\n pos_limits[pos] = np.array(player_positions[daily_filter].apply(lambda x: x[i]))\n constraints.append((pos_limits[pos] * selection) >= position_min[i])\n i += 1\n\n # Define our game constraints. We rephrase the rule as 'you cannot pick more than\n # 7 players from any one game'.\n for gid in todays_games:\n game_limit = np.array(player_games == gid)\n constraints.append((game_limit * selection) <= 7)\n\n\n # We tell cvxpy that we want maximize our expected value, subject to all\n # of our constraints.\n knapsack_problem = cvx.Problem(objective, constraints)\n\n # Solving the problem.\n predicted_value = knapsack_problem.solve(solver=cvx.ECOS_BB)\n\n dk_team = pd.Series(np.squeeze(selection.value).tolist()[0])\n player_info = df.loc[player_costs.loc[filters].iloc[dk_team[dk_team > 0.1].index.values].index][['matchup', 'player', 'dk_position', 'dk_salary', 'dk_score']]\n historical_results[_date] = {\n 'actual_score': player_info.dk_score.sum(),\n 'expected_score': predicted_value,\n }\n except TypeError:\n pass\n except ValueError:\n pass\n \n return historical_results\n\ntest_dates = trailing_20_dk_score.index.get_level_values(0).unique().values[15:]\nhas_playtime = trailing_20_mins[trailing_20_mins >= 5]\n\nfilters = player_costs.index & has_playtime.index\nbacktest_result = backtest(\n trailing_20_dk_score,\n test_dates,\n filters,\n df,\n player_costs,\n player_positions,\n)\n\ndaily_results = pd.DataFrame.from_dict(backtest_result, orient='index')\nprint daily_results.mean()\ndaily_results.plot();",
"Conclusion\nIn this notebook, we looked at daily fantasy sports using quantitative finance tools. We started out by pulling in and looking at NBA data. We created and tested the ability of certain statistics to predict DK points using Alphalens. We attempted to build an optimal lineup using Optimize. And lastly, we used cvxpy to build a proper solution to the daily lineup construction problem.\nFuture Work\nIn both quantitative finance and daily fantasy sports, the hardest problem is forecasting results of an asset or player in the future. Now that we have a notebook built to help us test factors and build lineups, the next step is to iterate on ideas of predictive signals. In this example, we only looked at two relatively simple metrics (minutes played and points scored). It would be interesting to try new factors to see if they are predictive or provide a leg up on the competitiong.\nAnother area that we didn't explore in this notebook is risk. In most team sports, the measured statistics of one player tend to be correlated (positively or negatively) with other players in the game. For example, players might get fantasy points for getting an assist when a teammate scores. These correlations can be defined as risk factors. Risk factors can be exploited or hedged depending on the desired risk profile of the lineup. The objective function or constraints in the optimization problem could be altered to either try to exploit or hedge risk.\nHelper Functions - Cells Below Need to Be Run at Notebook Start",
"import numpy as np\nfrom pandas.tseries.offsets import CustomBusinessDay\n\nfrom scipy.stats import mode\n\ndef compute_forward_scores(factor,\n scores,\n periods=(1, 5, 10),\n filter_zscore=None):\n \"\"\"\n Finds the N period forward returns (as percent change) for each asset\n provided.\n Parameters\n ----------\n factor : pd.Series - MultiIndex\n A MultiIndex Series indexed by timestamp (level 0) and asset\n (level 1), containing the values for a single alpha factor.\n - See full explanation in utils.get_clean_factor_and_forward_returns\n scores : pd.DataFrame\n Pricing data to use in forward price calculation.\n Assets as columns, dates as index. Pricing data must\n span the factor analysis time period plus an additional buffer window\n that is greater than the maximum number of expected periods\n in the forward returns calculations.\n periods : sequence[int]\n periods to compute forward returns on.\n filter_zscore : int or float, optional\n Sets forward returns greater than X standard deviations\n from the the mean to nan. Set it to 'None' to avoid filtering.\n Caution: this outlier filtering incorporates lookahead bias.\n Returns\n -------\n forward_returns : pd.DataFrame - MultiIndex\n A MultiIndex DataFrame indexed by timestamp (level 0) and asset\n (level 1), containing the forward returns for assets.\n Forward returns column names follow the format accepted by\n pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc).\n 'date' index freq property (forward_returns.index.levels[0].freq)\n will be set to a trading calendar (pandas DateOffset) inferred\n from the input data (see infer_trading_calendar for more details).\n \"\"\"\n\n factor_dateindex = factor.index.levels[0]\n if factor_dateindex.tz != scores.index.tz:\n raise NonMatchingTimezoneError(\"The timezone of 'factor' is not the \"\n \"same as the timezone of 'scores'. 
See \"\n \"the pandas methods tz_localize and \"\n \"tz_convert.\")\n\n freq = infer_trading_calendar(factor_dateindex, scores.index)\n\n factor_dateindex = factor_dateindex.intersection(scores.index)\n\n if len(factor_dateindex) == 0:\n raise ValueError(\"Factor and scores indices don't match: make sure \"\n \"they have the same convention in terms of datetimes \"\n \"and symbol-names\")\n\n forward_returns = pd.DataFrame(index=pd.MultiIndex.from_product(\n [factor_dateindex, scores.columns], names=['date', 'asset']))\n\n forward_returns.index.levels[0].freq = freq\n\n for period in sorted(periods):\n #\n # build forward returns\n #\n fwdret = (scores\n #.shift(-period)\n .reindex(factor_dateindex)\n )\n\n if filter_zscore is not None:\n mask = abs(fwdret - fwdret.mean()) > (filter_zscore * fwdret.std())\n fwdret[mask] = np.nan\n\n # Find the period length, which will be the column name\n # Becase the calendar inferred from factor and prices doesn't take\n # into consideration holidays yet, there could be some non-trading days\n # in between the trades so we'll test several entries to find out the\n # correct period length\n #\n entries_to_test = min(10, len(fwdret.index), len(scores.index)-period)\n days_diffs = []\n for i in range(entries_to_test):\n p_idx = scores.index.get_loc(fwdret.index[i])\n start = scores.index[p_idx]\n end = scores.index[p_idx+period]\n period_len = diff_custom_calendar_timedeltas(start, end, freq)\n days_diffs.append(period_len.components.days)\n\n delta_days = period_len.components.days - mode(days_diffs).mode[0]\n period_len -= pd.Timedelta(days=delta_days)\n\n # Finally use period_len as column name\n column_name = timedelta_to_string(period_len)\n forward_returns[column_name] = fwdret.stack()\n\n forward_returns.index = forward_returns.index.rename(['date', 'asset'])\n\n return forward_returns\n\ndef get_clean_factor_and_forward_scores(factor,\n scores,\n groupby=None,\n binning_by_group=False,\n quantiles=5,\n bins=None,\n 
periods=(1, 5, 10),\n filter_zscore=20,\n groupby_labels=None,\n max_loss=10.0):\n \n forward_scores = compute_forward_scores(factor, scores, periods,\n filter_zscore)\n\n factor_data = get_clean_factor(factor, forward_scores, groupby=groupby,\n groupby_labels=groupby_labels,\n quantiles=quantiles, bins=bins,\n binning_by_group=binning_by_group,\n max_loss=max_loss)\n\n return factor_data\n\n\ndef infer_trading_calendar(factor_idx, prices_idx):\n \"\"\"\n Infer the trading calendar from factor and price information.\n Parameters\n ----------\n factor_idx : pd.DatetimeIndex\n The factor datetimes for which we are computing the forward returns\n prices_idx : pd.DatetimeIndex\n The prices datetimes associated withthe factor data\n Returns\n -------\n calendar : pd.DateOffset\n \"\"\"\n full_idx = factor_idx.union(prices_idx)\n\n # drop days of the week that are not used\n days_to_keep = []\n days_of_the_week = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n for day, day_str in enumerate(days_of_the_week):\n if (full_idx.dayofweek == day).any():\n days_to_keep.append(day_str)\n\n days_to_keep = ' '.join(days_to_keep)\n\n # we currently don't infer holidays, but CustomBusinessDay class supports\n # custom holidays. 
So holidays could be inferred too eventually\n return CustomBusinessDay(weekmask=days_to_keep)\n\ndef diff_custom_calendar_timedeltas(start, end, freq):\n \"\"\"\n Compute the difference between two pd.Timedelta taking into consideration\n custom frequency, which is used to deal with custom calendars, such as a\n trading calendar\n Parameters\n ----------\n start : pd.Timestamp\n end : pd.Timestamp\n freq : DateOffset, optional\n Returns\n -------\n pd.Timedelta\n end - start\n \"\"\"\n actual_days = pd.date_range(start, end, freq=freq).shape[0] - 1\n timediff = end - start\n delta_days = timediff.components.days - actual_days\n return timediff - pd.Timedelta(days=delta_days)\n\ndef get_clean_factor(factor,\n forward_returns,\n groupby=None,\n binning_by_group=False,\n quantiles=5,\n bins=None,\n groupby_labels=None,\n max_loss=0.35):\n \"\"\"\n Formats the factor data, forward return data, and group mappings into a\n DataFrame that contains aligned MultiIndex indices of timestamp and asset.\n The returned data will be formatted to be suitable for Alphalens functions.\n It is safe to skip a call to this function and still make use of Alphalens\n functionalities as long as the factor data conforms to the format returned\n from get_clean_factor_and_forward_returns and documented here\n Parameters\n ----------\n factor : pd.Series - MultiIndex\n A MultiIndex Series indexed by timestamp (level 0) and asset\n (level 1), containing the values for a single alpha factor.\n ::\n -----------------------------------\n date | asset |\n -----------------------------------\n | AAPL | 0.5\n -----------------------\n | BA | -1.1\n -----------------------\n 2014-01-01 | CMG | 1.7\n -----------------------\n | DAL | -0.1\n -----------------------\n | LULU | 2.7\n -----------------------\n forward_returns : pd.DataFrame - MultiIndex\n A MultiIndex DataFrame indexed by timestamp (level 0) and asset\n (level 1), containing the forward returns for assets.\n Forward returns column names 
must follow the format accepted by\n pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc).\n 'date' index freq property must be set to a trading calendar\n (pandas DateOffset), see infer_trading_calendar for more details.\n This information is currently used only in cumulative returns\n computation\n ::\n ---------------------------------------\n | | 1D | 5D | 10D\n ---------------------------------------\n date | asset | | |\n ---------------------------------------\n | AAPL | 0.09|-0.01|-0.079\n ----------------------------\n | BA | 0.02| 0.06| 0.020\n ----------------------------\n 2014-01-01 | CMG | 0.03| 0.09| 0.036\n ----------------------------\n | DAL |-0.02|-0.06|-0.029\n ----------------------------\n | LULU |-0.03| 0.05|-0.009\n ----------------------------\n groupby : pd.Series - MultiIndex or dict\n Either A MultiIndex Series indexed by date and asset,\n containing the period wise group codes for each asset, or\n a dict of asset to group mappings. If a dict is passed,\n it is assumed that group mappings are unchanged for the\n entire time period of the passed factor data.\n binning_by_group : bool\n If True, compute quantile buckets separately for each group.\n This is useful when the factor values range vary considerably\n across gorups so that it is wise to make the binning group relative.\n You should probably enable this if the factor is intended\n to be analyzed for a group neutral portfolio\n quantiles : int or sequence[float]\n Number of equal-sized quantile buckets to use in factor bucketing.\n Alternately sequence of quantiles, allowing non-equal-sized buckets\n e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]\n Only one of 'quantiles' or 'bins' can be not-None\n bins : int or sequence[float]\n Number of equal-width (valuewise) bins to use in factor bucketing.\n Alternately sequence of bin edges allowing for non-uniform bin width\n e.g. [-4, -2, -0.5, 0, 10]\n Chooses the buckets to be evenly spaced according to the values\n themselves. 
Useful when the factor contains discrete values.\n Only one of 'quantiles' or 'bins' can be not-None\n groupby_labels : dict\n A dictionary keyed by group code with values corresponding\n to the display name for each group.\n max_loss : float, optional\n Maximum percentage (0.00 to 1.00) of factor data dropping allowed,\n computed comparing the number of items in the input factor index and\n the number of items in the output DataFrame index.\n Factor data can be partially dropped due to being flawed itself\n (e.g. NaNs), not having provided enough price data to compute\n forward returns for all factor values, or because it is not possible\n to perform binning.\n Set max_loss=0 to avoid Exceptions suppression.\n Returns\n -------\n merged_data : pd.DataFrame - MultiIndex\n A MultiIndex Series indexed by date (level 0) and asset (level 1),\n containing the values for a single alpha factor, forward returns for\n each period, the factor quantile/bin that factor value belongs to, and\n (optionally) the group the asset belongs to.\n - forward returns column names follow the format accepted by\n pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc)\n - 'date' index freq property (merged_data.index.levels[0].freq) is the\n same as that of the input forward returns data. 
This is currently\n used only in cumulative returns computation\n ::\n -------------------------------------------------------------------\n | | 1D | 5D | 10D |factor|group|factor_quantile\n -------------------------------------------------------------------\n date | asset | | | | | |\n -------------------------------------------------------------------\n | AAPL | 0.09|-0.01|-0.079| 0.5 | G1 | 3\n --------------------------------------------------------\n | BA | 0.02| 0.06| 0.020| -1.1 | G2 | 5\n --------------------------------------------------------\n 2014-01-01 | CMG | 0.03| 0.09| 0.036| 1.7 | G2 | 1\n --------------------------------------------------------\n | DAL |-0.02|-0.06|-0.029| -0.1 | G3 | 5\n --------------------------------------------------------\n | LULU |-0.03| 0.05|-0.009| 2.7 | G1 | 2\n --------------------------------------------------------\n \"\"\"\n\n initial_amount = float(len(factor.index))\n\n factor = factor.copy()\n factor.index = factor.index.rename(['date', 'asset'])\n\n merged_data = forward_returns.copy()\n merged_data['factor'] = factor\n\n if groupby is not None:\n if isinstance(groupby, dict):\n diff = set(factor.index.get_level_values(\n 'asset')) - set(groupby.keys())\n if len(diff) > 0:\n raise KeyError(\n \"Assets {} not in group mapping\".format(\n list(diff)))\n\n ss = pd.Series(groupby)\n groupby = pd.Series(index=factor.index,\n data=ss[factor.index.get_level_values(\n 'asset')].values)\n\n if groupby_labels is not None:\n diff = set(groupby.values) - set(groupby_labels.keys())\n if len(diff) > 0:\n raise KeyError(\n \"groups {} not in passed group names\".format(\n list(diff)))\n\n sn = pd.Series(groupby_labels)\n groupby = pd.Series(index=groupby.index,\n data=sn[groupby.values].values)\n\n merged_data['group'] = groupby.astype('category')\n\n merged_data = merged_data.dropna()\n\n fwdret_amount = float(len(merged_data.index))\n\n no_raise = False if max_loss == 0 else True\n merged_data['factor_quantile'] = 
quantize_factor(merged_data,\n quantiles,\n bins,\n binning_by_group,\n no_raise)\n\n merged_data = merged_data.dropna()\n\n binning_amount = float(len(merged_data.index))\n\n tot_loss = (initial_amount - binning_amount) / initial_amount\n fwdret_loss = (initial_amount - fwdret_amount) / initial_amount\n bin_loss = tot_loss - fwdret_loss\n\n# print(\"Dropped %.1f%% entries from factor data: %.1f%% in forward \"\n# \"returns computation and %.1f%% in binning phase \"\n# \"(set max_loss=0 to see potentially suppressed Exceptions).\" %\n# (tot_loss * 100, fwdret_loss * 100, bin_loss * 100))\n\n if tot_loss > max_loss:\n message = (\"max_loss (%.1f%%) exceeded %.1f%%, consider increasing it.\"\n % (max_loss * 100, tot_loss * 100))\n raise MaxLossExceededError(message)\n else:\n# print(\"max_loss is %.1f%%, not exceeded: OK!\" % (max_loss * 100))\n pass\n\n return merged_data\n\ndef timedelta_to_string(timedelta):\n \"\"\"\n Utility that converts a pandas.Timedelta to a string representation\n compatible with pandas.Timedelta constructor format\n Parameters\n ----------\n timedelta: pd.Timedelta\n Returns\n -------\n string\n string representation of 'timedelta'\n \"\"\"\n c = timedelta.components\n format = ''\n if c.days != 0:\n format += '%dD' % c.days\n if c.hours > 0:\n format += '%dh' % c.hours\n if c.minutes > 0:\n format += '%dm' % c.minutes\n if c.seconds > 0:\n format += '%ds' % c.seconds\n if c.milliseconds > 0:\n format += '%dms' % c.milliseconds\n if c.microseconds > 0:\n format += '%dus' % c.microseconds\n if c.nanoseconds > 0:\n format += '%dns' % c.nanoseconds\n return format\n\n\ndef add_custom_calendar_timedelta(inputs, timedelta, freq):\n \"\"\"\n Add timedelta to 'input' taking into consideration custom frequency, which\n is used to deal with custom calendars, such as a trading calendar\n Parameters\n ----------\n input : pd.DatetimeIndex or pd.Timestamp\n timedelta : pd.Timedelta\n freq : DateOffset, optional\n Returns\n -------\n pd.DatetimeIndex 
or pd.Timestamp\n input + timedelta\n \"\"\"\n days = timedelta.components.days\n offset = timedelta - pd.Timedelta(days=days)\n return inputs + freq * days + offset\n\ndef non_unique_bin_edges_error(func):\n \"\"\"\n Give user a more informative error in case it is not possible\n to properly calculate quantiles on the input dataframe (factor)\n \"\"\"\n message = \"\"\"\n An error occurred while computing bins/quantiles on the input provided.\n This usually happens when the input contains too many identical\n values and they span more than one quantile. The quantiles are choosen\n to have the same number of records each, but the same value cannot span\n multiple quantiles. Possible workarounds are:\n 1 - Decrease the number of quantiles\n 2 - Specify a custom quantiles range, e.g. [0, .50, .75, 1.] to get unequal\n number of records per quantile\n 3 - Use 'bins' option instead of 'quantiles', 'bins' chooses the\n buckets to be evenly spaced according to the values themselves, while\n 'quantiles' forces the buckets to have the same number of records.\n 4 - for factors with discrete values use the 'bins' option with custom\n ranges and create a range for each discrete value\n Please see utils.get_clean_factor_and_forward_returns documentation for\n full documentation of 'bins' and 'quantiles' options.\n\"\"\"\n\n def dec(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except ValueError as e:\n if 'Bin edges must be unique' in str(e):\n rethrow(e, message)\n raise\n return dec\n\n@non_unique_bin_edges_error\ndef quantize_factor(factor_data,\n quantiles=5,\n bins=None,\n by_group=False,\n no_raise=False):\n \"\"\"\n Computes period wise factor quantiles.\n Parameters\n ----------\n factor_data : pd.DataFrame - MultiIndex\n A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),\n containing the values for a single alpha factor, forward returns for\n each period, the factor quantile/bin that factor value belongs to, and\n (optionally) the group 
the asset belongs to.\n - See full explanation in utils.get_clean_factor_and_forward_returns\n quantiles : int or sequence[float]\n Number of equal-sized quantile buckets to use in factor bucketing.\n Alternately sequence of quantiles, allowing non-equal-sized buckets\n e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]\n Only one of 'quantiles' or 'bins' can be not-None\n bins : int or sequence[float]\n Number of equal-width (valuewise) bins to use in factor bucketing.\n Alternately sequence of bin edges allowing for non-uniform bin width\n e.g. [-4, -2, -0.5, 0, 10]\n Only one of 'quantiles' or 'bins' can be not-None\n by_group : bool\n If True, compute quantile buckets separately for each group.\n no_raise: bool, optional\n If True, no exceptions are thrown and the values for which the\n exception would have been thrown are set to np.NaN\n Returns\n -------\n factor_quantile : pd.Series\n Factor quantiles indexed by date and asset.\n \"\"\"\n if not ((quantiles is not None and bins is None) or\n (quantiles is None and bins is not None)):\n raise ValueError('Either quantiles or bins should be provided')\n\n def quantile_calc(x, _quantiles, _bins, _no_raise):\n try:\n if _quantiles is not None and _bins is None:\n return pd.qcut(x, _quantiles, labels=False) + 1\n elif _bins is not None and _quantiles is None:\n return pd.cut(x, _bins, labels=False) + 1\n except Exception as e:\n if _no_raise:\n return pd.Series(index=x.index)\n raise e\n\n grouper = [factor_data.index.get_level_values('date')]\n if by_group:\n grouper.append('group')\n\n factor_quantile = factor_data.groupby(grouper)['factor'] \\\n .apply(quantile_calc, quantiles, bins, no_raise)\n factor_quantile.name = 'factor_quantile'\n\n return factor_quantile.dropna()\n\ndef new_is_positive():\n return False"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
calroc/joypy
|
docs/Hylo-, Ana-, Cata-, and Para-morphisms - Recursion Combinators.ipynb
|
gpl-3.0
|
[
"Cf. \"Bananas, Lenses, & Barbed Wire\"\nHylomorphism\nA hylomorphism H :: A -> B converts a value of type A into a value of type B by means of:\n\nA generator G :: A -> (A, B)\nA combiner F :: (B, B) -> B\nA predicate P :: A -> Bool to detect the base case\nA base case value c :: B\nRecursive calls (zero or more); it has a \"call stack in the form of a cons list\".\n\nIt may be helpful to see this function implemented in imperative Python code.",
"def hylomorphism(c, F, P, G):\n '''Return a hylomorphism function H.'''\n\n def H(a):\n if P(a):\n result = c\n else:\n b, aa = G(a)\n result = F(b, H(aa))\n return result\n\n return H",
"Finding Triangular Numbers\nAs a concrete example let's use a function that, given a positive integer, returns the sum of all positive integers less than that one. (In this case the types A and B are both int.)\nWith range() and sum()",
"r = range(10)\nr\n\nsum(r)\n\nrange_sum = lambda n: sum(range(n))\nrange_sum(10)",
"As a hylomorphism",
"G = lambda n: (n - 1, n - 1)\nF = lambda a, b: a + b\nP = lambda n: n <= 1\n\nH = hylomorphism(0, F, P, G)\n\nH(10)",
"If you were to run the above code in a debugger and check out the call stack you would find that the variable b in each call to H() is storing the intermediate values as H() recurses. This is what was meant by \"call stack in the form of a cons list\".\nJoy Preamble",
"from notebook_preamble import D, DefinitionWrapper, J, V, define",
"Hylomorphism in Joy\nWe can define a combinator hylomorphism that will make a hylomorphism combinator H from constituent parts.\nH == c [F] [P] [G] hylomorphism\n\nThe function H is recursive, so we start with ifte and set the else-part to\nsome function J that will contain a quoted copy of H. (The then-part just\ndiscards the leftover a and replaces it with the base case value c.)\nH == [P] [pop c] [J] ifte\n\nThe else-part J gets just the argument a on the stack.\na J\na G The first thing to do is use the generator G\naa b which produces b and a new aa\naa b [H] dip we recur with H on the new aa\naa H b F and run F on the result.\n\nThis gives us a definition for J.\nJ == G [H] dip F\n\nPlug it in and convert to genrec.\nH == [P] [pop c] [G [H] dip F] ifte\nH == [P] [pop c] [G] [dip F] genrec\n\nThis is the form of a hylomorphism in Joy, which nicely illustrates that\nit is a simple specialization of the general recursion combinator.\nH == [P] [pop c] [G] [dip F] genrec\n\nDerivation of hylomorphism\nNow we just need to derive a definition that builds the genrec arguments\nout of the pieces given to the hylomorphism combinator.\nH == [P] [pop c] [G] [dip F] genrec\n [P] [c] [pop] swoncat [G] [F] [dip] swoncat genrec\n [P] c unit [pop] swoncat [G] [F] [dip] swoncat genrec\n [P] c [G] [F] [unit [pop] swoncat] dipd [dip] swoncat genrec\n\nWorking in reverse:\n- Use swoncat twice to decouple [c] and [F].\n- Use unit to dequote c.\n- Use dipd to untangle [unit [pop] swoncat] from the givens.\nAt this point all of the arguments (givens) to the hylomorphism are to the left so we have\na definition for hylomorphism:\nhylomorphism == [unit [pop] swoncat] dipd [dip] swoncat genrec\n\nThe order of parameters is different than the one we started with but\nthat hardly matters, you can rearrange them or just supply them in the\nexpected order.\n[P] c [G] [F] hylomorphism == H",
"define('hylomorphism == [unit [pop] swoncat] dipd [dip] swoncat genrec')",
"Demonstrate summing a range of integers from 0 to n-1.\n\n[P] is [0 <=]\nc is 0\n[G] is [1 - dup]\n[F] is [+]\n\nSo to sum the positive integers less than five we can do this.",
"V('5 [0 <=] 0 [1 - dup] [+] hylomorphism')",
"Anamorphism\nAn anamorphism can be defined as a hylomorphism that uses [] for c and\nswons for F.\n[P] [G] anamorphism == [P] [] [G] [swons] hylomorphism == A\n\nThis allows us to define an anamorphism combinator in terms of\nthe hylomorphism combinator.\n[] swap [swons] hylomorphism == anamorphism\n\nPartial evaluation gives us a \"pre-cooked\" form.\n[P] [G] . anamorphism\n[P] [G] . [] swap [swons] hylomorphism\n[P] [G] [] . swap [swons] hylomorphism\n[P] [] [G] . [swons] hylomorphism\n[P] [] [G] [swons] . hylomorphism\n[P] [] [G] [swons] . [unit [pop] swoncat] dipd [dip] swoncat genrec\n[P] [] [G] [swons] [unit [pop] swoncat] . dipd [dip] swoncat genrec\n[P] [] . unit [pop] swoncat [G] [swons] [dip] swoncat genrec\n[P] [[]] [pop] . swoncat [G] [swons] [dip] swoncat genrec\n[P] [pop []] [G] [swons] [dip] . swoncat genrec\n\n[P] [pop []] [G] [dip swons] genrec\n\n(We could also have just substituted for c and F in the definition of H.)\nH == [P] [pop c ] [G] [dip F ] genrec\nA == [P] [pop []] [G] [dip swons] genrec\n\nThe partial evaluation is overkill in this case but it serves as a\nreminder that this sort of program specialization can, in many cases, be\ncarried out automatically.)\nUntangle [G] from [pop []] using swap.\n[P] [G] [pop []] swap [dip swons] genrec\n\nAll of the arguments to anamorphism are to the left, so we have a definition for it.\nanamorphism == [pop []] swap [dip swons] genrec\n\nAn example of an anamorphism is the range function.\nrange == [0 <=] [1 - dup] anamorphism\n\nCatamorphism\nA catamorphism can be defined as a hylomorphism that uses [uncons swap] for [G]\nand [[] =] for the predicate [P].\nc [F] catamorphism == [[] =] c [uncons swap] [F] hylomorphism == C\n\nThis allows us to define a catamorphism combinator in terms of\nthe hylomorphism combinator.\n[[] =] roll> [uncons swap] swap hylomorphism == catamorphism\n\nPartial evaluation doesn't help much.\nc [F] . catamorphism\nc [F] . 
[[] =] roll> [uncons swap] swap hylomorphism\nc [F] [[] =] . roll> [uncons swap] swap hylomorphism\n[[] =] c [F] [uncons swap] . swap hylomorphism\n[[] =] c [uncons swap] [F] . hylomorphism\n[[] =] c [uncons swap] [F] [unit [pop] swoncat] . dipd [dip] swoncat genrec\n[[] =] c . unit [pop] swoncat [uncons swap] [F] [dip] swoncat genrec\n[[] =] [c] [pop] . swoncat [uncons swap] [F] [dip] swoncat genrec\n[[] =] [pop c] [uncons swap] [F] [dip] . swoncat genrec\n[[] =] [pop c] [uncons swap] [dip F] genrec\n\nBecause the arguments to catamorphism have to be prepared (unlike the arguments\nto anamorphism, which only need to be rearranged slightly) there isn't much point\nto \"pre-cooking\" the definition.\ncatamorphism == [[] =] roll> [uncons swap] swap hylomorphism\n\nAn example of a catamorphism is the sum function.\nsum == 0 [+] catamorphism\n\n\"Fusion Law\" for catas (UNFINISHED!!!)\nI'm not sure exactly how to translate the \"Fusion Law\" for catamorphisms into Joy.\nI know that a map composed with a cata can be expressed as a new cata:\n[F] map b [B] cata == b [F B] cata\n\nBut this isn't the one described in \"Bananas...\". That's more like:\nA cata composed with some function can be expressed as some other cata:\nb [B] catamorphism F == c [C] catamorphism\n\nGiven:\nb F == c\n\n...\n\nB F == [F] dip C\n\n...\n\nb[B]cata F == c[C]cata\n\nF(B(head, tail)) == C(head, F(tail))\n\n1 [2 3] B F 1 [2 3] F C\n\n\nb F == c\nB F == F C\n\nb [B] catamorphism F == c [C] catamorphism\nb [B] catamorphism F == b F [C] catamorphism\n\n...\n\nOr maybe,\n[F] map b [B] cata == c [C] cata ???\n\n[F] map b [B] cata == b [F B] cata I think this is generally true, unless F consumes stack items\n instead of just transforming TOS. 
Of course, there's always [F] unary.\nb [F] unary [[F] unary B] cata\n\n[10 *] map 0 swap [+] step == 0 swap [10 * +] step\n\nFor example:\nF == 10 *\nb == 0\nB == +\nc == 0\nC == F +\n\nb F == c\n0 10 * == 0\n\nB F == [F] dip C\n+ 10 * == [10 *] dip F +\n+ 10 * == [10 *] dip 10 * +\n\nn m + 10 * == 10(n+m)\n\nn m [10 *] dip 10 * +\nn 10 * m 10 * +\n10n m 10 * +\n10n 10m +\n10n+10m\n\n10n+10m = 10(n+m)\n\nErgo:\n0 [+] catamorphism 10 * == 0 [10 * +] catamorphism\n\nThe step combinator will usually be better to use than catamorphism.\nsum == 0 swap [+] step\nsum == 0 [+] catamorphism\n\nanamorphism catamorphism == hylomorphism\nHere is (part of) the payoff.\nAn anamorphism followed by (composed with) a\ncatamorphism is a hylomorphism, with the advantage that the hylomorphism \ndoes not create the intermediate list structure. The values are stored in\neither the call stack, for those implementations that use one, or in the pending\nexpression (\"continuation\") for the Joypy interpreter. They still have to \nbe somewhere, converting from an anamorphism and catamorphism to a hylomorphism\njust prevents using additional storage and doing additional processing.\n range == [0 <=] [1 - dup] anamorphism\n sum == 0 [+] catamorphism\n\nrange sum == [0 <=] [1 - dup] anamorphism 0 [+] catamorphism\n == [0 <=] 0 [1 - dup] [+] hylomorphism\n\nWe can let the hylomorphism combinator build range_sum for us or just\nsubstitute ourselves.\n H == [P] [pop c] [G] [dip F] genrec\nrange_sum == [0 <=] [pop 0] [1 - dup] [dip +] genrec",
"defs = '''\nanamorphism == [pop []] swap [dip swons] genrec\nhylomorphism == [unit [pop] swoncat] dipd [dip] swoncat genrec\ncatamorphism == [[] =] roll> [uncons swap] swap hylomorphism\nrange == [0 <=] [1 - dup] anamorphism\nsum == 0 [+] catamorphism\nrange_sum == [0 <=] 0 [1 - dup] [+] hylomorphism\n'''\n\nDefinitionWrapper.add_definitions(defs, D)\n\nJ('10 range')\n\nJ('[9 8 7 6 5 4 3 2 1 0] sum')\n\nV('10 range sum')\n\nV('10 range_sum')",
"Factorial Function and Paramorphisms\nA paramorphism P :: B -> A is a recursion combinator that uses dup on intermediate values.\nn swap [P] [pop] [[F] dupdip G] primrec\n\nWith\n- n :: A is the \"identity\" for F (like 1 for multiplication, 0 for addition)\n- F :: (A, B) -> A\n- G :: B -> B generates the next B value.\n- and lastly P :: B -> Bool detects the end of the series.\nFor Factorial function (types A and B are both integer):\nn == 1\nF == *\nG == --\nP == 1 <=",
"define('factorial == 1 swap [1 <=] [pop] [[*] dupdip --] primrec')",
"Try it with input 3 (omitting evaluation of predicate):\n3 1 swap [1 <=] [pop] [[*] dupdip --] primrec\n1 3 [1 <=] [pop] [[*] dupdip --] primrec\n\n1 3 [*] dupdip --\n1 3 * 3 --\n3 3 --\n3 2\n\n3 2 [*] dupdip --\n3 2 * 2 --\n6 2 --\n6 1\n\n6 1 [1 <=] [pop] [[*] dupdip --] primrec\n\n6 1 pop\n6",
"J('3 factorial')",
"Derive paramorphism from the form above.\nn swap [P] [pop] [[F] dupdip G] primrec\n\nn swap [P] [pop] [[F] dupdip G] primrec\nn [P] [swap] dip [pop] [[F] dupdip G] primrec\nn [P] [[F] dupdip G] [[swap] dip [pop]] dip primrec\nn [P] [F] [dupdip G] cons [[swap] dip [pop]] dip primrec\nn [P] [F] [G] [dupdip] swoncat cons [[swap] dip [pop]] dip primrec\n\nparamorphism == [dupdip] swoncat cons [[swap] dip [pop]] dip primrec",
"define('paramorphism == [dupdip] swoncat cons [[swap] dip [pop]] dip primrec')\ndefine('factorial == 1 [1 <=] [*] [--] paramorphism')\n\nJ('3 factorial')",
"tails\nAn example of a paramorphism for lists given in the \"Bananas...\" paper is tails which returns the list of \"tails\" of a list.\n[1 2 3] tails == [[] [3] [2 3]]\n\nUsing paramorphism we would write:\nn == []\nF == rest swons\nG == rest\nP == not\n\ntails == [] [not] [rest swons] [rest] paramorphism",
"define('tails == [] [not] [rest swons] [rest] paramorphism')\n\nJ('[1 2 3] tails')\n\nJ('25 range tails [popop] infra [sum] map')\n\nJ('25 range [range_sum] map')",
"Factoring rest\nRight before the recursion begins we have:\n[] [1 2 3] [not] [pop] [[rest swons] dupdip rest] primrec\n\nBut we might prefer to factor rest in the quote:\n[] [1 2 3] [not] [pop] [rest [swons] dupdip] primrec\n\nThere's no way to do that with the paramorphism combinator as defined. We would have to write and use a slightly different recursion combinator that accepted an additional \"preprocessor\" function [H] and built:\nn swap [P] [pop] [H [F] dupdip G] primrec\n\nOr just write it out manually. This is yet another place where the sufficiently smart compiler will one day automatically refactor the code. We could write a paramorphism combinator that checked [F] and [G] for common prefix and extracted it.\nPatterns of Recursion\nOur story so far...\n\nA combiner F :: (B, B) -> B\nA predicate P :: A -> Bool to detect the base case\nA base case value c :: B\n\nHylo- Ana-, Cata-\nw/ G :: A -> (A, B)\n\nH == [P ] [pop c ] [G ] [dip F ] genrec\nA == [P ] [pop []] [G ] [dip swons] genrec\nC == [[] =] [pop c ] [uncons swap] [dip F ] genrec\n\nPara-, ?-, ?-\nw/ G :: B -> B\n\nP == c swap [P ] [pop] [[F ] dupdip G ] primrec\n? == [] swap [P ] [pop] [[swons] dupdip G ] primrec\n? == c swap [[] =] [pop] [[F ] dupdip uncons swap] primrec\n\nFour Generalizations\nThere are at least four kinds of recursive combinator, depending on two choices. The first choice is whether the combiner function should be evaluated during the recursion or pushed into the pending expression to be \"collapsed\" at the end. The second choice is whether the combiner needs to operate on the current value of the datastructure or the generator's output.\nH == [P] [pop c] [G ] [dip F] genrec\nH == c swap [P] [pop] [G [F] dip ] [i] genrec\nH == [P] [pop c] [ [G] dupdip ] [dip F] genrec\nH == c swap [P] [pop] [ [F] dupdip G] [i] genrec\n\nConsider:\n... a G [H] dip F w/ a G == a' b\n... c a G [F] dip H a G == b a'\n... a [G] dupdip [H] dip F a G == a'\n... 
c a [F] dupdip G H a G == a'\n\n1\nH == [P] [pop c] [G] [dip F] genrec\n\nIterate n times.\n... a [P] [pop c] [G] [dip F] genrec\n... a G [H] dip F\n... a' b [H] dip F\n... a' H b F\n... a' G [H] dip F b F\n... a'' b [H] dip F b F\n... a'' H b F b F\n... a'' G [H] dip F b F b F\n... a''' b [H] dip F b F b F\n... a''' H b F b F b F\n... a''' pop c b F b F b F\n... c b F b F b F\n\nThis form builds up a continuation that contains the intermediate results along with the pending combiner functions. When the base case is reached the last term is replaced by the identity value c and the continuation \"collapses\" into the final result.\n2\nWhen you can start with the identity value c on the stack and the combiner can operate as you go, using the intermediate results immediately rather than queuing them up, use this form. An important difference is that the generator function must return its results in the reverse order.\nH == c swap [P] [pop] [G [F] dip] primrec\n\n... c a G [F] dip H\n... c b a' [F] dip H\n... c b F a' H\n... c b F a' G [F] dip H\n... c b F b a'' [F] dip H\n... c b F b F a'' H\n... c b F b F a'' G [F] dip H\n... c b F b F b a''' [F] dip H\n... c b F b F b F a''' H\n... c b F b F b F a''' pop\n... c b F b F b F\n\nThe end line here is the same as for above, but only because we didn't evaluate F when it normally would have been.\n3\nIf the combiner and the generator both need to work on the current value then dup must be used at some point, and the generator must produce one item instead of two (the b is instead the duplicate of a.)\nH == [P] [pop c] [[G] dupdip] [dip F] genrec\n\n... a [G] dupdip [H] dip F\n... a G a [H] dip F\n... a' a [H] dip F\n... a' H a F\n... a' [G] dupdip [H] dip F a F\n... a' G a' [H] dip F a F\n... a'' a' [H] dip F a F\n... a'' H a' F a F\n... a'' [G] dupdip [H] dip F a' F a F\n... a'' G a'' [H] dip F a' F a F\n... a''' a'' [H] dip F a' F a F\n... a''' H a'' F a' F a F\n... a''' pop c a'' F a' F a F\n... 
c a'' F a' F a F\n\n4\nAnd, last but not least, if you can combine as you go, starting with c, and the combiner needs to work on the current item this is the form:\nW == c swap [P] [pop] [[F] dupdip G] primrec\n\n... a c swap [P] [pop] [[F] dupdip G] primrec\n... c a [P] [pop] [[F] dupdip G] primrec\n... c a [F] dupdip G W\n... c a F a G W\n... c a F a' W\n... c a F a' [F] dupdip G W\n... c a F a' F a' G W\n... c a F a' F a'' W\n... c a F a' F a'' [F] dupdip G W\n... c a F a' F a'' F a'' G W\n... c a F a' F a'' F a''' W\n... c a F a' F a'' F a''' pop\n... c a F a' F a'' F\n\nEach of the four variations above can be specialized to ana- and catamorphic forms.",
"def WTFmorphism(c, F, P, G):\n '''Return a hylomorphism function H.'''\n\n def H(a, d=c):\n if P(a):\n result = d\n else:\n a, b = G(a)\n result = H(a, F(d, b))\n return result\n\n return H\n\nF = lambda a, b: a + b\nP = lambda n: n <= 1\nG = lambda n: (n - 1, n - 1)\n\nwtf = WTFmorphism(0, F, P, G)\n\nprint wtf(5)",
"H == [P ] [pop c ] [G ] [dip F ] genrec",
"DefinitionWrapper.add_definitions('''\nP == 1 <=\nGa == -- dup\nGb == --\nc == 0\nF == +\n''', D)\n\nV('[1 2 3] [[] =] [pop []] [uncons swap] [dip swons] genrec')\n\nV('3 [P] [pop c] [Ga] [dip F] genrec')\n\nV('3 [P] [pop []] [Ga] [dip swons] genrec')\n\nV('[2 1] [[] =] [pop c ] [uncons swap] [dip F] genrec')",
"Appendix - Fun with Symbols\n|[ (c, F), (G, P) ]| == (|c, F|) • [(G, P)]\n\n\"Bananas, Lenses, & Barbed Wire\"\n(|...|) [(...)] [<...>]\n\nI think they are having slightly too much fun with the symbols.\n\"Too much is always better than not enough.\"\nTree with node and list of trees.\ntree = [] | [node [tree*]]\n\ntreestep\ntree z [C] [N] treestep\n\n\n [] z [C] [N] treestep\n---------------------------\n z\n\n\n [node [tree*]] z [C] [N] treestep\n--------------------------------------- w/ K == z [C] [N] treestep\n node N [tree*] [K] map C\n\nDerive the recursive form.\nK == [not] [pop z] [J] ifte\n\n\n [node [tree*]] J\n------------------------------\n node N [tree*] [K] map C\n\n\nJ == .. [N] .. [K] .. [C] ..\n\n[node [tree*]] uncons [N] dip\nnode [[tree*]] [N] dip\nnode N [[tree*]]\n\nnode N [[tree*]] i [K] map\nnode N [tree*] [K] map\nnode N [K.tree*]\n\nJ == uncons [N] dip i [K] map [C] i\n\nK == [not] [pop z] [uncons [N] dip i [K] map [C] i] ifte\nK == [not] [pop z] [uncons [N] dip i] [map [C] i] genrec\n\nExtract the givens to parameterize the program.\n[not] [pop z] [uncons [N] dip unquote] [map [C] i] genrec\n[not] [z] [pop] swoncat [uncons [N] dip unquote] [map [C] i] genrec\n[not] z unit [pop] swoncat [uncons [N] dip unquote] [map [C] i] genrec\nz [not] swap unit [pop] swoncat [uncons [N] dip unquote] [map [C] i] genrec\n \\............TS0............/\nz TS0 [uncons [N] dip unquote] [map [C] i] genrec\nz [uncons [N] dip unquote] [TS0] dip [map [C] i] genrec\nz [[N] dip unquote] [uncons] swoncat [TS0] dip [map [C] i] genrec\nz [N] [dip unquote] cons [uncons] swoncat [TS0] dip [map [C] i] genrec\n \\...........TS1.................../\nz [N] TS1 [TS0] dip [map [C] i] genrec\nz [N] [map [C] i] [TS1 [TS0] dip] dip genrec\nz [N] [map C ] [TS1 [TS0] dip] dip genrec\nz [N] [C] [map] swoncat [TS1 [TS0] dip] dip genrec\nz [C] [N] swap [map] swoncat [TS1 [TS0] dip] dip genrec\n\n TS0 == [not] swap unit [pop] swoncat\n TS1 == [dip i] cons [uncons] 
swoncat\ntreestep == swap [map] swoncat [TS1 [TS0] dip] dip genrec\n\n [] 0 [C] [N] treestep\n---------------------------\n 0\n\n\n [n [tree*]] 0 [sum +] [] treestep\n --------------------------------------------------\n n [tree*] [0 [sum +] [] treestep] map sum +",
"DefinitionWrapper.add_definitions('''\n\n TS0 == [not] swap unit [pop] swoncat\n TS1 == [dip i] cons [uncons] swoncat\ntreestep == swap [map] swoncat [TS1 [TS0] dip] dip genrec\n\n''', D)\n\nV('[] 0 [sum +] [] treestep')\n\nV('[23 []] 0 [sum +] [] treestep')\n\nV('[23 [[2 []] [3 []]]] 0 [sum +] [] treestep')\n\nJ('[23 [[2 [[23 [[2 []] [3 []]]][23 [[2 []] [3 []]]]]] [3 [[23 [[2 []] [3 []]]][23 [[2 []] [3 []]]]]]]] 0 [sum +] [] treestep')\n\nJ('[] [] [unit cons] [23 +] treestep')\n\nJ('[23 []] [] [unit cons] [23 +] treestep')\n\nJ('[23 [[2 []] [3 []]]] [] [unit cons] [23 +] treestep')\n\ndefine('treemap == [] [unit cons] roll< treestep')\n\nJ('[23 [[2 []] [3 []]]] [23 +] treemap')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
computational-class/cjc2016
|
code/04.PythonCrawler_beautifulsoup.ipynb
|
mit
|
[
"数据抓取:\n\nRequests、Beautifulsoup、Xpath简介\n\n\n王成军\nwangchengjun@nju.edu.cn\n计算传播网 http://computational-communication.com\n爬虫基本原理\nhttp://www.cnblogs.com/zhaof/p/6898138.html\n需要解决的问题\n\n页面解析\n获取Javascript隐藏源数据\n自动翻页\n自动登录\n\n连接API接口\n\n\n一般的数据抓取,使用requests和beautifulsoup配合就可以了。\n\n尤其是对于翻页时url出现规则变化的网页,只需要处理规则化的url就可以了。\n以简单的例子是抓取天涯论坛上关于某一个关键词的帖子。\n在天涯论坛,关于雾霾的帖子的第一页是:\nhttp://bbs.tianya.cn/list.jsp?item=free&nextid=0&order=8&k=雾霾\n第二页是:\nhttp://bbs.tianya.cn/list.jsp?item=free&nextid=1&order=8&k=雾霾\n\n\n\n第一个爬虫\nBeautifulsoup Quick Start \nhttp://www.crummy.com/software/BeautifulSoup/bs4/doc/\n\nhttp://computational-class.github.io/bigdata/data/test.html",
"import requests\nfrom bs4 import BeautifulSoup\n\nhelp(requests.get) \n\nurl = 'http://computational-class.github.io/bigdata/data/test.html'\ncontent = requests.get(url)\nhelp(content)\n\nprint(content.text)\n\ncontent.encoding",
"Beautiful Soup\n\nBeautiful Soup is a Python library designed for quick turnaround projects like screen-scraping. Three features make it powerful:\n\n\nBeautiful Soup provides a few simple methods. It doesn't take much code to write an application\nBeautiful Soup automatically converts incoming documents to Unicode and outgoing documents to UTF-8. Then you just have to specify the original encoding.\nBeautiful Soup sits on top of popular Python parsers like lxml and html5lib.\n\nInstall beautifulsoup4\nopen your terminal/cmd\n<del> $ pip install beautifulsoup4\nhtml.parser\nBeautiful Soup supports the html.parser included in Python’s standard library\nlxml\nbut it also supports a number of third-party Python parsers. One is the lxml parser lxml. Depending on your setup, you might install lxml with one of these commands:\n\n$ apt-get install python-lxml\n$ easy_install lxml\n$ pip install lxml\n\nhtml5lib\nAnother alternative is the pure-Python html5lib parser html5lib, which parses HTML the way a web browser does. Depending on your setup, you might install html5lib with one of these commands:\n\n$ apt-get install python-html5lib\n$ easy_install html5lib\n$ pip install html5lib",
"url = 'http://computational-class.github.io/bigdata/data/test.html'\ncontent = requests.get(url)\ncontent = content.text\nsoup = BeautifulSoup(content, 'html.parser') \nsoup\n\nprint(soup.prettify())",
"html\nhead\ntitle\n\n\nbody\np (class = 'title', 'story' )\na (class = 'sister')\nhref/id\n\n\n\n\n\n\n\n\n\nSelect 方法\n\n标签名不加任何修饰\n类名前加点\nid名前加 #\n\n我们也可以利用这种特性,使用soup.select()方法筛选元素,返回类型是 list\nSelect方法三步骤\n\nInspect (检查)\n\nCopy\n\nCopy Selector\n\n\n\n鼠标选中标题The Dormouse's story, 右键检查Inspect\n\n鼠标移动到选中的源代码\n右键Copy-->Copy Selector \n\nbody > p.title > b",
"soup.select('body > p.title > b')[0].text",
"Select 方法: 通过标签名查找",
"soup.select('title')\n\nsoup.select('a')\n\nsoup.select('b')",
"Select 方法: 通过类名查找",
"soup.select('.title')\n\nsoup.select('.sister')\n\nsoup.select('.story')",
"Select 方法: 通过id名查找",
"soup.select('#link1')\n\nsoup.select('#link1')[0]['href']",
"Select 方法: 组合查找\n将标签名、类名、id名进行组合\n\n例如查找 p 标签中,id 等于 link1的内容",
"soup.select('p #link1')",
"Select 方法:属性查找\n加入属性元素\n- 属性需要用中括号>连接\n- 属性和标签属于同一节点,中间不能加空格。",
"soup.select(\"head > title\")\n\nsoup.select(\"body > p\")",
"find_all方法",
"soup('p')\n\nsoup.find_all('p')\n\n[i.text for i in soup('p')]\n\nfor i in soup('p'):\n print(i.text)\n\nfor tag in soup.find_all(True):\n print(tag.name)\n\nsoup('head') # or soup.head\n\nsoup('body') # or soup.body\n\nsoup('title') # or soup.title\n\nsoup('p')\n\nsoup.p\n\nsoup.title.name\n\nsoup.title.string\n\nsoup.title.text\n# 推荐使用text方法\n\nsoup.title.parent.name\n\nsoup.p\n\nsoup.p['class']\n\nsoup.find_all('p', {'class', 'title'})\n\nsoup.find_all('p', class_= 'title')\n\nsoup.find_all('p', {'class', 'story'})\n\nsoup.find_all('p', {'class', 'story'})[0].find_all('a')\n\nsoup.a\n\nsoup('a')\n\nsoup.find(id=\"link3\")\n\nsoup.find_all('a')\n\nsoup.find_all('a', {'class', 'sister'}) # compare with soup.find_all('a')\n\nsoup.find_all('a', {'class', 'sister'})[0]\n\nsoup.find_all('a', {'class', 'sister'})[0].text\n\nsoup.find_all('a', {'class', 'sister'})[0]['href']\n\nsoup.find_all('a', {'class', 'sister'})[0]['id']\n\nsoup.find_all([\"a\", \"b\"])\n\nprint(soup.get_text())",
"数据抓取:\n\n抓取微信公众号文章内容\n\n\n\n王成军\nwangchengjun@nju.edu.cn\n计算传播网 http://computational-communication.com",
"from IPython.display import display_html, HTML\nHTML(url = 'http://mp.weixin.qq.com/s?__biz=MzA3MjQ5MTE3OA==&mid=206241627&idx=1&sn=471e59c6cf7c8dae452245dbea22c8f3&3rd=MzA3MDU4NTYzMw==&scene=6#rd')\n# the webpage we would like to crawl",
"查看源代码 Inspect",
"url = \"http://mp.weixin.qq.com/s?__biz=MzA3MjQ5MTE3OA==&mid=206241627&idx=1&sn=471e59c6cf7c8dae452245dbea22c8f3&3rd=MzA3MDU4NTYzMw==&scene=6#rd\"\ncontent = requests.get(url).text #获取网页的html文本\nsoup = BeautifulSoup(content, 'html.parser') \n\ntitle = soup.select(\"#activity-name\") # #activity-name\ntitle[0].text.strip()\n\nsoup.find('h2', {'class', 'rich_media_title'}).text.strip()\n\nprint(soup.find('div', {'class', 'rich_media_meta_list'}) )\n\n\nsoup.select('#publish_time')\n\narticle = soup.find('div', {'class' , 'rich_media_content'}).text\nprint(article)\n\nrmml = soup.find('div', {'class', 'rich_media_meta_list'})\n#date = rmml.find(id = 'post-date').text\nrmc = soup.find('div', {'class', 'rich_media_content'})\ncontent = rmc.get_text()\nprint(title[0].text.strip())\n#print(date)\nprint(content) \n\n",
"wechatsogou\n\npip install wechatsogou --upgrade\n\nhttps://github.com/Chyroc/WechatSogou",
"!pip install wechatsogou --upgrade\n\nimport wechatsogou\n\n# 可配置参数\n\n# 直连\nws_api = wechatsogou.WechatSogouAPI()\n\n# 验证码输入错误的重试次数,默认为1\nws_api = wechatsogou.WechatSogouAPI(captcha_break_time=3)\n\n# 所有requests库的参数都能在这用\n# 如 配置代理,代理列表中至少需包含1个 HTTPS 协议的代理, 并确保代理可用\nws_api = wechatsogou.WechatSogouAPI(proxies={\n \"http\": \"127.0.0.1:8889\",\n \"https\": \"127.0.0.1:8889\",\n})\n\n# 如 设置超时\nws_api = wechatsogou.WechatSogouAPI(timeout=0.1)\n\nws_api =wechatsogou.WechatSogouAPI()\nws_api.get_gzh_info('南航青年志愿者')\n\narticles = ws_api.search_article('南京航空航天大学')\n\nfor i in articles:\n print(i)",
"requests + Xpath方法介绍:以豆瓣电影为例\nXpath 即为 XML 路径语言(XML Path Language),它是一种用来确定 XML 文档中某部分位置的语言。\nXpath 基于 XML 的树状结构,提供在数据结构树中找寻节点的能力。起初 Xpath 的提出的初衷是将其作为一个通用的、介于 Xpointer 与 XSL 间的语法模型。但是Xpath 很快的被开发者采用来当作小型查询语言。\n获取元素的Xpath信息并获得文本:\n这里的“元素的Xpath信息”是需要我们手动获取的,获取方式为:\n- 定位目标元素\n- 在网站上依次点击:右键 > 检查\n- copy xpath\n- xpath + '/text()'\n参考:https://mp.weixin.qq.com/s/zx3_eflBCrrfOqFEWjAUJw",
"import requests\nfrom lxml import etree\n\nurl = 'https://movie.douban.com/subject/26611804/'\ndata = requests.get(url).text\ns = etree.HTML(data) ",
"豆瓣电影的名称对应的的xpath为xpath_title,那么title表达为:\ntitle = s.xpath('xpath_info/text()')\n其中,xpath_info为:\n//*[@id=\"content\"]/h1/span[1]",
"title = s.xpath('//*[@id=\"content\"]/h1/span[1]/text()')[0]\ndirector = s.xpath('//*[@id=\"info\"]/span[1]/span[2]/a/text()')\nactors = s.xpath('//*[@id=\"info\"]/span[3]/span[2]/a/text()')\ntype1 = s.xpath('//*[@id=\"info\"]/span[5]/text()')\ntype2 = s.xpath('//*[@id=\"info\"]/span[6]/text()')\ntype3 = s.xpath('//*[@id=\"info\"]/span[7]/text()')\ntime = s.xpath('//*[@id=\"info\"]/span[11]/text()')\nlength = s.xpath('//*[@id=\"info\"]/span[13]/text()')\nscore = s.xpath('//*[@id=\"interest_sectl\"]/div[1]/div[2]/strong/text()')[0]\n\n\nprint(title, director, actors, type1, type2, type3, time, length, score)",
"Douban API\nhttps://developers.douban.com/wiki/?title=guide\nhttps://github.com/computational-class/douban-api-docs",
"import requests\n# https://movie.douban.com/subject/26611804/\nurl = 'https://api.douban.com/v2/movie/subject/26611804?apikey=0b2bdeda43b5688921839c8ecb20399b&start=0&count=20&client=&udid='\njsonm = requests.get(url).json()\n\njsonm.keys()\n\n#jsonm.values()\njsonm['rating']\n\njsonm['alt']\n\njsonm['casts'][0]\n\njsonm['directors']\n\njsonm['genres']",
"作业:抓取豆瓣电影 Top 250",
"import requests\nfrom bs4 import BeautifulSoup\nfrom lxml import etree\n\nurl0 = 'https://movie.douban.com/top250?start=0&filter='\ndata = requests.get(url0).text\ns = etree.HTML(data)\n\ns.xpath('//*[@id=\"content\"]/div/div[1]/ol/li[1]/div/div[2]/div[1]/a/span[1]/text()')[0]\n\ns.xpath('//*[@id=\"content\"]/div/div[1]/ol/li[2]/div/div[2]/div[1]/a/span[1]/text()')[0]\n\ns.xpath('//*[@id=\"content\"]/div/div[1]/ol/li[3]/div/div[2]/div[1]/a/span[1]/text()')[0]\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nurl0 = 'https://movie.douban.com/top250?start=0&filter='\ndata = requests.get(url0).text\nsoup = BeautifulSoup(data, 'lxml')\n\nmovies = soup.find_all('div', {'class', 'info'})\n\nlen(movies)\n\nmovies[0].a['href']\n\nmovies[0].find('span', {'class', 'title'}).text\n\nmovies[0].find('div', {'class', 'star'})\n\nmovies[0].find('span', {'class', 'rating_num'}).text\n\npeople_num = movies[0].find('div', {'class', 'star'}).find_all('span')[-1]\npeople_num.text.split('人评价')[0]\n\nfor i in movies:\n url = i.a['href']\n title = i.find('span', {'class', 'title'}).text\n des = i.find('div', {'class', 'star'})\n rating = des.find('span', {'class', 'rating_num'}).text\n rating_num = des.find_all('span')[-1].text.split('人评价')[0]\n print(url, title, rating, rating_num)\n\nfor i in range(0, 250, 25):\n print('https://movie.douban.com/top250?start=%d&filter='% i)\n\nimport requests\nfrom bs4 import BeautifulSoup\ndat = []\nfor j in range(0, 250, 25):\n urli = 'https://movie.douban.com/top250?start=%d&filter='% j\n data = requests.get(urli).text\n soup = BeautifulSoup(data, 'lxml')\n movies = soup.find_all('div', {'class', 'info'})\n for i in movies:\n url = i.a['href']\n title = i.find('span', {'class', 'title'}).text\n des = i.find('div', {'class', 'star'})\n rating = des.find('span', {'class', 'rating_num'}).text\n rating_num = des.find_all('span')[-1].text.split('人评价')[0]\n listi = [url, title, rating, rating_num]\n dat.append(listi)\n\nimport pandas as pd\ndf = 
pd.DataFrame(dat, columns = ['url', 'title', 'rating', 'rating_num'])\ndf['rating'] = df.rating.astype(float)\ndf['rating_num'] = df.rating_num.astype(int)\ndf.head()\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\nplt.hist(df.rating_num)\nplt.show()\n\nplt.hist(df.rating)\nplt.show()\n\n# viz\nfig = plt.figure(figsize=(16, 16),facecolor='white')\nplt.plot(df.rating_num, df.rating, 'bo')\nfor i in df.index:\n plt.text(df.rating_num[i], df.rating[i], df.title[i], \n fontsize = df.rating[i], \n color = 'red', rotation = 45)\nplt.show() \n\ndf[df.rating > 9.4]\n\nalist = []\nfor i in df.index:\n alist.append( [df.rating_num[i], df.rating[i], df.title[i] ])\n\nblist =[[df.rating_num[i], df.rating[i], df.title[i] ] for i in df.index] \n\nalist\n\n \nfrom IPython.display import display_html, HTML\nHTML('<iframe src=http://nbviewer.jupyter.org/github/computational-class/bigdata/blob/gh-pages/vis/douban250bubble.html \\\n width=1000 height=500></iframe>')",
"作业:\n\n抓取复旦新媒体微信公众号最新一期的内容\n\nrequests.post模拟登录豆瓣(包括获取验证码)\nhttps://blog.csdn.net/zhuzuwei/article/details/80875538\n抓取江苏省政协十年提案",
"# headers = {\n# 'Accept': 'application/json, text/javascript, */*; q=0.01',\n# 'Accept-Encoding': 'gzip, deflate',\n# 'Accept-Language': 'zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6',\n# 'Cache-Control': 'no-cache',\n# 'Connection': 'keep-alive',\n# 'Cookie': 'JSESSIONID=992CB756ADE61B87409672DC808FDD92',\n# 'DNT': '1',\n# 'Host': 'www.jszx.gov.cn',\n# 'Pragma': 'no-cache',\n# 'Referer': 'http://www.jszx.gov.cn/zxta/2019ta/',\n# 'Upgrade-Insecure-Requests': '1',\n# 'User-Agent': 'Mozilla/5.0 (iPad; CPU OS 11_0 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) Version/11.0 Mobile/15A5341f Safari/604.1'\n# }",
"打开http://www.jszx.gov.cn/zxta/2019ta/\n\n点击下一页,url不变!\n\n\n所以数据的更新是使用js推送的\n- 分析network中的内容,发现proposalList.jsp\n - 查看它的header,并发现了form_data\n\n<img src = './img/form_data.png'>\nhttp://www.jszx.gov.cn/zxta/2019ta/",
"import requests\nfrom bs4 import BeautifulSoup\n\nform_data = {'year':2019,\n 'pagenum':1,\n 'pagesize':20\n}\nurl = 'http://www.jszx.gov.cn/wcm/zxweb/proposalList.jsp'\ncontent = requests.get(url, form_data)\ncontent.encoding = 'utf-8'\njs = content.json()\n\njs['data']['totalcount']\n\ndat = js['data']['list']\npagenum = js['data']['pagecount']",
"抓取所有提案的链接",
"for i in range(2, pagenum+1):\n print(i)\n form_data['pagenum'] = i\n content = requests.get(url, form_data)\n content.encoding = 'utf-8'\n js = content.json()\n for j in js['data']['list']:\n dat.append(j)\n\nlen(dat)\n\ndat[0]\n\nimport pandas as pd\n\ndf = pd.DataFrame(dat)\ndf.head()\n\ndf.groupby('type').size()",
"抓取提案内容\nhttp://www.jszx.gov.cn/zxta/2019ta/index_61.html?pkid=18b1b347f9e34badb8934c2acec80e9e\nhttp://www.jszx.gov.cn/wcm/zxweb/proposalInfo.jsp?pkid=18b1b347f9e34badb8934c2acec80e9e",
"url_base = 'http://www.jszx.gov.cn/wcm/zxweb/proposalInfo.jsp?pkid='\nurls = [url_base + i for i in df['pkid']]\n\nimport sys\ndef flushPrint(www):\n sys.stdout.write('\\r')\n sys.stdout.write('%s' % www)\n sys.stdout.flush()\n \ntext = []\nfor k, i in enumerate(urls):\n flushPrint(k)\n content = requests.get(i)\n content.encoding = 'utf-8'\n js = content.json()\n js = js['data']['binfo']['_content']\n soup = BeautifulSoup(js, 'html.parser') \n text.append(soup.text)\n\nlen(text)\n\ndf['content'] = text\n\ndf.head()\n\ndf.to_csv('../data/jszx2019.csv', index = False)\n\ndd = pd.read_csv('../data/jszx2019.csv')\ndd.head()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mne-tools/mne-tools.github.io
|
0.24/_downloads/f574d1e7527e4460eb09a16f6f836e35/60_maxwell_filtering_sss.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"Signal-space separation (SSS) and Maxwell filtering\nThis tutorial covers reducing environmental noise and compensating for head\nmovement with SSS and Maxwell filtering.\nAs usual we'll start by importing the modules we need, loading some\nexample data <sample-dataset>, and cropping it to save on memory:",
"import os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport mne\nfrom mne.preprocessing import find_bad_channels_maxwell\n\nsample_data_folder = mne.datasets.sample.data_path()\nsample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',\n 'sample_audvis_raw.fif')\nraw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)\nraw.crop(tmax=60)",
"Background on SSS and Maxwell filtering\nSignal-space separation (SSS) :footcite:TauluKajola2005,TauluSimola2006\nis a technique based on the physics\nof electromagnetic fields. SSS separates the measured signal into components\nattributable to sources inside the measurement volume of the sensor array\n(the internal components), and components attributable to sources outside\nthe measurement volume (the external components). The internal and external\ncomponents are linearly independent, so it is possible to simply discard the\nexternal components to reduce environmental noise. Maxwell filtering is a\nrelated procedure that omits the higher-order components of the internal\nsubspace, which are dominated by sensor noise. Typically, Maxwell filtering\nand SSS are performed together (in MNE-Python they are implemented together\nin a single function).\nLike SSP <tut-artifact-ssp>, SSS is a form of projection. Whereas SSP\nempirically determines a noise subspace based on data (empty-room recordings,\nEOG or ECG activity, etc) and projects the measurements onto a subspace\northogonal to the noise, SSS mathematically constructs the external and\ninternal subspaces from spherical harmonics_ and reconstructs the sensor\nsignals using only the internal subspace (i.e., does an oblique projection).\n<div class=\"alert alert-danger\"><h4>Warning</h4><p>Maxwell filtering was originally developed for Elekta Neuromag® systems,\n and should be considered *experimental* for non-Neuromag data. 
See the\n Notes section of the :func:`~mne.preprocessing.maxwell_filter` docstring\n for details.</p></div>\n\nThe MNE-Python implementation of SSS / Maxwell filtering currently provides\nthe following features:\n\nBasic bad channel detection\n (:func:~mne.preprocessing.find_bad_channels_maxwell)\nBad channel reconstruction\nCross-talk cancellation\nFine calibration correction\ntSSS\nCoordinate frame translation\nRegularization of internal components using information theory\nRaw movement compensation (using head positions estimated by MaxFilter)\ncHPI subtraction (see :func:mne.chpi.filter_chpi)\nHandling of 3D (in addition to 1D) fine calibration files\nEpoch-based movement compensation as described in\n :footcite:TauluKajola2005 through :func:mne.epochs.average_movements\nExperimental processing of data from (un-compensated) non-Elekta\n systems\n\nUsing SSS and Maxwell filtering in MNE-Python\nFor optimal use of SSS with data from Elekta Neuromag® systems, you should\nprovide the path to the fine calibration file (which encodes site-specific\ninformation about sensor orientation and calibration) as well as a crosstalk\ncompensation file (which reduces interference between Elekta's co-located\nmagnetometer and paired gradiometer sensor units).",
"fine_cal_file = os.path.join(sample_data_folder, 'SSS', 'sss_cal_mgh.dat')\ncrosstalk_file = os.path.join(sample_data_folder, 'SSS', 'ct_sparse_mgh.fif')",
"Before we perform SSS we'll look for bad channels — MEG 2443 is quite\nnoisy.\n<div class=\"alert alert-danger\"><h4>Warning</h4><p>It is critical to mark bad channels in ``raw.info['bads']`` *before*\n calling :func:`~mne.preprocessing.maxwell_filter` in order to prevent\n bad channel noise from spreading.</p></div>\n\nLet's see if we can automatically detect it.",
"raw.info['bads'] = []\nraw_check = raw.copy()\nauto_noisy_chs, auto_flat_chs, auto_scores = find_bad_channels_maxwell(\n raw_check, cross_talk=crosstalk_file, calibration=fine_cal_file,\n return_scores=True, verbose=True)\nprint(auto_noisy_chs) # we should find them!\nprint(auto_flat_chs) # none for this dataset",
"<div class=\"alert alert-info\"><h4>Note</h4><p>`~mne.preprocessing.find_bad_channels_maxwell` needs to operate on\n a signal without line noise or cHPI signals. By default, it simply\n applies a low-pass filter with a cutoff frequency of 40 Hz to the\n data, which should remove these artifacts. You may also specify a\n different cutoff by passing the ``h_freq`` keyword argument. If you\n set ``h_freq=None``, no filtering will be applied. This can be\n useful if your data has already been preconditioned, for example\n using :func:`mne.chpi.filter_chpi`,\n :func:`mne.io.Raw.notch_filter`, or :meth:`mne.io.Raw.filter`.</p></div>\n\nNow we can update the list of bad channels in the dataset.",
"bads = raw.info['bads'] + auto_noisy_chs + auto_flat_chs\nraw.info['bads'] = bads",
"We called ~mne.preprocessing.find_bad_channels_maxwell with the optional\nkeyword argument return_scores=True, causing the function to return a\ndictionary of all data related to the scoring used to classify channels as\nnoisy or flat. This information can be used to produce diagnostic figures.\nIn the following, we will generate such visualizations for\nthe automated detection of noisy gradiometer channels.",
"# Only select the data forgradiometer channels.\nch_type = 'grad'\nch_subset = auto_scores['ch_types'] == ch_type\nch_names = auto_scores['ch_names'][ch_subset]\nscores = auto_scores['scores_noisy'][ch_subset]\nlimits = auto_scores['limits_noisy'][ch_subset]\nbins = auto_scores['bins'] # The the windows that were evaluated.\n# We will label each segment by its start and stop time, with up to 3\n# digits before and 3 digits after the decimal place (1 ms precision).\nbin_labels = [f'{start:3.3f} – {stop:3.3f}'\n for start, stop in bins]\n\n# We store the data in a Pandas DataFrame. The seaborn heatmap function\n# we will call below will then be able to automatically assign the correct\n# labels to all axes.\ndata_to_plot = pd.DataFrame(data=scores,\n columns=pd.Index(bin_labels, name='Time (s)'),\n index=pd.Index(ch_names, name='Channel'))\n\n# First, plot the \"raw\" scores.\nfig, ax = plt.subplots(1, 2, figsize=(12, 8))\nfig.suptitle(f'Automated noisy channel detection: {ch_type}',\n fontsize=16, fontweight='bold')\nsns.heatmap(data=data_to_plot, cmap='Reds', cbar_kws=dict(label='Score'),\n ax=ax[0])\n[ax[0].axvline(x, ls='dashed', lw=0.25, dashes=(25, 15), color='gray')\n for x in range(1, len(bins))]\nax[0].set_title('All Scores', fontweight='bold')\n\n# Now, adjust the color range to highlight segments that exceeded the limit.\nsns.heatmap(data=data_to_plot,\n vmin=np.nanmin(limits), # bads in input data have NaN limits\n cmap='Reds', cbar_kws=dict(label='Score'), ax=ax[1])\n[ax[1].axvline(x, ls='dashed', lw=0.25, dashes=(25, 15), color='gray')\n for x in range(1, len(bins))]\nax[1].set_title('Scores > Limit', fontweight='bold')\n\n# The figure title should not overlap with the subplots.\nfig.tight_layout(rect=[0, 0.03, 1, 0.95])",
"<div class=\"alert alert-info\"><h4>Note</h4><p>You can use the very same code as above to produce figures for\n *flat* channel detection. Simply replace the word \"noisy\" with\n \"flat\", and replace ``vmin=np.nanmin(limits)`` with\n ``vmax=np.nanmax(limits)``.</p></div>\n\nYou can see the un-altered scores for each channel and time segment in the\nleft subplots, and thresholded scores – those which exceeded a certain limit\nof noisiness – in the right subplots. While the right subplot is entirely\nwhite for the magnetometers, we can see a horizontal line extending all the\nway from left to right for the gradiometers. This line corresponds to channel\nMEG 2443, which was reported as auto-detected noisy channel in the step\nabove. But we can also see another channel exceeding the limits, apparently\nin a more transient fashion. It was therefore not detected as bad, because\nthe number of segments in which it exceeded the limits was less than 5,\nwhich MNE-Python uses by default.\n<div class=\"alert alert-info\"><h4>Note</h4><p>You can request a different number of segments that must be\n found to be problematic before\n `~mne.preprocessing.find_bad_channels_maxwell` reports them as bad.\n To do this, pass the keyword argument ``min_count`` to the\n function.</p></div>\n\nObviously, this algorithm is not perfect. Specifically, on closer inspection\nof the raw data after looking at the diagnostic plots above, it becomes clear\nthat the channel exceeding the \"noise\" limits in some segments without\nqualifying as \"bad\", in fact contains some flux jumps. There were just not\nenough flux jumps in the recording for our automated procedure to report\nthe channel as bad. So it can still be useful to manually inspect and mark\nbad channels. The channel in question is MEG 2313. Let's mark it as bad:",
"raw.info['bads'] += ['MEG 2313'] # from manual inspection",
"After that, performing SSS and Maxwell filtering is done with a\nsingle call to :func:~mne.preprocessing.maxwell_filter, with the crosstalk\nand fine calibration filenames provided (if available):",
"raw_sss = mne.preprocessing.maxwell_filter(\n raw, cross_talk=crosstalk_file, calibration=fine_cal_file, verbose=True)",
"To see the effect, we can plot the data before and after SSS / Maxwell\nfiltering.",
"raw.pick(['meg']).plot(duration=2, butterfly=True)\nraw_sss.pick(['meg']).plot(duration=2, butterfly=True)",
"Notice that channels marked as \"bad\" have been effectively repaired by SSS,\neliminating the need to perform interpolation <tut-bad-channels>.\nThe heartbeat artifact has also been substantially reduced.\nThe :func:~mne.preprocessing.maxwell_filter function has parameters\nint_order and ext_order for setting the order of the spherical\nharmonic expansion of the interior and exterior components; the default\nvalues are appropriate for most use cases. Additional parameters include\ncoord_frame and origin for controlling the coordinate frame (\"head\"\nor \"meg\") and the origin of the sphere; the defaults are appropriate for most\nstudies that include digitization of the scalp surface / electrodes. See the\ndocumentation of :func:~mne.preprocessing.maxwell_filter for details.\nSpatiotemporal SSS (tSSS)\nAn assumption of SSS is that the measurement volume (the spherical shell\nwhere the sensors are physically located) is free of electromagnetic sources.\nThe thickness of this source-free measurement shell should be 4-8 cm for SSS\nto perform optimally. In practice, there may be sources falling within that\nmeasurement volume; these can often be mitigated by using Spatiotemporal\nSignal Space Separation (tSSS) :footcite:TauluSimola2006.\ntSSS works by looking for temporal\ncorrelation between components of the internal and external subspaces, and\nprojecting out any components that are common to the internal and external\nsubspaces. The projection is done in an analogous way to\nSSP <tut-artifact-ssp>, except that the noise vector is computed\nacross time points instead of across sensors.\nTo use tSSS in MNE-Python, pass a time (in seconds) to the parameter\nst_duration of :func:~mne.preprocessing.maxwell_filter. 
This will\ndetermine the \"chunk duration\" over which to compute the temporal projection.\nThe chunk duration effectively acts as a high-pass filter with a cutoff\nfrequency of $\\frac{1}{\\mathtt{st_duration}}~\\mathrm{Hz}$; this\neffective high-pass has an important consequence:\n\nIn general, larger values of st_duration are better (provided that your\n computer has sufficient memory) because larger values of st_duration\n will have a smaller effect on the signal.\n\nIf the chunk duration does not evenly divide your data length, the final\n(shorter) chunk will be added to the prior chunk before filtering, leading\nto slightly different effective filtering for the combined chunk (the\neffective cutoff frequency differing at most by a factor of 2). If you need\nto ensure identical processing of all analyzed chunks, either:\n\n\nchoose a chunk duration that evenly divides your data length (only\n recommended if analyzing a single subject or run), or\n\n\ninclude at least 2 * st_duration of post-experiment recording time at\n the end of the :class:~mne.io.Raw object, so that the data you intend to\n further analyze is guaranteed not to be in the final or penultimate chunks.\n\n\nAdditional parameters affecting tSSS include st_correlation (to set the\ncorrelation value above which correlated internal and external components\nwill be projected out) and st_only (to apply only the temporal projection\nwithout also performing SSS and Maxwell filtering). See the docstring of\n:func:~mne.preprocessing.maxwell_filter for details.\nMovement compensation\nIf you have information about subject head position relative to the sensors\n(i.e., continuous head position indicator coils, or :term:cHPI), SSS\ncan take that into account when projecting sensor data onto the internal\nsubspace. Head position data can be computed using\n:func:mne.chpi.compute_chpi_locs and :func:mne.chpi.compute_head_pos,\nor loaded with the:func:mne.chpi.read_head_pos function. 
The\nexample data <sample-dataset> doesn't include cHPI, so here we'll\nload a :file:.pos file used for testing, just to demonstrate:",
"head_pos_file = os.path.join(mne.datasets.testing.data_path(), 'SSS',\n 'test_move_anon_raw.pos')\nhead_pos = mne.chpi.read_head_pos(head_pos_file)\nmne.viz.plot_head_positions(head_pos, mode='traces')",
"The cHPI data file could also be passed as the head_pos parameter of\n:func:~mne.preprocessing.maxwell_filter. Not only would this account for\nmovement within a given recording session, but also would effectively\nnormalize head position across different measurement sessions and subjects.\nSee here <example-movement-comp> for an extended example of applying\nmovement compensation during Maxwell filtering / SSS. Another option is to\napply movement compensation when averaging epochs into an\n:class:~mne.Evoked instance, using the :func:mne.epochs.average_movements\nfunction.\nEach of these approaches requires time-varying estimates of head position,\nwhich is obtained from MaxFilter using the -headpos and -hp\narguments (see the MaxFilter manual for details).\nCaveats to using SSS / Maxwell filtering\n\n\nThere are patents related to the Maxwell filtering algorithm, which may\n legally preclude using it in commercial applications. More details are\n provided in the documentation of\n :func:~mne.preprocessing.maxwell_filter.\n\n\nSSS works best when both magnetometers and gradiometers are present, and\n is most effective when gradiometers are planar (due to the need for very\n accurate sensor geometry and fine calibration information). Thus its\n performance is dependent on the MEG system used to collect the data.\n\n\nReferences\n.. footbibliography::\n.. LINKS"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
cgpotts/cs224u
|
hw_formatting_guide.ipynb
|
apache-2.0
|
[
"Homework and bake-off code: Formatting guide",
"__author__ = \"Insop\"\n__version__ = \"CS224u, Stanford, Spring 2022\"",
"Contents\n\nOverview\nOriginal system code\nModifying provided code in the original notebook\nExternal imports\nCustom code\nLong running test code\n\n\n\nOverview\nThis notebook provides a list of Dos and Don'ts for writing code for original systems and bake-offs.\nOriginal system code\nOur assignments need to handle specific homework questions and also very open ended original systems that can have arbitrary dependencies and data requirements, so our instructions have to be quite detailed to handle both. \nHere's one quick reminder/clarification of a common issue:\nPlease be sure to include your Original System code and bake-off call within the scope of this if conditional:\nif 'IS_GRADESCOPE_ENV' not in os.environ:\n test_evaluate_pooled_bert(evaluate_pooled_bert)\nThis ensures that the autograder does not attempt to run your original system code. This includes any import statements used in your Original System – they should be within the if conditional. \nOverall – please do not modify any portion of these cells other than \n\nthe comment spaces for system text description and peak score reporting; and \nthe space in the if conditional where you are meant to put your code.\n\nSince we encourage creativity and do not want to constrain things, your original system code will instead be awarded credit manually by CFs after the assignment due date. This is also why you will not see a full grade out of 10 until after the submission deadline, when CFs have manually awarded the original system points.\nModifying provided code in the original notebook\nPlease do not modify provided code in the original notebook, such as changing the function arguments or default parameters. 
The autograder will call functions to test the homework problem code, and the autograder uses the function arguments as shown in the original notebook.\nHere is an example (from hw_colors.ipynb) where the provided code was modified to use func(vocab, 'data/glove.6B/glove.6B.50d.txt') instead of the original code func(vocab, 'glove.6B.50d.txt'). This might work fine in your local environment; however, the autograder will separately call func the same way as shown in the original notebook. That's why we suggest you to not modify the provided code.",
"def test_create_glove_embedding(func):\n vocab = ['NLU', 'is', 'the', 'future', '.', '$UNK', '<s>', '</s>']\n\n # DON'T modify functions like this!\n #\n # glove_embedding, glove_vocab = func(vocab, 'data/glove.6B/glove.6B.50d.txt')\n\n # DO KEEP the code as it was, since the autograder calls functions in\n # the same way shown in this line:\n glove_embedding, glove_vocab = func(vocab, 'glove.6B.50d.txt')\n\n assert isinstance(glove_embedding, np.ndarray), \\\n \"Expected embedding type {}; got {}\".format(\n glove_embedding.__class__.__name__, glove_embedding.__class__.__name__)\n assert glove_embedding.shape == (8, 50), \\\n \"Expected embedding shape (8, 50); got {}\".format(glove_embedding.shape)\n assert glove_vocab == vocab, \\\n \"Expected vocab {}; got {}\".format(vocab, glove_vocab)",
"External imports",
"#\n# DON'T!\n#\n# This will cause the autograder to fail!\n\npip install 'git+https://github.com/NVIDIA/dllogger'\n\n# Directly importing external modules outside of `if 'IS_GRADESCOPE_ENV'` scope\n# will also cause the autograder to fail.\n\n#\n# DO!\n#\n# This is good!\n#\nif 'IS_GRADESCOPE_ENV' not in os.environ:\n # You can install and import modules of your choice --\n # for example:\n # https://github.com/NVIDIA/dllogger/issues/1\n pip install 'git+https://github.com/NVIDIA/dllogger'",
"Custom code",
"#\n# DON'T!\n#\n# This type of custom code will fail, since the autograder is not\n# equipped with a GPU:\n#\ntry:\n t_gpu = torch.randn(3,3, device='cuda:0')\nexcept AssertionError as err:\n print(err)\nt_gpu\n\n#\n# DO\n#\n# This is good!\n#\nif 'IS_GRADESCOPE_ENV' not in os.environ:\n # This is okay since this code will not run in the autograder\n # environment:\n try:\n t_gpu = torch.randn(3,3, device='cuda:0')\n except AssertionError as err:\n print(err)\n t_gpu",
"Long running test code\nAny long running test code should be inside the if conditional block.",
"#\n# DON'T!\n#\n# This type of custom code will cause the autograder to time out:\n#\nmy_test_function_runs_an_hour()\n\n#\n# DO\n#\n# This is good!\n#\nif 'IS_GRADESCOPE_ENV' not in os.environ:\n # Run as many tests as you wish!\n my_test_function_runs_an_hour()",
"Time measurements\nAny time measurement code, such as %%time, should be inside the if conditional block.",
"#\n# DON'T!\n#\n# This type of custom code will cause the autograder fail with this message \n# \"NameError: name 'get_ipython' is not defined\"\n#\n\n%%time\n\nif 'IS_GRADESCOPE_ENV' not in os.environ:\n\n my_func_to_measure_time()\n\n#\n# DO\n#\n# This is good!\n#\nif 'IS_GRADESCOPE_ENV' not in os.environ:\n %%time\n my_func_to_measure_time()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
PBrockmann/ipython_ferretmagic
|
notebooks/ferretmagic_02_PassDataFromPythonToFerret.ipynb
|
mit
|
[
"<hr>\nPatrick BROCKMANN - LSCE (Climate and Environment Sciences Laboratory)<br>\n<img align=\"left\" width=\"40%\" src=\"http://www.lsce.ipsl.fr/Css/img/banniere_LSCE_75.png\" ><br><br>\n<hr>\n\nUpdated: 2019/11/13\nLoad the ferret extension",
"%load_ext ferretmagic",
"Put data from python\nFirst example: 1D array",
"import numpy as np\nb = {}\nb['name']='myvar1' \nx=np.linspace(-np.pi*4, np.pi*4, 500)\nb['data']=np.sin(x)/x\nb.keys()",
"A dataset must have been opened before putting data to ferret to get list of variables latter.\nhttps://github.com/NOAA-PMEL/PyFerret/issues/64",
"%%ferret\nuse levitus_climatology \n\n%ferret_putdata --axis_pos (0,1,2,3,4,5) b\n\n%%ferret\nset text/font=arial\n\nshow data\nppl color 2, 0, 50, 100, 75\nppl color 3, 100, 50, 0, 75\nplot/thick=3/color myvar1, myvar1[x=@shf:50]",
"Second example: 3D array (XYZ)\nCreate a dummy 3D array (XY and a Z axis)",
"nlons, nlats, dim3 = (145, 73, 10)\n\nlats = np.linspace(-np.pi / 2, np.pi / 2, nlats)\nlons = np.linspace(0, 2 * np.pi, nlons)\nlons, lats = np.meshgrid(lons, lats, indexing='ij')\n\nwave = 0.75 * (np.sin(2 * lats) ** 8) * np.cos(4 * lons)\nmean = 0.5 * np.cos(2 * lats) * ((np.sin(2 * lats)) ** 2 + 2)\n\nlats = np.rad2deg(lats)\nlons = np.rad2deg(lons)\ndata2D = wave + mean \n\nmyaxis = np.linspace(1, 1000, dim3)\ndataXYZ = np.repeat(np.expand_dims(data2D,axis=-1), dim3, axis=2)\n\nprint(dataXYZ.shape)",
"Please refer to http://ferret.pmel.noaa.gov/Ferret/documentation/pyferret/data-dictionaries/",
"import pyferret\ndata2ferret = {}\ndata2ferret['name']='myvar2' \ndata2ferret['axis_names']=('lons', 'lats', 'depth')\ndata2ferret['axis_units']=('degrees_east', 'degrees_north', 'meters')\ndata2ferret['axis_types']=(\n pyferret.AXISTYPE_LONGITUDE,\n pyferret.AXISTYPE_LATITUDE,\n pyferret.AXISTYPE_LEVEL\n )\ndata2ferret['axis_coords']=(lons[:,0], lats[0,:], myaxis[:])\ndata2ferret['data']=dataXYZ\ndata2ferret.keys()\n\n%ferret_putdata data2ferret\n\n%%ferret\nshow data\nshade myvar2[k=1]",
"Third example: 3D array (XYT)\nCreate a dummy 3D array (XY and a T axis)",
"dataXYT = np.reshape(dataXYZ, (nlons, nlats, 1, dim3))\nprint(dataXYT.shape)\n\nimport pyferret\ndata2ferret = {}\ndata2ferret['name']='myvar3' \ndata2ferret['axis_names']=('lons', 'lats', '', 'time')\ndata2ferret['axis_units']=('degrees_east', 'degrees_north', '', '')\ndata2ferret['axis_types']=(\n pyferret.AXISTYPE_LONGITUDE,\n pyferret.AXISTYPE_LATITUDE,\n pyferret.AXISTYPE_NORMAL,\n pyferret.AXISTYPE_ABSTRACT\n )\ndata2ferret['axis_coords']=(lons[:,0], lats[0,:], None, None)\ndata2ferret['data']=dataXYT\ndata2ferret.keys()\n\n%ferret_putdata data2ferret\n\n%%ferret\nshow data\nshade myvar3[l=1]"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
SheffieldML/notebook
|
compbio/periodic/figure1.ipynb
|
bsd-3-clause
|
[
"Supplementary materials : Details on generating Figure 1\nThis document is a supplementary material of the article Detecting periodicities with Gaussian\nprocesses by N. Durrande, J. Hensman, M. Rattray and N. D. Lawrence. \nThe first step is to import the required packages. This tutorial has been written with GPy 0.8.8 which includes the kernels discussed in the article. The latter can be downloaded on the SheffieldML github page.",
"%matplotlib inline\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport GPy\n\nnp.random.seed(1)",
"Test functions\nWe now introduce 1-periodic tests functions that are defined over $[0,1)$ as:\n\\begin{equation}\n \\begin{split}\n f_1(x) & = \\cos(2 \\pi x) + \\varepsilon\\\n f_2(x) & = 1/2 \\cos(2 \\pi x) + 1/2 \\cos(4 \\pi x) + \\varepsilon\\\n f_3(x) & = \\left{\n \\begin{matrix}\n 1 + \\varepsilon \\text{ if } x \\in [0,0.2] \\\n -1 + \\varepsilon \\text{ if } x \\in (0.2,1) \\\n \\end{matrix}\n \\right. \\\n f_4(x) & = 4 |x-0.5| + 1) + \\varepsilon\\\n f_5(x) & = 1 - 2x + \\varepsilon\\\n f_6(x) & = + \\varepsilon\n \\end{split}\n\\end{equation}\nwhere $\\varepsilon$ is a $\\mathcal{N}(0,\\tau^2)$ random variable.",
"# domain boundaries\nlower = 0.\nupper = 3.\nx = np.linspace(lower,upper,500)\n\n# grid for function evaluations and plots\nn_pts = 50\nX = np.linspace(lower,upper,n_pts+1)\nX = X[0:-1]+X[1]/2\n\n# test functions\ndef f1(x,tau2=0.1):\n return(np.cos(x*2*np.pi) + np.sqrt(tau2)*np.random.normal(size=x.shape))\n\ndef f2(x,tau2=0.1):\n return(1./2*np.cos(x*2*np.pi)+1./2*np.cos(x*4*np.pi) + np.sqrt(tau2)*np.random.normal(size=x.shape))\n\ndef f3(x,tau2=0.1):\n alpha = 0.2\n return(2*(x - np.trunc(x) < alpha)-1 + np.sqrt(tau2)*np.random.normal(size=x.shape))\n\ndef f4(x,tau2=0.1):\n return(4*np.abs(x - np.trunc(x) - 0.5)-1 + np.sqrt(tau2)*np.random.normal(size=x.shape))\n\ndef f5(x,tau2=0.1):\n return(2*np.trunc(x) - 2*x +1 + np.sqrt(tau2)*np.random.normal(size=x.shape))\n\ndef f6(x,tau2=0.1):\n return(np.zeros((len(x),)) + np.sqrt(tau2)*np.random.normal(size=x.shape))\n\n",
"The associated graphs are:",
"names = ['f1','f2','f3','f4','f5','f6']\n\nfig, axs = plt.subplots(2,3,figsize=(12,7), sharex=True, sharey=True)\nfor i, (ax,testfunc) in enumerate(zip(axs.flat, [f1,f2,f3,f4,f5,f6])):\n ax.plot(x,testfunc(x,0.),'r', linewidth=1.5)\n ax.plot(X,testfunc(X),'kx',mew=1)\n ax.legend(['$\\\\mathrm{'+names[i]+'}$'],prop={'size':18},borderaxespad=0.)\n ax.set_ylim((-1.5,1.8))",
"Models\nCOSOPT: We consider here the following implementation",
"def fit_cosopt(X,Y):\n X = X[:,None]\n Y = Y[:,None]\n period = np.linspace(0.15,2,100)\n phase = np.linspace(-np.pi,np.pi,100)\n\n MSE = np.zeros((100,100))\n for i,per in enumerate(period):\n for j,pha in enumerate(phase):\n B = np.hstack((np.ones(X.shape),np.cos(X*2*np.pi/per+pha)))\n C = np.dot(np.linalg.inv(np.dot(B.T,B)),np.dot(B.T,Y))\n MSE[i,j] = np.mean((np.dot(B,C)-Y)**2)\n\n i,j = np.unravel_index(MSE.argmin(), MSE.shape)\n B = np.hstack((np.ones(X.shape),np.cos(X*2*np.pi/period[i]+phase[j])))\n C = np.dot(np.linalg.inv(np.dot(B.T,B)),np.dot(B.T,Y))\n\n return((C,period[i],phase[j]))\n\ndef pred_cosopt(x,m_cosopt):\n C,per,pha = m_cosopt\n Bx = np.hstack((0*x[:,None]+1, np.cos(x[:,None]*2*np.pi/per+pha)))\n P = np.dot(Bx,C)\n return(P.flatten())\n",
"Linear regression",
"def B(x):\n # function returning the matrix of basis functions evaluated at x\n #input: x, np.array with d columns\n #output: a matrix (b_j(x_i))_{i,j}\n B = np.ones((x.shape[0],1))\n for i in range(1,20):\n B = np.hstack((B,np.sin(2*np.pi*i*x[:,None]),np.cos(2*np.pi*i*x[:,None])))\n return(B)\n\ndef LR(X,F,B,tau2):\n #input: X, np.array with d columns representing the DoE\n # F, np.array with 1 column representing the observations\n # B, a function returning the (p) basis functions evaluated at x\n # tau2, noise variance\n #output: beta, estimate of coefficients np.array of shape (p,1)\n # covBeta, cov matrix of beta, np.array of shape (p,p)\n BX = B(X)\n covBeta = np.linalg.inv(np.dot(BX.T,BX))\n beta = np.dot(covBeta,np.dot(BX.T,F))\n return(beta,tau2*covBeta)\n\ndef predLR(x,B,beta,covBeta):\n #function returning predicted mean and variance\n #input: x, np.array with d columns representing m prediction points\n # B, a function returning the (p) basis functions evaluated at x\n # beta, estimate of the regression coefficients\n # covBeta, covariance matrix of beta\n #output: m, predicted mean at x, np.array of shape (m,1)\n # v, predicted variance, np.array of shape (m,1)\n m = np.dot(B(x),beta)\n v = np.dot(B(x),np.dot(covBeta,B(x).T))\n return(m,v)\n",
"Gaussian Process model",
"def fit_gp(X,Y):\n #input: X, np.array with d columns representing the DoE\n # Y, np.array with 1 column representing the observations\n #output: a GPy gaussian process model object\n X = X[:,None]\n Y = Y[:,None]\n k = GPy.kern.PeriodicMatern32(1,variance=1.,lengthscale=1., period=1., n_freq=20,lower=lower,upper=upper)\n bias = GPy.kern.Bias(1,variance=1.)\n m32 = GPy.models.GPRegression(X,Y,k+bias)\n m32.unconstrain('') # remove positivity constrains to avoids warnings\n m32.likelihood.constrain_bounded(0.001,3., warning=False) # boundaries for the observation noise\n m32.kern.periodic_Matern32.constrain_bounded(0.01,3., warning=False) # boundaries for the periodic variance and lengthscale \n m32.kern.periodic_Matern32.period.constrain_bounded(0.15,2., warning=False) # boundaries for the period\n m32.randomize()\n m32.optimize_restarts(5,robust=True)\n return(m32)\n\ndef pred_gp(x,m_gp):\n x = x[:,None]\n mu,var = m_gp.predict(x)\n return(mu.flatten())\n",
"Definition of the criterion for assesing the quality of a model prediction :",
"def RMSE(Ypred,Yreal):\n return( np.sqrt(np.mean((Yreal - Ypred)**2)) )",
"Fit models",
"M_COS = []\nM_GP = []\nM_LR = []\n\nfor i,testfunc in enumerate([f1,f2,f3,f4,f5,f6]):\n Y = testfunc(X)\n Yreal = testfunc(x,0)\n M_COS += [fit_cosopt(X,Y)]\n M_GP += [fit_gp(X,Y)]\n M_LR += [LR(X,Y,B,0.1)]",
"Generate figure",
"lower = 0.\nupper = 3.\nxRMSE = np.linspace(lower,upper,500)\n\nfig, axes = plt.subplots(2,3,figsize=(12,7), sharex=True, sharey=True, tight_layout=False)\nfor i,testfunc in enumerate([f1,f2,f3,f4,f5,f6]):\n Y = testfunc(X)\n Yreal = testfunc(x,0)\n ax = axes.flat[i]\n # test func\n plreal, = ax.plot(x,Yreal, '-r', linewidth=1.5,label=\"test function\")\n realRMSE = testfunc(xRMSE,0)\n # COSOPT\n cosopt_pred = pred_cosopt(x,M_COS[i])\n plcos, = ax.plot(x,cosopt_pred, '--g', linewidth=1.5,label=\"COSOPT\", dashes=(7,3))\n cRMSE = RMSE(pred_cosopt(xRMSE,M_COS[i]),realRMSE)\n #GP 32\n gp_pred = pred_gp(x,M_GP[i])\n plgp, = ax.plot(x,gp_pred, '--b', linewidth=1.5,label=\"periodic GP\", dashes=(15,3))\n gpRMSE = RMSE(pred_gp(xRMSE,M_GP[i]),realRMSE)\n # Lin Reg\n lr_pred = predLR(x,B,M_LR[i][0],M_LR[i][1])\n pllr, = ax.plot(x,lr_pred[0], ':k', linewidth=2,label=\"periodic GP\")\n lrRMSE = RMSE(predLR(xRMSE,B,M_LR[i][0],M_LR[i][1])[0],realRMSE)\n #\n ax.plot(X,testfunc(X),'kx',mew=1, alpha=0.5)\n ax.set_xlim((0.9,2.1))\n ax.set_ylim((-1.5,2.))\n ## RMSE\n ax.text(1.5, 1.8, 'RMSE=(%.2f, %.2f, %.2f)'%(cRMSE,gpRMSE,lrRMSE),\n verticalalignment='center', horizontalalignment='center', fontsize=13)\n\nfig.suptitle(' ')\nl = fig.legend((plreal,plcos,plgp,pllr),(\"test function\",\"COSOPT\",\"periodic GP\",\"Lin. Reg.\"),'upper center',ncol=4,handlelength=3,fancybox=True,columnspacing=3)\nl.draw_frame(False)\n\n"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tensorflow/docs-l10n
|
site/ko/guide/checkpoint.ipynb
|
apache-2.0
|
[
"Copyright 2018 The TensorFlow Authors.",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"체크포인트 훈련하기\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/guide/checkpoint\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />TensorFlow.org에서 보기</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/guide/checkpoint.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />구글 코랩(Google Colab)에서 실행하기</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ko/guide/checkpoint.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />깃헙(GitHub) 소스 보기</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/guide/checkpoint.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nNote: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도\n불구하고 공식 영문 문서의 내용과 일치하지 않을 수 있습니다.\n이 번역에 개선할 부분이 있다면\ntensorflow/docs-l10n 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.\n문서 번역이나 리뷰에 참여하려면\ndocs-ko@tensorflow.org로\n메일을 보내주시기 바랍니다.\n\"텐서플로 모델 저장하기\" 라는 문구는 보통 둘중 하나를 의미합니다:\n\nCheckpoints, 혹은 \nSavedModel.\n\nCheckpoint는 모델이 사용한 모든 매개변수(tf.Variable 객체들)의 정확한 값을 캡처합니다. Chekcpoint는 모델에 의해 정의된 연산에 대한 설명을 포함하지 않으므로 일반적으로 저장된 매개변수 값을 사용할 소스 코드를 사용할 수 있을 때만 유용합니다.\n반면 SavedModel 형식은 매개변수 값(체크포인트) 외에 모델에 의해 정의된 연산에 대한 일련화된 설명을 포함합니다. 이 형식의 모델은 모델을 만든 소스 코드와 독립적입니다. 따라서 TensorFlow Serving, TensorFlow Lite, TensorFlow.js 또는 다른 프로그래밍 언어(C, C++, Java, Go, Rust, C# 등. TensorFlow APIs)로 배포하기에 적합합니다.\n이 가이드는 체크포인트 쓰기 및 읽기를 위한 API들을 다룹니다.\n설치",
"import tensorflow as tf\n\nclass Net(tf.keras.Model):\n \"\"\"A simple linear model.\"\"\"\n\n def __init__(self):\n super(Net, self).__init__()\n self.l1 = tf.keras.layers.Dense(5)\n\n def call(self, x):\n return self.l1(x)\n\nnet = Net()",
"tf.keras 훈련 API들로부터 저장하기\ntf.keras 저장하고 복구하는\n가이드를 읽어봅시다.\ntf.keras.Model.save_weights 가 텐서플로 CheckPoint를 저장합니다.",
"net.save_weights('easy_checkpoint')",
"Checkpoints 작성하기\n텐서플로 모델의 지속적인 상태는 tf.Variable 객체에 저장되어 있습니다. 이들은 직접으로 구성할 수 있지만, tf.keras.layers 혹은 tf.keras.Model와 같은 고수준 API들로 만들어 지기도 합니다.\n변수를 관리하는 가장 쉬운 방법은 Python 객체에 변수를 연결한 다음 해당 객체를 참조하는 것입니다. \ntf.train.Checkpoint, tf.keras.layers.Layer, and tf.keras.Model의 하위클래스들은 해당 속성에 할당된 변수를 자동 추적합니다. 다음 예시는 간단한 선형 model을 구성하고, 모든 model 변수의 값을 포합하는 checkpoint를 씁니다.\nModel.save_weights를 사용해 손쉽게 model-checkpoint를 저장할 수 있습니다.\n직접 Checkpoint작성하기\n설치\ntf.train.Checkpoint의 모든 특성을 입증하기 위해서 toy dataset과 optimization step을 정의해야 합니다.",
"def toy_dataset():\n inputs = tf.range(10.)[:, None]\n labels = inputs * 5. + tf.range(5.)[None, :]\n return tf.data.Dataset.from_tensor_slices(\n dict(x=inputs, y=labels)).repeat(10).batch(2)\n\ndef train_step(net, example, optimizer):\n \"\"\"Trains `net` on `example` using `optimizer`.\"\"\"\n with tf.GradientTape() as tape:\n output = net(example['x'])\n loss = tf.reduce_mean(tf.abs(output - example['y']))\n variables = net.trainable_variables\n gradients = tape.gradient(loss, variables)\n optimizer.apply_gradients(zip(gradients, variables))\n return loss",
"Checkpoint객체 생성\n인위적으로 checkpoint를 만드려면 tf.train.Checkpoint 객체가 필요합니다. Checkpoint하고 싶은 객체의 위치는 객체의 특성으로 설정이 되어 있습니다.\ntf.train.CheckpointManager도 다수의 checkpoint를 관리할때 도움이 됩니다",
"opt = tf.keras.optimizers.Adam(0.1)\nckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=net)\nmanager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)",
"훈련하고 model checkpoint작성하기\n다음 훈련 루프는 model과 optimizer의 인스턴스를 만든 후 tf.train.Checkpoint 객체에 수집합니다. 이것은 각 데이터 배치에 있는 루프의 훈련 단계를 호출하고, 주기적으로 디스크에 checkpoint를 작성합니다.",
"def train_and_checkpoint(net, manager):\n ckpt.restore(manager.latest_checkpoint)\n if manager.latest_checkpoint:\n print(\"Restored from {}\".format(manager.latest_checkpoint))\n else:\n print(\"Initializing from scratch.\")\n\n for example in toy_dataset():\n loss = train_step(net, example, opt)\n ckpt.step.assign_add(1)\n if int(ckpt.step) % 10 == 0:\n save_path = manager.save()\n print(\"Saved checkpoint for step {}: {}\".format(int(ckpt.step), save_path))\n print(\"loss {:1.2f}\".format(loss.numpy()))\n\ntrain_and_checkpoint(net, manager)",
"복구하고 훈련 계속하기\n첫 번째 과정 이후 새로운 model과 매니저를 전달할 수 있지만, 일을 마무리 한 정확한 지점에서 훈련을 가져와야 합니다:",
"opt = tf.keras.optimizers.Adam(0.1)\nnet = Net()\nckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=net)\nmanager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)\n\ntrain_and_checkpoint(net, manager)",
"tf.train.CheckpointManager 객체가 이전 checkpoint들을 제거합니다. 위는 가장 최근의 3개 checkpoint만 유지하도록 구성되어 있습니다.",
"print(manager.checkpoints) # 남은 checkpoint들 나열",
"예를 들어, './tf_ckpts/ckpt-10'같은 경로들은 디스크에 있는 파일이 아닙니다. 대신에 이 경로들은 index 파일과 변수 값들을 담고있는 파일들의 전위 표기입니다. 이 전위 표기들은 CheckpointManager 가 상태를 저장하는 하나의 checkpoint 파일 ('./tf_ckpts/checkpoint') 에 그룹으로 묶여있습니다.",
"!ls ./tf_ckpts",
"<a id=\"loading_mechanics\"/>\n작동 원리\n텐서플로는 로드되는 객체에서 시작하여 명명된 엣지가 있는 방향 그래프를 통과시켜 변수를 checkpoint된 값과 일치시킵니다. 엣지의 이름들은 특히 기여한 객체의 이름에서 따왔습니다. 예를들면, self.l1 = tf.keras.layers.Dense(5)안의 \"l1\". tf.train.Checkpoint 이것의 키워드 전달인자 이름을 사용했습니다, 여기에서는 \"step\" in tf.train.Checkpoint(step=...).\n위의 예에서 나온 종속성 그래프는 다음과 같습니다.:\n\noptimizer는 빨간색으로, regular 변수는 파란색으로, optimizer 슬롯 변수는 주황색으로 표시합니다. 다른 nodes는, 예를 들면 tf.train.Checkpoint, 이 검은색임을 나타냅니다.\n슬롯 변수는 optimizer의 일부지만 특정 변수에 대해 생성됩니다. 'm' 위의 엣지는 모멘텀에 해당하며, 아담 optimizer는 각 변수에 대해 추적합니다. 슬롯 변수는 변수와 optimizer가 모두 저장될 경우에만 checkpoint에 저장되며, 따라서 파선 엣지가 됩니다.\ntf.train.Checkpoint로 불러온 restore() 오브젝트 큐는그Checkpoint 개체에서 일치하는 방법이 있습니다. 변수 값 복원을 요청한 복원 작업 대기 행렬로 정리합니다. 예를 들어, 우리는 네트워크와 계층을 통해 그것에 대한 하나의 경로를 재구성함으로서 위에서 정의한 모델에서 커널만 로드할 수 있습니다.",
"to_restore = tf.Variable(tf.zeros([5]))\nprint(to_restore.numpy()) # 모두 0입니다.\nfake_layer = tf.train.Checkpoint(bias=to_restore)\nfake_net = tf.train.Checkpoint(l1=fake_layer)\nnew_root = tf.train.Checkpoint(net=fake_net)\nstatus = new_root.restore(tf.train.latest_checkpoint('./tf_ckpts/'))\nprint(to_restore.numpy()) # 우리는 복구된 변수를 이제 얻었습니다.",
"이 새로운 개체에 대한 의존도 그래프는 우리가 위에 적은 더 큰 checkpoint보다 작은 하위 그래프입니다. 이것은 오직 tf.train.Checkpoint에서 checkpoints 셀때 편향과 저장 카운터만 포함합니다.\n\nrestore() 함수는 선택적으로 확인을 거친 객체의 상태를 반환합니다. 새로 만든 checkpoint에서 우리가 만든 모든 개체가 복원되어 status.assert_existing_objects_match()가 통과합니다.",
"status.assert_existing_objects_matched()",
"checkpoint에는 계층의 커널과 optimizer의 변수를 포함하여 일치하지 않는 많은 개체가 있습니다. status.assert_consumed()는 checkpoint와 프로그램이 정확히 일치할 경우에만 통과하고 여기에 예외를 둘 것입니다.\n복구 지연\n텐서플로우의 Layer 객체는 입력 형상을 이용할 수 있을 때 변수 생성을 첫 번째 호출로 지연시킬 수 있습니다. 예를 들어, 'Dense' 층의 커널의 모양은 계층의 입력과 출력 형태 모두에 따라 달라지기 때문에, 생성자 인수로 필요한 출력 형태는 그 자체로 변수를 만들기에 충분한 정보가 아닙니다. 예를 들어, 'Dense' 층의 커널의 모양은 계층의 입력과 출력 형태 모두에 따라 달라지기 때문에, 생성자 인수로 필요한 출력 형태는 그 자체로 변수를 만들기에 충분한 정보가 아닙니다.\n이 관용구를 지지하려면 tf.train.Checkpoint queues는 일치하는 변수가 없는 것들을 복원합니다.",
"delayed_restore = tf.Variable(tf.zeros([1, 5]))\nprint(delayed_restore.numpy()) # 아직 복원이 안되어 값이 0입니다.\nfake_layer.kernel = delayed_restore\nprint(delayed_restore.numpy()) # 복원되었습니다.",
"checkpoints 수동 검사\ntf.train.list_variables에는 checkpoint 키와 변수 형태가 나열돼있습니다. Checkpoint의 키들은 위에 있는 그래프의 경로입니다.",
"tf.train.list_variables(tf.train.latest_checkpoint('./tf_ckpts/'))",
"목록 및 딕셔너리 추적\nself.l1 = tf.keras.layer.Dense(5),와 같은 직접적인 속성 할당은 목록과 사전적 속성에 할당하면 내용이 추적됩니다.",
"save = tf.train.Checkpoint()\nsave.listed = [tf.Variable(1.)]\nsave.listed.append(tf.Variable(2.))\nsave.mapped = {'one': save.listed[0]}\nsave.mapped['two'] = save.listed[1]\nsave_path = save.save('./tf_list_example')\n\nrestore = tf.train.Checkpoint()\nv2 = tf.Variable(0.)\nassert 0. == v2.numpy() # 아직 복구되지 않았습니다.\nrestore.mapped = {'two': v2}\nrestore.restore(save_path)\nassert 2. == v2.numpy()",
"당신은 래퍼(wrapper) 객체를 목록과 사전에 있음을 알아차릴겁니다. 이러한 래퍼는 기본 데이터 구조의 checkpoint 가능한 버전입니다. 속성 기반 로딩과 마찬가지로, 이러한 래퍼들은 변수의 값이 용기에 추가되는 즉시 복원됩니다.",
"restore.listed = []\nprint(restore.listed) # 리스트래퍼([])\nv1 = tf.Variable(0.)\nrestore.listed.append(v1) # 이전 셀의 restore()에서 v1 복원합니다.\nassert 1. == v1.numpy()",
"f.keras의 하위 클래스에 동일한 추적이 자동으로 적용되고 예를 들어 레이어 목록을 추적하는 데 사용할 수 있는 모델입니다.\nEstimator를 사용하여 객체 기반 checkpoint를 저장하기\nEstimator 가이드를 보십시오.\nEstimators는 기본적으로 이전 섹션에서 설명한 개체 그래프 대신 변수 이름을 가진 체크포인트를 저장합니다. tf.train.Checkpoint는 이름 기반 체크포인트를 사용할 수 있지만, 모델의 일부를 Estimator's model_fn 외부로 이동할 때 변수 이름이 변경될 수 있습니다. 객체 기반 checkpoints를 저장하면 Estimator 내에서 모델을 훈련시킨 후 외부에서 쉽게 사용할 수 있습니다.",
"import tensorflow.compat.v1 as tf_compat\n\ndef model_fn(features, labels, mode):\n net = Net()\n opt = tf.keras.optimizers.Adam(0.1)\n ckpt = tf.train.Checkpoint(step=tf_compat.train.get_global_step(),\n optimizer=opt, net=net)\n with tf.GradientTape() as tape:\n output = net(features['x'])\n loss = tf.reduce_mean(tf.abs(output - features['y']))\n variables = net.trainable_variables\n gradients = tape.gradient(loss, variables)\n return tf.estimator.EstimatorSpec(\n mode,\n loss=loss,\n train_op=tf.group(opt.apply_gradients(zip(gradients, variables)),\n ckpt.step.assign_add(1)),\n # Estimator가 \"ckpt\"를 객체 기반의 꼴로 저장하게 합니다.\n scaffold=tf_compat.train.Scaffold(saver=ckpt))\n\ntf.keras.backend.clear_session()\nest = tf.estimator.Estimator(model_fn, './tf_estimator_example/')\nest.train(toy_dataset, steps=10)",
"tf.train.Checkpoint는 그런 다음 model_dir에서 Estimator의 checkpoints를 로드할 수 있습니다.",
"opt = tf.keras.optimizers.Adam(0.1)\nnet = Net()\nckpt = tf.train.Checkpoint(\n step=tf.Variable(1, dtype=tf.int64), optimizer=opt, net=net)\nckpt.restore(tf.train.latest_checkpoint('./tf_estimator_example/'))\nckpt.step.numpy() # est.train(..., steps=10)부터",
"요약\n텐서프로우 객체는 사용하는 변수의 값을 저장하고 복원할 수 있는 쉬운 자동 메커니즘을 제공합니다."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
vinitsamel/udacitydeeplearning
|
embeddings/Skip-Grams-Solution.ipynb
|
mit
|
[
"Skip-gram word2vec\nIn this notebook, I'll lead you through using TensorFlow to implement the word2vec algorithm using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like machine translation.\nReadings\nHere are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.\n\nA really good conceptual overview of word2vec from Chris McCormick \nFirst word2vec paper from Mikolov et al.\nNIPS paper with improvements for word2vec also from Mikolov et al.\nAn implementation of word2vec from Thushan Ganegedara\nTensorFlow word2vec tutorial\n\nWord embeddings\nWhen you're dealing with words in text, you end up with tens of thousands of classes to predict, one for each word. Trying to one-hot encode these words is massively inefficient, you'll have one element set to 1 and the other 50,000 set to 0. The matrix multiplication going into the first hidden layer will have almost all of the resulting values be zero. This a huge waste of computation. \n\nTo solve this problem and greatly increase the efficiency of our networks, we use what are called embeddings. Embeddings are just a fully connected layer like you've seen before. We call this layer the embedding layer and the weights are embedding weights. We skip the multiplication into the embedding layer by instead directly grabbing the hidden layer values from the weight matrix. We can do this because the multiplication of a one-hot encoded vector with a matrix returns the row of the matrix corresponding the index of the \"on\" input unit.\n\nInstead of doing the matrix multiplication, we use the weight matrix as a lookup table. We encode the words as integers, for example \"heart\" is encoded as 958, \"mind\" as 18094. Then to get hidden layer values for \"heart\", you just take the 958th row of the embedding matrix. 
This process is called an embedding lookup and the number of hidden units is the embedding dimension.\n<img src='assets/tokenize_lookup.png' width=500>\nThere is nothing magical going on here. The embedding lookup table is just a weight matrix. The embedding layer is just a hidden layer. The lookup is just a shortcut for the matrix multiplication. The lookup table is trained just like any weight matrix as well.\nEmbeddings aren't only used for words of course. You can use them for any model where you have a massive number of classes. A particular type of model called Word2Vec uses the embedding layer to find vector representations of words that contain semantic meaning.\nWord2Vec\nThe word2vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words. Words that show up in similar contexts, such as \"black\", \"white\", and \"red\" will have vectors near each other. There are two architectures for implementing word2vec, CBOW (Continuous Bag-Of-Words) and Skip-gram.\n<img src=\"assets/word2vec_architectures.png\" width=\"500\">\nIn this implementation, we'll be using the skip-gram architecture because it performs better than CBOW. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.\nFirst up, importing packages.",
"import time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport utils",
"Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.",
"from urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nfrom tqdm import tqdm\nimport zipfile\n\ndataset_folder_path = 'data'\ndataset_filename = 'text8.zip'\ndataset_name = 'Text8 Dataset'\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\nif not isfile(dataset_filename):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:\n urlretrieve(\n 'http://mattmahoney.net/dc/text8.zip',\n dataset_filename,\n pbar.hook)\n\nif not isdir(dataset_folder_path):\n with zipfile.ZipFile(dataset_filename) as zip_ref:\n zip_ref.extractall(dataset_folder_path)\n \nwith open('data/text8') as f:\n text = f.read()",
"Preprocessing\nHere I'm fixing up the text to make training easier. This comes from the utils module I wrote. The preprocess function coverts any punctuation into tokens, so a period is changed to <PERIOD>. In this data set, there aren't any periods, but it will help in other NLP problems. I'm also removing all words that show up five or fewer times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. If you want to write your own functions for this stuff, go for it.",
"words = utils.preprocess(text)\nprint(words[:30])\n\nprint(\"Total words: {}\".format(len(words)))\nprint(\"Unique words: {}\".format(len(set(words))))",
"And here I'm creating dictionaries to covert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word (\"the\") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.",
"vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)\nint_words = [vocab_to_int[word] for word in words]",
"Subsampling\nWords that show up often such as \"the\", \"of\", and \"for\" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by \n$$ P(w_i) = 1 - \\sqrt{\\frac{t}{f(w_i)}} $$\nwhere $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.\nI'm going to leave this up to you as an exercise. Check out my solution to see how I did it.\n\nExercise: Implement subsampling for the words in int_words. That is, go through int_words and discard each word given the probablility $P(w_i)$ shown above. Note that $P(w_i)$ is that probability that a word is discarded. Assign the subsampled data to train_words.",
"from collections import Counter\nimport random\n\nthreshold = 1e-5\nword_counts = Counter(int_words)\ntotal_count = len(int_words)\nfreqs = {word: count/total_count for word, count in word_counts.items()}\np_drop = {word: 1 - np.sqrt(threshold/freqs[word]) for word in word_counts}\ntrain_words = [word for word in int_words if random.random() < (1 - p_drop[word])]",
"Making batches\nNow that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to grab all the words in a window around that word, with size $C$. \nFrom Mikolov et al.: \n\"Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $< 1; C >$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels.\"\n\nExercise: Implement a function get_target that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you chose a random number of words to from the window.",
"def get_target(words, idx, window_size=5):\n ''' Get a list of words in a window around an index. '''\n \n R = np.random.randint(1, window_size+1)\n start = idx - R if (idx - R) > 0 else 0\n stop = idx + R\n target_words = set(words[start:idx] + words[idx+1:stop+1])\n \n return list(target_words)",
"Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory.",
"def get_batches(words, batch_size, window_size=5):\n ''' Create a generator of word batches as a tuple (inputs, targets) '''\n \n n_batches = len(words)//batch_size\n \n # only full batches\n words = words[:n_batches*batch_size]\n \n for idx in range(0, len(words), batch_size):\n x, y = [], []\n batch = words[idx:idx+batch_size]\n for ii in range(len(batch)):\n batch_x = batch[ii]\n batch_y = get_target(batch, ii, window_size)\n y.extend(batch_y)\n x.extend([batch_x]*len(batch_y))\n yield x, y\n ",
"Building the graph\nFrom Chris McCormick's blog, we can see the general structure of our network.\n\nThe input words are passed in as one-hot encoded vectors. This will go into a hidden layer of linear units, then into a softmax layer. We'll use the softmax layer to make a prediction like normal.\nThe idea here is to train the hidden layer weight matrix to find efficient representations for our words. We can discard the softmax layer becuase we don't really care about making predictions with this network. We just want the embedding matrix so we can use it in other networks we build from the dataset.\nI'm going to have you build the graph in stages now. First off, creating the inputs and labels placeholders like normal.\n\nExercise: Assign inputs and labels using tf.placeholder. We're going to be passing in integers, so set the data types to tf.int32. The batches we're passing in will have varying sizes, so set the batch sizes to [None]. To make things work later, you'll need to set the second dimension of labels to None or 1.",
"train_graph = tf.Graph()\nwith train_graph.as_default():\n inputs = tf.placeholder(tf.int32, [None], name='inputs')\n labels = tf.placeholder(tf.int32, [None, None], name='labels')",
"Embedding\nThe embedding matrix has a size of the number of words by the number of units in the hidden layer. So, if you have 10,000 words and 300 hidden units, the matrix will have size $10,000 \\times 300$. Remember that we're using tokenized data for our inputs, usually as integers, where the number of tokens is the number of words in our vocabulary.\n\nExercise: Tensorflow provides a convenient function tf.nn.embedding_lookup that does this lookup for us. You pass in the embedding matrix and a tensor of integers, then it returns rows in the matrix corresponding to those integers. Below, set the number of embedding features you'll use (200 is a good start), create the embedding matrix variable, and use tf.nn.embedding_lookup to get the embedding tensors. For the embedding matrix, I suggest you initialize it with a uniform random numbers between -1 and 1 using tf.random_uniform.",
"n_vocab = len(int_to_vocab)\nn_embedding = 200 # Number of embedding features \nwith train_graph.as_default():\n embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs)",
"Negative sampling\nFor every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct label, but only a small number of incorrect labels. This is called \"negative sampling\". Tensorflow has a convenient function to do this, tf.nn.sampled_softmax_loss.\n\nExercise: Below, create weights and biases for the softmax layer. Then, use tf.nn.sampled_softmax_loss to calculate the loss. Be sure to read the documentation to figure out how it works.",
"# Number of negative labels to sample\nn_sampled = 100\nwith train_graph.as_default():\n softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1))\n softmax_b = tf.Variable(tf.zeros(n_vocab))\n \n # Calculate the loss using negative sampling\n loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, \n labels, embed,\n n_sampled, n_vocab)\n \n cost = tf.reduce_mean(loss)\n optimizer = tf.train.AdamOptimizer().minimize(cost)",
"Validation\nThis code is from Thushan Ganegedara's implementation. Here we're going to choose a few common words and few uncommon words. Then, we'll print out the closest words to them. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.",
"with train_graph.as_default():\n ## From Thushan Ganegedara's implementation\n valid_size = 16 # Random set of words to evaluate similarity on.\n valid_window = 100\n # pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent \n valid_examples = np.array(random.sample(range(valid_window), valid_size//2))\n valid_examples = np.append(valid_examples, \n random.sample(range(1000,1000+valid_window), valid_size//2))\n\n valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n \n # We use the cosine distance:\n norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))\n normalized_embedding = embedding / norm\n valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)\n similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))\n\n# If the checkpoints directory doesn't exist:\n!mkdir checkpoints\n\nepochs = 10\nbatch_size = 1000\nwindow_size = 10\n\nwith train_graph.as_default():\n saver = tf.train.Saver()\n\nwith tf.Session(graph=train_graph) as sess:\n iteration = 1\n loss = 0\n sess.run(tf.global_variables_initializer())\n\n for e in range(1, epochs+1):\n batches = get_batches(train_words, batch_size, window_size)\n start = time.time()\n for x, y in batches:\n \n feed = {inputs: x,\n labels: np.array(y)[:, None]}\n train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)\n \n loss += train_loss\n \n if iteration % 100 == 0: \n end = time.time()\n print(\"Epoch {}/{}\".format(e, epochs),\n \"Iteration: {}\".format(iteration),\n \"Avg. 
Training loss: {:.4f}\".format(loss/100),\n \"{:.4f} sec/batch\".format((end-start)/100))\n loss = 0\n start = time.time()\n \n if iteration % 1000 == 0:\n # note that this is expensive (~20% slowdown if computed every 500 steps)\n sim = similarity.eval()\n for i in range(valid_size):\n valid_word = int_to_vocab[valid_examples[i]]\n top_k = 8 # number of nearest neighbors\n nearest = (-sim[i, :]).argsort()[1:top_k+1]\n log = 'Nearest to %s:' % valid_word\n for k in range(top_k):\n close_word = int_to_vocab[nearest[k]]\n log = '%s %s,' % (log, close_word)\n print(log)\n \n iteration += 1\n save_path = saver.save(sess, \"checkpoints/text8.ckpt\")\n embed_mat = sess.run(normalized_embedding)",
"Restore the trained network if you need to:",
"with train_graph.as_default():\n saver = tf.train.Saver()\n\nwith tf.Session(graph=train_graph) as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n embed_mat = sess.run(embedding)",
"Visualizing the word vectors\nBelow we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE is used to project these vectors into two dimensions while preserving local stucture. Check out this post from Christopher Olah to learn more about T-SNE and other ways to visualize high-dimensional data.",
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\n\nviz_words = 500\ntsne = TSNE()\nembed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])\n\nfig, ax = plt.subplots(figsize=(14, 14))\nfor idx in range(viz_words):\n plt.scatter(*embed_tsne[idx, :], color='steelblue')\n plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
sdpython/pyensae
|
_doc/notebooks/example_corrplot.ipynb
|
mit
|
[
"example of a corrplot\nBiokit proposes nice graphs for correlation: corrplot function in Python but it only works with Python 2.7. I took the code out and put a modified version of in pyensae.",
"%matplotlib inline\n\nimport pyensae\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\nimport pandas\nimport numpy\nletters = \"ABCDEFGHIJKLM\"[0:10]\ndf = pandas.DataFrame(dict(( (k, numpy.random.random(10)+ord(k)-65) for k in letters)))\ndf.head()\n\nfrom pyensae.graphhelper import Corrplot\n\nc = Corrplot(df)\nc.plot(figsize=(12,6))",
"To avoid created another graph container:",
"fig = plt.figure(num=None, facecolor='white', figsize=(12,6))\nax = plt.subplot(1, 1, 1, aspect='equal', facecolor='white')\nc = Corrplot(df)\nc.plot(ax=ax)",
"We compare it with seaborn and this example\nDiscovering structure in heatmap data.",
"import seaborn as sns\n\ncmap = sns.diverging_palette(h_neg=210, h_pos=350, s=90, l=30, as_cmap=True, center=\"light\")\nsns.clustermap(df.corr(), figsize=(10, 10), cmap=cmap)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
davesque/cs231n
|
assignment1/knn.ipynb
|
mit
|
[
"k-Nearest Neighbor (kNN) exercise\nComplete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the assignments page on the course website.\nThe kNN classifier consists of two stages:\n\nDuring training, the classifier takes the training data and simply remembers it\nDuring testing, kNN classifies every test image by comparing to all training images and transfering the labels of the k most similar training examples\nThe value of k is cross-validated\n\nIn this exercise you will implement these steps and understand the basic Image Classification pipeline, cross-validation, and gain proficiency in writing efficient, vectorized code.",
"# Run some setup code for this notebook.\n\nimport random\nimport numpy as np\nfrom cs231n.data_utils import load_CIFAR10\nimport matplotlib.pyplot as plt\n\n# This is a bit of magic to make matplotlib figures appear inline in the notebook\n# rather than in a new window.\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# Some more magic so that the notebook will reload external python modules;\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\n\n# Load the raw CIFAR-10 data.\ncifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\nX_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n# As a sanity check, we print out the size of the training and test data.\nprint 'Training data shape: ', X_train.shape\nprint 'Training labels shape: ', y_train.shape\nprint 'Test data shape: ', X_test.shape\nprint 'Test labels shape: ', y_test.shape\n\n# Visualize some examples from the dataset.\n# We show a few examples of training images from each class.\nclasses = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\nnum_classes = len(classes)\nsamples_per_class = 7\nfor y, cls in enumerate(classes):\n idxs = np.flatnonzero(y_train == y)\n idxs = np.random.choice(idxs, samples_per_class, replace=False)\n for i, idx in enumerate(idxs):\n plt_idx = i * num_classes + y + 1\n plt.subplot(samples_per_class, num_classes, plt_idx)\n plt.imshow(X_train[idx].astype('uint8'))\n plt.axis('off')\n if i == 0:\n plt.title(cls)\nplt.show()\n\n# Subsample the data for more efficient code execution in this exercise\nnum_training = 5000\nmask = range(num_training)\nX_train = X_train[mask]\ny_train = y_train[mask]\n\nnum_test = 500\nmask = range(num_test)\nX_test = X_test[mask]\ny_test = y_test[mask]\n\n# Reshape the image data into rows\nX_train = 
np.reshape(X_train, (X_train.shape[0], -1))\nX_test = np.reshape(X_test, (X_test.shape[0], -1))\nprint X_train.shape, X_test.shape\n\nfrom cs231n.classifiers import KNearestNeighbor\n\n# Create a kNN classifier instance. \n# Remember that training a kNN classifier is a noop: \n# the Classifier simply remembers the data and does no further processing \nclassifier = KNearestNeighbor()\nclassifier.train(X_train, y_train)",
"We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps: \n\nFirst we must compute the distances between all test examples and all train examples. \nGiven these distances, for each test example we find the k nearest examples and have them vote for the label\n\nLets begin with computing the distance matrix between all training and test examples. For example, if there are Ntr training examples and Nte test examples, this stage should result in a Nte x Ntr matrix where each element (i,j) is the distance between the i-th test and j-th train example.\nFirst, open cs231n/classifiers/k_nearest_neighbor.py and implement the function compute_distances_two_loops that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time.",
"# Open cs231n/classifiers/k_nearest_neighbor.py and implement\n# compute_distances_two_loops.\n\n# Test your implementation:\ndists = classifier.compute_distances_two_loops(X_test)\nprint dists.shape\n\n# We can visualize the distance matrix: each row is a single test example and\n# its distances to training examples\nplt.imshow(dists, interpolation='none')\nplt.show()",
"Inline Question #1: Notice the structured patterns in the distance matrix, where some rows or columns are visible brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.)\n\nWhat in the data is the cause behind the distinctly bright rows?\nWhat causes the columns?\n\nYour Answer:\nDistinctly bright rows represent images in the test set which are generally different from any image in the training set. Perhaps these test images are very unique in appearance and color palette.\nThe same is true for bright columns, but our terms are flipped. These columns represent training images which are unique in comparison to the test images -- there are no images in the test set that are very similar to them.",
"# Now implement the function predict_labels and run the code below:\n# We use k = 1 (which is Nearest Neighbor).\ny_test_pred = classifier.predict_labels(dists, k=1)\n\n# Compute and print the fraction of correctly predicted examples\nnum_correct = np.sum(y_test_pred == y_test)\naccuracy = float(num_correct) / num_test\nprint 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)",
"You should expect to see approximately 27% accuracy. Now lets try out a larger k, say k = 5:",
"y_test_pred = classifier.predict_labels(dists, k=5)\nnum_correct = np.sum(y_test_pred == y_test)\naccuracy = float(num_correct) / num_test\nprint 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)",
"You should expect to see a slightly better performance than with k = 1.",
"# Now lets speed up distance matrix computation by using partial vectorization\n# with one loop. Implement the function compute_distances_one_loop and run the\n# code below:\ndists_one = classifier.compute_distances_one_loop(X_test)\n\n# To ensure that our vectorized implementation is correct, we make sure that it\n# agrees with the naive implementation. There are many ways to decide whether\n# two matrices are similar; one of the simplest is the Frobenius norm. In case\n# you haven't seen it before, the Frobenius norm of two matrices is the square\n# root of the squared sum of differences of all elements; in other words, reshape\n# the matrices into vectors and compute the Euclidean distance between them.\ndifference = np.linalg.norm(dists - dists_one, ord='fro')\nprint 'Difference was: %f' % (difference, )\nif difference < 0.001:\n print 'Good! The distance matrices are the same'\nelse:\n print 'Uh-oh! The distance matrices are different'\n\n# Now implement the fully vectorized version inside compute_distances_no_loops\n# and run the code\ndists_two = classifier.compute_distances_no_loops(X_test)\n\n# check that the distance matrix agrees with the one we computed before:\ndifference = np.linalg.norm(dists - dists_two, ord='fro')\nprint 'Difference was: %f' % (difference, )\nif difference < 0.001:\n print 'Good! The distance matrices are the same'\nelse:\n print 'Uh-oh! 
The distance matrices are different'\n\n# Let's compare how fast the implementations are\ndef time_function(f, *args):\n \"\"\"\n Call a function f with args and return the time (in seconds) that it took to execute.\n \"\"\"\n import time\n tic = time.time()\n f(*args)\n toc = time.time()\n return toc - tic\n\ntwo_loop_time = time_function(classifier.compute_distances_two_loops, X_test)\nprint 'Two loop version took %f seconds' % two_loop_time\n\none_loop_time = time_function(classifier.compute_distances_one_loop, X_test)\nprint 'One loop version took %f seconds' % one_loop_time\n\nno_loop_time = time_function(classifier.compute_distances_no_loops, X_test)\nprint 'No loop version took %f seconds' % no_loop_time\n\n# you should see significantly faster performance with the fully vectorized implementation",
"Cross-validation\nWe have implemented the k-Nearest Neighbor classifier but we set the value k = 5 arbitrarily. We will now determine the best value of this hyperparameter with cross-validation.",
"num_folds = 5\nk_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]\n\n################################################################################\n# TODO: #\n# Split up the training data into folds. After splitting, X_train_folds and #\n# y_train_folds should each be lists of length num_folds, where #\n# y_train_folds[i] is the label vector for the points in X_train_folds[i]. #\n# Hint: Look up the numpy array_split function. #\n################################################################################\nindices = np.arange(5000)\nnp.random.shuffle(indices)\nfold_indices = np.array_split(indices, num_folds)\n\nX_train_folds = [X_train[i] for i in fold_indices]\ny_train_folds = [y_train[i] for i in fold_indices]\n################################################################################\n# END OF YOUR CODE #\n################################################################################\n\n# A dictionary holding the accuracies for different values of k that we find\n# when running cross-validation. After running cross-validation,\n# k_to_accuracies[k] should be a list of length num_folds giving the different\n# accuracy values that we found when using that value of k.\nfrom collections import defaultdict\nk_to_accuracies = defaultdict(list)\n\n################################################################################\n# TODO: #\n# Perform k-fold cross validation to find the best value of k. For each #\n# possible value of k, run the k-nearest-neighbor algorithm num_folds times, #\n# where in each case you use all but one of the folds as training data and the #\n# last fold as a validation set. Store the accuracies for all fold and all #\n# values of k in the k_to_accuracies dictionary. 
#\n################################################################################\nfor f in range(num_folds):\n X_train_f = np.concatenate(X_train_folds[:f] + X_train_folds[f + 1:])\n y_train_f = np.concatenate(y_train_folds[:f] + y_train_folds[f + 1:])\n \n X_validate_f = X_train_folds[f]\n y_validate_f = y_train_folds[f]\n\n classifier = KNearestNeighbor()\n classifier.train(X_train_f, y_train_f)\n \n for k in k_choices:\n y_validate_pred = classifier.predict(X_validate_f, k=k)\n num_correct = np.sum(y_validate_pred == y_validate_f)\n accuracy = float(num_correct) / y_validate_f.shape[0]\n \n k_to_accuracies[k].append(accuracy)\n################################################################################\n# END OF YOUR CODE #\n################################################################################\n\n# Print out the computed accuracies\nfor k in sorted(k_to_accuracies):\n for accuracy in k_to_accuracies[k]:\n print 'k = %d, accuracy = %f' % (k, accuracy)\n\n# plot the raw observations\nfor k in k_choices:\n accuracies = k_to_accuracies[k]\n plt.scatter([k] * len(accuracies), accuracies)\n\n# plot the trend line with error bars that correspond to standard deviation\naccuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])\naccuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])\nplt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)\nplt.title('Cross-validation on k')\nplt.xlabel('k')\nplt.ylabel('Cross-validation accuracy')\nplt.show()\n\nstd_order = accuracies_std.argsort()\n(np.array(k_choices)[std_order], accuracies_std[std_order], std_order)\n\nmean_order = accuracies_mean.argsort()[::-1]\n(np.array(k_choices)[mean_order], accuracies_std[mean_order], mean_order)\n\n# Based on the cross-validation results above, choose the best value for k, \n# retrain the classifier using all the training data, and test it on the test\n# data. 
You should be able to get above 28% accuracy on the test data.\nbest_k = 5\n\nclassifier = KNearestNeighbor()\nclassifier.train(X_train, y_train)\ny_test_pred = classifier.predict(X_test, k=best_k)\n\n# Compute and display the accuracy\nnum_correct = np.sum(y_test_pred == y_test)\naccuracy = float(num_correct) / num_test\nprint 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mspieg/dynamical-systems
|
.ipynb_checkpoints/Simple_root_finding-checkpoint.ipynb
|
cc0-1.0
|
[
"<table>\n <tr align=left><td><img align=left src=\"./images/CC-BY.png\">\n <td>Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) Marc Spiegelman, Based on ipython notebook by Kyle Mandli from his course [Introduction to numerical methods](https://github.com/mandli/intro-numerical-methods)</td>\n</table>",
"%matplotlib inline\nimport numpy\nimport matplotlib.pyplot as plt",
"Root Finding\nGOAL: Find where $f(x) = 0$.\nExample: A simple non-linear problem\nlet $f(x) = x - \\cos(x)$, find values of $x$ where $f(x)=0$.\nBecause $f$ is non-linear it is possible that it has no roots, a finite number of roots or even an infinite number of roots. Thus the first thing one should do is try to visualize $f$ over a range of $x$ to see, qualitatively, whether there are any zero crossings and to identify brackets where $f$ changes sign.\nHere we will introduce our function using an \"inlined-function\" or \"lambda function\" in python",
"f = lambda x: x - numpy.cos(x)",
"which simply replaces any use of f(x) with x - cos(x), for example",
"print 'f(0.)=',f(0.)",
"or let's plot this for $x\\in [-10,10]$",
"x = numpy.linspace(-10.,10., 100)\n\nplt.figure()\nplt.plot(x,f(x),'b')\nplt.hold(True)\nplt.plot(x,numpy.zeros(x.shape),'r--')\nplt.xlabel(\"x\")\nplt.ylabel(\"f(x)\")\nplt.title(\"A plot\")\nplt.show()",
"Which, in this range, has a single root somewhere in the bracket $0<x<5$. The question is how to find it?\nThe \"right answer\"\nThe correct approach is to use (and understand) a good algorithm for bracketed root finding of functions of a single variable of which scipy.optimize provides several. Here we will use the brentq algorithm which is a workhorse for rootfinding as it is guaranteed to find at least one root given a proper bracket (where $f$ changes sign). The method is a generalized secant method so doesn't require derivatives of $f$ and has super-linear convergence (a simple bisection scheme also guarantees a root but has only linear convergence).",
"from scipy.optimize import brentq\n\n# give a bracket [a,b] such that f(a)*f(b) <= 0\na = 0.\nb = 5.\nx0 = brentq(f,a,b)\nprint\nprint \"root x0 = {0}, in bracket [{1},{2}]\".format(x0,a,b)\nprint \"residual f(x0) = {0}\".format(f(x0))\n\nplt.figure()\nplt.plot(x,f(x),'b')\nplt.hold(True)\nplt.plot(x,numpy.zeros(x.shape),'r--')\nplt.plot(x0,f(x0),'go')\nplt.xlabel(\"x\")\nplt.ylabel(\"f(x)\")\nplt.title(\"A root at $x_0={0}$\".format(x0))\nplt.show()",
"Successive Substition (Fixed point iteration)\nA more naive approach is to rewrite $f(x) = 0$ as a fixed point iteration\n$$ x = g(x) $$\nwhere $g(x)$ is another function such that when $x$ satisfies this equation, it is a root of $f$. For example here we could choose $g(x) = \\cos(x)$. \nWe can turn this equation into an iterative method by setting $x_0 =0.$ and forming a sequence of numbers\n$$ x_n = g(x_{n-1})$$\nand hope it converges. Algorithmically we could do something like",
"g = lambda x: numpy.cos(x) \n\nxn = numpy.zeros(21)\nfor i in xrange(len(xn)-1):\n print \"step {0}: x = {1}, residual f(x) = {2}\".format(i,xn[i], f(xn[i]))\n xn[i+1] = g(xn[i])",
"and plot out the residual.",
"plt.figure()\nplt.plot(range(len(xn)),f(xn),'b-o')\nplt.xlabel('Iterations')\nplt.ylabel('Residual $f(x)$')\nplt.title('Convergence of fixed point iteration)')\nplt.show()",
"which oscillates towards the value of the root. If we look at the absolute value of the error, we see that it converges linearly i.e.\n$$ |e_{n+1}| = K|e_{n}| $$\nWhere $K$ is a value that analysis shows should be $K=|g'(x^)|$ where $x^$ is the root. For our problem $g'(x) = -\\sin(x)$ and $K=0.673612029183$. Because $K<1$ the fixed point iteration is a \"contraction\" and the error eventually $\\rightarrow 0$ as $n\\rightarrow\\infty$. We demonstrated that this works for this problem graphically",
"plt.figure()\nplt.semilogy(range(len(xn)),numpy.abs(f(xn)),'b-o')\nplt.xlabel('Iterations')\nplt.ylabel('Residual $|f(x)|$')\nplt.title('Convergence of fixed point iteration')\nplt.show()",
"and numerically by comparing the ratio of $|f(x_{n+1})|/|f(x_n)|$",
"print\nfor i in range(len(xn)-1):\n print 'Step = {0}, K={1}'.format((i+1),numpy.abs(f(xn[i+1]))/numpy.abs(f(xn[i])))\n \ngprime = lambda x: -numpy.sin(x)\nprint\nprint \"|g'(x0)| = {0}\".format(numpy.abs(gprime(x0)))",
"Newton's Method\nA potentially more efficient method for Non-linear problems is Newton's method which can be considered another fixed-point iteration but promises much better convergence (near the fixed point, for simple roots). \nThe basic idea is that given some initial guess $x_n$ such that $f(x_n) \\neq 0$, there is some correction $\\delta_n$ such that $f(x_n + \\delta_n) = 0$. Expanding $f$ in a Taylor series around $x_n$ we get the linear approximation\n$$ f(x_n + \\delta_n) \\approx f(x_n) + f'(x_n)\\delta_n + O(\\delta_n^2) = 0$$\nneglecting terms of order $\\delta_n^2$ we can solve for the correction that would be exact if the problem were linear, i.e.\n$$ \\delta_n = -f(x_n)/f'(x_n) $$\nthen the next iterate is given by \n$$ x_{n+1} = x_{n} + \\delta_n $$\nand iterate until the residual $|f(x)| < \\mathrm{tol}$ for some tolerance.\nAlgorithmically...",
"fprime = lambda x: 1. + numpy.sin(x)\n\nxnn = numpy.zeros(10)\nprint \"\\nNewton's Method\\n\"\ni = 0\ntol = 1.e-16\nwhile numpy.abs(f(xnn[i])) > tol:\n print \"step {0}: x = {1}, residual f(x) = {2}\".format(i,xnn[i], f(xnn[i]))\n xnn[i+1] = xnn[i] - f(xnn[i])/fprime(xnn[i])\n i += 1\n \nimax = i\nxnn = xnn[:imax]",
"Analysis shows that near a simple root, Newton's method converges quadratically i.e.\n$$|e_{n+1}| = C |e_n|^2$$\nthus doubling the number of significant digits per iteration. This analysis is only valid near the root and in general, Newton's method can be highly unstable (for example if it finds a region where $f'(x)$ is close to zero), and in general requires some additional controls to maintain a bracket.\nComparing the two methods for this problem, however, shows that Newton's method converges quadratically, while the fixed point iteration converges linearly",
"plt.figure()\nplt.semilogy(range(len(xn)),numpy.abs(f(xn)),'b-o',label='fixed point')\nplt.hold(True)\nplt.semilogy(range(len(xnn)),numpy.abs(f(xnn)),'r-o',label='newton')\nplt.xlabel('Iterations')\nplt.ylabel('Residual $|f(x)|$')\nplt.legend(loc='best')\nplt.title('Comparison of Fixed point iteration to Newtons Method')\nplt.show()",
"Analysis:\nNeither Fixed Point iteration nor Newton's method is guaranteed to converge even given a bracket and more sophisticated methods are needed for convergence and robustness (and thus the recommendation of a proper root finder like brentq from scipy). For further analysis of the convergence of fixed-point iterations and Newton's iteration as well as a host of other root finding algorithms see Kyle Mandli's notes\n Introduction to numerical methods, in particular lecture 05_root_finding_optimization.ipynb"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
science-of-imagination/nengo-buffer
|
Project/mental_translation_training.ipynb
|
gpl-3.0
|
[
"Training Ensemble on MNIST Dataset\n\nOn the function points branch of nengo\nOn the vision branch of nengo_extras",
"import matplotlib.pyplot as plt\n%matplotlib inline\nimport nengo\nimport numpy as np\nimport scipy.ndimage\nimport matplotlib.animation as animation\nfrom matplotlib import pylab\nfrom PIL import Image\nimport nengo.spa as spa\nimport cPickle\nimport random\n\nfrom nengo_extras.data import load_mnist\nfrom nengo_extras.vision import Gabor, Mask",
"Represent each number using a one-hot where the index of the one represents the digit value",
"#Encode categorical integer features using a one-hot aka one-of-K scheme.\ndef one_hot(labels, c=None):\n assert labels.ndim == 1\n n = labels.shape[0]\n c = len(np.unique(labels)) if c is None else c\n y = np.zeros((n, c))\n y[np.arange(n), labels] = 1\n return y",
"Load the MNIST training and testing images",
"# --- load the data\nimg_rows, img_cols = 28, 28\n\n(X_train, y_train), (X_test, y_test) = load_mnist()\n\nX_train = 2 * X_train - 1 # normalize to -1 to 1\nX_test = 2 * X_test - 1 # normalize to -1 to 1\n\ntrain_targets = one_hot(y_train, 10)\ntest_targets = one_hot(y_test, 10)",
"The Network\n\nThe network parameters must be the same here as when the weight matrices are used later on\nThe network is made up of an ensemble and two nodes\nThe first connection ( to v) computes the weights from the activities of the images to the images themselves\nThe second connection (to v2) computes the weights from the activities of the images to the labels",
"rng = np.random.RandomState(9)\n\n# --- set up network parameters\n#Want to encode and decode the image\nn_vis = X_train.shape[1]\nn_out = X_train.shape[1]\n#number of neurons/dimensions of semantic pointer\nn_hid = 1000 #Try with more neurons for more accuracy\n\n\n#Want the encoding/decoding done on the training images\nens_params = dict(\n eval_points=X_train,\n neuron_type=nengo.LIF(), #Why not use LIF? originally used LIFRate()\n intercepts=nengo.dists.Choice([-0.5]),\n max_rates=nengo.dists.Choice([100]),\n )\n\n\n#Least-squares solver with L2 regularization.\nsolver = nengo.solvers.LstsqL2(reg=0.01)\n#solver = nengo.solvers.LstsqL2(reg=0.0001)\nsolver2 = nengo.solvers.LstsqL2(reg=0.01)\n\n#network that generates the weight matrices between neuron activity and images and the labels\nwith nengo.Network(seed=3) as model:\n a = nengo.Ensemble(n_hid, n_vis, seed=3, **ens_params)\n v = nengo.Node(size_in=n_out)\n conn = nengo.Connection(\n a, v, synapse=None,\n eval_points=X_train, function=X_train,#want the same thing out (identity)\n solver=solver)\n \n v2 = nengo.Node(size_in=train_targets.shape[1])\n conn2 = nengo.Connection(\n a, v2, synapse=None,\n eval_points=X_train, function=train_targets, #Want to get the labels out\n solver=solver2)\n \n\n# linear filter used for edge detection as encoders, more plausible for human visual system\nencoders = Gabor().generate(n_hid, (11, 11), rng=rng)\nencoders = Mask((28, 28)).populate(encoders, rng=rng, flatten=True)\n#Set the ensembles encoders to this\na.encoders = encoders\n\n#Check the encoders were correctly made\nplt.imshow(encoders[0].reshape(28, 28), vmin=encoders[0].min(), vmax=encoders[0].max(), cmap='gray')",
"Evaluating the network statically\n\nFunctions for computing representation of the image at different levels of encoding/decoding\nget_outs returns the output of the network\nable to evaluate on many images\nno need to run the simulator",
"#Get the one hot labels for the images\ndef get_outs(sim, images):\n #The activity of the neurons when an image is given as input\n _, acts = nengo.utils.ensemble.tuning_curves(a, sim, inputs=images)\n #The activity multiplied by the weight matrix (calculated in the network) to give the one-hot labels\n return np.dot(acts, sim.data[conn2].weights.T)\n\n#Check how many of the labels were produced correctly\n#def get_error(sim, images, labels):\n# return np.argmax(get_outs(sim, images), axis=1) != labels\n\n#Get label of the images\n#def get_labels(sim,images):\n# return np.argmax(get_outs(sim, images), axis=1)\n\n#Get the neuron activity of an image or group of images (this is the semantic pointer in this case)\ndef get_activities(sim, images):\n _, acts = nengo.utils.ensemble.tuning_curves(a, sim, inputs=images)\n return acts\n\n#Get the representation of the image after it has gone through the encoders (Gabor filters) but before it is in the neurons\n#This must be computed to create the weight matrix for rotation from neuron activity to this step\n# This allows a recurrent connection to be made from the neurons to themselves later\ndef get_encoder_outputs(sim,images):\n #Pass the images through the encoders\n outs = np.dot(images,sim.data[a].encoders.T) #before the neurons \n return outs",
"Images\nCreate lists of training and testing images\n- Original images at random translations\n- Those images translated a fixed amount for each cardinal direction\n- Images not used for training, but later for testing",
"dim =28\n\n#Shift an image\ndef translate(img,x,y):\n newImg = scipy.ndimage.interpolation.shift(np.reshape(img, (dim,dim), 'F'),(x,y), cval=-1)\n return newImg.T.ravel()\n\n\nexample = X_train[1].copy()\n\nplt.subplot(121)\nplt.imshow(np.reshape(example,(dim,dim)),cmap=\"gray\")\nplt.subplot(122)\nplt.imshow(np.reshape(translate(example,4,0),(dim,dim)),cmap=\"gray\")\n\n#Images to train, starting at random translation\norig_imgs = X_train[:100000].copy()\nfor img in orig_imgs:\n img[:] = translate(img,random.randint(-4,4),random.randint(-4,4))\n\n\n#Images translated up a fixed amount from the original random translation\ntranslate_up_imgs = orig_imgs.copy()\nfor img in translate_up_imgs:\n img[:] = translate(img,0,-1)\n \n#Images translated down a fixed amount from the original random translation\ntranslate_down_imgs = orig_imgs.copy()\nfor img in translate_down_imgs:\n img[:] = translate(img,0,1)\n\n#Images translated right a fixed amount from the original random translation\ntranslate_right_imgs = orig_imgs.copy()\nfor img in translate_right_imgs:\n img[:] = translate(img,1,0)\n \n#Images translated left a fixed amount from the original random translation\ntranslate_left_imgs = orig_imgs.copy()\nfor img in translate_left_imgs:\n img[:] = translate(img,-1,0)\n\n\n#Images not used for training, but for testing (all at random translations)\ntest_imgs = X_test[:1000].copy()\nfor img in test_imgs:\n img[:] = translate(img,random.randint(-4,4),random.randint(-4,4))\n\n#Check to make sure images were generated correctly\nplt.subplot(151)\nplt.imshow(np.reshape(orig_imgs[1],(dim,dim)), cmap='gray')\nplt.subplot(152)\nplt.imshow(np.reshape(translate_up_imgs[1],(dim,dim)), cmap='gray')\nplt.subplot(153)\nplt.imshow(np.reshape(translate_down_imgs[1],(dim,dim)), cmap='gray')\nplt.subplot(154)\nplt.imshow(np.reshape(translate_left_imgs[1],(dim,dim)), cmap='gray')\nplt.subplot(155)\nplt.imshow(np.reshape(translate_right_imgs[1],(dim,dim)), cmap='gray')",
"Simulator\n\nCalculate the neuron activities of each set of images\nCalculate the labels of the original images\nCalculate the rotated images after they have gone through the encoders, but before they are in neuron activity\nGenerate the weight matrices between \noriginal activities and rotated activities\nlabels and image activities\noriginal activities and rotated images through the encoders",
"with nengo.Simulator(model) as sim: \n \n #Neuron activities of different mnist images\n #The semantic pointers\n orig_acts = get_activities(sim,orig_imgs)\n translate_up_acts = get_activities(sim,translate_up_imgs)\n translate_down_acts = get_activities(sim,translate_down_imgs)\n translate_left_acts = get_activities(sim,translate_left_imgs)\n translate_right_acts = get_activities(sim,translate_right_imgs)\n test_acts = get_activities(sim,test_imgs)\n \n X_test_acts = get_activities(sim,X_test)\n labels_out = get_outs(sim,X_test)\n \n\n translate_up_after_encoders = get_encoder_outputs(sim,translate_up_imgs)\n translate_down_after_encoders = get_encoder_outputs(sim,translate_down_imgs)\n translate_left_after_encoders = get_encoder_outputs(sim,translate_left_imgs)\n translate_right_after_encoders = get_encoder_outputs(sim,translate_right_imgs)\n \n \n #solvers for a learning rule\n solver_translate_up = nengo.solvers.LstsqL2(reg=1e-8)\n solver_translate_down = nengo.solvers.LstsqL2(reg=1e-8)\n solver_translate_left = nengo.solvers.LstsqL2(reg=1e-8)\n solver_translate_right = nengo.solvers.LstsqL2(reg=1e-8)\n solver_word = nengo.solvers.LstsqL2(reg=1e-8)\n solver_translate_up_encoder = nengo.solvers.LstsqL2(reg=1e-8)\n solver_translate_down_encoder = nengo.solvers.LstsqL2(reg=1e-8)\n solver_translate_left_encoder = nengo.solvers.LstsqL2(reg=1e-8)\n solver_translate_right_encoder = nengo.solvers.LstsqL2(reg=1e-8)\n \n \n #find weight matrix between neuron activity of the original image and the translated image\n #weights returns a tuple including information about learning process, just want the weight matrix\n translate_up_weights,_ = solver_translate_up(orig_acts, translate_up_acts)\n translate_down_weights,_ = solver_translate_down(orig_acts, translate_down_acts)\n translate_left_weights,_ = solver_translate_left(orig_acts, translate_left_acts)\n translate_right_weights,_ = solver_translate_right(orig_acts, translate_right_acts)\n \n \n #find weight matrix between 
labels and neuron activity\n label_weights,_ = solver_word(labels_out,X_test_acts)\n \n \n translate_up_after_encoder_weights,_ = solver_translate_up_encoder(orig_acts,translate_up_after_encoders)\n translate_down_after_encoder_weights,_ = solver_translate_down_encoder(orig_acts,translate_down_after_encoders)\n translate_left_after_encoder_weights,_ = solver_translate_left_encoder(orig_acts,translate_left_after_encoders)\n translate_right_after_encoder_weights,_ = solver_translate_right_encoder(orig_acts,translate_right_after_encoders)\n\n \n \n ",
"Saving weight matrices",
"#filename = \"label_weights\" + str(n_hid) +\".p\"\n#cPickle.dump(label_weights, open( filename, \"wb\" ) )\n\nfilename = \"activity_to_img_weights_translate\" + str(n_hid) +\".p\"\ncPickle.dump(sim.data[conn].weights.T, open( filename, \"wb\" ) )\n\nfilename = \"translate_up_weights\" + str(n_hid) +\".p\"\ncPickle.dump(translate_up_weights, open( filename, \"wb\" ) )\nfilename = \"translate_down_weights\" + str(n_hid) +\".p\"\ncPickle.dump(translate_down_weights, open( filename, \"wb\" ) )\nfilename = \"translate_left_weights\" + str(n_hid) +\".p\"\ncPickle.dump(translate_left_weights, open( filename, \"wb\" ) )\nfilename = \"translate_right_weights\" + str(n_hid) +\".p\"\ncPickle.dump(translate_right_weights, open( filename, \"wb\" ) )\n\nfilename = \"translate_up_after_encoder_weights\" + str(n_hid) +\".p\"\ncPickle.dump(translate_up_after_encoder_weights, open( filename, \"wb\" ) )\nfilename = \"translate_down_after_encoder_weights\" + str(n_hid) +\".p\"\ncPickle.dump(translate_down_after_encoder_weights, open( filename, \"wb\" ) )\nfilename = \"translate_left_after_encoder_weights\" + str(n_hid) +\".p\"\ncPickle.dump(translate_left_after_encoder_weights, open( filename, \"wb\" ) )\nfilename = \"translate_right_after_encoder_weights\" + str(n_hid) +\".p\"\ncPickle.dump(translate_right_after_encoder_weights, open( filename, \"wb\" ) )"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
GoogleCloudPlatform/dialogflow-cx-bot-language-translation
|
CX-Bot-Translate__MAIN.ipynb
|
apache-2.0
|
[
"Copyright 2021 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nDialogflow CX Bot Language Translation (MAIN)\nPre-requsite: YOU have read the README.md\n\nInitial setup and initializations\nRun the sub-notebooks as required with editable cells to modify the standard flow\n\nThe standard flow:\n1. Setup blank Google Sheets\n2. Query CX Agent for existing configuration (Default and Supported Languages)\n3. Scan and Write CX Agent components that contains text into respective tabs in Google Sheets:\n - Intents, \n - Entity Types\n - Flows\n - Pages\n - Route Groups\n4. Cloud Translate all texts from Default Language to Target (Supported) Language/s as defined in the CX Agent\n5. Apply translated text back into CX Agent\nPROVIDE Required Information in the cell below:",
"# (MANDATORY) Provide the full URL to target Sheets and CX Agent\nGoogle_Sheets_URL = 'https://docs.google.com/spreadsheets/.........'\nCX_Agent_URL = 'https://dialogflow.cloud.google.com/cx/projects/.........'\n\n# To access Google Sheets and Dialogflow CX, authorization is required.\n# If you have NOT setup Google Sheets OAuth on this system (\"credentials.json\" and/or \"token.json\"), \n# you have to explicitly specify a Sheets Service Account Key JSON file\n#\n# If you have NOT setup Google Cloud SDK OAuth to the CX Agent, you will need to specify a Dialogflow CX Service Account Key JSON file\n# If filename strings are empty, we assume the Google Cloud SDK OAuth & Google Sheets OAuth tokens are already setup\n\nGSheets_JSON = '' #path/filename to JSON (optional)\nCX_Agent_JSON = '' #path/filename to JSON (optional)\n\n# (MANDATORY) GCP Project name with Cloud Translate API enabled\nCloud_Translate_Project_ID = 'my-cloud-translation-gcp-project.........' \n",
"ipynb env:",
"!python3 -V\n#!python3 -m pip list | wc -l\n#!python3 -m pip list | grep google\n\nfrom IPython.core.display import display, HTML\ndisplay(HTML(\"<style>.container { width:80% !important; }</style>\"))",
"Initial Setup, Verification & Validation\nImports",
"import os\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom google.oauth2 import service_account\n\nimport google.cloud.dialogflowcx_v3beta1.services as cx_services\nimport google.cloud.dialogflowcx_v3beta1.types as cx_types\n\nimport pandas as pd\nimport html\nimport json\nimport copy\nimport time\nimport bs4\nfrom pprint import pprint\n\nfrom google.cloud.translate_v3beta1.services.translation_service import TranslationServiceClient\nfrom google.cloud.translate_v3beta1.types.translation_service import TranslateTextRequest, TranslateTextResponse, Translation\nCloud_Translate_Parent = f'projects/{Cloud_Translate_Project_ID}'\ntranslate_client = TranslationServiceClient()",
"Verification",
"# Returns a list of all JSON (*.json) files in current directory \ndef get_json_files():\n json_files = []\n for file in os.listdir('./'):\n if file.endswith('.json'):\n json_files.append(file)\n return json_files\n\ndef get_cx_id(agent_uri, before_str, after_str):\n #strip from \"/\" + after_str\n index = agent_uri.find('/'+after_str)\n cx_id = agent_uri[:index]\n #strip up to before_str + \"/\"\n index = cx_id.find(before_str+'/') + len(before_str+'/')\n cx_id = cx_id[index:]\n #print(f\"def get_cx_id: cx_id=[{cx_id}]\")\n return cx_id\n\n#################################\n## Google Sheets and CX Agent URL\n#################################\n\nif len(Google_Sheets_URL) != 0 and len(CX_Agent_URL) != 0:\n if not Google_Sheets_URL.startswith('https://docs.google.com/spreadsheets'):\n raise Exception(f'Google_Sheets_URL: [{Google_Sheets_URL}] does not look right. Please correct it before running this Notebook.')\n \n keywords = ['https://', 'dialogflow', 'cx/projects', 'locations', 'agents']\n if not all(keyword in CX_Agent_URL for keyword in keywords):\n raise Exception(f'CX_Agent_URL: [{CX_Agent_URL}] does not look right. The keywords {keywords} were not found. 
Please correct it before running this Notebook.')\n \nelse:\n raise Exception('Please review and provide the URLs for Google_Sheets_URL or CX_Agent_URL (or both) before running this Notebook')\n\n##############\n## OAuth JSONs\n##############\n\n# if both file names are provided, verify existence of file (if not, raise exception)\n# if GSheets JSON (provided or not), check existence\n# if CXAgent JSON (provide), check existence\nprint(f'GSheets_JSON file: \"{GSheets_JSON}\"\\nCX_Agent_JSON file: \"{CX_Agent_JSON}\"\\n')\nprint(f'INFO: JSON files found in directory: {get_json_files()}')\n\nif len(GSheets_JSON) != 0:\n if not (os.path.isfile(GSheets_JSON)):\n raise Exception(f'Exception: GSheets_JSON file \"{GSheets_JSON}\" does NOT exist!')\nelse:\n if not ( os.path.isfile('credentials.json') or os.path.isfile('token.json') ):\n raise Exception(f'Exception: GSheets_JSON file is not specified BUT [credentials.json] and [token.json] files are NOT found!')\n\nif len(CX_Agent_JSON) != 0:\n if not (os.path.isfile(CX_Agent_JSON)):\n raise Exception(f'Exception: CX_Agent_JSON file \"{CX_Agent_JSON}\" does NOT exist!')\nelse:\n print('CX_Agent_JSON file is not specified and we assume Google Cloud SDK OAuth has been setup on this system.')\n",
"Global Variables & Functions",
"Google_Sheets_ID = Google_Sheets_URL[Google_Sheets_URL.find('spreadsheets/d/')+len('spreadsheets/d/'):]\nGoogle_Sheets_ID = Google_Sheets_ID[:Google_Sheets_ID.find('/')]\nprint(f'Google_Sheets_URL: [{Google_Sheets_URL}]')\nprint(f'Google_Sheets_ID: [{Google_Sheets_ID}]')\n\nCX_Agent_Link = CX_Agent_URL[CX_Agent_URL.find('projects'):]\nprint(f'CX_Agent_URL: [{CX_Agent_URL}]')\nprint(f'CX_Agent_Link: [{CX_Agent_Link}]')\n\nCX_Project_ID = get_cx_id(CX_Agent_Link,'projects','locations')\nCX_Location_ID = get_cx_id(CX_Agent_Link,'locations','agents')\nprint(f'CX_Project_ID: [{CX_Project_ID}]')\nprint(f'CX_Location_ID: [{CX_Location_ID}]')\n\nGSheets_Creds = None\nGSheets_Scopes = ['https://www.googleapis.com/auth/spreadsheets']\n\nCX_Creds = None\nCX_Client_Options = None\n\nCX_Agent_Default_Lang = None\nCX_Agent_Supported_Langs = []\nTranslate_Source_Lang = None\n\n__DEBUG = False\n__INFO = True\n\ndef get_cx_credentials():\n global CX_Creds\n global CX_Agent_JSON\n if len(CX_Agent_JSON) == 0:\n return None\n elif CX_Creds is None:\n CX_Creds = service_account.Credentials.from_service_account_file(CX_Agent_JSON)\n return CX_Creds\n\ndef get_cx_client_options():\n global CX_Client_Options\n if CX_Client_Options is None:\n if CX_Location_ID != 'global':\n # Reference: https://cloud.google.com/dialogflow/cx/docs/concept/region#api\n api_endpoint= f'{CX_Location_ID}-dialogflow.googleapis.com:443'\n else:\n api_endpoint= 'dialogflow.googleapis.com:443'\n CX_Client_Options = {'api_endpoint': api_endpoint}\n return CX_Client_Options\n \ndef get_agent():\n agents_client = cx_services.agents.AgentsClient(credentials=get_cx_credentials(), client_options=get_cx_client_options())\n \n agent_request = cx_types.GetAgentRequest()\n agent_request.name = CX_Agent_Link\n response = agents_client.get_agent(agent_request)\n \n return response\n\ndef get_cx_default_lang():\n global CX_Agent_Default_Lang\n if CX_Agent_Default_Lang == None:\n agent = get_agent()\n CX_Agent_Default_Lang = 
agent.default_language_code\n return CX_Agent_Default_Lang\n \ndef get_cx_supported_langs():\n global CX_Agent_Supported_Langs\n if CX_Agent_Supported_Langs == []:\n agent = get_agent()\n CX_Agent_Supported_Langs = sorted(agent.supported_language_codes)\n return CX_Agent_Supported_Langs\n\ndef get_default_lang_for_translate():\n global Translate_Source_Lang\n if Translate_Source_Lang == None:\n # get_cx_default_lang() and take letters before '-' (if any) for Translate API call\n Translate_Source_Lang = get_cx_default_lang()\n index = Translate_Source_Lang.find('-')\n if index != -1:\n Translate_Source_Lang = Translate_Source_Lang[:index]\n return Translate_Source_Lang \n ",
"Validation",
"##############################################\n### Validation - Check if we can access Sheets\n##############################################\n\nif len(GSheets_JSON) != 0:\n GSheets_Creds = service_account.Credentials.from_service_account_file(GSheets_JSON)\nelse:\n if os.path.exists('token.json'):\n GSheets_Creds = Credentials.from_authorized_user_file('token.json', GSheets_Scopes)\n # If there are no (valid) credentials available, let the user log in.\n if not GSheets_Creds or not GSheets_Creds.valid:\n if GSheets_Creds and GSheets_Creds.expired and GSheets_Creds.refresh_token:\n GSheets_Creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', GSheets_Scopes)\n GSheets_Creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(GSheets_Creds.to_json())\n \ntry:\n service = build('sheets', 'v4', credentials=GSheets_Creds)\n sheets = service.spreadsheets().get(spreadsheetId=Google_Sheets_ID).execute()\n print(f'Validating Access to Sheets: \\nSUCCESS in accessing Google Sheets at ID:[{Google_Sheets_ID}] and URL:[{Google_Sheets_URL}].\\n')\nexcept Exception as e:\n raise Exception(f'Validating Access to Sheets: \\nFAILED to access Google Sheets at ID:[{Google_Sheets_ID}] and URL:[{Google_Sheets_URL}]. See below:\\n{e}\\n')\n \n################################################\n### Validation - Check if we can access CX Agent\n################################################\ntry:\n get_agent()\n print(f'Validating Access to CX Agent: \\nSUCCESS in accessing CX Agent at LINK:[{CX_Agent_Link}] and URL:[{CX_Agent_URL}].\\n')\nexcept Exception as e:\n raise Exception(f'Validating Access to CX Agent: \\nFAILED to access CX Agent at LINK:[{CX_Agent_Link}] and URL:[{CX_Agent_URL}]. See below:\\n{e}\\n')\n",
"1. Run / Execute below:\nRun the [Sheets] Notebook and Setup Sheets\nSelect the cell below, from the Menu -> Cell -> \"Run All Above\"\nOnly continue to run the cells below if all the cells above ran without errors or exceptions.",
"%run CX-Bot-Translate_Sheets.ipynb\n\n## initializes the Sheet (if empty)\n## otherwise, do nothing\ninit_format_sheets()\n",
"Run the [Agent] Notebook and Update Sheets",
"%run CX-Bot-Translate_Agent.ipynb\n\ntry:\n write_intents_to_sheets()\n write_entities_to_sheets()\n write_flows_to_sheets()\n write_pages_to_sheets() \n write_route_groups_to_sheets()\nexcept Exception as e:\n print(f'Exception:\\n{e}')",
"Run the [Translation] Notebook and Translate Agent in Sheets",
"%run CX-Bot-Translate_Translation.ipynb\n\n## Feel free to comment out CX components that does not need translation\ntry:\n translate_cx(SheetsName.Training_Phrases)\n translate_cx(SheetsName.Entities)\n translate_cx(SheetsName.Flows)\n translate_cx(SheetsName.Pages)\n translate_cx(SheetsName.Route_Groups)\nexcept Exception as e:\n print(f'Exception:\\n{e}')",
"Update Translations in Sheets to Agent\nRun the cells below when you are satisfied with translations in Sheets and you are ready to apply it to Dialogflow CX",
"## Feel free to comment out CX components that does not require updates from Sheets back to CX\ntry:\n update_all_intents()\n# update_intents_by_lang('Chinese - Simplified')\n update_all_entities()\n# update_entities_by_lang('Tamil')\n update_all_flows()\n# update_flows_by_lang('Tamil')\n update_all_pages()\n# update_pages_by_lang('Chinese - Simplified')\n update_all_route_groups()\n# update_route_groups_by_lang('Tamil')\nexcept Exception as e:\n print(f'Exception:\\n{e}')",
"END"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ShubhamDebnath/Coursera-Machine-Learning
|
Course 4/Convolution model Step by Step v2.ipynb
|
mit
|
[
"Convolutional Neural Networks: Step by Step\nWelcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation. \nNotation:\n- Superscript $[l]$ denotes an object of the $l^{th}$ layer. \n - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.\n\n\nSuperscript $(i)$ denotes an object from the $i^{th}$ example. \n\nExample: $x^{(i)}$ is the $i^{th}$ training example input.\n\n\n\nLowerscript $i$ denotes the $i^{th}$ entry of a vector.\n\nExample: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer.\n\n\n\n$n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$. \n\n$n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$. \n\nWe assume that you are already familiar with numpy and/or have completed the previous courses of the specialization. Let's get started!\n1 - Packages\nLet's first import all the packages that you will need during this assignment. \n- numpy is the fundamental package for scientific computing with Python.\n- matplotlib is a library to plot graphs in Python.\n- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.",
"import numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n%load_ext autoreload\n%autoreload 2\n\nnp.random.seed(1)",
"2 - Outline of the Assignment\nYou will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions that will walk you through the steps needed:\n\nConvolution functions, including:\nZero Padding\nConvolve window \nConvolution forward\nConvolution backward (optional)\n\n\nPooling functions, including:\nPooling forward\nCreate mask \nDistribute value\nPooling backward (optional)\n\n\n\nThis notebook will ask you to implement these functions from scratch in numpy. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:\n<img src=\"images/model.png\" style=\"width:800px;height:300px;\">\nNote that for every forward function, there is its corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation. \n3 - Convolutional Neural Networks\nAlthough programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below. \n<img src=\"images/conv_nn.png\" style=\"width:350px;height:200px;\">\nIn this part, you will build every step of the convolution layer. You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself. \n3.1 - Zero-Padding\nZero-padding adds zeros around the border of an image:\n<img src=\"images/PAD.png\" style=\"width:600px;height:400px;\">\n<caption><center> <u> <font color='purple'> Figure 1 </u><font color='purple'> : Zero-Padding<br> Image (3 channels, RGB) with a padding of 2. </center></caption>\nThe main benefits of padding are the following:\n\n\nIt allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. 
This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. An important special case is the \"same\" convolution, in which the height/width is exactly preserved after one layer. \n\n\nIt helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels at the edges of an image.\n\n\nExercise: Implement the following function, which pads all the images of a batch of examples X with zeros. Use np.pad. Note if you want to pad the array \"a\" of shape $(5,5,5,5,5)$ with pad = 1 for the 2nd dimension, pad = 3 for the 4th dimension and pad = 0 for the rest, you would do:\npython\na = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), 'constant', constant_values = (..,..))",
"# GRADED FUNCTION: zero_pad\n\ndef zero_pad(X, pad):\n \"\"\"\n Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image, \n as illustrated in Figure 1.\n \n Argument:\n X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images\n pad -- integer, amount of padding around each image on vertical and horizontal dimensions\n \n Returns:\n X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line)\n X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0) ), 'constant', constant_values = 0)\n ### END CODE HERE ###\n \n return X_pad\n\nnp.random.seed(1)\nx = np.random.randn(4, 3, 3, 2)\nx_pad = zero_pad(x, 2)\nprint (\"x.shape =\", x.shape)\nprint (\"x_pad.shape =\", x_pad.shape)\nprint (\"x[1,1] =\", x[1,1])\nprint (\"x_pad[1,1] =\", x_pad[1,1])\n\nfig, axarr = plt.subplots(1, 2)\naxarr[0].set_title('x')\naxarr[0].imshow(x[0,:,:,0])\naxarr[1].set_title('x_pad')\naxarr[1].imshow(x_pad[0,:,:,0])",
"Expected Output:\n<table>\n <tr>\n <td>\n **x.shape**:\n </td>\n <td>\n (4, 3, 3, 2)\n </td>\n </tr>\n <tr>\n <td>\n **x_pad.shape**:\n </td>\n <td>\n (4, 7, 7, 2)\n </td>\n </tr>\n <tr>\n <td>\n **x[1,1]**:\n </td>\n <td>\n [[ 0.90085595 -0.68372786]\n [-0.12289023 -0.93576943]\n [-0.26788808 0.53035547]]\n </td>\n </tr>\n <tr>\n <td>\n **x_pad[1,1]**:\n </td>\n <td>\n [[ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]]\n </td>\n </tr>\n\n</table>\n\n3.2 - Single step of convolution\nIn this part, implement a single step of convolution, in which you apply the filter to a single position of the input. This will be used to build a convolutional unit, which: \n\nTakes an input volume \nApplies a filter at every position of the input\nOutputs another volume (usually of different size)\n\n<img src=\"images/Convolution_schematic.gif\" style=\"width:500px;height:300px;\">\n<caption><center> <u> <font color='purple'> Figure 2 </u><font color='purple'> : Convolution operation<br> with a filter of 2x2 and a stride of 1 (stride = amount you move the window each time you slide) </center></caption>\nIn a computer vision application, each value in the matrix on the left corresponds to a single pixel value, and we convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up and adding a bias. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output. \nLater in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation. \nExercise: Implement conv_single_step(). Hint.",
"# GRADED FUNCTION: conv_single_step\n\ndef conv_single_step(a_slice_prev, W, b):\n \"\"\"\n Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation \n of the previous layer.\n \n Arguments:\n a_slice_prev -- slice of input data of shape (f, f, n_C_prev)\n W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)\n b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)\n \n Returns:\n Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data\n \"\"\"\n\n ### START CODE HERE ### (≈ 2 lines of code)\n # Element-wise product between a_slice and W. Do not add the bias yet.\n s = np.multiply(a_slice_prev, W)\n # Sum over all entries of the volume s.\n Z = np.sum(s)\n # Add bias b to Z. Cast b to a float() so that Z results in a scalar value.\n Z = Z + float(b)\n ### END CODE HERE ###\n\n return Z\n\nnp.random.seed(1)\na_slice_prev = np.random.randn(4, 4, 3)\nW = np.random.randn(4, 4, 3)\nb = np.random.randn(1, 1, 1)\n\nZ = conv_single_step(a_slice_prev, W, b)\nprint(\"Z =\", Z)",
"Expected Output:\n<table>\n <tr>\n <td>\n **Z**\n </td>\n <td>\n -6.99908945068\n </td>\n </tr>\n\n</table>\n\n3.3 - Convolutional Neural Networks - Forward pass\nIn the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. You will then stack these outputs to get a 3D volume: \n<center>\n<video width=\"620\" height=\"440\" src=\"images/conv_kiank.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\nExercise: Implement the function below to convolve the filters W on an input activation A_prev. This function takes as input A_prev, the activations output by the previous layer (for a batch of m inputs), F filters/weights denoted by W, and a bias vector denoted by b, where each filter has its own (single) bias. Finally you also have access to the hyperparameters dictionary which contains the stride and the padding. \nHint: \n1. To select a 2x2 slice at the upper left corner of a matrix \"a_prev\" (shape (5,5,3)), you would do:\npython\na_slice_prev = a_prev[0:2,0:2,:]\nThis will be useful when you will define a_slice_prev below, using the start/end indexes you will define.\n2. To define a_slice you will need to first define its corners vert_start, vert_end, horiz_start and horiz_end. This figure may be helpful for you to find how each of the corner can be defined using h, w, f and s in the code below.\n<img src=\"images/vert_horiz_kiank.png\" style=\"width:400px;height:300px;\">\n<caption><center> <u> <font color='purple'> Figure 3 </u><font color='purple'> : Definition of a slice using vertical and horizontal start/end (with a 2x2 filter) <br> This figure shows only a single channel. 
</center></caption>\nReminder:\nThe formulas relating the output shape of the convolution to the input shape is:\n$$ n_H = \\lfloor \\frac{n_{H_{prev}} - f + 2 \\times pad}{stride} \\rfloor +1 $$\n$$ n_W = \\lfloor \\frac{n_{W_{prev}} - f + 2 \\times pad}{stride} \\rfloor +1 $$\n$$ n_C = \\text{number of filters used in the convolution}$$\nFor this exercise, we won't worry about vectorization, and will just implement everything with for-loops.",
"# GRADED FUNCTION: conv_forward\n\ndef conv_forward(A_prev, W, b, hparameters):\n \"\"\"\n Implements the forward propagation for a convolution function\n \n Arguments:\n A_prev -- output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)\n b -- Biases, numpy array of shape (1, 1, 1, n_C)\n hparameters -- python dictionary containing \"stride\" and \"pad\"\n \n Returns:\n Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache of values needed for the conv_backward() function\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve dimensions from A_prev's shape (≈1 line) \n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n \n # Retrieve dimensions from W's shape (≈1 line)\n (f, f, n_C_prev, n_C) = W.shape\n \n # Retrieve information from \"hparameters\" (≈2 lines)\n stride = hparameters['stride']\n pad = hparameters['pad']\n \n # Compute the dimensions of the CONV output volume using the formula given above. Hint: use int() to floor. (≈2 lines)\n n_H = int((n_H_prev + 2*pad - f) /stride + 1)\n n_W = int((n_W_prev + 2*pad - f) /stride + 1)\n \n # Initialize the output volume Z with zeros. (≈1 line)\n Z = np.zeros((m, n_H, n_W, n_C))\n \n # Create A_prev_pad by padding A_prev\n A_prev_pad = zero_pad(A_prev, pad)\n \n for i in range(m): # loop over the batch of training examples\n a_prev_pad = A_prev_pad[i] # Select ith training example's padded activation\n for h in range(n_H): # loop over vertical axis of the output volume\n for w in range(n_W): # loop over horizontal axis of the output volume\n for c in range(n_C): # loop over channels (= #filters) of the output volume\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = h * stride\n vert_end = f + vert_start\n horiz_start = w * stride\n horiz_end = f + horiz_start\n \n # Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). 
(≈1 line)\n a_slice_prev = a_prev_pad[vert_start: vert_end, horiz_start: horiz_end, :]\n \n # Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. (≈1 line)\n Z[i, h, w, c] = conv_single_step(a_slice_prev, W[..., c], b[..., c])\n \n ### END CODE HERE ###\n \n # Making sure your output shape is correct\n assert(Z.shape == (m, n_H, n_W, n_C))\n \n # Save information in \"cache\" for the backprop\n cache = (A_prev, W, b, hparameters)\n \n return Z, cache\n\nnp.random.seed(1)\nA_prev = np.random.randn(10,4,4,3)\nW = np.random.randn(2,2,3,8)\nb = np.random.randn(1,1,1,8)\nhparameters = {\"pad\" : 2,\n \"stride\": 2}\n\nZ, cache_conv = conv_forward(A_prev, W, b, hparameters)\nprint(\"Z's mean =\", np.mean(Z))\nprint(\"Z[3,2,1] =\", Z[3,2,1])\nprint(\"cache_conv[0][1][2][3] =\", cache_conv[0][1][2][3])",
"Expected Output:\n<table>\n <tr>\n <td>\n **Z's mean**\n </td>\n <td>\n 0.0489952035289\n </td>\n </tr>\n <tr>\n <td>\n **Z[3,2,1]**\n </td>\n <td>\n [-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437\n 5.18531798 8.75898442]\n </td>\n </tr>\n <tr>\n <td>\n **cache_conv[0][1][2][3]**\n </td>\n <td>\n [-0.20075807 0.18656139 0.41005165]\n </td>\n </tr>\n\n</table>\n\nFinally, CONV layer should also contain an activation, in which case we would add the following line of code:\n```python\nConvolve the window to get back one output neuron\nZ[i, h, w, c] = ...\nApply activation\nA[i, h, w, c] = activation(Z[i, h, w, c])\n```\nYou don't need to do it here. \n4 - Pooling layer\nThe pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. The two types of pooling layers are: \n\n\nMax-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.\n\n\nAverage-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.\n\n\n<table>\n<td>\n<img src=\"images/max_pool1.png\" style=\"width:500px;height:300px;\">\n<td>\n\n<td>\n<img src=\"images/a_pool.png\" style=\"width:500px;height:300px;\">\n<td>\n</table>\n\nThese pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the fxf window you would compute a max or average over. \n4.1 - Forward Pooling\nNow, you are going to implement MAX-POOL and AVG-POOL, in the same function. \nExercise: Implement the forward pass of the pooling layer. 
Follow the hints in the comments below.\nReminder:\nAs there's no padding, the formulas binding the output shape of the pooling to the input shape is:\n$$ n_H = \\lfloor \\frac{n_{H_{prev}} - f}{stride} \\rfloor +1 $$\n$$ n_W = \\lfloor \\frac{n_{W_{prev}} - f}{stride} \\rfloor +1 $$\n$$ n_C = n_{C_{prev}}$$",
"# GRADED FUNCTION: pool_forward\n\ndef pool_forward(A_prev, hparameters, mode = \"max\"):\n \"\"\"\n Implements the forward pass of the pooling layer\n \n Arguments:\n A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n hparameters -- python dictionary containing \"f\" and \"stride\"\n mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n \n Returns:\n A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters \n \"\"\"\n \n # Retrieve dimensions from the input shape\n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n \n # Retrieve hyperparameters from \"hparameters\"\n f = hparameters[\"f\"]\n stride = hparameters[\"stride\"]\n \n # Define the dimensions of the output\n n_H = int(1 + (n_H_prev - f) / stride)\n n_W = int(1 + (n_W_prev - f) / stride)\n n_C = n_C_prev\n \n # Initialize output matrix A\n A = np.zeros((m, n_H, n_W, n_C)) \n \n ### START CODE HERE ###\n for i in range(m): # loop over the training examples\n for h in range(n_H): # loop on the vertical axis of the output volume\n for w in range(n_W): # loop on the horizontal axis of the output volume\n for c in range (n_C): # loop over the channels of the output volume\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = h * stride\n vert_end = f + vert_start\n horiz_start = w * stride\n horiz_end = f + horiz_start\n \n # Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)\n a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]\n \n # Compute the pooling operation on the slice. Use an if statment to differentiate the modes. 
Use np.max/np.mean.\n if mode == \"max\":\n A[i, h, w, c] = np.max(a_prev_slice)\n elif mode == \"average\":\n A[i, h, w, c] = np.mean(a_prev_slice)\n \n ### END CODE HERE ###\n \n # Store the input and hparameters in \"cache\" for pool_backward()\n cache = (A_prev, hparameters)\n \n # Making sure your output shape is correct\n assert(A.shape == (m, n_H, n_W, n_C))\n \n return A, cache\n\nnp.random.seed(1)\nA_prev = np.random.randn(2, 4, 4, 3)\nhparameters = {\"stride\" : 2, \"f\": 3}\n\nA, cache = pool_forward(A_prev, hparameters)\nprint(\"mode = max\")\nprint(\"A =\", A)\nprint()\nA, cache = pool_forward(A_prev, hparameters, mode = \"average\")\nprint(\"mode = average\")\nprint(\"A =\", A)",
"Expected Output:\n<table>\n\n <tr>\n <td>\n A =\n </td>\n <td>\n [[[[ 1.74481176 0.86540763 1.13376944]]]\n\n\n [[[ 1.13162939 1.51981682 2.18557541]]]]\n\n </td>\n </tr>\n <tr>\n <td>\n A =\n </td>\n <td>\n [[[[ 0.02105773 -0.20328806 -0.40389855]]]\n\n\n [[[-0.22154621 0.51716526 0.48155844]]]]\n\n </td>\n </tr>\n\n</table>\n\nCongratulations! You have now implemented the forward passes of all the layers of a convolutional network. \nThe remainer of this notebook is optional, and will not be graded.\n5 - Backpropagation in convolutional neural networks (OPTIONAL / UNGRADED)\nIn modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish however, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like. \nWhen in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in convolutional neural networks you can to calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are not trivial and we did not derive them in lecture, but we briefly presented them below.\n5.1 - Convolutional layer backward pass\nLet's start by implementing the backward pass for a CONV layer. 
\n5.1.1 - Computing dA:\nThis is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:\n$$ dA += \\sum {h=0} ^{n_H} \\sum{w=0} ^{n_W} W_c \\times dZ_{hw} \\tag{1}$$\nWhere $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that at each time, we multiply the the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed by a different a_slice. Therefore when computing the backprop for dA, we are just adding the gradients of all the a_slices. \nIn code, inside the appropriate for-loops, this formula translates into:\npython\nda_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]\n5.1.2 - Computing dW:\nThis is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:\n$$ dW_c += \\sum {h=0} ^{n_H} \\sum{w=0} ^ {n_W} a_{slice} \\times dZ_{hw} \\tag{2}$$\nWhere $a_{slice}$ corresponds to the slice which was used to generate the acitivation $Z_{ij}$. Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$. \nIn code, inside the appropriate for-loops, this formula translates into:\npython\ndW[:,:,:,c] += a_slice * dZ[i, h, w, c]\n5.1.3 - Computing db:\nThis is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:\n$$ db = \\sum_h \\sum_w dZ_{hw} \\tag{3}$$\nAs you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost. 
\nIn code, inside the appropriate for-loops, this formula translates into:\npython\ndb[:,:,:,c] += dZ[i, h, w, c]\nExercise: Implement the conv_backward function below. You should sum over all the training examples, filters, heights, and widths. You should then compute the derivatives using formulas 1, 2 and 3 above.",
"def conv_backward(dZ, cache):\n \"\"\"\n Implement the backward propagation for a convolution function\n \n Arguments:\n dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache of values needed for the conv_backward(), output of conv_forward()\n \n Returns:\n dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),\n numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n dW -- gradient of the cost with respect to the weights of the conv layer (W)\n numpy array of shape (f, f, n_C_prev, n_C)\n db -- gradient of the cost with respect to the biases of the conv layer (b)\n numpy array of shape (1, 1, 1, n_C)\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve information from \"cache\"\n (A_prev, W, b, hparameters) = cache\n \n # Retrieve dimensions from A_prev's shape\n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n \n # Retrieve dimensions from W's shape\n (f, f, n_C_prev, n_C) = W.shape\n \n # Retrieve information from \"hparameters\"\n stride = hparameters['stride']\n pad = hparameters['pad']\n \n # Retrieve dimensions from dZ's shape\n (m, n_H, n_W, n_C) = dZ.shape\n \n # Initialize dA_prev, dW, db with the correct shapes\n dA_prev = np.zeros(A_prev.shape) \n dW = np.zeros(W.shape)\n db = np.zeros(b.shape)\n\n # Pad A_prev and dA_prev\n A_prev_pad = zero_pad(A_prev, pad)\n dA_prev_pad = zero_pad(dA_prev, pad)\n \n for i in range(m): # loop over the training examples\n \n # select ith training example from A_prev_pad and dA_prev_pad\n a_prev_pad = A_prev_pad[i]\n da_prev_pad = dA_prev_pad[i]\n \n for h in range(n_H): # loop over vertical axis of the output volume\n for w in range(n_W): # loop over horizontal axis of the output volume\n for c in range(n_C): # loop over the channels of the output volume\n \n # Find the corners of the current \"slice\"\n vert_start = h * stride\n vert_end = f + vert_start\n horiz_start = w * stride\n horiz_end = f + 
horiz_start\n \n # Use the corners to define the slice from a_prev_pad\n a_slice = a_prev_pad[vert_start: vert_end, horiz_start:horiz_end, :]\n\n # Update gradients for the window and the filter's parameters using the code formulas given above\n da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]\n dW[:,:,:,c] += a_slice * dZ[i, h, w, c]\n db[:,:,:,c] += dZ[i, h, w, c]\n \n # Set the ith training example's dA_prev to the unpaded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])\n dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad, :]\n ### END CODE HERE ###\n \n # Making sure your output shape is correct\n assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))\n \n return dA_prev, dW, db\n\nnp.random.seed(1)\ndA, dW, db = conv_backward(Z, cache_conv)\nprint(\"dA_mean =\", np.mean(dA))\nprint(\"dW_mean =\", np.mean(dW))\nprint(\"db_mean =\", np.mean(db))",
"Expected Output: \n<table>\n <tr>\n <td>\n **dA_mean**\n </td>\n <td>\n 1.45243777754\n </td>\n </tr>\n <tr>\n <td>\n **dW_mean**\n </td>\n <td>\n 1.72699145831\n </td>\n </tr>\n <tr>\n <td>\n **db_mean**\n </td>\n <td>\n 7.83923256462\n </td>\n </tr>\n\n</table>\n\n5.2 Pooling layer - backward pass\nNext, let's implement the backward pass for the pooling layer, starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagation the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer. \n5.2.1 Max pooling - backward pass\nBefore jumping into the backpropagation of the pooling layer, you are going to build a helper function called create_mask_from_window() which does the following: \n$$ X = \\begin{bmatrix}\n1 && 3 \\\n4 && 2\n\\end{bmatrix} \\quad \\rightarrow \\quad M =\\begin{bmatrix}\n0 && 0 \\\n1 && 0\n\\end{bmatrix}\\tag{4}$$\nAs you can see, this function creates a \"mask\" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X, the other entries are False (0). You'll see later that the backward pass for average pooling will be similar to this but using a different mask. \nExercise: Implement create_mask_from_window(). This function will be helpful for pooling backward. \nHints:\n- np.max() may be helpful. It computes the maximum of an array.\n- If you have a matrix X and a scalar x: A = (X == x) will return a matrix A of the same size as X such that:\nA[i,j] = True if X[i,j] = x\nA[i,j] = False if X[i,j] != x\n- Here, you don't need to consider cases where there are several maxima in a matrix.",
"def create_mask_from_window(x):\n \"\"\"\n Creates a mask from an input matrix x, to identify the max entry of x.\n \n Arguments:\n x -- Array of shape (f, f)\n \n Returns:\n mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.\n \"\"\"\n \n ### START CODE HERE ### (≈1 line)\n mask = x == np.max(x)\n ### END CODE HERE ###\n \n return mask\n\nnp.random.seed(1)\nx = np.random.randn(2,3)\nmask = create_mask_from_window(x)\nprint('x = ', x)\nprint(\"mask = \", mask)",
"Expected Output: \n<table> \n<tr> \n<td>\n\n**x =**\n</td>\n\n<td>\n\n[[ 1.62434536 -0.61175641 -0.52817175] <br>\n [-1.07296862 0.86540763 -2.3015387 ]]\n\n </td>\n</tr>\n\n<tr> \n<td>\n**mask =**\n</td>\n<td>\n[[ True False False] <br>\n [False False False]]\n</td>\n</tr>\n\n\n</table>\n\nWhy do we keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will \"propagate\" the gradient back to this particular input value that had influenced the cost. \n5.2.2 - Average pooling - backward pass\nIn max pooling, for each input window, all the \"influence\" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. So to implement backprop, you will now implement a helper function that reflects this.\nFor example if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like: \n$$ dZ = 1 \\quad \\rightarrow \\quad dZ =\\begin{bmatrix}\n1/4 && 1/4 \\\n1/4 && 1/4\n\\end{bmatrix}\\tag{5}$$\nThis implies that each position in the $dZ$ matrix contributes equally to output because in the forward pass, we took an average. \nExercise: Implement the function below to equally distribute a value dz through a matrix of dimension shape. Hint",
"def distribute_value(dz, shape):\n \"\"\"\n Distributes the input value in the matrix of dimension shape\n \n Arguments:\n dz -- input scalar\n shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz\n \n Returns:\n a -- Array of size (n_H, n_W) for which we distributed the value of dz\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve dimensions from shape (≈1 line)\n (n_H, n_W) = shape\n \n # Compute the value to distribute on the matrix (≈1 line)\n average = dz/(n_H * n_W)\n \n # Create a matrix where every entry is the \"average\" value (≈1 line)\n a = np.ones(shape) * average\n ### END CODE HERE ###\n \n return a\n\na = distribute_value(2, (2,2))\nprint('distributed value =', a)",
"Expected Output: \n<table> \n<tr> \n<td>\ndistributed_value =\n</td>\n<td>\n[[ 0.5 0.5]\n<br\\> \n[ 0.5 0.5]]\n</td>\n</tr>\n</table>\n\n5.2.3 Putting it together: Pooling backward\nYou now have everything you need to compute backward propagation on a pooling layer.\nExercise: Implement the pool_backward function in both modes (\"max\" and \"average\"). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). You should use an if/elif statement to see if the mode is equal to 'max' or 'average'. If it is equal to 'average' you should use the distribute_value() function you implemented above to create a matrix of the same shape as a_slice. Otherwise, the mode is equal to 'max', and you will create a mask with create_mask_from_window() and multiply it by the corresponding value of dZ.",
"def pool_backward(dA, cache, mode = \"max\"):\n \"\"\"\n Implements the backward pass of the pooling layer\n \n Arguments:\n dA -- gradient of cost with respect to the output of the pooling layer, same shape as A\n cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters \n mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n \n Returns:\n dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev\n \"\"\"\n \n ### START CODE HERE ###\n \n # Retrieve information from cache (≈1 line)\n (A_prev, hparameters) = cache\n \n # Retrieve hyperparameters from \"hparameters\" (≈2 lines)\n stride = hparameters['stride']\n f = hparameters['f']\n \n # Retrieve dimensions from A_prev's shape and dA's shape (≈2 lines)\n m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape\n m, n_H, n_W, n_C = dA.shape\n \n # Initialize dA_prev with zeros (≈1 line)\n dA_prev = np.zeros(A_prev.shape)\n \n for i in range(m): # loop over the training examples\n \n # select training example from A_prev (≈1 line)\n a_prev = A_prev[i]\n \n for h in range(n_H): # loop on the vertical axis\n for w in range(n_W): # loop on the horizontal axis\n for c in range(n_C): # loop over the channels (depth)\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = h\n vert_end = f + vert_start\n horiz_start = w\n horiz_end = f + horiz_start\n \n # Compute the backward propagation in both modes.\n if mode == \"max\":\n \n # Use the corners and \"c\" to define the current slice from a_prev (≈1 line)\n a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c]\n # Create the mask from a_prev_slice (≈1 line)\n mask = create_mask_from_window(a_prev_slice)\n # Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line)\n dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += np.multiply(mask, dA[i, h, w, c])\n \n elif mode == 
\"average\":\n \n # Get the value a from dA (≈1 line)\n da = dA[i, h, w, c]\n # Define the shape of the filter as fxf (≈1 line)\n shape = (f,f)\n # Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (≈1 line)\n dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += distribute_value(da, shape)\n \n ### END CODE ###\n \n # Making sure your output shape is correct\n assert(dA_prev.shape == A_prev.shape)\n \n return dA_prev\n\nnp.random.seed(1)\nA_prev = np.random.randn(5, 5, 3, 2)\nhparameters = {\"stride\" : 1, \"f\": 2}\nA, cache = pool_forward(A_prev, hparameters)\ndA = np.random.randn(5, 4, 2, 2)\n\ndA_prev = pool_backward(dA, cache, mode = \"max\")\nprint(\"mode = max\")\nprint('mean of dA = ', np.mean(dA))\nprint('dA_prev[1,1] = ', dA_prev[1,1]) \nprint()\ndA_prev = pool_backward(dA, cache, mode = \"average\")\nprint(\"mode = average\")\nprint('mean of dA = ', np.mean(dA))\nprint('dA_prev[1,1] = ', dA_prev[1,1]) ",
"Expected Output: \nmode = max:\n<table> \n<tr> \n<td>\n\n**mean of dA =**\n</td>\n\n<td>\n\n0.145713902729\n\n </td>\n</tr>\n\n<tr> \n<td>\n**dA_prev[1,1] =** \n</td>\n<td>\n[[ 0. 0. ] <br>\n [ 5.05844394 -1.68282702] <br>\n [ 0. 0. ]]\n</td>\n</tr>\n</table>\n\nmode = average\n<table> \n<tr> \n<td>\n\n**mean of dA =**\n</td>\n\n<td>\n\n0.145713902729\n\n </td>\n</tr>\n\n<tr> \n<td>\n**dA_prev[1,1] =** \n</td>\n<td>\n[[ 0.08485462 0.2787552 ] <br>\n [ 1.26461098 -0.25749373] <br>\n [ 1.17975636 -0.53624893]]\n</td>\n</tr>\n</table>\n\nCongratulations !\nCongratulation on completing this assignment. You now understand how convolutional neural networks work. You have implemented all the building blocks of a neural network. In the next assignment you will implement a ConvNet using TensorFlow."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
davofis/computational_seismology
|
06_finite_elements/fe_elastic_1d.ipynb
|
gpl-3.0
|
[
"<div style='background-image: url(\"../../share/images/header.svg\") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>\n <div style=\"float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px\">\n <div style=\"position: relative ; top: 50% ; transform: translatey(-50%)\">\n <div style=\"font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%\">Computational Seismology</div>\n <div style=\"font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)\">Finite Element Method - 1D Elastic Wave Equation</div>\n </div>\n </div>\n</div>\n\nSeismo-Live: http://seismo-live.org\nAuthors:\n\nDavid Vargas (@dvargas)\nHeiner Igel (@heinerigel)\n\nBasic Equations\nThis notebook presents a finite element code for the 1D elastic wave equation. Additionally, a solution using finite difference scheme is given for comparison.\nThe problem of solving the wave equation\n\\begin{equation}\n\\rho(x) \\partial_t^2 u(x,t) = \\partial_x (\\mu(x) \\partial_x u(x,t)) + f(x,t)\n\\end{equation}\nusing the finite element method is done after a series of steps performed on the above equation.\n1) We first obtain a weak form of the wave equation by integrating over the entire physical domain $D$ and at the same time multiplying by some basis $\\varphi_{i}$. 
\n2) Integration by parts and implementation of the stress-free boundary condition is performed.\n3) We approximate our unknown displacement field $u(x, t)$ by a sum over space-dependent basis functions $\\varphi_i$ weighted by time-dependent coefficients $u_i(t)$.\n\\begin{equation}\nu(x,t) \\ \\approx \\ \\overline{u}(x,t) \\ = \\ \\sum_{i=1}^{n} u_i(t) \\ \\varphi_i(x)\n\\end{equation}\n4) Utilize the same basis functions used to expand $u(x, t)$ as test functions in the weak form, this is the Galerkin principle.\n5) We can turn the continuous weak form into a system of linear equations by considering the approximated displacement field.\n\\begin{equation}\n\\mathbf{M}^T\\partial_t^2 \\mathbf{u} + \\mathbf{K}^T\\mathbf{u} = \\mathbf{f}\n\\end{equation}\n6) For the second time-derivative, we use a standard finite-difference approximation. Finally, we arrive at the explicit time extrapolation scheme.\n\\begin{equation}\n\\mathbf{u}(t + dt) = dt^2 (\\mathbf{M}^T)^{-1}[\\mathbf{f} - \\mathbf{K}^T\\mathbf{u}] + 2\\mathbf{u} - \\mathbf{u}(t-dt).\n\\end{equation}\nwhere $\\mathbf{M}$ is known as the mass matrix, and $\\mathbf{K}$ the stiffness matrix.\n7) As interpolating functions, we choose interpolants such that $\\varphi_{i}(x_{i}) = 1$ and zero elsewhere. Then, we transform the space coordinate into a local system. 
According to $\\xi = x − x_{i}$ and $h_{i} = x_{i+1} − x_{i}$, we have:\n<p style=\"width:35%;float:right;padding-left:50px\">\n<img src=fig_fe_basis_h.png>\n<span style=\"font-size:smaller\">\n</span>\n</p>\n\n\\begin{equation}\n \\varphi_{i}(\\xi) =\n \\begin{cases}\n \\frac{\\xi}{h_{i-1}} + 1 & \\quad \\text{if} \\quad -h_{i-1} \\le \\xi \\le 0\\\n 1 + \\frac{\\xi}{h_{i}} & \\quad \\text{if} \\quad 0 \\le \\xi \\le h_{i}\\\n 0 & \\quad elsewhere\\\n \\end{cases}\n\\end{equation}\nwith the corresponding derivatives\n\\begin{equation}\n \\partial_{\\xi}\\varphi_{i}(\\xi) =\n \\begin{cases}\n \\frac{1}{h_{i-1}} & \\quad \\text{if} \\quad -h_{i-1} \\le \\xi \\le 0\\\n -\\frac{1}{h_{i}} & \\quad \\text{if} \\quad 0 \\le \\xi \\le h_{i}\\\n 0 & \\quad elsewhere\\\n \\end{cases}\n\\end{equation}\nThe figure on the left-hand side illustrates the shape of $\\varphi_{i}(\\xi)$ and $\\partial_{\\xi}\\varphi_{i}(\\xi)$ with varying $h$.\nCode implementation starts with the initialization of a particular setup of our problem. Then, we define the source that introduces perturbations following by initialization of the mass and stiffness matrices. Finally, time extrapolation is done.",
"# Import all necessary libraries, this is a configuration step for the exercise.\n# Please run it before the simulation code!\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n# Show the plots in the Notebook\nplt.switch_backend(\"nbagg\")",
"1. Initialization of setup",
"# Initialization of setup\n# ---------------------------------------------------------------\n# Basic parameters\nnt = 2000 # Number of time steps\nvs = 3000 # Wave velocity [m/s] \nro0 = 2500 # Density [kg/m^3]\nnx = 1000 # Number of grid points \nisx = 500 # Source location [m] \nxmax = 10000. # Maximum length\neps = 0.5 # Stability limit\niplot = 20 # Snapshot frequency\n\ndx = xmax/(nx-1) # calculate space increment\nx = np.arange(0, nx)*dx # initialize space coordinates\nx = np.transpose(x)\n\nh = np.diff(x) # Element sizes [m]\n\n# parameters\nro = x*0 + ro0\nmu = x*0 + ro*vs**2\n\n# time step from stabiity criterion\ndt = 0.5*eps*dx/np.max(np.sqrt(mu/ro))\n# initialize time axis\nt = np.arange(1, nt+1)*dt \n\n# ---------------------------------------------------------------\n# Initialize fields\n# ---------------------------------------------------------------\nu = np.zeros(nx)\nuold = np.zeros(nx)\nunew = np.zeros(nx)\n\np = np.zeros(nx)\npold = np.zeros(nx)\npnew = np.zeros(nx)",
"2. Source time function\nIn 1D the propagating signal is an integral of the source time function. As we look for a Gaussian waveform, we initialize the source time function $f(t)$ using the first derivative of a Gaussian function.\n\\begin{equation}\nf(t) = -\\dfrac{2}{\\sigma^2}(t - t_0)e^{-\\dfrac{(t - t_0)^2}{\\sigma^2}}\n\\end{equation}\nExercise 1\nInitialize a source time function called 'src'. Use $\\sigma = 20 dt$ as Gaussian width, and time shift $t_0 = 3\\sigma$. Then, visualize the source in a given plot.",
"#################################################################\n# INITIALIZE THE SOURCE TIME FUNCTION HERE!\n#################################################################\n\n# Source vector\nf = np.zeros(nx); f[isx:isx+1] = f[isx:isx+1] + 1.\n\n#################################################################\n# PLOT THE SOURCE TIME FUNCTION HERE!\n#################################################################",
"3. The Mass Matrix\nHaving implemented the desired source, now we initialize the mass and stiffness matrices. In general, the mass matrix is given\n\\begin{equation}\nM_{ij} = \\int_{D} \\rho \\varphi_i \\varphi_j dx = \\int_{D_{\\xi}} \\rho \\varphi_i \\varphi_j d\\xi\n\\end{equation}\nnext, the defined basis are introduced and some algebraic treatment is done to arrive at the explicit form of the mass matrix\nExercise 2\nImplement the mass matrix \n\\begin{equation}\nM_{ij} = \\frac{\\rho h}{6}\n \\begin{pmatrix}\n \\ddots & & & & 0\\\n 1 & 4 & 1 & & \\\n & 1 & 4 & 1 & \\\n & & 1 & 4 & 1\\\n 0 & & & & \\ddots\n \\end{pmatrix} \n\\end{equation}\nCompute the inverse mass matrix and display your result to visually inspect how it looks like",
"#################################################################\n# IMPLEMENT THE MASS MATRIX HERE!\n#################################################################\n\n\n#################################################################\n# COMPUTE THE INVERSE MASS MATRIX HERE!\n#################################################################\n\n\n#################################################################\n# DISPLAY THE INVERSE MASS MATRIX HERE!\n#################################################################\n\n",
"4. The Stiffness matrix\nOn the other hand, the general form of the stiffness matrix is\n\\begin{equation}\nK_{ij} = \\int_{D} \\mu \\partial_x\\varphi_i \\partial_x\\varphi_j dx = \\int_{D_{\\xi}} \\mu \\partial_\\xi\\varphi_i \\partial_\\xi\\varphi_j d\\xi\n\\end{equation} \nat this point, the defined basis are introduced. Again, with the help of some algebraic treatment, we arrive at the explicit form of the stiffness matrix\nExercise 3\nImplement the stiffness matrix \n\\begin{equation}\nK_{ij} = \\frac{\\mu}{h}\n \\begin{pmatrix}\n \\ddots & & & & 0\\\n -1 & 2 & -1 & & \\\n &-1 & 2 & -1 & \\\n & & -1 & 2 & -1\\\n 0 & & & & \\ddots\n \\end{pmatrix} \n\\end{equation}\nDisplay the stiffness matrix to visually inspect how it looks like",
"#################################################################\n# IMPLEMENT THE STIFFNESS MATRIX HERE!\n#################################################################\n\n\n#################################################################\n# DISPLAY THE STIFFNESS MATRIX HERE!\n#################################################################\n\n",
"5. Finite differences matrices\nWe implement a finite difference scheme in order to compare with the finite elements solution. \nExercise 4\nImplement the finite differences matrices $M$ and $D$. Where $M$ is a diagonal mass matrix containing the inverse densities, and differentiation matrix \n\\begin{equation}\nD_{ij} = \\frac{\\mu}{dt^2}\n \\begin{pmatrix}\n -2 & 1 & & & \\\n 1 & -2 & 1 & & \\\n & & \\ddots & & \\\n & & 1 & -2 & 1\\\n & & & 1 & -2\n \\end{pmatrix} \n\\end{equation}\nDisplay both matrices to visually inspect how they look like",
"#################################################################\n# INITIALIZE FINITE DIFFERENCES HERE!\n#################################################################\n\n\n#################################################################\n# DISPLAY THE DIFFERENCES MATRICES HERE!\n#################################################################\n\n",
"6. Finite element solution\nFinally we implement the finite element solution using the computed mass $M$ and stiffness $K$ matrices together with a finite differences extrapolation scheme\n\\begin{equation}\n\\mathbf{u}(t + dt) = dt^2 (\\mathbf{M}^T)^{-1}[\\mathbf{f} - \\mathbf{K}^T\\mathbf{u}] + 2\\mathbf{u} - \\mathbf{u}(t-dt).\n\\end{equation}",
"# Initialize animated plot\n# ---------------------------------------------------------------\nplt.figure(figsize=(12,4))\n\nline1 = plt.plot(x, u, 'k', lw=1.5, label='FEM')\nline2 = plt.plot(x, p, 'r', lw=1.5, label='FDM')\nplt.title('Finite elements 1D Animation', fontsize=16)\nplt.ylabel('Amplitude', fontsize=12)\nplt.xlabel('x (m)', fontsize=12)\n\nplt.ion() # set interactive mode\nplt.show()\n\n# ---------------------------------------------------------------\n# Time extrapolation\n# ---------------------------------------------------------------\nfor it in range(nt):\n # --------------------------------------\n # Finite Element Method\n unew = (dt**2) * Minv @ (f*src[it] - K @ u) + 2*u - uold \n uold, u = u, unew\n \n # --------------------------------------\n # Finite Difference Method\n pnew = (dt**2) * Mf @ (D @ p + f/dx*src[it]) + 2*p - pold\n pold, p = p, pnew\n \n # -------------------------------------- \n # Animation plot. Display both solutions\n if not it % iplot:\n for l in line1:\n l.remove()\n del l\n for l in line2:\n l.remove()\n del l\n line1 = plt.plot(x, u, 'k', lw=1.5, label='FEM')\n line2 = plt.plot(x, p, 'r', lw=1.5, label='FDM')\n plt.legend()\n plt.gcf().canvas.draw()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
shakhova/BananaML
|
kaggle_flight/Desicion_trees_practise.ipynb
|
gpl-3.0
|
[
"Как строится дерево решений\nНа прошлом занятии мы затронули понятие энтропии - рассмотрим ее подробнее.\nОпределение\nЭнтропия Шеннона определяется для системы с $N$ возможными состояниями следующим образом:\n$$\\Large S = -\\sum_{i=1}^{N}p_ilog_2p_i,$$\nгде $p_i$ – вероятности нахождения системы в $i$-ом состоянии. Это очень важное понятие, используемое в физике, теории информации и других областях. Опуская предпосылки введения (комбинаторные и теоретико-информационные) этого понятия, отметим, что, интуитивно, энтропия соответствует степени хаоса в системе. Чем выше энтропия, тем менее упорядочена система и наоборот. \n<h4>Пример</h4>\nДля иллюстрации того, как энтропия поможет определить хорошие признаки для построения дерева, вспомним пример определения цвета шарика по его координате. Конечно, ничего общего с жизнью это не имеет, но позволяет показать, как энтропия используется для построения дерева решений.\n<img src=\"https://habrastorage.org/files/c96/80a/a4b/c9680aa4babc40f4bbc8b3595e203979.png\"/><br>\nЗдесь 9 синих шариков и 11 желтых. Если мы наудачу вытащили шарик, то он с вероятностью $p_1=\\frac{9}{20}$ будет синим и с вероятностью $p_2=\\frac{11}{20}$ – желтым. Значит, энтропия состояния $S_0 = -\\frac{9}{20}log_2{\\frac{9}{20}}-\\frac{11}{20}log_2{\\frac{11}{20}} \\approx 1$. Само это значение пока ни о чем нам не говорит. Теперь посмотрим, как изменится энтропия, если разбить шарики на две группы – с координатой меньше либо равной 12 и больше 12.\n<img src=\"https://habrastorage.org/files/186/444/a8b/186444a8bd0e451c8324ca8529f8d4f4.png\"/><br>\nВ левой группе оказалось 13 шаров, из которых 8 синих и 5 желтых. Энтропия этой группы равна $S_1 = -\\frac{5}{13}log_2{\\frac{5}{13}}-\\frac{8}{13}log_2{\\frac{8}{13}} \\approx 0.96$. В правой группе оказалось 7 шаров, из которых 1 синий и 6 желтых. Энтропия правой группы равна $S_2 = -\\frac{1}{7}log_2{\\frac{1}{7}}-\\frac{6}{7}log_2{\\frac{6}{7}} \\approx 0.6$. 
Как видим, энтропия уменьшилась в обеих группах по сравнению с начальным состоянием, хоть в левой и не сильно. Поскольку энтропия – по сути степень хаоса (или неопределенности) в системе, уменьшение энтропии называют приростом информации. Формально прирост информации (information gain, IG) при разбиении выборки по признаку $Q$ (в нашем примере это признак \"$x \\leq 12$\") определяется как \n$$\\Large IG(Q) = S_O - \\sum_{i=1}^{q}\\frac{|N_i|}{N}S_i,$$\nгде $q$ – число групп после разбиения, $N_i$ – число элементов выборки, у которых признак $Q$ имеет $i$-ое значение. В нашем случае после разделения получилось две группы ($q = 2$) – одна из 13 элементов ($N_1 = 13$), вторая – из 7 ($N_2 = 7$). Прирост информации получился \n$$\\Large IG(\"x \\leq 12\") = S_0 - \\frac{13}{20}S_1 - \\frac{7}{20}S_2 \\approx 0.16.$$\nПолучается, разделив шарики на две группы по признаку \"координата меньше либо равна 12\", мы уже получили более упорядоченную систему, чем в начале. Продолжим деление шариков на группы до тех пор, пока в каждой группе шарики не будут одного цвета.\n<img src=\"https://habrastorage.org/files/dae/a88/2b0/daea882b0a8e4ef4b23325c88f0353a1.png\"/><br>\nДля правой группы потребовалось всего одно дополнительное разбиение по признаку \"координата меньше либо равна 18\", для левой – еще три. Очевидно, энтропия группы с шариками одного цвета равна 0 ($log_2{1} = 0$), что соответствует представлению, что группа шариков одного цвета – упорядоченная. \nВ итоге мы построили дерево решений, предсказывающее цвет шарика по его координате. Отметим, что такое дерево решений может плохо работать для новых объектов (определения цвета новых шариков), поскольку оно идеально подстроилось под обучающую выборку (изначальные 20 шариков). Для классификации новых шариков лучше подойдет дерево с меньшим числом \"вопросов\", или разделений, пусть даже оно и не идеально разбивает по цветам обучающую выборку. Эту проблему, переобучение, мы еще рассмотрим далее. 
\nАлгоритм построения дерева\nВ основе популярных алгоритмов построения дерева решений, таких как ID3 и C4.5, лежит принцип жадной максимизации прироста информации – на каждом шаге выбирается тот признак, при разделении по которому прирост информации оказывается наибольшим. Дальше процедура повторяется рекурсивно, пока энтропия не окажется равной нулю или какой-то малой величине (если дерево не подгоняется идеально под обучающую выборку во избежание переобучения).\nВ разных алгоритмах применяются разные эвристики для \"ранней остановки\" или \"отсечения\", чтобы избежать построения переобученного дерева. \npython\ndef build(L):\n create node t\n if the stopping criterion is True:\n assign a predictive model to t\n else:\n Find the best binary split L = L_left + L_right\n t.left = build(L_left)\n t.right = build(L_right)\n return t\nДругие критерии качества разбиения в задаче классификации\nМы разобрались, в том, как понятие энтропии позволяет формализовать представление о качестве разбиения в дереве. Но это всего-лишь эвристика, существуют и другие:\n\nНеопределенность Джини (Gini impurity): $G = 1 - \\sum\\limits_k (p_k)^2$. Максимизацию этого критерия можно интерпретировать как максимизацию числа пар объектов одного класса, оказавшихся в одном поддереве. Подробнее об этом (как и обо многом другом) можно узнать из репозитория Евгения Соколова. Не путать с индексом Джини! 
Подробнее об этой путанице – в блогпосте Александра Дьяконова\nОшибка классификации (misclassification error): $E = 1 - \\max\\limits_k p_k$\n\nНа практике ошибка классификации почти не используется, а неопределенность Джини и прирост информации работают почти одинаково.\nВ случае задачи бинарной классификации ($p_+$ – вероятность объекта иметь метку +) энтропия и неопределенность Джини примут следующий вид:<br><br>\n$$ S = -p_+ \\log_2{p_+} -p_- \\log_2{p_-} = -p_+ \\log_2{p_+} -(1 - p_{+}) \\log_2{(1 - p_{+})};$$\n$$ G = 1 - p_+^2 - p_-^2 = 1 - p_+^2 - (1 - p_+)^2 = 2p_+(1-p_+).$$\nКогда мы построим графики этих двух функций от аргумента $p_+$, то увидим, что график энтропии очень близок к графику удвоенной неопределенности Джини, и поэтому на практике эти два критерия \"работают\" почти одинаково.",
"from __future__ import division, print_function\n# отключим всякие предупреждения Anaconda\nimport warnings\nwarnings.filterwarnings('ignore')\nimport numpy as np\nimport pandas as pd\n%matplotlib inline\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\n\nplt.rcParams['figure.figsize'] = (6,4)\nxx = np.linspace(0,1,50)\nplt.plot(xx, [2 * x * (1-x) for x in xx], label='gini')\nplt.plot(xx, [4 * x * (1-x) for x in xx], label='2*gini')\nplt.plot(xx, [-x * np.log2(x) - (1-x) * np.log2(1 - x) for x in xx], label='entropy')\nplt.plot(xx, [1 - max(x, 1-x) for x in xx], label='missclass')\nplt.plot(xx, [2 - 2 * max(x, 1-x) for x in xx], label='2*missclass')\nplt.xlabel('p+')\nplt.ylabel('criterion')\nplt.title('Критерии качества как функции от p+ (бинарная классификация)')\nplt.legend();",
"А теперь практический пример\nРассмотрим пример применения дерева решений из библиотеки Scikit-learn для синтетических данных. Сгенерируем данные. Два класса будут сгенерированы из двух нормальных распределений с разными средними.",
"# первый класс\nnp.random.seed(7)\ntrain_data = np.random.normal(size=(100, 2))\ntrain_labels = np.zeros(100)\n\n# добавляем второй класс\ntrain_data = np.r_[train_data, np.random.normal(size=(100, 2), loc=2)]\ntrain_labels = np.r_[train_labels, np.ones(100)]",
"Напишем вспомогательную функцию, которая будет возвращать решетку для дальнейшей красивой визуализации.",
"def get_grid(data, eps=0.01):\n x_min, x_max = data[:, 0].min() - 1, data[:, 0].max() + 1\n y_min, y_max = data[:, 1].min() - 1, data[:, 1].max() + 1\n return np.meshgrid(np.arange(x_min, x_max, eps),\n np.arange(y_min, y_max, eps))",
"Отобразим данные. Неформально, задача классификации в этом случае – построить какую-то \"хорошую\" границу, разделяющую 2 класса (красные точки от желтых). Интуиция подсказывает, что хорошо на новых данных будет работать какая-то гладкая граница, разделяющая 2 класса, или хотя бы просто прямая (в $n$-мерном случае - гиперплоскость).",
"plt.rcParams['figure.figsize'] = (10,8)\nplt.scatter(train_data[:, 0], train_data[:, 1], c=train_labels, s=100, \n cmap='autumn', edgecolors='black', linewidth=1.5)\nplt.plot(range(-2,5), range(4,-3,-1));",
"Попробуем разделить эти два класса, обучив дерево решений. В дереве будем использовать параметр max_depth, ограничивающий глубину дерева. Визуализируем полученную границу разделения класссов.",
"from sklearn.tree import DecisionTreeClassifier\n\n# параметр min_samples_leaf указывает, при каком минимальном количестве\n# элементов в узле он будет дальше разделяться\n\nrs = 17\n\nclf_tree = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=rs)\n\n# обучаем дерево\n\nclf_tree.fit(train_data, train_labels)\n\n# немного кода для отображения разделяющей поверхности\nxx, yy = get_grid(train_data)\npredicted = clf_tree.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)\nplt.pcolormesh(xx, yy, predicted, cmap='autumn')\nplt.scatter(train_data[:, 0], train_data[:, 1], c=train_labels, s=100, \n cmap='autumn', edgecolors='black', linewidth=1.5);",
"А как выглядит само построенное дерево? Видим, что дерево \"нарезает\" пространство на 7 прямоугольников (в дереве 7 листьев). В каждом таком прямоугольнике прогноз дерева будет константным, по превалированию объектов того или иного класса.",
"# используем .dot формат для визуализации дерева\n\nfrom sklearn.tree import export_graphviz\nexport_graphviz(clf_tree, feature_names=['x1', 'x2'], \n out_file='small_tree.dot', filled=True)\n!dot -Tpng small_tree.dot -o small_tree.png\n!rm small_tree.dot",
"<img src='small_tree.png'>\nКак \"читается\" такое дерево?\nВ начале было 200 объектов, 100 – одного класса и 100 – другого. Энтропия начального состояния была максимальной – 1. Затем было сделано разбиение объектов на 2 группы в зависимости от сравнения признака $x_1$ со значением $1.1034$ (найдите этот участок границы на рисунке выше, до дерева). При этом энтропия и в левой, и в правой группе объектов уменьшилась. И так далее, дерево строится до глубины 3. При такой визуализации чем больше объектов одного класса, тем цвет вершины ближе к темно-оранжевому и, наоборот, чем больше объектов второго класса, тем ближе цвет к темно-синему. В начале объектов одного класса поровну, поэтому корневая вершина дерева – белого цвета.\nКак дерево решений работает с количественными признаками\nДопустим, в выборке имеется количественный признак \"Возраст\", имеющий много уникальных значений. Дерево решений будет искать лучшее (по критерию типа прироста информации) разбиение выборки, проверяя бинарные признаки типа \"Возраст < 17\", \"Возраст < 22.87\" и т.д. Для решения этой проблемы применяют эвристики для ограничения числа порогов, с которыми мы сравниваем количественный признак. \nРассмотрим это на игрушечном примере. Пусть в нашем датасете на kaggle появился новый признак:",
"data = pd.DataFrame({'Возраст пилота': [19,64,18,20,38,49,55,25,29,31,33], \n 'Задержка рейса': [1,0,1,0,1,0,0,1,1,0,1]})\n\ndata",
"Отсортируем ее по возрастанию возраста.",
"data.sort_values('Возраст пилота')",
"Обучим на этих данных дерево решений (без ограничения глубины) и посмотрим на него.",
"age_tree = DecisionTreeClassifier(random_state=17)\nage_tree.fit(data['Возраст пилота'].values.reshape(-1, 1), data['Задержка рейса'].values)",
"Видим, что дерево задействовало 5 значений, с которыми сравнивается возраст: 43.5, 19, 22.5, 30 и 32 года. Если приглядеться, то это аккурат средние значения между возрастами, при которых целевой класс \"меняется\" с 1 на 0 или наоборот. \nТо есть в качестве порогов для \"нарезания\" количественного признака, дерево \"смотрит\" на те значения, при которых целевой класс меняет свое значение. \nПодумайте, почему не имеет смысла в данном случае рассматривать признак \"Возраст пилота < 18\".",
"export_graphviz(age_tree, feature_names=['Возраст пилота'], \n out_file='age_tree.dot', filled=True)\n!dot -Tpng age_tree.dot -o age_tree.png",
"<img src='age_tree.png'>\nРассмотрим пример посложнее: добавим признак \"Зарплата пилота\" (тыс. рублей/месяц).",
"data2 = pd.DataFrame({'Возраст пилота': [19,64,18,20,38,49,55,25,29,31,33], \n 'Зарплата пилота': [25,80,22,36,37,59,74,70,33,102,88], \n 'Задержка рейса': [1,0,1,0,1,0,0,1,1,0,1]})\n\ndata2",
"Если отсортировать по возрасту, то целевой класс (\"Задержка рейса\") меняется (с 1 на 0 или наоборот) 5 раз. А если отсортировать по зарплате – то 7 раз. Как теперь дерево будет выбирать признаки? Посмотрим.",
"data2.sort_values('Возраст пилота')\n\ndata2.sort_values('Зарплата пилота')\n\nage_sal_tree = DecisionTreeClassifier(random_state=17)\nage_sal_tree.fit(data2[['Возраст пилота', 'Зарплата пилота']].values, data2['Задержка рейса'].values);\n\nexport_graphviz(age_sal_tree, feature_names=['Возраст пилота', 'Зарплата пилота'], \n out_file='age_sal_tree.dot', filled=True)\n!dot -Tpng age_sal_tree.dot -o age_sal_tree.png",
"<img src='age_sal_tree.png'>\nВидим, что в дереве задействованы как разбиения по возрасту, так и по зарплате. Причем пороги, с которыми сравниваются признаки: \n43.5 и 22.5 года – для возраста \nи 95 и 30.5 тыс. руб/мес – для зарплаты. \nИ опять можно заметить, что 95 тыс. – это среднее между 88 и 102, при этом человек с зарплатой 88 оказался \"плохим\", а с 102 – \"хорошим\". То же самое для 30.5 тыс. То есть перебирались сравнения зарплаты и возраста не со всеми возможными значениями, а только с несколькими. А почему в дереве оказались именно эти признаки? Потому что по ним разбиения оказались лучше (по критерию неопределенности Джини). \nВывод: самая простая эвристика для обработки количественных признаков в дереве решений: количественный признак сортируется по возрастанию, и в дереве проверяются только те пороги, при которых целевой признак меняет значение. \nДополнительно, когда в данных много количественных признаков, и у каждого много уникальных значений, могут отбираться не все пороги, описанные выше, а только топ-N, дающих максимальный прирост все того же критерия. То есть, по сути, для каждого порога строится дерево глубины 1, считается насколько снизилась энтропия (или неопределенность Джини) и выбираются только лучшие пороги, с которыми стоит сравнивать количественный признак. \nОсновные способы борьбы с переобучением в случае деревьев решений\n\nискусственное ограничение глубины или минимального числа объектов в листе: построение дерева просто в какой-то момент прекращается;\nстрижка дерева (pruning). При таком подходе дерево сначала строится до максимальной глубины, потом постепенно, снизу вверх, некоторые вершины дерева убираются за счет сравнения по качеству дерева с данным разбиением и без него (сравнение проводится с помощью кросс-валидации, о которой чуть ниже). 
Подробнее можно почитать в материалах репозитория Евгения Соколова.\n\nКласс DecisionTreeClassifier в Scikit-learn\nОсновные параметры класса sklearn.tree.DecisionTreeClassifier:\n\nmax_depth – максимальная глубина дерева\nmax_features - максимальное число признаков, по которым ищется лучшее разбиение в дереве (это нужно потому, что при большом количестве признаков будет \"дорого\" искать лучшее (по критерию типа прироста информации) разбиение среди всех признаков)\nmin_samples_leaf – минимальное число объектов в листе. У этого параметра есть понятная интерпретация: скажем, если он равен 5, то дерево будет порождать только те классифицирующие правила, которые верны как минимум для 5 объектов\n\nПараметры дерева надо настраивать в зависимости от входных данных, делается это обычно с помощью кросс-валидации.\nПопробуем сделать это на нашем любимом датасете.",
"from sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\n\ndf_k = pd.read_csv('/Users/Nonna/Desktop/BananaML/BananaML/kaggle_flight/train_dataset.csv')\ndf_k = shuffle(df_k)\ndf_k = df_k.head(250)\n\ntrain_df = df_k[['Month', 'DayofMonth', 'DayOfWeek', \n 'UniqueCarrier', 'target']]\n\ntrain_df = train_df.fillna(train_df.mean())\n\ntrain_df = pd.get_dummies(train_df, columns = ['Month', 'DayofMonth', 'DayOfWeek', \n 'UniqueCarrier'])\n\nx_train, x_test, y_train, y_test = train_test_split(train_df.drop('target', axis = 1), train_df.target, test_size=0.3, random_state=42)\nprint(x_train.shape, x_test.shape)",
"Теперь настроим параметры дерева на кросс-валидации. Настраивать будем максимальную глубину и максимальное используемое на каждом разбиении число признаков. Суть того, как работает GridSearchCV: для каждой уникальной пары значений параметров max_depth и max_features будет проведена 5-кратная кросс-валидация и выберется лучшее сочетание параметров.",
"from sklearn.model_selection import GridSearchCV, cross_val_score\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import roc_auc_score\n\ntree = DecisionTreeClassifier(max_depth=5, random_state=17)\n\ntree_params = {'max_depth': range(1,11),\n 'max_features': range(4,19)}\n\ntree_grid = GridSearchCV(tree, tree_params,\n cv=5, n_jobs=-1,\n verbose=True, scoring='roc_auc')\n\ntree_grid.fit(x_train, y_train)",
"Лучшее сочетание параметров и соответствующая средняя доля правильных ответов на кросс-валидации:",
"tree_grid.best_params_\n\ntree_grid.best_score_\n\nroc_auc_score(y_test, tree_grid.predict(x_test))\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nforest = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=17)\nprint(np.mean(cross_val_score(forest, x_train, y_train, cv=5)))\n\nforest_params = {'max_depth': range(1,11),\n 'max_features': range(4,19)}\n\nforest_grid = GridSearchCV(forest, forest_params,\n cv=5, n_jobs=-1,\n verbose=True, scoring='roc_auc')\n\nforest_grid.fit(x_train, y_train)\n\nforest_grid.best_params_, forest_grid.best_score_\n\nroc_auc_score(y_test, forest_grid.predict(x_test))",
"Нарисуем получившееся дерево:",
"from sklearn.tree import export_graphviz\nexport_graphviz(tree_grid.best_estimator_, feature_names=train_df.columns[:-1], \n out_file='flight_tree.dot', filled=True)\n!dot -Tpng flight_tree.dot -o flight_tree.png",
"<img src='flight_tree.png'>\nДеревья решений в задаче распознавания рукописных цифр MNIST\nТеперь посмотрим на описанные 2 алгоритма в реальной задаче. Используем \"встроенные\" в sklearn данные по рукописным цифрам. Эта задача будет примером, когда метод ближайших соседей работает на удивление хорошо.",
"from sklearn.datasets import load_digits",
"Загружаем данные.",
"data = load_digits()\nX, y = data.data, data.target",
"Картинки здесь представляются матрицей 8 x 8 (интенсивности белого цвета для каждого пикселя). Далее эта матрица \"разворачивается\" в вектор длины 64, получается признаковое описание объекта.",
"X[0,:].reshape([8,8])",
"Нарисуем несколько рукописных цифр, видим, что они угадываются.",
"f, axes = plt.subplots(1, 4, sharey=True, figsize=(16,6))\nfor i in range(4):\n axes[i].imshow(X[i,:].reshape([8,8]));\n",
"Посмотрим на соотношение классов в выборке, видим, что примерно поровну нулей, единиц, ..., девяток.",
"np.bincount(y)",
"Выделим 70% выборки (X_train, y_train) под обучение и 30% будут отложенной выборкой (X_holdout, y_holdout). Отложенная выборка никак не будет участвовать в настройке параметров моделей, на ней мы в конце, после этой настройки, оценим качество полученной модели.",
"X_train, X_holdout, y_train, y_holdout = train_test_split(X, y, test_size=0.3,\n random_state=17)",
"Обучим дерево решений, опять параметры пока наугад берем.",
"tree = DecisionTreeClassifier(max_depth=5, random_state=17)\n\n%%time\ntree.fit(X_train, y_train)",
"Сделаем прогнозы для отложенной выборки. Видим, что метод ближайших соседей справился намного лучше. Но это мы пока выбирали параметры наугад.",
"from sklearn.metrics import accuracy_score\n\ntree_pred = tree.predict(X_holdout)\naccuracy_score(y_holdout, tree_pred)",
"Теперь так же, как раньше настроим параметры моделей на кросс-валидации",
"tree_params = {'max_depth': [1, 2, 3, 5, 10, 20, 25, 30, 40, 50, 64],\n 'max_features': [1, 2, 3, 5, 10, 20 ,30, 50, 64]}\n\ntree_grid = GridSearchCV(tree, tree_params,\n cv=5, n_jobs=-1,\n verbose=True, scoring='accuracy')\n\ntree_grid.fit(X_train, y_train)",
"Лучшее сочетание параметров и соответствующая средняя доля правильных ответов на кросс-валидации:",
"tree_grid.best_params_, tree_grid.best_score_\n\naccuracy_score(y_holdout, tree_grid.predict(X_holdout))",
"Это уже не 66%, но и не 97%.\nОбучим на этих же данных случайный лес, он на большинстве выборок работает лучше, чем просто деревья. Но сейчас у нас исключение.",
"np.mean(cross_val_score(RandomForestClassifier(random_state=17), X_train, y_train, cv=5))\n\nrf = RandomForestClassifier(random_state=17, n_jobs=-1).fit(X_train, y_train)\naccuracy_score(y_holdout, rf.predict(X_holdout))",
"Результаты эксперимента:\n| | CV | Holdout |\n|-----|:-----:|:-------:|\n| DT | 0.844 | 0.838 |\n| RF | 0.935 | 0.941 | \nОбозначения: CV и Holdout– средние доли правильных ответов модели на кросс-валидации и отложенной выборке соот-но. DT – дерево решений, RF – случайный лес\nПлюсы и минусы деревьев решений\nПлюсы:\n - Порождение четких правил классификации, понятных человеку, например, \"если возраст < 25 и интерес к мотоциклам, то отказать в кредите\". Это свойство называют интерпретируемостью модели;\n - Деревья решений могут легко визуализироваться, то есть может \"интерпретироваться\" (строгого определения я не видел) как сама модель (дерево), так и прогноз для отдельного взятого тестового объекта (путь в дереве);\n - Быстрые процессы обучения и прогнозирования;\n - Малое число параметров модели;\n - Поддержка и числовых, и категориальных признаков.\nМинусы:\n - У порождения четких правил классификации есть и другая сторона: деревья очень чувствительны к шумам во входных данных, вся модель может кардинально измениться, если немного изменится обучающая выборка (например, если убрать один из признаков или добавить несколько объектов), поэтому и правила классификации могут сильно изменяться, что ухудшает интерпретируемость модели;\n - Разделяющая граница, построенная деревом решений, имеет свои ограничения (состоит из гиперплоскостей, перпендикулярных какой-то из координатной оси), и на практике дерево решений по качеству классификации уступает некоторым другим методам;\n - Необходимость отсекать ветви дерева (pruning) или устанавливать минимальное число элементов в листьях дерева или максимальную глубину дерева для борьбы с переобучением. Впрочем, переобучение - проблема всех методов машинного обучения;\n - Нестабильность. Небольшие изменения в данных могут существенно изменять построенное дерево решений. 
С этой проблемой борются с помощью ансамблей деревьев решений (рассмотрим далее);\n - Проблема поиска оптимального дерева решений (минимального по размеру и способного без ошибок классифицировать выборку) NP-полна, поэтому на практике используются эвристики типа жадного поиска признака с максимальным приростом информации, которые не гарантируют нахождения глобально оптимального дерева;\n - Сложно поддерживаются пропуски в данных. Friedman оценил, что на поддержку пропусков в данных ушло около 50% кода CART (классический алгоритм построения деревьев классификации и регрессии – Classification And Regression Trees, в sklearn реализована улучшенная версия именно этого алгоритма);\n - Модель умеет только интерполировать, но не экстраполировать (это же верно и для леса и бустинга на деревьях). То есть дерево решений делает константный прогноз для объектов, находящихся в признаковом пространстве вне параллелепипеда, охватывающего все объекты обучающей выборки. В нашем примере с желтыми и синими шариками это значит, что модель дает одинаковый прогноз для всех шариков с координатой > 19 или < 0.\nСложный случай для деревьев\nВ продолжение обсуждения плюсов и минусов приведем очень простой пример задачи классификации, с которым дерево справляется, но делает все как-то \"сложнее\", чем хотелось бы. Создадим множество точек на плоскости (2 признака), каждая точка будет относиться к одному из классов (+1, красные, или -1 – желтые). Если смотреть на это как на задачу классификации, то вроде все очень просто – классы разделяются прямой.",
"def form_linearly_separable_data(n=500, x1_min=0, x1_max=30, x2_min=0, x2_max=30):\n data, target = [], []\n for i in range(n):\n x1, x2 = np.random.randint(x1_min, x1_max), np.random.randint(x2_min, x2_max)\n \n if np.abs(x1 - x2) > 0.5:\n data.append([x1, x2])\n target.append(np.sign(x1 - x2))\n return np.array(data), np.array(target)\n\nX, y = form_linearly_separable_data()\n\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap='autumn', edgecolors='black');",
"Однако дерево решений строит уж больно сложную границу и само по себе оказывается глубоким. Кроме того, представьте, как плохо дерево будет обобщаться на пространство вне представленного квадрата $30 \\times 30$, обрамляющего обучающую выборку.",
"tree = DecisionTreeClassifier(random_state=17).fit(X, y)\n\nxx, yy = get_grid(X, eps=.05)\npredicted = tree.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)\nplt.pcolormesh(xx, yy, predicted, cmap='autumn')\nplt.scatter(X[:, 0], X[:, 1], c=y, s=100, \n cmap='autumn', edgecolors='black', linewidth=1.5)\nplt.title('Easy task. Decision tree compexifies everything');",
"Вот такая сложная конструкция, хотя решение (хорошая разделяющая поверхность) – это всего лишь прямая $x_1 = x_2$.",
"export_graphviz(tree, feature_names=['x1', 'x2'], \n out_file='deep_toy_tree.dot', filled=True)\n!dot -Tpng deep_toy_tree.dot -o deep_toy_tree.png",
"<img src='deep_toy_tree.png'>",
"! jupyter nbconvert Desicion_trees_practise.ipynb --to html"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
kgrodzicki/machine-learning-specialization
|
course-1-machine-learning-foundations/notebooks/week3/Analyzing product sentiment.ipynb
|
mit
|
[
"Predicting sentiment from product reviews\nFire up GraphLab Create",
"import graphlab",
"Read some product review data\nLoading reviews for a set of baby products.",
"products = graphlab.SFrame('amazon_baby.gl/')",
"Let's explore this data together\nData includes the product name, the review text and the rating of the review.",
"products.head()",
"Build the word count vector for each review",
"products['word_count'] = graphlab.text_analytics.count_words(products['review'])\n\nproducts.head()\n\ngraphlab.canvas.set_target('ipynb')\n\nproducts['name'].show()",
"Examining the reviews for most-sold product: 'Vulli Sophie the Giraffe Teether'",
"giraffe_reviews = products[products['name'] == 'Vulli Sophie the Giraffe Teether']\n\nlen(giraffe_reviews)\n\ngiraffe_reviews['rating'].show(view='Categorical')",
"Build a sentiment classifier",
"products['rating'].show(view='Categorical')",
"Define what's a positive and a negative sentiment\nWe will ignore all reviews with rating = 3, since they tend to have a neutral sentiment. Reviews with a rating of 4 or higher will be considered positive, while the ones with rating of 2 or lower will have a negative sentiment.",
"#ignore all 3* reviews\nproducts = products[products['rating'] != 3]\n\n#positive sentiment = 4* or 5* reviews\nproducts['sentiment'] = products['rating'] >=4\n\nproducts.head()",
"Let's train the sentiment classifier",
"train_data,test_data = products.random_split(.8, seed=0)\n\nsentiment_model = graphlab.logistic_classifier.create(train_data,\n target='sentiment',\n features=['word_count'],\n validation_set=test_data)",
"Evaluate the sentiment model",
"sentiment_model.evaluate(test_data, metric='roc_curve')\n\nsentiment_model.show(view='Evaluation')",
"Applying the learned model to understand sentiment for Giraffe",
"giraffe_reviews['predicted_sentiment'] = sentiment_model.predict(giraffe_reviews, output_type='probability')\n\ngiraffe_reviews.head()",
"Sort the reviews based on the predicted sentiment and explore",
"giraffe_reviews = giraffe_reviews.sort('predicted_sentiment', ascending=False)\n\ngiraffe_reviews.head()",
"Most positive reviews for the giraffe",
"giraffe_reviews[0]['review']\n\ngiraffe_reviews[1]['review']",
"Show most negative reviews for giraffe",
"giraffe_reviews[-1]['review']\n\ngiraffe_reviews[-2]['review']\n\ndiaper_champ_reviews = products[products['name'] == 'Baby Trend Diaper Champ']\n\ndiaper_champ_reviews['predicted_sentiment'] = sentiment_model.predict(diaper_champ_reviews, output_type='probability')\n\ndiaper_champ_reviews = diaper_champ_reviews.sort('predicted_sentiment', ascending=False)\n\ndiaper_champ_reviews.head()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
andim/evolimmune
|
figSIevol/figure-SIevol.ipynb
|
mit
|
[
"Figure S3: Finite population size simulations\nPrerequisites: opt.npz from Figure SIopt, and finite population size simulations results generated with:\nmake run\nmake agg\n\nImport packages.",
"import sys\nsys.path.append('../lib')\nfrom cycler import cycler\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport palettable\nimport plotting\nimport analysis\nfrom evolimmune import varname_to_tex, derived_quantities\n%load_ext autoreload\n%autoreload 2\nplt.style.use(['paper'])",
"Import results for population of infinite size.",
"dfinf = analysis.loadnpz('../figSIopt/data/opt.npz')\nderived_quantities(dfinf)\nanalysis.printunique(dfinf)",
"Import results from finite population size simulations.",
"df = analysis.loadnpz('data/scan.npz')\nderived_quantities(df)\nanalysis.printunique(df)\n\n# number of runs per parameter\ndf.groupby(by=['nind', 'tauenv', 'ngen', 'pienv']).count().max().max()",
"Putting things together to produce final plot",
"median = True\nplt.rc('axes', prop_cycle=cycler('color', palettable.colorbrewer.qualitative.Dark2_6.mpl_colors))\nblack = matplotlib.rcParams['text.color']\nlinewidth = matplotlib.rcParams['lines.linewidth']\ncolumns = sorted(df.pienv.unique())\nvariables = ['cconstitutive', 'q', 'p', 'pup']\nymargin = 0.05\nxmargin = 0.02\nplotkwargs = dict()\nlims = dict(pup=(0, 0.2), q=(0, 0.2))\nfig, axes = plt.subplots(ncols=len(columns), nrows=len(variables), figsize=(7.0, 1.0+3.5*len(variables)/len(columns)))\nfor i, val in enumerate(columns):\n for j, var in enumerate(variables):\n ax = axes[j, i]\n lim = lims[var] if var in lims else (0, 1)\n dlim = lim[1]-lim[0]\n \n closestval = dfinf.ix[(dfinf.pienv-val).abs().argmin()]['pienv']\n dfsub = dfinf[np.abs(dfinf.pienv-closestval)<1e-3]\n dfsub.sort_values(by='tauenv', inplace=True)\n x, y = dfsub.tauenv, dfsub[var]\n ax.plot(x, y, '-', label=r'$\\infty$', c=black, lw=linewidth*2, **plotkwargs)\n \n for nind, dfg in sorted(df.groupby(by='nind')):\n dfgg = dfg[df.pienv==val].groupby(by='tauenv', as_index=False)\n dfgg_tauenv = dfgg[['tauenv']].mean()['tauenv']\n if median:\n dfggm = dfgg[[var]].median()[var]\n else:\n dfggm = dfgg[[var]].mean()[var]\n x, y = dfgg_tauenv, dfggm\n line, = ax.plot(x, y, label='%i'%nind, **plotkwargs)\n if median:\n dfggu = dfgg[[var]].quantile(0.75)[var]\n dfggl = dfgg[[var]].quantile(0.25)[var]\n else:\n dfggs = dfgg[[var]].std(ddof=1)[var]\n dfggu = dfggm + dfggs\n dfggl = dfggm - dfggs\n ax.fill_between(dfgg_tauenv, dfggl, dfggu,\n facecolor=line.get_color(), edgecolor='none', alpha=0.5)\n\n ax.set_ylim(lim[0]-ymargin*dlim, lim[1]+ymargin*dlim)\n ax.set_xlim(0.09, 11.0)\n ax.set_xscale('log')\n ax.margins(x=xmargin, y=ymargin*dlim)\n plotting.despine(ax, spines='all')\n ax.grid()\n ax.locator_params(axis='y', nbins=5)\nax.legend(loc='upper center', title='population size', bbox_to_anchor=(0.54, 1),\n bbox_transform=plt.gcf().transFigure, ncol=4)\nfor ax in analysis.flatten(axes[:-1, :]):\n 
plt.setp(ax.get_xticklabels(), visible=False)\nfor ax in analysis.flatten(axes[:, 1:]):\n plt.setp(ax.get_yticklabels(), visible=False)\nfor ax in axes[-1, :]:\n ax.set_xlabel(varname_to_tex['tauenv'])\nfor j, var in enumerate(variables):\n axes[j, 0].set_ylabel(varname_to_tex[var])\nplotting.label_axes(axes[0, :], labels=[(varname_to_tex['pienv'][1:-1] + r'\\, = \\, %s' % val) for val in columns],\n labelstyle='$%s$',\n xy=(.5, 0.9), xycoords=('axes fraction', 'figure fraction'), fontweight = 'bold', fontsize='medium',\n verticalalignment='top', horizontalalignment='center')\nfig.tight_layout(h_pad=1.5, w_pad=1.0, rect=(0.0, 0.0, 1.0, 0.87), pad=0.25)\nfig.savefig('SIevol.pdf')\nfig.savefig('SIevol.svg')",
"Influence of finite population size on optimal immune strategies from an agent-based simulation with evolving strategy parameters (switching rates and degree of adaptability) as described in the text.\nFor the infinite population, $p$ is only shown for $q > 0$, because for $q = 0$ the value of $p$ is not constrained other than being positive.\nSubplots show the median (solid line) and interquartile range (shaded area) of the strategy parameters at the end of a simulation of $100000$ generations length.\nBoth are calculated from 500 independent simulations.\nIn each simulation, the strategy parameters evolve from a random initial distribution via mutation and selection.\nMutations take place with a rate $0.01 \\exp(-t/10000)$ per generation and are normally distributed with mean zero and standard deviation $0.25 \\exp(-t/10000)$.\nThe bound constraints on the parameters were enforced by setting the strategy parameters to the boundary value if outside after a mutation.\nCosts of different immune states as in Fig. 2."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
tensorflow/docs-l10n
|
site/zh-cn/lite/performance/post_training_integer_quant_16x8.ipynb
|
apache-2.0
|
[
"Copyright 2020 The TensorFlow Authors.",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"使用 int16 激活值进行训练后整数量化\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://tensorflow.google.cn/lite/performance/post_training_integer_quant_16x8\"><img src=\"https://tensorflow.google.cn/images/tf_logo_32px.png\">在 TensorFlow.org上查看</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/lite/performance/post_training_integer_quant_16x8.ipynb\"><img src=\"https://tensorflow.google.cn/images/colab_logo_32px.png\">在 Google Colab 中运行 </a></td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/lite/performance/post_training_integer_quant_16x8.ipynb\"> <img src=\"https://tensorflow.google.cn/images/GitHub-Mark-32px.png\"> 在 GitHub 上查看源代码</a></td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/lite/performance/post_training_integer_quant_16x8.ipynb\"><img src=\"https://tensorflow.google.cn/images/download_logo_32px.png\">下载笔记本</a></td>\n</table>\n\n概述\n现在,将模型从 TensorFlow 转换为 TensorFlow Lite 的 FlatBuffer 格式时,TensorFlow Lite 支持将激活转换为 16 位整数值,同时将权重转换为 8 位整数值。我们将此模式称为“16x8 量化模式”。当激活对量化敏感时,此模式可以大幅提高量化模型的准确率,同时还可以将模型大小缩减四分之一至四分之三。此外,这种完全量化的模型可供仅支持整数的硬件加速器使用。\n一些可以从这种训练后量化模式受益的示例模型包括:\n\n超分辨率,\n音频信号处理,如噪声消除和波束成形,\n图像降噪,\n基于单张图像的 HDR 重建\n\n在本教程中,您将从头开始训练一个 MNIST 模型,并在 TensorFlow 中检查其准确率,然后使用此模式将该模型转换为 Tensorflow Lite FlatBuffer。最后,您将检查转换的模型的准确率,并将其与原始 float32 模型进行对比。请注意,本示例旨在演示此模式的用法,并不会展现与 TensorFlow Lite 中提供的其他量化技术相比的优势。\n构建 MNIST 模型\n设置",
"import logging\nlogging.getLogger(\"tensorflow\").setLevel(logging.DEBUG)\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport pathlib",
"检查 16x8 量化模式是否可用",
"tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8",
"训练并导出模型",
"# Load MNIST dataset\nmnist = keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# Normalize the input image so that each pixel value is between 0 to 1.\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\n# Define the model architecture\nmodel = keras.Sequential([\n keras.layers.InputLayer(input_shape=(28, 28)),\n keras.layers.Reshape(target_shape=(28, 28, 1)),\n keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),\n keras.layers.MaxPooling2D(pool_size=(2, 2)),\n keras.layers.Flatten(),\n keras.layers.Dense(10)\n])\n\n# Train the digit classification model\nmodel.compile(optimizer='adam',\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\nmodel.fit(\n train_images,\n train_labels,\n epochs=1,\n validation_data=(test_images, test_labels)\n)",
"在此示例中,您只对模型进行了一个周期的训练,因此只训练到约 96% 的准确率。\n转换为 TensorFlow Lite 模型\n现在,您可以使用 Python TFLiteConverter 将训练的模型转换为 TensorFlow Lite 模型。\n现在,使用 TFliteConverter 将模型转换为默认的 float32 格式:",
"converter = tf.lite.TFLiteConverter.from_keras_model(model)\ntflite_model = converter.convert()",
"将其写入 .tflite 文件:",
"tflite_models_dir = pathlib.Path(\"/tmp/mnist_tflite_models/\")\ntflite_models_dir.mkdir(exist_ok=True, parents=True)\n\ntflite_model_file = tflite_models_dir/\"mnist_model.tflite\"\ntflite_model_file.write_bytes(tflite_model)",
"要改为将模型量化为 16x8 量化模式,首先将 optimizations 标记设置为使用默认优化。然后将 16x8 量化模式指定为目标规范中要求的受支持运算:",
"converter.optimizations = [tf.lite.Optimize.DEFAULT]\nconverter.target_spec.supported_ops = [tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8]",
"对于 int8 训练后量化,通过将转换器选项 inference_input(output)_type 设置为 tf.int16,可以产生全整数量化模型。\n设置校准数据:",
"mnist_train, _ = tf.keras.datasets.mnist.load_data()\nimages = tf.cast(mnist_train[0], tf.float32) / 255.0\nmnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1)\ndef representative_data_gen():\n for input_value in mnist_ds.take(100):\n # Model has only one input so each data point has one element.\n yield [input_value]\nconverter.representative_dataset = representative_data_gen",
"最后,像往常一样转换模型。请注意,为了方便调用,转换后的模型默认仍将使用浮点输入和输出。",
"tflite_16x8_model = converter.convert()\ntflite_model_16x8_file = tflite_models_dir/\"mnist_model_quant_16x8.tflite\"\ntflite_model_16x8_file.write_bytes(tflite_16x8_model)",
"请注意,生成文件的大小约为原来的 1/3。",
"!ls -lh {tflite_models_dir}",
"运行 TensorFlow Lite 模型\n使用 Python TensorFlow Lite 解释器运行 TensorFlow Lite 模型。\n将模型加载到解释器中",
"interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))\ninterpreter.allocate_tensors()\n\ninterpreter_16x8 = tf.lite.Interpreter(model_path=str(tflite_model_16x8_file))\ninterpreter_16x8.allocate_tensors()",
"在单个图像上测试模型",
"test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)\n\ninput_index = interpreter.get_input_details()[0][\"index\"]\noutput_index = interpreter.get_output_details()[0][\"index\"]\n\ninterpreter.set_tensor(input_index, test_image)\ninterpreter.invoke()\npredictions = interpreter.get_tensor(output_index)\n\nimport matplotlib.pylab as plt\n\nplt.imshow(test_images[0])\ntemplate = \"True:{true}, predicted:{predict}\"\n_ = plt.title(template.format(true= str(test_labels[0]),\n predict=str(np.argmax(predictions[0]))))\nplt.grid(False)\n\ntest_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)\n\ninput_index = interpreter_16x8.get_input_details()[0][\"index\"]\noutput_index = interpreter_16x8.get_output_details()[0][\"index\"]\n\ninterpreter_16x8.set_tensor(input_index, test_image)\ninterpreter_16x8.invoke()\npredictions = interpreter_16x8.get_tensor(output_index)\n\nplt.imshow(test_images[0])\ntemplate = \"True:{true}, predicted:{predict}\"\n_ = plt.title(template.format(true= str(test_labels[0]),\n predict=str(np.argmax(predictions[0]))))\nplt.grid(False)",
"评估模型",
"# A helper function to evaluate the TF Lite model using \"test\" dataset.\ndef evaluate_model(interpreter):\n input_index = interpreter.get_input_details()[0][\"index\"]\n output_index = interpreter.get_output_details()[0][\"index\"]\n\n # Run predictions on every image in the \"test\" dataset.\n prediction_digits = []\n for test_image in test_images:\n # Pre-processing: add batch dimension and convert to float32 to match with\n # the model's input data format.\n test_image = np.expand_dims(test_image, axis=0).astype(np.float32)\n interpreter.set_tensor(input_index, test_image)\n\n # Run inference.\n interpreter.invoke()\n\n # Post-processing: remove batch dimension and find the digit with highest\n # probability.\n output = interpreter.tensor(output_index)\n digit = np.argmax(output()[0])\n prediction_digits.append(digit)\n\n # Compare prediction results with ground truth labels to calculate accuracy.\n accurate_count = 0\n for index in range(len(prediction_digits)):\n if prediction_digits[index] == test_labels[index]:\n accurate_count += 1\n accuracy = accurate_count * 1.0 / len(prediction_digits)\n\n return accuracy\n\nprint(evaluate_model(interpreter))",
"在 16x8 量化模型上重复评估:",
"# NOTE: This quantization mode is an experimental post-training mode,\n# it does not have any optimized kernels implementations or\n# specialized machine learning hardware accelerators. Therefore,\n# it could be slower than the float interpreter.\nprint(evaluate_model(interpreter_16x8))",
"在此示例中,您已将模型量化为 16x8 模型,准确率没有任何差异,但文件大小只有原来的 1/3。"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
Vasilyeu/mobile_customer
|
Mobile_customer.ipynb
|
mit
|
[
"Тестовое задание на позицию Data Analyst\nВасильев Сергей, vasiluev@tut.by, +375 29 7731272\n04.04.2017\n1. Импортируем библиотеки и загружаем данные",
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.metrics import roc_curve\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.metrics import auc\nfrom sklearn.ensemble import ExtraTreesClassifier\n\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\npd.set_option('display.max_rows', 100)\n# отключим предупреждения Anaconda\nimport warnings\nwarnings.simplefilter('ignore')\n#увеличим дефолтный размер графиков\nfrom pylab import rcParams\nrcParams['figure.figsize'] = 12, 8\n\ntrain = pd.read_csv(\"train.csv\", sep=';')\ntest = pd.read_csv(\"test.csv\", sep=';')\n\n# проверим правильность загрузки\ntrain.head()\n\ntest.head()",
"2. Изучим данные и обработаем пропущенные значения",
"train.info()\n\ntest.info()\n\n# определим функцию для заполнения пропущенных значений\ndef replace_nan(data):\n # в столбцах 'START_PACK' и 'OFFER_GROUP' заменим NaN на 'Unknown'\n data['START_PACK'] = data['START_PACK'].fillna('Unknown')\n data['OFFER_GROUP'] = data['OFFER_GROUP'].fillna('Unknown')\n \n # столбцы с датами приведем к формату datetime\n data['ACT_DATE'] = pd.to_datetime(data['ACT_DATE'], format='%Y-%m-%d', errors='ignore')\n data['BIRTHDAY'] = pd.to_datetime(data['BIRTHDAY'], format='%Y-%m-%d', errors='ignore')\n \n # в столбце GENDER заменим NaN на M, так как 16034 из 28600 записей имеют значение M\n data['GENDER'] = data['GENDER'].fillna('M')\n \n # по условию задачи, NaN в столбце 'MLLS_STATE' означает что абонент не является участником программы лояльности\n data['MLLS_STATE'] = data['MLLS_STATE'].fillna('No')\n \n # по условиям задачи NaN в столбце 'OBLIG_NUM' означает, что абонент не пользовался рассрочкой\n data['OBLIG_NUM'] = data['OBLIG_NUM'].fillna(0.0)\n \n # NaN в столбце 'ASSET_TYPE_LAST' вероятно означает, что абонент не приобретал оборудование в компании\n data['ASSET_TYPE_LAST'] = data['ASSET_TYPE_LAST'].fillna('Not buying')\n \n # в столбце 'USAGE_AREA' заменим NaN на 'Undefined'\n data['USAGE_AREA'] = data['USAGE_AREA'].fillna('Undefined')\n \n # в остальных столбцах заменим NaN на 0.0, считая что отсутствие данных означает отсутствие активности\n data['REFILL_OCT_16'] = data['REFILL_OCT_16'].fillna(0.0)\n data['REFILL_NOV_16'] = data['REFILL_NOV_16'].fillna(0.0)\n data['OUTGOING_OCT_16'] = data['OUTGOING_OCT_16'].fillna(0.0)\n data['OUTGOING_NOV_16'] = data['OUTGOING_NOV_16'].fillna(0.0)\n data['GPRS_OCT_16'] = data['GPRS_OCT_16'].fillna(0.0)\n data['GPRS_NOV_16'] = data['GPRS_NOV_16'].fillna(0.0)\n data['REVENUE_OCT_16'] = data['REVENUE_OCT_16'].fillna(0.0)\n data['REVENUE_NOV_16'] = data['REVENUE_NOV_16'].fillna(0.0)\n\n# переведем BYR в BYN\ndef byr_to_byn(data):\n data['REFILL_OCT_16'] = data['REFILL_OCT_16']/10000.0\n 
data['REFILL_NOV_16'] = data['REFILL_NOV_16']/10000.0\n\n# создадим несколько новых признаков\ndef new_features(data):\n \n # срок с даты подключения до 1 декабря 2016 в днях\n data['AGE_ACT'] = [int(i.days) for i in (pd.datetime(2016, 12, 1) - data['ACT_DATE'])]\n \n # день недели, в который состоялось подключение\n data['WEEKDAY'] = data['ACT_DATE'].dt.dayofweek\n \n # добавим год рождения абонента и заменим пропущенные данные средним\n data['BIRTH_YEAR'] = pd.DatetimeIndex(data['BIRTHDAY']).year\n data['BIRTH_YEAR'] = data['BIRTH_YEAR'].fillna(data['BIRTH_YEAR'].mean())\n \n # добавим столбец с возрастом абонента на момент подключения\n data['AGE_AB'] = pd.DatetimeIndex(data['ACT_DATE']).year - data['BIRTH_YEAR']\n \n # добавим столбцы с разностями показателей ноября и октября\n data['REFIL_DELTA'] = data['REFILL_NOV_16'] - data['REFILL_OCT_16']\n data['OUTGOING_DELTA'] = data['OUTGOING_NOV_16'] - data['OUTGOING_OCT_16']\n data['GPRS_DELTA'] = data['GPRS_NOV_16'] - data['GPRS_OCT_16']\n data['REVENUE_DELTA'] = data['REVENUE_NOV_16'] - data['REVENUE_OCT_16']\n \n # удалим столбецы 'BIRTHDAY' и 'ACT_DATE'\n del data['BIRTHDAY']\n del data['ACT_DATE']\n\n# переведем BYR в BYN\nbyr_to_byn(train)\nbyr_to_byn(test)\n\n# обработаем тренировочные данные\nreplace_nan(train)\nnew_features(train)\n\n# обработаем тестовые данные\nreplace_nan(test)\nnew_features(test)\n\ntrain.info()",
"Теперь у нас есть наборы данных test и train без отсутствующих данных и с несколькими новыми признаками\n3. Подготовка данных для машинного обучения",
"# преобразование категориальных данных\nle = LabelEncoder()\nfor n in ['STATUS', 'TP_CURRENT', 'START_PACK', 'OFFER_GROUP', 'GENDER', 'MLLS_STATE', \n 'PORTED_IN', 'PORTED_OUT', 'OBLIG_ON_START', 'ASSET_TYPE_LAST', 'DEVICE_TYPE_BUS', 'USAGE_AREA']:\n le.fit(train[n])\n train[n] = le.transform(train[n])\n test[n] = le.transform(test[n])\n\n# стандартизация данных\nfeatures = list(train.columns)\ndel features[0]\ndel features[22]\nscaler = StandardScaler()\nfor n in features:\n scaler.fit(train[n])\n train[n] = scaler.transform(train[n])\n test[n] = scaler.transform(test[n])\n\n# разбиваем train на тренировочный и тестовый набор\nX_train, X_test, y_train, y_test = train_test_split(train[features], \n train.ACTIVITY_DEC_16, \n test_size=0.20, \n random_state=123)",
"4. Построим первую модель на всех признаках",
"# ансамбль классификаторов методом Weighted Average Probabilities\nclf1 = LogisticRegression(random_state=42)\nclf2 = RandomForestClassifier(random_state=42)\nclf3 = SGDClassifier(loss='log', random_state=42)\n\neclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('sgd', clf3)], voting='soft', weights=[1,1,1])\n\n# проверка качества модели кросс-валидацией с вычислением ROC AUC на всех признаках\nfor clf, label in zip([clf1, clf2, clf3, eclf], \n ['Logistic Regression', 'Random Forest', 'SGD', 'Ensemble']):\n scores2 = cross_val_score(estimator=clf, X=X_train, y=y_train, cv=10, scoring='roc_auc')\n print(\"ROC AUC: %0.6f (+/- %0.6f) [%s]\" % (scores2.mean(), scores2.std(), label))",
"На тренировочных данных наилучший результат дает ансамбль из трех алгоритмов\n5. Проверим важность признаков методом Random Forest",
"# Построим лес и подсчитаем важность признаков\nforest = ExtraTreesClassifier(n_estimators=250,\n random_state=0)\n\nforest.fit(X_train, y_train)\nimportances = forest.feature_importances_\nstd = np.std([tree.feature_importances_ for tree in forest.estimators_],\n axis=0)\nindices = np.argsort(importances)[::-1]\n\n# Выведем ранг признаков по важности\nprint(\"Feature ranking:\")\n\nfor f in range(X_train.shape[1]):\n print(\"%d. %s (%f)\" % (f + 1, list(X_train.columns)[indices[f]], importances[indices[f]]))\n\n# Сделаем график важности признаков\nplt.figure()\nplt.title(\"Feature importances\")\nplt.bar(range(X_train.shape[1]), importances[indices],\n color=\"r\", yerr=std[indices], align=\"center\")\nplt.xticks(range(X_train.shape[1]), indices)\nplt.xlim([-1, X_train.shape[1]])\nplt.show()",
"Как видим, наибольшей важностью обладают признаки STATUS, USAGE_AREA, DEVICE_TYPE_BUS и REVENUE_NOV_16\nПризнак PORTED_OUT вообще не дает никакой полезной информации\n6.Отберем признаки для классификации",
"# создадим список признкаов отсортированный по важности\nimp_features = []\nfor i in indices:\n imp_features.append(features[i])\n\n# перебором установлено, что наилучшую точность дает использование 17 самых важных признаков. Остальные можно отбросить\nbest_features = imp_features[:17]\nX_train2 = X_train[best_features]\n# проверка качества модели кросс-валидацией с вычислением ROC AUC\nfor clf, label in zip([clf1, clf2, clf3, eclf], \n ['Logistic Regression', 'Random Forest', 'SGD', 'Ensemble']):\n scores2 = cross_val_score(estimator=clf, X=X_train2, y=y_train, cv=10, scoring='roc_auc')\n print(\"ROC AUC: %0.6f (+/- %0.6f) [%s]\" % (scores2.mean(), scores2.std(), label))",
"7. Построение классификатора по тестовым данным",
"# roc curve по тестовым данным\ncolors = ['black', 'orange', 'blue', 'green']\nlinestyles = [':', '--', '-.', '-']\nfor clf, label, clr, ls in zip([clf1, clf2, clf3, eclf], \n ['Logistic Regression', 'Random Forest', 'SGD', 'Ensemble'], \n colors, linestyles):\n y_pred = clf.fit(X_train[best_features], y_train).predict_proba(X_test[best_features])[:, 1]\n fpr, tpr, thresholds = roc_curve(y_true=y_test, y_score=y_pred)\n roc_auc = auc(x=fpr, y=tpr)\n plt.plot(fpr, tpr, color=clr, linestyle=ls, label='%s (auc = %0.2f)' % (label, roc_auc))\nplt.legend(loc='lower right')\nplt.plot([0, 1], [0, 1], linestyle='--', color='gray', linewidth=2)\nplt.xlim([-0.1, 1.1])\nplt.ylim([-0.1, 1.1])\nplt.grid()\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.show()",
"Показатели ROC AUC полученые на кросс валидации и на тестовой выборке совпадают, что говорит о том, что модель не переобучена и не недообучена.\n8. Получение итогового результата",
"result_pred = eclf.fit(X_train[best_features], y_train).predict_proba(test[best_features])\nresult = pd.DataFrame(test['USER_ID'])\nresult['ACTIVITY_DEC_16_PROB'] = list(result_pred[:, 1])\nresult.to_csv('result.csv', encoding='utf8', index=None)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
dataDogma/Computer-Science
|
Courses/DAT-208x/.ipynb_checkpoints/DAT208X - Week 5 - Section 1 - Plotting_with_MatplotLib-checkpoint.ipynb
|
gpl-3.0
|
[
"Table of Content\n\nWhy Visualization is important\n\n\nExercise 1\n\n\nLab: Section 1\n\n\nLine plot 1\n\n\nLine plot 2\n\n\nLine plot 3\n\n\nScatter Plot 1\n\n\nScatter Plot 2\n\n\n\n\nHistograms\n\n\nCreating Histograms with Matplotlib\n\n\nExercise 2\n\n\nLab: Histograms\n\n\nBuild a histogram 1.\n\n\nBuild a histogram 2 : bins\n\n\nBuild a histogram 3 : compare\n\n\nChoose the right plot 1\n\n\n\n\nWhy visualization is important?\n\nThere are many reasons to why data visualization is important, nevertheless some of the important ones are as follows:\n\n\nFor better understanding of the data in question.\n\n\nFor sharing the insights with others.\n\n\nFor effectively communicating results to the non-technical masses as well.\n\n\n\nHence python offers a package called, \"Matplotlib\", a data visualization package.\nlet's see some help on matplotlib, but before we proceed with that, we have to first import the package.\nFurther there are dozens of sub-packages associated with Matplotlib, hence the ones used here are some of the common ones, these are:\n\n.pyplot()\n.scatter()\n\n\nImporting convention:\nimport matplotlib.pyplot as plt\nNote: plt is an alias and not be confused by it.\n\nExercise 1\n\nRQ1: \nWhat is the characteristic about data visualization?\nAns: Visualization is a very powerful tool for exploring your data and reporting results. 
\n\nRQ2: What is the conventional way of importing the pyplot sub-package from the matplotlib package?\nAns: import matplotlib.pyplot as plt\n\nRQ3: You are creating a line plot using the following code:\na = [1, 2, 3, 4]\n b = [3, 9, 2, 6]\n plt.plot(a, b)\n plt.show()\n_ \nWhich two options describe the result of your code?_\nAns: a : Horizontal axis, b : Vertical axis.\n\nRQ4: You are modifying the following code that calls the plot() function to create a line plot:\na = [1, 2, 3, 4]\n b = [3, 9, 2, 6]\n plt.plot(a, b)\n plt.show()\nWhat should you change in the code to create a scatter plot instead of a line plot?\nAns: Change plot() in plt.plot() to scatter().\n\nGo to top: TOC\nLab:\n\nObjective:\n\n\nExperiment with matplotlib package.\n\n\nCreate both line plots and scatter plots.\n\n\n\nGo to top: TOC\nLine Plot 1\n\nGeneral Recepie:\n```\n import matplotlib as plt\nplt.show(< variable@Horizontal_axis >, < variable@Vertical_axis >)\n\nplt.plot( x, y )\n\nplt.show()```\n\n\nPreface: \nIn the video, you already saw how much the world population has grown over the past years. Will it continue to do so?\nThe world bank has estimates of the world population for the years 1950 up to 2100.\n\n\nthe years are loaded in your workspace as a lit called year.\n\n\nCorresponding populations as a list called pop.\n\n\n\nInstructions:\n\n\nprint() the last item from both the year and the pop list to see what the predicted population for the year 2100 is.\n\n\nBefore you can start, you should import matplotlib.pyplot as plt. \n\npyplot is a sub-package of matplotlib, hence the dot.\n\n\n\nUse plt.plot() to build a line plot. year should be mapped on the horizontal axis,\n\npop on the vertical axis. Don't forget to finish off with the show() function to actually display the plot.\n\n\nGo to top: TOC",
"# Print the last item from year and pop\n# print(year[-1])\n# print(pop[-1])\n\n\n# Import matplotlib.pyplot as plt\n# import matplotlib.pyplot as plt\n\n# Make a line plot: year on the x-axis, pop on the y-axis\n# plt.plot( year, pop)\n# plt.show()",
"Line Plot 2\n\nQuestion: What is the first year in which there will be more than ten billion human beings on this planet?\nAns: By 2060, the world population will rise appx. to 10 billion.\n\nGo to top: TOC\nLine plot 3\n\nPreface:\nNow that you've built your first line plot, let's start working on the data that professor Hans Rosling used to build his beautiful bubble chart. It was collected in 2007. Two lists are available for you:\n\n\nlife_exp which contains the life expectancy for each country and\n\n\ngdp_cap, which contains the GDP per capita, for each country expressed in US Dollar.\n\n\nGDP stands for Gross Domestic Product. It basically represents the size of the economy of a country.Divide this by the population and you get the GDP per capita.\n\nInstructions:\n\n\nPrint the last item from both the list gdp_cap, and the list life_exp; it is information about Zimbabwe.\n\n\nBuild a line chart, with gdp_cap on the x-axis, and life_exp on the y-axis. \n\nDoes it make sense to plot this data on a line plot?\n\n\n\nDon't forget to finish off with a plt.show() command, to actually display the plot.\n\n\n\nGo to top: TOC",
"# Print the last item of gdp_cap and life_exp\n# print( gdp_cap[ -1 ] )\n# print( life_exp[ -1 ])\n\n# Make a line plot, gdp_cap on the x-axis, life_exp on the y-axis\n# plt.plot( gdp_cap, life_exp )\n\n# Display the plot\n# plt.show()",
"Scatter Plot 1\n\nIt's fine to use such tools, but how do we know which one is best suited for which purpose? As a rule of thumb:\n\n\nWhen we have a time scale along the horizontal axis.\n\nWe generally prefer a line plot.\n\n\n\nWhen we're trying to assess if there's a correlation b/w two variables.\n\nWe go with scatter plot.\n\n\n\n\nImporting convention:\nimport matplotlib.pyplot as plt\n plt.scatter( x, y )\n plt.show()\nPreface:\nLet's continue with the gdp_cap versus life_exp plot, the GDP and life expectancy data for different countries in 2007. Maybe a scatter plot will be a better alternative?\nInstructions:\n\n\nChange the line plot that's coded in the script to a scatter plot.\n\n\nA correlation will become clear when you display the GDP per capita on a logarithmic scale. Add the line plt.xscale('log').\n\n\nFinish off your script with plt.show() to display the plot.\n\n\n\nGo to top: TOC",
"# Change the line plot below to a scatter plot\n#plt.scatter(gdp_cap, life_exp)\n\n# Put the x-axis on a logarithmic scale\n#plt.xscale('log')\n\n# Show plot\n#plt.show()",
"Scatter Plot 2\n\nPreface:\nIn the previous exercise, you saw that that the higher GDP usually corresponds to a higher life expectancy. In other words, there is a positive correlation.\nDo you think there's a relationship between population and life expectancy of a country? \n\nInstructions:\n\n\nStart from scratch: import matplotlib.pyplot as plt.\n\n\nBuild a scatter plot, where pop is mapped on the horizontal axis, and life_exp is mapped on the vertical axis.\n\n\nFinish the script with plt.show() to actually display the plot. Do you see a correlation?\n\n\nGo to top: TOC",
"# Import package\n# import matplotlib.pyplot as plt\n\n# Build Scatter plot\n# plt.scatter( pop, life_exp )\n\n# Show plot\n# plt.show()\n\n\"\"\"Conclusion: There's no correlation b/w population\nand Life Expectancy! Which makes perfect sense.\"\"\"",
"Histograms\n\nIn descriptive statistics, a histogram is a tool that improves on precursor methods such as dot plots on a number line. It's a tool to visualize the frequency of a distribution.\nIn particular, Histograms are a useful data visualization tool for \"Quantitative data\" in question.\nHistogram concept:\n\n\nStart off with a number line, with data points superimposed according to their magnitude.\n\n\nNext divide the line into equal chunks, called bins.\n\n\nWith each bin containing data points, we count the number of data points in each bin.\n\n\nFinally, we draw a bar for each bin. The height of the bar corresponds to the number of data points that fall in this bin.\n\n\n\nGo to top: TOC\nCreating Histograms with Matplotlib\n\nImporting convention:\n\nimport matplotlib.pyplot as plt\n\nfollowed by calling the histogram func. using plt.\n\nplt.hist(<list variable>, <no. of bins>)\n\nNote: A good bit about the .hist() is, it automatically computes the boundaries for all the bins. Also calculates how many values in each one.\nExercise 2\n\nRQ1: What is a characteristic of a histogram?\nAns: \n\nRQ2: You are working with a Python list with 10 different values. You divide the values into 5 equally-sized bins.\nHow wide will these bins be if the lowest value in your list is 0 and the highest is 20?\nAns: The range of the values is 20, if we divide into 5 bins, then each bin will have a width of 4.\nFor a visual cue, see below cell.",
"x = [0, 0, 0, 0, 0, 0, 0, 0, 0, 20]\n\nimport matplotlib.pyplot as plt\n\nplt.hist( x, 5 )\n\nplt.show()",
"RQ3: You write the following code:\n```\nimport matplotlib.pyplot as plt\nx = [1, 3, 6, 3, 2, 7, 3, 9, 7, 5, 2, 4]\nplt.hist(x)\nplt.show()```\n\nYou need to extend the plt.hist() command to specifically set the number of bins to 4. What should you do?\nAns: plt.hist(x, 4)\n\nGo to top: TOC\nLab histograms\n\nObjective:\n\n\nExperiment with histograms.\n\n\nWork with different bins.\n\n\nWork with dfferent datasets.\n\n\n\nLab exercises:\n\n\nBuild a histogram 1\n\n\nBuild a histogram 2 : bins\n\n\nBuild a histogram 3 : compare\n\n\nChoose the right plot 1.\n\n\nChosse the right plot 2.\n\n\n\nGo to top: TOC\nBuild a histogram 1.\n\nPreface:\nlife_exp, the list containing data on the life expentancy for different countries in 2007(at data camp only!)\nTo see how life expectancy in different countries is distributed, let's create a histogram of life_exp.\nInstructions:\n\n\nUse plt.hist() to create a histogram of the values in life_exp.\n\nDo not specify the number of bins; Python will set the number of bins to 10 by default for you.\n\n\n\nAdd plt.show() to actually display the histogram. Can you tell which bin contains the most observations?\n\n\n\n```\nCreate histogram of life_exp data\nplt.hist(life_exp)\nDisplay histogram\nplt.show()\n```\n\nGo to top: TOC\nBuild a histogram 2 : bins\n\n\n\nBy default python sets the number of bins to 10.\n\n\nNumber of bins is important,\n\n\nCan zoom in / out of the data.\n\n\nzooming in : shows much more detail, but not the bigger picture.\n\n\nzooming out : shows the bigger picture.\n\n\n\n\nTo control the no. of bins to divide your data in,\n- by setting the `bins` argument.\n\n\n\n\nPreface:\nWe'll be makin two plots here.\n\nUse plt.clf() to clean up again to start fresh.\n\n\nInstructions:\n\n\nBuild a histogram of life_exp, with 5 bins.\n\nCan you tell which bins contains the most observations?\n\n\n\nBuild another histogram of life_exp, this time with 20 bins.\n\nIs this better?\n\n\nGo to top: TOC",
"# Build histogram with 5 bins\n # Ans: plt.hist(life_exp, bins = 5)\n # 4th and 5th bins.\n\n# Show and clean up plot\n# plt.show()\n# plt.clf()\n\n# Build histogram with 20 bins\n# Ans: plt.hist( life_exp, bins = 20 )\n # Much better, 15th bin contains maximum value,\n # i.e. most people tend to live upto 71-73 years.\n\n# Show and clean up again\n # plt.show()\n # plt.clf()",
"Build a histogram 3 : compare\n\nPreface\nIn the video, you saw population pyramids for the present day and for the future. Because we were using a histogram, it was very easy to make a comparison.\nLet's do similar comparison.\nlife_exp contains life expectancy data for different countries in 2007. You also have access to a second list now, life_exp1950, containing similar data for 1950. Can you make a histogram for both datasets?\nYou'll again be making two plots. The plt.show() and plt.clf() commands to render everything nicely are already included. Also matplotlib.pyplot is imported for you, as plt.\n\nInstructions:\n\n\nBuild a histogram of life_exp with 15 bins.\n\n\nBuild a histogram of life_exp1950, also with 15 bins.\n\nIs there a big difference with the histogram for the 2007 data?\n\n\nGo to top: TOC",
"# Histogram of life_exp, 15 bins\n #Ans: plt.hist( life_exp, bins = 15)\n\n# Show and clear plot\n#plt.show()\n#plt.clf()\n\n# Histogram of life_exp1950, 15 bins\n #Ans: plt.hist( life_exp1950, bins = 15)\n\n# Show and clear plot again\n#plt.show()\n#plt.clf()\n\n\"\"\"\nConclusion: Neither one of these histograms is useful to \nbetter understand the life expectancy data.\n\nWhy? \n\"\"\"",
"Choose the right plot 1\n\nScenario:\nYou're a professor teaching Data Science with Python, and you want to visually assess if the grades on your exam follow a normal distribution. Which plot do you use?\nAnswer: Since a histogram is a very good tool to visualize a frequency distribution of either one or multiple varibales, it's also a good tool to visualize if the distribution in question follows a normal(gaussian) distribution.\n\nChoose the right plot 2\nScenario:\nYou're a professor in Data Analytics with Python, and you want to visually assess if longer answers on exam questions lead to higher grades. Which plot do you use?\nAnswer:\nSince we are trying to find a visual relationship or correlation b/w two variables \"longer-answer\" and \"higher-grades\", in such case:\nA scatter plot is a good visualizing tool to identify if the data points are \"spread out\" meaning no relationship or \"linear grouping of data points\" meaning there's some kind of relationship b/w the variables in question.\n\nGo to top: TOC\nLecture: Customization\n\nData visualization is:\n\n\nScience and Art.\n\nTo tell a story with data.\n\n\n\nWe have many options, i.e. can create different types of plots.\n\n\nFor each plot, there are infinite no. of customizations.\n\n\nThese may include, colors, shapes, lables, legend, axes etc.\n\n\n\n\nChoice depends on:\n\n\nData.\n\n\nStory you want to tell.\n\n\n\n\nExercise 3:\n\nRQ1: You are customizing a plot by labelling its axes. 
You need to do this by using matplotlib.\nWhich code should you use?\nAns: xlabel(\"x-axis title\") and ylabel(\"y-axis title\").\n\nRQ2: Which matplotlib function do you use to build a line plot where the area under the graph is colored?\nAns: fill_between()\n\nRQ3: Typically, you place all customization commands between the plot() call and the show() call, as follows:\n```\nimport matplotlib.pyplot as plt\nx = [1, 2, 3]\ny = [4, 5, 6]\nplt.plot(x, y)\ncustomization here\nplt.show()```\nWhat will happen if you place the customization code after the show() function instead?\n```\nimport matplotlib.pyplot as plt\nx = [1, 2, 3]\ny = [4, 5, 6]\nplt.plot(x, y)\nplt.show()\ncustomization here```\nAns: Let's check it out!",
"import matplotlib.pyplot as plt\n\nx = [1, 2, 3]\ny = [4, 5, 6]\n\nplt.plot(x, y)\n\n# customization here\nplt.xlabel(\"var1\")\nplt.ylabel(\"var2\")\n\nplt.show()\n\n\"\"\"It seems that customization should be done b/w \nplot() and show() function.\"\"\"\n\nimport matplotlib.pyplot as plt\n\nx = [1, 2, 3]\ny = [4, 5, 6]\n\nplt.plot(x, y)\n\n# customization here\nplt.show()\n\nplt.xlabel(\"var1\")\nplt.ylabel(\"var2\")",
"Lab : Customization\n\nObjective:\n+ Customization of visual data.\n\n\nAdd\n\n\nLabels\n\n\nTitle\n\n\nSize\n\n\nTicks\n\n\nColour\n\n\n\n\n\nLabeles:\nYou're going to work on the scatter plot with world development data: GDP per capita on the x-axis (logarithmic scale), life expectancy on the y-axis. The code for this plot is available in the script.\nAs a first step, let's add axis labels and a title to the plot. You can do this with the xlabel(), ylabel() and title() functions, available in matplotlib.pyplot. This sub-package is already imported as plt.\n\nInstructions:\n\n\nThe strings xlab and ylab are already set for you. Use these variables to set the label of the x- and y-axis.\n\n\nThe string title is also coded for you. Use it to add a title to the plot.\n\n\nAfter these customizations, finish the script with plt.show() to actually display the plot.",
"# Basic scatter plot, log scale\n# plt.scatter(gdp_cap, life_exp)\n# plt.xscale('log') \n\n# Strings\n# xlab = 'GDP per Capita [in USD]'\n# ylab = 'Life Expectancy [in years]'\n# title = 'World Development in 2007'\n\n# Add axis labels\n# plt.xlabel(xlab)\n# plt.ylabel(ylab)\n\n# Add title\n# plt.title(title)\n\n# After customizing, display the plot\n# plt.show()",
"Ticks:"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
kingsgeocomp/applied_gsa
|
Practical-06-3. Correlation.ipynb
|
mit
|
[
"Considering Correlated Variables (a.k.a. Feature Selection)\nDepending on the clustering technique, correlated variables can have an unexpected effect on the results by allowing some dimensions to be 'double-weighted' in the results. So we don't want to keep too many correlated variables in the clustering data since that will bias the clustering algorithms and may result in poor 'performance'. \n<div style=\"padding:5px;margin-top:5px;margin-bottom:5px;border:dotted 1px red;background-color:rgb(255,233,233);color:red\">STOP. Think about _why_ correlation between two variables could lead to 'double-weighting in the clustering results!</div>\n\nOne way to deal this is to produce a correlation table for all variables and then look to remove problematic variables. For a gentle introduction (that kinds of leaves you hanging at the end) there's a nice-looking blog post on Medium: \n\nFeature selection and dimensionality reduction are important because of three main reasons:\n- Prevents Overfitting: A high-dimensional dataset having too many features can sometimes lead to overfitting (model captures both real and random effects).\n- Simplicity: An over-complex model having too many features can be hard to interpret especially when features are correlated with each other.\n- Computational Efficiency: A model trained on a lower-dimensional dataset is computationally efficient (execution of algorithm requires less computational time).\nDimensionality reduction, therefore, plays a crucial role in data preprocessing.\n\nThere's also this post and this one. We could also use Principal Components Analysis (PCA) to perform dimensionality reduction whilst also dealing with correlation between the variables.",
"# Here's an output table which gives you nice, specific \n# numbers but is hard to read so I'm only showing the \n# first ten rows and columns... \nscdf.corr().iloc[1:7,1:7]",
"Finding Strong Correlations Visually",
"# And here's a correlation heatmap... which is easier to read but has\n# less detail. What it *does* highlight is high levels of *negative*\n# correlation as well as positive, so you'll need absolute difference, \n# not just whether something is more than 0.x correlated.\n# \n# From https://seaborn.pydata.org/examples/many_pairwise_correlations.html\ncdf = scdf.corr()\n\n# Generate a mask for the upper triangle\nmask = np.zeros_like(cdf, dtype=np.bool)\nmask[np.triu_indices_from(mask)] = True\n\n# Set up the matplotlib figure\nf, ax = plt.subplots(figsize=(10, 10))\n\n# Generate a custom diverging colormap\ncm = sns.diverging_palette(240, 10, as_cmap=True)\n\n# Draw the heatmap with the mask and correct aspect ratio\nsns.heatmap(cdf, mask=mask, cmap=cm, vmax=1.0, vmin=-1.0, center=0,\n square=True, linewidths=.1, cbar_kws={\"shrink\": .5})",
"<div style=\"padding:5px;margin-top:5px;margin-bottom:5px;border:dotted 1px red;background-color:rgb(255,233,233);color:red\">STOP. Make sure that you understand what the figure above is showing before proceeding to the next stage.</div>\n\nFinding Strong Correlations Numerically",
"# Generate the matrix but capture the output this time\ncdf = scdf.corr()\ncdf['name'] = cdf.index # We need a copy of the index\n\ncorrh = 0.66 # Specify threshold for highly correlated?\nprint(\"! High correlation threshold is {0}.\".format(corrh))\n\nnum_corrs = []\nhi_corrs = []\n\nfor c in cdf.name.unique():\n if c != 'name':\n # Some formatting\n print(\"=\" * 10 + f\" {c} \" + \"=\" * 10)\n \n # Find highly correlated variables\n hits = cdf.loc[(abs(cdf[c]) >= corrh), c]\n hits.drop(c, inplace=True)\n \n if hits.size == 0: # No correlations > corrs\n print(\"+ Not highly correlated with other variables.\")\n else:\n num_corrs.append(hits.size)\n \n print(\"- High correlations ({0}) with other variables:\".format(hits.size))\n print(\" \" + \"\\n \".join(hits.index.values))\n hi_corrs.append(hits.size) \n\nsns.distplot(hi_corrs, bins=range(0,20), kde=False).set_title(\n \"Number of Strong Correlations (> \" + str(corrh) + \") with Other Variables\")",
"Stripping Out 'Redundant' Variables\nLet's remove any variable that has a 'lot' of strong correlations correlations with other variables, though we need to define what is 'a lot'. This will reduce the dimensionality of our data and make clustering a bit easier. An alternative approach to dimensionality reduction -- which can be more 'robust' if we ensure that all of the data has unit variance (which we've done using the MinMaxScaler), though harder for many to understand -- would be to apply Principal Components Analysis (PCA) to the data set and to work with the eigenvalues afterwards. PCA is also available in sklearn.\nWe'll set our threshold at 5.0 based on a visual inspection of the chart above.",
"corrh = 0.66 # Specify threshold for highly correlated?\nmaxcorrs = 4.0 # What's our threshold for too many strong correlations?\nthreshold = 0.5*maxcorrs # What's our threshold for too many strong correlations with columns we keep!\n\nprint(\"! High correlation threshold is {0}.\".format(corrh))\n\nto_drop = [] # Columns to drop\nto_keep = [] # Columns to keep\n\nnum_corrs = []\nhi_corrs = []\n\nfor c in cdf.columns:\n if c != 'name':\n \n # Find highly correlated variables, but let's\n # keep the focus on *positive* correlation now\n hits = cdf.loc[(cdf[c] >= corrh), c]\n hits.drop(c, inplace=True)\n \n multi_vals = False\n \n # Remove ones with many correlations\n if hits.size >= maxcorrs: \n print(f\"- {c} exceeds maxcorr ({maxcorrs}) correlation threshold (by {hits.size-threshold}).\")\n s1 = set(to_keep)\n s2 = set(hits.index.values)\n #print(\"Comparing to_keep (\" + \", \".join(s1) + \") to hits (\" + \", \".join(s2) + \")\")\n s1 &= s2\n #print(\"Column found in 'many correlations' :\" + str(s1))\n if len(s1) >= threshold: \n multi_vals = True\n print(f\" - Dropping b/c exceed {threshold} correlations with retained cols: \\n -\" + \"\\n -\".join(s1))\n else:\n print(f\" + Keeping b/c fewer than {threshold} correlations with retained columns.\")\n else: \n print(f\"+ {c} falls below maxcorr ({maxcorrs}) correlation threshold (by {abs(threshold-hits.size)}).\")\n \n if multi_vals==True:\n to_drop.append(c)\n else:\n to_keep.append(c)\n \n\nprint(\" \")\nprint(\"To drop ({0}): \".format(len(to_drop)) + \", \".join(to_drop))\nprint(\" \")\nprint(\"To keep ({0}): \".format(len(to_keep)) + \", \".join(to_keep))\n\nto_save = scdf.drop(to_drop, axis=1, errors='raise')\nprint(\"Retained variables: \" + \", \".join(to_save.columns.values))\nto_save.to_pickle(os.path.join('data','LSOA_2Cluster.pickle'))\ndel(to_save)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Naereen/notebooks
|
agreg/Algorithme de Cocke-Kasami-Younger (python3).ipynb
|
mit
|
[
"Table of Contents\n<p><div class=\"lev1 toc-item\"><a href=\"#Table-des-matières\" data-toc-modified-id=\"Table-des-matières-1\"><span class=\"toc-item-num\">1 </span>Table des matières</a></div><div class=\"lev1 toc-item\"><a href=\"#1.-Agrégation-externe-de-mathématiques\" data-toc-modified-id=\"1.-Agrégation-externe-de-mathématiques-2\"><span class=\"toc-item-num\">2 </span>1. Agrégation externe de mathématiques</a></div><div class=\"lev2 toc-item\"><a href=\"#1.1-Leçon-orale,-option-informatique\" data-toc-modified-id=\"1.1-Leçon-orale,-option-informatique-21\"><span class=\"toc-item-num\">2.1 </span>1.1 Leçon orale, option informatique</a></div><div class=\"lev4 toc-item\"><a href=\"#Feedbacks?\" data-toc-modified-id=\"Feedbacks?-2101\"><span class=\"toc-item-num\">2.1.0.1 </span>Feedbacks?</a></div><div class=\"lev1 toc-item\"><a href=\"#2.-Algorithme-de-Cocke-Kasami-Younger\" data-toc-modified-id=\"2.-Algorithme-de-Cocke-Kasami-Younger-3\"><span class=\"toc-item-num\">3 </span>2. 
Algorithme de Cocke-Kasami-Younger</a></div><div class=\"lev3 toc-item\"><a href=\"#2.0.1-Implémentation-d'un-développement-pour-les-leçons-906,-907,-910,-923.\" data-toc-modified-id=\"2.0.1-Implémentation-d'un-développement-pour-les-leçons-906,-907,-910,-923.-301\"><span class=\"toc-item-num\">3.0.1 </span>2.0.1 Implémentation d'un développement pour les leçons 906, 907, 910, 923.</a></div><div class=\"lev3 toc-item\"><a href=\"#2.0.2-Références-:\" data-toc-modified-id=\"2.0.2-Références-:-302\"><span class=\"toc-item-num\">3.0.2 </span>2.0.2 Références :</a></div><div class=\"lev2 toc-item\"><a href=\"#2.1-Classes-pour-répresenter-une-grammaire\" data-toc-modified-id=\"2.1-Classes-pour-répresenter-une-grammaire-31\"><span class=\"toc-item-num\">3.1 </span>2.1 Classes pour répresenter une grammaire</a></div><div class=\"lev3 toc-item\"><a href=\"#2.1.1-Du-typage-en-Python-?!\" data-toc-modified-id=\"2.1.1-Du-typage-en-Python-?!-311\"><span class=\"toc-item-num\">3.1.1 </span>2.1.1 Du typage en Python ?!</a></div><div class=\"lev3 toc-item\"><a href=\"#2.1.2-La-classe-Grammaire\" data-toc-modified-id=\"2.1.2-La-classe-Grammaire-312\"><span class=\"toc-item-num\">3.1.2 </span>2.1.2 La classe <code>Grammaire</code></a></div><div class=\"lev3 toc-item\"><a href=\"#2.1.3-Premier-exemple-de-grammaire-(non-Chomsky)\" data-toc-modified-id=\"2.1.3-Premier-exemple-de-grammaire-(non-Chomsky)-313\"><span class=\"toc-item-num\">3.1.3 </span>2.1.3 Premier exemple de grammaire (non-Chomsky)</a></div><div class=\"lev3 toc-item\"><a href=\"#2.1.4-Second-exemple-de-grammaire-(non-Chomsky)\" data-toc-modified-id=\"2.1.4-Second-exemple-de-grammaire-(non-Chomsky)-314\"><span class=\"toc-item-num\">3.1.4 </span>2.1.4 Second exemple de grammaire (non-Chomsky)</a></div><div class=\"lev3 toc-item\"><a href=\"#2.1.5-Dernier-exemple-de-grammaire\" data-toc-modified-id=\"2.1.5-Dernier-exemple-de-grammaire-315\"><span class=\"toc-item-num\">3.1.5 </span>2.1.5 Dernier exemple de 
grammaire</a></div><div class=\"lev2 toc-item\"><a href=\"#2.2-Vérifier-qu'une-grammaire-est-bien-formée\" data-toc-modified-id=\"2.2-Vérifier-qu'une-grammaire-est-bien-formée-32\"><span class=\"toc-item-num\">3.2 </span>2.2 Vérifier qu'une grammaire est bien formée</a></div><div class=\"lev2 toc-item\"><a href=\"#2.3-Vérifier-qu'une-grammaire-est-en-forme-normale-de-Chomsky\" data-toc-modified-id=\"2.3-Vérifier-qu'une-grammaire-est-en-forme-normale-de-Chomsky-33\"><span class=\"toc-item-num\">3.3 </span>2.3 Vérifier qu'une grammaire est en forme normale de Chomsky</a></div><div class=\"lev2 toc-item\"><a href=\"#2.4-(enfin)-L'algorithme-de-Cocke-Kasami-Younger\" data-toc-modified-id=\"2.4-(enfin)-L'algorithme-de-Cocke-Kasami-Younger-34\"><span class=\"toc-item-num\">3.4 </span>2.4 (enfin) L'algorithme de Cocke-Kasami-Younger</a></div><div class=\"lev2 toc-item\"><a href=\"#2.5-Exemples\" data-toc-modified-id=\"2.5-Exemples-35\"><span class=\"toc-item-num\">3.5 </span>2.5 Exemples</a></div><div class=\"lev3 toc-item\"><a href=\"#2.5.1-Avec-$G_3$\" data-toc-modified-id=\"2.5.1-Avec-$G_3$-351\"><span class=\"toc-item-num\">3.5.1 </span>2.5.1 Avec <span class=\"MathJax_Preview\" style=\"color: inherit;\"><span class=\"MJXp-math\" id=\"MJXp-Span-545\"><span class=\"MJXp-msubsup\" id=\"MJXp-Span-546\"><span class=\"MJXp-mi MJXp-italic\" id=\"MJXp-Span-547\" style=\"margin-right: 0.05em;\">G</span><span class=\"MJXp-mn MJXp-script\" id=\"MJXp-Span-548\" style=\"vertical-align: -0.4em;\">3</span></span></span></span><script type=\"math/tex\" id=\"MathJax-Element-96\">G_3</script></a></div><div class=\"lev3 toc-item\"><a href=\"#2.5.2-Avec-$G_6$\" data-toc-modified-id=\"2.5.2-Avec-$G_6$-352\"><span class=\"toc-item-num\">3.5.2 </span>2.5.2 Avec <span class=\"MathJax_Preview\" style=\"color: inherit;\"><span class=\"MJXp-math\" id=\"MJXp-Span-556\"><span class=\"MJXp-msubsup\" id=\"MJXp-Span-557\"><span class=\"MJXp-mi MJXp-italic\" id=\"MJXp-Span-558\" 
style=\"margin-right: 0.05em;\">G</span><span class=\"MJXp-mn MJXp-script\" id=\"MJXp-Span-559\" style=\"vertical-align: -0.4em;\">6</span></span></span></span><script type=\"math/tex\" id=\"MathJax-Element-98\">G_6</script></a></div><div class=\"lev2 toc-item\"><a href=\"#2.6-Mise-en-forme-normale-de-Chomsky-(bonus)\" data-toc-modified-id=\"2.6-Mise-en-forme-normale-de-Chomsky-(bonus)-36\"><span class=\"toc-item-num\">3.6 </span>2.6 Mise en forme normale de Chomsky <em>(bonus)</em></a></div><div class=\"lev3 toc-item\"><a href=\"#2.6.1-Exemple-pour-$G_1$\" data-toc-modified-id=\"2.6.1-Exemple-pour-$G_1$-361\"><span class=\"toc-item-num\">3.6.1 </span>2.6.1 Exemple pour <span class=\"MathJax_Preview\">G_1</span><script type=\"math/tex\">G_1</script></a></div><div class=\"lev3 toc-item\"><a href=\"#2.6.2-Exemple-pour-$G_6$\" data-toc-modified-id=\"2.6.2-Exemple-pour-$G_6$-362\"><span class=\"toc-item-num\">3.6.2 </span>2.6.2 Exemple pour <span class=\"MathJax_Preview\">G_6</span><script type=\"math/tex\">G_6</script></a></div>\n\n# Table des matières\n* [1. Agrégation externe de mathématiques](#1.-Agrégation-externe-de-mathématiques)\n * [1.1 Leçon orale, option informatique](#1.1-Leçon-orale,-option-informatique)\n* [2. 
Algorithme de Cocke-Kasami-Younger](#2.-Algorithme-de-Cocke-Kasami-Younger)\n * \n * [2.0.1 Implémentation d'un développement pour les leçons 906, 907, 910, 923.](#2.0.1-Implémentation-d'un-développement-pour-les-leçons-906,-907,-910,-923.)\n * [2.0.2 Références :](#2.0.2-Références-:)\n * [2.1 Classes pour répresenter une grammaire](#2.1-Classes-pour-répresenter-une-grammaire)\n * [2.1.1 Du typage en Python ?!](#2.1.1-Du-typage-en-Python-?!)\n * [2.1.2 La classe ``Grammaire``](#2.1.2-La-classe-Grammaire)\n * [2.1.3 Premier exemple de grammaire (non-Chomsky)](#2.1.3-Premier-exemple-de-grammaire-%28non-Chomsky%29)\n * [2.1.4 Second exemple de grammaire (non-Chomsky)](#2.1.4-Second-exemple-de-grammaire-%28non-Chomsky%29)\n * [2.1.5 Dernier exemple de grammaire](#2.1.5-Dernier-exemple-de-grammaire)\n * [2.2 Vérifier qu'une grammaire est bien formée](#2.2-Vérifier-qu'une-grammaire-est-bien-formée)\n * [2.3 Vérifier qu'une grammaire est en forme normale de Chomsky](#2.3-Vérifier-qu'une-grammaire-est-en-forme-normale-de-Chomsky)\n * [2.4 (enfin) L'algorithme de Cocke-Kasami-Younger](#2.4-%28enfin%29-L'algorithme-de-Cocke-Kasami-Younger)\n * [2.5 Exemples](#2.5-Exemples)\n * [2.5.1 Avec $G_3$](#2.5.1-Avec-$G_3$)\n * [2.5.2 Avec $G_6$](#2.5.2-Avec-$G_6$)\n * [2.6 Mise en forme normale de Chomsky *(bonus)*](#2.6-Mise-en-forme-normale-de-Chomsky-*%28bonus%29*)\n * [2.6.1 Exemple pour $G_1$](#2.6.1-Exemple-pour-$G_1$)\n * [2.6.2 Exemple pour $G_6$](#2.6.2-Exemple-pour-$G_6$)\n\n\n# 1. 
Agrégation externe de mathématiques\n\n## 1.1 Leçon orale, option informatique\n\n> - Ce [notebook Jupyter](http://jupyter.org/) est une implémentation d'un algorithme constituant un développement pour l'option informatique de l'agrégation externe de mathématiques.\n> - Il s'agit de l'[algorithme de Cocke-Kasami-Younger](https://fr.wikipedia.org/wiki/Algorithme_de_Cocke-Younger-Kasami).\n> - Cette implémentation (partielle) a été rédigée par [Lilian Besson](http://perso.crans.org/besson/) ([sur GitHub ?](https://github.com/Naereen/), [sur Bitbucket ?](https://bitbucket.org/lbesson)), et [est open-source](https://github.com/Naereen/notebooks/blob/master/agreg/Algorithme%20de%20Cocke-Kasami-Younger%20%28python3%29.ipynb).\n\n> #### Feedbacks?\n> - Vous avez trouvé un bug ? → [Signalez-le moi svp !](https://github.com/Naereen/notebooks/issues/new), merci d'avance.\n> - Vous avez une question ? → [Posez la svp !](https://github.com/Naereen/ama.fr) [](https://GitHub.com/Naereen/ama.fr)\n\n----\n\n# 2. 
Algorithme de Cocke-Kasami-Younger\n\n### 2.0.1 Implémentation d'un développement pour les leçons 906, 907, 910, 923.\n\nL'algorithme de Cocke-Kasami-Younger (CYK) permet de résoudre le problème du mot en temps $\\mathcal{O}(|w|^3)$, par programmation dynamique.\nLa grammaire $G$ doit déjà avoir été mise en forme de [forme normale de Chomsky](https://fr.wikipedia.org/wiki/Forme_normale_de_Chomsky), ce qui prend un temps $\\mathcal{O}(|G|^2)$ et produit une grammaire équivalente $G'$ de taille $\\mathcal{O}(|G|^2)$ en partant de $G$ (qui doit être bien formée).\n\n### 2.0.2 Références :\n\n- [Cocke-Kasami-Younger sur Wikipedia](https://fr.wikipedia.org/wiki/Algorithme_de_Cocke-Younger-Kasami),\n- Bien traité dans [\"Hopcroft, Ullman\", Ch7.4.4, p298](https://catalogue.ens-cachan.fr/cgi-bin/koha/opac-detail.pl?biblionumber=23694),\n- Esquissé dans [\"Carton\", Ex4.7 Fig4.2 p170](https://catalogue.ens-cachan.fr/cgi-bin/koha/opac-detail.pl?biblionumber=41719),\n- [Développement tapé en PDF par Theo Pierron (2014)](http://perso.eleves.ens-rennes.fr/~tpier758/agreg/dvpt/info/CYK.pdf),\n- [Ces slides d'un cours sur les langages et les grammaires](http://pageperso.lif.univ-mrs.fr/~alexis.nasr/Ens/M2/pcfg.pdf).\n\n----\n\n## 2.1 Classes pour répresenter une grammaire\n\nAu lieu de types formels définis en OCaml, on utilise des classes en Python, pour répresenter une grammaire (pas seulement en forme normale de Chomsky mais dans une forme un peu plus générale).\n\n### 2.1.1 Du typage en Python ?!\n\nMais comme je veux frimer en utilisant des types formels, on va utiliser des [annotations de types en Python](https://www.python.org/dev/peps/pep-0484/).\nC'est assez nouveau, disponible **à partir de Python 3.5**. Si vous voulez en savoir plus, une bonne première lecture peut être [cette page](https://mypy.readthedocs.io/en/latest/builtin_types.html).\n\n*Note :* ces annotations de types ne sont PAS nécessaires.",
"# On a besoin de listes et de tuples\nfrom typing import List, Tuple # Module disponible en Python version >= 3.5",
"On définit les types qui nous intéressent :",
"# Type pour une variable, juste une chaine, e.g. 'X' ou 'S'\nVar = str\n# Type pour un alphabet\nAlphabet = List[Var]\n# Type pour une règle : un symbole transformé en une liste de symboles\nRegle = Tuple[Var, List[Var]]",
"Note : ces annotations de types ne sont là que pour illustrer et aider le programmeur, Python reste un langage dynamiquement typé (i.e. on fait ce qu'on veut...).\n2.1.2 La classe Grammaire\nUne grammaire $G$ est définie par :\n\n$\\Sigma$ son alphabet de production, qui sont les lettres dans les mots produits à la fin, e.g., $\\Sigma = { a, b}$,\n$V$ son alphabet de travail, qui sont les lettres utilisées dans la génération de mots, mais pas dans les mots à la fin, e.g., $V = {S, A}$,\n$S$ est le symbole de travail initial,\n$R$ est un ensemble de règles, qui sont de la forme $U \\rightarrow x_1 \\dots x_n$ pour $U \\in V$ une variable de travail (pas de production), et $x_1, \\dots, x_n$ sont variables de production ou de travail (dans $\\Sigma \\cup V$), e.g., $R = { S \\rightarrow \\varepsilon, S \\rightarrow A S b, A \\rightarrow a, A \\rightarrow a a }$.\n\nEt ainsi on peut definir un classe Grammaire, qui n'est rien d'autre qu'un moyen d'encapsuler ces différentes valeurs $\\Sigma$, $V$, $S$, et $R$ (en OCaml, ce serait un type avec des champs d'enregistrement, défini par exemple par type grammar = { sigma : string list; v: string list; s: string; r: (string, strin list) list; };;).\nOn ajoute aussi une méthode __str__ à cette classe Grammaire pour afficher la grammaire joliment.",
"class Grammaire(object):\n \"\"\" Type pour les grammaires algébriques (en forme de Chomsky). \"\"\"\n def __init__(self, sigma: Alphabet, v: Alphabet, s: Var, r: List[Regle], nom=\"G\"):\n \"\"\" Grammaire en forme de Chomsky :\n - sigma : alphabet de production, type Alphabet,\n - v : alphabet de travail, type Alphabet,\n - s : symbol initial, type Var,\n - r : liste de règles, type List[Regle].\n \"\"\"\n # On se contente de stocker les champs :\n self.sigma = sigma\n self.v = v\n self.s = s\n self.r = r\n self.nom = nom\n \n def __str__(self) -> str:\n \"\"\" Permet d'afficher une grammaire.\"\"\"\n str_regles = ', '.join(\n \"{} -> {}\".format(regle[0], ''.join(regle[1]) if regle[1] else 'ε')\n for regle in self.r\n )\n return r\"\"\"Grammaire {} :\n - Alphabet Σ = {},\n - Non terminaux V = {},\n - Symbole initial : '{}',\n - Règles : {}.\"\"\".format(self.nom, set(self.sigma), set(self.v), self.s, str_regles)",
"2.1.3 Premier exemple de grammaire (non-Chomsky)\nOn commence avec un premier exemple basique, la grammaire $G_1$ avec pour seule règle : $S \\rightarrow aSb \\;|\\; \\varepsilon$.\nC'est la grammaire naturelle, bien formée, pour les mots de la forme $a^n b^n$ pour tout $n \\geq 0$.\nCf. cet exemple sur Wikipedia.\nPar contre, elle n'est pas en forme normale de Chomsky.",
"g1 = Grammaire(\n ['a', 'b'], # Alphabet de production\n ['S'], # Alphabet de travail\n 'S', # Symbole initial (un seul)\n [ # Règles\n ('S', []), # S -> ε\n ('S', ['a', 'S', 'b']), # S -> a S b\n ],\n nom=\"G1\"\n)\nprint(g1)",
"2.1.4 Second exemple de grammaire (non-Chomsky)\nVoici un autre exemple basique, la grammaire $G_2$ qui engendre les expressions arithmétiques\nen trois variables $x$, $y$ et $z$, correctement parenthésées.\nUne seule règle de production, ou une union de règle de production, suffit :\n $$ S \\rightarrow x \\;|\\; y \\;|\\; z \\;|\\; S+S \\;|\\; S-S \\;|\\; S∗S \\;|\\; S/S \\;|\\; (S). $$\nCf. cet autre exemple sur Wikipedia.",
"g2 = Grammaire(\n ['x', 'y', 'z', '+', '-', '*', '/', '(', ')'], # Alphabet de production\n ['S'], # Alphabet de travail\n 'S', # Symbole initial (un seul)\n [ # Règles\n ('S', ['x']), # S -> x\n ('S', ['y']), # S -> y\n ('S', ['z']), # S -> z\n ('S', ['S', '+', 'S']), # S -> S + S\n ('S', ['S', '-', 'S']), # S -> S - S\n ('S', ['S', '*', 'S']), # S -> S * S\n ('S', ['S', '/', 'S']), # S -> S / S\n ('S', ['(', 'S', ')']), # S -> (S)\n ],\n nom=\"G2\"\n)\nprint(g2)",
"2.1.5 Dernier exemple de grammaire\nVoici un dernier exemple, moins basique, la grammaire $G_3$ qui engendre des phrases \"simples\" (et très limitées) en anglais.\nInspirée de cet exemple sur Wikipedia (en anglais).\nCette grammaire $G_3$ est sous forme normale de Chomsky.",
"g3 = Grammaire(\n # Alphabet de production, des vrais mots anglais (avec une espace pour que la phrase soit lisible\n ['she ', 'eats ', 'with ', 'fish ', 'fork ', 'a ', 'an ', 'ork ', 'sword '],\n # Alphabet de travail, des catégories de mots : V pour verbes, P pour pronom etc.\n ['S', 'NP ', 'VP ', 'PP ', 'V ', 'Det ', 'DetVo ', 'N ', 'NVo ', 'P '],\n # Det = a : déterminant\n # DetVo = an : déterminant avant un nom commençant par une voyelle\n # N = (fish, fork, sword) : un nom\n # NVo = ork : un nom commençant par une voyelle\n # NP = she | a (fish, fork, sword) | an ork : un sujet\n # V = eats : verbe conjugué\n # P = with : conjonction de coordination\n # VP = eats : verbe conjugué suivi d'un objet\n # PP : with NP : complément d'objet direct\n 'S', # Symbole initial (un seul)\n [ # Règles\n # Règles de constuction de phrase\n ( 'S', ['NP ', 'VP '] ), # 'S' -> 'NP' 'VP'\n ( 'VP ', ['VP ', 'PP '] ), # 'VP' -> 'VP' 'PP'\n ( 'VP ', ['V ', 'NP '] ), # 'VP' -> 'V' 'NP'\n ( 'PP ', ['P ', 'NP '] ), # 'PP' -> 'P' 'NP'\n ( 'NP ', ['Det ', 'N '] ), # 'NP' -> 'Det' 'N'\n ( 'NP ', ['DetVo ', 'NVo '] ), # 'NP' -> 'DetVo' 'NVo'\n # Règles de création de mots\n ( 'VP ', ['eats '] ), # 'VP' -> 'eats '\n ( 'NP ', ['she '] ), # 'NP' -> 'she '\n ( 'V ', ['eats '] ), # 'V' -> 'eats '\n ( 'P ', ['with '] ), # 'P' -> 'with '\n ( 'N ', ['fish '] ), # 'N' -> 'fish '\n ( 'N ', ['fork '] ), # 'N' -> 'fork '\n ( 'N ', ['sword '] ), # 'N' -> 'sword '\n ( 'NVo ', ['ork '] ), # 'NVo' -> 'ork '\n ( 'Det ', ['a '] ), # 'Det' -> 'a '\n ( 'DetVo ', ['an '] ), # 'DetVo' -> 'an '\n ],\n nom=\"G3\"\n)\nprint(g3)",
"Nous utiliserons ces exemples de grammaire plus tard, pour vérifier que nos fonctions sont correctement écrites.\n\n2.2 Vérifier qu'une grammaire est bien formée\nOn veut pouvoir vérifier qu'une grammaire $G$ (i.e., un objet instance de Grammaire) est bien formée (cf. votre cours de langage formel pour une définition propre) :\n\n$S$ doit être une variable de travail, i.e., $S \\in V$,\nLes variables de production et les variables de travail doivent être distinctes, i.e., $\\Sigma \\cap V = \\emptyset$,\nPour chaque règle, $r = A \\rightarrow w$, les membres gauches des règles sont réduits à une seule variable de travail, et les membres droits sont des mots, vides ou constitués de variables de production ou de travail, i.e., $A \\in V$, et $w \\in (\\Sigma \\cup V)^{\\star}$,\n\nOn vérifie ça facilement avec la fonction suivante :",
"def estBienFormee(self: Grammaire) -> bool:\n \"\"\" Vérifie que G est bien formée. \"\"\"\n sigma, v, s, regles = set(self.sigma), set(self.v), self.s, self.r\n tests = [\n s in v, # s est bien une variable de travail\n sigma.isdisjoint(v), # Lettres et variables de travail sont disjointes\n all(\n regle[0] in v # Les membres gauches de règles sont des variables\n and # Les membres droits de règles sont des variables ou des lettres\n all(r in sigma | v for r in regle[1])\n for regle in regles\n )\n ]\n return all(tests)\n\n# On ajoute la fonction comme une méthode (au cas où...)\nGrammaire.estBienFormee = estBienFormee\n\nfor g in [g1, g2, g3]:\n print(g)\n print(\"La grammaire\", g.nom, \"est-elle bien formée ?\", estBienFormee(g))\n print()",
"On peut définir une autre grammaire qui n'est pas bien formée, pour voir.\nCette grammaire $G_4$ engendre les mots de la forme $a^{n+k} b^n$ pour $n,k \\in \\mathbb{N}$, mais on lui donne une règle de dédoublement des $a$ : $a \\rightarrow a a$ (notez que $a$, une variable de production, est à gauche d'une règle).",
"g4 = Grammaire(\n ['a', 'b'], # Alphabet de production\n ['S'], # Alphabet de travail\n 'S', # Symbole initial (un seul)\n [ # Règles\n ('S', []), # S -> ε\n ('S', ['a', 'S', 'b']), # S -> a S b\n ('a', ['a', 'a']), # a -> a a, cette règle n'est pas en forme normale\n ],\n nom=\"G4\"\n)\nprint(g4)\nprint(\"La grammaire\", g4.nom, \"est-elle bien formée ?\", estBienFormee(g4))",
"Juste par curiosité, la voici transformée pour devenir bien formée, ici on a juste eu besoin d'ajouter une variable de travail $A$ qui peut donner $a$ ou $A A$ :",
"g5 = Grammaire(\n ['a', 'b'], # Alphabet de production\n ['S', 'A'], # Alphabet de travail\n 'S', # Symbole initial (un seul)\n [ # Règles\n ('S', []), # S -> ε\n ('S', ['A', 'S', 'b']), # S -> A S b\n ('A', ['A', 'A']), # A -> A A, voila comment on gère a -> a a\n ('A', ['a']), # A -> a\n ],\n nom=\"G5\"\n)\nprint(g5)\nprint(\"La grammaire\", g5.nom, \"est-elle bien formée ?\", estBienFormee(g5))",
"2.3 Vérifier qu'une grammaire est en forme normale de Chomsky\nOn veut maintenant pouvoir vérifier qu'une grammaire $G$ (i.e., un objet instance de Grammaire) est bien en forme normale de Chomsky.\nEn effet, l'algorithme CKY n'a aucune chance de fonctionner si la grammaire n'est pas sous la bonne forme.\nPour que $G$ soit en forme normale de Chomsky :\n- elle doit d'abord être bien formée (cf. ci-dessus),\n- et chaque règle doit être\n - soit de la forme $S \\rightarrow \\varepsilon$,\n - soit de la forme $A \\rightarrow a$ pour $(A, a)$ dans $V \\times \\Sigma$,\n - soit de la forme $A \\rightarrow B C$ pour $(A, B, C)$ dans $V^3$ (certains ouvrages demandent à ce qu'il n'y ait aucune production de $S$ le symbole initial, i.e., $B,C \\neq S$, mais ça ne change rien pour l'algorithme qu'on implémente plus bas).\nOn vérifie ça facilement, point par point, dans la fonction suivante :",
"def estChomsky(self: Grammaire) -> bool:\n \"\"\" Vérifie que G est sous forme normale de Chomksy. \"\"\"\n sigma, v, s, regles = set(self.sigma), set(self.v), self.s, self.r\n estBienChomsky = all(\n ( # S -> epsilon\n regle[0] == s and not regle[1]\n ) or ( # A -> a\n len(regle[1]) == 1\n and regle[1][0] in sigma # a in Sigma\n ) or ( # A -> B C\n len(regle[1]) == 2\n and regle[1][0] in v # B in V, not Sigma\n and regle[1][1] in v # C in V, not Sigma\n )\n for regle in regles\n )\n return estBienChomsky and estBienFormee(self)\n\n# On ajoute la fonction comme une méthode (au cas où...)\nGrammaire.estChomsky = estChomsky",
"On peut tester avec les cinq grammaires definies plus haut ($G_1$, $G_2$, $G_3$, $G_4$, $G_5$).\nSeule la grammaire $G_3$ est de Chomsky.",
"for g in [g1, g2, g3, g4, g5]:\n print(g)\n print(\"La grammaire\", g.nom, \"est-elle de bien formée ?\", estBienFormee(g))\n print(\"La grammaire\", g.nom, \"est-elle de Chomsky ?\", estChomsky(g))\n print()",
"À la main, on peut transformer $G_5$ pour la mettre en forme de Chomsky (et après, on passe à CYK).\nNotez que cette transformation est automatique, elle est implémentée dans le cas general (d'une grammaire $G$ bien formée), ci-dessus en partie 5.",
"g6 = Grammaire(\n ['a', 'b'], # Alphabet de production\n ['S', 'T', 'A', 'B'], # Alphabet de travail\n 'S', # Symbole initial (un seul)\n [ # Règles\n ('S', []), # S -> ε, on efface S si on veut produire le mot vide\n # On coupe la règle S -> A S B en deux :\n ('S', ['A', 'T']), # S -> A T\n ('T', ['S', 'B']), # T -> S B\n ('A', ['A', 'A']), # A -> A A, voilà comment on gère a -> a a\n # Production de lettres\n ('A', ['a']), # A -> a\n ('B', ['b']), # B -> b\n ],\n nom=\"G6\"\n)\nprint(g6)\nprint(\"La grammaire\", g6.nom, \"est-elle bien formée ?\", estBienFormee(g6))\nprint(\"La grammaire\", g6.nom, \"est-elle de Chomsky ?\", estChomsky(g6))",
"2.4 (enfin) L'algorithme de Cocke-Kasami-Younger\nOn passe enfin à l'algorithme de Cocke-Kasami-Younger.\nL'algorithme va prendre une grammaire $G$, bien formée, de taille $|G|$ (definie comme la somme des longueurs de $\\Sigma$ et $V$ et la somme des tailles des règles), ainsi qu'un mot $w$ de taille $n = |w|$ (attention, ce n'est pas une str mais une liste de variables List[Var], i.e., une liste de str).\nLe but est de vérifier si le mot $w$ peut être engendrée par la grammaire $G$, i.e., de déterminer si $w \\in L(G)$.\nPour le détail de fonctionnement, cf. le code Python ci dessous, ou la page Wikipedia.\nL'algorithme aura :\n\nune complexité en mémoire en $\\mathcal{O}(|G| + |w|^2)$,\nune complexité en temps en $\\mathcal{O}(|G| \\times |w|^3)$, ce qui montrera que le problème du mot pour les grammaires en forme de Chomsky est dans $\\mathcal{P}$ (en temps polynomial, c'est déjà cool) et en temps raisonnable (cubique en $n = |w|$, c'est encore mieux !).\n\nOn va utiliser une table de hachage E contiendra, à la fin du calcul, les $E_{i, j}$ définis par :\n$$ E_{i, j} := { A \\in V : w[i, j] \\in L_G(A) }.$$\nOu l'on a noté $w[i, j] = w_i \\dots w_j$ le sous-mot d'indices $i,\\dots,j$, et $L_G(A)$ le langage engendré par $G$ en partant du symbole $A$ (et pas du symbole initial $S$).\nNote : la table de hachage n'est pas vraiment requise, une liste de liste fonctionnerait aussi mais la notation en serait moins proche de celle utilisée en maths.",
"def cocke_kasami_younger(self, w):\n \"\"\" Vérifie si le mot w est dans L(G). \"\"\"\n assert estChomsky(self), \"Erreur : {} n'est pas en forme de Chomsky, l'algorithme de Cocke-Kasami-Younger ne fonctionnera pas.\".format(self.nom)\n sigma, v, s, regles = set(self.sigma), set(self.v), self.s, self.r\n n = len(w)\n E = dict() # De taille n^2\n # Cas special pour tester si le mot vide est dans L(G)\n if n == 0:\n return (s, []) in regles, E\n # Boucle en O(n^2)\n for i in range(n):\n for j in range(n):\n E[(i, j)] = set()\n # Boucle en O(n x |G|)\n for i in range(n):\n for regle in regles:\n # Si regle est de la forme : A -> a\n if len(regle[1]) == 1:\n A = regle[0]\n a = regle[1][0]\n if w[i] == a: # Notez que c'est le seul moment ou utilise le mot w !\n E[(i, i)] = E[(i, i)] | {A}\n # Boucle en O(n^3 x |G|)\n for d in range(1, n): # Longueur du morceau\n for i in range(n - d): # Début du morceau\n j = i + d # Fin du morceau, on regarde w[i]..w[j]\n for k in range(i, j): # Parcourt du morceau, ..w[k].., sans la fin\n for regle in regles:\n # Si regle est de la forme A -> B C\n if len(regle[1]) == 2:\n A = regle[0]\n B, C = regle[1]\n if B in E[(i, k)] and C in E[(k + 1, j)]:\n E[(i, j)] = E[(i, j)] | {A}\n # On a fini, il suffit maintenant d'utiliser la table créée par programmation dynamique\n return s in E[(0, n - 1)], E\n\n# On ajoute la fonction comme une méthode (au cas où...)\nGrammaire.genere = cocke_kasami_younger",
"2.5 Exemples\nOn présente ici des exemples d'utilisation de cette fonction cocke_kasami_younger avec les grammaires $G_i$ présentées plus haut et quelques examples de mots $w$.",
"def testeMot(g, w):\n \"\"\" Joli affichage pour un test \"\"\"\n print(\"# Test si w in L(G) :\")\n print(\" Pour\", g.nom, \"et w =\", w)\n estDansLG, E = cocke_kasami_younger(g, w)\n if estDansLG:\n print(\" ==> Ce mot est bien engendré par G !\")\n else:\n print(\" ==> Ce mot n'est pas engendré par G !\")\n return estDansLG, E",
"2.5.1 Avec $G_3$",
"print(g3)\nprint(estChomsky(g3))\n\nw1 = [ \"she \", \"eats \", \"a \", \"fish \", \"with \", \"a \", \"fork \" ] # True\nestDansLG1, E1 = testeMot(g3, w1)",
"Pour cet exemple, on peut afficher la table E (en ne montrant que les cases qui ont un $E_{i, j}$ non-vide) :",
"for k in E1.copy():\n if k in E1 and not E1[k]: # On retire les clés qui ont un E[(i, j)] vide\n del(E1[k])\nprint(E1)",
"",
"w2 = [ \"she \", \"attacks \", \"a \", \"fish \", \"with \", \"a \", \"fork \" ] # False\nestDansLG2, E2 = testeMot(g3, w2)\n\nw3 = [ \"she \", \"eats \", \"an \", \"ork \", \"with \", \"a \", \"sword \" ] # True\nestDansLG3, E3 = testeMot(g3, w3)",
"D'autres exemples :",
"w4 = [ \"she \", \"eats \", \"an \", \"fish \", \"with \", \"a \", \"fork \" ] # False\nestDansLG4, E4 = testeMot(g3, w4)\nw5 = [ \"she \", \"eat \", \"a \", \"fish \", \"with \", \"a \", \"fork \" ] # False\nestDansLG5, E5 = testeMot(g3, w5)\nw6 = [ \"she \", \"eats \", \"a \", \"fish \", \"with \", \"a \", \"fish \" , \"with \", \"a \", \"fish \" , \"with \", \"a \", \"fish \" , \"with \", \"a \", \"fish \" ] # True\nestDansLG6, E6 = testeMot(g3, w6)",
"2.5.2 Avec $G_6$",
"print(g6)\nfor w in [ [], ['a', 'b'], ['a', 'a', 'a', 'b', 'b', 'b'], # True, True, True\n ['a', 'a', 'a', 'a', 'b', 'b', 'b'], # True\n ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b', 'b'], # True\n ['a', 'b', 'a'], ['a', 'a', 'a', 'b', 'b', 'b', 'b'], # False, False\n ['c'], ['a', 'a', 'a', 'c'], # False, False\n ]:\n testeMot(g6, w)",
"2.6 Mise en forme normale de Chomsky (bonus)\nOn pourrait aussi implémenter la mise en forme normale de Chomsky, comme exposée et prouvée dans le développement.\nLa preuve faite dans le développement garantit que la fonction ci-dessous transforme une grammaire $G$ en grammaire équivalente $G'$, avec l'éventuelle perte du mot vide $\\varepsilon$ :\n$$ L(G') = L(G) \\setminus { \\varepsilon }. $$\nL'algorithme aura :\n\nune complexité en mémoire en $\\mathcal{O}(|G|)$,\nune complexité en temps en $\\mathcal{O}(|G| |\\Sigma_G|)$.\n\nC'est un algorithme en deux étapes :\n\nD'abord, on transforme $G$ en $G'$ : on ajoute des variables de travail pour chaque lettre de production, $V_a \\in V$ pour $a \\in \\Sigma$, on remplace chaque $a$ dans des membres gauches de règles par la nouvelle $V_a$, et ensuite on ajoute des règles de production de lettre $V_a \\rightarrow a$ dans $R$,\nEnsuite, $G''$ est obtenue en découpant les règles de $G$ qui sont de tailles $> 2$ : une règle $S \\rightarrow S_1 \\dots S_n$ devient $n-1$ règles : $S \\rightarrow S_1 S_2'$, $S_i' \\rightarrow S_i S_{i+1}'$ (pour $i = 2,\\dots,n - 2$), et $S_{n-1}' \\rightarrow S_{n-1} S_n$. Il faut aussi ajouter toutes ces nouvelles variables $S_i'$ (en s'assurant qu'elles sont uniques, pour chaque règle), on ajoute pour cela le numéro de la règle : $S_i'=$ A'_k pour la k -ième règle et le symbole $S_i=$ A.",
"def miseChomsky(self):\n \"\"\" Met en forme normale de Chomsky la grammaire self, qui doit être bien formée.\n \n - On suppose que l'alphabet Sigma est dans {a,..,z},\n - On suppose que l'alphabet v est dans {A,..,Z}.\n \"\"\"\n assert estBienFormee(self), \"Erreur : {} n'est pas en bien formée, la mise en forme normale de Chomsky ne fonctionnera pas.\".format(self.nom)\n sigma, v, s, regles = set(self.sigma), set(self.v), self.s, self.r\n if estChomsky(self):\n print(\"Info : la grammaire {} est déjà en forme normale de Chomsky, il n'y a rien à faire.\".format(self.nom))\n return Grammaire(sigma, v, s, regles)\n assert sigma < set(chr(i) for i in range(ord('a'), ord('z') + 1)), \"Erreur : {} n'a pas ses lettres de production Sigma dans 'a'..'z' ...\".format(self.nom)\n assert v < set(chr(i) for i in range(ord('A'), ord('Z') + 1)), \"Erreur : {} n'a pas ses lettres de travail V dans 'A'..'Z' ...\".format(self.nom)\n\n # Algorithme en deux étapes, G --> G', puis G' --> G''\n \n # 1. G --> G' : On ajoute des variables de travail et on substitue a -> V_a dans les autres règles\n # On pose les attributs de G', qui vont être changés\n sigma2 = list(sigma)\n v2 = set(v)\n s2 = s\n regles2 = []\n\n V_ = lambda a: 'V_{}'.format(a)\n for a in sigma:\n v2.add(V_(a))\n regles2.append([V_(a), [a]]) # Ajout de la règle V_a -> a (production de la lettre correspondante)\n substitutionLettre = lambda b: V_(b) if (b in sigma) else b\n substitutionMot = lambda lb: [substitutionLettre(b) for b in lb]\n for regle in regles:\n S = regle[0]\n w = regle[1]\n if len(w) >= 2: # Si ce n'est pas une règle A -> epsilon\n regles2.append([S, substitutionMot(w)])\n else: # Ici on devrait garder la possibilte de creer le mot vide\n regles2.append([S, w])\n nom2 = self.nom + \"'\"\n print(Grammaire(list(sigma2), list(v2), s2, regles2, nom=nom2))\n \n # 2. 
G' --> G'' : On découpe les règles A -> A1..An qui ont n > 2\n # On pose les attributs de G'', qui vont être changés\n sigma3 = list(sigma2)\n v3 = set(v2)\n s3 = s2\n regles3 = []\n \n for k, regle in enumerate(regles2):\n S = regle[0]\n w = regle[1] # w = S1 .. Sn\n n = len(w)\n if n > 2:\n prime = lambda Si: \"%s'_%d\" % (Si, k) # Ajouter le k dans le nom assure que les nouvelles variables de travail sont toutes uniques\n # Premiere règle : S -> S_1 S'_2\n regles3.append([S, [w[0], prime(w[1])]])\n v3.add(prime(w[1]))\n for i in range(1, len(w) - 2):\n # Pour chaque règle intermédiaire : S'_i -> S_i S'_{i+1}\n regles3.append([prime(w[i]), [w[i], prime(w[i + 1])]])\n v3.add(prime(w[i]))\n v3.add(prime(w[i + 1]))\n # Dernière règle : S'_{n-1} -> S_{n-1} S_n\n regles3.append([prime(w[n - 2]), [w[n - 2], w[n - 1]]])\n v3.add(prime(w[n - 2]))\n else:\n regles3.append([S, w])\n # Terminé\n nom3 = self.nom + \"''\"\n return Grammaire(list(sigma3), list(v3), s3, regles3, nom=nom3)\n\n# On ajoute la fonction comme une méthode (au cas où...)\nGrammaire.miseChomsky = miseChomsky",
"2.6.1 Exemple pour $G_1$",
"print(g1)\nprint(\"\\n(Non) La grammaire\", g1.nom, \"est-elle de Chomsky ?\", estChomsky(g1))\nprint(\"\\nOn essaie de la mettre sous forme normale de Chomksy...\\n\")\ng1_Chom = miseChomsky(g1)\nprint(g1_Chom)\nprint(\"\\n ==> La grammaire\", g1_Chom.nom, \"est-elle de Chomsky ?\", estChomsky(g1_Chom))",
"2.6.2 Exemple pour $G_6$",
"print(g5)\nprint(\"\\n(Non) La grammaire\", g5.nom, \"est-elle de Chomsky ?\", estChomsky(g5))\nprint(\"\\nOn essaie de la mettre sous forme normale de Chomksy...\\n\")\ng5_Chom = miseChomsky(g5)\nprint(g5_Chom)\nprint(\"\\n ==> La grammaire\", g5_Chom.nom, \"est-elle de Chomsky ?\", estChomsky(g5_Chom))",
"C'est tout pour aujourd'hui les amis !\nAllez voir d'autres notebooks si vous voulez."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
abulbasar/machine-learning
|
Scikit - 08 Clustering.ipynb
|
apache-2.0
|
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.datasets import make_blobs\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.metrics import homogeneity_score\n\nfrom scipy.cluster.hierarchy import linkage, dendrogram\n\nnp.set_printoptions(suppress=True, precision=5)\n\n\n%matplotlib inline\n\nX, y = make_blobs(n_samples = 150, n_features=2, \n centers=3, cluster_std=0.5, shuffle=True, random_state=0)\n\nplt.scatter(X[:, 0], X[:, 1], c = \"steelblue\", marker = \"o\", s = 50)\nplt.xlabel(\"X1\")\nplt.ylabel(\"X2\")\n\nkm = KMeans(n_clusters=3, init=\"random\", n_init = 10, \n max_iter = 300, tol = 1e-04, random_state=0)\ny_km = km.fit_predict(X)\n\ndef show_cluster(X, y, estimator = None, ignore_noise = True):\n levels = set(y)\n \n if ignore_noise and -1 in levels:\n levels.remove(-1)\n \n colors = sns.color_palette(\"husl\", len(levels))\n centroids = None \n if estimator is not None and hasattr(estimator, \"cluster_centers_\"):\n centroids = estimator.cluster_centers_ \n\n for k in levels:\n data = X[y == k, :]\n plt.scatter(data[:, 0], data[:, 1], color = colors[k], s = 50, label = \"Cluster %s\" % k)\n\n if not centroids is None:\n plt.scatter(centroids[:, 0], centroids[:, 1], color = \"black\", marker = \"*\", s = 150)\n\n plt.xlabel(\"X1\")\n plt.ylabel(\"X2\")\n plt.legend(loc = \"lower left\")\n \nshow_cluster(X, y_km, km)\n\nkm.cluster_centers_\n\n#Sum of distances of samples to their closest cluster center.\nprint(\"Distortion (Within Cluster SSE): %.2f\" % km.inertia_)\n\n#Sum of distances of samples to their closest cluster center.\nhomogeneity_score(y, y_km)\n\nX, y = make_blobs(n_samples = 150, n_features=2, centers=3, \n cluster_std=1.0, shuffle=True, random_state=0)\nkm = KMeans(n_clusters=3, init=\"random\", n_init = 10, \n max_iter = 300, tol = 1e-04, random_state=0)\ny_km = 
km.fit_predict(X)\nprint(\"Homogeneity score: \", homogeneity_score(y, y_km), \"Inertia: \", km.inertia_)\nplt.figure(figsize=(10, 5))\nplt.subplot(1, 2, 1)\nshow_cluster(X, y, km)\nplt.title(\"True Clusters\")\nplt.subplot(1, 2, 2)\nshow_cluster(X, y_km, km)\nplt.title(\"Estimated clusters\")",
"Find optimal number of clusters using elbow method",
"def find_elbow(X, n = 10):\n distortions = []\n for i in range(1, n):\n km = KMeans(n_clusters=i, max_iter=300, n_init=10, random_state=0, init=\"k-means++\")\n km.fit(X)\n distortions.append(km.inertia_)\n plt.plot(range(1, n), distortions)\n plt.xlabel(\"Number of clusters (K)\")\n plt.ylabel(\"Distortion\")\n\nfind_elbow(X)",
"Find number of clusters from Dendogram",
"plt.figure(figsize = (15, 10))\nrow_clusters = linkage(X, method=\"complete\", metric=\"euclidean\")\nf = dendrogram(row_clusters)",
"Half Moon Dataset and DBSCAN",
"from sklearn.datasets import make_moons\n\nX, y = make_moons(n_samples=200, noise=0.09, random_state=0)\n\nplt.scatter(X[:, 0], X[:, 1], c = \"steelblue\", marker = \"o\", s = 50)\nplt.xlabel(\"X1\")\nplt.ylabel(\"X2\")\n\nkm = KMeans(n_clusters=2, init=\"random\", n_init = 10, max_iter = 300, tol = 1e-04, random_state=0)\ny_km = km.fit_predict(X)\n#show_cluster(km, X, y_km)\n\nplt.figure(figsize=(10, 5))\nplt.subplot(1, 2, 1)\nshow_cluster(X, y)\nplt.title(\"True Clusters\")\nplt.subplot(1, 2, 2)\nshow_cluster(X, y_km, km)\nplt.title(\"Estimated clusters\")\n\nhomogeneity_score(y, y_km)\n\ndbscan = DBSCAN(eps=0.2, min_samples=10, metric=\"euclidean\")\ny_db = dbscan.fit_predict(X)\n\nplt.figure(figsize=(10, 5))\nplt.subplot(1, 2, 1)\nshow_cluster(X, y, dbscan)\nplt.title(\"True Clusters\")\nplt.subplot(1, 2, 2)\nshow_cluster(X, y_db, dbscan)\nplt.title(\"Estimated clusters\")\n\nlabels = set(y_db)\nif -1 in labels: #Noise\n labels.remove(-1)\nprint(\"No of clusters: \", len(labels))\n\nhomogeneity_score(y, y_db)",
"Applying clustering to grouplens movies dataset based on genre",
"movies = pd.read_csv(\"/data/movielens/movies.csv\", index_col=\"movieId\")\nmovies.head()\n\nmovies.sample(10)\n\nmovies = movies[~movies[\"genres\"].str.contains(\"\\(no genres listed\\)\")]\nmovies.sample(10)\n\ngenres = set()\nmovies[\"genres\"].apply(lambda g: genres.update(g.split(r\"|\")))\ngenres = list(genres)\ngenres.sort()\nprint(genres, len(genres))\n\ndef to_vector(g):\n indices = np.array([genres.index(v) for v in g.split(r\"|\")])\n l = np.zeros(len(genres))\n l[indices] = 1\n return l\n\ngenres_idx = movies[\"genres\"].apply(to_vector)\ngenres_idx.head(10)\n\nX = np.array(genres_idx.tolist())\nprint(\"X.shape: \", X.shape)",
"Normalize the data",
"scaler = StandardScaler()\nX_std = scaler.fit_transform(X)\n\nplt.figure(figsize = (15, 10))\nrow_clusters = linkage(X_std, method=\"complete\", metric=\"euclidean\")\nf = dendrogram(row_clusters, p = 5, truncate_mode=\"level\")",
"To visualize the clusters lets apply PCA with 2 components.",
"from sklearn.decomposition import KernelPCA, PCA\n\npca = PCA(random_state=0)\nX_pca = pca.fit_transform(X_std)\n\nratios = pca.explained_variance_ratio_\nplt.bar(range(len(ratios)), ratios)\nplt.step(range(len(ratios)), np.cumsum(ratios), \n label = \"Cumsum of Explained variance ratio\")\nplt.title(\"Explained variance\")\nplt.ylabel(\"Explained Variance Ratio\")\nplt.xlabel(\"Number of PCA components\")",
"With 2 principle components havelow explained variance coverage.",
"pca = PCA(random_state=0, n_components=2)\nX_pca = pca.fit_transform(X_std)\n\nplt.figure(figsize = (15, 8))\nplt.scatter(X_pca[:, 0], X_pca[:, 1])\nplt.xlabel(\"PCA1\")\nplt.ylabel(\"PCA2\")",
"There is not visual indication of clusters from 2 PCA components which is consistent with the finding that explained variance with 2 components is only 2%",
"find_elbow(X_std, 40)\n\nknn = KMeans(n_clusters=8, max_iter=300, random_state=0)\ny_pred = knn.fit_predict(X_std)",
"For each observations, compute distance to the nearest cluster centroid.",
"def distance(p1, p2):\n p1, p2 = p1.flatten(), p2.flatten()\n return np.sqrt(np.sum((p1 - p2) ** 2))\n\ndistances = []\nfor i in range(X_std.shape[0]):\n p1 = X_std[i, :]\n cluster = knn.labels_[i]\n center = knn.cluster_centers_[cluster]\n distances.append(distance(p1, center))\n\nmovies[\"distance\"] = np.array(distances)\nmovies.sort_values(\"distance\", ascending=False)[:10]\n\nmovies[y_pred == 3].sample(10)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
edosedgar/xs-pkg
|
deep_learning/HW2/homework_part1.ipynb
|
gpl-2.0
|
[
"Homework 2, part 1 (40 points)\nThis warm-up problem set is provided to help you get used to PyTorch.\nPlease, only fill parts marked with \"Your code here\".",
"import numpy as np\nimport math\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport torch\nassert torch.__version__ >= '1.0.0'\n\nimport tqdm",
"To learn best practices $-$ for example,\n\nhow to choose between .sqrt() and .sqrt_(),\nwhen to use .view() and how is it different from .reshape(),\nwhich dtype to use,\n\n$-$ you are expected to google a lot, read tutorials on the Web and study documentation.\nQuick documentation on functions and modules is available with ? and help(), like so:",
"help(torch.sqrt)\n\n# to close the Jupyter help bar, press `Esc` or `q`\n?torch.cat",
"Task 1 (3 points)\nUse tensors only: no lists, loops, numpy arrays etc.\nClarification update:\n\nyou mustn't emulate PyTorch tensors with lists or tuples. Using a list for scaffolding utilities not provided by PyTorch core (e.g. to store model's layers or to group function arguments) is OK;\nno loops;\nyou mustn't use numpy or other tensor libraries except PyTorch.\n\n$\\rho(\\theta)$ is defined in polar coordinate system:\n$$\\rho(\\theta) = (1 + 0.9 \\cdot \\cos{8\\theta} ) \\cdot (1 + 0.1 \\cdot \\cos{24\\theta}) \\cdot (0.9 + 0.05 \\cdot \\cos {200\\theta}) \\cdot (1 + \\sin{\\theta})$$\n\nCreate a regular grid of 1000 values of $\\theta$ between $-\\pi$ and $\\pi$.\nCompute $\\rho(\\theta)$ at these values.\nConvert it into Cartesian coordinates (howto).",
"theta = torch.linspace(-math.pi, math.pi, 1000)\nassert theta.shape == (1000,)\n\nrho = (1 + 0.9 * torch.cos(8 * theta)) * (1 + 0.1 * torch.cos(24 * theta)) * (0.9 + 0.05 * torch.cos(200 * theta)) * (1 + torch.sin(theta))\nassert torch.is_same_size(rho, theta)\n\nx = rho * torch.cos(theta)\ny = rho * torch.sin(theta)\n\n# Run this cell and make sure the plot is correct\nplt.figure(figsize=[6,6])\nplt.fill(x.numpy(), y.numpy(), color='green')\nplt.grid()",
"Task 2 (7 points)\nUse tensors only: no lists, loops, numpy arrays etc.\nClarification update: see task 1.\nWe will implement Conway's Game of Life in PyTorch.\n\nIf you skipped the URL above, here are the rules:\n* You have a 2D grid of cells, where each cell is \"alive\"(1) or \"dead\"(0)\n* At one step in time, the generation update happens:\n * Any living cell that has 2 or 3 neighbors survives, otherwise (0,1 or 4+ neighbors) it dies\n * Any cell with exactly 3 neighbors becomes alive if it was dead\nYou are given a reference numpy implementation of the update step. Your task is to convert it to PyTorch.",
"from scipy.signal import correlate2d as conv2d\n\ndef numpy_update(alive_map):\n # Count neighbours with convolution\n conv_kernel = np.array([[1,1,1],\n [1,0,1],\n [1,1,1]])\n \n num_alive_neighbors = conv2d(alive_map, conv_kernel, mode='same')\n \n # Apply game rules\n born = np.logical_and(num_alive_neighbors == 3, alive_map == 0)\n survived = np.logical_and(np.isin(num_alive_neighbors, [2,3]), alive_map == 1)\n \n np.copyto(alive_map, np.logical_or(born, survived))\n\ndef torch_update(alive_map):\n \"\"\"\n Game of Life update function that does to `alive_map` exactly the same as `numpy_update`.\n \n :param alive_map: `torch.tensor` of shape `(height, width)` and dtype `torch.float32`\n containing 0s (dead) an 1s (alive)\n \"\"\"\n conv_kernel = torch.Tensor([[[[1, 1, 1], [1, 0, 1], [1, 1, 1]]]])\n \n neighbors_map = torch.conv2d(alive_map.unsqueeze(0).unsqueeze(0),\n conv_kernel, padding=1).squeeze()\n born = (neighbors_map == 3) & (alive_map == 0)\n survived = ((neighbors_map == 2) | (neighbors_map == 3)) & (alive_map == 1)\n \n alive_map.copy_(born | survived)\n\n# Generate a random initial map\nalive_map_numpy = np.random.choice([0, 1], p=(0.5, 0.5), size=(100, 100))\nalive_map_torch = torch.tensor(alive_map_numpy).float().clone()\n\nnumpy_update(alive_map_numpy)\ntorch_update(alive_map_torch)\n\n# results should be identical\nassert np.allclose(alive_map_torch.numpy(), alive_map_numpy), \\\n \"Your PyTorch implementation doesn't match numpy_update.\"\nprint(\"Well done!\")\n\n%matplotlib notebook\nplt.ion()\n\n# initialize game field\nalive_map = np.random.choice([0, 1], size=(100, 100))\nalive_map = torch.tensor(alive_map).float()\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nfig.show()\n\nfor _ in range(100):\n torch_update(alive_map)\n \n # re-draw image\n ax.clear()\n ax.imshow(alive_map.numpy(), cmap='gray')\n fig.canvas.draw()\n\n# A fun setup for your amusement\nalive_map = np.arange(100) % 2 + np.zeros([100, 100])\nalive_map[48:52, 50] = 
1\n\nalive_map = torch.tensor(alive_map).float()\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nfig.show()\n\nfor _ in range(150):\n torch_update(alive_map)\n ax.clear()\n ax.imshow(alive_map.numpy(), cmap='gray')\n fig.canvas.draw()",
"More fun with Game of Life: video\nTask 3 (30 points)\nYou have to solve yet another character recognition problem: 10 letters, ~14 000 train samples.\nFor this, we ask you to build a multilayer perceptron (i.e. a neural network of linear layers) from scratch using low-level PyTorch interface.\nRequirements:\n1. at least 82% accuracy\n2. at least 2 linear layers\n3. use softmax followed by categorical cross-entropy\nYou are NOT allowed to use\n* numpy arrays\n* torch.nn, torch.optim, torch.utils.data.DataLoader\n* convolutions\nClarification update:\n\nyou mustn't emulate PyTorch tensors with lists or tuples. Using a list for scaffolding utilities not provided by PyTorch core (e.g. to store model's layers or to group function arguments) is OK;\nyou mustn't use numpy or other tensor libraries except PyTorch;\nthe purpose of part 1 is to make you google and read the documentation a LOT so that you learn which intrinsics PyTorch provides and what are their interfaces. This is why if there is some tensor functionality that is directly native to PyTorch, you mustn't emulate it with loops. Example:\n\n```\nx = torch.rand(1_000_000)\nWrong: slow and unreadable\nfor idx in range(x.numel()):\n x[idx] = math.sqrt(x[idx])\nCorrect\nx.sqrt_()\n```\n\n\nLoops are prohibited except for iterating over\n\n\nparameters (and their companion tensors used by optimizer, e.g. running averages),\n\nlayers,\nepochs (or \"global\" gradient steps if you don't use epoch logic),\nbatches in the dataset (using loops for collecting samples into a batch is not allowed).\n\nTips:\n\nPick random batches (either shuffle data before each epoch or sample each batch randomly).\nDo not initialize weights with zeros (learn why). Gaussian noise with small variance will do.\n50 hidden neurons and a sigmoid nonlinearity will do for a start. 
Many ways to improve.\nTo improve accuracy, consider changing layers' sizes, nonlinearities, optimization methods, weights initialization.\nDon't use GPU yet.\n\nReproducibility requirement: you have to format your code cells so that Cell -> Run All on a fresh notebook reliably trains your model to the desired accuracy in a couple of minutes and reports the accuracy reached.\nHappy googling!",
"np.random.seed(666)\ntorch.manual_seed(666)\n\nfrom notmnist import load_notmnist\nletters = 'ABCDEFGHIJ' \nX_train, y_train, X_test, y_test = map(torch.tensor, load_notmnist(letters=letters))\nX_train.squeeze_()\nX_test.squeeze_();\n\n%matplotlib inline\n\nfig, axarr = plt.subplots(2, 10, figsize=(15,3))\n\nfor idx, ax in enumerate(axarr.ravel()):\n ax.imshow(X_train[idx].numpy(), cmap='gray')\n ax.axis('off')\n ax.set_title(letters[y_train[idx]])",
"The cell below has an example layout for encapsulating your neural network. Feel free to modify the interface if you need to (add arguments, add return values, add methods etc.). For example, you may want to add a method do_gradient_step() that executes one optimization algorithm (SGD / Adadelta / Adam / ...) step.",
"class NeuralNet:\n def __init__(self, lr):\n # Your code here\n self.lr = lr\n self.EPS = 1e-15\n \n # First linear layer\n self.linear1w = torch.randn(784, 300, dtype=torch.float32, requires_grad=True)\n self.linear1b = torch.randn(1, 300, dtype=torch.float32, requires_grad=True)\n \n # Second linear layer\n self.linear2w = torch.randn(300, 10, dtype=torch.float32, requires_grad=True)\n self.linear2b = torch.randn(1, 10, dtype=torch.float32, requires_grad=True)\n \n def predict(self, images):\n \"\"\"\n images: `torch.tensor` of shape `batch_size x height x width`\n and dtype `torch.float32`.\n \n returns: `output`, a `torch.tensor` of shape `batch_size x 10`,\n where `output[i][j]` is the probability of `i`-th\n batch sample to belong to `j`-th class.\n \"\"\"\n def log_softmax(input):\n input = input - torch.max(input, dim=1, keepdim=True)[0]\n return input - torch.log(torch.sum(torch.exp(input), dim=1, keepdim=True))\n\n linear1_out = torch.add(images @ self.linear1w, self.linear1b).clamp(min=0)\n linear2_out = torch.add(linear1_out @ self.linear2w, self.linear2b)\n return log_softmax(linear2_out)\n \n def get_loss(self, input, target):\n\n def nll(input, target):\n return -torch.sum(target * input) /input.shape[0]\n \n return nll(input, target)\n \n def zero_grad(self):\n with torch.no_grad():\n self.linear1w.grad.zero_()\n self.linear1b.grad.zero_()\n self.linear2w.grad.zero_()\n self.linear2b.grad.zero_() \n \n def update_weights(self, loss):\n loss.backward()\n \n with torch.no_grad():\n self.linear1w -= self.lr * self.linear1w.grad\n self.linear1b -= self.lr * self.linear1b.grad\n \n self.linear2w -= self.lr * self.linear2w.grad\n self.linear2b -= self.lr * self.linear2b.grad\n \n self.zero_grad() ",
"Define subroutines for one-hot encoding, accuracy calculation, and batch generation:",
def one_hot_encode(input, classes=10):
    """Return a float one-hot matrix of shape `N x classes` for integer labels."""
    return torch.eye(classes)[input]


def accuracy(model, images, labels):
    """
    Fraction of `images` that `model` classifies correctly.

    model: object with a `predict(images)` method returning per-class scores
    images: `torch.tensor` of shape `N x D`, dtype `torch.float32`
    labels: either a one-hot tensor of shape `N x classes`, or a 1-D tensor
        of class indices of shape `N`. (The original docstring promised
        class indices but the code only handled one-hot; both work now.)

    returns: fraction of correctly classified samples, as a 0-dim tensor
    """
    with torch.no_grad():
        predicted_classes = model.predict(images).argmax(dim=-1)
        # Reduce one-hot targets to indices; pass index labels through.
        target_classes = labels.argmax(dim=-1) if labels.dim() > 1 else labels
        return (predicted_classes == target_classes).float().mean()


class batch_generator:
    """Iterate over (data, targets) in shuffled mini-batches of `batch_size`.

    `images` is a (data, targets) pair; both are shuffled with the same
    permutation so samples stay aligned with their labels.
    """

    def __init__(self, images, batch_size):
        data, targets = images
        permutation = torch.randperm(data.size(0))
        self.images = data[permutation].split(batch_size, dim=0)
        self.targets = targets[permutation].split(batch_size, dim=0)
        self.current = 0
        self.high = len(self.targets)

    def __iter__(self):
        return self

    def __next__(self):
        if self.current >= self.high:
            raise StopIteration
        batch = (self.images[self.current], self.targets[self.current])
        self.current += 1
        return batch
"Prepare dataset: reshape and one-hot encode:",
# Flatten each image from (height, width) into a single feature vector;
# unpacking the shape also asserts the inputs are 3-dimensional.
n_train, _, _ = X_train.shape
n_test, _, _ = X_test.shape
X_train = X_train.reshape(n_train, -1)
X_test = X_test.reshape(n_test, -1)

# One-hot targets for the NLL loss.
y_train_oh = one_hot_encode(y_train)
y_test_oh = one_hot_encode(y_test)

print("Train size: ", X_train.shape)
print("Test size: ", X_test.shape)
"Define model and train",
# Train for `epochs` passes of mini-batch SGD and record the test loss.
model = NeuralNet(1e-2)
batch_size = 128
epochs = 50
loss_history = torch.empty(epochs)  # torch.Tensor(n) is a dated alias for this

for epoch in tqdm.trange(epochs):
    # One pass of mini-batch SGD over the freshly shuffled training set.
    for X_batch, y_batch in batch_generator((X_train, y_train_oh), batch_size):
        predicted = model.predict(X_batch)
        loss = model.get_loss(predicted, y_batch)
        model.update_weights(loss)
    # Evaluate under no_grad: the original built (and kept) a full autograd
    # graph over the whole test set just to read one scalar per epoch.
    # update_weights() already cleared the gradients, so no zero_grad needed.
    with torch.no_grad():
        test_predicted = model.predict(X_test)
        loss_history[epoch] = model.get_loss(test_predicted, y_test_oh)
"Plot loss:",
# Plot the per-epoch validation loss recorded during training.
fig, ax = plt.subplots(figsize=(14, 7))
ax.set_title("Loss")
ax.set_xlabel("#epoch")
ax.set_ylabel("Loss")
ax.plot(loss_history.detach().numpy(), label="Validation loss")
ax.legend(loc='best')
ax.grid()
plt.show()
"Final evaluation:",
# Report final accuracy on both splits, as percentages.
train_acc = 100 * accuracy(model, X_train, y_train_oh)
test_acc = 100 * accuracy(model, X_test, y_test_oh)
print("Train accuracy: %.2f, test accuracy: %.2f" % (train_acc, test_acc))

assert test_acc >= 82.0, "You have to do better"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.