repo_name
stringlengths
6
77
path
stringlengths
8
215
license
stringclasses
15 values
cells
list
types
list
mulhod/reviewer_experience_prediction
jupyter_notebooks/Exploring_Label_Distribution.ipynb
mit
[ "Exploring Label Distribution and Converting Distribution to Bins of Values", "import math\nimport itertools\nfrom collections import Counter\n\nimport numpy as np\nimport scipy as sp\nfrom pymongo import collection\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom src import *\nfrom src.mongodb import *\nfrom src.datasets import *\nfrom src.experiments import *\nfrom data import APPID_DICT", "Games", "list(APPID_DICT.keys())", "Labels", "list(LABELS)", "Issues\n\nThe main concern is that, in order to know whether it will be potentially interesting and worth exploration to do experiments with a certain label, it is necessary to know\nif it can be used as is (raw values), which is unlikely, and, if not,\nhow its distribution can be carved up (specifically, what values for nbins and bin_factor to use in learn, etc.), and\nwhether or not the current algorithm for deciding on the range of included values (i.e., excluding outliers) and making the value bins works or if it needs to be automated somehow (i.e., even potentially using some kind of cluster analysis, perhaps)\n\n\n\nProposed Plan of Action\n\nSome of this information can be collected via functions in the experiments extension, specifically distributional_info and evenly_distribute_samples\nCollect data on the distributions of all of the labels for a subset of games and explore the way that the values are distributed, considering alternate ways that the values could be clustered together", "# Connect to reviews collection\ndb = connect_to_db(host='localhost', port=37017)\n\ndef do_some_distributional_research(db: collection, game: str,\n labels: list = LABELS,\n partition: str = 'all'):\n \"\"\"\n Run the `distributional_info` function and then apply some\n transformations, `nbins`/`bin_factor` values, etc., to the\n results.\n\n Generates distributional information for each combination\n of label, number of bins, bin factor, and transformation.\n\n :param db: MongoDB collection\n :type db: 
collection\n :param game: name of game\n :type game: str\n :param labels: list of labels\n :type labels: list\n :param partition: name of data partition (or 'all' to use all\n data)\n :type partition: str\n\n :yields: tuple of dictionary containing label value\n distribution information and a list of the original\n label values\n :ytype: tuple\n \"\"\"\n\n # Get distributional data for each label via the\n # `distributional_info` function and make some plots, etc.\n transformations = {'None': None,\n 'ln': lambda x: np.log(x) if x > 1 else 0.0,\n '**5': lambda x: x**5.0,\n '**2': lambda x: x**2.0,\n '**0.5': lambda x: x**0.5,\n '**0.25': lambda x: x**0.25}\n nbins_values = [None, 2, 3, 4, 5]\n bin_factor_values = [None, 0.25, 0.5, 5.0, 8.0, 10.0]\n filtered_nbins_bin_factor_product = \\\n filter(lambda x: ((x[0] == None and x[1] == None)\n or (x[0] != None)),\n itertools.product(nbins_values, bin_factor_values))\n\n transformations_dict = {transformation: {} for transformation\n in transformations}\n stats_dicts = {str(label): dict(transformations_dict)\n for label in labels}\n for label in labels:\n\n # Get all raw label values and convert to floats\n raw_label_values = \\\n (list(distributional_info(db,\n label,\n [game],\n partition)\n ['id_strings_labels_dict'].values()))\n raw_label_values = np.array([float(val) for val in raw_label_values])\n raw_values_to_return = raw_label_values\n\n # If the label has percentage values, i.e., values between\n # 0.0 and 1.0 (inclusive), multiply the values by 100 before\n # doing anything else\n # Note: Define these specific labels somewhere!\n if label in LABELS_WITH_PCT_VALUES:\n raw_label_values *= 100.0\n\n # Apply various types of transformations to the data and\n # measure the normality of the resulting distribution, etc.\n for transformation, transformer in transformations.items():\n if transformer:\n label_values = np.array([transformer(x)\n for x in raw_label_values])\n else:\n label_values = 
np.array(raw_label_values)\n\n # Apply various combinations of `nbins`/`bin_factor`\n # values (including not specifying those values)\n label_transformation_string = '{0}_{1}'.format(label, transformation)\n for nbins, bin_factor in filtered_nbins_bin_factor_product:\n nbins_bin_factor_string = '{0}_{1}'.format(nbins, bin_factor)\n stats_dict = {}\n\n # Don't bin the values if `nbins` and `bin_factor` are\n # unspecified\n if not nbins and not bin_factor:\n pass\n else:\n\n # Get min/max values\n _min = np.floor(label_values.min())\n _max = np.ceil(label_values.max())\n\n # If `bin_factor` is unspecified, use the default\n # value, 1.0\n bin_factor = bin_factor if bin_factor else 1.0\n\n # Get bin range tuples and validate\n try:\n bin_ranges = get_bin_ranges(_min, _max, nbins,\n bin_factor)\n except ValueError as e:\n print('Encountered invalid bin_ranges:\\n\\t'\n 'nbins: {0}\\n\\tbin_factor: {1}\\n\\tmin: '\n '{2}\\n\\tmax: {3}\\n\\ttransformation: {4}'\n '\\n\\tlabel: {5}'\n .format(nbins, bin_factor, _min, _max,\n transformation, label))\n continue\n\n # Convert raw values\n stats_dict['bin_ranges'] = bin_ranges\n label_values = np.array([get_bin(bin_ranges, val)\n for val in label_values])\n stats_dict['label_values'] = label_values\n\n # Collect some stats and measurements\n stats_dict.update({'min': label_values.min(),\n 'max': label_values.max(),\n 'std': label_values.std(),\n 'mean': label_values.mean(),\n 'median': np.median(label_values),\n 'mode': sp.stats.mode(label_values).mode[0],\n 'normaltest': sp.stats.normaltest(label_values)})\n\n yield ({label_transformation_string: {nbins_bin_factor_string: stats_dict}},\n raw_values_to_return)\n\n# Let's build up a dictionary of distributional information for each label and\n# for each in a random subset of 3 games\n# Execute a number of times until you get the subset you want\ngames_subset = list(np.random.choice([game for game in APPID_DICT\n if not game.startswith('sample')],\n 3, 
replace=False))\ndist_info_dict = {}\nfor game in games_subset:\n try:\n if dist_info_dict.get(game):\n continue\n dist_info_dict[game] = do_some_distributional_research(db, game)\n except ValueError as e:\n continue\n\n# Each game will have 21 different outputs, so let's break things up a bit\ndist_info_dict_Arma_3 = dist_info_dict['Arma_3']\ndist_info_dict_Team_Fortress_2 = dist_info_dict['Team_Fortress_2']\ndist_info_dict_Counter_Strike = dist_info_dict['Counter_Strike']\n\nArma_3_stats_dicts_all_labels_all_data = do_some_distributional_research(db, 'Arma_3')\n\nnext(Arma_3_stats_dicts_all_labels_all_data)", "Examining the Distribution of Labels for Arma 3", "dist_info_dict_Arma_3.keys()", "num_reviews", "dist_info_dict_Arma_3['num_reviews']['labels_counter']\n\n# Use `get_bin_ranges` to determine the ranges of bins\nnum_reviews_Arma_3 = dist_info_dict_Arma_3['num_reviews']['labels_counter']\nnum_reviews_Arma_3_values = np.array(list(num_reviews_Arma_3.keys()))\nnum_reviews_Arma_3_min_value = num_reviews_Arma_3_values.min()\nnum_reviews_Arma_3_max_value = num_reviews_Arma_3_values.max()\nnum_reviews_Arma_3_bin_ranges_3_1 = get_bin_ranges(num_reviews_Arma_3_min_value,\n num_reviews_Arma_3_max_value,\n nbins=3,\n factor=1.0)\nnum_reviews_Arma_3_bin_ranges_3_1_5 = get_bin_ranges(num_reviews_Arma_3_min_value,\n num_reviews_Arma_3_max_value,\n nbins=3,\n factor=1.5)\nnum_reviews_Arma_3_bin_ranges_3_2 = get_bin_ranges(num_reviews_Arma_3_min_value,\n num_reviews_Arma_3_max_value,\n nbins=3,\n factor=2.0)\nnum_reviews_Arma_3_bin_ranges_3_3 = get_bin_ranges(num_reviews_Arma_3_min_value,\n num_reviews_Arma_3_max_value,\n nbins=3,\n factor=3.0)\nnum_reviews_Arma_3_bin_ranges_2_3 = get_bin_ranges(num_reviews_Arma_3_min_value,\n num_reviews_Arma_3_max_value,\n nbins=2,\n factor=3.0)\nnum_reviews_Arma_3_bin_ranges_2_10 = get_bin_ranges(num_reviews_Arma_3_min_value,\n num_reviews_Arma_3_max_value,\n nbins=2,\n factor=10.0)\nprint(\"bins = 3, bin_factor = 1.0: 
{}\".format(num_reviews_Arma_3_bin_ranges_3_1))\nprint(\"bins = 3, bin_factor = 1.5: {}\".format(num_reviews_Arma_3_bin_ranges_3_1_5))\nprint(\"bins = 3, bin_factor = 2.0: {}\".format(num_reviews_Arma_3_bin_ranges_3_2))\nprint(\"bins = 3, bin_factor = 3.0: {}\".format(num_reviews_Arma_3_bin_ranges_3_3))\nprint(\"bins = 2, bin_factor = 3.0: {}\".format(num_reviews_Arma_3_bin_ranges_2_3))\nprint(\"bins = 2, bin_factor = 10.0: {}\".format(num_reviews_Arma_3_bin_ranges_2_10))\n\nnum_reviews_raw_label_values_Arma_3 = list(dist_info_dict_Arma_3['num_reviews']['id_strings_labels_dict'].values())\n\nplt.hist(list(np.random.normal(200, 100, 1000)))\nplt.title(\"Normal Distribution Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")\n\nplt.hist(num_reviews_raw_label_values_Arma_3)\nplt.title(\"Arma_3 num_reviews Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")\n\nplt.hist(num_reviews_raw_label_values_Arma_3, normed=True)\nplt.title(\"Arma_3 num_reviews Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")\n\nplt.hist([np.log(x) for x in num_reviews_raw_label_values_Arma_3 if x != 0])\nplt.title(\"Log Arma_3 num_reviews Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")\n\nplt.hist([np.log(x) for x in num_reviews_raw_label_values_Arma_3 if x != 0],\n normed=True)\nplt.title(\"Log Arma_3 num_reviews Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")\n\nplt.hist([np.log(x) for x in num_reviews_raw_label_values_Arma_3 if x != 0],\n normed=True, cumulative=True)\nplt.title(\"Log Arma_3 num_reviews Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")\n\nplt.hist([np.log(x + 1) for x in num_reviews_raw_label_values_Arma_3])\nplt.title(\"Log(x + 1) Arma_3 num_reviews Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")\n\nplt.hist([np.log2(x + 1) for x in num_reviews_raw_label_values_Arma_3])\nplt.title(\"Log2(x + 1) Arma_3 num_reviews 
Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")\n\nplt.hist([np.log10(x + 1) for x in num_reviews_raw_label_values_Arma_3])\nplt.title(\"Log10(x + 1) Arma_3 num_reviews Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")\n\nsp.stats.mstats.zscore(num_reviews_raw_label_values_Arma_3)\n\nplt.hist(sp.stats.mstats.zscore(num_reviews_raw_label_values_Arma_3))\nplt.title(\"z-score num_reviews Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")\n\nplt.hist([math.sqrt(x) for x in num_reviews_raw_label_values_Arma_3])\nplt.title(\"sqrt(x) Arma_3 num_reviews Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")\n\nplt.hist([x**2 for x in num_reviews_raw_label_values_Arma_3])\nplt.title(\"x^2 Arma_3 num_reviews Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")", "total_game_hours_bin", "dist_info_dict_Arma_3['total_game_hours_bin']['labels_counter']", "total_game_hours", "dist_info_dict_Arma_3['total_game_hours']['labels_counter']\n\ntotal_game_hours_raw_label_values_Arma_3 = list(dist_info_dict_Arma_3['total_game_hours']['id_strings_labels_dict'].values())\n\nplt.hist([x**0.25 for x in total_game_hours_raw_label_values_Arma_3])\nplt.title(\"Log x Arma_3 total_game_hours Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")", "total_game_hours_last_two_weeks", "dist_info_dict_Arma_3['total_game_hours_last_two_weeks']['labels_counter']", "num_found_helpful", "dist_info_dict_Arma_3['num_found_helpful']['labels_counter']", "num_found_unhelpful", "dist_info_dict_Arma_3['num_found_unhelpful']['labels_counter']", "found_helpful_percentage", "dist_info_dict_Arma_3['found_helpful_percentage']['labels_counter']", "num_voted_helpfulness", "dist_info_dict_Arma_3['num_voted_helpfulness']['labels_counter']", "num_achievements_attained", "dist_info_dict_Arma_3['num_achievements_attained']['labels_counter']\n\nnum_achievements_attained_raw_label_values_Arma_3 = 
list(dist_info_dict_Arma_3['num_achievements_attained']['id_strings_labels_dict'].values())\n\nplt.hist([np.log(x) for x in num_achievements_attained_raw_label_values_Arma_3 if x != 0])\nplt.title(\"Log Arma_3 num_achievements_attained Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")", "num_achievements_percentage", "dist_info_dict_Arma_3['num_achievements_percentage']['labels_counter']\n\nnum_achievements_percentage_raw_label_values_Arma_3 = list(dist_info_dict_Arma_3['num_achievements_percentage']['id_strings_labels_dict'].values())\n\nplt.hist(num_achievements_percentage_raw_label_values_Arma_3)\nplt.title(\"Arma_3 num_achievements_percentage Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")", "num_achievements_possible", "dist_info_dict_Arma_3['num_achievements_possible']['labels_counter']", "num_guides", "dist_info_dict_Arma_3['num_guides']['labels_counter']", "num_workshop_items", "dist_info_dict_Arma_3['num_workshop_items']['labels_counter']", "num_friends", "num_friends_raw_label_values_Arma_3 = list(dist_info_dict_Arma_3['num_friends']['id_strings_labels_dict'].values())\n\nplt.hist([np.log(x) for x in num_friends_raw_label_values_Arma_3 if x != 0])\nplt.title(\"Log Arma_3 num_friends Histogram\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")", "num_games_owned", "dist_info_dict_Arma_3['num_games_owned']['labels_counter']", "num_comments", "dist_info_dict_Arma_3['num_comments']['labels_counter']", "friend_player_level", "dist_info_dict_Arma_3['friend_player_level']['labels_counter']", "num_groups", "dist_info_dict_Arma_3['num_groups']['labels_counter']", "num_screenshots", "dist_info_dict_Arma_3['num_screenshots']['labels_counter']", "num_badges", "dist_info_dict_Arma_3['num_badges']['labels_counter']", "num_found_funny", "dist_info_dict_Arma_3['num_found_funny']['labels_counter']", "Examining the Distribution of Labels for Team Fortress 2", "for label in dist_info_dict_Team_Fortress_2:\n print(\"Label = 
{}\\n\".format(label))\n print(\"{}\\n\".format(dist_info_dict_Team_Fortress_2[label]['labels_counter']))", "Examining the Distribution of Labels for Counter Strike", "for label in dist_info_dict_Counter_Strike:\n print(\"Label = {}\\n\".format(label))\n print(\"{}\\n\".format(dist_info_dict_Counter_Strike[label]['labels_counter']))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
computational-class/computational-communication-2016
code/tutorials-scikit-learn-master/2. Robust and calibrated estimators with Scikit-Learn.ipynb
mit
[ "<center>\n <img src=\"img/scikit-learn-logo.png\" width=\"40%\" />\n <br />\n <h1>Robust and calibrated estimators with Scikit-Learn</h1>\n <br /><br />\n Gilles Louppe (<a href=\"https://twitter.com/glouppe\">@glouppe</a>)\n <br /><br />\n New York University\n</center>", "# Global imports and settings\n\n# Matplotlib\n%matplotlib inline\nfrom matplotlib import pyplot as plt\nplt.rcParams[\"figure.figsize\"] = (8, 8)\nplt.rcParams[\"figure.max_open_warning\"] = -1\n\n# Print options\nimport numpy as np\nnp.set_printoptions(precision=3)\n\n# Slideshow\nfrom notebook.services.config import ConfigManager\ncm = ConfigManager()\ncm.update('livereveal', {'width': 1440, 'height': 768, 'scroll': True, 'theme': 'simple'})\n\n# Silence warnings\nimport warnings\nwarnings.simplefilter(action=\"ignore\", category=FutureWarning)\nwarnings.simplefilter(action=\"ignore\", category=UserWarning)\nwarnings.simplefilter(action=\"ignore\", category=RuntimeWarning)\n\n# Utils\nfrom robustness import plot_surface\nfrom robustness import plot_outlier_detector\n\n%%javascript\nReveal.addEventListener(\"slidechanged\", function(event){ window.location.hash = \"header\"; });", "Motivation\nIn theory,\n- Samples $x$ are drawn from a distribution $P$;\n- As data increases, convergence towards the optimal model is guaranteed. 
\nIn practice,\n- A few samples may be distant from other samples:\n - either because they correspond to rare observations,\n - or because they are due to experimental errors;\n- Because data is finite, outliers might strongly affect the resulting model.\nToday's goal: build models that are robust to outliers!\nOutline\n\nMotivation\nNovelty and anomaly detection\nEnsembling for robustness \nFrom least squares to least absolute deviances\nCalibration\n\nNovelty and anomaly detection\nNovelty detection: \n- Training data is not polluted by outliers, and we are interested in detecting anomalies in new observations.\nOutlier detection:\n- Training data contains outliers, and we need to fit the central mode of the training data, ignoring the deviant observations.\nAPI", "# Unsupervised learning \nestimator.fit(X_train) # no \"y_train\"\n\n# Detecting novelty or outliers \ny_pred = estimator.predict(X_test) # inliers == 1, outliers == -1\ny_score = estimator.decision_function(X_test) # outliers == highest scores\n\n# Generate data\nfrom sklearn.datasets import make_blobs\n\ninliers, _ = make_blobs(n_samples=200, centers=2, random_state=1)\noutliers = np.random.rand(50, 2)\noutliers = np.min(inliers, axis=0) + (np.max(inliers, axis=0) - np.min(inliers, axis=0)) * outliers\n\nX = np.vstack((inliers, outliers))\nground_truth = np.ones(len(X), dtype=np.int)\nground_truth[-len(outliers):] = 0\n\nfrom sklearn.svm import OneClassSVM\nfrom sklearn.covariance import EllipticEnvelope\nfrom sklearn.ensemble import IsolationForest\n\n# Unsupervised learning\nestimator = OneClassSVM(nu=0.4, kernel=\"rbf\", gamma=0.1)\n# clf = EllipticEnvelope(contamination=.1)\n# clf = IsolationForest(max_samples=100)\nestimator.fit(X)\n\nplot_outlier_detector(estimator, X, ground_truth)", "Ensembling for robustness\nBias-variance decomposition\nTheorem. 
For the squared error loss, the bias-variance decomposition of the expected\ngeneralization error at $X=\\mathbf{x}$ is\n$$\n\\mathbb{E}{\\cal L} { Err(\\varphi{\\cal L}(\\mathbf{x})) } = \\text{noise}(\\mathbf{x}) + \\text{bias}^2(\\mathbf{x}) + \\text{var}(\\mathbf{x})\n$$\n<center>\n <img src=\"img/bv.png\" width=\"50%\" />\n</center>\nVariance and robustness\n\nLow variance implies robustness to outliers\nHigh variance implies sensitivity to data pecularities\n\nEnsembling reduces variance\nTheorem. For the squared error loss, the bias-variance decomposition of the expected generalization error at $X=x$ of an ensemble of $M$ randomized models $\\varphi_{{\\cal L},\\theta_m}$ is\n$$\n\\mathbb{E}{\\cal L} { Err(\\psi{{\\cal L},\\theta_1,\\dots,\\theta_M}(\\mathbf{x})) } = \\text{noise}(\\mathbf{x}) + \\text{bias}^2(\\mathbf{x}) + \\text{var}(\\mathbf{x})\n$$\nwhere\n\\begin{align}\n\\text{noise}(\\mathbf{x}) &= Err(\\varphi_B(\\mathbf{x})), \\\n\\text{bias}^2(\\mathbf{x}) &= (\\varphi_B(\\mathbf{x}) - \\mathbb{E}{{\\cal L},\\theta} { \\varphi{{\\cal L},\\theta}(\\mathbf{x}) } )^2, \\\n\\text{var}(\\mathbf{x}) &= \\rho(\\mathbf{x}) \\sigma^2_{{\\cal L},\\theta}(\\mathbf{x}) + \\frac{1 - \\rho(\\mathbf{x})}{M} \\sigma^2_{{\\cal L},\\theta}(\\mathbf{x}).\n\\end{align}", "# Load data\nfrom sklearn.datasets import load_iris\n\niris = load_iris()\nX = iris.data[:, [0, 1]]\ny = iris.target\n\nfrom sklearn.tree import DecisionTreeClassifier\nclf = DecisionTreeClassifier().fit(X, y)\nplot_surface(clf, X, y)\n\nfrom sklearn.ensemble import RandomForestClassifier\nclf = RandomForestClassifier(n_estimators=100).fit(X, y)\nplot_surface(clf, X, y)", "From least squares to least absolute deviances\nRobust learning\n\nMost methods minimize the mean squared error $\\frac{1}{N} \\sum_i (y_i - \\varphi(x_i))^2$\nBy definition, squaring residuals gives emphasis to large residuals.\n\nOutliers are thus very likely to have a significant effect.\n\n\nA robust alternative is to minimize 
instead the mean absolute deviation $\\frac{1}{N} \\sum_i |y_i - \\varphi(x_i)|$\n\nLarge residuals are therefore given much less emphasis.", "# Generate data\nfrom sklearn.datasets import make_regression\n\nn_outliers = 3\nX, y, coef = make_regression(n_samples=100, n_features=1, n_informative=1, noise=10,\n coef=True, random_state=0)\n\nnp.random.seed(1)\nX[-n_outliers:] = 1 + 0.25 * np.random.normal(size=(n_outliers, 1))\ny[-n_outliers:] = -100 + 10 * np.random.normal(size=n_outliers)\n\nplt.scatter(X[:-n_outliers], y[:-n_outliers], color=\"b\")\nplt.scatter(X[-n_outliers:], y[-n_outliers:], color=\"r\")\nplt.xlim(-3, 3)\nplt.ylim(-150, 120)\nplt.show()\n\n# Fit with least squares vs. least absolute deviances\nfrom sklearn.ensemble import GradientBoostingRegressor\n\nclf_ls = GradientBoostingRegressor(loss=\"ls\")\nclf_lad = GradientBoostingRegressor(loss=\"lad\")\nclf_ls.fit(X, y)\nclf_lad.fit(X, y)\n\n# Plot\nX_test = np.linspace(-5, 5).reshape(-1, 1)\nplt.scatter(X[:-n_outliers], y[:-n_outliers], color=\"b\")\nplt.scatter(X[-n_outliers:], y[-n_outliers:], color=\"r\")\nplt.plot(X_test, clf_ls.predict(X_test), \"g\", label=\"Least squares\")\nplt.plot(X_test, clf_lad.predict(X_test), \"y\", label=\"Lead absolute deviances\")\nplt.xlim(-3, 3)\nplt.ylim(-150, 120)\nplt.legend()\nplt.show()", "Robust scaling\n\nStandardization of a dataset is a common requirement for many machine learning estimators. \n\nTypically this is done by removing the mean and scaling to unit variance. \n\n\nFor similar reasons as before, outliers can influence the sample mean / variance in a negative way. 
\n\nIn such cases, the median and the interquartile range often give better results.", "# Generate data\nfrom sklearn.datasets import make_blobs\nfrom sklearn.model_selection import train_test_split\n\nX, y = make_blobs(n_samples=100, centers=[(0, 0), (-1, 0)], random_state=0)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)\nX_train[0, 0] = -1000 # a fairly large outlier\n\n# Scale data\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import RobustScaler\n\nstandard_scaler = StandardScaler()\nXtr_s = standard_scaler.fit_transform(X_train)\nXte_s = standard_scaler.transform(X_test)\n\nrobust_scaler = RobustScaler()\nXtr_r = robust_scaler.fit_transform(X_train)\nXte_r = robust_scaler.transform(X_test)\n\n# Plot data\nfig, ax = plt.subplots(1, 3, figsize=(12, 4))\nax[0].scatter(X_train[:, 0], X_train[:, 1], color=np.where(y_train == 0, 'r', 'b'))\nax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(y_train == 0, 'r', 'b'))\nax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(y_train == 0, 'r', 'b'))\nax[0].set_title(\"Unscaled data\")\nax[1].set_title(\"After standard scaling (zoomed in)\")\nax[2].set_title(\"After robust scaling (zoomed in)\")\n\n# for the scaled data, we zoom in to the data center (outlier can't be seen!)\nfor a in ax[1:]:\n a.set_xlim(-3, 3)\n a.set_ylim(-3, 3)\n \nplt.show()\n\n# Classify using kNN\nfrom sklearn.neighbors import KNeighborsClassifier\n\nknn = KNeighborsClassifier()\nknn.fit(Xtr_s, y_train)\nacc_s = knn.score(Xte_s, y_test)\nprint(\"Test set accuracy using standard scaler: %.3f\" % acc_s)\n\nknn.fit(Xtr_r, y_train)\nacc_r = knn.score(Xte_r, y_test)\nprint(\"Test set accuracy using robust scaler: %.3f\" % acc_r)", "Calibration\n\nIn classification, you often want to predict not only the class label, but also the associated probability.\nHowever, not all classifiers provide well-calibrated probabilities.\nThus, a separate calibration of predicted 
probabilities is often desirable as a postprocessing", "from sklearn.datasets import make_blobs\nfrom sklearn.model_selection import train_test_split\n\n# Generate 3 blobs with 2 classes where the second blob contains\n# half positive samples and half negative samples. Probability in this\n# blob is therefore 0.5.\nX, y = make_blobs(n_samples=10000, n_features=2, cluster_std=1.0, \n centers=[(-5, -5), (0, 0), (5, 5)], shuffle=False)\ny[:len(X) // 2] = 0\ny[len(X) // 2:] = 1\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=42)\n\n# Plot\nfor this_y, color in zip([0, 1], [\"r\", \"b\"]):\n this_X = X_train[y_train == this_y]\n plt.scatter(this_X[:, 0], this_X[:, 1], c=color, alpha=0.2, label=\"Class %s\" % this_y)\nplt.legend(loc=\"best\")\nplt.title(\"Data\")\nplt.show()\n\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.calibration import CalibratedClassifierCV\n\n# Without calibration\nclf = GaussianNB()\nclf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights\nprob_pos_clf = clf.predict_proba(X_test)[:, 1]\n\n# With isotonic calibration\nclf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')\nclf_isotonic.fit(X_train, y_train)\nprob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]\n\n# Plot\norder = np.lexsort((prob_pos_clf, ))\nplt.plot(prob_pos_clf[order], 'r', label='No calibration')\nplt.plot(prob_pos_isotonic[order], 'b', label='Isotonic calibration')\nplt.plot(np.linspace(0, y_test.size, 51)[1::2], y_test[order].reshape(25, -1).mean(1), 'k--', label=r'Empirical')\n\nplt.xlabel(\"Instances sorted according to predicted probability \"\n \"(uncalibrated GNB)\")\nplt.ylabel(\"P(y=1)\")\nplt.legend(loc=\"upper left\")\nplt.title(\"Gaussian naive Bayes probabilities\")\nplt.ylim([-0.05, 1.05])\nplt.show()", "Summary\nFor robust and calibrated estimators:\n- remove outliers before training;\n- reduce variance by ensembling estimators;\n- drive your analysis with loss functions 
that are robust to outliers;\n - avoid the squared error loss!\n- calibrate the output of your classifier if probabilities are important for your problem.", "questions?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
syednasar/datascience
visualize/Matplot Vizualization Sample.ipynb
mit
[ "Matplotlib\nBelow are some code from the code source Data Science from Scratch: https://github.com/joelgrus/data-science-from-scratch", "from matplotlib import pyplot as plt\nimport matplotlib.pyplot as plt\nfrom collections import Counter\n%matplotlib inline \n\nyears = [1950, 1960, 1970, 1980, 1990, 2000, 2010]\ngdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]\n\n# create a line chart, years on x-axis, gdp on y-axis\nplt.plot(years, gdp, color='green', marker='o', linestyle='solid')\n\n# add a title\nplt.title(\"Nominal GDP\")\n\n# add a label to the y-axis\nplt.ylabel(\"Billions of $\")\nplt.show()", "Bar Chart", "movies = [\"Annie Hall\", \"Ben-Hur\", \"Casablanca\", \"Gandhi\", \"West Side Story\"]\nnum_oscars = [5, 11, 3, 8, 10]\n\n# bars are by default width 0.8, so we'll add 0.1 to the left coordinates\n# so that each bar is centered\nxs = [i + 0.1 for i, _ in enumerate(movies)]\n\nprint \"xs\", xs\n\n# plot bars with left x-coordinates [xs], heights [num_oscars]\nplt.bar(xs, num_oscars)\n\nplt.ylabel(\"# of Academy Awards\")\nplt.title(\"My Favorite Movies\")\n\n# label x-axis with movie names at bar centers\nplt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)\n\nplt.show()", "Histogram\nA bar chart can also be a good choice for plotting histograms of bucketed numeric values, in order to visually explore \nhow the values are distributed.", "grades = [83,95,91,87,70,0,85,82,100,67,73,77,0]\ndecile = lambda grade: grade // 10 * 10\n\nhistogram = Counter(decile(grade) for grade in grades)\n\nprint \"Histogram values\", histogram\n\nplt.bar([x - 4 for x in histogram.keys()], # shift each bar to the left by 4\n histogram.values(), # give each bar its correct height\n 8) # give each bar a width of 8\n\nplt.axis([-5, 105, 0, 5]) # x-axis from -5 to 105,\n # y-axis from 0 to 5\n\nplt.xticks([10 * i for i in range(11)]) # x-axis labels at 0, 10, ..., 100\nplt.xlabel(\"Decile\")\nplt.ylabel(\"# of Students\")\nplt.title(\"Distribution of 
Exam 1 Grades\")\nplt.show()\n\nmentions = [500, 505]\nyears = [2013, 2014]\n\nplt.bar([2012.6, 2013.6], mentions, 0.8)\nplt.xticks(years)\nplt.ylabel(\"# of times I heard someone say 'data science'\")\n\n# if you don't do this, matplotlib will label the x-axis 0, 1\n# and then add a +2.013e3 off in the corner (bad matplotlib!)\nplt.ticklabel_format(useOffset=False)\n\n# misleading y-axis only shows the part above 500\nplt.axis([2012.5,2014.5,499,506])\nplt.title(\"Look at the 'Huge' Increase!\")\nplt.show()\n\n\n\nplt.bar([2012.6, 2013.6], mentions, 0.8)\nplt.xticks(years)\nplt.ylabel(\"# of times I heard someone say 'data science'\")\n\n# if you don't do this, matplotlib will label the x-axis 0, 1\n# and then add a +2.013e3 off in the corner (bad matplotlib!)\nplt.ticklabel_format(useOffset=False)\n\n\nplt.axis([2012.5,2014.5,0,550])\nplt.title(\"Not So Huge Anymore\")\nplt.show()", "Line Charts", "variance = [1, 2, 4, 8, 16, 32, 64, 128, 256]\nbias_squared = [256, 128, 64, 32, 16, 8, 4, 2, 1]\ntotal_error = [x + y for x, y in zip(variance, bias_squared)]\nxs = [i for i, _ in enumerate(variance)]\n\n# we can make multiple calls to plt.plot\n# to show multiple series on the same chart\nplt.plot(xs, variance, 'g-', label='variance') # green solid line\nplt.plot(xs, bias_squared, 'r-.', label='bias^2') # red dot-dashed line\nplt.plot(xs, total_error, 'b:', label='total error') # blue dotted line\n\n# because we've assigned labels to each series\n# we can get a legend for free\n# loc=9 means \"top center\"\nplt.legend(loc=9)\nplt.xlabel(\"model complexity\")\nplt.title(\"The Bias-Variance Tradeoff\")\nplt.show()", "Scatterplots\nA scatterplot is the right choice for visualizing the relationship between two paired sets of data.", "friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]\nminutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]\nlabels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\n\nplt.scatter(friends, minutes)\n\n# label each point\nfor label, friend_count, 
minute_count in zip(labels, friends, minutes):\n plt.annotate(label,\n xy=(friend_count, minute_count), # put the label with its point\n xytext=(5, -5), # but slightly offset\n textcoords='offset points')\n\nplt.title(\"Daily Minutes vs. Number of Friends\")\nplt.xlabel(\"# of friends\")\nplt.ylabel(\"daily minutes spent on the site\")\nplt.show()", "Wrong plot\nAdded plt.axis(\"equal\") to fix the scale", "test_1_grades = [ 99, 90, 85, 97, 80]\ntest_2_grades = [100, 85, 60, 90, 70]\n\nplt.scatter(test_1_grades, test_2_grades)\nplt.axis(\"equal\") ##to fix the scale\nplt.title(\"Axes Aren't Comparable\")\nplt.xlabel(\"test 1 grade\")\nplt.ylabel(\"test 2 grade\")\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
nick-youngblut/SIPSim
ipynb/bac_genome/fullCyc/trimDataset/rep4_DBL-comm_bw_HR.ipynb
mit
[ "Goal\n\nTrying varying levels of bandwidth and DBL scaling with pre-fractionation abundances ('DBL-comm')\nVarying parameters\nbandwidth (bw)\n0.2, 0.6, 1\n\n\ndiffusive boundary layer (DBL) scaling (DBL scaling by abundance)\n0.15, 0.2, 0.25\n\n\nThis notebook is the same as rep3_DBL-comm_bw but more narrow parameter ranges\n\nInit", "import os\nimport glob\nimport re\nimport nestly\n\n%load_ext rpy2.ipython\n%load_ext pushnote\n\n%%R\nlibrary(ggplot2)\nlibrary(dplyr)\nlibrary(tidyr)\nlibrary(gridExtra)\nlibrary(phyloseq)", "BD min/max", "%%R\n## min G+C cutoff\nmin_GC = 13.5\n## max G+C cutoff\nmax_GC = 80\n## max G+C shift\nmax_13C_shift_in_BD = 0.036\n\n\nmin_BD = min_GC/100.0 * 0.098 + 1.66 \nmax_BD = max_GC/100.0 * 0.098 + 1.66 \n\nmax_BD = max_BD + max_13C_shift_in_BD\n\ncat('Min BD:', min_BD, '\\n')\ncat('Max BD:', max_BD, '\\n')", "Nestly\n\nassuming fragments already simulated", "workDir = '/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/'\nbuildDir = os.path.join(workDir, 'rep4_DBL-comm_bw_HR')\nR_dir = '/home/nick/notebook/SIPSim/lib/R/'\n\nfragFile = '/home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_kde_parsed.pkl'\ncommFile = '/home/nick/notebook/SIPSim/dev/fullCyc/fullCyc_12C-Con_trm_comm.txt'\n\n# emperical data for validation\nemp_shan_file = '/home/nick/notebook/SIPSim/dev/fullCyc_trim/SIP-core_unk_shan.txt'\nemp_BDspan_file = '/home/nick/notebook/SIPSim/dev/fullCyc_trim/SIP-core_unk_trm_BD-span.txt'\nemp_corr_file = '/home/nick/notebook/SIPSim/dev/fullCyc_trim/SIP-core_unk_trm_corr.txt'\n\nnreps = 4\n\n# building tree structure\nnest = nestly.Nest()\n\n# varying params\nnest.add('DBL_scaling', [0.15, 0.2, 0.25])\nnest.add('bandwidth', [0.2, 0.6, 1])\nnest.add('rep', [x + 1 for x in xrange(nreps)])\n\n\n## set params\nnest.add('abs', ['1e9'], create_dir=False)\nnest.add('percIncorp', [0], create_dir=False)\nnest.add('percTaxa', [0], create_dir=False)\nnest.add('np', [6], 
create_dir=False)\nnest.add('subsample_dist', ['lognormal'], create_dir=False)\nnest.add('subsample_mean', [9.432], create_dir=False)\nnest.add('subsample_scale', [0.5], create_dir=False)\nnest.add('subsample_min', [10000], create_dir=False)\nnest.add('subsample_max', [30000], create_dir=False)\n\n### input/output files\nnest.add('buildDir', [buildDir], create_dir=False)\nnest.add('R_dir', [R_dir], create_dir=False)\nnest.add('fragFile', [fragFile], create_dir=False)\nnest.add('commFile', [commFile], create_dir=False)\n\n\n# building directory tree\nnest.build(buildDir)\n\n# bash file to run\nbashFile = os.path.join(buildDir, 'SIPSimRun.sh')\n\n%%writefile $bashFile\n#!/bin/bash\n\nexport PATH={R_dir}:$PATH\n\necho '#-- SIPSim pipeline --#'\n\necho '# shuffling taxa in comm file'\ncomm_shuffle_taxa.r {commFile} > comm.txt\n\n \necho '# adding diffusion' \nSIPSim diffusion \\\n {fragFile} \\\n --bw {bandwidth} \\\n --np {np} \\\n > ampFrags_KDE_dif.pkl \n\necho '# adding DBL contamination; abundance-weighted smearing'\nSIPSim DBL \\\n ampFrags_KDE_dif.pkl \\\n --comm comm.txt \\\n --commx {DBL_scaling} \\\n --bw {bandwidth} \\\n --np {np} \\\n > ampFrags_KDE_dif_DBL.pkl\n \necho '# making incorp file'\nSIPSim incorpConfigExample \\\n --percTaxa {percTaxa} \\\n --percIncorpUnif {percIncorp} \\\n > {percTaxa}_{percIncorp}.config\n\necho '# adding isotope incorporation to BD distribution'\nSIPSim isotope_incorp \\\n ampFrags_KDE_dif_DBL.pkl \\\n {percTaxa}_{percIncorp}.config \\\n --comm comm.txt \\\n --bw {bandwidth} \\\n --np {np} \\\n > ampFrags_KDE_dif_DBL_inc.pkl\n\necho '# simulating gradient fractions'\nSIPSim gradient_fractions \\\n comm.txt \\\n > fracs.txt \n\necho '# simulating an OTU table'\nSIPSim OTU_table \\\n ampFrags_KDE_dif_DBL_inc.pkl \\\n comm.txt \\\n fracs.txt \\\n --abs {abs} \\\n --np {np} \\\n > OTU_abs{abs}.txt\n \n#-- w/ PCR simulation --#\necho '# simulating PCR'\nSIPSim OTU_PCR \\\n OTU_abs{abs}.txt \\\n > OTU_abs{abs}_PCR.txt \n \necho '# 
subsampling from the OTU table (simulating sequencing of the DNA pool)'\nSIPSim OTU_subsample \\\n --dist {subsample_dist} \\\n --dist_params mean:{subsample_mean},sigma:{subsample_scale} \\\n --min_size {subsample_min} \\\n --max_size {subsample_max} \\\n OTU_abs{abs}_PCR.txt \\\n > OTU_abs{abs}_PCR_sub.txt\n \necho '# making a wide-formatted table'\nSIPSim OTU_wideLong -w \\\n OTU_abs{abs}_PCR_sub.txt \\\n > OTU_abs{abs}_PCR_sub_w.txt\n \necho '# making metadata (phyloseq: sample_data)'\nSIPSim OTU_sampleData \\\n OTU_abs{abs}_PCR_sub.txt \\\n > OTU_abs{abs}_PCR_sub_meta.txt\n \n\n#-- w/out PCR simulation --# \necho '# subsampling from the OTU table (simulating sequencing of the DNA pool)'\nSIPSim OTU_subsample \\\n --dist {subsample_dist} \\\n --dist_params mean:{subsample_mean},sigma:{subsample_scale} \\\n --min_size {subsample_min} \\\n --max_size {subsample_max} \\\n OTU_abs{abs}.txt \\\n > OTU_abs{abs}_sub.txt\n \necho '# making a wide-formatted table'\nSIPSim OTU_wideLong -w \\\n OTU_abs{abs}_sub.txt \\\n > OTU_abs{abs}_sub_w.txt\n \necho '# making metadata (phyloseq: sample_data)'\nSIPSim OTU_sampleData \\\n OTU_abs{abs}_sub.txt \\\n > OTU_abs{abs}_sub_meta.txt \n \n \n \n#-- making summary tables --#\n# PCR\nshannon_calc.r OTU_abs{abs}_PCR_sub.txt > OTU_abs{abs}_PCR_sub_shan.txt\nBD_span_calc.r OTU_abs{abs}_PCR_sub.txt comm.txt > OTU_abs{abs}_PCR_sub_BD-span.txt\ncorrelogram_make.r OTU_abs{abs}_PCR_sub.txt > OTU_abs{abs}_PCR_sub_corr.txt \n# no PCR\nshannon_calc.r OTU_abs{abs}_sub.txt > OTU_abs{abs}_sub_shan.txt\nBD_span_calc.r OTU_abs{abs}_sub.txt comm.txt > OTU_abs{abs}_sub_BD-span.txt\ncorrelogram_make.r OTU_abs{abs}_sub.txt > OTU_abs{abs}_sub_corr.txt\n\n!chmod 777 $bashFile\n!cd $workDir; \\\n nestrun --template-file $bashFile -d rep4_DBL-comm_bw_HR --log-file log.txt -j 3", "Notes\n\nErrors due to memory limitations\nre-ran these simulations\n\nComparing to emperical data\n\ncorrelation/regression analyses of metrics on community composition", 
"%%R\n\n# function for loading dataset files\nload.data.files = function(sim.files, emp.file){\n # loading\n ## simulations\n df = list()\n for(x in sim.files){\n # simulation\n tmp = read.delim(x, sep='\\t')\n xx = strsplit(x, '/')[[1]]\n tmp$DBL_scale = xx[10] %>% as.numeric\n tmp$bw = xx[11] %>% as.numeric\n tmp$SIM_rep = xx[12] %>% as.numeric \n tmp$dataset = 'Simulation' \n df[[x]] = tmp \n \n # emperical (matched for each simulation)\n if(xx[12] %>% as.numeric == 1){\n tmp = read.delim(emp.file, sep='\\t')\n tmp$DBL_scale = xx[10] %>% as.numeric\n tmp$bw = xx[11] %>% as.numeric\n tmp$SIM_rep = 1\n tmp$dataset = 'Emperical' \n xy = paste0(x, '_EMP')\n df[[xy]] = tmp\n }\n }\n df = do.call(rbind, df) %>% as.data.frame \n rownames(df) = 1:nrow(df)\n\n # return\n return(df)\n }", "Shannon index", "sim_shan_files = !find $buildDir -name \"OTU_abs1e9_PCR_sub_shan.txt\"\nprint len(sim_shan_files)\nprint emp_shan_file\n\n%%R -i sim_shan_files -i emp_shan_file\n\ndf.shan = load.data.files(sim_shan_files, emp_shan_file) \ndf.shan %>% tail(n=3)\n\n%%R -w 800 -h 600\n# summarizing\ndf.shan.s = df.shan %>%\n group_by(dataset, bw, DBL_scale, BD_bin = ntile(Buoyant_density, 24)) %>%\n summarize(mean_shannon = mean(shannon), \n sd_shannon = sd(shannon), \n mean_BD = mean(Buoyant_density))\n\nggplot(df.shan.s, aes(mean_BD, mean_shannon, color=dataset,\n ymin=mean_shannon-sd_shannon, ymax=mean_shannon+sd_shannon)) +\n geom_pointrange() +\n facet_grid(DBL_scale ~ bw) +\n labs(x='Buoyant density (binned; 24 bins)', y='Shannon index') +\n theme_bw() +\n theme(\n text = element_text(size=16)\n )\n\n%%R -w 650 -h 600\n# pairwise correlations for each dataset\ndf.shan.bin = df.shan %>%\n group_by(BD_bin = ntile(Buoyant_density, 24))\n\n#calc.spearman = function(x){\n# cor(x[,'shannon.x'], x['shannon.y'], method='spearman')[1,1]\n#}\n\ncalc.pearson = function(x){\n cor(x[,'shannon.x'], x['shannon.y'], method='pearson')[1,1]\n}\n\n\ndf.shan.corr = inner_join(df.shan.bin, df.shan.bin, 
c('BD_bin' = 'BD_bin',\n 'bw' = 'bw',\n 'DBL_scale' = 'DBL_scale')) %>%\n group_by(bw, DBL_scale, dataset.x, dataset.y) %>%\n nest() %>%\n mutate(model = purrr::map(data, calc.pearson)) %>%\n unnest(pearson = model %>% purrr::map(function(x) x)) %>%\n ungroup() %>%\n select(-data, -model) %>%\n mutate(pearson_txt = round(pearson, 2))\n\n \n# plotting\nggplot(df.shan.corr, aes(dataset.x, dataset.y, fill=pearson)) +\n geom_tile() +\n geom_text(aes(label=pearson_txt), color='white', size=6) +\n scale_fill_gradient(low='black', high='red') +\n labs(title='Shannon index') +\n facet_grid(DBL_scale ~ bw) + \n theme(\n text = element_text(size=16)\n ) ", "BD spans", "sim_BDspan_files = !find $buildDir -name \"OTU_abs1e9_PCR_sub_BD-span.txt\"\nprint len(sim_BDspan_files)\nprint emp_BDspan_file\n\n%%R -i sim_BDspan_files -i emp_BDspan_file\ndf.BDspan = load.data.files(sim_BDspan_files, emp_BDspan_file) \ndf.BDspan %>% head\n\n%%R -w 700 -h 600\n\n# plotting\nggplot(df.BDspan, aes(mean_preFrac_abund, BD_range_perc, fill=dataset)) +\n geom_hex(alpha=0.5) +\n scale_x_log10() +\n facet_grid(DBL_scale ~ bw) +\n labs(x='Pre-fractionation abundance', y='BD span') +\n theme_bw() +\n theme(\n text = element_text(size=16)\n )\n\n%%R -i sim_BDspan_files -i emp_BDspan_file\n\n# binning by pre-fractionation abundances\nn.tile = 20\ndf.BDspan = df.BDspan %>%\n group_by(dataset, library, DBL_scale, bw, preFrac_abund_bin = ntile(mean_preFrac_abund, n.tile)) %>%\n summarize(mean_preFrac_abund = mean(mean_preFrac_abund),\n var_BD_range = var(BD_range),\n sd_BD_range = sd(BD_range))\n\ndf.BDspan %>% tail(n=3)\n\n%%R -w 650 -h 600\ncalc.spearman = function(x){\n cor(x[,'var_BD_range.x'], x['var_BD_range.y'], method='spearman')[1,1]\n}\n\ndf.BDspan.corr = inner_join(df.BDspan, df.BDspan, c('preFrac_abund_bin' = 'preFrac_abund_bin',\n 'DBL_scale' = 'DBL_scale',\n 'bw' = 'bw')) %>%\n group_by(DBL_scale, bw, dataset.x, dataset.y) %>%\n nest() %>%\n mutate(model = purrr::map(data, calc.spearman)) 
%>%\n unnest(spearman = model %>% purrr::map(function(x) x)) %>%\n ungroup() %>%\n select(-data, -model) %>%\n mutate(spearman_txt = round(spearman, 2))\n\n\n# plotting\nggplot(df.BDspan.corr, aes(dataset.x, dataset.y, fill=spearman)) +\n geom_tile() +\n geom_text(aes(label=spearman_txt), color='white', size=6) +\n scale_fill_gradient(low='black', high='red') +\n labs(title='BD span') +\n facet_grid(DBL_scale ~ bw) +\n theme(\n text = element_text(size=16)\n ) ", "correlograms (jaccard ~ BD)", "sim_corr_files = !find $buildDir -name \"OTU_abs1e9_PCR_sub_corr.txt\"\nprint len(sim_corr_files)\nprint emp_corr_file\n\n%%R -i sim_corr_files -i emp_corr_file\n\ndf.corr = load.data.files(sim_corr_files, emp_corr_file) \n\n# binning\ndf.corr = df.corr %>%\n filter(!is.na(Mantel.corr)) %>%\n group_by(DBL_scale, bw, dataset, library, class.index.bin = ntile(class.index, 12)) \n\ndf.corr %>% tail(n=3) %>% as.data.frame\n\n%%R -w 800 -h 600\n# plotting\ndf.corr.s = df.corr %>%\n group_by(DBL_scale, bw, dataset, class.index.bin) %>%\n summarize(mean_Mantel.corr = mean(Mantel.corr),\n sd_Mantel.corr = sd(Mantel.corr), \n mean_class.index = mean(class.index))\n\nggplot(df.corr.s, aes(mean_class.index, mean_Mantel.corr, color=dataset,\n ymin=mean_Mantel.corr-sd_Mantel.corr,\n ymax=mean_Mantel.corr+sd_Mantel.corr)) +\n geom_pointrange() +\n labs(x='Class index (binned; 12 bins)', y='Mantel correlation coef.') +\n facet_grid(DBL_scale ~ bw) + \n theme_bw() +\n theme(\n text = element_text(size=16)\n )\n\n%%R -w 700 -h 600\n# pairwise correlations for each dataset\ndf.shan.bin = df.shan %>%\n group_by(BD_bin = ntile(Buoyant_density, 24))\n\ncalc.pearson = function(x){\n cor(x[,'Mantel.corr.x'], x['Mantel.corr.y'], method='pearson')[1,1]\n}\n\ndf.corr.lm = inner_join(df.corr, df.corr, c('class.index.bin' = 'class.index.bin',\n 'bw' = 'bw',\n 'DBL_scale' = 'DBL_scale')) %>%\n group_by(bw, DBL_scale, dataset.x, dataset.y) %>%\n nest() %>%\n mutate(model = purrr::map(data, calc.pearson)) 
%>%\n unnest(pearson = model %>% purrr::map(function(x) x)) %>%\n ungroup() %>%\n select(-data, -model) %>%\n mutate(pearson_txt = round(pearson, 2))\n\n \n# plotting\nggplot(df.corr.lm, aes(dataset.x, dataset.y, fill=pearson)) +\n geom_tile() +\n geom_text(aes(label=pearson_txt), color='white', size=6) +\n scale_fill_gradient(low='black', high='red') +\n labs(title='Beta diversity correlogram') +\n facet_grid(DBL_scale ~ bw) + \n theme(\n text = element_text(size=16)\n ) ", "Summary plots for all simulations", "course_data_dir = \"/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/rep4_DBL-comm_bw/\"", "Shannon", "sim_shan_files1 = !find $course_data_dir -name \"OTU_abs1e9_PCR_sub_shan.txt\"\nto_rm = '/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/rep4_DBL-comm_bw/0.2/0.6'\nsim_shan_files1 = [x for x in sim_shan_files1 if not x.startswith(to_rm)]\nsim_shan_files2 = !find $buildDir -name \"OTU_abs1e9_PCR_sub_shan.txt\"\nsim_shan_files = sim_shan_files1 + sim_shan_files2\n\nprint len(sim_shan_files)\n\n%%R -i sim_shan_files -i emp_shan_file\n\ndf.shan = load.data.files(sim_shan_files, emp_shan_file) \ndf.shan %>% tail(n=3)\n\n%%R -h 300\n# pairwise correlations for each dataset\ndf.shan.bin = df.shan %>%\n group_by(BD_bin = ntile(Buoyant_density, 24))\n\ncalc.pearson = function(x){\n cor(x[,'shannon.x'], x['shannon.y'], method='pearson')[1,1]\n}\n\n\ndf.shan.corr = inner_join(df.shan.bin, df.shan.bin, c('BD_bin' = 'BD_bin',\n 'bw' = 'bw',\n 'DBL_scale' = 'DBL_scale')) %>%\n group_by(bw, DBL_scale, dataset.x, dataset.y) %>%\n nest() %>%\n mutate(model = purrr::map(data, calc.pearson)) %>%\n unnest(pearson = model %>% purrr::map(function(x) x)) %>%\n ungroup() %>%\n select(-data, -model) %>%\n mutate(pearson_txt = round(pearson, 2))\n\n \n# getting emperical-emperical corr \nemp.val = df.shan.corr %>% \n filter((dataset.x == 'Emperical' &\n dataset.y == 'Emperical')) %>%\n group_by() %>%\n summarize(max_value = max(pearson)) %>%\n 
ungroup() %>%\n select(max_value) %>% as.matrix %>% as.vector\nemp.val = emp.val[1] \n \n# filtering \ndf.shan.corr.f = df.shan.corr %>% \n filter((dataset.x == 'Simulation' &\n dataset.y == 'Emperical')) %>%\n mutate(DBL_scale = DBL_scale %>% as.character,\n bw = bw %>% as.character,\n gt_emp = ifelse(pearson > emp.val, 'bold.italic', 'plain')) %>%\n complete(DBL_scale, bw)\ndf.shan.corr.f %>% head(n=3)\n \n \n# plotting\nggplot(df.shan.corr.f, aes(DBL_scale,bw, fill=pearson)) +\n geom_tile() +\n geom_text(aes(label=pearson_txt,fontface=gt_emp), color='white', size=6) +\n scale_color_manual(values=c('white', 'black')) +\n scale_fill_gradient('Pearson', low='black', high='red') +\n labs(title='Shannon index', x='DBL scaling', y='KDE Bandwidth') + \n theme(\n text = element_text(size=16)\n ) ", "BD span", "sim_BDspan_files1 = !find $course_data_dir -name \"OTU_abs1e9_PCR_sub_BD-span.txt\"\nto_rm = '/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/rep4_DBL-comm_bw/0.2/0.6'\nsim_BDspan_files1 = [x for x in sim_BDspan_files1 if not x.startswith(to_rm)]\nsim_BDspan_files2 = !find $buildDir -name \"OTU_abs1e9_PCR_sub_BD-span.txt\"\nsim_BDspan_files = sim_BDspan_files1 + sim_BDspan_files2\n\nprint len(sim_BDspan_files)\n\n%%R -i sim_BDspan_files -i emp_BDspan_file\n\ndf.BDspan = load.data.files(sim_BDspan_files, emp_BDspan_file) \ndf.BDspan %>% head(n=3)\n\n%%R \n\n# binning by pre-fractionation abundances\nn.tile = 20\ndf.BDspan = df.BDspan %>%\n group_by(dataset, library, DBL_scale, bw, preFrac_abund_bin = ntile(mean_preFrac_abund, n.tile)) %>%\n summarize(mean_preFrac_abund = mean(mean_preFrac_abund),\n var_BD_range = var(BD_range),\n sd_BD_range = sd(BD_range))\n\ndf.BDspan %>% tail(n=3)\n\n%%R -h 300\ncalc.spearman = function(x){\n cor(x[,'var_BD_range.x'], x['var_BD_range.y'], method='spearman')[1,1]\n}\n\ndf.BDspan.corr = inner_join(df.BDspan, df.BDspan, c('preFrac_abund_bin' = 'preFrac_abund_bin',\n 'DBL_scale' = 'DBL_scale',\n 'bw' = 'bw')) %>%\n 
group_by(DBL_scale, bw, dataset.x, dataset.y) %>%\n nest() %>%\n mutate(model = purrr::map(data, calc.spearman)) %>%\n unnest(spearman = model %>% purrr::map(function(x) x)) %>%\n ungroup() %>%\n select(-data, -model) %>%\n mutate(spearman_txt = round(spearman, 2))\n\n \n# getting emperical-emperical corr \nemp.val = df.BDspan.corr %>% \n filter((dataset.x == 'Emperical' &\n dataset.y == 'Emperical')) %>%\n group_by() %>%\n summarize(max_value = max(spearman, na.rm=TRUE)) %>%\n ungroup() %>%\n select(max_value) %>% as.matrix %>% as.vector\nemp.val = emp.val[1] \n \n# filtering \ndf.BDspan.corr.f = df.BDspan.corr %>% \n filter((dataset.x == 'Simulation' &\n dataset.y == 'Emperical')) %>%\n mutate(DBL_scale = DBL_scale %>% as.character,\n bw = bw %>% as.character,\n gt_emp = ifelse(spearman > emp.val, 'bold.italic', 'plain')) %>%\n complete(DBL_scale, bw)\n \n# plotting\nggplot(df.BDspan.corr.f, aes(DBL_scale, bw, fill=spearman)) +\n geom_tile() +\n geom_text(aes(label=spearman_txt, fontface=gt_emp), color='white', size=6) +\n scale_color_manual(values=c('white', 'black')) +\n scale_fill_gradient('Spearman', low='black', high='red') +\n labs(title='BD span', x='DBL scaling', y='KDE Bandwidth') + \n theme(\n text = element_text(size=16)\n ) ", "correlogram", "sim_corr_files1 = !find $course_data_dir -name \"OTU_abs1e9_PCR_sub_corr.txt\"\nto_rm = '/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/rep4_DBL-comm_bw/0.2/0.6'\nsim_corr_files1 = [x for x in sim_corr_files1 if not x.startswith(to_rm)]\nsim_corr_files2 = !find $buildDir -name \"OTU_abs1e9_PCR_sub_corr.txt\"\nsim_corr_files = sim_corr_files1 + sim_corr_files2\n\nprint len(sim_corr_files)\n\n%%R -i sim_corr_files -i emp_corr_file\n\ndf.corr = load.data.files(sim_corr_files, emp_corr_file) \n\n# binning\ndf.corr = df.corr %>%\n filter(!is.na(Mantel.corr)) %>%\n group_by(DBL_scale, bw, dataset, library, class.index.bin = ntile(class.index, 12)) \n\ndf.corr %>% tail(n=3) %>% as.data.frame\n\n%%R -h 
300\n# pairwise correlations for each dataset\ndf.shan.bin = df.shan %>%\n group_by(BD_bin = ntile(Buoyant_density, 24))\n\ncalc.pearson = function(x){\n cor(x[,'Mantel.corr.x'], x['Mantel.corr.y'], method='pearson')[1,1]\n}\n\ndf.corr.lm = inner_join(df.corr, df.corr, c('class.index.bin' = 'class.index.bin',\n 'bw' = 'bw',\n 'DBL_scale' = 'DBL_scale')) %>%\n group_by(bw, DBL_scale, dataset.x, dataset.y) %>%\n nest() %>%\n mutate(model = purrr::map(data, calc.pearson)) %>%\n unnest(pearson = model %>% purrr::map(function(x) x)) %>%\n ungroup() %>%\n select(-data, -model) %>%\n mutate(pearson_txt = round(pearson, 2))\n\n\n# getting emperical-emperical corr \nemp.val = df.corr.lm %>% \n filter((dataset.x == 'Emperical' &\n dataset.y == 'Emperical')) %>%\n group_by() %>%\n summarize(max_value = max(pearson)) %>%\n ungroup() %>%\n select(max_value) %>% as.matrix %>% as.vector\nemp.val = emp.val[1]\nprint(emp.val) \n \n# filtering \ndf.corr.lm.f = df.corr.lm %>% \n filter((dataset.x == 'Simulation' &\n dataset.y == 'Emperical')) %>%\n mutate(DBL_scale = DBL_scale %>% as.character,\n bw = bw %>% as.character,\n gt_emp = ifelse(pearson >= emp.val, 'bold.italic', 'plain')) %>%\n complete(DBL_scale, bw)\ndf.corr.lm.f %>% head(n=3)\n \n \n# plotting\nggplot(df.corr.lm.f, aes(DBL_scale,bw, fill=pearson)) +\n geom_tile() +\n geom_text(aes(label=pearson_txt,fontface=gt_emp), color='white', size=6) +\n scale_color_manual(values=c('white', 'black')) +\n scale_fill_gradient('Pearson', low='black', high='red') +\n labs(title='Beta diversity correlogram', x='DBL scaling', y='KDE Bandwidth') + \n theme(\n text = element_text(size=16)\n ) " ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
catherinekuhn/ee-python
.ipynb_checkpoints/simple map-checkpoint.ipynb
cc0-1.0
[ "Generating a basic map image in Earth Engine\nInstall ee-python\nFollow the installation directions found here:\nhttps://github.com/catherinekuhn/CloudtoStreet/blob/master/Python%20API%20directions.ipynb\nCheck your environment\nMake sure that you are in the correct environment. To check your current environment, type the following. The environment you are in will have a star next to it. \n conda info --envs\n\nIf you are not in the ee-python environment, you can switch into it using \n source activate ee-python\n\nImport & Authentication", "# Import the Earth Engine Python Package into Python environment.\nimport ee\nimport ee.mapclient\n\n# Initialize the Earth Engine object, using the authentication credentials.\nee.Initialize()", "Visualize Geographic Data", "image = ee.Image('srtm90_v4')\nfrom IPython.display import Image\nImage(url=image.getThumbUrl({'min':0, 'max': 3000}))\n\n# Print the information for an image asset. the 'srtm90_v4 file is a digital elevation model. \n# that is housed in Google's cloud and has an elevation value for every pixel across the whole earth \n# at a resolution of 30 meters. That is the map you see below in the static notebook. \n\nprint(image.getInfo())\n\n#celebrate the metadata!!", "Try it with mapclient", "\"\"\"Display an image given its ID.\"\"\"\n\nimage = ee.Image('srtm90_v4')\nee.mapclient.addToMap(image, {'min': 0, 'max': 3000})\nee.mapclient.centerMap(-91.1646, 43.8438, 10)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
chanitacr/sc-python
02-widgets-answers.ipynb
mit
[ "Widges and Interactions", "!conda install -y netcdf4\n\nfrom netCDF4 import Dataset, num2date, date2num\nfrom numpy import *\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom ipywidgets import interact, interactive, fixed\nimport ipywidgets as widgets\n\nx=linspace(0,1,100)\nf=2\n\nplt.plot(x, sin(2*pi*x*f))\n\ndef pltsin(f):\n plt.plot(x,sin(2*pi*x*f))\n\npltsin(5)\n\ninteract(pltsin, f=(1,10,0.1))", "Add to the funtion to allow amplitude to be varied and add in an additional slider to vary both f and a\nHint you might want to limit the magnitude of 'y' in the plot - look up matplotlib", "def pltsin(f,a):\n plt.plot(x,a * sin(2*pi*x*f))\n\ninteract(pltsin, f=(1, 10, 0.1), a=(1, 10, 0.1))", "Climate data", "f=Dataset('ncep-data/air.sig995.2013.nc')\n\nair=f.variables['air'] #get variable\n\nplt.imshow(air[364,:,:]) #display first timestep\n\ndef sh(time):\n plt.imshow(air[time,:,:])\n\n#now make it interactive\n\ninteract(sh, time=(0,364,1))\n\n# Browse variable\n\ndef sh(var='air', time=0):\n f=Dataset ('ncep-data/'+var+'.sig995.2013.nc')\n vv=f.variables[var]\n plt.imshow(vv[time,:,:])\n\n# Give a list of varibles\n\nvariabs= ['air','uwnd','vwnd','rhum']\n\n# Now interact with it\n\ninteract(sh, time=(0,355,1), var=variabs)\n\n# Browse variable\n\ndef sh(var='air', year=\"2013\", time=0):\n f=Dataset ('ncep-data/'+var+'.sig995.'+year+'.nc')\n vv=f.variables[var]\n plt.imshow(vv[time,:,:])\n\n# Create a list of years\n\nyears= [str(x) for x in range (2013,2016)]\nyears = ['2013', '2014', '2015']\n\n# Give a list of varibles\n\nvariabs= ['air','uwnd','vwnd','rhum']\n\n# Now interact with it\n\ninteract(sh, time=(0,355,1), var=variabs, year=years)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
DLR-SC/tigl
examples/python/notebooks/geometry_airfoil.ipynb
apache-2.0
[ "Airfoil example\nIn this example we are building a NACA 2412 airfoil from a list of points.\nLets import everything we need:", "import tigl3.curve_factories\nfrom OCC.gp import gp_Pnt\nfrom OCC.Display.SimpleGui import init_display", "Now we build up an array of points from a NACA generator. It consists of 21 x and y coordinates. Since TiGl always works in 3 dimensions, we have to add dummy z values of 0.", "# list of points on NACA2412 profile\npx = [1.000084, 0.975825, 0.905287, 0.795069, 0.655665, 0.500588, 0.34468, 0.203313, 0.091996, 0.022051, 0.0, 0.026892, 0.098987, 0.208902, 0.346303, 0.499412, 0.653352, 0.792716, 0.90373, 0.975232, 0.999916]\npy = [0.001257, 0.006231, 0.019752, 0.03826, 0.057302, 0.072381, 0.079198, 0.072947, 0.054325, 0.028152, 0.0, -0.023408, -0.037507, -0.042346, -0.039941, -0.033493, -0.0245, -0.015499, -0.008033, -0.003035, -0.001257]\n\npoints = [pnt for pnt in zip(px, py, [0.]*len(px))]", "Interpolation of the airfoil with a B-spline\nTiGL brings many algorithms, that build curves and surfaces. The core algorithms can be found in the tigl3.geometry package. These algorithms depend however on the opencascade data structures. To make it more convenient, the tigl3.curve_factories package offers more python functions to create curves.\nThe most basic function is tigl3.curve_factories.points_to_curve. This takes an array of points and builds up the interpolating b-spline.", "curve = tigl3.curve_factories.interpolate_points(points)\n# There are more parameters to control the outcome:\n# curve = tigl3.curve_factories.points_to_curve(points, np.linspace(0,1, 21), close_continuous=False)", "The points_to_curve function has some optional parameters as well.\n - degree: Controls the polynomial degree. If degree=1, the curve will be piecewise linear.\n - params: Controls, at which parameter the points will be interpolated. This array must have the same number of items as the points array!\n - close_continuous: If you interpolate e.g. 
a fuselage section, you probably want a continous passing of the curve at the start and the end of the section. if close_continuous=True, the passing will be continous. For wings, where a discontinous trailing edge is desired, it should be False.\nVisualization of the result\nNow lets visualize the result. We are using the pythonOCC SimpleGui to draw the curve and the points. The jupyter renderer does not yet support curves and points (only surfaces). \nWe first draw all the points without updating the viewer. This would be very slow.\nThen, we draw the curve.\nNote, a separate window will open!", "# start up the gui\ndisplay, start_display, add_menu, add_function_to_menu = init_display()\n\n# make tesselation more accurate\ndisplay.Context.SetDeviationCoefficient(0.0001)\n\n# draw the points\nfor point in points:\n display.DisplayShape(gp_Pnt(*point), update=False)\n\n# draw the curve \ndisplay.DisplayShape(curve)\n\n# match content to screen and start the event loop\ndisplay.FitAll()\nstart_display()", "Result:" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
molgor/spystats
notebooks/.ipynb_checkpoints/Spatial Model Fitting using GLS-Copy1-checkpoint.ipynb
bsd-2-clause
[ "Spatial Model fitting in GLS\nIn this exercise we will fit a linear model using a Spatial structure as covariance matrix. \nWe will use GLS to get better estimators.\nAs always we will need to load the necessary libraries.", "# Load Biospytial modules and etc.\n%matplotlib inline\nimport sys\nsys.path.append('/apps')\nsys.path.append('..')\nsys.path.append('../spystats')\nimport django\ndjango.setup()\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n## Use the ggplot style\nplt.style.use('ggplot')\n\nimport tools", "Use this to automate the process. Be carefull it can overwrite current results\nrun ../HEC_runs/fit_fia_logbiomass_logspp_GLS.py /RawDataCSV/idiv_share/plotsClimateData_11092017.csv /apps/external_plugins/spystats/HEC_runs/results/logbiomas_logsppn_res.csv -85 -80 30 35\nImporting data\nWe will use the FIA dataset and for exemplary purposes we will take a subsample of this data. \nAlso important.\nThe empirical variogram has been calculated for the entire data set using the residuals of an OLS model. \nWe will use some auxiliary functions defined in the fit_fia_logbiomass_logspp_GLS.\nYou can inspect the functions using the ?? 
symbol.", "from HEC_runs.fit_fia_logbiomass_logspp_GLS import prepareDataFrame,loadVariogramFromData,buildSpatialStructure, calculateGLS, initAnalysis, fitGLSRobust\n\nsection = initAnalysis(\"/RawDataCSV/idiv_share/FIA_Plots_Biomass_11092017.csv\",\n \"/apps/external_plugins/spystats/HEC_runs/results/variogram/data_envelope.csv\",\n -130,-60,30,40)\n\n#section = initAnalysis(\"/RawDataCSV/idiv_share/plotsClimateData_11092017.csv\",\n# \"/apps/external_plugins/spystats/HEC_runs/results/variogram/data_envelope.csv\",\n# -85,-80,30,35)\n\n# IN HEC\n#section = initAnalysis(\"/home/hpc/28/escamill/csv_data/idiv/FIA_Plots_Biomass_11092017.csv\",\"/home/hpc/28/escamill/spystats/HEC_runs/results/variogram/data_envelope.csv\",-85,-80,30,35)\n\nsection.shape", "Now we will obtain the data from the calculated empirical variogram.", "gvg,tt = loadVariogramFromData(\"/apps/external_plugins/spystats/HEC_runs/results/variogram/data_envelope.csv\",section)\n\ngvg.plot(refresh=False,with_envelope=True)\n\nresum,gvgn,resultspd,results = fitGLSRobust(section,gvg,num_iterations=10,distance_threshold=1000000)\n\nresum.as_text\n\nplt.plot(resultspd.rsq)\nplt.title(\"GLS feedback algorithm\")\nplt.xlabel(\"Number of iterations\")\nplt.ylabel(\"R-sq fitness estimator\")\n\nresultspd.columns\n\na = map(lambda x : x.to_dict(), resultspd['params'])\n\nparamsd = pd.DataFrame(a)\n\nparamsd\n\nplt.plot(paramsd.Intercept.loc[1:])\nplt.get_yaxis().get_major_formatter().set_useOffset(False)\n\nfig = plt.figure(figsize=(10,10))\nplt.plot(paramsd.logSppN.iloc[1:])\n\nvariogram_data_path = \"/apps/external_plugins/spystats/HEC_runs/results/variogram/data_envelope.csv\"\nthrs_dist = 100000\nemp_var_log_log = pd.read_csv(variogram_data_path)", "Instantiating the variogram object", "gvg = tools.Variogram(section,'logBiomass',using_distance_threshold=thrs_dist)\ngvg.envelope = emp_var_log_log\ngvg.empirical = emp_var_log_log.variogram\ngvg.lags = emp_var_log_log.lags\n#emp_var_log_log = 
emp_var_log_log.dropna()\n#vdata = gvg.envelope.dropna()", "Instantiating theoretical variogram model", "matern_model = tools.MaternVariogram(sill=0.34,range_a=100000,nugget=0.33,kappa=4)\nwhittle_model = tools.WhittleVariogram(sill=0.34,range_a=100000,nugget=0.0,alpha=3)\nexp_model = tools.ExponentialVariogram(sill=0.34,range_a=100000,nugget=0.33)\ngaussian_model = tools.GaussianVariogram(sill=0.34,range_a=100000,nugget=0.33)\nspherical_model = tools.SphericalVariogram(sill=0.34,range_a=100000,nugget=0.33)\n\ngvg.model = whittle_model\n#gvg.model = matern_model\n#models = map(lambda model : gvg.fitVariogramModel(model),[matern_model,whittle_model,exp_model,gaussian_model,spherical_model])\n\ngvg.fitVariogramModel(whittle_model)\n\nimport numpy as np\nxx = np.linspace(0,1000000,1000)\n\ngvg.plot(refresh=False,with_envelope=True)\nplt.plot(xx,whittle_model.f(xx),lw=2.0,c='k')\nplt.title(\"Empirical Variogram with fitted Whittle Model\")\n\ndef randomSelection(n,p):\n idxs = np.random.choice(n,p,replace=False)\n random_sample = new_data.iloc[idxs]\n return random_sample\n#################\nn = len(new_data)\np = 3000 # The amount of samples taken (let's do it without replacement)\n\nrandom_sample = randomSelection(n,100)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
chapmanbe/nlm_clinical_nlp
BasicSentenceMarkup.ipynb
mit
[ "Demonstration of Basic Sentence Markup with pyConTextNLP\npyConTextNLP uses NetworkX directional graphs to represent the markup: nodes in the graph will be the concepts that are identified in the sentence and edges in the graph will be the relationships between those concepts.", "import pyConTextNLP.pyConTextGraph as pyConText\nimport pyConTextNLP.itemData as itemData\nimport networkx as nx", "pyConTextGraph contains the bulk of the pyConTextNLP functionality, including basic class definitions such as the ConTextMarkup class that represents the markup of a sentence.\nitemData contains a class definition for an itemData and functions for reading itemData definitions which are assumed to be in a tab seperated file that is specified as either a local file or a remote resource. In this example we will read definitions straight from the GitHub repository.\nAn itemData in its most basic form is a four-tuple consisting of \nA literal (e.g. \"pulmonary embolism\", \"no definite evidence of\")\nA category (e.g. \"CRITICAL_FINDING\", \"PROBABLE_EXISTENCE\")\nA regular expression that defines how to identify the literal concept. If no regular expression is specified, a regular expression will be built directly from the literal by wrapping it with word boundaries (e.g. r\"\"\"\\bpulmonary embolism\\b\"\"\")\nA rule that defines how the concept works in the sentence (e.g. a negation term that looks forward in the sentence).\n\n\n\n\n\nSentences\nThese example reports are taken from (with modification) the MIMIC2 demo data set that is a publically available database of de-identified medical records for deceased individuals.", "reports = [\n \"\"\"IMPRESSION: Evaluation limited by lack of IV contrast; however, no evidence of\n bowel obstruction or mass identified within the abdomen or pelvis. 
Non-specific interstitial opacities and bronchiectasis seen at the right\n base, suggestive of post-inflammatory changes.\"\"\",\n \"\"\"IMPRESSION: Evidence of early pulmonary vascular congestion and interstitial edema. Probable scarring at the medial aspect of the right lung base, with no\n definite consolidation.\"\"\"\n ,\n \"\"\"IMPRESSION:\n \n 1. 2.0 cm cyst of the right renal lower pole. Otherwise, normal appearance\n of the right kidney with patent vasculature and no sonographic evidence of\n renal artery stenosis.\n 2. Surgically absent left kidney.\"\"\",\n \"\"\"IMPRESSION: No pneumothorax.\"\"\",\n \"\"\"IMPRESSION: No definite pneumothorax\"\"\"\n \"\"\"IMPRESSION: New opacity at the left lower lobe consistent with pneumonia.\"\"\"\n]", "Read the itemData definitions\nWe're reading directly from GitHub. You could read from a local file using a file:// URL.", "modifiers = itemData.instantiateFromCSVtoitemData(\n \"https://raw.githubusercontent.com/chapmanbe/pyConTextNLP/master/KB/lexical_kb_05042016.tsv\")\ntargets = itemData.instantiateFromCSVtoitemData(\n \"https://raw.githubusercontent.com/chapmanbe/pyConTextNLP/master/KB/utah_crit.tsv\")\n", "Example function to analyze each sentence\nThis the function we'll use for each report. 
The following section of this document steps through each line.", "def markup_sentence(s, modifiers, targets, prune_inactive=True):\n \"\"\"\n \"\"\"\n markup = pyConText.ConTextMarkup()\n markup.setRawText(s)\n markup.cleanText()\n markup.markItems(modifiers, mode=\"modifier\")\n markup.markItems(targets, mode=\"target\")\n markup.pruneMarks()\n markup.dropMarks('Exclusion')\n # apply modifiers to any targets within the modifiers scope\n markup.applyModifiers()\n markup.pruneSelfModifyingRelationships()\n if prune_inactive:\n markup.dropInactiveModifiers()\n return markup", "We're going to start with our simplest of sentences", "reports[3]", "marking up a sentence\nWe start by creating an instance of the ConTextMarkup class. This is a subclass of a NetworkX DiGraph. Information will be stored in the nodes and edges.", "markup = pyConText.ConTextMarkup()\n\nisinstance(markup,nx.DiGraph)\n\n#### Set the text to be processed\n\nmarkup.setRawText(reports[3].lower())\nprint(markup)\nprint(len(markup.getRawText()))\n", "Clean the text\nPrior to processing we do some basic cleaning of the text, sucha s replacing multiple white spaces with a single space. You'll notice this in the spacing between the colon and \"no\" in the raw and clean versions of the text.", "markup.cleanText()\nprint(markup)\nprint(len(markup.getText()))", "Identify concepts in the sentence\nThe markItems method takes a list of itemData and uses the regular expressions to identify any instances of the itemData in the sentence. With the mode keyword we specify whether these itemData are targets or modifiers. 
This value will be stored as a data attribute of the node that is created in the graph for any identified concepts.", "markup.markItems(modifiers, mode=\"modifier\")\nprint(markup.nodes(data=True))\nprint(type(markup.nodes()[0]))", "What does our initial markup look like?\n\nWe've identified one concept in the sentence: no\nWe've created a tagObject for this concept which keeps track of the actual phrase identified by the regular expression, what the category of the itemData was (definite_negated_existence), this is a list because there can be multiple categories. There is also an absurdly long identifier for the node. Note that our mode modifier has been stored as a data element of the node. In NetworkX each node (or edge) has a dictionary for data.\n\nNow let's markup the targets", "markup.markItems(targets, mode=\"target\")\n\nprint(markup.nodes(data=True))", "What does our markup look like now?\nWe've added another node to the graph. This time the target pneumothorax.\nPrune Marks\nAfter identifying concepts, we prune concepts that are a subset of another identified concept. This results in no changes here, but the importance will be shown later with a different sentence.", "markup.pruneMarks()\nprint(markup.nodes())", "Are there any relationships in our markup?\nWe do not yet have any relationships (edges) between our concepts (target and modifier edges)", "print(markup.edges())", "Apply modifiers\nWe now call the applyModifiers method of the ConTextMarkup object to identify any relationships between the nodes.", "markup.applyModifiers()\nprint(markup.edges())", "We now have a relationship!\nWe now have a directed edge between our no node and our pneumothorax node. This will be interepreted as pneumothorax being a definitely negated concept in the sentence.\nWhat's next?\nThe value of pruning is shown in this notebook." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tensorflow/docs
site/en/tutorials/structured_data/feature_columns.ipynb
apache-2.0
[ "Copyright 2019 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Classify structured data with feature columns\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/structured_data/feature_columns\">\n <img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />\n View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/structured_data/feature_columns.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />\n Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/structured_data/feature_columns.ipynb\">\n <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />\n View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/structured_data/feature_columns.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\n\nWarning: The tf.feature_columns module described in this tutorial is not recommended for new code. Keras preprocessing layers cover this functionality, for migration instructions see the Migrating feature columns guide. The tf.feature_columns module was designed for use with TF1 Estimators. 
It does fall under our compatibility guarantees, but will receive no fixes other than security vulnerabilities.\n\nThis tutorial demonstrates how to classify structured data (e.g. tabular data in a CSV). We will use Keras to define the model, and tf.feature_column as a bridge to map from columns in a CSV to features used to train the model. This tutorial contains complete code to:\n\nLoad a CSV file using Pandas.\nBuild an input pipeline to batch and shuffle the rows using tf.data.\nMap from columns in the CSV to features used to train the model using feature columns.\nBuild, train, and evaluate a model using Keras.\n\nThe Dataset\nWe will use a simplified version of the PetFinder dataset. There are several thousand rows in the CSV. Each row describes a pet, and each column describes an attribute. We will use this information to predict the speed at which the pet will be adopted.\nFollowing is a description of this dataset. Notice there are both numeric and categorical columns. There is a free text column which we will not use in this tutorial.\nColumn | Description| Feature Type | Data Type\n------------|--------------------|----------------------|-----------------\nType | Type of animal (Dog, Cat) | Categorical | string\nAge | Age of the pet | Numerical | integer\nBreed1 | Primary breed of the pet | Categorical | string\nColor1 | Color 1 of pet | Categorical | string\nColor2 | Color 2 of pet | Categorical | string\nMaturitySize | Size at maturity | Categorical | string\nFurLength | Fur length | Categorical | string\nVaccinated | Pet has been vaccinated | Categorical | string\nSterilized | Pet has been sterilized | Categorical | string\nHealth | Health Condition | Categorical | string\nFee | Adoption Fee | Numerical | integer\nDescription | Profile write-up for this pet | Text | string\nPhotoAmt | Total uploaded photos for this pet | Numerical | integer\nAdoptionSpeed | Speed of adoption | Classification | integer\nImport TensorFlow and other libraries", "!pip 
install sklearn\n\nimport numpy as np\nimport pandas as pd\n\nimport tensorflow as tf\n\nfrom tensorflow import feature_column\nfrom tensorflow.keras import layers\nfrom sklearn.model_selection import train_test_split", "Use Pandas to create a dataframe\nPandas is a Python library with many helpful utilities for loading and working with structured data. We will use Pandas to download the dataset from a URL, and load it into a dataframe.", "import pathlib\n\ndataset_url = 'http://storage.googleapis.com/download.tensorflow.org/data/petfinder-mini.zip'\ncsv_file = 'datasets/petfinder-mini/petfinder-mini.csv'\n\ntf.keras.utils.get_file('petfinder_mini.zip', dataset_url,\n extract=True, cache_dir='.')\ndataframe = pd.read_csv(csv_file)\n\ndataframe.head()", "Create target variable\nThe task in the original dataset is to predict the speed at which a pet will be adopted (e.g., in the first week, the first month, the first three months, and so on). Let's simplify this for our tutorial. Here, we will transform this into a binary classification problem, and simply predict whether the pet was adopted, or not.\nAfter modifying the label column, 0 will indicate the pet was not adopted, and 1 will indicate it was.", "# In the original dataset \"4\" indicates the pet was not adopted.\ndataframe['target'] = np.where(dataframe['AdoptionSpeed']==4, 0, 1)\n\n# Drop un-used columns.\ndataframe = dataframe.drop(columns=['AdoptionSpeed', 'Description'])", "Split the dataframe into train, validation, and test\nThe dataset we downloaded was a single CSV file. We will split this into train, validation, and test sets.", "train, test = train_test_split(dataframe, test_size=0.2)\ntrain, val = train_test_split(train, test_size=0.2)\nprint(len(train), 'train examples')\nprint(len(val), 'validation examples')\nprint(len(test), 'test examples')", "Create an input pipeline using tf.data\nNext, we will wrap the dataframes with tf.data. 
This will enable us to use feature columns as a bridge to map from the columns in the Pandas dataframe to features used to train the model. If we were working with a very large CSV file (so large that it does not fit into memory), we would use tf.data to read it from disk directly. That is not covered in this tutorial.", "# A utility method to create a tf.data dataset from a Pandas Dataframe\ndef df_to_dataset(dataframe, shuffle=True, batch_size=32):\n dataframe = dataframe.copy()\n labels = dataframe.pop('target')\n ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))\n if shuffle:\n ds = ds.shuffle(buffer_size=len(dataframe))\n ds = ds.batch(batch_size)\n return ds\n\nbatch_size = 5 # A small batch sized is used for demonstration purposes\ntrain_ds = df_to_dataset(train, batch_size=batch_size)\nval_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)\ntest_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)", "Understand the input pipeline\nNow that we have created the input pipeline, let's call it to see the format of the data it returns. We have used a small batch size to keep the output readable.", "for feature_batch, label_batch in train_ds.take(1):\n print('Every feature:', list(feature_batch.keys()))\n print('A batch of ages:', feature_batch['Age'])\n print('A batch of targets:', label_batch )", "We can see that the dataset returns a dictionary of column names (from the dataframe) that map to column values from rows in the dataframe.\nDemonstrate several types of feature columns\nTensorFlow provides many types of feature columns. 
In this section, we will create several types of feature columns, and demonstrate how they transform a column from the dataframe.", "# We will use this batch to demonstrate several types of feature columns\nexample_batch = next(iter(train_ds))[0]\n\n# A utility method to create a feature column\n# and to transform a batch of data\ndef demo(feature_column):\n feature_layer = layers.DenseFeatures(feature_column)\n print(feature_layer(example_batch).numpy())", "Numeric columns\nThe output of a feature column becomes the input to the model (using the demo function defined above, we will be able to see exactly how each column from the dataframe is transformed). A numeric column is the simplest type of column. It is used to represent real valued features. When using this column, your model will receive the column value from the dataframe unchanged.", "photo_count = feature_column.numeric_column('PhotoAmt')\ndemo(photo_count)", "In the PetFinder dataset, most columns from the dataframe are categorical.\nBucketized columns\nOften, you don't want to feed a number directly into the model, but instead split its value into different categories based on numerical ranges. Consider raw data that represents a person's age. Instead of representing age as a numeric column, we could split the age into several buckets using a bucketized column. Notice the one-hot values below describe which age range each row matches.", "age = feature_column.numeric_column('Age')\nage_buckets = feature_column.bucketized_column(age, boundaries=[1, 3, 5])\ndemo(age_buckets)", "Categorical columns\nIn this dataset, Type is represented as a string (e.g. 'Dog', or 'Cat'). We cannot feed strings directly to a model. Instead, we must first map them to numeric values. The categorical vocabulary columns provide a way to represent strings as a one-hot vector (much like you have seen above with age buckets). 
The vocabulary can be passed as a list using categorical_column_with_vocabulary_list, or loaded from a file using categorical_column_with_vocabulary_file.", "animal_type = feature_column.categorical_column_with_vocabulary_list(\n 'Type', ['Cat', 'Dog'])\n\nanimal_type_one_hot = feature_column.indicator_column(animal_type)\ndemo(animal_type_one_hot)", "Embedding columns\nSuppose instead of having just a few possible strings, we have thousands (or more) values per category. For a number of reasons, as the number of categories grow large, it becomes infeasible to train a neural network using one-hot encodings. We can use an embedding column to overcome this limitation. Instead of representing the data as a one-hot vector of many dimensions, an embedding column represents that data as a lower-dimensional, dense vector in which each cell can contain any number, not just 0 or 1. The size of the embedding (8, in the example below) is a parameter that must be tuned.\nKey point: using an embedding column is best when a categorical column has many possible values. We are using one here for demonstration purposes, so you have a complete example you can modify for a different dataset in the future.", "# Notice the input to the embedding column is the categorical column\n# we previously created\nbreed1 = feature_column.categorical_column_with_vocabulary_list(\n 'Breed1', dataframe.Breed1.unique())\nbreed1_embedding = feature_column.embedding_column(breed1, dimension=8)\ndemo(breed1_embedding)", "Hashed feature columns\nAnother way to represent a categorical column with a large number of values is to use a categorical_column_with_hash_bucket. This feature column calculates a hash value of the input, then selects one of the hash_bucket_size buckets to encode a string. 
When using this column, you do not need to provide the vocabulary, and you can choose to make the number of hash_buckets significantly smaller than the number of actual categories to save space.\nKey point: An important downside of this technique is that there may be collisions in which different strings are mapped to the same bucket. In practice, this can work well for some datasets regardless.", "breed1_hashed = feature_column.categorical_column_with_hash_bucket(\n 'Breed1', hash_bucket_size=10)\ndemo(feature_column.indicator_column(breed1_hashed))", "Crossed feature columns\nCombining features into a single feature, better known as feature crosses, enables a model to learn separate weights for each combination of features. Here, we will create a new feature that is the cross of Age and Type. Note that crossed_column does not build the full table of all possible combinations (which could be very large). Instead, it is backed by a hashed_column, so you can choose how large the table is.", "crossed_feature = feature_column.crossed_column([age_buckets, animal_type], hash_bucket_size=10)\ndemo(feature_column.indicator_column(crossed_feature))", "Choose which columns to use\nWe have seen how to use several types of feature columns. Now we will use them to train a model. The goal of this tutorial is to show you the complete code (e.g. mechanics) needed to work with feature columns. 
We have selected a few columns to train our model below arbitrarily.\nKey point: If your aim is to build an accurate model, try a larger dataset of your own, and think carefully about which features are the most meaningful to include, and how they should be represented.", "feature_columns = []\n\n# numeric cols\nfor header in ['PhotoAmt', 'Fee', 'Age']:\n feature_columns.append(feature_column.numeric_column(header))\n\n# bucketized cols\nage = feature_column.numeric_column('Age')\nage_buckets = feature_column.bucketized_column(age, boundaries=[1, 2, 3, 4, 5])\nfeature_columns.append(age_buckets)\n\n# indicator_columns\nindicator_column_names = ['Type', 'Color1', 'Color2', 'Gender', 'MaturitySize',\n 'FurLength', 'Vaccinated', 'Sterilized', 'Health']\nfor col_name in indicator_column_names:\n categorical_column = feature_column.categorical_column_with_vocabulary_list(\n col_name, dataframe[col_name].unique())\n indicator_column = feature_column.indicator_column(categorical_column)\n feature_columns.append(indicator_column)\n\n# embedding columns\nbreed1 = feature_column.categorical_column_with_vocabulary_list(\n 'Breed1', dataframe.Breed1.unique())\nbreed1_embedding = feature_column.embedding_column(breed1, dimension=8)\nfeature_columns.append(breed1_embedding)\n\n# crossed columns\nage_type_feature = feature_column.crossed_column([age_buckets, animal_type], hash_bucket_size=100)\nfeature_columns.append(feature_column.indicator_column(age_type_feature))", "Create a feature layer\nNow that we have defined our feature columns, we will use a DenseFeatures layer to input them to our Keras model.", "feature_layer = tf.keras.layers.DenseFeatures(feature_columns)", "Earlier, we used a small batch size to demonstrate how feature columns worked. 
We create a new input pipeline with a larger batch size.", "batch_size = 32\ntrain_ds = df_to_dataset(train, batch_size=batch_size)\nval_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)\ntest_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)", "Create, compile, and train the model", "model = tf.keras.Sequential([\n feature_layer,\n layers.Dense(128, activation='relu'),\n layers.Dense(128, activation='relu'),\n layers.Dropout(.1),\n layers.Dense(1)\n])\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel.fit(train_ds,\n validation_data=val_ds,\n epochs=10)\n\nloss, accuracy = model.evaluate(test_ds)\nprint(\"Accuracy\", accuracy)", "Key point: You will typically see best results with deep learning with much larger and more complex datasets. When working with a small dataset like this one, we recommend using a decision tree or random forest as a strong baseline. The goal of this tutorial is not to train an accurate model, but to demonstrate the mechanics of working with structured data, so you have code to use as a starting point when working with your own datasets in the future.\nNext steps\nThe best way to learn more about classifying structured data is to try it yourself. We suggest finding another dataset to work with, and training a model to classify it using code similar to the above. To improve accuracy, think carefully about which features to include in your model, and how they should be represented." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
edwardd1/phys202-2015-work
assignments/assignment11/OptimizationEx01.ipynb
mit
[ "Optimization Exercise 1\nImports", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.optimize as opt\nfrom scipy.optimize import minimize, rosen, rosen_der", "Hat potential\nThe following potential is often used in Physics and other fields to describe symmetry breaking and is often known as the \"hat potential\":\n$$ V(x) = -a x^2 + b x^4 $$\nWrite a function hat(x,a,b) that returns the value of this function:", "def hat(x,a,b):\n return -a*(x**2) + b*(x**4)\n\nassert hat(0.0, 1.0, 1.0)==0.0\nassert hat(0.0, 1.0, 1.0)==0.0\nassert hat(1.0, 10.0, 1.0)==-9.0", "Plot this function over the range $x\\in\\left[-3,3\\right]$ with $b=1.0$ and $a=5.0$:", "a = 5.0\nb = 1.0\n\nx = np.arange(-3, 3, 0.1)\ny = hat(x,a,b)\nplt.plot(x,y)\n\nassert True # leave this to grade the plot", "Write code that finds the two local minima of this function for $b=1.0$ and $a=5.0$.\n\nUse scipy.optimize.minimize to find the minima. You will have to think carefully about how to get this function to find both minima.\nPrint the x values of the minima.\nPlot the function as a blue line.\nOn the same axes, show the minima as red circles.\nCustomize your visualization to make it beatiful and effective.", "n = minimize(hat, x, (a,b), method='BFGS')\nn.x\n\nassert True # leave this for grading the plot", "To check your numerical results, find the locations of the minima analytically. Show and describe the steps in your derivation using LaTeX equations. Evaluate the location of the minima using the above parameters.\nYOUR ANSWER HERE" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
deculler/MachineLearningTables
Chapter3-1.ipynb
bsd-2-clause
[ "Concepts and data from \"An Introduction to Statistical Learning, with applications in R\" (Springer, 2013) with permission from the authors: G. James, D. Witten, T. Hastie and R. Tibshirani \" available at www.StatLearning.com.\nFor Tables reference see http://data8.org/datascience/tables.html\nhttp://jeffskinnerbox.me/notebooks/matplotlib-2d-and-3d-plotting-in-ipython.html", "# HIDDEN\n# For Tables reference see http://data8.org/datascience/tables.html\n# This useful nonsense should just go at the top of your notebook.\nfrom datascience import *\n%matplotlib inline\nimport matplotlib.pyplot as plots\nimport numpy as np\nfrom sklearn import linear_model\nplots.style.use('fivethirtyeight')\nplots.rc('lines', linewidth=1, color='r')\nfrom ipywidgets import interact, interactive, fixed\nimport ipywidgets as widgets\n# datascience version number of last run of this notebook\nversion.__version__\n\n\nimport sys\nsys.path.append(\"..\")\nfrom ml_table import ML_Table\n\nimport locale\nlocale.setlocale( locale.LC_ALL, 'en_US.UTF-8' ) ", "Acquiring and seeing trends in multidimensional data", "# Getting the data\nadvertising = ML_Table.read_table(\"./data/Advertising.csv\")\nadvertising.relabel(0, \"id\")", "FIGURE 3.1. For the Advertising data, the least squares fit for the regression of sales onto TV is shown. The fit is found by minimizing the sum of squared errors. \nEach line segment represents an error, and the fit makes a compromise by averaging their squares. 
In this case a linear fit captures the essence of the relationship, although it is somewhat deficient in the left of the plot.", "ax = advertising.plot_fit_1d('Sales', 'TV', advertising.regression_1d('Sales', 'TV'))\n_ = ax.set_xlim(-20,300)\n\nlr = advertising.linear_regression('Sales', 'TV')\n\nadvertising.plot_fit_1d('Sales', 'TV', lr.model)\n\nadvertising.lm_summary_1d('Sales', 'TV')", "Let $\\hat{y}_i = \\hat{\\beta_0} + \\hat{\\beta_1}x_i$ be the prediction for $Y$ based on the $i$-th value of $X$.", "# Get the actual parameters that are captured within the model\nadvertising.regression_1d_params('Sales', 'TV')\n\n# Regression yields a model. The computational representation of a model is a function\n# That can be applied to an input, 'TV' to get an estimate of an output, 'Sales'\nadvertise_model_tv = advertising.regression_1d('Sales', 'TV')\n\n# Sales with no TV advertising\nadvertise_model_tv(0)\n\n# Sales with 100 units TV advertising\nadvertise_model_tv(100)\n\n# Here's the output of the model applied to the input data\nadvertise_model_tv(advertising['TV'])", "The residual is the difference between the model output and the observed output", "residual = advertising['Sales'] - advertise_model_tv(advertising['TV'])\nresidual", "The residual is not very useful directly because balances over-estimates and under-estimates to produce the best overall estimate with the least error. 
We can understand the overall goodness of fit by the residual sum of squares - RSS.", "# Residual Sum of Squares\nRSS = sum(residual*residual)\nRSS\n\n# This is common enough that we have it provided as a method\nadvertising.RSS_model('Sales', advertising.regression_1d('Sales', 'TV'), 'TV')\n\n# And we should move toward a general regression framework\nadvertising.RSS_model('Sales', advertising.linear_regression('Sales', 'TV').model, 'TV')", "With this, we can build an independent model for each of the inputs and look at the associates RSS.", "advertising_models = Table().with_column('Input', ['TV', 'Radio', 'Newspaper'])\nadvertising_models['Model'] = advertising_models.apply(lambda i: advertising.regression_1d('Sales', i), 'Input')\nadvertising_models['RSS'] = advertising_models.apply(lambda i, m: advertising.RSS_model('Sales', m, i), ['Input', 'Model'])\nadvertising_models\n\nadvertising_models = Table().with_column('Input', ['TV', 'Radio', 'Newspaper'])\nadvertising_models['Model'] = advertising_models.apply(lambda i: advertising.linear_regression('Sales', i).model, 'Input')\nadvertising_models['RSS'] = advertising_models.apply(lambda i, m: advertising.RSS_model('Sales', m, i), ['Input', 'Model'])\nadvertising_models", "We can look at how well each of these inputs predict the output by visualizing the residuals.\nThe magnitude of the RSS gives a sense of the error.", "for mode, mdl in zip(advertising_models['Input'], advertising_models['Model']) :\n advertising.plot_fit_1d('Sales', mode, mdl)\n\n# RSS at arbitrary point\nres = lambda b0, b1: advertising.RSS('Sales', b0 + b1*advertising['TV'])\n\nres(7.0325935491276965, 0.047536640433019729)", "Figure 3.2 - RSS and least squares regression\nRegression using least squares finds parameters $b0$ and $b1$ tohat minimize the RSS. The least squares regression line estimates the population regression line.\nFigure 3.2 shows (what is claimed to be) the RSS contour and surface around the regression point. 
The computed analog of Figure 3.2 is shown below, with the role of $b0$ and $b1$ reversed to match the tuple returned from the regression method. The plot is the text is incorrect is some important ways. The RSS is not radially symmetric around ($b0$, $b1$). Lines with a larger intercept and smaller slope or vice versa are very close to the minima, i.e., the surface is nearly flat along the upper-left to lower-right diagonal, especially where fit is not very good, since the output depends on more than this one input.\nJust because a process minimizes the error does not mean that the minima is sharply defined or that the error surface has no structure. Below we go a bit beyond the text to illustrate this more fully.", "ax = advertising.RSS_contour('Sales', 'TV', sensitivity=0.2)\n\nax = advertising.RSS_wireframe('Sales', 'TV', sensitivity=0.2)\n\n# The minima point\nadvertising.linear_regression('Sales', 'TV').params\n\n# Some other points along the trough\npoints = [(0.042, 8.0), (0.044, 7.6), (0.050, 6.6), (0.054, 6.0)]\nax = advertising.RSS_contour('Sales', 'TV', sensitivity=0.2)\nax.plot([b0 for b1, b0 in points], [b1 for b1, b0 in points], 'ro')\n[advertising.RSS('Sales', b0 + b1*advertising['TV']) for b1,b0 in points]\n\n# Models as lines corresponding to points along the near-minimal vectors.\nax = advertising.plot_fit_1d('Sales', 'TV', advertising.linear_regression('Sales', 'TV').model)\nfor b1, b0 in points:\n fit = lambda x: b0 + b1*x\n ax.plot([0, 300], [fit(0), fit(300)])\n_ = ax.set_xlim(-20,300)", "3.1.2 Assessing the accuracy of coefficient estimates\nThe particular minima that is found through least squares regression is effected by the particular sample of the population that is observed and utilized for estimating the coefficients of the underlying population model. \nTo see this we can generate a synthetic population based on an ideal model plus noise. Here we can peek at the entire population (which in most settings cannot be observed). 
We then take samples of this population and fit a regression to those. We can then see how these regression lines differ from the population regression line, which is not exactly the ideal model.", "def model (x):\n return 3*x + 2\n\ndef population(n, noise_scale = 1):\n sample = ML_Table.runiform('x', n, -2, 2)\n noise = ML_Table.rnorm('e', n, sd=noise_scale)\n sample['Y'] = sample.apply(model, 'x') + noise['e']\n return sample\n\ndata = population(100, 2)\n\ndata.scatter('x')\n\nax = data.plot_fit('Y', data.linear_regression('Y').model)\n\ndata.linear_regression('Y').params\n\n# A random sample of the population\nsample = data.sample(10)\nsample.plot_fit('Y', sample.linear_regression('Y').model)\n\nnsamples = 5\nax = data.plot_fit('Y', data.linear_regression('Y').model, linewidth=3)\nfor s in range(nsamples):\n fit = data.sample(10).linear_regression('Y').model\n ax.plot([-2, 2], [fit(-2), fit(2)], linewidth=1)", "\"The property of unbiasedness holds for the least squares coefficient estimates given by (3.4) as well: if we estimate β0 and β1 on the basis of a particular data set, then our estimates won’t be exactly equal to β0 and β1. But if we could average the estimates obtained over a huge number of data sets, then the average of these estimates would be spot on!\"\nTo compute the standard errors associated with $β_0$ and $β_1$, we use the following formulas.\nThe slope, $b_1$:\n$SE(\\hat{β_1})^2 = \\frac{σ^2}{\\sum_{i=1}^n (x_i - \\bar{x})^2}$\nThe intercept, $b_0$:\n$SE(\\hat{β_0})^2 = σ^2 [\\frac{1}{n} + \\frac{\\bar{x}^2}{\\sum_{i=1}^n (x_i - \\bar{x})^2} ] $,\nwhere $σ^2 = Var(ε)$. 
\nIn general, $σ^2$ is not known, but can be estimated from the data.\nThe estimate of σ is known as the residual standard error, and is given by the formula\n$RSE = \\sqrt{RSS/(n − 2)}$.", "adv_sigma = advertising.RSE_model('Sales', advertising.linear_regression('Sales', 'TV').model, 'TV')\n\nadv_sigma\n\nb0, b1 = advertising.linear_regression('Sales', 'TV').params\nb0, b1\n\nadvertising.RSS_model('Sales', advertising.linear_regression('Sales', 'TV').model, 'TV')\n\nSE_b0, SE_b1 = advertising.SE_1d_params('Sales', 'TV')\nSE_b0, SE_b1\n\n# b0 95% confidence interval\n(b0-2*SE_b0, b0+2*SE_b0)\n\n# b1 95% confidence interval\n(b1-2*SE_b1, b1+2*SE_b1)\n\n# t-statistic of the slope\nb0/SE_b0\n\n# t-statistics of the intercept\nb1/SE_b1\n\n# Similar to summary of a linear model in R\nadvertising.lm_summary_1d('Sales', 'TV')\n\n# We can just barely reject the null hypothesis for Newspaper\n# advertising effecting sales\nadvertising.lm_summary_1d('Sales', 'Newspaper')", "3.1.3 Assessing the Acurracy of the Model\nOnce we have rejected the null hypothesis in favor of the alternative hypothesis, it is natural to want to quantify the extent to which the model fits the data. The quality of a linear regression fit is typically assessed using two related quantities: the residual standard error (RSE) and the $R^2$ statistic.\nThe RSE provides an absolute measure of lack of fit of the model to the data.", "adver_model = advertising.regression_1d('Sales', 'TV')\nadvertising.RSE_model('Sales', adver_model, 'TV')", "The $R^2$ statistic provides an alternative measure of fit. 
It takes the form of a proportion—the proportion of variance explained—and so it always takes on a value between 0 and 1, and is independent of the scale of Y.\nTo calculate $R^2$, we use the formula\n$R^2 = \\frac{TSS−RSS}{TSS} = 1 − \\frac{RSS}{TSS}$\nwhere $TSS = \\sum (y_i - \\bar{y})^2$ is the total sum of squares.", "advertising.R2_model('Sales', adver_model, 'TV')\n\n# the other models of advertising suggest that there is more going on\nadvertising.R2_model('Sales', adver_model, 'Radio')\n\nadvertising.R2_model('Sales', adver_model, 'Newspaper')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mamrehn/machine-learning-tutorials
ipynb/[scikit-learn] first steps.ipynb
cc0-1.0
[ "This is a quick tutorial to get started with scikit-learn.\nParts of the code presented are based on this machineLearning\ntutorial.\nFirst, let's take a look at the versions of the libraries involved.", "import numpy; print('numpy:\\t', numpy.__version__, sep='\\t')\nimport scipy; print('scipy:\\t', scipy.__version__, sep='\\t')\nimport matplotlib; print('matplotlib:', matplotlib.__version__, sep='\\t')\nimport sklearn; print('scikit-learn:', sklearn.__version__, sep='\\t')", "Then load some data.", "from sklearn import datasets\n\n#datasets.load_ -> [press tab for completion]\niris = datasets.load_iris()\niris.keys()\n\nfor k in iris.keys():\n print('\\n== ', k, '==\\n', str(iris[k])[0:390])\n\nfor k in iris.keys():\n print(k, ':', type(iris[k]))\n\n[(k, iris[k].shape) for k in iris.keys() if type(iris[k]) == numpy.ndarray]\n\n# note: this also imports numpy as np, imports matplotlib.pyplot as plt, and others\n%pylab inline", "Benchmark classificator by ml-benchmarks:", "def dtime_to_seconds(dtime):\n return dtime.seconds + (dtime.microseconds * 1e-6)\n\ndef bench(func, data, n=10):\n assert n > 2\n score = np.inf\n try:\n time = []\n for i in range(n):\n score, t = func(*data)\n time.append(dtime_to_seconds(t))\n # remove extremal values\n time.pop(np.argmax(time))\n time.pop(np.argmin(time))\n except Exception as detail:\n print('%s error in function %s: ', (repr(detail), func))\n time = []\n return score, np.array(time)\n\ndef bench_skl(X, y, T, valid):\n from sklearn import linear_model, ensemble\n start = datetime.now()\n # http://scikit-learn.org/stable/modules/classes.html\n clf = ensemble.RandomForestClassifier(n_estimators=1000, n_jobs=5, verbose=0)\n #clf = linear_model.ElasticNet(alpha=0.5, l1_ratio=0.5)\n #clf = linear_model.LogisticRegression()\n #clf = neighbors.NeighborsClassifier(n_neighbors=n_neighbors, algorithm='brute_inplace')\n #clf = skl_cluster.KMeans(k=n_components, n_init=1)\n #...\n clf.fit(X, y)\n\n ## Regression\n # pred = 
clf.predict(T)\n # delta = datetime.now() - start\n # mse = np.linalg.norm(pred - valid, 2) ** 2\n # return mse, delta\n\n # Classification\n score = np.mean(clf.predict(T) == valid)\n return score, datetime.now() - start\n\nfrom sklearn import datasets\nimport numpy as np\nfrom datetime import datetime\n\niris = datasets.load_iris()\n\nsample_range = np.random.random_sample(size=iris.target.shape[0])\nTH = 0.7\n\nX = np.array([(iris.data[i,]) for i in range(len(iris.target)) if sample_range[i] >= TH])\nY = np.array([(iris.target[i,]) for i in range(len(iris.target)) if sample_range[i] >= TH])\nT = np.array([(iris.data[i,]) for i in range(len(iris.target)) if sample_range[i] < TH])\nvalid = np.array([(iris.target[i,]) for i in range(len(iris.target)) if sample_range[i] < TH])\n\nnum_tries = 25\nscore, times = bench(bench_skl, (X,Y,T,valid), num_tries)\nprint('Tries:', num_tries, 'Score:', score, 'Time:', np.mean(times), '(mean)', np.median(times), '(median)')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
ralph-group/pymeasure
examples/Notebook Experiments/script2.ipynb
mit
[ "More features for Experiment class: custom config, Measurable parameter, analysis function\nThis example uses the Experiment class to create a measurement from a procedure object, with the Measurable parameter to automatically generate sorted DATA_COLUMNS and MEASURE lists (which is then passed to the get_datapoint function of the Procedure class).\nThe file my_config.ini is passed to set custom data saving, logging and matplotlib options.\nThe analysis function is passed as an optional attribute, to produce on-the-fly data analysis for live plotting (only the raw data is saved on disk). To have analysed data save on disk, create an empty Measurable and update it in the measure loop as also shown in the example below.", "%%writefile my_config.ini\n[Filename]\nprefix = my_data_\ndated_folder = 1\ndirectory = data\next = csv\nindex = \ndatetimeformat = %Y%m%d_%H%M%S\n\n[Logging]\nconsole = 1\nconsole_level = WARNING\nfilename = test.log\nfile_level = DEBUG\n\n[matplotlib.rcParams]\naxes.axisbelow = True\naxes.prop_cycle = cycler('color', ['b', 'g', 'r', 'c', 'm', 'y', 'k'])\naxes.edgecolor = 'white'\naxes.facecolor = '#EAEAF2'\naxes.grid = True\naxes.labelcolor = '.15'\naxes.labelsize = 11.0\naxes.linewidth = 0.0\naxes.titlesize = 12.0\nfigure.facecolor = 'white'\nfigure.figsize = [8.0, 5.5]\nfont.sans-serif = ['Arial', 'Liberation Sans', 'Bitstream Vera Sans', 'sans-serif']\ngrid.color = 'white'\ngrid.linestyle = '-'\ngrid.linewidth = 1.0\nimage.cmap = 'Greys'\nlegend.fontsize = 10.0\nlegend.frameon = False\nlegend.numpoints = 1\nlegend.scatterpoints = 1\nlines.linewidth = 1.75\nlines.markeredgewidth = 0.0\nlines.markersize = 7.0\nlines.solid_capstyle = 'round'\npatch.facecolor = (0.2980392156862745, 0.4470588235294118, 0.6901960784313725)\npatch.linewidth = 0.3\ntext.color = '.15'\nxtick.color = '.15'\nxtick.direction = 'out'\nxtick.labelsize = 10.0\nxtick.major.pad = 7.0\nxtick.major.size = 0.0\nxtick.major.width = 1.0\nxtick.minor.size = 0.0\nytick.color = 
'.15'\nytick.direction = 'out'\nytick.labelsize = 10.0\nytick.major.pad = 7.0\nytick.major.size = 0.0\nytick.major.width = 1.0\nytick.minor.size = 0.0\n\n%%writefile procedures.py\nimport random\nfrom time import sleep\n\nimport logging\nlog = logging.getLogger('')\nlog.addHandler(logging.NullHandler())\n\nfrom pymeasure.experiment import Procedure, IntegerParameter, Parameter, FloatParameter, Measurable\n\nclass TestProcedure(Procedure):\n \n iterations = IntegerParameter('Loop Iterations', default=100)\n delay = FloatParameter('Delay Time', units='s', default=0.2)\n seed = Parameter('Random Seed', default='12345')\n iteration = Measurable('Iteration', default = 0)\n random_number = Measurable('Random Number', random.random)\n offset = Measurable('Random Number + 1', default = 0)\n\n def startup(self):\n log.info(\"Setting up random number generator\")\n random.seed(self.seed)\n \n def measure(self):\n data = self.get_datapoint()\n data['Random Number + 1'] = data['Random Number'] + 1\n log.debug(\"Produced numbers: %s\" % data)\n self.emit('results', data)\n self.emit('progress', 100.*self.iteration.value/self.iterations)\n\n def execute(self):\n log.info(\"Starting to generate numbers\")\n for self.iteration.value in range(self.iterations):\n self.measure()\n sleep(self.delay)\n if self.should_stop():\n log.warning(\"Catch stop command in procedure\")\n break\n\n def shutdown(self):\n log.info(\"Finished\")\n\n%%writefile analysis.py\ndef add_offset(data, offset):\n return data['Random Number'] + offset\n\ndef analyse(data):\n data['Random Number + 2'] = add_offset(data, 2)\n return data\n\nfrom pymeasure.experiment import Experiment, config\nfrom procedures import TestProcedure\nfrom analysis import analyse\nconfig.set_file('my_config.ini')\n%matplotlib inline\n\nprocedure = TestProcedure(iterations=10, delay=.1)\nexperiment = Experiment('test', procedure, analyse)\n\nexperiment.start()\nimport pylab as pl\npl.figure(figsize=(10,4))\nax1 = 
pl.subplot(121)\nexperiment.plot('Iteration', 'Random Number', ax=ax1)\nax2 = pl.subplot(122)\nexperiment.plot('Iteration', 'Random Number + 1', ax=ax2)\nexperiment.plot_live()", "Analysed data", "experiment.data", "Raw data (as saved on disk)", "experiment.results.data", "Filename generated by config preferences", "experiment.filename" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
csc-training/python-introduction
notebooks/examples/7 - NumPy.ipynb
mit
[ "NumPy\nNumPy is the fundamental package for scientific computing with Python. The main additions to the standard Python are\n\nNew datatype, NumPy array\nstatic, multidimensional\nFast processing of arrays\nTools for linear algebra, random numbers, ...\n\nNumpy array\nThe NumPy array is static, which means that\n\nAll elements have the same type, i.e. contrary to Python lists one cannot have both e.g. integer numbers and strings as elements\nThe size of the array is fixed at the time of creation, so elements cannot be added or removed\n\nArray can have arbitrary number of dimensions, and even though the size of the array is fixed, the shape can be changed, i.e 2x2 matrix can be changed into 4 element vector. It is possible to combine, split, and resize arrays, but a new array is then always created.\nThe picture below illustrates the differences between NumPy arrays and Python list. As the NumPy array is (normally) contiguous in memory, the processing is much faster, and the dynamic nature of list adds also lots of overhead.\n\nFirst thing when starting to work with NumPy, is to import the package. 
The package is commonly imported as np", "import numpy as np", "Creating NumPy arrays\nFrom a list (or tuple):", "a = np.array((1, 2, 3, 4))\nprint(a)\nprint(a.dtype)\nprint(a.size)\n\na = np.array((1,2,3,4), dtype=float) # Type can be explicitly specified\nprint(a)\nprint(a.dtype)\nprint(a.size)", "Multidimensional lists (or tuples) produce multidimensional arrays", "my_list = [[1,2,3], [4,5,6]]\na = np.array(my_list)\nprint(a)\nprint(a.size)\nprint(a.shape)", "Evenly spaced values", "a = np.arange(6) # half open interval up to 6\nprint(a)\na = np.arange(0.1, 1, 0.2) # half open interval with start, stop, step\nprint(a)\nb = np.linspace(-4.5, 4.5, 5) # specified number of samples within closed interval\nprint(b)", "Specific sized arrays", "mat = np.empty((2, 2, 2), float) # uninitialized 2x2x2 array\nmat = np.zeros((3,3), int) # initialized to zeros\nmat = np.ones((2,3), complex) #initialized to ones", "Indexing and slicing arrays\nIndexing is similar to lists, with different dimensions separated by commas", "a = np.arange(6)\nprint(a[2])\nprint(a[-2])\nmat = np.array([[1, 2, 3], [4, 5, 6]])\nprint(mat)\nprint(mat[0,2])\nprint(mat[-1,-2])", "Contrary to lists, slicing is possible over all dimensions", "mat = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])\nprint(mat[1, 1:3])\nmat = np.zeros((4,4))\nmat[1:-1,1:-1] = 2\nprint(mat)", "Views and copies of arrays\nAs with all mutable Python objects, simple assignment creates a reference to array. If a and b are references to same array, changing contents of b changes also contents of a. 
NumPy arrays have a copy method for actual copying of array", "a = np.arange(6)\nprint(a)\nb = a # b is a reference, changing values in b changes also a\nb[2] = -3\nprint(a)\nb = a.copy() # b is copy, changing b does not affect a\nb[0] = 66\nprint(b)\nprint(a)", "Slicing creates a view to the array, and modifying the view changes corresponding original contents", "c = a[1:4]\nprint(c)\nc[-1] = 47\nprint(a)", "Array operations\nMost arithmetic operations for NumPy arrays are done elementwise. Note for Matlab users! Multiplication is done elementwise.", "a = np.array([1.0, 2.0, 3.0])\nb = 2.0\nprint(b * a)\nprint(b + a)\nprint(a + a)\nprint(a * a)", "NumPy has special functions which can work with the array arguments (sin, cos, exp, sqrt, log, ...)", "x = np.linspace(-np.pi, np.pi, 5)\ny = np.sin(x)", "Vectorized operations\nFor loops and indexing in Python are slow. If the corresponding operation can be written in terms of full (or partial) arrays, operation can be speeded up significantly.\nExample: calculating the difference between successive array elements", "N = 1000\na = np.arange(N)\ndif = np.zeros(N-1, a.dtype)\n\n%%timeit #timeit magic allows easy timing for the execution of an cell\n# brute force with for loop\nfor i in range(1, N):\n dif[i-1] = a[i] - a[i-1]\n\n%%timeit\n# vectorized operation\ndif = a[1:] - a[:-1]", "Linear algebra\nNumPy contains linear algebra operations for matrix and vector products, eigenproblems and linear systems. Typically, NumPy is built against optimized BLAS libraries which means that these operations are very efficient (much faster than naive implementation e.g. 
with C or Fortran)", "A = np.array(((2, 1), (1, 3)))\nB = np.array(((-2, 4.2), (4.2, 6)))\nC = np.dot(A, B) # matrix-matrix product\nw, v = np.linalg.eig(A) # eigenvalues in w, eigenvectors in v\nb = np.array((1, 2))\nx = np.linalg.solve(C, b) # Solve Cx = b\nprint(np.dot(C, x)) # np.dot calculates also matrix-vector and vector-vector products", "Simple plotting with matplotlib\nmatplotlib is powerful 2D plotting library for Python. matplotlib can produce publication quality figures in various hardcopy formats. matplotlib can be used in scripts and in interactive shells, as well as in notebooks. matplotlib tries to make easy things easy and hard things possible. You can generate plots, histograms, power spectra, bar charts, errorcharts, scatterplots, etc, with just a few lines of code. Good way to learn about matplotlib's possibilities is to check the screenshots and gallery which provide also the code to produce the figures.\nFor simple plotting, one commonly imports matplotlib.pyplot package", "import matplotlib.pyplot as plt", "For showing fictures in the notebook, one can invoke the following magic", "%matplotlib inline", "Simple line plots of NumPy arrays", "x = np.linspace(-np.pi, np.pi, 100)\ny = np.sin(x)\nplt.plot(x, y)\nplt.title('A simple plot')\nplt.xlabel('time (s)')", "Multiple subplots", "x = np.linspace(-np.pi, np.pi, 100)\ny1 = np.sin(x)\ny2 = np.cos(x)\nplt.subplot(211) #create 2x1 plot, use 1st\nplt.plot(x, y1, linewidth=2)\nplt.ylabel('sin')\nplt.subplot(212) #use 2nd\nplt.plot(x, y2, '--or') # use dashed line, 'o' markers and red color\nplt.ylabel('cos', fontsize=16)\nplt.xlabel(r'$\\theta$') # when using Latex, string has to be so called raw string (r'my string')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jdvelasq/ingenieria-economica
2016-03/IE-01-calculos-basicos.ipynb
mit
[ "Modelos de Valor del Dinero en el Tiempo\nNotas de clase sobre ingeniería economica avanzada usando Python\nJuan David Velásquez Henao\njdvelasq@unal.edu.co \nUniversidad Nacional de Colombia, Sede Medellín\nFacultad de Minas\nMedellín, Colombia \nSoftware utilizado\n\nEste es un documento interactivo escrito como un notebook de Jupyter , en el cual se presenta un tutorial sobre finanzas corporativas usando Python. Los notebooks de Jupyter permiten incoporar simultáneamente código, texto, gráficos y ecuaciones. El código presentado en este notebook puede ejecutarse en los sistemas operativos Linux y OS X. \nHaga click aquí para obtener instrucciones detalladas sobre como instalar Jupyter en Windows y Mac OS X.\nDescargue la última versión de este documento a su disco duro; luego, carguelo y ejecutelo en línea en Try Jupyter!\n\nContenido\n\n\n\n\n\nBibliografía \n\n\n[1] SAS/ETS 14.1 User's Guide, 2015. \n[2] hp 12c platinum financial calculator. User's guide. \n[3] HP Business Consultant II Owner's manual.\n[4] C.S. Park and G.P. Sharp-Bette. Advanced Engineering Economics. John Wiley & Sons, Inc., 1990.\n\n\nDiagrama de flujo de dinero\nPara la realización de cálculos financieros, el flujo de dinero en el tiempo es representado mediante el diagrama de flujo de dinero. 
En este, las flechas hacia arriba representan dinero recibido, mientras que las flechas hacia abajo representan dinero pagado.\n<img src=\"images/cash-flow.png\" width=600>\nValor del dinero en el tiempo\nContenido\nUn peso hoy vale más que un peso mañana, debido a que un peso hoy se puede invertir (hoy) para obtener intereses.\n<img src=\"images/cashflow-1-period.png\" width=600>\nDe forma general:\n\n$PV$ -- Valor actual\n$FV$ -- valor futuro (FV) \n$r$ -- tasa de interés \n\n$$FV = PV \\times (1 + r)$$ \n$$PV= \\left( \\displaystyle\\frac{1}{1+r}\\right) \\times FV $$\nComponentes de la tasa de interés:\n$$1+r = \\left( \\displaystyle\\ 1 + r_\\alpha\\right) \\left( \\displaystyle\\ 1 + f\\right) \\left( \\displaystyle\\ 1 + r_\\pi\\right)$$\n\n\n$r_{α}$ -- Interés real\n\n\n$f$ -- Inflación\n\n\n$r_{π}$ -- Componente de riesgo\n\n\nInterés simple\nEn el interés simple se reciben directamente los intereses del capital sin reinvertirlos. Es típico en algunos tipos de préstamos.\nEjemplo.-- [2, pag. 43] Un amigo necesita un préstamo de 450 por 60 días. Usted le presta a un interés simple del 7%, calculado sobre una base anual de 360 días. Cuál es la cantidad total que usted debe recibir al final de periodo?\n<img src=\"images/simple-interest.png\" width=350>", "## cálculo manual\n-450 * (1 + 0.07 * 60 / 360)", "Ejemplo.-- [2, pag. 43] Realice el mismo ejemplo anterior sobre una base de 365 días.", "## cálculo manual\n-450 * (1 + 0.07 * 60 / 365)", "Ejemplo.-- ¿Qué cantidad de dinero se poseerá después de prestar \\$ 2300 al 25% de interés simple anual durante 3 años?", "## cálculo manual\n2300 * (1 + 0.25 * 3)", "Ejercicio.-- ¿Qué cantidad de dinero se poseerá después de prestar \\$ 10000 al 30% de interés simple anual durante 2 años? (R/ \\$ 16000)\n\nInterés compuesto\nContenido\nEn el interés simple se reciben directamente los pagos del interés. 
En el interés compuesto, los intereses se suman al capital $(P)$, tal que se reciben intereses sobre los intereses de periodos pasados.\n\nDiferencia:\n$$P[(1+i)^N-1]-iNP = P[(1+i)^N-(1+iN)]$$\nConcepto de equivalencia financiera\nContenido\nDos flujos de fondos son equivalentes a la tasa de interés $r$ si el uno puede ser convertido en el otro usando las tranformaciones apropiadas de interés compuesto.\n<img src=\"images/equivalencia.png\" width=700>\nModelos de valor del dinero en el tiempo\nContenido\nEquivalencia entre un pago actual y un pago futuro.\nContenido\n<img src=\"images/equiv-pago-actual-futuro-disc.png\" width=400>\n$$F = - P * (1+r)^n$$\n$$P = - F * (1 + r)^{-n} = -\\frac{F}{(1+r)^n}$$\nPara resolver este tipo de problemas se requieren tres de las cuatro variables de la formula.\n\nEjercicio.-- Exprese $r$ en función de $P$, $F$ y $n$.\nEjercicio.-- Exprese $n$ en función de $P$, $F$ y $r$.\n\nEjemplo.-- ¿Cuánto dinero se deberá invertir hoy si se quiere obtener al final de 5 años \\$ 7800 a un interés trimestral del 2%?\n<img src=\"images/sesion-1-ejemplo-1.png\" width=400>", "-7800 / ((1 + 0.02) ** 20)", "Ejemplo.-- Cual es el valor futuro de \\$ 730 dentro de un año con un interés mensual del 2.3%", "730 * (1 + 0.023)**12", "Equivalencia entre un pago actual y una serie de pagos iguales perpetuos.\nContenido\n<img src=\"images/equiv-pmt-inf.png\" width=300>\nSe calcula $P$ como el valor presente de los pagos iguales $A$:\n$$ P=\\frac{A}{(1+r)} + \\frac{A}{(1+r)^2} + ... $$\nSumando $A$ a ambos lados de la ecuación:\n$$P+A~=~A \\left [ 1 + \\left(\\frac{1}{1+r}\\right) + \\left(\\frac{1}{1+r}\\right) ^2 + ... 
\\right ]~=~A\\left [ \\frac{1}{1-\\left(\\frac{1}{1+r}\\right)}\\right] ~=~ A + \\frac{A}{r}$$\nAl despejar $P$ se obtiene:\n$$P=\\frac{A}{r}$$\nEquivalencia pagos iguales finitos.\nContenido\n<img src=\"images/equiv-pmt-finitos.png\" width=500>\nLas formulas financieras para anualidades se obtienen al considerar la resta de dos anualidades infinitas; la primera se inicia en el instánte 0 y la segunda en el instánte $n$: \n$$F~=~ P\\times(1+r)^n - P^* ~=~\\frac{A}{r} \\times (1+r)^n - \\frac{A}{r} ~=~ A \\left [ \\frac{(1 + r)^n -1}{r} \\right ]$$\n\nEjercicio.-- Derive la ecuación para calcular el valor presente ($P$) en función de $A$, $r$ y $n$.\n$$\\frac{1}{8} * A ^2$$\nEjercicio.-- Escriba $A$ en función de $P$, $r$ y $n$.\nEjercicio.-- Escriba $r$ en función de $P$, $n$ y $A$.\nEjercicio.-- Escriba $n$ en función de $n$ en función de $P$, $A$ y $r$.\nEjercicio.-- Cuál tasa de interés hace que los siguientes flujos sean equivalentes?\n\n\nRecibir \\$ 1000 hoy.\n\n\nRecibir \\$ 600 al final del periodo 1 y otros \\$ 600 al final del periodo 2.\n\n\n<img src=\"images/sesion-1-ejemplo-2.png\" width=400>\n\nModo de pago de anualidades finitas\nContenido\nEn el caso de pagos iguales periodicos, es posible establecer el pago al principio o al final del periodo. 
\n<img src=\"images/payment-mode.png\" width=500>\n\nEjercicio.-- Derive las ecuaciones de equivalencia entre una anualidad finita anticipada y su valor futuro equivalente y despeje $A$, $F$, $r$ y $n$.\n<img src=\"images/eqiv-pmt-ant-finita.png\" width=400>\n\nModelo general de equivalencia\nLos modelos de valor de dinero en el tiempo (TVM -- time value of money) se basan en los siguientes esquemas.\n<img src=\"images/tvm.png\" width=700>\n\nEjercicio.-- ¿Cuál es el valor de A para que los dos flujos sean equivalentes usando una tasa ($r$) del 8%?\n<img src=\"images/ejercicio-A.png\" width=500>\n\nFlujos típicos\nContenido\nPara resolver un problema, primero se debe identificar el flujo de efectivo típico [2, pag. 48]\n<img src=\"images/flujos-tipicos.png\" width=700>\n\nEjercicio.-- Cuál es el valor presente de un bono (obligación) a dos años, cuyo pago (principal) es de \\$ 100, con pagos trimestrales de intereses a una tasa del 10%?\n\nLibreria cashflows\ncashflows es una librería para la ejecución de cálculos financieros. Las funciones implementadas son similares a las usadas en Microsoft Excel, las calculadoras financieras y otros softwares similares.", "# importa la librería financiera.\n# solo es necesario ejecutar la importación una sola vez.\nimport cashflows as cf", "tvm es un modelo para la realización de cálculos simples del valor del dinero en el tiempo. 
Este modelo usa variables internas para almacenar la información y funciones para realizar los cálculos.\nNomenclatura para los parámetros:\n\n\npval -- valor presente.\n\n\nfval -- valor futuro.\n\n\npmt -- pago periodico.\n\n\nnper -- cantidad de periodos.\n\n\nnrate -- tasa de interés por periodo.\n\n\npyr -- número de periodos por año.\n\n\nwhen -- momento del periodo en que se paga la anualidad: 'end' (o 0) indica el pago al final del periodo; 'begin' (o 1) indica el pago al principio del periodo.\n\n\nNomenclatura para las funciones de equivalencia financiera:\n\n\npvfv(pval=None, fval=None, nrate=None, nper=None, pyr=1, noprint=True) -- valor presente - valor futuro.\n\n\npmtfv(pmt=None, fval=None, nrate=None, nper=None, pyr=1, noprint=True) -- pago periodico - valor futuro.\n\n\npvpmt(pmt=None, pval=None, nrate=None, nper=None, pyr=1, noprint=True) -- valor presente - pago periodico.\n\n\ntvmm(pval=None, fval=None, pmt=None, nrate=None, nper=None, due=0, pyr=1, noprint=True) -- modelos de valor del dinero en el tiempo.\n\n\namortize(pval=None, fval=None, pmt=None, nrate=None, nper=None, due=0, pyr=1, noprint=True) -- imprime la tabla de amortizaciones para los cálculos realizados.\n\n\nA continuación se presentan varios ejemplos de su uso.\nEjemplo (cuenta de ahorros).-- [3, pág. 88] Se depositan \\$ 2000 en una cuenta de ahorros que paga un interés anual del 7.2% (calculado anualmente). Si no se hacen otros depósitos en la cuenta, cuanto tiempo se requiere para que la cuenta tenga \\$ 3000? 
R/ 5.83\n<img src=\"images/sesion-1-ejemplo-4.png\" width=350>", "cf.pvfv(nrate = 7.2, # tasa de interes\n pval = -2000, # valor presente\n fval = +3000) # valor futuro\n\n# Ya que nper es un valor entre 5 y 6, se requieren 6 años \n# para tener un balance de al menos $ 3000.\n# El balance al final de los seis años es (R/ 3035.28):\ncf.pvfv(nrate = 7.2, # tasa de interes\n pval = -2000, # valor presente\n nper = 6) # numero de periodos", "Ejemplo (cuenta de ahorro).-- ¿Cuánto dinero se deberá invertir hoy si se quiere obtener al final de 5 años \\$ 7800 a un interés trimestral del 2%? R/ -5249.18\n<img src=\"images/sesion-1-ejemplo-1.png\" width=400>", "cf.pvfv(nrate = 2.0, # tasa de interes\n fval = 7800, # valor futuro \n pyr = 1,\n nper = 5*4) # número de periodos: 5 años * 4 trimestres por año\n# calcula el valor presente", "Ejemplo (valorización).-- Cuál es el valor futuro de \\$ 730 dentro de un año con un interés mensual del 2.3%? R/ \\$ 959.03 \n<img src=\"images/sesion-1-ejemplo-3.png\" width=350>", "cf.pvfv(nrate = 2.3, \n nper = 12, \n pval = -730)", "Ejemplo (leasing).-- [3, pág 02] Se hace un leasing por una maquinaria por 4 años (48 meses) con pagos mensuales de \\$ 2400; se debe pagar una cuota adicional de \\$ 2400 al principio del leasing para reemplazar la última cuota (que ocurre al principio del mes 48). El contrato incluye una opción de compra al final del periodo de leasing por \\$ 15000. Cual valor capitalizado del leasing para una tasa del 1.5% mensual? \n<img src=\"images/sesion-1-ejemplo-5.png\" width=350>", "# se pagan 47 cuotas de $ 2400 al principio del mes.\ncf.tvmm(nper = 47,\n pmt = -2400,\n due = 1,\n fval = 0,\n nrate = 1.5)\n# valor presente de las 47 cuotas al principio del periodo R/ $ 81735.58\n\nx = _ + 2400 # + cuota adicional al principio del leasing R/ $ 84135.58\nx\n\nx + cf.pvfv(nper = 48,\n fval = -15000,\n nrate = 1.5) # opcion de compra R/ $ 91476.00\n\n", "Ejemplo (hipoteca).-- [2, pág. 
50] Se hará un préstamo de \\$ 35000 con un interes del (10.5% / 12). Si se hacen pagos mensuales de \\$ 325 al final de cada mes, cuánto tiempo se requiere para cancelar la deuda?", "## crea una instancia del modelo y se almacena en una variable\nm = cf.tvmm(nrate = 10.5/12, # tasa de interes\n pval = 35000, # valor presente \n pmt = -325,\n fval = 0,\n due = 0) # 'end' \nm", "Si se hacen 327 pagos de \\$ 325, cuánto será el pago No. 328?", "# pago en exceso el valor de la deuda\nm = cf.tvmm(nrate = 10.5/12, # tasa de interes\n pval = 35000, # valor presente \n pmt = -325,\n nper = 327,\n due = 0) # 'end' \nm\n\n# pago en exceso el valor de la deuda\nm * (1 + 10.5/12/100) + 325", "Se se hacen únicamente 327 pagos, cuál es el valor del pago final para cancelar completamente la deuda?", "m = cf.tvmm(nrate = 10.5/12, # tasa de interes\n pval = 35000, # valor presente \n pmt = -325,\n nper = 327,\n due = 0) # 'end' \nm\n\n-325 + m", "Ejemplo.-- [2, pág. 53] Se abre una cuenta hoy con un depósito de \\$ 775. La tasa de interés es de (6.25%/24). Si se siguen realizando depósitos de \\$ 50, cuánto tiempo se requiere para alcanzar un saldo de \\$ 4000?", "cf.tvmm(nrate = 6.25/24, # tasa de interes\n pval = -775, # deposito inicial \n pmt = -50, # depositos periodicos\n fval = 4000, # saldo final\n due = 0) # depositos al final del periodo \n\n\n# saldo al final de 58 periodos\ncf.tvmm(nrate = 6.25/24, # tasa de interes\n nper = 58, # numero de periodos\n pval = -775, # deposito inicial \n pmt = -50, # depositos periodicos\n due = 0) # depositos al final del periodo ", "Ejemplo.-- [2, pág. 55] Qué tasa de interés debe obtenerse para acumular \\$ 10000 en 32 periodos si se hace una inversión de \\$ 6000? 
R/ 1.61%", "cf.pvfv(pval = -6000, # deposito inicial\n nper = 32, # numero de periodos\n fval = 10000) # saldo final\n\ncf.tvmm(pval = -6000, # deposito inicial\n nper = 32, # numero de periodos\n pmt = 0, # pago periodico\n fval = 10000) # saldo final", "Ejemplo.-- [2, pág. 57] Si se va a realizar un leasing a una tasa de interés de (5.9%/12) y se deben realizar 48 pagos de \\$ 450 y un pago inicial de \\$ 1500 al constituirse el crédito, cuál es el monto del préstamo?", "cf.pvpmt(pmt = -450, # pago mensual\n nrate = 5.9/12, # tasa de interés\n nper = 48) # numero de periodos\n\n_ + 1500", "Ejemplo.-- [2, pág. 58] Cuánto se puede pagar por una propiedad que generará un flujo neto anual de \\$ 17500 durante 5 años, si al final la propiedad se puede vender en \\$ 540.000? (la tasa de interés es del 12%)", "cf.tvmm(pmt = 17500, # pago periodico anual\n fval = 540000, # valor de venta\n nrate = 12.0, # tasa de interés\n nper = 5) # numero de periodos", "Ejemplo.-- [2, pág. 59] Calcule el pago mensual de una hipoteca por \\$ 243400 pagada en 348 meses a una tasa del 5.25%/12.", "cf.pvpmt(pval = 243400, # monto\n nrate = 5.25/12, # tasa de interés\n nper = 348) # numero de periodos", "Ejemplo.-- [2, pág. 59] Cuánto es el monto periodico que debe consignarse mensualmente en una cuenta de ahorros si el saldo inicial es de \\$ 3200, el saldo final es de \\$ 60000, la tasa es de 9.75%/2 y plazo es de 30 meses?", "cf.tvmm(pval = -3200, # apertura\n fval = 60000, # saldo futuro\n nper = 30, # numero de periodos\n nrate = 9.75/2) # tasa de interés", "Ejemplo.-- [2, pág. 61] Si se tiene una hipoteca de \\$ 243400 con un pago mensual de \\$ 1363.29 en 348 meses a una tasa del 5.25%/12, cuál es el pago que debe realizarse en la cuota 60 para cancelar completamente la deuda?", "cf.tvmm(pval = 243400, # monto\n nrate = 5.25/12, # tasa de interés\n pmt = -1363.29, # pago mensual\n nper = 60) # numero de periodos", "Ejemplo.-- [2, pág. 
61] Si se consignan \\$ 50 al principio de cada mes en una nueva cuenta que paga 6.25%/12, cuál es el saldo al final de 24 meses?", "cf.tvmm(pval = 0, # monto\n nrate = 6.25/12, # tasa de interés\n pmt = -50.0, # pago mensual\n due = 1, # pago al principio del periodo\n nper = 24) # numero de periodos", "Ejemplo.-- [2, 62] Se compra una propiedad por \\$ 32000. Si se presenta una depreciación del 2% por año, cuál será el valor de la propiedad al final de 6 años? R/ \\$ 28346.96", "cf.pvfv(pval = -32000,\n nrate = -2.0, \n nper = 6)", "Ejemplo.-- [3, pág. 81] Se está financiando la compra de un carro nuevo con un leasing a tres años a una tasa de interés del 10.5%/12. El precio del carro es de \\$ 7250. Se debe realizar un pago inicial de \\$ 1500. Cuánto es el pago mensual si los pagos se hacen al final del mes?", "cf.pvpmt(pval = 7250-1500,\n nrate = 10.5/12, \n nper = 36)\n\n# si se desea reducir el pago periodico en $ 10, cual tasa de interés debería obtenerse.\ncf.pvpmt(pval = 7250-1500,\n pmt = -176.89, \n nper = 36)", "Ejemplo.-- [3, pág. 82] El pago máximo mensual por una hipoteca a 30 años es de \\$ 630. Si usted puede realizar un pago inicial de \\$ 12.000 y la tasa es de 11.5%/12, cuál es el precio máximo de la propiedad?", "cf.pvpmt(pmt = -630, \n nrate = 11.5/12,\n nper = 30*12) + 12000", "Ejemplo.-- [3, pág. 83] Se constituye una hipoteca a 25 años por un monto de \\$ 75250 y una tasa de interés mensual de 13.8%/12. (a) cuál es el pago mensual; (b) Si se anticipa la venta de la propiedad al final de 4 años, cuánto debe pagarse para cancelar la deuda?", "# (a) R/ $ 894.33\ncf.pvpmt(pval = 75250, \n nrate = 13.8/12,\n nper = 25*12) \n\n# (b) R/ $ -73408.81\ncf.tvmm(pval = 75250, \n pmt = -894.33,\n nrate = 13.8/12,\n nper = 4*12) ", "Ejemplo (ahorro programado).-- Se abre una cuenta ahorros con un depósito inicial de \\$ 2000, y posteriormente se hacen depósitos quincenales de \\$ 80 durante 15 años. La cuenta paga un interés del 0.346% quincenal. 
Cuánto dinero habrá en la cuenta después del último depósito? R/ 63988.44", "cf.tvmm(pval = -2000,\n pmt = -80,\n nper = 15*12*2,\n nrate = 0.346)", "Ejemplo (leasing).-- Se hace un leasing por 3 años para un carro nuevo que vale hoy \\$ 13500, con la opción de comprar el carro por \\$ 7500 al final del leasing. Cuánto es el pago mensual si el interés es del 1.16% mensual? (tenga en cuenta que en el leasing se paga la cuota al principio del periodo) R/ $ 288.49", "cf.tvmm(pval = 13500,\n fval = -7500,\n nper = 36,\n due = 1,\n nrate = 1.16)", "Ejemplo (préstamo).-- Se va a comprar un equipo con un préstamo por \\$ 35000 con pagos mensuales de \\$ 325 mensuales. Si la tasa de interés es del 0.88% mensual, cuántos periodos se requieren para realizar el pago de la deuda?", "x = cf.pvpmt(pval = 35000,\n nrate = 0.88,\n pmt = -325)\nx # R/ 336.77\n\n# OPCION 1: se hace nper = 337 para calcular cuanto se paga \n# como excedente en la última cuota. R/ $ 74.51\ncf.tvmm(pval = 35000,\n pmt = -325,\n nrate = 0.88,\n nper = 337)\n\n\n325 - _ # Pago final, restando el excedente. R/ \\$ 250.49\n\n# OPCION 2: Se hace un pago adicional con la \n# cuota 336 para cancelar el préstamo\ncf.tvmm(pval = 35000,\n pmt = -325,\n nrate = 0.88,\n nper = 336)\n\n-325 + _ # pago final: cuota actual + saldo por cancelar", "Ejemplo (cálculo de la tasa de interés).-- Si hoy se ahorran \\$ 6000 y sin hacer más depósitos se tienen \\$ 10000 al cabo de 8 años, ¿cuál es la tasa de interés, si los intereses son liquidados trimestralmente? R/ 1.61%", "cf.pvfv(pval = -6000,\n fval = +10000,\n nper = 4*8)", "Ejemplo (inversión).-- Se comprarán unas bodegas para arrendamiento. Si el arriendo es de \\$ 17500 anual, y las bodegas se venden al final del año 5 por \\$ 540000. Cuál es el monto máximo que puede pagarse por las bodegas si la tasa es del 12% anual? 
R/ \\$ 369494.09", "cf.tvmm(fval = 540000,\n pmt = 17500,\n nrate = 12.0, \n nper = 5)", "Ejemplo (ahorro programado).-- Si al principio de cada mes se ahorran \\$ 100 a una tasa del 2% durante 24 meses, ¿cuánto dinero se tendrá al final del mes 24?.", "cf.tvmm(nrate = 2.0,\n pval = 0,\n nper = 24,\n pmt = -100,\n due = 1)", "Ejemplo (préstamo).-- Se desea determinar el valor de las cuotas mensuales iguales que se deberán pagar por un préstamo de \\$ 16700 financiado a 5 años al 2% mensual", "cf.pvpmt(nrate = 2.0,\n pval = 16700,\n nper = 5*12)", "Ejemplo (préstamo con pago final adicional).-- Se realiza una hipoteca por \\$ 243400 a 5 años y con una tasa de interes de 0.12% con pagos mensuales de \\$ 4000 y un pago adicional en el último mes. ¿cuál es el valor de dicho pago adicional?", "cf.tvmm(nrate = 0.12,\n pval = 243400,\n nper = 5*12,\n pmt = -4000)", "Ejemplo.-- Cuál será el valor futuro de \\$100, \\$ 200, \\$ 300 y \\$ 400 en 5 años a una tasa de interés de 3% anual?", "cf.pvfv(pval = [100, 200, 300, 400], # una de las variables puede ser un vector\n nper = 5,\n nrate = 3.0)", "Ejemplo.-- Cuál será el valor futuro de \\$100 en 1, 2, 3 y 4 años a una tasa de interés de 3% anual?", "cf.pvfv(pval = 100, # una de las variables puede ser un vector\n nper = [1, 2, 3, 4],\n nrate = 3.0)", "Ejemplo.-- Cuál será el valor futuro de \\$100 si se invierte a un año al 1%, a 2 años al 1.5% y a 3 años al 1.7%, con una capitalización anual?", "cf.tvmm(pval = 100, # si varias variables son listas\n pmt = 0, # todas ellas deben tener la misma\n nper = [1, 2, 3], # longitud\n nrate = [1.0, 1.5, 1.7]) # ", "Ejemplo.-- Cuál es la amortización para los siguientes préstamos? 
(fv es el pago final residual (como en el leasing)\n```\n plazo 5, 5, 6, 7\n pv 100, 110, 110, 105\n fv -20, -10, -20, 0\n tasa 0.020, 0.017, 0.016, 0.017\n```", "cf.tvmm(pval = [ 100, 110, 110, 105 ],\n fval = [ -20, -10, -20, 0 ], \n nper = [ 5, 5, 6, 7 ], \n nrate = [ 2.0, 1.7, 1.6, 1.7 ])", "Ejemplo (tablas de amortización - Parte 1).-- Construya la tabla de amortización (balance) para un préstamo de \\$ 1000 a 6 meses con pagos mensuales iguales a una tasa de interés del 1% mensual.", "# principal, interest, payment, balance\nppal, interest, payment, balance = cf.amortize(pval=1000, fval=0, pmt=None, nrate=1.0, nper=6, due=0)\n\nppal\n\nsum(ppal)\n\ninterest\n\nsum(interest)\n\npayment\n\nsum(payment)", "Ejercicios\nEjercicio.-- Si un banco promete una tasa anual del 8% ¿Cuál será el valor que se obtendrá al final de 3 años de una inversion de \\$ 4500? (R/ \\$ 5668.70)\nEjercicio.-- ¿Cuál es el valor presente de un pago único de \\$ 1500 recibido dentro de 2 años si la tasa de interés es de 2.7% mensual? (R/ \\$ -791.41)\nEjercicio.-- ¿Cuál es el valor futuro de \\$ 609 dentro de 2 años a una tasa de interes mensual de 2%? (R/ $ 979.54) \nEjercicio.-- ¿Cuál es el valor presente de un pago único de \\$ 890 recibido dentro de 6 años a una tasa de interés trimestral de 2.7%? (R/ \\$ 469.57) \nEjercicio.-- ¿Qué cantidad de dinero se poseerá después de prestar \\$ 2300 al 25% de interés compuesto anual durante 3 años?\nEjercicio.-- ¿Qué cantidad de dinero se poseerá después de prestar \\$ 10000 al 30% de interés anual durante 2 años? (R/ \\$ 16900)\nEjercicio.-- ¿cuánto dinero se deberá abonar mensualmente (al principio del mes) si se desea reunir \\$ 28700 al final de 5 años y los ahorros rentan el 4% mensuales? (R/ \\$ 120.59)\nEjercicio.-- Se decide ahorrar mensualmente \\$ 900 los cuales depositará al principio de cada mes en una entidad\nfinanciera que paga un interés del 2.5% mensual. ¿Cuánto habrá acumulado al cabo de 2 años? 
(R/ \\$ 29114.13) \nEjercicio.-- Suponga que usted quiere asegurarse de que puede financiar la educación universitaria de su hija dentro de 14 años a partir de hoy. Su hija retirará \\$ 500 al inicio de cada mes de una cuenta de ahorros durante 4 años. ¿Cuánto se tiene que depositar hoy en la cuenta si el interés es del 1% mensual?\nEjercicio.-- Calcule la cantidad de pago en 29 años de una hipoteca de \\$ 243400 con un interés anual de 5.25%.\nEjercicio.-- Mirando hacia la jubilación, usted desea acumular \\$ 60000 después de 15 años haciendo depósitos en una cuenta que paga 5.3% de interés semestral. Usted abre la cuenta con un depósito de \\$ 3200 con la intención de hacer depósitos semestrales, comenzando seis meses más tarde a partir de sus ingresos. Calcular la cantidad de dinero que se deberá depositar semestralmente (no se consigna al final del último semestre). \nEjercicio.-- Se va a realizar un préstamo de \\$ 35000 a un interés de 0.91% (mensual) y con pagos mensuales de \\$ 325 al final de cada mes. ¿Cuántos meses se requieren para pagar el préstamo?. \n\nContenido" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
a-pagano/BigDive5
DataScience/MapReduce-Introduction.ipynb
mit
[ "Map Reduce Explained\nIs a programming model invented by Google to perform distributed computation on huge amounts of data, it's heavily inspired by the functional programming map and reduce functions.\nThe process is divided in two steps:\n - A mapping step: the input is divided in subparts until it reaches its minimum processable particle which gets processed and the corresponding result gets returned.\n - A reduction step: each value processed by the mapping step goes back to the reduction step that joins the values together to a single result.\n<img src=\"files/MapReduce.jpeg\" width=\"640\"/>\nPractice Example\n<img src=\"files/MapReduce_example.png\"/>\nExample Problem without MapReduce\nOur example problem is to compute the sum of the doubles of various numbers.\nSo if we have 1, 2, 3 as the input the result would be 2+4+6 -> 12", "numbers = range(1000)\n\ndef doubled_sum(values):\n total = 0\n for n in numbers:\n total += n*2\n return total\n\nprint doubled_sum(numbers)", "Example Problem using MapReduce\nAs Python implements the functional programming paradigm too, it provides the functions required to implement the map-reduce paradigm builtin.\nThe foundation tools to implement map reduce are:\n\n\"mapper\" which is in charge of mapping each input value to a corresponding output value\n\"reducer\" which is in charge of merging multiple mapper outputs into a single output. 
\n\nBoth phases can be called multiple times (the output of a reducer can become the input of another reducer and a mapper can call other mappers).\nMany MapReduce implementations also have additional phases like \"combination\" and \"aggregation\" which are executed after the mapper or the reducer to further cleanup their output.", "numbers = range(1000)\n\ndef mapper(value):\n return value*2\n\ndef reducer(*values):\n return sum(values)\n#first_step = map(mapper, numbers)\n#print first_step\nresult = reduce(reducer, map(mapper, numbers))\nprint result", "The previous map-reduce in pure python implementation lacks of course the core feature of MapReduce: working parallely.\nIt's easy to understand that as each mapper and reducer works only on a subset of the data (its own input) it can work independently from the status of the other mappers and reducers. So the computation can proceed parallely.\nParallel Map Reduce in Pure Python\nIt's really easy to simulate a parallel map reduce in python by using the multiprocessing module", "from itertools import islice\nimport multiprocessing\n\n\nclass ParallelMapReduce(object):\n def __init__(self, map_func, reduce_func, num_workers=None):\n self.num_workers = num_workers\n self.map_func = map_func\n self.reduce_func = reduce_func\n self.pool = multiprocessing.Pool(num_workers)\n \n def partition(self, n, iterable):\n i = iter(iterable)\n piece = list(islice(i, n))\n while piece:\n yield piece\n piece = list(islice(i, n))\n \n def __call__(self, inputs):\n values = self.pool.map(self.map_func, inputs)\n \n print '>>> MAPPED VALUES (%s values): %s, ...' 
% (len(values), str(values[:10]))\n\n values = self.pool.map(self.reduce_func, \n self.partition(len(values)//self.num_workers, values))\n print '>>> REDUCED VALUES', values\n\n return self.reduce_func(values)", "The previous mapreduce implementation takes a Mapper and a Reducer and splits them across num_workers until it gets back the final result", "numbers = range(1000)\n\ndef mapper(value):\n return value*2\n\ndef reducer(values):\n return sum(values)\n\nmapreduce = ParallelMapReduce(mapper, reducer, 10)\nprint mapreduce(numbers)", "Distributed Map Reduce\nOur parallel map reduce works parallely by using the multiple cores our computer provides, but it is unable to distribute the work on multiple computers. As map reduce is actually meant to work on milions of items it is vital to be able to distribute the work across multiple servers.\nThis is the reason why various MapReduce frameworks and toolkits were created, on Python environment there are some wildly used solutions:\nDisco\nBorn at Nokia Research centers it is one of the solutions with the best tradeof in complexity and features. It relies on an ERlang core involved in dispatching jobs to the various workers and Python is considered the standard language used to implement workers (while they can be written in any language respecting the Disco protocol). \nIt also provides its own Distributed File System (DDFS) and Distributed Database (DiscoDB) which can be used to store data shared by the various workers.\nOctoPy\nIt's a pure Python solution that is aimed at being really quick to setup, doesn't provide a shared storage for data.\nCan be useful for really small problems where the cost of configuring a cluster can overcome the solution benefit.\nHadoop\nIt's the standard de facto in MapReduce frameworks, heavily implemented in Java it's the most complex solution to setup and maintain. 
Through the Hadoop Streaming feature it is possible to run mappers and reducers in any programming language that can be called by a shell script. Hadoop Streaming it is used be various Python libraries to run python implemented mappers and reducers. \nLike Disco it's a full fledged solution that provides also a Distributed File System where data can be stored for processing HDFS\nAmazon EMR - Elastic MapReduce\nEMR, Elastic Map Reduce is an Amazon provided Hadoop cluster in the cloud.\nAs setting up a complex MapReduce toolkit like Hadoop is a complex and long procedure, it is common to rely on services that provide the computation cluster on demand. During the BigDive course we are going to test our solutions locally, but we will see also how to run them on EMR. You will notice that while EMR has a huge kickstart cost, it quickly becames more convienient when data increases\nIt relies on Elastic Compute Cloud (EC2) to power up a set of computational instances that will became nodes of the Hadoop cluster and on Simple Storage Service (S3) to provide a distribuited file system to which each computation node can access data.\n<img src=\"files/ec2.png\" width=\"640\"/>\nKickstart Cost\nThe issue with using EMR is that you have a big kickstart cost caused by:\n\nEC2 instances need to be created and fully booted to start processing data\nYour data has to be uploaded to an S3 bucket for instances to be able to read it and process it\n\nFor this reason, as the kickstart cost of EMR takes minutes, we will test most examples locally using the multiprocessing module to emulate Hadoop nodes. 
But we will see that our code is perfectly able to run on EMR without changes, so that you can start using EMR as soon as your data gets big enough to justify the kickstart cost.\nHadoop Streaming\nThe feature that permits to run map reduce processes written in any language on hadoop is called Hadoop Streaming,\nHadoop Streaming is a tool that takes as a parameter any runnable software and runs it as a mapper or as a reducer.\nThis makes possible to pass as mappers and reducers Python scripts, which will be started in the Hadoop Cluster.\nFor input and output a line based protocol (data separated by \"\\n\") is used:\n\nEach \"line\" of the input sent to the mapper has to be considered one value to map\nEach \"line\" of the output from the mapper is condidered an emitted (key, value) pair where key and value are tab (\"\\t\") separated\n\nSo the typical I/O in hadoop streaming looks like:\nkey1\\tvalue1\\nkey2\\tvalue2\\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
undercertainty/ou_nlp
14_recurrent_neural_networks.ipynb
apache-2.0
[ "Chapter 14 – Recurrent Neural Networks\nThis notebook contains all the sample code and solutions to the exercices in chapter 14.\nSetup\nFirst, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:", "# To support both python 2 and python 3\nfrom __future__ import division, print_function, unicode_literals\n\n# Common imports\nimport numpy as np\nimport os\n\n# to make this notebook's output stable across runs\ndef reset_graph(seed=42):\n tf.reset_default_graph()\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n# To plot pretty figures\n%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\n# Where to save the figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"rnn\"\n\ndef save_fig(fig_id, tight_layout=True):\n path = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID, fig_id + \".png\")\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format='png', dpi=300)", "Then of course we will need TensorFlow:", "import tensorflow as tf", "Basic RNNs\nManual RNN", "reset_graph()\n\nn_inputs = 3\nn_neurons = 5\n\nX0 = tf.placeholder(tf.float32, [None, n_inputs])\nX1 = tf.placeholder(tf.float32, [None, n_inputs])\n\nWx = tf.Variable(tf.random_normal(shape=[n_inputs, n_neurons],dtype=tf.float32))\nWy = tf.Variable(tf.random_normal(shape=[n_neurons,n_neurons],dtype=tf.float32))\nb = tf.Variable(tf.zeros([1, n_neurons], dtype=tf.float32))\n\nY0 = tf.tanh(tf.matmul(X0, Wx) + b)\nY1 = tf.tanh(tf.matmul(Y0, Wy) + tf.matmul(X1, Wx) + b)\n\ninit = tf.global_variables_initializer()\n\nimport numpy as np\n\nX0_batch = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 0, 1]]) # t = 0\nX1_batch = np.array([[9, 8, 7], [0, 0, 0], [6, 5, 4], [3, 2, 1]]) # t = 1\n\nwith tf.Session() as 
sess:\n init.run()\n Y0_val, Y1_val = sess.run([Y0, Y1], feed_dict={X0: X0_batch, X1: X1_batch})\n\nprint(Y0_val)\n\nprint(Y1_val)", "Using static_rnn()", "n_inputs = 3\nn_neurons = 5\n\nreset_graph()\n\nX0 = tf.placeholder(tf.float32, [None, n_inputs])\nX1 = tf.placeholder(tf.float32, [None, n_inputs])\n\nbasic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)\noutput_seqs, states = tf.contrib.rnn.static_rnn(basic_cell, [X0, X1],\n dtype=tf.float32)\nY0, Y1 = output_seqs\n\ninit = tf.global_variables_initializer()\n\nX0_batch = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 0, 1]])\nX1_batch = np.array([[9, 8, 7], [0, 0, 0], [6, 5, 4], [3, 2, 1]])\n\nwith tf.Session() as sess:\n init.run()\n Y0_val, Y1_val = sess.run([Y0, Y1], feed_dict={X0: X0_batch, X1: X1_batch})\n\nY0_val\n\nY1_val\n\nfrom IPython.display import clear_output, Image, display, HTML\n\ndef strip_consts(graph_def, max_const_size=32):\n \"\"\"Strip large constant values from graph_def.\"\"\"\n strip_def = tf.GraphDef()\n for n0 in graph_def.node:\n n = strip_def.node.add() \n n.MergeFrom(n0)\n if n.op == 'Const':\n tensor = n.attr['value'].tensor\n size = len(tensor.tensor_content)\n if size > max_const_size:\n tensor.tensor_content = \"b<stripped %d bytes>\"%size\n return strip_def\n\ndef show_graph(graph_def, max_const_size=32):\n \"\"\"Visualize TensorFlow graph.\"\"\"\n if hasattr(graph_def, 'as_graph_def'):\n graph_def = graph_def.as_graph_def()\n strip_def = strip_consts(graph_def, max_const_size=max_const_size)\n code = \"\"\"\n <script>\n function load() {{\n document.getElementById(\"{id}\").pbtxt = {data};\n }}\n </script>\n <link rel=\"import\" href=\"https://tensorboard.appspot.com/tf-graph-basic.build.html\" onload=load()>\n <div style=\"height:600px\">\n <tf-graph-basic id=\"{id}\"></tf-graph-basic>\n </div>\n \"\"\".format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))\n\n iframe = \"\"\"\n <iframe seamless style=\"width:1200px;height:620px;border:0\" 
srcdoc=\"{}\"></iframe>\n \"\"\".format(code.replace('\"', '&quot;'))\n display(HTML(iframe))\n\nshow_graph(tf.get_default_graph())", "Packing sequences", "n_steps = 2\nn_inputs = 3\nn_neurons = 5\n\nreset_graph()\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\nX_seqs = tf.unstack(tf.transpose(X, perm=[1, 0, 2]))\n\nbasic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)\noutput_seqs, states = tf.contrib.rnn.static_rnn(basic_cell, X_seqs,\n dtype=tf.float32)\noutputs = tf.transpose(tf.stack(output_seqs), perm=[1, 0, 2])\n\ninit = tf.global_variables_initializer()\n\nX_batch = np.array([\n # t = 0 t = 1 \n [[0, 1, 2], [9, 8, 7]], # instance 1\n [[3, 4, 5], [0, 0, 0]], # instance 2\n [[6, 7, 8], [6, 5, 4]], # instance 3\n [[9, 0, 1], [3, 2, 1]], # instance 4\n ])\n\nwith tf.Session() as sess:\n init.run()\n outputs_val = outputs.eval(feed_dict={X: X_batch})\n\nprint(outputs_val)\n\nprint(np.transpose(outputs_val, axes=[1, 0, 2])[1])", "Using dynamic_rnn()", "n_steps = 2\nn_inputs = 3\nn_neurons = 5\n\nreset_graph()\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\n\nbasic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)\noutputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)\n\ninit = tf.global_variables_initializer()\n\nX_batch = np.array([\n [[0, 1, 2], [9, 8, 7]], # instance 1\n [[3, 4, 5], [0, 0, 0]], # instance 2\n [[6, 7, 8], [6, 5, 4]], # instance 3\n [[9, 0, 1], [3, 2, 1]], # instance 4\n ])\n\nwith tf.Session() as sess:\n init.run()\n outputs_val = outputs.eval(feed_dict={X: X_batch})\n\nprint(outputs_val)\n\nshow_graph(tf.get_default_graph())", "Setting the sequence lengths", "n_steps = 2\nn_inputs = 3\nn_neurons = 5\n\nreset_graph()\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\nbasic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)\n\nseq_length = tf.placeholder(tf.int32, [None])\noutputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32,\n 
sequence_length=seq_length)\n\ninit = tf.global_variables_initializer()\n\nX_batch = np.array([\n # step 0 step 1\n [[0, 1, 2], [9, 8, 7]], # instance 1\n [[3, 4, 5], [0, 0, 0]], # instance 2 (padded with zero vectors)\n [[6, 7, 8], [6, 5, 4]], # instance 3\n [[9, 0, 1], [3, 2, 1]], # instance 4\n ])\nseq_length_batch = np.array([2, 1, 2, 2])\n\nwith tf.Session() as sess:\n init.run()\n outputs_val, states_val = sess.run(\n [outputs, states], feed_dict={X: X_batch, seq_length: seq_length_batch})\n\nprint(outputs_val)\n\nprint(states_val)", "Training a sequence classifier\nNote: the book uses tensorflow.contrib.layers.fully_connected() rather than tf.layers.dense() (which did not exist when this chapter was written). It is now preferable to use tf.layers.dense(), because anything in the contrib module may change or be deleted without notice. The dense() function is almost identical to the fully_connected() function. The main differences relevant to this chapter are:\n* several parameters are renamed: scope becomes name, activation_fn becomes activation (and similarly the _fn suffix is removed from other parameters such as normalizer_fn), weights_initializer becomes kernel_initializer, etc.\n* the default activation is now None rather than tf.nn.relu.", "reset_graph()\n\nn_steps = 28\nn_inputs = 28\nn_neurons = 150\nn_outputs = 10\n\nlearning_rate = 0.001\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.int32, [None])\n\nbasic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)\noutputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)\n\nlogits = tf.layers.dense(states, n_outputs)\nxentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,\n logits=logits)\nloss = tf.reduce_mean(xentropy)\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(loss)\ncorrect = tf.nn.in_top_k(logits, y, 1)\naccuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n\ninit = 
tf.global_variables_initializer()\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\")\nX_test = mnist.test.images.reshape((-1, n_steps, n_inputs))\ny_test = mnist.test.labels\n\nn_epochs = 100\nbatch_size = 150\n\nwith tf.Session() as sess:\n init.run()\n for epoch in range(n_epochs):\n for iteration in range(mnist.train.num_examples // batch_size):\n X_batch, y_batch = mnist.train.next_batch(batch_size)\n X_batch = X_batch.reshape((-1, n_steps, n_inputs))\n sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})\n acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})\n print(epoch, \"Train accuracy:\", acc_train, \"Test accuracy:\", acc_test)", "Multi-layer RNN", "reset_graph()\n\nn_steps = 28\nn_inputs = 28\nn_outputs = 10\n\nlearning_rate = 0.001\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.int32, [None])\n\nn_neurons = 100\nn_layers = 3\n\nlayers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons,\n activation=tf.nn.relu)\n for layer in range(n_layers)]\nmulti_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)\noutputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)\n\nstates_concat = tf.concat(axis=1, values=states)\nlogits = tf.layers.dense(states_concat, n_outputs)\nxentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)\nloss = tf.reduce_mean(xentropy)\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(loss)\ncorrect = tf.nn.in_top_k(logits, y, 1)\naccuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n\ninit = tf.global_variables_initializer()\n\nn_epochs = 10\nbatch_size = 150\n\nwith tf.Session() as sess:\n init.run()\n for epoch in range(n_epochs):\n for iteration in range(mnist.train.num_examples // batch_size):\n X_batch, y_batch = mnist.train.next_batch(batch_size)\n X_batch = 
X_batch.reshape((-1, n_steps, n_inputs))\n sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})\n acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})\n print(epoch, \"Train accuracy:\", acc_train, \"Test accuracy:\", acc_test)", "Time series", "t_min, t_max = 0, 30\nresolution = 0.1\n\ndef time_series(t):\n return t * np.sin(t) / 3 + 2 * np.sin(t*5)\n\ndef next_batch(batch_size, n_steps):\n t0 = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)\n Ts = t0 + np.arange(0., n_steps + 1) * resolution\n ys = time_series(Ts)\n return ys[:, :-1].reshape(-1, n_steps, 1), ys[:, 1:].reshape(-1, n_steps, 1)\n\nt = np.linspace(t_min, t_max, int((t_max - t_min) / resolution))\n\nn_steps = 20\nt_instance = np.linspace(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)\n\nplt.figure(figsize=(11,4))\nplt.subplot(121)\nplt.title(\"A time series (generated)\", fontsize=14)\nplt.plot(t, time_series(t), label=r\"$t . \\sin(t) / 3 + 2 . \\sin(5t)$\")\nplt.plot(t_instance[:-1], time_series(t_instance[:-1]), \"b-\", linewidth=3, label=\"A training instance\")\nplt.legend(loc=\"lower left\", fontsize=14)\nplt.axis([0, 30, -17, 13])\nplt.xlabel(\"Time\")\nplt.ylabel(\"Value\")\n\nplt.subplot(122)\nplt.title(\"A training instance\", fontsize=14)\nplt.plot(t_instance[:-1], time_series(t_instance[:-1]), \"bo\", markersize=10, label=\"instance\")\nplt.plot(t_instance[1:], time_series(t_instance[1:]), \"w*\", markersize=10, label=\"target\")\nplt.legend(loc=\"upper left\")\nplt.xlabel(\"Time\")\n\n\nsave_fig(\"time_series_plot\")\nplt.show()\n\nX_batch, y_batch = next_batch(1, n_steps)\n\nnp.c_[X_batch[0], y_batch[0]]", "Using an OuputProjectionWrapper\nLet's create the RNN. It will contain 100 recurrent neurons and we will unroll it over 20 time steps since each traiing instance will be 20 inputs long. Each input will contain only one feature (the value at that time). 
The targets are also sequences of 20 inputs, each containing a sigle value:", "reset_graph()\n\nn_steps = 20\nn_inputs = 1\nn_neurons = 100\nn_outputs = 1\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_steps, n_outputs])\n\ncell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)\noutputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)", "At each time step we now have an output vector of size 100. But what we actually want is a single output value at each time step. The simplest solution is to wrap the cell in an OutputProjectionWrapper.", "reset_graph()\n\nn_steps = 20\nn_inputs = 1\nn_neurons = 100\nn_outputs = 1\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_steps, n_outputs])\n\ncell = tf.contrib.rnn.OutputProjectionWrapper(\n tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu),\n output_size=n_outputs)\n\noutputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)\n\nlearning_rate = 0.001\n\nloss = tf.reduce_mean(tf.square(outputs - y)) # MSE\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(loss)\n\ninit = tf.global_variables_initializer()\n\nsaver = tf.train.Saver()\n\nn_iterations = 1500\nbatch_size = 50\n\nwith tf.Session() as sess:\n init.run()\n for iteration in range(n_iterations):\n X_batch, y_batch = next_batch(batch_size, n_steps)\n sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n if iteration % 100 == 0:\n mse = loss.eval(feed_dict={X: X_batch, y: y_batch})\n print(iteration, \"\\tMSE:\", mse)\n \n saver.save(sess, \"./my_time_series_model\") # not shown in the book\n\nwith tf.Session() as sess: # not shown in the book\n saver.restore(sess, \"./my_time_series_model\") # not shown\n\n X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))\n y_pred = sess.run(outputs, feed_dict={X: 
X_new})\n\ny_pred\n\nplt.title(\"Testing the model\", fontsize=14)\nplt.plot(t_instance[:-1], time_series(t_instance[:-1]), \"bo\", markersize=10, label=\"instance\")\nplt.plot(t_instance[1:], time_series(t_instance[1:]), \"w*\", markersize=10, label=\"target\")\nplt.plot(t_instance[1:], y_pred[0,:,0], \"r.\", markersize=10, label=\"prediction\")\nplt.legend(loc=\"upper left\")\nplt.xlabel(\"Time\")\n\nsave_fig(\"time_series_pred_plot\")\nplt.show()", "Without using an OutputProjectionWrapper", "reset_graph()\n\nn_steps = 20\nn_inputs = 1\nn_neurons = 100\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_steps, n_outputs])\n\ncell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)\nrnn_outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)\n\nn_outputs = 1\nlearning_rate = 0.001\n\nstacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])\nstacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)\noutputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])\n\nloss = tf.reduce_mean(tf.square(outputs - y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(loss)\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\n\nn_iterations = 1500\nbatch_size = 50\n\nwith tf.Session() as sess:\n init.run()\n for iteration in range(n_iterations):\n X_batch, y_batch = next_batch(batch_size, n_steps)\n sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n if iteration % 100 == 0:\n mse = loss.eval(feed_dict={X: X_batch, y: y_batch})\n print(iteration, \"\\tMSE:\", mse)\n \n X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))\n y_pred = sess.run(outputs, feed_dict={X: X_new})\n \n saver.save(sess, \"./my_time_series_model\")\n\ny_pred\n\nplt.title(\"Testing the model\", fontsize=14)\nplt.plot(t_instance[:-1], time_series(t_instance[:-1]), \"bo\", markersize=10, 
label=\"instance\")\nplt.plot(t_instance[1:], time_series(t_instance[1:]), \"w*\", markersize=10, label=\"target\")\nplt.plot(t_instance[1:], y_pred[0,:,0], \"r.\", markersize=10, label=\"prediction\")\nplt.legend(loc=\"upper left\")\nplt.xlabel(\"Time\")\n\nplt.show()", "Generating a creative new sequence", "with tf.Session() as sess: # not shown in the book\n saver.restore(sess, \"./my_time_series_model\") # not shown\n\n sequence = [0.] * n_steps\n for iteration in range(300):\n X_batch = np.array(sequence[-n_steps:]).reshape(1, n_steps, 1)\n y_pred = sess.run(outputs, feed_dict={X: X_batch})\n sequence.append(y_pred[0, -1, 0])\n\nplt.figure(figsize=(8,4))\nplt.plot(np.arange(len(sequence)), sequence, \"b-\")\nplt.plot(t[:n_steps], sequence[:n_steps], \"b-\", linewidth=3)\nplt.xlabel(\"Time\")\nplt.ylabel(\"Value\")\nplt.show()\n\nwith tf.Session() as sess:\n saver.restore(sess, \"./my_time_series_model\")\n\n sequence1 = [0. for i in range(n_steps)]\n for iteration in range(len(t) - n_steps):\n X_batch = np.array(sequence1[-n_steps:]).reshape(1, n_steps, 1)\n y_pred = sess.run(outputs, feed_dict={X: X_batch})\n sequence1.append(y_pred[0, -1, 0])\n\n sequence2 = [time_series(i * resolution + t_min + (t_max-t_min/3)) for i in range(n_steps)]\n for iteration in range(len(t) - n_steps):\n X_batch = np.array(sequence2[-n_steps:]).reshape(1, n_steps, 1)\n y_pred = sess.run(outputs, feed_dict={X: X_batch})\n sequence2.append(y_pred[0, -1, 0])\n\nplt.figure(figsize=(11,4))\nplt.subplot(121)\nplt.plot(t, sequence1, \"b-\")\nplt.plot(t[:n_steps], sequence1[:n_steps], \"b-\", linewidth=3)\nplt.xlabel(\"Time\")\nplt.ylabel(\"Value\")\n\nplt.subplot(122)\nplt.plot(t, sequence2, \"b-\")\nplt.plot(t[:n_steps], sequence2[:n_steps], \"b-\", linewidth=3)\nplt.xlabel(\"Time\")\nsave_fig(\"creative_sequence_plot\")\nplt.show()", "Deep RNN\nMultiRNNCell", "reset_graph()\n\nn_inputs = 2\nn_steps = 5\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\n\nn_neurons = 
100\nn_layers = 3\n\nlayers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)\n for layer in range(n_layers)]\nmulti_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)\noutputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)\n\ninit = tf.global_variables_initializer()\n\nX_batch = rnd.rand(2, n_steps, n_inputs)\n\nwith tf.Session() as sess:\n init.run()\n outputs_val, states_val = sess.run([outputs, states], feed_dict={X: X_batch})\n\noutputs_val.shape", "Distributing a Deep RNN Across Multiple GPUs\nDo NOT do this:", "with tf.device(\"/gpu:0\"): # BAD! This is ignored.\n layer1 = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)\n\nwith tf.device(\"/gpu:1\"): # BAD! Ignored again.\n layer2 = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)", "Instead, you need a DeviceCellWrapper:", "import tensorflow as tf\n\nclass DeviceCellWrapper(tf.contrib.rnn.RNNCell):\n def __init__(self, device, cell):\n self._cell = cell\n self._device = device\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n @property\n def output_size(self):\n return self._cell.output_size\n\n def __call__(self, inputs, state, scope=None):\n with tf.device(self._device):\n return self._cell(inputs, state, scope)\n\nreset_graph()\n\nn_inputs = 5\nn_steps = 20\nn_neurons = 100\n\nX = tf.placeholder(tf.float32, shape=[None, n_steps, n_inputs])\n\ndevices = [\"/cpu:0\", \"/cpu:0\", \"/cpu:0\"] # replace with [\"/gpu:0\", \"/gpu:1\", \"/gpu:2\"] if you have 3 GPUs\ncells = [DeviceCellWrapper(dev,tf.contrib.rnn.BasicRNNCell(num_units=n_neurons))\n for dev in devices]\nmulti_layer_cell = tf.contrib.rnn.MultiRNNCell(cells)\noutputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n init.run()\n print(sess.run(outputs, feed_dict={X: rnd.rand(2, n_steps, n_inputs)}))", "Dropout", "reset_graph()\n\nn_inputs = 1\nn_neurons = 100\nn_layers = 3\nn_steps = 20\nn_outputs = 1\n\nX = 
tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_steps, n_outputs])\n\nkeep_prob = 0.5\n\ncells = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)\n for layer in range(n_layers)]\ncells_drop = [tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)\n for cell in cells]\nmulti_layer_cell = tf.contrib.rnn.MultiRNNCell(cells_drop)\nrnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)\n\nlearning_rate = 0.01\n\nstacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])\nstacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)\noutputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])\n\nloss = tf.reduce_mean(tf.square(outputs - y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(loss)\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()", "Unfortunately, this code is only usable for training, because the DropoutWrapper class has no training parameter, so it always applies dropout, even when the model is not being trained, so we must first train the model, then create a different model for testing, without the DropoutWrapper.", "n_iterations = 1000\nbatch_size = 50\n\nwith tf.Session() as sess:\n init.run()\n for iteration in range(n_iterations):\n X_batch, y_batch = next_batch(batch_size, n_steps)\n _, mse = sess.run([training_op, loss], feed_dict={X: X_batch, y: y_batch})\n if iteration % 100 == 0:\n print(iteration, \"Training MSE:\", mse)\n \n saver.save(sess, \"./my_dropout_time_series_model\")", "Now that the model is trained, we need to create the model again, but without the DropoutWrapper for testing:", "reset_graph()\n\nn_inputs = 1\nn_neurons = 100\nn_layers = 3\nn_steps = 20\nn_outputs = 1\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_steps, n_outputs])\n\nkeep_prob = 0.5\n\ncells = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)\n for 
layer in range(n_layers)]\nmulti_layer_cell = tf.contrib.rnn.MultiRNNCell(cells)\nrnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)\n\nlearning_rate = 0.01\n\nstacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])\nstacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)\noutputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])\n\nloss = tf.reduce_mean(tf.square(outputs - y))\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\n\nwith tf.Session() as sess:\n saver.restore(sess, \"./my_dropout_time_series_model\")\n\n X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))\n y_pred = sess.run(outputs, feed_dict={X: X_new})\n\nplt.title(\"Testing the model\", fontsize=14)\nplt.plot(t_instance[:-1], time_series(t_instance[:-1]), \"bo\", markersize=10, label=\"instance\")\nplt.plot(t_instance[1:], time_series(t_instance[1:]), \"w*\", markersize=10, label=\"target\")\nplt.plot(t_instance[1:], y_pred[0,:,0], \"r.\", markersize=10, label=\"prediction\")\nplt.legend(loc=\"upper left\")\nplt.xlabel(\"Time\")\n\nplt.show()", "Oops, it seems that Dropout does not help at all in this particular case. 
:/\nAnother option is to write a script with a command line argument to specify whether you want to train the model or use it for making predictions:", "reset_graph()\n\nimport sys\ntraining = True  # in a script, this would be (sys.argv[-1] == \"train\") instead\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_steps, n_outputs])\n\ncells = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)\n         for layer in range(n_layers)]\nif training:\n    cells = [tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)\n             for cell in cells]\nmulti_layer_cell = tf.contrib.rnn.MultiRNNCell(cells)\nrnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)\n\nstacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])     # not shown in the book\nstacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)  # not shown\noutputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])    # not shown\nloss = tf.reduce_mean(tf.square(outputs - y))                      # not shown\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)    # not shown\ntraining_op = optimizer.minimize(loss)                             # not shown\ninit = tf.global_variables_initializer()                           # not shown\nsaver = tf.train.Saver()                                           # not shown\n\nwith tf.Session() as sess:\n    if training:\n        init.run()\n        for iteration in range(n_iterations):\n            X_batch, y_batch = next_batch(batch_size, n_steps)       # not shown\n            _, mse = sess.run([training_op, loss], feed_dict={X: X_batch, y: y_batch}) # not shown\n            if iteration % 100 == 0:                                 # not shown\n                print(iteration, \"Training MSE:\", mse)               # not shown\n        save_path = saver.save(sess, \"/tmp/my_model.ckpt\")\n    else:\n        saver.restore(sess, \"/tmp/my_model.ckpt\")\n        X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs))) # not shown\n        y_pred = sess.run(outputs, feed_dict={X: X_new})                              # not shown", "LSTM", "reset_graph()\n\nlstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons)\n\nn_steps = 28\nn_inputs = 28\nn_neurons = 
150\nn_outputs = 10\nn_layers = 3\n\nlearning_rate = 0.001\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.int32, [None])\n\nlstm_cells = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons)\n for layer in range(n_layers)]\nmulti_cell = tf.contrib.rnn.MultiRNNCell(lstm_cells)\noutputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)\ntop_layer_h_state = states[-1][1]\nlogits = tf.layers.dense(top_layer_h_state, n_outputs, name=\"softmax\")\nxentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)\nloss = tf.reduce_mean(xentropy, name=\"loss\")\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(loss)\ncorrect = tf.nn.in_top_k(logits, y, 1)\naccuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n \ninit = tf.global_variables_initializer()\n\nstates\n\ntop_layer_h_state\n\nn_epochs = 10\nbatch_size = 150\n\nwith tf.Session() as sess:\n init.run()\n for epoch in range(n_epochs):\n for iteration in range(mnist.train.num_examples // batch_size):\n X_batch, y_batch = mnist.train.next_batch(batch_size)\n X_batch = X_batch.reshape((batch_size, n_steps, n_inputs))\n sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})\n acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})\n print(\"Epoch\", epoch, \"Train accuracy =\", acc_train, \"Test accuracy =\", acc_test)\n\nlstm_cell = tf.contrib.rnn.LSTMCell(num_units=n_neurons, use_peepholes=True)\n\ngru_cell = tf.contrib.rnn.GRUCell(num_units=n_neurons)", "Embeddings\nThis section is based on TensorFlow's Word2Vec tutorial.\nFetch the data", "from six.moves import urllib\n\nimport errno\nimport os\nimport zipfile\n\nWORDS_PATH = \"datasets/words\"\nWORDS_URL = 'http://mattmahoney.net/dc/text8.zip'\n\ndef mkdir_p(path):\n \"\"\"Create directories, ok if they already exist.\n \n This is for python 2 support. 
In python >=3.2, simply use:\n >>> os.makedirs(path, exist_ok=True)\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\ndef fetch_words_data(words_url=WORDS_URL, words_path=WORDS_PATH):\n os.makedirs(words_path, exist_ok=True)\n zip_path = os.path.join(words_path, \"words.zip\")\n if not os.path.exists(zip_path):\n urllib.request.urlretrieve(words_url, zip_path)\n with zipfile.ZipFile(zip_path) as f:\n data = f.read(f.namelist()[0])\n return data.decode(\"ascii\").split()\n\nwords = fetch_words_data()\n\nwords[:5]", "Build the dictionary", "from collections import Counter\n\nvocabulary_size = 50000\n\nvocabulary = [(\"UNK\", None)] + Counter(words).most_common(vocabulary_size - 1)\nvocabulary = np.array([word for word, _ in vocabulary])\ndictionary = {word: code for code, word in enumerate(vocabulary)}\ndata = np.array([dictionary.get(word, 0) for word in words])\n\n\" \".join(words[:9]), data[:9]\n\n\" \".join([vocabulary[word_index] for word_index in [5241, 3081, 12, 6, 195, 2, 3134, 46, 59]])\n\nwords[24], data[24]", "Generate batches", "import random\nfrom collections import deque\n\ndef generate_batch(batch_size, num_skips, skip_window):\n global data_index\n assert batch_size % num_skips == 0\n assert num_skips <= 2 * skip_window\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n span = 2 * skip_window + 1 # [ skip_window target skip_window ]\n buffer = deque(maxlen=span)\n for _ in range(span):\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n for i in range(batch_size // num_skips):\n target = skip_window # target label at the center of the buffer\n targets_to_avoid = [ skip_window ]\n for j in range(num_skips):\n while target in targets_to_avoid:\n target = random.randint(0, span - 1)\n targets_to_avoid.append(target)\n batch[i * num_skips + j] = buffer[skip_window]\n 
labels[i * num_skips + j, 0] = buffer[target]\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n return batch, labels\n\ndata_index=0\nbatch, labels = generate_batch(8, 2, 1)\n\nbatch, [vocabulary[word] for word in batch]\n\nlabels, [vocabulary[word] for word in labels[:, 0]]", "Build the model", "batch_size = 128\nembedding_size = 128 # Dimension of the embedding vector.\nskip_window = 1 # How many words to consider left and right.\nnum_skips = 2 # How many times to reuse an input to generate a label.\n\n# We pick a random validation set to sample nearest neighbors. Here we limit the\n# validation samples to the words that have a low numeric ID, which by\n# construction are also the most frequent.\nvalid_size = 16 # Random set of words to evaluate similarity on.\nvalid_window = 100 # Only pick dev samples in the head of the distribution.\nvalid_examples = rnd.choice(valid_window, valid_size, replace=False)\nnum_sampled = 64 # Number of negative examples to sample.\n\nlearning_rate = 0.01\n\nreset_graph()\n\n# Input data.\ntrain_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\nvalid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n\nvocabulary_size = 50000\nembedding_size = 150\n\n# Look up embeddings for inputs.\ninit_embeds = tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)\nembeddings = tf.Variable(init_embeds)\n\ntrain_inputs = tf.placeholder(tf.int32, shape=[None])\nembed = tf.nn.embedding_lookup(embeddings, train_inputs)\n\n# Construct the variables for the NCE loss\nnce_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / np.sqrt(embedding_size)))\nnce_biases = tf.Variable(tf.zeros([vocabulary_size]))\n\n# Compute the average NCE loss for the batch.\n# tf.nce_loss automatically draws a new sample of the negative labels each\n# time we evaluate the loss.\nloss = tf.reduce_mean(\n tf.nn.nce_loss(nce_weights, nce_biases, train_labels, embed,\n num_sampled, 
vocabulary_size))\n\n# Construct the Adam optimizer\noptimizer = tf.train.AdamOptimizer(learning_rate)\ntraining_op = optimizer.minimize(loss)\n\n# Compute the cosine similarity between minibatch examples and all embeddings.\nnorm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), axis=1, keep_dims=True))\nnormalized_embeddings = embeddings / norm\nvalid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)\nsimilarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)\n\n# Add variable initializer.\ninit = tf.global_variables_initializer()", "Train the model", "num_steps = 10001\n\nwith tf.Session() as session:\n init.run()\n\n average_loss = 0\n for step in range(num_steps):\n print(\"\\rIteration: {}\".format(step), end=\"\\t\")\n batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)\n feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}\n\n # We perform one update step by evaluating the training op (including it\n # in the list of returned values for session.run()\n _, loss_val = session.run([training_op, loss], feed_dict=feed_dict)\n average_loss += loss_val\n\n if step % 2000 == 0:\n if step > 0:\n average_loss /= 2000\n # The average loss is an estimate of the loss over the last 2000 batches.\n print(\"Average loss at step \", step, \": \", average_loss)\n average_loss = 0\n\n # Note that this is expensive (~20% slowdown if computed every 500 steps)\n if step % 10000 == 0:\n sim = similarity.eval()\n for i in range(valid_size):\n valid_word = vocabulary[valid_examples[i]]\n top_k = 8 # number of nearest neighbors\n nearest = (-sim[i, :]).argsort()[1:top_k+1]\n log_str = \"Nearest to %s:\" % valid_word\n for k in range(top_k):\n close_word = vocabulary[nearest[k]]\n log_str = \"%s %s,\" % (log_str, close_word)\n print(log_str)\n\n final_embeddings = normalized_embeddings.eval()", "Let's save the final embeddings (of course you can use a TensorFlow Saver if you prefer):", 
"np.save(\"./my_final_embeddings.npy\", final_embeddings)", "Plot the embeddings", "def plot_with_labels(low_dim_embs, labels):\n assert low_dim_embs.shape[0] >= len(labels), \"More labels than embeddings\"\n plt.figure(figsize=(18, 18)) #in inches\n for i, label in enumerate(labels):\n x, y = low_dim_embs[i,:]\n plt.scatter(x, y)\n plt.annotate(label,\n xy=(x, y),\n xytext=(5, 2),\n textcoords='offset points',\n ha='right',\n va='bottom')\n\nfrom sklearn.manifold import TSNE\n\ntsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)\nplot_only = 500\nlow_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])\nlabels = [vocabulary[i] for i in range(plot_only)]\nplot_with_labels(low_dim_embs, labels)", "Machine Translation\nThe basic_rnn_seq2seq() function creates a simple Encoder/Decoder model: it first runs an RNN to encode encoder_inputs into a state vector, then runs a decoder initialized with the last encoder state on decoder_inputs. Encoder and decoder use the same RNN cell type but they don't share parameters.", "import tensorflow as tf\nreset_graph()\n\nn_steps = 50\nn_neurons = 200\nn_layers = 3\nnum_encoder_symbols = 20000\nnum_decoder_symbols = 20000\nembedding_size = 150\nlearning_rate = 0.01\n\nX = tf.placeholder(tf.int32, [None, n_steps]) # English sentences\nY = tf.placeholder(tf.int32, [None, n_steps]) # French translations\nW = tf.placeholder(tf.float32, [None, n_steps - 1, 1])\nY_input = Y[:, :-1]\nY_target = Y[:, 1:]\n\nencoder_inputs = tf.unstack(tf.transpose(X)) # list of 1D tensors\ndecoder_inputs = tf.unstack(tf.transpose(Y_input)) # list of 1D tensors\n\nlstm_cells = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons)\n for layer in range(n_layers)]\ncell = tf.contrib.rnn.MultiRNNCell(lstm_cells)\n\noutput_seqs, states = tf.contrib.legacy_seq2seq.embedding_rnn_seq2seq(\n encoder_inputs,\n decoder_inputs,\n cell,\n num_encoder_symbols,\n num_decoder_symbols,\n embedding_size)\n\nlogits = tf.transpose(tf.unstack(output_seqs), 
perm=[1, 0, 2])\n\nlogits_flat = tf.reshape(logits, [-1, num_decoder_symbols])\nY_target_flat = tf.reshape(Y_target, [-1])\nW_flat = tf.reshape(W, [-1])\nxentropy = W_flat * tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y_target_flat, logits=logits_flat)\nloss = tf.reduce_mean(xentropy)\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(loss)\n\ninit = tf.global_variables_initializer()", "Exercise solutions\nComing soon" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
kit-cel/wt
sigNT/systems/anticausal_stable.ipynb
gpl-2.0
[ "Content and Objective\n\nShow that systems with poles outside the unit circle can be stable\nExample showing that by windowing and \"causal-izing\" only negligible loss is observed\n\nImporting and Plotting Options", "import numpy as np\n\nfrom scipy import signal\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\n# plotting options \nfont = {'size' : 20}\nplt.rc('font', **font)\nplt.rc('text', usetex=True)\n\nmatplotlib.rc('figure', figsize=(18, 6) )", "Define and Analyze Anticausal System", "# define length of impulse response and anticausal times\nN = 5 \nn = np.arange( -N, 1, 1)\n\n# define impulse response for negative time indices and apply zero padding to h \na = 2.\nh = a**n\nh = np.append( h, np.zeros( 3 * len(h) ) )\n\n# get parameters in frequency regime out of FFT identities\ndelta_Omega = 2 * np.pi / len(h)\nOmega = np.arange( 0, 2 * np.pi, delta_Omega )\n\n# get frequency response by FFT\nH = np.fft.fft( h ) \n\n# ideal frequency response for the impulse response\nH_ideal = - a / ( np.exp( 1j * Omega ) - a )", "Remarks: \n\nNote that frequency range is not symmetric to zero frequency, but is deliberately defined as being $[0, 2\\Omega)$. So, please get used to different representations and definitions.\n\nPlotting", "plt.subplot(121)\nplt.plot( Omega, np.abs( H ), label='$|H(\\\\Omega)|$')\nplt.plot( Omega, np.abs( H_ideal ), label='$|H_\\\\mathrm{ideal}(\\\\Omega)|$')\nplt.grid( True ) \nplt.xlabel('$\\\\Omega$')\nplt.legend(loc='upper right')\n\nplt.subplot(122)\nplt.plot( Omega, np.angle( H ), label='$\\\\angle H(\\\\Omega)$' )\nplt.plot( Omega, np.angle( H_ideal ), label='$\\\\angle H_\\\\mathrm{ideal}(\\\\Omega)$' )\nplt.grid( True ) \nplt.xlabel('$\\\\Omega$')\nplt.legend(loc='upper right')", "Question: Obviously, magnitudes correspond quite well. Explain the (significantly) different phases." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
besser82/shogun
doc/ipython-notebooks/multiclass/KNN.ipynb
bsd-3-clause
[ "K-Nearest Neighbors (KNN)\nby Chiyuan Zhang and S&ouml;ren Sonnenburg\nThis notebook illustrates the <a href=\"http://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm\">K-Nearest Neighbors</a> (KNN) algorithm on the USPS digit recognition dataset in Shogun. Further, the effect of <a href=\"http://en.wikipedia.org/wiki/Cover_tree\">Cover Trees</a> on speed is illustrated by comparing KNN with and without it. Finally, a comparison with <a href=\"http://en.wikipedia.org/wiki/Support_vector_machine#Multiclass_SVM\">Multiclass Support Vector Machines</a> is shown. \nThe basics\nThe training of a KNN model basically does nothing but memorizing all the training points and the associated labels, which is very cheap in computation but costly in storage. The prediction is implemented by finding the K nearest neighbors of the query point, and voting. Here K is a hyper-parameter for the algorithm. Smaller values for K give the model low bias but high variance; while larger values for K give low variance but high bias.\nIn SHOGUN, you can use KNN to perform KNN learning. To construct a KNN machine, you must choose the hyper-parameter K and a distance function. Usually, we simply use the standard EuclideanDistance, but in general, any subclass of Distance could be used. 
For demonstration, in this tutorial we select a random subset of 1000 samples from the USPS digit recognition dataset, and run 2-fold cross validation of KNN with varying K.\nFirst we load and init data split:", "import numpy as np\nimport os\nSHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')\n\nfrom scipy.io import loadmat, savemat\nfrom numpy import random\nfrom os import path\n\nmat = loadmat(os.path.join(SHOGUN_DATA_DIR, 'multiclass/usps.mat'))\nXall = mat['data']\nYall = np.array(mat['label'].squeeze(), dtype=np.double)\n\n# map from 1..10 to 0..9, since shogun\n# requires multiclass labels to be\n# 0, 1, ..., K-1\nYall = Yall - 1\n\nrandom.seed(0)\n\nsubset = random.permutation(len(Yall))\n\nXtrain = Xall[:, subset[:5000]]\nYtrain = Yall[subset[:5000]]\n\nXtest = Xall[:, subset[5000:6000]]\nYtest = Yall[subset[5000:6000]]\n\nNsplit = 2\nall_ks = range(1, 21)\n\nprint(Xall.shape)\nprint(Xtrain.shape)\nprint(Xtest.shape)", "Let us plot the first five examples of the train data (first row) and test data (second row).", "%matplotlib inline\nimport pylab as P\ndef plot_example(dat, lab):\n for i in range(5):\n ax=P.subplot(1,5,i+1)\n P.title(int(lab[i]))\n ax.imshow(dat[:,i].reshape((16,16)), interpolation='nearest')\n ax.set_xticks([])\n ax.set_yticks([])\n \n \n_=P.figure(figsize=(17,6))\nP.gray()\nplot_example(Xtrain, Ytrain)\n\n_=P.figure(figsize=(17,6))\nP.gray()\nplot_example(Xtest, Ytest)", "Then we import shogun components and convert the data to shogun objects:", "import shogun as sg\nfrom shogun import MulticlassLabels, features\nfrom shogun import KNN\n\nlabels = MulticlassLabels(Ytrain)\nfeats = features(Xtrain)\nk=3\ndist = sg.distance('EuclideanDistance')\nknn = KNN(k, dist, labels)\nlabels_test = MulticlassLabels(Ytest)\nfeats_test = features(Xtest)\nknn.train(feats)\npred = knn.apply_multiclass(feats_test)\nprint(\"Predictions\", pred.get_int_labels()[:5])\nprint(\"Ground Truth\", Ytest[:5])\n\nfrom shogun import 
MulticlassAccuracy\nevaluator = MulticlassAccuracy()\naccuracy = evaluator.evaluate(pred, labels_test)\n\nprint(\"Accuracy = %2.2f%%\" % (100*accuracy))", "Let's plot a few misclassified examples - I guess we all agree that these are notably harder to detect.", "idx=np.where(pred != Ytest)[0]\nXbad=Xtest[:,idx]\nYbad=Ytest[idx]\n_=P.figure(figsize=(17,6))\nP.gray()\nplot_example(Xbad, Ybad)", "Now the question is - is 97.30% accuracy the best we can do? While one would usually re-train KNN with different values for k here and likely perform Cross-validation, we just use a small trick here that saves us lots of computation time: When we have to determine the $K\\geq k$ nearest neighbors we will know the nearest neighbors for all $k=1...K$ and can thus get the predictions for multiple k's in one step:", "knn.put('k', 13)\nmultiple_k=knn.classify_for_multiple_k()\nprint(multiple_k.shape)", "We have the prediction for each of the 13 k's now and can quickly compute the accuracies:", "for k in range(13):\n    print(\"Accuracy for k=%d is %2.2f%%\" % (k+1, 100*np.mean(multiple_k[:,k]==Ytest)))", "So k=3 seems to have been the optimal choice.\nAccelerating KNN\nObviously applying KNN is very costly: for each prediction you have to compare the object against all training objects. While the implementation in SHOGUN will use all available CPU cores to parallelize this computation it might still be slow when you have big data sets. In SHOGUN, you can use Cover Trees to speed up the nearest neighbor searching process in KNN. Just call set_use_covertree on the KNN machine to enable or disable this feature. We also show the prediction time comparison with and without Cover Tree in this tutorial. 
So let's just have a comparison utilizing the data above:", "from shogun import Time, KNN_COVER_TREE, KNN_BRUTE\nstart = Time.get_curtime()\nknn.put('k', 3)\nknn.put('knn_solver', KNN_BRUTE)\npred = knn.apply_multiclass(feats_test)\nprint(\"Standard KNN took %2.1fs\" % (Time.get_curtime() - start))\n\n\nstart = Time.get_curtime()\nknn.put('k', 3)\nknn.put('knn_solver', KNN_COVER_TREE)\npred = knn.apply_multiclass(feats_test)\nprint(\"Covertree KNN took %2.1fs\" % (Time.get_curtime() - start))\n", "So we can significantly speed it up. Let's do a more systematic comparison. For that a helper function is defined to run the evaluation for KNN:", "def evaluate(labels, feats, use_cover_tree=False):\n from shogun import MulticlassAccuracy, CrossValidationSplitting\n import time\n split = CrossValidationSplitting(labels, Nsplit)\n split.build_subsets()\n \n accuracy = np.zeros((Nsplit, len(all_ks)))\n acc_train = np.zeros(accuracy.shape)\n time_test = np.zeros(accuracy.shape)\n for i in range(Nsplit):\n idx_train = split.generate_subset_inverse(i)\n idx_test = split.generate_subset_indices(i)\n\n for j, k in enumerate(all_ks):\n #print \"Round %d for k=%d...\" % (i, k)\n\n feats.add_subset(idx_train)\n labels.add_subset(idx_train)\n\n dist = sg.distance('EuclideanDistance')\n dist.init(feats, feats)\n knn = KNN(k, dist, labels)\n knn.set_store_model_features(True)\n if use_cover_tree:\n knn.put('knn_solver', KNN_COVER_TREE)\n else:\n knn.put('knn_solver', KNN_BRUTE)\n knn.train()\n\n evaluator = MulticlassAccuracy()\n pred = knn.apply_multiclass()\n acc_train[i, j] = evaluator.evaluate(pred, labels)\n\n feats.remove_subset()\n labels.remove_subset()\n feats.add_subset(idx_test)\n labels.add_subset(idx_test)\n\n t_start = time.clock()\n pred = knn.apply_multiclass(feats)\n time_test[i, j] = (time.clock() - t_start) / labels.get_num_labels()\n\n accuracy[i, j] = evaluator.evaluate(pred, labels)\n\n feats.remove_subset()\n labels.remove_subset()\n return {'eout': accuracy, 
'ein': acc_train, 'time': time_test}", "Evaluate KNN with and without Cover Tree. This takes a few seconds:", "labels = MulticlassLabels(Ytest)\nfeats = features(Xtest)\nprint(\"Evaluating KNN...\")\nwo_ct = evaluate(labels, feats, use_cover_tree=False)\nwi_ct = evaluate(labels, feats, use_cover_tree=True)\nprint(\"Done!\")", "Generate plots with the data collected in the evaluation:", "import matplotlib\n\nfig = P.figure(figsize=(8,5))\nP.plot(all_ks, wo_ct['eout'].mean(axis=0), 'r-*')\nP.plot(all_ks, wo_ct['ein'].mean(axis=0), 'r--*')\nP.legend([\"Test Accuracy\", \"Training Accuracy\"])\nP.xlabel('K')\nP.ylabel('Accuracy')\nP.title('KNN Accuracy')\nP.tight_layout()\n\nfig = P.figure(figsize=(8,5))\nP.plot(all_ks, wo_ct['time'].mean(axis=0), 'r-*')\nP.plot(all_ks, wi_ct['time'].mean(axis=0), 'b-d')\nP.xlabel(\"K\")\nP.ylabel(\"time\")\nP.title('KNN time')\nP.legend([\"Plain KNN\", \"CoverTree KNN\"], loc='center right')\nP.tight_layout()", "Although simple and elegant, KNN is generally very resource costly. Because all the training samples are to be memorized literally, the memory cost of KNN learning becomes prohibitive when the dataset is huge. Even when the memory is big enough to hold all the data, the prediction will be slow, since the distances between the query point and all the training points need to be computed and ranked. The situation becomes worse if in addition the data samples are all very high-dimensional. Leaving aside computation time issues, k-NN is a very versatile and competitive algorithm. It can be applied to any kind of objects (not just numerical data) - as long as one can design a suitable distance function. In practice k-NN used with bagging can create improved and more robust results.\nComparison to Multiclass Support Vector Machines\nIn contrast to KNN - multiclass Support Vector Machines (SVMs) attempt to model the decision function separating each class from one another. 
They compare examples utilizing similarity measures (so called Kernels) instead of distances like KNN does. When applied, they are in Big-O notation computationally as expensive as KNN but involve another (costly) training step. They do not scale very well to cases with a huge number of classes but usually lead to favorable results when applied to small number of classes cases. So for reference let us compare how a standard multiclass SVM performs wrt. KNN on the mnist data set from above.\nLet us first train a multiclass svm using a Gaussian kernel (kind of the SVM equivalent to the euclidean distance).", "from shogun import GMNPSVM\n\nwidth=80\nC=1\n\ngk=sg.kernel(\"GaussianKernel\", log_width=np.log(width))\n\nsvm=GMNPSVM(C, gk, labels)\n_=svm.train(feats)", "Let's apply the SVM to the same test data set to compare results:", "out=svm.apply(feats_test)\nevaluator = MulticlassAccuracy()\naccuracy = evaluator.evaluate(out, labels_test)\n\nprint(\"Accuracy = %2.2f%%\" % (100*accuracy))", "Since the SVM performs way better on this task - let's apply it to all data we did not use in training.", "Xrem=Xall[:,subset[6000:]]\nYrem=Yall[subset[6000:]]\n\nfeats_rem=features(Xrem)\nlabels_rem=MulticlassLabels(Yrem)\nout=svm.apply(feats_rem)\n\nevaluator = MulticlassAccuracy()\naccuracy = evaluator.evaluate(out, labels_rem)\n\nprint(\"Accuracy = %2.2f%%\" % (100*accuracy))\n\nidx=np.where(out.get_labels() != Yrem)[0]\nXbad=Xrem[:,idx]\nYbad=Yrem[idx]\n_=P.figure(figsize=(17,6))\nP.gray()\nplot_example(Xbad, Ybad)", "The misclassified examples are indeed much harder to label even for human beings." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
trangel/Insight-Data-Science
general-docs/nlp-preprocessing/notebook_drafts/NLP-preprocessing-GA.ipynb
gpl-3.0
[ "<h1>Natural Language Preprocessing</h1>\n<br>\n<em><b>Gregory Antell & Emily Halket</b></em>\n<br>\n<em><b>December, 2016</b></em>\nThis notebook provides a brief overview of common steps taken during natural language preprocessing (NLP). The goal is to get you started thinking about how to process your data, not to provide a formal pipeline. Unstructured text analysis is often restricted by computational techniques rather than data collection.\n<p>Preprocessing follows a general series of steps, each requiring decisions that can substantially impact the final output if not considered carefully. For this tutorial, we will be emphasizing how different sources of text require different approaches for preprocessing and modeling. As you approach your own data, think about the implications of each decision on the outcome of your analysis.</p>\n\n<h2>Requirements</h2>\n<p>This tutorial requires several commonly used Python packages for data analysis and Natural Language Processing (NLP):</p>\n<ul>\n<li><b>Pandas: </b>for data structures and analysis in Python\n<li><b>NLTK: </b>Natural Language Toolkit\n<li><b>gensim: </b>for topic modelling\n</ul>", "# import requirements\nimport pandas as pd\nimport nltk\n#import gensim\nimport spacy", "<h2>Data</h2>\n<p>Here we will be exploring two different data sets:</p>\n<ol>\n<li>New York Times op-eds\n<li>Stack Overflow questions and comments\n</ol>\n<p>While the New York Times data set consists of traditional English prose and substantially longer articles, the Stack Overflow data set is vastly different. It contains <b> Finish statement later? Also, this part may want to be moved to a second section where we actually do the comparison </b></p>\n\n<p>In this repository, there is a subset of 100 op-ed articles from the New York Times. We will read these articles into a data frame. 
We will start off by looking at one article to illustrate the steps of preprocessing, and then we will compare both data sets to illustrate how the process is informed by the nature of the data. </p>", "# New York Times data\n## read subset of data from csv file into pandas dataframe\ndf = pd.read_csv('1_100.csv')\n## for now, choosing one article to illustrate preprocessing\narticle = df['full_text'][939]\n\n# Stack Overflow data\n## read subset of data from csv file into pandas dataframe\ndf2 = pd.read_csv('doc_200.csv')\n## for now, choosing one article to illustrate preprocessing\nposting = df2['Document'][1]", "Let's take a peek at the raw text of this article to see what we are dealing with!\nRight off the bat you can see that we have a mixture of uppercase and lowercase words, punctuation, and some character encoding. The Stack Overflow dataset also contains many html tags. These need to be addressed.", "# NY Times\narticle[:500]\n\n# Stack Overflow\nposting[:500]", "<h2>Preprocessing Text</h2>\n\n<p> After looking at our raw text, we know that there are a number of textual attributes that we will need to address before we can ultimately represent our text as quantified features. Using some built in string functions, we can address the character encoding and mixed capitalization.", "print(article[:500].decode('utf-8').lower())\n\nprint(posting[:500].decode('utf-8').lower())", "<h3>1. Tokenization</h3>\n<p>In order to process text, it must be deconstructed into its constituent elements through a process termed <b><em>tokenization</em></b>. Often, the <b><em>tokens</em></b> yielded from this process are individual words in a document. Tokens represent the linguistic units of a document.</p>\n\n<p>A simplistic way to tokenize text relies on white space, such as in <code>nltk.tokenize.WhitespaceTokenizer</code>. 
Relying on white space, however, does not take <b>punctuation</b> into account, and depending on this some tokens will include punctuation and will require further preprocessing (e.g. 'account,'). Depending on your data, the punctuation may provide meaningful information, so you will want to think about whether it should be preserved or if it can be removed. Tokenization is particularly challenging in the biomedical field, where many phrases contain substantial punctuation (parentheses, hyphens, etc.) and negation detection is critical.</p>\n\n<p>NLTK contains many built-in modules for tokenization, such as <code>nltk.tokenize.WhitespaceTokenizer</code> and <code>nltk.tokenize.RegexpTokenizer</code>.\n\n<p>See also:\n<br>\n<a href=https://www.ibm.com/developerworks/community/blogs/nlp/entry/tokenization?lang=en>The Art of Tokenization</a></p>\n<a href=https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4231086/>Negation's Not Solved: Generalizability Versus Optimizability in Clinical Natural Language Processing</a></p>\n\nExample: Whitespace Tokenization\nHere we apply the Whitespace Tokenizer on the sample article. Notice that we are again decoding characters (such as quotation marks) and using all lowercase characters. Because we used white space as the marker between tokens, we still have punctuation (e.g. 'life.' and '\\u201cif')", "from nltk.tokenize import WhitespaceTokenizer\nws_tokenizer = WhitespaceTokenizer()\n\n# tokenize example document\nnyt_ws_tokens = ws_tokenizer.tokenize(article.decode('utf-8').lower())\n\nprint nyt_ws_tokens[:75]", "Example: Regular Expression Tokenization\nBy applying the regular expression tokenizer we can return a list of word tokens without punctuation.", "from nltk.tokenize import RegexpTokenizer\nre_tokenizer = RegexpTokenizer(r'\\w+')\n\nnyt_re_tokens = re_tokenizer.tokenize(article.decode('utf-8').lower())\n\nprint nyt_re_tokens[:100]", "<h3>2. 
Stop Words</h3>\n<p>Depending on the application, many words provide little value when building an NLP model. Accordingly, these are termed <b><em>stop words</em></b>. Examples of stop words include pronouns, articles, prepositions and conjunctions, but there are many other words, or non meaningful tokens, that you may wish to remove. For instance, there may be artifacts from the web scraping process that you need to remove. </p>\n<p>Stop words can be determined and handled in many different ways, including:\n<ul>\n<li>Using a list of words determined <em>a priori</em>, either a standard list from the NLTK package or one modified from such a list based on domain knowledge of a particular subject\n<br><br>\n<li>Sorting the terms by <b><em>collection frequency</em></b> (the total number of times each term appears in the document collection), and then to taking the most frequent terms as a stop list based on semantic content.\n<br><br>\n<li>Using no defined stop list at all, and dealing with text data in a purely statistical manner. In general, search engines do not use stop lists.\n</ul>\n\nAs you work with your text, you may decide to iterate on this process. 
See also: <a href=http://nlp.stanford.edu/IR-book/html/htmledition/dropping-common-terms-stop-words-1.html>Stop Words</a>\n\n#### Example: Stopword Corpus\n\nFor this example, we will use the english stopword corpus from NLTK.", "from nltk.corpus import stopwords\n\n# print the first 5 standard English stop words\nstop_list = [w for w in stopwords.words('english')]\nprint stop_list[:5]\n\n# print the type of the elements in the stop words list\nprint type(stop_list[0])", "Let's remove the stop words and compare to our original list of tokens from our regular expression tokenizer.", "cleaned_tokens = []\nstop_words = set(stopwords.words('english'))\nfor token in nyt_re_tokens:\n if token not in stop_words:\n cleaned_tokens.append(token)\n\nprint 'Number of tokens before removing stop words: %d' % len(nyt_re_tokens)\nprint 'Number of tokens after removing stop words: %d' % len(cleaned_tokens)", "You can see that by removing stop words, we now have less than half the number of tokens as our original list. Taking a peek at the cleaned tokens, we can see that a lot of the information that makes the sentence read like something a human would expect has been lost but the key nouns, verbs, adjectives, and adverbs remain.", "print cleaned_tokens[:50]", "You may notice from looking at this sample, however, that a potentially meaningful word has been removed: 'not'. This stopword corpus includes the words 'no', 'nor', and 'not'and so by removing these words we have removed negation. \n<h3>3. Stemming and Lemmatization</h3>\n\n<b> I think we might want to beef up the explanation here a little bit more. Also, do we want to go into POS tagging? </b>\n<p>The overarching goal of stemming and lemmatization is to reduce differential forms of a word to a common base form. This step will allow you to count occurrences of words in the vectorization step. 
In deciding how to reduce the differential forms of words, you will want to consider how much information you will need to retain for your application. For instance, in many cases markers of tense and plurality are not informative, and so removing these markers will allow you to reduce the number of features.</p>\n\n<p> <b>Stemming</b> is the process of representing the word as its root word while removing inflection. For example, the stem of the word 'explained' is 'explain'. By passing this word through the stemmer you would remove the tense inflection. There are multiple approaches to stemming: Porter stemming, Porter2 (snowball) stemming, and Lancaster stemming. You can read more in depth about these approaches.</p>", "from nltk.stem.porter import PorterStemmer\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.stem.lancaster import LancasterStemmer\n\nporter = PorterStemmer()\nsnowball = SnowballStemmer('english')\nlancaster = LancasterStemmer()\n\nprint 'Porter Stem of \"explanation\": %s' % porter.stem('explanation')\nprint 'Porter2 (Snowball) Stem of \"explanation\": %s' %snowball.stem('explanation')\nprint 'Lancaster Stem of \"explanation\": %s' %lancaster.stem('explanation')", "While <b><em>stemming</em></b> is a heuristic process that selectively removes the end of words, <b><em>lemmatization</em></b> is a more sophisticated process that takes into account variables such as part-of-speech, meaning, and context within a document or neighboring sentences.</p>", "from nltk.stem.wordnet import WordNetLemmatizer\nlemmatizer = WordNetLemmatizer()\n\nprint lemmatizer.lemmatize('explanation')", "<p>In this example, lemmatization retains a bit more information than stemming. Within stemming, the Lancaster method is more aggressive than Porter and Snowball. Remember that this step allows us to reduce words to a common base form so that we can reduce our feature space and perform counting of occurrences. 
It will depend on your data and your application as to how much information you need to retain. </p>\n\n<p>See also: <a href=http://nlp.stanford.edu/IR-book/html/htmledition/stemming-and-lemmatization-1.html>Stemming and lemmatization</a></p>\n\nExample: Stemming and Lemmatization\nTo illustrate the difference between stemming and lemmatization, we will apply both methods to our articles.", "stemmed_tokens = []\nlemmatized_tokens = []\n\nfor token in cleaned_tokens:\n stemmed_tokens.append(stemmer.stem(token))\n lemmatized_tokens.append(lemmatizer.lemmatize(token))", "Let's take a look at a sample of our stemmed tokens", "print stemmed_tokens[:50]", "In contrast, here are the same tokens in their lemmatized form", "print lemmatized_tokens[:50]", "<h3>4. Vectorization </h3>\n\n<p> Often in natural language processing we want to represent our text as a quantitative set of features for subsequent analysis. One way to generate features from text is to count the occurrences of words. This approach is often referred to as a bag of words approach.</p>\n\n<p>In the example of our article, we could represent the article as a vector of counts for each token. If we did the same for all of the other articles, we would have a set of vectors with each vector representing an article. If we had only one article, then we could have split the article into sentences and then represented each sentence as a vector. </p>\n\n<p>If we apply a count vectorizer to our article, we will have a vector with the length of the number of unique tokens. </p>\n\nExample: Count Vectorization of Article\nFor this example we will use the stemmed tokens from our article. We will need to join the tokens together to represent one article.\nCheck out the documentation for CountVectorizer in scikit-learn. You will see that there are a number of parameters that you can specify - including the maximum number of features. 
Depending on your data, you may choose to restrict the number of features by removing words that appear with least frequency.", "from sklearn.feature_extraction.text import CountVectorizer\nvectorizer = CountVectorizer()\n\nstemmed_article = ' '.join(wd for wd in stemmed_tokens)\n\narticle_vect = vectorizer.fit_transform([stemmed_article])", "Unigrams v. Bigrams v. Ngrams\ntf-idf" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jamesfolberth/NGC_STEM_camp_AWS
notebooks/20Q/playGame/20Qsports_StephensSolution.ipynb
bsd-3-clause
[ "Stephen's implementation of the decision tree 20Q game\nRead in the data, as in the setup script", "import csv\nsports = [] # This is a python \"list\" data structure (it is \"mutable\")\n# The file has a list of sports, one per line.\n# There are spaces in some names, but no commas or weird punctuation\nwith open('../data/SportsDataset_ListOfSports.csv','r') as csvfile:\n myreader = csv.reader(csvfile)\n for index, row in enumerate( myreader ):\n sports.append(' '.join(row) ) # the join() call merges all fields\n# Make a look-up table: if you input the name of the sport, it tells you the index\n# Also, print out a list of all the sports, to make sure it looks OK\nSport2Index = {}\nfor ind, sprt in enumerate( sports ):\n Sport2Index[sprt] = ind\n print('Sport #', ind,'is',sprt)\n# And example usage of the index lookup:\n#print('The sport \"', sports[7],'\" has 0-based index', Sport2Index[sports[7]])\n\n# -- And read in the list of questions --\n# this csv file has only a single row\nquestions = []\nwith open('../data/SportsDataset_ListOfAttributes.csv','r') as csvfile:\n myreader = csv.reader( csvfile )\n for row in myreader:\n questions = row\nQuestion2Index = {}\nfor ind, quest in enumerate( questions ):\n Question2Index[quest] = ind\n #print('Question #', ind,': ',quest)\n# And example usage of the index lookup:\n#print('The question \"', questions[10],'\" has 0-based index', Question2Index[questions[10]])\n\n# -- And read in the training data --\nYesNoDict = { \"Yes\": 1, \"No\": -1, \"Unsure\": 0, \"\": 0 }\n# Load from the csv file.\n# Note: the file only has \"1\"s, because blanks mean \"No\"\n\nX = []\nwith open('../data/SportsDataset_DataAttributes.csv','r') as csvfile:\n myreader = csv.reader(csvfile)\n for row in myreader:\n data = [];\n for col in row:\n data.append( col or \"-1\")\n X.append( list(map(int,data)) ) # integers, not strings\n\n# This data file is listed in the same order as the sports\n# The variable \"y\" contains the index of the 
sport\ny = range(len(sports)) # this doesn't work\ny = list( map(int,y) ) # Instead, we need to ask python to really enumerate it!", "Actually classify (here, we depart from public script)", "from sklearn import tree\nfrom sklearn.ensemble import RandomForestClassifier\n#clf = tree.DecisionTreeClassifier(max_depth=8,min_samples_leaf=2)\nclf = tree.DecisionTreeClassifier(max_depth=13,min_samples_leaf=1)\nclf.fit(X,y)\n# Try changing the training data, so that we don't get 100% accuracy:\n#X2 = X.copy()\n#X2[15][-1] = -1\n#clf.fit(X2,y)\n\n# -- Visualize the decision tree --\n\nimport graphviz\n\ndot_data = tree.export_graphviz( clf, out_file='sportsTree.dot', feature_names = questions,impurity=False,\n class_names = sports,filled=True, rounded=True,label=None,\n proportion=True)\n# export to out_file = 'sportsTree.dot', then in vim, use `%s/\\\\n\\[.*\\]\\\\n/\\\\n/g` to remove labels\n#graph = graphviz.Source( dot_data )\n#graph.render('sportsTree')\n#graph\n\nfrom IPython.display import Image\nImage(url='sportsTree.png')\n\n# let's see how well we do\n# You can also use clf.score(X,y) \ndef correctPercentage( predictions, actual ):\n correct = 0\n for i,guess in enumerate(predictions):\n if guess == actual[i]:\n correct = correct + 1\n return correct/len(predictions)\n\nclf2 = RandomForestClassifier(max_depth=10,n_estimators=10)\nclf2 = clf2.fit(X,y)\nprint(correctPercentage( clf.predict(X), y ))\nprint(correctPercentage( clf2.predict(X), y ))\nclf.score(X,y)\n\n# cross validate (hard to do, due to small amount of data)\nclf3 = tree.DecisionTreeClassifier(random_state=0,max_depth=8)\nfrom sklearn.model_selection import cross_val_score\ncross_val_score(clf3, X, y)#, cv=2)\nlen(X)", "Now, start the real hacking, so that it let's us interact\nsome info here: http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html#sklearn.tree.DecisionTreeClassifier\nand also movie list here: 
https://docs.google.com/spreadsheets/d/1-849aPzi8Su_c5HwwDFERrogXjvSaZFfp_y9MHeO1IA/edit?usp=sharing", "tree_ = clf.tree_\nfrom sklearn.tree import _tree\nimport numpy as np\n#dir(_tree.Tree) # inspect what we have to work with\n#dir(_tree)\n\ndef parseInput(str):\n # first, ignore capitalization\n str=str.lower()\n if str[0] == 'y':\n return 1\n elif str[0] == 'n':\n return -1\n else:\n return 0\n \ndef askQuestion(node=0):\n Q = tree_.feature[node]\n threshold = tree_.threshold[node]\n if Q == _tree.TREE_UNDEFINED or Q == _tree.TREE_LEAF:\n # at a leaf node, so make the prediction\n vals = tree_.value[node][0] # size of all movies\n ind = np.argmax( vals )\n print('GUESS: ', sports[ind] )\n else:\n # ask a question and recurse\n print(questions[Q])\n ans = parseInput(input(\" [Yes/no/unsure] \"))\n if ans <= threshold:\n askQuestion(tree_.children_left[node])\n else:\n askQuestion(tree_.children_right[node])\n\n# or maybe ask for all 13 questions\ndef fullSport():\n x = [0]*len(questions)\n for i,Q in enumerate( questions ):\n print(Q)\n x[i] = parseInput(input(\" [Yes/no/unsure] \"))\n return x\n\n# Play game!\naskQuestion()\n\n# Or get all 13 unique questions on one movie, and try random forests\nx = fullSport()\nprint('PREDICTION (random forests): ', sports[ clf2.predict([x])[0] ] )\nprint('PREDICTION (decision tree ): ', sports[ clf.predict([x])[0] ] )" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
dataDogma/Computer-Science
Databases-Content/Intro_to_DB/Stanford - Introduction_to_database - Getting the database ready.ipynb
gpl-3.0
[ "Using SQL in Jupyter via SqlAlchemy ORM\n\nUsing the ORM( Object Relational Mappers) for various databases used in this notebook, some of them are:\n\n\nSqlite\n\n\nMySQL\n\n\nOracle\n\n\nPostgreSQL\n\n\nMongoDB\n\n\nNeo4j\n\n\n\nSqlite\n\nTable of Contents\n\n\nVersion Check\n\n\nConnecting to Sqlite DB engine\n\n\nDeclaring a Mapping\n\n\nCreating a Schema\n\n\nVersion Check\n\nA quick check to verify the version we are going to use.", "# importing the sqlalechemy ORM( object realtional mapper )\nimport sqlalchemy", "Connecting\n\nWe will use an in-memory-only SQLite database. To connect we use create_engine():\nSome miniscules:\n\n\nThe return value of create_engine() is an instance of Engine,\n\n\nrepresents the core interface to the database,\n\n\nadapted through a dialect that handles the details of the database and DBAPI in use.\n\n\nIn this case the SQLite dialect will interpret instructions to the Python built-in sqlite3 module.\n\n\n\n\nThe first time a method like Engine.execute() or Engine.connect() is called, the Engine establishes a real DBAPI connection to the database, which is then used to emit the SQL\n\n\nNote: When using the ORM, we typically don’t use the Engine directly once created; instead, it’s used behind the scenes by the ORM as we’ll see shortly.", "# import create_engine function for connecting to the ORM engine\nfrom sqlalchemy import create_engine\n\nengine = create_engine('sqlite:///:memory:', echo=False) # set echo to false for less output", "Declaring a Mapping\n\nWhen using the ORM, the configurational process starts by,\n\n\ndescribing the database tables we’ll be dealing with.\n\n\nthen by defining our own classes which will be mapped to those tables.\n\n\nIn modern SQLAlchemy, these two tasks are usually performed together, using a system known as Declarative, which allows us to create classes that include directives to describe the actual database table they will be mapped to.\n\nClasses mapped using the Declarative system, 
are defined in terms of a base class which,\n\n\nMaintains a \"catalog\" of classes and tables relative to that base,\n\nknown as Declarative base class.\n\n\n\nOur demo application will usually have just one instance of this base in a commonly imported module. We create the base class using the declarative_base() function, as follows:", "from sqlalchemy.ext.declarative import declarative_base\n\n# Initialize Base\nBase = declarative_base()", "Now that we have a 'Base', we can define any number of mapped classes in terms of it.\nLet's start with a single \"Table\" called - \"User\", which will store records for the end-users using our application.\n\n\nA new class called User will be the class to which we map this table.\n\n\nWithin the class, we define details about the table to which we’ll be mapping,\n\nprimarily the table name, and names and datatypes of columns:\n\n\nGO to top: TOC", "from sqlalchemy import Column, Integer, String\n\nclass User(Base):\n \n\"\"\"\n+ Class needs min of one attribute, here it's __tablename__.\n\n+ And a min of one \"Coloumn\" which is a part of a \"primary key\".\n\n\"\"\" \n __tablename__ = 'users' \n\n id = Column(Integer, primary_key=True) # and \n name = Column(String)\n fullname = Column(String)\n password = Column(String)\n\n def __repr__(self):\n return \"<User(name='%s', fullname='%s', password='%s')>\" % (\n self.name, self.fullname, self.password)", "Note:\n\n\nSQLAlchemy never makes any assumptions by itself about the table to which a class refers,\n\n\nIncluding that it has no built-in conventions for,\n\n\nnames,\n\n\ndatatypes,\n\n\nor constraints\n\n\n\n\nWhen our class is constructed, two things happen:\n\n\nDeclarative replaces all the Column objects with special Python accessors called descriptors.\n\nThis process is called -- \"Instrumentation\".\n\n\n\nThe “instrumented” mapped class will provide us,\n\n\nmeans to refer to our table in a SQL context,\n\n\nAs well as to persist and load the values of columns 
from the database.\n\n\n\n\n\n\nOutside of what the mapping process does to our class, the class remains otherwise mostly a normal Python class, to which we can define any number of ordinary attributes and methods needed by our application.\nCreating a Schema\n\nWith our User class constructed, via the Declarative system, let's define information about our table, known as Table metadata. The object used by SQLAlchemy to represent this information for a specific table is called the Table object, and here Declarative has made one for us.\nLet's inspect what our object User using the __table__ attribute.", "User.__table__" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tanle8/Data-Science
1-uIDS-courseNotes/l3-dataAnalysis.ipynb
mit
[ "Lesson 3: Data Analysis\nStatistics\nTerminology\n\nSignificant level\n In statistical hypothesis testing, a result has statistical significance when it is very unlikely to have occurred given the null hypothesis.[3] More precisely, the significance level defined for a study, α, is the probability of the study rejecting the null hypothesis, given that it were true; and the p-value of a result, p, is the probability of obtaining a result at least as extreme, given that the null hypothesis were true. The result is statistically significant, by the standards of the study, when p < α.\n Link to wikipedia article\nNormal Distribution", "# Kurt's Introduction\nfrom IPython.display import HTML\nHTML('<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/umJQ6gVT8kY\" frameborder=\"0\" allowfullscreen></iframe>')\n\n# Why is Statistics Useful?\nfrom IPython.display import HTML\nHTML('<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/DyeRm96wH5M\" frameborder=\"0\" allowfullscreen></iframe>')\n\n# Introduction to Normal (Gauss Distribution)\nfrom IPython.display import HTML\nHTML ('<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/ZfOTcwXAdEw\" frameborder=\"0\" allowfullscreen></iframe>')", "The equation for the normal distribution is:\n$$f(x) = \\frac{1}{\\sqrt{2\\pi\\sigma^2}}.e^{\\frac{-(x - \\mu)^2}{2\\sigma^2}}$$\nT-Test\nTo be more explicit:\n- It is important to note that you cannot \"accept\" a null.\n- You can just \"retain\" or \"fail to reject\".\nIf you would like to learn more about the t-test, check out this lesson in Intro to Inferential Statistics.\nWelch's T-Test In Python\nYou can check out additional information about the scipy implementation of the t-test below:\nhttp://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_ind.html", "# t-Test video\nfrom IPython.display import HTML\nHTML('<iframe width=\"369\" height=\"208\" src=\"https://www.youtube.com/embed/tjSj2OkV51A\" 
frameborder=\"0\" allowfullscreen></iframe>')\n\n# Welch's Two-Sample t-Test\nfrom IPython.display import HTML\nHTML('<iframe width=\"369\" height=\"208\" src=\"https://www.youtube.com/embed/B_1cnwYn7so\" frameborder=\"0\" allowfullscreen></iframe>')", "14. Quiz - Welch's t-Test Exercise\nPerforms a t-test on two sets of baseball data (left-handed and right-handed hitters).\nYou will be given a csv file that has three columns.\nA player's name, handedness (L for lefthanded or R for righthanded) and their\ncareer batting average (called 'avg').\nYou can look at the csv file by downloading the baseball_stats file from Downloadables below. \nWrite a function that will:\n- read the csv file into a pandas data frame, and\n- Run Welch's t-test on the two cohorts defined by handedness.\n - One cohort should be a data frame of right-handed batters. And \n - the other cohort should be a data frame of left-handed batters.\nWe have included the scipy.stats library to help you write\nor implement Welch's t-test:\nhttp://docs.scipy.org/doc/scipy/reference/stats.html\nWith a significance level of 95%, if there is no difference\nbetween the two cohorts, return a tuple consisting of\nTrue, and then the tuple returned by scipy.stats.ttest. 
\nIf there is a difference, return a tuple consisting of\nFalse, and then the tuple returned by scipy.stats.ttest.\nFor example, the tuple that you return may look like:\n(True, (9.93570222, 0.000023))\nSupporting materials\nbaseball_stats.csv", "import numpy\nimport scipy.stats\nimport pandas\n\ndef compare_averages(filename):\n \"\"\"\n The description for this quiz is above text.\n \"\"\"\n baseball_data = pandas.read_csv(filename)\n lh_player = baseball_data.loc[baseball_data['handedness'] == 'L', 'avg']\n rh_player = baseball_data.loc[baseball_data['handedness'] == 'R', 'avg']\n \n # Welch's t-test\n (t, p) = scipy.stats.ttest_ind(lh_player, rh_player, equal_var=False)\n \n # Welch's t-test results.\n result = (p > 0.05, (t, p))\n \n return result\n ", "Your calculated t-statistic is 9.93570222624\nThe correct t-statistic is +/-9.93570222624", "# Exaplaination for Welch's t-Test exercise\nfrom IPython.display import HTML\nHTML('<iframe width=\"550\" height=\"309\" src=\"https://www.youtube.com/embed/TrSU-GH7TDY\" frameborder=\"0\" allowfullscreen></iframe>')", "Non-normal Data\nWhen performing the t-Test, we assume that our data is normal.\nIn the wild, you'll often encounter probability distributions.\nThey're distinctly not normal. They might look like two diagrams below or even completely different.\n\nAs you imagine, there are still statistical tests that we can utilize when our data is not normal.\nFirst of, we should have some machinery in place for determining whether or not our data is Gaussian in the first place. A crude, inaccurate way of determining whether or not our data is normal is simply to plot a histogram of our data ans ask, does this look like a bell curve? In both of the cases above, the answer would definitely be no. But, we can do little bit better than that. There are some statistical tests that we can use to measure the likelihood that a sample is drawn from a normally distributed population. One such test is the Shapiro-Wilk test. 
The theory of this test is out of this course's scope. But you can implement this test easily like this:\nPython\n(W, p) = scipy.stats.shapiro(data)\n- where W is the Shapiro-Wilk test statistic, \n- p value, which should be interpreted the same way as we would interpret the p-value for our t-test.\nThat is, given the null hypothesis that this data is drawn from a normal distribution, what is the likelihood that we would observe a value of W that was at least as extreme as the one that we see?\nNon-Parametric Test\nA statistical test that does not assume our data is drawn from any particular underlying probability distribution.\nMann-Whitney U test is a test that tests the null hypothesis that two populations are the same:\nPython\n(U, P) = scipy.stats.mannwhitneyu(x, y)\n- x and y are two samples.\nNote\nThese have just been some of the methods that we can use when performing statistical tests on data. As you can imagine, there are a number of additional ways to handle data from different probability distributions or data that looks like it came from no probability distribution whatsoever.\nData scientists can perform many statistical procedures. But it's vital to understand the underlying structure of the data set and consequently, which statistical tests are appropriate given the data that we have. \nThere are many different types of statistical tests and even many different schools of thought within statistics regarding the correct way to analyze data. This has really just been an opportunity to get your feet wet with statistical analysis. It's just the tip of the iceberg.\n2. 
What is Machine Learning?\nIn addition to statistics, many data scientists are well versed in machine learning.\nMachine Learning is a branch of artificial intelligence that's focused on constructing systems that learn from large amounts of data to make predictions.\nThese are all the potential applications of machine learning.", "# Why is Machine Learning Useful?\n\nfrom IPython.display import HTML\nHTML('<iframe width=\"846\" height=\"476\" src=\"https://www.youtube.com/embed/uKEm9_HvkKQ\" frameborder=\"0\" allowfullscreen></iframe>')", "Statistics vs. Machine Learning\nWhat is the difference between statistics and machine learning", "# Kurt's Favorite ML Algorithm\nfrom IPython.display import HTML\nHTML('<iframe width=\"798\" height=\"449\" src=\"https://www.youtube.com/embed/qwUYjU_kmdc\" frameborder=\"0\" allowfullscreen></iframe>')", "Different Types of Learning\nPrediction with Regression\nLinear Regression with Gradient\nCost Function\nHow to minimize the cost function\nGradient Descent in Python", "# Gradient Descent in Python\nimport numpy\nimport pandas\n\ndef compute_cost(features, values, theta):\n \"\"\"\n Compute the cost of a list of parameters - theta, given a list of features \n (input data points) and values (output data points).\n \"\"\"\n m = len(values)\n sum_of_square_errors = numpy.square(numpy.dot(features, theta) - values).sum()\n cost = sum_of_square_errors / (2*m)\n\n return cost\n\ndef gradient_descent(features, values, theta, alpha, num_iterations):\n \"\"\"\n Perform gradient descent given a data set with an arbitrary number of features.\n \"\"\"\n\n # Write code here that performs num_iterations updates to the elements of theta.\n # times. Every time you compute the cost for a given list of thetas, append it \n # to cost_history.\n # See the Instructor notes for hints. 
\n \n cost_history = []\n m = len(values) \n ###########################\n ### YOUR CODE GOES HERE ###\n ###########################\n for iteration in range(num_iterations):\n # Append new cost of given list of theta to cost_history\n cost_history.append(compute_cost(features, values, theta))\n # compute gradient descent\n diff = numpy.dot(features.transpose(), values - numpy.dot(features, theta))\n theta = theta + (alpha/m)*diff\n \n return theta, pandas.Series(cost_history) # leave this line for the grader\n\n\n\nTheta =\n[ 45.35759233 -9.02442042 13.69229668]\n\nCost History = \n0 3769.194036\n1 3748.133469\n2 3727.492258\n3 3707.261946\n4 3687.434249\n5 3668.001052\n6 3648.954405\n7 3630.286519\n8 3611.989767\n9 3594.056675\n10 3576.479921\n11 3559.252334\n12 3542.366888\n13 3525.816700\n14 3509.595027\n15 3493.695263\n16 3478.110938\n17 3462.835711\n18 3447.863371\n19 3433.187834\n20 3418.803138\n21 3404.703444\n22 3390.883030\n23 3377.336290\n24 3364.057733\n25 3351.041978\n26 3338.283754\n27 3325.777897\n28 3313.519347\n29 3301.503147\n ... \n970 2686.739779\n971 2686.739192\n972 2686.738609\n973 2686.738029\n974 2686.737453\n975 2686.736881\n976 2686.736312\n977 2686.735747\n978 2686.735186\n979 2686.734628\n980 2686.734074\n981 2686.733523\n982 2686.732975\n983 2686.732431\n984 2686.731891\n985 2686.731354\n986 2686.730820\n987 2686.730290\n988 2686.729764\n989 2686.729240\n990 2686.728720\n991 2686.728203\n992 2686.727690\n993 2686.727179\n994 2686.726672\n995 2686.726168\n996 2686.725668\n997 2686.725170\n998 2686.724676\n999 2686.724185\ndtype: float64", "Coefficients Of Determination\nWe need some ways to evaluate the effectiveness of our models. \nOne way we can measure this is a quantity called the coefficient of determination also referred to as $R^2$.\nWe can define the coefficients of determination ($R^2$).\n$$R^2 = 1 - \\frac{\\sum_n(y_i - f_i)^2}{\\sum_n(y_i - \\bar{y})^2}$$\n\nNote:\ndata: $y_i ... y_n$\npredictions: $f_i ... 
f_n$\n\naverage of data: $\\bar{y}$\n\n\nThe closer $R^2$ to 1, the better our models.\n\nThe closer $R^2$ to 0, the poorer our models.\n\nQuiz: Calculating R^2\n```Python\nimport numpy as np\ndef compute_r_squared(data, predictions):\n # Write a function that, given two input numpy arrays, 'data', and 'predictions,'\n # returns the coefficient of determination, R^2, for the model that produced \n # predictions.\n # \n # Numpy has a couple of functions -- np.mean() and np.sum() --\n # that you might find useful, but you don't have to use them.\n# YOUR CODE GOES HERE\nr_squared = 1 - sum((predictions - data)**2) / sum((data - np.mean(data))**2)\n\nreturn r_squared\n\n```\nOther Considerations\n\nOther types of linear regression\nOrdinary least squares regression\nParameter estimation\nUnder/Overfitting\nMultiple local minima\n\nKurt's Advice For ML Best Practices\nQualitative viewpoint:\nAny problem you're looking at, it's always very valuable to start thinking about:\n- What sort of things do we know?\n- What sort of expectations do we have?\n- What sort of qualitative things can we get from an exploratory analysis of the data?\nSo, using k-Means clustering and PCA are a good start to do some sort of dimensionality reduction, some ways of getting the data to the point where you can look and get some qualitative insights.\nUnderstand the general structure of it, you can start to see patterns, emerging data that make sense, or either confirm or possibly go against other theories or ingrained beliefs that people have.\n-> Getting data down to that point is very important.\nQuantitative viewpoint:\nTrying to understand causal connections like which features are actually causing it. 
It's important to use a lot of caution around that and never just sort of dump a bunch of data into a model with lots of features and then jsut naively look at the thing that have the strongest weights in your model and here's what driving it.\nTips for aspiring Data Scientists\nThere're 3 areas or part of this kind of work and you should think about which parts do you really enjoy the most.\n\nFor some people, it's the process of building things, of writing code, ...\nIf you really enjoy the analysis part, the statistical and mathematical side of things, there's a lot more you can there in terms of coming up to speed with new machine learning techniques, learning statistics more in depth.\nOn the communications and strategies side there's obviously a lot that you can do to improve your communication skills, undestand how to abstract from the details, how to abstract our the high level issues that are important to a company.\n\nNote: A quick overview on dimensionality reduction and PCA (principal component analysis):\nhttp://georgemdallas.wordpress.com/2013/10/30/principal-component-analysis-4-dummies-eigenvectors-eigenvalues-and-dimension-reduction/", "from IPython.display import HTML\nHTML('<iframe width=\"798\" height=\"449\" src=\"https://www.youtube.com/embed/zS9SmHPVjJs\" frameborder=\"0\" allowfullscreen></iframe>')\n\nfrom IPython.display import HTML\nHTML('<iframe width=\"798\" height=\"449\" src=\"https://www.youtube.com/embed/wuUQl3o_hVI\" frameborder=\"0\" allowfullscreen></iframe>')\n\n# Assignment 3\n\nfrom IPython.display import HTML\nHTML('<iframe width=\"798\" height=\"449\" src=\"https://www.youtube.com/embed/OWGuZuBxS8E\" frameborder=\"0\" allowfullscreen></iframe>')\n\n# Lesson 3 Recap\n\nfrom IPython.display import HTML\nHTML('<iframe width=\"798\" height=\"449\" src=\"https://www.youtube.com/embed/u1Sh-BjiFfM\" frameborder=\"0\" allowfullscreen></iframe>')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ray-project/ray
doc/source/ray-core/examples/plot_parameter_server.ipynb
apache-2.0
[ "Parameter Server\n{tip}\nFor a production-grade implementation of distributed\ntraining, use [Ray Train](https://docs.ray.io/en/master/train/train.html).\nThe parameter server is a framework for distributed machine learning training.\nIn the parameter server framework, a centralized server (or group of server\nnodes) maintains global shared parameters of a machine-learning model\n(e.g., a neural network) while the data and computation of calculating\nupdates (i.e., gradient descent updates) are distributed over worker nodes.\n{image} /ray-core/images/param_actor.png\n:align: center\nParameter servers are a core part of many machine learning applications. This\ndocument walks through how to implement simple synchronous and asynchronous\nparameter servers using Ray actors.\nTo run the application, first install some dependencies.\nbash\npip install torch torchvision filelock\nLet's first define some helper functions and import some dependencies.", "import os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nfrom filelock import FileLock\nimport numpy as np\n\nimport ray\n\n\ndef get_data_loader():\n \"\"\"Safely downloads data. 
Returns training/validation set dataloader.\"\"\"\n mnist_transforms = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n )\n\n # We add FileLock here because multiple workers will want to\n # download data, and this may cause overwrites since\n # DataLoader is not threadsafe.\n with FileLock(os.path.expanduser(\"~/data.lock\")):\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n \"~/data\", train=True, download=True, transform=mnist_transforms\n ),\n batch_size=128,\n shuffle=True,\n )\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\"~/data\", train=False, transform=mnist_transforms),\n batch_size=128,\n shuffle=True,\n )\n return train_loader, test_loader\n\n\ndef evaluate(model, test_loader):\n \"\"\"Evaluates the accuracy of the model on a validation dataset.\"\"\"\n model.eval()\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(test_loader):\n # This is only set to finish evaluation faster.\n if batch_idx * len(data) > 1024:\n break\n outputs = model(data)\n _, predicted = torch.max(outputs.data, 1)\n total += target.size(0)\n correct += (predicted == target).sum().item()\n return 100.0 * correct / total", "Setup: Defining the Neural Network\nWe define a small neural network to use in training. 
We provide\nsome helper functions for obtaining data, including getter/setter\nmethods for gradients and weights.", "class ConvNet(nn.Module):\n \"\"\"Small ConvNet for MNIST.\"\"\"\n\n def __init__(self):\n super(ConvNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 3, kernel_size=3)\n self.fc = nn.Linear(192, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 3))\n x = x.view(-1, 192)\n x = self.fc(x)\n return F.log_softmax(x, dim=1)\n\n def get_weights(self):\n return {k: v.cpu() for k, v in self.state_dict().items()}\n\n def set_weights(self, weights):\n self.load_state_dict(weights)\n\n def get_gradients(self):\n grads = []\n for p in self.parameters():\n grad = None if p.grad is None else p.grad.data.cpu().numpy()\n grads.append(grad)\n return grads\n\n def set_gradients(self, gradients):\n for g, p in zip(gradients, self.parameters()):\n if g is not None:\n p.grad = torch.from_numpy(g)", "Defining the Parameter Server\nThe parameter server will hold a copy of the model.\nDuring training, it will:\n\n\nReceive gradients and apply them to its model.\n\n\nSend the updated model back to the workers.\n\n\nThe @ray.remote decorator defines a remote process. It wraps the\nParameterServer class and allows users to instantiate it as a\nremote actor.", "@ray.remote\nclass ParameterServer(object):\n def __init__(self, lr):\n self.model = ConvNet()\n self.optimizer = torch.optim.SGD(self.model.parameters(), lr=lr)\n\n def apply_gradients(self, *gradients):\n summed_gradients = [\n np.stack(gradient_zip).sum(axis=0) for gradient_zip in zip(*gradients)\n ]\n self.optimizer.zero_grad()\n self.model.set_gradients(summed_gradients)\n self.optimizer.step()\n return self.model.get_weights()\n\n def get_weights(self):\n return self.model.get_weights()", "Defining the Worker\nThe worker will also hold a copy of the model. During training. it will\ncontinuously evaluate data and send gradients\nto the parameter server. 
The worker will synchronize its model with the\nParameter Server model weights.", "@ray.remote\nclass DataWorker(object):\n def __init__(self):\n self.model = ConvNet()\n self.data_iterator = iter(get_data_loader()[0])\n\n def compute_gradients(self, weights):\n self.model.set_weights(weights)\n try:\n data, target = next(self.data_iterator)\n except StopIteration: # When the epoch ends, start a new epoch.\n self.data_iterator = iter(get_data_loader()[0])\n data, target = next(self.data_iterator)\n self.model.zero_grad()\n output = self.model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n return self.model.get_gradients()", "Synchronous Parameter Server Training\nWe'll now create a synchronous parameter server training scheme. We'll first\ninstantiate a process for the parameter server, along with multiple\nworkers.", "iterations = 200\nnum_workers = 2\n\nray.init(ignore_reinit_error=True)\nps = ParameterServer.remote(1e-2)\nworkers = [DataWorker.remote() for i in range(num_workers)]", "We'll also instantiate a model on the driver process to evaluate the test\naccuracy during training.", "model = ConvNet()\ntest_loader = get_data_loader()[1]", "Training alternates between:\n\nComputing the gradients given the current weights from the server\nUpdating the parameter server's weights with the gradients.", "print(\"Running synchronous parameter server training.\")\ncurrent_weights = ps.get_weights.remote()\nfor i in range(iterations):\n gradients = [worker.compute_gradients.remote(current_weights) for worker in workers]\n # Calculate update after all gradients are available.\n current_weights = ps.apply_gradients.remote(*gradients)\n\n if i % 10 == 0:\n # Evaluate the current model.\n model.set_weights(ray.get(current_weights))\n accuracy = evaluate(model, test_loader)\n print(\"Iter {}: \\taccuracy is {:.1f}\".format(i, accuracy))\n\nprint(\"Final accuracy is {:.1f}.\".format(accuracy))\n# Clean up Ray resources and processes before the next 
example.\nray.shutdown()", "Asynchronous Parameter Server Training\nWe'll now create a synchronous parameter server training scheme. We'll first\ninstantiate a process for the parameter server, along with multiple\nworkers.", "print(\"Running Asynchronous Parameter Server Training.\")\n\nray.init(ignore_reinit_error=True)\nps = ParameterServer.remote(1e-2)\nworkers = [DataWorker.remote() for i in range(num_workers)]", "Here, workers will asynchronously compute the gradients given its\ncurrent weights and send these gradients to the parameter server as\nsoon as they are ready. When the Parameter server finishes applying the\nnew gradient, the server will send back a copy of the current weights to the\nworker. The worker will then update the weights and repeat.", "current_weights = ps.get_weights.remote()\n\ngradients = {}\nfor worker in workers:\n gradients[worker.compute_gradients.remote(current_weights)] = worker\n\nfor i in range(iterations * num_workers):\n ready_gradient_list, _ = ray.wait(list(gradients))\n ready_gradient_id = ready_gradient_list[0]\n worker = gradients.pop(ready_gradient_id)\n\n # Compute and apply gradients.\n current_weights = ps.apply_gradients.remote(*[ready_gradient_id])\n gradients[worker.compute_gradients.remote(current_weights)] = worker\n\n if i % 10 == 0:\n # Evaluate the current model after every 10 updates.\n model.set_weights(ray.get(current_weights))\n accuracy = evaluate(model, test_loader)\n print(\"Iter {}: \\taccuracy is {:.1f}\".format(i, accuracy))\n\nprint(\"Final accuracy is {:.1f}.\".format(accuracy))", "Final Thoughts\nThis approach is powerful because it enables you to implement a parameter\nserver with a few lines of code as part of a Python application.\nAs a result, this simplifies the deployment of applications that use\nparameter servers and to modify the behavior of the parameter server.\nFor example, sharding the parameter server, changing the update rule,\nswitching between asynchronous and synchronous 
updates, ignoring\nstraggler workers, or any number of other customizations,\nwill only require a few extra lines of code." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mohanprasath/Course-Work
numpy/numpy_exercises_from_kyubyong/Statistics_solutions.ipynb
gpl-3.0
[ "Statistics", "__author__ = \"kyubyong. kbpark.linguist@gmail.com\"\n\nimport numpy as np\n\nnp.__version__", "Order statistics\nQ1. Return the minimum value of x along the second axis.", "x = np.arange(4).reshape((2, 2))\nprint(\"x=\\n\", x)\nprint(\"ans=\\n\", np.amin(x, 1))", "Q2. Return the maximum value of x along the second axis. Reduce the second axis to the dimension with size one.", "x = np.arange(4).reshape((2, 2))\nprint(\"x=\\n\", x)\nprint(\"ans=\\n\", np.amax(x, 1, keepdims=True))", "Q3. Calcuate the difference between the maximum and the minimum of x along the second axis.", "x = np.arange(10).reshape((2, 5))\nprint(\"x=\\n\", x)\n\nout1 = np.ptp(x, 1)\nout2 = np.amax(x, 1) - np.amin(x, 1)\nassert np.allclose(out1, out2)\nprint(\"ans=\\n\", out1)\n", "Q4. Compute the 75th percentile of x along the second axis.", "x = np.arange(1, 11).reshape((2, 5))\nprint(\"x=\\n\", x)\n\nprint(\"ans=\\n\", np.percentile(x, 75, 1))", "Averages and variances\nQ5. Compute the median of flattened x.", "x = np.arange(1, 10).reshape((3, 3))\nprint(\"x=\\n\", x)\n\nprint(\"ans=\\n\", np.median(x))", "Q6. Compute the weighted average of x.", "x = np.arange(5)\nweights = np.arange(1, 6)\n\nout1 = np.average(x, weights=weights)\nout2 = (x*(weights/weights.sum())).sum()\nassert np.allclose(out1, out2)\nprint(out1)", "Q7. Compute the mean, standard deviation, and variance of x along the second axis.", "x = np.arange(5)\nprint(\"x=\\n\",x)\n\nout1 = np.mean(x)\nout2 = np.average(x)\nassert np.allclose(out1, out2)\nprint(\"mean=\\n\", out1)\n\nout3 = np.std(x)\nout4 = np.sqrt(np.mean((x - np.mean(x)) ** 2 ))\nassert np.allclose(out3, out4)\nprint(\"std=\\n\", out3)\n\nout5 = np.var(x)\nout6 = np.mean((x - np.mean(x)) ** 2 )\nassert np.allclose(out5, out6)\nprint(\"variance=\\n\", out5)\n", "Correlating\nQ8. Compute the covariance matrix of x and y.", "x = np.array([0, 1, 2])\ny = np.array([2, 1, 0])\n\nprint(\"ans=\\n\", np.cov(x, y))", "Q9. 
In the above covariance matrix, what does the -1 mean?\nIt means x and y correlate perfectly in opposite directions.\nQ10. Compute Pearson product-moment correlation coefficients of x and y.", "x = np.array([0, 1, 3])\ny = np.array([2, 4, 5])\n\nprint(\"ans=\\n\", np.corrcoef(x, y))", "Q11. Compute cross-correlation of x and y.", "x = np.array([0, 1, 3])\ny = np.array([2, 4, 5])\n\nprint(\"ans=\\n\", np.correlate(x, y))", "Histograms\nQ12. Compute the histogram of x against the bins.", "x = np.array([0.5, 0.7, 1.0, 1.2, 1.3, 2.1])\nbins = np.array([0, 1, 2, 3])\nprint(\"ans=\\n\", np.histogram(x, bins))\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\nplt.hist(x, bins=bins)\nplt.show()", "Q13. Compute the 2d histogram of x and y.", "xedges = [0, 1, 2, 3]\nyedges = [0, 1, 2, 3, 4]\nx = np.array([0, 0.1, 0.2, 1., 1.1, 2., 2.1])\ny = np.array([0, 0.1, 0.2, 1., 1.1, 2., 3.3])\nH, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))\nprint(\"ans=\\n\", H)\n\nplt.scatter(x, y)\nplt.grid()", "Q14. Count number of occurrences of 0 through 7 in x.", "x = np.array([0, 1, 1, 3, 2, 1, 7])\nprint(\"ans=\\n\", np.bincount(x))", "Q15. Return the indices of the bins to which each value in x belongs.", "x = np.array([0.2, 6.4, 3.0, 1.6])\nbins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])\n\nprint(\"ans=\\n\", np.digitize(x, bins))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
gfeiden/Notebook
Projects/upper_sco_age/isochrone_comparison.ipynb
mit
[ "Mass Track & Isochrone Comparisons\nComparing standard and magnetic isochrones at 10 Myr.", "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "Load each of the isochrones.", "std = np.genfromtxt('/Users/grefe950/evolve/dmestar/iso/gs98/p000/a0/amlt1884/dmestar_00010.0myr_z+0.00_a+0.00_phx.iso')\nmag = np.genfromtxt('/Users/grefe950/evolve/data/beq/models/GS98/dmestar_00010.0myr_z+0.00_a+0.00_phx_magBeq.iso')", "Load a few mass tracks of each variety. First, standard (i.e., non-magnetic) mass tracks.", "std_m0900 = np.genfromtxt('/Users/grefe950/evolve/dmestar/trk/gs98/p000/a0/amlt1884/m0900_GS98_p000_p0_y28_mlt1.884.trk',\n usecols=(0, 1, 2, 3, 4))\nstd_m1200 = np.genfromtxt('/Users/grefe950/evolve/dmestar/trk/gs98/p000/a0/amlt1884/m1200_GS98_p000_p0_y28_mlt1.884.trk',\n usecols=(0, 1, 2, 3, 4))\nstd_m1500 = np.genfromtxt('/Users/grefe950/evolve/dmestar/trk/gs98/p000/a0/amlt1884/m1500_GS98_p000_p0_y28_mlt1.884.trk',\n usecols=(0, 1, 2, 3, 4))\nstd_m1700 = np.genfromtxt('/Users/grefe950/evolve/dmestar/trk/gs98/p000/a0/amlt1884/m1700_GS98_p000_p0_y28_mlt1.884.trk',\n usecols=(0, 1, 2, 3, 4))\n\nmag_m0900 = np.genfromtxt('/Users/grefe950/evolve/data/beq/models/GS98/m0900_GS98_p000_p0_y28_mlt1.884_mag23kG.trk',\n usecols=(0, 1, 2, 3, 4))\nmag_m1200 = np.genfromtxt('/Users/grefe950/evolve/data/beq/models/GS98/m1200_GS98_p000_p0_y28_mlt1.884_mag18kG.trk',\n usecols=(0, 1, 2, 3, 4))\nmag_m1500 = np.genfromtxt('/Users/grefe950/evolve/data/beq/models/GS98/m1500_GS98_p000_p0_y28_mlt1.884_mag13kG.trk',\n usecols=(0, 1, 2, 3, 4))\nmag_m1700 = np.genfromtxt('/Users/grefe950/evolve/data/beq/models/GS98/m1700_GS98_p000_p0_y28_mlt1.884_mag08kG.trk',\n usecols=(0, 1, 2, 3, 4))", "Now we need to place each onto a fixed grid so that we can analyze the relative change of the magnetic track to the standard track.", "from scipy.interpolate import interp1d\nlog10_ages = np.arange(6.0, 8.01, 0.01)\n\ndef setEqualAgeGrid(iso):\n icurve = interp1d(iso[:,0], iso, 
axis=0, kind='linear')\n return icurve(10**np.arange(6.0, 8.01, 0.01))\n\n# Compare relative radii as a function of age\nfig, ax = plt.subplots(3, 1, figsize=(6.0, 12.0))\n\nages = 10**log10_ages\n\nstd_trk = setEqualAgeGrid(std_m0900)\nmag_trk = setEqualAgeGrid(mag_m0900)\nax[0].semilogx(ages, 10**mag_trk[:,4]/10**std_trk[:,4] - 1.0, '-', lw=3)\nax[1].semilogx(ages, 10**mag_trk[:,1]/10**std_trk[:,1] - 1.0, '-', lw=3)\nax[2].semilogx(ages, 10**mag_trk[:,3]/10**std_trk[:,3] - 1.0, '-', lw=3)\n\nstd_trk = setEqualAgeGrid(std_m1200)\nmag_trk = setEqualAgeGrid(mag_m1200)\nax[0].semilogx(ages, 10**mag_trk[:,4]/10**std_trk[:,4] - 1.0, '--', lw=3)\nax[1].semilogx(ages, 10**mag_trk[:,1]/10**std_trk[:,1] - 1.0, '--', lw=3)\nax[2].semilogx(ages, 10**mag_trk[:,3]/10**std_trk[:,3] - 1.0, '--', lw=3)\n\nstd_trk = setEqualAgeGrid(std_m1500)\nmag_trk = setEqualAgeGrid(mag_m1500)\nax[0].semilogx(ages, 10**mag_trk[:,4]/10**std_trk[:,4] - 1.0, '-.', lw=3)\nax[1].semilogx(ages, 10**mag_trk[:,1]/10**std_trk[:,1] - 1.0, '-.', lw=3)\nax[2].semilogx(ages, 10**mag_trk[:,3]/10**std_trk[:,3] - 1.0, '-.', lw=3)\n\n# Compare evolution\nfig, ax = plt.subplots(3, 1, figsize=(6.0, 12.0))\n\nages = 10**log10_ages\n\nstd_trk = setEqualAgeGrid(std_m0900)\nmag_trk = setEqualAgeGrid(mag_m0900)\nax[0].semilogx(ages, 10**mag_trk[:,4], '-', lw=3, c='b')\nax[0].semilogx(ages, 10**std_trk[:,4], '--', lw=3, c='b')\nax[1].semilogx(ages, 10**mag_trk[:,1], '-', lw=3, c='b')\nax[1].semilogx(ages, 10**std_trk[:,1], '--', lw=3, c='b')\nax[2].semilogx(ages, 10**mag_trk[:,3], '-', lw=3, c='b')\nax[2].semilogx(ages, 10**std_trk[:,3], '--', lw=3, c='b')\n\nstd_trk = setEqualAgeGrid(std_m1200)\nmag_trk = setEqualAgeGrid(mag_m1200)\nax[0].semilogx(ages, 10**mag_trk[:,4], '-', lw=3, c='r')\nax[0].semilogx(ages, 10**std_trk[:,4], '--', lw=3, c='r')\nax[1].semilogx(ages, 10**mag_trk[:,1], '-', lw=3, c='r')\nax[1].semilogx(ages, 10**std_trk[:,1], '--', lw=3, c='r')\nax[2].semilogx(ages, 10**mag_trk[:,3], '-', lw=3, 
c='r')\nax[2].semilogx(ages, 10**std_trk[:,3], '--', lw=3, c='r')\n\nstd_trk = setEqualAgeGrid(std_m1500)\nmag_trk = setEqualAgeGrid(mag_m1500)\nax[0].semilogx(ages, 10**mag_trk[:,4], '-', lw=3, c='g')\nax[0].semilogx(ages, 10**std_trk[:,4], '--', lw=3, c='g')\nax[1].semilogx(ages, 10**mag_trk[:,1], '-', lw=3, c='g')\nax[1].semilogx(ages, 10**std_trk[:,1], '--', lw=3, c='g')\nax[2].semilogx(ages, 10**mag_trk[:,3], '-', lw=3, c='g')\nax[2].semilogx(ages, 10**std_trk[:,3], '--', lw=3, c='g')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tedunderwood/biographies
code/timeseries_lag.ipynb
mit
[ "Time series correlations\nwith and without a lag\nI'm going to see how well distributions across time agree with each other, in general, and then see whether the average correlation can be improved by a lag in either direction.", "import os, csv\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import pearsonr\n%matplotlib inline\n\n\nrelativepath = os.path.join('..', 'data', 'bio_logratios.csv')\nbio = pd.read_csv(relativepath)\n\nrelativepath = os.path.join('..', 'data', 'fiction_logratios.csv')\nfic = pd.read_csv(relativepath)\n\nbio.head()\n\nfic.head()", "Getting shared words\nThis code is basically stolen from Natalie.", "bio_list = list(bio.columns.values)\nfic_list = list(fic.columns.values)\nshared_words = set(fic_list).intersection(set(bio_list))\nshared_list = list(shared_words)\nshared_list.pop(shared_list.index('thedate'))\nfic_subset = fic[list(shared_list)]\nbio_subset = bio[list(shared_list)]\nprint(fic_subset.shape)\nprint(bio_subset.shape)\n\nword = 'hair'\nr, p = pearsonr(fic_subset[word], bio_subset[word])\nprint(r, p)", "Fisher's transform: averaging correlation coefficients\nDefining a function to average r values.", "def fisher_average(listofrvals):\n zscores = (np.arctanh(listofrvals))\n avg_z = sum(zscores) / len(zscores)\n transformed_r = np.tanh(avg_z)\n naive_r = sum(listofrvals) / len(listofrvals)\n \n return transformed_r, naive_r\n ", "Measuring correlations without lag\nAn initial pass on calculating the correlation between two time series for each word:\na) genderedness-across-time in fiction and\nb) genderedness-across-time in biography.", "# Let's calculate correlations for each word across time\n\nr_list = []\nsignificant = 0\n\nfor w in shared_list:\n r, p = pearsonr(fic_subset[w], bio_subset[w])\n r_list.append(r)\n if (p / 2) < 0.05 and r > 0:\n significant += 1\n # The scipy function reports a two-tailed p-value\n # But we are clearly expecting positive correlations\n # and 
wouldn't consider negative ones as significant.\n # So I'm dividing p by 2 to convert to a one-tailed test.\n\nprint(significant)\nprint(fisher_average(r_list))", "Correlations with lag\nLet's try sliding the time series by as much as 12 years either way. Of course, when we shift the time series relative to each other, the area of overlap in the middle will be smaller than the original timeline. In order to make an apples-to-apples comparison between time series of the same length, we'll need to cut something off the end of the time series even when we're just shifting them zero, or one years, relative to each other. In other words, if we want to compare the central overlap between two time series shifted 12 years relative to each other, we need to compare it to an unshifted pairing that has twelve years taken off either end.\nTrimming the ends is potentially a complicating factor, since the ends of the timeline might be inherently less (or more) likely to correlate. We can partly address this by running the shift comparison several times, with a different \"ceiling\" on the shift each time. 
I.e., the first time we'll shift a maximum of two years and That way we can see which differences are due to considering a smaller timeline, and which differences are due to the shift itself.", "def shifted_correlation(vectora, vectorb, shift_ceiling, shift):\n \n assert len(vectora) == len(vectorb)\n maxlen = len(vectora)\n floor = shift_ceiling\n ceiling = maxlen - shift_ceiling\n floorb = floor + shift\n ceilingb = ceiling + shift\n slicea = vectora[floor : ceiling]\n sliceb = vectorb[floorb : ceilingb]\n return pearsonr(slicea, sliceb)\n\nthe_correlations = dict()\n# we're going to create a different dictionary entry\n# for each \"shift-ceiling\"\n\nfor shift_ceiling in range(2, 15):\n print(shift_ceiling)\n \n shifts = []\n correlations = []\n \n for shift in range(-shift_ceiling, shift_ceiling + 1):\n temp_r = []\n significant = 0\n\n for w in shared_list:\n r, p = shifted_correlation(fic_subset[w], bio_subset[w], shift_ceiling, shift)\n temp_r.append(r)\n if (p / 2) < 0.05 and r > 0:\n significant += 1\n\n true_avg, naive_avg = fisher_average(temp_r)\n shifts.append(shift)\n correlations.append(true_avg)\n \n the_correlations[shift_ceiling] = list(zip(shifts, correlations))\n \n # we save a list of tuples where x[0] = shift and x[1] = avg corr\n\nfor sc in range(2,15):\n x, y = zip(*the_correlations[sc])\n plt.plot(x, y)\n\nplt.show()", "Interpretation.\nI'm not exactly sure what to make of those results. I think the peak correlation is (usually) around 0, which suggests that lag is not helping.\nOn the other hand, there's a pretty clear asymmetry in the curve. Correlations are generally lower when biography is shifted back relative to fiction than when it's shifted forward. 
I'm not sure I should make anything of that, but it's intriguing enough to make me want to keep trying this with different slices of the timeline.\nThat will help rule out the possibility that this is due to something odd about the ends of the timeline.\nLet's try five different 100-year periods. 1800-1900, 1820-1920, and so on. In each case, we'll try a 16-year lag.", "import math\n\ndef correlate_arbitrary_slice(vectora, vectorb, shift, absolute_floor):\n \n assert len(vectora) == len(vectorb)\n maxlen = len(vectora)\n floor = absolute_floor\n ceiling = absolute_floor + 100\n floorb = floor + shift\n ceilingb = ceiling + shift\n slicea = vectora[floor : ceiling]\n sliceb = vectorb[floorb : ceilingb]\n \n return pearsonr(slicea, sliceb)\n\nthe_correlations = dict()\n# we're going to create a different dictionary entry\n# for each \"shift-ceiling\"\n\nfor absolute_floor in range(20, 100, 20):\n print(absolute_floor)\n shift_ceiling = 15\n shifts = []\n correlations = []\n \n for shift in range(-shift_ceiling, shift_ceiling + 1):\n temp_r = []\n significant = 0\n\n for w in shared_list:\n r, p = correlate_arbitrary_slice(fic_subset[w], bio_subset[w], shift, absolute_floor)\n if not math.isnan(r):\n temp_r.append(r)\n if (p / 2) < 0.05 and r > 0:\n significant += 1\n\n true_avg, naive_avg = fisher_average(temp_r)\n shifts.append(shift)\n correlations.append(true_avg)\n \n the_correlations[absolute_floor] = list(zip(shifts, correlations))\n \n # we save a list of tuples where x[0] = shift and x[1] = avg corr\n\nfor af in range(20, 100, 20):\n x, y = zip(*the_correlations[af])\n plt.plot(x, y)\n\nplt.show()", "Okay, that's very telling.\nIf the code I wrote is running correctly, there are big differences in the average correlation across different segments of the timeline. We need to figure out why that would be true, first of all! 
But secondly, if that's true, it's really not going to work to compare shift-backward and shift-forward, because of course you'l get different correlations.", "for af in range(20, 100, 20):\n x, y = zip(*the_correlations[af])\n meancorr = sum(y) / len(y)\n print(\"From \" + str(1780 + af) + \" to \" + str(1780 + af + 100) + \": \" + str(meancorr))", "Okay, this could be caused by an error of some kind. But if it's not an error, it means biography and fiction are tracking each other less well as we move forward in time.\nWorth keeping in mind that it could be caused by distribution of examples in the datasets being uneven across time, or gender differences in authorship, or the vocabulary being biased in some way across time.\nCalculating means\ncode stolen from Natalie", "bio_mean = bio_subset.mean(axis=0).reset_index()\nbio_mean.columns = ['word', 'bio_mean']\nfic_mean = fic_subset.mean(axis=0).reset_index()\nfic_mean.columns = ['word', 'fic_mean']\nmeans_df = pd.DataFrame(bio_mean).merge(pd.DataFrame(fic_mean), on='word', how='inner')\nmeans_df.head()\n\nmeans_df.sort_values(by='fic_mean', ascending=False).head(10)\n\nall_words = list(means_df.word.unique())\nlen(all_words)\n\ndef plot_diff_means(words, adjust=False, title=False, label_outliers=False, normalize_axes=False, save=False):\n df = means_df.loc[(means_df.word.isin(words)),:]\n ax0 = df.plot('bio_mean', 'fic_mean', kind='scatter', s=50, figsize=(15,15))\n ax0.plot(0, 0, 'r+', ms=15, mew=2, label='(0,0) Gender Neutral')\n \n texts = []\n for x, y, s in zip(df['bio_mean'], df['fic_mean'], df['word']):\n if label_outliers:\n if ((-1.5 < x < 2) == False) | ((-2 < y < 1.5) == False):\n texts.append((x, y, s))\n \n plt.legend(loc='best')\n plt.xlabel('m <-- Avg Non-fiction Diff 1780-1923 --> f', fontsize=16, )\n plt.ylabel('m <-- Avg Fiction Diff 1780-1923 --> f', fontsize=16)\n if title:\n plt.title(title)\n if normalize_axes:\n ylim = ax0.get_ylim()\n xlim = ax0.get_xlim()\n new_low = min(xlim[0], 
ylim[0])\n new_hi = max(xlim[1], ylim[1])\n plt.ylim(new_low, new_hi)\n plt.xlim(new_low, new_hi)\n \n if label_outliers:\n for x, y, label in texts:\n plt.annotate(label, xy=(x, y), size = 11)\n \n if save:\n if title:\n plt.savefig('./{}.png'.format(title))\n else:\n plt.savefig('./genderDiffMeans.png')\n plt.show()\n \nplot_diff_means(all_words, adjust=True, title=\"Average Gender Difference for All Shared Words (Equal Axes)\", label_outliers=True, normalize_axes=True, save=True)\nplot_diff_means(all_words, adjust=True, title=\"Average Gender Difference for All Shared Words\", label_outliers=True, normalize_axes=False, save = True)\n\npearsonr(means_df['bio_mean'], means_df['fic_mean'])" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
0.19/_downloads/e5c0288e15772e4fb31189b766e9d7be/plot_metadata_epochs.ipynb
bsd-3-clause
[ "%matplotlib inline", "Pandas querying and metadata with Epochs objects\nDemonstrating pandas-style string querying with Epochs metadata.\nFor related uses of :class:mne.Epochs, see the starting tutorial\ntut-epochs-class.\nSometimes you may have a complex trial structure that cannot be easily\nsummarized as a set of unique integers. In this case, it may be useful to use\nthe metadata attribute of :class:mne.Epochs objects. This must be a\n:class:pandas.DataFrame where each row corresponds to an epoch, and each\ncolumn corresponds to a metadata attribute of each epoch. Columns must\ncontain either strings, ints, or floats.\nIn this dataset, subjects were presented with individual words\non a screen, and the EEG activity in response to each word was recorded.\nWe know which word was displayed in each epoch, as well as\nextra information about the word (e.g., word frequency).\nLoading the data\nFirst we'll load the data. If metadata exists for an :class:mne.Epochs\nfif file, it will automatically be loaded in the metadata attribute.", "# Authors: Chris Holdgraf <choldgraf@gmail.com>\n# Jona Sassenhagen <jona.sassenhagen@gmail.com>\n# Eric Larson <larson.eric.d@gmail.com>\n\n# License: BSD (3-clause)\n\nimport mne\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Load the data from the internet\npath = mne.datasets.kiloword.data_path() + '/kword_metadata-epo.fif'\nepochs = mne.read_epochs(path)\n\n# The metadata exists as a Pandas DataFrame\nprint(epochs.metadata.head(10))", "We can use this metadata attribute to select subsets of Epochs. This\nuses the Pandas :meth:pandas.DataFrame.query method under the hood.\nAny valid query string will work. 
Below we'll make two plots to compare\nbetween them:", "av1 = epochs['Concreteness < 5 and WordFrequency < 2'].average()\nav2 = epochs['Concreteness > 5 and WordFrequency > 2'].average()\n\njoint_kwargs = dict(ts_args=dict(time_unit='s'),\n topomap_args=dict(time_unit='s'))\nav1.plot_joint(show=False, **joint_kwargs)\nav2.plot_joint(show=False, **joint_kwargs)", "Next we'll choose a subset of words to keep.", "words = ['film', 'cent', 'shot', 'cold', 'main']\nepochs['WORD in {}'.format(words)].plot_image(show=False)", "Note that traditional epochs sub-selection still works. The traditional\nMNE methods for selecting epochs will supersede the rich metadata querying.", "epochs['cent'].average().plot(show=False, time_unit='s')", "Below we'll show a more involved example that leverages the metadata\nof each epoch. We'll create a new column in our metadata object and use\nit to generate averages for many subsets of trials.", "# Create two new metadata columns\nmetadata = epochs.metadata\nis_concrete = metadata[\"Concreteness\"] > metadata[\"Concreteness\"].median()\nmetadata[\"is_concrete\"] = np.where(is_concrete, 'Concrete', 'Abstract')\nis_long = metadata[\"NumberOfLetters\"] > 5\nmetadata[\"is_long\"] = np.where(is_long, 'Long', 'Short')\nepochs.metadata = metadata", "Now we can quickly extract (and plot) subsets of the data. 
For example, to\nlook at words split by word length and concreteness:", "query = \"is_long == '{0}' & is_concrete == '{1}'\"\nevokeds = dict()\nfor concreteness in (\"Concrete\", \"Abstract\"):\n for length in (\"Long\", \"Short\"):\n subset = epochs[query.format(length, concreteness)]\n evokeds[\"/\".join((concreteness, length))] = list(subset.iter_evoked())\n\n# For the actual visualisation, we store a number of shared parameters.\nstyle_plot = dict(\n colors={\"Long\": \"Crimson\", \"Short\": \"Cornflowerblue\"},\n linestyles={\"Concrete\": \"-\", \"Abstract\": \":\"},\n split_legend=True,\n ci=.68,\n show_sensors='lower right',\n legend='lower left',\n truncate_yaxis=\"auto\",\n picks=epochs.ch_names.index(\"Pz\"),\n)\n\nfig, ax = plt.subplots(figsize=(6, 4))\nmne.viz.plot_compare_evokeds(evokeds, axes=ax, **style_plot)\nplt.show()", "To compare words which are 4, 5, 6, 7 or 8 letters long:", "letters = epochs.metadata[\"NumberOfLetters\"].unique().astype(int).astype(str)\n\nevokeds = dict()\nfor n_letters in letters:\n evokeds[n_letters] = epochs[\"NumberOfLetters == \" + n_letters].average()\n\nstyle_plot[\"colors\"] = {n_letters: int(n_letters)\n for n_letters in letters}\nstyle_plot[\"cmap\"] = (\"# of Letters\", \"viridis_r\")\ndel style_plot['linestyles']\n\nfig, ax = plt.subplots(figsize=(6, 4))\nmne.viz.plot_compare_evokeds(evokeds, axes=ax, **style_plot)\nplt.show()", "And finally, for the interaction between concreteness and continuous length\nin letters:", "evokeds = dict()\nquery = \"is_concrete == '{0}' & NumberOfLetters == {1}\"\nfor concreteness in (\"Concrete\", \"Abstract\"):\n for n_letters in letters:\n subset = epochs[query.format(concreteness, n_letters)]\n evokeds[\"/\".join((concreteness, n_letters))] = subset.average()\n\nstyle_plot[\"linestyles\"] = {\"Concrete\": \"-\", \"Abstract\": \":\"}\n\nfig, ax = plt.subplots(figsize=(6, 4))\nmne.viz.plot_compare_evokeds(evokeds, axes=ax, **style_plot)\nplt.show()", "<div class=\"alert 
alert-info\"><h4>Note</h4><p>Creating an :class:`mne.Epochs` object with metadata is done by passing\n a :class:`pandas.DataFrame` to the ``metadata`` kwarg as follows:</p></div>", "data = epochs.get_data()\nmetadata = epochs.metadata.copy()\nepochs_new = mne.EpochsArray(data, epochs.info, metadata=metadata)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
dipanjank/ml
data_analysis/letter_recognition_uci.ipynb
gpl-3.0
[ "<h1 align=\"center\">Letter Recognition - UCI</h1>", "import pandas as pd\nimport numpy as np\n%pylab inline\npylab.style.use('ggplot')", "Getting the Data\nThis is a dataset of 20 image features for uppercase English characters.\nhttps://archive.ics.uci.edu/ml/datasets/Letter+Recognition", "url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/letter-recognition/letter-recognition.data'\nletter_df = pd.read_csv(url, header=None)\n\nletter_df.head()", "Next we attach the column names.", "s = \"\"\" 1.\tlettr\tcapital letter\t(26 values from A to Z)\n\t 2.\tx-box\thorizontal position of box\t(integer)\n\t 3.\ty-box\tvertical position of box\t(integer)\n\t 4.\twidth\twidth of box\t\t\t(integer)\n\t 5.\thigh \theight of box\t\t\t(integer)\n\t 6.\tonpix\ttotal # on pixels\t\t(integer)\n\t 7.\tx-bar\tmean x of on pixels in box\t(integer)\n\t 8.\ty-bar\tmean y of on pixels in box\t(integer)\n\t 9.\tx2bar\tmean x variance\t\t\t(integer)\n\t10.\ty2bar\tmean y variance\t\t\t(integer)\n\t11.\txybar\tmean x y correlation\t\t(integer)\n\t12.\tx2ybr\tmean of x * x * y\t\t(integer)\n\t13.\txy2br\tmean of x * y * y\t\t(integer)\n\t14.\tx-ege\tmean edge count left to right\t(integer)\n\t15.\txegvy\tcorrelation of x-ege with y\t(integer)\n\t16.\ty-ege\tmean edge count bottom to top\t(integer)\n\t17.\tyegvx\tcorrelation of y-ege with x\t(integer)\"\"\"\n\nlines = [l.strip() for l in s.split('\\n')]\nfeature_names = [l.split()[1] for l in lines]\nfeature_names = [f.replace('-', '_') for f in feature_names]\n\nletter_df.columns = feature_names\n\nletter_df.head()", "Check for Class Imbalance", "letter_counts = letter_df['lettr'].value_counts()\nletter_counts.sort_index(ascending=False).plot(kind='barh')", "All the classes are represented in a fairly balanced manner, so looks like in this instance we don't have to address class imbalance.\nFeature Correlations", "features_df = letter_df.drop('lettr', axis=1)\nletters = letter_df['lettr']\n\nimport seaborn as sns\nf_corrs 
= features_df.corr()\n\nfig, ax = pylab.subplots(figsize=(12, 12))\nsns.heatmap(f_corrs, annot=True, ax=ax)", "The first 5 features, x_box, y_box, width, high, onpix are highly correlated with each other. \nANOVA Feature Selection\nANOVA F-test based feature selection requires all feature values to be positive.", "features_df[features_df < 0].sum(axis=0)", "The above condition holds in our case - none of the features have negative values.", "from sklearn.feature_selection import f_classif\n\nt_stats, p_vals = f_classif(features_df, letters)\n\nf_test_results = pd.DataFrame(np.column_stack([t_stats, p_vals]),\n index=features_df.columns.copy(),\n columns=['test_statistic', 'p_value'])\n\nf_test_results.plot(kind='bar', subplots=True)", "Top 5 Features with GaussianNB Classifier", "from sklearn.feature_selection import SelectKBest\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import cross_val_score, StratifiedKFold\nfrom sklearn.naive_bayes import GaussianNB\n\nestimator = GaussianNB()\nselector = SelectKBest(f_classif, k=5)\n\npipeline = Pipeline([\n ('selector', selector),\n ('model', estimator)\n])\n\ncross_validator = StratifiedKFold(n_splits=10, shuffle=True)\n\nscores = cross_val_score(pipeline, features_df, letters, \n cv=cross_validator, scoring='f1_macro')\n\nscore_1 = pd.Series(scores)\n\nscore_1.plot(kind='bar')", "Top 10 Features with GaussianNB Classifier", "estimator = GaussianNB()\nselector = SelectKBest(f_classif, k=10)\n\npipeline = Pipeline([\n ('selector', selector),\n ('model', estimator)\n])\n\ncross_validator = StratifiedKFold(n_splits=10, shuffle=True)\nscores = cross_val_score(pipeline, features_df, letters, \n cv=cross_validator, scoring='f1_macro')\n\nscore_2 = pd.Series(scores)\n\ncombined_scores = pd.concat([score_1, score_2], axis=1, keys=['cv_top5', 'cv_top10'])\ncombined_scores.plot(kind='bar')", "Top 15 Features with GaussianNB Classifier", "estimator = GaussianNB()\nselector = SelectKBest(f_classif, 
k=15)\n\npipeline = Pipeline([\n ('selector', selector),\n ('model', estimator)\n])\n\ncross_validator = StratifiedKFold(n_splits=10, shuffle=True)\nscores = cross_val_score(pipeline, features_df, letters, \n cv=cross_validator, scoring='f1_macro')\n\nscore_3 = pd.Series(scores)\n\ncombined_scores2 = pd.concat([score_2, score_3], axis=1, keys=['cv_top10', 'cv_top15'])\ncombined_scores2.plot(kind='bar')", "We get a significant (in layman terms, not in a statistical testing sense) boost in accuracy by going from top 5 to top 10 features. But the improvement in accuracy with top 15 features over top 10 features are marginal.\nPairplot", "top_5_feature_names = f_test_results.nlargest(5, columns='test_statistic').index\npairplot_df = features_df.loc[:, top_5_feature_names].copy()\npairplot_df['letter'] = letters\n\nsns.pairplot(pairplot_df, hue='letter')\n\nfrom sklearn.svm import SVC\n\nestimator = SVC(C=100.0, kernel='rbf')\nselector = SelectKBest(f_classif, k=5)\n\npipeline = Pipeline([\n ('selector', selector),\n ('model', estimator)\n])\n\ncross_validator = StratifiedKFold(n_splits=10, shuffle=True)\nscores = cross_val_score(pipeline, features_df, letters, \n cv=cross_validator, scoring='f1_macro')\n\nsvm_5 = pd.Series(scores)\n\nsvm_5.plot(kind='bar', title='10 Fold CV with SVM (top 5 features)')\n\ncombined_3 = pd.concat([score_3, svm_5], axis=1, keys=['Gaussian_15', 'svm_5'])\ncombined_3.plot(kind='bar')\n\nestimator = SVC(C=100.0, kernel='rbf')\nselector = SelectKBest(f_classif, k=10)\n\npipeline = Pipeline([\n ('selector', selector),\n ('model', estimator)\n])\n\ncross_validator = StratifiedKFold(n_splits=10, shuffle=True)\nscores = cross_val_score(pipeline, features_df, letters, \n cv=cross_validator, scoring='f1_macro')\n\nsvm_10 = pd.Series(scores)\n\ncombined_4 = pd.concat([svm_5, svm_10], axis=1, keys=['svm_5', 'svm_10'])\ncombined_4.plot(kind='bar')\n\nestimator = SVC(C=100.0, kernel='rbf')\nselector = SelectKBest(f_classif, k=15)\n\npipeline = 
Pipeline([\n ('selector', selector),\n ('model', estimator)\n])\n\ncross_validator = StratifiedKFold(n_splits=10, shuffle=True)\nscores = cross_val_score(pipeline, features_df, letters, \n cv=cross_validator, scoring='f1_macro')\n\nsvm_15 = pd.Series(scores)\n\ncombined_5 = pd.concat([svm_10, svm_15], axis=1, keys=['svm_10', 'svm_15'])\ncombined_5.plot(kind='bar')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
cerrno/neurokernel
notebooks/sensory_int.ipynb
bsd-3-clause
[ "Sensory Integration Demo\nThis notebook demonstrates how to use Neurokernel to integrate multiple independently developed LPUs. In this example, partial models of Drosophila's olfaction and vision systems are connected to an LPU that performs basic multisensory coincidence detection.\nBackground\nThe olfaction and vision models employed in this example were independently developed and implemented. The olfaction model contains a model of the antennal lobe LPU, while the vision model contains models of the fly's lamina (combined with cells in the retina), and medulla LPUs.\nThe integration LPU consists of 8 neurons that each accept input from both the antennal lobe and medulla. All 3 projection neurons in glomerulus DA1 in the antennal lobe project to all of the neurons in the integration LPU. The medulla contains 8 wide field tangential neurons that receive inputs from 8 groups of medullar columns (depicted below) that cover overlapping verticle and horizontal portions of the visual field and also connect to the neurons in the integration LPU. These 8 neurons are sensitive to quick light intensity changes. \nIt should be noted that the integration LPU employed in this example is artificial and does not directly correspond to any specific biological LPU in the fly brain. 
\n<img src='files/files/sensory-integration-grid.png' />\nThe various LPUs comprised by the integration model are connected as follows:\n<img src='files/files/sensory-integration-pipeline.png' />\nA script for generating the GEXF file containing the antennal lobe LPU model \nconfiguration and additional GEXF files containing the configurations of the vision and \nintegration LPUs are available in the examples/data/sensory_int subdirectory of the Neurokernel\nsource code.\nExecuting the Model\nAssuming that the Neurokernel source has been cloned to ~/neurokernel, we first generate the odorant and visual input stimuli and construct the sensory integration LPU used in the example:", "%cd -q ~/neurokernel/examples/sensory_int/data\n%run gen_vis_input.py\n%run gen_olf_input.py\n%run gen_integrate.py", "Once the input and the configuration are ready, we execute the entire model. Note that the interconnections between the integration LPU and both the antennal lobe and medulla LPUs are configured in the simulation script rather than in a GEXF file.", "%cd -q ~/neurokernel/examples/sensory_int/\n%run sensory_int_demo.py", "Next, we generate a video to show the final result:", "%run visualize_output.py", "The resulting video can be viewed below:", "import IPython.display\nIPython.display.YouTubeVideo('e-eUOtOF9fc')", "The first row of the video depicts the input to the visual system. The visual input has two periods of input activity interleaved with quiescent periods. The first event is a quick vertically moving black-to-white edge followed by a white-to-black edge. The second event is a quick horizontally moving black-to-white edge followed by white-to-black edge.\nThe second row of the video depicts the odorant stimulus profile; this stimulus consists of a series of ON and OFF events.\nThe third row is a raster plot of the spikes generated by the 8 neurons in the integration LPU. 
Each neuron emits spikes if the visual signal stimulates the columns that are connected to it at the same time the odorant is on. For example, note that a visual stimulus in the leftmost vertical region of columns alone (34s to 39s) or the delivery of the odorant alone (3s to 7s) can not induce integration neuron 1 to emit spikes; the neuron does detect when the visual and olfactory inputs coincide, however.\nAcknowledgements\nThe olfaction, vision, and sensory integration models demonstrated in this notebook were developed by Nikul H. Ukani, Chung-Heng Yeh, and Yiyin Zhou." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
petermchale/yeast_bioinformatics
analysis.ipynb
mit
[ "Yeast bioinformatic analysis\nThis Notebook lives at Github. \nHere is a rendered version of this notebook.\nResearch Question\nThe eukaryotic genome is adorned by molecules called transcription factors (TFs). At any given time, some of these are regulating gene expression, e.g. by interacting with RNA polymerase, but others are not. How can we distinguish the functional TF-DNA binding events from a potentially large background of non-functional binding events?\nQuantifying TF-DNA binding\nTo approach this question, we first need to quantify the strength with which TFs bind DNA. A TF binds DNA by making contact with a sequence of $L$ nucleotides \n\\begin{equation} \n\\vec{s} = \\langle s_1, s_2, \\ldots, s_L\\rangle,\n\\end{equation} \nwhere $s_i \\in {A,C,G,T}$. Denote by $E(\\vec{s})$ the binding energy of a given TF to a DNA sub-sequence $\\vec{s}$. \nWith binding lengths of $L = 10-20$ nucleotides, there are too many possible $\\vec{s}$ to measure $E(\\vec{s})$ exhaustively.\nFortunately, the contribution of each nucleotide to the binding energy of the sub-sequence is approximately independent and additive: \n\\begin{equation} \nE(\\vec{s}) = \\sum_{i=1}^L \\epsilon_i(s_i),\n\\end{equation}\nreducing the impractical problem of determining the large number of values of $E(\\vec{s})$ to the practical problem of the determining the \n $L\\times 4$ energy matrix, $\\epsilon_i(s)$. This matrix has been determined for a TF called Gal4 using in vitro measurements of the equilibrium binding constants \n\\begin{equation} \nK(\\vec{s}) \\propto e^{-\\beta E(\\vec{s})} \n\\end{equation} \nfor all sequences $\\vec{s}$ that differ in just one nucleotide from a given sequence. 
I manually fetched these data from the literature [Liang et al 1996], and stored them in the file data/Gal4_affinity.in.", "import sys, os \nsys.path.append(os.getcwd() + '/source')\n\nfrom extract import createEnergyMatrix\nenergy_matrix = createEnergyMatrix('data/Gal4_affinity.in')", "Here, I have stored the energy matrix as a list of dictionaries for computational reasons, but we can use pandas to visualize it:", "import pandas as pd \ndf = pd.DataFrame(energy_matrix)\ndf", "In the above data structure:\n* row labels are positions within a binding site\n* column labels are the identities of nucleotides at those positions\n* matrix elements are TF-DNA binding energies\nSo, for example, a DNA sequence that binds optimally to Gal4 can be extracted by listing the nucleotides with the lowest energy at each position:", "print(df.idxmin(axis=1))", "Extracting the yeast DNA sequence\nI manually downloaded, from the Saccharomyces Genome Database, the DNA sequence of the third chromosome of yeast, stored in FASTA format, and read it into a string:", "from extract import getFasta\nwith open('data/chr03.fsa') as f:\n header, chromosome = getFasta(f)\nprint('\\nHere is the beginning of the DNA sequence of the chromosome:\\n')\nprint(chromosome[:100])\nprint('\\nThere are ', len(chromosome), 'nucleotides in this chromosome')", "Distribution of TF-DNA binding energies genome-wide\nWith the energy matrix and chromosome sequence in hand, I next computed the energy with which Gal4 binds every possible sub-sequence of length $L = 17$ on the chromosome:", "from auxFunctions import calcEnergyListWithMatrix\nTFBS = calcEnergyListWithMatrix(chromosome, energy_matrix)\nTFBS.head()", "Here is how those TF-DNA binding energies are distributed throughout the genome:", "import numpy as np \nfrom matplotlib import pyplot as plt\nfrom auxFunctions import binList\n%matplotlib inline\n\nenergyBins, numberSites = binList(TFBS['TF-DNA binding energy'], xMin=-5.0, xMax=50, 
binWidth=0.25)\n\nfontsize = 14\nfontsize_tick = 12\nfig = plt.figure(figsize=(7,5), facecolor='w')\nax = fig.add_subplot(111)\nax.plot(energyBins, numberSites, linewidth=0, marker='s', markersize=8, color='red')\nax.set_xlabel('TF-DNA binding energy (kT)', fontsize=fontsize)\nax.set_ylabel('number of genomic sites', fontsize=fontsize)\nax.set_yscale('log')\nax.set_xlim(0, 40)\nax.tick_params(axis='both', which='major', labelsize=fontsize_tick)", "Putting the y-axis on a log scale reveals that the distribution is approximately parabolic, implying that on a linear scale the distribution is approximately Gaussian. This is expected from the fact that each TF-DNA binding energy is a sum of single-nucleotide energies that are, to a good approximate, independently and identically distributed (Central Limit Theorem). \nNotice also that, though highly-specific (low-energy) sites do indeed exist, the sheer number of less-specific (intermediate- to high-energy) sites across the genome can, in principle, soak up a significant number of TFs. \nExtracting yeast promoters\nTo identify where regulatory regions of genes are likely to be, I downloaded the following General Feature Format file from the Saccharomyces Genome Database:", "!head -20 data/saccharomyces_cerevisiae_chr03.gff", "This file contains all genomic features on the third chromosome of this species. 
I located the coding-sequence features, and used them to extract regions of DNA that lie upstream of each transcription start site.", "def extractPromoters():\n \"\"\" parse saccharomyces_cerevisiae_chr03.gff and extract promoters \"\"\"\n\n promLength = 100\n\n with open('data/saccharomyces_cerevisiae_chr03.gff') as fin:\n\n # skip over header lines\n line = fin.readline()\n while line[0] == \"#\":\n line = fin.readline()\n\n features = []\n while line:\n seqid, source, feature_type, start, end, score, strand, phase, attributes = line.split()\n if feature_type == 'CDS':\n attributes = attributes.split(';')\n initDict = [attribute.split('=') for attribute in attributes]\n attributes = dict(initDict)\n systematicGeneName = attributes['Parent']\n\n if 'orf_classification' in attributes:\n classification = attributes['orf_classification']\n else:\n classification = '.'\n if 'gene' in attributes:\n standardGeneName = attributes['gene']\n else:\n standardGeneName = '.'\n\n # which DNA strand the gene is encoded on determines where the promoter is located\n if strand == '+':\n promStart = int(start) - promLength\n promEnd = int(start)\n elif strand == '-':\n promStart = int(end)\n promEnd = int(end) + promLength\n\n promoter = chromosome[max(promStart, 0):promEnd]\n features += [(standardGeneName, systematicGeneName, classification, promStart, promEnd, promoter)]\n line = fin.readline()\n\n return pd.DataFrame(data=features, columns=['standard gene name', 'systematic gene name', \n 'classification', 'promoter start position', \n 'promoter end position', \n 'promoter sequence'])\n\npromoters = extractPromoters()\npromoters.head()", "I have placed a . 
in a field to indicate missing data.\nDetermining whether potential TF binding sites lie in promoters\nI next classified each $L$-subsequence (potential binding site) according to whether it lies in a promoter region or not, and added that information as two new columns in the appropriate pandas data frame:", "import warnings\nwarnings.filterwarnings('ignore')\n\nTFBS_high_affinity = TFBS[TFBS['TF-DNA binding energy'] < 10]\n\nTFBS_high_affinity_categorical_variable = [0]*len(TFBS_high_affinity)\nTFBS_high_affinity_promoter = ['.']*len(TFBS_high_affinity)\ncount = -1\nfor TFBS_index, TFBS_row in TFBS_high_affinity.iterrows():\n count += 1\n TFBS_start = TFBS_row['binding-site start position']\n TFBS_end = TFBS_row['binding-site end position']\n for promoter_index, promoter_row in promoters.iterrows():\n promoter_start = promoter_row['promoter start position']\n promoter_end = promoter_row['promoter end position']\n if (promoter_start < TFBS_start) and (TFBS_end < promoter_end):\n TFBS_high_affinity_categorical_variable[count] = 1\n TFBS_high_affinity_promoter[count] = promoter_row['systematic gene name']\n break\nTFBS_high_affinity['promoter categorical variable'] = TFBS_high_affinity_categorical_variable\nTFBS_high_affinity['promoter'] = TFBS_high_affinity_promoter\nTFBS_high_affinity.head()", "Are promoters enriched for subsequences that bind TFs tightly?\nI used scikit-learn to determine whether subsequences with greater affinity for the TF (i.e. 
lower energy) tend to be located in promoter regions more often than you'd expect by chance:", "energies = TFBS_high_affinity['TF-DNA binding energy']\ncategories = TFBS_high_affinity['promoter categorical variable']\n\n# sample data\nfig = plt.figure(figsize=(7,5), facecolor='w')\nax = fig.add_subplot(111)\nax.plot(energies, categories, \n linewidth=0, marker='o', markersize=8, color='red', label='sample data')\nax.set_xlabel('TF-DNA binding energy (kT)', fontsize=fontsize)\nax.set_ylabel('promoter categorical variable', fontsize=fontsize)\nax.tick_params(axis='both', which='major', labelsize=fontsize_tick)\nax.set_ylim(-0.1, 1.1)\n\n# logistic regression\nfrom sklearn.linear_model import LogisticRegression\nlr = LogisticRegression()\nenergies_rs = energies.values.reshape((len(energies),1))\nlr.fit(energies_rs, categories)\nx = np.linspace(energies.min(),energies.max())\nx_rs = x.reshape((len(x), 1))\nprobs = lr.predict_proba(x_rs)\nax.plot(x, probs[:,1], linewidth=2, marker=None, color='black', \n label='logistic probability of lying in promoter region')\n\n# probability that a randomly choosen site lies in a promoter \nnumber_promoters, number_cols = promoters.shape\npromoter_size = len(promoters['promoter sequence'][0])\npromoter_size_summed_over_chromosome = number_promoters*promoter_size\nchromosome_size = len(chromosome)\nnull_probability_of_random_bp_lying_in_promoter = float(promoter_size_summed_over_chromosome)/float(chromosome_size)\nax.plot([x.min(), x.max()], \n [null_probability_of_random_bp_lying_in_promoter, \n null_probability_of_random_bp_lying_in_promoter],\n 'r--',\n linewidth=3, marker=None, color='black',\n label='null probability of lying in promoter region')\n\n# label the plot\nlegend = ax.legend(loc='center', fontsize=fontsize)", "Actionable insight\nThe data show that high-affinity subsequences are more likely to lie in promoter regions than expected by chance, suggesting that there has been evolutionary pressure to retain strong binding 
sites in promoters. The actionable insight is therefore that searches for new functional TF binding sites should be focused on promoter regions." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
emmaqian/DataScientistBootcamp
DS_HW2_HuiminQian_060117.ipynb
mit
[ "数据应用学院 Data Scientist Program Hw2\n<h1 id=\"tocheading\">Table of Contents</h1>\n<div id=\"toc\"></div>", "%%javascript\n$.getScript('https://kmahelona.github.io/ipython_notebook_goodies/ipython_notebook_toc.js')\n\n# import the necessary package at the very beginning\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport sklearn\n", "1. Gnerate x = a sequence of points, y = sin(x)+a where a is a small random error.", "## Type Your Answer Below ##\nnp.random.seed(1)\nX = np.random.random([100, 1]).ravel()*10 # generate a set of 100 random float in range [0, 10]\nX[:5]\n\nrandom_error = np.random.randn(100) # genrate a set of 100 random error from a standard normal distribution\nrandom_error[:5]\n\nY = np.sin(X) + random_error # y = sin(x)+a where a is a small random error\nY[:5]", "2. Draw a scatter plot of x and y.", "## Type Your Answer Below ##\nplt.scatter(x=X, y=Y, marker='o', alpha=0.4, color='b')\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.title('Y=sin(X) + random_error')\n\nprint('X: ', X.shape, ' ', 'Y: ', Y.shape )", "3. Use linear regression model to predict y, with only one feature--x. Please print out the training and validation score of your model and the mathematical formula of your model.\nYou need to split the data into training and testing data before you build the model. 
This is the same procedure you need to do in the following questions.", "## Type Your Answer Below ##\n# reshape X from row vector in shape(100, ) to column vector in shape (100, 1)\nX_re = X.reshape(X.shape[0], 1)\nX_re.shape\n\n# initiate a linear regression model\nfrom sklearn.linear_model import LinearRegression\nlr = LinearRegression()\nlr\n\n# Use train_test_split to train and test lr\nfrom sklearn import model_selection\nXtrain, Xtest, Ytrain, Ytest = model_selection.train_test_split(X_re, Y, train_size=70, random_state=1)\nprint(Xtrain.shape, Xtest.shape, Ytrain.shape, Ytest.shape)\n\nlr.fit(Xtrain, Ytrain)\nYpred = lr.predict(Xtest)\nprint('The mathematical formula of linear regression model: ', 'Y = ' + str(lr.coef_) + '*' + 'X + ' + str(lr.intercept_), '\\n')\nprint('The coefficient of determination R^2 of the training set: ', lr.score(Xtrain, Ytrain), '\\n')\nprint('The coefficient of determination R^2 of the testing set: ', lr.score(Xtest, Ytest), '\\n')\nplt.scatter(Ytest, Ypred, marker='o', alpha=0.5)\nplt.xlabel('Ytest')\nplt.ylabel('Ypred')\nplt.title('Linear regression model performance')\n\n# Get the training and validation score of your model \n# training and validation score具体指的什么?\n\nfrom sklearn.model_selection import cross_val_score\ncv_scores = cross_val_score(lr, X_re, Y, cv=3) # 3-fold cross validation\nprint('cv_scores: ', cv_scores)\nprint('mean of cv_scores: ', cv_scores.mean())\n#The mean score and the 95% confidence interval of the score estimate are hence given by:\nprint(\"Accuracy: %0.2f (+/- %0.2f)\" % (cv_scores.mean(), cv_scores.std() * 2))", "怎么理解cv_scores是负数?\n4. 
Draw a plot showing your predicted y, real y, and ground truth--sin(x) of x.", "## Type Your Answer Below ##\n\n# show predicted y in red color\nYpred = lr.predict(X_re)\nplt.plot(X, Ypred, label='Predicted Y', color='r')\n\n# show real y in blue color\nplt.scatter(X, Y, label='Real Y', color='b')\n\n# show ground truth - sin(X) in green color\nYground = np.sin(X)\nplt.scatter(X, Yground, label='Ground truth Y', color='g')\n\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.title('Three types of Y in a plot')\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)", "5. Try to build a linear model using two features--x and x^2. Please print out the training and validation score score and mathematical formula.", "## Type Your Answer Below ##\nX2 = X_re**2\nX2 = np.hstack([X_re, X2])\nprint(X2.shape)\n\nlr2 = LinearRegression()\nlr2.fit(X2, Y)\ncv_scores2 = cross_val_score(lr2, X2, Y, cv=3)\nprint('cv_scores for model using x and x^2: ', cv_scores2)\nprint('mean of cv_scores for model using x and x^2: ', cv_scores2.mean())\n#The mean score and the 95% confidence interval of the score estimate are hence given by:\nprint(\"Accuracy: %0.2f (+/- %0.2f)\" % (cv_scores2.mean(), cv_scores2.std() * 2))\n\nprint('The mathematical formula of linear regression model: ', 'Y = ' + str(lr2.coef_[0]) + '*X ' + str(lr2.coef_[1]) + \"*X^2 + \" + str(lr.intercept_), '\\n')\n\n\n# visualize new set of Ypred, Y, Yground_truth\nYpred2 = lr2.predict(X2)\nYground = np.sin(X)\n\nplt.scatter(X, Ypred2, label='predicted y using x and x**2', color='r')\nplt.scatter(X, Y, label='real y', color='b')\nplt.scatter(X, Yground, label='ground truth - sin(x)', color='g')\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.title('Three types of Y in a plot')\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)", "6. Try to build linear models with features from x to x, x^2, x^3,... x^15, and plot the changes of training score and validation score with the number of features gets larger. 
Accoding to the result you get, what's the best number of features here?\nIn this question, you need to build 15 models, with features of [x],[x,x^2],[x,x^2,x^3],...,[x,x^2,...,x^15]. For each model you need to calculate the training score and validation score then make the plot as we required.", "from sklearn.model_selection import validation_curve\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import cross_val_score\n\n\nindex =[] # generate an array with number 1 to 15\nfor i in range(1, 16):\n index.append(i)\n \ndf = pd.DataFrame(columns = index) # create a new dataframe with 15 columns\ndf.iloc[:, 0] = X # the 1st column is X**1\n\nmean_cv_scores = []\nmean_train_scores = []\nmean_valid_scores= []\n\nfor i in index:\n print(\"################ Adding \" + \"x**\" + str(i) + \" ######################\")\n df.loc[:, i] = X**i # Add a new column of values\n lr = LinearRegression() # start a new linear regression model with the new column taking into consideration\n #lr.fit(df.iloc[:, :i], Y)\n #Ypredict = lr.predict(df.iloc[:, :i])\n cv_scores = cross_val_score(lr, df.iloc[:, :i], Y, cv=3)\n print(\"mean cv score for the model is:\", np.mean(cv_scores))\n mean_cv_scores.append(np.mean(cv_scores))\n train_score, valid_score = validation_curve(Ridge(), df.iloc[:, :i], Y, \"alpha\", np.logspace(-7, 3, 3))\n print(\"mean train score is: \", np.mean(train_score))\n print(\"mean valid score is: \", np.mean(valid_score))\n mean_train_scores.append(np.mean(train_score))\n mean_valid_scores.append(np.mean(valid_score))\n print()\n\nplt.plot(df.columns, mean_train_scores, c='b', label='mean train scores') #plot the training score and validation score showing what happens when feature set gets larger\nplt.plot(df.columns, mean_valid_scores, c='r', label = 'mean valid scores') \nplt.xlabel('feature')\nplt.ylabel('mean of evaluation scores')\nplt.legend(loc=0)\n\nplt.plot(df.columns, mean_cv_scores, label='mean cv scores') #plot the training score and 
validation score showing what happens when feature set gets larger\nplt.xlabel('feature')\nplt.ylabel('mean of cross validation score')\nplt.legend(loc=0)", "<font color='red'> 7. Observation: </font>\nAt X^13, the mean train scores reached their maxium. \nAt X^7, the mean valid scores reached their maxium. \nAt X^13, the mean cv scores reached minimun point. \n<font color='red'> 8. Conclusion: </font>\nSo the best number of features here is 13." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
machinelearningnanodegree/stanford-cs231
solutions/kvn219/assignment1/two_layer_net.ipynb
mit
[ "Implementing a Neural Network\nIn this exercise we will develop a neural network with fully-connected layers to perform classification, and test it out on the CIFAR-10 dataset.", "from __future__ import division\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom cs231n.classifiers.neural_net import TwoLayerNet\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\nnp.set_printoptions(precision=4)\npd.set_option('precision',1)\n\n# for auto-reloading external modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\n\ndef rel_error(x, y):\n \"\"\" returns relative error \"\"\"\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))", "We will use the class TwoLayerNet in the file cs231n/classifiers/neural_net.py to represent instances of our network. The network parameters are stored in the instance variable self.params where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation.", "# Create a small net and some toy data to check your implementations.\n# Note that we set the random seed for repeatable experiments.\n\ninput_size = 4\nhidden_size = 10\nnum_classes = 3\nnum_inputs = 5\n\ndef init_toy_model():\n np.random.seed(0)\n return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)\n\ndef init_toy_data():\n np.random.seed(1)\n X = 10 * np.random.randn(num_inputs, input_size)\n y = np.array([0, 1, 2, 2, 1])\n return X, y\n\nnet = init_toy_model()\nX, y = init_toy_data()", "Forward pass: compute scores\nOpen the file cs231n/classifiers/neural_net.py and look at the method TwoLayerNet.loss. 
This function is very similar to the loss functions you have written for the SVM and Softmax exercises: It takes the data and weights and computes the class scores, the loss, and the gradients on the parameters. \nImplement the first part of the forward pass which uses the weights and biases to compute the scores for all inputs.", "scores = net.loss(X)\nprint 'Your scores:'\nprint scores\nprint\nprint 'correct scores:'\ncorrect_scores = np.asarray([\n [-0.81233741, -1.27654624, -0.70335995],\n [-0.17129677, -1.18803311, -0.47310444],\n [-0.51590475, -1.01354314, -0.8504215 ],\n [-0.15419291, -0.48629638, -0.52901952],\n [-0.00618733, -0.12435261, -0.15226949]])\nprint correct_scores\nprint \n\n# The difference should be very small. We get < 1e-7\nprint 'Difference between your scores and correct scores:'\nprint np.sum(np.abs(scores - correct_scores))", "Forward pass: compute loss\nIn the same function, implement the second part that computes the data and regularizaion loss.", "loss, _ = net.loss(X, y, reg=0.1)\ncorrect_loss = 1.30378789133\n\n# should be very small, we get < 1e-12\nprint 'Difference between your loss and correct loss:'\nprint np.sum(np.abs(loss - correct_loss))", "Backward pass\nImplement the rest of the function. This will compute the gradient of the loss with respect to the variables W1, b1, W2, and b2. Now that you (hopefully!) 
have a correctly implemented forward pass, you can debug your backward pass using a numeric gradient check:", "from cs231n.gradient_check import eval_numerical_gradient\n\n# Use numeric gradient checking to check your implementation of the backward pass.\n# If your implementation is correct, the difference between the numeric and\n# analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2.\n\nloss, grads = net.loss(X, y, reg=0.1)\n\n# these should all be less than 1e-8 or so\nfor param_name in grads:\n f = lambda W: net.loss(X, y, reg=0.1)[0]\n param_grad_num = eval_numerical_gradient(f, net.params[param_name], verbose=False)\n print '%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name]))", "Train the network\nTo train the network we will use stochastic gradient descent (SGD), similar to the SVM and Softmax classifiers. Look at the function TwoLayerNet.train and fill in the missing sections to implement the training procedure. This should be very similar to the training procedure you used for the SVM and Softmax classifiers. You will also have to implement TwoLayerNet.predict, as the training process periodically performs prediction to keep track of accuracy over time while the network trains.\nOnce you have implemented the method, run the code below to train a two-layer network on toy data. 
You should achieve a training loss less than 0.2.", "net = init_toy_model()\nstats = net.train(X, y, X, y,\n learning_rate=1e-1,\n reg=1e-5,\n num_iters=100,\n verbose=True)\n\nprint 'Final training loss: ', stats['loss_history'][-1]\n\n# plot the loss history\nplt.plot(stats['loss_history'])\nplt.xlabel('iteration')\nplt.ylabel('training loss')\nplt.title('Training Loss history')\nplt.show()", "Load the data\nNow that you have implemented a two-layer network that passes gradient checks and works on toy data, it's time to load up our favorite CIFAR-10 data so we can use it to train a classifier on a real dataset.", "from cs231n.data_utils import load_CIFAR10\n\ndef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):\n \"\"\"\n Load the CIFAR-10 dataset from disk and perform preprocessing to prepare\n it for the two-layer neural net classifier. These are the same steps as\n we used for the SVM, but condensed to a single function. \n \"\"\"\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n \n # Subsample the data\n mask = range(num_training, num_training + num_validation)\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = range(num_training)\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = range(num_test)\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n # Reshape data to rows\n X_train = X_train.reshape(num_training, -1)\n X_val = X_val.reshape(num_validation, -1)\n X_test = X_test.reshape(num_test, -1)\n\n return X_train, y_train, X_val, y_val, X_test, y_test\n\n\n# Invoke the above function to get our data.\nX_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()\nprint 'Train data shape: ', X_train.shape\nprint 'Train labels shape: ', y_train.shape\nprint 
'Validation data shape: ', X_val.shape\nprint 'Validation labels shape: ', y_val.shape\nprint 'Test data shape: ', X_test.shape\nprint 'Test labels shape: ', y_test.shape", "Train a network\nTo train our network we will use SGD with momentum. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate.", "input_size = 32 * 32 * 3\nhidden_size = 50\nnum_classes = 10\n\nnet = TwoLayerNet(input_size, hidden_size, num_classes)\n\n# Train the network\nstats = net.train(X_train, y_train, X_val, y_val,\n num_iters=1000, batch_size=200,\n learning_rate=1e-4, learning_rate_decay=0.95,\n reg=0.5, verbose=True)\n\n# Predict on the validation set\nval_acc = (net.predict(X_val) == y_val).mean()\nprint 'Validation accuracy: ', val_acc\n\n", "Debug the training\nWith the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good.\nOne strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization.\nAnother strategy is to visualize the weights that were learned in the first layer of the network. 
In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized.", "# Plot the loss function and train / validation accuracies\nplt.subplot(2, 1, 1)\nplt.plot(stats['loss_history'])\nplt.title('Loss history')\nplt.xlabel('Iteration')\nplt.ylabel('Loss')\n\nplt.subplot(2, 1, 2)\nplt.plot(stats['train_acc_history'], label='train')\nplt.plot(stats['val_acc_history'], label='val')\nplt.title('Classification accuracy history')\nplt.xlabel('Epoch')\nplt.ylabel('Clasification accuracy')\nplt.legend(loc='best')\nplt.show()\n\nfrom cs231n.vis_utils import visualize_grid\n\n# Visualize the weights of the network\n\ndef show_net_weights(net):\n W1 = net.params['W1']\n W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)\n plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))\n plt.gca().axis('off')\n plt.show()\n\nshow_net_weights(net)", "Tune your hyperparameters\nWhat's wrong?. Looking at the visualizations above, we see that the loss is decreasing more or less linearly, which seems to suggest that the learning rate may be too low. Moreover, there is no gap between the training and validation accuracy, suggesting that the model we used has low capacity, and that we should increase its size. On the other hand, with a very large model we would expect to see more overfitting, which would manifest itself as a very large gap between the training and validation accuracy.\nTuning. Tuning the hyperparameters and developing intuition for how they affect the final performance is a large part of using Neural Networks, so we want you to get a lot of practice. Below, you should experiment with different values of the various hyperparameters, including hidden layer size, learning rate, numer of training epochs, and regularization strength. You might also consider tuning the learning rate decay, but you should be able to get good performance using the default value.\nApproximate results. 
You should be aim to achieve a classification accuracy of greater than 48% on the validation set. Our best network gets over 52% on the validation set.\nExperiment: You goal in this exercise is to get as good of a result on CIFAR-10 as you can, with a fully-connected Neural Network. For every 1% above 52% on the Test set we will award you with one extra bonus point. Feel free implement your own techniques (e.g. PCA to reduce dimensionality, or adding dropout, or adding features to the solver, etc.).", "X_train.shape\n\nbest_net = None # store the best model into this \n\n#################################################################################\n# TODO: Tune hyperparameters using the validation set. Store your best trained #\n# model in best_net. #\n# #\n# To help debug your network, it may help to use visualizations similar to the #\n# ones we used above; these visualizations will have significant qualitative #\n# differences from the ones we saw above for the poorly tuned network. #\n# #\n# Tweaking hyperparameters by hand can be fun, but you might find it useful to #\n# write code to sweep through possible combinations of hyperparameters #\n# automatically like we did on the previous exercises. 
#\n#################################################################################\ninput_size = 32 * 32 * 3\nhidden_size = 50\nnum_classes = 10\nbest_val = -1\n\nnet = TwoLayerNet(input_size, hidden_size, num_classes)\n\n# hyperparameters\nlearning_rates = [1e-3, 1e-4, 1e-5]\nregs = [0.4, 0.5, 0.6]\nlearning_rate_decays = [0.5, 0.75, 0.95]\n\n\n# Train the network\n\nfor lr in learning_rates:\n for rate in regs:\n for decay in learning_rate_decays:\n print \n print(\"learning rate: {}, epochs: {}, decay: {}\".format(lr, rate, decay))\n stats = net.train(X_train, y_train, X_val, y_val,\n num_iters=1000,\n batch_size=200,\n learning_rate=lr,\n learning_rate_decay=0.95,\n reg=rate,\n verbose=False)\n\n # Predict on the validation set\n val_acc = (net.predict(X_val) == y_val).mean()\n\n print 'Validation accuracy: ', val_acc\n\n if net.loss > best_val:\n best_net = net\n \n#################################################################################\n# END OF YOUR CODE #\n#################################################################################\n\n# visualize the weights of the best network\nshow_net_weights(best_net)", "Run on the test set\nWhen you are done experimenting, you should evaluate your final trained network on the test set; you should get above 48%.\nWe will give you extra bonus point for every 1% of accuracy above 52%.", "test_acc = (best_net.predict(X_test) == y_test).mean()\nprint 'Test accuracy: ', test_acc" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
gogrean/SurfFit
examples/notebooks/PyXel Example - Constant Model.ipynb
gpl-3.0
[ "import os\nos.getcwd()\nimport sys\nsys.path.insert(0, '/Users/gogrean/code/pyxel')\n\nimport pyxel\n\npyxel", "PyXel Example: Constant Model\nThis example shows how to fit a constant to the sky background level in the direction of the merging galaxy cluster ZwCl 2341.1+0000. The constant model is loaded from astropy.modeling. Chandra data is used for the analysis.\nBelow we import the packages required to run the complete example:", "%matplotlib inline\nimport os\nimport pickle\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mtick\nfrom astropy.modeling.functional_models import Const1D\n\nfrom pyxel import Image, load_region\nfrom pyxel.fitters import CstatFitter\nfrom pyxel.models import IntModel", "There are four Chandra observations of ZwCl 2341.1+0000. The fully processed images in the energy band 0.5-2 keV are available in the PyXel GitHub repository. There are three types of images: cluster count images, instrumental background count images, and exposure maps. Point sources have been removed from the exposure maps to differentiate between pixels with no photons and pixels that were unexposed or contaminated with point sources.\nBelow we read the images and create a surface brightness profile in annuli centered on the cluster. The profile is binned linearly to a minimum of 25 counts/bin. If the profile is unchanged, then it only needs to be created once, rather than every time the code is run. Therefore, we save it in skybkg.pkl. 
If this file exists, then the profile is simply read from it, which is much faster than recreating it.", "DATADIR = \"../data/\"\npkl = DATADIR + \"skybkg.pkl\"\nif os.path.exists(pkl):\n with open(pkl, \"rb\") as f:\n p = pickle.load(f)\nelse:\n src_imgs = Image([DATADIR + \"srcfree_bin4_500-2000_5786_band1_thresh.img\",\n DATADIR + \"srcfree_bin4_500-2000_17170_band1_thresh.img\",\n DATADIR + \"srcfree_bin4_500-2000_17490_band1_thresh.img\",\n DATADIR + \"srcfree_bin4_500-2000_18702_band1_thresh.img\",\n DATADIR + \"srcfree_bin4_500-2000_18703_band1_thresh.img\"])\n exp_imgs = Image([DATADIR + \"srcfree_bin4_500-2000_5786_band1_thresh.expmap_nosrcedg\",\n DATADIR + \"srcfree_bin4_500-2000_17170_band1_thresh.expmap_nosrcedg\",\n DATADIR + \"srcfree_bin4_500-2000_17490_band1_thresh.expmap_nosrcedg\",\n DATADIR + \"srcfree_bin4_500-2000_18702_band1_thresh.expmap_nosrcedg\",\n DATADIR + \"srcfree_bin4_500-2000_18703_band1_thresh.expmap_nosrcedg\"])\n bkg_imgs = Image([DATADIR + \"5786_bin4_500-2000_bgstow_goodreg.img\",\n DATADIR + \"17170_bin4_500-2000_bgstow_goodreg.img\",\n DATADIR + \"17490_bin4_500-2000_bgstow_goodreg.img\",\n DATADIR + \"18702_bin4_500-2000_bgstow_goodreg.img\",\n DATADIR + \"18703_bin4_500-2000_bgstow_goodreg.img\"])\n region = load_region(DATADIR + \"skybkg.reg\")\n p = region.sb_profile(src_imgs, bkg_imgs, exp_imgs, min_counts=25, islog=False)\n with open(pkl, \"wb\") as f:\n pickle.dump(p, f)", "Beyond ~5.6 arcmin, the profile flattens to an approximately constant level. Regions beyond this radius therefore contain only sky background emission. 
Below we select the profile parameters required by the fit, restricted to the radius range 5.6-9.6 arcmin.", "rmin, rmax = 5.6, 9.6\n\n# These are needed to fit the data using C-stat.\nr = np.array([pp[0] for pp in p if rmin <= pp[0] <= rmax])\nr_err = np.array([pp[1] for pp in p if rmin <= pp[0] <= rmax])\nraw_cts = np.array([pp[2] for pp in p if rmin <= pp[0] <= rmax])\nbkg_cts = np.array([pp[4] for pp in p if rmin <= pp[0] <= rmax])\nt_raw = np.array([pp[11] for pp in p if rmin <= pp[0] <= rmax])\nt_bkg = np.array([pp[12] for pp in p if rmin <= pp[0] <= rmax])\n\n# These we load too, so that we can make a pretty figure in the end.\nsx = np.array([pp[7] for pp in p if rmin <= pp[0] <= rmax])\nsx_err = np.array([pp[8] for pp in p if rmin <= pp[0] <= rmax])\nbkg = np.array([pp[9] for pp in p if rmin <= pp[0] <= rmax])\nbkg_err = np.array([pp[10] for pp in p if rmin <= pp[0] <= rmax])", "We plot the profile to have an estimate for the background level. This estimate will be the guess for our fit.", "fig = plt.figure(figsize=(10,5))\nax = fig.add_subplot(111)\nax.scatter(r, sx, c=\"#1e8f1e\", alpha=0.85, s=35, marker=\"s\",\n label=\"0.5-2 keV Source + Sky Bkg\")\nax.errorbar(r, sx, xerr=r_err, yerr=sx_err, linestyle=\"None\", color=\"#1e8f1e\")\nax.step(r, bkg, where=\"mid\", color=\"#1f77b4\", linewidth=2,\n label=\"0.5-2 keV Particle Bkg\")\nax.step(r, bkg - bkg_err, where=\"mid\", color=\"#1f77b4\", linewidth=2, alpha=0.5, linestyle=\"--\")\nax.step(r, bkg + bkg_err, where=\"mid\", color=\"#1f77b4\", linewidth=2, alpha=0.5, linestyle=\"--\")\nax.semilogx()\nax.semilogy()\nax.get_xaxis().set_major_formatter(mtick.ScalarFormatter())\nax.get_xaxis().set_minor_formatter(mtick.ScalarFormatter())\nplt.tick_params(axis=\"both\", which=\"major\", labelsize=14)\nplt.xlim(rmin, rmax)\nplt.ylim(5e-8, 1e-5)\nplt.xlabel(\"Distance (arcmin)\", size=15)\nplt.ylabel(r\"SB (photons cm$^{-2}$ s$^{-1}$ arcmin$^{-2}$)\", size=15)\nplt.legend(loc=1)\nplt.title(\"Sky Background\", 
size=15)\nplt.show()", "The sky background level is a bit below 1e-6, so 1e-6 should be a good guess, especially given the simplicity of the model. We fit the data using the extended C-statistic (same as in Xspec).", "model = Const1D(amplitude=1e-6)\nint_model = IntModel(model, widths=r_err)\nfit = CstatFitter()\nfitted_model = fit(int_model, r, raw_cts, bkg_cts, t_raw, t_bkg, maxiter=500)\nprint(fitted_model)", "Uncertainties on the parameters are calculated using MCMC. The uncertainties below are calculated at the 90% confidence level. We save the chain to a file, so that we can simply load it next time (e.g., if the level at which the uncertainties are calculated is changed). To load an existing chain file, suppy the filename to chain_filename and set clobber_chain=False. MCMC runs can be expensive, especially for complex models (can take up to a few hours when run on two cores in the case of an integrated broken power-law model), so it's generally a very good idea to save the results.", "mcmc_err = fit.mcmc_err(fitted_model, r, raw_cts, bkg_cts, t_raw, t_bkg, \n cl=90., save_chain=True, clobber_chain=True, \n chain_filename=DATADIR+\"skybkg_chain.dat\")", "Finally, we plot the best-fitting value from the MCMC run, and the 90% uncertainty band.", "fig = plt.figure(figsize=(10,5))\nax = fig.add_subplot(111)\nax.scatter(r, sx, c=\"#1e8f1e\", alpha=0.85, s=35, marker=\"s\",\n label=\"0.5-2 keV Source + Sky Bkg\")\nax.errorbar(r, sx, xerr=r_err, yerr=sx_err, linestyle=\"None\", color=\"#1e8f1e\")\nax.step(r, bkg, where=\"mid\", color=\"#1f77b4\", linewidth=2,\n label=\"0.5-2 keV Particle Bkg\")\nax.step(r, bkg - bkg_err, where=\"mid\", color=\"#1f77b4\", linewidth=2, alpha=0.5, linestyle=\"--\")\nax.step(r, bkg + bkg_err, where=\"mid\", color=\"#1f77b4\", linewidth=2, alpha=0.5, linestyle=\"--\")\n\nax.plot(r, fitted_model(r), color=\"#ffa500\", linewidth=2, alpha=0.75)\nax.fill_between(r, mcmc_err[0][1] + mcmc_err[0][2], mcmc_err[0][1] + mcmc_err[0][3], alpha=0.3, 
color=\"#ffa500\")\n\nax.semilogx()\nax.semilogy()\nax.get_xaxis().set_major_formatter(mtick.ScalarFormatter())\nax.get_xaxis().set_minor_formatter(mtick.ScalarFormatter())\nplt.tick_params(axis=\"both\", which=\"major\", labelsize=14)\nplt.xlim(rmin, rmax)\nplt.ylim(5e-8, 1e-5)\nplt.xlabel(\"Distance (arcmin)\", size=15)\nplt.ylabel(r\"SB (photons cm$^{-2}$ s$^{-1}$ arcmin$^{-2}$)\", size=15)\nplt.legend(loc=1)\nplt.title(\"Sky Background\", size=15)\nplt.show()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
barjacks/pythonrecherche
Kursteilnehmer/Sven Millischer/06 /03 Python Functions, 10 Übungen.ipynb
mit
[ "03 Python Functions, 10 Übungen\nHier nochmals zur Erinnerung, wie Funktionen geschrieben werden.", "def test(element):\n element = element * 2\n return element", "Multipliziert Integers oder Floats mit 2", "test(5)", "1.Schreibe eine Funktion, die aus einer Liste, die grösste Zahl herauszieht. Es ist verboten mit \"max\" zu arbeiten. :-)", "lst = [12, 45, 373, 1028]\n\ndef highstnbr(mylist):\n mylist.sort()\n return mylist[-1]\n\nhighstnbr(lst)", "2.Schreibe eine Funktion, die alle Elemente einer Liste, addiert. Es ist verboten mit \"sum\" zu arbeiten.", "lst = [12, 45, 373, 1028]\n\ndef addtntor(mylist):\n total=0\n for elem in mylist:\n total+=elem\n return total\n\naddtntor(lst)", "3.Schreibe eine Funktion, die alle Elemente einer Liste multipliziert.", "lst = [12, 45, 373, 1028]\n\ndef multplr(mylist):\n total=1\n for elem in mylist: \n total*=elem\n return total\n\nmultplr(lst)", "4.Schreibe eine Funktion, die einen String nimmt, und spiegelt. Also \"hallo\" zu \"ollah\".", "spruch = \"hallo\"\n\ndef mirror(mylist):\n for elem in mylist:\n return mylist[::-1]\n\nmirror(spruch)\n\n5.Schreibe eine Funktion, die prüft, ob eine Zahl in einer bestimmten Zahlenfolge zu finden ist.\n\nliste = [45, 34, 64, 45]\n\ndef searchnbr(mylist):\n if 56 in mylist:\n return \"Treffer\"\n else: \n return \"Kein Treffer\"\n\nsearchnbr(liste)", "6.Lösche die mehrfach genannten Elemente aus der folgenden Liste.", "liste = [5,5,5,5,3,2,11,5]\n\nlist(set(liste)) evenlst.append(x) evenlst = []", "7.Drucke die geraden Zahlen aus der folgenden Liste aus:", "lst = [34,23,22,443,45,78,23,89,23]\n\nfor x in lst:\n if x % 2 == 0:\n print(x) ", "8.Prüfe mit einer Funktionen, wieviele Grossbuchstaben in folgendem Satz zu finden sind.", "satz = \"In Oesterreich zeichnet sich ein Rechtsrutsch ab. OeVP und FPOe haben stark zugelegt. 
Gemaess der neusten Hochrechnung ist die Partei von Sebastian Kurz mit 31,6 Prozent der Stimmen Wahlsiegerin, auf Platz zwei folgt die SPÖ (26,9 Prozent) vor der FPOe (26,0 Prozent).\"\n\ndef counting_caps(phrase):\n caps = 0\n for x in phrase:\n if x.isupper():\n caps += 1\n return caps\n\n counting_caps(satz)", "9.Prüfe mit einer Funktionen, wieviele 'e's in folgendem Satz zu finden sind.", "satz = \"In Oesterreich zeichnet sich ein Rechtsrutsch ab. OeVP und FPOe haben stark zugelegt. Gemaess der neusten Hochrechnung ist die Partei von Sebastian Kurz mit 31,6 Prozent der Stimmen Wahlsiegerin, auf Platz zwei folgt die SPÖ (26,9 Prozent) vor der FPOe (26,0 Prozent).\"\n\ndef counting_character(phrase):\n character = 0\n for x in phrase:\n if x.count('e'):\n character += 1\n return character\n\n counting_character(satz)", "10.Schreibe eine Funktion, die auf eine andere Funktion nutzt." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mauroalberti/geocouche
pygsf/checks/Check Runge-Kutta-Fehlberg interpolation.ipynb
gpl-2.0
[ "Preliminary settings\nIn order to plot fields, we run the following commands:", "%matplotlib inline\nimport matplotlib.pyplot as plt", "We import the math library:", "import math", "The modules to import for dealing with grids are:", "from pygsf.mathematics.arrays import *\nfrom pygsf.spatial.rasters.geotransform import *\nfrom pygsf.spatial.rasters.fields import *", "For calculating pathlines:", "from pygsf.space_time.movements import interpolate_rkf", "Velocity field with circular motion\nThe example circular motion vector field has components:\nv = y i - x j\nas deriving from the equation:\nv = - z x r\nwhere z is the vertical vector, r the position vector and x the vector product.", "k = 2 * math.pi\n\ndef z_func_fx(x, y):\n\n return k*y\n\ndef z_func_fy(x, y):\n\n return -k*x", "The velocity field parameters for testing the results are:\nv = w * r\nw = v / r\n|v| = sqrt(k^2y^2 + k^2x^2) = k * r\n1 cycle -> 2 pi r\nv = ds / dt -> ds = v * dt\n2 pi r = v dt\n2 pi r = v T -> T = 2 pi r / v = 2 pi / k\ngeotransform and grid definitions", "rows=100; cols=100\n\nsize_x = 1; size_y = 1\n\ntlx = -50.0; tly = 50.0 \n\ngt1 = GeoTransform(\n inTopLeftX=tlx, \n inTopLeftY=tly, \n inPixWidth=size_x, \n inPixHeight=size_y)", "vector field x-component", "fx1 = array_from_function(\n row_num=rows, \n col_num=cols, \n geotransform=gt1, \n z_transfer_func=z_func_fx)\n\nprint(fx1)", "vector field y-component", "fy1 = array_from_function(\n row_num=rows, \n col_num=cols, \n geotransform=gt1, \n z_transfer_func=z_func_fy)\n\nprint(fy1)", "flow characteristics: magnitude and streamlines\nTo visualize the parameters of the flow, we calculate the geographic coordinates:", "X, Y = gtToxyCellCenters(\n gt=gt1,\n num_rows=rows,\n num_cols=cols)", "and the vector field magnitude:", "magn = magnitude(\n fld_x=fx1, \n fld_y=fy1)\n\nfig = plt.figure(figsize=(14, 6))\n\nplt.contourf(X, Y, np.log10(magn), cmap=\"bwr\")\ncbar = plt.colorbar()\ncbar.ax.set_ylabel('Magnitude 
(log10)')\nplt.streamplot(X, Y, fx1/magn, fy1/magn, color=\"black\")\nplt.axis(\"image\")\nplt.title('Vector field magnitude and streamlines')\nplt.xlabel('x')\nplt.ylabel('y')\n\n\nimport math\nfrom pygsf.spatial.vectorial.vectorial import Point\nfrom pygsf.spatial.rasters.geoarray import GeoArray\n\nga = GeoArray(\n inGeotransform=gt1,\n inProjection=\"undef\",\n inLevels=[fx1, fy1])\n\ntime_increm = 1.0e-4\n\nperiod = 2 * math.pi / k\n\nnumber_of_cycles = 100\n\nsteps = number_of_cycles * (period / time_increm)\nprint (steps)\n\nfirst_pt = Point(0, 20)\n\nstr_pt = first_pt\npts_x, pts_y = [first_pt.x], [first_pt.y]\n\nfor n in range(int(steps)):\n\n end_pt, error = interpolate_rkf(\n geoarray=ga, \n delta_time=time_increm,\n start_pt=str_pt)\n \n if end_pt is None:\n break\n \n pts_x.append(end_pt.x)\n pts_y.append(end_pt.y)\n str_pt = end_pt\n \nprint (end_pt)", "After 100 cycles the calculated point position is in the expected (initial) position: x=0, y=200.", "fig = plt.figure(figsize=(14, 6))\n\nplt.contourf(X, Y, np.log10(magn), cmap=\"bwr\")\ncbar = plt.colorbar()\ncbar.ax.set_ylabel('Magnitude (log10)')\nplt.streamplot(X, Y, fx1/magn, fy1/magn, color=\"black\")\nplt.scatter(pts_x, pts_y, color='b')\nplt.axis(\"image\")\nplt.title('Vector field magnitude and streamlines')\nplt.xlabel('x')\nplt.ylabel('y')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
google/applied-machine-learning-intensive
content/04_classification/05_introduction_to_image_classification/colab.ipynb
apache-2.0
[ "<a href=\"https://colab.research.google.com/github/google/applied-machine-learning-intensive/blob/master/content/04_classification/05_introduction_to_image_classification/colab.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\nCopyright 2020 Google LLC.", "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Introduction to Image Classification\nWe have learned about binary and multiclass classification, and we've done so using datasets consisting of feature columns that contain numeric and string values. The numbers could be continuous or categorical. The strings we have used so far were all categorical features.\nIn this lab we will perform another type of classification: image classification.\nImage classification can be binary: \"Is this an image of a dog?\"\nIt can also be multiclass: \"Is this an image of a cat, dog, horse, or cow?\"\nThe questions above assume there is only one item in an image. There is an even more advanced form of multiclass classification that answers the following question: What are all of the classes in an image and where are they located? For example: \"Where are all of the cats, dogs, horses, and cows in this image?\".\nIn this introduction to image classification, we'll focus on classification where there is only one item depicted in each image. 
In future labs we'll learn about the more advanced forms of image classification.\nThe Dataset\nThe dataset we'll use for this Colab is the Fashion-MNIST dataset, which contains 70,000 grayscale images labeled with one of ten categories.\nThe categories are:\nLabel | Class\n------|------------\n0 | T-shirt/top\n1 | Trouser\n2 | Pullover\n3 | Dress\n4 | Coat\n5 | Sandal\n6 | Shirt\n7 | Sneaker\n8 | Bag\n9 | Ankle boot\nThe images show individual articles of clothing at low resolution (28 by 28 pixels), as seen here:\n<table>\n <tr><td>\n <img src=\"https://tensorflow.org/images/fashion-mnist-sprite.png\"\n alt=\"Fashion MNIST sprite\" width=\"600\">\n </td></tr>\n <tr><td align=\"center\">\n <b>Figure 1.</b> <a href=\"https://github.com/zalandoresearch/fashion-mnist\">Fashion-MNIST samples</a> (by Zalando, MIT License).<br/>&nbsp;\n </td></tr>\n</table>\n\nLoad the Data\nNow that we have a rough understanding of the data we're going to use in our model, let's load the data into this lab. The Fashion MNIST dataset is conveniently available from the Keras Datasets repository along with a utility function for downloading and loading the data into NumPy arrays.\nIn the code cell below, we import TensorFlow and download the Fashion-MNIST data.", "import tensorflow as tf\n\n(train_images, train_labels), (test_images, test_labels) = \\\n tf.keras.datasets.fashion_mnist.load_data()\n\nprint(train_images.shape)\nprint(train_labels.shape)\nprint(test_images.shape)\nprint(test_labels.shape)", "load_data() returns two tuples, one for the training dataset and the other for the testing dataset. As you can see from the output of the code cell above, we have 60,000 training samples and 10,000 testing samples. This makes for a 14% holdout of the data.\nYou might be wondering what that 28, 28 is in the image data. That is a two-dimensional representation of the image. This is our feature data. Each pixel of the image is a feature. 
A 28 by 28 image has 784 pixels.\nAs you can see, even a tiny image generates quite a few features. If we were processing 4k-resolution images, which are often 3840 by 2160 pixels, then we would have 8,294,400 features! Over eight million features is quite a bit. In later labs we'll address some strategies for working with this massive amount of data.\nExploratory Data Analysis\nIt is always a good idea to look at your data before diving in to building your model. Remember that our data is divided across four NumPy arrays, two of which are three-dimensional arrays:", "print('Training images:', train_images.shape)\nprint('Training labels:', train_labels.shape)\nprint('Test images:', test_images.shape)\nprint('Test labels:', test_labels.shape)", "To make our exploration tasks a little easier, let's put the data into a Pandas DataFrame. One way to do this is to flatten the 28 by 28 image into a flat array of 784 pixels, with the pixel number being the column name. We then add the labels to a target column.", "import numpy as np\nimport pandas as pd\n\ntrain_df = pd.DataFrame(\n np.array([x.flatten() for x in train_images]),\n columns=[i for i in range(784)]\n)\ntrain_df['target'] = train_labels\n\ntrain_df.describe()", "With so many columns, reading the output of describe() is nearly impossible. Let's instead do our analysis a little differently.\nTo begin, we will find the minimum value of every pixel column and output the sorted list of unique values.", "FEATURES = train_df.columns[:-1]\n\nsorted(train_df.loc[:, FEATURES].min().unique())", "All of the values were 0.\nLet's do the same for the maximum values.", "sorted(train_df.loc[:, FEATURES].max().unique())", "That is more interesting. We seem to have values ranging from 16 through 255. These values represent color intensities for grayscale images. 
0, which we saw as a minimum value, maps to black in the color map that we will use, while 255 is white.\nLet's see a histogram distribution of our max pixel values.", "import matplotlib.pyplot as plt\n\n_ = plt.hist(train_df.loc[:, FEATURES].max().unique())", "Unsurprisingly, higher intensity values seem to be more prevalent as maximum pixel values than lower intensity values.\nExercise 1: Charting Pixel Intensities\nIn the example above, we created a histogram containing the maximum pixel intensities. In this exercise you will create a histogram for all pixel intensities in the training dataset.\nIf some intensities are outliers, remove them to get a more meaningful histogram.\nHint: The NumPy where and flatten can come in handy for this exercise. \nStudent Solution", "# Your code goes here", "Continuing on With EDA\nNow that we have a basic idea of the values in our dataset, let's see if any are missing.", "train_df.isna().any().any()", "Good. We now know we aren't missing any values, and our pixel values range from 0 through 255.\nLet's now see if our target values are what we expect.", "sorted(train_df['target'].unique())", "Let's see the distribution.", "_ = train_df['target'].hist(bins=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])", "The class types seem evenly distributed. We have 6,000 of each.\nThe numeric values should map to these clothing types:\nLabel | Class\n------|------------\n0 | T-shirt/top\n1 | Trouser\n2 | Pullover\n3 | Dress\n4 | Coat\n5 | Sandal\n6 | Shirt\n7 | Sneaker\n8 | Bag\n9 | Ankle boot\nWe can spot check this by looking at some of the images. Let's check a random 'T-shirt/top'.\nTo do this we select a random index from the 'T-shirt/top' items (target = 0). We then reshape the pixel columns back into a 28 by 28 two-dimensional array, which are the dimensions of the image. 
We then use imshow() to display the image.", "index = np.random.choice(train_df[train_df['target'] == 0].index.values)\n\npixels = train_df.loc[index, FEATURES].to_numpy().reshape(28, 28)\n\n_ = plt.imshow(pixels, cmap='gray')", "In our sample we got an image that looked like a very low resolution t-shirt. You should see the same. Note: every time you rerun the above cell, a new random index will be chosen, so feel free to cycle through some of the values to see the different types of t-shirt/top images included in the dataset.\nThis single image spot checking is okay, but it doesn't scale well.\nWe can view multiple images at a time using the GridSpec class from Matplotlib.\nIn the code below, we build a visualization with a 10 by 10 grid of images in our t-shirt class.\nThe code imports gridspec, sets the number of rows and columns, and then sets the figure size so the image is large enough for us to actually see different samples.\nAfter that bit of setup, we create a 10 by 10 GridSpec. The other parameters to the constructor are there to ensure the images are tightly packed into the grid. Try experimenting with some other values.\nNext we randomly choose 100 indexes from items labelled with class 0 our training data.\nThe remainder of the code should look pretty familiar. We used similar code above to show a single image. 
The difference in this code is that we are adding 100 subplots using the GridSpec.", "from matplotlib import gridspec\n\n# Row and column count (100 samples)\nrows = 10\ncols = 10\n\n# Size of the final output image\nplt.figure(figsize=(12, 12)) \n\n# Grid that will be used to organize our samples\ngspec = gridspec.GridSpec(\n rows,\n cols,\n wspace = 0.0,\n hspace = 0.0,\n top = 1.0,\n bottom = 0.0,\n left = 0.00, \n right = 1.0,\n) \n\n# Randomly choose a sample of t-shirts\nT_SHIRTS = 0\nindexes = np.random.choice(\n train_df[train_df['target'] == T_SHIRTS].index.values,\n rows*cols\n)\n\n# Add each sample to a plot using the GridSpec\ncnt = 0\nfor r in range(rows):\n for c in range(cols):\n row = train_df.loc[indexes[cnt], FEATURES]\n img = row.to_numpy().reshape((28, 28))\n\n ax = plt.subplot(gspec[r, c])\n ax.imshow(img, cmap='gray')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n cnt = cnt + 1\n\nplt.show()", "Exercise 2: Visualizing Every Class\nIn this exercise, you'll take the code that we used above to visualize t-shirts and use it to visualize every class represented in our dataset. You'll need to print out the class name and then show a 10 by 10 grid of samples from that class. Try to minimize the amount of repeated code in your solution.\nStudent Solution", "# Your code goes here", "Wrapping Up EDA\nFrom our visual analysis, our samples seem reasonable.\nFirst off, class names seem to match pictures. This can give us some confidence that our data is labelled correctly.\nAnother nice thing is that all of the clothing items seem to be oriented in the same direction for the most part. If shoes were pointing in different directions, or if any images were rotated, then we would have had a lot more processing to do.\nAnd finally, all of our images are the same dimensions and are encoded with a single numeric grayscale intensity. In the real world, you'll likely not get so lucky. 
Images are acquired in different sizes and with different color encodings. We'll get to some examples of this in future labs.\nBased on our analysis so far, we can end our EDA and move on to model building.\nModeling\nWe have many options for building a multiclass classification model for images. In this lab we will build a deep neural network using TensorFlow Keras.\nPreparing the Data\nOur feature data is on a scale from 0 to 255, and our target data is categorically encoded. Fortunately, all of the features are on the same scale, so we don't have to worry about standardizing scale. However, we'll need to do a little data preprocessing in order to get our data ready for modeling.\nThe first bit of data preprocessing we'll do is bring the feature values into the range of 0.0 and 1.0. We could perform normalization to do this, but normalization actually isn't the only solution in this case.\nWe know that all of our features are pixel values in the range of 0 to 255. We also know from our EDA that every feature has a minimum value of 0, but that the max values have a pretty wide range. It is possible we would make our model worse by normalizing, since we'd be making the same values across pixels not map to the same color.\nInstead of normalizing, we can just divide every feature by 255.0. This keeps the relative values the same across pixels.", "train_df[FEATURES] = train_df[FEATURES] / 255.0\n\ntrain_df[FEATURES].describe()", "Exercise 3: One-Hot Encoding\nOur target values are categorical values in a column named target. In this exercise, you will one-hot encode the target values. 
Your code should:\n\nCreate ten new columns named target_0 through target_9.\nCreate a variable called TARGETS that contains the 10 target column names.\ndescribe() the ten new target column values to ensure that they have values between 0 and 1 and that the one-hot encoding looks evenly distributed.\n\nStudent Solution", "# Your code goes here", "Configure and Compile the Model\nWe'll be relying on the TensorFlow Keras Sequential model and Dense layers that we used in previous labs.\nIn this case our input shape needs to be the size of our feature count. We'll then add a few hidden layers and then use a softmax layer the same width as our target count. This layer should output the probability that a given set of input features maps to each of our targets. The sum of the probabilities will equal 1.0.", "model = tf.keras.Sequential([\n tf.keras.layers.Dense(128, input_shape=(len(FEATURES),)),\n tf.keras.layers.Dense(64, activation=tf.nn.relu),\n tf.keras.layers.Dense(32, activation=tf.nn.relu),\n tf.keras.layers.Dense(len(TARGETS), activation=tf.nn.softmax)\n])\n\nmodel.summary()", "Note that our images are actually 28 by 28 images. We flattened the images when we loaded them into a dataframe for EDA. However, flattening outside of the model isn't necessary. TensorFlow works in many dimensions. 
If we wanted to keep our images as 28 by 28 matrices, we could have added a Flatten layer as shown below.\n```python\nmodel = tf.keras.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=tf.nn.relu),\n tf.keras.layers.Dense(64, activation=tf.nn.relu),\n tf.keras.layers.Dense(32, activation=tf.nn.relu),\n tf.keras.layers.Dense(len(TARGETS), activation=tf.nn.softmax)\n])\nmodel.summary()\n```\n```\nModel: \"sequential_8\"\n\nLayer (type) Output Shape Param #\nflatten_5 (Flatten) (None, 784) 0 \n\ndense_28 (Dense) (None, 128) 100480 \n\ndense_29 (Dense) (None, 64) 8256 \n\ndense_30 (Dense) (None, 32) 2080 \n\ndense_31 (Dense) (None, 10) 330\nTotal params: 111,146\nTrainable params: 111,146\nNon-trainable params: 0\n\n```\nThis model results in the same number of trainable parameters as our pre-flattened model; it just saves us from having to flatten the images outside of TensorFlow.\nBefore the model is ready for training, it needs a few more settings. These are added during the model's compile step:\n\nLoss function — This measures how well the model is doing during training. We want to minimize this function to \"steer\" the model in the right direction. A large loss would indicate the model is performing poorly in classification tasks, meaning it is not matching input images to the correct class names. (It might classify a boot as a coat, for example.)\nOptimizer — This is how the model is updated based on the data it sees and its loss function.\nMetrics — This is used to monitor the training and testing steps. The following example uses accuracy, the fraction of the images that are correctly classified.", "model.compile(\n loss='categorical_crossentropy',\n optimizer='Adam',\n metrics=['accuracy'],\n)\n\nmodel.summary()", "Train the Model\nTraining a Keras API neural network model to classify images looks just like all of the other Keras models we have worked with so far. 
We call this the model.fit method, passing it our training data and any other parameters we'd like to use.", "callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5)\n\nhistory = model.fit(\n train_df[FEATURES],\n train_df[TARGETS],\n epochs=500,\n callbacks=[callback]\n)", "As the model trains, the loss and accuracy metrics are displayed. We also store the progression in history.\nYou'll notice that this took longer to train per epoch than many of the models we've built previously in this course. That's because of the large number of features. Even with these tiny 28 by 28 grayscale images, we are still dealing with 784 features. This is orders of magnitude larger than the 10 or so features we are used to using.\nExercise 4: Graph Model Progress\nIn this exercise you'll create two graphs. The first will show the model loss over each epoch. The second will show the model accuracy over each epoch. Feel free to use any graphical toolkit we have used so far.", "# Your code goes here", "Evaluate the Model\nNow that our model is trained, let's evaluate it using an independent test data set. Then let's see if the model quality holds up. We'll use model.evaluate() and pass in the test dataset. 
model.evaluate() returns a test_loss and test_accuracy.\nAlso note that we need to apply the same feature preprocessing to the test data that we did to the train data.", "test_df = pd.DataFrame(\n np.array([x.flatten() for x in test_images]),\n columns=[i for i in range(784)]\n)\ntest_df['target'] = test_labels\n\ntest_df[FEATURES] = test_df[FEATURES] / 255.0\n\nfor class_i in sorted(test_df['target'].unique()):\n column_name = f'target_{class_i}'\n test_df[column_name] = (test_df['target'] == class_i).astype(int)\n\ntrain_loss = history.history['loss'][-1]\ntrain_accuracy = history.history['accuracy'][-1]\n(test_loss, test_accuracy) = model.evaluate(test_df[FEATURES], test_df[TARGETS])\n\nprint('Training loss:', train_loss)\nprint('Training accuracy:', train_accuracy)\n\nprint('Test loss:', test_loss)\nprint('Test accuracy:', test_accuracy)", "The accuracy on the test dataset is noticeably less than the accuracy on the training dataset. This gap between training accuracy and test accuracy is an example of overfitting. Overfitting is when a machine learning model tends to perform worse on new data than on the training data. The trained model is unable to generalize to data that it has not seen before.\nThere are many ways to try to reduce overfitting. One that we have seen is early stopping. This causes training to stop when loss stops changing significantly for a model. Without early stopping, the model would continue training, becoming more and more tailored to the training data and likely less to be able to generalize across new data.\nAnother method for reducing overfitting in deep neural networks is dropout. A dropout layer is a layer that sits between two regular layers (in our case, dense layers) and randomly sets some of the values passed between layers to 0.\nIn TensorFlow the Dropout class is capable of doing this. To use Dropout you simply add a Dropout layer between other layers of the model. 
Each dropout layer has a percentage of values that it will set to 0.\npython\nmodel = tf.keras.Sequential([\n ...\n tf.keras.layers.Dense(name='L14', 431, activation=tf.nn.relu),\n # Randomly sets 15% of values between L14 and L15 to 0\n tf.keras.layers.Dropout(rate=0.15),\n tf.keras.layers.Dense(name='L15', 257, activation=tf.nn.relu),\n # Randomly sets 1% of values between L15 and L16 to 0\n tf.keras.layers.Dropout(rate=0.01),\n tf.keras.layers.Dense(name='L16', 57, activation=tf.nn.relu),\n ...\n])\nExercise 5: Dropout Layers\nIn this exercise take the model from above and add a Dropout layer or layers between the Dense layers. See if you can find a configuration that reduces the gap between the training loss and accuracy and the test loss and accuracy. Document your findings.\nStudent Solution", "# Your code goes here", "Iterate a few times and find a dropout model that seems to bring the testing and training numbers closer together. When you are done, document your findings in the table below. The ?s are placeholders for accuracy and loss values.\nDropout (Y/N) | Train/Test | Accuracy | Loss\n--------------|------------| ---------|------\nN | Train | ? | ?\nN | Test | ? | ?\nY | Train | ? | ?\nY | Test | ? | ?\n\nMake Predictions\nWe have now trained the model while trying to reduce overfitting. Let's say we're happy with our numbers and are ready to deploy the model. Now it is time to make predictions.\nWe could now snap an image of a clothing item, resize it to 28 by 28, and grayscale it. But that is a lot of work and outside the scope of this class. For simplicity, let's use the test images as input to the model and see what predictions we get.\nWe'll use the model.predict() function to do this. 
Let's make our predictions and peek at the first result.", "predictions = model.predict(test_df[FEATURES])\n\npredictions[0]", "What are those numbers?\nFor each image:\n * the prediction result is in the form of 10 numbers, one for each possible label\n * each number represents the level of confidence that a label is the correct label for the particular image\n * all 10 numbers should add up to the sum of 1\nLet's see if that is true.", "sum(predictions[0]), sum(predictions[1])", "Well, maybe not 1, but the result definitely approaches 1. Floating point math makes summing to exactly 1 a little difficult.\nLet's find out which label has the highest predicted number and whether it matches with the actual test label.\nTo find the highest predicted number we will use Numpy's argmax function which returns the index of the maximum value in an array.", "import numpy as np\n\nprint('Label with the highest confidence: {predicted_label}'.format(\n predicted_label = np.argmax(predictions[0])))\n\nprint('Actual label: {actual_label}'.format(actual_label = test_labels[0]))", "With our model the predicted class was class 9, and the actual class was class 9. Success!\nExercise 6: Thresholds\nWhen making our predictions, we blindly accepted the output of argmax without really understanding what argmax was doing.\nargmax returns the index of the maximum value in an array. What if there are ties? What happens for a 10-element array that looks like:\npython\n [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]\nIn this case it is a virtual tie between all of the classes. argmax will return the first value in the case of a tie. This is problematic for a few reasons. In this case we clearly have little confidence in any class, yet an algorithm that relies on argmax would naively predict the first class.\nFor this exercise, discuss ways we can get around relying solely on argmax. 
Are there better ways of finding a prediction algorithm?\nStudent Solution\n\nYour argument goes here\n\n\nExercise 7: MNIST Digits\nAnother popular MNIST dataset is the digits dataset. This dataset consists of images of labelled, hand-written digits ranging from 0 through 9.\nIn this exercise you will build a model that predicts the class of MNIST digit images.\nThe dataset is part of scikit-learn.", "from sklearn import datasets\n\nimport pandas as pd\n\ndigits_bunch = datasets.load_digits()\ndigits = pd.DataFrame(digits_bunch.data)\ndigits['digit'] = digits_bunch.target\n\ndigits.describe()", "You will need to:\n\nPerform EDA on the data\nChoose a model (or models) to use to predict digits\nPerform any model-specific data manipulation\nTrain the model and, if possible, visualize training progression\nPerform a final test of the model on holdout data\n\nUse as many code and text cells as you need to. Explain your work.\nStudent Solution", "# Your code goes here", "" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
timothyb0912/pylogit
examples/notebooks/.ipynb_checkpoints/Nested Logit Example--Python Biogeme benchmark--09NestedLogit-checkpoint.ipynb
bsd-3-clause
[ "The purpose of this notebook is twofold. First, it demonstrates the basic functionality of PyLogit for estimating nested logit models. Secondly, it compares the nested logit capabilities of PyLogit with Python Biogeme. The dataset used is the SwissMetro dataset from <a href=\"http://biogeme.epfl.ch/examples_swissmetro.html\">http://biogeme.epfl.ch/examples_swissmetro.html</a>. For an explanation of the variables in the dataset, see http://www.strc.ch/conferences/2001/bierlaire1.pdf", "from collections import OrderedDict # For recording the model specification \n\nimport pandas as pd # For file input/output\nimport numpy as np # For vectorized math operations\nimport statsmodels.tools.numdiff as numdiff # For numeric hessian\nimport scipy.linalg # For matrix inversion\n\nimport pylogit as pl # For choice model estimation\nfrom pylogit import nested_logit as nl # For nested logit convenience funcs", "1. Load the Swissmetro Dataset", "# Load the raw swiss metro data\n# Note the .dat files are tab delimited text files\nswissmetro_wide = pd.read_table(\"../data/swissmetro.dat\", sep='\\t')", "2. Clean the dataset\nNote that the 09NestedLogit.py file provided is an example from Python Biogeme (see: <a href=\"http://biogeme.epfl.ch/examples_swissmetro.html\">http://biogeme.epfl.ch/examples_swissmetro.html</a>). The 09NestedLogit.py file excludes observations meeting the following critera:\n<pre>\nexclude = (( PURPOSE != 1 ) * ( PURPOSE != 3 ) + ( CHOICE == 0 )) > 0\n</pre>\nAs a result, their dataset has 6,768 observations. Below, I make the same exclusions.", "# Select obervations whose choice is known (i.e. 
CHOICE != 0)\n# **AND** whose PURPOSE is either 1 or 3\ninclude_criteria = (swissmetro_wide.PURPOSE.isin([1, 3]) &\n (swissmetro_wide.CHOICE != 0))\n\n# Use \".copy()\" so that later on, we avoid performing operations \n# on a view of a dataframe as opposed to on an actual dataframe\nclean_sm_wide = swissmetro_wide.loc[include_criteria].copy()\n\n# Look at how many observations we have after removing unwanted\n# observations\nfinal_num_obs = clean_sm_wide.shape[0]\nnum_obs_statement = \"The cleaned number of observations is {:,.0f}.\"\nprint (num_obs_statement.format(final_num_obs))", "3. Create an id column that ignores the repeat observations per individual\nIn the simple example given on the Python Biogeme website for 09NestedLogit.py, the repeated observations per individual are treated as separate and independent observations. We will do the same.", "# Create a custom id column that ignores the fact that this is a \n# panel/repeated-observations dataset, and start the \"custom_id\" from 1\nclean_sm_wide[\"custom_id\"] = np.arange(clean_sm_wide.shape[0], dtype=int) + 1", "4. Convert the data from 'wide' format to 'long' format\n4a. 
Determine the 'type' of each column in the dataset.", "# Look at the columns of the swissmetro data\nclean_sm_wide.columns\n\n# Create the list of individual specific variables\nind_variables = clean_sm_wide.columns.tolist()[:15]\n\n# Specify the variables that vary across individuals **AND** \n# across some or all alternatives\nalt_varying_variables = {u'travel_time': dict([(1, 'TRAIN_TT'),\n (2, 'SM_TT'),\n (3, 'CAR_TT')]),\n u'travel_cost': dict([(1, 'TRAIN_CO'),\n (2, 'SM_CO'),\n (3, 'CAR_CO')]),\n u'headway': dict([(1, 'TRAIN_HE'),\n (2, 'SM_HE')]),\n u'seat_configuration': dict([(2, \"SM_SEATS\")])}\n\n# Specify the availability variables\navailability_variables = dict(zip(range(1, 4), ['TRAIN_AV', 'SM_AV', 'CAR_AV']))\n\n# Determine the columns that will denote the\n# new column of alternative ids, and the columns\n# that denote the custom observation ids and the \n# choice column\nnew_alt_id = \"mode_id\"\nobs_id_column = \"custom_id\"\nchoice_column = \"CHOICE\"", "4b. Actually perform the conversion from wide to long formats", "# Perform the desired conversion\nlong_swiss_metro = pl.convert_wide_to_long(clean_sm_wide, \n ind_variables, \n alt_varying_variables, \n availability_variables, \n obs_id_column, \n choice_column,\n new_alt_id_name=new_alt_id)\n\n# Look at the first 9 rows of the long-format dataframe\nlong_swiss_metro.head(9).T", "5. Create the variables used in the Python Biogeme Nested Logit Model Example\nIn 09NestedLogit.py, the travel time and travel cost variables are scaled for ease of numeric optimization. We will do the same such that our estimated coefficients are comparable.", "# Scale both the travel time and travel cost by 100\nlong_swiss_metro[\"travel_time_hundredth\"] = (long_swiss_metro[\"travel_time\"] /\n 100.0)\n\n# Figure out which rows correspond to train or swiss metro \n# alternatives for individuals with GA passes. 
These individuals face no \n# marginal costs for a trip\ntrain_pass_train_alt = ((long_swiss_metro[\"GA\"] == 1) *\n (long_swiss_metro[\"mode_id\"].isin([1, 2]))).astype(int)\n# Note that the (train_pass_train_alt == 0) term accounts for the\n# fact that those with a GA pass have no marginal cost for the trip\nlong_swiss_metro[\"travel_cost_hundredth\"] = (long_swiss_metro[\"travel_cost\"] *\n (train_pass_train_alt == 0) /\n 100.0)", "6. Specify and Estimate the Python Biogeme Nested Logit Model Example\n6a. Specify the Model", "# Specify the nesting values\nnest_membership = OrderedDict()\nnest_membership[\"Future Modes\"] = [2]\nnest_membership[\"Existing Modes\"] = [1, 3]\n\n# Create the model's specification dictionary and variable names dictionary\n# NOTE: - Keys should be variables within the long format dataframe.\n# The sole exception to this is the \"intercept\" key.\n# - For the specification dictionary, the values should be lists\n# or lists of lists. Within a list, or within the inner-most\n# list should be the alternative ID's of the alternative whose\n# utility specification the explanatory variable is entering.\n\nexample_specification = OrderedDict()\nexample_names = OrderedDict()\n\n# Note that 1 is the id for the Train and 3 is the id for the Car.\n# The next two lines are placing alternative specific constants in\n# the utility equations for the Train and for the Car. 
The order\n# in which these variables are placed is chosen so the summary\n# dataframe which is returned will match that shown in the HTML\n# file of the python biogeme example.\nexample_specification[\"intercept\"] = [3, 1]\nexample_names[\"intercept\"] = ['ASC Car', 'ASC Train']\n\n# Note that the names used below are simply for consistency with\n# the coefficient names given in the Python Biogeme example.\n# example_specification[\"travel_cost_hundredth\"] = [[1, 2, 3]]\n# example_names[\"travel_cost_hundredth\"] = ['B_COST']\n\nexample_specification[\"travel_cost_hundredth\"] = [[1, 2, 3]]\nexample_names[\"travel_cost_hundredth\"] = ['B_COST']\n\nexample_specification[\"travel_time_hundredth\"] = [[1, 2, 3]]\nexample_names[\"travel_time_hundredth\"] = ['B_TIME']", "6b. Estimate the model\nOne main difference between the nested logit implementation in PyLogit and in Python Biogeme or mLogit in R is that PyLogit reparameterizes the 'standard' nested logit model. In particular, one standard reperesntation of the nested logit model is in terms of the inverse of the 'scale' parameter for each nest (see for example the representation given by Kenneth Train in section 4.2 <a href=\"http://eml.berkeley.edu/books/choice2nd/Ch04_p76-96.pdf\">here</a>). The 'scale' parameter has domain from zero to infinity, therefore the inverse of the scale parameter has the same domain.\nHowever, for econometric purposes (such as conforming to the assumptions that individuals are making choices through a utility maximizing decision protocol), the scale parameter of a 'lower level nest' is constrained to be greater than or equal to 1 (assuming that the 'upper level nest' is constrained to 1.0 for identification purposes). The inverse of the scale parameter would then be constrained to be between 0.0 and 1.0 in this case. In order to make use of unconstrained optimization algorithms, we therefore estimate the logit ( i.e. 
$\\ln \\left[ \\frac{\\textrm{scale}^{-1}}{1.0 - \\textrm{scale}^{-1}} \\right]$) of the inverse of the scale parameter, assuming that the inverse of the scale parameter will lie between zero and one (and accordingly that the scale parameter be greater than or equal to one).", "# Define a function that calculates the \"logit\" transformation of values\n# between 0.0 and 1.0.\ndef logit(x):\n \"\"\"\n Parameters\n ----------\n x : int, float, or 1D ndarray.\n If an array, all elements should be ints or floats. All\n elements should be between zero and one, exclusive of 1.0.\n\n Returns\n -------\n The logit of x: `np.log(x / (1.0 - x))`.\n \"\"\"\n return np.log(x/(1.0 - x))\n\n# Provide the module with the needed input arguments to create\n# an instance of the MNL model class\nexample_nested = pl.create_choice_model(data=long_swiss_metro,\n alt_id_col=new_alt_id,\n obs_id_col=obs_id_column,\n choice_col=choice_column,\n specification=example_specification,\n model_type=\"Nested Logit\",\n names=example_names,\n nest_spec=nest_membership)\n\n# Specify the initial nesting parameter values\n# Note: This should be in terms of the reparameterized values used\n# by PyLogit.\n\n# Note: The '40' corresponds to scale parameter that is numerically\n# indistinguishable from 1.0\n\n# Note: 2.05 is the scale parameter that is estimated by PythonBiogeme\n# so we invert it, then take the logit of this inverse to get the\n# corresponding starting value to be used by PyLogit.\n# Note the first value corresponds to the first nest in 'nest_spec'\n# and the second value corresponds to the second nest in 'nest_spec'.\ninit_nests = np.array([40, logit(2.05**-1)])\n\n# Specify the initial index coefficients used by PythonBiogeme\ninit_coefs = np.array([-0.167, -0.512, -0.899, -0.857])\n\n# Create a single array of the initial values\ninit_values = np.concatenate((init_nests, init_coefs), axis=0)\n\n# Start the model estimation from the pythonbiogeme initial values\n# Note that the first 
value, in the initial values, is constrained\n# to remain constant through the estimation process. This is because\n# the first nest in nest_spec is a 'degenerate' nest with only one\n# alternative, and the nest parameter of degenerate nests is not\n# identified.\nexample_nested.fit_mle(init_values,\n constrained_pos=[0])", "Also, note that the functionality of using parameter constraints is restriced to the Mixed Logit and Nested Logit models at the moment. Moreover, this functionality is only relevant when using optimization method that make use of gradient information. Gradient-free estimation methods such as 'powell's' method or 'nelder-mead' will not make use of the constrained_pos keyword argument.\n6.c Compare the model output with that of Python Biogeme", "# Look at the estimated coefficients and goodness-of-fit statistics\nexample_nested.get_statsmodels_summary()", "Compare with PythonBiogeme", "# Note that the Mu (i.e the scale parameter) estimated by python biogeme is \n# 1.0 / nest_coefficient where\n# nest_coefficient = 1.0 / (1.0 + exp[-1 * estimated_nest_param])\npylogit_mu = 1.0 + np.exp(-1 * example_nested.params[\"Existing Modes Nest Param\"])\nprint \"PyLogit's estimated Mu is: {:,.4f}\".format(pylogit_mu)", "Summary\nMy parameter estimates match those of Python Biogeme. <br>\nThe Python Biogeme log-likelihood is -5,236.900 and their estimated parameters are:\n<pre>\nASC Car: -0.167\nASC Train: -0.512\nB_COST: -0.857\nB_TIME: -0.899\nMu: 2.05\n</pre>\n\nAs shown above, my log-likelihood is -5,236.900, and my estimated parameters are:\n<pre>\nASC Car: -0.1672\nASC Train: -0.5119\nB_COST: -0.8567\nB_TIME: -0.8987\nExisting Modes Nest Param: 2.0541\n</pre>\n\nPyLogit's covariance estimates for the Nested Logit model are currently based on the BHHH approximation to the Fisher Information Matrix. This is the same procedure used by mlogit. 
However, based on the disaggreement between PyLogit's standard errors and those of Python Biogeme, Python Biogeme is clearly not using the BHHH approximation to the Fisher Information Matrix to calculate its standard errors. How does Python Biogeme calculate its standard errors?\nInvestigate the use of numeric approximations to the Hessian", "# Create objects for all of the necessary arguments that are\n# needed to compute the log-likelihood of the nested logit model\n# given the data used in this example\nnested_design = example_nested.design\nmapping_res = example_nested.get_mappings_for_fit()\nchoice_array = long_swiss_metro[\"CHOICE\"].values\n\n# Create a 'convenience' function that simply returns the log-likelihood\n# given a vector of coefficients\ndef convenient_log_likelihood(all_coefs):\n log_likelihood = nl.convenient_nested_log_likelihood(all_coefs,\n nested_design,\n mapping_res[\"rows_to_obs\"],\n mapping_res[\"rows_to_nests\"],\n choice_array)\n return log_likelihood\n\n# Calculate the numeric hessian\nnumeric_hess = numdiff.approx_hess(example_nested.params.values,\n convenient_log_likelihood)\n# Account for the fact that the first param is constrained\nnumeric_hess[0, :] = 0\nnumeric_hess[:, 0] = 0\nnumeric_hess[0, 0] = -1\n# Calculate the asymptotic covariance with the numeric hessian\nnumeric_cov = -1 * scipy.linalg.inv(numeric_hess)\n# Get the numeric standard errors\nnumeric_std_errs = pd.Series(np.sqrt(np.diag(numeric_cov)),\n index=example_nested.params.index)\n# Make sure the Future Modes Nest param has a standard error of np.nan\nnumeric_std_errs.loc[\"Future Modes Nest Param\"] = np.nan\n# Order the numeric standard errors according to the Python Biogeme\n# output\nnumeric_std_errs = pd.concat([numeric_std_errs[example_nested.params.index[2:]],\n numeric_std_errs[example_nested.params.index[:2]]],\n axis=0)\n# Display the numeric standard errors\nnumeric_std_errs", "Python Biogeme Output\n<pre>\nName Value Std err t-test p-value\nASC_CAR 
-0.167 0.0371 -4.50 0.00\nASC_TRAIN -0.512 0.0452 -11.33 0.00\nB_COST -0.857 0.0463 -18.51 0.00\nB_TIME -0.899 0.0570 -15.77 0.00\nMU 2.05 0.118 17.45 0.00\n</pre>\n\nFrom above, we see that for the index coefficients, the standard errors that are calculated using the numeric approximation of the hessian match the standard errors returned by Python Biogeme. This suggests that the standard errors of Python Biogeme, for the nested logit model, are based on a numeric differentiation approximation to the Hessian.\nBelow, we investigate whether the numeric approximation of the gradient via numeric differentiation is a close approximation to the analytic gradient. The premise is that if the numeric gradient does not adequately approximate the analytic gradient, then what chance does the numeric hessian have of adequately approximating the analytic hessian?", "# Approximate the gradient using numeric differentiation\nnumeric_grad = numdiff.approx_fprime(example_nested.params.values,\n convenient_log_likelihood)\npd.DataFrame([numeric_grad,\n example_nested.gradient.values],\n index=[\"Numeric Differentiation\", \"Analytic\"],\n columns=example_nested.params.index).T", "From the dataframe above, we see that the numeric gradient does not adequately approximate the analytic gradient. The numeric gradient is incorrect by a factor of 2 to 4 depending on what variable is being examined (this excludes the Future Modes Nest Param that constrained to 40).\nGiven that the numeric gradient does not provide a good approximation of the analytic gradient, I do not expect the numeric hessian (nor the BHHH approximation used by PyLogit) to provide a good approximation to the analytic hessian. 
Since the standard errors calculated from the numeric hessian matches PythonBioeme's standard errors, this suggests that Python Biogeme is calculating its hessian (and therefore standard errors) via numeric differentiation, and as suggested above, this is likely to be poor approximation to the analytic hessian." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mdalvi/financial-analysis-and-algo-trading
time_series_analysis/time_series_analysis_notes.ipynb
mit
[ "Time series basics\n\nTrends\nSeasonality\nCyclical\n\nIntroduction to statsmodels", "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport statsmodels.api as sm\n\n# Importing built-in datasets in statsmodels\ndf = sm.datasets.macrodata.load_pandas().data\n\ndf.head()\n\nprint(sm.datasets.macrodata.NOTE)\n\ndf.head()\n\ndf.tail()\n\n# statsmodels.timeseriesanalysis.datetools\nindex = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3'))\nindex\n\ndf.index = index\n\ndf.head()\n\ndf['realgdp'].plot()", "Using the Hodrick-Prescott Filter for trend analysis", "result = sm.tsa.filters.hpfilter(df['realgdp'])\nresult\n\ntype(result)\n\ntype(result[0])\n\ntype(result[1])\n\ngdp_cycle, gdp_trend = result\ndf['trend'] = gdp_trend\n\ndf[['realgdp', 'trend']].plot()\n\n# zooming in\ndf[['realgdp', 'trend']]['2000-03-31':].plot()", "ETS Theory (Error-Trend-Seasonality)\n\nExponential Smoothing\nTrend Methods Models\nETS Decomposition\n\nEWMA Theory\n(Exponentially Weighted Moving Averages)", "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nairline = pd.read_csv('airline_passengers.csv', index_col = 'Month')\nairline.head()\n\n# this is a normal index\nairline.index\n\n# Get rid of all the missing values in this dataset\nairline.dropna(inplace=True)\n\nairline.index = pd.to_datetime(airline.index)\nairline.head()\n\n# now its a DatetimeIndex\nairline.index\n\n# Recap of making the SMA\nairline['6-month-SMA'] = airline['Thousands of Passengers'].rolling(window=6).mean()\nairline['12-month-SMA'] = airline['Thousands of Passengers'].rolling(window=12).mean()\n\nairline.plot(figsize=(10,8))", "Weakness of SMA\n* Smaller windows will lead to more noise, rather than signal\n* It will always lag by the size of the window\n* It will never reach to full peak or valley of the data due to the averaging.\n* Does not really inform you about possible future behaviour, all it really 
does is describe trends in your data.\n* Extreme historical values can skew your SMA significantly\nCreating EWMA", "airline['EWMA-12'] = airline['Thousands of Passengers'].ewm(span=12).mean()\n\nairline[['Thousands of Passengers', 'EWMA-12']].plot(figsize=(10,8))", "Full reading on mathematics of EWMA\n* http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows\nETS continued...", "import pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nairline = pd.read_csv('airline_passengers.csv', index_col = 'Month')\nairline.head()\n\nairline.plot()\n\nairline.dropna(inplace = True)\n\nairline.index = pd.to_datetime(airline.index)\nairline.head()\n\nfrom statsmodels.tsa.seasonal import seasonal_decompose\n\n# additive/ multiplicative models available\n# suspected linear trend = use additive\n# suspected non-linear trend = multiplicative model\nresult = seasonal_decompose(airline['Thousands of Passengers'], model='multiplicative')\n\nresult.seasonal\n\nresult.seasonal.plot()\n\nresult.trend.plot()\n\nresult.resid.plot()\n\nfig = result.plot()", "ARIMA models\nAuto regressive integrated moving averages\n[https://people.duke.edu/~rnau/411arim3.htm]\n\n\nAutoregressive integrated moving average (ARIMA) model is a generalization of an autoregressive moving average (ARMA) model.\n\n\nARIMA model types\n\nNon-seasonal ARIMA (for non-seasonal data)\nSeasonal ARIMA (for seasonal data)\n\n\n\nARIMA models are applied in some cases where data show evidence of non-stationarity, where an initial differencing step (corresponding to the 'integrated' part of the model) can be applied one or more times to eliminate the non-stationarity.\n\n\nNon-seasonal ARIMA models are generally denoted as ARIMA(p, d, q) where parameters p, d and q are non-negative integers.\n\n\nAR(p): Autoregression component\n\nA regression model that utilizes the dependent relationship between a current observation and observations over a previous period.\n\n\nI(d): 
Integrated\nDifferencing of observations (subtracting an observation from an observation at the previous time step) in order to make the time series stationary.\n\n\nMA(q): Moving Average\nA model that uses the dependency between an observation and a residual error from a moving average model applied to lagged observations\n\n\n\nStationary vs Non-Stationary Data\n\nA stationary series has a constant mean and variance over time\nA stationary dataset will allow our model to predict that the mean and variance will be the same in future periods.\n\n\n<img src='stationaryvsnonstationary.png' />\n\n\nNote above for stationary data (mean and variance both are constant over time)\n\nAnother aspect to look for is covariance not be a function of time in stationary data\n\n\nIf you've determined your data is not stationary (either visually or mathematically), you will then need to transform it to be stationary in order to evaluate it and what type of ARIMA terms you will use.\n\n\nOne simple way to do this is through \"differencing\"\n\n\nOriginal Data\n\n\n<table>\n<tr><td>Time1</td><td>10</td></tr>\n<tr><td>Time2</td><td>12</td></tr>\n<tr><td>Time3</td><td>8</td></tr>\n<tr><td>Time4</td><td>14</td></tr>\n<tr><td>Time5</td><td>7</td></tr>\n</table>\n\n\nFirst Difference\n\n<table>\n<tr><td>Time1</td><td>NA</td></tr>\n<tr><td>Time2</td><td>2</td></tr>\n<tr><td>Time3</td><td>-4</td></tr>\n<tr><td>Time4</td><td>6</td></tr>\n<tr><td>Time5</td><td>-7</td></tr>\n</table>\n\n\nSecond Difference\n\n<table>\n<tr><td>Time1</td><td>NA</td></tr>\n<tr><td>Time2</td><td>NA</td></tr>\n<tr><td>Time3</td><td>-6</td></tr>\n<tr><td>Time4</td><td>10</td></tr>\n<tr><td>Time5</td><td>-13</td></tr>\n</table>\n\n\n\nFor seasonal data, you can also difference by season. 
If you had monthly data with yearly seasonality, you could difference by a time unit of 12, instead of just 1.\n\n\nAnother common techinique with seasonal ARIMA models is to combine both methods, taking the seasonal difference of the first difference.\n\n\nARIMA models continued... 1\nThe general process for ARIMA models is the following:\n* Visualize the Time Series Data\n* Make the time series data stationary\n* Plot the Correlation and AutoCorrelation Charts\n* Construct the ARIMA Model\n* Use the model to make predictions\nLet's go through these steps!\n[https://people.duke.edu/~rnau/arimrule.htm]", "import numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\ndf = pd.read_csv('monthly-milk-production-pounds-p.csv')\ndf.head()\n\ndf.columns = ['Month', 'Milk in Pounds per Cow']\ndf.head()\n\ndf.tail()\n\ndf.drop(168, axis=0, inplace=True)\ndf.tail()\n\ndf['Month'] = pd.to_datetime(df['Month'])\ndf.head()\n\ndf.set_index('Month', inplace=True)\ndf.head()\n\ndf.index\n\ndf.describe()\n\ndf.describe().transpose()", "Step 1 - Visualize the data", "df.plot();\n\ntime_series = df['Milk in Pounds per Cow']\ntype(time_series)\n\ntime_series.rolling(12).mean().plot(label='12 SMA')\ntime_series.rolling(12).std().plot(label='12 STD')\ntime_series.plot()\nplt.legend();", "Conclusion: The scale of STD (standard deviation) is always pretty much smaller than the actual scale. If 12 STD does not show crazy behaviour is comparitively flat then its 'workable'", "from statsmodels.tsa.seasonal import seasonal_decompose\n\ndecomp = seasonal_decompose(time_series)\nfig = decomp.plot()\nfig.set_size_inches(15,8)", "ARIMA models continued... 2\n### Step 2 - Make the time series data stationary (if non-stationary)\nWe can use the Augmented Dickey-Fuller unit root test.\nIn statistics and econometrics, an augmented Dickey–Fuller test (ADF) tests the null hypothesis that a unit root is present in a time series sample. 
The alternative hypothesis is different depending on which version of the test is used, but is usually stationarity or trend-stationarity.\nBasically, we are trying to whether to accept the Null Hypothesis H0 (that the time series has a unit root, indicating it is non-stationary) or reject H0 and go with the Alternative Hypothesis (that the time series has no unit root and is stationary).\nWe end up deciding this based on the p-value return.\n\n\nA small p-value (typically ≤ 0.05) indicates strong evidence against the null hypothesis, so you reject the null hypothesis.\n\n\nA large p-value (> 0.05) indicates weak evidence against the null hypothesis, so you fail to reject the null hypothesis.\n\n\nLet's run the Augmented Dickey-Fuller test on our data:", "from statsmodels.tsa.stattools import adfuller\n\nresult = adfuller(df['Milk in Pounds per Cow'])\nresult\n\ndef adf_check(time_series):\n result = adfuller(time_series)\n print('Augumented Dicky-Fuller Test')\n labels = ['ADF Test Statistic', 'p-value', '# of lags', 'Num of Observations used']\n for value, label in zip(result, labels):\n print(label + ': ' + str(value))\n \n if result[1] < 0.05:\n print('Strong evidence against null hypothesis')\n print('Rejecting null hypothesis')\n print('Data has no unit root! 
and is stationary')\n else:\n print('Weak evidence against null hypothesis')\n print('Fail to reject null hypothesis')\n print('Data has a unit root, it is non-stationary')\n\nadf_check(df['Milk in Pounds per Cow'])", "Thus, the ADF test confirms our assumption from visual analysis that definately the data is non-stationary and has a seasionality and trend factor to it.", "# Now making the data stationary\ndf['First Difference'] = df['Milk in Pounds per Cow'] - df['Milk in Pounds per Cow'].shift(1)\ndf['First Difference'].plot()\n\n# adf_check(df['First Difference']) - THIS RESULTS IN LinAlgError: SVD did not converge ERROR\n\n# Note: we need to drop the first NA value before plotting this\nadf_check(df['First Difference'].dropna())\n\ndf['Second Difference'] = df['First Difference'] - df['First Difference'].shift(1)\ndf['Second Difference'].plot();\n\nadf_check(df['Second Difference'].dropna())", "Since, p-value('Original') - p-value('First Difference') < p-value('First Difference') - p-value('Second Difference'), it is the first difference that did most of the elimination of the trend.", "# Let's plot seasonal difference\ndf['Seasonal Difference'] = df['Milk in Pounds per Cow'] - df['Milk in Pounds per Cow'].shift(12)\ndf['Seasonal Difference'].plot();\n\nadf_check(df['Seasonal Difference'].dropna())", "Thus, we conclude that seasonal difference does not make the data stationary here, in fact we can observe visually that as we go further in time the variance began to increase.", "# Plotting 'Seasonal first difference'\ndf['Seasonal First Difference'] = df['First Difference'] - df['First Difference'].shift(12)\ndf['Seasonal First Difference'].plot();\n\nadf_check(df['Seasonal First Difference'].dropna())", "ARIMA models continued... 
3\n### Step 3 - Plot the Correlation and Autocorrelation Charts", "from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n\n# Plotting the gradual decline autocorrelation\nfig_first = plot_acf(df['First Difference'].dropna())\n\nfig_first = plot_acf(df['First Difference'].dropna(), use_vlines=False)\n\nfig_seasonal_first = plot_acf(df['Seasonal First Difference'].dropna(), use_vlines=False)\n\nfig_seasonal_first_pacf = plot_pacf(df['Seasonal First Difference'].dropna(), use_vlines=False)", "Plotting the final 'Autocorrelation' and 'Partial autocorrelation'", "plot_acf(df['Seasonal First Difference'].dropna());\nplot_pacf(df['Seasonal First Difference'].dropna());", "ARIMA models continued... 4\n### Step 4 - Construct the ARIMA model", "# ARIMA model for non-sesonal data\nfrom statsmodels.tsa.arima_model import ARIMA\n\n# help(ARIMA)\n\n# ARIMA model from seasonal data\n# from statsmodels.tsa.statespace import sarimax", "Choosing the p, d, q values of the order and seasonal_order tuple is reading task\nMore information here...\n[https://stackoverflow.com/questions/22770352/auto-arima-equivalent-for-python]\n[https://stats.stackexchange.com/questions/44992/what-are-the-values-p-d-q-in-arima]\n[https://people.duke.edu/~rnau/arimrule.htm]", "model = sm.tsa.statespace.SARIMAX(df['Milk in Pounds per Cow'], order=(0,1,0), seasonal_order=(1,1,1,12))\n\nresults = model.fit()\n\nprint(results.summary())\n\n# residual errors of prediction on the original training data\nresults.resid\n\n# plot of residual errors of prediction on the original training data\nresults.resid.plot();\n\n# KDE plot of residual errors of prediction on the original training data\nresults.resid.plot(kind='kde');\n\n# Creating a column forecast to house the forecasted values for existing values\ndf['forecast'] = results.predict(start=150, end=168)\ndf[['Milk in Pounds per Cow', 'forecast']].plot(figsize=(12,8));\n\n# Forecasting for future data\ndf.tail()\n\nfrom pandas.tseries.offsets import 
DateOffset\n\nfuture_dates = [df.index[-1] + DateOffset(months=x) for x in range(0,24)]\nfuture_dates\n\nfuture_df = pd.DataFrame(index=future_dates, columns=df.columns)\nfuture_df.head()\n\nfinal_df = pd.concat([df, future_df])\nfinal_df.head()\n\nfinal_df.tail()\n\nfinal_df['forecast'] = results.predict(start=168, end=192)\nfinal_df.tail()\n\nfinal_df[['Milk in Pounds per Cow', 'forecast']].plot()", "Why ARIMA Models are questioning when it comes to financial forecasting?\n\nLot of this stuff assumes that the y-axis value (price) is directly connected to the time (x-axis value) and that the time is really important aspect of the y value.\nWhile that is true for financial series it discounts the external force i.e. traders also able to buy and sell securities outside the market and affect its price.\nAnd because of that often you'll hear stock and securities prices are following some sort of Brownian motion almost like a random walk.\nBecause of those aspects of the financial and securities data this sort of forecsting method doesn't really work with stock." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
CalPolyPat/phys202-2015-work
assignments/assignment10/ODEsEx01.ipynb
mit
[ "Ordinary Differential Equations Exercise 1\nImports", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.integrate import odeint\nfrom IPython.html.widgets import interact, fixed", "Euler's method\nEuler's method is the simplest numerical approach for solving a first order ODE numerically. Given the differential equation\n$$ \\frac{dy}{dx} = f(y(x), x) $$\nwith the initial condition:\n$$ y(x_0)=y_0 $$\nEuler's method performs updates using the equations:\n$$ y_{n+1} = y_n + h f(y_n,x_n) $$\n$$ h = x_{n+1} - x_n $$\nWrite a function solve_euler that implements the Euler method for a 1d ODE and follows the specification described in the docstring:", "def solve_euler(derivs, y0, x):\n \"\"\"Solve a 1d ODE using Euler's method.\n \n Parameters\n ----------\n derivs : function\n The derivative of the diff-eq with the signature deriv(y,x) where\n y and x are floats.\n y0 : float\n The initial condition y[0] = y(x[0]).\n x : np.ndarray, list, tuple\n The array of times at which of solve the diff-eq.\n \n Returns\n -------\n y : np.ndarray\n Array of solutions y[i] = y(x[i])\n \"\"\"\n h=x[-1]-x[-2]\n data = [y0]\n for t in x[1:]:\n data.append(data[-1]+h*derivs(data[-1],t))\n return data\n\nassert np.allclose(solve_euler(lambda y, x: 1, 0, [0,1,2]), [0,1,2])", "The midpoint method is another numerical method for solving the above differential equation. In general it is more accurate than the Euler method. 
It uses the update equation:\n$$ y_{n+1} = y_n + h f\\left(y_n+\\frac{h}{2}f(y_n,x_n),x_n+\\frac{h}{2}\\right) $$\nWrite a function solve_midpoint that implements the midpoint method for a 1d ODE and follows the specification described in the docstring:", "def solve_midpoint(derivs, y0, x):\n \"\"\"Solve a 1d ODE using the Midpoint method.\n \n Parameters\n ----------\n derivs : function\n The derivative of the diff-eq with the signature deriv(y,x) where y\n and x are floats.\n y0 : float\n The initial condition y[0] = y(x[0]).\n x : np.ndarray, list, tuple\n The array of times at which of solve the diff-eq.\n \n Returns\n -------\n y : np.ndarray\n Array of solutions y[i] = y(x[i])\n \"\"\"\n h=x[-1]-x[-2]\n data = [y0]\n for t in x[1:]:\n data.append(data[-1]+h*derivs(data[-1]+h/2*derivs(data[-1],t),t+h/2))\n return data\n\nassert np.allclose(solve_euler(lambda y, x: 1, 0, [0,1,2]), [0,1,2])", "You are now going to solve the following differential equation:\n$$\n\\frac{dy}{dx} = x + 2y\n$$\nwhich has the analytical solution:\n$$\ny(x) = 0.25 e^{2x} - 0.5 x - 0.25\n$$\nFirst, write a solve_exact function that compute the exact solution and follows the specification described in the docstring:", "def solve_exact(x):\n \"\"\"compute the exact solution to dy/dx = x + 2y.\n \n Parameters\n ----------\n x : np.ndarray\n Array of x values to compute the solution at.\n \n Returns\n -------\n y : np.ndarray\n Array of solutions at y[i] = y(x[i]).\n \"\"\"\n data = np.array(.25*np.exp(2*x)-.5*x-.25)\n return data\n\nassert np.allclose(solve_exact(np.array([0,1,2])),np.array([0., 1.09726402, 12.39953751]))", "In the following cell you are going to solve the above ODE using four different algorithms:\n\nEuler's method\nMidpoint method\nodeint\nExact\n\nHere are the details:\n\nGenerate an array of x values with $N=11$ points over the interval $[0,1]$ ($h=0.1$).\nDefine the derivs function for the above differential equation.\nUsing the solve_euler, solve_midpoint, odeint and 
solve_exact functions to compute\n the solutions using the 4 approaches.\n\nVisualize the solutions on a sigle figure with two subplots:\n\nPlot the $y(x)$ versus $x$ for each of the 4 approaches.\nPlot $\\left|y(x)-y_{exact}(x)\\right|$ versus $x$ for each of the 3 numerical approaches.\n\nYour visualization should have legends, labeled axes, titles and be customized for beauty and effectiveness.\nWhile your final plot will use $N=10$ points, first try making $N$ larger and smaller to see how that affects the errors of the different approaches.", "x = np.linspace(0,1,11)\ndef derivs(y, x):\n dy = x+2*y\n return dy\neuler_error = np.array(solve_euler(derivs, 0, x))-solve_exact(x)\nmidpoint_error = np.array(solve_midpoint(derivs, 0, x))-solve_exact(x)\nodeint_error = np.array(odeint(derivs, 0, x)).flatten()-solve_exact(x)\n\nf = plt.figure(figsize = (9,6))\nax = plt.subplot(211)\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.yaxis.set_ticks_position('left')\nax.xaxis.set_ticks_position('bottom')\nplt.plot(x,solve_euler(derivs, 0, x), label=\"Euler\")\n\nplt.plot(x,solve_midpoint(derivs, 0, x), label=\"Midpoint\")\n\nplt.plot(x,solve_exact(x), label=\"Exact\")\n\nplt.plot(x,odeint(derivs, 0, x), label=\"ODEInt\")\nplt.ylabel(\"y(x)\")\nplt.xlabel(\"x\")\nplt.title(r\"Numerical Solutions to $\\frac{dy}{dx}=x+2y$\")\nplt.legend(loc = \"best\")\n\nax = plt.subplot(212)\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.yaxis.set_ticks_position('left')\nax.xaxis.set_ticks_position('bottom')\nplt.plot(x,abs(euler_error), label = \"Euler Error\")\n\nplt.plot(x,abs(midpoint_error), label = \"Midpoint Error\")\n\nplt.plot(x,abs(odeint_error), label = \"ODEInt Error\")\nplt.ylabel(\"Errors\")\nplt.xlabel(\"x\")\nplt.title(r\"Errors of numerical solutions to $\\frac{dy}{dx}=x+2y$\")\nplt.legend(loc = \"best\")\nplt.tight_layout()\n\nassert True # leave this for grading the plots" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
studywolf/NDMPS-paper
code/NDMPs tutorial.ipynb
gpl-3.0
[ "NDMPs tutorial\nIntroduction\nIn the control of motor systems, it often comes up that you would like to execute some pre-determined trajectory, like walking, running, throwing a frisbee, or handwriting.\nDynamical movement primitives (DMPs) are robust, generalizable trajectory generation systems. I give an overview of their origins and some use cases on my blog https://studywolf.wordpress.com/category/robotics/dynamic-movement-primitive/\nIn this tutorial, we'll be looking at a neural implementation of DMPs (NDMPs).\nBasics\nThere are two main parts to DMPs, the point attractors and the forcing function. \nPoint attractors\nFor each degree-of-freedom in your movement a separate point attractor is required. \nFor discrete movements, the point attractor simply moves in a straight line from the starting point of the trajectory to the ending point of the trajectory.", "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\ndef point_attractor(start, target, dt=.001, alpha=400, beta=100):\n x_track = [np.copy(start)]\n x = np.array(start, dtype=float) # initial position\n dx = np.array([0, 0], dtype=float) # initial velocity\n for ii in range(30):\n # ddx = alpha * (beta * (target -x)) # no velocity compensation\n ddx = alpha * (beta * (target - x) - dx) # <-- point attractor dynamics\n dx += ddx * dt \n x += dx * dt\n x_track.append(np.copy(x))\n \n return np.array(x_track)\n\n# Discrete system point attractor\n\nstart = [0, 0] # change this and run!\nend = [1, 1] # change this and run!\ntrajectory = point_attractor(start, end)\n\nplt.plot(start[0], start[1], 'bx', mew=4) # blue x at start position\nplt.plot(end[0], end[1], 'gx', mew=4) # green x at end position\nplt.plot(trajectory[:, 0], trajectory[:, 1], '.')\nplt.xlim([-2, 2])\nplt.ylim([-2, 2])", "For rhythmic movements, the start and end point are the same, so the point attractor just holds the system at the same position. Which, granted, is not very exciting to see. 
See for yourself!", "# Rhythmic system point attractor\n\nstart = end =[0, 0] # change this and run!\ntrajectory = point_attractor(start, end)\n\nplt.plot(start[0], start[1], 'bx', mew=4) # blue x at start position\nplt.plot(end[0], end[1], 'gx', mew=4) # green x at end position\nplt.plot(trajectory[:, 0], trajectory[:, 1], '.')\nplt.xlim([-2, 2])\nplt.ylim([-2, 2])", "Forcing functions\nThe second part of the DMP system is the forcing function. The idea here is simply that the some additional force is added in to the point attractor dynamics, that pushes them along a path that is no longer straight as they move to the end point (discrete movements) or try to hold a position (rhythmic movements).", "def point_attractor_ff(start, target, ff, dt=.001, alpha=400, beta=100):\n \" ff is a vector of forces to apply over time \"\n x_track = [np.copy(start)]\n x = np.array(start, dtype=float) # initial position\n dx = np.array([0, 0], dtype=float) # initial velocity\n for ii in range(len(ff)):\n ddx = alpha * (beta * (target - x) - dx) + ff[ii]\n dx += ddx * dt \n x += dx * dt\n x_track.append(np.copy(x))\n \n return np.array(x_track)\n\n# Discrete system point attractor with forcing function\n\nff = np.vstack([\n -np.sin(np.arange(0, 10, .1)),\n np.cos(np.arange(0, 10, .1))]).T * 2e4\nstart = [0, 0] # change this and run!\nend = [1, 0] # change this and run!\ntrajectory = point_attractor_ff(start, end, ff)\n\nplt.plot(start[0], start[1], 'bx', mew=4) # blue x at start position\nplt.plot(end[0], end[1], 'gx', mew=4) # green x at end position\nplt.plot(trajectory[:, 0], trajectory[:, 1], '.')\nplt.xlim([-2, 2])\nplt.ylim([-2, 2])\n\n# Rhythmic system point attractor with forcing function\n\nff = np.vstack([\n np.sin(np.arange(0, 10, .1)),\n np.cos(np.arange(0, 10, .1))]).T * 2e4\nstart = end = [0, 0] # change this and run!\ntrajectory = point_attractor_ff(start, end, ff)\n\nplt.plot(start[0], start[1], 'bx', mew=4) # blue x at start position\nplt.plot(end[0], end[1], 'gx', 
mew=4) # green x at end position\nplt.plot(trajectory[:, 0], trajectory[:, 1], '.')\nplt.xlim([-2, 2])\nplt.ylim([-2, 2])", "How to get the trajectory you want\nSo at this point, we have point attractors and we can apply forces to them to make them move in fun ways, but how do we get them to move in a specific way?\nLet's say that we want to draw out a fun heart shape:", "heart = np.load('models/handwriting_trajectories/heart.npz')['arr_0']\nheart = np.vstack([heart, heart[-1]])\nplt.plot(heart[:, 0], heart[:, 1], 'r--')", "The forces that we apply to our point attractor affect acceleration, so what we need to do is find out what accelerations will give us the above trajectory. Let's assume our timestep is 1ms when drawing out that heart trajectory.", "dt = 0.001\n\n# initial velocity is zero\nd_heart = np.vstack([[0, 0], np.diff(heart, axis=0) / dt]) \n# final acceleration is zero\ndd_heart = np.vstack([np.diff(d_heart, axis=0) / dt, [0, 0]]) \nplt.subplot(2, 1, 1)\nplt.plot(d_heart)\nplt.title('Desired velocities')\nplt.subplot(2, 1, 2)\nplt.plot(dd_heart)\nplt.title('Desired accelerations')\nplt.tight_layout()", "So if there were no other forces affecting the system, those are the forces that we would need to apply to draw out a heart. HOWEVER. There are other forces affecting the system, namely those point attractor dynamics that draw the system from the start point to the end point. 
So we need to account for those.", "# For the discrete system\n\nalpha = 400\nbeta = 100\n\nstart = heart[-1]\nend = heart[-1] # change this and run!\nforces = dd_heart - (alpha * (beta * (end - heart) - d_heart))\n\n# plot the desired accelerations and forces to apply to\n# achieve the desired accelerations\nplt.subplot(2, 1, 1)\nplt.plot(dd_heart[:, 0], alpha=.5)\nplt.gca().set_prop_cycle(None)\nplt.plot(forces[:, 0])\nplt.legend(['desired acceleration', 'forces to apply'])\nplt.title('Forces to apply')\nplt.subplot(2, 1, 2)\nplt.plot(dd_heart[:, 1], alpha=.5)\nplt.gca().set_prop_cycle(None)\nplt.plot(forces[:,1])\nplt.legend(['desired acceleration', 'forces to apply'])\n\ntrajectory = point_attractor_ff(start, end, forces, dt=dt,\n alpha=alpha, beta=beta)\n\nplt.plot(heart[:, 0], heart[:, 1], 'r--', lw=3) # plot the target path\n\nplt.plot(trajectory[:, 0], trajectory[:, 1]) # system trajectory in blue\nplt.plot(trajectory[:, 0], trajectory[:, 1], 'b.') # add blue dots at time steps\n\nplt.plot(start[0], start[1], 'bx', mew=4) # blue x at start position\nplt.plot(end[0], end[1], 'gx', mew=4) # green x at end position\n\nplt.legend(['desired trajectory', 'actual trajectory'])\nplt.xlim([-2, 2])\nplt.ylim([-2, 2])", "Here, the system is starting and ending at the same point in the trajectory, and it traces it out perfectly.", "# For the rhythmic system\n\nalpha = 400\nbeta = 100\n\nstart = end = [0, 0] # change this and run!\nforces = dd_heart - (alpha * (beta * (end - heart) - d_heart))\n\n# plot the desired accelerations and forces to apply to\n# achieve the desired accelerations\nplt.subplot(2, 1, 1)\nplt.plot(dd_heart[:, 0], alpha=.5)\nplt.gca().set_prop_cycle(None)\nplt.plot(forces[:, 0])\nplt.legend(['desired acceleration', 'forces to apply'])\nplt.title('Forces to apply')\nplt.subplot(2, 1, 2)\nplt.plot(dd_heart[:, 1], alpha=.5)\nplt.gca().set_prop_cycle(None)\nplt.plot(forces[:,1])\nplt.legend(['desired acceleration', 'forces to apply'])\n\nnum_loops = 
3\nforces = np.vstack([forces] * num_loops)\n\ntrajectory = point_attractor_ff(start, end, forces, dt=dt,\n alpha=alpha, beta=beta)\n\nplt.plot(heart[:, 0], heart[:, 1], 'r--', lw=3) # plot the target path\n\nplt.plot(trajectory[:, 0], trajectory[:, 1]) # system trajectory in blue\nplt.plot(trajectory[:, 0], trajectory[:, 1], 'b.') # add blue dots at time steps\n\nplt.plot(start[0], start[1], 'bx', mew=4) # blue x at start position\nplt.plot(end[0], end[1], 'gx', mew=4) # green x at end position\n\nplt.legend(['desired trajectory', 'actual trajectory'])\nplt.xlim([-2, 2])\nplt.ylim([-2, 2])", "Here, we've started our system out at [0, 0], and you can see that on the first loop through it's not matching the desired trajectory. However, by the second loop through it's converged to the desired path and we're stably tracing out the heart pattern.\nSo now we've implemented the most basic possible versions of something that resembles DMPs. But it's hopefully enough to get a feel for how these kinds of systems work. Directly we now move on to neural DMPs!\nNeural DMPs\nSimilar to how we broke down DMPs into point attractors and a forcing function, we will first discuss implementing neural point attractors and the add in a forcing function!\nNeural point attractors\nOur point attractor is a second order system (which means that the dynamics are defined in terms of the second derivative, acceleration):\n$\\ddot{y} = \\alpha \\; (\\beta \\; (y^* - y) - \\dot{y})$\nat each time step the system velocity, $\\dot{y}$, and position, $y$, are updated according to\n$\\dot{y} = \\dot{y} + \\ddot{y} * dt$\n$y = y + \\dot{y} * dt$\nTo make implementation easier on ourselves, we're going to rewrite the point attractor equations as a first order system. 
So first we define\n$\\textbf{y} = \\left[ \\begin{array}{c}y \\ \\dot{y} \\end{array} \\right]$\nThis lets us rewrite the dynamics as a first order system\n$\\dot{\\textbf{y}} = \\left[ \\begin{array}{c} \\dot{y} \\ \\ddot{y} \\end{array} \\right ] = \\begin{array}{c} \\dot{y} \\ \\alpha \\; (\\beta \\; (y^ - y) - \\dot{y}) \\end{array} = \\left[ \\begin{array}{cc}0 & 1 \\ - \\alpha \\beta & -\\beta \\end{array} \\right ] \\textbf{y} + \\left[ \\begin{array}{c} 0 \\ \\alpha\\beta \\end{array} \\right ] y^$\nSo the change in $\\textbf{y}$ is dependent on two parts: \n1) the current state of the system:\n$\\left[ \\begin{array}{cc}0 & 1 \\ - \\alpha \\beta & -\\beta \\end{array} \\right ] \\textbf{y} = \\textbf{A} \\textbf{y}$\n2) the system input:\n$\\left[ \\begin{array}{c} 0 \\ \\alpha \\beta \\end{array} \\right ] y^ = \\textbf{B} y^$\nTo implement this dynamical system in neurons, we first need to set up an ensemble that represents our variables of interest: $y$ and $\\dot{y}$. \nNOTE: we make it so neurons represent y OR dy, not both this makes it so that the representation of y does not interfere with the representation of dy. \nBecause we don't need to compute any nonlinear functions of y and dy on outgoing connections this is OK! \nDISCUSS: does this make sense? how does this make you feel? maybe drawing some circles will help?\nOnce the ensemble is created that represents our system state variables we need to implement the desired system dynamics.\nThe above equations tell us how $\\textbf{y}$ changes over time. We broke it down into two parts, the $\\textbf{A}$ and $\\textbf{B}$ matrices, which operate on the current state and the input signal, respectively.\nSo Y is representing our system state, and connections into Y will implement the dynamics. 
\nThe first part of the dynamics work on the current state of the system, so we can implement these with a recurrent connection on Y.\nThe second part of the dynamics operates on the input signal, so we create an input signal and project it into Y.", "import nengo\nfrom models import point_attractor\n\nmodel = point_attractor.generate()\n\nfrom nengo_gui.ipython import IPythonViz\nIPythonViz(model, cfg='point_attractor.viz.cfg')", "Alright, great! \nSo we've now got point attractors implemented in neurons, all that's left is generating the additional forces that we need to move the system along our desired trajectory. \nFor discrete movements, we can do this by decoding the required forces off of a ramping signal, and for rhythmic movements we can decode the required forces off of an oscillator (so that they are produced over and over and over and ...)\nHere we're going to look at the implementation for a rhythmic movement.", "from models import forcing_functions\nfrom models import oscillator\n\n\ndef generate(data_file, net=None, alpha=1000.0, beta=250):\n\n # generate our forcing function\n y_des = np.load(data_file)['arr_0'].T\n _, force_functions, _ = forcing_functions.generate(\n y_des, rhythmic=True, alpha=alpha, beta=beta)\n\n net = nengo.Network(label='Rhythmic NDMP')\n with net:\n # --------------------- Inputs ------------------------------\n net.input = nengo.Node(size_in=2)\n\n # ------------------- Point Attractors ----------------------\n x = point_attractor.generate(\n n_neurons=500, alpha=alpha, beta=beta)\n nengo.Connection(net.input[0], x.input[0], synapse=None)\n \n y = point_attractor.generate(\n n_neurons=500, alpha=alpha, beta=beta)\n nengo.Connection(net.input[1], y.input[0], synapse=None)\n\n # -------------------- Oscillators --------------------------\n kick = nengo.Node(\n nengo.utils.functions.piecewise({0: 1, .05: 0}),\n label='kick')\n osc = oscillator.generate(net, n_neurons=3000, speed=.025)\n osc.label = 'oscillator'\n 
nengo.Connection(kick, osc[0])\n\n # connect oscillator to point attractor\n nengo.Connection(osc, x.input[1], synapse=None, **force_functions[0])\n nengo.Connection(osc, y.input[1], synapse=None, **force_functions[1])\n\n # -------------------- Output -------------------------------\n net.output = nengo.Node(size_in=2)\n nengo.Connection(x.output, net.output[0], synapse=None)\n nengo.Connection(y.output, net.output[1], synapse=None)\n\n return net\n\nmodel = generate('models/handwriting_trajectories/star.npz')\nfrom nengo_gui.ipython import IPythonViz\nIPythonViz(model, cfg='ndmp.viz.cfg')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
msakuta/WebGL-Orbiter
scripts/UVMap.ipynb
mit
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport math", "Rationale\nWe want to map a rectangular texture to a sphere with UV mapping.\nIn 3D model conventions, texture coordinates are designed into a model, but in our case we want to generate from coordinates.\nIt is a bit tricky since the conversion from 3D coordinates to 2D texture coordinates.\nWith 3D coordinates $(x, y, z)$, we define the conversion:\n\\begin{align}\n\\phi &= \\mathbf{atan}\\left(\\frac{y}{x}\\right) \\\n\\theta &= \\mathbf{asin}\\left(\\frac{z}{\\sqrt{x^2 + y^2 + z^2}}\\right)\n\\end{align}\nHowever, it is a little more tricky since some of the vertices in a triangle wrap around $\\phi$ coordinate and shows an artifact that looks like a \"seam\".\nWe want to detect the \"seams\" and put the vertex coordinates to closest neighbor.\nIn particular, if any of the $\\phi$ values of vertices in a triangle have difference larger than $\\pi$, we will add a value to make the difference less than $\\pi$.\nWe would like to define a function that maps a value wrap in $\\pi$.\nFirst, let's assume the input is normalized to 1.\nThen we can define a saw-like function like below.\n$$\nf(x) = x - \\mathrm{floor}(x - 0.5) - 1\n$$\nWe can visualize the function like below.", "x = np.linspace(-2, 2, 1000)\n\ndef f(x):\n return x - np.floor(x - 0.5) - 1.\n\ny = f(x)\n\nplt.plot(x, y)\nplt.grid()\nplt.axis(\"square\")", "The script\nWe have sphere_uv.py that generates the UV coordinates and maps it with the function described above.", "from sphere_uv import SphereUV\n\nsphere_uv = SphereUV(\"../src/models/phobos_t.obj\")\n\nfor face in sphere_uv.faces:\n for v0, v1 in zip(face, face[1:] + [face[-1]]):\n # print(uv, len(uvidx))\n uvv = np.asarray([sphere_uv.uvs[v0.t], sphere_uv.uvs[v1.t]])\n # print(uvv)\n plt.plot(uvv[:,0], uvv[:,1], \"-\")\n\nuvidx, uvbuf = SphereUV.gen_uvs(sphere_uv.faces, sphere_uv.vertices)\nfor uv in uvidx:\n # print(uv, len(uvidx))\n uvv = np.asarray([uvbuf[vu] for vu in uv + 
[uv[0]]])\n # print(uvv)\n plt.plot(uvv[:,0], uvv[:,1], \"-\")\n\n\nedges = []\nfor face in sphere_uv.faces:\n for edge in zip(face, face[1:] + [face[0]]):\n # print(edge)\n edgev = np.array([edge[0].v, edge[1].v])\n # print(vertices[edge[0]], vertices[edge[1]])\n # edges.append(vertices[edge,0], vertices[edge,1])\n plt.plot(sphere_uv.vertices[edgev,0], sphere_uv.vertices[edgev,1], \"-\")\n\n\n# Output to an obj file\nsphere_uv.write(\"../src/models/phobos_tf.obj\")\n" ]
[ "code", "markdown", "code", "markdown", "code" ]
Santana9937/Regression_ML_Specialization
Week_4_Ridge_Regression/assign_2_ridge-regression.ipynb
mit
[ "Regression Week 4: Ridge Regression (gradient descent)\nIn this notebook, we will implement ridge regression via gradient descent. You will:\n* Convert an SFrame into a Numpy array\n* Write a Numpy function to compute the derivative of the regression weights with respect to a single feature\n* Write gradient descent function to compute the regression weights given an initial weight vector, step size, tolerance, and L2 penalty\nImporting Libraries", "import graphlab\nimport numpy as np\nimport pandas as pd\nfrom sklearn import linear_model\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style('darkgrid')\n%matplotlib inline", "If we want to do any \"feature engineering\" like creating new features or adjusting existing ones we should do this directly using the SFrames as seen in the first notebook of Week 2. For this notebook, however, we will work with the existing features.\nLoading and Plotting the house sales data", "sales = graphlab.SFrame('kc_house_data.gl/')\n\nplt.figure(figsize=(8,6))\nplt.plot(sales['sqft_living'], sales['price'],'.')\nplt.xlabel('Living Area (ft^2)', fontsize=16)\nplt.ylabel('House Price ($)', fontsize=16)\nplt.title('King County, Seattle House Price Data', fontsize=18)\nplt.axis([0.0, 14000.0, 0.0, 8000000.0])\nplt.show()", "Import useful functions from previous notebook\nAs in Week 2, we convert the SFrame into a 2D Numpy array. 
Copy and paste get_num_data() from the second notebook of Week 2.", "def get_numpy_data(input_sframe, features, output):\n \n input_sframe['constant'] = 1 # Adding column 'constant' to input SFrame with all values = 1.0\n features = ['constant'] + features # Adding 'constant' to List of features\n \n # Selecting the columns for the feature_matrux and output_array\n features_sframe = input_sframe[features]\n output_sarray = input_sframe[output]\n \n # Converting sframes to numpy.ndarrays\n feature_matrix = features_sframe.to_numpy()\n output_array = output_sarray.to_numpy()\n \n return(feature_matrix, output_array)", "Also, copy and paste the predict_output() function to compute the predictions for an entire matrix of features given the matrix and the weights:", "def predict_output(feature_matrix, weights):\n predictions = np.dot(feature_matrix, weights)\n return predictions", "Computing the Derivative\nWe are now going to move to computing the derivative of the regression cost function. Recall that the cost function is the sum over the data points of the squared difference between an observed output and a predicted output, plus the L2 penalty term.\nCost(w)\n= SUM[ (prediction - output)^2 ]\n+ l2_penalty*(w[0]^2 + w[1]^2 + ... + w[k]^2).\nSince the derivative of a sum is the sum of the derivatives, we can take the derivative of the first part (the RSS) as we did in the notebook for the unregularized case in Week 2 and add the derivative of the regularization part. As we saw, the derivative of the RSS with respect to w[i] can be written as: \n2*SUM[ error*[feature_i] ].\nThe derivative of the regularization term with respect to w[i] is:\n2*l2_penalty*w[i].\nSumming both, we get\n2*SUM[ error*[feature_i] ] + 2*l2_penalty*w[i].\nThat is, the derivative for the weight for feature i is the sum (over data points) of 2 times the product of the error and the feature itself, plus 2*l2_penalty*w[i]. \nWe will not regularize the constant. 
Thus, in the case of the constant, the derivative is just twice the sum of the errors (without the 2*l2_penalty*w[0] term).\nRecall that twice the sum of the product of two vectors is just twice the dot product of the two vectors. Therefore the derivative for the weight for feature_i is just two times the dot product between the values of feature_i and the current errors, plus 2*l2_penalty*w[i].\nWith this in mind complete the following derivative function which computes the derivative of the weight given the value of the feature (over all data points) and the errors (over all data points). To decide when to we are dealing with the constant (so we don't regularize it) we added the extra parameter to the call feature_is_constant which you should set to True when computing the derivative of the constant and False otherwise.", "def feature_derivative_ridge(errors, feature, weight, l2_penalty, feature_is_constant):\n \n # If feature_is_constant is True, derivative is twice the dot product of errors and feature\n if feature_is_constant==True:\n derivative = 2.0*np.dot(errors, feature)\n \n # Otherwise, derivative is twice the dot product plus 2*l2_penalty*weight\n else:\n derivative = 2.0*np.dot(errors, feature) + 2.0*l2_penalty*weight\n \n return derivative", "To test your feature derivartive run the following:", "(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price') \nmy_weights = np.array([1., 10.])\ntest_predictions = predict_output(example_features, my_weights) \nerrors = test_predictions - example_output # prediction errors\n\n# next two lines should print the same values\nprint feature_derivative_ridge(errors, example_features[:,1], my_weights[1], 1, False)\nprint np.sum(errors*example_features[:,1])*2+20.\nprint ''\n\n# next two lines should print the same values\nprint feature_derivative_ridge(errors, example_features[:,0], my_weights[0], 1, True)\nprint np.sum(errors)*2.", "Gradient Descent\nNow we will write a function that 
performs a gradient descent. The basic premise is simple. Given a starting point we update the current weights by moving in the negative gradient direction. Recall that the gradient is the direction of increase and therefore the negative gradient is the direction of decrease and we're trying to minimize a cost function. \nThe amount by which we move in the negative gradient direction is called the 'step size'. We stop when we are 'sufficiently close' to the optimum. Unlike in Week 2, this time we will set a maximum number of iterations and take gradient steps until we reach this maximum number. If no maximum number is supplied, the maximum should be set 100 by default. (Use default parameter values in Python.)\nWith this in mind, complete the following gradient descent function below using your derivative function above. For each step in the gradient descent, we update the weight for each feature before computing our stopping criteria.", "def ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations):\n weights = np.array(initial_weights) # make sure it's a numpy array\n \n iteration_count = 0\n \n #while not reached maximum number of iterations:\n while iteration_count < max_iterations:\n \n predictions = predict_output(feature_matrix, weights) # computing predictions w/ feature_matrix and weights\n errors = predictions - output # compute the errors as predictions - output\n\n # loop over each weight\n for i in xrange(len(weights)): \n # Recall that feature_matrix[:,i] is the feature column associated with weights[i]\n # compute the derivative for weight[i].\n #(Remember: when i=0, you are computing the derivative of the constant!)\n if i == 0:\n derivative = feature_derivative_ridge(errors, feature_matrix[:,0], weights[0], l2_penalty, True)\n else:\n derivative = feature_derivative_ridge(errors, feature_matrix[:,i], weights[i], l2_penalty, False) \n \n weights[i] = weights[i] - step_size*derivative\n \n # 
Incrementing the iteration count\n iteration_count += 1\n \n return weights", "Visualizing effect of L2 penalty\nThe L2 penalty gets its name because it causes weights to have small L2 norms than otherwise. Let's see how large weights get penalized. Let us consider a simple model with 1 feature:", "simple_features = ['sqft_living']\nmy_output = 'price'", "Load the training set and test set.", "train_data,test_data = sales.random_split(.8,seed=0)", "In this part, we will only use 'sqft_living' to predict 'price'. Use the get_numpy_data function to get a Numpy versions of your data with only this feature, for both the train_data and the test_data.", "(simple_feature_matrix, output) = get_numpy_data(train_data, simple_features, my_output)\n(simple_test_feature_matrix, test_output) = get_numpy_data(test_data, simple_features, my_output)", "Let's set the parameters for our optimization:", "initial_weights = np.array([0.0, 0.0])\nstep_size = 1e-12\nmax_iterations=1000", "First, let's consider no regularization. Set the l2_penalty to 0.0 and run your ridge regression algorithm to learn the weights of your model. Call your weights:\nsimple_weights_0_penalty\nwe'll use them later.", "l2_penalty = 0.0\nsimple_weights_0_penalty = ridge_regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)", "Next, let's consider high regularization. Set the l2_penalty to 1e11 and run your ridge regression algorithm to learn the weights of your model. Call your weights:\nsimple_weights_high_penalty\nwe'll use them later.", "l2_penalty = 1.0e11\nsimple_weights_high_penalty = ridge_regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)", "This code will plot the two learned models. 
(The green line is for the model with no regularization and the red line is for the one with high regularization.)", "plt.figure(figsize=(8,6))\nplt.plot(simple_feature_matrix[:,1],output,'.', label= 'House Price Data')\nplt.hold(True)\n\nplt.plot(simple_feature_matrix[:,1], predict_output(simple_feature_matrix, simple_weights_0_penalty),'-', label= 'No L2 Penalty')\nplt.plot(simple_feature_matrix[:,1], predict_output(simple_feature_matrix, simple_weights_high_penalty),'-', label= 'Large L2 Penalty')\n\nplt.hold(False)\nplt.legend(loc='upper left', fontsize=16)\nplt.xlabel('Living Area (ft^2)', fontsize=16)\nplt.ylabel('House Price ($)', fontsize=16)\nplt.title('King County, Seattle House Price Data', fontsize=18)\nplt.axis([0.0, 14000.0, 0.0, 8000000.0])\nplt.show()", "Compute the RSS on the TEST data for the following three sets of weights:\n1. The initial weights (all zeros)\n2. The weights learned with no regularization\n3. The weights learned with high regularization\nWhich weights perform best?", "test_pred_weights_0 = predict_output(simple_test_feature_matrix, initial_weights)\nRSS_test_weights_0 = sum( (test_output - test_pred_weights_0)**2.0 )\n\ntest_pred_no_reg = predict_output(simple_test_feature_matrix, simple_weights_0_penalty)\nRSS_test_no_reg = sum( (test_output - test_pred_no_reg)**2.0 )\n\ntest_pred_high_reg = predict_output(simple_test_feature_matrix, simple_weights_high_penalty)\nRSS_test_high_reg = sum( (test_output - test_pred_high_reg)**2.0 )", "QUIZ QUESTIONS\nQ1: What is the value of the coefficient for sqft_living that you learned with no regularization, rounded to 1 decimal place? 
What about the one with high regularization?", "print 'No Regulatization sqft_living weight: %.1f' %(simple_weights_0_penalty[1])\nprint 'High Regulatization sqft_living weight: %.1f' %(simple_weights_high_penalty[1])", "Q2: Comparing the lines you fit with the with no regularization versus high regularization, which one is steeper?", "print 'Line with No Regularization is steeper'", "Q3: What are the RSS on the test data for each of the set of weights above (initial, no regularization, high regularization)?", "print 'Test set RSS with initial weights all set to 0.0: %.1e' %(RSS_test_weights_0)\nprint 'Test set RSS with initial weights set to weights learned with no regularization: %.1e' %(RSS_test_no_reg)\nprint 'Test set RSS with initial weights set to weights learned with high regularization: %.1e' %(RSS_test_high_reg)", "Initial weights learned with no regularization performed best on the Test Set (lowest RSS value) \nRunning a multiple regression with L2 penalty\nLet us now consider a model with 2 features: ['sqft_living', 'sqft_living15'].\nFirst, create Numpy versions of your training and test data with these two features.", "model_features = ['sqft_living', 'sqft_living15'] # sqft_living15 is the average squarefeet for the nearest 15 neighbors. \nmy_output = 'price'\n(feature_matrix, output) = get_numpy_data(train_data, model_features, my_output)\n(test_feature_matrix, test_output) = get_numpy_data(test_data, model_features, my_output)", "We need to re-inialize the weights, since we have one extra parameter. Let us also set the step size and maximum number of iterations.", "initial_weights = np.array([0.0,0.0,0.0])\nstep_size = 1e-12\nmax_iterations = 1000", "First, let's consider no regularization. Set the l2_penalty to 0.0 and run your ridge regression algorithm to learn the weights of your model. 
Call your weights:\nmultiple_weights_0_penalty", "l2_penalty = 0.0\nmultiple_weights_0_penalty = ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)", "Next, let's consider high regularization. Set the l2_penalty to 1e11 and run your ridge regression algorithm to learn the weights of your model. Call your weights:\nmultiple_weights_high_penalty", "l2_penalty = 1.0e11\nmultiple_weights_high_penalty = ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)", "Compute the RSS on the TEST data for the following three sets of weights:\n1. The initial weights (all zeros)\n2. The weights learned with no regularization\n3. The weights learned with high regularization\nWhich weights perform best?", "test_pred_mul_feat_weights_0 = predict_output(test_feature_matrix, initial_weights)\nRSS_test_mul_feat_weights_0 = sum( (test_output - test_pred_mul_feat_weights_0)**2.0 )\n\ntest_pred_mul_feat_no_reg = predict_output(test_feature_matrix, multiple_weights_0_penalty)\nRSS_test_mul_feat_no_reg = sum( (test_output - test_pred_mul_feat_no_reg)**2.0 )\n\ntest_pred_mul_feat_high_reg = predict_output(test_feature_matrix, multiple_weights_high_penalty)\nRSS_test_mul_feat_high_reg = sum( (test_output - test_pred_mul_feat_high_reg)**2.0 )", "Predict the house price for the 1st house in the test set using the no regularization and high regularization models. (Remember that python starts indexing from 0.) How far is the prediction from the actual price? Which weights perform best for the 1st house?", "print 'Pred. price of 1st house in Test Set with weights learned with no reg.: %.2f' %(test_pred_mul_feat_no_reg[0])\nprint 'Pred. price of 1st house in Test Set with weights learned with high reg.: %.2f' %(test_pred_mul_feat_high_reg[0])\n\nprint 'Pred. 
price - actual prize of 1st house in Test Set, using weights w/ no reg.: %.2f' %(abs(test_output[0] - test_pred_mul_feat_no_reg[0]))\nprint 'Pred. price - actual prize of 1st house in Test Set, using weights w/ high reg.: %.2f' %(abs(test_output[0] - test_pred_mul_feat_high_reg[0]))", "Weights with high regularization perform best on 1st house in Test Set \nQUIZ QUESTIONS\nQ1: What is the value of the coefficient for sqft_living that you learned with no regularization, rounded to 1 decimal place? What about the one with high regularization?", "print 'No Regulatization sqft_living weight: %.1f' %(multiple_weights_0_penalty[1])\nprint 'High Regulatization sqft_living weight: %.1f' %(multiple_weights_high_penalty[1])", "Q2: What are the RSS on the test data for each of the set of weights above (initial, no regularization, high regularization)?", "print 'Test set RSS with initial weights all set to 0.0: %.1e' %(RSS_test_mul_feat_weights_0)\nprint 'Test set RSS with initial weights set to weights learned with no regularization: %.1e' %(RSS_test_mul_feat_no_reg)\nprint 'Test set RSS with initial weights set to weights learned with high regularization: %.1e' %(RSS_test_mul_feat_high_reg)", "Q3: We make prediction for the first house in the test set using two sets of weights (no regularization vs high regularization). Which weights make better prediction <u>for that particular house</u>?\n Weights with high regularization perform best on 1st house in Test Set" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
gfeiden/MagneticUpperSco
notes/convective_structure.ipynb
mit
[ "Radiative Cores & Convective Envelopes\nAnalysis of how magnetic fields influence the extent of radiative cores and convective envelopes in young, pre-main-sequence stars.\nBegin with some preliminaries.", "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d", "Load a standard and magnetic isochrone with equivalent ages. Here, the adopted age is 10 Myr to look specifically at the predicted internal structure of stars in Upper Scorpius.", "# read standard 10 Myr isochrone\niso_std = np.genfromtxt('../models/iso/std/dmestar_00010.0myr_z+0.00_a+0.00_phx.iso')\n\n# read standard 5 Myr isochrone\niso_5my = np.genfromtxt('../models/iso/std/dmestar_00005.0myr_z+0.00_a+0.00_phx.iso')\n\n# read magnetic isochrone\niso_mag = np.genfromtxt('../models/iso/mag/dmestar_00010.0myr_z+0.00_a+0.00_phx_magBeq.iso')", "The magnetic isochrone is known to begin at a lower mass than the standard isochrone and both isochrones have gaps where individual models failed to converge. Gaps need not occur at the same masses along each isochrone. 
To overcome these inconsistencies, we can interpolate both isochrones onto a pre-defined mass domain.", "masses = np.arange(0.09, 1.70, 0.01) # new mass domain\n\n# create an interpolation curve for a standard isochrone \nicurve = interp1d(iso_std[:,0], iso_std, axis=0, kind='cubic')\n\n# and transform to new mass domain\niso_std_eq = icurve(masses)\n\n# create interpolation curve for standard 5 Myr isochrone\nicurve = interp1d(iso_5my[:,0], iso_5my, axis=0, kind='linear')\n\n# and transform to a new mass domain\niso_5my_eq = icurve(masses)\n\n# create an interpolation curve for a magnetic isochrone \nicurve = interp1d(iso_mag[:,0], iso_mag, axis=0, kind='cubic')\n\n# and transform to new mass domain\niso_mag_eq = icurve(masses)", "Let's compare the interpolated isochrones to the original, just to be sure that the resulting isochrones are smooth.", "plt.plot(10**iso_std[:, 1], iso_std[:, 3], '-', lw=4, color='red')\nplt.plot(10**iso_std_eq[:, 1], iso_std_eq[:, 3], '--', lw=4, color='black')\n\nplt.plot(10**iso_mag[:, 1], iso_mag[:, 3], '-', lw=4, color='blue')\nplt.plot(10**iso_mag_eq[:, 1], iso_mag_eq[:, 3], '--', lw=4, color='black')\n\nplt.grid()\nplt.xlim(2500., 8000.)\nplt.ylim(-2, 1.1)\nplt.xlabel('$T_{\\\\rm eff}\\ [K]$', fontsize=20)\nplt.ylabel('$\\\\log(L / L_{\\\\odot})$', fontsize=20)", "The interpolation appears to have worked well as there are no egregious discrepancies between the real and interpolated isochrones.\nWe can now analyze the properties of the radiative cores and the convective envelopes. 
Beginning with the radiative core, we can look as a function of stellar properties, how much of the total stellar mass is contained in the radiative core.", "# as a function of stellar mass\nplt.plot(iso_std_eq[:, 0], 1.0 - iso_std_eq[:, -1]/iso_std_eq[:, 0], \n '--', lw=3, color='#333333')\nplt.plot(iso_5my_eq[:, 0], 1.0 - iso_5my_eq[:, -1]/iso_5my_eq[:, 0], \n '-.', lw=3, color='#333333')\nplt.plot(iso_mag_eq[:, 0], 1.0 - iso_mag_eq[:, -1]/iso_mag_eq[:, 0], \n '-' , lw=4, color='#01a9db')\n\nplt.grid()\nplt.xlabel('${\\\\rm Stellar Mass}\\ [M_{\\\\odot}]$', fontsize=20)\nplt.ylabel('$M_{\\\\rm rad\\ core}\\ /\\ M_{\\\\star}$', fontsize=20)\n\n# as a function of effective temperature\nplt.plot(10**iso_std_eq[:, 1], 1.0 - iso_std_eq[:, -1]/iso_std_eq[:, 0], \n '--', lw=3, color='#333333')\nplt.plot(10**iso_5my_eq[:, 1], 1.0 - iso_5my_eq[:, -1]/iso_5my_eq[:, 0], \n '-.', lw=3, color='#333333')\nplt.plot(10**iso_mag_eq[:, 1], 1.0 - iso_mag_eq[:, -1]/iso_mag_eq[:, 0], \n '-' , lw=4, color='#01a9db')\n\nplt.grid()\nplt.xlim(3000., 7000.)\nplt.xlabel('${\\\\rm Effective Temperature}\\ [K]$', fontsize=20)\nplt.ylabel('$M_{\\\\rm rad\\ core}\\ /\\ M_{\\\\star}$', fontsize=20)", "Now let's look at the relative difference in radiative core mass as a function of these stellar properties.", "# as a function of stellar mass (note, there is a minus sign switch b/c we tabulate \n# convective envelope mass)\nplt.plot(iso_mag_eq[:, 0], (iso_mag_eq[:, -1] - iso_std_eq[:, -1]), \n '-' , lw=4, color='#01a9db')\nplt.plot(iso_mag_eq[:, 0], (iso_mag_eq[:, -1] - iso_5my_eq[:, -1]), \n '--' , lw=4, color='#01a9db')\n\nplt.grid()\nplt.xlabel('${\\\\rm Stellar Mass}\\ [M_{\\\\odot}]$', fontsize=20)\nplt.ylabel('$\\\\Delta M_{\\\\rm rad\\ core}\\ [M_{\\\\odot}]$', fontsize=20)", "Analysis", "# interpolate into the temperature domain\nTeffs = np.log10(np.arange(3050., 7000., 50.))\n\nicurve = interp1d(iso_std[:, 1], iso_std, axis=0, kind='linear')\niso_std_te = icurve(Teffs)\n\nicurve = 
interp1d(iso_5my[:, 1], iso_5my, axis=0, kind='linear')\niso_5my_te = icurve(Teffs)\n\nicurve = interp1d(iso_mag[:, 1], iso_mag, axis=0, kind='linear')\niso_mag_te = icurve(Teffs)\n\n# as a function of stellar mass \n# (note, there is a minus sign switch b/c we tabulate convective envelope mass)\n#\n# plotting: standard - magnetic where + implies \nplt.plot(10**Teffs, (iso_mag_te[:, 0] - iso_mag_te[:, -1] - \n iso_std_te[:, 0] + iso_std_te[:, -1]), \n '-' , lw=4, color='#01a9db')\nplt.plot(10**Teffs, (iso_mag_te[:, 0] - iso_mag_te[:, -1] - \n iso_5my_te[:, 0] + iso_5my_te[:, -1]), \n '--' , lw=4, color='#01a9db')\n\nnp.savetxt('../models/rad_core_comp.txt', \n np.column_stack((iso_std_te, iso_mag_te)), \n fmt=\"%10.6f\")\n\nnp.savetxt('../models/rad_core_comp_dage.txt', \n np.column_stack((iso_5my_te, iso_mag_te)), \n fmt=\"%10.6f\")\n\nplt.grid()\nplt.xlim(3000., 7000.)\nplt.xlabel('${\\\\rm Effective Temperature}\\ [K]$', fontsize=20)\nplt.ylabel('$\\\\Delta M_{\\\\rm rad\\ core}\\ [M_{\\\\odot}]$', fontsize=20)", "Stars are fully convective below 3500 K, regardless of whether there is magnetic inhibition of convection. On the other extreme, stars hotter than about 6500 K are approaching ignition of the CN-cycle, which coincides with the disappearnce of the outer convective envelope. However, delayed contraction means that stars of a given effective temperature have a higher mass in the magnetic case, which leads to a slight mass offset once the radiative core comprises nearly 100% of the star. Note that our use of the term \"radiative core\" is technically invalid in this regime due to the presence of a convective core." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ogoann/StatisticalMethods
examples/SDSScatalog/CorrFunc.ipynb
gpl-2.0
[ "\"Spatial Clustering\" - the Galaxy Correlation Function\n\n\nThe degree to which objects positions are correlated with each other - \"clustered\" - is of great interest in astronomy. \n\n\nWe expect galaxies to appear in groups and clusters, as they fall together under gravity: the statistics of galaxy clustering should contain information about galaxy evolution during hierarchical structure formation.\n\n\nLet's try and measure a clustering signal in our SDSS photometric object catalog.", "%load_ext autoreload\n%autoreload 2\n\nimport numpy as np\nimport SDSS\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport copy\n\n# We want to select galaxies, and then are only interested in their positions on the sky.\n\ndata = pd.read_csv(\"downloads/SDSSobjects.csv\",usecols=['ra','dec','u','g',\\\n 'r','i','size'])\n\n# Filter out objects with bad magnitude or size measurements:\ndata = data[(data['u'] > 0) & (data['g'] > 0) & (data['r'] > 0) & (data['i'] > 0) & (data['size'] > 0)]\n\n# Make size cuts, to exclude stars and nearby galaxies, and magnitude cuts, to get good galaxy detections:\ndata = data[(data['size'] > 0.8) & (data['size'] < 10.0) & (data['i'] > 17) & (data['i'] < 22)]\n\n# Drop the things we're not so interested in:\ndel data['u'], data['g'], data['r'], data['i'],data['size']\n\ndata.head()\n\nNgals = len(data)\nramin,ramax = np.min(data['ra']),np.max(data['ra'])\ndecmin,decmax = np.min(data['dec']),np.max(data['dec'])\nprint Ngals,\"galaxy-like objects in (ra,dec) range (\",ramin,\":\",ramax,\",\",decmin,\":\",decmax,\")\"", "The Correlation Function\n\n\nThe 2-point correlation function $\\xi(\\theta)$ is defined as \"the probability of finding two galaxies separated by an angular distance $\\theta$ with respect to that expected for a random distribution\" (Peebles 1980), and is an excellent summary statistic for quantifying the clustering of galaxies.\n\n\nThe simplest possible estimator for this excess probability is 
just \n$\\hat{\\xi}(\\theta) = \\frac{DD - RR}{RR}$, \nwhere $DD(\\theta) = N_{\\rm pairs}(\\theta) / N_D(N_D-1)/2$. Here, $N_D$ is the total number of galaxies in the dataset, and $N_{\\rm pairs}(\\theta)$ is the number of galaxy pairs with separation lying in a bin centered on $\\theta$. $RR(\\theta)$ is the same quantity computed in a \"random catalog,\" covering the same field of view but with uniformly randomly distributed positions.\n\n\nCorrelations between mock galaxies distributed uniformly randomly over the survey \"footprint\" helps account for spurious effects in the correlation function that might arise from weird survey area design.\n\n\nWe'll use Mike Jarvis' TreeCorr code (Jarvis et al 2004) to compute this correlation function estimator efficiently. You can read more about better estimators starting from the TreeCorr wiki.", "# !pip install --upgrade TreeCorr", "Random Catalogs\nFirst we'll need a random catalog. Let's make it the same size as the data one.", "random = pd.DataFrame({'ra' : ramin + (ramax-ramin)*np.random.rand(Ngals), 'dec' : decmin + (decmax-decmin)*np.random.rand(Ngals)})\n\nprint len(random), type(random)", "Now let's plot both catalogs, and compare.", "fig, ax = plt.subplots(nrows=1, ncols=2)\nfig.set_size_inches(15, 6)\nplt.subplots_adjust(wspace=0.2)\n \nrandom.plot(kind='scatter', x='ra', y='dec', ax=ax[0], title='Random')\nax[0].set_xlabel('RA / deg')\nax[0].set_ylabel('Dec. / deg')\n\ndata.plot(kind='scatter', x='ra', y='dec', ax=ax[1], title='Data')\nax[1].set_xlabel('RA / deg')\nax[1].set_ylabel('Dec. 
/ deg')", "Estimating $\\xi(\\theta)$", "import treecorr\n\nrandom_cat = treecorr.Catalog(ra=random['ra'], dec=random['dec'], ra_units='deg', dec_units='deg')\ndata_cat = treecorr.Catalog(ra=data['ra'], dec=data['dec'], ra_units='deg', dec_units='deg')\n\n# Set up some correlation function estimator objects:\n\nsep_units='arcmin'\nmin_sep=0.5\nmax_sep=10.0\nN = 7\nbin_size = np.log10(1.0*max_sep/min_sep)/(1.0*N)\n\ndd = treecorr.NNCorrelation(bin_size=bin_size, min_sep=min_sep, max_sep=max_sep, sep_units=sep_units, bin_slop=0.05/bin_size)\nrr = treecorr.NNCorrelation(bin_size=bin_size, min_sep=min_sep, max_sep=max_sep, sep_units=sep_units, bin_slop=0.05/bin_size)\n\n# Process the data:\ndd.process(data_cat)\nrr.process(random_cat)\n\n# Combine into a correlation function and its variance:\nxi, varxi = dd.calculateXi(rr)\n\nplt.figure(figsize=(15,8))\nplt.rc('xtick', labelsize=16) \nplt.rc('ytick', labelsize=16)\nplt.errorbar(np.exp(dd.logr),xi,np.sqrt(varxi),c='blue',linewidth=2)\n# plt.xscale('log')\nplt.xlabel('$\\\\theta / {\\\\rm arcmin}$',fontsize=20)\nplt.ylabel('$\\\\xi(\\\\theta)$',fontsize=20)\nplt.ylim([-0.1,0.2])\nplt.grid(True)", "Q: Are galaxies uniformly randomly distributed?\nDiscuss the clustering signal (or lack thereof) in the above plot with your neighbor. What would you want to do better, in a second pass at this?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
scotthuang1989/Python-3-Module-of-the-Week
data_structure/OrderedDict — Remember the Order Keys are Added to a Dictionary.ipynb
apache-2.0
[ "An OrderedDict is a dictionary subclass that remembers the order in which its contents are added.", "import collections\n\nprint('Regular dictionary:')\nd = {}\nd['a'] = 'A'\nd['b'] = 'B'\nd['c'] = 'C'\n\nfor k, v in d.items():\n print(k, v)\n\nprint('\\nOrderedDict:')\nd = collections.OrderedDict()\nd['a'] = 'A'\nd['b'] = 'B'\nd['c'] = 'C'\n\nfor k, v in d.items():\n print(k, v)", "A regular dict does not track the insertion order, and iterating over it produces the values in order based on how the keys are stored in the hash table, which is in turn influenced by a random value to reduce collisions. In an OrderedDict, by contrast, the order in which the items are inserted is remembered and used when creating an iterator.\nEquality\nA regular dict looks at its contents when testing for equality. An OrderedDict also considers the order in which the items were added.", "import collections\n\nprint('dict :', end=' ')\nd1 = {}\nd1['a'] = 'A'\nd1['b'] = 'B'\nd1['c'] = 'C'\n\nd2 = {}\nd2['c'] = 'C'\nd2['b'] = 'B'\nd2['a'] = 'A'\n\nprint(d1 == d2)\n\nprint('OrderedDict:', end=' ')\n\nd1 = collections.OrderedDict()\nd1['a'] = 'A'\nd1['b'] = 'B'\nd1['c'] = 'C'\n\nd2 = collections.OrderedDict()\nd2['c'] = 'C'\nd2['b'] = 'B'\nd2['a'] = 'A'\n\nprint(d1 == d2)", "In this case, since the two ordered dictionaries are created from values in a different order, they are considered to be different.\nReordering\nIt is possible to change the order of the keys in an OrderedDict by moving them to either the beginning or the end of the sequence using move_to_end().", "import collections\n\nd = collections.OrderedDict(\n [('a', 'A'), ('b', 'B'), ('c', 'C')]\n)\n\nprint('Before:')\nfor k, v in d.items():\n print(k, v)\n\nd.move_to_end('b')\n\nprint('\\nmove_to_end():')\nfor k, v in d.items():\n print(k, v)\n\nd.move_to_end('b', last=False)\n\nprint('\\nmove_to_end(last=False):')\nfor k, v in d.items():\n print(k, v)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
goddoe/CADL
session-5/session-5-part-2.ipynb
apache-2.0
[ "Session 5: Generative Networks\nAssignment: Generative Adversarial Networks, Variational Autoencoders, and Recurrent Neural Networks\n<p class=\"lead\">\n<a href=\"https://www.kadenze.com/courses/creative-applications-of-deep-learning-with-tensorflow/info\">Creative Applications of Deep Learning with Google's Tensorflow</a><br />\n<a href=\"http://pkmital.com\">Parag K. Mital</a><br />\n<a href=\"https://www.kadenze.com\">Kadenze, Inc.</a>\n</p>\n\nContinued from session-5-part-1.ipynb...\nTable of Contents\n<!-- MarkdownTOC autolink=\"true\" autoanchor=\"true\" bracket=\"round\" -->\n\nOverview\nLearning Goals\nPart 1 - Generative Adversarial Networks (GAN) / Deep Convolutional GAN (DCGAN)\nIntroduction\nBuilding the Encoder\nBuilding the Discriminator for the Training Samples\nBuilding the Decoder\nBuilding the Generator\nBuilding the Discriminator for the Generated Samples\nGAN Loss Functions\nBuilding the Optimizers w/ Regularization\nLoading a Dataset\nTraining\nEquilibrium\nPart 2 - Variational Auto-Encoding Generative Adversarial Network (VAEGAN)\nBatch Normalization\nBuilding the Encoder\nBuilding the Variational Layer\nBuilding the Decoder\nBuilding VAE/GAN Loss Functions\nCreating the Optimizers\nLoading the Dataset\nTraining\nPart 3 - Latent-Space Arithmetic\nLoading the Pre-Trained Model\nExploring the Celeb Net Attributes\nFind the Latent Encoding for an Attribute\nLatent Feature Arithmetic\nExtensions\nPart 4 - Character-Level Language Model\nPart 5 - Pretrained Char-RNN of Donald Trump\nGetting the Trump Data\nBasic Text Analysis\nLoading the Pre-trained Trump Model\nInference: Keeping Track of the State\nProbabilistic Sampling\nInference: Temperature\nInference: Priming\n\n\nAssignment Submission\n\n<!-- /MarkdownTOC -->", "# First check the Python version\nimport sys\nif sys.version_info < (3,4):\n print('You are running an older version of Python!\\n\\n',\n 'You should consider updating to Python 3.4.0 or',\n 'higher as the libraries built for 
this course',\n 'have only been tested in Python 3.4 and higher.\\n')\n print('Try installing the Python 3.5 version of anaconda'\n 'and then restart `jupyter notebook`:\\n',\n 'https://www.continuum.io/downloads\\n\\n')\n\n# Now get necessary libraries\ntry:\n import os\n import numpy as np\n import matplotlib.pyplot as plt\n from skimage.transform import resize\n from skimage import data\n from scipy.misc import imresize\n from scipy.ndimage.filters import gaussian_filter\n import IPython.display as ipyd\n import tensorflow as tf\n from libs import utils, gif, datasets, dataset_utils, nb_utils\nexcept ImportError as e:\n print(\"Make sure you have started notebook in the same directory\",\n \"as the provided zip file which includes the 'libs' folder\",\n \"and the file 'utils.py' inside of it. You will NOT be able\",\n \"to complete this assignment unless you restart jupyter\",\n \"notebook inside the directory created by extracting\",\n \"the zip file or cloning the github repo.\")\n print(e)\n\n# We'll tell matplotlib to inline any drawn figures like so:\n%matplotlib inline\nplt.style.use('ggplot')\n\n# Bit of formatting because I don't like the default inline code style:\nfrom IPython.core.display import HTML\nHTML(\"\"\"<style> .rendered_html code { \n padding: 2px 4px;\n color: #c7254e;\n background-color: #f9f2f4;\n border-radius: 4px;\n} </style>\"\"\")", "<style> .rendered_html code { \n padding: 2px 4px;\n color: #c7254e;\n background-color: #f9f2f4;\n border-radius: 4px;\n} </style>\n\n<a name=\"part-4---character-level-language-model\"></a>\nPart 4 - Character-Level Language Model\nWe'll now continue onto the second half of the homework and explore recurrent neural networks. We saw one potential application of a recurrent neural network which learns letter by letter the content of a text file. We were then able to synthesize from the model to produce new phrases. Let's try to build one. 
Replace the code below with something that loads your own text file or one from the internet. Be creative with this!\n<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>", "import tensorflow as tf\nfrom six.moves import urllib\nscript = 'http://www.awesomefilm.com/script/biglebowski.txt'\ntxts = []\nf, _ = urllib.request.urlretrieve(script, script.split('/')[-1])\nwith open(f, 'r') as fp:\n txt = fp.read()", "Let's take a look at the first part of this:", "txt[:100]", "We'll just clean up the text a little. This isn't necessary, but can help the training along a little. In the example text I provided, there is a lot of white space (those \\t's are tabs). I'll remove them. There are also repetitions of \\n, new lines, which are not necessary. The code below will remove the tabs, ending whitespace, and any repeating newlines. Replace this with any preprocessing that makes sense for your dataset. Try to boil it down to just the possible letters for what you want to learn/synthesize while retaining any meaningful patterns:", "txt = \"\\n\".join([txt_i.strip()\n for txt_i in txt.replace('\\t', '').split('\\n')\n if len(txt_i)])", "Now we can see how much text we have:", "len(txt)", "In general, we'll want as much text as possible. But I'm including this just as a minimal example so you can explore your own. Try making a text file and seeing the size of it. You'll want about 1 MB at least.\nLet's now take a look at the different characters we have in our file:", "vocab = list(set(txt))\nvocab.sort()\nprint(len(vocab))\nprint(vocab)", "And then create a mapping which can take us from the letter to an integer look up table of that letter (and vice-versa). To do this, we'll use an OrderedDict from the collections library. 
In Python 3.6, this is the default behavior of dict, but in earlier versions of Python, we'll need to be explicit by using OrderedDict.", "from collections import OrderedDict\n\nencoder = OrderedDict(zip(vocab, range(len(vocab))))\ndecoder = OrderedDict(zip(range(len(vocab)), vocab))\n\nencoder", "We'll store a few variables that will determine the size of our network. First, batch_size determines how many sequences at a time we'll train on. The seqence_length parameter defines the maximum length to unroll our recurrent network for. This is effectively the depth of our network during training to help guide gradients along. Within each layer, we'll have n_cell LSTM units, and n_layers layers worth of LSTM units. Finally, we'll store the total number of possible characters in our data, which will determine the size of our one hot encoding (like we had for MNIST in Session 3).", "# Number of sequences in a mini batch\nbatch_size = 100\n\n# Number of characters in a sequence\nsequence_length = 50\n\n# Number of cells in our LSTM layer\nn_cells = 128\n\n# Number of LSTM layers\nn_layers = 3\n\n# Total number of characters in the one-hot encoding\nn_chars = len(vocab)", "Let's now create the input and output to our network. We'll use placeholders and feed these in later. The size of these need to be [batch_size, sequence_length]. We'll then see how to build the network in between.\n<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>", "X = tf.placeholder(tf.int32, shape=..., name='X')\n\n# We'll have a placeholder for our true outputs\nY = tf.placeholder(tf.int32, shape=..., name='Y')", "The first thing we need to do is convert each of our sequence_length vectors in our batch to n_cells LSTM cells. We use a lookup table to find the value in X and use this as the input to n_cells LSTM cells. Our lookup table has n_chars possible elements and connects each character to n_cells cells. 
We create our lookup table using tf.get_variable and then the function tf.nn.embedding_lookup to connect our X placeholder to n_cells number of neurons.", "# we first create a variable to take us from our one-hot representation to our LSTM cells\nembedding = tf.get_variable(\"embedding\", [n_chars, n_cells])\n\n# And then use tensorflow's embedding lookup to look up the ids in X\nXs = tf.nn.embedding_lookup(embedding, X)\n\n# The resulting lookups are concatenated into a dense tensor\nprint(Xs.get_shape().as_list())", "Now recall from the lecture that recurrent neural networks share their weights across timesteps. So we don't want to have one large matrix with every timestep, but instead separate them. We'll use tf.split to split our [batch_size, sequence_length, n_cells] array in Xs into a list of sequence_length elements each composed of [batch_size, n_cells] arrays. This gives us sequence_length number of arrays of [batch_size, 1, n_cells]. We then use tf.squeeze to remove the 1st index corresponding to the singleton sequence_length index, resulting in simply [batch_size, n_cells].", "with tf.name_scope('reslice'):\n Xs = [tf.squeeze(seq, [1])\n for seq in tf.split(Xs, sequence_length, 1)]", "With each of our timesteps split up, we can now connect them to a set of LSTM recurrent cells. We tell the tf.contrib.rnn.BasicLSTMCell method how many cells we want, i.e. how many neurons there are, and we also specify that our state will be stored as a tuple. This state defines the internal state of the cells as well as the connection from the previous timestep. We can also pass a value for the forget_bias. Be sure to experiment with this parameter as it can significantly effect performance (e.g. Gers, Felix A, Schmidhuber, Jurgen, and Cummins, Fred. Learning to forget: Continual prediction with lstm. 
Neural computation, 12(10):2451–2471, 2000).", "cells = tf.contrib.rnn.BasicLSTMCell(num_units=n_cells, state_is_tuple=True, forget_bias=1.0)", "Let's take a look at the cell's state size:", "cells.state_size", "c defines the internal memory and h the output. We'll have as part of our cells, both an initial_state and a final_state. These will become important during inference and we'll see how these work more then. For now, we'll set the initial_state to all zeros using the convenience function provided inside our cells object, zero_state:", "initial_state = cells.zero_state(tf.shape(X)[0], tf.float32)", "Looking at what this does, we can see that it creates a tf.Tensor of zeros for our c and h states for each of our n_cells and stores this as a tuple inside the LSTMStateTuple object:", "initial_state", "So far, we have created a single layer of LSTM cells composed of n_cells number of cells. If we want another layer, we can use the tf.contrib.rnn.MultiRNNCell method, giving it our current cells, and a bit of pythonery to multiply our cells by the number of layers we want. We'll then update our initial_state variable to include the additional cells:", "cells = tf.contrib.rnn.MultiRNNCell(\n [cells] * n_layers, state_is_tuple=True)\ninitial_state = cells.zero_state(tf.shape(X)[0], tf.float32)", "Now if we take a look at our initial_state, we should see one LSTMStateTuple for each of our layers:", "initial_state", "So far, we haven't connected our recurrent cells to anything. Let's do this now using the tf.contrib.rnn.static_rnn method. We also pass it our initial_state variables. It gives us the outputs of the rnn, as well as their states after having been computed. Contrast that with the initial_state, which set the LSTM cells to zeros. After having computed something, the cells will all have a different value somehow reflecting the temporal dynamics and expectations of the next input. 
These will be stored in the state tensors for each of our LSTM layers inside a LSTMStateTuple just like the initial_state variable.\n```python\nhelp(tf.contrib.rnn.static_rnn)\nHelp on function static_rnn in module tensorflow.contrib.rnn.python.ops.core_rnn:\nstatic_rnn(cell, inputs, initial_state=None, dtype=None, sequence_length=None, scope=None)\n Creates a recurrent neural network specified by RNNCell cell.\nThe simplest form of RNN network generated is:\n\n state = cell.zero_state(...)\n outputs = []\n for input_ in inputs:\n output, state = cell(input_, state)\n outputs.append(output)\n return (outputs, state)\n\nHowever, a few other options are available:\n\nAn initial state can be provided.\nIf the sequence_length vector is provided, dynamic calculation is performed.\nThis method of calculation does not compute the RNN steps past the maximum\nsequence length of the minibatch (thus saving computational time),\nand properly propagates the state at an example's sequence length\nto the final state output.\n\nThe dynamic calculation performed is, at time t for batch row b,\n (output, state)(b, t) =\n (t &gt;= sequence_length(b))\n ? 
(zeros(cell.output_size), states(b, sequence_length(b) - 1))\n : cell(input(b, t), state(b, t - 1))\n\nArgs:\n cell: An instance of RNNCell.\n inputs: A length T list of inputs, each a `Tensor` of shape\n `[batch_size, input_size]`, or a nested tuple of such elements.\n initial_state: (optional) An initial state for the RNN.\n If `cell.state_size` is an integer, this must be\n a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.\n If `cell.state_size` is a tuple, this should be a tuple of\n tensors having shapes `[batch_size, s] for s in cell.state_size`.\n dtype: (optional) The data type for the initial state and expected output.\n Required if initial_state is not provided or RNN state has a heterogeneous\n dtype.\n sequence_length: Specifies the length of each sequence in inputs.\n An int32 or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`.\n scope: VariableScope for the created subgraph; defaults to \"RNN\".\n\nReturns:\n A pair (outputs, state) where:\n - outputs is a length T list of outputs (one for each input), or a nested\n tuple of such elements.\n - state is the final state\n\nRaises:\n TypeError: If `cell` is not an instance of RNNCell.\n ValueError: If `inputs` is `None` or an empty list, or if the input depth\n (column size) cannot be inferred from inputs via shape inference.\n\n```\nUse the help on the function tf.contrib.rnn.static_rnn to create the outputs and states variable as below. We've already created each of the variable you need to use:\n<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>", "outputs, state = tf.contrib.rnn.static_rnn(cell=..., inputs=..., initial_state=...)", "Let's take a look at the state now:", "state", "Our outputs are returned as a list for each of our timesteps:", "outputs", "We'll now stack all our outputs for every timestep. 
We can treat every observation at each timestep and for each batch using the same weight matrices going forward, since these should all have shared weights. Each timstep for each batch is its own observation. So we'll stack these in a 2d matrix so that we can create our softmax layer:", "outputs_flat = tf.reshape(tf.concat(values=outputs, axis=1), [-1, n_cells])", "Our outputs are now concatenated so that we have [batch_size * timesteps, n_cells]", "outputs_flat", "We now create a softmax layer just like we did in Session 3 and in Session 3's homework. We multiply our final LSTM layer's n_cells outputs by a weight matrix to give us n_chars outputs. We then scale this output using a tf.nn.softmax layer so that they become a probability by exponentially scaling its value and dividing by its sum. We store the softmax probabilities in probs as well as keep track of the maximum index in Y_pred:", "with tf.variable_scope('prediction'):\n W = tf.get_variable(\n \"W\",\n shape=[n_cells, n_chars],\n initializer=tf.random_normal_initializer(stddev=0.1))\n b = tf.get_variable(\n \"b\",\n shape=[n_chars],\n initializer=tf.random_normal_initializer(stddev=0.1))\n\n # Find the output prediction of every single character in our minibatch\n # we denote the pre-activation prediction, logits.\n logits = tf.matmul(outputs_flat, W) + b\n\n # We get the probabilistic version by calculating the softmax of this\n probs = tf.nn.softmax(logits)\n\n # And then we can find the index of maximum probability\n Y_pred = tf.argmax(probs, 1)", "To train the network, we'll measure the loss between our predicted outputs and true outputs. We could use the probs variable, but we can also make use of tf.nn.softmax_cross_entropy_with_logits which will compute the softmax for us. We therefore need to pass in the variable just before the softmax layer, denoted as logits (unscaled values). This takes our variable logits, the unscaled predicted outputs, as well as our true outputs, Y. 
Before we give it Y, we'll need to reshape our true outputs in the same way, [batch_size x timesteps, n_chars]. Luckily, tensorflow provides a convenience for doing this, the tf.nn.sparse_softmax_cross_entropy_with_logits function:\n```python\nhelp(tf.nn.sparse_softmax_cross_entropy_with_logits)\nHelp on function sparse_softmax_cross_entropy_with_logits in module tensorflow.python.ops.nn_ops:\nsparse_softmax_cross_entropy_with_logits(logits, labels, name=None)\n Computes sparse softmax cross entropy between logits and labels.\nMeasures the probability error in discrete classification tasks in which the\nclasses are mutually exclusive (each entry is in exactly one class). For\nexample, each CIFAR-10 image is labeled with one and only one label: an image\ncan be a dog or a truck, but not both.\n\n**NOTE:** For this operation, the probability of a given label is considered\nexclusive. That is, soft classes are not allowed, and the `labels` vector\nmust provide a single specific index for the true class for each row of\n`logits` (each minibatch entry). For soft softmax classification with\na probability distribution for each entry, see\n`softmax_cross_entropy_with_logits`.\n\n**WARNING:** This op expects unscaled logits, since it performs a softmax\non `logits` internally for efficiency. Do not call this op with the\noutput of `softmax`, as it will produce incorrect results.\n\nA common use case is to have logits of shape `[batch_size, num_classes]` and\nlabels of shape `[batch_size]`. But higher dimensions are supported.\n\nArgs:\n logits: Unscaled log probabilities of rank `r` and shape\n `[d_0, d_1, ..., d_{r-2}, num_classes]` and dtype `float32` or `float64`.\n labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-2}]` and dtype `int32` or\n `int64`. 
Each entry in `labels` must be an index in `[0, num_classes)`.\n Other values will result in a loss of 0, but incorrect gradient\n computations.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` of the same shape as `labels` and of the same type as `logits`\n with the softmax cross entropy loss.\n\nRaises:\n ValueError: If logits are scalars (need to have rank &gt;= 1) or if the rank\n of the labels is not equal to the rank of the labels minus one.\n\n```", "with tf.variable_scope('loss'):\n # Compute mean cross entropy loss for each output.\n Y_true_flat = tf.reshape(tf.concat(values=Y, axis=1), [-1])\n # logits are [batch_size x timesteps, n_chars] and\n # Y_true_flat are [batch_size x timesteps]\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y_true_flat, logits=logits)\n # Compute the mean over our `batch_size` x `timesteps` number of observations\n mean_loss = tf.reduce_mean(loss)", "Finally, we can create an optimizer in much the same way as we've done with every other network. Except, we will also \"clip\" the gradients of every trainable parameter. This is a hacky way to ensure that the gradients do not grow too large (the literature calls this the \"exploding gradient problem\"). However, note that the LSTM is built to help ensure this does not happen by allowing the gradient to be \"gated\". 
To learn more about this, please consider reading the following material:\nhttp://www.felixgers.de/papers/phd.pdf\nhttps://colah.github.io/posts/2015-08-Understanding-LSTMs/", "with tf.name_scope('optimizer'):\n optimizer = tf.train.AdamOptimizer(learning_rate=0.001)\n gradients = []\n clip = tf.constant(5.0, name=\"clip\")\n for grad, var in optimizer.compute_gradients(mean_loss):\n gradients.append((tf.clip_by_value(grad, -clip, clip), var))\n updates = optimizer.apply_gradients(gradients)", "Let's take a look at the graph:", "nb_utils.show_graph(tf.get_default_graph().as_graph_def())", "Below is the rest of code we'll need to train the network. I do not recommend running this inside Jupyter Notebook for the entire length of the training because the network can take 1-2 days at least to train, and your browser may very likely complain. Instead, you should write a python script containing the necessary bits of code and run it using the Terminal. We didn't go over how to do this, so I'll leave it for you as an exercise. 
The next part of this notebook will have you load a pre-trained network.", "with tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n\n cursor = 0\n it_i = 0\n while it_i < 500:\n Xs, Ys = [], []\n for batch_i in range(batch_size):\n if (cursor + sequence_length) >= len(txt) - sequence_length - 1:\n cursor = 0\n Xs.append([encoder[ch]\n for ch in txt[cursor:cursor + sequence_length]])\n Ys.append([encoder[ch]\n for ch in txt[cursor + 1: cursor + sequence_length + 1]])\n\n cursor = (cursor + sequence_length)\n Xs = np.array(Xs).astype(np.int32)\n Ys = np.array(Ys).astype(np.int32)\n\n loss_val, _ = sess.run([mean_loss, updates],\n feed_dict={X: Xs, Y: Ys})\n if it_i % 100 == 0:\n print(it_i, loss_val)\n\n if it_i % 500 == 0:\n p = sess.run(probs, feed_dict={X: np.array(Xs[-1])[np.newaxis]})\n ps = [np.random.choice(range(n_chars), p=p_i.ravel())\n for p_i in p]\n p = [np.argmax(p_i) for p_i in p]\n if isinstance(txt[0], str):\n print('original:', \"\".join(\n [decoder[ch] for ch in Xs[-1]]))\n print('synth(samp):', \"\".join(\n [decoder[ch] for ch in ps]))\n print('synth(amax):', \"\".join(\n [decoder[ch] for ch in p]))\n else:\n print([decoder[ch] for ch in ps])\n\n it_i += 1", "<a name=\"part-5---pretrained-char-rnn-of-donald-trump\"></a>\nPart 5 - Pretrained Char-RNN of Donald Trump\nRather than stick around to let a model train, let's now explore one I've trained for you Donald Trump. If you've trained your own model on your own text corpus then great! You should be able to use that in place of the one I've provided and still continue with the rest of the notebook. \nFor the Donald Trump corpus, there are a lot of video transcripts that you can find online. 
I've searched for a few of these, put them in a giant text file, made everything lowercase, and removed any extraneous letters/symbols to help reduce the vocabulary (not that it's not very large to begin with, ha).\nI used the code exactly as above to train on the text I gathered and left it to train for about 2 days. The only modification is that I also used \"dropout\" which you can see in the libs/charrnn.py file. Let's explore it now and we'll see how we can play with \"sampling\" the model to generate new phrases, and how to \"prime\" the model (a psychological term referring to when someone is exposed to something shortly before another event).\nFirst, let's clean up any existing graph:", "tf.reset_default_graph()", "<a name=\"getting-the-trump-data\"></a>\nGetting the Trump Data\nNow let's load the text. This is included in the repo or can be downloaded from:", "with open('trump.txt', 'r') as fp:\n txt = fp.read()", "Let's take a look at what's going on in here:", "txt[:100]", "<a name=\"basic-text-analysis\"></a>\nBasic Text Analysis\nWe can do some basic data analysis to get a sense of what kind of vocabulary we're working with. It's really important to look at your data in as many ways as possible. This helps ensure there isn't anything unexpected going on. Let's find every unique word he uses:", "words = set(txt.split(' '))\n\nwords", "Now let's count their occurrences:", "counts = {word_i: 0 for word_i in words}\nfor word_i in txt.split(' '):\n counts[word_i] += 1\ncounts", "We can sort this like so:", "[(word_i, counts[word_i]) for word_i in sorted(counts, key=counts.get, reverse=True)]", "As we should expect, \"the\" is the most common word, as it is in the English language: https://en.wikipedia.org/wiki/Most_common_words_in_English\n<a name=\"loading-the-pre-trained-trump-model\"></a>\nLoading the Pre-trained Trump Model\nLet's load the pretrained model. 
Rather than provide a tfmodel export, I've provided the checkpoint so you can also experiment with training it more if you wish. We'll rebuild the graph using the charrnn module in the libs directory:", "from libs import charrnn", "Let's get the checkpoint and build the model then restore the variables from the checkpoint. The only parameters of consequence are n_layers and n_cells which define the total size and layout of the model. The rest are flexible. We'll set the batch_size and sequence_length to 1, meaning we can feed in a single character at a time only, and get back 1 character denoting the very next character's prediction.", "ckpt_name = './trump.ckpt'\ng = tf.Graph()\nn_layers = 3\nn_cells = 512\nwith tf.Session(graph=g) as sess:\n model = charrnn.build_model(txt=txt,\n batch_size=1,\n sequence_length=1,\n n_layers=n_layers,\n n_cells=n_cells,\n gradient_clip=10.0)\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n if os.path.exists(ckpt_name):\n saver.restore(sess, ckpt_name)\n print(\"Model restored.\")", "Let's now take a look at the model:", "nb_utils.show_graph(g.as_graph_def())\n\nn_iterations = 100", "<a name=\"inference-keeping-track-of-the-state\"></a>\nInference: Keeping Track of the State\nNow recall from Part 4 when we created our LSTM network, we had an initial_state variable which would set the LSTM's c and h state vectors, as well as the final output state which was the output of the c and h state vectors after having passed through the network. When we input to the network some letter, say 'n', we can set the initial_state to zeros, but then after having input the letter n, we'll have as output a new state vector for c and h. On the next letter, we'll then want to set the initial_state to this new state, and set the input to the previous letter's output. 
That is how we ensure the network keeps track of time and knows what has happened in the past, and let it continually generate.", "curr_states = None\ng = tf.Graph()\nwith tf.Session(graph=g) as sess:\n model = charrnn.build_model(txt=txt,\n batch_size=1,\n sequence_length=1,\n n_layers=n_layers,\n n_cells=n_cells,\n gradient_clip=10.0)\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n if os.path.exists(ckpt_name):\n saver.restore(sess, ckpt_name)\n print(\"Model restored.\")\n \n # Get every tf.Tensor for the initial state\n init_states = []\n for s_i in model['initial_state']:\n init_states.append(s_i.c)\n init_states.append(s_i.h)\n \n # Similarly, for every state after inference\n final_states = []\n for s_i in model['final_state']:\n final_states.append(s_i.c)\n final_states.append(s_i.h)\n\n # Let's start with the letter 't' and see what comes out:\n synth = [[encoder[' ']]]\n for i in range(n_iterations):\n\n # We'll create a feed_dict parameter which includes what to\n # input to the network, model['X'], as well as setting\n # dropout to 1.0, meaning no dropout.\n feed_dict = {model['X']: [synth[-1]],\n model['keep_prob']: 1.0}\n \n # Now we'll check if we currently have a state as a result\n # of a previous inference, and if so, add to our feed_dict\n # parameter the mapping of the init_state to the previous\n # output state stored in \"curr_states\".\n if curr_states:\n feed_dict.update(\n {init_state_i: curr_state_i\n for (init_state_i, curr_state_i) in\n zip(init_states, curr_states)})\n \n # Now we can infer and see what letter we get\n p = sess.run(model['probs'], feed_dict=feed_dict)[0]\n \n # And make sure we also keep track of the new state\n curr_states = sess.run(final_states, feed_dict=feed_dict)\n \n # Find the most likely character\n p = np.argmax(p)\n \n # Append to string\n synth.append([p])\n \n # Print out the decoded letter\n print(model['decoder'][p], end='')\n sys.stdout.flush()", "<a 
name=\"probabilistic-sampling\"></a>\nProbabilistic Sampling\nRun the above cell a couple times. What you should find is that it is deterministic. We always pick the most likely character. But we can do something else which will make things less deterministic and a bit more interesting: we can sample from our probabilistic measure from our softmax layer. This means if we have the letter 'a' as 0.4, and the letter 'o' as 0.2, we'll have a 40% chance of picking the letter 'a', and 20% chance of picking the letter 'o', rather than simply always picking the letter 'a' since it is the most probable.", "curr_states = None\ng = tf.Graph()\nwith tf.Session(graph=g) as sess:\n model = charrnn.build_model(txt=txt,\n batch_size=1,\n sequence_length=1,\n n_layers=n_layers,\n n_cells=n_cells,\n gradient_clip=10.0)\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n if os.path.exists(ckpt_name):\n saver.restore(sess, ckpt_name)\n print(\"Model restored.\")\n \n # Get every tf.Tensor for the initial state\n init_states = []\n for s_i in model['initial_state']:\n init_states.append(s_i.c)\n init_states.append(s_i.h)\n \n # Similarly, for every state after inference\n final_states = []\n for s_i in model['final_state']:\n final_states.append(s_i.c)\n final_states.append(s_i.h)\n\n # Let's start with the letter 't' and see what comes out:\n synth = [[encoder[' ']]]\n for i in range(n_iterations):\n\n # We'll create a feed_dict parameter which includes what to\n # input to the network, model['X'], as well as setting\n # dropout to 1.0, meaning no dropout.\n feed_dict = {model['X']: [synth[-1]],\n model['keep_prob']: 1.0}\n \n # Now we'll check if we currently have a state as a result\n # of a previous inference, and if so, add to our feed_dict\n # parameter the mapping of the init_state to the previous\n # output state stored in \"curr_states\".\n if curr_states:\n feed_dict.update(\n {init_state_i: curr_state_i\n for (init_state_i, curr_state_i) in\n 
zip(init_states, curr_states)})\n \n # Now we can infer and see what letter we get\n p = sess.run(model['probs'], feed_dict=feed_dict)[0]\n \n # And make sure we also keep track of the new state\n curr_states = sess.run(final_states, feed_dict=feed_dict)\n \n # Now instead of finding the most likely character,\n # we'll sample with the probabilities of each letter\n p = p.astype(np.float64)\n p = np.random.multinomial(1, p.ravel() / p.sum())\n p = np.argmax(p)\n \n # Append to string\n synth.append([p])\n \n # Print out the decoded letter\n print(model['decoder'][p], end='')\n sys.stdout.flush()", "<a name=\"inference-temperature\"></a>\nInference: Temperature\nWhen performing probabilistic sampling, we can also use a parameter known as temperature which comes from simulated annealing. The basic idea is that as the temperature is high and very hot, we have a lot more free energy to use to jump around more, and as we cool down, we have less energy and then become more deterministic. We can use temperature by scaling our log probabilities like so:", "temperature = 0.5\ncurr_states = None\ng = tf.Graph()\nwith tf.Session(graph=g) as sess:\n model = charrnn.build_model(txt=txt,\n batch_size=1,\n sequence_length=1,\n n_layers=n_layers,\n n_cells=n_cells,\n gradient_clip=10.0)\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n if os.path.exists(ckpt_name):\n saver.restore(sess, ckpt_name)\n print(\"Model restored.\")\n \n # Get every tf.Tensor for the initial state\n init_states = []\n for s_i in model['initial_state']:\n init_states.append(s_i.c)\n init_states.append(s_i.h)\n \n # Similarly, for every state after inference\n final_states = []\n for s_i in model['final_state']:\n final_states.append(s_i.c)\n final_states.append(s_i.h)\n\n # Let's start with the letter 't' and see what comes out:\n synth = [[encoder[' ']]]\n for i in range(n_iterations):\n\n # We'll create a feed_dict parameter which includes what to\n # input to the network, 
model['X'], as well as setting\n # dropout to 1.0, meaning no dropout.\n feed_dict = {model['X']: [synth[-1]],\n model['keep_prob']: 1.0}\n \n # Now we'll check if we currently have a state as a result\n # of a previous inference, and if so, add to our feed_dict\n # parameter the mapping of the init_state to the previous\n # output state stored in \"curr_states\".\n if curr_states:\n feed_dict.update(\n {init_state_i: curr_state_i\n for (init_state_i, curr_state_i) in\n zip(init_states, curr_states)})\n \n # Now we can infer and see what letter we get\n p = sess.run(model['probs'], feed_dict=feed_dict)[0]\n \n # And make sure we also keep track of the new state\n curr_states = sess.run(final_states, feed_dict=feed_dict)\n \n # Now instead of finding the most likely character,\n # we'll sample with the probabilities of each letter\n p = p.astype(np.float64)\n p = np.log(p) / temperature\n p = np.exp(p) / np.sum(np.exp(p))\n p = np.random.multinomial(1, p.ravel() / p.sum())\n p = np.argmax(p)\n \n # Append to string\n synth.append([p])\n \n # Print out the decoded letter\n print(model['decoder'][p], end='')\n sys.stdout.flush()", "<a name=\"inference-priming\"></a>\nInference: Priming\nLet's now work on \"priming\" the model with some text, and see what kind of state it is in and leave it to synthesize from there. 
We'll do more or less what we did before, but feed in our own text instead of the last letter of the synthesis from the model.", "prime = \"obama\"\ntemperature = 1.0\ncurr_states = None\nn_iterations = 500\ng = tf.Graph()\nwith tf.Session(graph=g) as sess:\n model = charrnn.build_model(txt=txt,\n batch_size=1,\n sequence_length=1,\n n_layers=n_layers,\n n_cells=n_cells,\n gradient_clip=10.0)\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n if os.path.exists(ckpt_name):\n saver.restore(sess, ckpt_name)\n print(\"Model restored.\")\n \n # Get every tf.Tensor for the initial state\n init_states = []\n for s_i in model['initial_state']:\n init_states.append(s_i.c)\n init_states.append(s_i.h)\n \n # Similarly, for every state after inference\n final_states = []\n for s_i in model['final_state']:\n final_states.append(s_i.c)\n final_states.append(s_i.h)\n\n # Now we'll keep track of the state as we feed it one\n # letter at a time.\n curr_states = None\n for ch in prime:\n feed_dict = {model['X']: [[model['encoder'][ch]]],\n model['keep_prob']: 1.0}\n if curr_states:\n feed_dict.update(\n {init_state_i: curr_state_i\n for (init_state_i, curr_state_i) in\n zip(init_states, curr_states)})\n \n # Now we can infer and see what letter we get\n p = sess.run(model['probs'], feed_dict=feed_dict)[0]\n p = p.astype(np.float64)\n p = np.log(p) / temperature\n p = np.exp(p) / np.sum(np.exp(p))\n p = np.random.multinomial(1, p.ravel() / p.sum())\n p = np.argmax(p)\n \n # And make sure we also keep track of the new state\n curr_states = sess.run(final_states, feed_dict=feed_dict)\n \n # Now we're ready to do what we were doing before but with the\n # last predicted output stored in `p`, and the current state of\n # the model.\n synth = [[p]]\n print(prime + model['decoder'][p], end='')\n for i in range(n_iterations):\n\n # Input to the network\n feed_dict = {model['X']: [synth[-1]],\n model['keep_prob']: 1.0}\n \n # Also feed our current state\n 
feed_dict.update(\n {init_state_i: curr_state_i\n for (init_state_i, curr_state_i) in\n zip(init_states, curr_states)})\n \n # Inference\n p = sess.run(model['probs'], feed_dict=feed_dict)[0]\n \n # Keep track of the new state\n curr_states = sess.run(final_states, feed_dict=feed_dict)\n \n # Sample\n p = p.astype(np.float64)\n p = np.log(p) / temperature\n p = np.exp(p) / np.sum(np.exp(p))\n p = np.random.multinomial(1, p.ravel() / p.sum())\n p = np.argmax(p)\n \n # Append to string\n synth.append([p])\n \n # Print out the decoded letter\n print(model['decoder'][p], end='')\n sys.stdout.flush()", "<a name=\"assignment-submission\"></a>\nAssignment Submission\nAfter you've completed both notebooks, create a zip file of the current directory using the code below. This code will make sure you have included this completed ipython notebook and the following files named exactly as:\nsession-5/ \n session-5-part-1.ipynb \n session-5-part-2.ipynb \n vaegan.gif\n\nYou'll then submit this zip file for your third assignment on Kadenze for \"Assignment 5: Generative Adversarial Networks and Recurrent Neural Networks\"! If you have any questions, remember to reach out on the forums and connect with your peers or with me.\nTo get assessed, you'll need to be a premium student! This will allow you to build an online portfolio of all of your work and receive grades. If you aren't already enrolled as a student, register now at http://www.kadenze.com/ and join the #CADL community to see what your peers are doing! https://www.kadenze.com/courses/creative-applications-of-deep-learning-with-tensorflow/info\nAlso, if you share any of the GIFs on Facebook/Twitter/Instagram/etc..., be sure to use the #CADL hashtag so that other students can find your work!", "utils.build_submission('session-5.zip',\n ('vaegan.gif',\n 'session-5-part-1.ipynb',\n 'session-5-part-2.ipynb'))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tritemio/multispot_paper
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-27d.ipynb
mit
[ "Executed: Mon Mar 27 11:39:46 2017\nDuration: 7 seconds.\nusALEX-5samples - Template\n\nThis notebook is executed through 8-spots paper analysis.\nFor a direct execution, uncomment the cell below.", "ph_sel_name = \"None\"\n\ndata_id = \"27d\"\n\n# data_id = \"7d\"", "Load software and filenames definitions", "from fretbursts import *\n\ninit_notebook()\nfrom IPython.display import display", "Data folder:", "data_dir = './data/singlespot/'\n\nimport os\ndata_dir = os.path.abspath(data_dir) + '/'\nassert os.path.exists(data_dir), \"Path '%s' does not exist.\" % data_dir", "List of data files:", "from glob import glob\nfile_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f)\n## Selection for POLIMI 2012-11-26 datatset\nlabels = ['17d', '27d', '7d', '12d', '22d']\nfiles_dict = {lab: fname for lab, fname in zip(labels, file_list)}\nfiles_dict\n\ndata_id", "Data load\nInitial loading of the data:", "d = loader.photon_hdf5(filename=files_dict[data_id])", "Load the leakage coefficient from disk:", "leakage_coeff_fname = 'results/usALEX - leakage coefficient DexDem.csv'\nleakage = np.loadtxt(leakage_coeff_fname)\n\nprint('Leakage coefficient:', leakage)", "Load the direct excitation coefficient ($d_{exAA}$) from disk:", "dir_ex_coeff_fname = 'results/usALEX - direct excitation coefficient dir_ex_aa.csv'\ndir_ex_aa = np.loadtxt(dir_ex_coeff_fname)\n\nprint('Direct excitation coefficient (dir_ex_aa):', dir_ex_aa)", "Load the gamma-factor ($\\gamma$) from disk:", "gamma_fname = 'results/usALEX - gamma factor - all-ph.csv'\ngamma = np.loadtxt(gamma_fname)\n\nprint('Gamma-factor:', gamma)", "Update d with the correction coefficients:", "d.leakage = leakage\nd.dir_ex = dir_ex_aa\nd.gamma = gamma", "Laser alternation selection\nAt this point we have only the timestamps and the detector numbers:", "d.ph_times_t[0][:3], d.ph_times_t[0][-3:]#, d.det_t\n\nprint('First and last timestamps: {:10,} {:10,}'.format(d.ph_times_t[0][0], 
d.ph_times_t[0][-1]))\nprint('Total number of timestamps: {:10,}'.format(d.ph_times_t[0].size))", "We need to define some parameters: donor and acceptor ch, excitation period and donor and acceptor excitiations:", "d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)", "We should check if everithing is OK with an alternation histogram:", "plot_alternation_hist(d)", "If the plot looks good we can apply the parameters with:", "loader.alex_apply_period(d)\n\nprint('D+A photons in D-excitation period: {:10,}'.format(d.D_ex[0].sum()))\nprint('D+A photons in A-excitation period: {:10,}'.format(d.A_ex[0].sum()))", "Measurements infos\nAll the measurement data is in the d variable. We can print it:", "d", "Or check the measurements duration:", "d.time_max", "Compute background\nCompute the background using automatic threshold:", "d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7)\n\ndplot(d, timetrace_bg)\n\nd.rate_m, d.rate_dd, d.rate_ad, d.rate_aa", "Burst search and selection", "d.burst_search(L=10, m=10, F=7, ph_sel=Ph_sel('all'))\n\nprint(d.ph_sel)\ndplot(d, hist_fret);\n\n# if data_id in ['7d', '27d']:\n# ds = d.select_bursts(select_bursts.size, th1=20)\n# else:\n# ds = d.select_bursts(select_bursts.size, th1=30)\n\nds = d.select_bursts(select_bursts.size, add_naa=False, th1=30)\n\nn_bursts_all = ds.num_bursts[0]\n\ndef select_and_plot_ES(fret_sel, do_sel):\n ds_fret= ds.select_bursts(select_bursts.ES, **fret_sel)\n ds_do = ds.select_bursts(select_bursts.ES, **do_sel)\n bpl.plot_ES_selection(ax, **fret_sel)\n bpl.plot_ES_selection(ax, **do_sel) \n return ds_fret, ds_do\n\nax = dplot(ds, hist2d_alex, S_max_norm=2, scatter_alpha=0.1)\n\nif data_id == '7d':\n fret_sel = dict(E1=0.60, E2=1.2, S1=0.2, S2=0.9, rect=False)\n do_sel = dict(E1=-0.2, E2=0.5, S1=0.8, S2=2, rect=True) \n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\n \nelif data_id == '12d':\n fret_sel = dict(E1=0.30,E2=1.2,S1=0.131,S2=0.9, 
rect=False)\n do_sel = dict(E1=-0.4, E2=0.4, S1=0.8, S2=2, rect=False)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\n\nelif data_id == '17d':\n fret_sel = dict(E1=0.01, E2=0.98, S1=0.14, S2=0.88, rect=False)\n do_sel = dict(E1=-0.4, E2=0.4, S1=0.80, S2=2, rect=False)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\n\nelif data_id == '22d':\n fret_sel = dict(E1=-0.16, E2=0.6, S1=0.2, S2=0.80, rect=False)\n do_sel = dict(E1=-0.2, E2=0.4, S1=0.85, S2=2, rect=True)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) \n\nelif data_id == '27d':\n fret_sel = dict(E1=-0.1, E2=0.5, S1=0.2, S2=0.82, rect=False)\n do_sel = dict(E1=-0.2, E2=0.4, S1=0.88, S2=2, rect=True)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) \n\nn_bursts_do = ds_do.num_bursts[0]\nn_bursts_fret = ds_fret.num_bursts[0]\n\nn_bursts_do, n_bursts_fret\n\nd_only_frac = 1.*n_bursts_do/(n_bursts_do + n_bursts_fret)\nprint('D-only fraction:', d_only_frac)\n\ndplot(ds_fret, hist2d_alex, scatter_alpha=0.1);\n\ndplot(ds_do, hist2d_alex, S_max_norm=2, scatter=False);", "Donor Leakage fit", "bandwidth = 0.03\n\nE_range_do = (-0.1, 0.15)\nE_ax = np.r_[-0.2:0.401:0.0002]\n\nE_pr_do_kde = bext.fit_bursts_kde_peak(ds_do, bandwidth=bandwidth, weights='size', \n x_range=E_range_do, x_ax=E_ax, save_fitter=True)\n\nmfit.plot_mfit(ds_do.E_fitter, plot_kde=True, bins=np.r_[E_ax.min(): E_ax.max(): bandwidth])\nplt.xlim(-0.3, 0.5)\nprint(\"%s: E_peak = %.2f%%\" % (ds.ph_sel, E_pr_do_kde*100))", "Burst sizes", "nt_th1 = 50\n\ndplot(ds_fret, hist_size, which='all', add_naa=False)\nxlim(-0, 250)\nplt.axvline(nt_th1)\n\nTh_nt = np.arange(35, 120)\nnt_th = np.zeros(Th_nt.size)\nfor i, th in enumerate(Th_nt):\n ds_nt = ds_fret.select_bursts(select_bursts.size, th1=th)\n nt_th[i] = (ds_nt.nd[0] + ds_nt.na[0]).mean() - th\n\nplt.figure()\nplot(Th_nt, nt_th)\nplt.axvline(nt_th1)\n\nnt_mean = nt_th[np.where(Th_nt == nt_th1)][0]\nnt_mean", "Fret fit\nMax position of the Kernel Density Estimation (KDE):", 
"E_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, bandwidth=bandwidth, weights='size')\nE_fitter = ds_fret.E_fitter\n\nE_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])\nE_fitter.fit_histogram(mfit.factory_gaussian(center=0.5))\n\nE_fitter.fit_res[0].params.pretty_print()\n\nfig, ax = plt.subplots(1, 2, figsize=(14, 4.5))\nmfit.plot_mfit(E_fitter, ax=ax[0])\nmfit.plot_mfit(E_fitter, plot_model=False, plot_kde=True, ax=ax[1])\nprint('%s\\nKDE peak %.2f ' % (ds_fret.ph_sel, E_pr_fret_kde*100))\ndisplay(E_fitter.params*100)", "Weighted mean of $E$ of each burst:", "ds_fret.fit_E_m(weights='size')", "Gaussian fit (no weights):", "ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.03], weights=None)", "Gaussian fit (using burst size as weights):", "ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.005], weights='size')\n\nE_kde_w = E_fitter.kde_max_pos[0]\nE_gauss_w = E_fitter.params.loc[0, 'center']\nE_gauss_w_sig = E_fitter.params.loc[0, 'sigma']\nE_gauss_w_err = float(E_gauss_w_sig/np.sqrt(ds_fret.num_bursts[0]))\nE_gauss_w_fiterr = E_fitter.fit_res[0].params['center'].stderr\nE_kde_w, E_gauss_w, E_gauss_w_sig, E_gauss_w_err, E_gauss_w_fiterr", "Stoichiometry fit\nMax position of the Kernel Density Estimation (KDE):", "S_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, burst_data='S', bandwidth=0.03) #weights='size', add_naa=True)\nS_fitter = ds_fret.S_fitter\n\nS_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])\nS_fitter.fit_histogram(mfit.factory_gaussian(), center=0.5)\n\nfig, ax = plt.subplots(1, 2, figsize=(14, 4.5))\nmfit.plot_mfit(S_fitter, ax=ax[0])\nmfit.plot_mfit(S_fitter, plot_model=False, plot_kde=True, ax=ax[1])\nprint('%s\\nKDE peak %.2f ' % (ds_fret.ph_sel, S_pr_fret_kde*100))\ndisplay(S_fitter.params*100)\n\nS_kde = S_fitter.kde_max_pos[0]\nS_gauss = S_fitter.params.loc[0, 'center']\nS_gauss_sig = S_fitter.params.loc[0, 'sigma']\nS_gauss_err = float(S_gauss_sig/np.sqrt(ds_fret.num_bursts[0]))\nS_gauss_fiterr = 
S_fitter.fit_res[0].params['center'].stderr\nS_kde, S_gauss, S_gauss_sig, S_gauss_err, S_gauss_fiterr", "The Maximum likelihood fit for a Gaussian population is the mean:", "S = ds_fret.S[0]\nS_ml_fit = (S.mean(), S.std())\nS_ml_fit", "Computing the weighted mean and weighted standard deviation we get:", "weights = bl.fret_fit.get_weights(ds_fret.nd[0], ds_fret.na[0], weights='size', naa=ds_fret.naa[0], gamma=1.)\nS_mean = np.dot(weights, S)/weights.sum()\nS_std_dev = np.sqrt(\n np.dot(weights, (S - S_mean)**2)/weights.sum())\nS_wmean_fit = [S_mean, S_std_dev]\nS_wmean_fit", "Save data to file", "sample = data_id", "The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.", "variables = ('sample n_bursts_all n_bursts_do n_bursts_fret '\n 'E_kde_w E_gauss_w E_gauss_w_sig E_gauss_w_err E_gauss_w_fiterr '\n 'S_kde S_gauss S_gauss_sig S_gauss_err S_gauss_fiterr '\n 'E_pr_do_kde nt_mean\\n')", "This is just a trick to format the different variables:", "variables_csv = variables.replace(' ', ',')\nfmt_float = '{%s:.6f}'\nfmt_int = '{%s:d}'\nfmt_str = '{%s}'\nfmt_dict = {**{'sample': fmt_str}, \n **{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}}\nvar_dict = {name: eval(name) for name in variables.split()}\nvar_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\\n'\ndata_str = var_fmt.format(**var_dict)\n\nprint(variables_csv)\nprint(data_str)\n\n# NOTE: The file name should be the notebook name but with .csv extension\nwith open('results/usALEX-5samples-E-corrected-all-ph.csv', 'a') as f:\n f.seek(0, 2)\n if f.tell() == 0:\n f.write(variables_csv)\n f.write(data_str)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
WNoxchi/Kaukasos
quantum/openfermion-projectq-demo-codealong.ipynb
mit
[ "OpenFermion Project Q Demo\n\nWayne H Nixalo – 2018/6/26\nA codealong of the OpenFermion ProjectQ demo example.", "%matplotlib inline\n%reload_ext autoreload\n%autoreload 2", "Simulating a Variational Quantum Eigensolver using OpenFermion ProjectQ\n\nWe now demonstrate how one can use both OpenFermion and ProjectQ to run a simple VQE example using a Unitary Coupled Cluster ansatz. It demonstrates a simple way to evaluate the energy, optimize the energy wrt the ansatz and build the corresponding compiled quantum circuit. It utilizes OpenFermion to prepare the Hamiltonians as well as initial parameters and ProjectQ to build and simulate the circuit.", "import os\n\nimport numpy as np\nfrom scipy.optimize import minimize as scipy_minimize\n\nfrom openfermion.config import *\nfrom openfermionprojectq import *\n\nfrom openfermion.hamiltonians import MolecularData\nfrom openfermion.transforms import jordan_wigner # hi jordan\nfrom openfermion.utils import uccsd_singlet_paramsize\n\nfrom projectq.ops import X, All, Measure\nfrom projectq.backends import CommandPrinter, CircuitDrawer", "Here we load H$_2$ from a precomputed molecule file found in the test data directory, and initialize the ProjectQ circuit compiler to a standard setting thta uses a first-order Trotter decomposition to break up the exponentials of non-commuting operators.", "# Load the molecule\nfilename = os.path.join(DATA_DIRECTORY, 'H2_sto-3g_singlet_0.7414')\nmolecule = MolecularData(filename=filename)\n\n# Use a Jordan-Wiger encoding, and compress to remove 0 imaginary components\nqubit_hamiltonian = jordan_wigner(molecule.get_molecular_hamiltonian())\nqubit_hamiltonian.compress()\ncompiler_engine = uccsd_trotter_engine()", "wait where is it getting data_dir... how does it know where it is?", "DATA_DIRECTORY", "Ohhh. The package must an absolute path to its root folder when you build it. So if you move the library it you have to rebuild it. 
Got it.\n\nThe Variational Quantum Eigensolver (or VQE) works by parameterizeing a wavefunction $\\lvert Ψ(θ) \\big\\rangle$ through some quantum circuit and minimizing the nergy wrt that angle, which is defined by\n\n$$E(θ) = \\big\\langle Ψ(θ)\\lvert H \\lvert Ψ(θ) \\big\\rangle \\,\\,\\,\\,\\, (1)$$\n\nTo perform the VQE loop with a simple molecule, it helps to wrap the evaluation of the energy into a simple objective function that takes the parameters of the circuit and returns the enregy. Here we define that function using ProjectQ to handle the qubits and the simulation.", "def energy_objective(packed_amplitudes):\n \"\"\"Evaluate the energy of a UCCSD singlet wavefunction w/ packed_amplitudes\n Args:\n packed_amplitudes(ndarray): Compact array that stores the unique\n amplitudes for a UCCSD singlet wavefunction.\n Returns:\n energy(float): Energy corresponding to the given amplitudes\n \"\"\"\n os.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n \n # Set Jordan-Wigner initial state w/ correct number of electrons\n wavefunction = compiler_engine.allocate_qureg(molecule.n_qubits)\n for i in range(molecule.n_electrons):\n X | wavefunction[i]\n \n # Build the circuit and act it on the wavefunction\n evolution_operator = uccsd_singlet_evolution(packed_amplitudes,\n molecule.n_qubits,\n molecule.n_electrons)\n evolution_operator | wavefunction\n compiler_engine.flush()\n \n # Evaluate the energy and reset wavefunction\n energy = compiler_engine.backend.get_expectation_value(qubit_hamiltonian, wavefunction)\n All(Measure) | wavefunction\n compiler_engine.flush()\n return energy", "While we could plug this objective function into any optimizer, SciPy offers a convenient framework within the Python ecosystem. We'll choose as starting amplitudes the classical CCSD values that can be loaded from the molecule if desired. 
The optimal energy is found and compared to the exact values to verify that our simulation was successful.", "n_amplitudes = uccsd_singlet_paramsize(molecule.n_qubits, molecule.n_electrons)\ninitial_amplitudes = [0, 0.05677]\ninitial_energy = energy_objective(initial_amplitudes)\n\n# Run VQE Optimization to find new CCSD parameters\nopt_result = scipy_minimize(energy_objective, initial_amplitudes,\n method='CG', options={'disp':True})\n\nopt_energy, opt_amplitudes = opt_result.fun, opt_result.x\nprint(\"\\nOptimal UCCSD Singlet Energy: {}\".format(opt_energy))\nprint(\"Optimal UCCSD Singlet Amplitudes: {}\".format(opt_amplitudes))\nprint(\"Classical CCSD Energy: {} Hartrees\".format(molecule.ccsd_energy))\nprint(\"Exact FCI Energy: {} Hartrees\".format(molecule.fci_energy))\nprint(\"Initial Energy of UCCSD with CCSD amplitudes: {} Hartrees\".format(initial_energy))", "As we can see, the optimization terminates extremely quickly because the classical coupled cluster amplitudes were (for this molecule) already optimal. We can now use ProjectQ to compile this simulation circuit to a set of 2-body quantum gates.", "compiler_engine = uccsd_trotter_engine(CommandPrinter())\nwavefunction = compiler_engine.allocate_qureg(molecule.n_qubits)\nfor i in range(molecule.n_electrons):\n X | wavefunction[i]\n \n# Build the circuit and act it on the wavefunction\nevolution_operator = uccsd_singlet_evolution(opt_amplitudes,\n molecule.n_qubits,\n molecule.n_electrons)\nevolution_operator | wavefunction\ncompiler_engine.flush()", "I have no idea what I did, but it felt cool." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
CalebVuorinen/summerstudentProtoWork
functional/Example_01.ipynb
mit
[ "DataFrame: Functional Chains for TTrees in Python.\n<hr style=\"border-top-width: 4px; border-top-color: #359C38;\">\nThe DataFrame class brings the feature called functional chains with caching to trees. This is achieved in identifying different functions are creating lists of transformations. \nUsability is a key. Functional chains are a lot simpler way of creating histograms because the user doesn't need to create loops. DataFrame will do it for you.\nPreparation\nWe include ROOT, DataFrame and PyTreeReader class. DataFrame uses PyTreeReader for filling histograms and filtering results. All of the computing is mostly done by using PyTreeReader inside the DataFrame Class. Clearly this will be done in a better way now that the usage of PyTreeReader in ROOT is still unknown. PyTreeReader can be found from https://github.com/dpiparo/pytreereader", "import ROOT\nfrom PyTreeReader import PyTreeReader\nfrom functional import DataFrame\nfrom ROOT import TFile", "This is to get a tree from test data called cernstaff.root", "testFile = TFile('cernstaff.root')\ntestTree = testFile.Get('T')", "Here we create the DataFrame object", "dataFrame = DataFrame(testTree)", "As you can see, it also creates a PyTreeReader. 
This is why PyTreeReader is mandatory for the class\nTraditional read without cache", "%%time\ndataFrame.filter(lambda e : e.Children() > 4).head(5)", "Same but now first caching it and then rerunning the same", "dataFrame.resetcache()\n\n%%time\ndataFrame.filter(lambda e : e.Children() > 4).cache().head(5)", "Now rerunning it and using the cached results to print", "%%time\ndataFrame.filter(lambda e : e.Children() > 4).cache().filter(lambda e : e.Age() < 47).head(5)", "There is some caching with the files in the Swan service, but the point is that first and second run differ alot with their speed\nLets reset the cache by calling a function from the class", "dataFrame.resetcache()", "Now we can demonstrate different histograms and drawing them", "%%time\ndataFrame.filter(lambda e : e.Age() > 45).cache().histo('Age:Cost').Draw('COLZ')\nROOT.gPad.Draw()", "Rerun the same analysis, compare the time", "%%time\ndataFrame.filter(lambda e : e.Age() > 45).cache().histo('Age:Cost').Draw('COLZ')\nROOT.gPad.Draw()", "Lets add one more filter after the cache and see how it differs...", "%%time\ndataFrame.filter(lambda e : e.Age() > 45).cache().filter(lambda e: e.Cost() > 8500).histo('Age:Cost').Draw('COLZ')\nROOT.gPad.Draw()", "What can be done more?\nThis is the first implementation of the class and functional chains.\nUsability can be improved with adding more and more transformations and actions to \nA lot can be achieved with seizable performance improvements with using the PyTreeReader.\nHowever, there are some minor flaws in the class:\n - Reading more complex trees might need a different approach\n - If PyTreeReader is changed not to use brackets to handle the entries, this program crashes\n - Map() and FlatMap() functions have a skeleton ready, but it has to be figured out how and where new tree should be read to the PyTreeReader\n - TEntryList usage can be optimized more\n - This uses the RDD idea, so it uses the functions one by one, if there are 3 filters in a row 
it could run all these at the same time -> this way it doesnt have to go through the loop seperately for each of them.\n - Transformations after cache() are not working properly if there is more than 1 of them.\n - This Class is in Python and it should be converted to C++ when its possible\n - It has some glitches here when reading values the first time but when its done second time it works\nRemember that this is a prototype, it will need optimizing and improvements" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
graphistry/pygraphistry
demos/more_examples/graphistry_features/encodings-icons.ipynb
bsd-3-clause
[ "Icons encodings tutorial\nSee the examples below for common ways to map data to node icon in Graphistry.\nYou can add a main icon. The glyph system supports text, icons, flags, and images, as well as multiple mapping and style controls. When used with column type, the icon will also appear in the legend. \nIcons are often used with node color, label, size, and badges to provide more visual information. Most encodings work both for points and edges. The PyGraphistry Python client makes it easier to use the URL settings API and the REST upload API. For dynamic control, you can use also use the JavaScript APIs. \nSetup\nMode api=3 is recommended. It is required for complex_encodings (ex: .encode_point_size(...)). Mode api=1 works with the simpler .bind(point_size='col_a') form.", "# ! pip install --user graphistry\nimport graphistry\n\n# To specify Graphistry account & server, use:\n# graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com')\n# For more options, see https://github.com/graphistry/pygraphistry#configure\n\ngraphistry.__version__\n\nimport datetime, pandas as pd\ne_df = pd.DataFrame({\n 's': ['a', 'b', 'c', 'a', 'b', 'c', 'a', 'd', 'e'],\n 'd': ['b', 'c', 'a', 'b', 'c', 'a', 'c', 'e', 'd'],\n 'time': [datetime.datetime(1987, 10, 1), datetime.datetime(1987, 10, 2), datetime.datetime(1987, 10, 3),\n datetime.datetime(1988, 10, 1), datetime.datetime(1988, 10, 2), datetime.datetime(1988, 10, 3),\n datetime.datetime(1989, 10, 1), datetime.datetime(1989, 10, 2), datetime.datetime(1989, 10, 3)]\n})\nn_df = pd.DataFrame({\n 'n': ['a', 'b', 'c', 'd', 'e'],\n 'score': [ 1, 30, 50, 70, 90 ],\n 'palette_color_int32': pd.Series(\n [0, 1, 2, 3, 4],\n dtype='int32'),\n 'hex_color_int64': pd.Series(\n [0xFF000000, 0xFFFF0000, 0xFFFFFF00, 0x00FF0000, 0x0000FF00],\n dtype='int64'),\n 'type': ['mac', 'macbook', 'mac', 'macbook', 'sheep'],\n 'assorted': ['Canada', 'mac', 'macbook', 'embedded_smile', 'external_logo'],\n 
'origin': ['Canada', 'England', 'Russia', 'Mexico', 'China']\n \n})\n\ng = graphistry.edges(e_df, 's', 'd').nodes(n_df, 'n')", "Icons as categorical mappings + glyph types\nThe most common form is mapping distinct values to icons. \nGraphistry supports built-in + custom glyphs:\n\nBuilt-in general glyphs: Use values from Font Awesome 4 or, more explicitly, fa-thename\nBuilt-in flag icons: Use ISO3611-Alpha-2 values\nCustom image URL\nCustom image data URI (embedded)", "g.encode_point_icon(\n 'assorted',\n shape=\"circle\", #clip excess\n categorical_mapping={\n 'macbook': 'laptop', #https://fontawesome.com/v4.7.0/icons/\n 'Canada': 'flag-icon-ca', #ISO3611-Alpha-2: https://github.com/datasets/country-codes/blob/master/data/country-codes.csv\n 'embedded_smile': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAARkAAAC0CAMAAACXO6ihAAAAgVBMVEUAAAD////T09P4+PiYmJihoaHf39/7+/vm5uYLCwsRERHy8vJ+fn7v7+83Nze2trbZ2dlRUVFGRka9vb1xcXEYGBhbW1tWVlbq6uq7u7usrKxgYGA8PDwWFhbNzc14eHhpaWmoqKiPj48vLy8oKCggICCIiIiUlJRBQUFKSkokJCTGfDo0AAAOxklEQVR4nO1d6YKyOgxFNkFEUFwQFFEUdd7/AS9L06ZlkZm5I37i+acUSA9dkjRNJal/JPNYjfxRBlvW1guzb3leBPupJo8wLGfat0wvgaOrjEQo0b1vsfrGTKvSUiLtW7RekaRNvGRQV32L1x9it5mX0Wh86lu+3uCM24jJEPYtYT+468Kg6xuGz3cuu28Ze0FqYA7c4/Wwud3WUy3ihpq+pewBB9yTnPWMXjivfURYjxL2hIDV3j+KFx3Wwwan8TFixl5VpdtvswuWHEXGKOhBuD4xZS0mntRcPxvRYT1Lkvt0OSwLiql38qy+xP65Ar0K5nRWCgas5dZgQvWYtkEkdDzPG5gSTGeeoHkMmRDzWx+SFhx3aDGhRbXg+GmC9Y0E+pKbNJa5IfXYf6Js/eIEVW4mRuIMhKH0pztM2MvmMjtMzEh+nnC9QiP1jb6ayzgcM0PpTsSOVA4tZfQhMgNmQWsX4dwTI/1ZsvUL8G7GbYV8jplhuGjupNJKayl+7en8JNn6hUpq67WWWmJitk8SrWfArPNAR0Hdybg9R7KecQYVbtFebsuI2TxHsr5xh1mnvSEwTc9qUZTfCiHRZqwGf1WJxB7YvCQhM7utzczQ1BQ/SbDesezCDLYN5k8TrWdQZprNyT0mRm/tde8E2psaF9hmnJbX4vR7M2zo2NrQnea8MTmYARi1iKj2uhBMMx7QAiVbmnSqHSXmXHkZjAEFpF1ZtR3hUlgNpqlvWO+JBLle3DW6EMqWyMuwmOEdmXYabubzTZjy3ciFYXg4A3AOMWBRqUQwOmfCjPLA7nwzpNVOwyOV9p3cW+8HtZWXKFN0DqQZGX2L+mRMtGZe3CI4GqZ2rW9Rn45rIy+lN6aj4+8dsa6Lj9bjS3k1AUV5MOYkRhjhNaWxLmtsyfLuyZHjOMdTXZjaAHBPtSCSM3ialg5ren4M85xk+FY82uoTvVaDS1z4MO
y2FfE3xuoWp6qqaiVU7TQNb+V4k1ATQhvcALRfOrJsWLzJrdiGHMmOGiKf1qDiGs93zX+0pYfCH4zjXIqvxmM+EIYy1MRRjW+mFcOwG9Z6y07KBoj+wHfEWvT9ftpMgUnw3X5U4u3XFMSNlARjX/e2GVyjibe4b8n/GBVHja9H14Wgxu3n6dbV+SQA2/deutxvBVq2aksU0QkpO9Z7O272HsdLdHigvSGXznuPMgmek6wILXObl2S2u1VtatAFlfh5UvaAL0QMzhoyWV4D2bdqx1g1shU7Ct7c04e6ko5jaWZkQcWu61tmuAzfPTgYbdbmd1LeYdhp2a/xzqBbnEa2EHwFoUeVve3DwJkGQlfmX2hMw1rRpmCjb0WBIZxZLdvC3hg0WK9mmCVX3PdWchuwAp1NqTaMBTDTg1z9YwomUI0rAaIBhuB/qYCaS1bNRVi3fW/DqAFzaDJx9doKvBLPFuolcABdrmYXytJqbk7vD+gwdRrLEfTip0v1CiCVrwv1ncGsNZzlJASTVL5uVzIoOv7u6WK9AEJS+7otpmCAD9NogtDOmqEE9gi2bv9/XwTN7QLMKXlwoQ4FnMY2swQf+EDtbND/K1sK6MQ0TGWG7RfURbcl3ZAwqOAYhITUfywkjKZB98ZgI/GAAZdbA6AG+JsvJrWBLmVvGTUTlRIzSP9DCZax04XmsWZrLNaAI4MXLL5BMZw4uasyS3YwTMcMoG0Xz7Dz+M/qg2by+Sq49C1cv9g0MeMMnBghWRNrMQOeligOdpUYf7iKDMbGE4lxBpK16SH2sY7Cy8bRepCLkg0w1Uh2DUOPInWgScfbcFkshpLi64MPPvjggw8++OCDDz744IMPPvjggw8++OCDDz744IM/wWTF0FrwHERe/ByZXgNbxQa07vc6Fou90ZunPsDAqVZaikGgljycEJtuzFxo7OdwNnJ3Y+ZOywzlQFMuqUgLMwdaZiCHU2ZII5ZmsLnUENuMeTlPrYfMnCkzncaZxNd14w12dpgddmlD0KPcab9Gse/lDbIRdmHGTIssI1634HtlQMxI0nyapnE3beY4GhQz3bH2/4iZr83ykGE5b0m2tciKLDeVeMxbXNwZtpwceAmLW/EmUp6ZW/72uGP0XhIWL7zj0WdGdqq2MbMp7ur2DoDmRb4yHo8VxZA9Jy7/3J1UQP7PMtKzIooRBZiCdeDp9ji/1dY9TyV1ozeq+W6LtRPZ5a0aqztm5hS5+dtt2UPxwXP2EBX3p+lWtgtZ9chLIT50Cfsxo2txg5Y9aEZv13KJr5FR3nXMnhayZ5Paxuwfuj/6HAjB3YpV2HAHtvPGkxKPRfJaNF717gtZfi3jlL13z8rakrlFZey4yswdJVpU2L5lljAMn5sR2viN4zJboWFXkg1P8Z6qUcrtCIlMrIST8H6UuA30SvQA9MopOxwng7fBebEV0ozPlZjwHFnd9uyXfRMyasOWHcrMkk/tGpktzHzVvPEq3WqkCHlmQvzxZU4JJ8ygw7fK04C+6s/IUXhmZG4PDmHmLFduy6FxzCjiYSqQzYkyI+5G0JqZ2dclXD6yFFoIc44Zh/s83ZhpODxIYEZIuV8yU3eAzEhkpgr9xjNTefWlkZna87GOtbvKFhwzfGfrxAzK/qfngJx3OTPLmr0l5cWCGfb9jOJWlMhqX+n2mNpTlRnbMFDXbWQGfupemgYukfUo1bRda4cNUgEyx0N1nMmZgaxkI8Ur9zLeSH1zZmZTVcPtwgucUpiCGdqijFMh9+VIap/3hvSqoa/iBupJY0T6e4EZVwtXyXIL7BHz8a4GHrxjxnGllTNAaBBmrk4QwOPdoEA+myWnq4Y+rr0NiBmbvWCtasAnYSZk/+TMgPhjmpVBZcwUwzP9lO5yBc05Z+YMnPmgIEBRMk7MqEhBkVjmwtrAjGcmIolnoHI6NQZWEb4DCthwOSDM5ICxRtBnWEeL5hM4d7ikHjbpsa1nS3YeGx2xWSYLjWdmDhOHXo
gfUmbW8MYY7hSYgWQ8LBXGkfuHMqNDGtMV+QOlYoQvxzFDk0tgZsD4FJiZU2L2rHZyMf1Bf2HMEDU6ZwYYRcf5NTFTDg4kw86KfWD2XIGZCSnAdvXPYYLQOWbYzlvSndC5txwzE+jpcLVoAYrahRmSKrQrMzTJHXpcAzNEmjg/C0bPVY4xX66ZGZSmCOppccyw63I3ZmgrXJ7SdLrcdWHGK4emoJC/bGVtzNygM6Gd5A3MkL69uu1myfnC3G0oOchjZo7kL9+sZ8ZrZ4bNQV41JdYDZkj1z4X8q4fMwG0+8n80MCOm1AQ1CLkhHzMD05ny9TNm0HQnT2OenW7MYLQxA6OzjFa6OjJTHWY6MLODrjn/GTP8vmY3wPt2/19mYKbDSa47MgMNGyUueMzMhPReZfMzZiTh8AwF7Wl+OWbQCNWBGTjZ9qfMzCrnihggwKswQ6eJ7zFj/JIZaVMxtuzwT5lBvrS/ZEaC3nT7KTPSLhUPT9P/ghkYgW3kAezGjPkjZjaQ1HX/Y2YyxWGq85au8wfMUNUZHSB/7MQM9UCgxImPmYHvXShHP2VGyt2w24iZ5tHld8ywI35iG5jZgXBo+oOD0Tsyg5IqPmYGTP8mHbgrMxnOMVVuSgfqz5mJqLIKpzkb0oQTtcSoGzNgV3xH07tAF4x+xswsiFzbkol/3gT3y/jwO2Ys8MpT+Qx05DWsBl9gNn7EDPjLdDatpcg/U8sM9STFP2NmWrzAoCsXnE71fWZuY3y/hNybGTN3egZQWnS2TQSlHzFzgQ/GVD3hTPUKM3TUHp1/yEzRKHXKDLxwipkRcqa2MJNQD1Ph9dgwp17uuaLCjg1HDVC6MmAmVOqZAVt75MKMP0fezlpmqBOp/KcmL3uVGei0CWKG2ktwgE+MmZH5lUT4uy4zEm0jlh5F1M9LmBH1bYGZVQKi6bNFZqSix1L3s15M+UlIbzUWixliZuQu57fkNqcj5jiviXmmaQazJxcN9rIA/4x6u+XzjXmhp96Gi90KBrKg/BYJ+AijYuWTvj/YTcxZKBf0r2jCVG+XyS+sjx+azpUVPOQ8Cr9a6jE3sG3rMjb+Z+z8mKPKXKgF/PwYkAn6w5DRqoa3ynh12A3ZkzMLc+NQp29+HPA1q6vj0TZs2caJDvGuli87UtFKfyTVQEa+4+TX8sMdPSrk2PZdWWg4l8oajWIxZqRjPTN+puKY4rKJhXOoNyzHlJjwzIgPMYUFtWVlwcRB00MJD/mkMZSyCZ3FaqbUe0ph88xIc6HR2Gn5KUpmpBR1sNHYIaNBwYy4WsGdFSPWjlvUMRuZGRceCIf/s5YZoTlvG5gBLVxcdHKllVjeF0eaBVfCvUkcM9LX1iXc+d4SmoIfPmKm0hhdJFsjM+Vw+UNmasYFdkbWTmjhVmw+ZEa60cVLK8o68IpnJnvoIb2q6ikNWSepbTMWfyLBmavfdodWvpp6k1vaIaZwKGUXZjzpXl3UjpBds+HFle8TkUmxNxV3pUXERxrn4/gejzMiyKxu5A7QwJU5iO5Xc+HB4qMem9KcFsQjcHSkAhpLmB007slupoXG/D/XPPCD+ydTnFaLqz5mDUGJQi6J2OVAFzrHbnyRJrz0svvwEA6y6kxUlFVyx1YJId7tmrjsNj0etWVN9jekz2w0x3GC6f+TNvqyWJ6O2TsPdYFM5iF7VXDqnDv2Mptj0cnUn1kv5tVxPJmLFCUf+PcBuHX+mZfCwgmcyPXx9zqWIh/pvIxsDVhZrCSZ/zZenhnSQHDnIHPKoY4Zzhb5Ff4VZlDsIKz6bGqY2YDW+fsDK/4VZtisAiamsaPMGNQBDbP7/3CWXfKPMEPlm4ORlk9hVP93r/E6VJlj/rdHKd3mB1CtDe00fclza8EGVaJ0vY6P1BQoViMqlhGB89scrOIDXzELcFhXcxhbGpjRf33GlPjEVzyBqIGZclauZ8b/fdv/d5nZksqLMdI5L9v/4a3/AjMLt+q2c5nSG6p8qGykrl
se1hniG19xnMksMy3CLcOmmwNKmLv5Oj7k0frTeN22H+M72PBYv+LclONrsQnjqVpW/kbV4f8AHefeSC51gZgAAAAASUVORK5CYII=',\n 'external_logo': 'https://awsmp-logos.s3.amazonaws.com/4675c3b9-6053-4a8c-8619-6519b83bbbfd/536ec8b5c79de08fcac1086fdf74f91b.png'\n },\n default_mapping=\"question\").plot()\n", "Icons as continuous mappings and text\nYou can also use value ranges to pick the glyph, and use text as the glyph", "g.encode_point_icon(\n 'score',\n as_text=True,\n continuous_binning=[\n [33, 'low'],\n [66, 'mid'],\n [200, 'high']\n ]).plot()\n", "Special continuous bins\n\nFor values bigger than the last bin, use None\nFor nulls, use the default mapping", "g.encode_point_icon(\n 'score',\n as_text=True,\n continuous_binning=[\n [33, 'low'],\n [66, 'mid'],\n [None, 'high']\n ],\n default_mapping='?'\n).plot()\n", "Flag inference\nThe below code generates ISO3166 mappings from different conventions to Alpha-2", "codes = pd.read_csv('https://raw.githubusercontent.com/datasets/country-codes/master/data/country-codes.csv')\ncodes.columns\n\ncountry_to_iso_flag = {\n o['CLDR display name']: 'flag-icon-' + o['ISO3166-1-Alpha-2'].lower()\n for o in codes[['CLDR display name', 'ISO3166-1-Alpha-2']].dropna().to_dict('records')\n}\n\ng.encode_point_icon(\n 'origin',\n shape=\"circle\",\n categorical_mapping=country_to_iso_flag,\n default_mapping=\"question\").plot()\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
evelynegroen/evelynegroen.github.io
Code/SSCC_LCA_evelynegroen.ipynb
mit
[ "Procedure: Global sensitivity analysis for matrix-based LCA\nMethod: Squared Spearman correlation coefficients (SSCC) & MCS: Monte Carlo simulation (normal random)\nAuthor: Evelyne Groen {evelyne [dot] groen [at] gmail [dot] com}\nLast update: 25/10/2016", "import numpy as np \n\nA_det = np.matrix('10 0; -2 100') #A-matrix\nB_det = np.matrix('1 10') #B-matrix\nf = np.matrix('1000; 0') #Functional unit vector f\n\ng_LCA = B_det * A_det.I * f \nprint(\"The deterministic result is:\", g_LCA[0,0]) ", "Step 1: Uncertainty propagation\nMonte Carlo simulation using normal distribution functions for all input parameters\nThe mean values are equal to the initial values of A and B. \nThe standard deviation equals 5% of the mean of A and B.", "N = 1000 #Sample size\nCV = 0.05 #Coefficient of variation (CV = sigma/mu)\n\nimport random\n\nA1 = [random.gauss(A_det[0,0], CV*A_det[0,0]) for i in range(N)]\nA3 = [random.gauss(A_det[1,0], CV*A_det[1,0]) for i in range(N)]\nA4 = [random.gauss(A_det[1,1], CV*A_det[1,1]) for i in range(N)]\n\nB1 = [random.gauss(B_det[0,0], CV*B_det[0,0]) for i in range(N)]\nB2 = [random.gauss(B_det[0,1], CV*B_det[0,1]) for i in range(N)]\n\n\nAs = [np.matrix([[A1[i], 0],[A3[i], A4[i]]]) for i in range(N)]\nBs = [np.matrix([[B1[i], B2[i]]]) for i in range(N)]\n\nf = np.matrix('1000; 0')\n\ngs = [B * A.I * f for A, B in zip(As, Bs)]\n\ng_list =[g[0,0] for g in gs]\n\nimport statistics as stats\nvar_g = stats.variance(g_list)\nprint(\"The output variance equals:\", var_g)\n", "Step 2: Calculate the squared correlation coefficients", "#Reshape the data\ng_list = np.reshape([g[0,0] for g in gs], (N,1))\nAs_list = np.reshape(As, (N,4))\nBs_list = np.reshape(Bs, (N,2))\n\nPs_list = np.concatenate((np.ones((N,1)), As_list[:,:1], As_list[:,2:], Bs_list), axis=1)\n\nfrom scipy.stats import rankdata\nimport statistics as stats\n\n#Transform to rankdata\nPs_rank = [rankdata(Ps_list[:,k]) for k in range(1,6)]\ng_rank = [rankdata(g_list[:,0])]\n\ncovar = 
[np.cov(Ps_rank[k],g_rank[0]) for k in range(0,5)]\n\nstd_rank = np.std(Ps_rank, axis = 1)\nstd_rank_g = np.std(g_rank)\n\nSSCC = [(covar[k][0,1] / (std_rank[k] * std_rank_g))**2 for k in range(0,5)]\nprint(\"squared Spearman correlation coefficients:\", SSCC)\n", "Visualize", "import matplotlib.pyplot as plt\nimport matplotlib.pyplot as plt\n\nSSCC_procent = [SSCC[k] * 100 for k in range(0,5)]\n\nx_label=[ 'A(1,1)', 'A(2,1)', 'A(2,2)', 'B(1,1)', 'B(1,2)']\nx_pos = range(5)\nplt.bar(x_pos, SSCC_procent, align='center')\nplt.xticks(x_pos, x_label)\nplt.title('Global sensitivity analysis: squared Spearman correlation coefficients')\nplt.ylabel('SSCC (%)')\nplt.xlabel('Parameter')\nplt.show('Figure 1')\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
wikistat/Apprentissage
ExemplesJouet/Apprent-Python-Blobs.ipynb
gpl-3.0
[ "<center>\n<a href=\"http://www.insa-toulouse.fr/\" ><img src=\"http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/logo-insa.jpg\" style=\"float:left; max-width: 120px; display: inline\" alt=\"INSA\"/></a> \n<a href=\"http://wikistat.fr/\" ><img src=\"http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/wikistat.jpg\" style=\"float:right; max-width: 250px; display: inline\" alt=\"Wikistat\"/></a>\n</center>\nScénarios d'Apprentissage Statistique\nExemple \"Jouet\": Discrimination de blobs de <a href=\"http://scikit-learn.org/stable/#\"><img src=\"http://scikit-learn.org/stable/_static/scikit-learn-logo-small.png\" style=\"max-width: 100px; display: inline\" alt=\"R\"/></a>\nRésumé: Les méthodes de discrimination sont comparées sur un jeu de données fictif obtenu par la commande make_blobs de Scikit-learn. L'objectif est de mettre en évidence le rôle des paramètres de complexité de différentes méthodes (régression logistique, k-nn, réseaux de neurones, arbre de décision, bagging, svm) et de comparer les formes spécifiques des frontières estimées par chacune d'elle. Cela permet d'illustrer les principales méthodes d'apprentissage de Scikit-learn\n1 Introduction\nLes données se présentent sous la forme de nuages de points dans R2 plus présentant des groupes plu sou moins distincts ou mélangés. L'objectif est d'apprendre ces données très particulières afin de discriminer les deux classes. Les données étant simplement dans R2, il est facile de prévoir la classe de chaque point du plan et ainsi de visualiser la frontière entre les prévisions des deux classes. 
L'intérêt est de représenter ainsi facilement le rôle jouer par les paramètres de complexité de chaque méthode et de comparer les formes des frontières obtenues et donc la plus ou moins bonne adéquation d'une méthode à la spécificité de ces données simulées.", "%matplotlib inline\nfrom matplotlib import pyplot as plt\n# option d'impression\nimport numpy as np\nnp.set_printoptions(precision=3)", "2 Génération des données\nGénération aléatoire de 20 groupes de nuages de 1000 points dans R2. Le générateur de nombres aléatoires est initialisés par souci de reproductibilité. Les points sont séparés en deux classes.", "from sklearn.datasets import make_blobs\n\nX, y = make_blobs(n_samples=1000, centers=20, random_state=123)\nlabels = [\"b\", \"r\"]\ny = np.take(labels, (y < 10))\nprint(X)\nprint(y[:5])", "Représentation des données dans le plan.", "plt.figure()\nplt.scatter(X[:, 0], X[:, 1], c=y)\nplt.xlim(-10, 10)\nplt.ylim(-10, 10)\nplt.show()", "3 Les classifieurs de Scikit-learn\nTous les algorithmes et méthodes de Scikit-learn se présentent sous la forme d'une combinaison de 3 \"interfaces\":\n* estimation pour ajuster des modèles,\n* prévision pour de nouvelles observations,\n* transformation pour convertir des données.\nCelles-ci peuvent être facilement combinées et enchaînées en un pipeline.\n3.1 k plus proches voisins\nEstimation", "# Importation de la fonction\nfrom sklearn.neighbors import KNeighborsClassifier \n# Détermination des paramètres et définition de l'estimateur\nknnF = KNeighborsClassifier(n_neighbors=5)\n\n# Apprentissage ou estimation du modèle sur l'échantillon d'apprentissage \n# mais il ne se passe rien dans le cas de *k*-nn avant en dehors de la phase de prévision. 
\nknnF.fit(X, y)\n# Les options de cet algorithmes:", "Prévision\nPrévision de la classe des 5 premiers points", "print(knnF.predict(X[:5])) ", "Ou de la probabilité des classes.", "print(knnF.predict_proba(X[:5]))", "La fonction définie ci-dessous prévoit chaque point d'une grille du plan et trace un graphe de contour des probabilités.", "def plot_surface(clf, X, y, xlim=(-10, 10), ylim=(-10, 10), n_steps=250):\n fig = plt.figure()\n xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], n_steps), np.linspace(ylim[0], ylim[1], n_steps))\n if hasattr(clf, \"decision_function\"):\n z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])\n else:\n z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1] \n z = z.reshape(xx.shape)\n plt.contourf(xx, yy, z, alpha=0.8, cmap=plt.cm.RdBu_r)\n plt.scatter(X[:, 0], X[:, 1], c=y)\n plt.xlim(*xlim)\n plt.ylim(*ylim)\n plt.show()\n\nknnF = KNeighborsClassifier(n_neighbors=70)\nknnF.fit(X, y)\nplot_surface(knnF, X, y)", "Ce qui donne une bonne idée de la forme des frontières estimées entre les classes.\nFaire varier le paramère n_neighbors (1 à 500) et observer l'impact sur la forme des classes.\n3.2 Arbre binaire de décision\nLes frontières sont construites à partir de parallèles aux axes. Faire varier la profondeur maximale max_depth de l'arbre.", "from sklearn.tree import DecisionTreeClassifier \ntreeF = DecisionTreeClassifier(max_depth=10)\ntreeF.fit(X, y)\nplot_surface(treeF, X, y)", "3.3 Forêts aléatoires\nObtenues par agrégation d'arbres, une forêt arrondit les frontières. Avec seulement 2 variables (dans R2), cela revient à faire du bagging. Faire variaer le nombre d'arbres: n_estimators (1 à 20), de la forêt.", "from sklearn.ensemble import RandomForestClassifier \nrfF = RandomForestClassifier(n_estimators=200)\nrfF.fit(X, y)\nplot_surface(rfF, X, y)", "3.4 Séparateur à Vaste Marge (SVM)\nSVM linéaire, pas très intéressant sur ces données, puis gaussien. 
Faire varier la pénalisation C et l'étendue du noyau gaussien.", "# Noyau linéaire\nfrom sklearn.svm import SVC\nsvmF = SVC(kernel=\"linear\",C=1) # try kernel=\"rbf\" instead\nsvmF.fit(X, y)\nplot_surface(svmF, X, y)", "Faire varier C de 10-5 à 10^5, gamma de 0.001 à 100", "# Noyau gaussien avec valeurs par défaut de C et gamma\nfrom sklearn.svm import SVC\nsvmF = SVC(kernel=\"rbf\", C=1, gamma='auto') \nsvmF.fit(X, y)\nplot_surface(svmF, X, y)", "3.5 Perceptron multicouches\nFaire varier le nombre de couches cachées, le nombe de neurones sur ces couches, la pénalisation l2 alpha.", "# Trois couches de 100 neurones avec alpha par défaut\nfrom sklearn.neural_network import MLPClassifier\nnnetF = MLPClassifier(hidden_layer_sizes=(100, 100, 100),alpha=0.0001)\nnnetF.fit(X, y)\nplot_surface(nnetF, X, y)", "3.6 Processus gaussien\nConsulter la doc en ligne.", "from sklearn.gaussian_process import GaussianProcessClassifier\npgF = GaussianProcessClassifier()\npgF.fit(X, y)\nplot_surface(pgF, X, y) ", "Conclusion:\nQ Quelles sont les méthodes ou modèles les plus adaptés à ces données? Est-ce généralisable à d'autres?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ogaway/Matching-Market
Comparison.ipynb
gpl-3.0
[ "Compare the three matching mechanisms\n\nCompare the three matching mechanisms. \n1. BOS (list length is three)\n2. SOSM (list length is two) + Additional BOS (list length is one)\n3. SOSM (list length is three)\n\n1) Features\nBefore comparing the two mechanisms, a summary of them is described below.", "%matplotlib inline\nimport matchfuncs as mf\n\nprop_num = 5\nresp_num = 3\nprop_prefs = [\n [2, 0, 1],\n [2, 1, 0],\n [1, 2, 0],\n [1, 2, 0],\n [1, 2, 0]\n]\nresp_prefs = [\n [4, 3, 1, 2, 0],\n [1, 0, 4, 3, 2],\n [1, 3, 0, 2, 4]\n]\nprop_caps = [1, 3, 2, 2, 1]\nresp_caps = [2, 2, 4]\nlist_length = 3", "1.1) BOS", "prop_matched, resp_matched, prop_indptr, resp_indptr = mf.BOS(prop_prefs, resp_prefs, resp_caps, prop_caps, list_length)\nprint('prop_matched = ' + str(prop_matched))\nprint('resp_matched = ' + str(resp_matched))\nprint('prop_indptr = ' + str(prop_indptr))\nprint('resp_indptr = ' + str(resp_indptr))\nmf.Graph(prop_matched, resp_matched, prop_indptr, resp_indptr)", "1.2) SOSM + AddBOS", "prop_matched, resp_matched, prop_indptr, resp_indptr = mf.DA(prop_prefs, resp_prefs, resp_caps, prop_caps, list_length-1)\nprint('prop_matched = ' + str(prop_matched))\nprint('resp_matched = ' + str(resp_matched))\nprint('prop_indptr = ' + str(prop_indptr))\nprint('resp_indptr = ' + str(resp_indptr))\nmf.Graph(prop_matched, resp_matched, prop_indptr, resp_indptr)\n\nprop_matched, resp_matched, prop_indptr, resp_indptr = mf.AddBOS(prop_prefs, resp_prefs, prop_matched, resp_matched, resp_caps, prop_caps)\nprint('prop_matched = ' + str(prop_matched))\nprint('resp_matched = ' + str(resp_matched))\nprint('prop_indptr = ' + str(prop_indptr))\nprint('resp_indptr = ' + str(resp_indptr))\nmf.Graph(prop_matched, resp_matched, prop_indptr, resp_indptr)", "1.3) SOSM", "prop_matched, resp_matched, prop_indptr, resp_indptr = mf.DA(prop_prefs, resp_prefs, resp_caps, prop_caps, resp_num)\nprint('prop_matched = ' + str(prop_matched))\nprint('resp_matched = ' + 
str(resp_matched))\nprint('prop_indptr = ' + str(prop_indptr))\nprint('resp_indptr = ' + str(resp_indptr))\nmf.Graph(prop_matched, resp_matched, prop_indptr, resp_indptr)", "2) Comparison\nEvaluate Axis is below;\n 1. Stability\n 2. Truth-telling\n 3. Efficiency\n 4. Fairness\n 5. Feasibility \n2.1) truth-telling circumstances\nThe result is \n|| BOS | DAAdd | DA |\n|:-----:|:-----------:|:------------:|:------------:|\n|Stability| 1 | 0 | 0 |\n|Truth-telling| 5 | 5 | 5 |\n|Efficiency| 0.733, 2.167 | 0.9, 1.333 | 0.9, 1.333|\n|Fairness| 0, 0 | 0, 0 | 0, 0|\n|Feasibility| 9 | 13 | 15|", "# BOS\nmf.Comp('BOS', prop_prefs, resp_prefs, resp_caps, prop_caps, list_length)\n\n# DAAdd\nmf.Comp('DAAdd', prop_prefs, resp_prefs, resp_caps, prop_caps, list_length)\n\n# DA\nmf.Comp('DA', prop_prefs, resp_prefs, resp_caps, prop_caps, resp_num)", "2.2) Nash Equilibria\nThe result is \n|| BOS | DAAdd | DA |\n|:-----:|:-----------:|:------------:|:------------:|\n|Nash| 2768 | 3136 | 3672 |\n|Stability| 1.236 | 0.737 | 0.0 |\n|Truth-telling| 1.116 | 1.083 | 1.02 |\n|Efficiency| 0.769, 1.854 | 0.807, 1.626 | 0.923, 1.333|\n|Fairness| 0.0, 0.501 | 0.0, 0.25 | 0.0, 0.0|\n|Feasibility| 8.696 | 12.319 | 15.0|", "# BOS\nmf.NashComp('BOS', prop_prefs, resp_prefs, resp_caps, prop_caps, list_length)\n\n# DAAdd\nmf.NashComp('DAAdd', prop_prefs, resp_prefs, resp_caps, prop_caps, list_length)\n\n# DA\nmf.NashComp('DA', prop_prefs, resp_prefs, resp_caps, prop_caps, resp_num)", "2.3) Common-Value Model\nUtility Function for the i-th student is given as follows. \n$U_{i} = \\alpha CV_{j} + (1-\\alpha) PV_{ij}$ \n$CV_{j}$ means a common value to the j-th seminar. (popularity etc.)\n$PV_{ij}$ means the i-th student's private value to the j-th seminar. 
\nThese are uniformly distributed over [0, 1), and the lower j is, the higher CV is.\n$\\alpha$ is a parameter, default set is 0.3.\nUtility Function for the j-th seminar is given as follows.\n$U_{j} = \\beta CV_{i} + (1-\\beta) PV_{ji}$ \n$CV_{i}$ means a common value to the i-th student. (grade etc.)\n$PV_{ji}$ means the j-th seminar's private value to the i-th student. \nThese are uniformly distributed over [0, 1), and the lower i is, the higher CV is.\n$\\beta$ is a parameter, default set is also 0.3.\nSuppose that the preferences of the students and seminars are decided by these utility functions, \nand the number of students is 345, that of seminars is 38.\nHalf of the seminars have capacity 6, while the other half have capacity 13.\nAll students have 2 capacity.", "prop_num = 9\nresp_num = 6\nalpha = 0.3\nbeta = 0.3\nprop_caps = [2 for i in range(prop_num)]\nresp_caps = [2, 2, 2, 4, 4, 4]\nlist_length = 3\n\nprop_prefs, resp_prefs = mf.MakeCVprefs(prop_num, resp_num, alpha, beta)\n\nprint('prop_prefs =')\nprint(prop_prefs)\nprint('\\nresp_prefs = ')\nprint(resp_prefs)\nprint('\\nprop_caps = ')\nprint(prop_caps)\nprint('\\nresp_caps = ')\nprint(resp_caps)\n\nresult1 = mf.Comp('BOS', prop_prefs, resp_prefs, resp_caps, prop_caps, list_length)\nresult2 = mf.Comp('DAAdd', prop_prefs, resp_prefs, resp_caps, prop_caps, list_length)\nresult3 = mf.Comp('DA', prop_prefs, resp_prefs, resp_caps, prop_caps)\n\nprint('Justified Envy')\nprint('1: %s \\n2: %s \\n3: %s' % (result1[0], result2[0], result3[0]))\nprint('Truth-telling')\nprint('1: %s \\n2: %s \\n3: %s' % (result1[1], result2[1], result3[1]))\nprint('Efficiency of the students')\nprint('1: %s \\n2: %s \\n3: %s' % (result1[2], result2[2], result3[2]))\nprint('Efficiency of the seminars')\nprint('1: %s \\n2: %s \\n3: %s' % (result1[3], result2[3], result3[3]))\nprint('None Zemi')\nprint('1: %s \\n2: %s \\n3: %s' % (result1[4], result2[4], result3[4]))\nprint('Vacant Caps')\nprint('1: %s \\n2: %s \\n3: %s' % 
(result1[5], result2[5], result3[5]))\nprint('Interviews')\nprint('1: %s \\n2: %s \\n3: %s' % (result1[6], result2[6], result3[6]))", "Therefore, the result is \n|| BOS | DAAdd | DA |\n|:-----:|:-----------:|:------------:|:------------:|\n|Stability| 5 | 2 | 0 |\n|Truth-telling| 9 | 9 | 9 |\n|Efficiency| 1.389, 3.833 | 1.722, 4.0 | 1.556, 3.625|\n|Fairness| 0, 0 | 0, 2 | 0, 0|\n|Feasibility| 20 | 24 | 54 |" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
metpy/MetPy
v0.9/_downloads/804652c055dcb34bc8b8482ecfaf9d26/cross_section.ipynb
bsd-3-clause
[ "%matplotlib inline", "Cross Section Analysis\nThe MetPy function metpy.interpolate.cross_section can obtain a cross-sectional slice through\ngridded data.", "import cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport xarray as xr\n\nimport metpy.calc as mpcalc\nfrom metpy.cbook import get_test_data\nfrom metpy.interpolate import cross_section", "Getting the data\nThis example uses NARR reanalysis data for 18 UTC 04 April 1987 from NCEI\n(https://www.ncdc.noaa.gov/data-access/model-data).\nWe use MetPy's CF parsing to get the data ready for use, and squeeze down the size-one time\ndimension.", "data = xr.open_dataset(get_test_data('narr_example.nc', False))\ndata = data.metpy.parse_cf().squeeze()\nprint(data)", "Define start and end points:", "start = (37.0, -105.0)\nend = (35.5, -65.0)", "Get the cross section, and convert lat/lon to supplementary coordinates:", "cross = cross_section(data, start, end)\ncross.set_coords(('lat', 'lon'), True)\nprint(cross)", "For this example, we will be plotting potential temperature, relative humidity, and\ntangential/normal winds. 
And so, we need to calculate those, and add them to the dataset:", "temperature, pressure, specific_humidity = xr.broadcast(cross['Temperature'],\n cross['isobaric'],\n cross['Specific_humidity'])\n\ntheta = mpcalc.potential_temperature(pressure, temperature)\nrh = mpcalc.relative_humidity_from_specific_humidity(specific_humidity, temperature, pressure)\n\n# These calculations return unit arrays, so put those back into DataArrays in our Dataset\ncross['Potential_temperature'] = xr.DataArray(theta,\n coords=temperature.coords,\n dims=temperature.dims,\n attrs={'units': theta.units})\ncross['Relative_humidity'] = xr.DataArray(rh,\n coords=specific_humidity.coords,\n dims=specific_humidity.dims,\n attrs={'units': rh.units})\n\ncross['u_wind'].metpy.convert_units('knots')\ncross['v_wind'].metpy.convert_units('knots')\ncross['t_wind'], cross['n_wind'] = mpcalc.cross_section_components(cross['u_wind'],\n cross['v_wind'])\n\nprint(cross)", "Now, we can make the plot.", "# Define the figure object and primary axes\nfig = plt.figure(1, figsize=(16., 9.))\nax = plt.axes()\n\n# Plot RH using contourf\nrh_contour = ax.contourf(cross['lon'], cross['isobaric'], cross['Relative_humidity'],\n levels=np.arange(0, 1.05, .05), cmap='YlGnBu')\nrh_colorbar = fig.colorbar(rh_contour)\n\n# Plot potential temperature using contour, with some custom labeling\ntheta_contour = ax.contour(cross['lon'], cross['isobaric'], cross['Potential_temperature'],\n levels=np.arange(250, 450, 5), colors='k', linewidths=2)\ntheta_contour.clabel(theta_contour.levels[1::2], fontsize=8, colors='k', inline=1,\n inline_spacing=8, fmt='%i', rightside_up=True, use_clabeltext=True)\n\n# Plot winds using the axes interface directly, with some custom indexing to make the barbs\n# less crowded\nwind_slc_vert = list(range(0, 19, 2)) + list(range(19, 29))\nwind_slc_horz = slice(5, 100, 5)\nax.barbs(cross['lon'][wind_slc_horz], cross['isobaric'][wind_slc_vert],\n cross['t_wind'][wind_slc_vert, wind_slc_horz],\n 
cross['n_wind'][wind_slc_vert, wind_slc_horz], color='k')\n\n# Adjust the y-axis to be logarithmic\nax.set_yscale('symlog')\nax.set_yticklabels(np.arange(1000, 50, -100))\nax.set_ylim(cross['isobaric'].max(), cross['isobaric'].min())\nax.set_yticks(np.arange(1000, 50, -100))\n\n# Define the CRS and inset axes\ndata_crs = data['Geopotential_height'].metpy.cartopy_crs\nax_inset = fig.add_axes([0.125, 0.665, 0.25, 0.25], projection=data_crs)\n\n# Plot geopotential height at 500 hPa using xarray's contour wrapper\nax_inset.contour(data['x'], data['y'], data['Geopotential_height'].sel(isobaric=500.),\n levels=np.arange(5100, 6000, 60), cmap='inferno')\n\n# Plot the path of the cross section\nendpoints = data_crs.transform_points(ccrs.Geodetic(),\n *np.vstack([start, end]).transpose()[::-1])\nax_inset.scatter(endpoints[:, 0], endpoints[:, 1], c='k', zorder=2)\nax_inset.plot(cross['x'], cross['y'], c='k', zorder=2)\n\n# Add geographic features\nax_inset.coastlines()\nax_inset.add_feature(cfeature.STATES.with_scale('50m'), edgecolor='k', alpha=0.2, zorder=0)\n\n# Set the titles and axes labels\nax_inset.set_title('')\nax.set_title('NARR Cross-Section \\u2013 {} to {} \\u2013 Valid: {}\\n'\n 'Potential Temperature (K), Tangential/Normal Winds (knots), '\n 'Relative Humidity (dimensionless)\\n'\n 'Inset: Cross-Section Path and 500 hPa Geopotential Height'.format(\n start, end, cross['time'].dt.strftime('%Y-%m-%d %H:%MZ').item()))\nax.set_ylabel('Pressure (hPa)')\nax.set_xlabel('Longitude (degrees east)')\nrh_colorbar.set_label('Relative Humidity (dimensionless)')\n\nplt.show()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
philmui/datascience
lecture07.big.data/lecture07.3.trends.ipynb
mit
[ "import pandas as pd\n%pylab inline\n\ndf = pd.read_csv(\"data/eu_trade_sums.csv\")\n\ndf.head(4)\n\ndf.dtypes\n\ndf = df.set_index('geo')", "We are only interested the year range from 2002 - 2006", "yrs = [str(yr) for yr in range(2002, 2016)]", "Let's filter out the following types of record:\n1. Export only\n2. Partners are those with the UK and outside the EU28", "export_df = df[(df['trade_type'] == 'Export') &\n (df['partner'] == 'EXT_EU28')\n ].loc[['EU28', 'UK']][yrs]\n\nexport_df.head(4)", "Let's transpoe this to get 2 columns of series data:", "export_df = export_df.T\n\nexport_df.head(4)", "Let's rename the columns to clarify these columns related to export from these entities:", "export_df = export_df.rename(columns={'EU28': 'EU28_TO_EXT', 'UK': 'UK_TO_EXT'})\n\nexport_df.head(4)", "Now, let's get the columns from UK and EU28 to those partners inside EU28", "int_df = df[(df['trade_type'] == 'Export') &\n (df['partner'] == 'EU28')\n ].loc[['EU28', 'UK']][yrs]\n\nint_df.head(4)\n\nint_df = int_df.T\n\nint_df.head(4)", "Let's now combine these 2 new columns to the exports to outside UK and EU28", "export_df = pd.concat([export_df, int_df], axis=1)\n\nexport_df.head(4)\n\nexport_df = export_df.rename(columns={'EU28': 'EU28_TO_INT', \n 'UK' : 'UK_TO_INT'})\n\nexport_df.head(4)", "Trends\nLet's now plot to see any trends", "export_df.plot(legend=False)\n\nexport_df.plot()\n\nexport_df[['UK_TO_EXT', 'UK_TO_INT']].plot()", "Interactive Plot", "from bokeh.plotting import figure, output_file, show\nfrom bokeh.layouts import gridplot\n\nTOOLS = 'resize,pan,wheel_zoom,box_zoom,reset,hover'\n\np = figure(tools=TOOLS, x_range=(2002, 2015), y_range=(200000, 500000),\n title=\"UK Import Export Trends from 2002-2014\")\n\np.yaxis.axis_label = \"Value in $1000\"\n\np.line(yrs, export_df['UK_TO_EXT'], color='#A6CEE3', legend='UK_TO_EXT')\np.line(yrs, export_df['UK_TO_INT'], color='#B2DF8A', legend='UK_TO_INT')\np.legend.location = 
'top_left'\n\noutput_file(\"uk_grade.html\", title=\"UK Trade from 2002-2014\")\n\n# open a browser\nshow(p)\n", "Outliers\nLet look at % change. \nFirst, let's remove the aggregate sum (by identifying the aggregate key 'EU28'. Remember that we have set the index to \"geo\" already.", "df = df[~ df.index.isin(['EU28'])]\n\ndf.head(4)\n\npct_change_df = df.copy()", "Recall that yrs column is of type \"str\" even though they supposedly represent the year number.", "for yr in yrs:\n pct_change_df[yr] = (df[yr] - df[str(int(yr)-1)]) / df[str(int(yr)-1)]\n\npct_change_df.head(4)", "What is the year with the largest spread", "[(yr, abs(pct_change_df[yr].max() - pct_change_df[yr].min(0))) for yr in yrs]", "2010 seems to have a big % change in recent years.\nLet's find some outliers by using standard deviations.", "pct_change_df['2010'].std()\n\npct_change_df['2010'].mean()", "Let's define outliers are those > 2 standard deviations from the mean.", "pct_change_df[pct_change_df['2010'].abs() >= \n (pct_change_df['2010'].mean() + 2*pct_change_df['2010'].std())]", "Looks like these 3 countries are outliers defined as having % change > 2 standard deviations from their means.\nLet's use sorting to see the range of values for 2010", "pct_change_df['2010'].sort_values()", "There are very few countries with negative % change values for 2010. Let's separate out those values.", "pct_change_df[pct_change_df['2010'] < 0]", "Looks like Greece, Hungary, and Ireland all shrunk in imports for 2010. Luxumberg shrunk in both imports & exports in 2010.\nAlso looks like very few countries have % change values > 0.4. Let's examine those values for 2010.", "pct_change_df[pct_change_df['2010'] > 0.4]", "Looks like Lithuania has grown both import & export by > 40% that year.\nFor next steps in outliers analysis, we will next dig up news articles or reviews about why Lithuania and Luxumberg have such outlier behaviors during 2010." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
bolidozor/MeteorDataAnalyser
iPython/CRVAL2_SYSTIME_diff_plotter.ipynb
gpl-3.0
[ "Bolidozor FITS CRVAL2 and SYSDATE1 plotter\nFor use of this notebook you must have mounted space.astro.cz storage server to local filesystem. It is possible to do with sshfs\nbash\n sshfs &lt;user&gt;@space.astro.cz /&lt;mnt folder&gt;\nThen you must set path of &lt;mnt foler&gt; to path variable.\nScript works only with files with SYSDATE1 parameter in header.", "import os\nimport datetime\nimport numpy\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as md\n%matplotlib inline \n\npaths = ['/home/roman/mnt/server-space/storage/bolidozor/ZVPP/ZVPP-R6/snapshots/2017/09/03/',\n '/home/roman/mnt/server-space/storage/bolidozor/ZVPP/ZVPP-R6/snapshots/2017/09/04/',\n '/home/roman/mnt/server-space/storage/bolidozor/ZVPP/ZVPP-R6/snapshots/2017/09/05/']\ntimes = numpy.ndarray((0,2))\nstart_time = datetime.datetime.now()\nfits_browsed = 0\nfor path in paths:\n for root, dirs, files in os.walk(path):\n print(\"\")\n print(root, \" \")\n for name in files:\n if name.endswith((\"snap.fits\")):\n try:\n hdulist = fits.open(os.path.join(root, name))\n sysdate = hdulist[1].header['SYSDATE1']\n sysdate_beg = sysdate - hdulist[1].header['CDELT2']* hdulist[1].header['NAXIS2']\n crval = hdulist[1].header['CRVAL2']\n time = [sysdate_beg, crval]\n times = numpy.vstack( [times, time] )\n hdulist.close()\n print(\"+\", end='')\n fits_browsed += 1\n except Exception:\n print(\"-\", end='')\ntimes.sort(axis=0)\nprint(\"\")\nprint(\"===================================\")\nprint(fits_browsed, \"was successfully processed\")\nprint(\"It takes\", datetime.datetime.now()-start_time)\n", "Plotter\nNext cell plots a graph of time differences.\nPositive number means SYSDATE is ahead of CRVAL2 (radio-observer time is late).<br/>\nNegativ value means SYSDATE is behind CRVAL2 (radio-observer time is faster).", "plt.figure(figsize=(17, 9))\n\ndata=md.date2num([datetime.datetime.fromtimestamp(ts, datetime.timezone.utc) for ts in times[:,0]/1000])\nplt.xticks( 
rotation=25 )\nax=plt.gca()\nxfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')\nax.xaxis.set_major_formatter(xfmt)\nax.set_title('Difference of SYSDATE1 (system time) and CRVAL2 (radio-observer time of 1st .FITS row)')\nax.set_xlabel('datetime [UTC]')\nax.set_ylabel('time difference (SYSDATE1 - CRVAL2) [s]')\n\nplt.plot(data, (times[:,0]-times[:,1])/1000.0)\nplt.show()", "<br>\nCalc time difference of one file", "fits_path = '/home/roman/mnt/server-space/storage/bolidozor/ZVPP/ZVPP-R6/snapshots/2017/09/04/19/20170904192530311_ZVPP-R6_snap.fits'\n\nprint(\"\")\nhdulist = fits.open(fits_path)\nsysdate = hdulist[1].header['SYSDATE1']\nsysdate_beg = sysdate - hdulist[1].header['CDELT2']* hdulist[1].header['NAXIS2']\ncrval = hdulist[1].header['CRVAL2']\nhdulist.close()\n\ntime = (sysdate_beg - crval)/1000.0\nif time>0:\n print(\"difference between times is\", time, \"s. (SYSDATE is ahead, radio-observer time is late)\")\nelse:\n print(\"difference between times is\", time, \"s. (CRVAL2 is ahead, radio-observer time is in the future :-) )\")" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
Python4AstronomersAndParticlePhysicists/PythonWorkshop-ICE
notebooks/10_02_Astronomy_Astroquery.ipynb
mit
[ "Astroquery\nAstroquery is a set of tools for querying astronomical web forms and databases.\nGenerally it is used to query both Vizier and Simbad catalogs from python scripts. If you have some experience in the astronomy field, you probably already know how vast is the amount of information within these catalogs.\nNote these queries use Simbad/Vizier online services, so you need a working internet connection to run this notebook.\nSimbad catalog queries with astroquery\nThe easiest way to show the potential of astroquery is to show how it works:", "from astroquery.simbad import Simbad\nresult_table = Simbad.query_object(\"Crab Nebula\")\nresult_table.pprint(show_unit=True)\nresult_table.colnames", "The VOTable fields that are currently returned in the result are set to main_id and coordinates. However you can specify other fields to be added to the resulting table. To see the complete list of these fields:", "Simbad.list_votable_fields()", "Whaaaaaaaat?! I have no idea what all these fields mean... Specially \"coo_err_maja\"!", "Simbad.get_field_description('coo_err_maja')", "I see... Let's do another query on the Crab Nebula, but this time with some more \"votable fields\":", "customSimbad = Simbad()\ncustomSimbad.add_votable_fields('otype','distance', 'velocity')", "Remember to use the customized instance of Simbad for the new query:", "result_table = customSimbad.query_object(\"Crab Nebula\")\nresult_table.pprint(show_unit=True)\nresult_table.colnames\n\nprint(\"The Crab Nebula is a {} at a distance of {} {}\".format(result_table['OTYPE'][0], \n result_table['distance_distance'][0], \n result_table['distance_unit'][0]))", "Astroquery allows more complex queries, as for example a region around a sky coordinate:", "from astropy.coordinates import SkyCoord\ncrab_coord = SkyCoord.from_name('Crab Nebula') \nresult_table = customSimbad.query_region(crab_coord, radius='2d0m0s')\nresult_table.pprint()", "This is already too much information. 
By using pandas, we can easily take a peek at what we queried.\nYou probably won't need it, but remember you can take a look at the definition of the different Simbad otypes here.", "pandas_table = result_table.to_pandas()\npandas_table['OTYPE'].value_counts()", "Vizier catalog queries with astroquery\nSimilarly as described above, astroquery can be used to query within any Vizier catalog.\nThe possibilities are enormous, here just some examples are listed:\nQuery an object\nWe can search over the huge amount of catalogs within Vizier for a specific object:", "from astroquery.vizier import Vizier\n# The crab pulsar is too famous! 225 catalogs contain information of the CP...\n# result = Vizier.query_object(\"Crab pulsar\")\n\nresult = Vizier.query_object(\"FRB121102\")\nprint(result)\n\nresult[0] # Equivalent to result['I/197A/tic']", "Finding catalogs by keyword\nYou can also search through the Vizier catalogs through keywords (e. g. authors, title, etc...):", "# Let's try to find some Fermi-LAT catalogs.\n\n# This query is probably too broad... 
391 available catalogs with the keyword \"gamma\"\n# catalog_list = Vizier.find_catalogs('Gamma', max_catalogs=10000)\n\n# Fewer results if we directly search for the author name:\ncatalog_list = Vizier.find_catalogs('Abdo', max_catalogs=10000)\nprint({k:v.description for k,v in catalog_list.items()})", "Let's take a look at the 2nd Fermi-LAT catalog of gamma-ray pulsars (key = 'J/ApJS/208/17')", "catalog = Vizier.get_catalogs('J/ApJS/208/17')\n\ncatalog.pprint()\n\ncatalog[0]", "Query a region using Vizier catalogs\nSimilar as with Simbad, let's do a query around a sky region:", "from astroquery.vizier import Vizier\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\n\n# Select only bright stars\nv = Vizier(column_filters={\"Bmag\":\"<9\"})\nv.ROW_LIMIT = -1\ncrabNebula = SkyCoord.from_name('Crab Nebula') \nresult = v.query_region(crabNebula, radius=2.0*u.deg, catalog='NOMAD')\nresult[0]\n\nnomad_query = result[0]\nnomad_query['RAJ2000', 'DEJ2000','Bmag']", "Classic problem: convert ra/dec into alt/az, and see which star is bothering you within your field of view:", "from astropy import coordinates as c\nfrom astropy.coordinates import SkyCoord\nfrom astropy import time as t\n\nmagicSite = c.EarthLocation( lat=28.76194399284443*u.deg, lon=-17.890066533603996*u.deg )\nstar_coord = SkyCoord(ra=nomad_query['RAJ2000'], dec=nomad_query['DEJ2000'], frame='icrs')\nstar_coord_altAz = star_coord.transform_to( c.AltAz(obstime=t.Time.now(), location=magicSite) )\nnomad_query['alt'] = star_coord_altAz.alt\nnomad_query['az'] = star_coord_altAz.az\n\nnomad_query.sort('Bmag')\nnomad_query['Bmag', 'alt', 'az'].pprint()", "Resources:\n\nAstroquery documentation\nAstroquery Vizier docs\nAstroquery Simbad docs\nSimple examples\nThe Naval Observatory Merged Astrometric Dataset (NOMAD)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
eshlykov/mipt-day-after-day
statistics/python/python_2.ipynb
unlicense
[ "Кафедра дискретной математики МФТИ\nКурс математической статистики\nНикита Волков\nНа основе http://www.inp.nsk.su/~grozin/python/\nКортежи\nКортежи (tuples) очень похожи на списки, но являются неизменяемыми. Как мы видели, использование изменяемых объектов может приводить к неприятным сюрпризам.\nКортежи пишутся в круглых скобках. Если элементов $>1$ или 0, это не вызывает проблем. Но как записать кортеж с одним элементом? Конструкция (x) абсолютно легальна в любом месте любого выражения, и означает просто x. Чтобы избежать неоднозначности, кортеж с одним элементом x записывается в виде (x,).", "(1, 2, 3)\n\n()\n\n(1,)", "Скобки ставить не обязательно, если кортеж - единственная вещь в правой части присваивания.", "t = 1, 2, 3\nt", "Работать с кортежами можно так же, как со списками. Нельзя только изменять их.", "len(t)\n\nt[1]\n\nu = 4, 5\nt + u\n\n2 * u", "В левой части присваивания можно написать несколько переменных через запятую, а в правой кортеж. Это одновременное присваивание значений нескольким переменным.", "x, y = 1, 2\n\nx\n\ny", "Сначала вычисляется кортеж в правой части, исходя из старых значений переменных (до этого присваивания). Потом одновременно всем переменным присваиваются новые значения из этого кортежа. Поэтому так можно обменять значения двух переменных.", "x, y = y, x\n\nx\n\ny", "Это проще, чем в других языках, где приходится использовать третью переменную.\nМножества\nВ соответствии с математическими обозначениями, множества пишутся в фигурных скобках. Элемент может содержаться в множестве только один раз. Порядок элементов в множестве не имеет значения, поэтому питон их сортирует. Элементы множества могут быть любых типов.", "s = {0, 1, 0, 5, 5, 1, 0}\ns", "Принадлежит ли элемент множеству?", "1 in s, 2 in s, 1 not in s", "Множество можно получить из списка, или строки, или любого объекта, который можно использовать в for цикле (итерабельного).", "l = [0, 1, 0, 5, 5, 1, 0]\nset(l)\n\nset('абба')", "Как записать пустое множество? 
Только так.", "set()", "Дело в том, что в фигурных скобках в питоне пишутся также словари (мы будем их обсуждать в следующем параграфе). Когда в них есть хоть один элемент, можно отличить словарь от множества. Но пустые фигурные скобки означают пустой словарь.", "{}", "Работать с множествами можно как со списками.", "len(s)\n\nfor x in s:\n print(x)", "Это генератор множества (set comprehension).", "{i for i in range(5)}", "Объединение множеств.", "s2 = s | {2, 5}\ns2", "Проверка того, является ли одно множество подмножеством другого.", "s < s2, s > s2, s <= s2, s >= s2", "Пересечение.", "s2 & {1, 2, 3}", "Разность и симметричная разность.", "s2 - {1,3,5}\n\ns2 ^ {1,3,5}", "Множества (как и списки) являются изменяемыми объектами. Добавление элемента в множество и исключение из него.", "s2.add(4)\ns2\n\ns2.remove(1)\ns2", "Как и в случае +=, можно скомбинировать теоретико-множественную операцию с присваиванием.", "s2 |= {1, 2}\ns2", "Приведенные выше операции можно записывать и в другом стиле", "x = set([1, 4, 2, 4, 2, 1, 3, 4])\nprint(x)\n\nx.add(5) # добавление элемента\nprint(x)\n\nx.pop() # удаление элемента\nprint(x)\n\nprint(x.intersection(set([2, 4, 6, 8]))) # Пересечение\nprint(x.difference(set([2, 4, 6, 8]))) # Разность\nprint(x.union(set([2, 4, 6, 8]))) # Объединение\nprint(x.symmetric_difference(set([2, 4, 6, 8]))) # Симметрическая разность\n\nprint(x.issubset(set([2, 4, 6, 8]))) # Является ли подмножеством\nprint(x.issubset(set(list(range(10)))))\n\nprint(x.issuperset(set([2, 4, 6, 8]))) # Является ли надмножеством\nprint(x.issuperset(set([2, 4])))", "Существуют также неизменяемые множества. Этот тип данных называется frozenset. Операции над такими множествами подобны обычным, только невозможно изменять их (добавлять и исключать элементы).\nСловари\nСловарь содержит пары ключ - значение (их порядок несущественен). 
Это один из наиболее полезных и часто используемых типов данных в питоне.", "d = {'one': 1, 'two': 2, 'three': 3}\nd", "Можно узнать значение, соответствующее некоторому ключу. Словари реализованы как хэш-таблицы, так что поиск даже в больших словарях очень эффективен. В языках низкого уровня (например, C) для построения хэш-таблиц требуется использовать внешние библиотеки и писать заметное количество кода. В скриптовых языках (perl, python, php) они уже встроены в язык, и использовать их очень легко.", "d['two']\n\nd['four']", "Можно проверить, есть ли в словаре данный ключ.", "'one' in d, 'four' in d", "Можно присваивать значения как имеющимся ключам, так и отсутствующим (они добавятся к словарю).", "d['one'] =- 1\nd\n\nd['four'] = 4\nd", "Длина - число ключей в словаре.", "len(d)", "Можно удалить ключ из словаря.", "del d['two']\nd", "Метод get, если он будет вызван с отсутствующим ключом, не приводит к ошибке, а возвращает специальный объект None. Он используется всегда, когда необходимо указать, что объект отсутствует (в какой-то мере он аналогичен null в C). Если передать методу get второй аргумент - значение по умолчанию, то будет возвращаться это значение, а не None.", "d.get('one'), d.get('five')\n\nd.get('one', 0), d.get('five', 0)", "Словари обычно строят последовательно: начинают с пустого словаря, а затем добавляют ключи со значениями.", "d = {}\nd\n\nd['zero'] = 0\nd\n\nd['one'] = 1\nd", "А это генератор словаря (dictionary comprehension).", "d = {i: i ** 2 for i in range(5)}\nd", "Ключами могут быть любые неизменяемые объекты, например, целые числа, строки, кортежи.", "d = {}\nd[0, 0] = 1\nd[0, 1] = 0\nd[1, 0] = 0\nd[1, 1] = -1\nd\n\nd[0, 0] + d[1, 1]", "Словари, подобно спискам, можно использовать в for циклах. 
Перебираются имеющиеся в словаре ключи (в каком-то непредсказуемом порядке).", "d = {'one': 1, 'two': 2, 'three': 3}\nfor x in d:\n print(x, ' ', d[x])", "Метод keys возвращает список ключей, метод values - список соответствующих значений (в том же порядке), а метод items - список пар (ключ,значение). Точнее говоря, это не списки, а некоторые объекты, которые можно использовать в for циклах или превратить в списки функцией list. Если хочется написать цикл по упорядоченному списку ключей, то можно использовать sorted(d.keys)).", "d.keys(), d.values(), d.items()\n\nfor x in sorted(d.keys()):\n print(x, ' ', d[x])\n\nfor x, y in d.items():\n print(x, ' ', y)\n\ndel x, y", "Что есть истина? И что есть ложь? Подойдём к этому философскому вопросу экспериментально.", "bool(False), bool(True)\n\nbool(None)\n\nbool(0), bool(123)\n\nbool(''), bool(' ')\n\nbool([]), bool([0])\n\nbool(set()), bool({0})\n\nbool({}), bool({0: 0})", "На выражения, стоящие в булевых позициях (после if, elif и while), неявно напускается функция bool. Некоторые объекты интерпретируются как False: число 0, пустая строка, пустой список, пустое множество, пустой словарь, None и некоторые другие. Все остальные объекты интерпретируются как True. В операторах if или while очень часто используется список, словарь или что-нибудь подобное, что означает делай что-то если этот список (словарь и т.д.) не пуст.\nЗаметим, что число с плавающей точкой 0.0 тоже интерпретируется как False. Это использовать категорически не рекомендуется: вычисления с плавающей точкой всегда приближённые, и неизвестно, получите Вы 0.0 или 1.234E-12.\nЛучше напишите if abs(x)&lt;epsilon:.\nФункции\nЭто простейшая в мире функция. Она не имеет параметров, ничего не делает и ничего не возвращает. 
Оператор pass означает \"ничего не делай\"; он используется там, где синтаксически необходим оператор, а делать ничено не нужно (после if или elif, после def и т.д.).", "def f():\n pass\n\nf\n\npass\n\ntype(f)\n\nr = f()\nprint(r)", "Эта функция более полезна: она имеет параметр и что-то возвращает.", "def f(x):\n return x + 1\n\nf(1), f(1.0)\n\nf('abc')", "Если у функции много параметров, то возникает желание вызывать её попроще в наиболее часто встречающихся случаях. Для этого в операторе def можно задать значения некоторых параметров по умолчанию (они должны размещаться в конце списка параметров). При вызове необходимо указать все обязательные параметры (у которых нет значений по умолчанию), а необязательные можно и не указывать. Если при вызове указывать параметры в виде имя=значение, то это можно делать в любом порядке. Это гораздо удобнее, чем вспоминать, является данный параметр восьмым или девятым при вызове какой-нибудь сложной функции. Обратите внимание, что в конструкции имя=значение не ставятся пробелы между символом =.", "def f(x, a=0, b='b'):\n print(x, ' ', a, ' ', b)\n\nf(1.0)\n\nf(1.0, 1)\n\nf(1.0, b='a')\n\nf(1.0, b='a', a=2)\n\nf(a=2, x=2.0)", "Переменные, использующиеся в функции, являются локальными. Присваивание им не меняет значений глобальных переменных с такими же именами.", "a = 1\n\ndef f():\n a = 2\n return a\n\nf()\n\na", "Если в функции нужно использовать какие-нибудь глобальные переменные, их нужно описать как global.", "def f():\n global a\n a = 2\n return a\n\nf()\n\na", "Пространство имён устанавливает соответствие между именами переменных и объектами - их значениями. Есть пространство имён локальных переменных функции, пространство имён глобальных переменных программы и пространство имён встроенных функций языка питон. Для реализации пространств имён используются словари.\nЕсли функции передаётся в качестве аргумента какой-нибудь изменяемый объект, и функция его изменяет, то это изменение будет видно снаружи после этого вызова. 
Мы уже обсуждали эту ситуацию, когда две переменные (в данном случае глобальная переменная и параметр функции) указывают на один и тот же изменяемый объект объект.", "def f(x, l):\n l.append(x)\n return l\n\nl = [1, 2, 3]\nf(0, l)\n\nl", "Если в качестве значения какого-нибудь параметра по умолчанию используется изменяемый объект, то это может приводить к неожиданным последствиям. В данном случае исполнение определения функции приводит к созданию двух объектов: собственно функции и объекта-списка, первоначально пустого, который используется для инициализации параметра функции при вызове. Функция изменяет этот объект. При следующем вызове он опять используется для инициализации параметра, но его значение уже изменилось.", "def f(x, l=[]):\n l.append(x)\n return l\n\nf(0)\n\nf(1)\n\nf(2)", "Чтобы избежать таких сюрпризов, в качестве значений по умолчанию лучше использовать только неизменяемые объекты.", "def f(x, l=None):\n if l is None:\n l = []\n l.append(x)\n return l\n\nf(0)\n\nf(1)\n\nf(2, [0, 1])", "Эта функция имеет один обязательный параметр плюс произвольное число необязательных. При вызове все такие дополнительные аргументы объединяются в кортеж, который функция может использовать по своему усмотрению.", "def f(x, *l):\n print(x, ' ', l)\n\nf(0)\n\nf(0, 1)\n\nf(0, 1, 2)\n\nf(0, 1, 2, 3)", "Звёздочку можно использовать и при вызове функции. Можно заранее построить список (или кортеж) аргументов, а потом вызвать функцию с этими аргументами.", "l=[1, 2]\nc=('a', 'b')\nf(*l, 0, *c)", "Такую распаковку из списков и кортежей можно использовать не только при вызове функции, но и при построении списка или кортежа.", "(*l, 0, *c)\n\n[*l, 0, *c]\n\n[*l, 3]", "Эта функция имеет два обязательных параметра плюс произвольное число необязательных ключевых параметров. При вызове они должны задаваться в виде имя=значение. 
Они собираются в словарь, который функция может использовать по своему усмотрению.", "def f(x, y, **d):\n print(x, ' ', y, ' ', d)\n\nf(0, 1, foo=2, bar=3)", "Двойную звёздочку можно использовать и при вызове функции. Можно заранее построить словарь аргументов, сопоставляющий значения именам параметров, а потом вызвать функцию с этими ключевыми аргументами.", "d={'foo': 2, 'bar': 3}\nf(0, 1, **d)\n\nd['x'] = 0\nd['y'] = 1\nf(**d)", "Вот любопытный способ построить словарь с ключами-строками.", "def f(**d):\n return d\n\nf(x=0, y=1, z=2)", "Двойную звёздочку можно использовать не только при вызове функции, но и при построении словаря.", "d={0: 'a', 1: 'b'}\n{**d, 2: 'c'}", "Вот простой способ объединить два словаря.", "d1 = {0: 'a', 1: 'b'}\nd2 = {2: 'c', 3: 'd'}\n{**d1, **d2}", "Если один и тот же ключ встречается несколько раз, следующее значение затирает предыдущее.", "d2 = {1: 'B', 2: 'C'}\n{**d1, 3: 'D', **d2, 3: 'd'}", "Это наиболее общий вид списка параметров функции. Сначала идут обязательные параметры (в данном случае два), затем произвольное число необязательных (при вызове они будут объединены в кортеж), а затем произвольное число ключевых параметров (при вызове они будут объединены в словарь).", "def f(x, y, *l, **d):\n print(x, ' ', y, ' ', l, ' ', d)\n\nf(0, 1, 2, 3, foo=4, bar=5)", "В питоне функции являются гражданами первого сорта. Они могут присутствовать везде, где допустимы объекты других типов - среди элементов списков, значений в словарях и т.д.", "def f0(x):\n return x + 2\n\ndef f1(x):\n return 2 * x\n\nl = [f0, f1]\nl\n\nx = 2.0\nn = 1\nl[n](x)", "Если Вы пишете функцию не для того, чтобы один раз её вызвать и навсегда забыть, то нужна документация, объясняющая, что эта функция делает. Для этого сразу после строчки def пишется строка. Она называется док-строкой, и сохраняется при трансляции исходного текста на питоне в байт-код (в отличие от комментариев, которые при этом отбрасываются). 
Обычно эта строка заключается в тройные кавычки и занимает несколько строчек. Док-строка доступна как атрибут __doc__ функции, и используется функцией help. Вот пример культурно написанной функции, вычисляющей $n$-е число Фибоначчи.\nДля проверки типов аргументов, переданных функции, удобно использовать оператор assert. Если условие в нём истинно, всё в порядке, и он ничего не делает; если же оно ложно, выдаётся сообщение об ошибке.", "def fib(n):\n '''вычисляет n-е число Фибоначчи'''\n \n assert type(n) is int and n>0\n \n if n <= 2:\n return 1\n \n x, y = 1, 1\n for i in range(n - 2):\n x, y = y, x + y\n \n return y\n\nfib.__doc__\n\nhelp(fib)", "В jupyter-ноутбуке к документации можно обращаться более удобным способом", "fib?\n\n[fib(n) for n in range(1, 10)]\n\nfib(-1)\n\nfib(2.0)", "Некоторые полезные функции\nzip скрещивает два массива одной длины", "x = zip(range(5), range(0, 10, 2))\nprint(list(x))", "map применяет функию к каждому элементу массива", "x = map(lambda tmp: tmp ** 2, range(5))\nprint(list(x))", "sorted --- сортировка", "x = list(zip([7, 3, 4, 4, 5, 3, 9], ['a', 'n', 'n', 'a', 'k', 'n', 'a']))\n# сначала сортировка по букве по алфавиту, потом сортировка по убыванию по числу \nx = sorted(x, key=lambda element: (element[1], -element[0]))\nprint(list(x))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
timothydmorton/usrp-sciprog
day2/exercises/Solutions/Numpy_answers.ipynb
mit
[ "Exercise numpy\nThe ultimate goal of this exercise is to compare the position of stars in a patch of sky as measured in two different surveys. The main task at hand is to identify matching positions of stars between the surveys. For this, we will need to compare the positions of all stars in one survey to the position of all stars in the other survey. This task can be extremely time consuming if not implemented properly, we will therefore use this to compare different coding style and their impact on computation time. \nIf time allows, we will move on to represent the results of our analysis in a meaningfull way.", "import numpy as np \nimport matplotlib.pyplot as plt #We might need this\n\n\n#First, let us load the data\n#Catalog from HSC \ncat_hsc = np.loadtxt('../Catalog_HSC.csv')\nx_hsc = cat_hsc[:,0]\ny_hsc = cat_hsc[:,1]\n#Catalog from HST\ncat_hst = np.loadtxt('../Catalog_HST.csv')\nx_hst = cat_hst[:,0]\ny_hst = cat_hst[:,1]", "Check that the loaded data are consistent with what we expect: (ra, dec) coordinates of the same patch of sky", "#First, check the number of stars in each survey:\nns_hst = x_hst.size\nns_hsc = x_hsc.size\n#Print the result\nprint(f'There are {ns_hst} star in the HST catalog')\nprint(f'There are {ns_hsc} star in the HSC catalog')\n\n#This is a graphic representation of our data content:\n%matplotlib qt\nplt.title('star catalogs in COSMOS')\nplt.plot(x_hsc, y_hsc, 'or', label = 'hsc catalog')\nplt.plot(x_hst, y_hst, 'ob', label = 'hst catalog')\nplt.legend()\nplt.xlabel('ra')\nplt.ylabel('dec')\nplt.show()", "To begin with, let's write a function that returns the algebraic distance between two points", "def distance(point1, point2):\n ''' Returns the distance between two points with coordinates (x,y).\n \n Parameters\n ----------\n point1: list\n 2D coordinates of a point \n point2: list\n 2D coordinates of a point \n \n Returns\n -------\n d: float\n the distance between point1 and point2\n '''\n \n return 
np.sqrt(np.sum((np.array(point1)-np.array(point2))**2))", "Now let's test it by comparing the distance between the first point of each dataset.", "point1 = [x_hst[0], y_hst[0]]\npoint2 = [x_hsc[0], y_hsc[0]]\nprint(distance(point1, point2))", "Let's take it one step further and compare the distance between one point and a set of points", "def point_to_points_distance(point, coordinates):\n ''' Returns the distance between one point and all the points in coordinates.\n \n Parameters\n ----------\n point: list\n 2D coordinates of a point \n coordinates: list\n set of N 2D coordinates stored in a list with shape Nx2\n \n Returns\n -------\n d: list\n the distance between point and each point in coordinates in an array with size N\n '''\n #Declaring an empty list\n d = []\n for c in coordinates:\n # for each point in coordinates, take the distance to point and concatenate it to d \n d.append(distance(point, c))\n #make d a numpy array and return it\n return np.array(d)", "Let's test it on the first 10 points in the HSC catalog and the first point of the HST catalog", "coords = np.concatenate((x_hsc[:10,None], y_hsc[:10,None]), axis = 1)\nprint(point_to_points_distance(point1, coords))", "Now let's get to work. We would like to associate stars in one survey to their conterpart (if it exists) in the other survey. 
We will start by comparing the positions between each point of one survey to the position of each point in the other survey.\nFirst, write a function that takes two sets of coordinates (hsc and hst) and returns the distance from each point of one survey to each point of the other, such that the output should be an array of size (n_hst x n_hsc) or (n_hsc x n_hst).\nPS: if you have several (different) ideas about how to implement this, feel free to code them!", "def your_function(coord1, coord2):\n ''' Returns the distance between points in two sets of coordinates.\n \n Parameters\n coord1: array\n array of size Nx2 that contains the [Ra, Dec] positions of a catalog \n coord2: array\n array of size Mx2 that contains the [Ra, Dec] positions of a catalog \n \n Returns\n dist: array\n array of size NxM that contains the euclidean distances between points in the two datasets\n '''\n # See functions implemented in distances.py\n pass\n", "Now, let us take a look at the computation times:", "# In order not to spend the whole evening here, let us reduce the dataset size:\n#Select stars in hsc in the frame: 150.0<ra<150.1 and 2.0<dec<2.1\nloc_hsc = (np.abs(y_hsc-2.05)<0.05)*(np.abs(x_hsc-150.05)<0.05)\nx_hsc_exp = x_hsc[loc_hsc]#please fill these\ny_hsc_exp = y_hsc[loc_hsc]#...\n\nloc_hst = (np.abs(y_hst-2.05)<0.05)*(np.abs(y_hst-150.05)<0.05)\nx_hst_exp = x_hst[loc_hst]#please fill these\ny_hst_exp = y_hst[loc_hst]#\n#Once you are done with the exercise, feel free to try with larger selections to see how it impacts computation time\n\nimport distances as dt\n# Insert the names of your functions in the following array:\nmethods = [your_function, dt.naive, dt.with_indices, dt.one_loop, dt.one_loop_reverse, dt.scipy_version, dt.newaxis_magic]\n#An empty variable to store computation time\ntimers = []\nc2 = np.concatenate((x_hst_exp[:,None], y_hst_exp[:,None]), axis = 1)#Please fill these\nc1 = np.concatenate((x_hsc_exp[:,None], y_hsc_exp[:,None]), axis = 1)#\n\n\nfor f in 
methods:\n print(f.__name__)\n r = %timeit -o f(c1, c2)\n timers.append(r)\n\n#View the results:\nplt.figure(figsize=(10,6))\nplt.bar(np.arange(len(methods)), [r.best*1000 for r in timers], log=False) # Set log to True for logarithmic scale\nplt.xticks(np.arange(len(methods))+0.2, [f.__name__ for f in methods], rotation=30)\nplt.xlabel('Method')\nplt.ylabel('Time (ms)')", "Identifying matching stars\nNow that we know all the distances, let us find the stars in each datasets that correspond to one another.\nThis is done by finding, for each star, the minimum distance recorded between the two datasets.\nOne problem that arises with deriving an array that computes all the distances is that we end up with a very LARGE array, which becomes impractical for fast computations. Instead, we will modify one of the previous functions so that it returns the coordinates of stars that have a match in both datasets along with their distance.\nBecause all stars in a given set do not have a counter part in the other, we will only accept a match if the minimum distance between two points is smaller than 0.17 arcseconds (the size of an HSC pixel).\nIn other words, for each star in one dataset, find the star in the other dataset that is the closest (minimum distance), check wether that star is closer that 0.17 arcseconds, if yes, store its coordinates along with the computed distance. 
At the end of the function, return arrays with the matching star coordinates and their distance to their match in the other dataset.", "#Let us compute the distances as we did before, but this time, with the whole dataset.\n#Of course, a fast method is to be prefered\n\nc1 = np.concatenate((x_hst[:,None], y_hst[:,None]), axis = 1)\nc2 = np.concatenate((x_hsc[:,None], y_hsc[:,None]), axis = 1)\n\n\ndef get_match(coord_ref, coord2, rad):\n '''\n matches coordinates of stars between two datasets and computes the distance between the position of the stars in the 2 datasets\n\n Parameters\n coord_ref: numpy array (Nx2)\n coordinates (ra, dec) of stars in a FoV from a given dataset\n coord2: numpy array (Mx2)\n coordinates (ra dec) of stars in the same FoV in an other dataset\n rad: float\n radius (deg) around stars in coord_ref where to find a corresponding star in coord2\n \n Returns\n modulus:numpy array (N')\n containing the distance between matching stars\n v_coord: numpy array(N',2)\n coordinates in the coord_ref set of matching stars\n \n\n '''\n #Declare two empty arrays to store the coordinates and distances.\n modulus = []\n v_coord = []\n angle = []\n s = np.size(coord_ref[:,0])\n print('number of points in reference catalog: {0}'.format(s))\n #for each star in coord_ref\n for i,c in enumerate(coord_ref):\n\n if i % 3000 == 0:\n print('point number {0} out of {1}'.format(i, s))\n #compute the distance to all stars in coord2\n r = ((c[0]-coord2[:,0])**2+(c[1]-coord2[:,1])**2)**0.5\n #Find the closest star from coord 2 to c\n loc = np.array(np.where(r == np.min(r))).flatten()\n\n #Make sure that there is only one star matching\n if np.size(loc) > 1:\n loc = loc[0]\n\n #record the distance between matching stars\n rmin = r[loc]\n \n #Check whether the closest distance is smaller than rad\n if rmin <= rad:\n #if yes, save the coordinates and the distance in an array\n v_coord.append(c)\n modulus.append(rmin.item())\n angle.append(np.arctan2(-c[0]+coord2[loc,0], 
-c[1]+coord2[loc,1]).item())\n\n return np.array(v_coord), np.array(modulus), np.array(angle)\n\ncoord , r, theta = get_match(c1, c2, 0.3/3600.)", "Now I would like to have a representation for the work we have done that informs me about what is in my datasets. Namely, what is the error on star positions between the two datasets? We would like to have a global view of this error but also an impression of the error as a function of the position on the field. For the latter, I suggest you use the 'scatter' function from matplotlib.", "#Spatial distribution of distances\nplt.figure(0)\nplt.title('distribution of distances accross the FoV')\nplt.scatter(coord[:,0],coord[:,1],c = r, cmap = 'gist_stern')\nplt.xlabel('ra')\nplt.ylabel('dec')\nplt.colorbar()\nplt.show()\n\nplt.figure(1)\nplt.title('distribution of angles accross the FoV')\nplt.scatter(coord[:,0],coord[:,1],c = theta, cmap = 'twilight')\nplt.xlabel('ra')\nplt.ylabel('dec')\nplt.colorbar()\nplt.show()\n\n#Array of plots\nplt.subplot(121)\n#In an array of figure of 1 by to, plot number 1 is:\nplt.title('distances')\nplt.hist(r*3600, bins = 30)\nplt.xlabel('r')\nplt.subplot(122)\n#In an array of figure of 1 by to, plot number 2 is:\nplt.title('angles')\nplt.hist(theta, bins = 30)\nplt.xlabel('theta')\nplt.show()", "Extra bonus question: There is a third dataset in this directory. \nIt issued by gaia and contains the most precise astrometric measurements to date\nUse it to compare the two datasets and deduce which one is farthest from the truth." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.20/_downloads/ddc69c7d64f56b270d77306da97dbe20/plot_eeg_mri_coords.ipynb
bsd-3-clause
[ "%matplotlib inline", "EEG source localization given electrode locations on an MRI\nThis tutorial explains how to compute the forward operator from EEG data\nwhen the electrodes are in MRI voxel coordinates.\n :depth: 2", "# Authors: Eric Larson <larson.eric.d@gmail.com>\n#\n# License: BSD Style.\n\nimport os.path as op\n\nimport nibabel\nfrom nilearn.plotting import plot_glass_brain\nimport numpy as np\n\nimport mne\nfrom mne.channels import compute_native_head_t, read_custom_montage\nfrom mne.viz import plot_alignment", "Prerequisites\nFor this we will assume that you have:\n\nraw EEG data\nyour subject's MRI reconstrcted using FreeSurfer\nan appropriate boundary element model (BEM)\nan appropriate source space (src)\nyour EEG electrodes in Freesurfer surface RAS coordinates, stored\n in one of the formats :func:mne.channels.read_custom_montage supports\n\nLet's set the paths to these files for the sample dataset, including\na modified sample MRI showing the electrode locations plus a .elc\nfile corresponding to the points in MRI coords (these were synthesized\n&lt;https://gist.github.com/larsoner/0ac6fad57e31cb2d9caa77350a9ff366&gt;__,\nand thus are stored as part of the misc dataset).", "data_path = mne.datasets.sample.data_path()\nsubjects_dir = op.join(data_path, 'subjects')\nfname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')\nbem_dir = op.join(subjects_dir, 'sample', 'bem')\nfname_bem = op.join(bem_dir, 'sample-5120-5120-5120-bem-sol.fif')\nfname_src = op.join(bem_dir, 'sample-oct-6-src.fif')\n\nmisc_path = mne.datasets.misc.data_path()\nfname_T1_electrodes = op.join(misc_path, 'sample_eeg_mri', 'T1_electrodes.mgz')\nfname_mon = op.join(misc_path, 'sample_eeg_mri', 'sample_mri_montage.elc')", "Visualizing the MRI\nLet's take our MRI-with-eeg-locations and adjust the affine to put the data\nin MNI space, and plot using :func:nilearn.plotting.plot_glass_brain,\nwhich does a maximum intensity projection (easy to see the fake 
electrodes).\nThis plotting function requires data to be in MNI space.\nBecause img.affine gives the voxel-to-world (RAS) mapping, if we apply a\nRAS-to-MRI transform to it, it becomes the voxel-to-MNI transformation we\nneed. Thus we create a \"new\" MRI image in MNI coordinates and plot it as:", "img = nibabel.load(fname_T1_electrodes) # original subject MRI w/EEG\nras_mni_t = mne.transforms.read_ras_mni_t('sample', subjects_dir) # from FS\nmni_affine = np.dot(ras_mni_t['trans'], img.affine) # vox->ras->MNI\nimg_mni = nibabel.Nifti1Image(img.dataobj, mni_affine) # now in MNI coords!\nplot_glass_brain(img_mni, cmap='hot_black_bone', threshold=0., black_bg=True,\n resampling_interpolation='nearest', colorbar=True)", "Getting our MRI voxel EEG locations to head (and MRI surface RAS) coords\nLet's load our :class:~mne.channels.DigMontage using\n:func:mne.channels.read_custom_montage, making note of the fact that\nwe stored our locations in Freesurfer surface RAS (MRI) coordinates.\n.. collapse:: |question| What if my electrodes are in MRI voxels?\n :class: info\nIf you have voxel coordinates in MRI voxels, you can transform these to\nFreeSurfer surface RAS (called \"mri\" in MNE) coordinates using the\ntransformations that FreeSurfer computes during reconstruction.\n``nibabel`` calls this transformation the ``vox2ras_tkr`` transform\nand operates in millimeters, so we can load it, convert it to meters,\nand then apply it::\n\n &gt;&gt;&gt; pos_vox = ... # loaded from a file somehow\n &gt;&gt;&gt; img = nibabel.load(fname_T1)\n &gt;&gt;&gt; vox2mri = img.header.get_vox2ras_tkr() # voxel -&gt; mri\n &gt;&gt;&gt; vox2mri[:3] /= 1000. 
# mm -&gt; m\n &gt;&gt;&gt; pos_mri = mne.transforms.apply_trans(vox2mri, pos_vox)\n\nYou can also verify that these are correct (or manually convert voxels\nto MRI coords) by looking at the points in Freeview or tkmedit.", "dig_montage = read_custom_montage(fname_mon, head_size=None, coord_frame='mri')\ndig_montage.plot()", "We can then get our transformation from the MRI coordinate frame (where our\npoints are defined) to the head coordinate frame from the object.", "trans = compute_native_head_t(dig_montage)\nprint(trans) # should be mri->head, as the \"native\" space here is MRI", "Let's apply this digitization to our dataset, and in the process\nautomatically convert our locations to the head coordinate frame, as\nshown by :meth:~mne.io.Raw.plot_sensors.", "raw = mne.io.read_raw_fif(fname_raw)\nraw.load_data().pick_types(meg=False, eeg=True, stim=True, exclude=())\nraw.set_montage(dig_montage)\nraw.plot_sensors(show_names=True)", "Now we can do standard sensor-space operations like make joint plots of\nevoked data.", "raw.set_eeg_reference(projection=True)\nevents = mne.find_events(raw)\nepochs = mne.Epochs(raw, events)\ncov = mne.compute_covariance(epochs, tmax=0.)\nevoked = epochs['1'].average() # trigger 1 in auditory/left\nevoked.plot_joint()", "Getting a source estimate\nNew we have all of the components we need to compute a forward solution,\nbut first we should sanity check that everything is well aligned:", "plot_alignment(evoked.info, trans=trans, show_axes=True, surfaces='head-dense',\n subject='sample', subjects_dir=subjects_dir)", "Now we can actually compute the forward:", "fwd = mne.make_forward_solution(\n evoked.info, trans=trans, src=fname_src, bem=fname_bem, verbose=True)", "Finally let's compute the inverse and apply it:", "inv = mne.minimum_norm.make_inverse_operator(\n evoked.info, fwd, cov, verbose=True)\nstc = mne.minimum_norm.apply_inverse(evoked, inv)\nstc.plot(subjects_dir=subjects_dir, initial_time=0.1)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
smorton2/think-stats
code/chap08exmine.ipynb
gpl-3.0
[ "Examples and Exercises from Think Stats, 2nd Edition\nhttp://thinkstats2.com\nCopyright 2016 Allen B. Downey\nMIT License: https://opensource.org/licenses/MIT", "from __future__ import print_function, division\n\n%matplotlib inline\n\nimport numpy as np\n\nimport brfss\n\nimport thinkstats2\nimport thinkplot", "The estimation game\nRoot mean squared error is one of several ways to summarize the average error of an estimation process.", "def RMSE(estimates, actual):\n \"\"\"Computes the root mean squared error of a sequence of estimates.\n\n estimate: sequence of numbers\n actual: actual value\n\n returns: float RMSE\n \"\"\"\n e2 = [(estimate-actual)**2 for estimate in estimates]\n mse = np.mean(e2)\n return np.sqrt(mse)", "The following function simulates experiments where we try to estimate the mean of a population based on a sample with size n=7. We run iters=1000 experiments and collect the mean and median of each sample.", "import random\n\ndef Estimate1(n=7, iters=1000):\n \"\"\"Evaluates RMSE of sample mean and median as estimators.\n\n n: sample size\n iters: number of iterations\n \"\"\"\n mu = 0\n sigma = 1\n\n means = []\n medians = []\n for _ in range(iters):\n xs = [random.gauss(mu, sigma) for _ in range(n)]\n xbar = np.mean(xs)\n median = np.median(xs)\n means.append(xbar)\n medians.append(median)\n\n print('Experiment 1')\n print('rmse xbar', RMSE(means, mu))\n print('rmse median', RMSE(medians, mu))\n \nEstimate1()", "Using $\\bar{x}$ to estimate the mean works a little better than using the median; in the long run, it minimizes RMSE. 
But using the median is more robust in the presence of outliers or large errors.\nEstimating variance\nThe obvious way to estimate the variance of a population is to compute the variance of the sample, $S^2$, but that turns out to be a biased estimator; that is, in the long run, the average error doesn't converge to 0.\nThe following function computes the mean error for a collection of estimates.", "def MeanError(estimates, actual):\n \"\"\"Computes the mean error of a sequence of estimates.\n\n estimate: sequence of numbers\n actual: actual value\n\n returns: float mean error\n \"\"\"\n errors = [estimate-actual for estimate in estimates]\n return np.mean(errors)", "The following function simulates experiments where we try to estimate the variance of a population based on a sample with size n=7. We run iters=1000 experiments and two estimates for each sample, $S^2$ and $S_{n-1}^2$.", "def Estimate2(n=7, iters=1000):\n mu = 0\n sigma = 1\n\n estimates1 = []\n estimates2 = []\n for _ in range(iters):\n xs = [random.gauss(mu, sigma) for i in range(n)]\n biased = np.var(xs)\n unbiased = np.var(xs, ddof=1)\n estimates1.append(biased)\n estimates2.append(unbiased)\n\n print('mean error biased', MeanError(estimates1, sigma**2))\n print('mean error unbiased', MeanError(estimates2, sigma**2))\n \nEstimate2()", "The mean error for $S^2$ is non-zero, which suggests that it is biased. 
The mean error for $S_{n-1}^2$ is close to zero, and gets even smaller if we increase iters.\nThe sampling distribution\nThe following function simulates experiments where we estimate the mean of a population using $\\bar{x}$, and returns a list of estimates, one from each experiment.", "def SimulateSample(mu=90, sigma=7.5, n=9, iters=1000):\n xbars = []\n for j in range(iters):\n xs = np.random.normal(mu, sigma, n)\n xbar = np.mean(xs)\n xbars.append(xbar)\n return xbars\n\nxbars = SimulateSample()", "Here's the \"sampling distribution of the mean\" which shows how much we should expect $\\bar{x}$ to vary from one experiment to the next.", "cdf = thinkstats2.Cdf(xbars)\nthinkplot.Cdf(cdf)\nthinkplot.Config(xlabel='Sample mean',\n ylabel='CDF')", "The mean of the sample means is close to the actual value of $\\mu$.", "np.mean(xbars)", "An interval that contains 90% of the values in the sampling disrtribution is called a 90% confidence interval.", "ci = cdf.Percentile(5), cdf.Percentile(95)\nci", "And the RMSE of the sample means is called the standard error.", "stderr = RMSE(xbars, 90)\nstderr", "Confidence intervals and standard errors quantify the variability in the estimate due to random sampling.\nEstimating rates\nThe following function simulates experiments where we try to estimate the mean of an exponential distribution using the mean and median of a sample.", "def Estimate3(n=7, iters=1000):\n lam = 2\n\n means = []\n medians = []\n for _ in range(iters):\n xs = np.random.exponential(1.0/lam, n)\n L = 1 / np.mean(xs)\n Lm = np.log(2) / thinkstats2.Median(xs)\n means.append(L)\n medians.append(Lm)\n\n print('rmse L', RMSE(means, lam))\n print('rmse Lm', RMSE(medians, lam))\n print('mean error L', MeanError(means, lam))\n print('mean error Lm', MeanError(medians, lam))\n \nEstimate3()", "The RMSE is smaller for the sample mean than for the sample median.\nBut neither estimator is unbiased.\nExercises\nExercise: In this chapter we used $\\bar{x}$ and median to 
estimate µ, and found that $\\bar{x}$ yields lower MSE. Also, we used $S^2$ and $S_{n-1}^2$ to estimate σ, and found that $S^2$ is biased and $S_{n-1}^2$ unbiased.\nRun similar experiments to see if $\\bar{x}$ and median are biased estimates of µ. Also check whether $S^2$ or $S_{n-1}^2$ yields a lower MSE.", "# Solution goes here\niterations = 500\nsample_size = 8\nmu = 0\nsigma = 1\nmeans = []\nmedians = []\n\nfor i in range(iterations):\n xs = [random.gauss(mu, sigma) for i in range(sample_size)]\n xbar = np.mean(xs)\n median = np.median(xs)\n means.append(xbar)\n medians.append(median)\n \nprint('mean error', MeanError(means, mu))\nprint('mean error median', MeanError(medians, mu))\nprint('finished')\n\n# Solution goes here\nprint('second experiment')\niterations = 500\nsample_size = 8\nmu = 0\nsigma = 1\n\nbiased_est = []\nunbiased_est = []\n\nfor i in range(iterations):\n xs = [random.gauss(mu, sigma) for i in range(sample_size)]\n bias = np.var(xs)\n biased_est.append(bias)\n unbias = np.var(xs, ddof=1)\n unbiased_est.append(unbias)\n \n \n \n \nprint('bias', MeanError(biased_est, mu))\nprint('unbias', MeanError(unbiased_est, mu))\nprint('finished')\n\n# Solution goes here\n", "Exercise: Suppose you draw a sample with size n=10 from an exponential distribution with λ=2. Simulate this experiment 1000 times and plot the sampling distribution of the estimate L. 
Compute the standard error of the estimate and the 90% confidence interval.\nRepeat the experiment with a few different values of n and make a plot of standard error versus n.", "# Solution goes here\ndef experiment(samples=10, lambdaa = 2, iterations=1000):\n L = []\n \n print('experiment starting')\n for i in range(iterations):\n xs = np.random.exponential(1.0/lambdaa, samples)\n lambdaa_hat = 1.0 / np.mean(xs)\n L.append(lambdaa_hat)\n \n std_error = RMSE(L, lambdaa)\n print('std error', stderr)\n \n cdf = thinkstats2.Cdf(L, label='estimates')\n print('90% confidence int =', cdf.Percentile(90))\n \n thinkplot.Cdf(cdf)\n thinkplot.Config(xlabel='estimates')\n \n print('experiment ending')\nexperiment()\n\n# Solution goes here", "Exercise: In games like hockey and soccer, the time between goals is roughly exponential. So you could estimate a team’s goal-scoring rate by observing the number of goals they score in a game. This estimation process is a little different from sampling the time between goals, so let’s see how it works.\nWrite a function that takes a goal-scoring rate, lam, in goals per game, and simulates a game by generating the time between goals until the total time exceeds 1 game, then returns the number of goals scored.\nWrite another function that simulates many games, stores the estimates of lam, then computes their mean error and RMSE.\nIs this way of making an estimate biased?", "def SimulateGame(lam):\n \"\"\"Simulates a game and returns the estimated goal-scoring rate.\n\n lam: actual goal scoring rate in goals per game\n \"\"\"\n goals = 0\n t = 0\n while True:\n time_between_goals = random.expovariate(lam)\n t += time_between_goals\n if t > 1:\n break\n goals += 1\n\n # estimated goal-scoring rate is the actual number of goals scored\n L = goals\n return L\n\ndef soccer_sim(lam=3, games=50000):\n \"\"\"games is how many games we're simulating\"\"\"\n \n estimates = []\n for i in range(games):\n goals = SimulateGame(lam)\n 
estimates.append(goals)\n \n print('RMSE of goals', RMSE(estimates, lam))\n print('mean error', MeanError(estimates, lam))\n \n pmf = thinkstats2.Pmf(estimates)\n thinkplot.Hist(pmf)\n thinkplot.Show()\n \nsoccer_sim()\n# Solution goes here\n\n# Solution goes here" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tyler-abbot/PyShop
session2/PyShop_session2_examples.ipynb
agpl-3.0
[ "PyShop\nSession 2: Examples\nCreating NumPy Arrays", "import numpy as np\n\nx = np.array([1, 2, 3])\ny = np.zeros((2, 2))\nz = np.ones((2, 2))\na = np.eye((2))\n\nprint(x)\nprint(y)\nprint(z)\nprint(a)\n\n# Be aware of data type. Numpy only allows one type.\nnp.array([1.0, 1])\n\nimport sys\n# How big are these things?\nn = 10\nprint(sys.getsizeof(int(1)), sys.getsizeof(np.array(1)), sys.getsizeof([1]))\nprint(sys.getsizeof(np.ones((n))),\n sys.getsizeof([1. for i in range(0, n)]))\nprint(sys.getsizeof(np.ones((n), dtype = np.float32)),\n sys.getsizeof([1. for i in range(0, n)]))\nprint(sys.getsizeof(np.ones((n), dtype = np.float32)/2),\n sys.getsizeof([1./2 for i in range(0, n)]))\n\nprint(sys.getsizeof(np.ones((n), dtype = np.float32)/2),\n sys.getsizeof([1./2 for i in range(0, n)]))", "Looks like Python 3 solved the problem I had of variable size list objects!! Hooray!", "# Dimensions are important! Be careful how big the arrays get\ndims = 5\nD = 10.\nprint(sys.getsizeof(np.ones(([D for i in range(0, dims)]))))", "Indexing NumPy Arrays", "a = np.eye(2)\nprint(a)\n\n# Referencing a single entry\na[0, 0]\n\n# Assignment\na[0, 0] = 2\nprint(a)\n\n# Also supports slicing\nprint(a[0, :])\nprint(a[1, :])", "Pointers and Copies", "# Equality creates a view\na = np.eye(2)\nb = a\nprint(a)\nprint(b)\n\n# Assigning then changes original. 
View references this\na[0, 0] = 0\nprint(a)\nprint(b)\n\n# If you need a copy, be explicit\na = np.eye(2)\nb[:] = a\nc = a.copy()\nprint(a)\nprint(b)\nprint(c)\n\na[0, 0] = 0\nprint(a)\nprint(b)\nprint(c)", "Broadcasting and Array Operations", "a = np.eye(2)\nb = np.eye(2)\nc = 2\n\n#Multiplicaiton is done elementwise\nprint(a * b)\n\n#Scalar multiplcation is broadcast\nprint(a * c)\n\n# Scalar addition is broadcast in the same way\nprint(a + c)\n\n# Some arrays to broadcast\nd = np.array([1, 2])\ne = np.vstack((a, np.zeros(2)))\nf = np.array([[1, 2], [3, 4]])\n\nprint(d,e,f)\n\n# Required that all of the dimensions either match or equal one\nprint(e.shape)\nprint(e)\nprint(d.shape)\nprint(d)\nprint(d + e)\nprint(e + a)\n\n# Notice the duplication along the smallest axis\nprint(d)\nprint(e)\nprint(d + e)\n\n# A one d array is neither column nor row\n# This means the broadcasting rule seems ambiguous\nprint(d.shape)\nprint(d == d.T)\n\nd = np.array([1., 1.])\nprint(a + d)\n\n# Broadcasting rules move along the first matching axis FROM THE RIGHT\nprint(f.shape)\nprint(f)\nprint(d.shape)\nprint(d)\nprint(f + d)\nprint(f + d.T)\n\n# You can change this by adding a new axis\n# This helps to be specific about shape\nprint(d[:, np.newaxis].shape)\nprint(f + d[:, np.newaxis])", "Why do I care?", "import time\n\nlength = 10\n\na = [i for i in range(0, length)]\nb = [i for i in range(0, length)]\nc = []\n\nt0 = time.time()\n\nfor i in range(len(a)):\n c.append(a[i]*b[i])\n\nt1 = time.time()\nprint(\"Process executed in : %s : seconds.\" %(t1 - t0))\n\na\n\na = np.arange(0, length).reshape(10,1)\nb = np.arange(0, length)\n\nt0 = time.time()\n\nC = a * b\n\nt1 = time.time()\nprint(\"Process executed in : %s : seconds.\" %(t1 - t0))\n\na\n\na[:, np.newaxis].shape", "Integration\nExample:\n$$\n\\int_{0}^{\\infty} ke^{-kx}dx\n$$", "import scipy.integrate\n\ndef integrand(x, k):\n return k*np.exp(-k*x)\n\nk = 1.0\nscipy.integrate.quad(integrand, 0, np.inf, args = (k))\n\n# Note: 
it is unclear what quadrature rule is used here\nscipy.integrate.quad(lambda x: k*np.exp(-k*x), 0, np.inf)", "Unconstrained Optimization\nMethods:\n\n\n\"Downhill simplex method\". Generates a simplex of dimension n+1 and then uses a simple algorithm (similar to a bisection algorithm) to find local optima.\n\n\n\"Broyden-Fletcher-Goldfarb-Shanno Algorithm\". Considered a \"quasi-newton\" method. A newton step would calculate the hessian directly, where quasi-newton methods approximate it in some way. \n\n\n\"Powell's Conjugate Direction Method\". A sort of combination of steps in the taxi-cab method. Instead of searching only along a single vecor, take a linear combination of the gradients.\n\n\n\"Conjugate Gradient Method\". Most useful for sparse, linear systems. You'll notice here it is unsuccessful.", "import scipy.optimize\nimport time\n\ndef rosenbrock(x, a, b):\n return (a - x[0])**2 + b*(x[1] - x[0]**2)**2\n\na = 1.\nb = 100.\n\nx0 = np.array([2., 3.])\n\nt0 = time.time()\nres = scipy.optimize.minimize(rosenbrock, x0, args=(a, b), method='Nelder-Mead')\nt1 = time.time()\nprint(\"\\nProcess executed in : %s : seconds.\\n\" %(t1 - t0))\nprint(res)\n\nt0 = time.time()\nres = scipy.optimize.minimize(rosenbrock, x0, args=(a, b), method='BFGS')\nt1 = time.time()\nprint(\"\\nProcess executed in : %s : seconds.\\n\" %(t1 - t0))\nprint(res)\n\nt0 = time.time()\nres = scipy.optimize.minimize(rosenbrock, x0, args=(a, b), method='Powell')\nt1 = time.time()\nprint(\"\\nProcess executed in : %s : seconds. \\n\" %(t1 - t0))\nprint(res)\n\nt0 = time.time()\nres = scipy.optimize.minimize(rosenbrock, x0, args=(a, b), method='CG')\nt1 = time.time()\nprint(\"\\nProcess executed in : %s : seconds. \\n\" %(t1 - t0))\nprint(res)", "Root Finding\nMethods:\n\n\n\"Hybrid\". From MINPACK, essentially a modified Powell method.\n\n\n\"Broyden's Method\". A quasi-newton method for multidimensional root finding. 
Calculate the jacobian only once, then do an update each iteration.\n\n\n\"Anderson Mixing\". A quasi-newton method. Approximate the jacobian by the \"best\" solution in the space spanned by the last M vectors... whatever that means!\n\n\n\"Linear Mixing\". Similar to Anderson method.\n\n\n\"Krylov Methods\". Approximate the jacobian by a spanning basis of the krylov space. Very neat.", "def f(x, a, b):\n return np.array([a*(1 - x[0]), b*(x[1] - x[0]**2)**2])\n\na = 1.\nb = 100.\nx0 = np.array([10., 2.])\n\nt0 = time.time()\nsol = scipy.optimize.root(f, x0, args=(a, b), method='hybr')\nt1 = time.time()\nprint(\"\\nProcess executed in : %s : seconds. \\n\" %(t1 - t0))\nprint(sol)\n\nt0 = time.time()\nsol = scipy.optimize.root(f, x0, args=(a, b), method='broyden1')\nt1 = time.time()\nprint(\"\\nProcess executed in : %s : seconds. \\n\" %(t1 - t0))\nprint(sol)\n\nt0 = time.time()\nsol = scipy.optimize.root(f, x0, args=(a, b), method='anderson')\nt1 = time.time()\nprint(\"\\nProcess executed in : %s : seconds. \\n\" %(t1 - t0))\nprint(sol)\n\nt0 = time.time()\nsol = scipy.optimize.root(f, x0, args=(a, b), method='linearmixing')\nt1 = time.time()\nprint(\"\\nProcess executed in : %s : seconds. \\n\" %(t1 - t0))\nprint(sol)\n\nt0 = time.time()\nsol = scipy.optimize.root(f, x0, args=(a, b), method='krylov')\nt1 = time.time()\nprint(\"\\nProcess executed in : %s : seconds. 
\\n\" %(t1 - t0))\nprint(sol)", "Simple Plotting", "import matplotlib.pyplot as plt\n%matplotlib inline\n\nx = np.arange(0, np.pi, 0.01)\ny = np.cos(x)\nplt.plot(x, y)\nplt.show()", "Modifying Plot Attributes", "x = np.arange(0, np.pi, 0.01)\ny = np.cos(x)\nplt.plot(x, y)\n\n#Add axis labels\nplt.xlabel('Lunch Seminars Per Week')\nplt.ylabel('Marginal Productivity of Doctoral Students')\n\n#Add title\nplt.title(\"Marginal Productivity of Doctoral\\n\"\n + \"Students as a Function\\n\"\n + \" of Lunch Seminars Per Week\")\n\n#Add emphasis to important points\npoints = np.array([1.0, 2.5])\nplt.plot(points, np.cos(points), 'ro')\n\n#Add a label and legend to the points\nplt.plot(points, np.cos(points), 'o', label='Nap Thresholds')\n#plt.legend()\n\n#But the legend is poorly placed, so move it to a better spot\nplt.legend(loc=2)\n\nplt.show()", "Subplot Objects", "x = np.arange(0, 10, 0.1)\nf = lambda x: np.cos(x)\ng = lambda x: np.sin(x)\nh = lambda x: x**2\ni = lambda x: np.log(x + 0.00001)\n\n#Create the figure and axes objects. sharex and sharey allow them to share axes\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n\n#Plot on the first axes object. 
Notice, you can plot several times on the same object\nax1.plot(x, f(x))\nax1.plot(x, g(x))\n\n#Plot on the second axes object\nax2.plot(x, f(x)*g(x))\n\n#Plot on the second axes object\nax3.plot(x, h(x))\n\n#Plot on the second axes object\nax4.plot(x, i(x))\n\nplt.show()", "3D Plotting", "%matplotlib qt\n\n%matplotlib inline\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef U(c1, c2, beta, gamma):\n return c1**(1 - gamma)/(1 - gamma) + beta*c2**(1 - gamma)/(1 - gamma)\n\nbeta = 0.98\ngamma = 2.0\n\nfig = plt.figure()\nax = fig.gca(projection=\"3d\")\n\nlow = 1.0\nhigh = 10.0\n\nc1, c2 = np.arange(low, high, 0.1), np.arange(low, high, 0.1)\n#c2 = np.arange(low, high, 0.1)\n\nC1, C2 = np.meshgrid(c1, c2)\n\nutils = U(C1, C2, beta, gamma)\n\nax.plot_surface(C1, C2, utils, alpha=0.3)\ncset = ax.contour(C1, C2, utils, zdir='z', offset=-2.0)\n#cset = ax.contour(C1, C2, utils, zdir='y', offset=10.0)\n#cset = ax.contour(C1, C2, utils, zdir='x', offset=1.0)\n\nplt.show()", "DataFrames", "import pandas as pd\n\nDF = pd.read_csv('http://people.stern.nyu.edu/wgreene/Econometrics/gasoline.csv')\n\nprint(DF.head())\nDF.head()\n\nDF.shape\n\nDF.describe()\n\nDF.columns\n\nDF.Y.plot()\n\nDF['Y'].plot()\n\nDF.columns = map(str.lower, DF.columns)\nprint(DF.columns)\n\nDF2 = DF.set_index('year')\nDF2.head()\n\nprint(DF2.loc[1953])" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tensorflow/docs-l10n
site/en-snapshot/model_optimization/guide/pruning/pruning_with_keras.ipynb
apache-2.0
[ "Copyright 2020 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Pruning in Keras example\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/model_optimization/guide/pruning/pruning_with_keras\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/pruning/pruning_with_keras.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/pruning/pruning_with_keras.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/model-optimization/tensorflow_model_optimization/g3doc/guide/pruning/pruning_with_keras.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nOverview\nWelcome to an end-to-end example for magnitude-based weight pruning.\nOther pages\nFor an introduction to what pruning is and to determine if you should use it (including what's supported), see the overview page.\nTo quickly 
find the APIs you need for your use case (beyond fully pruning a model with 80% sparsity), see the\ncomprehensive guide.\nSummary\nIn this tutorial, you will:\n\nTrain a tf.keras model for MNIST from scratch.\nFine tune the model by applying the pruning API and see the accuracy.\nCreate 3x smaller TF and TFLite models from pruning.\nCreate a 10x smaller TFLite model from combining pruning and post-training quantization.\nSee the persistence of accuracy from TF to TFLite.\n\nSetup", "! pip install -q tensorflow-model-optimization\n\nimport tempfile\nimport os\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom tensorflow import keras\n\n%load_ext tensorboard", "Train a model for MNIST without pruning", "# Load MNIST dataset\nmnist = keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# Normalize the input image so that each pixel value is between 0 and 1.\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\n# Define the model architecture.\nmodel = keras.Sequential([\n keras.layers.InputLayer(input_shape=(28, 28)),\n keras.layers.Reshape(target_shape=(28, 28, 1)),\n keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),\n keras.layers.MaxPooling2D(pool_size=(2, 2)),\n keras.layers.Flatten(),\n keras.layers.Dense(10)\n])\n\n# Train the digit classification model\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel.fit(\n train_images,\n train_labels,\n epochs=4,\n validation_split=0.1,\n)", "Evaluate baseline test accuracy and save the model for later usage.", "_, baseline_model_accuracy = model.evaluate(\n test_images, test_labels, verbose=0)\n\nprint('Baseline test accuracy:', baseline_model_accuracy)\n\n_, keras_file = tempfile.mkstemp('.h5')\ntf.keras.models.save_model(model, keras_file, include_optimizer=False)\nprint('Saved baseline model to:', keras_file)", "Fine-tune pre-trained model 
with pruning\nDefine the model\nYou will apply pruning to the whole model and see this in the model summary.\nIn this example, you start the model with 50% sparsity (50% zeros in weights)\nand end with 80% sparsity.\nIn the comprehensive guide, you can see how to prune some layers for model accuracy improvements.", "import tensorflow_model_optimization as tfmot\n\nprune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude\n\n# Compute end step to finish pruning after 2 epochs.\nbatch_size = 128\nepochs = 2\nvalidation_split = 0.1 # 10% of training set will be used for validation set. \n\nnum_images = train_images.shape[0] * (1 - validation_split)\nend_step = np.ceil(num_images / batch_size).astype(np.int32) * epochs\n\n# Define model for pruning.\npruning_params = {\n 'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.50,\n final_sparsity=0.80,\n begin_step=0,\n end_step=end_step)\n}\n\nmodel_for_pruning = prune_low_magnitude(model, **pruning_params)\n\n# `prune_low_magnitude` requires a recompile.\nmodel_for_pruning.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel_for_pruning.summary()", "Train and evaluate the model against baseline\nFine tune with pruning for two epochs.\ntfmot.sparsity.keras.UpdatePruningStep is required during training, and tfmot.sparsity.keras.PruningSummaries provides logs for tracking progress and debugging.", "logdir = tempfile.mkdtemp()\n\ncallbacks = [\n tfmot.sparsity.keras.UpdatePruningStep(),\n tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),\n]\n \nmodel_for_pruning.fit(train_images, train_labels,\n batch_size=batch_size, epochs=epochs, validation_split=validation_split,\n callbacks=callbacks)", "For this example, there is minimal loss in test accuracy after pruning, compared to the baseline.", "_, model_for_pruning_accuracy = model_for_pruning.evaluate(\n test_images, test_labels, verbose=0)\n\nprint('Baseline test 
accuracy:', baseline_model_accuracy) \nprint('Pruned test accuracy:', model_for_pruning_accuracy)", "The logs show the progression of sparsity on a per-layer basis.", "#docs_infra: no_execute\n%tensorboard --logdir={logdir}", "For non-Colab users, you can see the results of a previous run of this code block on TensorBoard.dev.\nCreate 3x smaller models from pruning\nBoth tfmot.sparsity.keras.strip_pruning and applying a standard compression algorithm (e.g. via gzip) are necessary to see the compression\nbenefits of pruning.\n\nstrip_pruning is necessary since it removes every tf.Variable that pruning only needs during training, which would otherwise add to model size during inference\nApplying a standard compression algorithm is necessary since the serialized weight matrices are the same size as they were before pruning. However, pruning makes most of the weights zeros, which is\nadded redundancy that algorithms can utilize to further compress the model.\n\nFirst, create a compressible model for TensorFlow.", "model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)\n\n_, pruned_keras_file = tempfile.mkstemp('.h5')\ntf.keras.models.save_model(model_for_export, pruned_keras_file, include_optimizer=False)\nprint('Saved pruned Keras model to:', pruned_keras_file)", "Then, create a compressible model for TFLite.", "converter = tf.lite.TFLiteConverter.from_keras_model(model_for_export)\npruned_tflite_model = converter.convert()\n\n_, pruned_tflite_file = tempfile.mkstemp('.tflite')\n\nwith open(pruned_tflite_file, 'wb') as f:\n f.write(pruned_tflite_model)\n\nprint('Saved pruned TFLite model to:', pruned_tflite_file)", "Define a helper function to actually compress the models via gzip and measure the zipped size.", "def get_gzipped_model_size(file):\n # Returns size of gzipped model, in bytes.\n import os\n import zipfile\n\n _, zipped_file = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(zipped_file, 'w', compression=zipfile.ZIP_DEFLATED) as f:\n 
f.write(file)\n\n return os.path.getsize(zipped_file)", "Compare and see that the models are 3x smaller from pruning.", "print(\"Size of gzipped baseline Keras model: %.2f bytes\" % (get_gzipped_model_size(keras_file)))\nprint(\"Size of gzipped pruned Keras model: %.2f bytes\" % (get_gzipped_model_size(pruned_keras_file)))\nprint(\"Size of gzipped pruned TFlite model: %.2f bytes\" % (get_gzipped_model_size(pruned_tflite_file)))", "Create a 10x smaller model from combining pruning and quantization\nYou can apply post-training quantization to the pruned model for additional benefits.", "converter = tf.lite.TFLiteConverter.from_keras_model(model_for_export)\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\nquantized_and_pruned_tflite_model = converter.convert()\n\n_, quantized_and_pruned_tflite_file = tempfile.mkstemp('.tflite')\n\nwith open(quantized_and_pruned_tflite_file, 'wb') as f:\n f.write(quantized_and_pruned_tflite_model)\n\nprint('Saved quantized and pruned TFLite model to:', quantized_and_pruned_tflite_file)\n\nprint(\"Size of gzipped baseline Keras model: %.2f bytes\" % (get_gzipped_model_size(keras_file)))\nprint(\"Size of gzipped pruned and quantized TFlite model: %.2f bytes\" % (get_gzipped_model_size(quantized_and_pruned_tflite_file)))", "See persistence of accuracy from TF to TFLite\nDefine a helper function to evaluate the TF Lite model on the test dataset.", "import numpy as np\n\ndef evaluate_model(interpreter):\n input_index = interpreter.get_input_details()[0][\"index\"]\n output_index = interpreter.get_output_details()[0][\"index\"]\n\n # Run predictions on ever y image in the \"test\" dataset.\n prediction_digits = []\n for i, test_image in enumerate(test_images):\n if i % 1000 == 0:\n print('Evaluated on {n} results so far.'.format(n=i))\n # Pre-processing: add batch dimension and convert to float32 to match with\n # the model's input data format.\n test_image = np.expand_dims(test_image, axis=0).astype(np.float32)\n 
interpreter.set_tensor(input_index, test_image)\n\n # Run inference.\n interpreter.invoke()\n\n # Post-processing: remove batch dimension and find the digit with highest\n # probability.\n output = interpreter.tensor(output_index)\n digit = np.argmax(output()[0])\n prediction_digits.append(digit)\n\n print('\\n')\n # Compare prediction results with ground truth labels to calculate accuracy.\n prediction_digits = np.array(prediction_digits)\n accuracy = (prediction_digits == test_labels).mean()\n return accuracy", "You evaluate the pruned and quantized model and see that the accuracy from TensorFlow persists to the TFLite backend.", "interpreter = tf.lite.Interpreter(model_content=quantized_and_pruned_tflite_model)\ninterpreter.allocate_tensors()\n\ntest_accuracy = evaluate_model(interpreter)\n\nprint('Pruned and quantized TFLite test_accuracy:', test_accuracy)\nprint('Pruned TF test accuracy:', model_for_pruning_accuracy)", "Conclusion\nIn this tutorial, you saw how to create sparse models with the TensorFlow Model Optimization Toolkit API for both TensorFlow and TFLite. You \nthen combined pruning with post-training quantization for additional benefits.\nYou created a 10x smaller model for MNIST, with minimal accuracy difference.\nWe encourage you to try this new capability, which can be particularly important for deployment in resource-constrained environments." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tensorflow/docs-l10n
site/en-snapshot/tensorboard/get_started.ipynb
apache-2.0
[ "Copyright 2019 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Get started with TensorBoard\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tensorboard/get_started\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorboard/blob/master/docs/get_started.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/tensorboard/blob/master/docs/get_started.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/tensorboard/docs/get_started.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nIn machine learning, to improve something you often need to be able to measure it. TensorBoard is a tool for providing the measurements and visualizations needed during the machine learning workflow. It enables tracking experiment metrics like loss and accuracy, visualizing the model graph, projecting embeddings to a lower dimensional space, and much more.\nThis quickstart will show how to quickly get started with TensorBoard. 
The remaining guides in this website provide more details on specific capabilities, many of which are not included here.", "# Load the TensorBoard notebook extension\n%load_ext tensorboard\n\nimport tensorflow as tf\nimport datetime\n\n# Clear any logs from previous runs\n!rm -rf ./logs/ ", "Using the MNIST dataset as the example, normalize the data and write a function that creates a simple Keras model for classifying the images into 10 classes.", "mnist = tf.keras.datasets.mnist\n\n(x_train, y_train),(x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\ndef create_model():\n return tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(512, activation='relu'),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation='softmax')\n ])", "Using TensorBoard with Keras Model.fit()\nWhen training with Keras's Model.fit(), adding the tf.keras.callbacks.TensorBoard callback ensures that logs are created and stored. Additionally, enable histogram computation every epoch with histogram_freq=1 (this is off by default)\nPlace the logs in a timestamped subdirectory to allow easy selection of different training runs.", "model = create_model()\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nlog_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\nmodel.fit(x=x_train, \n y=y_train, \n epochs=5, \n validation_data=(x_test, y_test), \n callbacks=[tensorboard_callback])", "Start TensorBoard through the command line or within a notebook experience. The two interfaces are generally the same. In notebooks, use the %tensorboard line magic. 
On the command line, run the same command without \"%\".", "%tensorboard --logdir logs/fit", "<!-- <img class=\"tfo-display-only-on-site\" src=\"https://github.com/tensorflow/tensorboard/blob/master/docs/images/quickstart_model_fit.png?raw=1\"/> -->\n\nA brief overview of the dashboards shown (tabs in top navigation bar):\n\nThe Scalars dashboard shows how the loss and metrics change with every epoch. You can use it to also track training speed, learning rate, and other scalar values.\nThe Graphs dashboard helps you visualize your model. In this case, the Keras graph of layers is shown which can help you ensure it is built correctly. \nThe Distributions and Histograms dashboards show the distribution of a Tensor over time. This can be useful to visualize weights and biases and verify that they are changing in an expected way.\n\nAdditional TensorBoard plugins are automatically enabled when you log other types of data. For example, the Keras TensorBoard callback lets you log images and embeddings as well. You can see what other plugins are available in TensorBoard by clicking on the \"inactive\" dropdown towards the top right.\nUsing TensorBoard with other methods\nWhen training with methods such as tf.GradientTape(), use tf.summary to log the required information.\nUse the same dataset as above, but convert it to tf.data.Dataset to take advantage of batching capabilities:", "train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))\ntest_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))\n\ntrain_dataset = train_dataset.shuffle(60000).batch(64)\ntest_dataset = test_dataset.batch(64)", "The training code follows the advanced quickstart tutorial, but shows how to log metrics to TensorBoard. 
Choose loss and optimizer:", "loss_object = tf.keras.losses.SparseCategoricalCrossentropy()\noptimizer = tf.keras.optimizers.Adam()", "Create stateful metrics that can be used to accumulate values during training and logged at any point:", "# Define our metrics\ntrain_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)\ntrain_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')\ntest_loss = tf.keras.metrics.Mean('test_loss', dtype=tf.float32)\ntest_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('test_accuracy')", "Define the training and test functions:", "def train_step(model, optimizer, x_train, y_train):\n with tf.GradientTape() as tape:\n predictions = model(x_train, training=True)\n loss = loss_object(y_train, predictions)\n grads = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n train_loss(loss)\n train_accuracy(y_train, predictions)\n\ndef test_step(model, x_test, y_test):\n predictions = model(x_test)\n loss = loss_object(y_test, predictions)\n\n test_loss(loss)\n test_accuracy(y_test, predictions)", "Set up summary writers to write the summaries to disk in a different logs directory:", "current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntrain_log_dir = 'logs/gradient_tape/' + current_time + '/train'\ntest_log_dir = 'logs/gradient_tape/' + current_time + '/test'\ntrain_summary_writer = tf.summary.create_file_writer(train_log_dir)\ntest_summary_writer = tf.summary.create_file_writer(test_log_dir)", "Start training. Use tf.summary.scalar() to log metrics (loss and accuracy) during training/testing within the scope of the summary writers to write the summaries to disk. You have control over which metrics to log and how often to do it. 
Other tf.summary functions enable logging other types of data.", "model = create_model() # reset our model\n\nEPOCHS = 5\n\nfor epoch in range(EPOCHS):\n for (x_train, y_train) in train_dataset:\n train_step(model, optimizer, x_train, y_train)\n with train_summary_writer.as_default():\n tf.summary.scalar('loss', train_loss.result(), step=epoch)\n tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)\n\n for (x_test, y_test) in test_dataset:\n test_step(model, x_test, y_test)\n with test_summary_writer.as_default():\n tf.summary.scalar('loss', test_loss.result(), step=epoch)\n tf.summary.scalar('accuracy', test_accuracy.result(), step=epoch)\n \n template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'\n print (template.format(epoch+1,\n train_loss.result(), \n train_accuracy.result()*100,\n test_loss.result(), \n test_accuracy.result()*100))\n\n # Reset metrics every epoch\n train_loss.reset_states()\n test_loss.reset_states()\n train_accuracy.reset_states()\n test_accuracy.reset_states()", "Open TensorBoard again, this time pointing it at the new log directory. We could have also started TensorBoard to monitor training while it progresses.", "%tensorboard --logdir logs/gradient_tape", "<!-- <img class=\"tfo-display-only-on-site\" src=\"https://github.com/tensorflow/tensorboard/blob/master/docs/images/quickstart_gradient_tape.png?raw=1\"/> -->\n\nThat's it! You have now seen how to use TensorBoard both through the Keras callback and through tf.summary for more custom scenarios. \nTensorBoard.dev: Host and share your ML experiment results\nTensorBoard.dev is a free public service that enables you to upload your TensorBoard logs and get a permalink that can be shared with everyone in academic papers, blog posts, social media, etc. 
This can enable better reproducibility and collaboration.\nTo use TensorBoard.dev, run the following command:", "!tensorboard dev upload \\\n --logdir logs/fit \\\n --name \"(optional) My latest experiment\" \\\n --description \"(optional) Simple comparison of several hyperparameters\" \\\n --one_shot", "Note that this invocation uses the exclamation prefix (!) to invoke the shell\nrather than the percent prefix (%) to invoke the colab magic. When invoking this command from the command line there is no need for either prefix.\nView an example here.\nFor more details on how to use TensorBoard.dev, see https://tensorboard.dev/#get-started" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
goodwordalchemy/thinkstats_notes_and_exercises
code/chap01ex.ipynb
gpl-3.0
[ "Exercise from Think Stats, 2nd Edition (thinkstats2.com)<br>\nAllen Downey", "import nsfg\ndf = nsfg.ReadFemPreg()\ndf", "Print value counts for <tt>birthord</tt> and compare to results published in the codebook", "df.birthord.value_counts().sort_index()", "Print value counts for <tt>prglngth</tt> and compare to results published in the codebook", "prgs = df.prglngth.value_counts().sort_index()\n\nw13orless = prgs[0:14]\nw14tow26 = prgs[14:27]\nw27tow50 = prgs[27:]\n\nprint sum(w13orless)\nprint sum(w14tow26)\nprint sum(w27tow50)\n\n", "Print value counts for <tt>agepreg</tt> and compare to results published in the codebook.\nLooking at this data, please remember my comments in the book about the obligation to approach data with consideration for the context and respect for the respondents.", "%matplotlib inline\n\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nagepreg = df.agepreg\n\nagepreg.plot(kind='hist', bins=50)\n \n\n\n", "Compute the mean birthweight.", "df.totalwgt_lb.mean()", "Create a new column named <tt>totalwgt_kg</tt> that contains birth weight in kilograms. Compute its mean. Remember that when you create a new column, you have to use dictionary syntax, not dot notation.", "from collections import defaultdict\n\ndef convertToKg(df):\n l = []\n for i, lb in df.totalwgt_lb.iteritems():\n l.append(lb * 0.453592)\n df['totalwgt_kg'] = l\n return df\n\ndf = convertToKg(df)\n\nprint 'wgt kg: ',df.totalwgt_kg.mean()\nprint 'wgt kg converted to lbs:', df.totalwgt_kg.mean() / 0.453592\nprint 'wgt lb: ',df.totalwgt_lb.mean()\n\n\n\n \n", "Look through the codebook and find a variable, other than the ones mentioned in the book, that you find interesting. 
Compute values counts, means, or other statistics.", "df.babysex.value_counts().sort_index()\n\nprint df.babysex[df.outcome==1][df.babysex==1].count()\nprint df.babysex[df.outcome==1][df.babysex==2].count()\n", "Create a boolean Series.", "df.outcome == 1", "Use a boolean Series to select the records for the pregnancies that ended in live birth.", "live = df[df.outcome == 1]\nlen(live)", "Count the number of live births with <tt>birthwgt_lb</tt> between 0 and 5 pounds (including both). The result should be 1125.", "len(live[(live.birthwgt_lb >= 0) & (live.birthwgt_lb <= 5)])", "Count the number of live births with <tt>birthwgt_lb</tt> between 9 and 95 pounds (including both). The result should be 798", "len(live[(live.birthwgt_lb >= 9) & (live.birthwgt_lb <= 95)])", "Use <tt>birthord</tt> to select the records for first babies and others. How many are there of each?", "firsts = df[df.birthord==1]\nothers = df[df.birthord>1]\nlen(firsts), len(others)\n\ndf.birthord.plot('hist')", "Compute the mean weight for first babies and others.", "firsts.totalwgt_lb.mean()\n\nothers.totalwgt_lb.mean()", "Compute the mean <tt>prglngth</tt> for first babies and others. Compute the difference in means, expressed in hours.", "\nprint 'pregnancy length for first baby',firsts.prglngth.mean()\nprint '\\t\\t\\tstd:',firsts.prglngth.std()\nprint 'mean preg length for other babies',others.prglngth.mean()\nprint '\\t\\t\\tstd:',others.prglngth.std()\nprint 'difference', abs(others.prglngth.mean() - firsts.prglngth.mean())\n\nplt.figure()\nfirsts.prglngth[firsts.babysex==1].plot('hist', bins=49, label=\"Male\")\nfirsts.prglngth[firsts.babysex==2].plot('hist', bins=49, label=\"Female\")\nplt.legend(loc=\"best\")\nplt.xlim(0,50)\nplt.figure()\nothers.prglngth[others.babysex==1].plot('hist', bins=49, label=\"Male\")\nothers.prglngth[others.babysex==2].plot('hist', bins=49, label=\"Female\")\nplt.legend(loc=\"best\")\nplt.xlim(0,50)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
google/CFU-Playground
third_party/tflite-micro/tensorflow/lite/micro/examples/magic_wand/train/train_magic_wand_model.ipynb
apache-2.0
[ "Train a gesture recognition model for microcontroller use\nThis notebook demonstrates how to train a 20kb gesture recognition model for TensorFlow Lite for Microcontrollers. It will produce the same model used in the magic_wand example application.\nThe model is designed to be used with Google Colaboratory.\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tflite-micro/blob/main/tensorflow/lite/micro/examples/magic_wand/train/train_magic_wand_model.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/tflite-micro/blob/main/tensorflow/lite/micro/examples/magic_wand/train/train_magic_wand_model.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>\n\nTraining is much faster using GPU acceleration. Before you proceed, ensure you are using a GPU runtime by going to Runtime -> Change runtime type and selecting GPU. 
Training will take around 5 minutes on a GPU runtime.\nConfigure dependencies\nRun the following cell to ensure the correct version of TensorFlow is used.\nWe'll also clone the TensorFlow repository, which contains the training scripts, and copy them into our workspace.", "# Clone the repository from GitHub\n!git clone --depth 1 -q https://github.com/tensorflow/tensorflow\n# Copy the training scripts into our workspace\n!cp -r tensorflow/tensorflow/lite/micro/examples/magic_wand/train train", "Prepare the data\nNext, we'll download the data and extract it into the expected location within the training scripts' directory.", "# Download the data we will use to train the model\n!wget http://download.tensorflow.org/models/tflite/magic_wand/data.tar.gz\n# Extract the data into the train directory\n!tar xvzf data.tar.gz -C train 1>/dev/null", "We'll then run the scripts that split the data into training, validation, and test sets.", "# The scripts must be run from within the train directory\n%cd train\n# Prepare the data\n!python data_prepare.py\n# Split the data by person\n!python data_split_person.py", "Load TensorBoard\nNow, we set up TensorBoard so that we can graph our accuracy and loss as training proceeds.", "# Load TensorBoard\n%load_ext tensorboard\n%tensorboard --logdir logs/scalars", "Begin training\nThe following cell will begin the training process. Training will take around 5 minutes on a GPU runtime. You'll see the metrics in TensorBoard after a few epochs.", "!python train.py --model CNN --person true", "Create a C source file\nThe train.py script writes a model, model.tflite, to the training scripts' directory.\nIn the following cell, we convert this model into a C++ source file we can use with TensorFlow Lite for Microcontrollers.", "# Install xxd if it is not available\n!apt-get -qq install xxd\n# Save the file as a C source file\n!xxd -i model.tflite > /content/model.cc\n# Print the source file\n!cat /content/model.cc" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
sony/nnabla
tutorial/model_finetuning.ipynb
apache-2.0
[ "NNabla Models Finetuning Tutorial\nHere we demonstrate how to perform finetuning using nnabla's pre-trained models.", "!pip install nnabla-ext-cuda100\n!git clone https://github.com/sony/nnabla.git\n%cd nnabla/tutorial", "Load the model\nLoading the model is very simple. All you need is just 2 lines.", "from nnabla.models.imagenet import ResNet18\nmodel = ResNet18()", "You can choose other ResNet models such as ResNet34, ResNet50, by specifying the model's name as an argument. Of course, you can choose other pretrained models as well. See the Docs.\nNOTE: If you use the ResNet18 for the first time, nnabla will automatically download the weights from https://nnabla.org and it may take up to a few minutes.\nDataset\nIn this tutorial, we use Caltech101 as the dataset for finetuning.\nCaltech101 consists of more than 9,000 object images in total and each image belongs to one of 101 distinct categories or \"clutter\" category. We use images from 101 categories for simple classification.\nWe have a script named caltech101_data.py which can automatically download the dataset and store it in nnabla_data.\nIf you have your own dataset and DataIterator which can load your data, you can use it instead.", "run caltech101_data.py\n\nbatch_size = 32 # we set batch_size = 32\nall_data = data_iterator_caltech101(batch_size)", "Since there is no separate data for training and validation in caltech101, we need to manually split it up.\nHere, we will split the dataset as the following way; 80% for training, and 20% for validation.", "num_samples = all_data.size\nnum_train_samples = int(0.8 * num_samples) # Take 80% for training, and the rest for validation. 
\nnum_class = 101\ndata_iterator_train = all_data.slice(\n rng=None, slice_start=0, slice_end=num_train_samples)\ndata_iterator_valid = all_data.slice(\n rng=None, slice_start=num_train_samples, slice_end=num_samples)", "Now we have model and data!\nOptional: Check the image in the dataset\nLet's take a look at what kind of images are included in the dataset. You can get images by DataIterator's method, next", "import matplotlib.pyplot as plt\n%matplotlib inline\nimages, labels = data_iterator_train.next()\nsample_image, sample_label = images[0], labels[0]\nplt.imshow(sample_image.transpose(1,2,0))\nplt.show()\nprint(\"image_shape: {}\".format(sample_image.shape))\nprint(\"label_id: {}\".format(sample_label))", "Preparing Graph Construction\nLet's start with importing basic modules.", "import nnabla as nn\n\n# Optional: If you want to use GPU\nfrom nnabla.ext_utils import get_extension_context\nctx = get_extension_context(\"cudnn\")\nnn.set_default_context(ctx)\next = nn.ext_utils.import_extension_module(\"cudnn\")", "Create input Variables for the Network\nNow we are going to create the input variables.", "channels, image_height, image_width = sample_image.shape # use info from the image we got\n\n# input variables for the validation network\nimage_valid = nn.Variable((batch_size, channels, image_height, image_width))\nlabel_valid = nn.Variable((batch_size, 1))\ninput_image_valid = {\"image\": image_valid, \"label\": label_valid}\n\n# input variables for the training network\nimage_train = nn.Variable((batch_size, channels, image_height, image_width))\nlabel_train = nn.Variable((batch_size, 1))\ninput_image_train = {\"image\": image_train, \"label\": label_train}", "Create the training graph using the pretrained model\nIf you take a look at the Model's API Reference, you can find use_up_to option. Specifying one of the pre-defined strings when calling the model, the computation graph will be constructed up to the layer you specify. 
For example, in case of ResNet18, you can choose one of the following as the last layer of the graph.\n\n'classifier' (default): The output of the final affine layer for classification.\n'pool': The output of the final global average pooling.\n'lastconv': The input of the final global average pooling without ReLU activation..\n'lastconv+relu': Network up to 'lastconv' followed by ReLU activation.\n\nFor finetuning, it is common to replace only the upper layers with the new (not trained) ones and re-use the lower layers with their pretrained weights. Also, pretrained models have been trained on a classification task on ImageNet, which has 1000 categories, so the output of the classifier layer has the output shape (batch_size, 1000) that wouldn't fit our current dataset. \nFor this reason, here we construct the graph up to the pool layer, which corresponds to the global average pooling layer in the original graph, and connect it to the additional affine (fully-connected) layer for 101-way classification. For finetuning, it is common to train only the weights for the newly added layers (in this case, the last affine layer), but in this tutorial, we will update the weights for all layers in the graph. Also, when creating a training graph, you need to set training=True.", "import nnabla.parametric_functions as PF\n\ny_train = model(image_train, force_global_pooling=True, use_up_to=\"pool\", training=True)\nwith nn.parameter_scope(\"finetuning_fc\"):\n pred_train = PF.affine(y_train, 101) # adding the affine layer to the graph.", "NOTE: You need to specify force_global_pooling=True when the input shape is different from what the model expects. You can check the model's default input shape by typing model.input_shape.\nCreate the validation graph using the model\nCreating the validation graph is almost the same. 
You simply need to change training flag to False.", "y_valid = model(image_valid, \n force_global_pooling=True, use_up_to=\"pool\", training=False)\nwith nn.parameter_scope(\"finetuning_fc\"):\n pred_valid = PF.affine(y_valid, 101)\npred_valid.persistent = True # to keep the value when get `forward(clear_buffer=True)`-ed.", "Define the functions for computing Loss and Categorical Error", "import nnabla.functions as F\n\n\ndef loss_function(pred, label):\n \"\"\"\n Compute loss.\n \"\"\"\n loss = F.mean(F.softmax_cross_entropy(pred, label))\n return loss\n\nloss_valid = loss_function(pred_valid, label_valid)\ntop_1_error_valid = F.mean(F.top_n_error(pred_valid, label_valid))\nloss_train = loss_function(pred_train, label_train)\ntop_1_error_train = F.mean(F.top_n_error(pred_train, label_train))", "Prepare the solver", "import nnabla.solvers as S\n\nsolver = S.Momentum(0.01) # you can choose others as well\n\nsolver.set_parameters(nn.get_parameters())", "Some setting for iteration", "num_epoch = 10 # arbitrary\none_epoch = data_iterator_train.size // batch_size\nmax_iter = num_epoch * one_epoch\nval_iter = data_iterator_valid.size // batch_size", "Performance before finetuning\nLet's see how well the model works. 
Note that all the weights are pretrained on ImageNet except for the last affine layer.\nFirst, prepare a function to show us the model's performance,", "def run_validation(pred_valid, loss_valid, top_1_error_valid,\n input_image_valid, data_iterator_valid, \n with_visualized=False, num_visualized=3):\n assert num_visualized < pred_valid.shape[0], \"too many images to plot.\"\n val_iter = data_iterator_valid.size // pred_valid.shape[0]\n ve = 0.\n vloss = 0.\n for j in range(val_iter):\n v_image, v_label = data_iterator_valid.next()\n input_image_valid[\"image\"].d = v_image\n input_image_valid[\"label\"].d = v_label\n nn.forward_all([loss_valid, top_1_error_valid], clear_no_need_grad=True)\n vloss += loss_valid.d.copy()\n ve += top_1_error_valid.d.copy()\n\n vloss /= val_iter\n ve /= val_iter\n \n if with_visualized:\n ind = 1\n random_start = np.random.randint(pred_valid.shape[0] - num_visualized)\n fig = plt.figure(figsize=(12., 12.))\n for n in range(random_start, random_start + num_visualized):\n sample_image, sample_label = v_image[n], v_label[n]\n ax = fig.add_subplot(1, num_visualized, ind)\n ax.imshow(sample_image.transpose(1,2,0))\n with nn.auto_forward():\n predicted_id = np.argmax(F.softmax(pred_valid)[n].d)\n result = \"true label_id: {} - predicted as {}\".format(str(sample_label[0]), str(predicted_id))\n ax.set_title(result)\n ind += 1\n fig.show()\n\n return ve, vloss\n\n_, _ = run_validation(pred_valid, loss_valid, top_1_error_valid, input_image_valid, data_iterator_valid, with_visualized=True)", "As you can see, the model fails to classify images properly. 
Now, let's begin the finetuning and see how performance improves.\nStart Finetuning\nLet's prepare the monitor for training.", "from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed\nmonitor = Monitor(\"tmp.monitor\")\nmonitor_loss = MonitorSeries(\"Training loss\", monitor, interval=200)\nmonitor_err = MonitorSeries(\"Training error\", monitor, interval=200)\nmonitor_vloss = MonitorSeries(\"Test loss\", monitor, interval=200)\nmonitor_verr = MonitorSeries(\"Test error\", monitor, interval=200)\n\n# Training-loop\nfor i in range(max_iter):\n image, label = data_iterator_train.next()\n input_image_train[\"image\"].d = image\n input_image_train[\"label\"].d = label\n nn.forward_all([loss_train, top_1_error_train], clear_no_need_grad=True)\n\n monitor_loss.add(i, loss_train.d.copy())\n monitor_err.add(i, top_1_error_train.d.copy())\n\n solver.zero_grad()\n loss_train.backward(clear_buffer=True)\n\n # update parameters\n solver.weight_decay(3e-4)\n solver.update()\n\n if i % 200 == 0:\n ve, vloss = run_validation(pred_valid, loss_valid, top_1_error_valid,\n input_image_valid, data_iterator_valid, \n with_visualized=False, num_visualized=3)\n\n monitor_vloss.add(i, vloss)\n monitor_verr.add(i, ve)", "As you see, the loss and error rate is decreasing as the finetuning progresses.\nLet's see the classification result after finetuning.", "_, _ = run_validation(pred_valid, loss_valid, top_1_error_valid, input_image_valid, data_iterator_valid, with_visualized=True)", "You can see now the model is able to classify the image properly.\nFinetuning more\nwe have a convenient script named finetuning.py. By using this, you can try finetuning with different models even on your original dataset.\nTo do this, you need to prepare your own dataset and do some preprocessing. We will explain how to do this in the following.\nPrepare your dataset\nSuppose you have a lot of images which can be used for image classification. 
You need to organize your data in a certain manner. Here, we will explain that with another dataset, Stanford Dogs Dataset. \nFirst, visit the official page and download images.tar (here is the direct link). Next, untar the archive and then you will see a directory named Images. Inside that directory, there are many subdirectories and each subdirectory stores images which belong to 1 category. For example, a directory n02099712-Labrador_retriever contains labrador retriever's images only. So if you want to use your own dataset, you need to organize your images and directiories in the same way like the following;\nparent_directory \n├── subdirectory_for_category_A \n│ ├── image_0.jpg \n│ ├── image_1.jpg \n│ ├── image_2.jpg \n│ ├── ... \n│ \n├── subdirectory_for_category_B \n│ ├── image_0.jpg \n│ ├── ... \n│ \n├── subdirectory_for_category_C \n│ ├── image_0.jpg \n│ ├── ... \n│ \n├── subdirectory_for_category_D \n│ ├── image_0.jpg \n│ ├── ... \n│ \n ...\nThe numbers of images in each category can vary, do not have to be exactly the same. Once you arrange your dataset, now you're good to go!\nCreate image classification dataset using NNabla CLI\nNow that you prepare and organize your dataset, the only thing you have to do is to create a .csv file which will be used in finetuning.py. To do so, you can use NNabla's Python Command Line Interface. Just type like the following. 
\nnnabla_cli create_image_classification_dataset -i &lt;path to parent directory&gt; -o &lt;output directory which contains \"preprocessed\" images&gt; -c &lt;number of channels&gt; -w &lt;width&gt; -g &lt;height&gt; -m &lt;padding or trimming&gt; -s &lt;whether apply shuffle or not&gt; -f1 &lt;name of the output csv file for training data&gt; -f2 &lt;name of the output csv file for test data&gt; -r2 &lt;ratio(%) of test data to training data&gt;\nIf you do that on Stanford Dogs Dataset, \nnnabla_cli create_image_classification_dataset -i Images -o arranged_images -c 3 -w 128 -g 128 -m padding -s true -f1 stanford_dog_train.csv -f2 stanford_dog_test.csv -r2 20\nNote that output .csv file will be stored in the same directory you specified with -o option. For more information, please check the docs.\nAfter executing the command above, you can start finetuning on your dataset. \nRun finetuning\nAll you need is just to type one line.\npython finetuning.py --model &lt;model name&gt; --train-csv &lt;.csv file containing training data&gt; --test-csv &lt;.csv file containing test data&gt;\nIt will execute finetuning on your dataset!", "run finetuning.py --model ResNet34 --epoch 10 --train-csv ~/nnabla_data/stanford_dog_arranged/stanford_dog_train.csv --test-csv ~/nnabla_data/stanford_dog_arranged/stanford_dog_test.csv --shuffle True", "An example of how to use finetuning's result for inference\nOnce the finetuning finished, let's use it for inference! The script above has saved the parameters at every certain iteration you specified. So now call the same model you trained and this time let's use the finetuned parameters in the following way.", "from nnabla.models.imagenet import ResNet34\nimport nnabla as nn\n\nparam_path = \"params_XXX.h5\" # specify the path to the saved parameter (.h5)\n\nmodel = ResNet34()\nbatch_size = 1 # just for inference\ninput_shape = (batch_size, ) + model.input_shape", "Then define an input Variable and a network for inference. 
Note that you need to construct the network exactly the same way as done in finetuning script (layer configuration, parameters names, and so on...).", "x = nn.Variable(input_shape) # input Variable\npooled = model(x, use_up_to=\"pool\", training=False)\nwith nn.parameter_scope(\"finetuning\"):\n with nn.parameter_scope(\"last_fc\"):\n pred = PF.affine(pooled, 120)", "Load the parameters which you finetuned above. You can use nn.load_parameters() to load the parameters. Once you call this, the parameters stored in the params.h5 will be stored in global scope. You can check the parameters are different before and after nn.load_parameters() by using nn.get_parameters().", "nn.load_parameters(param_path) # load the finetuned parameters.\npred.forward()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
nwfpug/meetings
2017-01-23/pandas.ipynb
gpl-3.0
[ "Python pandas Q&A video series by Data School\nYouTube playlist and GitHub repository\nTable of contents\n\n<a href=\"#1.-What-is-pandas%3F-%28video%29\">What is pandas?</a>\n<a href=\"#2.-How-do-I-read-a-tabular-data-file-into-pandas%3F-%28video%29\">How do I read a tabular data file into pandas?</a>\n<a href=\"#3.-How-do-I-select-a-pandas-Series-from-a-DataFrame%3F-%28video%29\">How do I select a pandas Series from a DataFrame?</a>\n<a href=\"#4.-Why-do-some-pandas-commands-end-with-parentheses-%28and-others-don't%29%3F-%28video%29\">Why do some pandas commands end with parentheses (and others don't)?</a>\n<a href=\"#5.-How-do-I-rename-columns-in-a-pandas-DataFrame%3F-%28video%29\">How do I rename columns in a pandas DataFrame?</a>\n<a href=\"#6.-How-do-I-remove-columns-from-a-pandas-DataFrame%3F-%28video%29\">How do I remove columns from a pandas DataFrame?</a>\n<a href=\"#7.-How-do-I-sort-a-pandas-DataFrame-or-a-Series%3F-%28video%29\">How do I sort a pandas DataFrame or a Series?</a>\n<a href=\"#8.-How-do-I-filter-rows-of-a-pandas-DataFrame-by-column-value%3F-%28video%29\">How do I filter rows of a pandas DataFrame by column value?</a>\n<a href=\"#9.-How-do-I-apply-multiple-filter-criteria-to-a-pandas-DataFrame%3F-%28video%29\">How do I apply multiple filter criteria to a pandas DataFrame?</a>\n<a href=\"#10.-Your-pandas-questions-answered%21-%28video%29\">Your pandas questions answered!</a>\n<a href=\"#11.-How-do-I-use-the-%22axis%22-parameter-in-pandas%3F-%28video%29\">How do I use the \"axis\" parameter in pandas?</a>\n<a href=\"#12.-How-do-I-use-string-methods-in-pandas%3F-%28video%29\">How do I use string methods in pandas?</a>\n<a href=\"#13.-How-do-I-change-the-data-type-of-a-pandas-Series%3F-%28video%29\">How do I change the data type of a pandas Series?</a>\n<a href=\"#14.-When-should-I-use-a-%22groupby%22-in-pandas%3F-%28video%29\">When should I use a \"groupby\" in pandas?</a>\n<a href=\"#15.-How-do-I-explore-a-pandas-Series%3F-%28video%29\">How 
do I explore a pandas Series?</a>\n<a href=\"#16.-How-do-I-handle-missing-values-in-pandas%3F-%28video%29\">How do I handle missing values in pandas?</a>\n<a href=\"#17.-What-do-I-need-to-know-about-the-pandas-index%3F-%28Part-1%29-%28video%29\">What do I need to know about the pandas index? (Part 1)</a>\n<a href=\"#18.-What-do-I-need-to-know-about-the-pandas-index%3F-%28Part-2%29-%28video%29\">What do I need to know about the pandas index? (Part 2)</a>\n<a href=\"#19.-How-do-I-select-multiple-rows-and-columns-from-a-pandas-DataFrame%3F-%28video%29\">How do I select multiple rows and columns from a pandas DataFrame?</a>\n<a href=\"#20.-When-should-I-use-the-%22inplace%22-parameter-in-pandas%3F-%28video%29\">When should I use the \"inplace\" parameter in pandas?</a>\n<a href=\"#21.-How-do-I-make-my-pandas-DataFrame-smaller-and-faster%3F-%28video%29\">How do I make my pandas DataFrame smaller and faster?</a>\n<a href=\"#22.-How-do-I-use-pandas-with-scikit-learn-to-create-Kaggle-submissions%3F-%28video%29\">How do I use pandas with scikit-learn to create Kaggle submissions?</a>\n<a href=\"#23.-More-of-your-pandas-questions-answered%21-%28video%29\">More of your pandas questions answered!</a>\n<a href=\"#24.-How-do-I-create-dummy-variables-in-pandas%3F-%28video%29\">How do I create dummy variables in pandas?</a>\n<a href=\"#25.-How-do-I-work-with-dates-and-times-in-pandas%3F-%28video%29\">How do I work with dates and times in pandas?</a>\n<a href=\"#26.-How-do-I-find-and-remove-duplicate-rows-in-pandas%3F-%28video%29\">How do I find and remove duplicate rows in pandas?</a>\n<a href=\"#27.-How-do-I-avoid-a-SettingWithCopyWarning-in-pandas%3F-%28video%29\">How do I avoid a SettingWithCopyWarning in pandas?</a>", "# conventional way to import pandas\nimport pandas as pd\n# get Pandas version #\nprint ('Pandas version', pd.__version__)", "1. 
What is pandas?\n\npandas main page\npandas installation instructions\nAnaconda distribution of Python (includes pandas)\nHow to use the IPython/Jupyter notebook (video)\n\n2. How do I read a tabular data file into pandas?", "# read a dataset of Chipotle orders directly from a URL and store the results in a DataFrame\norders = pd.read_table('data/chipotle.tsv')\n\n# examine the first 5 rows\norders.head()", "Documentation for read_table", "users = pd.read_table('data/u.user')\n# examine the first 5 rows\nusers.head()\n\nusers = pd.read_table('data/u.user', sep='|')\n# examine the first 5 rows\nusers.head()\n\nusers = pd.read_table('data/u.user', sep='|', header=None)\n# examine the first 5 rows\nusers.head()\n\nuser_cols = ['user_id', 'age', 'gender', 'occupation', 'zip_code']\nusers = pd.read_table('data/u.user', sep='|', header=None, names=user_cols)\n# examine the first 5 rows\nusers.head()", "3. How do I select a pandas Series from a DataFrame?", "# read a dataset of UFO reports into a DataFrame\nufo = pd.read_table('data/ufo.csv', sep=',')", "OR", "# read_csv is equivalent to read_table, except it assumes a comma separator\nufo = pd.read_csv('data/ufo.csv')\ntype(ufo)\n\n# examine the first 5 rows\nufo.head()\n\n# select the 'City' Series using bracket notation\nufo['Colors Reported']\n\ntype(ufo['City'])\n\n# or equivalently, use dot notation - see notes below\nufo.City", "Bracket notation will always work, whereas dot notation has limitations:\n\nDot notation doesn't work if there are spaces in the Series name\nDot notation doesn't work if the Series has the same name as a DataFrame method or attribute (like 'head' or 'shape')\nDot notation can't be used to define the name of a new Series (see below)", "# create a new 'Location' Series (must use bracket notation to define the Series name)\nufo['Location'] = ufo.City + ', ' + ufo.State\nufo.head()", "[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n4. 
Why do some pandas commands end with parentheses (and others don't)?", "# read a dataset of top-rated IMDb movies into a DataFrame\nmovies = pd.read_csv('data/imdb_1000.csv')", "Methods end with parentheses, while attributes don't:", "# example method: show the first 5 rows\nmovies.head()\n\n# example method: calculate summary statistics\nmovies.describe()\n\n# example attribute: number of rows and columns\nmovies.shape\n\n# example attribute: data type of each column\nmovies.dtypes\n\n# use an optional parameter to the describe method to summarize only 'object' columns\nmovies.describe(include=['object'])", "Documentation for describe\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n5. How do I rename columns in a pandas DataFrame?", "# read a dataset of UFO reports into a DataFrame\nufo = pd.read_csv('data/ufo.csv')\n\n# examine the column names\nufo.columns\n\n# rename two of the columns by using the 'rename' method\nufo.rename(columns={'Colors Reported':'Colors_Reported', 'Shape Reported':'Shape_Reported'}, inplace=True)\nufo.columns", "Documentation for rename", "# replace all of the column names by overwriting the 'columns' attribute\nufo_cols = ['city', 'colors reported', 'shape reported', 'state', 'time']\nufo.columns = ufo_cols\nufo.columns\n\n# replace the column names during the file reading process by using the 'names' parameter\nufo = pd.read_csv('data/ufo.csv', header=0, names=ufo_cols)\nufo.head()\n", "Documentation for read_csv", "# replace all spaces with underscores in the column names by using the 'str.replace' method\nufo.columns = ufo.columns.str.replace(' ', '_')\nufo.columns", "Documentation for str.replace\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n6. 
How do I remove columns from a pandas DataFrame?", "# read a dataset of UFO reports into a DataFrame\nufo = pd.read_csv('data/ufo.csv')\nufo.head()\n\n# remove a single column (axis=1 refers to columns)\nufo.drop('Colors Reported', axis=1, inplace=True)\nufo.head()", "Documentation for drop", "# remove multiple columns at once\nufo.drop(['City', 'State'], axis=1, inplace=True)\nufo.head()\n\n# remove multiple rows at once (axis=0 refers to rows)\nufo.drop([0, 1], axis=0, inplace=True)\nufo.head()", "[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n7. How do I sort a pandas DataFrame or a Series?", "# read a dataset of top-rated IMDb movies into a DataFrame\nmovies = pd.read_csv('data/imdb_1000.csv')\nmovies.head()", "Note: None of the sorting methods below affect the underlying data. (In other words, the sorting is temporary).", "# sort the 'title' Series in ascending order (returns a Series)\nmovies.title.sort_values()\n\n# sort in descending order instead\nmovies.title.sort_values(ascending=False).head()", "Documentation for sort_values for a Series. (Prior to version 0.17, use order instead.)", "# sort the entire DataFrame by the 'title' Series (returns a DataFrame)\nmovies.sort_values('title').head()\n\n# sort in descending order instead\nmovies.sort_values('title', ascending=False).head()", "Documentation for sort_values for a DataFrame. (Prior to version 0.17, use sort instead.)", "# sort the DataFrame first by 'content_rating', then by 'duration'\nmovies.sort_values(['content_rating', 'duration']).head()", "Summary of changes to the sorting API in pandas 0.17\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n8. 
How do I filter rows of a pandas DataFrame by column value?", "# read a dataset of top-rated IMDb movies into a DataFrame\nmovies = pd.read_csv('data/imdb_1000.csv')\nmovies.head()\n\n# examine the number of rows and columns\nmovies.shape", "Goal: Filter the DataFrame rows to only show movies with a 'duration' of at least 200 minutes.", "# create a list in which each element refers to a DataFrame row: True if the row satisfies the condition, False otherwise\nbooleans = []\nfor length in movies.duration:\n if length >= 200:\n booleans.append(True)\n else:\n booleans.append(False)\n\n# confirm that the list has the same length as the DataFrame\nlen(booleans)\n\n# examine the first five list elements\nbooleans[0:5]\n\n# convert the list to a Series\nis_long = pd.Series(booleans)\nis_long.head()\n\n# use bracket notation with the boolean Series to tell the DataFrame which rows to display\nmovies[is_long]\n\n# simplify the steps above: no need to write a for loop to create 'is_long' since pandas will broadcast the comparison\nis_long = movies.duration >= 200\nmovies[is_long]\n\n# or equivalently, write it in one line (no need to create the 'is_long' object)\nmovies[movies.duration >= 200]\n\n# select the 'genre' Series from the filtered DataFrame\nis_long = movies.duration >= 200\nmovies[is_long].genre\n\n# or equivalently, use the 'loc' method\nmovies.loc[movies.duration >= 200, 'genre']", "Documentation for loc\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n9. 
How do I apply multiple filter criteria to a pandas DataFrame?", "# read a dataset of top-rated IMDb movies into a DataFrame\nmovies = pd.read_csv('data/imdb_1000.csv')\nmovies.head()\n\n# filter the DataFrame to only show movies with a 'duration' of at least 200 minutes\nmovies[movies.duration >= 200]", "Understanding logical operators:\n\nand: True only if both sides of the operator are True\nor: True if either side of the operator is True", "# demonstration of the 'and' operator\nprint(True and True)\nprint(True and False)\nprint(False and False)\n\n# demonstration of the 'or' operator\nprint(True or True)\nprint(True or False)\nprint(False or False)", "Rules for specifying multiple filter criteria in pandas:\n\nuse &amp; instead of and\nuse | instead of or\nadd parentheses around each condition to specify evaluation order\n\nGoal: Further filter the DataFrame of long movies (duration >= 200) to only show movies which also have a 'genre' of 'Drama'", "# CORRECT: use the '&' operator to specify that both conditions are required\nmovies[(movies.duration >=200) & (movies.genre == 'Drama')]\n\n# INCORRECT: using the '|' operator would have shown movies that are either long or dramas (or both)\nmovies[(movies.duration >=200) | (movies.genre == 'Drama')].head()", "Goal: Filter the original DataFrame to show movies with a 'genre' of 'Crime' or 'Drama' or 'Action'", "# use the '|' operator to specify that a row can match any of the three criteria\nmovies[(movies.genre == 'Crime') | (movies.genre == 'Drama') | (movies.genre == 'Action')].tail(20)\n\n# or equivalently, use the 'isin' method\n#movies[movies.genre.isin(['Crime', 'Drama', 'Action'])].head(10)", "Documentation for isin\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n10. 
Your pandas questions answered!\nQuestion: When reading from a file, how do I read in only a subset of the columns?", "# read a dataset of UFO reports into a DataFrame, and check the columns\nufo = pd.read_csv('data/ufo.csv')\nufo.columns\n\n# specify which columns to include by name\nufo = pd.read_csv('data/ufo.csv', usecols=['City', 'State'])\n\n# or equivalently, specify columns by position\nufo = pd.read_csv('data/ufo.csv', usecols=[0, 4])\nufo.columns", "Question: When reading from a file, how do I read in only a subset of the rows?", "# specify how many rows to read\nufo = pd.read_csv('data/ufo.csv', nrows=3)\nufo", "Documentation for read_csv\nQuestion: How do I iterate through a Series?", "# Series are directly iterable (like a list)\nfor c in ufo.City:\n print(c)", "Question: How do I iterate through a DataFrame?", "# various methods are available to iterate through a DataFrame\nfor index, row in ufo.iterrows():\n print(index, row.City, row.State)", "Documentation for iterrows\nQuestion: How do I drop all non-numeric columns from a DataFrame?", "# read a dataset of alcohol consumption into a DataFrame, and check the data types\ndrinks = pd.read_csv('data/drinks.csv')\ndrinks.dtypes\n\n# only include numeric columns in the DataFrame\nimport numpy as np\ndrinks.select_dtypes(include=[np.number]).dtypes", "Documentation for select_dtypes\nQuestion: How do I know whether I should pass an argument as a string or a list?", "# describe all of the numeric columns\ndrinks.describe()\n\n# pass the string 'all' to describe all columns\ndrinks.describe(include='all')\n\n# pass a list of data types to only describe certain types\ndrinks.describe(include=['object', 'float64'])\n\n# pass a list even if you only want to describe a single data type\ndrinks.describe(include=['object'])", "Documentation for describe\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n11. 
How do I use the \"axis\" parameter in pandas?", "# read a dataset of alcohol consumption into a DataFrame\ndrinks = pd.read_csv('data/drinks.csv')\ndrinks.head()\n\n# drop a column (temporarily)\ndrinks.drop('continent', axis=1).head()", "Documentation for drop", "# drop a row (temporarily)\ndrinks.drop(2, axis=0).head()", "When referring to rows or columns with the axis parameter:\n\naxis 0 refers to rows\naxis 1 refers to columns", "# calculate the mean of each numeric column\ndrinks.mean()\n\n# or equivalently, specify the axis explicitly\ndrinks.mean(axis=0)", "Documentation for mean", "# calculate the mean of each row\ndrinks.mean(axis=1).head()", "When performing a mathematical operation with the axis parameter:\n\naxis 0 means the operation should \"move down\" the row axis\naxis 1 means the operation should \"move across\" the column axis", "# 'index' is an alias for axis 0\ndrinks.mean(axis='index')\n\n# 'columns' is an alias for axis 1\ndrinks.mean(axis='columns').head()", "[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n12. 
How do I use string methods in pandas?", "# read a dataset of Chipotle orders into a DataFrame\norders = pd.read_table('data/chipotle.tsv')\norders.head()\n\n# normal way to access string methods in Python\n'hello'.upper()\n\n# string methods for pandas Series are accessed via 'str'\norders.item_name.str.upper().head()\n\n# string method 'contains' checks for a substring and returns a boolean Series\norders.item_name.str.contains('Chicken').head()\n\n# use the boolean Series to filter the DataFrame\norders[orders.item_name.str.contains('Chicken')].head()\n\n# string methods can be chained together\norders.choice_description.str.replace('[', '').str.replace(']', '').head()\n\n# many pandas string methods support regular expressions (regex)\norders.choice_description.str.replace('[\\[\\]]', '').head()", "String handling section of the pandas API reference\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n13. How do I change the data type of a pandas Series?", "# read a dataset of alcohol consumption into a DataFrame\ndrinks = pd.read_csv('data/drinks.csv')\ndrinks.head()\n\n# examine the data type of each Series\ndrinks.dtypes\n\n# change the data type of an existing Series\ndrinks['beer_servings'] = drinks.beer_servings.astype(float)\ndrinks.dtypes", "Documentation for astype", "# alternatively, change the data type of a Series while reading in a file\ndrinks = pd.read_csv('data/drinks.csv', dtype={'beer_servings':float})\ndrinks.dtypes\n\n# read a dataset of Chipotle orders into a DataFrame\norders = pd.read_table('data/chipotle.tsv')\norders.head()\n\n# examine the data type of each Series\norders.dtypes\n\n# convert a string to a number in order to do math\norders.item_price.str.replace('$', '').astype(float).mean()\n\n# string method 'contains' checks for a substring and returns a boolean Series\norders.item_name.str.contains('Chicken').head()\n\n# convert a boolean Series to an integer (False = 0, True = 
1)\norders.item_name.str.contains('Chicken').astype(int).head()", "[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n14. When should I use a \"groupby\" in pandas?", "# read a dataset of alcohol consumption into a DataFrame\ndrinks = pd.read_csv('data/drinks.csv')\ndrinks.head()\n\n# calculate the mean beer servings across the entire dataset\ndrinks.beer_servings.mean()\n\n# calculate the mean beer servings just for countries in Africa\ndrinks[drinks.continent=='Africa'].beer_servings.mean()\n\n# calculate the mean beer servings for each continent\ndrinks.groupby('continent').beer_servings.mean()", "Documentation for groupby", "# other aggregation functions (such as 'max') can also be used with groupby\ndrinks.groupby('continent').beer_servings.max()\n\n# multiple aggregation functions can be applied simultaneously\ndrinks.groupby('continent').beer_servings.agg(['count', 'mean', 'min', 'max'])", "Documentation for agg", "# specifying a column to which the aggregation function should be applied is not required\ndrinks.groupby('continent').mean()\n\n# allow plots to appear in the notebook\n%matplotlib inline\n\n# side-by-side bar plot of the DataFrame directly above\ndrinks.groupby('continent').mean().plot(kind='bar')", "Documentation for plot\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n15. 
How do I explore a pandas Series?", "# read a dataset of top-rated IMDb movies into a DataFrame\nmovies = pd.read_csv('data/imdb_1000.csv')\nmovies.head()\n\n# examine the data type of each Series\nmovies.dtypes", "Exploring a non-numeric Series:", "# count the non-null values, unique values, and frequency of the most common value\nmovies.genre.describe()", "Documentation for describe", "# count how many times each value in the Series occurs\nmovies.genre.value_counts()", "Documentation for value_counts", "# display percentages instead of raw counts\nmovies.genre.value_counts(normalize=True)\n\n# 'value_counts' (like many pandas methods) outputs a Series\ntype(movies.genre.value_counts())\n\n# thus, you can add another Series method on the end\nmovies.genre.value_counts().head()\n\n# display the unique values in the Series\nmovies.genre.unique()\n\n# count the number of unique values in the Series\nmovies.genre.nunique()", "Documentation for unique and nunique", "# compute a cross-tabulation of two Series\npd.crosstab(movies.genre, movies.content_rating)", "Documentation for crosstab\nExploring a numeric Series:", "# calculate various summary statistics\nmovies.duration.describe()\n\n# many statistics are implemented as Series methods\nmovies.duration.mean()", "Documentation for mean", "# 'value_counts' is primarily useful for categorical data, not numerical data\nmovies.duration.value_counts().head()\n\n# allow plots to appear in the notebook\n%matplotlib inline\n\n# histogram of the 'duration' Series (shows the distribution of a numerical variable)\nmovies.duration.plot(kind='hist')\n\n# bar plot of the 'value_counts' for the 'genre' Series\nmovies.genre.value_counts().plot(kind='bar')", "Documentation for plot\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n16. 
How do I handle missing values in pandas?", "# read a dataset of UFO reports into a DataFrame\nufo = pd.read_csv('data/ufo.csv')\nufo.tail()", "What does \"NaN\" mean?\n\n\"NaN\" is not a string, rather it's a special value: numpy.nan.\nIt stands for \"Not a Number\" and indicates a missing value.\nread_csv detects missing values (by default) when reading the file, and replaces them with this special value.\n\nDocumentation for read_csv", "# 'isnull' returns a DataFrame of booleans (True if missing, False if not missing)\nufo.isnull().tail()\n\n# 'nonnull' returns the opposite of 'isnull' (True if not missing, False if missing)\nufo.notnull().tail()", "Documentation for isnull and notnull", "# count the number of missing values in each Series\nufo.isnull().sum()", "This calculation works because:\n\nThe sum method for a DataFrame operates on axis=0 by default (and thus produces column sums).\nIn order to add boolean values, pandas converts True to 1 and False to 0.", "# use the 'isnull' Series method to filter the DataFrame rows\nufo[ufo.City.isnull()].head()", "How to handle missing values depends on the dataset as well as the nature of your analysis. 
Here are some options:", "# examine the number of rows and columns\nufo.shape\n\n# if 'any' values are missing in a row, then drop that row\nufo.dropna(how='any').shape", "Documentation for dropna", "# 'inplace' parameter for 'dropna' is False by default, thus rows were only dropped temporarily\nufo.shape\n\n# if 'all' values are missing in a row, then drop that row (none are dropped in this case)\nufo.dropna(how='all').shape\n\n# if 'any' values are missing in a row (considering only 'City' and 'Shape Reported'), then drop that row\nufo.dropna(subset=['City', 'Shape Reported'], how='any').shape\n\n# if 'all' values are missing in a row (considering only 'City' and 'Shape Reported'), then drop that row\nufo.dropna(subset=['City', 'Shape Reported'], how='all').shape\n\n# 'value_counts' does not include missing values by default\nufo['Shape Reported'].value_counts().head()\n\n# explicitly include missing values\nufo['Shape Reported'].value_counts(dropna=False).head()", "Documentation for value_counts", "# fill in missing values with a specified value\nufo['Shape Reported'].fillna(value='VARIOUS', inplace=True)", "Documentation for fillna", "# confirm that the missing values were filled in\nufo['Shape Reported'].value_counts().head()", "Working with missing data in pandas\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n17. 
What do I need to know about the pandas index?", "# read a dataset of alcohol consumption into a DataFrame\ndrinks = pd.read_csv('data/drinks.csv')\ndrinks.head()\n\n# every DataFrame has an index (sometimes called the \"row labels\")\ndrinks.index\n\n# column names are also stored in a special \"index\" object\ndrinks.columns\n\n# neither the index nor the columns are included in the shape\ndrinks.shape\n\n# index and columns both default to integers if you don't define them\npd.read_table('data/imdb_1000.csv', header=None, sep='|').head()", "What is the index used for?\n\nidentification\nselection\nalignment (covered in the next video)", "# identification: index remains with each row when filtering the DataFrame\ndrinks[drinks.continent=='South America']\n\n# selection: select a portion of the DataFrame using the index\ndrinks.loc[23, 'beer_servings']", "Documentation for loc", "# set an existing column as the index\ndrinks.set_index('country', inplace=True)\ndrinks.head()", "Documentation for set_index", "# 'country' is now the index\ndrinks.index\n\n# 'country' is no longer a column\ndrinks.columns\n\n# 'country' data is no longer part of the DataFrame contents\ndrinks.shape\n\n# country name can now be used for selection\ndrinks.loc['Brazil', 'beer_servings']\n\n# index name is optional\ndrinks.index.name = None\ndrinks.head()\n\n# restore the index name, and move the index back to a column\ndrinks.index.name = 'country'\ndrinks.reset_index(inplace=True)\ndrinks.head()", "Documentation for reset_index", "# many DataFrame methods output a DataFrame\ndrinks.describe()\n\n# you can interact with any DataFrame using its index and columns\ndrinks.describe().loc['25%', 'beer_servings']", "Indexing and selecting data\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n18. 
What do I need to know about the pandas index?", "# read a dataset of alcohol consumption into a DataFrame\ndrinks = pd.read_csv('data/drinks.csv')\ndrinks.head()\n\n# every DataFrame has an index\ndrinks.index\n\n# every Series also has an index (which carries over from the DataFrame)\ndrinks.continent.head()\n\n# set 'country' as the index\ndrinks.set_index('country', inplace=True)", "Documentation for set_index", "# Series index is on the left, values are on the right\ndrinks.continent.head()\n\n# another example of a Series (output from the 'value_counts' method)\ndrinks.continent.value_counts()", "Documentation for value_counts", "# access the Series index\ndrinks.continent.value_counts().index\n\n# access the Series values\ndrinks.continent.value_counts().values\n\n# elements in a Series can be selected by index (using bracket notation)\ndrinks.continent.value_counts()['Africa']\n\n# any Series can be sorted by its values\ndrinks.continent.value_counts().sort_values()\n\n# any Series can also be sorted by its index\ndrinks.continent.value_counts().sort_index()", "Documentation for sort_values and sort_index\nWhat is the index used for?\n\nidentification (covered in the previous video)\nselection (covered in the previous video)\nalignment", "# 'beer_servings' Series contains the average annual beer servings per person\ndrinks.beer_servings.head()\n\n# create a Series containing the population of two countries\npeople = pd.Series([3000000, 85000], index=['Albania', 'Andorra'], name='population')\npeople", "Documentation for Series", "# calculate the total annual beer servings for each country\n(drinks.beer_servings * people).head()", "The two Series were aligned by their indexes.\nIf a value is missing in either Series, the result is marked as NaN.\nAlignment enables us to easily work with incomplete data.", "# concatenate the 'drinks' DataFrame with the 'population' Series (aligns by the index)\npd.concat([drinks, people], axis=1).head()", "Documentation for 
concat\nIndexing and selecting data\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n19. How do I select multiple rows and columns from a pandas DataFrame?", "# read a dataset of UFO reports into a DataFrame\nufo = pd.read_csv('data/ufo.csv')\nufo.head(3)", "The loc method is used to select rows and columns by label. You can pass it:\n\nA single label\nA list of labels\nA slice of labels\nA boolean Series\nA colon (which indicates \"all labels\")", "# row 0, all columns\nufo.loc[0, :]\n\n# rows 0 and 1 and 2, all columns\nufo.loc[[0, 1, 2], :]\n\n# rows 0 through 2 (inclusive), all columns\nufo.loc[0:2, :]\n\n# this implies \"all columns\", but explicitly stating \"all columns\" is better\nufo.loc[0:2]\n\n# rows 0 through 2 (inclusive), column 'City'\nufo.loc[0:2, 'City']\n\n# rows 0 through 2 (inclusive), columns 'City' and 'State'\nufo.loc[0:2, ['City', 'State']]\n\n# accomplish the same thing using double brackets - but using 'loc' is preferred since it's more explicit\nufo[['City', 'State']].head(3)\n\n# rows 0 through 2 (inclusive), columns 'City' through 'State' (inclusive)\nufo.loc[0:2, 'City':'State']\n\n# accomplish the same thing using 'head' and 'drop'\nufo.head(3).drop('Time', axis=1)\n\n# rows in which the 'City' is 'Oakland', column 'State'\nufo.loc[ufo.City=='Oakland', 'State']\n\n# accomplish the same thing using \"chained indexing\" - but using 'loc' is preferred since chained indexing can cause problems\nufo[ufo.City=='Oakland'].State", "The iloc method is used to select rows and columns by integer position. 
You can pass it:\n\nA single integer position\nA list of integer positions\nA slice of integer positions\nA colon (which indicates \"all integer positions\")", "# rows in positions 0 and 1, columns in positions 0 and 3\nufo.iloc[[0, 1], [0, 3]]\n\n# rows in positions 0 through 2 (exclusive), columns in positions 0 through 4 (exclusive)\nufo.iloc[0:2, 0:4]\n\n# rows in positions 0 through 2 (exclusive), all columns\nufo.iloc[0:2, :]\n\n# accomplish the same thing - but using 'iloc' is preferred since it's more explicit\nufo[0:2]", "The ix method is used to select rows and columns by label or integer position, and should only be used when you need to mix label-based and integer-based selection in the same call.", "# read a dataset of alcohol consumption into a DataFrame and set 'country' as the index\ndrinks = pd.read_csv('data/drinks.csv', index_col='country')\ndrinks.head()\n\n# row with label 'Albania', column in position 0\ndrinks.ix['Albania', 0]\n\n# row in position 1, column with label 'beer_servings'\ndrinks.ix[1, 'beer_servings']", "Rules for using numbers with ix:\n\nIf the index is strings, numbers are treated as integer positions, and thus slices are exclusive on the right.\nIf the index is integers, numbers are treated as labels, and thus slices are inclusive.", "# rows 'Albania' through 'Andorra' (inclusive), columns in positions 0 through 2 (exclusive)\ndrinks.ix['Albania':'Andorra', 0:2]\n\n# rows 0 through 2 (inclusive), columns in positions 0 through 2 (exclusive)\nufo.ix[0:2, 0:2]", "Summary of the pandas API for selection\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n20. 
When should I use the \"inplace\" parameter in pandas?", "# read a dataset of UFO reports into a DataFrame\nufo = pd.read_csv('data/ufo.csv')\nufo.head()\n\nufo.shape\n\n# remove the 'City' column (doesn't affect the DataFrame since inplace=False)\nufo.drop('City', axis=1).head()\n\n# confirm that the 'City' column was not actually removed\nufo.head()\n\n# remove the 'City' column (does affect the DataFrame since inplace=True)\nufo.drop('City', axis=1, inplace=True)\n\n# confirm that the 'City' column was actually removed\nufo.head()\n\n# drop a row if any value is missing from that row (doesn't affect the DataFrame since inplace=False)\nufo.dropna(how='any').shape\n\n# confirm that no rows were actually removed\nufo.shape\n\n# use an assignment statement instead of the 'inplace' parameter\nufo = ufo.set_index('Time')\nufo.tail()\n\n# fill missing values using \"backward fill\" strategy (doesn't affect the DataFrame since inplace=False)\nufo.fillna(method='bfill').tail()\n\n# compare with \"forward fill\" strategy (doesn't affect the DataFrame since inplace=False)\nufo.fillna(method='ffill').tail()", "[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n21. 
How do I make my pandas DataFrame smaller and faster?", "# read a dataset of alcohol consumption into a DataFrame\ndrinks = pd.read_csv('data/drinks.csv')\ndrinks.head()\n\n# exact memory usage is unknown because object columns are references elsewhere\ndrinks.info()\n\n# force pandas to calculate the true memory usage\ndrinks.info(memory_usage='deep')\n\n# calculate the memory usage for each Series (in bytes)\ndrinks.memory_usage(deep=True)", "Documentation for info and memory_usage", "# use the 'category' data type (new in pandas 0.15) to store the 'continent' strings as integers\ndrinks['continent'] = drinks.continent.astype('category')\ndrinks.dtypes\n\n# 'continent' Series appears to be unchanged\ndrinks.continent.head()\n\n# strings are now encoded (0 means 'Africa', 1 means 'Asia', 2 means 'Europe', etc.)\ndrinks.continent.cat.codes.head()\n\n# memory usage has been drastically reduced\ndrinks.memory_usage(deep=True)\n\n# repeat this process for the 'country' Series\ndrinks['country'] = drinks.country.astype('category')\ndrinks.memory_usage(deep=True)\n\n# memory usage increased because we created 193 categories\ndrinks.country.cat.categories", "The category data type should only be used with a string Series that has a small number of possible values.", "# create a small DataFrame from a dictionary\ndf = pd.DataFrame({'ID':[100, 101, 102, 103], 'quality':['good', 'very good', 'good', 'excellent']})\ndf\n\n# sort the DataFrame by the 'quality' Series (alphabetical order)\ndf.sort_values('quality')\n\n# define a logical ordering for the categories\ndf['quality'] = df.quality.astype('category', categories=['good', 'very good', 'excellent'], ordered=True)\ndf.quality\n\n# sort the DataFrame by the 'quality' Series (logical order)\ndf.sort_values('quality')\n\n# comparison operators work with ordered categories\ndf.loc[df.quality > 'good', :]", "Overview of categorical data in pandas\nAPI reference for categorical methods\n[<a 
href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n22. How do I use pandas with scikit-learn to create Kaggle submissions?", "# read the training dataset from Kaggle's Titanic competition into a DataFrame\ntrain = pd.read_csv('http://bit.ly/kaggletrain')\ntrain.head()", "Goal: Predict passenger survival aboard the Titanic based on passenger attributes\nVideo: What is machine learning, and how does it work?", "# create a feature matrix 'X' by selecting two DataFrame columns\nfeature_cols = ['Pclass', 'Parch']\nX = train.loc[:, feature_cols]\nX.shape\n\n# create a response vector 'y' by selecting a Series\ny = train.Survived\ny.shape", "Note: There is no need to convert these pandas objects to NumPy arrays. scikit-learn will understand these objects as long as they are entirely numeric and the proper shapes.", "# fit a classification model to the training data\nfrom sklearn.linear_model import LogisticRegression\nlogreg = LogisticRegression()\nlogreg.fit(X, y)", "Video series: Introduction to machine learning with scikit-learn", "# read the testing dataset from Kaggle's Titanic competition into a DataFrame\ntest = pd.read_csv('http://bit.ly/kaggletest')\ntest.head()\n\n# create a feature matrix from the testing data that matches the training data\nX_new = test.loc[:, feature_cols]\nX_new.shape\n\n# use the fitted model to make predictions for the testing set observations\nnew_pred_class = logreg.predict(X_new)\n\n# create a DataFrame of passenger IDs and testing set predictions\npd.DataFrame({'PassengerId':test.PassengerId, 'Survived':new_pred_class}).head()", "Documentation for the DataFrame constructor", "# ensure that PassengerID is the first column by setting it as the index\npd.DataFrame({'PassengerId':test.PassengerId, 'Survived':new_pred_class}).set_index('PassengerId').head()\n\n# write the DataFrame to a CSV file that can be submitted to Kaggle\npd.DataFrame({'PassengerId':test.PassengerId, 
'Survived':new_pred_class}).set_index('PassengerId').to_csv('sub.csv')", "Documentation for to_csv", "# save a DataFrame to disk (\"pickle it\")\ntrain.to_pickle('train.pkl')\n\n# read a pickled object from disk (\"unpickle it\")\npd.read_pickle('train.pkl').head()", "Documentation for to_pickle and read_pickle\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n23. More of your pandas questions answered!\nQuestion: Could you explain how to read the pandas documentation?\npandas API reference\nQuestion: What is the difference between ufo.isnull() and pd.isnull(ufo)?", "# read a dataset of UFO reports into a DataFrame\nufo = pd.read_csv('http://bit.ly/uforeports')\nufo.head()\n\n# use 'isnull' as a top-level function\npd.isnull(ufo).head()\n\n# equivalent: use 'isnull' as a DataFrame method\nufo.isnull().head()", "Documentation for isnull\nQuestion: Why are DataFrame slices inclusive when using .loc, but exclusive when using .iloc?", "# label-based slicing is inclusive of the start and stop\nufo.loc[0:4, :]\n\n# position-based slicing is inclusive of the start and exclusive of the stop\nufo.iloc[0:4, :]", "Documentation for loc and iloc", "# 'iloc' is simply following NumPy's slicing convention...\nufo.values[0:4, :]\n\n# ...and NumPy is simply following Python's slicing convention\n'python'[0:4]\n\n# 'loc' is inclusive of the stopping label because you don't necessarily know what label will come after it\nufo.loc[0:4, 'City':'State']", "Question: How do I randomly sample rows from a DataFrame?", "# sample 3 rows from the DataFrame without replacement (new in pandas 0.16.1)\nufo.sample(n=3)", "Documentation for sample", "# use the 'random_state' parameter for reproducibility\nufo.sample(n=3, random_state=42)\n\n# sample 75% of the DataFrame's rows without replacement\ntrain = ufo.sample(frac=0.75, random_state=99)\n\n# store the remaining 25% of the rows in another DataFrame\ntest = ufo.loc[~ufo.index.isin(train.index), :]", "Documentation 
for isin\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n24. How do I create dummy variables in pandas?", "# read the training dataset from Kaggle's Titanic competition\ntrain = pd.read_csv('http://bit.ly/kaggletrain')\ntrain.head()\n\n# create the 'Sex_male' dummy variable using the 'map' method\ntrain['Sex_male'] = train.Sex.map({'female':0, 'male':1})\ntrain.head()", "Documentation for map", "# alternative: use 'get_dummies' to create one column for every possible value\npd.get_dummies(train.Sex).head()", "Generally speaking:\n\nIf you have \"K\" possible values for a categorical feature, you only need \"K-1\" dummy variables to capture all of the information about that feature.\nOne convention is to drop the first dummy variable, which defines that level as the \"baseline\".", "# drop the first dummy variable ('female') using the 'iloc' method\npd.get_dummies(train.Sex).iloc[:, 1:].head()\n\n# add a prefix to identify the source of the dummy variables\npd.get_dummies(train.Sex, prefix='Sex').iloc[:, 1:].head()\n\n# use 'get_dummies' with a feature that has 3 possible values\npd.get_dummies(train.Embarked, prefix='Embarked').head(10)\n\n# drop the first dummy variable ('C')\npd.get_dummies(train.Embarked, prefix='Embarked').iloc[:, 1:].head(10)", "How to translate these values back to the original 'Embarked' value:\n\n0, 0 means C\n1, 0 means Q\n0, 1 means S", "# save the DataFrame of dummy variables and concatenate them to the original DataFrame\nembarked_dummies = pd.get_dummies(train.Embarked, prefix='Embarked').iloc[:, 1:]\ntrain = pd.concat([train, embarked_dummies], axis=1)\ntrain.head()", "Documentation for concat", "# reset the DataFrame\ntrain = pd.read_csv('http://bit.ly/kaggletrain')\ntrain.head()\n\n# pass the DataFrame to 'get_dummies' and specify which columns to dummy (it drops the original columns)\npd.get_dummies(train, columns=['Sex', 'Embarked']).head()\n\n# use the 'drop_first' parameter (new in pandas 0.18) to 
drop the first dummy variable for each feature\npd.get_dummies(train, columns=['Sex', 'Embarked'], drop_first=True).head()", "Documentation for get_dummies\n[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n25. How do I work with dates and times in pandas?", "# read a dataset of UFO reports into a DataFrame\nufo = pd.read_csv('http://bit.ly/uforeports')\nufo.head()\n\n# 'Time' is currently stored as a string\nufo.dtypes\n\n# hour could be accessed using string slicing, but this approach breaks too easily\nufo.Time.str.slice(-5, -3).astype(int).head()\n\n# convert 'Time' to datetime format\nufo['Time'] = pd.to_datetime(ufo.Time)\nufo.head()\n\nufo.dtypes", "Documentation for to_datetime", "# convenient Series attributes are now available\nufo.Time.dt.hour.head()\n\nufo.Time.dt.weekday_name.head()\n\nufo.Time.dt.dayofyear.head()", "API reference for datetime properties and methods", "# convert a single string to datetime format (outputs a timestamp object)\nts = pd.to_datetime('1/1/1999')\nts\n\n# compare a datetime Series with a timestamp\nufo.loc[ufo.Time >= ts, :].head()\n\n# perform mathematical operations with timestamps (outputs a timedelta object)\nufo.Time.max() - ufo.Time.min()\n\n# timedelta objects also have attributes you can access\n(ufo.Time.max() - ufo.Time.min()).days\n\n# allow plots to appear in the notebook\n%matplotlib inline\n\n# count the number of UFO reports per year\nufo['Year'] = ufo.Time.dt.year\nufo.Year.value_counts().sort_index().head()\n\n# plot the number of UFO reports per year (line plot is the default)\nufo.Year.value_counts().sort_index().plot()", "[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n26. 
How do I find and remove duplicate rows in pandas?", "# read a dataset of movie reviewers into a DataFrame\nuser_cols = ['user_id', 'age', 'gender', 'occupation', 'zip_code']\nusers = pd.read_table('http://bit.ly/movieusers', sep='|', header=None, names=user_cols, index_col='user_id')\nusers.head()\n\nusers.shape\n\n# detect duplicate zip codes: True if an item is identical to a previous item\nusers.zip_code.duplicated().tail()\n\n# count the duplicate items (True becomes 1, False becomes 0)\nusers.zip_code.duplicated().sum()\n\n# detect duplicate DataFrame rows: True if an entire row is identical to a previous row\nusers.duplicated().tail()\n\n# count the duplicate rows\nusers.duplicated().sum()", "Logic for duplicated:\n\nkeep='first' (default): Mark duplicates as True except for the first occurrence.\nkeep='last': Mark duplicates as True except for the last occurrence.\nkeep=False: Mark all duplicates as True.", "# examine the duplicate rows (ignoring the first occurrence)\nusers.loc[users.duplicated(keep='first'), :]\n\n# examine the duplicate rows (ignoring the last occurrence)\nusers.loc[users.duplicated(keep='last'), :]\n\n# examine the duplicate rows (including all duplicates)\nusers.loc[users.duplicated(keep=False), :]\n\n# drop the duplicate rows (inplace=False by default)\nusers.drop_duplicates(keep='first').shape\n\nusers.drop_duplicates(keep='last').shape\n\nusers.drop_duplicates(keep=False).shape", "Documentation for drop_duplicates", "# only consider a subset of columns when identifying duplicates\nusers.duplicated(subset=['age', 'zip_code']).sum()\n\nusers.drop_duplicates(subset=['age', 'zip_code']).shape", "[<a href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]\n27. 
How do I avoid a SettingWithCopyWarning in pandas?", "# read a dataset of top-rated IMDb movies into a DataFrame\nmovies = pd.read_csv('http://bit.ly/imdbratings')\nmovies.head()\n\n# count the missing values in the 'content_rating' Series\nmovies.content_rating.isnull().sum()\n\n# examine the DataFrame rows that contain those missing values\nmovies[movies.content_rating.isnull()]\n\n# examine the unique values in the 'content_rating' Series\nmovies.content_rating.value_counts()", "Goal: Mark the 'NOT RATED' values as missing values, represented by 'NaN'.", "# first, locate the relevant rows\nmovies[movies.content_rating=='NOT RATED'].head()\n\n# then, select the 'content_rating' Series from those rows\nmovies[movies.content_rating=='NOT RATED'].content_rating.head()\n\n# finally, replace the 'NOT RATED' values with 'NaN' (imported from NumPy)\nimport numpy as np\nmovies[movies.content_rating=='NOT RATED'].content_rating = np.nan", "Problem: That statement involves two operations, a __getitem__ and a __setitem__. 
pandas can't guarantee whether the __getitem__ operation returns a view or a copy of the data.\n\nIf __getitem__ returns a view of the data, __setitem__ will affect the 'movies' DataFrame.\nBut if __getitem__ returns a copy of the data, __setitem__ will not affect the 'movies' DataFrame.", "# the 'content_rating' Series has not changed\nmovies.content_rating.isnull().sum()", "Solution: Use the loc method, which replaces the 'NOT RATED' values in a single __setitem__ operation.", "# replace the 'NOT RATED' values with 'NaN' (does not cause a SettingWithCopyWarning)\nmovies.loc[movies.content_rating=='NOT RATED', 'content_rating'] = np.nan\n\n# this time, the 'content_rating' Series has changed\nmovies.content_rating.isnull().sum()", "Summary: Use the loc method any time you are selecting rows and columns in the same statement.\nMore information: Modern Pandas (Part 1)", "# create a DataFrame only containing movies with a high 'star_rating'\ntop_movies = movies.loc[movies.star_rating >= 9, :]\ntop_movies", "Goal: Fix the 'duration' for 'The Shawshank Redemption'.", "# overwrite the relevant cell with the correct duration\ntop_movies.loc[0, 'duration'] = 150", "Problem: pandas isn't sure whether 'top_movies' is a view or a copy of 'movies'.", "# 'top_movies' DataFrame has been updated\ntop_movies\n\n# 'movies' DataFrame has not been updated\nmovies.head(1)", "Solution: Any time you are attempting to create a DataFrame copy, use the copy method.", "# explicitly create a copy of 'movies'\ntop_movies = movies.loc[movies.star_rating >= 9, :].copy()\n\n# pandas now knows that you are updating a copy instead of a view (does not cause a SettingWithCopyWarning)\ntop_movies.loc[0, 'duration'] = 150\n\n# 'top_movies' DataFrame has been updated\ntop_movies", "Documentation on indexing and selection: Returning a view versus a copy\nStack Overflow: What is the point of views in pandas if it is undefined whether an indexing operation returns a view or a copy?\n[<a 
href=\"#Python-pandas-Q&A-video-series-by-Data-School\">Back to top</a>]" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", 
"code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
dmc-2016/dmc
notebooks/week-6/02-using a pre-trained model with Keras.ipynb
apache-2.0
[ "Lab 6.2 - Using a pre-trained model with Keras\nIn this section of the lab, we will load the model we trained in the previous section, along with the training data and mapping dictionaries, and use it to generate longer sequences of text.\nLet's start by importing the libraries we will be using:", "import numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import LSTM\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.utils import np_utils\n\nimport sys\nimport re\nimport pickle", "Next, we will import the data we saved previously using the pickle library.", "pickle_file = '-basic_data.pickle'\n\nwith open(pickle_file, 'rb') as f:\n save = pickle.load(f)\n X = save['X']\n y = save['y']\n char_to_int = save['char_to_int'] \n int_to_char = save['int_to_char'] \n del save # hint to help gc free up memory\n print('Training set', X.shape, y.shape)", "Now we need to define the Keras model. Since we will be loading parameters from a pre-trained model, this needs to match exactly the definition from the previous lab section. 
The only difference is that we will comment out the dropout layer so that the model uses all the hidden neurons when doing the predictions.", "# define the LSTM model\nmodel = Sequential()\nmodel.add(LSTM(128, return_sequences=False, input_shape=(X.shape[1], X.shape[2])))\n# model.add(Dropout(0.50))\nmodel.add(Dense(y.shape[1], activation='softmax'))", "Next we will load the parameters from the model we trained previously, and compile it with the same loss and optimizer function.", "# load the parameters from the pretrained model\nfilename = \"-basic_LSTM.hdf5\"\nmodel.load_weights(filename)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam')", "We also need to rewrite the sample() and generate() helper functions so that we can use them in our code:", "def sample(preds, temperature=1.0):\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\ndef generate(sentence, sample_length=50, diversity=0.35):\n generated = sentence\n sys.stdout.write(generated)\n\n for i in range(sample_length):\n x = np.zeros((1, X.shape[1], X.shape[2]))\n for t, char in enumerate(sentence):\n x[0, t, char_to_int[char]] = 1.\n\n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = int_to_char[next_index]\n\n generated += next_char\n sentence = sentence[1:] + next_char\n\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print", "Now we can use the generate() function to generate text of any length based on our imported pre-trained model and a seed text of our choice. For best result, the length of the seed text should be the same as the length of training sequences (100 in the previous lab section). 
\nIn this case, we will test the overfitting of the model by supplying it two seeds:\n\none which comes verbatim from the training text, and\none which comes from another earlier speech by Obama\n\nIf the model has not overfit our training data, we should expect it to produce reasonable results for both seeds. If it has overfit, it might produce pretty good results for something coming directly from the training set, but perform poorly on a new seed. This means that it has learned to replicate our training text, but cannot generalize to produce text based on other inputs. Since the original article was very short, however, the entire vocabulary of the model might be very limited, which is why as input we use a part of another speech given by Obama, instead of completely random text.\nSince we have not trained the model for that long, we will also use a lower temperature to get the model to generate more accurate if less diverse results. Try running the code a few times with different temperature settings to generate different results.", "prediction_length = 500\nseed_from_text = \"america has shown that progress is possible. last year, income gains were larger for households at t\"\nseed_original = \"and as people around the world began to hear the tale of the lowly colonists who overthrew an empire\"\n\nfor seed in [seed_from_text, seed_original]:\n generate(seed, prediction_length, .50)\n print \"-\" * 20" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
AISpace2/AISpace2
notebooks/search/search.ipynb
gpl-3.0
[ "3. Searching for Solutions\nAbout\nThis chapter casts the problem of an agent deciding how to solve a goal as the problem of searching to find a path in a graph.\nYou can run each cell by selecting it and pressing Ctrl+Enter in Windows or Shift+Return in MacOS. Alternatively, you can click the Play button in the toolbar, to the left of the stop button. For more information, check out our AISpace2 Tutorial.\nFeel free to modify our codes either in this notebook or somewhere outside (e.g. python files in /aipython/). If you want to modify our codes outside, you might find this helpful for how your changes can take effect.\nYou need to run the following command to import our pre-defined problems.", "# Run this to import pre-defined problems\nfrom aipython.searchProblem import search_simple1, search_simple2, search_cyclic_delivery, search_acyclic_delivery, search_tree, search_extended_tree, search_cyclic, search_vancouver_neighbour, search_misleading_heuristic, search_multiple_path_pruning, search_module_4_graph, search_module_5_graph, search_bicycle_courier_acyclic, search_bicycle_courier_cyclic", "You can also define your own problems (how?). \nYou need to run the following command to import utilities that support your self-defined problems.", "# Run this to import utilities that support self-defined problems \nfrom aipython.searchProblem import Arc, Search_problem_from_explicit_graph", "3.5.2 Depth-First Search\n\nImplementation Details (page 39)\n\nIn depth-first search, the frontier acts like a LIFO (last-in, first-out) stack of paths. This means that the path selected and removed from the frontier at any time is the last path that was added. Depth-first search is appropriate when space is restricted, or when there are many solutions. 
On the other hand, depth-first search is not appropriate if it is possible to get stuck into infinite paths or if solutions exist at shallow depths.", "from aipython.searchGeneric import Searcher\n\ns = Searcher(problem=search_simple2)\n\n# Visualization options\n# For more explanation please visit: https://aispace2.github.io/AISpace2/tutorial.html#tutorial-common-visualization-options\ns.sleep_time = 0.2 # The time, in seconds, between each step in auto solving\ns.line_width = 2.0 # The thickness of edges\ns.text_size = 13 # The fontsize of the text\ns.detail_level = 2 # 0=no text, 1=truncated text, 2=full text\ns.show_edge_costs = True\ns.show_node_heuristics = False\n# Controls the layout engine used. Either \"force\" for force layout, or \"tree\".\ns.layout_method = \"force\"\n# s.layout_method = \"tree\"\n\n# Display the widget\ndisplay(s)\ns.search()", "3.6.1 A* Search\n\nImplementation Details (page 41)\n\nA* search uses both path cost and heuristic information in its selection of which path to expand. For each path on the frontier, A* uses an estimate of the total path cost from the start node to a goal node constrained to follow that path initially. 
The estimated total path cost is the sum of the cost of the path found $\\text{c⁢o⁢s⁢t}⁢(p)$ and the heuristic function $h(p)$, which estimates the cost from the end of $p$ to the goal.", "from aipython.searchGeneric import AStarSearcher\n\ns_astar = AStarSearcher(problem=search_simple1)\n\n# Visualization options\n# For more explanation please visit: https://aispace2.github.io/AISpace2/tutorial.html#tutorial-common-visualization-options\ns_astar.sleep_time = 0.2 # The time, in seconds, between each step in auto solving\ns_astar.line_width = 2.0 # The thickness of edges\ns_astar.text_size = 13 # The fontsize of the text\ns_astar.detail_level = 2 # 0=no text, 1=truncated text, 2=full text\ns_astar.show_edge_costs = True\ns_astar.show_node_heuristics = True\n\n# Display the widget\ndisplay(s_astar)\ns_astar.search()", "3.7.2 A* Search with Multiple Path Pruning\n\nImplementation Details (page 43)\n\nThere is often more than one path to a node. If only one path is required, a search algorithm can prune from the frontier any path that leads to a node to which it has already found a path. Multiple-path pruning is implemented by maintaining an explored set (traditionally called closed list) of nodes that are at the end of paths that have been expanded. The explored set is initially empty. When a path $⟨n_0,…,n_k⟩$ is selected , if $n_k$ is already in the explored set, the path can be discarded. 
Otherwise, $n_k$ is added to the explored set, and the algorithm proceeds as before.", "from aipython.searchMPP import SearcherMPP\n\ns_mpp = SearcherMPP(problem=search_simple1)\n\n# Visualization options\n# For more explanation please visit: https://aispace2.github.io/AISpace2/tutorial.html#tutorial-common-visualization-options\ns_mpp.sleep_time = 0.2 # The time, in seconds, between each step in auto solving\ns_mpp.line_width = 2.0 # The thickness of edges\ns_mpp.text_size = 13 # The fontsize of the text\ns_mpp.detail_level = 1 # 0=no text, 1=truncated text, 2=full text\ns_mpp.show_edge_costs = True\ns_mpp.show_node_heuristics = True\n\n# Display the widget\ndisplay(s_mpp)\ns_mpp.search()", "3.8.1 Branch-and-bound Search\n\nImplementation Details (page 44)\n\nDepth-first branch-and-bound search is a way to combine the space saving of depth-first search with heuristic information for finding optimal paths. It is particularly applicable when there are many paths to a goal. As in A* search, the heuristic function h$⁢(n)$ is non-negative and less than or equal to the cost of a lowest-cost path from n to a goal node.\nThe idea of a branch-and-bound search is to maintain the lowest cost $b$ (\"bound\") of a path to a goal found so far. If the search encounters a path $p$ such that $\\text{c⁢o⁢s⁢t}⁢(p)+h⁢(p)≥b$, path $p$ can be pruned. If a non-pruned path to a goal is found, it must be better than the previous best path. This new solution is remembered and b⁢o⁢u⁢n⁢d is set to the cost of this new solution. 
The searcher then proceeds to search for a better solution.", "from aipython.searchBranchAndBound import DF_branch_and_bound\n\ns_dfbb = DF_branch_and_bound(problem=search_simple1)\n\n# Visualization options\n# For more explanation please visit: https://aispace2.github.io/AISpace2/tutorial.html#tutorial-common-visualization-options\ns_dfbb.sleep_time = 0.2 # The time, in seconds, between each step in auto solving\ns_dfbb.line_width = 2.0 # The thickness of edges\ns_dfbb.text_size = 13 # The fontsize of the text\ns_dfbb.detail_level = 2 # 0=no text, 1=truncated text, 2=full text\ns_dfbb.show_edge_costs = True\ns_dfbb.show_node_heuristics = True\n\n# Display the widget\ndisplay(s_dfbb)\ns_dfbb.search()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
TomTranter/OpenPNM
examples/topology/Stitching Networks Together.ipynb
mit
[ "Create Bilayer Topology with Stitch\nOpenPNM includes numerous tools for manipulating and altering the topology. Most of these are found in the topotools submodule. This example will illustrate how to join or 'stitch' two distinct networks, even if they have different lattice spacing. In this example we'll create a coarse and a fine network then stitch them together to make a network with two distinct layers. \nStart by creating a network with a large lattice spacing:", "import scipy as sp\nimport numpy as np\nnp.set_printoptions(precision=4)\nimport matplotlib.pyplot as plt\nimport openpnm as op\n%matplotlib inline\nnp.random.seed(10)\nws = op.Workspace()\nws.settings[\"loglevel\"] = 40\n\ncoarse_net = op.network.Cubic(shape=[10, 10, 10], spacing=50e-6, name='coarse_net')\nprint(coarse_net)", "The coarse_net network has 1000 pores in a cubic lattice with a spacing of 50 um for a total size of 500 um per size. Next, we'll make another network with smaller spacing between pores, but with the same total size.", "fine_net = op.network.Cubic(shape=[25, 25, 5], spacing=20e-6, name='fine_net')\nprint(fine_net)", "These two networks are totally independent of each other, and actually both spatially overlap each other since the network generator places the pores at the [0, 0, 0] origin. Combining these networks into a single network is possible using the stitch function, but first we must make some adjustments. For starters, let's shift the fine_net along the z-axis so it is beside the coarse_net to give the layered effect:", "fine_net['pore.coords'] += np.array([0, 0, 10 * 50e-6])", "Before proceeding, let's quickly check that the two networks are indeed spatially separated now. OpenPNM as a rule does not provide extensively visualization capabilities since there are so many other packages that do it very well. However, we've found it useful to have a quick way of checking the network, so provide two tools in the topotools module: plot_connections and plot_coordinates. 
Below we'll use plot_connections:", "#NBVAL_IGNORE_OUTPUT\nfig = op.topotools.plot_connections(coarse_net)\nfig = op.topotools.plot_connections(fine_net, fig=fig)", "As can be seen below, fine_net (orange) has been repositioned above the coarse_net (blue) because we shifted the z-coordinate by 500 um. (10 pores as 50 um per pore).\nNow it's time stitch the networks together by adding throats between the pores on the top of the coarse network and those on the bottom of the fine network. The stitch function uses Euclidean distance to determine which pore is each face is nearest each other, and connects them.", "op.topotools.stitch(network=fine_net,\n donor=coarse_net,\n P_network=fine_net.pores('bottom'),\n P_donor=coarse_net.pores('top'),\n len_max=4e-5)", "And we can quickly visualize the result using OpenPNM's plotting tools:", "#NBVAL_IGNORE_OUTPUT\nfig = op.topotools.plot_connections(fine_net)", "The diagonal throats between the two networks have been added by the stitch process. The next step would be to assign different geometry objects to each network, with different pore sizes and such." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
enakai00/jupyter_ml4se_commentary
06-pandas DataFrame-02.ipynb
apache-2.0
[ "データフレームからのデータの抽出", "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pandas import Series, DataFrame", "DataFrame から特定の列を Series として取り出す例です。", "from numpy.random import randint\ndices = randint(1,7,(5,2))\ndiceroll = DataFrame(dices, columns=['dice1','dice2'])\ndiceroll", "配列の index に column 名を指定して取り出します。", "diceroll['dice1']", "column 名を属性に指定して取り出します。", "diceroll.dice1", "複数の列を DataFrame として取り出す例です。", "data = {'City': ['Tokyo','Osaka','Nagoya','Okinawa'],\n 'Temperature': [25.0,28.2,27.3,30.9],\n 'Humidity': [44,42,np.nan,62]}\ncities = DataFrame(data)\ncities\n\ncities[['City', 'Humidity']]", "次のように、単一の列を DataFrame として取り出すこともできます。", "cities[['City']]", "次は Series として取り出す場合です。", "cities['City']", "DataFrame から行を指定して取り出す例です。\n配列のスライス記法で取り出す行を指定します。", "cities[0:2]\n\ncities[2:3]\n\ncities[1:]", "特定の条件を満たす行だけを取り出すこともできます。", "cities[cities['Temperature']>28]", "行と列の両方を指定して取り出す例です。", "cities", "行はスライス記法、列は column 名のリストで指定します。", "cities.ix[1:3, ['City','Humidity']]", "DataFrame の行ごとに処理をする例です。", "cities", "iterrows メソッドは、各行の index とその行を表わす Series オブジェクトを順に返します。", "for index, line in cities.iterrows():\n print 'Index:', index\n print line, '\\n'", "データフレームを変更する例です。\nDataFrame から抽出したオブジェクトを変更する際は、明示的にコピーを作成します。", "humidity = cities['Humidity'].copy()\nhumidity[2] = 50\nhumidity", "コピーを変更しても元の DataFrame が変更されることはありません。", "cities", "DataFrame の特定要素を変更する際は、loc メソッドで要素を指定します。", "cities.loc[2,'Humidity'] = 50\ncities", "30より大きい値の Temperature を30に揃える処理の例です。", "for index, line in cities.iterrows():\n if line['Temperature'] > 30:\n cities.loc[index, 'Temperature'] = 30\ncities", "条件による行の指定と組み合わせることもできます。", "cities.loc[(cities['Temperature']>27)&(cities['Temperature']<29), 'Temperature'] = 28\ncities", "dropna メソッドで欠損値を含む行を削除する例です。", "cities.loc[2,'Humidity'] = np.nan\ncities\n\ncities = cities.dropna()\ncities", "練習問題\n(1) 次の関数 create_dataset() を用いて、num=10 個のデータからなるデータフレーム data を作成します。その後、iterrowsメソッドを利用して、データポイント (x,y) のy値と関数 sin(2πx) の平方根平均二乗誤差 
√{sum(sin(2πx) - y)**2 / num} を計算してください。\nヒント:この例では、平方根平均二乗誤差は約0.3になります。", "from numpy.random import normal\n\ndef create_dataset(num):\n data_x = np.linspace(0,1,num)\n data_y = np.sin(2*np.pi*data_x) + normal(loc=0, scale=0.3, size=num)\n return DataFrame({'x': data_x, 'y': data_y})", "(2) (1)のDataFrameから列 'x' だけを取り出したSeriesオブジェクトを変数 x に格納してください。nameプロパティは、'x' とします。\nさらに、x**2 (各要素を2乗した値)を要素とするSeriesオブジェクトを作成して、変数 x2 に格納してください。nameプロパティは、'x2' とします。\n同様に、x**3、x**4 を要素とするSeriesオブジェクトを変数 x3, x4 に格納します。\n(3) (2)で作成した x, x2, x3, x4 を結合して、x, x2, x3, x4を列に持ったDataFrame dataset を作成してください。\nヒント:結果は、次のような DataFrame になります。", "from PIL import Image\nImage.open(\"figure01.png\") " ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
brettavedisian/phys202-2015-work
assignments/assignment03/NumpyEx01.ipynb
mit
[ "Numpy Exercise 1\nImports", "import numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport antipackage\nimport github.ellisonbg.misc.vizarray as va", "Checkerboard\nWrite a Python function that creates a square (size,size) 2d Numpy array with the values 0.0 and 1.0:\n\nYour function should work for both odd and even size.\nThe 0,0 element should be 1.0.\nThe dtype should be float.", "def checkerboard(size):\n \"\"\"Return a 2d checkboard of 0.0 and 1.0 as a NumPy array\"\"\"\n a=np.zeros((size,size))\n a[::2,::2]=1.0 #slices the array at every even index and makes it 1.0 in first, third,... rows\n a[1::2,1::2]=1.0 #slices the array at every odd index and makes it 1.0 in second, fourth,... rows\n return a\n raise NotImplementedError()\n\na = checkerboard(4)\nassert a[0,0]==1.0\nassert a.sum()==8.0\nassert a.dtype==np.dtype(float)\nassert np.all(a[0,0:5:2]==1.0)\nassert np.all(a[1,0:5:2]==0.0)\n\nb = checkerboard(5)\nassert b[0,0]==1.0\nassert b.sum()==13.0\nassert np.all(b.ravel()[0:26:2]==1.0)\nassert np.all(b.ravel()[1:25:2]==0.0)", "Use vizarray to visualize a checkerboard of size=20 with a block size of 10px.", "f=checkerboard(20)\nva.set_block_size(10) #creating the checkerboard and setting pixel size to 10\nva.enable()\nf\n\nva.disable()\n\nassert True", "Use vizarray to visualize a checkerboard of size=27 with a block size of 5px.", "g=checkerboard(27)\nva.set_block_size(5) #same as above process, with different pixel size\nva.enable()\ng\n\nva.disable()\n\nassert True" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jakobrunge/tigramite
tutorials/tigramite_tutorial_sliding_window_analysis.ipynb
gpl-3.0
[ "Sliding window causal discovery with TIGRAMITE\nTIGRAMITE is a time series analysis python module. It allows to reconstruct graphical models (conditional independence graphs) from discrete or continuously-valued time series based on the PCMCI framework and create high-quality plots of the results.\nThis tutorial explains the function PCMCI.run_sliding_window_of which is a convenience function that allows to run all PCMCI causal discovery methods on sliding windows across a multivariate time series.", "# Imports\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\n%matplotlib inline \n## use `%matplotlib notebook` for interactive figures\n# plt.style.use('ggplot')\n\nimport tigramite\nfrom tigramite import data_processing as pp\nfrom tigramite.toymodels import structural_causal_processes as toys\nfrom tigramite import plotting as tp\nfrom tigramite.pcmci import PCMCI\nfrom tigramite.independence_tests import ParCorr, GPDC, CMIknn, CMIsymb", "Setup\nPCMCI and its variants allow to reconstruct time series graphs from multivariate time series data. An important underlying assumption is causal stationarity, i.e., that the conditional independence relations are stationary over time and, hence, the presence and absence of causal links is stationary. Sometimes, one may know that causal relations are only stationary in specific time periods, for example, we may observe different causal relations in summer and winter. This case can be addressed by the masking functionality which is discussed in the corresponding tutorial.\nIn the sliding window analyis implemented in run_sliding_window_of, on the other hand, we conduct the causal graph estimation separately on a sequence of time windows which is sled over the time series. 
That is, given a method, window_step $s$ and window_length $w$, we run the method (e.g., PCMCIplus) sequentially on the samples in each time window ${\\mathbf{X}t}{t=s\\cdot i}^{s\\cdot i+w-1}$ for $i=0, 1, \\ldots$.\nIn the following we discuss two potential use cases.\nUse case I: Time-varying causal dynamics\nIn this case we assume that the underlying stochastic process $\\mathbf{X}_t=(X^1_t,\\ldots,X^N_t)$ has a time-varying causal structure. This may affect the functional dependencies, causal parents, and noise distributions. Hence, we assume the following structural causal model (SCM):\n\\begin{align} \\label{eq:causal_model}\nX^j_t &= f^j_t\\left(\\mathcal{P}_t(X^j_t),\\,\\eta^j_t\\right)\\quad \\eta^j_t\\sim \\mathcal{D}_t\n\\end{align}\nwhere $f^j_t$ is some arbitrary function with non-trivial dependencies on its arguments $\\mathcal{P}_t(X^j_t)$ and $\\eta^j_t$. The latter represents mutually ($i\\neq j$) and serially ($t'\\neq t$) independent dynamical noise following some distribution $\\mathcal{D}_t$. Different from before, all three, $f^j_t$, $\\mathcal{P}_t(X^j_t)$, and $\\mathcal{D}_t$ may be time-dependent here.\nOf course, if this time-dependence is not restricted in some way, then causal relations cannot be identified. 
In the following example, we consider a slowly varying change governed by a slow confounder $U$.", "np.random.seed(42)\nN = 4\nT = 100000\ndata = np.random.randn(T, N)\ndatatime = np.arange(T)\n\n# Simple unobserved confounder U that smoothly changes causal relations\nU = np.cos(np.arange(T)*0.00005) #+ 0.1*np.random.randn(T)\nc = 0.8\nfor t in range(1, T):\n if U[t] >= 0:\n data[t, 0] += 0.4*data[t-1, 0] \n data[t, 1] += 0.5*data[t-1, 1] + 0.4*U[t]*data[t-1, 0]\n data[t, 2] += 0.6*data[t-1, 2] + data[t, 0] \n else:\n data[t, 2] += 0.6*data[t-1, 2] \n data[t, 0] += 0.4*data[t-1, 0] + 0.4*data[t, 2]\n data[t, 1] += 0.5*data[t-1, 1] + 0.4*U[t]*data[t-1, 0]\n data[t, 3] = U[t]\n\n# Initialize dataframe object, specify variable names\nvar_names = [r'$X^{%d}$' % j for j in range(N-1) ] + [r'$U$']\ndataframe_plot = pp.DataFrame(data, var_names=var_names)\ntp.plot_timeseries(dataframe_plot); plt.show()\n\n# For the analysis we use only the observed data\ndataframe = pp.DataFrame(data[:,:3], var_names=var_names[:-1])", "As you can see from the data-generating process, $U$ changes the strength and sign of the causal link from $X^0_{t-1}\\to X^1_t$ and, in addition, the causal direction of the contemporaneous link between $X^0_t$ and $X^2_t$. We assume that we do not know the unobserved confounder $U$ and construct the dataframe only from $X^0, X^1, X^2$.\nWe now run run_sliding_window_of with method='run_pcmciplus', window_step=10000 and window_length=10000. Additional arguments to 'run_pcmciplus' are passed using method_args. 
Of course, the windows may also be chosen to be overlapping.", "window_step=10000\nwindow_length=10000\nmethod='run_pcmciplus'\nmethod_args={'tau_min':0, 'tau_max':2, 'pc_alpha':0.01}\nconf_lev = 0.95\ncond_ind_test = ParCorr(significance='analytic')\n\n# Init\npcmci = PCMCI(\n dataframe=dataframe, \n cond_ind_test=cond_ind_test,\n verbosity=0)\n# Run\nresults = pcmci.run_sliding_window_of(method=method, \n method_args=method_args, \n window_step=window_step,\n window_length=window_length,\n conf_lev = conf_lev)\n\n", "run_sliding_window_of returns a dictionary with entries 'summary_results' and 'window_results'. The first one is discussed for the second use case. results['window_results'] now contains the same result entries as the results of a standard PCMCIplus analysis, but each one is a list of results for every sliding window. In the following we focus on the graph and val_matrix and visualize them aligned with the time series.", "graphs = results['window_results']['graph']\nval_matrices = results['window_results']['val_matrix']\nn_windows = len(graphs)\n\nmosaic = [['data %s' %j for i in range(n_windows)] for j in range(N)]\nfor n in range(N):\n mosaic.append(['graph %s' %i for i in range(n_windows)])\n# print(mosaic)\nfig, axs = plt.subplot_mosaic(mosaic = mosaic, figsize=(20, 10))\n\nfor j in range(N):\n ax = axs['data %s' %j]\n ax.axhline(0., color='grey')\n if j ==3:\n ax.fill_between(x=datatime, y1=-1, y2=1, where=datatime <= window_length, color='grey', alpha=0.3)\n \n if j == 3: color = 'black'\n else: color = 'blue'\n ax.plot(datatime, data[:,j], color=color)\n# axs['data %s' %j].axis('off') # xaxis.set_ticklabels([])\n for loc, spine in ax.spines.items():\n if loc != 'left':\n spine.set_color(\"none\")\n \n ax.xaxis.set_ticks([]) \n ax.set_xlim(0., T)\n ax.set_ylabel(var_names[j])\n\nfor w in range(n_windows):\n if w == 0: show_colorbar=True\n else: show_colorbar = False\n tp.plot_graph(graphs[w], val_matrix=val_matrices[w], 
show_colorbar=show_colorbar,\n fig_ax=(fig, axs['graph %s' %w]))\n\n", "As you can see, here there are 9 sliding windows (the length is indicated as the grey bar in the $U$-time series. As expected, over time the strength and sign of the causal link from $X^0_{t-1}\\to X^1_t$ and, in addition, the causal direction of the contemporaneous link between $X^0_t$ and $X^2_t$ change.\nUse case II: Stationary causal relations with a slowly varying confounder\nIn this case we assume that the SCM among $\\mathbf{X}_t=(X^1_t,\\ldots,X^N_t)$ among the $X^i$ is stationary over time, i.e., $f^j$, $\\mathcal{P}(X^j_t)$, and $\\mathcal{D}$ are not time dependent, but that there is an added external, slowly-varying confounder. (Of course, the true SCM includes this confounder).", "np.random.seed(42)\nN = 4\nT = 10000\ndata = np.random.randn(T, N)\ndatatime = np.arange(T)\n# Simple unobserved confounder U that smoothly changes causal relations\nU = np.cos(np.arange(T)*0.0005) #+ 0.1*np.random.randn(T)\nc = 3.\nfor t in range(1, T):\n data[t, 2] += 0.6*data[t-1, 2] + c*U[t]\n data[t, 0] += 0.4*data[t-1, 0] + 0.4*data[t, 2] + c*U[t]\n data[t, 1] += 0.5*data[t-1, 1] + 0.05*data[t-1, 0] + c*U[t]\n data[t, 3] = U[t]\n\n# Initialize dataframe object, specify variable names\nvar_names = [r'$X^{%d}$' % j for j in range(N-1) ] + [r'$U$']\ndataframe_plot = pp.DataFrame(data, var_names=var_names)\ntp.plot_timeseries(dataframe_plot); plt.show()\n\n# For the analysis we use only the observed data\ndataframe = pp.DataFrame(data[:,:3], var_names=var_names[:-1])", "We run the analysis both on the whole time series and in sliding windows.", "window_step=1000\nwindow_length=1000\nmethod='run_pcmciplus'\nmethod_args={'tau_min':0, 'tau_max':2, 'pc_alpha':0.01}\nconf_lev = 0.95\ncond_ind_test = ParCorr(significance='analytic')\n\n# Init\npcmci = PCMCI(\n dataframe=dataframe, \n cond_ind_test=cond_ind_test,\n verbosity=0)\n# Run\nresults = pcmci.run_sliding_window_of(method=method, \n 
method_args=method_args, \n window_step=window_step,\n window_length=window_length,\n conf_lev = conf_lev)\nresults_alldata = pcmci.run_pcmciplus(**method_args)\n", "The strong unobserved confounding induces a trend in all variables that makes them dependent. Hence, if we analyze the whole time frame, we get an almost fully connected graph.", "tp.plot_graph(graph=results_alldata['graph'], val_matrix=results_alldata['val_matrix']); plt.show()", "On the other hand, in each sliding window the unobserved confounder can be assumed constant and, hence, does not lead to a confounding.", "graphs = results['window_results']['graph']\nval_matrices = results['window_results']['val_matrix']\nn_windows = len(graphs)\n\nmosaic = [['data %s' %j for i in range(n_windows)] for j in range(N)]\nfor n in range(N):\n mosaic.append(['graph %s' %i for i in range(n_windows)])\n# print(mosaic)\nfig, axs = plt.subplot_mosaic(mosaic = mosaic, figsize=(20, 10))\n\nfor j in range(N):\n ax = axs['data %s' %j]\n ax.axhline(0., color='grey')\n if j ==3:\n ax.fill_between(x=datatime, y1=-1, y2=1, where=datatime <= window_length, color='grey', alpha=0.3)\n \n if j == 3: color = 'black'\n else: color = 'blue'\n ax.plot(datatime, data[:,j], color=color)\n# axs['data %s' %j].axis('off') # xaxis.set_ticklabels([])\n for loc, spine in ax.spines.items():\n if loc != 'left':\n spine.set_color(\"none\")\n \n ax.xaxis.set_ticks([]) \n ax.set_xlim(0., T)\n ax.set_ylabel(var_names[j])\n\nfor w in range(n_windows):\n if w == 0: show_colorbar=True\n else: show_colorbar = False\n tp.plot_graph(graphs[w], val_matrix=val_matrices[w], show_colorbar=show_colorbar,\n fig_ax=(fig, axs['graph %s' %w]))\n\n", "Now the graph is rather stationary over time. With this assumption of an effectively stationary SCM, we may also consider summary statistics in 'summary_results'. 
'most_frequent_links' contains a graph where each entry contains the link that occurs most frequently among the sliding windows, including the absence of a link \"\". 'link_frequency' contains the fraction of sliding windows where that link occurs. Finally, 'val_matrix_mean' contains the averaged test statistic values over all time windows. These three features can be visualized using plot_graph.\nNote: The test statistic values (e.g., partial correlation) may give a qualitative intuition of the strength of a dependency, but for a proper causal effect analysis please refer to the CausalEffects class.", "tp.plot_graph(graph=results['summary_results']['most_frequent_links'], \n val_matrix=results['summary_results']['val_matrix_mean'],\n link_width=results['summary_results']['link_frequency'])\nprint('most_frequent_links')\nprint(results['summary_results']['most_frequent_links'].squeeze())\nprint('link_frequency')\nprint(results['summary_results']['link_frequency'].squeeze())", "Here the link from $X^0 \\to X^1$ is weak and, hence, only detected in a small fraction of the windows." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
Gezort/YSDA_deeplearning17
Seminar3/HW3_Modules.ipynb
mit
[ "import numpy as np", "Module is an abstract class which defines fundamental methods necessary for a training a neural network. You do not need to change anything here, just read the comments.", "class Module(object):\n def __init__ (self):\n self.output = None\n self.gradInput = None\n self.training = True\n \"\"\"\n Basically, you can think of a module as of a something (black box) \n which can process `input` data and produce `ouput` data.\n This is like applying a function which is called `forward`: \n \n output = module.forward(input)\n \n The module should be able to perform a backward pass: to differentiate the `forward` function. \n More, it should be able to differentiate it if is a part of chain (chain rule).\n The latter implies there is a gradient from previous step of a chain rule. \n \n gradInput = module.backward(input, gradOutput)\n \"\"\"\n \n def forward(self, input):\n \"\"\"\n Takes an input object, and computes the corresponding output of the module.\n \"\"\"\n return self.updateOutput(input)\n\n def backward(self,input, gradOutput):\n \"\"\"\n Performs a backpropagation step through the module, with respect to the given input.\n \n This includes \n - computing a gradient w.r.t. `input` (is needed for further backprop),\n - computing a gradient w.r.t. parameters (to update parameters while optimizing).\n \"\"\"\n self.updateGradInput(input, gradOutput)\n self.accGradParameters(input, gradOutput)\n return self.gradInput\n \n\n def updateOutput(self, input):\n \"\"\"\n Computes the output using the current parameter set of the class and input.\n This function returns the result which is stored in the `output` field.\n \n Make sure to both store the data in `output` field and return it. \n \"\"\"\n \n # The easiest case:\n \n # self.output = input \n # return self.output\n \n pass\n\n def updateGradInput(self, input, gradOutput):\n \"\"\"\n Computing the gradient of the module with respect to its own input. \n This is returned in `gradInput`. 
Also, the `gradInput` state variable is updated accordingly.\n \n The shape of `gradInput` is always the same as the shape of `input`.\n \n Make sure to both store the gradients in `gradInput` field and return it.\n \"\"\"\n \n # The easiest case:\n \n # self.gradInput = gradOutput \n # return self.gradInput\n \n pass \n \n def accGradParameters(self, input, gradOutput):\n \"\"\"\n Computing the gradient of the module with respect to its own parameters.\n No need to override if module has no parameters (e.g. ReLU).\n \"\"\"\n pass\n \n def zeroGradParameters(self): \n \"\"\"\n Zeroes `gradParams` variable if the module has params.\n \"\"\"\n pass\n \n def getParameters(self):\n \"\"\"\n Returns a list with its parameters. \n If the module does not have parameters return empty list. \n \"\"\"\n return []\n \n def getGradParameters(self):\n \"\"\"\n Returns a list with gradients with respect to its parameters. \n If the module does not have parameters return empty list. \n \"\"\"\n return []\n \n def training(self):\n \"\"\"\n Sets training mode for the module.\n Training and testing behaviour differs for Dropout, BatchNorm.\n \"\"\"\n self.training = True\n \n def evaluate(self):\n \"\"\"\n Sets evaluation mode for the module.\n Training and testing behaviour differs for Dropout, BatchNorm.\n \"\"\"\n self.training = False\n \n def __repr__(self):\n \"\"\"\n Pretty printing. Should be overrided in every module if you want \n to have readable description. \n \"\"\"\n return \"Module\"", "Sequential container\nDefine a forward and backward pass procedures.", "class Sequential(Module):\n \"\"\"\n This class implements a container, which processes `input` data sequentially. \n \n `input` is processed by each module (layer) in self.modules consecutively.\n The resulting array is called `output`. 
\n \"\"\"\n \n def __init__ (self):\n super(Sequential, self).__init__()\n self.modules = []\n \n def add(self, module):\n \"\"\"\n Adds a module to the container.\n \"\"\"\n self.modules.append(module)\n self.inputs = []\n\n def updateOutput(self, input):\n \"\"\"\n Basic workflow of FORWARD PASS:\n \n y_0 = module[0].forward(input)\n y_1 = module[1].forward(y_0)\n ...\n output = module[n-1].forward(y_{n-2}) \n \n \n Just write a little loop. \n \"\"\"\n self.inputs = []\n y = input\n for mod in self.modules:\n self.inputs.append(y)\n y = mod.forward(y)\n self.output = y\n return self.output\n\n def backward(self, input, gradOutput):\n \"\"\"\n Workflow of BACKWARD PASS:\n \n g_{n-1} = module[n-1].backward(y_{n-2}, gradOutput)\n g_{n-2} = module[n-2].backward(y_{n-3}, g_{n-1})\n ...\n g_1 = module[1].backward(y_0, g_2) \n gradInput = module[0].backward(input, g_1) \n \n \n !!!\n \n To ech module you need to provide the input, module saw while forward pass, \n it is used while computing gradients. \n Make sure that the input for `i-th` layer the output of `module[i]` (just the same input as in forward pass) \n and NOT `input` to this Sequential module. 
\n \n !!!\n \n \"\"\"\n \n g = gradOutput\n for mod, inp in zip(self.modules[::-1], self.inputs[::-1]):\n g = mod.backward(inp, g)\n \n self.gradInput = g\n return self.gradInput\n \n\n def zeroGradParameters(self): \n for module in self.modules:\n module.zeroGradParameters()\n \n def getParameters(self):\n \"\"\"\n Should gather all parameters in a list.\n \"\"\"\n return [x.getParameters() for x in self.modules]\n \n def getGradParameters(self):\n \"\"\"\n Should gather all gradients w.r.t parameters in a list.\n \"\"\"\n return [x.getGradParameters() for x in self.modules]\n \n def __repr__(self):\n string = \"\".join([str(x) + '\\n' for x in self.modules])\n return string\n \n def __getitem__(self,x):\n return self.modules.__getitem__(x)", "Layers\n\ninput: batch_size x n_feats1\noutput: batch_size x n_feats2", "class Linear(Module):\n \"\"\"\n A module which applies a linear transformation \n A common name is fully-connected layer, InnerProductLayer in caffe. \n \n The module should work with 2D input of shape (n_samples, n_feature).\n \"\"\"\n def __init__(self, n_in, n_out):\n super(Linear, self).__init__()\n \n # This is a nice initialization\n stdv = 1./np.sqrt(n_in)\n self.W = np.random.uniform(-stdv, stdv, size = (n_out, n_in))\n self.b = np.random.uniform(-stdv, stdv, size = n_out)\n \n self.gradW = np.zeros_like(self.W)\n self.gradb = np.zeros_like(self.b)\n \n def updateOutput(self, input):\n self.output = input.dot(self.W.T) + self.b\n return self.output\n \n def updateGradInput(self, input, gradOutput):\n self.gradInput = gradOutput.dot(self.W)\n return self.gradInput\n \n def accGradParameters(self, input, gradOutput):\n self.gradW = gradOutput.T.dot(input)\n self.gradb = gradOutput.sum(axis=0)\n \n def zeroGradParameters(self):\n self.gradW.fill(0)\n self.gradb.fill(0)\n \n def getParameters(self):\n return [self.W, self.b]\n \n def getGradParameters(self):\n return [self.gradW, self.gradb]\n \n def __repr__(self):\n s = self.W.shape\n q = 'Linear 
%d -> %d' %(s[1],s[0])\n return q", "This one is probably the hardest but as others only takes 5 lines of code in total. \n- input: batch_size x n_feats\n- output: batch_size x n_feats", "class SoftMax(Module):\n def __init__(self):\n !pip freeze\n !ifconfig -a\n super(SoftMax, self).__init__()\n \n def updateOutput(self, input):\n # start with normalization for numerical stability\n self.output = np.subtract(input, input.max(axis=1, keepdims=True))\n self.output = np.exp(self.output)\n self.output = (self.output.T / (np.sum(self.output, axis=1))).T\n \n return self.output\n \n def updateGradInput(self, input, gradOutput):\n input = np.subtract(input, input.max(axis=1, keepdims=True))\n output = (np.exp(input).T / (np.sum(np.exp(input), axis=1))).T\n \n self.gradInput = np.zeros(input.shape)\n self.gradInput += gradOutput * output\n self.gradInput -= (np.sum(gradOutput * output, axis=1) * output.T).T\n \n return self.gradInput\n \n def __repr__(self):\n return \"SoftMax\"", "One of the most significant recent ideas that impacted NNs a lot is Batch normalization. The idea is simple, yet effective: the features should be whitened ($mean = 0$, $std = 1$) all the way through NN. This improves the convergence for deep models letting it train them for days but not weeks. You are to implement a part of the layer: mean subtraction. That is, the module should calculate mean value for every feature (every column) and subtract it.\nNote, that you need to estimate the mean over the dataset to be able to predict on test examples. 
The right way is to create a variable which will hold smoothed mean over batches (exponential smoothing works good) and use it when forwarding test examples.\nWhen training calculate mean as folowing: \nmean_to_subtract = self.old_mean * alpha + batch_mean * (1 - alpha)\nwhen evaluating (self.training == False) set $alpha = 1$.\n\ninput: batch_size x n_feats\noutput: batch_size x n_feats", "class BatchMeanSubtraction(Module):\n def __init__(self, alpha = 0.):\n super(BatchMeanSubtraction, self).__init__()\n \n self.alpha = alpha\n self.old_mean = None\n \n def updateOutput(self, input):\n if not self.training:\n mean_to_subtract = self.old_mean\n elif self.old_mean is not None:\n mean_to_subtract = self.old_mean * self.alpha + np.mean(input, axis=0) * (1 - self.alpha)\n else:\n mean_to_subtract = np.mean(input, axis=0)\n self.old_mean = mean_to_subtract\n self.output = input - mean_to_subtract\n return self.output\n \n def updateGradInput(self, input, gradOutput):\n self.gradInput = gradOutput - np.mean(gradOutput, axis=0)\n return self.gradInput\n \n def __repr__(self):\n return \"BatchMeanNormalization\"", "Implement dropout. The idea and implementation is really simple: just multimply the input by $Bernoulli(p)$ mask. \nThis is a very cool regularizer. In fact, when you see your net is overfitting try to add more dropout.\nWhile training (self.training == True) it should sample a mask on each iteration (for every batch). When testing this module should implement identity transform i.e. 
self.output = input.\n\ninput: batch_size x n_feats\noutput: batch_size x n_feats", "class Dropout(Module):\n def __init__(self, p=0.5):\n super(Dropout, self).__init__()\n self.p = p\n self.mask = None\n \n def updateOutput(self, input):\n self.mask = np.random.binomial(1, self.p, size=input.shape)\n self.output = input * self.mask\n return self.output\n \n def updateGradInput(self, input, gradOutput):\n self.gradInput = gradOutput * self.mask\n return self.gradInput\n \n def __repr__(self):\n return \"Dropout\"", "Activation functions\nHere's the complete example for the Rectified Linear Unit non-linearity (aka ReLU):", "class ReLU(Module):\n def __init__(self):\n super(ReLU, self).__init__()\n \n def updateOutput(self, input):\n self.output = np.maximum(input, 0)\n return self.output\n \n def updateGradInput(self, input, gradOutput):\n self.gradInput = np.multiply(gradOutput , input > 0)\n return self.gradInput\n \n def __repr__(self):\n return \"ReLU\"", "Implement Leaky Rectified Linear Unit. 
Expriment with slope.", "class LeakyReLU(Module):\n def __init__(self, slope = 0.03):\n super(LeakyReLU, self).__init__()\n \n self.slope = slope\n \n def updateOutput(self, input):\n self.output = np.maximum(input, self.slope * input)\n return self.output\n \n def updateGradInput(self, input, gradOutput):\n self.gradInput = gradOutput\n self.gradInput[input < 0] *= self.slope\n return self.gradInput\n \n def __repr__(self):\n return \"LeakyReLU\"", "Implement Exponential Linear Units activations.", "class ELU(Module):\n def __init__(self, alpha = 1.0):\n super(ELU, self).__init__()\n \n self.alpha = alpha\n \n def updateOutput(self, input):\n self.output = input\n self.output[input < 0] = (np.exp(self.output[input < 0]) - 1) * self.alpha\n return self.output\n \n def updateGradInput(self, input, gradOutput):\n self.gradInput = gradOutput\n self.gradInput[input < 0] *= self.alpha * np.exp(input[input < 0])\n return self.gradInput\n \n def __repr__(self):\n return \"ELU\"", "Implement SoftPlus activations. Look, how they look a lot like ReLU.", "class SoftPlus(Module):\n def __init__(self):\n super(SoftPlus, self).__init__()\n \n def updateOutput(self, input):\n self.output = np.log(np.exp(input) + 1)\n return self.output\n \n def updateGradInput(self, input, gradOutput):\n self.gradInput = 1. 
/ (1 + np.exp(-input)) * gradOutput\n return self.gradInput\n \n def __repr__(self):\n return \"SoftPlus\"", "Criterions\nCriterions are used to score the models answers.", "class Criterion(object):\n def __init__ (self):\n self.output = None\n self.gradInput = None\n \n def forward(self, input, target):\n \"\"\"\n Given an input and a target, compute the loss function \n associated to the criterion and return the result.\n \n For consistency this function should not be overrided,\n all the code goes in `updateOutput`.\n \"\"\"\n return self.updateOutput(input, target)\n\n def backward(self, input, target):\n \"\"\"\n Given an input and a target, compute the gradients of the loss function\n associated to the criterion and return the result. \n\n For consistency this function should not be overrided,\n all the code goes in `updateGradInput`.\n \"\"\"\n return self.updateGradInput(input, target)\n \n def updateOutput(self, input, target):\n \"\"\"\n Function to override.\n \"\"\"\n return self.output\n\n def updateGradInput(self, input, target):\n \"\"\"\n Function to override.\n \"\"\"\n return self.gradInput \n\n def __repr__(self):\n \"\"\"\n Pretty printing. Should be overrided in every module if you want \n to have readable description. \n \"\"\"\n return \"Criterion\"", "The MSECriterion, which is basic L2 norm usually used for regression, is implemented here for you.", "class MSECriterion(Criterion):\n def __init__(self):\n super(MSECriterion, self).__init__()\n \n def updateOutput(self, input, target): \n self.output = np.sum(np.power(input - target,2)) / input.shape[0]\n return self.output \n \n def updateGradInput(self, input, target):\n self.gradInput = (input - target) * 2 / input.shape[0]\n return self.gradInput\n\n def __repr__(self):\n return \"MSECriterion\"", "You task is to implement the ClassNLLCriterion. It should implement multiclass log loss. 
Nevertheless there is a sum over y (target) in that formula, \nremember that targets are one-hot encoded. This fact simplifies the computations a lot. Note, that criterions are the only places, where you divide by batch size.", "class ClassNLLCriterion(Criterion):\n def __init__(self):\n a = super(ClassNLLCriterion, self)\n super(ClassNLLCriterion, self).__init__()\n \n def updateOutput(self, input, target): \n \n # Use this trick to avoid numerical errors\n eps = 1e-15 \n input_clamp = np.clip(input, eps, 1 - eps)\n \n self.output = -np.sum(target * np.log(input_clamp)) / target.shape[0]\n return self.output\n\n def updateGradInput(self, input, target):\n \n # Use this trick to avoid numerical errors\n input_clamp = np.maximum(1e-15, np.minimum(input, 1 - 1e-15) )\n \n self.gradInput = -target / input_clamp / target.shape[0]\n return self.gradInput\n \n def __repr__(self):\n return \"ClassNLLCriterion\"" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
plipp/informatica-pfr-2017
nbs/2/4-ADVANCED-Primer-Idiomatic Pandas.ipynb
mit
[ "Primer: Idiomatic Pandas: Making Code Pandorable\nOriginal Source: Coursera Introduction to Data Science in Python: Week 3\nGood Follow up/Exercises can be found in 5 Tips To Write Idiomatic Pandas Code", "import pandas as pd\ndf = pd.read_csv('../../data/census.csv')\ndf[:5]", "Index vs. Method Chaining\nIndex Chaining is a bad practice:\ndf.loc[“Washtenaw”][“Total Population”]\nYou never know, when a view or a copy is returned and on what you work (depends on underlaying numpy):\n-- <cite>If you see a ][ you should think carefully about what you are doing (Tom\nAugspurger)</cite>\nMethod Chaining is best practice.", "# good code style: m e t h o d s in multiple lines\n(df.where(df['SUMLEV']==50)\n .dropna()\n .set_index(['STNAME','CTYNAME'])\n .rename(columns={'ESTIMATESBASE2010': 'Estimates Base 2010'}))\n\n# makes a c o p y !\n\ndf.where(df['SUMLEV']==50).shape # replaces rows with df['SUMLEV']!=50 with NaN, but has the same shape as the originsl\n\ndf[df['SUMLEV']==50].shape # removes all rows with df['SUMLEV']!=50\n\ndf = df[df['SUMLEV']==50]\ndf.set_index(['STNAME','CTYNAME'], inplace=True)\ndf.rename(columns={'ESTIMATESBASE2010': 'Estimates Base 2010'})\n\n# inplace=True => permanent changes!\n\nimport numpy as np\ndef min_max(row):\n data = row[['POPESTIMATE2010',\n 'POPESTIMATE2011',\n 'POPESTIMATE2012',\n 'POPESTIMATE2013',\n 'POPESTIMATE2014',\n 'POPESTIMATE2015']]\n return pd.Series({'min': np.min(data), 'max': np.max(data)})\n\ndf.apply(min_max, axis=1)\n\nimport numpy as np\ndef min_max(row):\n data = row[['POPESTIMATE2010',\n 'POPESTIMATE2011',\n 'POPESTIMATE2012',\n 'POPESTIMATE2013',\n 'POPESTIMATE2014',\n 'POPESTIMATE2015']]\n row['max'] = np.max(data)\n row['min'] = np.min(data)\n return row\ndf.apply(min_max, axis=1)\n\nrows = ['POPESTIMATE2010',\n 'POPESTIMATE2011',\n 'POPESTIMATE2012',\n 'POPESTIMATE2013',\n 'POPESTIMATE2014',\n 'POPESTIMATE2015']\ndf.apply(lambda x: np.max(x[rows]), axis=1)[:5]", "Group by", "import pandas as pd\nimport 
numpy as np\ndf = pd.read_csv('../../data/census.csv')\ndf = df[df['SUMLEV']==50]\ndf[:5]\n\n%%timeit -n 10\nfor state in df['STNAME'].unique():\n avg = np.average(df.where(df['STNAME']==state).dropna()['CENSUS2010POP'])\n print('Counties in state ' + state + ' have an average population of ' + str(avg))\n\n%%timeit -n 10\nfor group, frame in df.groupby('STNAME'):\n avg = np.average(frame['CENSUS2010POP'])\n print('Counties in state ' + group + ' have an average population of ' + str(avg))\n\ndf.head()\n\ndf = df.set_index('STNAME')\n\ndef fun(state_name):\n if state_name[0]<'M':\n return 0\n if state_name[0]<'Q':\n return 1\n return 2\n\nfor group, frame in df.groupby(fun):\n print('There are ' + str(len(frame)) + ' records in group ' + str(group) + ' for processing.')\n\n\ndf = pd.read_csv('../../data/census.csv')\ndf = df[df['SUMLEV']==50]\n\ndf.groupby('STNAME').agg({'CENSUS2010POP': np.average}).head()\n\nprint(type(df.groupby(level=0)['POPESTIMATE2010','POPESTIMATE2011']))\nprint(type(df.groupby(level=0)['POPESTIMATE2010']))\n\n(df.set_index('STNAME').groupby(level=0)['CENSUS2010POP']\n .agg([np.average, np.sum])\n .head())\n\n(df.set_index('STNAME').groupby(level=0)['POPESTIMATE2010','POPESTIMATE2011']\n .agg([np.average, np.sum])\n .head())\n\n(df.set_index('STNAME').groupby(level=0)['POPESTIMATE2010','POPESTIMATE2011']\n .agg({'POPESTIMATE2010': np.average, 'POPESTIMATE2011': np.sum})).head()", "Scales", "df = pd.DataFrame(['A+', 'A', 'A-', 'B+', 'B', 'B-', 'C+', 'C', 'C-', 'D+', 'D'],\n index=['excellent', 'excellent', 'excellent', 'good', 'good', 'good', 'ok', 'ok', 'ok', 'poor', 'poor'])\ndf.rename(columns={0: 'Grades'}, inplace=True)\ndf\n\ngrades = df['Grades'].astype('category').head()", "grades > 'C' # -> does not work", "grades = df['Grades'].astype('category',\n categories=['D', 'D+', 'C-', 'C', 'C+', 'B-', 'B', 'B+', 'A-', 'A', 'A+'],\n ordered=True)\ngrades.head()\n\ngrades > 'C'\n\ndf = pd.read_csv('../../data/census.csv')\ndf = 
df[df['SUMLEV']==50]\ndf = df.set_index('STNAME').groupby(level=0)['CENSUS2010POP'].agg([np.average])\npd.cut(df['average'],10)", "Pivot Tables", "#http://open.canada.ca/data/en/dataset/98f1a129-f628-4ce4-b24d-6f16bf24dd64\ndf = pd.read_csv('../../data/cars.csv')\n\ndf.head()\n\ndf.pivot_table(values='(kW)', index='YEAR', columns='Make', aggfunc=np.mean)\n\ndf.pivot_table(values='(kW)', index='YEAR', columns='Make', aggfunc=[np.mean,np.min], margins=True)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
pmgbergen/porepy
tutorials/automatic_differentiation.ipynb
gpl-3.0
[ "Introduction\nThis tutorial gives a short overview of the AD-module included in PorePy. For an example where the AD module has been used to solve non-linear compressible flow, see the tutorial: \"compressible_flow_with_automatic_differentiation\"", "import numpy as np\nimport scipy.sparse as sps\n\nfrom porepy.numerics.ad.forward_mode import Ad_array\nimport porepy.numerics.ad.functions as af", "Scalar AD-variables\nWe initiate a variable $x = 2$ by giving a pair (val, jac) to the Ad_array class. val is the value at which the function will be evaluated and jac =1 since $\\frac{d x}{dx} = 1$.", "x = Ad_array(2, 1)", "We can now define a function $y=x^2 + 3$", "y = x**2 + 3", "To obtain the function value and the derivative we can call .val and .jac", "print('y value is: ', y.val)\nprint('dy/dx is: ', y.jac)", "$y$ is also an AD variable as a function of $x$. We can use it to declare further functions, e.g., $h(x) = e^{y(x)}$. To take the exponential of an Ad_array we need to call the exponential function found in the AD module", "h = af.exp(y)\nprint('h value is: ', h.val)\nprint('dh/dx is: ', h.jac)", "If we knew the value and jacobian of $y$ we could alternatively skip initiating $x$ and initiate $y$ directly:", "y = Ad_array(7, 4)\nh = af.exp(y)\nprint('h value is: ', h.val)\nprint('dh/dx is: ', h.jac)", "Arrays of AD-variables\nThe Ad_array class also support arrays.", "x = Ad_array(np.array([1,2,3]), sps.diags([1,1,1]))", "As for the scalar case, it is straight forward to define functions using normal Python programming. 
Let us declare the function\n$$y = Ax + x^2$$\nwhich has the jacobian\n$$ J(y) = A + 2 \\text{diag}(x)$$\nWith this notation we mean $x^2 = [x_1^2, x_2^2, x_3^2]$, and $\\text{diag}(x)$ is a matrix with $x$ on the diagonal and zeros elsewhere.", "A = sps.csc_matrix(np.array([[0,2,3],[4,0,6],[7,8,0]]))\ny = A*x + x**2\n\nprint('Analytic y value: ')\nprint(np.array([14, 26, 32]))\nprint('Analytic y jacobian:')\nprint(np.array([[2,2,3],[4,4,6],[7,8,6]]),'\\n')\nprint('Ad y value: ')\nprint(y.val)\nprint('Ad y jacobian:')\nprint(y.jac.A)\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mayankjohri/LetsExplorePython
Section 2 - Advance Python/Chapter S2.01 - Functional Programming/04_functools.ipynb
gpl-3.0
[ "functools\nThe functools module is for higher-order functions: functions that act on or return other functions. In general, any callable object can be treated as a function for the purposes of this module.\nCommon functions in functools are as follows\n\npartial\nreduce\n\npartial\nfunctools.partial does the follows:\n\nMakes a new version of a function with one or more arguments already filled in.\nNew version of a function documents itself.", "def power(base, exponent):\n return base ** exponent\n\ndef square(base):\n return power(base, 2)\n\ndef cube(base):\n return power(base, 3)", "Now lets see the magic of partial", "from functools import partial\n\nsquare = partial(power, exponent=2)\ncube = partial(power, exponent=3)\n\nprint(square(2))\nprint(cube(2))\n\nprint(square(2, exponent=4))\nprint(cube(2, exponent=9))\n\nfrom functools import partial\n\ndef multiply(x,y):\n return x * y\n\n# create a new function that multiplies by 2\ndb2 = partial(multiply,2)\nprint(db2(4))\ndb4 = partial(multiply, 4)\nprint(db4(3))\n\nfrom functools import partial\n \n#----------------------------------------------------------------------\ndef add(x, y):\n \"\"\"\"\"\"\n return x + y\n \n#----------------------------------------------------------------------\ndef multiply(x, y):\n \"\"\"\"\"\"\n return x * y\n \n#----------------------------------------------------------------------\ndef run(func):\n \"\"\"\"\"\"\n print (func())\n \n#----------------------------------------------------------------------\ndef main():\n \"\"\"\"\"\"\n a1 = partial(add, 1, 2)\n m1 = partial(multiply, 5, 8)\n run(a1)\n run(m1)\n \nif __name__ == \"__main__\":\n main()\n\ndef another_function(func):\n \"\"\"\n A function that accepts another function\n \"\"\"\n \n def wrapper():\n \"\"\"\n A wrapping function\n \"\"\"\n val = \"The result of %s is %s\" % (func(),\n eval(func())\n )\n return val\n return wrapper\n 
\n#----------------------------------------------------------------------\n@another_function\ndef a_function():\n \"\"\"A pretty useless function\"\"\"\n return \"1+1\"\n \n#----------------------------------------------------------------------\nif __name__ == \"__main__\":\n print (a_function.__name__)\n print (a_function.__doc__)\n print(a_function())\n\nfrom functools import wraps\n \n#----------------------------------------------------------------------\ndef another_function(func):\n \"\"\"\n A function that accepts another function\n \"\"\"\n \n @wraps(func)\n def wrapper():\n \"\"\"\n A wrapping function\n \"\"\"\n val = \"The result of %s is %s\" % (func(),\n eval(func())\n )\n return val\n return wrapper\n \n#----------------------------------------------------------------------\n@another_function\ndef a_function():\n \"\"\"A pretty useless function\"\"\"\n return \"1+1\"\n \n#----------------------------------------------------------------------\nif __name__ == \"__main__\":\n #a_function()\n print (a_function.__name__)\n print (a_function.__doc__)\n print(a_function())", "Here we import wraps from the functools module and use it as a decorator for the nested wrapper function inside of another_function to map the name and doc to the wrapper function\nupdate_wrapper\nThe partial object does not have name or doc attributes by default, and without those attributes decorated functions are more difficult to debug. 
Using update_wrapper(), copies or adds attributes from the original function to the partial object.", "import functools\n\n\ndef myfunc1(a, b=2):\n print ('\\tcalled myfunc1 with:', (a, b))\n return\n\ndef myfunc(a, b=2):\n \"\"\"Docstring for myfunc().\"\"\"\n print ('\\tcalled myfunc with:', (a, b))\n return\n\ndef show_details(name, f):\n \"\"\"Show details of a callable object.\"\"\"\n print ('%s:' % name)\n print ('\\tobject:', f)\n print ('\\t__name__:',) \n try:\n print (f.__name__)\n except AttributeError:\n print ('(no __name__)')\n print ('\\t__doc__', repr(f.__doc__))\n print\n return\n\n\nshow_details('myfunc1', myfunc1)\nprint(\"~\"*20)\nshow_details('myfunc', myfunc)\n\np1 = functools.partial(myfunc, b=4)\nprint(\"+\"*20)\nshow_details('raw wrapper', p1)\nprint(\"^\"*20)\nprint ('Updating wrapper:')\nprint ('\\tassign:', functools.WRAPPER_ASSIGNMENTS)\nprint ('\\tupdate:', functools.WRAPPER_UPDATES)\nprint(\"*\"*20)\n\nfunctools.update_wrapper(p1, myfunc)\nshow_details('updated wrapper', p1)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
shahariarrabby/Mail_Server
Receive and server Mail.ipynb
mit
[ "Recive Mail\nThis file is imported by Server and Check ME. All function is define here.\nImporting all dependency", "__author__ = 'Shahariar Rabby'\nimport email\nimport imaplib\nimport ctypes\nimport getpass\nimport threading\nfrom playsound import playsound", "User Details Function", "def user():\n# ORG_EMAIL = \"@gmail.com\"\n# FROM_EMAIL = \"your mail\" + ORG_EMAIL\n# FROM_PWD = \"your pass\"\n FROM_EMAIL = raw_input(\"insert Email : \")+'@gmail.com'\n FROM_PWD = getpass.getpass(\"input : \")\n return FROM_EMAIL,FROM_PWD", "Login function\nIn this function we call user details function and get the user name and password, Than we use those details for IMAP login.\n IMAP (Internet Message Access Protocol) is a standard email protocol that stores email messages on a mail server, but allows the end user to view and manipulate the messages as though they were stored locally on the end user's computing device(s).", "def login():\n FROM_EMAIL,FROM_PWD = user()\n mail = imaplib.IMAP4_SSL('imap.gmail.com', 993)\n mail.login(FROM_EMAIL, FROM_PWD)\n mail.select(\"INBOX\")\n print 'Login successful'\n return mail\nmail = login()\n\ndef read_email_from_gmail(mail):\n try:\n type, data = mail.search(None, 'ALL') #Sharching all message frm inbox\n mail_ids = data[0] #Assining all mail id to mail_ids Variable\n id_list = mail_ids.split() #putting all mail id to id_list array\n first_email_id = int(id_list[0]) #getting first mail id\n latest_email_id = int(id_list[-1]) #getting last mail id\n\n for i in range(latest_email_id, latest_email_id - 10, -1): #this loop reading last 10 message\n typ, data = mail.fetch(i, '(RFC822)') #fatch mail data, and putting it a tuple where i=tuple no and 'RFC822' is mail\n\n for response_part in data: #reading all data from i no message\n if isinstance(response_part, tuple): \n msg = email.message_from_string(response_part[1]) #Reading mail\n email_subject = msg['subject'] #Email subject\n email_from = msg['from'] #Sender address\n\n print 
'From : ' + email_from\n print 'Subject : ' + email_subject\n print \"Read mail: https://gmail.com\\n\"\n\n except Exception, e:\n print (str(e)) #printing if there is any error", "Mail Server\nThis will start a server that notify user when there is new email", "def Check_Unseen():\n mail.select(\"INBOX\") #Selecting inbox\n n = 0\n (retcode, messages) = mail.search(None, '(UNSEEN)') #sharching unseen mail\n if retcode == 'OK': #if unseen\n for num in messages[0].split():\n n = n + 1 \n print n #print message number\n typ, data = mail.fetch(num, '(RFC822)') #fatching mail\n for response_part in data:\n if isinstance(response_part, tuple):\n original = email.message_from_string(response_part[1])\n print original['From']\n data = original['Subject']\n playsound('demonstrative.wav') #play sound when mail recive\n print data\n print \"Read mail: https://gmail.com\"\n\n from sys import stdout\n stdout.write(str('#')) #printing mail server is alive", "This function call Check_Unseen in every 15 sec.", "def server():\n Check_Unseen()\n threading.Timer(15, server).start() #calling function evey 15 sec\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
km-Poonacha/python4phd
Session 1/ipython/Excercise - Working With CSV-Worksheet.ipynb
gpl-3.0
[ "Excercise - Working With CSV\nUsing the CSV module\n\nA CSV file is often used exchange format for spreadsheets and databases.\nEach line is called a record and each field within a record is seperated by a delimiter such as comma, tab etc.\nWe use the module \"CSV\" which is not included in the standard library of Python.\n\nNote: Keep in mind that Mac uses a different delimiter to determine the end of a row in a CSV file than Microsoft. Since the CSV python module we will use works well with Windows CSV files, we will save and use a Windows CSV file in our program. So in MAC, you have to save the CSV file as \"windows csv\" file rather than just csv file.\nLet us write a program to read a CSV file (word_sentiment.csv). This file contains a list of 2000 + words and its sentiment ranging form -5 to +5. \nWrite a function \"word_sentiment\" which checks if the entered word is found in the sentiment_csv file and returns the corresponding sentiment. If the word is not found it returns 0.\nStep 1:Import the module CSV.\nNote: If any module is not included in the computer, we will need to do \"pip install csv\" in the terminal (in case of mac) or in the command prompt (in case of windows). \nStep 2: Assign the path of the file to a global variable \"SENTIMENT_CSV\"\nStep 3: Open the file using the \"with open()\" command and read the file\nBefore we read a file, we need to open it. The \"with open()\" command is very handy since it can open the file and give you a handler with which you can read the file. One of the benefits of the \"with\"command is that (unlike the simple open() command) it can automaticaly close the file, allowing write operations to be completed. The syntax is :\nwith open('filename', 'mode', 'encoding') as fileobj* *\nWhere fileobj is the file object returned by open(); filename is the string name of the file. 
mode indicates what you want to do with the file and ecoding defines the type of encoding with which you want to open the file.\nMode could be:\n* w -> write. if the file exists it is overwritten\n* r -> read\n* a -> append. Write at the end of the file\n* x - > write. Only if the file does not exist. It does not allow a file to be re-written\nFor each, adding a subfix 't' refers to read/write as text and the subfix 'b' refers to read/write as bytes.\nEncoding could be:\n* 'ascii'\n* 'utf-8'\n* 'latin-1'\n* 'cp-1252'\n* 'unicode-escape'\nAfter opening the file, we call the csv.reader() function to read the data. It assigns a data structure (similar to a multidimentional list) which we can use to read any cell in the csv file.", "from google.colab import drive\nfrom google.colab import files\ndrive.mount('/content/drive/')\nuploaded = files.upload()", "The full code\nLet us package all of this into a nice function which \n- reads the word_sentiment.csv file \n- searches for a particualr given word \n- returns the sentiment value of the word given to it. If the word is not found it returns 0 .\nNow let us update this code so that we ask the user to enter a sentence. We then break the sentence into words and find the sentiment of each word. We then aggregate the sentiments across all the words to calcuate the sentiment of the sentence and tell if the sentence entered is positive or negative. Hint: Use the split() command we saw in lesson 1.\nCan you improve this code to handle double like \"not\" ? eg. \"poonacha is not good\" should return a negative sentiment rather than positive .", "# Enter code here \n\n\n\n\n", "Do you think we can build a rudimentary learning algorithm to imporve the corpus of sentiments ?" ]
[ "markdown", "code", "markdown", "code", "markdown" ]
atulsingh0/MachineLearning
HandsOnML/code/07_ensemble_learning_and_random_forests.ipynb
gpl-3.0
[ "Chapter 7 – Ensemble Learning and Random Forests\nThis notebook contains all the sample code and solutions to the exercises in chapter 7.\nSetup\nFirst, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:", "# To support both python 2 and python 3\nfrom __future__ import division, print_function, unicode_literals\n\n# Common imports\nimport numpy as np\nimport os\n\n# to make this notebook's output stable across runs\nnp.random.seed(42)\n\n# To plot pretty figures\n%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\n# Where to save the figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"ensembles\"\n\ndef image_path(fig_id):\n return os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID, fig_id)\n\ndef save_fig(fig_id, tight_layout=True):\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(image_path(fig_id) + \".png\", format='png', dpi=300)", "Voting classifiers", "heads_proba = 0.51\ncoin_tosses = (np.random.rand(10000, 10) < heads_proba).astype(np.int32)\ncumulative_heads_ratio = np.cumsum(coin_tosses, axis=0) / np.arange(1, 10001).reshape(-1, 1)\n\nplt.figure(figsize=(8,3.5))\nplt.plot(cumulative_heads_ratio)\nplt.plot([0, 10000], [0.51, 0.51], \"k--\", linewidth=2, label=\"51%\")\nplt.plot([0, 10000], [0.5, 0.5], \"k-\", label=\"50%\")\nplt.xlabel(\"Number of coin tosses\")\nplt.ylabel(\"Heads ratio\")\nplt.legend(loc=\"lower right\")\nplt.axis([0, 10000, 0.42, 0.58])\nsave_fig(\"law_of_large_numbers_plot\")\nplt.show()\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import make_moons\n\nX, y = make_moons(n_samples=500, noise=0.30, random_state=42)\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n\nfrom 
sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\n\nlog_clf = LogisticRegression(random_state=42)\nrnd_clf = RandomForestClassifier(random_state=42)\nsvm_clf = SVC(random_state=42)\n\nvoting_clf = VotingClassifier(\n estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],\n voting='hard')\nvoting_clf.fit(X_train, y_train)\n\nfrom sklearn.metrics import accuracy_score\n\nfor clf in (log_clf, rnd_clf, svm_clf, voting_clf):\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print(clf.__class__.__name__, accuracy_score(y_test, y_pred))\n\nlog_clf = LogisticRegression(random_state=42)\nrnd_clf = RandomForestClassifier(random_state=42)\nsvm_clf = SVC(probability=True, random_state=42)\n\nvoting_clf = VotingClassifier(\n estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],\n voting='soft')\nvoting_clf.fit(X_train, y_train)\n\nfrom sklearn.metrics import accuracy_score\n\nfor clf in (log_clf, rnd_clf, svm_clf, voting_clf):\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print(clf.__class__.__name__, accuracy_score(y_test, y_pred))", "Bagging ensembles", "from sklearn.ensemble import BaggingClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\nbag_clf = BaggingClassifier(\n DecisionTreeClassifier(random_state=42), n_estimators=500,\n max_samples=100, bootstrap=True, n_jobs=-1, random_state=42)\nbag_clf.fit(X_train, y_train)\ny_pred = bag_clf.predict(X_test)\n\nfrom sklearn.metrics import accuracy_score\nprint(accuracy_score(y_test, y_pred))\n\ntree_clf = DecisionTreeClassifier(random_state=42)\ntree_clf.fit(X_train, y_train)\ny_pred_tree = tree_clf.predict(X_test)\nprint(accuracy_score(y_test, y_pred_tree))\n\nfrom matplotlib.colors import ListedColormap\n\ndef plot_decision_boundary(clf, X, y, axes=[-1.5, 2.5, -1, 1.5], alpha=0.5, contour=True):\n x1s = np.linspace(axes[0], axes[1], 100)\n x2s = 
np.linspace(axes[2], axes[3], 100)\n x1, x2 = np.meshgrid(x1s, x2s)\n X_new = np.c_[x1.ravel(), x2.ravel()]\n y_pred = clf.predict(X_new).reshape(x1.shape)\n custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])\n plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap, linewidth=10)\n if contour:\n custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50'])\n plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)\n plt.plot(X[:, 0][y==0], X[:, 1][y==0], \"yo\", alpha=alpha)\n plt.plot(X[:, 0][y==1], X[:, 1][y==1], \"bs\", alpha=alpha)\n plt.axis(axes)\n plt.xlabel(r\"$x_1$\", fontsize=18)\n plt.ylabel(r\"$x_2$\", fontsize=18, rotation=0)\n\nplt.figure(figsize=(11,4))\nplt.subplot(121)\nplot_decision_boundary(tree_clf, X, y)\nplt.title(\"Decision Tree\", fontsize=14)\nplt.subplot(122)\nplot_decision_boundary(bag_clf, X, y)\nplt.title(\"Decision Trees with Bagging\", fontsize=14)\nsave_fig(\"decision_tree_without_and_with_bagging_plot\")\nplt.show()", "Random Forests", "bag_clf = BaggingClassifier(\n DecisionTreeClassifier(splitter=\"random\", max_leaf_nodes=16, random_state=42),\n n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1, random_state=42)\n\nbag_clf.fit(X_train, y_train)\ny_pred = bag_clf.predict(X_test)\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nrnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, n_jobs=-1, random_state=42)\nrnd_clf.fit(X_train, y_train)\n\ny_pred_rf = rnd_clf.predict(X_test)\n\nnp.sum(y_pred == y_pred_rf) / len(y_pred) # almost identical predictions\n\nfrom sklearn.datasets import load_iris\niris = load_iris()\nrnd_clf = RandomForestClassifier(n_estimators=500, n_jobs=-1, random_state=42)\nrnd_clf.fit(iris[\"data\"], iris[\"target\"])\nfor name, score in zip(iris[\"feature_names\"], rnd_clf.feature_importances_):\n print(name, score)\n\nrnd_clf.feature_importances_\n\nplt.figure(figsize=(6, 4))\n\nfor i in range(15):\n tree_clf = DecisionTreeClassifier(max_leaf_nodes=16, 
random_state=42 + i)\n indices_with_replacement = np.random.randint(0, len(X_train), len(X_train))\n tree_clf.fit(X[indices_with_replacement], y[indices_with_replacement])\n plot_decision_boundary(tree_clf, X, y, axes=[-1.5, 2.5, -1, 1.5], alpha=0.02, contour=False)\n\nplt.show()", "Out-of-Bag evaluation", "bag_clf = BaggingClassifier(\n DecisionTreeClassifier(random_state=42), n_estimators=500,\n bootstrap=True, n_jobs=-1, oob_score=True, random_state=40)\nbag_clf.fit(X_train, y_train)\nbag_clf.oob_score_\n\nbag_clf.oob_decision_function_\n\nfrom sklearn.metrics import accuracy_score\ny_pred = bag_clf.predict(X_test)\naccuracy_score(y_test, y_pred)", "Feature importance", "from sklearn.datasets import fetch_mldata\nmnist = fetch_mldata('MNIST original')\n\nrnd_clf = RandomForestClassifier(random_state=42)\nrnd_clf.fit(mnist[\"data\"], mnist[\"target\"])\n\ndef plot_digit(data):\n image = data.reshape(28, 28)\n plt.imshow(image, cmap = matplotlib.cm.hot,\n interpolation=\"nearest\")\n plt.axis(\"off\")\n\nplot_digit(rnd_clf.feature_importances_)\n\ncbar = plt.colorbar(ticks=[rnd_clf.feature_importances_.min(), rnd_clf.feature_importances_.max()])\ncbar.ax.set_yticklabels(['Not important', 'Very important'])\n\nsave_fig(\"mnist_feature_importance_plot\")\nplt.show()", "AdaBoost", "from sklearn.ensemble import AdaBoostClassifier\n\nada_clf = AdaBoostClassifier(\n DecisionTreeClassifier(max_depth=1), n_estimators=200,\n algorithm=\"SAMME.R\", learning_rate=0.5, random_state=42)\nada_clf.fit(X_train, y_train)\n\nplot_decision_boundary(ada_clf, X, y)\n\nm = len(X_train)\n\nplt.figure(figsize=(11, 4))\nfor subplot, learning_rate in ((121, 1), (122, 0.5)):\n sample_weights = np.ones(m)\n for i in range(5):\n plt.subplot(subplot)\n svm_clf = SVC(kernel=\"rbf\", C=0.05, random_state=42)\n svm_clf.fit(X_train, y_train, sample_weight=sample_weights)\n y_pred = svm_clf.predict(X_train)\n sample_weights[y_pred != y_train] *= (1 + learning_rate)\n plot_decision_boundary(svm_clf, 
X, y, alpha=0.2)\n plt.title(\"learning_rate = {}\".format(learning_rate), fontsize=16)\n\nplt.subplot(121)\nplt.text(-0.7, -0.65, \"1\", fontsize=14)\nplt.text(-0.6, -0.10, \"2\", fontsize=14)\nplt.text(-0.5, 0.10, \"3\", fontsize=14)\nplt.text(-0.4, 0.55, \"4\", fontsize=14)\nplt.text(-0.3, 0.90, \"5\", fontsize=14)\nsave_fig(\"boosting_plot\")\nplt.show()\n\nlist(m for m in dir(ada_clf) if not m.startswith(\"_\") and m.endswith(\"_\"))", "Gradient Boosting", "np.random.seed(42)\nX = np.random.rand(100, 1) - 0.5\ny = 3*X[:, 0]**2 + 0.05 * np.random.randn(100)\n\nfrom sklearn.tree import DecisionTreeRegressor\n\ntree_reg1 = DecisionTreeRegressor(max_depth=2, random_state=42)\ntree_reg1.fit(X, y)\n\ny2 = y - tree_reg1.predict(X)\ntree_reg2 = DecisionTreeRegressor(max_depth=2, random_state=42)\ntree_reg2.fit(X, y2)\n\ny3 = y2 - tree_reg2.predict(X)\ntree_reg3 = DecisionTreeRegressor(max_depth=2, random_state=42)\ntree_reg3.fit(X, y3)\n\nX_new = np.array([[0.8]])\n\ny_pred = sum(tree.predict(X_new) for tree in (tree_reg1, tree_reg2, tree_reg3))\n\ny_pred\n\ndef plot_predictions(regressors, X, y, axes, label=None, style=\"r-\", data_style=\"b.\", data_label=None):\n x1 = np.linspace(axes[0], axes[1], 500)\n y_pred = sum(regressor.predict(x1.reshape(-1, 1)) for regressor in regressors)\n plt.plot(X[:, 0], y, data_style, label=data_label)\n plt.plot(x1, y_pred, style, linewidth=2, label=label)\n if label or data_label:\n plt.legend(loc=\"upper center\", fontsize=16)\n plt.axis(axes)\n\nplt.figure(figsize=(11,11))\n\nplt.subplot(321)\nplot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"$h_1(x_1)$\", style=\"g-\", data_label=\"Training set\")\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\nplt.title(\"Residuals and tree predictions\", fontsize=16)\n\nplt.subplot(322)\nplot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"$h(x_1) = h_1(x_1)$\", data_label=\"Training set\")\nplt.ylabel(\"$y$\", fontsize=16, 
rotation=0)\nplt.title(\"Ensemble predictions\", fontsize=16)\n\nplt.subplot(323)\nplot_predictions([tree_reg2], X, y2, axes=[-0.5, 0.5, -0.5, 0.5], label=\"$h_2(x_1)$\", style=\"g-\", data_style=\"k+\", data_label=\"Residuals\")\nplt.ylabel(\"$y - h_1(x_1)$\", fontsize=16)\n\nplt.subplot(324)\nplot_predictions([tree_reg1, tree_reg2], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"$h(x_1) = h_1(x_1) + h_2(x_1)$\")\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\n\nplt.subplot(325)\nplot_predictions([tree_reg3], X, y3, axes=[-0.5, 0.5, -0.5, 0.5], label=\"$h_3(x_1)$\", style=\"g-\", data_style=\"k+\")\nplt.ylabel(\"$y - h_1(x_1) - h_2(x_1)$\", fontsize=16)\nplt.xlabel(\"$x_1$\", fontsize=16)\n\nplt.subplot(326)\nplot_predictions([tree_reg1, tree_reg2, tree_reg3], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"$h(x_1) = h_1(x_1) + h_2(x_1) + h_3(x_1)$\")\nplt.xlabel(\"$x_1$\", fontsize=16)\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\n\nsave_fig(\"gradient_boosting_plot\")\nplt.show()\n\nfrom sklearn.ensemble import GradientBoostingRegressor\n\ngbrt = GradientBoostingRegressor(max_depth=2, n_estimators=3, learning_rate=1.0, random_state=42)\ngbrt.fit(X, y)\n\ngbrt_slow = GradientBoostingRegressor(max_depth=2, n_estimators=200, learning_rate=0.1, random_state=42)\ngbrt_slow.fit(X, y)\n\nplt.figure(figsize=(11,4))\n\nplt.subplot(121)\nplot_predictions([gbrt], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"Ensemble predictions\")\nplt.title(\"learning_rate={}, n_estimators={}\".format(gbrt.learning_rate, gbrt.n_estimators), fontsize=14)\n\nplt.subplot(122)\nplot_predictions([gbrt_slow], X, y, axes=[-0.5, 0.5, -0.1, 0.8])\nplt.title(\"learning_rate={}, n_estimators={}\".format(gbrt_slow.learning_rate, gbrt_slow.n_estimators), fontsize=14)\n\nsave_fig(\"gbrt_learning_rate_plot\")\nplt.show()", "Gradient Boosting with Early stopping", "import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\n\nX_train, X_val, y_train, 
y_val = train_test_split(X, y, random_state=49)\n\ngbrt = GradientBoostingRegressor(max_depth=2, n_estimators=120, random_state=42)\ngbrt.fit(X_train, y_train)\n\nerrors = [mean_squared_error(y_val, y_pred)\n for y_pred in gbrt.staged_predict(X_val)]\nbst_n_estimators = np.argmin(errors)\n\ngbrt_best = GradientBoostingRegressor(max_depth=2,n_estimators=bst_n_estimators, random_state=42)\ngbrt_best.fit(X_train, y_train)\n\nmin_error = np.min(errors)\n\nplt.figure(figsize=(11, 4))\n\nplt.subplot(121)\nplt.plot(errors, \"b.-\")\nplt.plot([bst_n_estimators, bst_n_estimators], [0, min_error], \"k--\")\nplt.plot([0, 120], [min_error, min_error], \"k--\")\nplt.plot(bst_n_estimators, min_error, \"ko\")\nplt.text(bst_n_estimators, min_error*1.2, \"Minimum\", ha=\"center\", fontsize=14)\nplt.axis([0, 120, 0, 0.01])\nplt.xlabel(\"Number of trees\")\nplt.title(\"Validation error\", fontsize=14)\n\nplt.subplot(122)\nplot_predictions([gbrt_best], X, y, axes=[-0.5, 0.5, -0.1, 0.8])\nplt.title(\"Best model (%d trees)\" % bst_n_estimators, fontsize=14)\n\nsave_fig(\"early_stopping_gbrt_plot\")\nplt.show()\n\ngbrt = GradientBoostingRegressor(max_depth=2, warm_start=True, random_state=42)\n\nmin_val_error = float(\"inf\")\nerror_going_up = 0\nfor n_estimators in range(1, 120):\n gbrt.n_estimators = n_estimators\n gbrt.fit(X_train, y_train)\n y_pred = gbrt.predict(X_val)\n val_error = mean_squared_error(y_val, y_pred)\n if val_error < min_val_error:\n min_val_error = val_error\n error_going_up = 0\n else:\n error_going_up += 1\n if error_going_up == 5:\n break # early stopping\n\nprint(gbrt.n_estimators)", "Exercise solutions\nComing soon" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
rmdort/clipper
examples/tutorial/tutorial_part_two.ipynb
apache-2.0
[ "Clipper Tutorial: Part 2\nIn this part of the tutorial, you will put on your data scientist hat and train and deploy some models to Clipper to improve your application accuracy.\nConnect to Clipper (again)\nBecause this is a separate Python instance, you must create a new Clipper object and connect to your running Clipper instance. Make sure you enter the same information here as you did in part one.", "import sys\nimport os\nfrom clipper_admin import Clipper\n# Change the username if necessary\nuser = \"\"\n# Set the path to the SSH key\nkey = \"\"\n# Set the SSH host\nhost = \"\"\nclipper = Clipper(host, user, key)", "Load Cifar\nBecause this is a new notebook, you must load the CIFAR dataset again. This time, you will be using it to train and evaluate machine learning models.\nSet cifar_loc to the same location you did in the \"Download the Images\" section of part one of the tutorial. You will load into Python the number of training and test datapoints specified in \"Extract the images\" section of part one.", "cifar_loc = \"\"\nimport cifar_utils\ntrain_x, train_y = cifar_utils.filter_data(\n *cifar_utils.load_cifar(cifar_loc, cifar_filename=\"cifar_train.data\", norm=True))\ntest_x, test_y = cifar_utils.filter_data(\n *cifar_utils.load_cifar(cifar_loc, cifar_filename=\"cifar_test.data\", norm=True))", "Train Logistic Regression Model\nWhen tackling a new problem with machine learning, it's always good to start with simple models and only add complexity when needed. Start by training a logistic regression binary classifier using Scikit-Learn. This model gets about 68% accuracy on the offline evaluation dataset if you use 10,000 training examples. 
It gets about 74% if you use all 50,000 examples.", "from sklearn import linear_model as lm \ndef train_sklearn_model(m, train_x, train_y):\n m.fit(train_x, train_y)\n return m\nlr_model = train_sklearn_model(lm.LogisticRegression(), train_x, train_y)\nprint(\"Logistic Regression test score: %f\" % lr_model.score(test_x, test_y))", "Deploy Logistic Regression Model\nWhile 68-74% accuracy on a CIFAR binary classification task is significantly below state of the art, it's already much better than the 50% accuracy your application yields right now by guessing randomly.\nYou can deploy your logistic regression model directly to Clipper without having to worry about how to serialize the model or integrate it with application code.\nTo deploy a model to Clipper, you must assign it a name (\"sklearn_cifar\"), a version (1), and then provide some metadata about the model itself. In this case, you are specifying that you want to run the model using the sklearn_cifar_container Docker image in the Clipper repo on Docker Hub. You can assign the model descriptive labels, and specify the input type that this model expects. Finally, you can specify how many replicas of the model (how many Docker containers) to launch. Adding more replicas increases the throughput of this model.\nAfter completing this step, Clipper will be managing a new container in Docker with your model in it:\n<img src=\"img/deploy_sklearn_model.png\" style=\"width: 500px;\"/>\n\nOnce again, because you are deploying a Docker image this command may take awhile to download the image. Thanks for being patient!", "model_name = \"birds_vs_planes_classifier\"\n\nmodel_added = clipper.deploy_model(\n model_name,\n 1,\n lr_model,\n \"clipper/sklearn_cifar_container:latest\",\n \"doubles\",\n num_containers=1\n)\nprint(\"Model deploy successful? {success}\".format(success=model_added))", "Now that you've deployed your model, go ahead and check back on your running frontend application from part 1. 
You should see the accuracy rise from around 50% to the accuracy of your SKLearn model (68-74%), without having to stop or modify your application at all!\nLoad TensorFlow Model\nTo improve the accuracy of your application further, you will now deploy a TensorFlow convolutional neural network. This model takes a few hours to train, so you will download the trained model parameters rather than training it from scratch. This model gets about 88% accuracy on the test dataset.\nThere is a pre-trained TensorFlow model stored in the repo using git-lfs. Once you install git-lfs, you can download the model with the command git lfs pull. If you don't want to deploy a TensorFlow model, you can skip this step.", "import os\nimport tensorflow as tf\nimport numpy as np\ntf_cifar_model_path = os.path.abspath(\"tf_cifar_model/cifar10_model_full\")\ntf_session = tf.Session('', tf.Graph())\nwith tf_session.graph.as_default():\n saver = tf.train.import_meta_graph(\"%s.meta\" % tf_cifar_model_path)\n saver.restore(tf_session, tf_cifar_model_path)\n\ndef tensorflow_score(session, test_x, test_y):\n \"\"\"\n NOTE: This predict method expects pre-whitened (normalized) images\n \"\"\"\n logits = session.run('softmax_logits:0',\n feed_dict={'x:0': test_x})\n relevant_activations = logits[:, [cifar_utils.negative_class, cifar_utils.positive_class]]\n preds = np.argmax(relevant_activations, axis=1)\n return float(np.sum(preds == test_y)) / float(len(test_y))\nprint(\"TensorFlow CNN test score: %f\" % tensorflow_score(tf_session, test_x, test_y))", "Deploy TensorFlow Model\nSimilar to deploying the logistic regression model, you can now deploy your TensorFlow neural network. Note that you will specify a different model container to use this time: the tf_cifar_container. In this case, you are providing Clipper a serialized version of the model. 
The container has been set up to reconstruct the original model from the serialized representation.\nAfter completing this step, Clipper will send queries to the newly-deployed TensorFlow model instead of the logistic regression Scikit-Learn model, improving the application's accuracy.\n<img src=\"img/tf_replaces_sklearn_model.png\" style=\"width: 600px;\"/>\n\nOnce again, please patient while the Docker image is downloaded.", "model_added = clipper.deploy_model(\n model_name,\n 2,\n os.path.abspath(\"tf_cifar_model\"),\n \"clipper/tf_cifar_container:latest\",\n \"doubles\",\n num_containers=1\n)\nprint(\"Model deploy successful? {success}\".format(success=model_added))", "Inspect Clipper Metrics\nClipper also records various system performance metrics. You can inspect the current state of these metrics with the inspect_instance() command.", "clipper.inspect_instance()", "Congratulations! You've now successfully completed the tutorial. You started Clipper, created an application and queried it from a frontend client, and deployed two models trained in two different machine learning frameworks (Scikit-Learn and TensorFlow) to the running system.\nHead back to the notebook from part 1. When you're done watching the accuracy of your application, stop the cell (hit the little \"stop\" square in the notebook toolbar).\n<img src=\"img/warning.jpg\" style=\"width: 400px;\"/>\n\nThis step will stop and remove all Clipper Docker containers running on the host. This command will not affect other Docker containers running on the host machine.\n\nCleanup\nWhen you're completely done with the tutorial and want to shut down your Clipper instance, you can run the stop_all() command to stop all the Clipper Docker containers.\nIf you check the accuracy of your frontend application a final time, you should see accuracy around 88%. If the accuracy is below that, you can try sending more feedback to increase the weight on the TensorFlow model even more.", "clipper.stop_all()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
shawger/uc-dand
P2/UCDAND-P2.ipynb
gpl-3.0
[ "Titanic Dataset Investigation\nBy: Nick Shaw\nDate: 2016-07-01\nProject: P2 from the Udacity Data Analyst Nano Degree\n1. Introduction\nData describing passengers on the Titanic will be used to investigate the following questions:\n\nHow does sex effect passenger class?\nDoes the age of a passenger have any effect on their survival? What effect does sex have?\n\nThe data used is from the Kaggle Titanic Dataset and can be found here.\nPython with the help of pandas, numpy and matlibplot will be used for the investigation.\nThis project has a github page here.\n1.1 Code", "# Start of code. This block is for imports, global variables, common functions and any setup needed for the investigation\n\n%matplotlib inline\n\nimport pandas as pd\n\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n#Set some common formatting\nmatplotlib.rcParams.update({'font.size': 20})\nmatplotlib.rcParams.update({'figure.titlesize': 24})\nmatplotlib.rcParams.update({'axes.labelsize': 20})\n\nmatplotlib.rcParams.update({'figure.figsize': (18,12)})\n\n\n#For some reason setting 'font.size' does not effect the ytick and xtick font sizes.\nmatplotlib.rcParams.update({'ytick.labelsize': 20})\nmatplotlib.rcParams.update({'xtick.labelsize': 20})\n\n#Set some color maps to keep common color schemes\nsexColors = ['limegreen','dodgerblue']\nclassColors = ['gold','silver','rosybrown']\nsurvivedColors = ['lightcoral','plum']\n\n# The following function is used to create counts and percentages in the pie\ndef make_autopct(values):\n def my_autopct(pct):\n total = sum(values)\n val = int(round(pct*total/100.0))\n return '{p:.2f}% ({v:d})'.format(p=pct,v=val)\n return my_autopct\n\n\n", "2. 
Question 1: How does sex effect passenger class?\n2.1 Data Wrangling\nData loaded from trian.csv.", "# Open the csv and load into pandas dataframe\n\ndf = pd.read_csv('train.csv')", "Now that the dataset is loaded, check if any rows contain bad data for the variables we are looking at.\n\nPassenger sex\nPassenger class\nIf the passenger has siblings or spouses (this comes later)", "#Use the pandas.isnull function to find any missing data\nnullSex = df[pd.isnull(df['Sex'])]['PassengerId'].count()\nnullClass = df[pd.isnull(df['Pclass'])]['PassengerId'].count()\nnullSibSp = df[pd.isnull(df['SibSp'])]['PassengerId'].count()\n\nprint \"Rows with no sex: %d\\nRows with no pClass: %d\\nRows with no SibSp: %d\" % (nullSex,nullClass,nullSibSp)", "No missing data found, so we don't need to worry about cleaning the data for this investigation.\n2.2 1D Investigation\nFor the question, 'How does sex effect passenger class?', independently explore the variables sex and passenger class.\n2.2.1 Passenger Sex\nAs sex is a boolean thing (at least in this example) the only useful question we can answer is male vs female:", "sexNumbers = df.groupby('Sex')['Sex'].count()\n\nsexNumbers.plot.pie(subplots=True,\n figsize=(8, 8),\n autopct = make_autopct(sexNumbers),\n title='Passengers on Titanic Sex Distribution',\n colors = sexColors)", "About 2/3 of the passengers are male and the other 1/3 female.\nIt might be interesting to compare this with passenger data from other ships in that era, or ships/trains/planes today.\n2.2.2 Passenger Class\nThere are 3 classes (1, 2, and 3) so lets see how many passengers in each group and what is that as a percent.", "classNumbers = df.groupby('Pclass')['Pclass'].count()\n\nclassNumbers.plot.pie(subplots=True,\n figsize=(8, 8),\n autopct= make_autopct(classNumbers),\n title='Passengers on Titanic Class Distribution',\n labels = ['First Class', 'Second Class', 'Third Class'],\n colors=classColors)", "3rd Class makes up the majority of the passengers. 
There are a similar number of 1st and 2nd class passengers.\nIt might be interesting to compare this with passenger data from other ships in that era, or ships/trains/planes today.\n2.3 2D Investigation\nInvestigate the relationship between passenger class and sex.\n2.3.1 Class Vs Sex on the Titanic\nFor this we can break the dataset into 2 groups (male and female) then look at how the makeup of the class is.", "# Group passenegers into male and female, and then group by class and count the number of passengers in the groups\nfemaleVsClass = df[df['Sex'] == 'female'].groupby(['Pclass'])['Pclass'].count()\nmaleVsClass = df[df['Sex'] == 'male'].groupby(['Pclass'])['Pclass'].count()\n\n# Combine the male and female results (for better graphing)\nsexVsClass = pd.concat([femaleVsClass, maleVsClass], axis=1, keys=['females','males'])\n\n#Plot the results\nsexVsClass.plot.pie(subplots=True,\n figsize=(16, 8),\n autopct='%.2f%%',\n title='Passengers on Titanic Class Distribution for Males and Females',\n labels = ['First Class', 'Second Class', 'Third Class'],\n legend=None,\n colors=classColors)\n", "The biggest difference is that a higher pct of males are in 3rd class then the % of females in third class.\nOne thought I have is that maybe there are more poor single men on the trip trying to get to America to start a new life. Let's see.\n2.3.2 Passengers Class and Sex vs Spouses\nFor this analysis take a look at how many males and females in each class have siblings or spouses onboard. 
Include the breakdown of all males and females with sibling or spouses in all class for reference.", "# Find the amount of males and females in all classes and group by the sibsp (sibblings or spouses on board)\n# Since there are a different number of males and females in all classes, compare the results using % of total\nmale1AllClassTotal = df[(df['Sex'] == 'male')]['Pclass'].count()\nmaleAllClass = df[(df['Sex'] == 'male')].groupby(['SibSp'])['SibSp'].count()/male1AllClassTotal * 100\n\nfemaleAllClassTotal = df[(df['Sex'] == 'female')]['Pclass'].count()\nfemaleAllClass = df[(df['Sex'] == 'female')].groupby(['SibSp'])['SibSp'].count()/femaleAllClassTotal * 100\n\n# Combine the males and females in all class to display on same graph\nsexVsAllClass = pd.concat([femaleAllClass, maleAllClass], axis=1, keys=['females %','males %'])\n\n\n# Find the amount of males and females in first class and group by the sibsp (sibblings or spouses on board)\n# Since there are a different number of males and females in first class, compare the results using % of total\nmale1stClassTotal = df[(df['Sex'] == 'male') & (df['Pclass'] == 1)]['Pclass'].count()\nmale1stClass = df[(df['Sex'] == 'male') & (df['Pclass'] == 1)].groupby(['SibSp'])['SibSp'].count()/male1stClassTotal * 100\n\nfemale1stClassTotal = df[(df['Sex'] == 'female') & (df['Pclass'] == 1)]['Pclass'].count()\nfemale1stClass = df[(df['Sex'] == 'female') & (df['Pclass'] == 1)].groupby(['SibSp'])['SibSp'].count()/female1stClassTotal * 100\n\n# Combine the males and females in first class to display on same graph\nsexVs1stClass = pd.concat([female1stClass, male1stClass], axis=1, keys=['females %','males %'])\n\n# Find the amount of males and females in second class and group by the sibsp (sibblings or spouses on board)\n# Since there are a different number of males and females in second class, compare the results using % of total\nmale2ndClassTotal = df[(df['Sex'] == 'male') & (df['Pclass'] == 2)]['Pclass'].count()\nmale2ndClass 
= df[(df['Sex'] == 'male') & (df['Pclass'] == 2)].groupby(['SibSp'])['SibSp'].count()/male2ndClassTotal * 100\n\nfemale2ndClassTotal = df[(df['Sex'] == 'female') & (df['Pclass'] == 2)]['Pclass'].count()\nfemale2ndClass = df[(df['Sex'] == 'female') & (df['Pclass'] == 2)].groupby(['SibSp'])['SibSp'].count()/female2ndClassTotal * 100\n\n# Combine the males and females in second class to display on same graph\nsexVs2ndClass = pd.concat([female2ndClass, male2ndClass], axis=1, keys=['females %','males %'])\n\n# Find the amount of males and females in third class and group by the sibsp (sibblings or spouses on board)\n# Since there are a different number of males and females in third class, compare the results using % of total\nmale3rdClassTotal = df[(df['Sex'] == 'male') & (df['Pclass'] == 3)]['Pclass'].count()\nmale3rdClass = df[(df['Sex'] == 'male') & (df['Pclass'] == 3)].groupby(['SibSp'])['SibSp'].count()/male3rdClassTotal * 100\n\nfemale3rdClassTotal = df[(df['Sex'] == 'female') & (df['Pclass'] == 3)]['Pclass'].count()\nfemale3rdClass = df[(df['Sex'] == 'female') & (df['Pclass'] == 3)].groupby(['SibSp'])['SibSp'].count()/female3rdClassTotal * 100\n\n# Combine the males and females in third class to display on same graph\nsexVs3rdClass = pd.concat([female3rdClass, male3rdClass], axis=1, keys=['females %','males %'])\n\n# Display the results\nprint pd.concat([sexVsAllClass, sexVs1stClass,sexVs2ndClass,sexVs3rdClass], axis=1, keys=['All','First','Second','Third'])\na1 = sexVsAllClass.plot.bar(color=sexColors)\na1.set_title('All Passengers',fontsize=24)\na1.set_xlabel('Number of Siblings or Spouses',fontsize=20)\na1.set_ylabel('% Of Total',fontsize=20)\n\na2 = sexVs1stClass.plot.bar(color=sexColors)\na2.set_title('1st Class Passengers',fontsize=24)\na2.set_xlabel('Number of Siblings or Spouses',fontsize=20)\na2.set_ylabel('% Of Total',fontsize=20)\n\na3 = sexVs2ndClass.plot.bar(color=sexColors)\na3.set_title('2nd Class Passengers',fontsize=24)\na3.set_xlabel('Number of 
Siblings or Spouses',fontsize=20)\na3.set_ylabel('% Of Total',fontsize=20)\n\na4 = sexVs3rdClass.plot.bar(color=sexColors)\na4.set_title('3rd Class Passengers',fontsize=24)\na4.set_xlabel('Number of Siblings or Spouses',fontsize=20)\na4.set_ylabel('% Of Total',fontsize=20)", "Class does not seem to make much of a difference when it comes to the amount of men and women with siblings or spouses aboard. The % of men aboard with no spouses of siblings aboard is higher then the % of women. It would suggest that men were more likely to travel alone then women regardless of class. \nOne interesting thing (could be an out-lier) is that there are a few larger families in third class.\n2.4 Discussion and Conclusions\n\nThere were more men on the Titanic then women.\nThe majority of passengers would be considered to be of the 3rd class.\nThere is a higher % of men in 3rd class then women. The other classes are closer.\nThe amount of siblings or spouses a passenger has does not seem to effect class or sex.\n\nThis investigation does not take into account the fact that the number of men with spouses or siblings could be effected by the number of women with siblings and spouses and vs versa.\nAnything discussed in this section is based on the data in train.csv from the Kaggle website which only includes 891/2224 of the passengers. I can't find out which of the 891 passengers were selected so it is hard to know if there is any bias in the data (eg, was the crew included?). Therefor any conclusions only apply to the passengers included in the set.\n3. Question 2: Does the age of a passenger have any effect on their survival? What effect does sex have?\nFor the question, 'Does the age of a passenger have any effect on their survival? 
What effect does sex have?', the variables will be investigated independently, then see what effect they have on each other.\n3.1 Data Wrangling\nData loaded from trian.csv.\nThe data has already been loaded in section 2.1.", "#Use the pandas.isnull function to find any missing data\nnullSex = df[pd.isnull(df['Sex'])]['PassengerId'].count()\nnullAge = df[pd.isnull(df['Age'])]['PassengerId'].count()\nnullSurvived = df[pd.isnull(df['Survived'])]['PassengerId'].count()\ntotalRows = df['PassengerId'].count()\n\nprint \"Rows with no Sex: %d\\nRows with no Age: %d\\nRows with no Survived: %d\\nTotal: %d\" % (nullSex,nullAge,nullSurvived,totalRows)", "It appears some rows with passenger age is missing. For this investigation, rows with missing age information will be discarded.", "# Remove rows with a null age (blank age) from the dataframe\ndf = df[pd.notnull(df['Age'])]\n\nnullAge = df[pd.isnull(df['Age'])]['PassengerId'].count()\ntotalRows = df['PassengerId'].count()\n\nprint \"Rows with no Age: %d\\nTotal: %d\" % (nullAge,totalRows)", "For readability, add a column in the dataframe call 'Lived or Died' which has a string representation of whether a passenger survived.", "# Add a column called 'Live or Died' with a string representation of the Survivedd column (which is 0 or 1)\nd = {0: 'Died', 1: 'Lived'}\ndf['Lived or Died'] = df['Survived'].map(d)", "3.2 1D Investigation\nPassenger survival and passenger age will be investigated. To make it more interesting both will be investigated with the entire passenger population and then split into sexes. (Maybe not 1D, but splitting into sexes doesn't add a lot of complexity)\n3.2.1 Passenger Survival", "# Use group by to find numbers of passengers how survived. 
Break down into all, men and women\n\nsurvivedAllNumbers = df.groupby('Lived or Died')['Lived or Died'].count()\nsurvivedMenNumbers = df[df['Sex']=='male'].groupby('Lived or Died')['Lived or Died'].count()\nsurvivedWomenNumbers = df[df['Sex']=='female'].groupby('Lived or Died')['Lived or Died'].count()\n\n# Combine survival numbers for display\nsurvivedNumbers = pd.concat([survivedAllNumbers, survivedMenNumbers,survivedWomenNumbers], axis=1, keys=['All','Men','Women'])\n\n# Display the not \nprint(survivedNumbers)\nsurvivedNumbers.plot.pie(subplots=True,\n figsize=(18, 6),\n autopct='%.2f%%',\n title='Passengers on Titanic Survival Rates',\n legend=None,\n colors=survivedColors)", "More died then were saved. If you were a man it was much more unfortunate as most perished, with women having a much better (but still not perfect) survival rate.\n3.2.2 Passenger Age", "# Describe the datasets in one chart using a concat of the describes of all, males and females.\nprint pd.concat([df['Age'].describe(), \n df[df['Sex'] == 'male']['Age'].describe(),\n df[df['Sex'] == 'female']['Age'].describe()],axis=1,\n keys=['All','Male',\"Female\"])\n\n# Show histograms of the total population, then for males and females sepperatly\ndf['Age'].hist()\nplt.title('Histogram of Passenger Age for all Passengers on the Titanic',fontsize=24)\nplt.xlabel(\"Age\",fontsize=20)\nplt.ylabel(\"Frequency\",fontsize=20)\nplt.show()\n\ndf[df['Sex']=='female']['Age'].hist(color=sexColors[0])\nplt.title('Histogram of Females on Titanic',fontsize=24)\nplt.xlabel(\"Age\",fontsize=20)\nplt.ylabel(\"Frequency\",fontsize=20)\nplt.show()\n\ndf[df['Sex']=='male']['Age'].hist(color=sexColors[1])\nplt.title('Histogram of Males on Titanic',fontsize=24)\nplt.xlabel(\"Age\",fontsize=20)\nplt.ylabel(\"Frequency\",fontsize=20)\nplt.show()\n", "There are more males then females. 
The distribution looks pretty close with the average age of males is slightly higher, and there are a higher % of very young females then males.\n3.3 2D Investigation\n3.3.1 Passenger Age vs Survival\nOnce again we will took at the whole population and then separate into males and females.\nUse logistic regression to estimate the survival chances of a person at a certain age.", "#Use seaborn to create graphs that use logistic regression to predict the survival % at different ages\n#Show both population as a whole and split up males and females.\nsns.set_context(\"notebook\", font_scale=3)\nsns.set_style(\"darkgrid\")\n \ng = sns.lmplot(x='Age', \n y='Survived', \n data=df, \n y_jitter=.02, \n logistic=True,\n size=6, \n aspect=4)\n\ng.set(xlim=(0,80),title='Survival Rate of All Passengers Using Logistic Regression')\n\n\ng = sns.lmplot(x='Age',\n y='Survived', \n hue=\"Sex\", \n data=df, \n y_jitter=.02, \n logistic=True,\n size=6, \n aspect=4,)\n\ng.set(xlim=(0,80),title='Survival Rate of Passengers, Seperated by Sex, Using Logistic Regression')", "In general, being younger increases the chances of survival on the Titanic, but it changes a bit if you break it down by sexes.\n\nFor men, the younger, the better their chance of survival.\nFor women, older women actually had a better chance of survival.\n\n3.4 Conclusion and Discussion\n\nFemales had a much better chance of survival then males on the Titanic.\nYounger males had a better chance of survival then older males.\nOlder females had a better chance of survival then younger females.\n\nAnything discussed in this section is based on the data in train.csv from the Kaggle website which only includes 891/2224 of the passengers. I can't find out which of the 891 passengers were selected so it is hard to know if there is any bias in the data (eg, was the crew included?). Therefor any conclusions only apply to the passengers included in the set.\n4. 
References\n\nhttp://pandas.pydata.org/pandas-docs/stable/visualization.html - For information on how to draw graphs.\nWhoever reviewed my first attempt and provided the code for make_autopct()." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
AaronCWong/phys202-2015-work
days/day06/Matplotlib.ipynb
mit
[ "Visualization with Matplotlib\nLearning Objectives: Learn how to make basic plots using Matplotlib's pylab API and how to use the Matplotlib documentation.\nThis notebook focuses only on the Matplotlib API, rather that the broader question of how you can use this API to make effective and beautiful visualizations.\nImports\nThe following imports should be used in all of your notebooks where Matplotlib in used:", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np", "Overview\nThe following conceptual organization is simplified and adapted from Benjamin Root's AnatomyOfMatplotlib tutorial.\nFigures and Axes\n\nIn Matplotlib a single visualization is a Figure.\nA Figure can have multiple areas, called subplots. Each subplot is an Axes.\nIf you don't create a Figure and Axes yourself, Matplotlib will automatically create one for you.\nAll plotting commands apply to the current Figure and Axes.\n\nThe following functions can be used to create and manage Figure and Axes objects.\nFunction | Description \n:-----------------|:----------------------------------------------------------\nfigure | Creates a new Figure\ngca | Get the current Axes instance\nsavefig | Save the current Figure to a file\nsca | Set the current Axes instance\nsubplot | Create a new subplot Axes for the current Figure\nsubplots | Create a new Figure and a grid of subplots Axes\nPlotting Functions\nOnce you have created a Figure and one or more Axes objects, you can use the following function to put data onto that Axes.\nFunction | Description\n:-----------------|:--------------------------------------------\nbar | Make a bar plot\nbarh | Make a horizontal bar plot\nboxplot | Make a box and whisker plot\ncontour | Plot contours\ncontourf | Plot filled contours\nhist | Plot a histogram\nhist2d | Make a 2D histogram plot\nimshow | Display an image on the axes\nmatshow | Display an array as a matrix\npcolor | Create a pseudocolor plot of a 2-D array\npcolormesh | Plot a 
quadrilateral mesh\nplot | Plot lines and/or markers\nplot_date | Plot with data with dates\npolar | Make a polar plot\nscatter | Make a scatter plot of x vs y\nPlot modifiers\nYou can then use the following functions to modify your visualization.\nFunction | Description\n:-----------------|:---------------------------------------------------------------------\nannotate | Create an annotation: a piece of text referring to a data point\nbox | Turn the Axes box on or off\nclabel | Label a contour plot\ncolorbar | Add a colorbar to a plot\ngrid | Turn the Axes grids on or off\nlegend | Place a legend on the current Axes\nloglog | Make a plot with log scaling on both the x and y axis\nsemilogx | Make a plot with log scaling on the x axis \nsemilogy | Make a plot with log scaling on the y axis\nsubplots_adjust | Tune the subplot layout\ntick_params | Change the appearance of ticks and tick labels\nticklabel_format| Change the ScalarFormatter used by default for linear axes\ntight_layout | Automatically adjust subplot parameters to give specified padding\ntext | Add text to the axes\ntitle | Set a title of the current axes\nxkcd | Turns on XKCD sketch-style drawing mode\nxlabel | Set the x axis label of the current axis\nxlim | Get or set the x limits of the current axes\nxticks | Get or set the x-limits of the current tick locations and labels\nylabel | Set the y axis label of the current axis\nylim | Get or set the y-limits of the current axes\nyticks | Get or set the y-limits of the current tick locations and labels\nBasic plotting\nFor now, we will work with basic line plots (plt.plot) to show how the Matplotlib pylab plotting API works. In this case, we don't create a Figure so Matplotlib does that automatically.", "t = np.linspace(0, 10.0, 100)\nplt.plot(t, np.sin(t))\nplt.xlabel('Time')\nplt.ylabel('Signal')\nplt.title('My Plot'); # supress text output", "Basic plot modification\nWith a third argument you can provide the series color and line/marker style. 
Here we create a Figure object and modify its size.", "f = plt.figure(figsize=(9,6)) # 9\" x 6\", default is 8\" x 5.5\"\n\nplt.plot(t, np.sin(t), 'r.');\nplt.xlabel('x')\nplt.ylabel('y')", "Here is a list of the single character color strings:\nb: blue\n g: green\n r: red\n c: cyan\n m: magenta\n y: yellow\n k: black\n w: white\nThe following will show all of the line and marker styles:", "from matplotlib import lines\nlines.lineStyles.keys()\n\nfrom matplotlib import markers\nmarkers.MarkerStyle.markers.keys()", "To change the plot's limits, use xlim and ylim:", "plt.plot(t, np.sin(t)*np.exp(-0.1*t),'bo')\nplt.xlim(-1.0, 11.0)\nplt.ylim(-1.0, 1.0)", "You can change the ticks along a given axis by using xticks, yticks and tick_params:", "plt.plot(t, np.sin(t)*np.exp(-0.1*t),'bo')\nplt.xlim(0.0, 10.0)\nplt.ylim(-1.0, 1.0)\nplt.xticks([0,5,10], ['zero','five','10'])\nplt.tick_params(axis='y', direction='inout', length=10)", "Box and grid\nYou can enable a grid or disable the box. Notice that the ticks and tick labels remain.", "plt.plot(np.random.rand(100), 'b-')\nplt.grid(True)\nplt.box(False)", "Multiple series\nMultiple calls to a plotting function will all target the current Axes:", "plt.plot(t, np.sin(t), label='sin(t)')\nplt.plot(t, np.cos(t), label='cos(t)')\nplt.xlabel('t')\nplt.ylabel('Signal(t)')\nplt.ylim(-1.5, 1.5)\nplt.xlim(right=12.0)\nplt.legend()", "Subplots\nSubplots allow you to create a grid of plots in a single figure. 
There will be an Axes associated with each subplot and only one Axes can be active at a time.\nThe first way you can create subplots is to use the subplot function, which creates and activates a new Axes for the active Figure:", "plt.subplot(2,1,1) # 2 rows x 1 col, plot 1\nplt.plot(t, np.exp(0.1*t))\nplt.ylabel('Exponential')\n\nplt.subplot(2,1,2) # 2 rows x 1 col, plot 2\nplt.plot(t, t**2)\nplt.ylabel('Quadratic')\nplt.xlabel('x')\n\nplt.tight_layout()", "In many cases, it is easier to use the subplots function, which creates a new Figure along with an array of Axes objects that can be indexed in a rational manner:", "f, ax = plt.subplots(2, 2)\n\nfor i in range(2):\n for j in range(2):\n plt.sca(ax[i,j])\n plt.plot(np.random.rand(20))\n plt.xlabel('x')\n plt.ylabel('y')\n\nplt.tight_layout()", "The subplots function also makes it easy to pass arguments to Figure and to share axes:", "f, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(6,6))\n\nfor i in range(2):\n for j in range(2):\n plt.sca(ax[i,j])\n plt.plot(np.random.rand(20))\n if i==1:\n plt.xlabel('x')\n if j==0:\n plt.ylabel('y')\n\nplt.tight_layout()", "More marker and line styling\nAll plot commands, including plot, accept keyword arguments that can be used to style the lines in more detail. Fro more information see:\n\nControlling line properties\nSpecifying colors", "plt.plot(t, np.sin(t), marker='o', color='darkblue',\n linestyle='--', alpha=0.3, markersize=10)", "Resources\n\nMatplotlib Documentation, Matplotlib developers.\nMatplotlib Gallery, Matplotlib developers.\nMatplotlib List of Plotting Commands, Matplotlib developers.\nAnatomyOfMatplotlib, Benjamin Root.\nMatplotlib Tutorial, J.R. Johansson." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
DEIB-GECO/PyGMQL
examples/notebooks/PyGMQL_Example.ipynb
apache-2.0
[ "import seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline", "PyGMQL in action\nProblem description\nWe are given three replicas of a ChIP-Seq experiment.\nWe want to:\n1. Extract high-confidence regions into one sample\n2. Identify which of these regions overlap with a set of given genes\n3. For each resulting region count ICGC mutations.\n4. Finally we want to select the regions with at least one mutation.\nRequired GMQL operations\nFor this pipeline we will need the following GMQL operations:\n- cover: extracts regions which are confirmed by at least two replicas\n- join: extracts regions which overalap with genes\n- map: for each extracted region, counts the overlapping mutations\nPyGMQL in a nutshell\nPyGMQL enables the writing of GMQL queries using the Python programming language. It exposes to the user all the GMQL operators and also a data structure for holding, manipulating and converting the results of a GMQL query.\nThe library offers:\n- A data structure called GMQLDataset, which represent a GMQL variable in the query. Every GMQLDataset is produced by a GMQL operator. When you call the materialize operation on a GMQLDataset the execution is started and the result is returned.\n- A data structure called GDataframe, holding the result of a query. A GDataframe is a pure python structure and can be used directly as any other pandas dataframe. A GDataframe holds two pandas dataframes, one for the regions and one for the metadata. We can also, given a GDataframe, go back to a GMQLDataset for using it as a GMQL variable. 
This can be done calling the to_GMQLDataset function of the GDataframe.\n<img src=\"./images/GMQLDataset_to_GDataframe.png\" width=\"500\" height=\"600\">\nGetting the library\nThe library can be downloaded from the PyPi public repository through the pip packaging system.\npip install gmql\nIf you want the most recent version of the software you can directly download it from the GitHub page:\ngit pull https://github.com/DEIB-GECO/PyGMQL.git\n cd PyGMQL\n pip install -e .\nImporting the library", "import gmql as gl\nimport pandas as pd", "Execution modes\nThe queries are directly embedded inside the language and can be executed in two different modes:\n- Local mode: the computation is executed in the local machine\n- Remote mode: the query is sent to a remote server, executed there and the results are downloaded. This process is shown in the figure below.\n<img src=\"./images/remote.png\">", "gl.set_mode(\"local\")\ngl.set_progress(False)", "The query\nThe GMQL query that we are going to present is the following:\n```\nrefSeqGenes = SELECT(annotation_type == 'gene' AND provider == 'RefSeq') HG19_BED_ANNOTATION;\nmyExp = SELECT() myRawExp;\nmyConfirmExp = COVER(2, ANY) myExp;\nmyExpOverGenes = JOIN(DIST < 0; output: RIGHT_DISTINCT) refSeqGenes myConfirmExp;\nmyMut = SELECT() myRawMut;\nmyMutOverExp = MAP() myExpOverGenes myMut;\nmyFilteredExp = SELECT(region: count_myMutOverExp_myMut > 0) myMutOverExp;\nMATERIALIZE myFilteredExp INTO ./Results/FilteredExperiment\n```\nLoading a GMQL dataset\nLoading datasets in GDM format\nBoth the gene dataset and the personal one are already in the GDM format, therefore the library only needs the location of the data for importing.\nNB: This demo is meant to be executed without the need of a remote server. For this reason the HG19_BED_ANNOTATION dataset is loaded directly from the computer disk. 
Please be aware that it can also be found in the regular GMQL repository.", "bed_annotation = gl.load_from_path(\"./Data/HG19_BED_ANNOTATION/\")\n\nmyExp = gl.load_from_path(\"./Data/myRawExp/\")", "Loading datasets with a generic schema\nThe mutation dataset is in a classical BED format but not in the GDM format, therefore we need to specify its schema through a custom parser, which is an instance of a RegionParser. \nAfter the parser is instantiated we can use the load_from_path function to load the dataset, which is currently stored in the local file system", "mutations_parser = gl.parsers.RegionParser(parser_name=\"mutations_parser\",\n chrPos=0,\n startPos=1,\n stopPos=2,\n strandPos=3,\n delimiter=\"\\t\")\n\nmyMut = gl.load_from_path(\"./Data/myRawMut/\", parser=mutations_parser)", "Selection on the metadata\nWe have in the variable bed_annotation all the ENCODE dataset of annotations. We are interested only in the annotations regarding the genes. Therefore we need to filter the dataset on the basis of the metadata.\nThis is called meta-selection and in PyGMQL can be performed using the square-bracket notation common to pandas users.", "refSeqGenes = bed_annotation[(bed_annotation['annotation_type'] == 'gene') & \n (bed_annotation['provider'] == 'RefSeq')]", "Cover operation\nWe want only reliable data from the Chip-Seq experiment, therefore we define a Chip-Seq region highly confident if it is confirmed by at least two replicas. This is a job for the cover operation.\n\nminAcc: we define the minimum number of overlapping between samples for a region to be conserved. In this case 2.\nmaxAcc: we define the maximum number. The \"ANY\" keyword makes the upper bound infinite.\n\n<img src=\"./images/cover.PNG\">", "myConfirmExp = myExp.normal_cover(minAcc=2, maxAcc=\"ANY\")", "Join\nNow we want to extract those regions that overlap with genes. We can do it using the join operation. \n- We use the genes as reference dataset\n- The Chip-Seq regions as experiment. 
\n- The genometric predicate DLE(0) and the option output=\"RIGHT\" tell the engine to look at all the experiment regions that happen to intersect with the reference ones and keep them in the result.\n<img src=\"./images/join.PNG\" height=\"700\" width=\"700\">", "myExpOverGenes = refSeqGenes.join(experiment=myConfirmExp, refName=\"gene\",\n genometric_predicate=[gl.DLE(0)],\n output=\"RIGHT_DISTINCT\")", "Map\nNow in insideGene we have the set of highly confident Chip-Seq regions that intersect with a gene. In order to see how many mutations happen in those region we can use the map operation. This operation will add a new attribute to the resulting dataset which will be named count_GENE_MUTATION.\n<img src=\"./images/map.PNG\">", "myMutOverExp = myExpOverGenes.map(myMut, expName=\"MUTATION\", refName=\"GENE\")", "Selection on regions\nIn order to filter out all the regions in mutationCount that do not have any mutation we can do a selection on region data using the reg_select operation. This operation takes as input a predicate on region fields.", "myFilteredExp = myMutOverExp.reg_select(myMutOverExp.count_GENE_MUTATION > 0)", "Materialization\nWe can now materialize the result. PyGMQL adopts a lazy loading approach (like Spark) and no action is performed until materialization. The result can be saved as a GDM dataset and also directly loaded in python as a GDataframe.", "result = myFilteredExp.materialize(\"./Results/FilteredExperiment/\")", "The GDataframe\nThe result of a materialization is a GDataframe which contains both regions and metadata as two Pandas dataframes regs and meta", "result.regs.head()", "The region dataframe represents the regions in the output result in a tabular format. The index of the regions is the identifier of the sample they belong to.", "result.meta", "The metadata dataframe has as columns all the metadata attributes and every row represents a sample in the output dataset. 
\nThere can be multiple values in the same cell.\nAdditional operations\nHaving the result in a Pandas dataframe enables us to use all the Pandas functions and perform some statistics.\nFor example we can do an histogram of the mutation counts over the Chip-Seq regions showing the distribution of the number of mutations over those regions.", "plt.figure(figsize=(25, 20))\nresult.regs[result.regs.count_gene_mutation <=50].hist(\"count_gene_mutation\", bins=50)", "Of course we can also simply display the mean value", "result.regs['count_gene_mutation'].mean()", "High density regions", "result.regs[result.regs.count_gene_mutation > 5].sort_values(\"count_gene_mutation\", ascending=False)", "Alternative experiment\nMapping the mutations over the genes instead that over the experiment regions\nThe GMQL query that we are going to present is the following:\n```\nrefSeqGenes = SELECT(annotation_type == 'gene' AND provider == 'RefSeq') HG19_BED_ANNOTATION;\nmyExp = SELECT() myRawExp;\nmyConfirmExp = COVER(2, ANY) myExp;\nmyExpOverGenes = JOIN(DIST < 0; output: RIGHT_DISTINCT) refSeqGenes myConfirmExp;\nmyMut = SELECT() myRawMut;\nmyMutOverExp = MAP() myExpOverGenes myMut;\nmyFilteredExp = SELECT(region: count_myMutOverExp_myMut > 0) myMutOverExp;\nMATERIALIZE myFilteredExp INTO ./Results/FilteredExperiment\n```", "genesOverExp = refSeqGenes.join(experiment=myConfirmExp, refName=\"gene\",\n genometric_predicate=[gl.DLE(0)],\n output=\"LEFT_DISTINCT\")\nmyMutOverGenes = genesOverExp.map(myMut, expName=\"MUTATION\", refName=\"GENE\")\nmyFilteredGenes = myMutOverGenes.reg_select(myMutOverGenes.count_GENE_MUTATION > 0)\nresult_genes = myFilteredGenes.materialize(\"./Results/FilteredGenes/\")\n\nresult_genes.regs.head()\n\nplt.figure(figsize=(25, 20))\nresult_genes.regs[result_genes.regs.count_gene_mutation <=50].hist(\"count_gene_mutation\", bins=50)\n\nresult_genes.regs['count_gene_mutation'].mean()\n\nresult_genes.regs[result_genes.regs.count_gene_mutation > 
50].sort_values(\"count_gene_mutation\", ascending=False)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
alasdairtran/mclearn
projects/alasdair/notebooks/02_exploratory_analysis.ipynb
bsd-3-clause
[ "Exploratory Analysis", "# remove after testing\n%load_ext autoreload\n%autoreload 2\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom urllib.request import urlopen\nfrom sklearn.decomposition import PCA\nfrom mclearn.viz import (plot_class_distribution,\n plot_hex_map,\n plot_filters_and_spectrum,\n plot_scatter_with_classes)\nfrom mclearn.preprocessing import balanced_train_test_split\n%matplotlib inline\nsns.set_style('ticks')\n\nfig_dir = '../thesis/figures/'\ntarget_col = 'class'\nsdss_features = ['psfMag_r_w14', 'psf_u_g_w14', 'psf_g_r_w14', 'psf_r_i_w14',\n 'psf_i_z_w14', 'petroMag_r_w14', 'petro_u_g_w14', 'petro_g_r_w14',\n 'petro_r_i_w14', 'petro_i_z_w14', 'petroRad_r']\nvstatlas_features = ['rmagC', 'umg', 'gmr', 'rmi', 'imz', 'rmw1', 'w1m2']\n\nsdss = pd.read_hdf('../data/sdss.h5', 'sdss')\nvstatlas = pd.read_hdf('../data/vstatlas.h5', 'vstatlas')", "Distribution of Classes", "fig = plt.figure(figsize=(5, 5))\nax = plot_class_distribution(sdss[target_col])\nax.tick_params(top='off', right='off')\nfig.savefig(fig_dir + '2_astro/sdss_class_distribution.pdf', bbox_inches='tight')\n\nfig = plt.figure(figsize=(5, 5))\nax = plot_class_distribution(vstatlas[target_col])\nax.tick_params(top='off', right='off')\nfig.savefig(fig_dir + '2_astro/vstatlas_class_distribution.pdf', bbox_inches='tight')\n\nsdss[target_col].value_counts()\n\n0.3*(25604+ 6559+ 2303+590)\n\nvstatlas[target_col].value_counts()", "Maps of Classes\nWe have around 2.8 million labelled data points. Below are the maps showing how the three classes - galaxies, stars, and quasars - are distributed. Here we use the Mollweide projection, with the following coordinate layout. 
The red line is the plane of the Milky Way.", "fig = plt.figure(figsize=(10,5))\nzero_values = np.zeros(1)\nax = plot_hex_map(zero_values, zero_values, axisbg=None, colorbar=False, labels=True)\nfig.savefig(fig_dir + '2_astro/mollweide_map.pdf', bbox_inches='tight')", "Here are the distribution map of galaxies, stars, and quasars, respectively.", "# make Boolean index of each object\nis_galaxy = sdss[target_col] == 'Galaxy'\nis_star = sdss[target_col] == 'Star'\nis_quasar = sdss[target_col] == 'Quasar'\n\n# extract the coordinates of each object\ngalaxy_ra, galaxy_dec = sdss[is_galaxy]['ra'], sdss[is_galaxy]['dec']\nstar_ra, star_dec = sdss[is_star]['ra'], sdss[is_star]['dec']\nquasar_ra, quasar_dec = sdss[is_quasar]['ra'], sdss[is_quasar]['dec']\n\n# plot galaxy map\nfig = plt.figure(figsize=(10,5))\nax = plot_hex_map(galaxy_ra, galaxy_dec)\nfig.savefig(fig_dir + '4_expt1/sdss_train_galaxies.png', bbox_inches='tight', dpi=300)\n\n# plot star map\nfig = plt.figure(figsize=(10,5))\nax = plot_hex_map(star_ra, star_dec)\nfig.savefig(fig_dir + '4_expt1/sdss_train_stars.png', bbox_inches='tight', dpi=300)\n\n# plot quasar map\nfig = plt.figure(figsize=(10,5))\nax = plot_hex_map(quasar_ra, quasar_dec)\nfig.savefig(fig_dir + '4_expt1/sdss_train_quasars.png', bbox_inches='tight', dpi=300)", "Photometry vs Spectroscopy\nTo see the difference between photometry and spectroscopy, we plot the spectrum of Vega (which gives us a lot of information but this is expensive to obtain) and the 5 ugriz photometric filters.", "vega_url = 'http://www.astro.washington.edu/users/ivezic/DMbook/data/1732526_nic_002.ascii'\nugriz_filter_url = 'http://www.sdss.org/dr7/instruments/imager/filters/%s.dat'\nfilter_dir = '../data/filters'\nspectra_dir = '../data/spectra'\n\nfig = plt.figure(figsize=(10,5))\nax = plot_filters_and_spectrum(ugriz_filter_url, vega_url, filter_dir=filter_dir, spectra_dir=spectra_dir)\nfig.savefig(fig_dir + '2_astro/vega_filters_and_spectrum.pdf', bbox_inches='tight')", 
"PCA and Dimensionality Reduction\nWe reduce the 11 dimensions down to 2 dimensions using PCA.", "X_train, X_test, y_train, y_test = balanced_train_test_split(\n sdss[sdss_features], sdss[target_col], train_size=200000, test_size=100000, random_state=2)\n\npca = PCA(n_components=2)\nprojection = pca.fit_transform(X_train)\nclasses = ['Galaxy', 'Quasar', 'Star']\n\nfig = plt.figure(figsize=(10, 5))\nax = plot_scatter_with_classes(projection, y_train, classes)\nax.set_xlim(-5, 5)\nax.set_ylim(-5, 5)\nfig.savefig(fig_dir + '4_expt1/sdss_pca_all.png', bbox_inches='tight', dpi=300)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Featuretools/featuretools
docs/source/getting_started/using_entitysets.ipynb
bsd-3-clause
[ "Representing Data with EntitySets\nAn EntitySet is a collection of dataframes and the relationships between them. They are useful for preparing raw, structured datasets for feature engineering. While many functions in Featuretools take dataframes and relationships as separate arguments, it is recommended to create an EntitySet, so you can more easily manipulate your data as needed.\nThe Raw Data\nBelow we have two tables of data (represented as Pandas DataFrames) related to customer transactions. The first is a merge of transactions, sessions, and customers so that the result looks like something you might see in a log file:", "import featuretools as ft\ndata = ft.demo.load_mock_customer()\ntransactions_df = data[\"transactions\"].merge(data[\"sessions\"]).merge(data[\"customers\"])\n\ntransactions_df.sample(10)", "And the second dataframe is a list of products involved in those transactions.", "products_df = data[\"products\"]\nproducts_df", "Creating an EntitySet\nFirst, we initialize an EntitySet. If you'd like to give it a name, you can optionally provide an id to the constructor.", "es = ft.EntitySet(id=\"customer_data\")", "Adding dataframes\nTo get started, we add the transactions dataframe to the EntitySet. 
In the call to add_dataframe, we specify three important parameters:\n\nThe index parameter specifies the column that uniquely identifies rows in the dataframe.\nThe time_index parameter tells Featuretools when the data was created.\nThe logical_types parameter indicates that \"product_id\" should be interpreted as a Categorical column, even though it is just an integer in the underlying data.", "from woodwork.logical_types import Categorical, PostalCode\n\nes = es.add_dataframe(\n dataframe_name=\"transactions\",\n dataframe=transactions_df,\n index=\"transaction_id\",\n time_index=\"transaction_time\",\n logical_types={\n \"product_id\": Categorical,\n \"zip_code\": PostalCode,\n },\n)\n\nes", "You can also use a setter on the EntitySet object to add dataframes\nThis method associates each column in the dataframe to a Woodwork logical type. Each logical type can have an associated standard semantic tag that helps define the column data type. If you don't specify the logical type for a column, it gets inferred based on the underlying data. The logical types and semantic tags are listed in the schema of the dataframe. For more information on working with logical types and semantic tags, take a look at the Woodwork documention.", "es[\"transactions\"].ww.schema", "Now, we can do that same thing with our products dataframe.", "es = es.add_dataframe(\n dataframe_name=\"products\",\n dataframe=products_df,\n index=\"product_id\")\n\nes", "With two dataframes in our EntitySet, we can add a relationship between them.\nAdding a Relationship\nWe want to relate these two dataframes by the columns called \"product_id\" in each dataframe. Each product has multiple transactions associated with it, so it is called the parent dataframe, while the transactions dataframe is known as the child dataframe. When specifying relationships, we need four parameters: the parent dataframe name, the parent column name, the child dataframe name, and the child column name. 
Note that each relationship must denote a one-to-many relationship rather than a relationship which is one-to-one or many-to-many.", "es = es.add_relationship(\"products\", \"product_id\", \"transactions\", \"product_id\")\nes", "Now, we see the relationship has been added to our EntitySet.\nCreating a dataframe from an existing table\nWhen working with raw data, it is common to have sufficient information to justify the creation of new dataframes. In order to create a new dataframe and relationship for sessions, we \"normalize\" the transaction dataframe.", "es = es.normalize_dataframe(\n base_dataframe_name=\"transactions\",\n new_dataframe_name=\"sessions\",\n index=\"session_id\",\n make_time_index=\"session_start\",\n additional_columns=[\n \"device\",\n \"customer_id\",\n \"zip_code\",\n \"session_start\",\n \"join_date\",\n ],\n)\nes", "Looking at the output above, we see this method did two operations:\n\nIt created a new dataframe called \"sessions\" based on the \"session_id\" and \"session_start\" columns in \"transactions\"\nIt added a relationship connecting \"transactions\" and \"sessions\"\n\nIf we look at the schema from the transactions dataframe and the new sessions dataframe, we see two more operations that were performed automatically:", "es[\"transactions\"].ww.schema\n\nes[\"sessions\"].ww.schema", "It removed \"device\", \"customer_id\", \"zip_code\" and \"join_date\" from \"transactions\" and created a new columns in the sessions dataframe. This reduces redundant information as the those properties of a session don't change between transactions.\nIt copied and marked \"session_start\" as a time index column into the new sessions dataframe to indicate the beginning of a session. If the base dataframe has a time index and make_time_index is not set, normalize_dataframe will create a time index for the new dataframe. 
In this case it would create a new time index called \"first_transactions_time\" using the time of the first transaction of each session. If we don't want this time index to be created, we can set make_time_index=False.\n\nIf we look at the dataframes, we can see what normalize_dataframe did to the actual data.", "es[\"sessions\"].head(5)\n\nes[\"transactions\"].head(5)", "To finish preparing this dataset, create a \"customers\" dataframe using the same method call.", "es = es.normalize_dataframe(\n base_dataframe_name=\"sessions\",\n new_dataframe_name=\"customers\",\n index=\"customer_id\",\n make_time_index=\"join_date\",\n additional_columns=[\"zip_code\", \"join_date\"],\n)\n\nes", "Using the EntitySet\nFinally, we are ready to use this EntitySet with any functionality within Featuretools. For example, let's build a feature matrix for each product in our dataset.", "feature_matrix, feature_defs = ft.dfs(entityset=es, target_dataframe_name=\"products\")\n\nfeature_matrix" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
kimkipyo/dss_git_kkp
통계, 머신러닝 복습/160608수_13일차_회귀분석 실습, 과최적화/2.다항 회귀와 과최적화.ipynb
mit
[ "다항 회귀\n선형 기저 함수 모형(Linear Basis Function Models) => ϕ(x)\n일반적인 선형 회귀 모형은 다음과 같은 수식을 만족한다.\n$$ y_i = \\sum_{i=1}^{D} w_i x_i = w^T x $$\n이 때 가중치 벡터 $w$의 차원은 독립 변수의 차원과 같다. 즉 $x \\in \\mathbf{R}^D$ 이면 $w \\in \\mathbf{R}^D$ 이다.\n선형 기저 함수 모형(Linear Basis Function Models)은 x를 직접 선형 조합(linear combination)하지 않고 기저 함수를 통해 변환한 값을 새로운 독립 변수로 가정하고 선형 회귀 모형을 적용한 것과 같다. 따라서 기저 함수에 따라 가중치 벡터의 차원이 달라질 수 있다. 즉, $ \\phi(\\cdot): \\mathbf{R}^D \\rightarrow \\mathbf{R}^M $ 이면 $w \\in \\mathbf{R}^M$ 이다.\n$$ y_i = \\sum_{j=1}^{M} w_j \\phi_j(x) = w^T \\phi(x) $$\n다항 회귀\n언제 쓰느냐? 밑에 1차항으로 할 때에는 잘 안 맞지만 2, 3, 4 차항으로 늘릴수록 잘 맞게 된다.\n다항 회귀는 다음과 같은 다항식 함수를 기저 함수로 사용하는 선형 기저 함수 모형의 일종이다.\n$$ 1, x, x^2, \\ldots, x^M $$\n따라서 종속 변수와 독립 변수의 관계는 다음과 같이 표현할 수 있다.\n$$ y = w_0 + w_1x + w_2x^2 + \\ldots + w_M x^M $$\nstatsmodels를 이용한 다항 회귀\n\n여기는 방법이 없다. 일일이 다 넣어줘야 한다.\n여기서는 I(x**2) -> I를 넣는 것이 중요하다.\n\nstatsmodels에서는 OLS 클래스의 from_formula 메서드를 사용하여 다항 회귀를 할 수 있다.", "np.random.seed(0)\nn_samples = 30\nX = np.sort(np.random.rand(n_samples))\ny = np.cos(1.5 * np.pi * X) + np.random.randn(n_samples) * 0.1\n\ndfX = pd.DataFrame(X, columns=[\"x\"])\ndfX = sm.add_constant(dfX)\ndfy = pd.DataFrame(y, columns=[\"y\"])\ndf = pd.concat([dfX, dfy], axis=1)\n\nprint(sm.OLS.from_formula(\"y ~ x\", data=df).fit().summary())\nprint(sm.OLS.from_formula(\"y ~ x + I(x**2)\", data=df).fit().summary())\nprint(sm.OLS.from_formula(\"y ~ x + I(x**2) + I(x**3)\", data=df).fit().summary())\nprint(sm.OLS.from_formula(\"y ~ x + I(x**2) + I(x**3) + I(x**4)\", data=df).fit().summary())\nprint(sm.OLS.from_formula(\"y ~ x + I(x**2) + I(x**3) + I(x**4) + I(x**5)\", data=df).fit().summary())", "Scikit-Learn을 이용한 다항 회귀\nScikit-Learn에서는 preprocessing 서브 패키지의 PolynomialFeatures 클래스를 사용하여 다항 회귀를 할 수 있다.", "from sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.pipeline import Pipeline\n\ndef polyreg(degree, seed=0, plot=True):\n polynomial_features = 
PolynomialFeatures(degree=degree)\n linear_regression = LinearRegression()\n model = Pipeline([(\"polynomial_features\", polynomial_features),\n (\"linear_regression\", linear_regression)])\n \n np.random.seed(seed)\n n_samples = 30\n X = np.sort(np.random.rand(n_samples))\n y = np.cos(1.5 * np.pi * X) + np.random.randn(n_samples) * 0.1\n X = X[:, np.newaxis]\n \n model.fit(X, y)\n \n if plot:\n plt.scatter(X, y)\n xx = np.linspace(0, 1, 1000)\n plt.plot(xx, model.predict(xx[:, np.newaxis]))\n plt.ylim(-2, 2)\n plt.show()\n \n reg = model.named_steps[\"linear_regression\"]\n return reg.coef_, reg.intercept_\n\npolyreg(1)\n\npolyreg(2)\n\npolyreg(3)\n\npolyreg(4)\n\npolyreg(5)\n\npolyreg(50)", "과최적화\n모형을 특정 샘플 데이터에 대해 과도하게 최적화하는 것을 과최적화(overfitting)이라고 한다.\n과최적화는 \n* 독립 변수 데이터 갯수에 비해 모형 모수의 수가 과도하게 크거나 \n* 독립 변수 데이터가 서로 독립이 아닌 경우에 발생한다.\n이러한 상황에서는 같은 조건에 대해 답이 복수개 존재할 수 있기 때문이다. \n과최적화가 문제가 되는 이유는 다음과 같다.\n\n샘플이 조금만 변화해도 fitting 결과가 크게 달라지며\n트레이닝에 사용되지 않은 새로운 독립 변수 값을 입력하면 오차가 커진다. (cross-validation 오차)\n\n나쁜 이유? 회귀 결과의 안전성을 해치게 된다.", "polyreg(2, 0)\n\npolyreg(2, 1)\n\npolyreg(2, 2)\n\npolyreg(50, 1)\n\npolyreg(50, 2)\n\npolyreg(50, 3)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
csaladenes/csaladenes.github.io
present/bi2/2020/ubb/az_en_jupyter2_mappam/sklearn_tutorial/04.1-Dimensionality-PCA.ipynb
mit
[ "<small><i>This notebook was put together by Jake Vanderplas. Source and license info is on GitHub.</i></small>\nDimensionality Reduction: Principal Component Analysis in-depth\nHere we'll explore Principal Component Analysis, which is an extremely useful linear dimensionality reduction technique.\nWe'll start with our standard set of initial imports:", "from __future__ import print_function, division\n\n%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\nplt.style.use('seaborn')", "Introducing Principal Component Analysis\nPrincipal Component Analysis is a very powerful unsupervised method for dimensionality reduction in data. It's easiest to visualize by looking at a two-dimensional dataset:", "np.random.seed(1)\nX = np.dot(np.random.random(size=(2, 2)), np.random.normal(size=(2, 200))).T\nplt.plot(X[:, 0], X[:, 1], 'o')\nplt.axis('equal');", "We can see that there is a definite trend in the data. What PCA seeks to do is to find the Principal Axes in the data, and explain how important those axes are in describing the data distribution:", "from sklearn.decomposition import PCA\npca = PCA(n_components=2)\npca.fit(X)\nprint(pca.explained_variance_)\nprint(pca.components_)", "To see what these numbers mean, let's view them as vectors plotted on top of the data:", "plt.plot(X[:, 0], X[:, 1], 'o', alpha=0.5)\nfor length, vector in zip(pca.explained_variance_, pca.components_):\n v = vector * 3 * np.sqrt(length)\n plt.plot([0, v[0]], [0, v[1]], '-k', lw=3)\nplt.axis('equal');", "Notice that one vector is longer than the other. In a sense, this tells us that that direction in the data is somehow more \"important\" than the other direction.\nThe explained variance quantifies this measure of \"importance\" in direction.\nAnother way to think of it is that the second principal component could be completely ignored without much loss of information! 
Let's see what our data look like if we only keep 95% of the variance:", "clf = PCA(0.95) # keep 95% of variance\nX_trans = clf.fit_transform(X)\nprint(X.shape)\nprint(X_trans.shape)", "By specifying that we want to throw away 5% of the variance, the data is now compressed by a factor of 50%! Let's see what the data look like after this compression:", "X_new = clf.inverse_transform(X_trans)\nplt.plot(X[:, 0], X[:, 1], 'o', alpha=0.2)\nplt.plot(X_new[:, 0], X_new[:, 1], 'ob', alpha=0.8)\nplt.axis('equal');", "The light points are the original data, while the dark points are the projected version. We see that after truncating 5% of the variance of this dataset and then reprojecting it, the \"most important\" features of the data are maintained, and we've compressed the data by 50%!\nThis is the sense in which \"dimensionality reduction\" works: if you can approximate a data set in a lower dimension, you can often have an easier time visualizing it or fitting complicated models to the data.\nApplication of PCA to Digits\nThe dimensionality reduction might seem a bit abstract in two dimensions, but the projection and dimensionality reduction can be extremely useful when visualizing high-dimensional data. Let's take a quick look at the application of PCA to the digits data we looked at before:", "from sklearn.datasets import load_digits\ndigits = load_digits()\nX = digits.data\ny = digits.target\n\nprint(X[0][:8])\nprint(X[0][8:16])\nprint(X[0][16:24])\nprint(X[0][24:32])\nprint(X[0][32:40])\nprint(X[0][40:48])\n\npca = PCA(2) # project from 64 to 2 dimensions\nXproj = pca.fit_transform(X)\nprint(X.shape)\nprint(Xproj.shape)\n\n(1797*2)/(1797*64)\n\nplt.scatter(Xproj[:, 0], Xproj[:, 1], c=y, edgecolor='none', alpha=0.5,\n cmap=plt.cm.get_cmap('nipy_spectral', 10))\nplt.colorbar();", "This gives us an idea of the relationship between the digits. 
Essentially, we have found the optimal stretch and rotation in 64-dimensional space that allows us to see the layout of the digits, without reference to the labels.\nWhat do the Components Mean?\nPCA is a very useful dimensionality reduction algorithm, because it has a very intuitive interpretation via eigenvectors.\nThe input data is represented as a vector: in the case of the digits, our data is\n$$\nx = [x_1, x_2, x_3 \\cdots]\n$$\nbut what this really means is\n$$\nimage(x) = x_1 \\cdot{\\rm (pixel~1)} + x_2 \\cdot{\\rm (pixel~2)} + x_3 \\cdot{\\rm (pixel~3)} \\cdots\n$$\nIf we reduce the dimensionality in the pixel space to (say) 6, we recover only a partial image:", "from fig_code.figures import plot_image_components\n\nwith plt.style.context('seaborn-white'):\n plot_image_components(digits.data[0])", "But the pixel-wise representation is not the only choice. We can also use other basis functions, and write something like\n$$\nimage(x) = {\\rm mean} + x_1 \\cdot{\\rm (basis~1)} + x_2 \\cdot{\\rm (basis~2)} + x_3 \\cdot{\\rm (basis~3)} \\cdots\n$$\nWhat PCA does is to choose optimal basis functions so that only a few are needed to get a reasonable approximation.\nThe low-dimensional representation of our data is the coefficients of this series, and the approximate reconstruction is the result of the sum:", "from fig_code.figures import plot_pca_interactive\nplot_pca_interactive(digits.data)", "Here we see that with only six PCA components, we recover a reasonable approximation of the input!\nThus we see that PCA can be viewed from two angles. It can be viewed as dimensionality reduction, or it can be viewed as a form of lossy data compression where the loss favors noise. In this way, PCA can be used as a filtering process as well.\nChoosing the Number of Components\nBut how much information have we thrown away? 
We can figure this out by looking at the explained variance as a function of the components:", "pca = PCA().fit(X)\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.xlabel('number of components')\nplt.ylabel('cumulative explained variance');", "Here we see that our two-dimensional projection loses a lot of information (as measured by the explained variance) and that we'd need about 20 components to retain 90% of the variance. Looking at this plot for a high-dimensional dataset can help you understand the level of redundancy present in multiple observations.\nPCA as data compression\nAs we mentioned, PCA can be used for is a sort of data compression. Using a small n_components allows you to represent a high dimensional point as a sum of just a few principal vectors.\nHere's what a single digit looks like as you change the number of components:", "fig, axes = plt.subplots(8, 8, figsize=(8, 8))\nfig.subplots_adjust(hspace=0.1, wspace=0.1)\n\nfor i, ax in enumerate(axes.flat):\n pca = PCA(i + 1).fit(X)\n im = pca.inverse_transform(pca.transform(X[25:26]))\n\n ax.imshow(im.reshape((8, 8)), cmap='binary')\n ax.text(0.95, 0.05, 'n = {0}'.format(i + 1), ha='right',\n transform=ax.transAxes, color='green')\n ax.set_xticks([])\n ax.set_yticks([])", "Let's take another look at this by using IPython's interact functionality to view the reconstruction of several images at once:", "from ipywidgets import interact\n\ndef plot_digits(n_components):\n fig = plt.figure(figsize=(8, 8))\n plt.subplot(1, 1, 1, frameon=False, xticks=[], yticks=[])\n nside = 10\n \n pca = PCA(n_components).fit(X)\n Xproj = pca.inverse_transform(pca.transform(X[:nside ** 2]))\n Xproj = np.reshape(Xproj, (nside, nside, 8, 8))\n total_var = pca.explained_variance_ratio_.sum()\n \n im = np.vstack([np.hstack([Xproj[i, j] for j in range(nside)])\n for i in range(nside)])\n plt.imshow(im)\n plt.grid(False)\n plt.title(\"n = {0}, variance = {1:.2f}\".format(n_components, total_var),\n size=18)\n 
plt.clim(0, 16)\n \ninteract(plot_digits, n_components=[1, 15, 20, 25, 32, 40, 64], nside=[1, 8]);", "Other Dimensionality Reducting Routines\nNote that scikit-learn contains many other unsupervised dimensionality reduction routines: some you might wish to try are\nOther dimensionality reduction techniques which are useful to know about:\n\nsklearn.decomposition.PCA: \n Principal Component Analysis\nsklearn.decomposition.RandomizedPCA:\n extremely fast approximate PCA implementation based on a randomized algorithm\nsklearn.decomposition.SparsePCA:\n PCA variant including L1 penalty for sparsity\nsklearn.decomposition.FastICA:\n Independent Component Analysis\nsklearn.decomposition.NMF:\n non-negative matrix factorization\nsklearn.manifold.LocallyLinearEmbedding:\n nonlinear manifold learning technique based on local neighborhood geometry\nsklearn.manifold.IsoMap:\n nonlinear manifold learning technique based on a sparse graph algorithm\n\nEach of these has its own strengths & weaknesses, and areas of application. You can read about them on the scikit-learn website." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
keras-team/keras-io
guides/ipynb/keras_cv/custom_image_augmentations.ipynb
apache-2.0
[ "Custom Image Augmentations with BaseImageAugmentationLayer\nAuthor: lukewood<br>\nDate created: 2022/04/26<br>\nLast modified: 2022/04/26<br>\nDescription: Use BaseImageAugmentationLayer to implement custom data augmentations.\nOverview\nData augmentation is an integral part of training any robust computer vision model.\nWhile KerasCV offers a plethora of prebuild high quality data augmentation techniques,\nyou may still want to implement your own custom technique.\nKerasCV offers a helpful base class for writing data augmentation layers:\nBaseImageAugmentationLayer.\nAny augmentation layer built with BaseImageAugmentationLayer will automatically be\ncompatible with the KerasCV RandomAugmentationPipeline class.\nThis guide will show you how to implement your own custom augmentation layers using\nBaseImageAugmentationLayer. As an example, we will implement a layer that tints all\nimages blue.", "import tensorflow as tf\nfrom tensorflow import keras\nimport keras_cv\nfrom tensorflow.keras import layers\nfrom keras_cv import utils\nfrom keras_cv.layers import BaseImageAugmentationLayer\nimport matplotlib.pyplot as plt\n\ntf.autograph.set_verbosity(0)", "First, let's implement some helper functions to visualize intermediate results", "\ndef imshow(img):\n img = img.astype(int)\n plt.axis(\"off\")\n plt.imshow(img)\n plt.show()\n\n\ndef gallery_show(images):\n images = images.astype(int)\n for i in range(9):\n image = images[i]\n plt.subplot(3, 3, i + 1)\n plt.imshow(image.astype(\"uint8\"))\n plt.axis(\"off\")\n plt.show()\n", "BaseImageAugmentationLayer Introduction\nImage augmentation should operate on a sample-wise basis; not batch-wise.\nThis is a common mistake many machine learning practicioners make when implementing\ncustom techniques.\nBaseImageAugmentation offers a set of clean abstractions to make implementing image\naugmentation techniques on a sample wise basis much easier.\nThis is done by allowing the end user to override an augment_image() method and 
then\nperforming automatic vectorization under the hood.\nMost augmentation techniques also must sample from one or more random distributions.\nKerasCV offers an abstraction to make random sampling end user configurable: the\nFactorSampler API.\nFinally, many augmentation techniques requires some information about the pixel values\npresent in the input images. KerasCV offers the value_range API to simplify the handling of this.\nIn our example, we will use the FactorSampler API, the value_range API, and\nBaseImageAugmentationLayer to implement a robust, configurable, and correct RandomBlueTint layer.\nOverriding augment_image()\nLet's start off with the minimum:", "\nclass RandomBlueTint(keras_cv.layers.BaseImageAugmentationLayer):\n def augment_image(self, image, transformation=None):\n # image is of shape (height, width, channels)\n [*others, blue] = tf.unstack(image, axis=-1)\n blue = tf.clip_by_value(blue + 100, 0.0, 255.0)\n return tf.stack([*others, blue], axis=-1)\n", "Our layer overrides BaseImageAugmentationLayer.augment_image(). This method is\nused to augment images given to the layer. By default, using\nBaseImageAugmentationLayer gives you a few nice features for free:\n\nsupport for unbatched inputs (HWC Tensor)\nsupport for batched inputs (BHWC Tensor)\nautomatic vectorization on batched inputs (more information on this in automatic\n vectorization performance)\n\nLet's check out the result. First, let's download a sample image:", "SIZE = (300, 300)\nelephants = tf.keras.utils.get_file(\n \"african_elephant.jpg\", \"https://i.imgur.com/Bvro0YD.png\"\n)\nelephants = tf.keras.utils.load_img(elephants, target_size=SIZE)\nelephants = tf.keras.utils.img_to_array(elephants)\nimshow(elephants)", "Next, let's augment it and visualize the result:", "layer = RandomBlueTint()\naugmented = layer(elephants)\nimshow(augmented.numpy())", "Looks great! 
We can also call our layer on batched inputs:", "layer = RandomBlueTint()\naugmented = layer(tf.expand_dims(elephants, axis=0))\nimshow(augmented.numpy()[0])", "Adding Random Behavior with the FactorSampler API.\nUsually an image augmentation technique should not do the same thing on every\ninvocation of the layer's __call__ method.\nKerasCV offers the FactorSampler API to allow users to provide configurable random\ndistributions.", "\nclass RandomBlueTint(keras_cv.layers.BaseImageAugmentationLayer):\n \"\"\"RandomBlueTint randomly applies a blue tint to images.\n\n Args:\n factor: A tuple of two floats, a single float or a\n `keras_cv.FactorSampler`. `factor` controls the extent to which the\n image is blue shifted. `factor=0.0` makes this layer perform a no-op\n operation, while a value of 1.0 uses the degenerated result entirely.\n Values between 0 and 1 result in linear interpolation between the original\n image and a fully blue image.\n Values should be between `0.0` and `1.0`. If a tuple is used, a `factor` is\n sampled between the two values for every image augmented. If a single float\n is used, a value between `0.0` and the passed float is sampled. 
In order to\n ensure the value is always the same, please pass a tuple with two identical\n floats: `(0.5, 0.5)`.\n \"\"\"\n\n def __init__(self, factor, **kwargs):\n super().__init__(**kwargs)\n self.factor = utils.parse_factor(factor)\n\n def augment_image(self, image, transformation=None):\n [*others, blue] = tf.unstack(image, axis=-1)\n blue_shift = self.factor() * 255\n blue = tf.clip_by_value(blue + blue_shift, 0.0, 255.0)\n return tf.stack([*others, blue], axis=-1)\n", "Now, we can configure the random behavior of ou RandomBlueTint layer.\nWe can give it a range of values to sample from:", "many_elephants = tf.repeat(tf.expand_dims(elephants, axis=0), 9, axis=0)\nlayer = RandomBlueTint(factor=0.5)\naugmented = layer(many_elephants)\ngallery_show(augmented.numpy())", "Each image is augmented differently with a random factor sampled from the range\n(0, 0.5).\nWe can also configure the layer to draw from a normal distribution:", "many_elephants = tf.repeat(tf.expand_dims(elephants, axis=0), 9, axis=0)\nfactor = keras_cv.NormalFactorSampler(\n mean=0.3, stddev=0.1, min_value=0.0, max_value=1.0\n)\nlayer = RandomBlueTint(factor=factor)\naugmented = layer(many_elephants)\ngallery_show(augmented.numpy())", "As you can see, the augmentations now are drawn from a normal distributions.\nThere are various types of FactorSamplers including UniformFactorSampler,\nNormalFactorSampler, and ConstantFactorSampler. 
You can also implement you own.\nOverridding get_random_transformation()\nNow, suppose that your layer impacts the prediction targets: whether they are bounding\nboxes, classification labels, or regression targets.\nYour layer will need to have information about what augmentations are taken on the image\nwhen augmenting the label.\nLuckily, BaseImageAugmentationLayer was designed with this in mind.\nTo handle this issue, BaseImageAugmentationLayer has an overrideable\nget_random_transformation() method alongside with augment_label(),\naugment_target() and augment_bounding_boxes().\naugment_segmentation_map() and others will be added in the future.\nLet's add this to our layer.", "\nclass RandomBlueTint(keras_cv.layers.BaseImageAugmentationLayer):\n \"\"\"RandomBlueTint randomly applies a blue tint to images.\n\n Args:\n factor: A tuple of two floats, a single float or a\n `keras_cv.FactorSampler`. `factor` controls the extent to which the\n image is blue shifted. `factor=0.0` makes this layer perform a no-op\n operation, while a value of 1.0 uses the degenerated result entirely.\n Values between 0 and 1 result in linear interpolation between the original\n image and a fully blue image.\n Values should be between `0.0` and `1.0`. If a tuple is used, a `factor` is\n sampled between the two values for every image augmented. If a single float\n is used, a value between `0.0` and the passed float is sampled. 
In order to\n ensure the value is always the same, please pass a tuple with two identical\n floats: `(0.5, 0.5)`.\n \"\"\"\n\n def __init__(self, factor, **kwargs):\n super().__init__(**kwargs)\n self.factor = utils.parse_factor(factor)\n\n def get_random_transformation(self, **kwargs):\n # kwargs holds {\"images\": image, \"labels\": label, etc...}\n return self.factor() * 255\n\n def augment_image(self, image, transformation=None, **kwargs):\n [*others, blue] = tf.unstack(image, axis=-1)\n blue = tf.clip_by_value(blue + transformation, 0.0, 255.0)\n return tf.stack([*others, blue], axis=-1)\n\n def augment_label(self, label, transformation=None, **kwargs):\n # you can use transformation somehow if you want\n\n if transformation > 100:\n # i.e. maybe class 2 corresponds to blue images\n return 2.0\n\n return label\n\n def augment_bounding_boxes(self, bounding_boxes, transformation=None, **kwargs):\n # you can also perform no-op augmentations on label types to support them in\n # your pipeline.\n return bounding_boxes\n", "To make use of these new methods, you will need to feed your inputs in with a\ndictionary maintaining a mapping from images to targets.\nAs of now, KerasCV supports the following label types:\n\nlabels via augment_label().\nbounding_boxes via augment_bounding_boxes().\n\nIn order to use augmention layers alongside your prediction targets, you must package\nyour inputs as follows:", "labels = tf.constant([[1, 0]])\ninputs = {\"images\": elephants, \"labels\": labels}", "Now if we call our layer on the inputs:", "layer = RandomBlueTint(factor=(0.6, 0.6))\naugmented = layer(inputs)\nprint(augmented[\"labels\"])", "Both the inputs and labels are augmented.\nNote how when transformation is > 100 the label is modified to contain 2.0 as\nspecified in the layer above.\nvalue_range support\nImagine you are using your new augmentation layer in many pipelines.\nSome pipelines have values in the range [0, 255], some pipelines have normalized their\n images 
to the range [-1, 1], and some use a value range of [0, 1].\nIf a user calls your layer with an image in value range [0, 1], the outputs will be\nnonsense!", "layer = RandomBlueTint(factor=(0.1, 0.1))\nelephants_0_1 = elephants / 255\nprint(\"min and max before augmentation:\", elephants_0_1.min(), elephants_0_1.max())\naugmented = layer(elephants_0_1)\nprint(\n \"min and max after augmentation:\",\n (augmented.numpy()).min(),\n augmented.numpy().max(),\n)\nimshow((augmented * 255).numpy().astype(int))", "Note that this is an incredibly weak augmentation!\nFactor is only set to 0.1.\nLet's resolve this issue with KerasCV's value_range API.", "\nclass RandomBlueTint(keras_cv.layers.BaseImageAugmentationLayer):\n \"\"\"RandomBlueTint randomly applies a blue tint to images.\n\n Args:\n value_range: value_range: a tuple or a list of two elements. The first value\n represents the lower bound for values in passed images, the second represents\n the upper bound. Images passed to the layer should have values within\n `value_range`.\n factor: A tuple of two floats, a single float or a\n `keras_cv.FactorSampler`. `factor` controls the extent to which the\n image is blue shifted. `factor=0.0` makes this layer perform a no-op\n operation, while a value of 1.0 uses the degenerated result entirely.\n Values between 0 and 1 result in linear interpolation between the original\n image and a fully blue image.\n Values should be between `0.0` and `1.0`. If a tuple is used, a `factor` is\n sampled between the two values for every image augmented. If a single float\n is used, a value between `0.0` and the passed float is sampled. 
In order to\n ensure the value is always the same, please pass a tuple with two identical\n floats: `(0.5, 0.5)`.\n \"\"\"\n\n def __init__(self, value_range, factor, **kwargs):\n super().__init__(**kwargs)\n self.value_range = value_range\n self.factor = utils.parse_factor(factor)\n\n def get_random_transformation(self, **kwargs):\n # kwargs holds {\"images\": image, \"labels\": label, etc...}\n return self.factor() * 255\n\n def augment_image(self, image, transformation=None, **kwargs):\n image = utils.transform_value_range(image, self.value_range, (0, 255))\n [*others, blue] = tf.unstack(image, axis=-1)\n blue = tf.clip_by_value(blue + transformation, 0.0, 255.0)\n result = tf.stack([*others, blue], axis=-1)\n result = utils.transform_value_range(result, (0, 255), self.value_range)\n return result\n\n def augment_label(self, label, transformation=None, **kwargs):\n # you can use transformation somehow if you want\n\n if transformation > 100:\n # i.e. maybe class 2 corresponds to blue images\n return 2.0\n\n return label\n\n def augment_bounding_boxes(self, bounding_boxes, transformation=None, **kwargs):\n # you can also perform no-op augmentations on label types to support them in\n # your pipeline.\n return bounding_boxes\n\n\nlayer = RandomBlueTint(value_range=(0, 1), factor=(0.1, 0.1))\nelephants_0_1 = elephants / 255\nprint(\"min and max before augmentation:\", elephants_0_1.min(), elephants_0_1.max())\naugmented = layer(elephants_0_1)\nprint(\n \"min and max after augmentation:\",\n augmented.numpy().min(),\n augmented.numpy().max(),\n)\nimshow((augmented * 255).numpy().astype(int))", "Now our elephants are only slgihtly blue tinted. This is the expected behavior when\nusing a factor of 0.1. Great!\nNow users can configure the layer to support any value range they may need. 
Note that\nonly layers that interact with color information should use the value range API.\nMany augmentation techniques, such as RandomRotation will not need this.\nAuto vectorization performance\nIf you are wondering:\n\nDoes implementing my augmentations on an sample-wise basis carry performance\n implications?\n\nYou are not alone!\nLuckily, I have performed extensive analysis on the performance of automatic\nvectorization, manual vectorization, and unvectorized implementations.\nIn this benchmark, I implemented a RandomCutout layer using auto vectorization, no auto\nvectorization and manual vectorization.\nAll of these were benchmarked inside of an @tf.function annotation.\nThey were also each benchmarked with the jit_compile argument.\nThe following chart shows the results of this benchmark:\n\nThe primary takeaway should be that the difference between manual vectorization and\nautomatic vectorization is marginal!\nPlease note that Eager mode performance will be drastically different.\nCommon gotchas\nSome layers are not able to be automatically vectorizated.\nAn example of this is GridMask.\nIf you receive an error when invoking your layer, try adding the following to your\nconstructor:", "\nclass UnVectorizable(keras_cv.layers.BaseImageAugmentationLayer):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n # this disables BaseImageAugmentationLayer's Auto Vectorization\n self.auto_vectorize = False\n", "Conclusion and next steps\nKerasCV offers a standard set of APIs to streamline the process of implementing your\nown data augmentation techniques.\nThese include BaseImageAugmentationLayer, the FactorSampler API and the\nvalue_range API.\nWe used these APIs to implement a highly configurable RandomBlueTint layer.\nThis layer can take inputs as standalone images, a dictionary with keys of \"images\"\nand labels, inputs that are unbatched, or inputs that are batched. 
Inputs may be in any\nvalue range, and the random distribution used to sample the tint values is end user\nconfigurable.\nAs a follow up exercises you can:\n\nimplement your own data augmentation technique using BaseImageAugmentationLayer\ncontribute an augmentation layer to KerasCV\nread through the existing KerasCV augmentation layers" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
joferkington/scipy2015-3d_printing
Scipy 2015 - 3D Printing with Python.ipynb
mit
[ "Touch your data! 3D Color Printing with Python\n\nJoe Kington, Chevron\n\n<img src=\"images/3d_seismic_together.jpg\" style=\"float: left; width: 30%; margin-left: 4%;\">\n<img src=\"images/3d_seismic_hand.jpg\" style=\"float: left; width: 30%; margin-left: 1%;\">\n<img src=\"images/alaska_model_textured.jpg\" style=\"float: left; width: 30%; margin-left: 1%;\">\nBut First, Puppies!!\n<img src=\"images/darlin_cute.jpg\" width=90%>\n<img src=\"images/darlin_crazy.jpg\" width=100%>\n<img src=\"images/darlin_upside_down.jpg\" width=100%>\nYou can 3D print your dog!\nhttp://www.artylobster.com/testimonials.html\nhttp://www.petprints3d.com/\n<img src=\"images/arty_lobster_snapshot.png\" width=100%>\nSo why can't we 3D print Geology?\n\n\nSome people already are: e.g. GeoFabLab\n\n\nQuick shout out to Brendan Sullivan: This is his idea/question\n\n\nIt needs to be useful, but often comes off as gimmicky\n\n\nHow do we avoid the gimmicks?\n\n\nForm is not enough for geoscience\n\n\nNeed color 3D printing!\n\n\nNeed to find the right niche\n\n\nCommunication Tool, not a Visualization Tool", "%run slice_3d_example.py", "Communication Tool, not a Visualization Tool\n\n\nEach individual drives the interaction\n\n\nPhysical is the original interactive\n\n\nOur Niche: Communication with non-expert audience\n\n\nGreat, but what about python?\n\n\nMayavi / mlab / tvtk to the rescue!\n\nVTK is fantastic for visualizing 3D datasets\nMayavi/mlab/tvtk are more pythonic\n\n\n\nVRML is the de-facto standard for color 3D printing\n\nfig.scene.save_vrml(filename)\n\n\n\nBig Caveat: You need to verify printability!!", "# %load mayavi_logo.py\n\"\"\"\nA script to generate the Mayavi logo: a Boy surface.\n\nThe boy surface is a mathematical parametric surface, see\nhttp://en.wikipedia.org/wiki/Boy%27s_surface . 
We display it by sampling\nthe two parameters of the surface on a grid and using the mlab's mesh\nfunction: :func:`mayavi.mlab.mesh`.\n\"\"\"\n\n# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>\n# Copyright (c) 2007, Enthought, Inc.\n# License: BSD Style.\n\n\nfrom numpy import sin, cos, mgrid, pi, sqrt\nfrom mayavi import mlab\n\nimport utils\n\nfig = mlab.figure(fgcolor=(0, 0, 0), bgcolor=(1, 1, 1))\nu, v = mgrid[- 0.035:pi:0.01, - 0.035:pi:0.01]\n\nX = 2 / 3. * (cos(u) * cos(2 * v)\n + sqrt(2) * sin(u) * cos(v)) * cos(u) / (sqrt(2) -\n sin(2 * u) * sin(3 * v))\nY = 2 / 3. * (cos(u) * sin(2 * v) -\n sqrt(2) * sin(u) * sin(v)) * cos(u) / (sqrt(2)\n - sin(2 * u) * sin(3 * v))\nZ = -sqrt(2) * cos(u) * cos(u) / (sqrt(2) - sin(2 * u) * sin(3 * v))\nS = sin(u)\n\nmlab.mesh(X, Y, Z, scalars=S, colormap='YlGnBu', )\n\n# Nice view from the front\nmlab.view(.0, - 5.0, 4)\n\nutils.present(fig)\n", "The building blocks for a topographic model", "import numpy as np\nfrom mayavi import mlab\nfrom osgeo import gdal\ngdal.UseExceptions()\n\nimport utils\n\nz = gdal.Open('data/alaska/clipped_elev.tif').ReadAsArray()\n\nfig = mlab.figure()\nmlab.surf(z, warp_scale=0.05, colormap='gist_earth')\nutils.present(fig) # Normally, we'd call mlab.show()", "Let's worry about true vertical exaggeration", "def read(filename):\n ds = gdal.Open(filename)\n elev = ds.ReadAsArray()\n\n # True x, y coordinates\n x0, dx, dxdy, y0, dydx, dy = ds.GetGeoTransform()\n i, j = np.mgrid[:elev.shape[0], :elev.shape[1]]\n x = x0 + dx * j + dxdy * i\n y = y0 + dy * i + dydx * j\n\n return ds.ReadAsArray(), x, y\n\nz, x, y = read('data/alaska/clipped_elev.tif')\n\nfig = mlab.figure()\nmlab.mesh(x, y, z, colormap='gist_earth')\nutils.present(fig)", "Now, let's make it a printable model", "z, x, y = read('data/alaska/clipped_elev.tif')\n\nxpad, ypad, bottomz = [np.pad(item, 1, mode='edge') for item in x, y, z]\nbottomz[1:-1, 1:-1] = -1000\n\nfig = mlab.figure()\nmlab.mesh(x, y, z, 
colormap='gist_earth')\n\n# Add the bottom\nmlab.mesh(xpad, ypad, bottomz, color=(1,1,1)) # Inefficient, but okay for now...\n\nutils.present(fig)", "But what about scale??", "fig = mlab.figure()\nmlab.mesh(x - x.min(), y - y.min(), z, colormap='gist_earth')\nmlab.axes()\nutils.present(fig)", "VTK let's us \"shrink\" things", "import mayavi.tools\n\ndef scale(fig, ratio):\n \"\"\"Scales a Mayavi figure and resets the camera.\"\"\"\n for actor in fig.scene.renderer.actors:\n actor.scale = actor.scale * ratio\n mayavi.tools.camera.view(distance='auto', focalpoint='auto', figure=fig)\n \nfig = mlab.figure()\nmesh = mlab.mesh(x - x.min(), y - y.min(), z, colormap='gist_earth')\nmlab.mesh(xpad, ypad, bottomz, color=(1,1,1))\n\nscale(fig, 0.0001)\nmlab.axes()\nutils.present(fig)", "Or expand them", "fig = mlab.figure()\nmesh = mlab.mesh(x, y, z, colormap='gist_earth')\nmlab.mesh(xpad, ypad, bottomz, color=(1,1,1))\n\nutils.scale(fig, 0.0001)\nutils.scale(fig, [1, 1, 2.5]) # Apply vertical exaggeration\nutils.present(fig)", "But what we really want to do is \"drape\" data on a surface...\n<img src=\"images/geology.png\" width=90%>\nIn 3D modeling terms, this is a \"texture\"", "from tvtk.api import tvtk\n\ndef texture(mesh, fname, clamp=True):\n img = tvtk.PNGReader(file_name=fname).output\n t = tvtk.Texture(input=img, interpolate=True, edge_clamp=clamp)\n \n mesh.actor.enable_texture = True\n mesh.actor.actor.texture = t\n mesh.actor.tcoord_generator_mode = 'plane'\n mesh.actor.mapper.scalar_visibility = False\n \nfig = mlab.figure()\nmlab.mesh(xpad, ypad, bottomz, color=(1,1,1))\n\nmesh = mlab.mesh(x, y, z)\ntexture(mesh, \"images/geology.png\")\n\nutils.scale(fig, 0.0001 * np.array([1, 1, 2.5]))\nutils.present(fig)\n", "<img src=\"images/alaska_model_no_texture.jpg\" width=100%>\nOne caveat with Shapeways\n\n\nShapeways (and some other 3D printing services) doesn't like \"embedded\" textures in VRML\n\n\nSolution - un-embed the textures", "# %load 
shapeways_io.py\nimport os\nimport binascii\nimport tempfile\nfrom zipfile import ZipFile, ZIP_DEFLATED\nfrom cStringIO import StringIO\n\nimport numpy as np\nimport Image\n\ndef save_vrml(fig, output_filename):\n \"\"\"\n Saves a Mayavi figure as shapeways-formatted VRML in a zip file.\n\n Parameters\n ----------\n fig : a Mayavi/mlab figure\n output_filename : string\n \"\"\"\n _, fname = tempfile.mkstemp()\n fig.scene.save_vrml(fname)\n\n wrl_name = os.path.basename(output_filename).rstrip('.zip')\n vrml2shapeways(fname, output_filename, wrl_name)\n\n os.remove(fname)\n\ndef vrml2shapeways(filename, output_filename, wrl_name=None):\n \"\"\"\n Un-embededs images from a vrml file and creates a zip archive with the\n images saved as .png's and the vrml file with links to the images.\n\n Parameters\n ----------\n filename : string\n The name of the input VRML file\n output_filename : string\n The filename of the zip archive that will be created.\n wrl_name : string or None (optional)\n The name of the VRML file in the zip archive. 
If None, this will be\n taken from *filename*.\n \"\"\"\n if not output_filename.endswith('.zip'):\n output_filename += '.zip'\n\n with ZipFile(output_filename, 'w', ZIP_DEFLATED) as z:\n if wrl_name is None:\n wrl_name = os.path.basename(filename)\n if not wrl_name.endswith('.wrl'):\n wrl_name += '.wrl'\n\n outfile = StringIO()\n with open(filename, 'r') as infile:\n images = unembed_wrl_images(infile, outfile)\n z.writestr(wrl_name, outfile.getvalue())\n\n for fname, im in images.iteritems():\n outfile = StringIO()\n im.save(outfile, format='png')\n z.writestr(fname, outfile.getvalue())\n\ndef unembed_wrl_images(infile, outfile):\n \"\"\"\n Converts embedded images in a VRML file to linked .png's.\n\n Parameters\n ----------\n infile : file-like object\n outfile: file-like object\n\n Returns\n -------\n images : a dict of filename : PIL Image pairs\n\n Notes:\n -----\n Should use a proper parser instead of just iterating line-by-line...\n \"\"\"\n i = 1\n images = {}\n for line in infile:\n if 'texture' in line:\n data, width, height = read_texture_wrl(infile)\n image_filename = 'texture_{}.png'.format(i)\n im = ascii2image_wrl(data, width, height)\n line = ' texture ImageTexture {{ url [\"{}\"]}}'\n line = line.format(image_filename)\n images[image_filename] = im\n i += 1\n outfile.write(line)\n return images\n\ndef read_texture_wrl(infile):\n \"\"\"\n Reads hexlified image data from the current position in a VRML file.\n \"\"\"\n header = next(infile).strip().split()\n width, height, nbands = map(int, header[1:])\n\n data = []\n for line in infile:\n line = line.strip().split()\n for item in line:\n if item.startswith('0x'):\n data.append(item)\n else:\n return data, width, height\n\ndef ascii2image_wrl(data, width, height):\n \"\"\"\n Converts hexlified data in VRML to a PIL image.\n \"\"\"\n if len(data[0]) == 8:\n nbands = 3\n elif len(data[0]) == 10:\n nbands = 4\n else:\n raise ValueError('Unrecognized data type for image data')\n\n results = []\n for item 
in data:\n results.append(binascii.unhexlify(item[2:]))\n data = results\n data = ''.join(data)\n dat = np.fromstring(data, dtype=np.uint8).reshape(height, width, nbands)\n dat = np.roll(dat, nbands, -1)\n dat = np.flipud(dat)\n im = Image.fromarray(dat)\n return im\n", "Let's add some nice textures on the sides...\nFirst, let's build the sides individually", "def build_sides(x, y, z, level):\n slices = [np.s_[:,0], np.s_[:,-1], np.s_[0,:], np.s_[-1,:]]\n for sl in slices:\n build_side(x[sl], y[sl], z[sl], level)\n\ndef build_side(x, y, z, base_level):\n x = np.vstack([x, x])\n y = np.vstack([y, y])\n z = np.vstack([z, base_level * np.ones_like(z)])\n\n mesh = mlab.mesh(x, y, z, color=(1, 1, 1))\n return mesh\n\ndef build_bottom(x, y, z, level):\n i = [-1, -1, 0, 0]\n j = [0, -1, 0, -1]\n corners = lambda item: item[i, j].reshape(2, 2)\n mlab.mesh(corners(x), corners(y), level * np.ones((2,2)), color=(1, 1, 1))", "Then we can texture them individually", "def build_bottom(x, y, z, level):\n i = [-1, -1, 0, 0]\n j = [0, -1, 0, -1]\n corners = lambda item: item[i, j].reshape(2, 2)\n bottom = mlab.mesh(corners(x), corners(y), level * np.ones((2,2)))\n\n utils.texture(bottom, fname='data/alaska/bottom_annotated.png', clamp=False)\n return bottom\n\ndef build_sides(x, y, z, level):\n images = ['left_annotated_halo.png', 'right_annotated_halo.png',\n 'back_annotated_halo.png', 'front_annotated_halo.png']\n\n slices = [np.s_[:,0], np.s_[:,-1], np.s_[0,:], np.s_[-1,:]]\n for sl, im in zip(slices, images):\n image = 'data/alaska/' + im\n build_side(x[sl], y[sl], z[sl], level, image)\n \ndef build_side(x, y, z, level, fname):\n x = np.vstack([x, x])\n y = np.vstack([y, y])\n ze = np.vstack([z, level * np.ones_like(z)])\n\n mesh = mlab.mesh(x, y, z)\n utils.texture(mesh, fname=image)\n \n%run alaska_model_textured_sides.py", "<img src=\"images/alaska_model_textured.jpg\" width=100%>\nWe can extend the same idea to seismic data", "%run slice_3d_example.py", "Now we'll do the 
same thing as the topographic model, but texture the sides with seismic data", "%run make_base.py", "<img src=\"images/base.jpg\" width=100%>\nWe can even use another horizon as the \"base\" for the sides", "%run make_top.py", "<img src=\"images/3d_seismic_apart.jpg\" width=40%>\nConclusions\n\n3D printing can be a useful geoscience communication technique.\nColor or B&W is vital for geoscience! More than just form.\nGenerating printable models from real datasets is easy with scipy\n3D printed 3D sesimic is nifty!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
msadegh97/machine-learning-course
03-classification.ipynb
gpl-3.0
[ "Classification", "import pandas as pd\nfrom matplotlib import pyplot as plt\n%matplotlib inline\n\nfrom sklearn.datasets import load_breast_cancer\ncancer = load_breast_cancer()\n\ncancer['target_names']\n\ncancer['feature_names']\n\nX = cancer['data'][:, 0]\nX = X.reshape((X.shape[0], 1))\ny = cancer['target']\ny = 1 - y.reshape((y.shape[0], 1))\n\nplt.scatter(X, y)\n\nfrom sklearn.linear_model import LinearRegression\nmodel = LinearRegression().fit(X, y)\n\nplt.scatter(X, y);\nplt.plot(X, model.predict(X));", "Logistic Regression\nHypothesis\n$$ h_\\theta(x) = g(\\theta^Tx) = \\frac{1}{1 + e^{-\\theta^Tx}} $$\nwhere $ g(z) = \\frac{1}{1 + e^{-z}} $ is the sigmoid function:", "from sklearn.linear_model import LogisticRegression\nmodel = LogisticRegression().fit(X, y.ravel())\n\nplt.scatter(X, y, c=model.predict(X), cmap=plt.cm.rainbow, marker='.');\nplt.scatter(X, model.predict_proba(X)[:, 1], marker='.');\nplt.axhline(.5);\nplt.axvline(-model.intercept_ / model.coef_);", "Cost Function\n$$ J_\\theta(X) = -\\frac{1}{m}\\sum_{i=1}^m y^{(i)} \\log(h_\\theta(x^{(i)})) + (1 - y^{(i)}) \\log(1 - h_\\theta(x^{(i)})) $$\nGradient Descent\n$\nRepeat \\ { \\\n \\theta_j := \\theta_j - \\alpha \\frac{\\partial}{\\partial \\theta_j} J_\\theta(X) \\\n}$\nwhere:\n$ \\begin{equation}\n \\begin{split}\n \\frac{\\partial J_\\theta(X)}{\\partial \\theta_j}\n &= \\frac{\\partial}{\\partial \\theta_j} \\frac{-1}{m}\\sum_{i=1}^m y^{(i)} \\log(h_\\theta(x^{(i)})) + (1 - y^{(i)}) \\log(1 - h_\\theta(x^{(i)})) \\\n &= -\\frac{1}{m}\\sum_{i=1}^m (\\frac{y^{(i)}}{h_\\theta(x^{(i)})} - \\frac{1 - y^{(i)}}{1 - h_\\theta(x^{(i)})}) \\frac{\\partial}{\\partial \\theta_j} h_\\theta(x^{(i)}) \\\n &= -\\frac{1}{m}\\sum_{i=1}^m (\\frac{y^{(i)}}{g(\\theta^Tx)} - \\frac{1 - y^{(i)}}{1 - g(\\theta^Tx)}) g(\\theta^Tx) (1 - g(\\theta^Tx)) \\frac{\\partial}{\\partial \\theta_j} (\\theta^Tx^{(i)}) \\\n &= -\\frac{1}{m}\\sum_{i=1}^m (y^{(i)} (1 - g(\\theta^Tx)) - (1 - y^{(i)}) g(\\theta^Tx))x_j^{(i)} 
\\\n &= \\frac{1}{m}\\sum_{i=1}^m (h_{w,b}(x^{(i)}) - y^{(i)})x_j^{(i)}\n \\end{split}\n \\end{equation} $\nand:\n$ \\begin{equation}\n \\begin{split}\n g'(z) &= \\frac{d}{dz} \\frac{1}{1 + e^{-z}} \\\n &= \\frac{1}{(1 + e^{-z})^2}(e^{-z}) \\\n &= \\frac{1}{1 + e^{-z}}.(1 - \\frac{1}{1 + e^{-z}}) \\\n &= g(z)(1 - g(z))\n \\end{split}\n \\end{equation} $\n\nMulticlass\n<img style=\"float: left;\" width=\"450\" src=\"images/logistic-regression/multinomial.png\">\n<img style=\"float: left;\" width=\"450\" src=\"images/logistic-regression/ovr.png\">" ]
[ "markdown", "code", "markdown", "code", "markdown" ]
Open-Power-System-Data/time_series
processing.ipynb
mit
[ "<div style=\"width:100%; background-color: #D9EDF7; border: 1px solid #CFCFCF; text-align: left; padding: 10px;\">\n <b>Time series: Processing Notebook</b>\n <ul>\n <li><a href=\"main.ipynb\">Main Notebook</a></li>\n <li>Processing Notebook</li>\n </ul>\n <br>This Notebook is part of the <a href=\"http://data.open-power-system-data.org/time_series\">Time series Data Package</a> of <a href=\"http://open-power-system-data.org\">Open Power System Data</a>.\n</div>\n\n<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Introductory-Notes\" data-toc-modified-id=\"Introductory-Notes-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Introductory Notes</a></span></li><li><span><a href=\"#Settings\" data-toc-modified-id=\"Settings-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Settings</a></span><ul class=\"toc-item\"><li><span><a href=\"#Set-version-number-and-recent-changes\" data-toc-modified-id=\"Set-version-number-and-recent-changes-2.1\"><span class=\"toc-item-num\">2.1&nbsp;&nbsp;</span>Set version number and recent changes</a></span></li><li><span><a href=\"#Import-Python-libraries\" data-toc-modified-id=\"Import-Python-libraries-2.2\"><span class=\"toc-item-num\">2.2&nbsp;&nbsp;</span>Import Python libraries</a></span></li><li><span><a href=\"#Display-options\" data-toc-modified-id=\"Display-options-2.3\"><span class=\"toc-item-num\">2.3&nbsp;&nbsp;</span>Display options</a></span></li><li><span><a href=\"#Set-directories\" data-toc-modified-id=\"Set-directories-2.4\"><span class=\"toc-item-num\">2.4&nbsp;&nbsp;</span>Set directories</a></span></li><li><span><a href=\"#Chromedriver\" data-toc-modified-id=\"Chromedriver-2.5\"><span class=\"toc-item-num\">2.5&nbsp;&nbsp;</span>Chromedriver</a></span></li><li><span><a href=\"#Set-up-a-log\" data-toc-modified-id=\"Set-up-a-log-2.6\"><span class=\"toc-item-num\">2.6&nbsp;&nbsp;</span>Set up a log</a></span></li><li><span><a 
href=\"#Select-timerange\" data-toc-modified-id=\"Select-timerange-2.7\"><span class=\"toc-item-num\">2.7&nbsp;&nbsp;</span>Select timerange</a></span></li><li><span><a href=\"#Select-download-source\" data-toc-modified-id=\"Select-download-source-2.8\"><span class=\"toc-item-num\">2.8&nbsp;&nbsp;</span>Select download source</a></span></li><li><span><a href=\"#Select-subset\" data-toc-modified-id=\"Select-subset-2.9\"><span class=\"toc-item-num\">2.9&nbsp;&nbsp;</span>Select subset</a></span></li></ul></li><li><span><a href=\"#Download\" data-toc-modified-id=\"Download-3\"><span class=\"toc-item-num\">3&nbsp;&nbsp;</span>Download</a></span><ul class=\"toc-item\"><li><span><a href=\"#Automatic-download-(for-most-sources)\" data-toc-modified-id=\"Automatic-download-(for-most-sources)-3.1\"><span class=\"toc-item-num\">3.1&nbsp;&nbsp;</span>Automatic download (for most sources)</a></span></li><li><span><a href=\"#Manual-download\" data-toc-modified-id=\"Manual-download-3.2\"><span class=\"toc-item-num\">3.2&nbsp;&nbsp;</span>Manual download</a></span><ul class=\"toc-item\"><li><span><a href=\"#Energinet.dk\" data-toc-modified-id=\"Energinet.dk-3.2.1\"><span class=\"toc-item-num\">3.2.1&nbsp;&nbsp;</span>Energinet.dk</a></span></li><li><span><a href=\"#CEPS\" data-toc-modified-id=\"CEPS-3.2.2\"><span class=\"toc-item-num\">3.2.2&nbsp;&nbsp;</span>CEPS</a></span></li><li><span><a href=\"#ENTSO-E-Power-Statistics\" data-toc-modified-id=\"ENTSO-E-Power-Statistics-3.2.3\"><span class=\"toc-item-num\">3.2.3&nbsp;&nbsp;</span>ENTSO-E Power Statistics</a></span></li></ul></li></ul></li><li><span><a href=\"#Read\" data-toc-modified-id=\"Read-4\"><span class=\"toc-item-num\">4&nbsp;&nbsp;</span>Read</a></span><ul class=\"toc-item\"><li><span><a href=\"#Preparations\" data-toc-modified-id=\"Preparations-4.1\"><span class=\"toc-item-num\">4.1&nbsp;&nbsp;</span>Preparations</a></span></li><li><span><a href=\"#Reading-loop\" data-toc-modified-id=\"Reading-loop-4.2\"><span 
class=\"toc-item-num\">4.2&nbsp;&nbsp;</span>Reading loop</a></span></li><li><span><a href=\"#Save-raw-data\" data-toc-modified-id=\"Save-raw-data-4.3\"><span class=\"toc-item-num\">4.3&nbsp;&nbsp;</span>Save raw data</a></span></li></ul></li><li><span><a href=\"#Processing\" data-toc-modified-id=\"Processing-5\"><span class=\"toc-item-num\">5&nbsp;&nbsp;</span>Processing</a></span><ul class=\"toc-item\"><li><span><a href=\"#Missing-data-handling\" data-toc-modified-id=\"Missing-data-handling-5.1\"><span class=\"toc-item-num\">5.1&nbsp;&nbsp;</span>Missing data handling</a></span><ul class=\"toc-item\"><li><span><a href=\"#Interpolation\" data-toc-modified-id=\"Interpolation-5.1.1\"><span class=\"toc-item-num\">5.1.1&nbsp;&nbsp;</span>Interpolation</a></span></li></ul></li><li><span><a href=\"#Aggregate-wind-offshore-+-onshore\" data-toc-modified-id=\"Aggregate-wind-offshore-+-onshore-5.2\"><span class=\"toc-item-num\">5.2&nbsp;&nbsp;</span>Aggregate wind offshore + onshore</a></span></li><li><span><a href=\"#Country-specific-calculations---not-used-in-this-release\" data-toc-modified-id=\"Country-specific-calculations---not-used-in-this-release-5.3\"><span class=\"toc-item-num\">5.3&nbsp;&nbsp;</span>Country specific calculations - not used in this release</a></span><ul class=\"toc-item\"><li><span><a href=\"#Germany\" data-toc-modified-id=\"Germany-5.3.1\"><span class=\"toc-item-num\">5.3.1&nbsp;&nbsp;</span>Germany</a></span><ul class=\"toc-item\"><li><span><a href=\"#Aggregate-German-data-from-individual-TSOs\" data-toc-modified-id=\"Aggregate-German-data-from-individual-TSOs-5.3.1.1\"><span class=\"toc-item-num\">5.3.1.1&nbsp;&nbsp;</span>Aggregate German data from individual TSOs</a></span></li></ul></li><li><span><a href=\"#Italy\" data-toc-modified-id=\"Italy-5.3.2\"><span class=\"toc-item-num\">5.3.2&nbsp;&nbsp;</span>Italy</a></span></li><li><span><a href=\"#Great-Britain-/-United-Kingdom\" 
data-toc-modified-id=\"Great-Britain-/-United-Kingdom-5.3.3\"><span class=\"toc-item-num\">5.3.3&nbsp;&nbsp;</span>Great Britain / United Kingdom</a></span></li></ul></li><li><span><a href=\"#Calculate-availabilities/profiles\" data-toc-modified-id=\"Calculate-availabilities/profiles-5.4\"><span class=\"toc-item-num\">5.4&nbsp;&nbsp;</span>Calculate availabilities/profiles</a></span></li><li><span><a href=\"#Resample-higher-frequencies-to-60'\" data-toc-modified-id=\"Resample-higher-frequencies-to-60'-5.5\"><span class=\"toc-item-num\">5.5&nbsp;&nbsp;</span>Resample higher frequencies to 60'</a></span></li><li><span><a href=\"#Fill-columns-not-retrieved-directly-from-TSO-webites-with--ENTSO-E-Transparency-data\" data-toc-modified-id=\"Fill-columns-not-retrieved-directly-from-TSO-webites-with--ENTSO-E-Transparency-data-5.6\"><span class=\"toc-item-num\">5.6&nbsp;&nbsp;</span>Fill columns not retrieved directly from TSO webites with ENTSO-E Transparency data</a></span></li><li><span><a href=\"#Insert-a-column-with-Central-European-(Summer-)time\" data-toc-modified-id=\"Insert-a-column-with-Central-European-(Summer-)time-5.7\"><span class=\"toc-item-num\">5.7&nbsp;&nbsp;</span>Insert a column with Central European (Summer-)time</a></span></li></ul></li><li><span><a href=\"#Create-a-final-savepoint\" data-toc-modified-id=\"Create-a-final-savepoint-6\"><span class=\"toc-item-num\">6&nbsp;&nbsp;</span>Create a final savepoint</a></span></li><li><span><a href=\"#Write-data-to-disk\" data-toc-modified-id=\"Write-data-to-disk-7\"><span class=\"toc-item-num\">7&nbsp;&nbsp;</span>Write data to disk</a></span><ul class=\"toc-item\"><li><span><a href=\"#Limit-time-range\" data-toc-modified-id=\"Limit-time-range-7.1\"><span class=\"toc-item-num\">7.1&nbsp;&nbsp;</span>Limit time range</a></span></li><li><span><a href=\"#Different-shapes\" data-toc-modified-id=\"Different-shapes-7.2\"><span class=\"toc-item-num\">7.2&nbsp;&nbsp;</span>Different shapes</a></span></li><li><span><a 
href=\"#Write-to-SQLite-database\" data-toc-modified-id=\"Write-to-SQLite-database-7.3\"><span class=\"toc-item-num\">7.3&nbsp;&nbsp;</span>Write to SQLite-database</a></span></li><li><span><a href=\"#Write-to-Excel\" data-toc-modified-id=\"Write-to-Excel-7.4\"><span class=\"toc-item-num\">7.4&nbsp;&nbsp;</span>Write to Excel</a></span></li><li><span><a href=\"#Write-to-CSV\" data-toc-modified-id=\"Write-to-CSV-7.5\"><span class=\"toc-item-num\">7.5&nbsp;&nbsp;</span>Write to CSV</a></span></li><li><span><a href=\"#Create-metadata\" data-toc-modified-id=\"Create-metadata-7.6\"><span class=\"toc-item-num\">7.6&nbsp;&nbsp;</span>Create metadata</a></span></li><li><span><a href=\"#Write-checksums.txt\" data-toc-modified-id=\"Write-checksums.txt-7.7\"><span class=\"toc-item-num\">7.7&nbsp;&nbsp;</span>Write checksums.txt</a></span></li></ul></li></ul></div>\n\nIntroductory Notes\nThis Notebook handles missing data, performs calculations and aggragations and creates the output files.\nSettings\nThis section performs some preparatory steps.\nSet version number and recent changes\nExecuting this script till the end will create a new version of the data package.\nThe Version number specifies the local directory for the data <br>\nWe include a note on what has been changed.", "version = '2020-10-06'\nchanges = '''Yearly update'''", "Import Python libraries", "# Python modules\nfrom datetime import datetime, date, timedelta, time\nimport pandas as pd\nimport numpy as np\nimport logging\nimport json\nimport sqlite3\nimport yaml\nimport itertools\nimport os\nimport pytz\nfrom shutil import copyfile\nimport pickle\n\n# Skripts from time-series repository\nfrom timeseries_scripts.read import read\nfrom timeseries_scripts.download import download\nfrom timeseries_scripts.imputation import find_nan, mark_own_calc\nfrom timeseries_scripts.make_json import make_json, get_sha_hash\n\n# Reload modules with execution of any code, to avoid having to restart\n# the kernel after editing 
timeseries_scripts\n%load_ext autoreload\n%autoreload 2\n\n# speed up tab completion in Jupyter Notebook\n%config Completer.use_jedi = False", "Display options", "# Allow pretty-display of multiple variables\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n\n# Adjust the way pandas DataFrames a re displayed to fit more columns\npd.reset_option('display.max_colwidth')\npd.options.display.max_columns = 60\n# pd.options.display.max_colwidth=5", "Set directories", "# make sure the working directory is this file's directory\ntry:\n os.chdir(home_path)\nexcept NameError:\n home_path = os.path.realpath('.')\n\n# optionally, set a different directory to store outputs and raw data,\n# which will take up around 15 GB of disk space\n#Milos: save_path is None <=> use_external_dir == False\nuse_external_dir = True\nif use_external_dir:\n save_path = os.path.join('C:', os.sep, 'OPSD_time_series_data')\nelse:\n save_path = home_path\n\ninput_path = os.path.join(home_path, 'input')\nsources_yaml_path = os.path.join(home_path, 'input', 'sources.yml')\nareas_csv_path = os.path.join(home_path, 'input', 'areas.csv')\ndata_path = os.path.join(save_path, version, 'original_data')\nout_path = os.path.join(save_path, version) \ntemp_path = os.path.join(save_path, 'temp')\nparsed_path = os.path.join(save_path, 'parsed')\nchromedriver_path = os.path.join(home_path, 'chromedriver', 'chromedriver')\nfor path in [data_path, out_path, temp_path, parsed_path]:\n os.makedirs(path, exist_ok=True)\n\n# change to temp directory\nos.chdir(temp_path)\nos.getcwd()", "Chromedriver\nIf you want to download from sources which require scraping, download the appropriate version of Chromedriver for your platform, name it chromedriver, create folder chromedriver in the working directory, and move the driver to it. 
It is used by Selenium to scrape the links from web pages.\nThe current list of sources which require scraping (as of December 2018):\n - Terna\n - Note that the package contains a database of Terna links up to 20 December 2018. By default, the links are first looked up in this database, so if the end date of your query is not after 20 December 2018, you won't need Selenium. In the case that you need later dates, you have two options. If you set the variable extract_new_terna_urls to True, then Selenium will be used to download the files for those later dates. If you set extract_new_terna_urls to False (which is the default value), only the recorded links will be consulted and Selenium will not be used.\n - Note: Make sure that the database file, recorded_terna_urls.csv, is located in the working directory.", "# Deciding whether to use the provided database of Terna links\nextract_new_terna_urls = False\n\n# Saving the choice\nf = open(\"extract_new_terna_urls.pickle\", \"wb\")\npickle.dump(extract_new_terna_urls, f)\nf.close()", "Set up a log", "# Configure the display of logs in the notebook and attach it to the root logger\nlogstream = logging.StreamHandler()\nlogstream.setLevel(logging.INFO) #threshold for log messages displayed in here\nlogging.basicConfig(level=logging.INFO, handlers=[logstream])\n\n# Set up an additional logger for debug messages from the scripts\nscript_logger = logging.getLogger('timeseries_scripts')\nscript_logger.setLevel(logging.DEBUG)\nformatter = logging.Formatter(fmt='%(asctime)s %(name)s %(levelname)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',)\n# Set up a logger for logs from the notebook\nlogger = logging.getLogger('notebook')\n\n# Set up a logfile and attach it to both loggers\nlogfile = logging.handlers.TimedRotatingFileHandler(os.path.join(temp_path, 'logfile.log'), when='midnight')\nlogfile.setFormatter(formatter)\nlogfile.setLevel(logging.DEBUG) #threshold for log messages in 
logfile\nscript_logger.addHandler(logfile)\nlogger.addHandler(logfile)", "Execute for more detailed logging message (May slow down computation).", "logstream.setLevel(logging.DEBUG)", "Select timerange\nThis section: select the time range and the data sources for download and read. Default: all data sources implemented, full time range available.\nSource parameters are specified in input/sources.yml, which describes, for each source, the datasets (such as wind and solar generation) alongside all the parameters necessary to execute the downloads.\nThe option to perform downloading and reading of subsets is for testing only. To be able to run the script successfully until the end, all sources have to be included, or otherwise the script will run into errors (i.e. the step where aggregate German timeseries are calculated requires data from all four German TSOs to be loaded).\nIn order to do this, specify the beginning and end of the interval for which to attempt the download.\nType None to download all available data.", "start_from_user = date(2015, 1, 1)\nend_from_user = date(2020, 9, 30)", "Select download source\nInstead of downloading from the sources, the complete raw data can be downloaded as a zip file from the OPSD Server. Advantages are:\n- much faster download\n- back up of raw data in case it is deleted from the server at the original source\nIn order to do this, specify an archive version to use the raw data from that version that has been cached on the OPSD server as input. All data from that version will be downloaded - timerange and subset will be ignored.\nType None to download directly from the original sources.", "archive_version = None # i.e. 
'2016-07-14'", "Select subset\nRead in the configuration file which contains all the required infos for the download.", "with open(sources_yaml_path, 'r', encoding='UTF-8') as f:\n sources = yaml.full_load (f.read())", "The next cell prints the available sources and datasets.<br>\nCopy from its output and paste to following cell to get the right format.<br>", "for k, v in sources.items():\n print(yaml.dump({k: list(v.keys())}, default_flow_style=False))", "Optionally, specify a subset to download/read.<br>\nType subset = None to include all data.", "subset = yaml.full_load('''\nENTSO-E Transparency FTP:\n- Actual Generation per Production Type\n- Actual Total Load\n- Day-ahead Total Load Forecast\n- Day-ahead Prices\nOPSD:\n- capacity\n''')\nexclude=None", "Now eliminate sources and datasets not in subset.", "with open(sources_yaml_path, 'r', encoding='UTF-8') as f:\n sources = yaml.full_load(f.read())\nif subset: # eliminate sources and datasets not in subset\n sources = {source_name: \n {k: v for k, v in sources[source_name].items()\n if k in dataset_list}\n for source_name, dataset_list in subset.items()}\nif exclude: # eliminate sources and variables in exclude\n sources = {source_name: dataset_dict\n for source_name, dataset_dict in sources.items()\n if not source_name in exclude}\n\n# Printing the selected sources (all of them or just a subset)\nprint(\"Selected sources: \")\nfor k, v in sources.items():\n print(yaml.dump({k: list(v.keys())}, default_flow_style=False))", "Download\nThis section: download data. Takes about 1 hour to run for the complete data set (subset=None).\nFirst, a data directory is created on your local computer. Then, download parameters for each data source are defined, including the URL. These parameters are then turned into a YAML-string. Finally, the download is executed file by file.\nEach file is saved under it's original filename. Note that the original file names are often not self-explanatory (called \"data\" or \"January\"). 
The files content is revealed by its place in the directory structure.\nSome sources (currently only ENTSO-E Transparency) require an account to allow downloading. For ENTSO-E Transparency, set up an account here.", "auth = yaml.full_load('''\nENTSO-E Transparency FTP:\n username: your email\n password: your password\nElexon:\n username: your email\n password: your password\n''')", "Automatic download (for most sources)", "download(sources, data_path, input_path, chromedriver_path, auth,\n archive_version=None,\n start_from_user=start_from_user,\n end_from_user=end_from_user,\n testmode=False)", "Manual download\nEnerginet.dk\nGo to http://osp.energinet.dk/_layouts/Markedsdata/framework/integrations/markedsdatatemplate.aspx.\nCheck The Boxes as specified below:\n- Periode\n - Hent udtræk fra perioden: 01-01-2005 Til: 01-01-2019\n - Select all months\n- Datakolonner\n - Elspot Pris, Valutakode/MWh: Select all\n - Produktion og forbrug, MWh/h: Select all\n- Udtræksformat\n - Valutakode: EUR\n - Decimalformat: Engelsk talformat (punktum som decimaltegn\n - Datoformat: Andet datoformat (ÅÅÅÅ-MM-DD)\n - Hent Udtræk: Til Excel\nClick Hent Udtræk\nYou will receive a file Markedsata.xls of about 50 MB. Open the file in Excel. There will be a warning from Excel saying that file extension and content are in conflict. 
Select \"open anyways\" and save the file as .xlsx.\nIn order to be found by the read-function, place the downloaded file in the following subdirectory:\n{{data_path}}{{os.sep}}Energinet.dk{{os.sep}}prices_wind_solar{{os.sep}}2005-01-01_2019-01-01\nCEPS\nGo to http://www.ceps.cz/en/all-data#GenerationRES\ncheck boxes as specified below:\nDISPLAY DATA FOR: Generation RES\nTURN ON FILTER checked\nFILTER SETTINGS: \n- Set the date range\n - interval\n - from: 2012 to: 2019\n- Agregation and data version\n - Aggregation: Hour\n - Agregation function: average (AVG)\n - Data version: real data\n- Filter\n - Type of power plant: ALL\n- Click USE FILTER\n- DOWNLOAD DATA: DATA V TXT\nYou will receive a file data.txt of about 1.5 MB.\nIn order to be found by the read-function, place the downloaded file in the following subdirectory:\n{{data_path}}{{os.sep}}CEPS{{os.sep}}wind_pv{{os.sep}}2012-01-01_2019-01-01\nENTSO-E Power Statistics\nGo to https://www.entsoe.eu/data/statistics/Pages/monthly_hourly_load.aspx\ncheck boxes as specified below:\n\nDate From: 01-01-2016 Date To: 28-02-2019\nCountry: (Select All)\nScale values to 100% using coverage ratio: YES\nView Report\nClick the Save symbol and select Excel\n\nYou will receive a file MHLV.xlsx of about 8 MB.\nIn order to be found by the read-function, place the downloaded file in the following subdirectory:\n{{os.sep}}original_data{{os.sep}}ENTSO-E Power Statistics{{os.sep}}load{{os.sep}}2016-01-01_2016-04-30\nThe data covers the period from 01-01-2016 up to the present, but 4 months of data seems to be the maximum that the interface supports for a single download request, so you have to repeat the download procedure for 4-Month periods to cover the whole period until the present.\nRead\nThis section: Read each downloaded file into a pandas-DataFrame and merge data from different sources if it has the same time resolution. 
Takes ~15 minutes to run.\nPreparations\nSet the title of the rows at the top of the data used to store metadata internally. The order of this list determines the order of the levels in the resulting output.", "headers = ['region', 'variable', 'attribute', 'source', 'web', 'unit']", "Read a prepared table containing meta data on the geographical areas", "areas = pd.read_csv(areas_csv_path)", "View the areas table", "areas.loc[areas['area ID'].notnull(), :'EIC'].fillna('')", "Reading loop\nLoop through sources and datasets to do the reading.\nFirst read the original CSV, Excel etc. files into pandas DataFrames.", "areas = pd.read_csv(areas_csv_path)\n\nread(sources, data_path, parsed_path, areas, headers,\n start_from_user=start_from_user, end_from_user=end_from_user,\n testmode=False)", "Then combine the DataFrames that have the same temporal resolution", "# Create a dictionary of empty DataFrames to be populated with data\ndata_sets = {'15min': pd.DataFrame(),\n '30min': pd.DataFrame(),\n '60min': pd.DataFrame()}\nentso_e = {'15min': pd.DataFrame(),\n '30min': pd.DataFrame(),\n '60min': pd.DataFrame()}\n\nfor filename in os.listdir(parsed_path):\n res_key, source_name, dataset_name, = filename.split('_')[:3]\n if subset and not source_name in subset.keys():\n continue\n logger.info('include %s', filename)\n df_portion = pd.read_pickle(os.path.join(parsed_path, filename))\n\n #if source_name == 'ENTSO-E Transparency FTP':\n # dfs = entso_e\n #else:\n dfs = data_sets\n\n if dfs[res_key].empty:\n dfs[res_key] = df_portion\n elif not df_portion.empty:\n dfs[res_key] = dfs[res_key].combine_first(df_portion)\n else:\n logger.warning(filename + ' WAS EMPTY')\n\nfor res_key, df in data_sets.items():\n logger.info(res_key + ': %s', df.shape)\n#for res_key, df in entso_e.items():\n# logger.info('ENTSO-E ' + res_key + ': %s', df.shape)", "Display some rows of the dataframes to get a first impression of the data.", "data_sets['60min']", "Save raw data\nSave the DataFrames 
created by the read function to disk. This way you have the raw data to fall back to if something goes wrong in the remainder of this notebook without having to repeat the previous steps.", "os.chdir(temp_path)\ndata_sets['15min'].to_pickle('raw_data_15.pickle')\ndata_sets['30min'].to_pickle('raw_data_30.pickle')\ndata_sets['60min'].to_pickle('raw_data_60.pickle')\nentso_e['15min'].to_pickle('raw_entso_e_15.pickle')\nentso_e['30min'].to_pickle('raw_entso_e_30.pickle')\nentso_e['60min'].to_pickle('raw_entso_e_60.pickle')", "Load the DataFrames saved above", "os.chdir(temp_path)\ndata_sets = {}\ndata_sets['15min'] = pd.read_pickle('raw_data_15.pickle')\ndata_sets['30min'] = pd.read_pickle('raw_data_30.pickle')\ndata_sets['60min'] = pd.read_pickle('raw_data_60.pickle')\nentso_e = {}\nentso_e['15min'] = pd.read_pickle('raw_entso_e_15.pickle')\nentso_e['30min'] = pd.read_pickle('raw_entso_e_30.pickle')\nentso_e['60min'] = pd.read_pickle('raw_entso_e_60.pickle')", "Processing\nThis section: missing data handling, aggregation of sub-national to national data, aggregate 15'-data to 60'-resolution. Takes 30 minutes to run.\nMissing data handling\nInterpolation\nPatch missing data. At this stage, only small gaps (up to 2 hours) are filled by linear interpolation. This catches most of the missing data due to daylight savings time transitions, while leaving bigger gaps untouched.\nThe exact locations of missing data are stored in the nan_table DataFrames.\nPatch the datasets and display the location of missing data in the original data. 
Takes ~5 minutes to run.", "nan_tables = {}\noverviews = {}\nfor res_key, df in data_sets.items():\n data_sets[res_key], nan_tables[res_key], overviews[res_key] = find_nan(\n df, res_key, headers, patch=True)\n\nfor res_key, df in entso_e.items():\n entso_e[res_key], nan_tables[res_key + ' ENTSO-E'], overviews[res_key + ' ENTSO-E'] = find_nan(\n df, res_key, headers, patch=True)", "Execute this to see an example of where the data has been patched.\nDisplay the table of regions of missing values", "nan_tables['60min']", "You can export the NaN-tables to Excel in order to inspect where there are NaNs", "os.chdir(temp_path)\nwriter = pd.ExcelWriter('NaN_table.xlsx')\nfor res_key, df in nan_tables.items():\n df.to_excel(writer, res_key)\nwriter.save()\n\nwriter = pd.ExcelWriter('Overview.xlsx')\nfor res_key, df in overviews.items():\n df.to_excel(writer, res_key)\nwriter.save()", "Save/Load the patched data sets", "os.chdir(temp_path)\ndata_sets['15min'].to_pickle('patched_15.pickle')\ndata_sets['30min'].to_pickle('patched_30.pickle')\ndata_sets['60min'].to_pickle('patched_60.pickle')\nentso_e['15min'].to_pickle('patched_entso_e_15.pickle')\nentso_e['30min'].to_pickle('patched_entso_e_30.pickle')\nentso_e['60min'].to_pickle('patched_entso_e_60.pickle')\n\nos.chdir(temp_path)\ndata_sets = {}\ndata_sets['15min'] = pd.read_pickle('patched_15.pickle')\ndata_sets['30min'] = pd.read_pickle('patched_30.pickle')\ndata_sets['60min'] = pd.read_pickle('patched_60.pickle')\nentso_e = {}\nentso_e['15min'] = pd.read_pickle('patched_entso_e_15.pickle')\nentso_e['30min'] = pd.read_pickle('patched_entso_e_30.pickle')\nentso_e['60min'] = pd.read_pickle('patched_entso_e_60.pickle')", "Some of the following operations require the Dataframes to be lexsorted in the columns", "for res_key, df in data_sets.items():\n df.sort_index(axis='columns', inplace=True)", "Aggregate wind offshore + onshore", "for res_key, df in data_sets.items():\n for geo in df.columns.get_level_values(0).unique():\n 
# we could also include 'generation_forecast'\n for attribute in ['generation_actual']:\n df_wind = df.loc[:, (geo, ['wind_onshore', 'wind_offshore'], attribute)]\n if ('wind_onshore' in df_wind.columns.get_level_values('variable') and\n 'wind_offshore' in df_wind.columns.get_level_values('variable')):\n logger.info(f'aggregate onhore + offshore for {res_key} {geo}')\n \n # skipna=False, otherwise NAs will become zeros after summation\n sum_col = df_wind.sum(axis='columns', skipna=False).to_frame()\n\n # Create a new MultiIndex\n new_col_header = {\n 'region': geo,\n 'variable': 'wind',\n 'attribute': 'generation_actual',\n 'source': 'own calculation based on ENTSO-E Transparency',\n 'web': '',\n 'unit': 'MW'\n }\n new_col_header = tuple(new_col_header[level] for level in headers)\n df[new_col_header] = sum_col\n #df[new_col_header].describe()\n\ndfi = data_sets['15min'].copy()\ndfi.columns = [' '.join(col[:3]).strip() for col in dfi.columns.values]\ndfi.info(verbose=True, null_counts=True)", "Country specific calculations - not used in this release\nGermany\nAggregate German data from individual TSOs\nThe wind and solar in-feed data for the 4 German control areas is summed up and stored in a new column. The column headers are created in the fashion introduced in the read script. 
Takes 5 seconds to run.", "df = data_sets['15min']\ncontrol_areas_DE = ['DE_50hertz', 'DE_amprion', 'DE_tennet', 'DE_transnetbw']\n\nfor variable in ['solar', 'wind', 'wind_onshore', 'wind_offshore']:\n # we could also include 'generation_forecast'\n for attribute in ['generation_actual']:\n # Calculate aggregate German generation\n sum_frame = df.loc[:, (control_areas_DE, variable, attribute)]\n sum_frame.head() \n sum_col = sum_frame.sum(axis='columns', skipna=False).to_frame().round(0)\n\n # Create a new MultiIndex\n new_col_header = {\n 'region': 'DE',\n 'variable': variable,\n 'attribute': attribute,\n 'source': 'own calculation based on German TSOs',\n 'web': '',\n 'unit': 'MW'\n }\n new_col_header = tuple(new_col_header[level] for level in headers)\n data_sets['15min'][new_col_header] = sum_col\n data_sets['15min'][new_col_header].describe()", "Italy\nGeneration data for Italy come by region (North, Central North, Sicily, etc.) and separately for DSO and TSO, so they need to be agregated in order to get values for the whole country. 
In the next cell, we sum up the data by region and for each variable-attribute pair present in the Terna dataset header.", "bidding_zones_IT = ['IT_CNOR', 'IT_CSUD', 'IT_NORD', 'IT_SARD', 'IT_SICI', 'IT_SUD']\nattributes = ['generation_actual', 'generation_actual_dso', 'generation_actual_tso']\n\nfor variable in ['solar', 'wind_onshore']:\n sum_col = (\n data_sets['60min']\n .loc[:, (bidding_zones_IT, variable, attributes)]\n .sum(axis='columns', skipna=False))\n \n # Create a new MultiIndex\n new_col_header = {\n 'region': 'IT',\n 'variable': variable,\n 'attribute': 'generation_actual',\n 'source': 'own calculation based on Terna',\n 'web': 'https://www.terna.it/SistemaElettrico/TransparencyReport/Generation/Forecastandactualgeneration.aspx',\n 'unit': 'MW'\n }\n new_col_header = tuple(new_col_header[level] for level in headers)\n data_sets['60min'][new_col_header] = sum_col\n data_sets['60min'][new_col_header].describe()", "Great Britain / United Kingdom\nData for Great Britain (without Northern Ireland) are disaggregated for DSO and TSO connected generators. 
We calculate aggregate values.", "for variable in ['solar', 'wind']:\n sum_col = (data_sets['30min']\n .loc[:, ('GB_GBN', variable, ['generation_actual_dso', 'generation_actual_tso'])]\n .sum(axis='columns', skipna=False))\n \n # Create a new MultiIndex\n new_col_header = {\n 'region' : 'GB_GBN',\n 'variable' : variable,\n 'attribute' : 'generation_actual',\n 'source': 'own calculation based on Elexon and National Grid',\n 'web': '',\n 'unit': 'MW'\n }\n new_col_header = tuple(new_col_header[level] for level in headers)\n data_sets['30min'][new_col_header] = sum_col\n data_sets['30min'][new_col_header].describe()", "Calculate availabilities/profiles\nCalculate profiles, that is, the share of wind/solar capacity producing at a given time.", "for res_key, df in data_sets.items():\n #if res_key == '60min':\n # continue\n for col_name, col in df.loc[:,(slice(None), slice(None), 'capacity')].iteritems():\n # Get the generation data for the selected capacity column\n kwargs = {\n 'key': (col_name[0], col_name[1], 'generation_actual'),\n 'level': ['region', 'variable', 'attribute'],\n 'axis': 'columns', 'drop_level': False}\n generation_col = df.xs(**kwargs)\n # take ENTSO-E transparency data if there is none from TSO\n if generation_col.size == 0:\n try:\n generation_col = entso_e[res_key].xs(**kwargs)\n except KeyError:\n continue\n if generation_col.size == 0:\n continue\n # Calculate the profile column\n profile_col = generation_col.divide(col, axis='index').round(4)\n\n # Create a new MultiIndex\n new_col_header = {\n 'region': '{region}',\n 'variable': '{variable}',\n 'attribute': 'profile',\n 'source': 'own calculation based on {source}',\n 'web': '',\n 'unit': 'fraction'\n }\n \n source_capacity = col_name[3]\n source_generation = generation_col.columns.get_level_values('source')[0]\n if source_capacity == source_generation:\n source = source_capacity\n else:\n source = (source_generation + ' and ' + source_capacity).replace('own calculation based on ', '')\n 
new_col_header = tuple(new_col_header[level].format(region=col_name[0], variable=col_name[1], source=source)\n for level in headers)\n data_sets[res_key][new_col_header] = profile_col\n data_sets[res_key][new_col_header].describe()\n \n # Append profile to the dataset\n df = df.combine_first(profile_col)\n new_col_header", "Some of the following operations require the Dataframes to be lexsorted in the columns", "for res_key, df in data_sets.items():\n df.sort_index(axis='columns', inplace=True)", "Another savepoint", "os.chdir(temp_path)\ndata_sets['15min'].to_pickle('calc_15.pickle')\ndata_sets['30min'].to_pickle('calc_30.pickle')\ndata_sets['60min'].to_pickle('calc_60.pickle')\n\nos.chdir(temp_path)\ndata_sets = {}\ndata_sets['15min'] = pd.read_pickle('calc_15.pickle')\ndata_sets['30min'] = pd.read_pickle('calc_30.pickle')\ndata_sets['60min'] = pd.read_pickle('calc_60.pickle')\nentso_e = {}\nentso_e['15min'] = pd.read_pickle('patched_entso_e_15.pickle')\nentso_e['30min'] = pd.read_pickle('patched_entso_e_30.pickle')\nentso_e['60min'] = pd.read_pickle('patched_entso_e_60.pickle')", "Resample higher frequencies to 60'\nSome data comes in 15 or 30-minute intervals (i.e. German or British renewable generation), other in 60-minutes (i.e. load data from ENTSO-E and Prices). We resample the 15 and 30-minute data to hourly resolution and append it to the 60-minutes dataset.\nThe .resample('H').mean() methods calculates the means from the values for 4 quarter hours [:00, :15, :30, :45] of an hour values, inserts that for :00 and drops the other 3 entries. 
Takes 15 seconds to run.", "for ds in [data_sets]:#, entso_e]:\n for res_key, df in ds.items():\n if res_key == '60min':\n continue\n # # Resample first the marker column\n # marker_resampled = df['interpolated_values'].groupby(\n # pd.Grouper(freq='60Min', closed='left', label='left')\n # ).agg(resample_markers, drop_region='DE_AT_LU')\n # marker_resampled = marker_resampled.reindex(ds['60min'].index)\n\n # # Glue condensed 15/30 min marker onto 60 min marker\n # ds['60min'].loc[:, 'interpolated_values'] = glue_markers(\n # ds['60min']['interpolated_values'],\n # marker_resampled.reindex(ds['60min'].index))\n\n # # Drop DE_AT_LU bidding zone data from the 15 minute resolution data to\n # # be resampled since it is already provided in 60 min resolution by\n # # ENTSO-E Transparency\n # df = df.drop('DE_AT_LU', axis=1, errors='ignore')\n\n # Do the resampling\n resampled = df.resample('H').mean()\n resampled.columns = resampled.columns.map(mark_own_calc)\n resampled.columns.names = headers\n\n # filter out columns already represented in hourly data\n data_cols = ds['60min'].columns.droplevel(['source', 'web', 'unit'])\n tuples = [col for col in resampled.columns if not col[:3] in data_cols]\n add_cols = pd.MultiIndex.from_tuples(tuples, names=headers)\n resampled = resampled[add_cols]\n \n # Round the resampled columns\n for col in resampled.columns:\n if col[2] == 'profile':\n resampled.loc[:, col] = resampled.loc[:, col].round(4)\n else:\n resampled.loc[:, col] = resampled.loc[:, col].round(0)\n\n ds['60min'] = ds['60min'].combine_first(resampled)", "Fill columns not retrieved directly from TSO webites with ENTSO-E Transparency data", "data_cols = data_sets['60min'].columns.droplevel(['source', 'web', 'unit'])\n\nfor res_key, df in entso_e.items():\n # Combine with TSO data\n\n# # Copy entire 30min data from ENTSO-E if there is no data from TSO\n if data_sets[res_key].empty:\n data_sets[res_key] = df\n\n else:\n # Keep only region, variable, attribute in 
MultiIndex for comparison\n # Compare columns from ENTSO-E against TSO's, keep which we don't have yet\n cols = [col for col in df.columns if not col[:3] in data_cols]\n add_cols = pd.MultiIndex.from_tuples(cols, names=headers)\n data_sets[res_key] = data_sets[res_key].combine_first(df[add_cols])\n\n# # Add the ENTSO-E markers (but only for the columns actually copied)\n# add_cols = ['_'.join(col[:3]) for col in tuples]\n# # Spread marker column out over a DataFrame for easiser comparison\n# # Filter out everey second column, which contains the delimiter \" | \"\n# # from the marker\n# marker_table = (df['interpolated_values'].str.split(' | ', expand=True)\n# .filter(regex='^\\d*[02468]$', axis='columns'))\n# # Replace cells with markers marking columns not copied with NaNs\n# marker_table[~marker_table.isin(add_cols)] = np.nan\n\n# for col_name, col in marker_table.iteritems():\n# if col_name == 0:\n# marker_entso_e = col\n# else:\n# marker_entso_e = glue_markers(marker_entso_e, col)\n\n# # Glue ENTSO-E marker onto our old marker\n# marker = data_sets[res_key]['interpolated_values']\n# data_sets[res_key].loc[:, 'interpolated_values'] = glue_markers(\n# marker, df['interpolated_values'].reindex(marker.index))", "Insert a column with Central European (Summer-)time\nThe index column of th data sets defines the start of the timeperiod represented by each row of that data set in UTC time. 
We include an additional column for the CE(S)T Central European (Summer-) Time, as this might help aligning the output data with other data sources.", "info_cols = {'utc': 'utc_timestamp',\n 'cet': 'cet_cest_timestamp'}\n\nfor ds in [data_sets]: #, entso_e]:\n for res_key, df in ds.items():\n if df.empty:\n continue\n df.index.rename(info_cols['utc'], inplace=True)\n df.insert(0, info_cols['cet'],\n df.index.tz_localize('UTC').tz_convert('CET'))", "Create a final savepoint", "data_sets['15min'].to_pickle('final_15.pickle')\ndata_sets['30min'].to_pickle('final_30.pickle')\ndata_sets['60min'].to_pickle('final_60.pickle')\n#entso_e['15min'].to_pickle('final_entso_e_15.pickle')\n#entso_e['30min'].to_pickle('final_entso_e_30.pickle')\n#entso_e['60min'].to_pickle('final_entso_e_60.pickle')\n\nos.chdir(temp_path)\ndata_sets = {}\ndata_sets['15min'] = pd.read_pickle('final_15.pickle')\ndata_sets['30min'] = pd.read_pickle('final_30.pickle')\ndata_sets['60min'] = pd.read_pickle('final_60.pickle')\n#entso_e = {}\n#entso_e['15min'] = pd.read_pickle('final_entso_e_15.pickle')\n#entso_e['30min'] = pd.read_pickle('final_entso_e_30.pickle')\n#entso_e['60min'] = pd.read_pickle('final_entso_e_60.pickle')\n\ncombined = data_sets", "Show the column names contained in the final DataFrame in a table", "col_info = pd.DataFrame()\ndf = combined['60min']\nfor level in df.columns.names:\n col_info[level] = df.columns.get_level_values(level)\n\ncol_info", "Write data to disk\nThis section: Save as Data Package (data in CSV, metadata in JSON file). All files are saved in the directory of this notebook. Alternative file formats (SQL, XLSX) are also exported. 
Takes about 1 hour to run.\nLimit time range\nCut off the data outside of [start_from_user:end_from_user]", "for res_key, df in combined.items():\n # In order to make sure that the respective time period is covered in both\n # UTC and CE(S)T, we set the start in CE(S)T, but the end in UTC\n if start_from_user:\n start_from_user = (pytz.timezone('Europe/Brussels')\n .localize(datetime.combine(start_from_user, time()))\n .astimezone(pytz.timezone('UTC'))\n .replace(tzinfo=None))\n if end_from_user:\n end_from_user = (pytz.timezone('UTC')\n .localize(datetime.combine(end_from_user, time()))\n .replace(tzinfo=None)\n # Appropriate offset to inlude the end of period\n + timedelta(days=1, minutes=-int(res_key[:2])))\n # Then cut off the data_set\n data_sets[res_key] = df.loc[start_from_user:end_from_user, :]", "Different shapes\nData are provided in three different \"shapes\": \n- SingleIndex (easy to read for humans, compatible with datapackage standard, small file size)\n - Fileformat: CSV, SQLite\n- MultiIndex (easy to read into GAMS, not compatible with datapackage standard, small file size)\n - Fileformat: CSV, Excel\n- Stacked (compatible with data package standard, large file size, many rows, too many for Excel) \n - Fileformat: CSV\nThe different shapes need to be created internally befor they can be saved to files. 
Takes about 1 minute to run.", "combined_singleindex = {}\ncombined_multiindex = {}\ncombined_stacked = {}\nfor res_key, df in combined.items():\n if df.empty:\n continue\n\n# # Round floating point numbers to 2 digits\n# for col_name, col in df.iteritems():\n# if col_name[0] in info_cols.values():\n# pass\n# elif col_name[2] == 'profile':\n# df[col_name] = col.round(4)\n# else:\n# df[col_name] = col.round(3)\n\n # MultIndex\n combined_multiindex[res_key + '_multiindex'] = df\n\n # SingleIndex\n df_singleindex = df.copy()\n # use first 3 levels of multiindex to create singleindex\n df_singleindex.columns = [\n col_name[0] if col_name[0] in info_cols.values()\n else '_'.join([level for level in col_name[0:3] if not level == ''])\n for col_name in df.columns.values]\n\n combined_singleindex[res_key + '_singleindex'] = df_singleindex\n\n # Stacked\n stacked = df.copy().drop(columns=info_cols['cet'], level=0)\n stacked.columns = stacked.columns.droplevel(['source', 'web', 'unit'])\n # Concatenate all columns below each other (=\"stack\").\n # df.transpose().stack() is faster than stacking all column levels\n # seperately\n stacked = stacked.transpose().stack(dropna=True).to_frame(name='data')\n combined_stacked[res_key + '_stacked'] = stacked", "Write to SQLite-database\nThis file format is required for the filtering function on the OPSD website. This takes ~3 minutes to complete.", "os.chdir(out_path)\nfor res_key, df in combined_singleindex.items():\n table = 'time_series_' + res_key\n df = df.copy()\n df.index = df.index.strftime('%Y-%m-%dT%H:%M:%SZ')\n cet_col_name = info_cols['cet']\n df[cet_col_name] = (df[cet_col_name].dt.strftime('%Y-%m-%dT%H:%M:%S%z'))\n df.to_sql(table, sqlite3.connect('time_series.sqlite'),\n if_exists='replace', index_label=info_cols['utc'])", "Write to Excel\nWriting the full tables to Excel takes extremely long. As a workaround, only the timestamp-columns are exported. 
The rest of the data can than be inserted manually from the _multindex.csv files.", "os.chdir(out_path)\nwriter = pd.ExcelWriter('time_series.xlsx')\nwriter.save()\nfor res_key, df in data_sets.items():\n # Need to convert CE(S)T-timestamps to tz-naive, otherwise Excel converts\n # them back to UTC\n df.loc[:,(info_cols['cet'], '', '', '', '', '')].dt.tz_localize(None).to_excel(writer, res_key)\n filename = 'tsos_' + res_key + '.csv'\n df.to_csv(filename, float_format='%.4f', date_format='%Y-%m-%dT%H:%M:%SZ')\n#for res_key, df in entso_e.items():\n# df.loc[:,(info_cols['cet'], '', '', '', '', '')].dt.tz_localize(None).to_excel(writer, res_key+ ' ENTSO-E')\n# filename = 'entso_e_' + res_key + '.csv'\n# df.to_csv(filename, float_format='%.4f', date_format='%Y-%m-%dT%H:%M:%SZ')", "Write to CSV\nThis takes about 10 minutes to complete.", "os.chdir(out_path)\n# itertoools.chain() allows iterating over multiple dicts at once\nfor res_stacking_key, df in itertools.chain(\n combined_singleindex.items(),\n combined_multiindex.items(),\n combined_stacked.items()):\n\n df = df.copy()\n\n # convert the format of the cet_cest-timestamp to ISO-8601\n if not res_stacking_key.split('_')[1] == 'stacked':\n df.iloc[:, 0] = df.iloc[:, 0].dt.strftime('%Y-%m-%dT%H:%M:%S%z') # https://frictionlessdata.io/specs/table-schema/#date\n filename = 'time_series_' + res_stacking_key + '.csv'\n df.to_csv(filename, float_format='%.4f',\n date_format='%Y-%m-%dT%H:%M:%SZ')", "Create metadata\nThis section: create the metadata, both general and column-specific. All metadata we be stored as a JSON file. 
Takes 10s to run.", "os.chdir(out_path)\nmake_json(combined, info_cols, version, changes, headers, areas,\n start_from_user, end_from_user)", "Write checksums.txt\nWe publish SHA-checksums for the outputfiles on GitHub to allow verifying the integrity of outputfiles on the OPSD server.", "os.chdir(out_path)\nfiles = os.listdir(out_path)\n\n# Create checksums.txt in the output directory\nwith open('checksums.txt', 'w') as f:\n for file_name in files:\n if file_name.split('.')[-1] in ['csv', 'sqlite', 'xlsx']:\n file_hash = get_sha_hash(file_name)\n f.write('{},{}\\n'.format(file_name, file_hash))\n\n# Copy the file to root directory from where it will be pushed to GitHub,\n# leaving a copy in the version directory for reference\ncopyfile('checksums.txt', os.path.join(home_path, 'checksums.txt'))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
metpy/MetPy
v1.1/_downloads/7fd39302ff9f3fa4a7870d3c31b04722/cross_section.ipynb
bsd-3-clause
[ "%matplotlib inline", "Cross Section Analysis\nThe MetPy function metpy.interpolate.cross_section can obtain a cross-sectional slice through\ngridded data.", "import cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport xarray as xr\n\nimport metpy.calc as mpcalc\nfrom metpy.cbook import get_test_data\nfrom metpy.interpolate import cross_section", "Getting the data\nThis example uses NARR reanalysis data\nfor 18 UTC 04 April 1987 from NCEI.\nWe use MetPy's CF parsing to get the data ready for use, and squeeze down the size-one time\ndimension.", "data = xr.open_dataset(get_test_data('narr_example.nc', False))\ndata = data.metpy.parse_cf().squeeze()\nprint(data)", "Define start and end points:", "start = (37.0, -105.0)\nend = (35.5, -65.0)", "Get the cross section, and convert lat/lon to supplementary coordinates:", "cross = cross_section(data, start, end).set_coords(('lat', 'lon'))\nprint(cross)", "For this example, we will be plotting potential temperature, relative humidity, and\ntangential/normal winds. 
And so, we need to calculate those, and add them to the dataset:", "cross['Potential_temperature'] = mpcalc.potential_temperature(\n cross['isobaric'],\n cross['Temperature']\n)\ncross['Relative_humidity'] = mpcalc.relative_humidity_from_specific_humidity(\n cross['isobaric'],\n cross['Temperature'],\n cross['Specific_humidity']\n)\ncross['u_wind'] = cross['u_wind'].metpy.convert_units('knots')\ncross['v_wind'] = cross['v_wind'].metpy.convert_units('knots')\ncross['t_wind'], cross['n_wind'] = mpcalc.cross_section_components(\n cross['u_wind'],\n cross['v_wind']\n)\n\nprint(cross)", "Now, we can make the plot.", "# Define the figure object and primary axes\nfig = plt.figure(1, figsize=(16., 9.))\nax = plt.axes()\n\n# Plot RH using contourf\nrh_contour = ax.contourf(cross['lon'], cross['isobaric'], cross['Relative_humidity'],\n levels=np.arange(0, 1.05, .05), cmap='YlGnBu')\nrh_colorbar = fig.colorbar(rh_contour)\n\n# Plot potential temperature using contour, with some custom labeling\ntheta_contour = ax.contour(cross['lon'], cross['isobaric'], cross['Potential_temperature'],\n levels=np.arange(250, 450, 5), colors='k', linewidths=2)\ntheta_contour.clabel(theta_contour.levels[1::2], fontsize=8, colors='k', inline=1,\n inline_spacing=8, fmt='%i', rightside_up=True, use_clabeltext=True)\n\n# Plot winds using the axes interface directly, with some custom indexing to make the barbs\n# less crowded\nwind_slc_vert = list(range(0, 19, 2)) + list(range(19, 29))\nwind_slc_horz = slice(5, 100, 5)\nax.barbs(cross['lon'][wind_slc_horz], cross['isobaric'][wind_slc_vert],\n cross['t_wind'][wind_slc_vert, wind_slc_horz],\n cross['n_wind'][wind_slc_vert, wind_slc_horz], color='k')\n\n# Adjust the y-axis to be logarithmic\nax.set_yscale('symlog')\nax.set_yticklabels(np.arange(1000, 50, -100))\nax.set_ylim(cross['isobaric'].max(), cross['isobaric'].min())\nax.set_yticks(np.arange(1000, 50, -100))\n\n# Define the CRS and inset axes\ndata_crs = 
data['Geopotential_height'].metpy.cartopy_crs\nax_inset = fig.add_axes([0.125, 0.665, 0.25, 0.25], projection=data_crs)\n\n# Plot geopotential height at 500 hPa using xarray's contour wrapper\nax_inset.contour(data['x'], data['y'], data['Geopotential_height'].sel(isobaric=500.),\n levels=np.arange(5100, 6000, 60), cmap='inferno')\n\n# Plot the path of the cross section\nendpoints = data_crs.transform_points(ccrs.Geodetic(),\n *np.vstack([start, end]).transpose()[::-1])\nax_inset.scatter(endpoints[:, 0], endpoints[:, 1], c='k', zorder=2)\nax_inset.plot(cross['x'], cross['y'], c='k', zorder=2)\n\n# Add geographic features\nax_inset.coastlines()\nax_inset.add_feature(cfeature.STATES.with_scale('50m'), edgecolor='k', alpha=0.2, zorder=0)\n\n# Set the titles and axes labels\nax_inset.set_title('')\nax.set_title(f'NARR Cross-Section \\u2013 {start} to {end} \\u2013 '\n f'Valid: {cross[\"time\"].dt.strftime(\"%Y-%m-%d %H:%MZ\").item()}\\n'\n 'Potential Temperature (K), Tangential/Normal Winds (knots), Relative Humidity '\n '(dimensionless)\\nInset: Cross-Section Path and 500 hPa Geopotential Height')\nax.set_ylabel('Pressure (hPa)')\nax.set_xlabel('Longitude (degrees east)')\nrh_colorbar.set_label('Relative Humidity (dimensionless)')\n\nplt.show()", "Note: The x-axis can display any variable that is the same length as the\nplotted variables, including latitude. Additionally, arguments can be provided\nto ax.set_xticklabels to label lat/lon pairs, similar to the default NCL output." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
drivendata/data-science-is-software
notebooks/lectures/4.0-testing.ipynb
mit
[ "<table style=\"width:100%; border: 0px solid black;\">\n <tr style=\"width: 100%; border: 0px solid black;\">\n <td style=\"width:75%; border: 0px solid black;\">\n <a href=\"http://www.drivendata.org\">\n <img src=\"https://s3.amazonaws.com/drivendata.org/kif-example/img/dd.png\" />\n </a>\n </td>\n </tr>\n</table>\n\nData Science is Software\nDeveloper #lifehacks for the Jupyter Data Scientist\nSection 4: Don't let other people break your toys\nMotivation\n\n\"Many machine learning algorithms have a curious property: they are robust against bugs. Since they’re designed to deal with noisy data, they can often deal pretty well with noise caused by math mistakes as well. If you make a math mistake in your implementation, the algorithm might still make sensible-looking predictions. This is bad news, not good news. It means bugs are subtle and hard to detect. Your algorithm might work well in some situations, such as small toy datasets you use for validation, and completely fail in other situations — high dimensions, large numbers of training examples, noisy observations, etc.\" — Roger Gross, \"Testing MCMC code, part 1: unit tests\", Harvard Intelligent Probabilistic Systems group", "from __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nPROJ_ROOT = os.path.abspath(os.path.join(os.pardir, os.pardir))", "numpy.testing\nProvides useful assertion methods for values that are numerically close and for numpy arrays.", "data = np.random.normal(0.0, 1.0, 1000000)\nassert np.mean(data) == 0.0\n\nnp.testing.assert_almost_equal(np.mean(data), 0.0, decimal=2)\n\na = np.random.normal(0, 0.0001, 10000)\nb = np.random.normal(0, 0.0001, 10000)\n\nnp.testing.assert_array_equal(a, b)\n\nnp.testing.assert_array_almost_equal(a, b, decimal=3)", "engarde decorators\nA new library that lets you practice defensive program--specifically with pandas DataFrame objects. 
It provides a set of decorators that check the return value of any function that returns a DataFrame and confirms that it conforms to the rules.", "import engarde.decorators as ed\n\ntest_data = pd.DataFrame({'a': np.random.normal(0, 1, 100),\n 'b': np.random.normal(0, 1, 100)})\n\n@ed.none_missing()\ndef process(dataframe):\n dataframe.loc[10, 'a'] = 1.0\n return dataframe\n\nprocess(test_data).head()", "engarde has an awesome set of decorators:\n\nnone_missing - no NaNs (great for machine learning--sklearn does not care for NaNs)\nhas_dtypes - make sure the dtypes are what you expect\nverify - runs an arbitrary function on the dataframe\nverify_all - makes sure every element returns true for a given function\n\nMore can be found in the docs.\n#lifehack: test your data science code. \nCode coverage\nWhat are those tests getting up to? Sometimes you think you wrote test cases that cover anything that might be interesting. But, sometimes you're wrong.\ncoverage.py is an amazing tool for seeing what code gets executed when you run your test suite. You can run these commands to generate a code coverage report:\ncoverage run --source src -m pytest\ncoverage html\ncoverage report" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
carefree0910/MachineLearning
Notebooks/SVM/zh-cn/LinearSVM.ipynb
mit
[ "线性支持向量机的朴素实现\n\n虽然从形式上来说,线性支持向量机(LinearSVM)和感知机的差别只在于损失函数,但如果只是简单地将感知机的训练策略(亦即每次只选出使得损失函数最大的样本点来进行梯度下降)迁移过来的话、会引发一些问题。为方便,我们称感知机的训练策略为极大梯度下降法(注:这不是被广泛承认的称谓,只是本文的一个代称)\n我们会先展示极大梯度下降法的有效性,然后会展示极大梯度下降法存在的问题,最后则会介绍一种解决方案、并将该解决方案拓展为 Mini-Batch 梯度下降法(MBGD)\n\n极大梯度下降法训练 LinearSVM", "import numpy as np\n\nclass LinearSVM:\n def __init__(self):\n self._w = self._b = None\n \n def fit(self, x, y, c=1, lr=0.01, epoch=10000):\n x, y = np.asarray(x, np.float32), np.asarray(y, np.float32)\n self._w = np.zeros(x.shape[1])\n self._b = 0.\n for _ in range(epoch):\n self._w *= 1 - lr\n err = 1 - y * self.predict(x, True)\n idx = np.argmax(err)\n # 注意即使所有 x, y 都满足 w·x + b >= 1\n # 由于损失里面有一个 w 的模长平方\n # 所以仍然不能终止训练,只能截断当前的梯度下降\n if err[idx] <= 0:\n continue\n delta = lr * c * y[idx]\n self._w += delta * x[idx]\n self._b += delta\n \n def predict(self, x, raw=False):\n x = np.asarray(x, np.float32)\n y_pred = x.dot(self._w) + self._b\n if raw:\n return y_pred\n return np.sign(y_pred).astype(np.float32)", "测试", "from Util import gen_two_clusters\n\nx, y = gen_two_clusters()\nsvm = LinearSVM()\nsvm.fit(x, y)\nprint(\"准确率:{:8.6} %\".format((svm.predict(x) == y).mean() * 100))", "可视化", "from Util import visualize2d\n\nvisualize2d(svm, x, y)\nvisualize2d(svm, x, y, True)", "可视化训练过程\n\n实现思路如下:\n在每一步迭代时生成一张如上所示的图像\n在最后调用相应的第三方库(imageio)、将生成的所有图像合成一个 mp4\n用ffmpeg将 mp4 转为方便分享的 gif\n\n\n\n\n\n存在的问题\n\n由上述可视化其实已经可以看出,用极大梯度下降法训练 LinearSVM 会非常不稳定\n从直观上来说,由于 LinearSVM 的损失函数比感知机要更复杂,所以相应的函数形状也会更复杂。这意味着当数据集稍微差一点的时候,直接单纯地应用极大梯度下降法可能会导致一些问题——比如说模型会卡在某个很奇怪的地方无法自拔(什么鬼)\n\n可以通过下面这个栗子来直观感受一下 LinearSVM 存在的这些问题:", "# 注意我们只是把 center 参数(亦即正负样本点的“中心”)\n# 从原点(0, 0)(默认值)挪到(5, 5)(亦即破坏了一定的对称性)、\n# 并将正负样本点之间的距离(dis 参数)稍微拉近了一点而已,\n# 结果就已经惨不忍睹了\nx, y = gen_two_clusters(center=5, dis=1)\nsvm = LinearSVM()\nsvm.fit(x, y)\nprint(\"准确率:{:8.6} %\".format((svm.predict(x) == y).mean() * 100))\nvisualize2d(svm, x, y)\nvisualize2d(svm, x, y, True)", "通过下面这张动图,我们能够直观地感受极大梯度下降法下 LinearSVM 的训练过程:\n\n可以看到,LinearSVM 
确实卡在了奇怪的地方\n原理我不敢乱说,这里只提供一个牵强附会的直观解释:\n\n每次只取使得损失函数极大的一个样本进行梯度下降$\\rightarrow$模型在某个地方可能来来回回都只受那么几个样本的影响$\\rightarrow$死循环(什么鬼!)\n\n专业的理论就留待专业的观众老爷补充吧 ( σ'ω')σ\n解决方案\n极大梯度下降法的最大问题很有可能在于它每次都只根据使得损失函数最大的一个样本点来进行梯度下降,这会导致两个问题:\n+ 模型的训练将会很不稳定(这点和随机梯度下降类似)\n+ 模型对噪声或“不太好的点”极为敏感(因为它们往往会使损失函数最大)\n按部就班、我们先解决第一个问题,为此我们只需要多选出几个样本点(比如选出使得损失函数最大的 top n 个样本)、然后取它们梯度的平均即可\nTop n 梯度下降法\n\n注:该名字同样只是我瞎编的一个名字(喂)", "# 继承上一个 LinearSVM 以重复利用代码\nclass LinearSVM2(LinearSVM): \n # 用参数 batch_size 表示 Top n 中的 n\n def fit(self, x, y, c=1, lr=0.01, batch_size=128, epoch=10000):\n x, y = np.asarray(x, np.float32), np.asarray(y, np.float32)\n # 如果 batch_size 设得比样本总数还多、则将其改为样本总数\n batch_size = min(batch_size, len(y))\n self._w = np.zeros(x.shape[1])\n self._b = 0.\n for _ in range(epoch):\n self._w *= 1 - lr\n err = 1 - y * self.predict(x, True)\n # 利用 argsort 函数直接取出 Top n\n # 注意 argsort 的结果是从小到大的,所以要用 [::-1] 把结果翻转一下\n batch = np.argsort(err)[-batch_size:][::-1]\n err = err[batch]\n if err[0] <= 0:\n continue\n # 注意这里我们只能利用误分类的样本做梯度下降\n # 因为被正确分类的样本处、这一部分的梯度为 0\n mask = err > 0\n batch = batch[mask]\n # 取各梯度平均并做一步梯度下降\n delta = lr * c * y[batch]\n self._w += np.mean(delta[..., None] * x[batch], axis=0)\n self._b += np.mean(delta)", "测试", "x, y = gen_two_clusters(center=5, dis=1)\nsvm = LinearSVM2()\nsvm.fit(x, y)\nprint(\"准确率:{:8.6} %\".format((svm.predict(x) == y).mean() * 100))\nvisualize2d(svm, x, y)\nvisualize2d(svm, x, y, True)", "Mini-Batch 梯度下降法(MBGD)\n\n上述解决方案已经不错,但我们还是有些太“激进”了——我们每次进行梯度下降时,选取的样本点都是使得损失函数最大的样本点,但一般而言使损失函数最大的样本点如果不是关键的样本点(支持向量)的话、通常而言会是噪声。当数据集比较差时,噪声所带来的副作用很有可能就会盖过支持向量带来的正效应\n为此,我们应该引入一定的随机性。神经网络的训练中所用的 MBGD 就是很好的方法:每次都从数据集中抽样出一个小 Batch,然后用这个 Batch 来做梯度下降", "class LinearSVM3(LinearSVM):\n def fit(self, x, y, c=1, lr=0.01, batch_size=128, epoch=10000):\n x, y = np.asarray(x, np.float32), np.asarray(y, np.float32)\n batch_size = min(batch_size, len(y))\n self._w = np.zeros(x.shape[1])\n self._b = 0.\n for _ in range(epoch):\n self._w *= 1 - lr\n # 随机选取 batch_size 个样本\n batch = 
np.random.choice(len(x), batch_size)\n x_batch, y_batch = x[batch], y[batch]\n err = 1 - y_batch * self.predict(x_batch, True)\n if np.max(err) <= 0:\n continue\n mask = err > 0\n delta = lr * c * y_batch[mask]\n self._w += np.mean(delta[..., None] * x_batch[mask], axis=0)\n self._b += np.mean(delta)", "测试", "# 进一步拉近正负样本点间的距离以观察性能\nx, y = gen_two_clusters(center=5, dis=0.5)\ntop_n_svm = LinearSVM2()\ntop_n_svm.fit(x, y)\nprint(\"Top n LinearSVM 准确率:{:8.6} %\".format((top_n_svm.predict(x) == y).mean() * 100))\nmbgd_svm = LinearSVM3()\nmbgd_svm.fit(x, y)\nprint(\"MBGD LinearSVM 准确率:{:8.6} %\".format((mbgd_svm.predict(x) == y).mean() * 100))\n\nvisualize2d(top_n_svm, x, y)\nvisualize2d(mbgd_svm, x, y)", "存在的问题\nTop n LinearSVM 和 MBGD LinearSVM 各有优劣,很难直接说谁好谁坏;但它们都有一个共同的问题,那就是它们所运用的梯度下降法都只是朴素的Vanilla Update,这会导致当数据的 scale 很大时模型对参数极为敏感、从而导致持续的震荡(所谓的 scale 比较大,可以理解为“规模很大”,或者直白一点——以二维数据为例的话——就是横纵坐标的数值很大)\n可以通过下面这个栗子来直观感受一下 scale 很大的数据所带来的问题:", "# 将 scale 从 1(默认)调成 5\nx, y = gen_two_clusters(center=5, scale=5)\ntop_n_svm = LinearSVM2()\ntop_n_svm.fit(x, y)\nprint(\"Top n LinearSVM 准确率:{:8.6} %\".format((top_n_svm.predict(x) == y).mean() * 100))\nmbgd_svm = LinearSVM3()\nmbgd_svm.fit(x, y)\nprint(\"MBGD LinearSVM 准确率:{:8.6} %\".format((mbgd_svm.predict(x) == y).mean() * 100))\n\nvisualize2d(top_n_svm, x, y)\nvisualize2d(mbgd_svm, x, y)", "通过下面这张动图,我们能够直观地感受数据的 scale 很大时 LinearSVM 的训练过程:\n\n可以看到,模型确实一直在持续震荡\n解决方案\n\n采用更好的梯度下降法,比如Adam之类的\n进行数据预处理、把数据的 scale 弄回 1\n\n关于Adam等梯度下降算法的实现和在 LinearSVM 上的应用可以参见这里和这里,下面我们就仅展示进行数据预处理后的结果", "x, y = gen_two_clusters(center=5, dis=1, scale=5)\n# 进行归一化处理\nx -= x.mean(axis=0)\nx /= x.std(axis=0)\n# Top 1 梯度下降法即为极大梯度下降法\ntop_1_svm = LinearSVM()\ntop_1_svm.fit(x, y)\nprint(\"Top 1 LinearSVM 准确率:{:8.6} %\".format((top_1_svm.predict(x) == y).mean() * 100))\ntop_n_svm = LinearSVM2()\ntop_n_svm.fit(x, y)\nprint(\"Top n LinearSVM 准确率:{:8.6} %\".format((top_n_svm.predict(x) == y).mean() * 100))\nmbgd_svm = LinearSVM3()\nmbgd_svm.fit(x, 
y)\nprint(\"MBGD LinearSVM 准确率:{:8.6} %\".format((mbgd_svm.predict(x) == y).mean() * 100))\n\nvisualize2d(top_1_svm, x, y)\nvisualize2d(top_n_svm, x, y)\nvisualize2d(mbgd_svm, x, y)", "可以看到在归一化处理后,即使是简单地采用极大梯度下降法,也能够在比较苛刻的数据(center=5、dis=1、scale=5)上表现得不错" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tritemio/multispot_paper
out_notebooks/usALEX-5samples-PR-raw-out-all-ph-7d.ipynb
mit
[ "Executed: Mon Mar 27 11:34:11 2017\nDuration: 8 seconds.\nusALEX-5samples - Template\n\nThis notebook is executed through 8-spots paper analysis.\nFor a direct execution, uncomment the cell below.", "ph_sel_name = \"all-ph\"\n\ndata_id = \"7d\"\n\n# ph_sel_name = \"all-ph\"\n# data_id = \"7d\"", "Load software and filenames definitions", "from fretbursts import *\n\ninit_notebook()\nfrom IPython.display import display", "Data folder:", "data_dir = './data/singlespot/'\n\nimport os\ndata_dir = os.path.abspath(data_dir) + '/'\nassert os.path.exists(data_dir), \"Path '%s' does not exist.\" % data_dir", "List of data files:", "from glob import glob\nfile_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f)\n## Selection for POLIMI 2012-11-26 datatset\nlabels = ['17d', '27d', '7d', '12d', '22d']\nfiles_dict = {lab: fname for lab, fname in zip(labels, file_list)}\nfiles_dict\n\nph_sel_map = {'all-ph': Ph_sel('all'), 'Dex': Ph_sel(Dex='DAem'), \n 'DexDem': Ph_sel(Dex='Dem')}\nph_sel = ph_sel_map[ph_sel_name]\n\ndata_id, ph_sel_name", "Data load\nInitial loading of the data:", "d = loader.photon_hdf5(filename=files_dict[data_id])", "Laser alternation selection\nAt this point we have only the timestamps and the detector numbers:", "d.ph_times_t, d.det_t", "We need to define some parameters: donor and acceptor ch, excitation period and donor and acceptor excitiations:", "d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)", "We should check if everithing is OK with an alternation histogram:", "plot_alternation_hist(d)", "If the plot looks good we can apply the parameters with:", "loader.alex_apply_period(d)", "Measurements infos\nAll the measurement data is in the d variable. 
We can print it:", "d", "Or check the measurements duration:", "d.time_max", "Compute background\nCompute the background using automatic threshold:", "d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7)\n\ndplot(d, timetrace_bg)\n\nd.rate_m, d.rate_dd, d.rate_ad, d.rate_aa", "Burst search and selection", "bs_kws = dict(L=10, m=10, F=7, ph_sel=ph_sel)\nd.burst_search(**bs_kws)\n\nth1 = 30\nds = d.select_bursts(select_bursts.size, th1=30)\n\nbursts = (bext.burst_data(ds, include_bg=True, include_ph_index=True)\n .round({'E': 6, 'S': 6, 'bg_d': 3, 'bg_a': 3, 'bg_aa': 3, 'nd': 3, 'na': 3, 'naa': 3, 'nda': 3, 'nt': 3, 'width_ms': 4}))\n\nbursts.head()\n\nburst_fname = ('results/bursts_usALEX_{sample}_{ph_sel}_F{F:.1f}_m{m}_size{th}.csv'\n .format(sample=data_id, th=th1, **bs_kws))\nburst_fname\n\nbursts.to_csv(burst_fname)\n\nassert d.dir_ex == 0\nassert d.leakage == 0\n\nprint(d.ph_sel)\ndplot(d, hist_fret);\n\n# if data_id in ['7d', '27d']:\n# ds = d.select_bursts(select_bursts.size, th1=20)\n# else:\n# ds = d.select_bursts(select_bursts.size, th1=30)\n\nds = d.select_bursts(select_bursts.size, add_naa=False, th1=30)\n\nn_bursts_all = ds.num_bursts[0]\n\ndef select_and_plot_ES(fret_sel, do_sel):\n ds_fret= ds.select_bursts(select_bursts.ES, **fret_sel)\n ds_do = ds.select_bursts(select_bursts.ES, **do_sel)\n bpl.plot_ES_selection(ax, **fret_sel)\n bpl.plot_ES_selection(ax, **do_sel) \n return ds_fret, ds_do\n\nax = dplot(ds, hist2d_alex, S_max_norm=2, scatter_alpha=0.1)\n\nif data_id == '7d':\n fret_sel = dict(E1=0.60, E2=1.2, S1=0.2, S2=0.9, rect=False)\n do_sel = dict(E1=-0.2, E2=0.5, S1=0.8, S2=2, rect=True) \n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\n \nelif data_id == '12d':\n fret_sel = dict(E1=0.30,E2=1.2,S1=0.131,S2=0.9, rect=False)\n do_sel = dict(E1=-0.4, E2=0.4, S1=0.8, S2=2, rect=False)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\n\nelif data_id == '17d':\n fret_sel = dict(E1=0.01, E2=0.98, S1=0.14, S2=0.88, 
rect=False)\n do_sel = dict(E1=-0.4, E2=0.4, S1=0.80, S2=2, rect=False)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\n\nelif data_id == '22d':\n fret_sel = dict(E1=-0.16, E2=0.6, S1=0.2, S2=0.80, rect=False)\n do_sel = dict(E1=-0.2, E2=0.4, S1=0.85, S2=2, rect=True)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) \n\nelif data_id == '27d':\n fret_sel = dict(E1=-0.1, E2=0.5, S1=0.2, S2=0.82, rect=False)\n do_sel = dict(E1=-0.2, E2=0.4, S1=0.88, S2=2, rect=True)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) \n\nn_bursts_do = ds_do.num_bursts[0]\nn_bursts_fret = ds_fret.num_bursts[0]\n\nn_bursts_do, n_bursts_fret\n\nd_only_frac = 1.*n_bursts_do/(n_bursts_do + n_bursts_fret)\nprint ('D-only fraction:', d_only_frac)\n\ndplot(ds_fret, hist2d_alex, scatter_alpha=0.1);\n\ndplot(ds_do, hist2d_alex, S_max_norm=2, scatter=False);", "Donor Leakage fit\nHalf-Sample Mode\nFit peak usng the mode computed with the half-sample algorithm (Bickel 2005).", "def hsm_mode(s):\n \"\"\"\n Half-sample mode (HSM) estimator of `s`.\n\n `s` is a sample from a continuous distribution with a single peak.\n \n Reference:\n Bickel, Fruehwirth (2005). 
arXiv:math/0505419\n \"\"\"\n s = memoryview(np.sort(s))\n i1 = 0\n i2 = len(s)\n\n while i2 - i1 > 3:\n n = (i2 - i1) // 2\n w = [s[n-1+i+i1] - s[i+i1] for i in range(n)]\n i1 = w.index(min(w)) + i1\n i2 = i1 + n\n\n if i2 - i1 == 3:\n if s[i1+1] - s[i1] < s[i2] - s[i1 + 1]:\n i2 -= 1\n elif s[i1+1] - s[i1] > s[i2] - s[i1 + 1]:\n i1 += 1\n else:\n i1 = i2 = i1 + 1\n\n return 0.5*(s[i1] + s[i2])\n\nE_pr_do_hsm = hsm_mode(ds_do.E[0])\nprint (\"%s: E_peak(HSM) = %.2f%%\" % (ds.ph_sel, E_pr_do_hsm*100))", "Gaussian Fit\nFit the histogram with a gaussian:", "E_fitter = bext.bursts_fitter(ds_do, weights=None)\nE_fitter.histogram(bins=np.arange(-0.2, 1, 0.03))\n\nE_fitter.fit_histogram(model=mfit.factory_gaussian())\nE_fitter.params\n\nres = E_fitter.fit_res[0]\nres.params.pretty_print()\n\nE_pr_do_gauss = res.best_values['center']\nE_pr_do_gauss", "KDE maximum", "bandwidth = 0.03\nE_range_do = (-0.1, 0.15)\nE_ax = np.r_[-0.2:0.401:0.0002]\n\nE_fitter.calc_kde(bandwidth=bandwidth)\nE_fitter.find_kde_max(E_ax, xmin=E_range_do[0], xmax=E_range_do[1])\nE_pr_do_kde = E_fitter.kde_max_pos[0]\nE_pr_do_kde", "Leakage summary", "mfit.plot_mfit(ds_do.E_fitter, plot_kde=True, plot_model=False)\nplt.axvline(E_pr_do_hsm, color='m', label='HSM')\nplt.axvline(E_pr_do_gauss, color='k', label='Gauss')\nplt.axvline(E_pr_do_kde, color='r', label='KDE')\nplt.xlim(0, 0.3)\nplt.legend()\nprint('Gauss: %.2f%%\\n KDE: %.2f%%\\n HSM: %.2f%%' % \n (E_pr_do_gauss*100, E_pr_do_kde*100, E_pr_do_hsm*100))", "Burst size distribution", "nt_th1 = 50\n\ndplot(ds_fret, hist_size, which='all', add_naa=False)\nxlim(-0, 250)\nplt.axvline(nt_th1)\n\nTh_nt = np.arange(35, 120)\nnt_th = np.zeros(Th_nt.size)\nfor i, th in enumerate(Th_nt):\n ds_nt = ds_fret.select_bursts(select_bursts.size, th1=th)\n nt_th[i] = (ds_nt.nd[0] + ds_nt.na[0]).mean() - th\n\nplt.figure()\nplot(Th_nt, nt_th)\nplt.axvline(nt_th1)\n\nnt_mean = nt_th[np.where(Th_nt == nt_th1)][0]\nnt_mean", "Fret fit\nMax position of the Kernel Density 
Estimation (KDE):", "E_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, bandwidth=bandwidth, weights='size')\nE_fitter = ds_fret.E_fitter\n\nE_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])\nE_fitter.fit_histogram(mfit.factory_gaussian(center=0.5))\n\nE_fitter.fit_res[0].params.pretty_print()\n\nfig, ax = plt.subplots(1, 2, figsize=(14, 4.5))\nmfit.plot_mfit(E_fitter, ax=ax[0])\nmfit.plot_mfit(E_fitter, plot_model=False, plot_kde=True, ax=ax[1])\nprint('%s\\nKDE peak %.2f ' % (ds_fret.ph_sel, E_pr_fret_kde*100))\ndisplay(E_fitter.params*100)", "Weighted mean of $E$ of each burst:", "ds_fret.fit_E_m(weights='size')", "Gaussian fit (no weights):", "ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.03], weights=None)", "Gaussian fit (using burst size as weights):", "ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.005], weights='size')\n\nE_kde_w = E_fitter.kde_max_pos[0]\nE_gauss_w = E_fitter.params.loc[0, 'center']\nE_gauss_w_sig = E_fitter.params.loc[0, 'sigma']\nE_gauss_w_err = float(E_gauss_w_sig/np.sqrt(ds_fret.num_bursts[0]))\nE_gauss_w_fiterr = E_fitter.fit_res[0].params['center'].stderr\nE_kde_w, E_gauss_w, E_gauss_w_sig, E_gauss_w_err, E_gauss_w_fiterr", "Stoichiometry fit\nMax position of the Kernel Density Estimation (KDE):", "S_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, burst_data='S', bandwidth=0.03) #weights='size', add_naa=True)\nS_fitter = ds_fret.S_fitter\n\nS_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])\nS_fitter.fit_histogram(mfit.factory_gaussian(), center=0.5)\n\nfig, ax = plt.subplots(1, 2, figsize=(14, 4.5))\nmfit.plot_mfit(S_fitter, ax=ax[0])\nmfit.plot_mfit(S_fitter, plot_model=False, plot_kde=True, ax=ax[1])\nprint('%s\\nKDE peak %.2f ' % (ds_fret.ph_sel, S_pr_fret_kde*100))\ndisplay(S_fitter.params*100)\n\nS_kde = S_fitter.kde_max_pos[0]\nS_gauss = S_fitter.params.loc[0, 'center']\nS_gauss_sig = S_fitter.params.loc[0, 'sigma']\nS_gauss_err = 
float(S_gauss_sig/np.sqrt(ds_fret.num_bursts[0]))\nS_gauss_fiterr = S_fitter.fit_res[0].params['center'].stderr\nS_kde, S_gauss, S_gauss_sig, S_gauss_err, S_gauss_fiterr", "The Maximum likelihood fit for a Gaussian population is the mean:", "S = ds_fret.S[0]\nS_ml_fit = (S.mean(), S.std())\nS_ml_fit", "Computing the weighted mean and weighted standard deviation we get:", "weights = bl.fret_fit.get_weights(ds_fret.nd[0], ds_fret.na[0], weights='size', naa=ds_fret.naa[0], gamma=1.)\nS_mean = np.dot(weights, S)/weights.sum()\nS_std_dev = np.sqrt(\n np.dot(weights, (S - S_mean)**2)/weights.sum())\nS_wmean_fit = [S_mean, S_std_dev]\nS_wmean_fit", "Save data to file", "sample = data_id", "The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.", "variables = ('sample n_bursts_all n_bursts_do n_bursts_fret '\n 'E_kde_w E_gauss_w E_gauss_w_sig E_gauss_w_err E_gauss_w_fiterr '\n 'S_kde S_gauss S_gauss_sig S_gauss_err S_gauss_fiterr '\n 'E_pr_do_kde E_pr_do_hsm E_pr_do_gauss nt_mean\\n')", "This is just a trick to format the different variables:", "variables_csv = variables.replace(' ', ',')\nfmt_float = '{%s:.6f}'\nfmt_int = '{%s:d}'\nfmt_str = '{%s}'\nfmt_dict = {**{'sample': fmt_str}, \n **{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}}\nvar_dict = {name: eval(name) for name in variables.split()}\nvar_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\\n'\ndata_str = var_fmt.format(**var_dict)\n\nprint(variables_csv)\nprint(data_str)\n\n# NOTE: The file name should be the notebook name but with .csv extension\nwith open('results/usALEX-5samples-PR-raw-%s.csv' % ph_sel_name, 'a') as f:\n f.seek(0, 2)\n if f.tell() == 0:\n f.write(variables_csv)\n f.write(data_str)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
dev/_downloads/b99fcf919e5d2f612fcfee22adcfc330/40_autogenerate_metadata.ipynb
bsd-3-clause
[ "%matplotlib inline", "Auto-generating Epochs metadata\nThis tutorial shows how to auto-generate metadata for ~mne.Epochs, based on\nevents via mne.epochs.make_metadata.\nWe are going to use data from the erp-core-dataset (derived from\n:footcite:Kappenman2021). This is EEG data from a single participant\nperforming an active visual task (Eriksen flanker task).\n<div class=\"alert alert-info\"><h4>Note</h4><p>If you wish to skip the introductory parts of this tutorial, you may jump\n straight to `tut-autogenerate-metadata-ern` after completing the data\n import and event creation in the\n `tut-autogenerate-metadata-preparation` section.</p></div>\n\nThis tutorial is loosely divided into two parts:\n\nWe will first focus on producing ERP time-locked to the visual\n stimulation, conditional on response correctness and response time in\n order to familiarize ourselves with the ~mne.epochs.make_metadata\n function.\nAfter that, we will calculate ERPs time-locked to the responses – again,\n conditional on response correctness – to visualize the error-related\n negativity (ERN), i.e. the ERP component associated with incorrect\n behavioral responses.\n\nPreparation\nLet's start by reading, filtering, and producing a simple visualization of the\nraw data. 
The data is pretty clean and contains very few blinks, so there's no\nneed to apply sophisticated preprocessing and data cleaning procedures.\nWe will also convert the ~mne.Annotations contained in this dataset to events\nby calling mne.events_from_annotations.", "from pathlib import Path\nimport matplotlib.pyplot as plt\nimport mne\n\n\ndata_dir = Path(mne.datasets.erp_core.data_path())\ninfile = data_dir / 'ERP-CORE_Subject-001_Task-Flankers_eeg.fif'\n\nraw = mne.io.read_raw(infile, preload=True)\nraw.filter(l_freq=0.1, h_freq=40)\nraw.plot(start=60)\n\n# extract events\nall_events, all_event_id = mne.events_from_annotations(raw)", "Creating metadata from events\nThe basics of make_metadata\nNow it's time to think about the time windows to use for epoching and\nmetadata generation. It is important to understand that these time windows\nneed not be the same! That is, the automatically generated metadata might\ninclude information about events from only a fraction of the epochs duration;\nor it might include events that occurred well outside a given epoch.\nLet us look at a concrete example. In the Flankers task of the ERP CORE\ndataset, participants were required to respond to visual stimuli by pressing\na button. We're interested in looking at the visual evoked responses (ERPs)\nof trials with correct responses. Assume that based on literature\nstudies, we decide that responses later than 1500 ms after stimulus onset are\nto be considered invalid, because they don't capture the neuronal processes\nof interest here. We can approach this in the following way with the help of\nmne.epochs.make_metadata:", "# metadata for each epoch shall include events from the range: [0.0, 1.5] s,\n# i.e. starting with stimulus onset and expanding beyond the end of the epoch\nmetadata_tmin, metadata_tmax = 0.0, 1.5\n\n# auto-create metadata\n# this also returns a new events array and an event_id dictionary. 
we'll see\n# later why this is important\nmetadata, events, event_id = mne.epochs.make_metadata(\n events=all_events, event_id=all_event_id,\n tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'])\n\n# let's look at what we got!\nmetadata", "Specifying time-locked events\nWe can see that the generated table has 802 rows, each one corresponding to\nan individual event in all_events. The first column, event_name,\ncontains the name of the respective event around which the metadata of that\nspecific column was generated – we'll call that the \"time-locked event\",\nbecause we'll assign it time point zero.\nThe names of the remaining columns correspond to the event names specified in\nthe all_event_id dictionary. These columns contain floats; the values\nrepresent the latency of that specific event in seconds, relative to\nthe time-locked event (the one mentioned in the event_name column).\nFor events that didn't occur within the given time window, you'll see\na value of NaN, simply indicating that no event latency could be\nextracted.\nNow, there's a problem here. We want investigate the visual ERPs only,\nconditional on responses. But the metadata that was just created contains\none row for every event, including responses. 
While we could create\nepochs for all events, allowing us to pass those metadata, and later subset\nthe created events, there's a more elegant way to handle things:\n~mne.epochs.make_metadata has a row_events parameter that\nallows us to specify for which events to create metadata rows, while\nstill creating columns for all events in the event_id dictionary.\nBecause the metadata, then, only pertains to a subset of our original events,\nit's important to keep the returned events and event_id around for\nlater use when we're actually going to create our epochs, to ensure that\nmetadata, events, and event descriptions stay in sync.", "row_events = ['stimulus/compatible/target_left',\n 'stimulus/compatible/target_right',\n 'stimulus/incompatible/target_left',\n 'stimulus/incompatible/target_right']\n\nmetadata, events, event_id = mne.epochs.make_metadata(\n events=all_events, event_id=all_event_id,\n tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'],\n row_events=row_events)\n\nmetadata", "Keeping only the first events of a group\nThe metadata now contains 400 rows – one per stimulation – and the same\nnumber of columns as before. Great!\nWe have two types of responses in our data: response/left and\nresponse/right. We would like to map those to \"correct\" and \"incorrect\".\nTo make this easier, we can ask ~mne.epochs.make_metadata to generate an\nentirely new column that refers to the first response observed during the\ngiven time interval. This works by passing a subset of the\n:term:hierarchical event descriptors (HEDs, inspired by\n:footcite:BigdelyShamloEtAl2013) used to name events via the keep_first\nparameter. For example, in the case of the HEDs response/left and\nresponse/right, we could pass keep_first='response' to generate a new\ncolumn, response, containing the latency of the respective event. This\nvalue pertains only the first (or, in this specific example: the only)\nresponse, regardless of side (left or right). 
To indicate which event\ntype (here: response side) was matched, a second column is added:\nfirst_response. The values in this column are the event types without the\nstring used for matching, as it is already encoded as the column name, i.e.\nin our example, we expect it to only contain 'left' and 'right'.", "keep_first = 'response'\nmetadata, events, event_id = mne.epochs.make_metadata(\n events=all_events, event_id=all_event_id,\n tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'],\n row_events=row_events,\n keep_first=keep_first)\n\n# visualize response times regardless of side\nmetadata['response'].plot.hist(bins=50, title='Response Times')\n\n# the \"first_response\" column contains only \"left\" and \"right\" entries, derived\n# from the initial event named \"response/left\" and \"response/right\"\nprint(metadata['first_response'])", "We're facing a similar issue with the stimulus events, and now there are not\nonly two, but four different types: stimulus/compatible/target_left,\nstimulus/compatible/target_right, stimulus/incompatible/target_left,\nand stimulus/incompatible/target_right. Even more, because in the present\nparadigm stimuli were presented in rapid succession, sometimes multiple\nstimulus events occurred within the 1.5 second time window we're using to\ngenerate our metadata. See for example:", "metadata.loc[metadata['stimulus/compatible/target_left'].notna() &\n metadata['stimulus/compatible/target_right'].notna(),\n :]", "This can easily lead to confusion during later stages of processing, so let's\ncreate a column for the first stimulus – which will always be the time-locked\nstimulus, as our time interval starts at 0 seconds. 
We can pass a list of\nstrings to keep_first.", "keep_first = ['stimulus', 'response']\nmetadata, events, event_id = mne.epochs.make_metadata(\n events=all_events, event_id=all_event_id,\n tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'],\n row_events=row_events,\n keep_first=keep_first)\n\n# all times of the time-locked events should be zero\nassert all(metadata['stimulus'] == 0)\n\n# the values in the new \"first_stimulus\" and \"first_response\" columns indicate\n# which events were selected via \"keep_first\"\nmetadata[['first_stimulus', 'first_response']]", "Adding new columns to describe stimulation side and response correctness\nPerfect! Now it's time to define which responses were correct and incorrect.\nWe first add a column encoding the side of stimulation, and then simply\ncheck whether the response matches the stimulation side, and add this result\nto another column.", "# left-side stimulation\nmetadata.loc[metadata['first_stimulus'].isin(['compatible/target_left',\n 'incompatible/target_left']),\n 'stimulus_side'] = 'left'\n\n# right-side stimulation\nmetadata.loc[metadata['first_stimulus'].isin(['compatible/target_right',\n 'incompatible/target_right']),\n 'stimulus_side'] = 'right'\n\n# first assume all responses were incorrect, then mark those as correct where\n# the stimulation side matches the response side\nmetadata['response_correct'] = False\nmetadata.loc[metadata['stimulus_side'] == metadata['first_response'],\n 'response_correct'] = True\n\n\ncorrect_response_count = metadata['response_correct'].sum()\nprint(f'Correct responses: {correct_response_count}\\n'\n f'Incorrect responses: {len(metadata) - correct_response_count}')", "Creating Epochs with metadata, and visualizing ERPs\nIt's finally time to create our epochs! We set the metadata directly on\ninstantiation via the metadata parameter. 
Also it is important to\nremember to pass events and event_id as returned from\n~mne.epochs.make_metadata, as we only created metadata for a subset of\nour original events by passing row_events. Otherwise, the length\nof the metadata and the number of epochs would not match and MNE-Python\nwould raise an error.", "epochs_tmin, epochs_tmax = -0.1, 0.4 # epochs range: [-0.1, 0.4] s\nreject = {'eeg': 250e-6} # exclude epochs with strong artifacts\nepochs = mne.Epochs(raw=raw, tmin=epochs_tmin, tmax=epochs_tmax,\n events=events, event_id=event_id, metadata=metadata,\n reject=reject, preload=True)", "Lastly, let's visualize the ERPs evoked by the visual stimulation, once for\nall trials with correct responses, and once for all trials with correct\nresponses and a response time greater than 0.5 seconds\n(i.e., slow responses).", "vis_erp = epochs['response_correct'].average()\nvis_erp_slow = epochs['(not response_correct) & '\n '(response > 0.3)'].average()\n\nfig, ax = plt.subplots(2, figsize=(6, 6))\nvis_erp.plot(gfp=True, spatial_colors=True, axes=ax[0])\nvis_erp_slow.plot(gfp=True, spatial_colors=True, axes=ax[1])\nax[0].set_title('Visual ERPs – All Correct Responses')\nax[1].set_title('Visual ERPs – Slow Correct Responses')\nfig.tight_layout()\nfig", "Aside from the fact that the data for the (much fewer) slow responses looks\nnoisier – which is entirely to be expected – not much of an ERP difference\ncan be seen.\nApplying the knowledge: visualizing the ERN component\nIn the following analysis, we will use the same dataset as above, but\nwe'll time-lock our epochs to the response events, not to the stimulus\nonset. 
Comparing ERPs associated with correct and incorrect behavioral\nresponses, we should be able to see the error-related negativity (ERN) in\nthe difference wave.\nSince we want to time-lock our analysis to responses, for the automated\nmetadata generation we'll consider events occurring up to 1500 ms before\nthe response trigger.\nWe only wish to consider the last stimulus and response in each time\nwindow: Remember that we're dealing with rapid stimulus presentations in\nthis paradigm; taking the last response – at time point zero – and the last\nstimulus – the one closest to the response – ensures we actually create\nthe right stimulus-response pairings. We can achieve this by passing the\nkeep_last parameter, which works exactly like keep_first we got to\nknow above, only that it keeps the last occurrences of the specified\nevents and stores them in columns whose names start with last_.", "metadata_tmin, metadata_tmax = -1.5, 0\nrow_events = ['response/left', 'response/right']\nkeep_last = ['stimulus', 'response']\n\nmetadata, events, event_id = mne.epochs.make_metadata(\n events=all_events, event_id=all_event_id,\n tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'],\n row_events=row_events,\n keep_last=keep_last)", "Exactly like in the previous example, create new columns stimulus_side\nand response_correct.", "# left-side stimulation\nmetadata.loc[metadata['last_stimulus'].isin(['compatible/target_left',\n 'incompatible/target_left']),\n 'stimulus_side'] = 'left'\n\n# right-side stimulation\nmetadata.loc[metadata['last_stimulus'].isin(['compatible/target_right',\n 'incompatible/target_right']),\n 'stimulus_side'] = 'right'\n\n# first assume all responses were incorrect, then mark those as correct where\n# the stimulation side matches the response side\nmetadata['response_correct'] = False\nmetadata.loc[metadata['stimulus_side'] == metadata['last_response'],\n 'response_correct'] = True\n\nmetadata", "Now it's already time to epoch the data! 
When deciding upon the epochs\nduration for this specific analysis, we need to ensure we see quite a bit of\nsignal from before and after the motor response. We also must be aware of\nthe fact that motor-/muscle-related signals will most likely be present\nbefore the response button trigger pulse appears in our data, so the time\nperiod close to the response event should not be used for baseline\ncorrection. But at the same time, we don't want to use a baseline\nperiod that extends too far away from the button event. The following values\nseem to work quite well.", "epochs_tmin, epochs_tmax = -0.6, 0.4\nbaseline = (-0.4, -0.2)\nreject = {'eeg': 250e-6}\nepochs = mne.Epochs(raw=raw, tmin=epochs_tmin, tmax=epochs_tmax,\n baseline=baseline, reject=reject,\n events=events, event_id=event_id, metadata=metadata,\n preload=True)", "Let's do a final sanity check: we want to make sure that in every row, we\nactually have a stimulus. We use epochs.metadata (and not metadata)\nbecause when creating the epochs, we passed the reject parameter, and\nMNE-Python always ensures that epochs.metadata stays in sync with the\navailable epochs.", "epochs.metadata.loc[epochs.metadata['last_stimulus'].isna(), :]", "Bummer! It seems the very first two responses were recorded before the\nfirst stimulus appeared: the values in the stimulus column are None.\nThere is a very simple way to select only those epochs that do have a\nstimulus (i.e., are not None):", "epochs = epochs['last_stimulus.notna()']", "Time to calculate the ERPs for correct and incorrect responses.\nFor visualization, we'll only look at sensor FCz, which is known to show\nthe ERN nicely in the given paradigm. 
We'll also create a topoplot to get an\nimpression of the average scalp potentials measured in the first 100 ms after\nan incorrect response.", "resp_erp_correct = epochs['response_correct'].average()\nresp_erp_incorrect = epochs['not response_correct'].average()\n\nmne.viz.plot_compare_evokeds({'Correct Response': resp_erp_correct,\n 'Incorrect Response': resp_erp_incorrect},\n picks='FCz', show_sensors=True,\n title='ERPs at FCz, time-locked to response')\n\n# topoplot of average field from time 0.0-0.1 s\nresp_erp_incorrect.plot_topomap(times=0.05, average=0.05, size=3,\n title='Avg. topography 0–100 ms after '\n 'incorrect responses')", "We can see a strong negative deflection immediately after incorrect\nresponses, compared to correct responses. The topoplot, too, leaves no doubt:\nwhat we're looking at is, in fact, the ERN.\nSome researchers suggest to construct the difference wave between ERPs for\ncorrect and incorrect responses, as it more clearly reveals signal\ndifferences, while ideally also improving the signal-to-noise ratio (under\nthe assumption that the noise level in \"correct\" and \"incorrect\" trials is\nsimilar). Let's do just that and put it into a publication-ready\nvisualization.", "# difference wave: incorrect minus correct responses\nresp_erp_diff = mne.combine_evoked([resp_erp_incorrect, resp_erp_correct],\n weights=[1, -1])\n\nfig, ax = plt.subplots()\nresp_erp_diff.plot(picks='FCz', axes=ax, selectable=False, show=False)\n\n# make ERP trace bolder\nax.lines[0].set_linewidth(1.5)\n\n# add lines through origin\nax.axhline(0, ls='dotted', lw=0.75, color='gray')\nax.axvline(0, ls=(0, (10, 10)), lw=0.75, color='gray',\n label='response trigger')\n\n# mark trough\ntrough_time_idx = resp_erp_diff.copy().pick('FCz').data.argmin()\ntrough_time = resp_erp_diff.times[trough_time_idx]\nax.axvline(trough_time, ls=(0, (10, 10)), lw=0.75, color='red',\n label='max. 
negativity')\n\n# legend, axis labels, title\nax.legend(loc='lower left')\nax.set_xlabel('Time (s)', fontweight='bold')\nax.set_ylabel('Amplitude (µV)', fontweight='bold')\nax.set_title('Channel: FCz')\nfig.suptitle('ERN (Difference Wave)', fontweight='bold')\n\nfig", "References\n.. footbibliography::" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]