repo_name
stringlengths
6
77
path
stringlengths
8
215
license
stringclasses
15 values
content
stringlengths
335
154k
phoebe-project/phoebe2-docs
development/examples/sun.ipynb
gpl-3.0
#!pip install -I "phoebe>=2.4,<2.5" """ Explanation: Sun (single rotating star) Setup Let's first make sure we have the latest version of PHOEBE 2.4 installed (uncomment this line if running in an online notebook session such as colab). End of explanation """ import phoebe from phoebe import u # units import numpy as np import matplotlib.pyplot as plt logger = phoebe.logger() b = phoebe.default_star(starA='sun') """ Explanation: As always, let's do imports and initialize a logger and a new bundle. End of explanation """ print(b['sun']) """ Explanation: Setting Parameters End of explanation """ b.set_value('teff', 1.0*u.solTeff) b.set_value('requiv', 1.0*u.solRad) b.set_value('mass', 1.0*u.solMass) b.set_value('period', 24.47*u.d) """ Explanation: Let's set all the values of the sun based on the nominal solar values provided in the units package. End of explanation """ b.set_value('incl', 23.5*u.deg) b.set_value('distance', 1.0*u.AU) """ Explanation: And so that we can compare with measured/expected values, we'll observe the sun from the earth - with an inclination of 23.5 degrees and at a distance of 1 AU. End of explanation """ print(b.get_quantity('teff')) print(b.get_quantity('requiv')) print(b.get_quantity('mass')) print(b.get_quantity('period')) print(b.get_quantity('incl')) print(b.get_quantity('distance')) """ Explanation: Checking on the set values, we can see the values were converted correctly to PHOEBE's internal units. End of explanation """ b.add_dataset('lc', times=[0.], pblum=1*u.solLum) b.add_dataset('mesh', compute_times=[0.], columns=['teffs', 'loggs', 'rs']) b.run_compute(irrad_method='none', distortion_method='rotstar') """ Explanation: Running Compute Let's add a light curve so that we can compute the flux at a single time and compare it to the expected value. We'll set the passband luminosity to be the nominal value for the sun. 
We'll also add a mesh dataset so that we can plot the temperature distributions and test the size of the sun versus known values. End of explanation """ afig, mplfig = b['mesh'].plot(fc='teffs', x='xs', y='ys', show=True) afig, mplfig = b['mesh'].plot(fc='teffs', x='us', y='vs', show=True) print("teff: {} ({})".format(b.get_value('teffs').mean(), b.get_value('teff', context='component'))) """ Explanation: Comparing to Expected Values End of explanation """ print("rmin (pole): {} ({})".format(b.get_value('rs').min(), b.get_value('requiv', context='component'))) print("rmax (equator): {} (>{})".format(b.get_value('rs').max(), b.get_value('requiv', context='component'))) print("logg: {}".format(b.get_value('loggs').mean())) print("flux: {}".format(b.get_quantity('fluxes@model')[0])) """ Explanation: For a rotating sphere, the minimum radius should occur at the pole and the maximum should occur at the equator. End of explanation """
tensorflow/model-remediation
docs/min_diff/guide/integrating_min_diff_without_min_diff_model.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 The TensorFlow Authors. End of explanation """ !pip install --upgrade tensorflow-model-remediation import tensorflow as tf tf.get_logger().setLevel('ERROR') # Avoid TF warnings. from tensorflow_model_remediation import min_diff from tensorflow_model_remediation.tools.tutorials_utils import uci as tutorials_utils """ Explanation: Integrating MinDiff without MinDiffModel <div class="devsite-table-wrapper"><table class="tfo-notebook-buttons" align="left"> <td><a target="_blank" href="https://www.tensorflow.org/responsible_ai/model_remediation/min_diff/guide/integrating_min_diff_without_min_diff_model"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/model-remediation/blob/master/docs/min_diff/guide/integrating_min_diff_without_min_diff_model.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png">Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/model-remediation/blob/master/docs/min_diff/guide/integrating_min_diff_without_min_diff_model.ipynb"> <img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a> </td> <td> <a target="_blank" 
href="https://storage.googleapis.com/tensorflow_docs/model-remediation/docs/min_diff/guide/integrating_min_diff_without_min_diff_model.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table></div> Introduction It is possible to integrate MinDiff directly into your model's implementation. While doing so does not have the convenience of using MinDiffModel, this option offers the highest level of control which can be particularly useful when your model is a subclass of tf.keras.Model. This guide demonstrates how you can integrate MinDiff directly into a custom model's implementation by adding to the train_step method. Setup End of explanation """ # Original Dataset for training, sampled at 0.3 for reduced runtimes. train_df = tutorials_utils.get_uci_data(split='train', sample=0.3) train_ds = tutorials_utils.df_to_dataset(train_df, batch_size=128) # Dataset needed to train with MinDiff. train_with_min_diff_ds = ( tutorials_utils.get_uci_with_min_diff_dataset(split='train', sample=0.3)) """ Explanation: First, download the data. For succinctness, the input preparation logic has been factored out into helper functions as described in the input preparation guide. You can read the full guide for details on this process. End of explanation """ class CustomModel(tf.keras.Model): def train_step(self, data): # Unpack the data. x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data) with tf.GradientTape() as tape: y_pred = self(x, training=True) # Forward pass. loss = self.compiled_loss( y, y_pred, sample_weight, regularization_losses=self.losses) # Compute the loss value. loss = self.compiled_loss( y, y_pred, sample_weight, regularization_losses=self.losses) # Compute gradients and update weights. self.optimizer.minimize(loss, self.trainable_variables, tape=tape) # Update and return metrics. 
self.compiled_metrics.update_state(y, y_pred, sample_weight) return {m.name: m.result() for m in self.metrics} """ Explanation: Original Custom Model Customizations tf.keras.Model is designed to be easily customized via subclassing. This usually involves changing what happens in the call to fit as described here. This guide uses a custom implementation where the train_step closely resembles the default tf.keras.Model.train_step. Normally, there would be no benefit to doing so, but here, it will help demonstrate how to integrate MinDiff. End of explanation """ model = tutorials_utils.get_uci_model(model_class=CustomModel) # Use CustomModel. model.compile(optimizer='adam', loss='binary_crossentropy') _ = model.fit(train_ds, epochs=1) """ Explanation: Train the model as you would a typical Model using the Functional API. End of explanation """ min_diff_loss_fn = min_diff.losses.MMDLoss() # Hard coded for convenience. min_diff_weight = 2 # Arbitrary number for example, hard coded for convenience. apply_min_diff = True # Flag to help show where the additional lines are. class CustomModelWithMinDiff(tf.keras.Model): def train_step(self, data): # Unpack the data. x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data) # Unpack the MinDiff data. if apply_min_diff: min_diff_data = min_diff.keras.utils.unpack_min_diff_data(x) min_diff_x, membership, min_diff_sample_weight = ( tf.keras.utils.unpack_x_y_sample_weight(min_diff_data)) x = min_diff.keras.utils.unpack_original_inputs(x) with tf.GradientTape() as tape: y_pred = self(x, training=True) # Forward pass. loss = self.compiled_loss( y, y_pred, sample_weight, regularization_losses=self.losses) # Compute the loss value. loss = self.compiled_loss( y, y_pred, sample_weight, regularization_losses=self.losses) # Calculate and add the min_diff_loss. This must be done within the scope # of tf.GradientTape(). 
if apply_min_diff: min_diff_predictions = self(min_diff_x, training=True) min_diff_loss = min_diff_weight * min_diff_loss_fn( min_diff_predictions, membership, min_diff_sample_weight) loss += min_diff_loss # Compute gradients and update weights. self.optimizer.minimize(loss, self.trainable_variables, tape=tape) # Update and return metrics. self.compiled_metrics.update_state(y, y_pred, sample_weight) return {m.name: m.result() for m in self.metrics} """ Explanation: Integrating MinDiff directly into your model Adding MinDiff to the train_step To integrate MinDiff, you will need to add some lines to the CustomModel which is renamed here as CustomModelWithMinDiff. Note: For convenience the MinDiff loss and weight are hard coded. In practice, you will likely want to pass them into the __init__ method and access them as instance attributes. For clarity, this guide uses a boolean flag called apply_min_diff. All of the code relevant to MinDiff will only be run if it is set to True. If set to False then the model would behave exactly the same as CustomModel. End of explanation """ model = tutorials_utils.get_uci_model(model_class=CustomModelWithMinDiff) model.compile(optimizer='adam', loss='binary_crossentropy') _ = model.fit(train_with_min_diff_ds, epochs=1) """ Explanation: Training with this model looks exactly the same as with the previous with the exception of the dataset used. End of explanation """ for x, y in train_with_min_diff_ds.take(1): print('Type of x:', type(x)) # MinDiffPackedInputs print('Type of y:', type(y)) # Tensor (original labels) """ Explanation: Reshaping your input (optional) Given that this approach provides full control, you can take this opportunity to reshape the input into a slightly cleaner form. When using MinDiffModel, the min_diff_data needs to be packed into the first component of every batch. This is the case with the train_with_min_diff_ds dataset. 
End of explanation """ def _reformat_input(inputs, original_labels): min_diff_data = min_diff.keras.utils.unpack_min_diff_data(inputs) original_inputs = min_diff.keras.utils.unpack_original_inputs(inputs) original_data = (original_inputs, original_labels) return { 'min_diff_data': min_diff_data, 'original_data': original_data} customized_train_with_min_diff_ds = train_with_min_diff_ds.map(_reformat_input) """ Explanation: With this requirement lifted, you can reorganize the data in a slightly more intuitive structure with the original and MinDiff data cleanly separated. End of explanation """
mne-tools/mne-tools.github.io
dev/_downloads/141ddce18e923e8220337b357ba3dc45/ssd_spatial_filters.ipynb
bsd-3-clause
# Author: Denis A. Engemann <denis.engemann@gmail.com> # Victoria Peterson <victoriapeterson09@gmail.com> # License: BSD-3-Clause import matplotlib.pyplot as plt import mne from mne import Epochs from mne.datasets.fieldtrip_cmc import data_path from mne.decoding import SSD """ Explanation: Compute Spectro-Spatial Decomposition (SSD) spatial filters In this example, we will compute spatial filters for retaining oscillatory brain activity and down-weighting 1/f background signals as proposed by :footcite:NikulinEtAl2011. The idea is to learn spatial filters that separate oscillatory dynamics from surrounding non-oscillatory noise based on the covariance in the frequency band of interest and the noise covariance based on surrounding frequencies. End of explanation """ fname = data_path() / 'SubjectCMC.ds' # Prepare data raw = mne.io.read_raw_ctf(fname) raw.crop(50., 110.).load_data() # crop for memory purposes raw.resample(sfreq=250) raw.pick_types(meg=True, eeg=False, ref_meg=False) freqs_sig = 9, 12 freqs_noise = 8, 13 ssd = SSD(info=raw.info, reg='oas', sort_by_spectral_ratio=False, # False for purpose of example. filt_params_signal=dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1], l_trans_bandwidth=1, h_trans_bandwidth=1), filt_params_noise=dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], l_trans_bandwidth=1, h_trans_bandwidth=1)) ssd.fit(X=raw.get_data()) """ Explanation: Define parameters End of explanation """ pattern = mne.EvokedArray(data=ssd.patterns_[:4].T, info=ssd.info) pattern.plot_topomap(units=dict(mag='A.U.'), time_format='') # The topographies suggest that we picked up a parietal alpha generator. # Transform ssd_sources = ssd.transform(X=raw.get_data()) # Get psd of SSD-filtered signals. psd, freqs = mne.time_frequency.psd_array_welch( ssd_sources, sfreq=raw.info['sfreq'], n_fft=4096) # Get spec_ratio information (already sorted). # Note that this is not necessary if sort_by_spectral_ratio=True (default). 
spec_ratio, sorter = ssd.get_spectral_ratio(ssd_sources) # Plot spectral ratio (see Eq. 24 in Nikulin 2011). fig, ax = plt.subplots(1) ax.plot(spec_ratio, color='black') ax.plot(spec_ratio[sorter], color='orange', label='sorted eigenvalues') ax.set_xlabel("Eigenvalue Index") ax.set_ylabel(r"Spectral Ratio $\frac{P_f}{P_{sf}}$") ax.legend() ax.axhline(1, linestyle='--') # We can see that the initial sorting based on the eigenvalues # was already quite good. However, when using few components only # the sorting might make a difference. """ Explanation: Let's investigate the spatial filter with max power ratio. We will first inspect the topographies. According to Nikulin et al. 2011 this is done by either inverting the filters (W^{-1}) or by multiplying the noise cov with the filters Eq. (22) (C_n W)^t. We rely on the inversion approach here. End of explanation """ below50 = freqs < 50 # for highlighting the freq. band of interest bandfilt = (freqs_sig[0] <= freqs) & (freqs <= freqs_sig[1]) fig, ax = plt.subplots(1) ax.loglog(freqs[below50], psd[0, below50], label='max SNR') ax.loglog(freqs[below50], psd[-1, below50], label='min SNR') ax.loglog(freqs[below50], psd[:, below50].mean(axis=0), label='mean') ax.fill_between(freqs[bandfilt], 0, 10000, color='green', alpha=0.15) ax.set_xlabel('log(frequency)') ax.set_ylabel('log(power)') ax.legend() # We can clearly see that the selected component enjoys an SNR that is # way above the average power spectrum. """ Explanation: Let's also look at the power spectrum of that source and compare it to the power spectrum of the source with lowest SNR. End of explanation """
epochs = Epochs(raw, events, tmin=0., tmax=5, baseline=None, preload=True) ssd_epochs = SSD(info=epochs.info, reg='oas', filt_params_signal=dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1], l_trans_bandwidth=1, h_trans_bandwidth=1), filt_params_noise=dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], l_trans_bandwidth=1, h_trans_bandwidth=1)) ssd_epochs.fit(X=epochs.get_data()) # Plot topographies. pattern_epochs = mne.EvokedArray(data=ssd_epochs.patterns_[:4].T, info=ssd_epochs.info) pattern_epochs.plot_topomap(units=dict(mag='A.U.'), time_format='') """ Explanation: Epoched data Although we suggest using this method before epoching, there might be some situations in which data can only be treated in chunks. End of explanation """
tensorflow/ranking
docs/tutorials/quickstart.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2021 The TensorFlow Authors. End of explanation """ !pip install -q tensorflow-ranking !pip install -q --upgrade tensorflow-datasets from typing import Dict, Tuple import tensorflow as tf import tensorflow_datasets as tfds import tensorflow_ranking as tfr """ Explanation: Recommend movies for users with TensorFlow Ranking <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/ranking/tutorials/quickstart"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/ranking/blob/master/docs/tutorials/quickstart.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/ranking/blob/master/docs/tutorials/quickstart.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/ranking/docs/tutorials/quickstart.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> In this tutorial, we build a simple two tower ranking model using the MovieLens 100K dataset with TF-Ranking. 
We can use this model to rank and recommend movies for a given user according to their predicted user ratings. Setup Install and import the TF-Ranking library: End of explanation """ %%capture --no-display # Ratings data. ratings = tfds.load('movielens/100k-ratings', split="train") # Features of all the available movies. movies = tfds.load('movielens/100k-movies', split="train") # Select the basic features. ratings = ratings.map(lambda x: { "movie_title": x["movie_title"], "user_id": x["user_id"], "user_rating": x["user_rating"] }) """ Explanation: Read the data Prepare to train a model by creating a ratings dataset and movies dataset. Use user_id as the query input feature, movie_title as the document input feature, and user_rating as the label to train the ranking model. End of explanation """ movies = movies.map(lambda x: x["movie_title"]) users = ratings.map(lambda x: x["user_id"]) user_ids_vocabulary = tf.keras.layers.experimental.preprocessing.StringLookup( mask_token=None) user_ids_vocabulary.adapt(users.batch(1000)) movie_titles_vocabulary = tf.keras.layers.experimental.preprocessing.StringLookup( mask_token=None) movie_titles_vocabulary.adapt(movies.batch(1000)) """ Explanation: Build vocabularies to convert all user ids and all movie titles into integer indices for embedding layers: End of explanation """ key_func = lambda x: user_ids_vocabulary(x["user_id"]) reduce_func = lambda key, dataset: dataset.batch(100) ds_train = ratings.group_by_window( key_func=key_func, reduce_func=reduce_func, window_size=100) for x in ds_train.take(1): for key, value in x.items(): print(f"Shape of {key}: {value.shape}") print(f"Example values of {key}: {value[:5].numpy()}") print() """ Explanation: Group by user_id to form lists for ranking models: End of explanation """ def _features_and_labels( x: Dict[str, tf.Tensor]) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]: labels = x.pop("user_rating") return x, labels ds_train = ds_train.map(_features_and_labels) ds_train = 
ds_train.apply( tf.data.experimental.dense_to_ragged_batch(batch_size=32)) """ Explanation: Generate batched features and labels: End of explanation """ for x, label in ds_train.take(1): for key, value in x.items(): print(f"Shape of {key}: {value.shape}") print(f"Example values of {key}: {value[:3, :3].numpy()}") print() print(f"Shape of label: {label.shape}") print(f"Example values of label: {label[:3, :3].numpy()}") """ Explanation: The user_id and movie_title tensors generated in ds_train are of shape [32, None], where the second dimension is 100 in most cases except for the batches when less than 100 items grouped in lists. A model working on ragged tensors is thus used. End of explanation """ class MovieLensRankingModel(tf.keras.Model): def __init__(self, user_vocab, movie_vocab): super().__init__() # Set up user and movie vocabulary and embedding. self.user_vocab = user_vocab self.movie_vocab = movie_vocab self.user_embed = tf.keras.layers.Embedding(user_vocab.vocabulary_size(), 64) self.movie_embed = tf.keras.layers.Embedding(movie_vocab.vocabulary_size(), 64) def call(self, features: Dict[str, tf.Tensor]) -> tf.Tensor: # Define how the ranking scores are computed: # Take the dot-product of the user embeddings with the movie embeddings. user_embeddings = self.user_embed(self.user_vocab(features["user_id"])) movie_embeddings = self.movie_embed( self.movie_vocab(features["movie_title"])) return tf.reduce_sum(user_embeddings * movie_embeddings, axis=2) """ Explanation: Define a model Define a ranking model by inheriting from tf.keras.Model and implementing the call method: End of explanation """ # Create the ranking model, trained with a ranking loss and evaluated with # ranking metrics. 
model = MovieLensRankingModel(user_ids_vocabulary, movie_titles_vocabulary) optimizer = tf.keras.optimizers.Adagrad(0.5) loss = tfr.keras.losses.get( loss=tfr.keras.losses.RankingLossKey.SOFTMAX_LOSS, ragged=True) eval_metrics = [ tfr.keras.metrics.get(key="ndcg", name="metric/ndcg", ragged=True), tfr.keras.metrics.get(key="mrr", name="metric/mrr", ragged=True) ] model.compile(optimizer=optimizer, loss=loss, metrics=eval_metrics) """ Explanation: Create the model, and then compile it with ranking tfr.keras.losses and tfr.keras.metrics, which are the core of the TF-Ranking package. This example uses a ranking-specific softmax loss, which is a listwise loss introduced to promote all relevant items in the ranking list with better chances on top of the irrelevant ones. In contrast to the softmax loss in the multi-class classification problem, where only one class is positive and the rest are negative, the TF-Ranking library supports multiple relevant documents in a query list and non-binary relevance labels. For ranking metrics, this example uses in specific Normalized Discounted Cumulative Gain (NDCG) and Mean Reciprocal Rank (MRR), which calculate the user utility of a ranked query list with position discounts. For more details about ranking metrics, review evaluation measures offline metrics. End of explanation """ model.fit(ds_train, epochs=3) """ Explanation: Train and evaluate the model Train the model with model.fit. End of explanation """ # Get movie title candidate list. for movie_titles in movies.batch(2000): break # Generate the input for user 42. inputs = { "user_id": tf.expand_dims(tf.repeat("42", repeats=movie_titles.shape[0]), axis=0), "movie_title": tf.expand_dims(movie_titles, axis=0) } # Get movie recommendations for user 42. scores = model(inputs) titles = tfr.utils.sort_by_scores(scores, [tf.expand_dims(movie_titles, axis=0)])[0] print(f"Top 5 recommendations for user 42: {titles[0, :5]}") """ Explanation: Generate predictions and evaluate. 
End of explanation """
landlab/landlab
notebooks/tutorials/flow_direction_and_accumulation/the_FlowAccumulator.ipynb
mit
%matplotlib inline # import plotting tools from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter import matplotlib as mpl # import numpy import numpy as np # import necessary landlab components from landlab import RasterModelGrid, HexModelGrid from landlab.components import FlowAccumulator from landlab.components import ( FlowDirectorD8, FlowDirectorDINF, FlowDirectorMFD, FlowDirectorSteepest, ) from landlab.components import DepressionFinderAndRouter # import landlab plotting functionality from landlab.plot.drainage_plot import drainage_plot # create a plotting routine to make a 3d plot of our surface. def surf_plot(mg, surface="topographic__elevation", title="Surface plot of topography"): fig = plt.figure() ax = fig.gca(projection="3d") # Plot the surface. Z = mg.at_node[surface].reshape(mg.shape) color = cm.gray((Z - Z.min()) / (Z.max() - Z.min())) surf = ax.plot_surface( mg.x_of_node.reshape(mg.shape), mg.y_of_node.reshape(mg.shape), Z, rstride=1, cstride=1, facecolors=color, linewidth=0.0, antialiased=False, ) ax.view_init(elev=35, azim=-120) ax.set_xlabel("X axis") ax.set_ylabel("Y axis") ax.set_zlabel("Elevation") plt.title(title) plt.show() """ Explanation: <a href="http://landlab.github.io"><img style="float: left" src="../../landlab_header.png"></a> Introduction to the FlowAccumulator Landlab directs flow and accumulates it using two types of components: FlowDirectors use the topography to determine how flow moves between adjacent nodes. For every node in the grid it determines the nodes to receive flow and the proportion of flow to send from one node to its receiver. The FlowAccumulator uses the direction and proportion of flow moving between each node and (optionally) water runoff to calculate drainage area and discharge. In this tutorial we will go over how to initialize and run the FlowAccumulator. 
For tutorials on how to initialize and run a FlowDirector and a brief comparison between the different flow direction algorithms or for more detailed examples that contrast the differences between each flow direction algorithm, refer to the other tutorials in this section. First, we import the necessary python modules and make a small plotting routine. End of explanation """ mg = RasterModelGrid((10, 10)) _ = mg.add_field( "topographic__elevation", 3.0 * mg.x_of_node ** 2 + mg.y_of_node ** 2, at="node" ) surf_plot(mg, title="Grid 1") """ Explanation: Topographic grids For this tutorial we will consider one topographic surface. Here it is plotted in three dimensions. End of explanation """ fa = FlowAccumulator(mg) # this is the same as writing: fa = FlowAccumulator( mg, surface="topographic__elevation", flow_director="FlowDirectorSteepest", runoff_rate=None, depression_finder=None, ) """ Explanation: Initalizing and running the FlowAccumulator To instantiate the FlowAccumulator, you must pass it the minimum of a model grid that has a field called 'topographic__elevation'. Alternatively, you can pass it the name of another field name at node, or an array with length number of nodes. This is the surface over which flow is first directed and then accumulated. FlowAccumulator will create and use a FlowDirector to calculate flow directions. The default FlowDirector is FlowDirectorSteepest, which is the same as D4 in the special case of a raster grid. There are a few different ways to specify which FlowDirector you want FlowAccumulator to use. The next section will go over these options. FlowAccumulator can take a constant or spatially variable input called runoff_rate, which it uses to calculate discharge. Alternatively, if there is an at_node field called water__unit_flux_in and no value is specified as the runoff_rate, FlowAccumulator will use the values stored in water__unit_flux_in. 
In addition to directing flow and accumulating it in one step, FlowAccumulator can also deal with depression finding internally. This can be done by passing a DepressionFinder to the keyword argument depression_finder. The default behavior is to not deal with depressions internally. Finally, if the FlowDirector you are using takes any keyword arguments, those can be passed to the FlowAccumulator. For example, FlowDirectorMFD has to option to use diagonals in addition to links and to proportion flow based on either the slope or the the square root of slope. End of explanation """ fa.run_one_step() (da, q) = fa.accumulate_flow() """ Explanation: The FlowAccumulator has two public methods: run_one_step() and accumulate_flow(). Both use the values of the surface provided to identify flow directions (and in the case of directing to more than one receiver, proportions) and then calculate discharge and drainage area. Both store the same information about receivers, proportions, and other calculated values to the model grid as fields. The difference is that run_one_step() does not return any values, while accumulate_flow() returns the drainage area and discharge as variables. End of explanation """ plt.figure() drainage_plot(mg) """ Explanation: We can illustrate the receiver node FlowDirectionSteepest has assigned to each donor node using a plotting function in Landlab called drainage_plot. We will see many of these plots in this tutorial so let's take a moment to walk through the plot and what it contains. The background image (white to black) shows the values of topographic elevation of the underlying surface or any other at_node field we choose to plot. The colors of the dots inside of each pixel show the locations of the nodes and the type of node. The arrows show the direction of flow, and the color shows the proportion of flow that travels along that link. An X on top of a node indicates that node is a local sink and flows to itself. 
Note that in Landlab Boundary Nodes, or nodes that are on the edge of a grid, do not have area and do not contribute flow to nodes. These nodes can either be Fixed Gradient Nodes, Fixed Value Nodes, or Closed Nodes. With the exception of Closed Nodes the boundary nodes can receive flow. An important step in all flow direction and accumulation is setting the proper boundary condition. Refer to the boundary condition tutorials for more information. End of explanation """ plt.figure() drainage_plot(mg, "drainage_area") """ Explanation: In this drainage plot, we can see that all of the flow is routed down the steepest link. A plot of the drainage area would illustrate how the flow would move. Next let's make a similar plot except that instead of plotting the topographic elevation as the background, we will plot the drainage area. End of explanation """ print(mg.at_node["drainage_area"].reshape(mg.shape)) """ Explanation: If we print out the drainage area, we can see that its maximum reaches 64, which is the total area of the interior of the grid. End of explanation """ print(mg.number_of_core_nodes) """ Explanation: This is the same number as the number of core nodes. This makes sense becaue these are the only nodes in Landlab that have area, and in our model grid they each have an area of one. End of explanation """ rain = 1.0 + 5.0 * np.random.rand(mg.number_of_nodes) plt.imshow(rain.reshape(mg.shape), origin="lower", cmap="PuBu", vmin=0) plt.colorbar() plt.show() _ = mg.add_field("water__unit_flux_in", rain, at="node", clobber=True) """ Explanation: We can rain on the surface, store that rain in the field water__unit_flux_in, and then re-run the FlowAccumulator. As an example, we will 'rain' a uniformly distributed random number between 0 and 1 on every node. Since we already ran the FlowAccumulator, under the hood our grid already has a field called water__unit_flux_in and we need to set the clobber keyword to True. 
End of explanation """ fa.run_one_step() plt.figure() drainage_plot(mg, "surface_water__discharge", title="Discharge") """ Explanation: Next, we re-run the FlowAccumulator and plot the discharge. End of explanation """ # option 1: Full name of FlowDirector fa = FlowAccumulator( mg, surface="topographic__elevation", flow_director="FlowDirectorSteepest" ) # option 2: Short name of FlowDirector fa = FlowAccumulator(mg, surface="topographic__elevation", flow_director="Steepest") # option 3: Uninstantiated FlowDirector Component fa = FlowAccumulator( mg, surface="topographic__elevation", flow_director=FlowDirectorSteepest ) # option 4: Instantiated FlowDirector Component fd = FlowDirectorSteepest(mg) fa = FlowAccumulator(mg, surface="topographic__elevation", flow_director=fd) """ Explanation: The basic pattern of drainage is the same but the values for the surface water discharge are different than for drainage area. Alternative ways to specify the FlowDirector FlowAccumulator allows the FlowDirector to be specified one of four ways: 1. As a string of the full name of the FlowDirector (e.g., 'FlowDirectorSteepest' or 'FlowDirectorD8' ) 2. As a string of the short name of the FlowDirector method (e.g., 'Steepest' or 'D8') 3. As the class name for the desired FlowDirector component. 4. As an instantiated version of a FlowDirector component. Thus, the following four ways to instantiate a FlowAccumulator are equivalent. 
End of explanation """ # option 1: Full name of FlowDirector fa = FlowAccumulator( mg, surface="topographic__elevation", flow_director="FlowDirectorD8", depression_finder="DepressionFinderAndRouter", ) # option 2: Uninstantiated FlowDirector Component fa = FlowAccumulator( mg, surface="topographic__elevation", flow_director=FlowDirectorD8, depression_finder="DepressionFinderAndRouter", ) # option 3: Instantiated FlowDirector Component fd = FlowDirectorD8(mg) df = DepressionFinderAndRouter(mg) fa = FlowAccumulator( mg, surface="topographic__elevation", flow_director=fd, depression_finder=df ) """ Explanation: Providing a DepressionFinder Just as with providing the FlowDirector, the DepressionFinder can be provided multiple ways. While there are presently four different FlowDirectors in Landlab, there is only one DepressionFinder. As a string of the full name of the DepressionFinder (e.g., 'DepressionFinderAndRouter') As the class name of the DepressionFinder component. As an instantiated version of a DepressionFinder component. NOTE: The current Landlab depression finder only works with FlowDirectorSteepest and FlowDirectorD8 no matter how the depression finder is run. This is because the depression finder presently only works with route-to-one methods. Thus, the following three ways to instantiated a DepressionFinder are equivalent. End of explanation """ df = DepressionFinderAndRouter(mg) fa = FlowAccumulator( mg, surface="topographic__elevation", flow_director="D8", depression_finder=df ) """ Explanation: Methods for specifying can be mixed, such that the following is permissible. 
End of explanation """ hmg = HexModelGrid((9, 5)) _ = hmg.add_field("topographic__elevation", hmg.x_of_node + hmg.y_of_node, at="node") fa = FlowAccumulator(hmg, flow_director="MFD") fa.run_one_step() plt.figure() drainage_plot(hmg) plt.figure() drainage_plot(hmg, "drainage_area") """ Explanation: Using the DepressionFinder with FlowAccumulator To conclude this tutorial, we examine an example of a Hexagonal Model grid with a depression. End of explanation """ hmg_hole = HexModelGrid((9, 5)) z = hmg_hole.add_field( "topographic__elevation", hmg_hole.x_of_node + np.round(hmg_hole.y_of_node), at="node", ) hole_nodes = [21, 22, 23, 30, 31, 39, 40] z[hole_nodes] = z[hole_nodes] * 0.1 fa = FlowAccumulator(hmg_hole, flow_director="Steepest") fa.run_one_step() plt.figure() drainage_plot(hmg_hole) plt.figure() drainage_plot(hmg_hole, "drainage_area") """ Explanation: We will put a depression in the middle of the topography, and then see what the drainage plot looks like. End of explanation """ # OPTION 1 fa = FlowAccumulator(hmg_hole, flow_director="Steepest") fa.run_one_step() df = DepressionFinderAndRouter(hmg_hole) df.map_depressions() # OPTION 2 fa = FlowAccumulator( hmg_hole, flow_director="Steepest", depression_finder="DepressionFinderAndRouter" ) fa.run_one_step() plt.figure() drainage_plot(hmg_hole, "drainage_area") """ Explanation: As you can see, the flow gets stuck in the hole. We'd like the flow in the hole to move out and to the boundary. To route the flow out of the hole, we have two options. 1. Run the FlowAccumulator and then the DepressionFinder 2. Run them together in FlowAccumulator. The options look like the following and they are equivalent. End of explanation """
quanhua92/learning-notes
libs/scipy/scipy-lectures/13_Math_Optimization.ipynb
apache-2.0
from scipy import optimize
import numpy as np


def f(x):
    """Smooth 1D test function with a single global minimum of -1 at x = 0.7."""
    return -np.exp(-(x - 0.7) ** 2)


# In 1D no gradient is needed: Brent's method (the default) brackets and
# refines the minimum.
optimize.minimize_scalar(f)
"""
Explanation: Chapter 13: Mathematical optimization: finding minima of functions
13.1 Knowing your problem
Dimensionality of the problem: The scale of an optimization problem is pretty much set by the dimensionality of the problem, i.e. the number of scalar variables on which the search is performed
Optimizing convex functions is easy. Optimizing non-convex functions can be very hard
Optimizing smooth functions is easier
Noisy gradients: Many optimization methods rely on gradients of the objective function. If the gradient function is not given, they are computed numerically, which induces errors. In such a situation, even if the objective function is not noisy, a gradient-based optimization may be a noisy optimization.
Optimizations under constraints. For example, $-1 < x_1 < 1$
13.2 A review of the different optimizers
13.2.1 1D Optimization
Let's find the minimum of the scalar function $f(x) = -exp[-(x - 0.7)^2]$
End of explanation
"""


def f(x):
    """Scaled Rosenbrock function; its minimum is 0, reached at x = (1, 1)."""
    return 0.5 * (1 - x[0]) ** 2 + (x[1] - x[0] ** 2) ** 2


# Conjugate gradient with the gradient estimated by finite differences.
optimize.minimize(f, [2, -1], method="CG")
"""
Explanation: 13.2.2 Gradient based methods
Gradient descent basically consists in taking small steps in the direction of the gradient, that is the direction of the steepest descent.
Conjugate gradient descent
Gradient methods need the Jacobian of the function. They can compute it numerically, but will perform better if you can pass them the gradient
End of explanation
"""


def jacobian(x):
    """Analytical gradient of the scaled Rosenbrock function above."""
    return np.array((
        -2 * 0.5 * (1 - x[0]) - 4 * x[0] * (x[1] - x[0] ** 2),  # df/dx0
        2 * (x[1] - x[0] ** 2),                                 # df/dx1
    ))


# Same CG run, now with the exact gradient: far fewer function evaluations.
optimize.minimize(f, [2, -1], method="CG", jac=jacobian)
"""
Explanation: Note that the function has only been evaluated 27 times, compared to 108 without the gradient
13.2.3 Newton and quasi-newton methods
End of explanation
"""


def f(x):
    """Scaled Rosenbrock function; its minimum is 0, reached at x = (1, 1)."""
    return 0.5 * (1 - x[0]) ** 2 + (x[1] - x[0] ** 2) ** 2


def jacobian(x):
    """Analytical gradient of the scaled Rosenbrock function above."""
    return np.array((
        -2 * 0.5 * (1 - x[0]) - 4 * x[0] * (x[1] - x[0] ** 2),  # df/dx0
        2 * (x[1] - x[0] ** 2),                                 # df/dx1
    ))


# Newton-CG exploits curvature information; without an explicit Hessian it
# builds one from extra gradient evaluations.
optimize.minimize(f, [2, -1], method="Newton-CG", jac=jacobian)
"""
Explanation: Newton-CG needs fewer function evaluations but more gradient evaluations, as it uses the gradient to approximate the Hessian.
Let's compute the Hessian and pass it to the algorithm
End of explanation
"""


def hessian(x):
    """Analytical Hessian of the scaled Rosenbrock function."""
    return np.array((
        (1 - 4 * x[1] + 12 * x[0] ** 2, -4 * x[0]),
        (-4 * x[0], 2),
    ))


# With the exact Hessian, Newton-CG no longer spends gradient evaluations
# approximating curvature.
optimize.minimize(f, [2, -1], method="Newton-CG", jac=jacobian, hess=hessian)
# optimize.brute performs brute-force grid search; the bare attribute access
# below is a no-op kept only as a pointer to that fallback method.
optimize.brute
"""
Explanation: 13.6.1 Minimizing the norm of a vector function
End of explanation
"""


def f(x):
    """Residual vector: arctan(x) minus arctan of an evenly spaced 0..1 ramp."""
    return np.arctan(x) - np.arctan(np.linspace(0, 1, len(x)))


x0 = np.zeros(10)
# leastsq exploits the least-squares structure of the residuals directly...
optimize.leastsq(f, x0)


def g(x):
    """Scalar cost: sum of squared residuals of f."""
    return np.sum(f(x) ** 2)


# ...whereas a generic minimizer such as BFGS only sees the scalar cost.
optimize.minimize(g, x0, method="BFGS")
f(x0)
"""
Explanation: 13.6.1 Minimizing the norm of a vector function
End of explanation
"""
mroberge/hydrofunctions
docs/notebooks/USGS_Statistics_Service.ipynb
mit
import hydrofunctions as hf print(hf.__version__) """ Explanation: Requesting Statistics from the USGS Statistics Service The USGS calculates various types of statistics for its data and provides these values through a web service. You can access this service through the stats function. Learn more about the USGS Statistics Service. There are three types of report that you can request using the StatReportType parameter. 'annual': This summarizes all of the official daily data for each year using max, min, mean, and the 5, 10, 20, 25, 50, 75, 80, 90, and 95th percentiles. 'monthly': This calculates the mean of the 28 to 31 daily values that occur for each of the months in each of the years of record. 'daily': This summarizes all of the data for this month and day, using max, min, mean, and the 5, 10, 20, 25, 50, 75, 80, 90, and 95th percentiles. Request multiple sites You can request multiple sites by separating them with commas, like this: '01541200,01542500' Providing additional arguments The USGS Statistics Service allows you to specify a wide array of additional parameters in your request. You can provide these parameters as keyword arguments, like in this example: hf.stats('01452500', parameterCD='00060') This will only request statistics for discharge, which is specified with the '00060' parameter code. Limiting requests to only certain parameters The default behavior for the USGS Statistics Service is to provide statistics for every parameter that is collected at a site. This can make for a long table that you will have to filter by the parameter that you want, like this: my_stat_dataframe.loc(my_stat_dataframe['parameter_cd']='00060') Alternatively, you can just request the parameter that you are interested in, rather than all of the parameters. 
To limit your request, provide the parameterCd keyword argument (note the capitalization, which matches the USGS service field and the daily example below), like this:
    hf.stats('01452500', parameterCd='00060')
End of explanation """ may_2019 = hf.NWIS('01542500', 'dv', '2019-05-01', '2019-06-01') may_2019 """ Explanation: To get started, let's request some data from Karthus, PA to see what typically gets collected there. End of explanation """ annual_stats = hf.stats('01542500', 'annual', missingData='on') # Use annual_stats.header to access just the header, or .df for just the dataframe. # If you don't specify, both will be provided. annual_stats """ Explanation: Requesting annual statistics This site has collected discharge data since 1960, but other parameters, such as water temperature ('00010'), have only been collected since 2010. Unfortunately, in 2010, only 41 days of water temperature measurements were collected. By setting the missingData argument to on, we can ask the USGS to report averages for incomplete years. Now it is up to you to decide if 41 values is an adequate number! End of explanation """ monthly_stats = hf.stats('01542500', 'monthly') monthly_stats.df.loc[monthly_stats.df['parameter_cd']=='00060'] """ Explanation: Requesting monthly statistics The monthly report provides the mean value for each parameter for every month since 1960, when data collection began at this site. Since this site collects lots of parameters, we can limit our display of the dataframe by filtering everything out except the discharge parameter ('00060'). End of explanation """ daily_stats = hf.stats('01542500', 'daily', parameterCd='00060') daily_stats.df """ Explanation: Requesting daily reports The daily statistics report is different from the monthly and annual reports in that it aggregates multiple years together from across the entire period of record. So in the following example, in line 0, the report provides statistics for January 1st by calculating the mean of every January 1st from 1961 ('begin_yr') to 2019 ('end_yr'). Note that there are 366 rows, or 365 days each year plus Febrary 29th on leap years. End of explanation """
osamoylenko/YSDA_deeplearning17
Seminar1/Classwork_ru.ipynb
mit
!wget https://github.com/goto-ru/Unsupervised_ML/raw/20779daf2aebca80bfe38401bc87cf41fc7b493d/03_zebrafish/zebrafish.npy -O zebrafish.npy #alternative link: https://www.dropbox.com/s/hhep0wj4c11qibu/zebrafish.npy?dl=1 """ Explanation: Чем думает рыба? End of explanation """ import numpy as np data = np.load("zebrafish.npy")/255. import matplotlib.pyplot as plt %matplotlib inline tick0 = data[:,0] tick0_image = tick0.reshape(230, 202) print "размер 1 картинки:", tick0_image.shape plt.imshow(tick0_image.T); #мини-библиотека для рисования рыбы from zebrafish_drawing_factory import draw_component draw_component(data[:,0]) """ Explanation: Данные Сейчас в вашем распоряжении - данные о мозговой активности малька рыбы вида Danio Rerio https://en.wikipedia.org/wiki/Zebrafish . Мальку введено вещество, которое светится от электрической активности (например, от спайков нейронов). Мальки почти прозрачны, поэтому такое свечение видно извне. Сами данные содержат 240 фотографий головной части рыбки, на которых видна мозговая активность в каждой точке. Каждая фотография имеет размер 230 x 202 пикселей Ваша задача - попытаться восстановить структуру мозга рыбки. Для этого можно попытаться найти, например, группы нейронов, реагирующих вместе или с одинаковой частотой. Никакой разметки в данных нет, поэтому вам придётся использовать методы понижения размерности и кластеризации, чтобы эффективно анализировать данные. 
End of explanation """ import matplotlib.pyplot as plt %matplotlib inline plt.figure(figsize=[10,10]) for i in range(0,240,10): plt.plot(data[i]) """ Explanation: Временные ряды Посмотрим на активность отдельных пикселей в течение времени: Попробуйте вручную найти какие-то характерные группы нейронов End of explanation """ from sklearn.decomposition import PCA pca = <создайте и обучите PCA с 20+ компонентами> data_pca = <преобразуйте данные в пространство главных компонент pca.transform> """ Explanation: Поищем характерные группы нейронов Давайте разложим временные ряды активности нейронов при помощи метода главных компонент. Важно! в этой части задания объектом выборки является временной ряд активности 1 точки на картинке, а не картинка целиком. End of explanation """ draw_component(data_pca[:,1]) draw_component(data_pca[:,2]) from zebrafish_drawing_factory import draw_components draw_components(data_pca[:,2],data_pca[:,3]) """ Explanation: Визуализируем компоненты End of explanation """ def extract_features(impulses): """given time series(array) of region activity, compute some feature representation of those time series Ideas: - fourier transform - mean, variance and percentiles - sums of every k-th element with shift b """ features = []<любые фичи> return features data_features = np.array(list(map(extract_features, data))) print "shape:",data_features.shape from sklearn.decomposition import PCA pca = <обучи PCA> data_pca = <преобразуй в пространство PCA> <визуализируй полученные компоненты> draw_component(...) draw_components(...) 
""" Explanation: Поищем фичи End of explanation """ from sklearn.cluster import KMeans from sklearn.mixture import GMM <покластеризуй области изображения на основе двух полученных PCA-представлений, используй любой метод на выбор> cluster_ids = <предскажи номер кластера для каждого пикселя> #cluster_ids должен содержать по 1 чиселке на пиксель assert np.prod(cluster_ids.shape) == (230*202) plt.imshow(cluster_ids.reshape(230,202),cmap='spectral') """ Explanation: Bonus: clustering in PCA space End of explanation """
ejolly/pymer4
docs/auto_examples/example_03_posthoc.ipynb
mit
# import basic libraries and sample data import os import pandas as pd from pymer4.utils import get_resource_path from pymer4.models import Lmer # IV3 is a categorical predictors with 3 levels in the sample data df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) # # We're going to fit a multi-level regression using the # categorical predictor (IV3) which has 3 levels model = Lmer("DV ~ IV3 + (1|Group)", data=df) # Using dummy-coding; suppress summary output model.fit(factors={"IV3": ["1.0", "0.5", "1.5"]}, summarize=False) # Get ANOVA table print(model.anova()) """ Explanation: 3. ANOVA tables and post-hoc comparisons <div class="alert alert-info"><h4>Note</h4><p>ANOVAs and post-hoc tests are only available for :code:`Lmer` models estimated using the :code:`factors` argument of :code:`model.fit()` and rely on implementations in R</p></div> In the previous tutorial where we looked at categorical predictors, behind the scenes :code:pymer4 was using the :code:factor functionality in R. This means the output of :code:model.fit() looks a lot like :code:summary() in R applied to a model with categorical predictors. But what if we want to compute an F-test across all levels of our categorical predictor? :code:pymer4 makes this easy to do, and makes it easy to ensure Type III sums of squares infereces are valid. It also makes it easy to follow up omnibus tests with post-hoc pairwise comparisons. ANOVA tables and orthogonal contrasts Because ANOVA is just regression, :code:pymer4 can estimate ANOVA tables with F-results using the :code:.anova() method on a fitted model. This will compute a Type-III SS table given the coding scheme provided when the model was initially fit. Based on the distribution of data across factor levels and the specific coding-scheme used, this may produce invalid Type-III SS computations. 
For this reason the :code:.anova() method has a :code:force-orthogonal=True argument that will reparameterize and refit the model using orthogonal polynomial contrasts prior to computing an ANOVA table. Here we first estimate a mode with dummy-coded categories and suppress the summary output of :code:.fit(). Then we use :code:.anova() to examine the F-test results. End of explanation """ # Get ANOVA table, but this time force orthogonality # for valid SS III inferences # In this case the data are balanced so nothing changes print(model.anova(force_orthogonal=True)) # Checkout current contrast scheme (for first contrast) # Notice how it's simply a linear contrast across levels print(model.factors) # Checkout previous contrast scheme # which was a treatment contrast with 1.0 # as the reference level print(model.factors_prev_) """ Explanation: Type III SS inferences will only be valid if data are fully balanced across levels or if contrasts between levels are orthogonally coded and sum to 0. Below we tell :code:pymer4 to respecify our contrasts to ensure this before estimating the ANOVA. :code:pymer4 also saves the last set of contrasts used priory to forcing orthogonality. Because the sample data is balanced across factor levels and there are not interaction terms, in this case orthogonal contrast coding doesn't change the results. 
End of explanation """ # Fix the random number generator # for reproducibility import numpy as np np.random.seed(10) # Create a new categorical variable with 3 levels df = df.assign(IV4=np.random.choice(["1", "2", "3"], size=df.shape[0])) # Estimate model with orthogonal polynomial contrasts model = Lmer("DV ~ IV4*IV3 + (1|Group)", data=df) model.fit( factors={"IV4": ["1", "2", "3"], "IV3": ["1.0", "0.5", "1.5"]}, ordered=True, summarize=False, ) # Get ANOVA table # We can ignore the note in the output because # we manually specified polynomial contrasts print(model.anova()) """ Explanation: Marginal estimates and post-hoc comparisons :code:pymer4 leverages the :code:emmeans package in order to compute marginal estimates ("cell means" in ANOVA lingo) and pair-wise comparisons of models that contain categorical terms and/or interactions. This can be performed by using the :code:.post_hoc() method on fitted models. Let's see an example: First we'll quickly create a second categorical IV to demo with and estimate a 3x3 ANOVA to get main effects and the interaction. End of explanation """ # Compute post-hoc tests marginal_estimates, comparisons = model.post_hoc( marginal_vars="IV3", grouping_vars="IV4" ) # "Cell" means of the ANOVA print(marginal_estimates) # Pairwise comparisons print(comparisons) """ Explanation: Example 1 ~~~~~~~~~ Compare each level of IV3 to each other level of IV3, within each level of IV4. Use default Tukey HSD p-values. 
End of explanation """ # Compute post-hoc tests marginal_estimates, comparisons = model.post_hoc( marginal_vars=["IV3", "IV4"], p_adjust="fdr" ) # Pairwise comparisons print(comparisons) """ Explanation: Example 2 ~~~~~~~~~ Compare each unique IV3,IV4 "cell mean" to every other IV3,IV4 "cell mean" and used FDR correction for multiple comparisons: End of explanation """ model = Lmer("DV ~ IV2*IV3*IV4 + (1|Group)", data=df) # Only need to polynomial contrasts for IV3 and IV4 # because IV2 is continuous model.fit( factors={"IV4": ["1", "2", "3"], "IV3": ["1.0", "0.5", "1.5"]}, ordered=True, summarize=False, ) # Get ANOVA table print(model.anova()) """ Explanation: Example 3 ~~~~~~~~~ For this example we'll estimate a more complicated ANOVA with 1 continuous IV and 2 categorical IVs with 3 levels each. This is the same model as before but with IV2 thrown into the mix. Now, pairwise comparisons reflect changes in the slope of the continuous IV (IV2) between levels of the categorical IVs (IV3 and IV4). First let's get the ANOVA table End of explanation """ # Compute post-hoc tests with bonferroni correction marginal_estimates, comparisons = model.post_hoc( marginal_vars="IV2", grouping_vars=["IV3", "IV4"], p_adjust="bonf" ) # Pairwise comparisons print(comparisons) """ Explanation: Now we can compute the pairwise difference in slopes End of explanation """
ShubhamDebnath/Coursera-Machine-Learning
Course 4/Convolution model Application v1.ipynb
mit
import math import numpy as np import h5py import matplotlib.pyplot as plt import scipy from PIL import Image from scipy import ndimage import tensorflow as tf from tensorflow.python.framework import ops from cnn_utils import * %matplotlib inline np.random.seed(1) """ Explanation: Convolutional Neural Networks: Application Welcome to Course 4's second assignment! In this notebook, you will: Implement helper functions that you will use when implementing a TensorFlow model Implement a fully functioning ConvNet using TensorFlow After this assignment you will be able to: Build and train a ConvNet in TensorFlow for a classification problem We assume here that you are already familiar with TensorFlow. If you are not, please refer the TensorFlow Tutorial of the third week of Course 2 ("Improving deep neural networks"). 1.0 - TensorFlow model In the previous assignment, you built helper functions using numpy to understand the mechanics behind convolutional neural networks. Most practical applications of deep learning today are built using programming frameworks, which have many built-in functions you can simply call. As usual, we will start by loading in the packages. End of explanation """ # Loading the data (signs) X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset() """ Explanation: Run the next cell to load the "SIGNS" dataset you are going to use. End of explanation """ # Example of a picture index = 6 plt.imshow(X_train_orig[index]) print ("y = " + str(np.squeeze(Y_train_orig[:, index]))) """ Explanation: As a reminder, the SIGNS dataset is a collection of 6 signs representing numbers from 0 to 5. <img src="images/SIGNS.png" style="width:800px;height:300px;"> The next cell will show you an example of a labelled image in the dataset. Feel free to change the value of index below and re-run to see different examples. End of explanation """ X_train = X_train_orig/255. X_test = X_test_orig/255. 
Y_train = convert_to_one_hot(Y_train_orig, 6).T Y_test = convert_to_one_hot(Y_test_orig, 6).T print ("number of training examples = " + str(X_train.shape[0])) print ("number of test examples = " + str(X_test.shape[0])) print ("X_train shape: " + str(X_train.shape)) print ("Y_train shape: " + str(Y_train.shape)) print ("X_test shape: " + str(X_test.shape)) print ("Y_test shape: " + str(Y_test.shape)) conv_layers = {} """ Explanation: In Course 2, you had built a fully-connected network for this dataset. But since this is an image dataset, it is more natural to apply a ConvNet to it. To get started, let's examine the shapes of your data. End of explanation """ # GRADED FUNCTION: create_placeholders def create_placeholders(n_H0, n_W0, n_C0, n_y): """ Creates the placeholders for the tensorflow session. Arguments: n_H0 -- scalar, height of an input image n_W0 -- scalar, width of an input image n_C0 -- scalar, number of channels of the input n_y -- scalar, number of classes Returns: X -- placeholder for the data input, of shape [None, n_H0, n_W0, n_C0] and dtype "float" Y -- placeholder for the input labels, of shape [None, n_y] and dtype "float" """ ### START CODE HERE ### (≈2 lines) X = tf.placeholder(tf.float32, [None, n_H0, n_W0, n_C0], name = 'X') Y = tf.placeholder(tf.float32, [None, n_y], name = 'Y') ### END CODE HERE ### return X, Y X, Y = create_placeholders(64, 64, 3, 6) print ("X = " + str(X)) print ("Y = " + str(Y)) """ Explanation: 1.1 - Create placeholders TensorFlow requires that you create placeholders for the input data that will be fed into the model when running the session. Exercise: Implement the function below to create placeholders for the input image X and the output Y. You should not define the number of training examples for the moment. To do so, you could use "None" as the batch size, it will give you the flexibility to choose it later. Hence X should be of dimension [None, n_H0, n_W0, n_C0] and Y should be of dimension [None, n_y]. Hint. 
End of explanation """ # GRADED FUNCTION: initialize_parameters def initialize_parameters(): """ Initializes weight parameters to build a neural network with tensorflow. The shapes are: W1 : [4, 4, 3, 8] W2 : [2, 2, 8, 16] Returns: parameters -- a dictionary of tensors containing W1, W2 """ tf.set_random_seed(1) # so that your "random" numbers match ours ### START CODE HERE ### (approx. 2 lines of code) W1 = tf.get_variable('W1', [4, 4, 3, 8], initializer=tf.contrib.layers.xavier_initializer(seed=0)) W2 = tf.get_variable('W2', [2, 2, 8, 16], initializer=tf.contrib.layers.xavier_initializer(seed=0)) ### END CODE HERE ### parameters = {"W1": W1, "W2": W2} return parameters tf.reset_default_graph() with tf.Session() as sess_test: parameters = initialize_parameters() init = tf.global_variables_initializer() sess_test.run(init) print("W1 = " + str(parameters["W1"].eval()[1,1,1])) print("W2 = " + str(parameters["W2"].eval()[1,1,1])) """ Explanation: Expected Output <table> <tr> <td> X = Tensor("Placeholder:0", shape=(?, 64, 64, 3), dtype=float32) </td> </tr> <tr> <td> Y = Tensor("Placeholder_1:0", shape=(?, 6), dtype=float32) </td> </tr> </table> 1.2 - Initialize parameters You will initialize weights/filters $W1$ and $W2$ using tf.contrib.layers.xavier_initializer(seed = 0). You don't need to worry about bias variables as you will soon see that TensorFlow functions take care of the bias. Note also that you will only initialize the weights/filters for the conv2d functions. TensorFlow initializes the layers for the fully connected part automatically. We will talk more about that later in this assignment. Exercise: Implement initialize_parameters(). The dimensions for each group of filters are provided below. Reminder - to initialize a parameter $W$ of shape [1,2,3,4] in Tensorflow, use: python W = tf.get_variable("W", [1,2,3,4], initializer = ...) More Info. 
End of explanation """ # GRADED FUNCTION: forward_propagation def forward_propagation(X, parameters): """ Implements the forward propagation for the model: CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED Arguments: X -- input dataset placeholder, of shape (input size, number of examples) parameters -- python dictionary containing your parameters "W1", "W2" the shapes are given in initialize_parameters Returns: Z3 -- the output of the last LINEAR unit """ # Retrieve the parameters from the dictionary "parameters" W1 = parameters['W1'] W2 = parameters['W2'] ### START CODE HERE ### # CONV2D: stride of 1, padding 'SAME' Z1 = tf.nn.conv2d(X, W1, strides = [1, 1, 1, 1],padding = 'SAME') # RELU A1 = tf.nn.relu(Z1) # MAXPOOL: window 8x8, sride 8, padding 'SAME' P1 = tf.nn.max_pool(A1, [1, 8, 8, 1], [1, 8, 8, 1], 'SAME') # CONV2D: filters W2, stride 1, padding 'SAME' Z2 = tf.nn.conv2d(P1, W2, strides = [1, 1, 1, 1],padding = 'SAME') # RELU A2 = tf.nn.relu(Z2) # MAXPOOL: window 4x4, stride 4, padding 'SAME' P2 = tf.nn.max_pool(A2, [1, 4, 4, 1], [1, 4, 4, 1], 'SAME') # FLATTEN P2 = tf.contrib.layers.flatten(P2) # FULLY-CONNECTED without non-linear activation function (not not call softmax). # 6 neurons in output layer. 
Hint: one of the arguments should be "activation_fn=None" Z3 = tf.contrib.layers.fully_connected(P2, 6, activation_fn = None) ### END CODE HERE ### return Z3 tf.reset_default_graph() with tf.Session() as sess: np.random.seed(1) X, Y = create_placeholders(64, 64, 3, 6) parameters = initialize_parameters() Z3 = forward_propagation(X, parameters) init = tf.global_variables_initializer() sess.run(init) a = sess.run(Z3, {X: np.random.randn(2,64,64,3), Y: np.random.randn(2,6)}) print("Z3 = " + str(a)) """ Explanation: Expected Output: <table> <tr> <td> W1 = </td> <td> [ 0.00131723 0.14176141 -0.04434952 0.09197326 0.14984085 -0.03514394 <br> -0.06847463 0.05245192] </td> </tr> <tr> <td> W2 = </td> <td> [-0.08566415 0.17750949 0.11974221 0.16773748 -0.0830943 -0.08058 <br> -0.00577033 -0.14643836 0.24162132 -0.05857408 -0.19055021 0.1345228 <br> -0.22779644 -0.1601823 -0.16117483 -0.10286498] </td> </tr> </table> 1.2 - Forward propagation In TensorFlow, there are built-in functions that carry out the convolution steps for you. tf.nn.conv2d(X,W1, strides = [1,s,s,1], padding = 'SAME'): given an input $X$ and a group of filters $W1$, this function convolves $W1$'s filters on X. The third input ([1,f,f,1]) represents the strides for each dimension of the input (m, n_H_prev, n_W_prev, n_C_prev). You can read the full documentation here tf.nn.max_pool(A, ksize = [1,f,f,1], strides = [1,s,s,1], padding = 'SAME'): given an input A, this function uses a window of size (f, f) and strides of size (s, s) to carry out max pooling over each window. You can read the full documentation here tf.nn.relu(Z1): computes the elementwise ReLU of Z1 (which can be any shape). You can read the full documentation here. tf.contrib.layers.flatten(P): given an input P, this function flattens each example into a 1D vector it while maintaining the batch-size. It returns a flattened tensor with shape [batch_size, k]. You can read the full documentation here. 
tf.contrib.layers.fully_connected(F, num_outputs): given the flattened input F, it returns the output computed using a fully connected layer.
End of explanation """ # GRADED FUNCTION: compute_cost def compute_cost(Z3, Y): """ Computes the cost Arguments: Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples) Y -- "true" labels vector placeholder, same shape as Z3 Returns: cost - Tensor of the cost function """ ### START CODE HERE ### (1 line of code) cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = Z3, labels = Y)) ### END CODE HERE ### return cost tf.reset_default_graph() with tf.Session() as sess: np.random.seed(1) X, Y = create_placeholders(64, 64, 3, 6) parameters = initialize_parameters() Z3 = forward_propagation(X, parameters) cost = compute_cost(Z3, Y) init = tf.global_variables_initializer() sess.run(init) a = sess.run(cost, {X: np.random.randn(4,64,64,3), Y: np.random.randn(4,6)}) print("cost = " + str(a)) """ Explanation: Expected Output: <table> <td> Z3 = </td> <td> [[-0.44670227 -1.57208765 -1.53049231 -2.31013036 -1.29104376 0.46852064] <br> [-0.17601591 -1.57972014 -1.4737016 -2.61672091 -1.00810647 0.5747785 ]] </td> </table> 1.3 - Compute cost Implement the compute cost function below. You might find these two functions helpful: tf.nn.softmax_cross_entropy_with_logits(logits = Z3, labels = Y): computes the softmax entropy loss. This function both computes the softmax activation function as well as the resulting loss. You can check the full documentation here. tf.reduce_mean: computes the mean of elements across dimensions of a tensor. Use this to sum the losses over all the examples to get the overall cost. You can check the full documentation here. Exercise: Compute the cost below using the function above. 
# GRADED FUNCTION: compute_cost

def compute_cost(Z3, Y):
    """
    Computes the cost

    Arguments:
    Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
    Y -- "true" labels vector placeholder, same shape as Z3

    Returns:
    cost - Tensor of the cost function
    """
    ### START CODE HERE ### (1 line of code)
    # softmax_cross_entropy_with_logits fuses the softmax activation and the
    # cross-entropy loss; reduce_mean averages the per-example losses.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = Z3, labels = Y))
    ### END CODE HERE ###

    return cost


# GRADED FUNCTION: model

def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.009,
          num_epochs = 100, minibatch_size = 64, print_cost = True):
    """
    Implements a three-layer ConvNet in Tensorflow:
    CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED

    Arguments:
    X_train -- training set, of shape (None, 64, 64, 3)
    Y_train -- training labels, of shape (None, n_y = 6)
    X_test -- test set, of shape (None, 64, 64, 3)
    Y_test -- test labels, of shape (None, n_y = 6)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 5 epochs

    Returns:
    train_accuracy -- real number, accuracy on the train set (X_train)
    test_accuracy -- real number, testing accuracy on the test set (X_test)
    parameters -- parameters learnt by the model. They can then be used to predict.
    """

    ops.reset_default_graph()                         # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)                             # to keep results consistent (tensorflow seed)
    seed = 3                                          # to keep results consistent (numpy seed)
    (m, n_H0, n_W0, n_C0) = X_train.shape
    n_y = Y_train.shape[1]
    costs = []                                        # To keep track of the cost

    # Create Placeholders of the correct shape
    ### START CODE HERE ### (1 line)
    X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
    ### END CODE HERE ###

    # Initialize parameters
    ### START CODE HERE ### (1 line)
    parameters = initialize_parameters()
    ### END CODE HERE ###

    # Forward propagation: Build the forward propagation in the tensorflow graph
    ### START CODE HERE ### (1 line)
    Z3 = forward_propagation(X, parameters)
    ### END CODE HERE ###

    # Cost function: Add cost function to tensorflow graph
    ### START CODE HERE ### (1 line)
    cost = compute_cost(Z3, Y)
    ### END CODE HERE ###

    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer that minimizes the cost.
    ### START CODE HERE ### (1 line)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    ### END CODE HERE ###

    # Initialize all the variables globally
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:

        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):

            minibatch_cost = 0.
            num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
            # Re-seed each epoch so the minibatch shuffling is deterministic
            # but different from one epoch to the next.
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the optimizer and the cost, the feedict should contain a minibatch for (X,Y).
                ### START CODE HERE ### (1 line)
                _ , temp_cost = sess.run([optimizer, cost], feed_dict = {X:minibatch_X, Y:minibatch_Y})
                ### END CODE HERE ###

                # Accumulate the epoch cost as the average of minibatch costs.
                minibatch_cost += temp_cost / num_minibatches

            # Print the cost every epoch
            if print_cost == True and epoch % 5 == 0:
                print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
            if print_cost == True and epoch % 1 == 0:
                costs.append(minibatch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # Calculate the correct predictions (argmax over the 6 class logits)
        predict_op = tf.argmax(Z3, 1)
        correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))

        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        # NOTE(review): this prints the accuracy Tensor object, not its value;
        # presumably left in for debugging.
        print(accuracy)
        train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
        test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
        print("Train Accuracy:", train_accuracy)
        print("Test Accuracy:", test_accuracy)

        return train_accuracy, test_accuracy, parameters
you will merge the helper functions you implemented above to build a model. You will train it on the SIGNS dataset. You have implemented random_mini_batches() in the Optimization programming assignment of course 2. Remember that this function returns a list of mini-batches. Exercise: Complete the function below. The model below should: create placeholders initialize parameters forward propagate compute the cost create an optimizer Finally you will create a session and run a for loop for num_epochs, get the mini-batches, and then for each mini-batch you will optimize the function. Hint for initializing the variables End of explanation """ _, _, parameters = model(X_train, Y_train, X_test, Y_test) """ Explanation: Run the following cell to train your model for 100 epochs. Check if your cost after epoch 0 and 5 matches our output. If not, stop the cell and go back to your code! End of explanation """ fname = "images/thumbs_up.jpg" image = np.array(ndimage.imread(fname, flatten=False)) my_image = scipy.misc.imresize(image, size=(64,64)) plt.imshow(my_image) """ Explanation: Expected output: although it may not match perfectly, your expected output should be close to ours and your cost value should decrease. <table> <tr> <td> **Cost after epoch 0 =** </td> <td> 1.917929 </td> </tr> <tr> <td> **Cost after epoch 5 =** </td> <td> 1.506757 </td> </tr> <tr> <td> **Train Accuracy =** </td> <td> 0.940741 </td> </tr> <tr> <td> **Test Accuracy =** </td> <td> 0.783333 </td> </tr> </table> Congratulations! You have finised the assignment and built a model that recognizes SIGN language with almost 80% accuracy on the test set. If you wish, feel free to play around with this dataset further. You can actually improve its accuracy by spending more time tuning the hyperparameters, or using regularization (as this model clearly has a high variance). Once again, here's a thumbs up for your work! End of explanation """
cathalmccabe/PYNQ
boards/Pynq-Z1/base/notebooks/microblaze/microblaze_python_libraries.ipynb
bsd-3-clause
from pynq.overlays.base import BaseOverlay from pynq.lib import MicroblazeLibrary base = BaseOverlay('base.bit') lib = MicroblazeLibrary(base.PMODA, ['i2c', 'pmod_grove']) """ Explanation: Microblaze Python Libraries In addition to using the pynqmb libraries from C it is also possible to create Python wrappers for the libraries directly. PYNQ provides the MicroblazeLibrary class for this purpose. The MicroblazeLibrary class takes a list of libraries as a construction parameter which should be the names of the header files desired without the .h file extension. All of the constants and functions will then become members of the instance of the class. For this example we are going to interact with the Grove ADC device attached to a Pmod-Grove Adapter. We are going to want the i2c library for interacting with the device and the pmod_grove library to find the pins we want to connect to. End of explanation """ dir(lib) """ Explanation: We can now inspect the lib to see all of the functions we can call and constants we have access. End of explanation """ device = lib.i2c_open(lib.PMOD_G4_B, lib.PMOD_G4_A) """ Explanation: Next we need to open our I2C device using the i2c_open function. This will return us an i2c object we can use for interacting with the bus. End of explanation """ dir(device) """ Explanation: We can check the functions we can call by using dir again. End of explanation """ buf = bytearray(2) buf[0] = 0 device.write(0x50, buf, 1) device.read(0x50, buf, 2) ((buf[0] & 0x0F) << 8) | buf[1] """ Explanation: The Grove ADC responds to address 0x50 and to read from it we need to write the register we want (0 for the result) and then read the two bytes back. End of explanation """
jinntrance/MOOC
coursera/ml-classification/assignments/module-6-decision-tree-practical-assignment-blank.ipynb
cc0-1.0
import graphlab """ Explanation: Decision Trees in Practice In this assignment we will explore various techniques for preventing overfitting in decision trees. We will extend the implementation of the binary decision trees that we implemented in the previous assignment. You will have to use your solutions from this previous assignment and extend them. In this assignment you will: Implement binary decision trees with different early stopping methods. Compare models with different stopping parameters. Visualize the concept of overfitting in decision trees. Let's get started! Fire up GraphLab Create Make sure you have the latest version of GraphLab Create. End of explanation """ loans = graphlab.SFrame('lending-club-data.gl/') """ Explanation: Load LendingClub Dataset This assignment will use the LendingClub dataset used in the previous two assignments. End of explanation """ loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1) loans = loans.remove_column('bad_loans') """ Explanation: As before, we reassign the labels to have +1 for a safe loan, and -1 for a risky (bad) loan. End of explanation """ features = ['grade', # grade of the loan 'term', # the term of the loan 'home_ownership', # home_ownership status: own, mortgage or rent 'emp_length', # number of years of employment ] target = 'safe_loans' loans = loans[features + [target]] """ Explanation: We will be using the same 4 categorical features as in the previous assignment: 1. grade of the loan 2. the length of the loan term 3. the home ownership status: own, mortgage, rent 4. number of years of employment. In the dataset, each of these features is a categorical feature. Since we are building a binary decision tree, we will have to convert this to binary data in a subsequent section using 1-hot encoding. 
End of explanation """ safe_loans_raw = loans[loans[target] == 1] risky_loans_raw = loans[loans[target] == -1] # Since there are less risky loans than safe loans, find the ratio of the sizes # and use that percentage to undersample the safe loans. percentage = len(risky_loans_raw)/float(len(safe_loans_raw)) safe_loans = safe_loans_raw.sample(percentage, seed = 1) risky_loans = risky_loans_raw loans_data = risky_loans.append(safe_loans) print "Percentage of safe loans :", len(safe_loans) / float(len(loans_data)) print "Percentage of risky loans :", len(risky_loans) / float(len(loans_data)) print "Total number of loans in our new dataset :", len(loans_data) """ Explanation: Subsample dataset to make sure classes are balanced Just as we did in the previous assignment, we will undersample the larger class (safe loans) in order to balance out our dataset. This means we are throwing away many data points. We used seed = 1 so everyone gets the same results. End of explanation """ loans_data = risky_loans.append(safe_loans) for feature in features: loans_data_one_hot_encoded = loans_data[feature].apply(lambda x: {x: 1}) loans_data_unpacked = loans_data_one_hot_encoded.unpack(column_name_prefix=feature) # Change None's to 0's for column in loans_data_unpacked.column_names(): loans_data_unpacked[column] = loans_data_unpacked[column].fillna(0) loans_data.remove_column(feature) loans_data.add_columns(loans_data_unpacked) """ Explanation: Note: There are many approaches for dealing with imbalanced data, including some where we modify the learning algorithm. These approaches are beyond the scope of this course, but some of them are reviewed in this paper. For this assignment, we use the simplest possible approach, where we subsample the overly represented class to get a more balanced dataset. In general, and especially when the data is highly imbalanced, we recommend using more advanced methods. 
Transform categorical data into binary features Since we are implementing binary decision trees, we transform our categorical data into binary data using 1-hot encoding, just as in the previous assignment. Here is the summary of that discussion: For instance, the home_ownership feature represents the home ownership status of the loanee, which is either own, mortgage or rent. For example, if a data point has the feature {'home_ownership': 'RENT'} we want to turn this into three features: { 'home_ownership = OWN' : 0, 'home_ownership = MORTGAGE' : 0, 'home_ownership = RENT' : 1 } Since this code requires a few Python and GraphLab tricks, feel free to use this block of code as is. Refer to the API documentation for a deeper understanding. End of explanation """ features = loans_data.column_names() features.remove('safe_loans') # Remove the response variable features """ Explanation: The feature columns now look like this: End of explanation """ train_data, validation_set = loans_data.random_split(.8, seed=1) """ Explanation: Train-Validation split We split the data into a train-validation split with 80% of the data in the training set and 20% of the data in the validation set. We use seed=1 so that everyone gets the same result. End of explanation """ def reached_minimum_node_size(data, min_node_size): # Return True if the number of data points is less than or equal to the minimum node size. ## YOUR CODE HERE return len(data) <= min_node_size """ Explanation: Early stopping methods for decision trees In this section, we will extend the binary tree implementation from the previous assignment in order to handle some early stopping conditions. Recall the 3 early stopping methods that were discussed in lecture: Reached a maximum depth. (set by parameter max_depth). Reached a minimum node size. (set by parameter min_node_size). Don't split if the gain in error reduction is too small. (set by parameter min_error_reduction). 
def reached_minimum_node_size(data, min_node_size):
    """Early stopping condition 2.

    Return True when the node holds `min_node_size` or fewer data points,
    i.e. it is too small to be worth splitting and should become a leaf.
    """
    return len(data) <= min_node_size


def error_reduction(error_before_split, error_after_split):
    """Early stopping condition 3 helper.

    Return the gain in classification error obtained by a split:
    error before the split minus error after the split.
    """
    return error_before_split - error_after_split


def intermediate_node_num_mistakes(labels_in_node):
    """Number of mistakes made by majority-class prediction on +1/-1 labels.

    The majority classifier mislabels every minority-class point, so the
    answer is the size of the smaller class. An empty node makes 0 mistakes.
    (Ties return the count of either class, matching the original behavior
    of predicting -1 on a tie.)
    """
    if len(labels_in_node) == 0:
        return 0
    num_safe = sum(1 for label in labels_in_node if label == +1)
    num_risky = sum(1 for label in labels_in_node if label == -1)
    return min(num_safe, num_risky)


def best_splitting_feature(data, features, target):
    """Return the binary feature in `features` whose 0/1 split of `data`
    gives the lowest classification error on the `target` column.

    Ties are broken in favor of the earliest feature in `features`.
    """
    best_feature = None
    best_error = 10  # classification error is always <= 1, so any real split beats this
    num_data_points = float(len(data))

    for feature in features:
        # Partition the data on the binary feature value.
        left_split = data[data[feature] == 0]
        right_split = data[data[feature] == 1]

        # Mistakes made by majority-class prediction inside each partition.
        # CONSISTENCY FIX: use the `target` parameter instead of the
        # hard-coded 'safe_loans' column name.
        left_mistakes = intermediate_node_num_mistakes(left_split[target])
        right_mistakes = intermediate_node_num_mistakes(right_split[target])

        # Classification error of this candidate split.
        error = (left_mistakes + right_mistakes) / num_data_points

        if error < best_error:
            best_error = error
            best_feature = feature

    return best_feature


def create_leaf(target_values):
    """Create a leaf node predicting the majority class of `target_values`.

    Ties (including an empty node) predict -1, matching the original code.
    """
    num_ones = len(target_values[target_values == +1])
    num_minus_ones = len(target_values[target_values == -1])
    return {
        'splitting_feature': None,
        'left': None,
        'right': None,
        'is_leaf': True,
        'prediction': +1 if num_ones > num_minus_ones else -1,
    }


def decision_tree_create(data, features, target, current_depth = 0,
                         max_depth = 10, min_node_size=1, min_error_reduction=0.0):
    """Recursively build a binary decision tree with early stopping.

    Stopping conditions:
      1. the node is pure (no mistakes), 2. no features remain.
    Early stopping conditions:
      1. `max_depth` reached, 2. node smaller than `min_node_size`,
      3. the best split reduces error by at most `min_error_reduction`.

    Returns a node dict with keys 'is_leaf', 'prediction',
    'splitting_feature', 'left', 'right'.
    """
    remaining_features = features[:]  # copy so the caller's list is untouched
    target_values = data[target]
    print("--------------------------------------------------------------------")
    print("Subtree, depth = %s (%s data points)." % (current_depth, len(target_values)))

    # Stopping condition 1: all data points in the node share one label.
    if intermediate_node_num_mistakes(target_values) == 0:
        print("Stopping condition 1 reached. All data points have the same target value.")
        return create_leaf(target_values)

    # Stopping condition 2: no features left to split on.
    if remaining_features == []:
        print("Stopping condition 2 reached. No remaining features.")
        return create_leaf(target_values)

    # Early stopping condition 1: maximum depth reached.
    if current_depth >= max_depth:
        print("Early stopping condition 1 reached. Reached maximum depth.")
        return create_leaf(target_values)

    # Early stopping condition 2: node too small to split.
    if reached_minimum_node_size(data, min_node_size):
        print("Early stopping condition 2 reached. Reached minimum node size.")
        return create_leaf(target_values)

    # Find the best feature and the resulting partition.
    splitting_feature = best_splitting_feature(data, remaining_features, target)
    left_split = data[data[splitting_feature] == 0]
    right_split = data[data[splitting_feature] == 1]

    # Early stopping condition 3: the split must reduce the error by more
    # than min_error_reduction, otherwise make a leaf.
    error_before_split = intermediate_node_num_mistakes(target_values) / float(len(data))
    left_mistakes = intermediate_node_num_mistakes(left_split[target])
    right_mistakes = intermediate_node_num_mistakes(right_split[target])
    error_after_split = (left_mistakes + right_mistakes) / float(len(data))
    if error_reduction(error_before_split, error_after_split) <= min_error_reduction:
        print("Early stopping condition 3 reached. Minimum error reduction.")
        return create_leaf(target_values)

    remaining_features.remove(splitting_feature)
    print("Split on feature %s. (%s, %s)" % (splitting_feature, len(left_split), len(right_split)))

    # Recurse on both subtrees.
    # BUG FIX: the original dropped min_node_size and min_error_reduction on
    # the right-subtree call, so the right branch silently fell back to the
    # defaults (min_node_size=1, min_error_reduction=0.0) regardless of what
    # the caller requested. Both calls now forward all early-stopping params.
    left_tree = decision_tree_create(left_split, remaining_features, target,
                                     current_depth + 1, max_depth,
                                     min_node_size, min_error_reduction)
    right_tree = decision_tree_create(right_split, remaining_features, target,
                                      current_depth + 1, max_depth,
                                      min_node_size, min_error_reduction)

    return {'is_leaf': False,
            'prediction': None,
            'splitting_feature': splitting_feature,
            'left': left_tree,
            'right': right_tree}
You implemented above the functions needed to detect these conditions. The 1st early stopping condition, max_depth, was implemented in the previous assigment and you will not need to reimplement this. In addition to these early stopping conditions, the typical stopping conditions of having no mistakes or no more features to split on (which we denote by "stopping conditions" 1 and 2) are also included as in the previous assignment. Implementing early stopping condition 2: minimum node size: Step 1: Use the function reached_minimum_node_size that you implemented earlier to write an if condition to detect whether we have hit the base case, i.e., the node does not have enough data points and should be turned into a leaf. Don't forget to use the min_node_size argument. Step 2: Return a leaf. This line of code should be the same as the other (pre-implemented) stopping conditions. Implementing early stopping condition 3: minimum error reduction: Note: This has to come after finding the best splitting feature so we can calculate the error after splitting in order to calculate the error reduction. Step 1: Calculate the classification error before splitting. Recall that classification error is defined as: $$ \text{classification error} = \frac{\text{# mistakes}}{\text{# total examples}} $$ * Step 2: Calculate the classification error after splitting. This requires calculating the number of mistakes in the left and right splits, and then dividing by the total number of examples. * Step 3: Use the function error_reduction to that you implemented earlier to write an if condition to detect whether the reduction in error is less than the constant provided (min_error_reduction). Don't forget to use that argument. * Step 4: Return a leaf. This line of code should be the same as the other (pre-implemented) stopping conditions. Fill in the places where you find ## YOUR CODE HERE. There are seven places in this function for you to fill in. 
def count_nodes(tree):
    """Return the total number of nodes (internal nodes plus leaves) in the tree."""
    if tree['is_leaf']:
        return 1
    subtree_total = count_nodes(tree['left']) + count_nodes(tree['right'])
    return subtree_total + 1


def classify(tree, x, annotate = False):
    """Predict the label (+1 or -1) for data point `x` by walking the tree.

    Set annotate=True to print the decision path taken.
    """
    # Leaf: the walk is over, report the stored prediction.
    if tree['is_leaf']:
        if annotate:
            print("At leaf, predicting %s" % tree['prediction'])
        return tree['prediction']

    # Internal node: follow the branch selected by the binary feature value.
    split_feature_value = x[tree['splitting_feature']]
    if annotate:
        print("Split on %s = %s" % (tree['splitting_feature'], split_feature_value))
    branch = tree['left'] if split_feature_value == 0 else tree['right']
    return classify(branch, x, annotate)
def evaluate_classification_error(tree, data):
    """Return the fraction of points in `data` that `tree` misclassifies.

    Expects `data` to be an SFrame carrying the true labels in its
    'safe_loans' column.
    """
    # Classify every row, then count the rows the tree got right.
    predictions = data.apply(lambda point: classify(tree, point))
    correct = data[data['safe_loans'] == predictions]
    # Error = 1 - accuracy.
    return 1.0 - correct.num_rows() / float(data.num_rows())
Train three models with these parameters: model_1: max_depth = 2 (too small) model_2: max_depth = 6 (just right) model_3: max_depth = 14 (may be too large) For each of these three, we set min_node_size = 0 and min_error_reduction = -1. Note: Each tree can take up to a few minutes to train. In particular, model_3 will probably take the longest to train. End of explanation """ print "Training data, classification error (model 1):", evaluate_classification_error(model_1, train_data) print "Training data, classification error (model 2):", evaluate_classification_error(model_2, train_data) print "Training data, classification error (model 3):", evaluate_classification_error(model_3, train_data) """ Explanation: Evaluating the models Let us evaluate the models on the train and validation data. Let us start by evaluating the classification error on the training data: End of explanation """ print "Training data, classification error (model 1):", evaluate_classification_error(model_1, validation_set) print "Training data, classification error (model 2):", evaluate_classification_error(model_2, validation_set) print "Training data, classification error (model 3):", evaluate_classification_error(model_3, validation_set) """ Explanation: Now evaluate the classification error on the validation data. End of explanation """ def count_leaves(tree): if tree['is_leaf']: return 1 return count_leaves(tree['left']) + count_leaves(tree['right']) """ Explanation: Quiz Question: Which tree has the smallest error on the validation data? Quiz Question: Does the tree with the smallest error in the training data also have the smallest error in the validation data? Quiz Question: Is it always true that the tree with the lowest classification error on the training set will result in the lowest classification error in the validation set? Measuring the complexity of the tree Recall in the lecture that we talked about deeper trees being more complex. 
We will measure the complexity of the tree as complexity(T) = number of leaves in the tree T
Here, we provide a function count_leaves that counts the number of leaves in a tree. Using this implementation, compute the number of leaves in model_1, model_2, and model_3.
End of explanation
"""

print count_leaves(model_1)
print count_leaves(model_2)
print count_leaves(model_3)
"""
Explanation: Compute the number of leaves in model_1, model_2, and model_3.
End of explanation
"""

model_4 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6, min_node_size = 0, min_error_reduction=-1)
model_5 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6, min_node_size = 0, min_error_reduction=0)
model_6 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6, min_node_size = 0, min_error_reduction=5)
"""
Explanation: Quiz question: Which tree has the largest complexity?
Quiz question: Is it always true that the most complex tree will result in the lowest classification error in the validation_set?
Exploring the effect of min_error
We will compare three models trained with different values of the stopping criterion. We intentionally picked models at the extreme ends (negative, just right, and too positive).
Train three models with these parameters:
1. model_4: min_error_reduction = -1 (ignoring this early stopping condition)
2. model_5: min_error_reduction = 0 (just right)
3. model_6: min_error_reduction = 5 (too positive)
For each of these three, we set max_depth = 6, and min_node_size = 0.
Note: Each tree can take up to 30 seconds to train.
End of explanation """ print "Validation data, classification error (model 4):", evaluate_classification_error(model_4, validation_set) print "Validation data, classification error (model 5):", evaluate_classification_error(model_5, validation_set) print "Validation data, classification error (model 6):", evaluate_classification_error(model_6, validation_set) """ Explanation: Calculate the accuracy of each model (model_4, model_5, or model_6) on the validation set. End of explanation """ print count_leaves(model_4) print count_leaves(model_5) print count_leaves(model_6) """ Explanation: Using the count_leaves function, compute the number of leaves in each of each models in (model_4, model_5, and model_6). End of explanation """ model_7 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6, min_node_size = 0, min_error_reduction=-1) model_8 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6, min_node_size = 2000, min_error_reduction=-1) model_9 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6, min_node_size = 50000, min_error_reduction=-1) """ Explanation: Quiz Question: Using the complexity definition above, which model (model_4, model_5, or model_6) has the largest complexity? Did this match your expectation? Quiz Question: model_4 and model_5 have similar classification error on the validation set but model_5 has lower complexity? Should you pick model_5 over model_4? Exploring the effect of min_node_size We will compare three models trained with different values of the stopping criterion. Again, intentionally picked models at the extreme ends (too small, just right, and just right). Train three models with these parameters: 1. model_7: min_node_size = 0 (too small) 2. model_8: min_node_size = 2000 (just right) 3. model_9: min_node_size = 50000 (too large) For each of these three, we set max_depth = 6, and min_error_reduction = -1. Note: Each tree can take up to 30 seconds to train. 
End of explanation """ print "Validation data, classification error (model 7):", evaluate_classification_error(model_7, validation_set) print "Validation data, classification error (model 8):", evaluate_classification_error(model_8, validation_set) print "Validation data, classification error (model 9):", evaluate_classification_error(model_9, validation_set) """ Explanation: Now, let us evaluate the models (model_7, model_8, or model_9) on the validation_set. End of explanation """ print count_leaves(model_7) print count_leaves(model_8) print count_leaves(model_9) """ Explanation: Using the count_leaves function, compute the number of leaves in each of each models (model_7, model_8, and model_9). End of explanation """
ajgpitch/qutip-notebooks
examples/control-pulseoptim-symplectic.ipynb
lgpl-3.0
%matplotlib inline import numpy as np import matplotlib.pyplot as plt import datetime from qutip import Qobj, identity, sigmax, sigmay, sigmaz, tensor from qutip.qip import hadamard_transform import qutip.logging_utils as logging logger = logging.get_logger() #Set this to None or logging.WARN for 'quiet' execution log_level = logging.INFO #QuTiP control modules import qutip.control.pulseoptim as cpo import qutip.control.symplectic as sympl example_name = 'Symplectic' """ Explanation: Calculation of control fields for symplectic dynamics using L-BFGS-B algorithm Alexander Pitchford (agp1@aber.ac.uk) Example to demonstrate using the control library to determine control pulses using the ctrlpulseoptim.optimize_pulse function. The (default) L-BFGS-B algorithm is used to optimise the pulse to minimise the fidelity error, which in this case is given by the 'Trace difference' norm. This in a Symplectic quantum system example, with two coupled oscillators The user can experiment with the timeslicing, by means of changing the number of timeslots and/or total time for the evolution. Different initial (starting) pulse types can be tried. The initial and final pulses are displayed in a plot This example assumes that the example-control-pulseoptim-Hadamard has already been tried, and hence explanations in that notebook are not repeated here. 
End of explanation """ #Drift w1 = 1 w2 = 1 g1 = 0.5 A0 = Qobj(np.array([[w1, 0, g1, 0], [0, w1, 0, g1], [g1, 0, w2, 0], [0, g1, 0, w2]])) #Control Ac = Qobj(np.array([[1, 0, 0, 0,], \ [0, 1, 0, 0], \ [0, 0, 0, 0], \ [0, 0, 0, 0]])) ctrls = [Ac] n_ctrls = len(ctrls) initial = identity(4) # Target a = 1 Ag = np.array([[0, 0, a, 0], [0, 0, 0, a], [a, 0, 0, 0], [0, a, 0, 0]]) Sg = Qobj(sympl.calc_omega(2).dot(Ag)).expm() """ Explanation: Defining the physics End of explanation """ # Number of time slots n_ts = 1000 # Time allowed for the evolution evo_time = 10 """ Explanation: Defining the time evolution parameters End of explanation """ # Fidelity error target fid_err_targ = 1e-10 # Maximum iterations for the optisation algorithm max_iter = 500 # Maximum (elapsed) time allowed in seconds max_wall_time = 30 # Minimum gradient (sum of gradients squared) # as this tends to 0 -> local minima has been found min_grad = 1e-20 """ Explanation: Set the conditions which will cause the pulse optimisation to terminate End of explanation """ # pulse type alternatives: RND|ZERO|LIN|SINE|SQUARE|SAW|TRIANGLE| p_type = 'ZERO' """ Explanation: Set the initial pulse type End of explanation """ #Set to None to suppress output files f_ext = "{}_n_ts{}_ptype{}.txt".format(example_name, n_ts, p_type) """ Explanation: Give an extension for output files End of explanation """ # Note that this call uses # dyn_type='SYMPL' # This means that matrices that describe the dynamics are assumed to be # Symplectic, i.e. the propagator can be calculated using # expm(combined_dynamics.omega*dt) # This has defaults for: # prop_type='FRECHET' # therefore the propagators and their gradients will be calculated using the # Frechet method, i.e. an exact gradient # fid_type='TRACEDIFF' # so that the fidelity error, i.e. 
distance from the target, is given
# by the trace of the difference between the target and evolved operators
result = cpo.optimize_pulse(A0, ctrls, initial, Sg, n_ts, evo_time,
                fid_err_targ=fid_err_targ, min_grad=min_grad,
                max_iter=max_iter, max_wall_time=max_wall_time,
                dyn_type='SYMPL',
                out_file_ext=f_ext, init_pulse_type=p_type,
                log_level=log_level, gen_stats=True)
"""
Explanation: Run the optimisation
End of explanation
"""

# Print the optimisation statistics and a summary of the final result.
# NOTE(review): the message "Final gradient normal" presumably means
# "Final gradient norm" — text left unchanged here.
result.stats.report()
print("Final evolution\n{}\n".format(result.evo_full_final))
print("********* Summary *****************")
print("Final fidelity error {}".format(result.fid_err))
print("Final gradient normal {}".format(result.grad_norm_final))
print("Terminated due to {}".format(result.termination_reason))
print("Number of iterations {}".format(result.num_iter))
print("Completed in {} HH:MM:SS.US".format(datetime.timedelta(seconds=result.wall_time)))
"""
Explanation: Report the results
End of explanation
"""

# Two stacked panels: initial control amplitudes on top, optimised below.
# Each amplitude's last value is repeated so the final step is drawn.
fig1 = plt.figure()
ax1 = fig1.add_subplot(2, 1, 1)
ax1.set_title("Initial Control amps")
ax1.set_xlabel("Time")
ax1.set_ylabel("Control amplitude")
for j in range(n_ctrls):
    ax1.step(result.time,
             np.hstack((result.initial_amps[:, j], result.initial_amps[-1, j])),
             where='post')

ax2 = fig1.add_subplot(2, 1, 2)
ax2.set_title("Optimised Control Amplitudes")
ax2.set_xlabel("Time")
ax2.set_ylabel("Control amplitude")
for j in range(n_ctrls):
    ax2.step(result.time,
             np.hstack((result.final_amps[:, j], result.final_amps[-1, j])),
             where='post')
plt.tight_layout()
plt.show()
"""
Explanation: Plot the initial and final amplitudes
End of explanation
"""

from qutip.ipynbtools import version_table

version_table()
"""
Explanation: Versions
End of explanation
"""
sgratzl/ipython-tutorial-VA2015
03_Plotting_solution.ipynb
cc0-1.0
#disable some annoying warning import warnings warnings.filterwarnings('ignore', category=FutureWarning) #plots the figures in place instead of a new window %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import numpy as np #use a standard dataset of heterogenous data cars = pd.read_csv('data/mtcars.csv') cars.head() """ Explanation: (Interactive) Plotting using Matplotlib and Seaborn Matplotlib is basic plotting library for Python inspired by Matlab. Seaborn is built on top of it with integrated analysis and specialized plots + pretty good integration with Pandas Also see the full gallery of Seaborn or Matplotlib. End of explanation """ plt.scatter(x=cars['mpg'],y=cars['wt']) plt.xlabel('miles per gallon') plt.ylabel('weight') plt.title('MPG vs WT') plt.show() #integrated in pandas, too cars.plot(x='mpg',y='wt',kind='scatter') cars.plot(kind='scatter', x='mpg',y='wt',c='hp',s=cars['cyl']*20,alpha=0.5) #what if we plot everything? cars.plot() """ Explanation: Scatterplot End of explanation """ cars['mpg'].hist(bins=5) plt.hist(cars['mpg'],bins=5) plt.title('miles per gallon') #seaborn not just a histogram but also an kernel density enstimation and better default settings sns.distplot(cars['mpg'],bins=5) """ Explanation: Histogram End of explanation """ #box plots cars['mpg'].plot(kind='box') cars.boxplot('mpg') #group by gear cars.boxplot('mpg', by='gear') # load gapminder again and select 2007 gap = pd.read_csv('data/gapminder-unfiltered.tsv',index_col=0, sep='\t') gap2007 = gap[gap.year == 2007] gap2007.columns """ Explanation: Box Plots End of explanation """ gap2007.plot(kind='scatter', x='lifeExp',y='gdpPercap') """ Explanation: Log Scale End of explanation """ gap2007.plot(kind='scatter', x='lifeExp',y='gdpPercap') plt.yscale('log') """ Explanation: unbalanced with outliers what about log scale? 
End of explanation """ #create a color palette colors = sns.color_palette() sns.palplot(colors) #for each group create an own plot an overlay them for (name, group),color in zip(gap2007.groupby('continent'),colors): plt.scatter(x=group['lifeExp'],y=group['gdpPercap'],label=name, c=color,s=30) plt.yscale('log') plt.legend() #playing with categories ... seaborn is pretty good with it plt.figure(figsize=(40,20)) plt.subplot(121) sns.boxplot(x='continent',y='gdpPercap',data=gap) plt.subplot(122) sns.violinplot(x='continent',y='gdpPercap',data=gap2007) # or with linear regression anscombe = sns.load_dataset("anscombe") sns.lmplot('x','y',col='dataset',hue='dataset', data=anscombe, col_wrap=2) #g = sns.FacetGrid(anscombe, col="dataset", size=4, aspect=1) #g.map(sns.regplot, "x", "y") # or with structured heatmaps #compute the correlations and take a look at them corrmat = gap.corr() # draw a clustered heatmap using seaborn sns.clustermap(corrmat, square=True) """ Explanation: Grouping / Coloring Plots grouped by color? 
End of explanation """ #for each group create an own plot an overlay them pop_max = gap2007['pop'].max() for (name, group),color in zip(gap2007.groupby('continent'),colors): plt.scatter(x=group['lifeExp'],y=group['gdpPercap'],label=name, c=color,s=(group['pop']/pop_max)*400) plt.yscale('log') plt.title('Life Expectancy vs GDP') plt.xlabel('Life Expectancy') plt.ylabel('GDP Per Cap') plt.legend() """ Explanation: TASK create a scatterplot where * x = lifeExp * y = gdpPerCap * color = continent * size = pop label the axis appropiately and use a log scale for gdp End of explanation """ from IPython.html.widgets import interact, interact_manual @interact(text='Hello', slider=(0,10),check=True,categories=['red','green','blue']) def react(text, slider,check,categories): print(text,slider*10,check,categories) @interact_manual(text='Hello', slider=(0,10),check=True,categories=['red','green','blue']) def react(text, slider,check,categories): print(text,slider*10,check,categories) @interact(bins=(5, 25, 5),color=['red','green','orange','blue']) def show_distplot(bins,color): cars['mpg'].hist(bins=bins, color=color) """ Explanation: Interactive plots simple interaction is possible with IPython by default. That means whenever the user changes some parameter the visualization is recreated on the server side and send to the client. 
End of explanation """ #hard core from IPython.html import widgets [widget for widget in dir(widgets) if not widget.endswith('Widget') and widget[0] == widget[0].upper() and widget[0] != '_'] @interact(bins=widgets.FloatTextWidget(value=5)) def show_distplot(bins): cars['mpg'].hist(bins=bins) text_widget = widgets.Textarea(value='Hello', description='text area') slider_widget = widgets.BoundedFloatText(5,min=0,max=10, description='slider area') check_widget = widgets.Checkbox(True,description="CheckboxWidget") toggle = widgets.RadioButtons(options=['red','green','blue'], description="RadioButtonsWidget") @interact(text=text_widget, slider=slider_widget,check=check_widget,categories=toggle) def react(text, slider,check,categories): print(text,slider*10,check,categories) b = widgets.Button(description="Update") checkbox = widgets.Checkbox(description="CheckboxWidget") tab1_children = [b, checkbox, widgets.Dropdown(options=['A','B'], description="DropdownWidget"), widgets.RadioButtons(options=['A','B'], description="RadioButtonsWidget"), widgets.Select(options=['A','B'], description="SelectWidget"), widgets.Text(description="TextWidget"), widgets.Textarea(description="TextareaWidget"), widgets.ToggleButton(description="ToggleButtonWidget"), widgets.ToggleButtons(options=["Value 1", "Value2"], description="ToggleButtonsWidget"), ] tab2_children = [widgets.BoundedFloatText(description="BoundedFloatTextWidget"), widgets.BoundedIntText(description="BoundedIntTextWidget"), widgets.FloatSlider(description="FloatSliderWidget"), widgets.FloatText(description="FloatTextWidget"), widgets.IntSlider(description="IntSliderWidget"), widgets.IntText(description="IntTextWidget"), ] tab1 = widgets.Box(children=tab1_children) tab2 = widgets.Box(children=tab2_children) i = widgets.Accordion(children=[tab1, tab2]) i.set_title(0,"Basic Widgets") i.set_title(1,"Numbers Input") from IPython.display import display def button_clicked(bb): print(checkbox.value) #TODO update plot 
b.on_click(button_clicked)
display(i)
"""
Explanation: custom build widgets: http://nbviewer.ipython.org/github/ipython/ipython/blob/3.x/examples/Interactive%20Widgets/Widget%20List.ipynb
End of explanation
"""

pop_max = gap['pop'].max()

@interact(year=(gap.year.min(), gap.year.max()))
def plot_gapminder(year):
    # Interactive gapminder-style bubble chart for a single year:
    # x = life expectancy, y = GDP per capita (log scale), colour = continent,
    # bubble size = population (scaled by the global maximum population).
    gapyear = gap[gap.year == year]
    for (name, group), color in zip(gapyear.groupby('continent'), colors):
        plt.scatter(x=group['lifeExp'], y=group['gdpPercap'], label=name,
                    c=color, s=(group['pop'] / pop_max) * 400)
    plt.yscale('log')
    plt.title('Life Expectancy vs GDP')
    plt.xlabel('Life Expectancy')
    plt.ylabel('GDP Per Cap')
    # Fix: GDP per capita is plotted on the y axis, so its limits must be set
    # with ylim. The original called xlim twice, making the first call a no-op
    # and leaving the y range to rescale on every year change.
    plt.ylim(gap.gdpPercap.min(), gap.gdpPercap.max())
    plt.xlim(gap.lifeExp.min(), gap.lifeExp.max())
    plt.legend()
"""
Explanation: TASK
make the plot from before interactive, such that you can slide the year
End of explanation
"""
maartenbreddels/vaex
docs/source/tutorial_ml.ipynb
mit
import vaex vaex.multithreading.thread_count_default = 8 import vaex.ml import numpy as np import pylab as plt """ Explanation: Machine Learning with vaex.ml If you want to try out this notebook with a live Python kernel, use mybinder: <a class="reference external image-reference" href="https://mybinder.org/v2/gh/vaexio/vaex/latest?filepath=docs%2Fsource%2Ftutorial_ml.ipynb"><img alt="https://mybinder.org/badge_logo.svg" src="https://mybinder.org/badge_logo.svg" width="150px"></a> The vaex.ml package brings some machine learning algorithms to vaex. If you installed the individual subpackages (vaex-core, vaex-hdf5, ...) instead of the vaex metapackage, you may need to install it by running pip install vaex-ml, or conda install -c conda-forge vaex-ml. The API of vaex.ml stays close to that of scikit-learn, while providing better performance and the ability to efficiently perform operations on data that is larger than the available RAM. This page is an overview and a brief introduction to the capabilities offered by vaex.ml. End of explanation """ df = vaex.ml.datasets.load_iris() df df.scatter(df.petal_length, df.petal_width, c_expr=df.class_); """ Explanation: We will use the well known Iris flower and Titanic passenger list datasets, two classical datasets for machine learning demonstrations. End of explanation """ features = ['petal_length', 'petal_width', 'sepal_length', 'sepal_width'] scaler = vaex.ml.StandardScaler(features=features, prefix='scaled_') scaler.fit(df) df_trans = scaler.transform(df) df_trans """ Explanation: Preprocessing Scaling of numerical features vaex.ml packs the common numerical scalers: vaex.ml.StandardScaler - Scale features by removing their mean and dividing by their variance; vaex.ml.MinMaxScaler - Scale features to a given range; vaex.ml.RobustScaler - Scale features by removing their median and scaling them according to a given percentile range; vaex.ml.MaxAbsScaler - Scale features by their maximum absolute value. 
The usage is quite similar to that of scikit-learn, in the sense that each transformer implements the .fit and .transform methods. End of explanation """ df = vaex.ml.datasets.load_titanic() df.head(5) label_encoder = vaex.ml.LabelEncoder(features=['embarked']) one_hot_encoder = vaex.ml.OneHotEncoder(features=['embarked']) freq_encoder = vaex.ml.FrequencyEncoder(features=['embarked']) bayes_encoder = vaex.ml.BayesianTargetEncoder(features=['embarked'], target='survived') woe_encoder = vaex.ml.WeightOfEvidenceEncoder(features=['embarked'], target='survived') df = label_encoder.fit_transform(df) df = one_hot_encoder.fit_transform(df) df = freq_encoder.fit_transform(df) df = bayes_encoder.fit_transform(df) df = woe_encoder.fit_transform(df) df.head(5) """ Explanation: The output of the .transform method of any vaex.ml transformer is a shallow copy of a DataFrame that contains the resulting features of the transformations in addition to the original columns. A shallow copy means that this new DataFrame just references the original one, and no extra memory is used. In addition, the resulting features, in this case the scaled numerical features are virtual columns, which do not take any memory but are computed on the fly when needed. This approach is ideal for working with very large datasets. Encoding of categorical features vaex.ml contains several categorical encoders: vaex.ml.LabelEncoder - Encoding features with as many integers as categories, startinfg from 0; vaex.ml.OneHotEncoder - Encoding features according to the one-hot scheme; vaex.ml.FrequencyEncoder - Encode features by the frequency of their respective categories; vaex.ml.BayesianTargetEncoder - Encode categories with the mean of their target value; vaex.ml.WeightOfEvidenceEncoder - Encode categories their weight of evidence value. The following is a quick example using the Titanic dataset. 
End of explanation """ kbdisc = vaex.ml.KBinsDiscretizer(features=['age'], n_bins=5, strategy='quantile') df = kbdisc.fit_transform(df) df.head(5) """ Explanation: Notice that the transformed features are all included in the resulting DataFrame and are appropriately named. This is excellent for the construction of various diagnostic plots, and engineering of more complex features. The fact that the resulting (encoded) features take no memory, allows one to try out or combine a variety of preprocessing steps without spending any extra memory. Feature Engineering KBinsDiscretizer With the KBinsDiscretizer you can convert a continous into a discrete feature by binning the data into specified intervals. You can specify the number of bins, the strategy on how to determine their size: "uniform" - all bins have equal sizes; "quantile" - all bins have (approximately) the same number of samples in them; "kmeans" - values in each bin belong to the same 1D cluster as determined by the KMeans algorithm. End of explanation """ gbt = vaex.ml.GroupByTransformer(by='pclass', agg={'age': ['mean', 'std'], 'fare': ['mean', 'std'], }) df = gbt.fit_transform(df) df.head(5) """ Explanation: GroupBy Transformer The GroupByTransformer is a handy feature in vaex-ml that lets you perform a groupby aggregations on the training data, and then use those aggregations as features in the training and test sets. End of explanation """ df = vaex.ml.datasets.load_iris_1e9() n_samples = len(df) print(f'Number of samples in DataFrame: {n_samples:,}') features = ['petal_length', 'petal_width', 'sepal_length', 'sepal_width'] pca = vaex.ml.PCA(features=features, n_components=4, progress=True) pca.fit(df) """ Explanation: Dimensionality reduction Principal Component Analysis The PCA implemented in vaex.ml can scale to a very large number of samples, even if that data we want to transform does not fit into RAM. To demonstrate this, let us do a PCA transformation on the Iris dataset. 
For this example, we have replicated this dataset thousands of times, such that it contains over 1 billion samples. End of explanation """ df_trans = pca.transform(df) df_trans """ Explanation: The PCA transformer implemented in vaex.ml can be fit in well under a minute, even when the data comprises 4 columns and 1 billion rows. End of explanation """ import vaex.ml.cluster df = vaex.ml.datasets.load_iris() features = ['petal_length', 'petal_width', 'sepal_length', 'sepal_width'] kmeans = vaex.ml.cluster.KMeans(features=features, n_clusters=3, max_iter=100, verbose=True, random_state=42) kmeans.fit(df) df_trans = kmeans.transform(df) df_trans """ Explanation: Recall that the transformed DataFrame, which includes the PCA components, takes no extra memory. Clustering K-Means vaex.ml implements a fast and scalable K-Means clustering algorithm. The usage is similar to that of scikit-learn. End of explanation """ df_trans['predicted_kmean_map'] = df_trans.prediction_kmeans.map(mapper={0: 1, 1: 2, 2: 0}) df_trans """ Explanation: K-Means is an unsupervised algorithm, meaning that the predicted cluster labels in the transformed dataset do not necessarily correspond to the class label. We can map the predicted cluster identifiers to match the class labels, making it easier to construct diagnostic plots. End of explanation """ fig = plt.figure(figsize=(12, 5)) plt.subplot(121) df_trans.scatter(df_trans.petal_length, df_trans.petal_width, c_expr=df_trans.class_) plt.title('Original classes') plt.subplot(122) df_trans.scatter(df_trans.petal_length, df_trans.petal_width, c_expr=df_trans.predicted_kmean_map) plt.title('Predicted classes') plt.tight_layout() plt.show() """ Explanation: Now we can construct simple scatter plots, and see that in the case of the Iris dataset, K-Means does a pretty good job splitting the data into 3 classes. 
End of explanation """ df = vaex.ml.datasets.load_iris_1e9() n_samples = len(df) print(f'Number of samples in DataFrame: {n_samples:,}') %%time features = ['petal_length', 'petal_width', 'sepal_length', 'sepal_width'] kmeans = vaex.ml.cluster.KMeans(features=features, n_clusters=3, max_iter=100, verbose=True, random_state=31) kmeans.fit(df) """ Explanation: As with any algorithm implemented in vaex.ml, K-Means can be used on billions of samples. Fitting takes under 2 minutes when applied on the oversampled Iris dataset, numbering over 1 billion samples. End of explanation """ from vaex.ml.sklearn import Predictor from sklearn.ensemble import GradientBoostingClassifier df = vaex.ml.datasets.load_iris() features = ['petal_length', 'petal_width', 'sepal_length', 'sepal_width'] target = 'class_' model = GradientBoostingClassifier(random_state=42) vaex_model = Predictor(features=features, target=target, model=model, prediction_name='prediction') vaex_model.fit(df=df) df = vaex_model.transform(df) df """ Explanation: Supervised learning While vaex.ml does not yet implement any supervised machine learning models, it does provide wrappers to several popular libraries such as scikit-learn, XGBoost, LightGBM and CatBoost. The main benefit of these wrappers is that they turn the models into vaex.ml transformers. This means the models become part of the DataFrame state and thus can be serialized, and their predictions can be returned as virtual columns. This is especially useful for creating various diagnostic plots and evaluating performance metrics at no memory cost, as well as building ensembles. Scikit-Learn example The vaex.ml.sklearn module provides convenient wrappers to the scikit-learn estimators. In fact, these wrappers can be used with any library that follows the API convention established by scikit-learn, i.e. implements the .fit and .transform methods. 
Here is an example: End of explanation """ from vaex.ml.sklearn import IncrementalPredictor from sklearn.linear_model import SGDClassifier df = vaex.ml.datasets.load_iris_1e9() features = ['petal_length', 'petal_width', 'sepal_length', 'sepal_width'] target = 'class_' model = SGDClassifier(learning_rate='constant', eta0=0.0001, random_state=42) vaex_model = IncrementalPredictor(features=features, target=target, model=model, batch_size=11_000_000, partial_fit_kwargs={'classes':[0, 1, 2]}) vaex_model.fit(df=df, progress='widget') df = vaex_model.transform(df) df """ Explanation: One can still train a predictive model on datasets that are too big to fit into memory by leveraging the on-line learners provided by scikit-learn. The vaex.ml.sklearn.IncrementalPredictor conveniently wraps these learners and provides control on how the data is passed to them from a vaex DataFrame. Let us train a model on the oversampled Iris dataset which comprises over 1 billion samples. End of explanation """ from vaex.ml.xgboost import XGBoostModel df = vaex.ml.datasets.load_iris_1e5() df_train, df_test = df.ml.train_test_split(test_size=0.2, verbose=False) features = ['petal_length', 'petal_width', 'sepal_length', 'sepal_width'] target = 'class_' params = {'learning_rate': 0.1, 'max_depth': 3, 'num_class': 3, 'objective': 'multi:softmax', 'subsample': 1, 'random_state': 42, 'n_jobs': -1} booster = XGBoostModel(features=features, target=target, num_boost_round=500, params=params) booster.fit(df=df_train, evals=[(df_train, 'train'), (df_test, 'test')], early_stopping_rounds=5) df_test = booster.transform(df_train) df_test """ Explanation: XGBoost example Libraries such as XGBoost provide more options such as validation during training and early stopping for example. We provide wrappers that keeps close to the native API of these libraries, in addition to the scikit-learn API. 
While the following example showcases the XGBoost wrapper, vaex.ml implements similar wrappers for LightGBM and CatBoost. End of explanation """ from vaex.ml.catboost import CatBoostModel df = vaex.ml.datasets.load_iris_1e8() df_train, df_test = df.ml.train_test_split(test_size=0.2, verbose=False) features = ['petal_length', 'petal_width', 'sepal_length', 'sepal_width'] target = 'class_' params = { 'leaf_estimation_method': 'Gradient', 'learning_rate': 0.1, 'max_depth': 3, 'bootstrap_type': 'Bernoulli', 'subsample': 0.8, 'sampling_frequency': 'PerTree', 'colsample_bylevel': 0.8, 'reg_lambda': 1, 'objective': 'MultiClass', 'eval_metric': 'MultiClass', 'random_state': 42, 'verbose': 0, } booster = CatBoostModel(features=features, target=target, num_boost_round=23, params=params, prediction_type='Class', batch_size=11_000_000) booster.fit(df=df_train, progress='widget') df_test = booster.transform(df_train) df_test """ Explanation: CatBoost example The CatBoost library supports summing up models. With this feature, we can use CatBoost to train a model using data that is otherwise too large to fit in memory. The idea is to train a single CatBoost model per chunk of data, and than sum up the invidiual models to create a master model. To use this feature via vaex.ml just specify the batch_size argument in the CatBoostModel wrapper. One can also specify additional options such as the strategy on how to sum up the individual models, or how they should be weighted. 
End of explanation """ # Load data and split it in train and test sets df = vaex.ml.datasets.load_iris() df_train, df_test = df.ml.train_test_split(test_size=0.2, verbose=False) # Create new features df_train['petal_ratio'] = df_train.petal_length / df_train.petal_width df_train['sepal_ratio'] = df_train.sepal_length / df_train.sepal_width # Do a PCA transformation features = ['petal_length', 'petal_width', 'sepal_length', 'sepal_width', 'petal_ratio', 'sepal_ratio'] pca = vaex.ml.PCA(features=features, n_components=6) df_train = pca.fit_transform(df_train) # Display the training DataFrame at this stage df_train """ Explanation: State transfer - pipelines made easy Each vaex DataFrame consists of two parts: data and state. The data is immutable, and any operation such as filtering, adding new columns, or applying transformers or predictive models just modifies the state. This is extremely powerful concept and can completely redefine how we imagine machine learning pipelines. As an example, let us once again create a model based on the Iris dataset. Here, we will create a couple of new features, do a PCA transformation, and finally train a predictive model. End of explanation """ import lightgbm features = df_train.get_column_names(regex='^PCA') booster = lightgbm.LGBMClassifier() vaex_model = Predictor(model=booster, features=features, target='class_') vaex_model.fit(df=df_train) df_train = vaex_model.transform(df_train) df_train """ Explanation: At this point, we are ready to train a predictive model. In this example, let's use LightGBM with its scikit-learn API. End of explanation """ state = df_train.state_get() df_test.state_set(state) df_test """ Explanation: The final df_train DataFrame contains all the features we created, including the predictions right at the end. Now, we would like to apply the same transformations to the test set. All we need to do, is to simply extract the state from df_train and apply it to df_test. 
This will propagate all the changes that were made to the training set on the test set. End of explanation """ df_train.state_write('./iris_model.json') df_test.state_load('./iris_model.json') df_test """ Explanation: And just like that df_test contains all the columns, transformations and the prediction we modelled on the training set. The state can be easily serialized to disk in a form of a JSON file. This makes deployment of a machine learning model as trivial as simply copying a JSON file from one environment to another. End of explanation """
jmhsi/justin_tinker
data_science/lendingclub_bak/csv_dl_preparation/clean_pmt_history_2.ipynb
apache-2.0
import dir_constants as dc from tqdm import tqdm_notebook def find_dupe_dates(group): return pd.to_datetime(group[group.duplicated('date')]['date'].values) def merge_dupe_dates(group): df_chunks = [] dupe_dates = find_dupe_dates(group) df_chunks.append(group[~group['date'].isin(dupe_dates)]) for date in dupe_dates: problem_rows = group[group['date'] == date] ori_index = problem_rows.index keep_row = problem_rows.iloc[-1].to_dict() keep_row['outs_princp_beg'] = problem_rows.ix[ori_index[0],column_iloc_map['outs_princp_beg']] summed = problem_rows.sum() keep_row['princp_paid'] = summed['princp_paid'] keep_row['int_paid'] = summed['int_paid'] keep_row['fee_paid'] = summed['fee_paid'] keep_row['amt_due'] = summed['amt_due'] keep_row['amt_paid'] = summed['amt_paid'] keep_row['charged_off_amt'] = summed['charged_off_amt'] keep_row['recovs'] = summed['recovs'] keep_row['recov_fees'] = summed['recov_fees'] df_chunks.append(pd.DataFrame(pd.Series(keep_row),columns=[ori_index[-1]]).T) return pd.concat(df_chunks) platform = 'lendingclub' store = pd.HDFStore( dc.home_path+'/justin_tinkering/data_science/lendingclub/{0}_store.h5'. 
format(platform), append=True) """ Explanation: Author: Justin Hsi Part 2 of cleaning lending club payment history End of explanation """ pmt_hist_ids = store['pmt_hist_ids'].astype(int) max_id = pmt_hist_ids.max() chunksize = 800 n_chunks = len(pmt_hist_ids)//chunksize + 1 # fix loans with double month entries _________________________________________ # left_bound = 0 # right_bound = pmt_hist_ids[chunksize] already_good_dfs = [] fixed_dfs = [] k = 0 for n in tqdm_notebook(np.arange(n_chunks)): if n == 0: left_bound = 0 else: left_bound = pmt_hist_ids[n*chunksize] if n == (n_chunks - 1): right_bound = max_id else: right_bound = pmt_hist_ids[(n+1)*chunksize] chunk = pd.read_hdf( store, 'pmt_hist_intermediary_1', where='(loan_id_num > left_bound) & (loan_id_num <= right_bound)') loans_with_two_entries_in_same_month = chunk[chunk.duplicated( ['loan_id', 'date'])] dup_date_ids = loans_with_two_entries_in_same_month['loan_id'].unique() if k == 0: column_iloc_map = { col_name: chunk.iloc[-1].index.get_loc(col_name) for col_name in chunk.columns.values } k += 1 id_grouped = chunk.groupby('loan_id') already_good = chunk[~chunk['loan_id'].isin(dup_date_ids)] for ids, group in id_grouped: if ids in dup_date_ids: fixed_dfs.append(merge_dupe_dates(group)) else: pass already_good_dfs.append(already_good) """ Explanation: There are loans that have multiple row entries per month (as in multiple pmts in same month) and there are also loans that don't have any entry for a month End of explanation """ # Create min_itemsize_dict for allocating size when storing ___________________ min_itemsize_dict = {} for col in already_good.columns: if already_good[col].dtype == np.object: print(col, already_good[col].str.len().max()) if col in ['State', 'VINTAGE', 'grade']: pass else: min_itemsize_dict[col] = 15 col_dtype_map = already_good_dfs[0].dtypes.to_dict() all_fixed_dfs = pd.concat(fixed_dfs) for col, dtype in col_dtype_map.items(): all_fixed_dfs[col] = all_fixed_dfs[col].astype(dtype) k 
= 0 for chunk in tqdm_notebook([all_fixed_dfs] + already_good_dfs): if k == 0: store.append( 'pmt_hist_intermediary_2', chunk, data_columns=True, index=True, append=False, min_itemsize=min_itemsize_dict) k += 1 else: store.append( 'pmt_hist_intermediary_2', chunk, data_columns=True, index=True, append=True) store.close() """ Explanation: store before next cleaning step End of explanation """
metpy/MetPy
v1.0/_downloads/8c91fa5ab51e12860cfa1e679eaa746d/xarray_tutorial.ipynb
bsd-3-clause
import numpy as np import xarray as xr # Any import of metpy will activate the accessors import metpy.calc as mpcalc from metpy.cbook import get_test_data from metpy.units import units """ Explanation: xarray with MetPy Tutorial xarray &lt;http://xarray.pydata.org/&gt;_ is a powerful Python package that provides N-dimensional labeled arrays and datasets following the Common Data Model. MetPy's suite of meteorological calculations are designed to integrate with xarray DataArrays as one of its two primary data models (the other being Pint Quantities). MetPy also provides DataArray and Dataset accessors (collections of methods and properties attached to the .metpy property) for coordinate/CRS and unit operations. Full information on MetPy's accessors is available in the :doc:appropriate section of the reference guide &lt;/api/generated/metpy.xarray&gt;, otherwise, continue on in this tutorial for a demonstration of the three main components of MetPy's integration with xarray (coordinates/coordinate reference systems, units, and calculations), as well as instructive examples for both CF-compliant and non-compliant datasets. First, some general imports... End of explanation """ # Open the netCDF file as a xarray Dataset data = xr.open_dataset(get_test_data('irma_gfs_example.nc', False)) # View a summary of the Dataset data """ Explanation: ...and opening some sample data to work with. End of explanation """ temperature = data['Temperature_isobaric'] temperature """ Explanation: While xarray can handle a wide variety of n-dimensional data (essentially anything that can be stored in a netCDF file), a common use case is working with gridded model output. Such model data can be obtained from a THREDDS Data Server using the siphon package &lt;https://unidata.github.io/siphon/&gt;_, but here we've used an example subset of GFS data from Hurrican Irma (September 5th, 2017) included in MetPy's test suite. 
Generally, a local file (or remote file via OPeNDAP) can be opened with xr.open_dataset("path"). Going back to the above object, this Dataset consists of dimensions and their associated coordinates, which in turn make up the axes along which the data variables are defined. The dataset also has a dictionary-like collection of attributes. What happens if we look at just a single data variable? End of explanation """ temperature.metpy.time """ Explanation: This is a DataArray, which stores just a single data variable with its associated coordinates and attributes. These individual DataArray\s are the kinds of objects that MetPy's calculations take as input (more on that in Calculations_ section below). If you are more interested in learning about xarray's terminology and data structures, see the terminology section &lt;http://xarray.pydata.org/en/stable/terminology.html&gt;_ of xarray's documenation. Coordinates and Coordinate Reference Systems MetPy's first set of helpers comes with identifying coordinate types. In a given dataset, coordinates can have a variety of different names and yet refer to the same type (such as "isobaric1" and "isobaric3" both referring to vertical isobaric coordinates). Following CF conventions, as well as using some fall-back regular expressions, MetPy can systematically identify coordinates of the following types: time vertical latitude y longitude x When identifying a single coordinate, it is best to use the property directly associated with that type End of explanation """ x, y = temperature.metpy.coordinates('x', 'y') """ Explanation: When accessing multiple coordinate types simultaneously, you can use the .coordinates() method to yield a generator for the respective coordinates End of explanation """ heights = data['Geopotential_height_isobaric'].metpy.sel( time='2017-09-05 18:00', vertical=50000. 
) """ Explanation: These coordinate type aliases can also be used in MetPy's wrapped .sel and .loc for indexing and selecting on DataArray\s. For example, to access 500 hPa heights at 1800Z, End of explanation """ # Parse full dataset data_parsed = data.metpy.parse_cf() # Parse subset of dataset data_subset = data.metpy.parse_cf([ 'u-component_of_wind_isobaric', 'v-component_of_wind_isobaric', 'Vertical_velocity_pressure_isobaric' ]) # Parse single variable relative_humidity = data.metpy.parse_cf('Relative_humidity_isobaric') """ Explanation: (Notice how we specified 50000 here without units...we'll go over a better alternative in the next section on units.) One point of warning: xarray's selection and indexing only works if these coordinates are dimension coordinates, meaning that they are 1D and share the name of their associated dimension. In practice, this means that you can't index a dataset that has 2D latitude and longitude coordinates by latitudes and longitudes, instead, you must index by the 1D y and x dimension coordinates. (What if these coordinates are missing, you may ask? See the final subsection on .assign_y_x for more details.) Beyond just the coordinates themselves, a common need for both calculations with and plots of geospatial data is knowing the coordinate reference system (CRS) on which the horizontal spatial coordinates are defined. MetPy follows the CF Conventions &lt;http://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html#grid-mappings-and-projections&gt;_ for its CRS definitions, which it then caches on the metpy_crs coordinate in order for it to persist through calculations and other array operations. There are two ways to do so in MetPy: First, if your dataset is already conforming to the CF Conventions, it will have a grid mapping variable that is associated with the other data variables by the grid_mapping attribute. 
This is automatically parsed via the .parse_cf() method: End of explanation """ temperature = data['Temperature_isobaric'].metpy.assign_crs( grid_mapping_name='latitude_longitude', earth_radius=6371229.0 ) temperature """ Explanation: If your dataset doesn't have a CF-conforming grid mapping variable, you can manually specify the CRS using the .assign_crs() method: End of explanation """ # Cartopy CRS, useful for plotting relative_humidity.metpy.cartopy_crs # pyproj CRS, useful for projection transformations and forward/backward azimuth and great # circle calculations temperature.metpy.pyproj_crs """ Explanation: Notice the newly added metpy_crs non-dimension coordinate. Now how can we use this in practice? For individual DataArrays\s, we can access the cartopy and pyproj objects corresponding to this CRS: End of explanation """ heights = heights.metpy.quantify() heights """ Explanation: Finally, there are times when a certain horizontal coordinate type is missing from your dataset, and you need the other, that is, you have latitude/longitude and need y/x, or visa versa. This is where the .assign_y_x and .assign_latitude_longitude methods come in handy. Our current GFS sample won't work to demonstrate this (since, on its latitude-longitude grid, y is latitude and x is longitude), so for more information, take a look at the Non-Compliant Dataset Example_ below, or view the accessor documentation. Units Since unit-aware calculations are a major part of the MetPy library, unit support is a major part of MetPy's xarray integration! One very important point of consideration is that xarray data variables (in both Dataset\s and DataArray\s) can store both unit-aware and unit-naive array types. Unit-naive array types will be used by default in xarray, so we need to convert to a unit-aware type if we want to use xarray operations while preserving unit correctness. 
MetPy provides the .quantify() method for this (named since we are turning the data stored inside the xarray object into a Pint Quantity object) End of explanation """ heights_mean = heights.mean('longitude') heights_mean """ Explanation: Notice how the units are now represented in the data itself, rather than as a text attribute. Now, even if we perform some kind of xarray operation (such as taking the zonal mean), the units are preserved End of explanation """ heights_mean_str_units = heights_mean.metpy.dequantify() heights_mean_str_units """ Explanation: However, this "quantification" is not without its consequences. By default, xarray loads its data lazily to conserve memory usage. Unless your data is chunked into a Dask array (using the chunks argument), this .quantify() method will load data into memory, which could slow your script or even cause your process to run out of memory. And so, we recommend subsetting your data before quantifying it. Also, these Pint Quantity data objects are not properly handled by xarray when writing to disk. 
And so, if you want to safely export your data, you will need to undo the quantification with the .dequantify() method, which converts your data back to a unit-naive array with the unit as a text attribute End of explanation """ heights_at_45_north = data['Geopotential_height_isobaric'].metpy.sel( latitude=45 * units.degrees_north, vertical=300 * units.hPa ) heights_at_45_north """ Explanation: Other useful unit integration features include: Unit-based selection/indexing: End of explanation """ temperature_degC = temperature[0].metpy.convert_units('degC') temperature_degC """ Explanation: Unit conversion: End of explanation """ heights_on_hPa_levels = heights.metpy.convert_coordinate_units('isobaric3', 'hPa') heights_on_hPa_levels['isobaric3'] """ Explanation: Unit conversion for coordinates: End of explanation """ heights_unit_array = heights.metpy.unit_array heights_unit_array """ Explanation: Accessing just the underlying unit array: End of explanation """ height_units = heights.metpy.units height_units """ Explanation: Accessing just the underlying units: End of explanation """ heights = data_parsed.metpy.parse_cf('Geopotential_height_isobaric').metpy.sel( time='2017-09-05 18:00', vertical=500 * units.hPa ) u_g, v_g = mpcalc.geostrophic_wind(heights) u_g """ Explanation: Calculations MetPy's xarray integration extends to its calcuation suite as well. 
Most grid-capable calculations (such as thermodynamics, kinematics, and smoothers) fully support xarray DataArray\s by accepting them as inputs, returning them as outputs, and automatically using the attached coordinate data/metadata to determine grid arguments End of explanation """ data_at_point = data.metpy.sel( time1='2017-09-05 12:00', latitude=40 * units.degrees_north, longitude=260 * units.degrees_east ) dewpoint = mpcalc.dewpoint_from_relative_humidity( data_at_point['Temperature_isobaric'], data_at_point['Relative_humidity_isobaric'] ) cape, cin = mpcalc.surface_based_cape_cin( data_at_point['isobaric3'], data_at_point['Temperature_isobaric'], dewpoint ) cape """ Explanation: For profile-based calculations (and most remaining calculations in the metpy.calc module), xarray DataArray\s are accepted as inputs, but the outputs remain Pint Quantities (typically scalars) End of explanation """ # Load data, parse it for a CF grid mapping, and promote lat/lon data variables to coordinates data = xr.open_dataset( get_test_data('narr_example.nc', False) ).metpy.parse_cf().set_coords(['lat', 'lon']) # Subset to only the data you need to save on memory usage subset = data.metpy.sel(isobaric=500 * units.hPa) # Quantify if you plan on performing xarray operations that need to maintain unit correctness subset = subset.metpy.quantify() # Perform calculations heights = mpcalc.smooth_gaussian(subset['Geopotential_height'], 5) subset['u_geo'], subset['v_geo'] = mpcalc.geostrophic_wind(heights) # Plot heights.plot() # Save output subset.metpy.dequantify().drop_vars('metpy_crs').to_netcdf('500hPa_analysis.nc') """ Explanation: A few remaining portions of MetPy's calculations (mainly the interpolation module and a few other functions) do not fully support xarray, and so, use of .values may be needed to convert to a bare NumPy array. For full information on xarray support for your function of interest, see the :doc:/api/index. 
CF-Compliant Dataset Example The GFS sample used throughout this tutorial so far has been an example of a CF-compliant dataset. These kinds of datasets are easiest to work with it MetPy, since most of the "xarray magic" uses CF metadata. For this kind of dataset, a typical workflow looks like the following End of explanation """ nonstandard = xr.Dataset({ 'temperature': (('y', 'x'), np.arange(0, 9).reshape(3, 3) * units.degC), 'y': ('y', np.arange(0, 3) * 1e5, {'units': 'km'}), 'x': ('x', np.arange(0, 3) * 1e5, {'units': 'km'}) }) # Add both CRS and then lat/lon coords using chained methods data = nonstandard.metpy.assign_crs( grid_mapping_name='lambert_conformal_conic', latitude_of_projection_origin=38.5, longitude_of_central_meridian=262.5, standard_parallel=38.5, earth_radius=6371229.0 ).metpy.assign_latitude_longitude() # Preview the changes data """ Explanation: Non-Compliant Dataset Example When CF metadata (such as grid mapping, coordinate attributes, etc.) are missing, a bit more work is required to manually supply the required information, for example, End of explanation """
ykLIU1982/dental
dental_plan_comp.ipynb
mit
def nonPremCost(maxY, unitCostPrev, rPrev, prevDeduct, costBase, rBase, baseDeduct): paidPrev = 0 paidBase = 0 # first, calculate the preventive services, suppose that 2 units per year totalCostPrev = unitCostPrev * 2 coveredPrev = min(maxY, max(0, totalCostPrev - prevDeduct) * rPrev) #print('coveredPrev=', coveredPrev) coveredBase = maxY - coveredPrev paidPrev = totalCostPrev - coveredPrev; #print('paidPrev=', paidPrev) # second, calculate the basic filling services, suppose that total cost is costBase if coveredBase <= 0: paidBase = costBase else: paidBase = costBase - min(coveredBase, rBase*max(0, costBase-baseDeduct)) extraCost = paidPrev + paidBase return extraCost """ Explanation: The function, nonPremCost, calculates the annual out-of-pocket dental expense for a single adult excluding the monthly premium given the following assumptions: 1) The adult receives preventive services (exam and cleanings) twice a year at a constant rate, unitCostPrev. The insurance package have a deductible for the preventive services at prevDeduct and a copayment coverage of rPrev, 50%-100%. 2) We only consider the adult only needs some basic teeth services beside preventive services. 3) The cost of basic teeth services per year is set in a range from 0 to $2000 per year. 4) There is a deductible for the basic services at baseDeduct and the cost beyond the deductible will be paid by the insurance company at the coverage ratio of rBase. 5) The total paid coverage by the insurance company is bounded by the maximum benefit, maxY, which depends on the selection of the dental insurance package ranging from \$500 to \$2000. 6) In the cases when the deductible is shared by preventive services and basic services, we assume that the preventive services will first claim the deductible. The coverage of basic services will be triggered immediately after the preventive services when the latter use up the deductible, which is usually the case. 
End of explanation """ unitCostPrev = 150 # cost estimate for each teeth cleaning x = [50*i for i in range(41)] # vector for total base cost # BlueCare lower $28.93 # nonPremCost(1000, unitCostPrev, 1, 100, costBase, 0.8, 100) yB1 = [nonPremCost(1000, unitCostPrev, 1, 100, costBase, 0.8, 100) for costBase in x] yB1t = [i+28.93*12 for i in yB1] # total out-of-pocket (absolute) yB1a = [i/.7+28.93*12 for i in yB1] # adjusted to pre-tax cost # BlueCare Higher $ 44.76 # nonPremCost(1000, unitCostPrev, 1, 0, costBase, 0.8, 60) yB2 = [nonPremCost(1000, unitCostPrev, 1, 0, costBase, 0.8, 60) for costBase in x] yB2t = [i+44.76*12 for i in yB2] yB2a = [i/.7+44.76*12 for i in yB2] # Metlife Option 1 $33.62 # nonPremCost(1000, unitCostPrev, 1, 0, costBase, 0.7, 75) yM1 = [nonPremCost(1000, unitCostPrev, 1, 0, costBase, 0.7, 75) for costBase in x] yM1t = [i+33.62*12 for i in yM1] yM1a = [i/.7+33.62*12 for i in yM1] # Metlife Option 2 $37.5 # nonPremCost(1500, unitCostPrev, 1, 0, costBase, 0.7, 50) yM2 = [nonPremCost(1500, unitCostPrev, 1, 0, costBase, 0.7, 50) for costBase in x] yM2t = [i+37.5*12 for i in yM2] yM2a = [i/.7+37.5*12 for i in yM2] # Metlife Option 3 $42.36 # nonPremCost(2000, unitCostPrev, 1, 0, costBase, 0.8, 25) yM3 = [nonPremCost(2000, unitCostPrev, 1, 0, costBase, 0.8, 25) for costBase in x] yM3t = [i+42.36*12 for i in yM3] yM3a = [i/.7+42.36*12 for i in yM3] # Guardian Option 1 $ 37.07 # nonPremCost(1000, unitCostPrev, 1, 50, costBase, 0.7, 0) yG1 = [nonPremCost(1000, unitCostPrev, 1, 50, costBase, 0.7, 0) for costBase in x] yG1t = [i+37.07*12 for i in yG1] yG1a = [i/.7+37.07*12 for i in yG1] # Guardian Option 2 $ 25.50 # nonPremCost(500, unitCostPrev, 0.8, 50, costBase, 0.5, 0) yG2 = [nonPremCost(500, unitCostPrev, 0.8, 50, costBase, 0.5, 0) for costBase in x] yG2t = [i+25.50*12 for i in yG2] yG2a = [i/.7+25.50*12 for i in yG2] # Metlife Option 4 $37.5 # nonPremCost(1500, unitCostPrev, 1, 0, costBase, 0.8, 50) yMg = [nonPremCost(1500, unitCostPrev, 1, 0, 
costBase, 0.8, 50) for costBase in x] yMgt = [i+37.5*12 for i in yMg] yMga = [i/.7+37.5*12 for i in yMg] y = [i+2*unitCostPrev for i in x] plotChoice = 2 if plotChoice == 0: plt.plot(x,yB1,'b--',x,yB2,'b^',x,yM1,'r--',x,yM2,'r^',x,yM3,'r*',x,yG1,'g--', x,yG2,'g^', x, yMg, 'ro') plt.title('Extra Dental Cost when preventive cost: %s' %(unitCostPrev*2)) plt.xlabel('Estimate of basic restorative service cost') plt.ylabel('Out-of-pocket copayment (w/o annual premium)') elif plotChoice == 1: plt.plot(x,y,'k', x,yB1t,'b--',x,yB2t,'b^',x,yM1t,'r--',x,yM2t,'r^',x,yM3t,'r*',x,yG1t,'g--', x,yG2t,'g^') plt.title('Dental Cost in the bill when preventive cost: %s' %(unitCostPrev*2)) plt.xlabel('Estimate of basic restorative service cost') plt.ylabel('Cost in bill (annual premium plus out-of-pocket copayment)') elif plotChoice == 2: plt.plot(x,y,'k',x,yB1a,'b--',x,yB2a,'b^',x,yM1a,'r--',x,yM2a,'r^',x,yM3a,'r*',x,yG1a,'g--', x,yG2a,'g^', x, yMga, 'ro') plt.title('Adjusted Pre-Tax Dental Cost when preventive cost: %s' %(unitCostPrev*2)) plt.xlabel('Estimate of basic restorative service cost') plt.ylabel('Adjusted pre-tax dental cost (annual premium plus out-of-pocket copayment)') else: print('no figure generateds') plt.show() """ Explanation: Here, we consider different dental insurance options which are available online for individual and group member plans. End of explanation """
kaleoyster/nbi-data-science
Deterioration Curves/(West) Deterioration+Curves+and+Classification+of+Bridges+in+the+West+United+States.ipynb
gpl-2.0
import pymongo
from pymongo import MongoClient
import time
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import csv
"""
Explanation: Libraries and Packages
End of explanation
"""
# Read-only connection to the Lab Benchwork NBI MongoDB hosted on the
# National Data Service; all queries below run against the 'bridges' collection.
Client = MongoClient("mongodb://bridges:readonly@nbi-mongo.admin/bridge")
db = Client.bridge
collection = db["bridges"]
"""
Explanation: Connecting to National Data Service: The Lab Benchwork's NBI - MongoDB instance
End of explanation
"""
def getData(state):
    """Fetch 1992-2016 NBI survey records for one state as a DataFrame.

    Parameters:
        state (str): two-digit FIPS state code, e.g. '08' for Colorado.

    Returns:
        pd.DataFrame with columns structureNumber, yearBuilt, deck, year,
        substructure, superstructure, plus a derived 'Age' column
        (survey year minus year built).
    """
    # $gt 1991 / $lt 2017 are exclusive bounds, i.e. survey years 1992-2016.
    pipeline = [{"$match":{"$and":[{"year":{"$gt":1991, "$lt":2017}},{"stateCode":state}]}},
                {"$project":{"_id":0,
                             "structureNumber":1,
                             "yearBuilt":1,
                             "deck":1, ## rating of deck
                             "year":1, ## survey year
                             "substructure":1, ## rating of substructure
                             "superstructure":1, ## rating of superstructure
                            }}]
    dec = collection.aggregate(pipeline)
    conditionRatings = pd.DataFrame(list(dec))
    # Age of the bridge at the time of each survey.
    conditionRatings['Age'] = conditionRatings['year'] - conditionRatings['yearBuilt']
    return conditionRatings
"""
Explanation: Deterioration Curves of West United states
For demonstration purposes, the results only focuses on the states in the West United States which includes: Colorado, Wyoming, Montana, Idaho, Washington, Oregon, Utah, Nevada, California, Alaska, Hawaii
The classification of the bridge into slow deteriorating, fast deteriorating, and average deteriorating is done based on bridge's rate of deterioration. Therefore, In this section will demonstrate how bridges deteriorate over time in the West United States.
To plot the deterioration curve of bridges in every state of West United States, bridges were grouped by their age. As a result, There are 60 groups of bridges from age 1 to 60, The mean of the condition rating of the deck, superstructure, and substructure of the bridge is plotted for every age.
Extracting Data of Northeast states of the United states from 1992 - 2016.
The following query will extract data from the mongoDB instance and project only selected attributes such as structure number, yearBuilt, deck, year, superstructure, and subtructure. End of explanation """ def getMeanRatings(state,startAge, endAge, startYear, endYear): conditionRatings = getData(state) conditionRatings = conditionRatings[['structureNumber','Age','superstructure','deck','substructure','year']] conditionRatings = conditionRatings.loc[~conditionRatings['superstructure'].isin(['N','NA'])] conditionRatings = conditionRatings.loc[~conditionRatings['substructure'].isin(['N','NA'])] conditionRatings = conditionRatings.loc[~conditionRatings['deck'].isin(['N','NA'])] #conditionRatings = conditionRatings.loc[~conditionRatings['Structure Type'].isin([19])] #conditionRatings = conditionRatings.loc[~conditionRatings['Type of Wearing Surface'].isin(['6'])] maxAge = conditionRatings['Age'].unique() tempConditionRatingsDataFrame = conditionRatings.loc[conditionRatings['year'].isin([i for i in range(startYear, endYear+1, 1)])] MeanDeck = [] StdDeck = [] MeanSubstructure = [] StdSubstructure = [] MeanSuperstructure = [] StdSuperstructure = [] ## start point of the age to be = 1 and ending point = 100 for age in range(startAge,endAge+1,1): ## Select all the bridges from with age = i tempAgeDf = tempConditionRatingsDataFrame.loc[tempConditionRatingsDataFrame['Age'] == age] ## type conversion deck rating into int listOfMeanDeckOfAge = list(tempAgeDf['deck']) listOfMeanDeckOfAge = [ int(deck) for deck in listOfMeanDeckOfAge ] ## takeing mean and standard deviation of deck rating at age i meanDeck = np.mean(listOfMeanDeckOfAge) stdDeck = np.std(listOfMeanDeckOfAge) ## type conversion substructure rating into int listOfMeanSubstructureOfAge = list(tempAgeDf['substructure']) listOfMeanSubstructureOfAge = [ int(substructure) for substructure in listOfMeanSubstructureOfAge ] meanSub = np.mean(listOfMeanSubstructureOfAge) stdSub = np.std(listOfMeanSubstructureOfAge) ## type 
conversion substructure rating into int listOfMeanSuperstructureOfAge = list(tempAgeDf['superstructure']) listOfMeanSuperstructureOfAge = [ int(superstructure) for superstructure in listOfMeanSuperstructureOfAge ] meanSup = np.mean(listOfMeanSuperstructureOfAge) stdSup = np.std(listOfMeanSuperstructureOfAge) #Append Deck MeanDeck.append(meanDeck) StdDeck.append(stdDeck) #Append Substructure MeanSubstructure.append(meanSub) StdSubstructure.append(stdSub) #Append Superstructure MeanSuperstructure.append(meanSup) StdSuperstructure.append(stdSup) return [MeanDeck, StdDeck ,MeanSubstructure, StdSubstructure, MeanSuperstructure, StdSuperstructure] """ Explanation: Filtering Null Values, Converting JSON format to Dataframes, and Calculating Mean Condition Ratings of Deck, Superstructure, and Substucture After NBI data is extracted. The Data has to be filtered to remove data points with missing values such as 'N', 'NA'. The mean condition rating for all the components: Deck, Substructure, and Superstructe, has to be calculated. 
End of explanation """ states = ['08','56','30','16','53','41','49','32','06','02','15'] # state code to state abbreviation stateNameDict = {'25':'MA', '04':'AZ', '08':'CO', '38':'ND', '09':'CT', '19':'IA', '26':'MI', '48':'TX', '35':'NM', '17':'IL', '51':'VA', '23':'ME', '16':'ID', '36':'NY', '56':'WY', '29':'MO', '39':'OH', '28':'MS', '11':'DC', '21':'KY', '18':'IN', '06':'CA', '47':'TN', '12':'FL', '24':'MD', '34':'NJ', '46':'SD', '13':'GA', '55':'WI', '30':'MT', '54':'WV', '15':'HI', '32':'NV', '37':'NC', '10':'DE', '33':'NH', '44':'RI', '50':'VT', '42':'PA', '05':'AR', '20':'KS', '45':'SC', '22':'LA', '40':'OK', '72':'PR', '41':'OR', '27':'MN', '53':'WA', '01':'AL', '31':'NE', '02':'AK', '49':'UT' } def getBulkMeanRatings(states, stateNameDict): # Initializaing the dataframes for deck, superstructure and subtructure df_mean_deck = pd.DataFrame({'Age':range(1,61)}) df_mean_sup = pd.DataFrame({'Age':range(1,61)}) df_mean_sub = pd.DataFrame({'Age':range(1,61)}) df_std_deck = pd.DataFrame({'Age':range(1,61)}) df_std_sup = pd.DataFrame({'Age':range(1,61)}) df_std_sub = pd.DataFrame({'Age':range(1,61)}) for state in states: meanDeck, stdDeck, meanSub, stdSub, meanSup, stdSup = getMeanRatings(state,1,100,1992,2016) stateName = stateNameDict[state] df_mean_deck[stateName] = meanDeck[:60] df_mean_sup[stateName] = meanSup[:60] df_mean_sub[stateName] = meanSub[:60] df_std_deck[stateName] = stdDeck[:60] df_std_sup[stateName] = stdSup[:60] df_std_sub[stateName] = stdSub[:60] return df_mean_deck, df_mean_sup, df_mean_sub, df_std_deck, df_std_sup, df_std_sub df_mean_deck, df_mean_sup, df_mean_sub, df_std_deck, df_std_sup, df_std_sub = getBulkMeanRatings(states, stateNameDict) """ Explanation: The calculated Mean Condition Ratings of deck, superstructure, and substructure are now stored in seperate dataframe for the convience. 
End of explanation """ %matplotlib inline palette = [ 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey', 'red','silver','purple', 'gold', 'black','olive' ] plt.figure(figsize = (10,8)) index = 0 for state in states: index = index + 1 stateName = stateNameDict[state] plt.plot(df_mean_deck['Age'],df_mean_deck[stateName], color = palette[index]) plt.legend([stateNameDict[state] for state in states],loc='upper right', ncol = 2) plt.xlim(1,60) plt.ylim(1,9) plt.title('Mean Deck Rating Vs Age') plt.xlabel('Age') plt.ylabel('Mean Deck Rating') plt.figure(figsize = (16,12)) plt.xlabel('Age') plt.ylabel('Mean') # Initialize the figure plt.style.use('seaborn-darkgrid') # create a color palette #palette = plt.get_cmap('gist_ncar') palette = [ 'blue', 'blue', 'green','magenta','cyan','brown','grey','red','silver','purple','gold','black','olive' ] # multiple line plot num=1 for column in df_mean_deck.drop('Age', axis=1): # Find the right spot on the plot plt.subplot(4,3, num) # Plot the lineplot plt.plot(df_mean_deck['Age'], df_mean_deck[column], marker='', color=palette[num], linewidth=4, alpha=0.9, label=column) # Same limits for everybody! 
plt.xlim(1,60) plt.ylim(1,9) # Not ticks everywhere if num in range(10) : plt.tick_params(labelbottom='off') if num not in [1,4,7,10]: plt.tick_params(labelleft='off') # Add title plt.title(column, loc='left', fontsize=12, fontweight=0, color=palette[num]) plt.text(30, -1, 'Age', ha='center', va='center') plt.text(1, 4, 'Mean Deck Rating', ha='center', va='center', rotation='vertical') num = num + 1 # general title plt.suptitle("Mean Deck Rating vs Age \nIndividual State Deterioration Curves", fontsize=13, fontweight=0, color='black', style='italic', y=1.02) """ Explanation: Deterioration Curves - Deck End of explanation """ palette = [ 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey', 'red','silver','purple', 'gold', 'black','olive' ] plt.figure(figsize = (10,8)) index = 0 for state in states: index = index + 1 stateName = stateNameDict[state] plt.plot(df_mean_sup['Age'],df_mean_sup[stateName], color = palette[index]) plt.legend([stateNameDict[state] for state in states],loc='upper right', ncol = 2) plt.xlim(1,60) plt.ylim(1,9) plt.title('Mean Superstructure Rating Vs Age') plt.xlabel('Age') plt.ylabel('Mean Superstructure Rating') plt.figure(figsize = (16,12)) plt.xlabel('Age') plt.ylabel('Mean') # Initialize the figure plt.style.use('seaborn-darkgrid') # create a color palette #palette = plt.get_cmap('gist_ncar') palette = [ 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey', 'red', 'silver', 'purple', 'gold', 'black', 'olive' ] # multiple line plot num=1 for column in df_mean_sup.drop('Age', axis=1): # Find the right spot on the plot plt.subplot(4,3, num) # Plot the lineplot plt.plot(df_mean_sup['Age'], df_mean_sup[column], marker='', color=palette[num], linewidth=4, alpha=0.9, label=column) # Same limits for everybody! 
plt.xlim(1,60) plt.ylim(1,9) # Not ticks everywhere if num in range(10) : plt.tick_params(labelbottom='off') if num not in [1,4,7,10]: plt.tick_params(labelleft='off') # Add title plt.title(column, loc='left', fontsize=12, fontweight=0, color=palette[num]) plt.text(30, -1, 'Age', ha='center', va='center') plt.text(1, 4, 'Mean Superstructure Rating', ha='center', va='center', rotation='vertical') num = num + 1 # general title plt.suptitle("Mean Superstructure Rating vs Age \nIndividual State Deterioration Curves", fontsize=13, fontweight=0, color='black', style='italic', y=1.02) palette = [ 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey', 'red','silver','purple', 'gold', 'black','olive' ] plt.figure(figsize = (10,8)) index = 0 for state in states: index = index + 1 stateName = stateNameDict[state] plt.plot(df_mean_sup['Age'],df_mean_sup[stateName], color = palette[index]) plt.legend([stateNameDict[state] for state in states],loc='upper right', ncol = 2) plt.xlim(1,60) plt.ylim(1,9) plt.title('Mean Superstructure Rating Vs Age') plt.xlabel('Age') plt.ylabel('Mean Superstructure Rating') """ Explanation: Deterioration Curve - Superstructure End of explanation """ palette = [ 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey', 'red','silver','purple', 'gold', 'black','olive' ] plt.figure(figsize = (10,8)) index = 0 for state in states: index = index + 1 stateName = stateNameDict[state] plt.plot(df_mean_sub['Age'],df_mean_sub[stateName], color = palette[index], linewidth=4) plt.legend([stateNameDict[state] for state in states],loc='upper right', ncol = 2) plt.xlim(1,60) plt.ylim(1,9) plt.title('Mean Substructure Rating Vs Age') plt.xlabel('Age') plt.ylabel('Mean Substructure Rating') plt.figure(figsize = (16,12)) plt.xlabel('Age') plt.ylabel('Mean') # Initialize the figure plt.style.use('seaborn-darkgrid') # create a color palette palette = [ 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey', 'red', 'silver', 'purple', 'gold', 
'black','olive' ] # multiple line plot num=1 for column in df_mean_sub.drop('Age', axis=1): # Find the right spot on the plot plt.subplot(4,3, num) # Plot the lineplot plt.plot(df_mean_sub['Age'], df_mean_sub[column], marker='', color=palette[num], linewidth=4, alpha=0.9, label=column) # Same limits for everybody! plt.xlim(1,60) plt.ylim(1,9) # Not ticks everywhere if num in range(7) : plt.tick_params(labelbottom='off') if num not in [1,4,7] : plt.tick_params(labelleft='off') # Add title plt.title(column, loc='left', fontsize=12, fontweight=0, color=palette[num]) plt.text(30, -1, 'Age', ha='center', va='center') plt.text(1, 4, 'Mean Substructure Rating', ha='center', va='center', rotation='vertical') num = num + 1 # general title plt.suptitle("Mean Substructure Rating vs Age \nIndividual State Deterioration Curves", fontsize=13, fontweight=0, color='black', style='italic', y=1.02) def getDataOneYear(state): pipeline = [{"$match":{"$and":[{"year":{"$gt":2015, "$lt":2017}},{"stateCode":state}]}}, {"$project":{"_id":0, "Structure Type":"$structureTypeMain.typeOfDesignConstruction", "Type of Wearing Surface":"$wearingSurface/ProtectiveSystem.typeOfWearingSurface", 'Structure Type':1, "structureNumber":1, "yearBuilt":1, "deck":1, ## rating of deck "year":1, ## survey year "substructure":1, ## rating of substructure "superstructure":1, ## rating of superstructure }}] dec = collection.aggregate(pipeline) conditionRatings = pd.DataFrame(list(dec)) conditionRatings['Age'] = conditionRatings['year'] - conditionRatings['yearBuilt'] conditionRatings = conditionRatings.loc[~conditionRatings['deck'].isin(['N','NA'])] conditionRatings = conditionRatings.loc[~conditionRatings['substructure'].isin(['N','NA'])] conditionRatings = conditionRatings.loc[~conditionRatings['superstructure'].isin(['N','NA'])] #conditionRatings = conditionRatings.loc[~conditionRatings['Type of Wearing Surface'].isin(['6'])] return conditionRatings df_mean_deck ## Condition ratings of all states 
concatenated into one single data frame ConditionRatings frames = [] for state in states: f = getDataOneYear(state) frames.append(f) df_nbi_w = pd.concat(frames) df_nbi_w = df_nbi_w.loc[~df_nbi_w['deck'].isin(['N','NA'])] df_nbi_w = df_nbi_w.loc[~df_nbi_w['substructure'].isin(['N','NA'])] df_nbi_w = df_nbi_w.loc[~df_nbi_w['superstructure'].isin(['N','NA'])] df_nbi_w = df_nbi_w.loc[~df_nbi_w['Type of Wearing Surface'].isin(['6'])] """ Explanation: Deterioration Curves - Substructure End of explanation """ D = {'slow':,'fast':,'average':} def label_bridges(rating,mean_age_conditionRating,std_age_conditionRating): if (mean_age_conditionRating - std_age_conditionRating) < int(rating) <= (mean_age_conditionRating + std_age_conditionRating): # Append a label return ('Average Deterioration') # else, if more than a value, elif int(rating) > (mean_age_conditionRating + std_age_conditionRating): # Append a label return ('Slow Deterioration') # else, if more than a value, elif int(rating) < (mean_age_conditionRating - std_age_conditionRating): # Append a label return ('Fast Deterioration') else: return ('Null Value') stat = ['08','56','30','16','53','41','49','32','06','02','15'] num = 1 slow = [] fast = [] avg = [] for st in stat: data = getDataOneYear(st) print(st,len(data)) AgeList = list(data['Age']) deckList = list(data['deck']) stateName = stateNameDict[st] labels = [] for deckRating, Age in zip (deckList,AgeList): if Age < 60: mean_age_conditionRating = df_mean_deck[stateName][Age] std_age_conditionRating = df_std_deck[stateName][Age] # deck Rating #detScore = (int(deckRating) - mean_age_conditionRating) / std_age_conditionRating # Substructure Rating #Substructure_detScore = (int(deckRating) - mean_age_conditionRating) / std_age_conditionRating # Superstructure Rating #Superstructure_detScore = (int(deckRating) - mean_age_conditionRating) / std_age_conditionRating labels.append(label_bridges(deckRating,mean_age_conditionRating,std_age_conditionRating)) # empty list to 
initialize a list D = dict((x,labels.count(x)) for x in set(labels)) total = D['Fast Deterioration'] + D['Slow Deterioration'] + D['Average Deterioration'] slow_percent = (D['Slow Deterioration'] / total) * 100 fast_percent = (D['Fast Deterioration'] / total) * 100 avg_percent = (D['Average Deterioration'] / total) * 100 slow.append(slow_percent) fast.append(fast_percent) avg.append(avg_percent) #stateName = stateNameDict[st] plt.figure(figsize=(12,6)) plt.title(stateName) plt.bar(range(len(D)), list(D.values()), align='center') plt.xticks(range(len(D)), list(D.keys())) plt.xlabel('Categories') plt.ylabel('Number of Bridges') plt.show() num = num + 1 avg def label_bridges(rating,labels): if (mean_age_conditionRating - std_age_conditionRating) < int(rating) <= (mean_age_conditionRating + std_age_conditionRating): # Append a label labels.append('Average Deterioration') # else, if more than a value, elif int(rating) > (mean_age_conditionRating + std_age_conditionRating): # Append a label labels.append('Slow Deterioration') # else, if more than a value, elif int(rating) < (mean_age_conditionRating - std_age_conditionRating): # Append a label labels.append('Fast Deterioration') else: labels.append('Null Value') return labels rating = [9,2,32,32,1,3,4,5,6,7,8,9,0,32,3] for i in rating: stat = ['08','56','30','16','53','41','49','32','06','02','15'] AgeList = list(df_nbi_w['Age']) deckList = list(df_nbi_w['deck']) num = 1 label = [] for st in stat: deckR = [] deckR = getDataOneYear(st) deckR = deckR[['Age','deck']] deckR= deckR.loc[~deckR['deck'].isin(['N','NA'])] stateName = stateNameDict[st] for deckRating, Age in zip (deckList,AgeList): if Age < 60: mean_age_conditionRating = df_mean_deck[stateName][Age] std_age_conditionRating = df_std_deck[stateName][Age] detScore = (int(deckRating) - mean_age_conditionRating) / std_age_conditionRating if (mean_age_conditionRating - std_age_conditionRating) < int(deckRating) <= (mean_age_conditionRating + std_age_conditionRating): # 
Append a label labels.append('Average Deterioration') # else, if more than a value, elif int(deckRating) > (mean_age_conditionRating + std_age_conditionRating): # Append a label labels.append('Slow Deterioration') # else, if more than a value, elif int(deckRating) < (mean_age_conditionRating - std_age_conditionRating): # Append a label labels.append('Fast Deterioration') else: labels.append('Null Value') """ Explanation: The mean deterioration curve can be a measure to evaluate the rate of deterioration. If the condition rating of a bridge lies above the deterioration curve then the bridge is deteriorating at a slower pace than mean deterioration of the bridges, and if the condition rating of the bridge lies below the deterioration curve of the bridges then it is deteriorating at a faster pace than the mean deterioration of the bridges. This concept can further be extended to calculate deterioration score. Deterioration score denotes the rate of deterioration. A positive deterioration denotes that the individual bridge is deteriorating at a slower pace than the mean rate of deterioration of bridges, and a negative deterioration denotes that the individual bridge is deteriorating at a higher pace than the mean deterioration of the bridges. The following provides definition of deterioration score: Classification Criteria The classfication criteria used to classify bridges into slow Deterioration, average deterioration and fast deterioration. Bridges are classified based on how far an individual bridge’s deterioration score is from the mean deterioration score. 
| Categories | Value |
|------------------------|-------------------------------|
| Slow Deterioration | $z_{ia} \geq \bar{x}_a + 1\,\sigma(x_a)$ |
| Average Deterioration | $\bar{x}_a - 1\,\sigma(x_a) \leq z_{ia} \leq \bar{x}_a + 1\,\sigma(x_a)$ |
| Fast Deterioration | $z_{ia} \leq \bar{x}_a - 1\,\sigma(x_a)$ |

Here $z_{ia}$ is bridge $i$'s deterioration score at age $a$, and
$\bar{x}_a$, $\sigma(x_a)$ are the mean and standard deviation of the
scores at that age. (The "Average" row's inequality signs were reversed
in the original table; the band is the region within one standard
deviation of the mean, matching the classification code above.)
End of explanation
"""
# Tally how many bridges received each label ('Slow Deterioration',
# 'Average Deterioration', 'Fast Deterioration', 'Null Value') from the
# classification loop above.
D = dict((x,labels.count(x)) for x in set(labels))

# Bar chart of the category counts for all western-US bridges combined.
plt.figure(figsize=(12,6))
plt.title('Classification of Bridges in West United States')
plt.bar(range(len(D)), list(D.values()), align='center')
plt.xticks(range(len(D)), list(D.keys()))
plt.xlabel('Categories of Bridges')
plt.ylabel('Number of Bridges')
plt.show()
"""
Explanation: Classification of all the bridges in the West United States
End of explanation
"""
christinawlindberg/LtaP
LtaP.ipynb
mit
!pip install nxpd %matplotlib inline import matplotlib.pyplot as plt import networkx as nx import pandas as pd import numpy as np from operator import truediv from collections import Counter import itertools import random import collaboratr #from nxpd import draw #import nxpd #reload(collaboratr) """ Explanation: This notebook will go through how we match up students to real scientists based on their science interests. This code is heavily based on collaboratr, a project developed at Astro Hack Week. Check it out here: github.com/benelson/collaboratr <span style="color:red"> Here, we will use real Letters to a Prescientist form data. </span> End of explanation """ def format_name(data): first_name = ['-'.join(list(map(str.capitalize,d))) for d in data['Name'].str.replace(" ", "-").str.split('-')] last_name = ['-'.join(list(map(str.capitalize,d))) for d in data['Last'].str.replace(" ", "-").str.split('-')] full_name = pd.Series([m+" "+n for m,n in zip(first_name,last_name)]) return full_name # Retrieve data from Google Sheet and parse using pandas dataframe student_data = pd.read_csv("students.csv") student_data = student_data.replace(np.nan,' ', regex=True) # Store student information in variables. # # Collaboratr divided people into "learners" and "teachers" based on what they wanted to "learn" and "teach." # Here, students are always "learners" by default and the scientists are always "teachers." # To maintain the structure of the pandas dataframe, # I've created blank values for what students want to "teach" and what scientists want to "learn." 
### write a function that would format names (including hyphens) student_data['Full Name'] = format_name(student_data) student_names = student_data['Full Name'] nStudents = len(student_names) student_learn = student_data['If I could be any type of scientist when I grow up, I would want to study:'] student_teach = pd.Series(["" for i in range (nStudents)], index=[i for i in range(nStudents)]) student_email = pd.Series(["" for i in range (nStudents)], index=[i for i in range(nStudents)]) # Store scientist information in variables. scientist_data = pd.read_csv("scientists_1.csv") scientist_data = scientist_data.replace(np.nan,' ', regex=True) #drop any duplicate email entries in the data frame drop = np.where(scientist_data.duplicated('Email')==True)[0] temp = scientist_data.drop(scientist_data.index[drop]) scientist_data = temp scientist_data['Full Name'] = format_name(scientist_data) scientist_names = scientist_data['Full Name'] nScientists = len(scientist_names) scientist_learn = pd.Series(["" for i in range (nScientists)], index=[i for i in range(nScientists)]) scientist_teach = scientist_data['We will match you with a pen pal who has expressed an interest in at least one of the following subjects. Which topic is most relevant to your work?'] scientist_email = scientist_data['Email'] #drop any duplicate email entries in the data frame drop = np.where(scientist_data.duplicated('Full Name')==True)[0] temp = scientist_data.drop(scientist_data.index[drop]) scientist_data = temp """ Explanation: Step 1 Create a Google Form with these questions: 1. What is your name? [text entry] 2. What is your gender? [multiple choice] 3. What are your general science interests? [checkboxes] I can ask for other information from the students (e.g., grade, school name) and scientists (email). After receiving the responses, load up the CSV of responses from the Google Form by running the cell below (you'll have to change the path to your own CSV). 
End of explanation """ names = student_names.append(scientist_names, ignore_index=True) learn = student_learn.append(scientist_learn, ignore_index=True) teach = student_teach.append(scientist_teach, ignore_index=True) emails = student_email.append(scientist_email, ignore_index=True) G = nx.DiGraph() """ Explanation: Step 2: Merge the student and scientist dataframes End of explanation """ # Insert users in graphs for n,e,l,t in zip(names, emails, learn, teach): collaboratr.insert_node(G,n, email=e, learn=l.split(';'), teach=t.split(';')) def sort_things(stu_data, sci_data): num_interests = {} for i,r in stu_data.iterrows(): name = r['Name'].capitalize() + " " + r['Last'].capitalize() num_interests = { name: 1 } print(num_interests) stu_names_sorted = sorted(num_interests, key=num_interests.get) print(stu_names_sorted) interests_stu = Counter(list(itertools.chain.from_iterable(\ [ i.split(';') for i in stu_data['If I could be any type of scientist when I grow up, I would want to study:'] ]))) interests_sci = Counter(list(itertools.chain.from_iterable(\ [ i.split(';') for i in sci_data['We will match you with a pen pal who has expressed an interest in at least one of the following subjects. 
Which topic is most relevant to your work?'] ]))) interests_rel = { key: interests_stu[key]/interests_sci[key] for key in interests_sci.keys() } interests_rel_sorted = sorted(interests_rel, key=interests_rel.get) return interests_rel_sorted, stu_names_sorted def assigner(assign, stu_data, sci_data, max_students=2): assign_one = {} subscriptions = { n: 0 for n in sci_data['What is your name?'] } interests_rel_sorted, stu_names_sorted = sort_things(stu_data, sci_data) for key in interests_rel_sorted: for name in stu_names_sorted: if name not in assign_one: if key in assign[name].keys(): try: scientist = np.random.choice(assign[name][key]) except ValueError: scientist = np.random.choice(scientist_data['What is your name?']) assign_one[name] = scientist subscriptions[scientist] += 1 if subscriptions[scientist]>=max_students: for kk,vv in assign.items(): if vv: for k,v in vv.items(): if scientist in v: v.remove(scientist) for name in stu_names_sorted: if name not in assign_one: scientist = np.random.choice([ k for k,v in subscriptions.items() if v < max_students ]) assign_one[name] = scientist return assign_one assign_one = None max_students = 2 while assign_one is None: try: participants = G.nodes(data=True) assign = collaboratr.assign_users(G,participants) assign_one = assigner(assign, student_data, scientist_data, max_students=max_students) if max(Counter([v for k,v in assign_one.items()]).values())>max_students: assign_one = None except ValueError: # print("error") pass print(assign_one) print(Counter([v for k,v in assign_one.items()])) items = [] for k,v in assign_one.items(): items.append(str(v.ljust(22) + "-> " + k.ljust(22) + "who is interested in " \ + student_data.loc[student_data['What is your name?'] == k]\ ['What general science fields are you interested in?'].tolist()[0] )) for i in sorted(items): print(i) a, b = sort_things(student_data, scientist_data) print(a, b) """ Explanation: Step 3: Assign scientists to students I thought about several ways to do 
this. Each student has a "pool" of scientists to be assigned to based on their interests. This was a non-trivial problem. I try to have no more than 2 students assigned to each scientist, working with a limited dataset of roughly 20 scientists and 30 students. Most scientists come from astronomy/physics or psychology/neuroscience. Here are my attempts to do just that: For each student, randomly draw from their "pool" of scientists with matching interests. This typically caused the more "underrepresented" scientists to get oversubscribed quickly, e.g., having one biologist and having many students interested in biology. This didn't help for students who had limited interests. If I couldn't match everyone up, I'd try again with different random draws. Couldn't find a solution for the conditions listed above. Maybe this would work better if we had a nScientists > nStudents. Start with the "least popular" topic, that is the topic where the student-to-scientist ratio is smallest. Loop through the students with those interests and try to match them to a scientist. Then, we work are way up the list until we get to the most popular topic. This approach worked much better. End of explanation """
statsmodels/statsmodels
examples/notebooks/wls.ipynb
bsd-3-clause
%matplotlib inline import matplotlib.pyplot as plt import numpy as np import statsmodels.api as sm from scipy import stats from statsmodels.iolib.table import SimpleTable, default_txt_fmt np.random.seed(1024) """ Explanation: Weighted Least Squares End of explanation """ nsample = 50 x = np.linspace(0, 20, nsample) X = np.column_stack((x, (x - 5) ** 2)) X = sm.add_constant(X) beta = [5.0, 0.5, -0.01] sig = 0.5 w = np.ones(nsample) w[nsample * 6 // 10 :] = 3 y_true = np.dot(X, beta) e = np.random.normal(size=nsample) y = y_true + sig * w * e X = X[:, [0, 1]] """ Explanation: WLS Estimation Artificial data: Heteroscedasticity 2 groups Model assumptions: Misspecification: true model is quadratic, estimate only linear Independent noise/error term Two groups for error variance, low and high variance groups End of explanation """ mod_wls = sm.WLS(y, X, weights=1.0 / (w ** 2)) res_wls = mod_wls.fit() print(res_wls.summary()) """ Explanation: WLS knowing the true variance ratio of heteroscedasticity In this example, w is the standard deviation of the error. WLS requires that the weights are proportional to the inverse of the error variance. End of explanation """ res_ols = sm.OLS(y, X).fit() print(res_ols.params) print(res_wls.params) """ Explanation: OLS vs. 
WLS Estimate an OLS model for comparison: End of explanation """ se = np.vstack( [ [res_wls.bse], [res_ols.bse], [res_ols.HC0_se], [res_ols.HC1_se], [res_ols.HC2_se], [res_ols.HC3_se], ] ) se = np.round(se, 4) colnames = ["x1", "const"] rownames = ["WLS", "OLS", "OLS_HC0", "OLS_HC1", "OLS_HC3", "OLS_HC3"] tabl = SimpleTable(se, colnames, rownames, txt_fmt=default_txt_fmt) print(tabl) """ Explanation: Compare the WLS standard errors to heteroscedasticity corrected OLS standard errors: End of explanation """ covb = res_ols.cov_params() prediction_var = res_ols.mse_resid + (X * np.dot(covb, X.T).T).sum(1) prediction_std = np.sqrt(prediction_var) tppf = stats.t.ppf(0.975, res_ols.df_resid) pred_ols = res_ols.get_prediction() iv_l_ols = pred_ols.summary_frame()["obs_ci_lower"] iv_u_ols = pred_ols.summary_frame()["obs_ci_upper"] """ Explanation: Calculate OLS prediction interval: End of explanation """ pred_wls = res_wls.get_prediction() iv_l = pred_wls.summary_frame()["obs_ci_lower"] iv_u = pred_wls.summary_frame()["obs_ci_upper"] fig, ax = plt.subplots(figsize=(8, 6)) ax.plot(x, y, "o", label="Data") ax.plot(x, y_true, "b-", label="True") # OLS ax.plot(x, res_ols.fittedvalues, "r--") ax.plot(x, iv_u_ols, "r--", label="OLS") ax.plot(x, iv_l_ols, "r--") # WLS ax.plot(x, res_wls.fittedvalues, "g--.") ax.plot(x, iv_u, "g--", label="WLS") ax.plot(x, iv_l, "g--") ax.legend(loc="best") """ Explanation: Draw a plot to compare predicted values in WLS and OLS: End of explanation """ resid1 = res_ols.resid[w == 1.0] var1 = resid1.var(ddof=int(res_ols.df_model) + 1) resid2 = res_ols.resid[w != 1.0] var2 = resid2.var(ddof=int(res_ols.df_model) + 1) w_est = w.copy() w_est[w != 1.0] = np.sqrt(var2) / np.sqrt(var1) res_fwls = sm.WLS(y, X, 1.0 / ((w_est ** 2))).fit() print(res_fwls.summary()) """ Explanation: Feasible Weighted Least Squares (2-stage FWLS) Like w, w_est is proportional to the standard deviation, and so must be squared. End of explanation """
lknelson/DH-Institute-2017
03-Operationalizing/Operationalizing.ipynb
bsd-2-clause
# Let's assign a string to a new variable # Using the triple quotation mark, we can simply paste a passage in between # and Python will treat it as a continuous string first_sonnet = """From fairest creatures we desire increase, That thereby beauty's rose might never die, But as the riper should by time decease, His tender heir might bear his memory""" # Note that when we print the 'first_sonnet', we see the character # that represents a line break: '\n' first_sonnet # A familiar string method first_sonnet.split() # Let's assign the list of tokens to a variable sonnet_tokens = first_sonnet.split() # And find out how many words there are in the quatrain len(sonnet_tokens) # Let's pull out the tokens from the second line sonnet_tokens[6:13] # How long is each word in sonnet_tokens? [len(token) for token in sonnet_tokens] # And why not assign that to a variable... token_lengths = [len(token) for token in sonnet_tokens] # ... so we can do something fun, like get the average word length sum(token_lengths) / len(token_lengths) ## EX. Retrieve the word 'thereby' from the list of 'sonnet_tokens' by calling its index. """ Explanation: <h1 align='center'>It Starts with a Research Question...</h1> <img src='Moretti 7, Fig 7.png' width="66%" height="66%"> <br> <img src='Moretti 7, Fig 8.png' width="66%" height="66%"> <br> <img src='Moretti 11, excerpt.png' width="66%" height="66%"> Operationalizing <ol> <li>Review/Preview: Strings & Lists</li> <li>Pandas</li> <li>Arithmetic!</li> <li>Character Space in Antigone</li> </ol> 1. Review/Preview Strings, Lists, & List Comprehensions Strings will be our go-to data type throughout the workshop. We have already seen strings assigned to variables, split over white spaces, added together, and sliced by index. Let's review those techniques and try out a couple variations. End of explanation """ # There's a twist! 
first_sonnet.split('\n') # Assign the list of whole lines to a new variable sonnet_lines = first_sonnet.split('\n') # How long is this list? len(sonnet_lines) # Create a list of lists! [line.split() for line in sonnet_lines] # Assign this to a variable tokens_by_line = [line.split() for line in sonnet_lines] # Check its length len(tokens_by_line) # Pull out the second line tokens_by_line[1] # How long is that second line? len(tokens_by_line[1]) # Pull up an individual word tokens_by_line[1][3] ## EX. Retrieve the word 'thereby' from the list of 'tokens_by_line' by calling its indices. ## EX. Find the average number of words per line in 'tokens_by_line'. """ Explanation: Extending our Methods Beyond a simple list, we often find it useful to organize information into lists of lists. That is, a list in which each entry is itself a list of elements. For example, we may not want to treat a poem as a flat list of words but instead would like to group words into their constitutive lines. End of explanation """ # Get ready! import pandas # Create a list of three sub-lists, each with three entries square_list = [[1,2,3],[4,5,6],[7,8,9]] # Let's slice it by row # Note that we would have to do some acrobatics in order to slice by column! square_list[:2] # Create a dataframe from that list pandas.DataFrame(square_list) # Let's create a couple of lists for our column and row labels column_names = ['Eggs', 'Bacon', 'Sausage'] row_names = ['Served','With','Spam'] # A-ha! pandas.DataFrame(square_list, columns = column_names, index = row_names) # Assign this to a variable spam_df = pandas.DataFrame(square_list, columns = column_names, index=row_names) # Call up a column of the dataframe spam_df['Eggs'] # Make that column into a list list(spam_df['Eggs']) # Get the indices for the entries in the column spam_df['Eggs'].index # Call up a row from the indices spam_df.loc['Served'] # Call up a couple of rows, using a list of indices! 
spam_df.loc[['Spam','Served']] # Get a specific entry by calling both row and column spam_df.loc['Spam']['Eggs'] # Temporarily re-order the dataframe by values in the 'Eggs' column spam_df.sort_values('Eggs', ascending=False) # Create a new column spam_df['Lobster Thermidor aux Crevettes'] = [10,11,12] # Inspect spam_df ## EX. Call up the entries (5) and (6) from the middle of the dataframe 'spam_df' individually ## CHALLENGE: Call up both entries at the same time """ Explanation: 2. Pandas We've started to grapple with the weirdly complicated idea of lists of lists and their utility for textual study. In fact, these translate rather easily into the very familiar idea of the spreadsheet. Very often, our data (whether number or text) can be represented as rows and columns. Once in that format, many mathematical operations come naturally. <i>Pandas</i> is a popular and flexible package whose primary use is its datatype: the <i>DataFrame</i>. The dataframe is essentially a spreadsheet, like you would find in Excel, but it integrates seamlessly into an NLP workflow and it has a few tricks up its sleeve. End of explanation """ # Slice out a column spam_df['Bacon'] # Evaluate whether each element in the column is greater than 5 spam_df['Bacon']==5 # Use that evaluation to subset the table spam_df[spam_df['Bacon']==5] ## EX. Slice 'spam_df' to contain only rows in which 'Sausage' is greater than 5 """ Explanation: DataFrame Subsetting End of explanation """ # Our dataframe spam_df # Pandas will produce a few descriptive statistics for each column spam_df.describe() # Multiply entries of the dataframe by 10 spam_df*10 # Add 10 to each entry spam_df+10 # Of course our dataframe hasn't changed spam_df # What if we just want to add the values in the column? sum(spam_df['Bacon']) # We can also perform operations among columns # Pandas knows to match up individual entries in each column spam_df['Bacon']/spam_df['Eggs'] """ Explanation: 3. Arithmetic! 
End of explanation """ # Read spreadsheet from the hard drive dialogue_df = pandas.read_csv('antigone_dialogue.csv', index_col=0) # Take a look dialogue_df # Pulling out a single column acts like a list -- with labels dialogue_df['NAMED_CHARACTER'] # If we wish, we can use metadata to subset our dataframe dialogue_df[dialogue_df['NAMED_CHARACTER']=='named'] # Check out the first element of the dialogue column dialogue_df['DIALOGUE'][0] # Create a list of lists; split each character's dialogue into a list of tokens dialogue_tokens = [character.split() for character in dialogue_df['DIALOGUE']] # A list of lists! dialogue_tokens # How many tokens are in each list? dialogue_len = [len(tokens) for tokens in dialogue_tokens] # Check the numbers of tokens per character dialogue_len # Assign this as a new column in the dataframe dialogue_df['WORDS_SPOKEN'] = dialogue_len # Let's visualize! # Tells Jupyter to produce images in notebook % pylab inline # Makes images look good style.use('ggplot') # Visualize using the 'plot' method from Pandas dialogue_df['WORDS_SPOKEN'].plot(kind='bar') ## Moretti had not simply plotted the number words spoken by each character ## but the percentage of all words in the play belonging to that character. ## He also had sorted the columns of his diagram by their height. ## EX. Calculate the share of each character's dialogue as a percentage of the total ## number of words in the play. ## EX. Reorganize the dataframe such that these percentages appear in descending order. ## EX. Visualize the ordered share of each character's dialogue as a bar chart. """ Explanation: 4. Character Space in Antigone In Moretti's study, he offers several measures of the concept of <i>character</i>. The simplest of these is to measure the relative dialogue belong to each character in a play. Presumably the main characters will speak more and peripheral characters will speak less. 
The statistical moves we will make here are not only counting the raw number of words spoken by each character but also normalizing them. That is, converting them into a fraction of all words in the play. In order to focus on the statistical tasks at hand, we will begin by importing a spreadsheet in which each row is labeled with an individual character's name. Its columns contain metadata about the character herself, as well as a single column containing all of her dialogue as a string. End of explanation """ # Read the text of Antigone from a file on your hard drive antigone_text = open('antigone.txt', 'r').read() # Create a list by splitting the string whereever a double line break occurs antigone_list = antigone_text.split('\n\n') # Create a new, empty dictionary dialogue_dict = {} # Iterate through each of the play's lines for line in antigone_list: # Find the first space in each line index_first_space = line.index(' ') # Slice the line, preceding the first space character_name = line[:index_first_space] # Check whether the character is in our dictionary yet if character_name not in dialogue_dict.keys(): # If not, create a new entry whose value is a slice of the line *after* the first space dialogue_dict[character_name] = line[index_first_space:] else: # If so, add the slice of line to the existing value dialogue_dict[character_name] = dialogue_dict[character_name] + line[index_first_space:] # Get ready! import pandas # Convert dictionary to DataFrame; instruct pandas that each dictionary entry is a row ('index') dialogue_df = pandas.DataFrame.from_dict(dialogue_dict, orient='index') # Add label to spreadsheet column dialogue_df.columns = ['DIALOGUE'] # Export as csv; save to hard drive dialogue_df.to_csv('antigone_dialogue_new.csv') ## EX. The text of Hamlet is also contained within the folder for this notebook ('hamlet.txt'). ## Perform Moretti's character space analysis on that play. 
## Note that the dialogue is formatted slightly differently in our copy of Hamlet than it ## was in Antigone. This means that you will need to tweak the script above if you wish ## to use it for Hamlet. In reality it is very often the case that a script has to be ## tailored to different applications! """ Explanation: Extra: Transform Dramatic Text into Charcter-CSV This script uses a data type, a method, and an operation that are all closely related to ones that we've seen. The <i>dictionary</i> resembles a <i>list</i> or a <i>DataFrame</i>. The string-method <i>index</i> sort of reverse engineers our slicing method where we had called up specific characters from a string by their index. The <i>for-loop</i> bears a close resemblence to the <i>list comprehension</i>, although it doesn't necessarily produce a list. Try playing around with them to see what they do! End of explanation """
ledeprogram/algorithms
class7/homework/radhikapc_Homework7.ipynb
gpl-3.0
from sklearn import datasets import pandas as pd %matplotlib inline from sklearn import datasets from pandas.tools.plotting import scatter_matrix import matplotlib.pyplot as plt from sklearn import tree iris = datasets.load_iris() iris iris.keys() iris['target'] iris['target_names'] iris['data'] iris['feature_names'] x = iris.data[:,2:] # the attributes # we are picking up only the info on petal length and width y = iris.target # the target variable # The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features. dt = tree.DecisionTreeClassifier() # .fit testing dt = dt.fit(x,y) from sklearn.cross_validation import train_test_split x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.50,train_size=0.50) dt = dt.fit(x_train,y_train) from sklearn.cross_validation import train_test_split from sklearn import metrics import numpy as np def measure_performance(X,y,clf, show_accuracy=True, show_classification_report=True, show_confussion_matrix=True): y_pred=clf.predict(X) if show_accuracy: print("Accuracy:{0:.3f}".format(metrics.accuracy_score(y, y_pred)),"\n") if show_classification_report: print("Classification report") print(metrics.classification_report(y,y_pred),"\n") if show_confussion_matrix: print("Confusion matrix") print(metrics.confusion_matrix(y,y_pred),"\n") measure_performance(x_test,y_test,dt) #measure on the test data (rather than train) def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues): plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(iris.target_names)) plt.xticks(tick_marks, iris.target_names, rotation=45) plt.yticks(tick_marks, iris.target_names) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') y_pred = dt.fit(x_train, y_train).predict(x_test) #generate a prediction based on the model created to output a predicted y cm = 
metrics.confusion_matrix(y_test, y_pred) np.set_printoptions(precision=2) print('Confusion matrix, without normalization') print(cm) plt.figure() plot_confusion_matrix(cm) """ Explanation: We covered a lot of information today and I'd like you to practice developing classification trees on your own. For each exercise, work through the problem, determine the result, and provide the requested interpretation in comments along with the code. The point is to build classifiers, not necessarily good classifiers (that will hopefully come later) 1. Load the iris dataset and create a holdout set that is 50% of the data (50% in training and 50% in test). Output the results (don't worry about creating the tree visual unless you'd like to) and discuss them briefly (are they good or not?) End of explanation """ from sklearn.cross_validation import train_test_split x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.75,train_size=0.25) dt = dt.fit(x_train,y_train) from sklearn import metrics import numpy as np def measure_performance(X,y,clf, show_accuracy=True, show_classification_report=True, show_confussion_matrix=True): y_pred=clf.predict(X) if show_accuracy: print("Accuracy:{0:.3f}".format(metrics.accuracy_score(y, y_pred)),"\n") if show_classification_report: print("Classification report") print(metrics.classification_report(y,y_pred),"\n") if show_confussion_matrix: print("Confusion matrix") print(metrics.confusion_matrix(y,y_pred),"\n") measure_performance(x_test,y_test,dt) #measure on the test data (rather than train) def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues): plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(iris.target_names)) plt.xticks(tick_marks, iris.target_names, rotation=45) plt.yticks(tick_marks, iris.target_names) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') y_pred = dt.fit(x_train, y_train).predict(x_test) cm = 
metrics.confusion_matrix(y_test, y_pred) np.set_printoptions(precision=2) print('Confusion matrix, without normalization') print(cm) plt.figure() plot_confusion_matrix(cm) # 75-25 seems to be better at predicting with precision """ Explanation: 2. Redo the model with a 75% - 25% training/test split and compare the results. Are they better or worse than before? Discuss why this may be. End of explanation """ cancer = datasets.load_breast_cancer() print(cancer) cancer.keys() #cancer['DESCR'] # we are trying to predict how malignant / benign a specific cancer 'feature' is cancer['target_names'] cancer['data'] cancer['feature_names'] cancer['feature_names'][11] cancer['target'] x = cancer.data[:,10:11] print(x) plt.figure(2, figsize=(8, 6)) plt.scatter(x[:,10:11], x[:,13:14], c=y, cmap=plt.cm.CMRmap) plt.xlabel('texture error') plt.ylabel('smoothness error') plt.axhline(y=56) plt.axvline(x=0.5) plt.figure(2, figsize=(8, 6)) plt.scatter(x[:,1:2], x[:,3:4], c=y, cmap=plt.cm.CMRmap) plt.xlabel('mean perimeter') plt.ylabel('mean area') plt.axhline(y=800) plt.axvline(x=17) plt.figure(2, figsize=(8, 6)) plt.scatter(x[:,5:6], x[:,6:7], c=y, cmap=plt.cm.CMRmap) plt.xlabel('Mean Concavity') plt.ylabel('Mean Concave Point') plt.axhline(y=0.06) plt.axvline(x=0.25) """ Explanation: 3. Load the breast cancer dataset (datasets.load_breast_cancer()) and perform basic exploratory analysis. What attributes to we have? What are we trying to predict? 
For context of the data, see the documentation here: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29 End of explanation """ x = cancer.data[:,10:11] # the attributes of skin color y = cancer.target dt = tree.DecisionTreeClassifier() dt = dt.fit(x,y) x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.75,train_size=0.25) dt = dt.fit(x_train,y_train) def measure_performance(X,y,clf, show_accuracy=True, show_classification_report=True, show_confussion_matrix=True): y_pred=clf.predict(X) if show_accuracy: print("Accuracy:{0:.3f}".format(metrics.accuracy_score(y, y_pred)),"\n") if show_classification_report: print("Classification report") print(metrics.classification_report(y,y_pred),"\n") if show_confussion_matrix: print("Confusion matrix") print(metrics.confusion_matrix(y,y_pred),"\n") measure_performance(x_test,y_test,dt) #measure on the test data (rather than train) def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues): plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(iris.target_names)) plt.xticks(tick_marks, cancer.target_names, rotation=45) plt.yticks(tick_marks, cancer.target_names) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') y_pred = dt.fit(x_train, y_train).predict(x_test) cm = metrics.confusion_matrix(y_test, y_pred) np.set_printoptions(precision=2) print('Confusion matrix, without normalization') print(cm) plt.figure() plot_confusion_matrix(cm) """ Explanation: 4. Using the breast cancer data, create a classifier to predict the type of seed. Perform the above hold out evaluation (50-50 and 75-25) and discuss the results. 
Picking only one attribute : Skin Color End of explanation """ x = cancer.data[:,:] # the attributes of skin color y = cancer.target dt = tree.DecisionTreeClassifier() dt = dt.fit(x,y) x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.75,train_size=0.25) dt = dt.fit(x_train,y_train) def measure_performance(X,y,clf, show_accuracy=True, show_classification_report=True, show_confussion_matrix=True): y_pred=clf.predict(X) if show_accuracy: print("Accuracy:{0:.3f}".format(metrics.accuracy_score(y, y_pred)),"\n") if show_classification_report: print("Classification report") print(metrics.classification_report(y,y_pred),"\n") if show_confussion_matrix: print("Confusion matrix") print(metrics.confusion_matrix(y,y_pred),"\n") measure_performance(x_test,y_test,dt) #measure on the test data (rather than train) def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues): plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(iris.target_names)) plt.xticks(tick_marks, cancer.target_names, rotation=45) plt.yticks(tick_marks, cancer.target_names) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') y_pred = dt.fit(x_train, y_train).predict(x_test) cm = metrics.confusion_matrix(y_test, y_pred) np.set_printoptions(precision=2) print('Confusion matrix, without normalization') print(cm) plt.figure() plot_confusion_matrix(cm) """ Explanation: Predicted 216 for benign but only 54 is true,predicted 50 but there are 107 cases, so this model doesnt work. Picking all the attributes and testing the accuracy End of explanation """
astarostin/MachineLearningSpecializationCoursera
course6/week6/SentimentAnalysisContest.ipynb
apache-2.0
import scrapy # to run: # scrapy crawl reviews -o reviews.json class ReviewsSpider(scrapy.Spider): name = "reviews" start_urls = ['https://market.yandex.ru/catalog/54726/list?how=opinions&deliveryincluded=0&onstock=1'] def __init__(self): self.count = 0 self.LIMIT = 5 def parse(self, response): # follow links to phone pages for href in response.css('a.snippet-card__header-link::attr(href)').extract(): link = href[:href.index('?')] + '/reviews' yield scrapy.Request(response.urljoin(link), callback=self.parse_phone) # follow link to the next review page next_page = response.css('a.n-pager__button-next::attr(href)').extract_first() if self.count < self.LIMIT and next_page is not None: next_page = response.urljoin(next_page) self.count += 1 yield scrapy.Request(next_page, callback=self.parse) def parse_phone(self, response): for review in response.css('div.product-review-item'): pos = ' '.join(review.css('.product-review-item__stat:nth-child(4) .product-review-item__text::text').extract()) neg = ' '.join(review.css('.product-review-item__stat:nth-child(5) .product-review-item__text::text').extract()) if len(pos) > len(neg): text = pos rating = 5 else: text = neg rating = 1 yield { 'text': text, 'rating': rating, } # follow link to the next review page next_page = response.css('a.n-pager__button-next::attr(href)').extract_first() if next_page is not None: next_page = response.urljoin(next_page) yield scrapy.Request(next_page, callback=self.parse_phone) """ Explanation: Разработка сентимент-анализа под задачу заказчика Анализ задачи По условию задачи дана только небольшая тестовая выборка, подготовленной обучающей выборки нет. 
Поэтому выполнение задания можно разделить на следующие этапы: * Подготовка обучающей выборки * Нахождение данных для обучения * Разметка данных на классы * Обучение модели сентимент-анализа * Подбор алгоритма обучения * Экспериментирование с разными обучающими выборками и параметрами обучения * Запуск обученной модели на тестовых данных ## Подготовка обучающей выборки ### Нахождение данных для обучения Визуальный анализ тестовых данных показывает, что речь идет об отзывах на мобильные телефоны. Таким образом, нужно где-то найти базу отзывов на телефоны на русском языке. В первую очередь на ум приходит сервис Яндекс-Маркет, содержащий сведения и отзывы на множество моделей телефонов. Кроме того, отзывы на нем разделены на "Достоинства", "Недостатки" и "Комментарий", а также имеют общую оценку-рейтинг, которую можно было бы использовать для разметки на классы. Для парсинга страниц с отзывами была использована библиотека Scrapy, упомянутая в предыдущих заданиях. Эта библиотека позволила обойти ограничения, связанные с блокировкой автоматических запросов со стороны сайта. Использование Scrapy предполагает создание отдельного проекта со своей структурой, поэтому целиком привести его здесь и запустить из Ipython не получится. Код проекта приложен к архиву. 
Было использовано 2 подхода к парсингу отзывов:
- парсинг полей "Достоинства" и "Недостатки"
- парсинг поля "Комментарий" и "Оценка"
Класс с основной логикой парсинга для библиотеки Scrapy (спайдер) приведен ниже:
End of explanation
"""
import scrapy

# to run:
# scrapy crawl comments -o comments.json

class CommentsSpider(scrapy.Spider):
    """Second crawling variant: collect the free-text "comment" block of each
    review together with its numeric star rating (instead of the pros/cons
    blocks used by ReviewsSpider)."""

    name = "comments"
    start_urls = ['https://market.yandex.ru/catalog/54726/list?how=opinions&deliveryincluded=0&onstock=1']

    def __init__(self):
        # count/LIMIT cap how many extra catalog pages parse() will follow.
        self.count = 0
        self.LIMIT = 15

    def parse(self, response):
        """Handle a catalog listing page: queue product review pages and
        paginate the listing itself (up to LIMIT extra pages)."""
        # follow links to phone pages
        for href in response.css('a.snippet-card__header-link::attr(href)').extract():
            # strip the query string, then go straight to the reviews sub-page
            link = href[:href.index('?')] + '/reviews'
            yield scrapy.Request(response.urljoin(link), callback=self.parse_phone)

        # follow link to the next review page
        next_page = response.css('a.n-pager__button-next::attr(href)').extract_first()
        if self.count < self.LIMIT and next_page is not None:
            next_page = response.urljoin(next_page)
            self.count += 1
            yield scrapy.Request(next_page, callback=self.parse)

    def parse_phone(self, response):
        """Handle one product's reviews page: yield {'text', 'rating'} items
        (comment text from nth-child(6), star rating from div.rating) and
        follow the review pagination without limit."""
        for review in response.css('div.product-review-item'):
            yield {
                'text': ' '.join(review.css('.product-review-item__stat:nth-child(6) .product-review-item__text::text').extract()),
                'rating': review.css('div.rating::text').extract_first(),
            }

        # follow link to the next review page
        next_page = response.css('a.n-pager__button-next::attr(href)').extract_first()
        if next_page is not None:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback=self.parse_phone)
"""
Explanation: В качестве стартового URL используется адрес страницы со списком наиболее популярных телефонов по числу отзывов. Затем парсер переходит по ссылкам на страницы отзывов каждого телефона в списке и далее переходя по ссылкам "листает" страницы. Для отзывов из блока "Достоинства" присваивался рейтинг 5, из "Недостатков" - 1.
В другом варианте спайдера использовались блоки с полями "Комментарий" и "Оценка". 
Код спайдера приведен ниже: End of explanation """ from sklearn.feature_extraction.text import CountVectorizer from sklearn.pipeline import Pipeline from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.svm import LinearSVC from sklearn.linear_model import SGDClassifier import pandas as pd import datetime import numpy as np from time import time from sklearn.grid_search import GridSearchCV from sklearn.externals import joblib from sklearn.metrics import accuracy_score from lxml import etree import seaborn as sns %pylab inline """ Explanation: В этом варианте рейтинг мог быть любым от 1 до 5. Результатом работы спайдеров scrapy были JSON-файлы вида: {"text": "Постоянно глючит. Нормально смогла попользоваться, используя различные приложения, 1 год. Дальше глюкнул\r Постоянно зависал. Из программ вылетал. Пользовалась всё остальное время не как смартфоном, а как телефоном.", "rating": 1}, {"text": "1. HTC\r 2. Отличное качество изготовления\r 3. Отличное \"железо\", не тормозит\r 4. OS Android 4\r 5. Стекло не боится царапин\r 6. Play Market\r 7. Много аксессуаров", "rating": 5}, {"text": "не советую покупать данное устройство.", "rating": "3"} ... Количество записей в выборке в ходе экспериментов варьировалось от 6000 до 12000 записей. Разметка данных на классы Разметка на классы выполнялась на основе поля "rating" из JSON-файла с отзывами. Если рейтинг меньше 4, то принимался класс 0 (отрицательный отзыв), иначе - 1 (положительный отзыв). Далее приведен код загрузки данных, предобработки, обучения модели, нахождения оптимальных параметров и предсказания для тестовых данных. 
End of explanation
"""
def add_outer_tags(filename):
    """Wrap the whole file in a <data>...</data> pair (in place) unless the
    first line already contains '<data>', so the file parses as valid XML."""
    with open(filename, 'r+') as f:
        content = f.read()
        f.seek(0, 0)
        firstline = f.readline()
        if '<data>' not in firstline:
            f.seek(0, 0)
            # new content is longer than the old, so writing from offset 0
            # overwrites everything without leaving stale bytes behind
            f.write('<data>' + '\n' + content + '\n</data>')
"""
Explanation: Добавление обрамляющих тегов в файл с тестовой выборкой, чтобы он был валидным XML-документом (для удобства парсинга)
End of explanation
"""
def make_target(row):
    """Binary sentiment label for a review row: 0 (negative) when
    row['rating'] < 4, otherwise 1 (positive)."""
    if row['rating'] < 4:
        return 0
    else:
        return 1
"""
Explanation: Определение класса для объектов обучающей выборки
End of explanation
"""
def prepare_data_set(data):
    """Add a 'target' class column derived from 'rating', drop rows whose
    target is missing, and cast the column to int.  Returns the DataFrame."""
    data['target'] = data.apply(lambda row: make_target(row),axis=1)
    data = data.dropna(subset=['target'])
    data['target'] = data['target'].astype(int)
    return data
"""
Explanation: Предобработка обучающей выборки, добавление целевой метки
End of explanation
"""
def iter_tree(etree):
    """Yield (index, text) for every <review> element of a parsed XML tree.

    NOTE(review): the parameter name shadows the module-level `lxml.etree`
    import -- harmless here, but worth renaming.
    """
    n = -1
    for review in etree.iter('review'):
        n += 1
        yield (n, review.text)
"""
Explanation: Итератор по XML-файлу с тестовыми данными
End of explanation
"""
def get_dataframes(training_data_file, test_data_file):
    """Load the JSON training file and the XML test file into two DataFrames.

    The training frame gets its 'target' column via prepare_data_set(); the
    test file is first wrapped in <data> tags so lxml can parse it.
    """
    df_training = pd.read_json(training_data_file)
    df_training = prepare_data_set(df_training)
    add_outer_tags(test_data_file)
    tree = etree.parse(test_data_file)
    df_test = pd.DataFrame(list(iter_tree(tree)), columns=['id', 'text'])
    return df_training, df_test
"""
Explanation: Функция загрузки данных по обучающей и тестовой выборкам в датафреймы
End of explanation
"""
def read_data(df_training, df_test):
    """Extract plain arrays from the two DataFrames:
    (X_train texts, y_train labels, test ids, X_test texts)."""
    X_train = df_training['text'].values
    X_test = df_test['text'].values
    id_test = df_test['id'].values
    y_train = df_training['target'].values
    return X_train, y_train, id_test, X_test
"""
Explanation: Преобразование данных из датафреймов в массивы
End of explanation
"""
def get_pipeline_and_params_1():
    """Configuration 1: raw counts -> tf-idf -> SGD classifier, plus the
    parameter grid to search (commented entries were tried and disabled)."""
    pipeline = Pipeline([
        ('vect', CountVectorizer()),
        ('tfidf', TfidfTransformer()),
        ('clf', SGDClassifier()),
    ])
    parameters = {
        'vect__max_df': (0.75, 1),
        #'vect__max_features': (None, 5000, 10000, 50000),
        'vect__ngram_range': ((1, 
1), (1, 3), (1, 2)), # unigrams or bigrams
        #'tfidf__use_idf': (True, False),
        #'tfidf__norm': ('l1', 'l2'),
        #'clf__alpha': (0.00001, 0.000001),
        #'clf__penalty': ('l2', 'elasticnet'),
        #'clf__n_iter': (10, 50, 80),
    }
    return pipeline, parameters

def get_pipeline_and_params_2():
    """Configuration 2: tf-idf vectorizer -> logistic regression, plus its
    parameter grid (commented entries were tried and disabled)."""
    pipeline = Pipeline([
        ('tfidf', TfidfVectorizer()),
        ('logreg', LogisticRegression()),
    ])
    parameters = {
        'tfidf__max_df': (0.6,0.8,1),
        #'tfidf__min_df': (0, 5, 10, 15),
        'tfidf__ngram_range': ((1, 1), (1, 2), (1,3), (2,3)), # unigrams or bigrams
        #'tfidf__use_idf': (True, False),
        #'tfidf__norm': ('l1', 'l2'),
        #'logreg__C': (0.0001, 0.01, 1),
        #'logreg__penalty': ('l2', 'l1'),
    }
    return pipeline, parameters

def get_pipeline_and_params_3():
    """Configuration 3: raw counts -> logistic regression, plus its
    parameter grid (commented entries were tried and disabled)."""
    pipeline = Pipeline([
        ('vect', CountVectorizer()),
        ('logreg', LogisticRegression()),
    ])
    parameters = {
        'vect__max_df': (0.6, 1.0),
        'vect__min_df': (0, 5),
        #'vect__stop_words': ('english', None),
        'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
        #'logreg__C': (0.0001, 0.01, 1),
        #'logreg__penalty': ('l2', 'l1'),
    }
    return pipeline, parameters
"""
Explanation: Обучение модели для сентимент-анализа
Подбор алгоритма обучения
Различные сочетания алгоритмов для обучения (выделение признаков, классификация) и перечень их параметров для перебора при поиске наилучшего варианта
End of explanation
"""
def predict(predictor, data_train, y, id_test, data_test, cv_score=None):
    """Fit `predictor` on the training data, persist the model, predict on
    the test data, and write the 'pos'/'neg' predictions plus an estimator
    description to timestamped files under data/."""
    predictor.fit(data_train, y)
    joblib.dump(predictor, './SentimentAnalysisModel.pkl')
    prediction = predictor.predict(data_test)
    #print predictor
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    filepath_prediction = 'data/prediction-%s-data.csv' % timestamp
    filepath_description = 'data/prediction-%s-estimator.txt' % timestamp
    # map class 1 -> 'pos', everything else -> 'neg'
    prediction_str = ['pos' if p == 1 else 'neg' for p in prediction]
    # Create a dataframe with predictions and write it to CSV file
    predictions_df = pd.DataFrame(data=prediction_str, columns=['y'])
    predictions_df.to_csv(filepath_prediction, sep=',', 
index_label='Id') # Write a short description of the classifier that was used f = open(filepath_description, 'w') f.write(str(predictor)) score = '\nCross-validation score %.8f' % cv_score f.write(score) f.close() """ Explanation: Предсказание результата для тестовых данных End of explanation """ def do_grid_search(pipeline, parameters, X_train, y_train): grid_search = GridSearchCV(pipeline, parameters, scoring='accuracy') t0 = time() grid_search.fit(X_train, y_train) print "done in %0.3fs" % (time() - t0) print("Best score: %.4f" % grid_search.best_score_) print("Best parameters set:") best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print("\t%s: %r" % (param_name, best_parameters[param_name])) return grid_search """ Explanation: Экспериментирование с разными обучающими выборками и параметрами обучения Поиск оптимальных параметров модели End of explanation """ def do_experiment(X_train, y_train, id_test, X_test, get_pipeline_and_params): pipeline, parameters = get_pipeline_and_params() gs = do_grid_search(pipeline, parameters, X_train, y_train) predict(gs.best_estimator_, X_train, y_train, id_test, X_test, gs.best_score_) """ Explanation: Общая структура эксперимента: выбор конфигурации, поиск параметров, предсказание на основе наилучшего варианта End of explanation """ df_training, df_test = get_dataframes('reviews11665.json', 'test.csv') """ Explanation: Получение датафреймов с обучающей и тестовой выборками. Используется подготовленный с помощью Scrapy JSON-файл, содержащий 11665 отзывов о мобильных телефонах с Яндекс.Маркета. 
End of explanation
"""
# Peek at the first rows of both frames.
df_training.head()
df_test.head()
"""
Explanation: Общий вид датафреймов
End of explanation
"""
# Bar plot of class frequencies in the training set (0 -> 'neg', 1 -> 'pos').
ax = sns.barplot(df_training['target'].value_counts().keys(), df_training['target'].value_counts().values)
ax.set_xticklabels(['neg', 'pos'])
"""
Explanation: Гистограмма распределения классов в обучающей выборке
End of explanation
"""
# Unpack plain arrays for the experiments below.
X_train, y_train, id_test, X_test = read_data(df_training, df_test)
"""
Explanation: Подготовка данных
End of explanation
"""
# Run the three pipeline configurations; each one grid-searches its
# parameters and writes timestamped prediction files.
do_experiment(X_train, y_train, id_test, X_test, get_pipeline_and_params_1)
do_experiment(X_train, y_train, id_test, X_test, get_pipeline_and_params_2)
do_experiment(X_train, y_train, id_test, X_test, get_pipeline_and_params_3)
"""
Explanation: Проведение серии экспериментов
End of explanation
"""
tensorflow/docs-l10n
site/ja/guide/mixed_precision.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2019 The TensorFlow Authors. End of explanation """ import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras import mixed_precision """ Explanation: 混合精度 <table class="tfo-notebook-buttons" align="left"> <td><a target="_blank" href="https://www.tensorflow.org/guide/mixed_precision"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org で表示</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/guide/mixed_precision.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab で実行</a></td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/guide/mixed_precision.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub でソースを表示</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/guide/mixed_precision.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a> </td> </table> 概要 混合精度とは、 16 ビットと 32 ビット浮動小数点型の両方を使ってモデルのトレーニングを高速化し、使用するメモリを少なくする手法です。数値の安定性を保つためにモデルの特定の部分を 32 ビットで保持することにより、モデルのステップ時間を短縮しつつ、精度などの評価指標は同様にトレーニングすることができます。このガイドでは、実験的な Keras 混合精度 API でモデルを高速化する使い方を説明します。この API を使用すると、最新の GPU で 3 倍以上、TPU で 60% 以上パフォーマンスを向上させることができます。 現在、殆どのモデルでは 32 ビットのメモリを必要とする float32 dtype が使用されています。しかしながら、精度が低い代わりに必要とするメモリが 
16 ビットの float16 と bfloat16 という 2 つの dtype があります。最近のアクセラレータは、16 ビットの計算実行専門のハードウェアを備えており、16 ビットの dtype はより高速でメモリから読み取ることができるため、 16 ビットの dtype 演算をより高速に実行できます。 NVIDIA GPU は float32 よりも float16 で速く演算を実行でき、TPU は float32 よりも bfloat16 で速く演算を実行できます。したがって、これらのデバイスでは低精度の dtype を可能な限り使用すべきです。ただし、変数および一部の計算は、モデルのトレーニング品質を維持するために、数値的理由から float32 のままにする必要があります。Keras 混合精度 API を使用すると、float16 または bfloat16 と float32 の組み合わせが可能になり、float16 / bfloat16 によるパフォーマンスの利点と float32 による数値的安定性の利点の両方を得ることができます。 注意: このガイドでは、「数値的安定性」という用語は、高精度の dtype ではなく低精度の dtype を使用することによって、モデルの品質がどのように影響を受けるかを指します。これらの dtype のいずれかで実行し、モデルの評価精度やその他のメトリクスが float32 での演算実行と比較して低下する場合、その演算は float16 または bfloat16 で「数値的に不安定」と言えます。 セットアップ End of explanation """ !nvidia-smi -L """ Explanation: 対応ハードウェア 混合精度はほとんどのハードウェアで動作しますが、高速化できるのは最近の NVIDIA GPU または Cloud TPU のモデルに限ります。 NVIDIA GPU は float16 と float32 を組み合わせた使用に、TPU は bfloat16 と float32 を組み合わせた使用に対応しています。 NVIDIA GPUの中でも、コンピューティング機能が 7.0 以上の場合、float16 の行列乗算と畳み込みを加速する Tensor Core と呼ばれる特別なハードウェアユニットを備えているため、混合精度のパフォーマンスが最大になります。古い GPU の場合、混合精度の使用による数学的パフォーマンスは期待できませんが、メモリと帯域幅の節約によって幾分かの高速化は可能です。NVIDIA の CUDA GPU ウェブページから、お持ちの GPU のコンピューティング機能を確認できます。混合精度が最も有効な GPU には、RTX GPU、V100、A100 などがあります。 注意: このガイドを Google Colab で実行する場合、通常、GPU ランタイムには P100 が接続されています。 P100 のコンピューティング機能は 6.0 なので、大幅なスピードアップは期待できません。 GPU タイプは以下の方法で確認できます。このコマンドは NVIDIA ドライバがインストールされている場合にのみ存在するため、そうでない場合はエラーが生成されます。 End of explanation """ policy = mixed_precision.Policy('mixed_float16') mixed_precision.set_global_policy(policy) """ Explanation: すべての Cloud TPU は bfloat16 に対応しています。 高速化が期待できない CPU や古い GPU でも、単体テスト、デバッグ、または単に API を試す目的で、混合精度 API の使用は可能です。ただし CPU での混合精度の実行は、大幅に遅くなります。 dtype ポリシーを設定する Keras で混合精度を使用するには、通常 <em>dtype ポリシー</em>と呼ばれるtf.keras.mixed_precision.Policy を作成する必要があります。dtype ポリシーは、実行される dtypes レイヤーを指定します。このガイドでは、文字列 'mixed_float16' からポリシーを作成し、それをグローバルポリシーとして設定します。これにより、その後に作成されるレイヤーは、float16 と float32 を組み合わせた混合精度を使用します。 End of explanation """ # Equivalent to the two lines above 
mixed_precision.set_global_policy('mixed_float16') """ Explanation: つまり、文字列を直接 set_global_policy に渡すことができ、この方法は一般的に実践で行われます。 End of explanation """ print('Compute dtype: %s' % policy.compute_dtype) print('Variable dtype: %s' % policy.variable_dtype) """ Explanation: ポリシーは、レイヤーの計算が行われる dtype と、レイヤーの変数の dtype という、レイヤーの 2 つの重要な側面を指定します。上記では、mixed_float16ポリシー('mixed_float16'をコンストラクタに渡して作成したmixed_precision.Policy)を作成しました。このポリシーでは、レイヤーは float16 計算と float32 変数を使用します。パフォーマンスのために計算は float16 で行いますが、数値を安定させるために変数は float32 を保持する必要があります。ポリシーのプロパティに直接クエリすることが可能です。 End of explanation """ inputs = keras.Input(shape=(784,), name='digits') if tf.config.list_physical_devices('GPU'): print('The model will run with 4096 units on a GPU') num_units = 4096 else: # Use fewer units on CPUs so the model finishes in a reasonable amount of time print('The model will run with 64 units on a CPU') num_units = 64 dense1 = layers.Dense(num_units, activation='relu', name='dense_1') x = dense1(inputs) dense2 = layers.Dense(num_units, activation='relu', name='dense_2') x = dense2(x) """ Explanation: 前述したように、mixed_float16ポリシーは、7.0 以上のコンピューティング機能を備えた NVIDIA GPU のパフォーマンスを大幅に向上させます。ポリシーは他の GPU や CPU でも実行できますが、パフォーマンスは向上しない可能性があります。TPU の場合は、代わりにmixed_bfloat16ポリシーを使用する必要があります。 モデルを作成する 次に、簡単なモデルを作成してみましょう。非常に小さなトイモデルでは、通常 TensorFlow ランタイムのオーバーヘッドが実行時間を支配し、GPU のパフォーマンス向上がごく僅かになってしまうため、混合精度の恩恵を受けることができません。したがって、GPU を使用する場合には、それぞれが 4096 ユニットの 2 つの大きなDenseレイヤーを構築しましょう。 End of explanation """ print(dense1.dtype_policy) print('x.dtype: %s' % x.dtype.name) # 'kernel' is dense1's variable print('dense1.kernel.dtype: %s' % dense1.kernel.dtype.name) """ Explanation: 各レイヤーにはポリシーがあり、グローバルポリシーをデフォルトで使用します。前にグローバルポリシーをmixed_float16と設定したため、各Denseレイヤーにはmixed_float16ポリシーがあります。これによって、密なレイヤーは float16 計算を行い、float32 変数を持ちます。これらはfloat16 計算を行うために入力を float16 にキャストし、その結果、出力は float16 になります。変数は float32 なので、dtype の不一致によるエラーを回避するためにレイヤーを呼び出す際に、 float16 にキャストされます。 End of explanation """ # INCORRECT: softmax and model output will be 
float16, when it should be float32 outputs = layers.Dense(10, activation='softmax', name='predictions')(x) print('Outputs dtype: %s' % outputs.dtype.name) """ Explanation: 次に、出力予測を作成します。 通常、次のように出力予測を作成できますが、これは float16 で常に数値的に安定しているとは限りません。 End of explanation """ # CORRECT: softmax and model output are float32 x = layers.Dense(10, name='dense_logits')(x) outputs = layers.Activation('softmax', dtype='float32', name='predictions')(x) print('Outputs dtype: %s' % outputs.dtype.name) """ Explanation: モデル最後の softmax(ソフトマックス)のアクティブ化は、float32 にする必要があります。dtype ポリシーはmixed_float16であるため、ソフトマックスのアクティブ化は通常 float16 計算 dtype を持ち、float16 テンソルを出力します。 これは、高密度(Dense)レイヤーとソフトマックスレイヤーを分離して、ソフトマックスレイヤーにdtype='float32'を渡して修正することができます。 End of explanation """ # The linear activation is an identity function. So this simply casts 'outputs' # to float32. In this particular case, 'outputs' is already float32 so this is a # no-op. outputs = layers.Activation('linear', dtype='float32')(outputs) """ Explanation: dtype='float32'をソフトマックス レイヤー コンストラクタ―に渡すと、レイヤーの dtype ポリシーがfloat32ポリシーにオーバーライドされ、計算を行い、変数を float32 で保持します。同様に、その代わりにレイヤーが常に dtype 引数をポリシーに変換するdtype=mixed_precision.Policy('float32')を渡すこともできます。Activationレイヤーには変数がないため、ポリシーの変数 dtype は無視されますが、ポリシーの計算 dtype float32 はソフトマックスを適用し、モデル出力は float32 になります。 モデルの中間で float16 ソフトマックスを追加することは問題ありませんが、モデルの最後のソフトマックスは float32 にする必要があります。 その理由は、ソフトマックスから損失に流れる中間テンソルが float16 または bfloat16 である場合、数値の問題が発生する可能性があるためです。 float16 計算で数値的に安定しないと思われる場合、dtype='float32'を渡して任意のレイヤーを dtype にオーバーライドできます。ただし、ほとんどのレイヤーはmixed_float16またはmixed_bfloat16で十分な精度があるため、通常は、モデルの最後のレイヤーのみで必要です。 モデルがソフトマックスで終わらない場合でも、出力は float32 である必要があります。 この特定のモデルでは不要ですが、次のようにしてモデル出力を float32 にキャストすることができます。 End of explanation """ model = keras.Model(inputs=inputs, outputs=outputs) model.compile(loss='sparse_categorical_crossentropy', optimizer=keras.optimizers.RMSprop(), metrics=['accuracy']) (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() x_train = x_train.reshape(60000, 
784).astype('float32') / 255 x_test = x_test.reshape(10000, 784).astype('float32') / 255 """ Explanation: 次に、モデルを完成させてコンパイルし、入力データを生成します。 End of explanation """ initial_weights = model.get_weights() """ Explanation: この例では、入力データを int8 から float32 にキャストします。255 による除算は CPU で行われ、CPU の float16 演算は float32 演算よりも実行速度が遅いため、float16 にはキャストしません。この場合のパフォーマンスの違いはごくわずかですが、一般に CPU で実行する場合には float32 で入力処理演算を実行する必要があります。各レイヤーが浮動小数点入力を dtype 計算にキャストするため、モデルの最初のレイヤーは、入力を float16 にキャストします。 モデルの重みの初期値が取得されます。これによって、重みをロードして最初からトレーニングを再開できます。 End of explanation """ history = model.fit(x_train, y_train, batch_size=8192, epochs=5, validation_split=0.2) test_scores = model.evaluate(x_test, y_test, verbose=2) print('Test loss:', test_scores[0]) print('Test accuracy:', test_scores[1]) """ Explanation: Model.fit でモデルをトレーニングする 次に、モデルをトレーニングします。 End of explanation """ x = tf.constant(256, dtype='float16') (x ** 2).numpy() # Overflow x = tf.constant(1e-5, dtype='float16') (x ** 2).numpy() # Underflow """ Explanation: モデルはステップあたりの時間をログに出力します(例:「25ms/step」)。TensorFlow はモデルの最適化にある程度の時間を費やすため、最初のエポックは遅くなる可能性がありますが、その後はステップあたりの時間が安定するはずです。 このガイドを Colab で実行している場合は、混合精度と float32 のパフォーマンスの比較ができます。これを行うには、「dtype ポリシーを設定する」のセクションに従ってポリシーをmixed_float16 から float32 に変更し、この時点までのすべてのセルを再実行します。コンピューティング機能が 7.0 以上の GPU では、ステップあたりの時間が大幅に増加し、混合精度がモデルを高速化していることが分かるはずです。ガイドを続行する前に、必ずポリシーを mixed_float16 に戻し、セルを再実行してください。 コンピューティング機能が 8.0 以上の GPU(Ampere GPU 以上)では、混合精度を使った場合、 float32 に比べ、このガイドのトイモデルにおけるパフォーマンスの改善は見られません。これは、TensorFloat-32 の使用に起因するもので、特定の float32 演算で tf.linalg.matmul などのより精度の低い算術を自動的に使用するためです。TensorFloat-32 は float32 を使用した場合に混合精度のパフォーマンスメリットを提供しますが、実世界モデルではそれでも通常は、メモリ帯域幅の節約と TensorFloat-32 がサポートしない演算により、混同精度からの大幅なパフォーマンスの改善が見られます。 TPU で混合精度を実行している場合は、GPU で実行する場合に比べそれほどパフォーマンスのゲインは見られません。これは、TPU が、デフォルトの dtype ポリシーが float32 であっても内部的には bfloat16 で特定の演算を行うためです。これは Ampere GPU が TensorFloat-32 をデフォルトで使用する方法に似ています。Ampere GPU に比べ、TPU では通常は、実世界モデルの混合精度でパフォーマンスのゲインをあまり得られません。 float16 
テンソルの使用メモリは半分で済むため、実世界の多くのモデルでは、バッチサイズを 2 倍にしてもメモリ不足にならずに混合精度の使用が可能です。ただし、60,000 枚の画像から成る MNIST データセット全体で構成されるバッチは任意の dtype でモデルを実行できるため、これはこのトイモデルには適用されません。 Loss Scaling(損失スケーリング) 損失スケーリングは、tf.keras.Model.fit が mixed_float16 ポリシーを使用して自動的に実行し、数値のアンダーフローを回避する手法です。このセクションでは、損失スケーリングをカスタムトレーニングループと使用する方法について説明します。 アンダーフローとオーバーフロー float16 データ型は、float32 と比較するとダイナミックレンジが狭いです。これは、$65504$ を超える値はオーバーフローして無限大になり、$6.0 \times 10^{-8}$ 未満の値はアンダーフローしてゼロになることを意味します。float32 および bfloat16 はダイナミックレンジがはるかに高いため、オーバーフローとアンダーフローは問題になりません。 例: End of explanation """ optimizer = keras.optimizers.RMSprop() optimizer = mixed_precision.LossScaleOptimizer(optimizer) """ Explanation: 実際には、float16 によるオーバーフローは滅多に発生しません。また、フォワードパス中にアンダーフローが発生することもほとんどありません。ただし、バックワードパス(逆方向パス)中に、勾配がアンダーフローしてゼロになる可能性があります。損失スケーリングは、このアンダーフローを防ぐための手法です。 損失スケーリングの概要 損失スケーリングの基本的概念は単純です。単純に、損失に大きな数値($1024$ など)を掛け、その数値を 損失スケールと呼びます。これによって、勾配も $1024$ だけスケーリングされ、アンダーフローの可能性が大幅に減少します。最終的な勾配が計算されたら、それを $1024$ で除算して、正しい値に戻します。 このプロセスの擬似コードは次のようになります。 ``` loss_scale = 1024 loss = model(inputs) loss *= loss_scale Assume grads are float32. You do not want to divide float16 gradients. 
grads = compute_gradient(loss, model.trainable_variables) grads /= loss_scale ``` 損失スケールの選択は難しい場合があります。損失スケールが低すぎると、勾配はアンダーフローしてゼロになる可能性があります。高すぎると、反対の問題が発生し、勾配がオーバーフローして無限大になる可能性があります。 これを解決するために、TensorFlow は動的に損失スケールを決定します。手動で選択する必要はありません。tf.keras.Model.fit を使用すると損失スケーリングが行われるため、追加の作業を行う必要はありません。またカスタムトレーニングループを使用する場合は、損失スケーリングを使用するために、特別なオプティマイザラッパーである tf.keras.mixed_precision.LossScaleOptimizer を明示的に使用する必要があります。これについては、次のセクションで詳しく説明します。 カスタムトレーニングループでモデルをトレーニングする これまでに、tf.keras.Model.fitを使用し、混合精度で Keras モデルをトレーニングしました。次は、カスタムトレーニングループで混合精度を使用します。カスタムトレーニングループについてまだ知らない方は、まずカスタムトレーニングガイドをお読みください。 混合精度でカスタムトレーニングループを実行するには、float32 のみで実行する場合に比べ、2 つの変更が必要です。 混合精度でモデルを構築する(既に構築済み) mixed_float16が使用されている場合は、明示的に損失スケーリングを使用する 手順 (2) では、tf.keras.mixed_precision.LossScaleOptimizer クラスを使用し、オプティマイザをラップして損失スケーリングを適用します。デフォルトでは、損失スケールが動的に決定されるようになっているため、何も選択する必要はありません。次のようにして、これにはオプティマイザと損失スケールの 2 つの引数が必要です。LossScaleOptimizer は次のようにして作成します。 End of explanation """ loss_object = tf.keras.losses.SparseCategoricalCrossentropy() train_dataset = (tf.data.Dataset.from_tensor_slices((x_train, y_train)) .shuffle(10000).batch(8192)) test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(8192) """ Explanation: 必要であれば、明示的な損失スケールを選択するか、損失スケーリングの動作をカスタマイズすることもできますが、すべての既知のモデルで十分に動作することがわかっているため、損失スケーリングのデフォルトの動作を維持することを強くお勧めします。損失スケーリングの動作をカスタマイズする場合は、tf.keras.mixed_precision.LossScaleOptimizer ドキュメントをご覧ください。 次に、損失オブジェクトと tf.data.Dataset を定義します。 End of explanation """ @tf.function def train_step(x, y): with tf.GradientTape() as tape: predictions = model(x) loss = loss_object(y, predictions) scaled_loss = optimizer.get_scaled_loss(loss) scaled_gradients = tape.gradient(scaled_loss, model.trainable_variables) gradients = optimizer.get_unscaled_gradients(scaled_gradients) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) return loss """ Explanation: 次に、トレーニングステップ関数を定義します。 損失をスケーリングし、勾配のスケーリングを解除するために、損失スケールオプティマイザの 2 つの新しいメソッドを使用します。 
get_scaled_loss(loss): 損失スケールで損失を乗算する get_unscaled_gradients(gradients): スケーリングされた勾配のリストを入力として取り込み、それぞれを損失スケールで除算してスケーリング解除する これらの関数は、勾配のアンダーフローを防ぐために使用する必要があります。勾配に Inf や NaN がなければ、LossScaleOptimizer.apply_gradientsがそれらを適用します。 損失スケールも更新されるので、勾配に Inf または NaN があった場合は半分に、そうでない場合は高くなる可能性もあります。 End of explanation """ @tf.function def test_step(x): return model(x, training=False) """ Explanation: LossScaleOptimizerは、トレーニングの開始時に最初の数ステップを省略する可能性があります。最適な損失スケールを素早く決定するために、最初の損失スケールは高めです。いくらかのステップを踏むと、損失スケールが安定化し、省略されるステップが大幅に少なくなります。 このプロセスは自動的に行われ、トレーニングの品質に影響はありません。 次に、テストステップを定義します。 End of explanation """ model.set_weights(initial_weights) """ Explanation: モデルの初期の重み値を読み込み、最初から再トレーニングできるようにします。 End of explanation """ for epoch in range(5): epoch_loss_avg = tf.keras.metrics.Mean() test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='test_accuracy') for x, y in train_dataset: loss = train_step(x, y) epoch_loss_avg(loss) for x, y in test_dataset: predictions = test_step(x) test_accuracy.update_state(y, predictions) print('Epoch {}: loss={}, test accuracy={}'.format(epoch, epoch_loss_avg.result(), test_accuracy.result())) """ Explanation: 最後に、カスタムトレーニングループを実行します。 End of explanation """
johnnyliu27/openmc
examples/jupyter/mdgxs-part-ii.ipynb
mit
%matplotlib inline import math import matplotlib.pyplot as plt import numpy as np import openmc import openmc.mgxs """ Explanation: This IPython Notebook illustrates the use of the openmc.mgxs.Library class. The Library class is designed to automate the calculation of multi-group cross sections for use cases with one or more domains, cross section types, and/or nuclides. In particular, this Notebook illustrates the following features: Calculation of multi-energy-group and multi-delayed-group cross sections for a fuel assembly Automated creation, manipulation and storage of MGXS with openmc.mgxs.Library Steady-state pin-by-pin delayed neutron fractions (beta) for each delayed group. Generation of surface currents on the interfaces and surfaces of a Mesh. Generate Input Files End of explanation """ # 1.6 enriched fuel fuel = openmc.Material(name='1.6% Fuel') fuel.set_density('g/cm3', 10.31341) fuel.add_nuclide('U235', 3.7503e-4) fuel.add_nuclide('U238', 2.2625e-2) fuel.add_nuclide('O16', 4.6007e-2) # borated water water = openmc.Material(name='Borated Water') water.set_density('g/cm3', 0.740582) water.add_nuclide('H1', 4.9457e-2) water.add_nuclide('O16', 2.4732e-2) water.add_nuclide('B10', 8.0042e-6) # zircaloy zircaloy = openmc.Material(name='Zircaloy') zircaloy.set_density('g/cm3', 6.55) zircaloy.add_nuclide('Zr90', 7.2758e-3) """ Explanation: First we need to define materials that will be used in the problem: fuel, water, and cladding. End of explanation """ # Create a materials collection and export to XML materials = openmc.Materials((fuel, water, zircaloy)) materials.export_to_xml() """ Explanation: With our three materials, we can now create a Materials object that can be exported to an actual XML file. 
End of explanation """ # Create cylinders for the fuel and clad fuel_outer_radius = openmc.ZCylinder(R=0.39218) clad_outer_radius = openmc.ZCylinder(R=0.45720) # Create boundary planes to surround the geometry min_x = openmc.XPlane(x0=-10.71, boundary_type='reflective') max_x = openmc.XPlane(x0=+10.71, boundary_type='reflective') min_y = openmc.YPlane(y0=-10.71, boundary_type='reflective') max_y = openmc.YPlane(y0=+10.71, boundary_type='reflective') min_z = openmc.ZPlane(z0=-10., boundary_type='reflective') max_z = openmc.ZPlane(z0=+10., boundary_type='reflective') """ Explanation: Now let's move on to the geometry. This problem will be a square array of fuel pins and control rod guide tubes for which we can use OpenMC's lattice/universe feature. The basic universe will have three regions for the fuel, the clad, and the surrounding coolant. The first step is to create the bounding surfaces for fuel and clad, as well as the outer bounding surfaces of the problem. End of explanation """ # Create a Universe to encapsulate a fuel pin fuel_pin_universe = openmc.Universe(name='1.6% Fuel Pin') # Create fuel Cell fuel_cell = openmc.Cell(name='1.6% Fuel') fuel_cell.fill = fuel fuel_cell.region = -fuel_outer_radius fuel_pin_universe.add_cell(fuel_cell) # Create a clad Cell clad_cell = openmc.Cell(name='1.6% Clad') clad_cell.fill = zircaloy clad_cell.region = +fuel_outer_radius & -clad_outer_radius fuel_pin_universe.add_cell(clad_cell) # Create a moderator Cell moderator_cell = openmc.Cell(name='1.6% Moderator') moderator_cell.fill = water moderator_cell.region = +clad_outer_radius fuel_pin_universe.add_cell(moderator_cell) """ Explanation: With the surfaces defined, we can now construct a fuel pin cell from cells that are defined by intersections of half-spaces created by the surfaces. 
End of explanation """ # Create a Universe to encapsulate a control rod guide tube guide_tube_universe = openmc.Universe(name='Guide Tube') # Create guide tube Cell guide_tube_cell = openmc.Cell(name='Guide Tube Water') guide_tube_cell.fill = water guide_tube_cell.region = -fuel_outer_radius guide_tube_universe.add_cell(guide_tube_cell) # Create a clad Cell clad_cell = openmc.Cell(name='Guide Clad') clad_cell.fill = zircaloy clad_cell.region = +fuel_outer_radius & -clad_outer_radius guide_tube_universe.add_cell(clad_cell) # Create a moderator Cell moderator_cell = openmc.Cell(name='Guide Tube Moderator') moderator_cell.fill = water moderator_cell.region = +clad_outer_radius guide_tube_universe.add_cell(moderator_cell) """ Explanation: Likewise, we can construct a control rod guide tube with the same surfaces. End of explanation """ # Create fuel assembly Lattice assembly = openmc.RectLattice(name='1.6% Fuel Assembly') assembly.pitch = (1.26, 1.26) assembly.lower_left = [-1.26 * 17. / 2.0] * 2 """ Explanation: Using the pin cell universe, we can construct a 17x17 rectangular lattice with a 1.26 cm pitch. End of explanation """ # Create array indices for guide tube locations in lattice template_x = np.array([5, 8, 11, 3, 13, 2, 5, 8, 11, 14, 2, 5, 8, 11, 14, 2, 5, 8, 11, 14, 3, 13, 5, 8, 11]) template_y = np.array([2, 2, 2, 3, 3, 5, 5, 5, 5, 5, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11, 13, 13, 14, 14, 14]) # Create universes array with the fuel pin and guide tube universes universes = np.tile(fuel_pin_universe, (17,17)) universes[template_x, template_y] = guide_tube_universe # Store the array of universes in the lattice assembly.universes = universes """ Explanation: Next, we create a NumPy array of fuel pin and guide tube universes for the lattice. 
End of explanation """ # Create root Cell root_cell = openmc.Cell(name='root cell', fill=assembly) # Add boundary planes root_cell.region = +min_x & -max_x & +min_y & -max_y & +min_z & -max_z # Create root Universe root_universe = openmc.Universe(universe_id=0, name='root universe') root_universe.add_cell(root_cell) """ Explanation: OpenMC requires that there is a "root" universe. Let us create a root cell that is filled by the pin cell universe and then assign it to the root universe. End of explanation """ # Create Geometry and export to XML geometry = openmc.Geometry(root_universe) geometry.export_to_xml() """ Explanation: We now must create a geometry that is assigned a root universe and export it to XML. End of explanation """ # OpenMC simulation parameters batches = 50 inactive = 10 particles = 2500 # Instantiate a Settings object settings = openmc.Settings() settings.batches = batches settings.inactive = inactive settings.particles = particles settings.output = {'tallies': False} # Create an initial uniform spatial source distribution over fissionable zones bounds = [-10.71, -10.71, -10, 10.71, 10.71, 10.] uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True) settings.source = openmc.source.Source(space=uniform_dist) # Export to "settings.xml" settings.export_to_xml() """ Explanation: With the geometry and materials finished, we now just need to define simulation parameters. In this case, we will use 10 inactive batches and 40 active batches each with 2500 particles. End of explanation """ # Plot our geometry plot = openmc.Plot.from_geometry(geometry) plot.pixels = (250, 250) plot.color_by = 'material' openmc.plot_inline(plot) """ Explanation: Let us also create a plot to verify that our fuel assembly geometry was created successfully. 
End of explanation """ # Instantiate a 20-group EnergyGroups object energy_groups = openmc.mgxs.EnergyGroups() energy_groups.group_edges = np.logspace(-3, 7.3, 21) # Instantiate a 1-group EnergyGroups object one_group = openmc.mgxs.EnergyGroups() one_group.group_edges = np.array([energy_groups.group_edges[0], energy_groups.group_edges[-1]]) """ Explanation: As we can see from the plot, we have a nice array of fuel and guide tube pin cells with fuel, cladding, and water! Create an MGXS Library Now we are ready to generate multi-group cross sections! First, let's define a 20-energy-group and 1-energy-group. End of explanation """ # Instantiate a tally mesh mesh = openmc.Mesh(mesh_id=1) mesh.type = 'regular' mesh.dimension = [17, 17, 1] mesh.lower_left = [-10.71, -10.71, -10000.] mesh.width = [1.26, 1.26, 20000.] # Initialize an 20-energy-group and 6-delayed-group MGXS Library mgxs_lib = openmc.mgxs.Library(geometry) mgxs_lib.energy_groups = energy_groups mgxs_lib.num_delayed_groups = 6 # Specify multi-group cross section types to compute mgxs_lib.mgxs_types = ['total', 'transport', 'nu-scatter matrix', 'kappa-fission', 'inverse-velocity', 'chi-prompt', 'prompt-nu-fission', 'chi-delayed', 'delayed-nu-fission', 'beta'] # Specify a "mesh" domain type for the cross section tally filters mgxs_lib.domain_type = 'mesh' # Specify the mesh domain over which to compute multi-group cross sections mgxs_lib.domains = [mesh] # Construct all tallies needed for the multi-group cross section library mgxs_lib.build_library() # Create a "tallies.xml" file for the MGXS Library tallies_file = openmc.Tallies() mgxs_lib.add_to_tallies_file(tallies_file, merge=True) # Instantiate a current tally mesh_filter = openmc.MeshFilter(mesh) current_tally = openmc.Tally(name='current tally') current_tally.scores = ['current'] current_tally.filters = [mesh_filter] # Add current tally to the tallies file tallies_file.append(current_tally) # Export to "tallies.xml" tallies_file.export_to_xml() """ 
Explanation: Next, we will instantiate an openmc.mgxs.Library for the energy and delayed groups with our the fuel assembly geometry. End of explanation """ # Run OpenMC openmc.run() """ Explanation: Now, we can run OpenMC to generate the cross sections. End of explanation """ # Load the last statepoint file sp = openmc.StatePoint('statepoint.50.h5') """ Explanation: Tally Data Processing Our simulation ran successfully and created statepoint and summary output files. We begin our analysis by instantiating a StatePoint object. End of explanation """ # Initialize MGXS Library with OpenMC statepoint data mgxs_lib.load_from_statepoint(sp) # Extrack the current tally separately current_tally = sp.get_tally(name='current tally') """ Explanation: The statepoint is now ready to be analyzed by the Library. We simply have to load the tallies from the statepoint into the Library and our MGXS objects will compute the cross sections for us under-the-hood. End of explanation """ # Set the time constants for the delayed precursors (in seconds^-1) precursor_halflife = np.array([55.6, 24.5, 16.3, 2.37, 0.424, 0.195]) precursor_lambda = math.log(2.0) / precursor_halflife beta = mgxs_lib.get_mgxs(mesh, 'beta') # Create a tally object with only the delayed group filter for the time constants beta_filters = [f for f in beta.xs_tally.filters if type(f) is not openmc.DelayedGroupFilter] lambda_tally = beta.xs_tally.summation(nuclides=beta.xs_tally.nuclides) for f in beta_filters: lambda_tally = lambda_tally.summation(filter_type=type(f), remove_filter=True) * 0. + 1. 
# Set the mean of the lambda tally and reshape to account for nuclides and scores lambda_tally._mean = precursor_lambda lambda_tally._mean.shape = lambda_tally.std_dev.shape # Set a total nuclide and lambda score lambda_tally.nuclides = [openmc.Nuclide(name='total')] lambda_tally.scores = ['lambda'] delayed_nu_fission = mgxs_lib.get_mgxs(mesh, 'delayed-nu-fission') # Use tally arithmetic to compute the precursor concentrations precursor_conc = beta.xs_tally.summation(filter_type=openmc.EnergyFilter, remove_filter=True) * \ delayed_nu_fission.xs_tally.summation(filter_type=openmc.EnergyFilter, remove_filter=True) / lambda_tally # The difference is a derived tally which can generate Pandas DataFrames for inspection precursor_conc.get_pandas_dataframe().head(10) """ Explanation: Using Tally Arithmetic to Compute the Delayed Neutron Precursor Concentrations Finally, we illustrate how one can leverage OpenMC's tally arithmetic data processing feature with MGXS objects. The openmc.mgxs module uses tally arithmetic to compute multi-group cross sections with automated uncertainty propagation. Each MGXS object includes an xs_tally attribute which is a "derived" Tally based on the tallies needed to compute the cross section type of interest. These derived tallies can be used in subsequent tally arithmetic operations. For example, we can use tally artithmetic to compute the delayed neutron precursor concentrations using the Beta and DelayedNuFissionXS objects. 
The delayed neutron precursor concentrations are modeled using the following equations: $$\frac{\partial}{\partial t} C_{k,d} (t) = \int_{0}^{\infty}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r} \beta_{k,d} (t) \nu_d \sigma_{f,x}(\mathbf{r},E',t)\Phi(\mathbf{r},E',t) - \lambda_{d} C_{k,d} (t) $$ $$C_{k,d} (t=0) = \frac{1}{\lambda_{d}} \int_{0}^{\infty}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r} \beta_{k,d} (t=0) \nu_d \sigma_{f,x}(\mathbf{r},E',t=0)\Phi(\mathbf{r},E',t=0) $$ End of explanation """ current_tally.get_pandas_dataframe().head(10) """ Explanation: Another useful feature of the Python API is the ability to extract the surface currents for the interfaces and surfaces of a mesh. We can inspect the currents for the mesh by getting the pandas dataframe. End of explanation """ # Extract the energy-condensed delayed neutron fraction tally beta_by_group = beta.get_condensed_xs(one_group).xs_tally.summation(filter_type='energy', remove_filter=True) beta_by_group.mean.shape = (17, 17, 6) beta_by_group.mean[beta_by_group.mean == 0] = np.nan # Plot the betas plt.figure(figsize=(18,9)) fig = plt.subplot(231) plt.imshow(beta_by_group.mean[:,:,0], interpolation='none', cmap='jet') plt.colorbar() plt.title('Beta - delayed group 1') fig = plt.subplot(232) plt.imshow(beta_by_group.mean[:,:,1], interpolation='none', cmap='jet') plt.colorbar() plt.title('Beta - delayed group 2') fig = plt.subplot(233) plt.imshow(beta_by_group.mean[:,:,2], interpolation='none', cmap='jet') plt.colorbar() plt.title('Beta - delayed group 3') fig = plt.subplot(234) plt.imshow(beta_by_group.mean[:,:,3], interpolation='none', cmap='jet') plt.colorbar() plt.title('Beta - delayed group 4') fig = plt.subplot(235) plt.imshow(beta_by_group.mean[:,:,4], interpolation='none', cmap='jet') plt.colorbar() plt.title('Beta - delayed group 5') fig = plt.subplot(236) plt.imshow(beta_by_group.mean[:,:,5], interpolation='none', cmap='jet') plt.colorbar() plt.title('Beta - delayed 
group 6') """ Explanation: Cross Section Visualizations In addition to inspecting the data in the tallies by getting the pandas dataframe, we can also plot the tally data on the domain mesh. Below is the delayed neutron fraction tallied in each mesh cell for each delayed group. End of explanation """
stargaser/advancedviz2016
Linking_and_brushing.ipynb
mit
import bokeh import numpy as np from astropy.table import Table sdss = Table.read('data/sdss_galaxies_qsos_50k.fits') sdss from bokeh.models import ColumnDataSource from bokeh.plotting import figure, gridplot, output_notebook, output_file, show umg = sdss['u'] - sdss['g'] gmr = sdss['g'] - sdss['r'] rmi = sdss['r'] - sdss['i'] imz = sdss['i'] - sdss['z'] # create a column data source for the plots to share source = ColumnDataSource(data=dict(umg=umg, gmr=gmr, rmi=rmi,imz=imz)) """ Explanation: Linking and brushing with bokeh Linking and brushing is a powerful method for exploratory data analysis. One way to create linked plots in the notebook is to use Bokeh. End of explanation """ output_file('sdss_color_color.html') TOOLS = "pan,wheel_zoom,reset,box_select,poly_select,help" # create a new plot and add a renderer left = figure(tools=TOOLS, width=400, height=400, title='SDSS g-r vs u-g', webgl=True) left.x('umg', 'gmr', source=source) # create another new plot and add a renderer right = figure(tools=TOOLS, width=400, height=400, title='SDSS i-z vs r-i') right.x('rmi', 'imz', source=source) p = gridplot([[left, right]]) show(p) """ Explanation: We will output to a static html file. The output_notebook() function can output to the notebook, but with 50,000 points it really slows down. 
End of explanation """ #import glue # Quick way to launch Glue #from glue import qglue #qglue() """ Explanation: See many examples of configuring plot tools at http://bokeh.pydata.org/en/latest/docs/user_guide/tools.html Interacting with Glue End of explanation """ import astropy.io.fits as fits hdu = fits.open('data/w5.fits') hdu[0].header from astropy.table import Table w5catalog = Table.read('data/w5_psc.vot') wisecat = Table.read('data/w5_wise.tbl', format='ipac') %gui qt #qglue(catalog=catalog, image=hdu, wisecat=wisecat) from glue.core.data_factories import load_data from glue.core import DataCollection from glue.core.link_helpers import LinkSame from glue.app.qt.application import GlueApplication #load 2 datasets from files image = load_data('data/w5.fits') catalog = load_data('data/w5_psc.vot') dc = DataCollection([image, catalog]) # link positional information dc.add_link(LinkSame(image.id['Right Ascension'], catalog.id['RAJ2000'])) dc.add_link(LinkSame(image.id['Declination'], catalog.id['DEJ2000'])) #start Glue app = GlueApplication(dc) app.start() """ Explanation: Here we'll interact with Glue from the notebook. End of explanation """ dc dc[0].components dc[0].id['Right Ascension'] """ Explanation: Now we have access to the data collection in our notebook End of explanation """ catalog = dc[1] j_minus_h = catalog['Jmag'] - catalog['Hmag'] """ Explanation: Now go select the "Western arm" of the star-forming region (in Glue) and make a subset of it End of explanation """ catalog['jmh'] = j_minus_h hmag = catalog.id['Hmag'] jmag = catalog.id['Jmag'] """ Explanation: We can add something to our catalog and it shows up in Glue. End of explanation """ jmhred = (jmag - hmag) > 1.5 dc.new_subset_group('j - h > 1.5', jmhred) dc.subset_groups dc.subset_groups[2].label catalog.subsets catalog.subsets[0]['Jmag'] mask = catalog.subsets[0].to_mask() new_catalog = w5catalog[mask] """ Explanation: We can define a new subset group here or in Glue End of explanation """
lo-co/atm-py
examples/SMPS Algorithm Test.ipynb
mit
# Instantiate a DMA object based on the NOAA wide DMA dimensions noaa = dma.NoaaWide() # We also need a gas object that will represent the carrier gas of interest: air air = atm.Air() # Set the conditions to something representative of the conditions in the Boulder DMA air.t = 23 air.p = 820 d = noaa.v2d(50, air, 5, 5) print('The diameter associated with a 5 lpm flow and 50 V is ' + str(d) + ' nm.') """ Explanation: Demonstration of DMA Calculations The DMA is represented as a single object in the code. The object consists of three attributes: _ro, _ri and _l, the dimensions of the DMA (outer and inner radius - $r_o$ and $r_i$ as well as length $l$). These parameters are considered to be "private" and are defined by the children although the class can be used directly by setting these directly. The DMA object contains one method: v2d. This method calculates the diameter of a size selected particle as a function of voltage, atmospheric conditions and flow rates. ``` python def v2d(self, v, gas, qc, qm): """ Find selected diameter at a given voltage. This function uses a Newton-Raphson root finder to solve for the diameter of the particle. Parameters ---------- v: float Voltage in Volts gas: gas object Carrier gas object for performing calculations qc: float Input sheath flow in lpm qm: float Output sheath flow in lpm Returns ------- Diameter in nanometers """ gamma = self._l/log(self._ro/self._ri) # Convert flow rates from lpm to m3/s qc = float(qc)/60*0.001 qm = float(qm)/60*0.001 # Central mobility zc = (qc+qm)/(4*pi*gamma*v) return newton(lambda d: z(d, gas, 1)-zc, 1, maxiter=1000) ``` This method uses a Newton-Raphson zero finding routine to determine the diameter associated with central mobility. Using the DMA Class The following code demonstrates how to use the DMA class that is found in the atmPy package. 
End of explanation """ sd = sizedistribution.simulate_sizedistribution(d=[10, 2000], ndp=250, dg=60, sg=0.2, nmode=3000) f,a = sd.plot() a.set_xlim([10,1000]) a.grid() """ Explanation: Generating a Simulation Size Distribution In order to test the SMPS inversion, we need to simulate some type of size distribution Generate a size distribution that we can use to test the retrieval of the SMPS data. We will put the geometric mean (dg) at 60 nm, close to the observed point during HAGiS. End of explanation """ nd = sd.convert2numberconcentration() f,a = nd.plot() """ Explanation: Now, we need to transform the size distribution so that it is what the SMPS might observe. End of explanation """ # demonstrate charing efficiency on size distribution f2 = [aerosol.ndistr(i,n=-2,t=20) for i in nd.bincenters] f3 = [aerosol.ndistr(i,n=-3,t=20) for i in nd.bincenters] f1 = [aerosol.ndistr(i,n=-1,t=20) for i in nd.bincenters] fig,ax = plt.subplots() ax.plot(nd.bincenters, f1, 'r', nd.bincenters, f2,'k', nd.bincenters, f3, 'b') ax.set_xscale('log') ax.set_xlabel('Dp (nm)') ax.set_ylabel('f') ax.grid() fig.tight_layout() ax.set_title('Charging Efficiency vs. Diameter') # Create an air object of aerosl calculations... air = atm.Air() air.t = 20 air.p = 850 # Validate vs. 
Paul Barron's calculations; LOOKS GOOD aerosol.z(1000, air, 1) # Define a function for finding the diameter fmin = lambda dm: (np.abs(nd.bincenters - dm)).argmin() xd = nd.copy() for i,n in enumerate(xd.data.iloc[0,:].values): f1 = aerosol.ndistr(xd.bincenters[i],n=-1,t = air.t) f2 = aerosol.ndistr(xd.bincenters[i],n=-2,t = air.t) f3 = aerosol.ndistr(xd.bincenters[i],n=-3,t = air.t) d2 =aerosol.z2d(aerosol.z(xd.bincenters[i],air, n=2),air, n=1) if d2>= xd.bins[0]: k = fmin(d2) if d2 > xd.bins[k]: xd.data.values[0,k] += f2*n else: xd.data.values[0,k-1] +=f2*n d3 =aerosol.z2d(aerosol.z(xd.bincenters[i],air, n=3),air, n=1) if d3>= xd.bins[0]: k = fmin(d3) if d3 > xd.bins[k]: xd.data.values[0,k] += f3*n else: xd.data.values[0,k-1] +=f3*n xd.data.values[0,i] = f1*n # print([i, xd.bincenters[i], fmin(d2), d2, fmin(d3), d3]) f,a = xd.plot() a.grid() """ Explanation: Now, we need to "correct" the size distribution so that it is what the SMPS would observe: Starting at the bottom-most bin, calculate the number of singly charged particles in the bin. This will be the base number of particles in the bin. Based on the total number of particles, calculate the number of multiply charged particles expected as well as the diameters where they would reside. If the diameter of the multiply charged particles is greater than the min, place that number in the nearest diameter bin. End of explanation """ # The SMPS routines need a mean data dataframe with the following entries mean_data = {"Sh_Q_VLPM":3.9, "Aer_Temp_C": air.t, "Aer_Pres_PSI":air.p, "Aer_Q_VLPM":0.39 } # Convert the dict to a list and generate a dataframe df = pd.DataFrame([mean_data]) # Create a new SMPS object using the dimensions of the NOAA Wide DMA... 
tSMPS = smps.SMPS(dma.NoaaWide()) xd.data.iloc[0,:].values dndlogdp = tSMPS.__fwhm__(xd.bincenters, xd.data.iloc[0,:].values, df) dndlogdp[-1] = 0 xd.data.iloc[0,:].values xd.plot() fig, ax= plt.subplots() ax.plot(tSMPS.diam_interp, dndlogdp) ax.set_xscale('log') ax.grid() noaa = dma.NoaaWide() air = atm.Air() air.t = 25 air.p = 820 noaa.v2d(37.2, air, 3.7, 3.7) """ Explanation: So there is some funniness in the discreet nature of the problem - the reason the discreet peaks appear is that the algorithm is dumping multiples of multiples into a single bin. I have tried to address this by dumping the data into the bins with a fixed bin width (rather than searching for the nearest bin center), but to no avail. The problem is exacerbated by increasing the granularity of the problem (the opposite of what I thought might happen). At this point, I will just move ahead and hope that I get this right... End of explanation """
ohgodscience/Python
mousetrackerdata/post2.ipynb
gpl-2.0
import pandas as pd import re data = pd.read_csv("mousetrackercorrected.csv") data.columns.values data.iloc[0:4, 0:19] """ Explanation: Overview I'm going to be looking at some pilot data that some colleagues and I collected using Jon Freeman's Mousetracker (http://www.mousetracker.org/). As the name suggests, mousetracker is a program designed to track participants' mouse movements. In social psychology, researchers use it to track participants' implicit decision making processes. Originally, it was developed to study how individuals categorize faces. An example of the paradigm would be a participant having to choose whether a face is male or female, like so: The researcher could then vary the degree to which the face has stereotypically male features, or stereotypically female features, and track not just what participants are categorizing the faces as, but also, how they reach those decisions, by tracking the paths and time course of the mouse movements. Current project Anyway, some friends and I are currently working on distinguishing how individuals allocate resources in the context of a relationship. We hypothesize that at any given time, individuals are concerned with: their self-interest their partner's interests the interest of the group or dyad, or the relationship, or them as a pair and these motives affect the way individuals choose to distribute resources. To distinguish between these three motives, we generated three sets of stimuli using poker chips that pit each of these motives against each other. The first set of stimuli pit participants' self-interest against the interests of their partner. For example, if red poker chips were paid out to you and green to your partner, one dilemma would be choosing between these two stacks of poker chips of equal height (i.e., the group receives the same in both cases): Left | Right | :------:|:---:| | The second set of stimuli pits a participant's concern for the interest of their partner vs. 
their own self interest and the group's interest. This captures participants' "pure" altruistic motives in the sense that choosing to favor their partner in this scenario sacrifices both their own interests and the group's interest: Left | Right | :--:|:---:| | Finally, the last set of stimuli pit participants' self-interest against that of their partner and the group. In this case, one set of poker chips results in the participant getting more than the other set of chips, but in the other set of poker chips, his/her partner gets more and so does the pair of them: Left | Right | :--:|:---:| | The data The data come in a person-period dataset. This is a "long" format where each participant has multiple rows that represent each trial of the experiment (there were 60 or so trials). However, each row also contains multiple columns each representing a bin of average locations the participant's mouse pointer was during that time span. There are ~100 such bins. In other words, each participant made 60 choices, and their mouse positions were averaged into ~100 time points per trial. The first thing we're going to do is to load our data. To do this, we first import Pandas, read our .csv file and print a list of columns. 
The raw data can be found here: https://raw.githubusercontent.com/bryansim/Python/master/mousetrackerdata/mousetrackercorrected.csv End of explanation """ data['MD'] = data.loc[data['MD_1'].isnull() == False, ['MD_1']] data.loc[data['MD'].isnull() == True,['MD']] = data.loc[data['MD_2'].isnull() == False]['MD_2'] #We do this to get a slice instead of data.loc[data['MD_2'].isnull() == False, ['MD_2']] which returns a dataframe data['AUC'] = data.loc[data['AUC_1'].isnull() == False, ['AUC_1']] data.loc[data['AUC'].isnull() == True, ['AUC']] = data.loc[data['AUC_2'].isnull() == False]['AUC_2'] """ Explanation: Descriptives In the above data, what we're going to be first doing is finding the mean of participants' reaction time (RT), maximum deviation (MD), and aure under curve (AUC). The latter two measures are measures of how much participants were "attracted" to the other option despite selecting the option that they did. There are two columns for each (e.g., MD_1 and MD_2 depending on which option participants chose). These end up being redundant with one another, and we'll have to combine them. x-flips and y-flips, as their names suggest, measure the number of times participants' cursors flipped on the x and y axis. To combine the two MD columns, we create a new column, find all the rows which have data in MD_1, and then fill in the rows which don't have data in MD_1 with the rows that have data in MD_2. We do the same with AUC. End of explanation """ data['AUC'].mean() data['MD'].mean() """ Explanation: Mean MD and AUC Now, we can use the .mean() method to get the mean of the above. 
End of explanation """ sodata = data.loc[data['code'].str.extract(r'(so)', expand = False).isnull() == False] smgldata = data.loc[data['code'].str.extract(r'(smgl)', expand = False).isnull() == False] smgmdata = data.loc[data['code'].str.extract(r'(smgm)', expand = False).isnull() == False] print sodata['MD'].mean() print smgldata['MD'].mean() print smgmdata['MD'].mean() print sodata['AUC'].mean() print smgldata['AUC'].mean() print smgmdata['AUC'].mean() """ Explanation: Means by choice type The next thing we want to do is see whether participants differed depending on what the type of choice was (e.g., self vs. other etc.) Eventually, we will have a 3x2 table of means: self vs. other | group more w/ self less | group more w/ self more | ---:|:---:|:---: chose selfish | chose selfish | chose selfish chose selfless | chose selfless | chose selffless Because of the way the conditions were coded (they include trial numbers), we'll use some regex to ignore those numbers: End of explanation """ print sodata.loc[sodata['error'] == 0]['MD'].mean() print sodata.loc[sodata['error'] == 1]['MD'].mean() print smgldata.loc[smgldata['error'] == 0]['MD'].mean() print smgldata.loc[smgldata['error'] == 1]['MD'].mean() print smgmdata.loc[smgmdata['error'] == 0]['MD'].mean() print smgmdata.loc[smgmdata['error'] == 1]['MD'].mean() print sodata.loc[sodata['error'] == 0]['AUC'].mean() print sodata.loc[sodata['error'] == 1]['AUC'].mean() print smgldata.loc[smgldata['error'] == 0]['AUC'].mean() print smgldata.loc[smgldata['error'] == 1]['AUC'].mean() print smgmdata.loc[smgmdata['error'] == 0]['AUC'].mean() print smgmdata.loc[smgmdata['error'] == 1]['AUC'].mean() """ Explanation: AS IT TURNS OUT, this isn't very helpful, because this analysis collapses over whether or not participant chose the selfish or unselfish option, which is really what we're interested in. So let's look at that next: End of explanation """
ewulczyn/talk_page_abuse
src/data_generation/crowdflower_analysis/src/Crowdflower Analysis (Experiment v. 2).ipynb
apache-2.0
%matplotlib inline from __future__ import division import pandas as pd import numpy as np import matplotlib.pyplot as plt pd.set_option('display.max_colwidth', 1000) # Download data from google drive (Respect Eng / Wiki Collab): wikipdia data/v2_annotated dat = pd.read_csv('../data/exp2_annotated_1k_no_admin_blocked_user_post_sample.csv') # Remove test questions dat = dat[dat['_golden'] == False] # Replace missing data with 'False' dat = dat.replace(np.nan, False, regex=True) #Translate the values from #how_aggressive_or_friendly_is_the_tone_of_this_comment into numbers def translate_how_aggressive_to_num(score): str_to_num = {'+++ Very Friendly': 3, '++': 2, '+': 1, 'Neutral': 0, '-': -1, '--': -2, '--- Very Aggressive (including passive aggression)': -3} if score: return str_to_num[score] else: return np.nan # We reshape the answers for future analysis dat['aggression_score'] = dat['how_aggressive_or_friendly_is_the_tone_of_this_comment'].apply(translate_how_aggressive_to_num) def create_column_of_counts(df, col): return df.apply(lambda x: col in str(x)) attack_columns = ['not_attack', 'other', 'quoting', 'recipient', 'third_party'] for col in attack_columns: dat[col] = create_column_of_counts(dat['is_harassment_or_attack'], col) def create_column_of_counts_from_nums(df, col): return df.apply(lambda x: int(col) == x) aggressive_columns = ['-3', '-2', '-1', '0', '1', '2', '3'] for col in aggressive_columns: dat[col] = create_column_of_counts_from_nums(dat['aggression_score'], col) dat['not_attack_0'] = 1 - dat['not_attack'] dat['not_attack_1'] = dat['not_attack'] # Group the data agg_dict = dict.fromkeys(attack_columns, 'mean') agg_dict.update(dict.fromkeys(aggressive_columns, 'sum')) agg_dict.update({'clean_diff': 'first', 'na': 'mean', 'aggression_score': 'mean', '_id':'count', 'not_attack_0':'sum', 'not_attack_1': 'sum'}) grouped_dat = dat.groupby(['rev_id'], as_index=False).agg(agg_dict) # Get rid of data which the majority thinks is not in English or not 
readable grouped_dat = grouped_dat[grouped_dat['na'] < 0.5] """ Explanation: Introduction This notebook is an analysis of the Crowdflower labels of 1000 revisions of Wikipedia talk pages by users who have been blocked for personal harassment. This dataset has been cleaned and filtered to remove common administrator messages. These datasets are annotated via crowdflower to measure friendliness, aggressiveness and whether the comment constitutes a personal attack. On Crowdflower, each revision is rated 7 times. The raters are given three questions: Is this comment not English or not human readable? Column 'na' How aggressive or friendly is the tone of this comment? Column 'how_aggressive_or_friendly_is_the_tone_of_this_comment' Ranges from '---' (Very Aggressive) to '+++' (Very Friendly) Does the comment contain a personal attack or harassment? Please mark all that apply: Column 'is_harassment_or_attack' Users can specify that the attack is: Targeted at the recipient of the message (i.e. you suck). ('recipent') Targeted at a third party (i.e. Bob sucks). ('third_party') Being reported or quoted (i.e. Bob said Henri sucks). ('quoting') Another kind of attack or harassment. ('other') This is not an attack or harassment. ('not_attack') Below, we plot histograms of the units by average rating of each of the questions, examine quantiles of answers, and compute inter-annotator agreement. 
Loading packages and data End of explanation """ def hist_comments(df, bins, plot_by, title): plt.figure() sliced_array = df[[plot_by]] weights = np.ones_like(sliced_array)/len(sliced_array) sliced_array.plot.hist(bins = bins, legend = False, title = title, weights=weights) plt.ylabel('Proportion') plt.xlabel('Average Score') bins = np.linspace(-3,3,11) hist_comments(grouped_dat, bins, 'aggression_score', 'Average Aggressiveness Rating for Blocked Data') bins = np.linspace(0,1,9) for col in attack_columns: hist_comments(grouped_dat, bins, col, 'Average %s Rating' % col) """ Explanation: Plot histogram of average ratings by revision For each revision, we take the average of all the ratings by level of friendliness/aggressiveness and for each of the answers to Question 3. The histograms of these averages is displayed below. End of explanation """ def sorted_comments(df, sort_by, quartile, num, is_ascending = True): n = df.shape[0] start_index = int(quartile*n) return df[['clean_diff', 'aggression_score', 'not_attack', 'other', 'quoting', 'recipient', 'third_party']].sort_values( by=sort_by, ascending = is_ascending)[start_index:start_index + num] # Most aggressive comments sorted_comments(grouped_dat, 'aggression_score', 0, 5) # Median aggressive comments sorted_comments(grouped_dat, 'aggression_score', 0.5, 5) # Least aggressive comments sorted_comments(grouped_dat, 'aggression_score', 0, 5, False) """ Explanation: Selected harassing and aggressive revisions by quartile We look at a sample of revisions whose average aggressive score falls into various quantiles. This allows us to subjectively evaluate the quality of the questions that we are asking on Crowdflower. End of explanation """ # Most aggressive comments which are labelled 'This is not an attack or harassment.' sorted_comments(grouped_dat[grouped_dat['not_attack'] > 0.5], 'aggression_score', 0, 5) # Most aggressive comments which are labelled 'Being reported or quoted (i.e. Bob said Henri sucks).' 
sorted_comments(grouped_dat[grouped_dat['quoting'] > 0.3], 'aggression_score', 0, 5) # Most aggressive comments which are labelled 'Another kind of attack or harassment.' sorted_comments(grouped_dat[grouped_dat['other'] > 0.5], 'aggression_score', 0, 5) # Most aggressive comments which are labelled 'Targeted at a third party (i.e. Bob sucks).' sorted_comments(grouped_dat[grouped_dat['third_party'] > 0.5], 'aggression_score', 0, 5) # Least aggressive comments which are NOT labelled 'This is not an attack or harassment.' sorted_comments(grouped_dat[grouped_dat['not_attack'] < 0.5], 'aggression_score', 0, 5, False) """ Explanation: Selected revisions on multiple questions In this section, we examine a selection of revisions by their answer to Question 3 and sorted by aggression score. Again, this allows us to subjectively evaluate the quality of questions and responses that we obtain from Crowdflower. End of explanation """ def add_row_to_coincidence(o, row, columns): m_u = row.sum(1) for i in columns: for j in columns: if i == j: o[i][j] = o[i][j] + row[i]*(row[i]-1)/(m_u-1) else: o[i][j] = o[i][j] + row[i]*row[j]/(m_u-1) return o def make_coincidence_matrix(df, columns): df = df[columns] n = df.shape[0] num_cols = len(columns) o = pd.DataFrame(np.zeros((num_cols,num_cols)), index = columns, columns=columns) for i in xrange(n): o = add_row_to_coincidence(o, df[i:i+1], columns) return o def binary_distance(i,j): return i!=j def interval_distance(i,j): return (int(i)-int(j))**2 def e(n, i, j): if i == j: return n[i]*(n[i]-1)/sum(n)-1 else: return n[i]*n[j]/sum(n)-1 def D_e(o, columns, distance): n = o.sum(1) output = 0 for i in columns: for j in columns: output = output + e(n,i,j)*distance(i,j) return output def D_o(o, columns, distance): output = 0 for i in columns: for j in columns: output = output + o[i][j]*distance(i,j) return output def Krippendorf_alpha(df, columns, distance = binary_distance, o = None): if o is None: o = make_coincidence_matrix(df, columns) d_o 
= D_o(o, columns, distance) d_e = D_e(o, columns, distance) return (1 - d_o/d_e) o = make_coincidence_matrix(grouped_dat, aggressive_columns) print "Krippendorf's Alpha: " Krippendorf_alpha(grouped_dat, aggressive_columns, distance = interval_distance, o = o) Krippendorf_alpha(grouped_dat, ['not_attack_0', 'not_attack_1']) grouped_dat.columns grouped_dat['not_attack'] """ Explanation: Inter-Annotator Agreement Below, we compute the Krippendorf's Alpha, which is a measure of the inter-annotator agreement of our Crowdflower responses. We achieve an Alpha value of 0.668 on our dataset, which is a relatively good level of inter-annotator agreement for this type of subjective inquiry. End of explanation """
mne-tools/mne-tools.github.io
0.23/_downloads/299b3deaa8eb66e88d34f06090d06628/evoked_ers_source_power.ipynb
bsd-3-clause
# Authors: Luke Bloy <luke.bloy@gmail.com> # Eric Larson <larson.eric.d@gmail.com> # # License: BSD (3-clause) import os.path as op import numpy as np import mne from mne.cov import compute_covariance from mne.datasets import somato from mne.time_frequency import csd_morlet from mne.beamformer import (make_dics, apply_dics_csd, make_lcmv, apply_lcmv_cov) from mne.minimum_norm import (make_inverse_operator, apply_inverse_cov) print(__doc__) """ Explanation: Compute evoked ERS source power using DICS, LCMV beamformer, and dSPM Here we examine 3 ways of localizing event-related synchronization (ERS) of beta band activity in this dataset: somato-dataset using :term:DICS, :term:LCMV beamformer, and :term:dSPM applied to active and baseline covariance matrices. End of explanation """ data_path = somato.data_path() subject = '01' task = 'somato' raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg', 'sub-{}_task-{}_meg.fif'.format(subject, task)) # crop to 5 minutes to save memory raw = mne.io.read_raw_fif(raw_fname).crop(0, 300) # We are interested in the beta band (12-30 Hz) raw.load_data().filter(12, 30) # The DICS beamformer currently only supports a single sensor type. # We'll use the gradiometers in this example. 
picks = mne.pick_types(raw.info, meg='grad', exclude='bads') # Read epochs events = mne.find_events(raw) epochs = mne.Epochs(raw, events, event_id=1, tmin=-1.5, tmax=2, picks=picks, preload=True, decim=3) # Read forward operator and point to freesurfer subject directory fname_fwd = op.join(data_path, 'derivatives', 'sub-{}'.format(subject), 'sub-{}_task-{}-fwd.fif'.format(subject, task)) subjects_dir = op.join(data_path, 'derivatives', 'freesurfer', 'subjects') fwd = mne.read_forward_solution(fname_fwd) """ Explanation: Reading the raw data and creating epochs: End of explanation """ active_win = (0.5, 1.5) baseline_win = (-1, 0) baseline_cov = compute_covariance(epochs, tmin=baseline_win[0], tmax=baseline_win[1], method='shrunk', rank=None) active_cov = compute_covariance(epochs, tmin=active_win[0], tmax=active_win[1], method='shrunk', rank=None) # Weighted averaging is already in the addition of covariance objects. common_cov = baseline_cov + active_cov """ Explanation: Compute covariances ERS activity starts at 0.5 seconds after stimulus onset. 
End of explanation """ def _gen_dics(active_win, baseline_win, epochs): freqs = np.logspace(np.log10(12), np.log10(30), 9) csd = csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=20) csd_baseline = csd_morlet(epochs, freqs, tmin=baseline_win[0], tmax=baseline_win[1], decim=20) csd_ers = csd_morlet(epochs, freqs, tmin=active_win[0], tmax=active_win[1], decim=20) filters = make_dics(epochs.info, fwd, csd.mean(), pick_ori='max-power', reduce_rank=True, real_filter=True) stc_base, freqs = apply_dics_csd(csd_baseline.mean(), filters) stc_act, freqs = apply_dics_csd(csd_ers.mean(), filters) stc_act /= stc_base return stc_act # generate lcmv source estimate def _gen_lcmv(active_cov, baseline_cov, common_cov): filters = make_lcmv(epochs.info, fwd, common_cov, reg=0.05, noise_cov=None, pick_ori='max-power') stc_base = apply_lcmv_cov(baseline_cov, filters) stc_act = apply_lcmv_cov(active_cov, filters) stc_act /= stc_base return stc_act # generate mne/dSPM source estimate def _gen_mne(active_cov, baseline_cov, common_cov, fwd, info, method='dSPM'): inverse_operator = make_inverse_operator(info, fwd, common_cov) stc_act = apply_inverse_cov(active_cov, info, inverse_operator, method=method, verbose=True) stc_base = apply_inverse_cov(baseline_cov, info, inverse_operator, method=method, verbose=True) stc_act /= stc_base return stc_act # Compute source estimates stc_dics = _gen_dics(active_win, baseline_win, epochs) stc_lcmv = _gen_lcmv(active_cov, baseline_cov, common_cov) stc_dspm = _gen_mne(active_cov, baseline_cov, common_cov, fwd, epochs.info) """ Explanation: Compute some source estimates Here we will use DICS, LCMV beamformer, and dSPM. See ex-inverse-source-power for more information about DICS. 
End of explanation """ brain_dics = stc_dics.plot( hemi='rh', subjects_dir=subjects_dir, subject=subject, time_label='DICS source power in the 12-30 Hz frequency band') """ Explanation: Plot source estimates DICS: End of explanation """ brain_lcmv = stc_lcmv.plot( hemi='rh', subjects_dir=subjects_dir, subject=subject, time_label='LCMV source power in the 12-30 Hz frequency band') """ Explanation: LCMV: End of explanation """ brain_dspm = stc_dspm.plot( hemi='rh', subjects_dir=subjects_dir, subject=subject, time_label='dSPM source power in the 12-30 Hz frequency band') """ Explanation: dSPM: End of explanation """
quoniammm/mine-tensorflow-examples
fastAI/deeplearning1/nbs/lesson4.ipynb
mit
ratings = pd.read_csv(path+'ratings.csv') ratings.head() len(ratings) """ Explanation: Set up data We're working with the movielens data, which contains one rating per row, like this: End of explanation """ movie_names = pd.read_csv(path+'movies.csv').set_index('movieId')['title'].to_dict() users = ratings.userId.unique() movies = ratings.movieId.unique() userid2idx = {o:i for i,o in enumerate(users)} movieid2idx = {o:i for i,o in enumerate(movies)} """ Explanation: Just for display purposes, let's read in the movie names too. End of explanation """ ratings.movieId = ratings.movieId.apply(lambda x: movieid2idx[x]) ratings.userId = ratings.userId.apply(lambda x: userid2idx[x]) user_min, user_max, movie_min, movie_max = (ratings.userId.min(), ratings.userId.max(), ratings.movieId.min(), ratings.movieId.max()) user_min, user_max, movie_min, movie_max n_users = ratings.userId.nunique() n_movies = ratings.movieId.nunique() n_users, n_movies """ Explanation: We update the movie and user ids so that they are contiguous integers, which we want when using embeddings. End of explanation """ n_factors = 50 np.random.seed = 42 """ Explanation: This is the number of latent factors in each embedding. End of explanation """ msk = np.random.rand(len(ratings)) < 0.8 trn = ratings[msk] val = ratings[~msk] """ Explanation: Randomly split into training and validation. End of explanation """ g=ratings.groupby('userId')['rating'].count() topUsers=g.sort_values(ascending=False)[:15] g=ratings.groupby('movieId')['rating'].count() topMovies=g.sort_values(ascending=False)[:15] top_r = ratings.join(topUsers, rsuffix='_r', how='inner', on='userId') top_r = top_r.join(topMovies, rsuffix='_r', how='inner', on='movieId') pd.crosstab(top_r.userId, top_r.movieId, top_r.rating, aggfunc=np.sum) """ Explanation: Create subset for Excel We create a crosstab of the most popular movies and most movie-addicted users which we'll copy into Excel for creating a simple example. 
This isn't necessary for any of the modeling below however. End of explanation """ user_in = Input(shape=(1,), dtype='int64', name='user_in') u = Embedding(n_users, n_factors, input_length=1, W_regularizer=l2(1e-4))(user_in) movie_in = Input(shape=(1,), dtype='int64', name='movie_in') m = Embedding(n_movies, n_factors, input_length=1, W_regularizer=l2(1e-4))(movie_in) x = merge([u, m], mode='dot') x = Flatten()(x) model = Model([user_in, movie_in], x) model.compile(Adam(0.001), loss='mse') model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=1, validation_data=([val.userId, val.movieId], val.rating)) model.optimizer.lr=0.01 model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=3, validation_data=([val.userId, val.movieId], val.rating)) model.optimizer.lr=0.001 model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=6, validation_data=([val.userId, val.movieId], val.rating)) """ Explanation: Dot product The most basic model is a dot product of a movie embedding and a user embedding. 
Let's see how well that works: End of explanation """ def embedding_input(name, n_in, n_out, reg): inp = Input(shape=(1,), dtype='int64', name=name) return inp, Embedding(n_in, n_out, input_length=1, W_regularizer=l2(reg))(inp) user_in, u = embedding_input('user_in', n_users, n_factors, 1e-4) movie_in, m = embedding_input('movie_in', n_movies, n_factors, 1e-4) def create_bias(inp, n_in): x = Embedding(n_in, 1, input_length=1)(inp) return Flatten()(x) ub = create_bias(user_in, n_users) mb = create_bias(movie_in, n_movies) x = merge([u, m], mode='dot') x = Flatten()(x) x = merge([x, ub], mode='sum') x = merge([x, mb], mode='sum') model = Model([user_in, movie_in], x) model.compile(Adam(0.001), loss='mse') model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=1, validation_data=([val.userId, val.movieId], val.rating)) model.optimizer.lr=0.01 model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=6, validation_data=([val.userId, val.movieId], val.rating)) model.optimizer.lr=0.001 model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=10, validation_data=([val.userId, val.movieId], val.rating)) model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=5, validation_data=([val.userId, val.movieId], val.rating)) """ Explanation: The best benchmarks are a bit over 0.9, so this model doesn't seem to be working that well... Bias The problem is likely to be that we don't have bias terms - that is, a single bias for each user and each movie representing how positive or negative each user is, and how good each movie is. We can add that easily by simply creating an embedding with one output for each movie and each user, and adding it to our output. End of explanation """ model.save_weights(model_path+'bias.h5') model.load_weights(model_path+'bias.h5') """ Explanation: This result is quite a bit better than the best benchmarks that we could find with a quick google search - so looks like a great approach! 
End of explanation """ model.predict([np.array([3]), np.array([6])]) """ Explanation: We can use the model to generate predictions by passing a pair of ints - a user id and a movie id. For instance, this predicts that user #3 would really enjoy movie #6. End of explanation """ g=ratings.groupby('movieId')['rating'].count() topMovies=g.sort_values(ascending=False)[:2000] topMovies = np.array(topMovies.index) """ Explanation: Analyze results To make the analysis of the factors more interesting, we'll restrict it to the top 2000 most popular movies. End of explanation """ get_movie_bias = Model(movie_in, mb) movie_bias = get_movie_bias.predict(topMovies) movie_ratings = [(b[0], movie_names[movies[i]]) for i,b in zip(topMovies,movie_bias)] """ Explanation: First, we'll look at the movie bias term. We create a 'model' - which in keras is simply a way of associating one or more inputs with one more more outputs, using the functional API. Here, our input is the movie id (a single id), and the output is the movie bias (a single float). End of explanation """ sorted(movie_ratings, key=itemgetter(0))[:15] sorted(movie_ratings, key=itemgetter(0), reverse=True)[:15] """ Explanation: Now we can look at the top and bottom rated movies. These ratings are corrected for different levels of reviewer sentiment, as well as different types of movies that different reviewers watch. End of explanation """ get_movie_emb = Model(movie_in, m) movie_emb = np.squeeze(get_movie_emb.predict([topMovies])) movie_emb.shape """ Explanation: We can now do the same thing for the embeddings. End of explanation """ from sklearn.decomposition import PCA pca = PCA(n_components=3) movie_pca = pca.fit(movie_emb.T).components_ fac0 = movie_pca[0] movie_comp = [(f, movie_names[movies[i]]) for f,i in zip(fac0, topMovies)] """ Explanation: Because it's hard to interpret 50 embeddings, we use PCA to simplify them down to just 3 vectors. 
End of explanation """ sorted(movie_comp, key=itemgetter(0), reverse=True)[:10] sorted(movie_comp, key=itemgetter(0))[:10] fac1 = movie_pca[1] movie_comp = [(f, movie_names[movies[i]]) for f,i in zip(fac1, topMovies)] """ Explanation: Here's the 1st component. It seems to be 'critically acclaimed' or 'classic'. End of explanation """ sorted(movie_comp, key=itemgetter(0), reverse=True)[:10] sorted(movie_comp, key=itemgetter(0))[:10] fac2 = movie_pca[2] movie_comp = [(f, movie_names[movies[i]]) for f,i in zip(fac2, topMovies)] """ Explanation: The 2nd is 'hollywood blockbuster'. End of explanation """ sorted(movie_comp, key=itemgetter(0), reverse=True)[:10] sorted(movie_comp, key=itemgetter(0))[:10] """ Explanation: The 3rd is 'violent vs happy'. End of explanation """ import sys stdout, stderr = sys.stdout, sys.stderr # save notebook stdout and stderr reload(sys) sys.setdefaultencoding('utf-8') sys.stdout, sys.stderr = stdout, stderr # restore notebook stdout and stderr start=50; end=100 X = fac0[start:end] Y = fac2[start:end] plt.figure(figsize=(15,15)) plt.scatter(X, Y) for i, x, y in zip(topMovies[start:end], X, Y): plt.text(x,y,movie_names[movies[i]], color=np.random.rand(3)*0.7, fontsize=14) plt.show() """ Explanation: We can draw a picture to see how various movies appear on the map of these components. This picture shows the 1st and 3rd components. 
End of explanation """ user_in, u = embedding_input('user_in', n_users, n_factors, 1e-4) movie_in, m = embedding_input('movie_in', n_movies, n_factors, 1e-4) x = merge([u, m], mode='concat') x = Flatten()(x) x = Dropout(0.3)(x) x = Dense(70, activation='relu')(x) x = Dropout(0.75)(x) x = Dense(1)(x) nn = Model([user_in, movie_in], x) nn.compile(Adam(0.001), loss='mse') nn.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=8, validation_data=([val.userId, val.movieId], val.rating)) """ Explanation: Neural net Rather than creating a special purpose architecture (like our dot-product with bias earlier), it's often both easier and more accurate to use a standard neural network. Let's try it! Here, we simply concatenate the user and movie embeddings into a single vector, which we feed into the neural net. End of explanation """
nadvamir/deep-learning
autoencoder/Simple_Autoencoder.ipynb
mit
%matplotlib inline import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', validation_size=0) """ Explanation: A Simple Autoencoder We'll start off by building a simple autoencoder to compress the MNIST dataset. With autoencoders, we pass input data through an encoder that makes a compressed representation of the input. Then, this representation is passed through a decoder to reconstruct the input data. Generally the encoder and decoder will be built with neural networks, then trained on example data. In this notebook, we'll be build a simple network architecture for the encoder and decoder. Let's get started by importing our libraries and getting the dataset. End of explanation """ img = mnist.train.images[2] plt.imshow(img.reshape((28, 28)), cmap='Greys_r') """ Explanation: Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits. End of explanation """ # Size of the encoding layer (the hidden layer) encoding_dim = 32 # feel free to change this value input_dim = output_dim = mnist.train.images.shape[1] inputs_ = tf.placeholder(tf.float32, (None, input_dim), name='inputs') targets_ = tf.placeholder(tf.float32, (None, output_dim), name='targets') # Output of hidden layer encoded = tf.layers.dense(inputs_, encoding_dim, activation_fn=tf.nn.relu) # Output layer logits logits = tf.layers.dense(inputs_, output_dim, activation_fn=None) # Sigmoid output from logits decoded = tf.nn.sigmoid(logits) # Sigmoid cross-entropy loss loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets_) # Mean of the loss cost = tf.reduce_mean(loss) # Adam optimizer opt = tf.train.AdamOptimizer(0.001).minimize(cost) """ Explanation: We'll train an autoencoder with these images by flattening them into 784 length vectors. 
The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building basically the simplest autoencoder with a single ReLU hidden layer. This layer will be used as the compressed representation. Then, the encoder is the input layer and the hidden layer. The decoder is the hidden layer and the output layer. Since the images are normalized between 0 and 1, we need to use a sigmoid activation on the output layer to get values matching the input. Exercise: Build the graph for the autoencoder in the cell below. The input images will be flattened into 784 length vectors. The targets are the same as the inputs. And there should be one hidden layer with a ReLU activation and an output layer with a sigmoid activation. The loss should be calculated with the cross-entropy loss, there is a convenient TensorFlow function for this tf.nn.sigmoid_cross_entropy_with_logits (documentation). You should note that tf.nn.sigmoid_cross_entropy_with_logits takes the logits, but to get the reconstructed images you'll need to pass the logits through the sigmoid function. End of explanation """ # Create the session sess = tf.Session() """ Explanation: Training End of explanation """ epochs = 20 batch_size = 200 sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) feed = {inputs_: batch[0], targets_: batch[0]} batch_cost, _ = sess.run([cost, opt], feed_dict=feed) print("Epoch: {}/{}...".format(e+1, epochs), "Training loss: {:.4f}".format(batch_cost)) """ Explanation: Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss. Calling mnist.train.next_batch(batch_size) will return a tuple of (images, labels). We're not concerned with the labels here, we just need the images. Otherwise this is pretty straightfoward training with TensorFlow. 
We initialize the variables with sess.run(tf.global_variables_initializer()). Then, run the optimizer and get the loss with batch_cost, _ = sess.run([cost, opt], feed_dict=feed). End of explanation """ fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4)) in_imgs = mnist.test.images[:10] reconstructed, compressed = sess.run([decoded, encoded], feed_dict={inputs_: in_imgs}) for images, row in zip([in_imgs, reconstructed], axes): for img, ax in zip(images, row): ax.imshow(img.reshape((28, 28)), cmap='Greys_r') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) fig.tight_layout(pad=0.1) sess.close() """ Explanation: Checking out the results Below I've plotted some of the test images along with their reconstructions. For the most part these look pretty good except for some blurriness in some parts. End of explanation """
google/earthengine-api
python/examples/ipynb/Earth_Engine_REST_API_compute_image.ipynb
apache-2.0
# INSERT YOUR PROJECT HERE PROJECT = 'your-project' !gcloud auth login --project {PROJECT} """ Explanation: <table class="ee-notebook-buttons" align="left"><td> <a target="_blank" href="http://colab.research.google.com/github/google/earthengine-api/blob/master/python/examples/ipynb/Earth_Engine_REST_API_compute_image.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td><td> <a target="_blank" href="https://github.com/google/earthengine-api/blob/master/python/examples/ipynb/Earth_Engine_REST_API_compute_image.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td></table> Image computations with the Earth Engine REST API Note: The REST API contains new and advanced features that may not be suitable for all users. If you are new to Earth Engine, please get started with the JavaScript guide. The Earth Engine REST API quickstart shows how to access blocks of pixels from an Earth Engine asset. Suppose you want to apply a computation to the pixels before obtaining the result. This guide shows how to prototype a computation with one of the client libraries, serialize the computation graph and use the REST API to obtain the computed result. Making compute requests through the REST API corresponds to a POST request to one of the compute endpoints, for example computePixels, computeFeatures, or the generic value.compute. Specifically, this example demonstrates getting a median composite of Sentinel-2 imagery in a small region. Before you begin Follow these instructions to: Apply for Earth Engine Create a Google Cloud project Enable the Earth Engine API on the project Create a service account Give the service account project level permission to perform Earth Engine computations Note: To complete this tutorial, you will need a service account that is registered for Earth Engine access. See these instructions to register a service account before proceeding. 
Authenticate to Google Cloud The first thing to do is login so that you can make authenticated requests to Google Cloud. You will set the project at the same time. Follow the instructions in the output to complete the sign in. End of explanation """ # INSERT YOUR SERVICE ACCOUNT HERE SERVICE_ACCOUNT='your-service-account@your-project.iam.gserviceaccount.com' KEY = 'key.json' !gcloud iam service-accounts keys create {KEY} --iam-account {SERVICE_ACCOUNT} """ Explanation: Obtain a private key file for your service account You should already have a service account registered to use Earth Engine. If you don't, follow these instructions to get one. Copy the email address of your service account into the following cell. (The service account must already be registered to use Earth Engine). In the following cell, the gsutil command line is used to generate a key file for the service account. The key file will be created on the notebook VM. End of explanation """ from google.auth.transport.requests import AuthorizedSession from google.oauth2 import service_account credentials = service_account.Credentials.from_service_account_file(KEY) scoped_credentials = credentials.with_scopes( ['https://www.googleapis.com/auth/cloud-platform']) session = AuthorizedSession(scoped_credentials) url = 'https://earthengine.googleapis.com/v1beta/projects/earthengine-public/assets/LANDSAT' response = session.get(url) from pprint import pprint import json pprint(json.loads(response.content)) """ Explanation: Start an AuthorizedSession and test your credentials Test the private key by using it to get credentials. Use the credentials to create an authorized session to make HTTP requests. Make a GET request through the session to check that the credentials work. End of explanation """ import ee # Get some new credentials since the other ones are cloud scope. 
ee_creds = ee.ServiceAccountCredentials(SERVICE_ACCOUNT, KEY) ee.Initialize(ee_creds) """ Explanation: Serialize a computation Before you can send a request to compute something, the computation needs to be put into the Earth Engine expression graph format. The following demonstrates how to obtain the expression graph. Authenticate to Earth Engine Get Earth Engine scoped credentials from the service account. Use them to initialize Earth Engine. End of explanation """ coords = [ -121.58626826832939, 38.059141484827485, ] region = ee.Geometry.Point(coords) collection = ee.ImageCollection('COPERNICUS/S2') collection = collection.filterBounds(region) collection = collection.filterDate('2020-04-01', '2020-09-01') image = collection.median() """ Explanation: Define a computation Prototype a simple computation with the client API. Note that the result of the computation is an Image. End of explanation """ serialized = ee.serializer.encode(image) """ Explanation: Serialize the expression graph This will create an object that represents the Earth Engine expression graph (specifically, an Expression). In general, you should build these with one of the client APIs. End of explanation """ # Make a projection to discover the scale in degrees. proj = ee.Projection('EPSG:4326').atScale(10).getInfo() # Get scales out of the transform. scale_x = proj['transform'][0] scale_y = -proj['transform'][4] """ Explanation: Create the desired projection (WGS84) at the desired scale (10 meters for Sentinel-2). This is just to discover the desired scale in degrees, the units of the projection. These scales will be used to specify the affine transform in the request. 
End of explanation """ import json url = 'https://earthengine.googleapis.com/v1beta/projects/{}/image:computePixels' url = url.format(PROJECT) response = session.post( url=url, data=json.dumps({ 'expression': serialized, 'fileFormat': 'PNG', 'bandIds': ['B4','B3','B2'], 'grid': { 'dimensions': { 'width': 640, 'height': 640 }, 'affineTransform': { 'scaleX': scale_x, 'shearX': 0, 'translateX': coords[0], 'shearY': 0, 'scaleY': scale_y, 'translateY': coords[1] }, 'crsCode': 'EPSG:4326', }, 'visualizationOptions': {'ranges': [{'min': 0, 'max': 3000}]}, }) ) image_content = response.content """ Explanation: Send the request Make a POST request to the computePixels endpoint. Note that the request contains the Expression, which is the serialized computation. It also contains a PixelGrid. The PixelGrid contains dimensions for the desired output and an AffineTransform in the units of the requested coordinate system. Here the coordinate system is geographic, so the transform is specified with scale in degrees and geographic coordinates of the upper left corner of the requested image patch. End of explanation """ # Import the Image function from the IPython.display module. from IPython.display import Image Image(image_content) """ Explanation: If you are running this in a notebook, you can display the results using the IPython image display widget. End of explanation """
zhuanxuhit/deep-learning
batch-norm/my_Batch_Normalization_Lesson.ipynb
mit
# Import necessary packages import tensorflow as tf import tqdm import numpy as np import matplotlib.pyplot as plt %matplotlib inline # Import MNIST data so we have something for our experiments from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) """ Explanation: Batch Normalization – Lesson What is it? What are it's benefits? How do we add it to a network? Let's see it work! What are you hiding? What is Batch Normalization?<a id='theory'></a> Batch normalization was introduced in Sergey Ioffe's and Christian Szegedy's 2015 paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. The idea is that, instead of just normalizing the inputs to the network, we normalize the inputs to layers within the network. It's called "batch" normalization because during training, we normalize each layer's inputs by using the mean and variance of the values in the current mini-batch. Why might this help? Well, we know that normalizing the inputs to a network helps the network learn. But a network is a series of layers, where the output of one layer becomes the input to another. That means we can think of any layer in a neural network as the first layer of a smaller network. For example, imagine a 3 layer network. Instead of just thinking of it as a single network with inputs, layers, and outputs, think of the output of layer 1 as the input to a two layer network. This two layer network would consist of layers 2 and 3 in our original network. Likewise, the output of layer 2 can be thought of as the input to a single layer network, consistng only of layer 3. When you think of it like that - as a series of neural networks feeding into each other - then it's easy to imagine how normalizing the inputs to each layer would help. It's just like normalizing the inputs to any other neural network, but you're doing it at every layer (sub-network). 
Beyond the intuitive reasons, there are good mathematical reasons why it helps the network learn better, too. It helps combat what the authors call internal covariate shift. This discussion is best handled in the paper and in Deep Learning a book you can read online written by Ian Goodfellow, Yoshua Bengio, and Aaron Courville. Specifically, check out the batch normalization section of Chapter 8: Optimization for Training Deep Models. Benefits of Batch Normalization<a id="benefits"></a> Batch normalization optimizes network training. It has been shown to have several benefits: 1. Networks train faster – Each training iteration will actually be slower because of the extra calculations during the forward pass and the additional hyperparameters to train during back propagation. However, it should converge much more quickly, so training should be faster overall. 2. Allows higher learning rates – Gradient descent usually requires small learning rates for the network to converge. And as networks get deeper, their gradients get smaller during back propagation so they require even more iterations. Using batch normalization allows us to use much higher learning rates, which further increases the speed at which networks train. 3. Makes weights easier to initialize – Weight initialization can be difficult, and it's even more difficult when creating deeper networks. Batch normalization seems to allow us to be much less careful about choosing our initial starting weights. 4. Makes more activation functions viable – Some activation functions do not work well in some situations. Sigmoids lose their gradient pretty quickly, which means they can't be used in deep networks. And ReLUs often die out during training, where they stop learning completely, so we need to be careful about the range of values fed into them. 
Because batch normalization regulates the values going into each activation function, non-linearlities that don't seem to work well in deep networks actually become viable again. 5. Simplifies the creation of deeper networks – Because of the first 4 items listed above, it is easier to build and faster to train deeper neural networks when using batch normalization. And it's been shown that deeper networks generally produce better results, so that's great. 6. Provides a bit of regularlization – Batch normalization adds a little noise to your network. In some cases, such as in Inception modules, batch normalization has been shown to work as well as dropout. But in general, consider batch normalization as a bit of extra regularization, possibly allowing you to reduce some of the dropout you might add to a network. 7. May give better results overall – Some tests seem to show batch normalization actually improves the train.ing results. However, it's really an optimization to help train faster, so you shouldn't think of it as a way to make your network better. But since it lets you train networks faster, that means you can iterate over more designs more quickly. It also lets you build deeper networks, which are usually better. So when you factor in everything, you're probably going to end up with better results if you build your networks with batch normalization. Batch Normalization in TensorFlow<a id="implementation_1"></a> This section of the notebook shows you one way to add batch normalization to a neural network built in TensorFlow. The following cell imports the packages we need in the notebook and loads the MNIST dataset to use in our experiments. However, the tensorflow package contains all the code you'll actually need for batch normalization. End of explanation """ class NeuralNet: def __init__(self, initial_weights, activation_fn, use_batch_norm): """ Initializes this object, creating a TensorFlow graph using the given parameters. 
:param initial_weights: list of NumPy arrays or Tensors Initial values for the weights for every layer in the network. We pass these in so we can create multiple networks with the same starting weights to eliminate training differences caused by random initialization differences. The number of items in the list defines the number of layers in the network, and the shapes of the items in the list define the number of nodes in each layer. e.g. Passing in 3 matrices of shape (784, 256), (256, 100), and (100, 10) would create a network with 784 inputs going into a hidden layer with 256 nodes, followed by a hidden layer with 100 nodes, followed by an output layer with 10 nodes. :param activation_fn: Callable The function used for the output of each hidden layer. The network will use the same activation function on every hidden layer and no activate function on the output layer. e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers. :param use_batch_norm: bool Pass True to create a network that uses batch normalization; False otherwise Note: this network will not use batch normalization on layers that do not have an activation function. """ # Keep track of whether or not this network uses batch normalization. self.use_batch_norm = use_batch_norm self.name = "With Batch Norm" if use_batch_norm else "Without Batch Norm" # Batch normalization needs to do different calculations during training and inference, # so we use this placeholder to tell the graph which behavior to use. self.is_training = tf.placeholder(tf.bool, name="is_training") # This list is just for keeping track of data we want to plot later. # It doesn't actually have anything to do with neural nets or batch normalization. self.training_accuracies = [] # Create the network graph, but it will not actually have any real values until after you # call train or test self.build_network(initial_weights, activation_fn) def build_network(self, initial_weights, activation_fn): """ Build the graph. 
The graph still needs to be trained via the `train` method. :param initial_weights: list of NumPy arrays or Tensors See __init__ for description. :param activation_fn: Callable See __init__ for description. """ self.input_layer = tf.placeholder(tf.float32, [None, initial_weights[0].shape[0]]) layer_in = self.input_layer for weights in initial_weights[:-1]: layer_in = self.fully_connected(layer_in, weights, activation_fn) self.output_layer = self.fully_connected(layer_in, initial_weights[-1]) def fully_connected(self, layer_in, initial_weights, activation_fn=None): """ Creates a standard, fully connected layer. Its number of inputs and outputs will be defined by the shape of `initial_weights`, and its starting weight values will be taken directly from that same parameter. If `self.use_batch_norm` is True, this layer will include batch normalization, otherwise it will not. :param layer_in: Tensor The Tensor that feeds into this layer. It's either the input to the network or the output of a previous layer. :param initial_weights: NumPy array or Tensor Initial values for this layer's weights. The shape defines the number of nodes in the layer. e.g. Passing in 3 matrix of shape (784, 256) would create a layer with 784 inputs and 256 outputs. :param activation_fn: Callable or None (default None) The non-linearity used for the output of the layer. If None, this layer will not include batch normalization, regardless of the value of `self.use_batch_norm`. e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers. """ # Since this class supports both options, only use batch normalization when # requested. However, do not use it on the final layer, which we identify # by its lack of an activation function. if self.use_batch_norm and activation_fn: # Batch normalization uses weights as usual, but does NOT add a bias term. This is because # its calculations include gamma and beta variables that make the bias term unnecessary. 
# (See later in the notebook for more details.) weights = tf.Variable(initial_weights) linear_output = tf.matmul(layer_in, weights) # Apply batch normalization to the linear combination of the inputs and weights batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training) # Now apply the activation function, *after* the normalization. return activation_fn(batch_normalized_output) else: # When not using batch normalization, create a standard layer that multiplies # the inputs and weights, adds a bias, and optionally passes the result # through an activation function. weights = tf.Variable(initial_weights) biases = tf.Variable(tf.zeros([initial_weights.shape[-1]])) linear_output = tf.add(tf.matmul(layer_in, weights), biases) return linear_output if not activation_fn else activation_fn(linear_output) def train(self, session, learning_rate, training_batches, batches_per_sample, save_model_as=None): """ Trains the model on the MNIST training dataset. :param session: Session Used to run training graph operations. :param learning_rate: float Learning rate used during gradient descent. :param training_batches: int Number of batches to train. :param batches_per_sample: int How many batches to train before sampling the validation accuracy. :param save_model_as: string or None (default None) Name to use if you want to save the trained model. 
""" # This placeholder will store the target labels for each mini batch labels = tf.placeholder(tf.float32, [None, 10]) # Define loss and optimizer cross_entropy = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=self.output_layer)) # Define operations for testing correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) if self.use_batch_norm: # If we don't include the update ops as dependencies on the train step, the # tf.layers.batch_normalization layers won't update their population statistics, # which will cause the model to fail at inference time with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy) else: train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy) # Train for the appropriate number of batches. (tqdm is only for a nice timing display) for i in tqdm.tqdm(range(training_batches)): # We use batches of 60 just because the original paper did. You can use any size batch you like. batch_xs, batch_ys = mnist.train.next_batch(60) session.run(train_step, feed_dict={self.input_layer: batch_xs, labels: batch_ys, self.is_training: True}) # Periodically test accuracy against the 5k validation images and store it for plotting later. 
if i % batches_per_sample == 0: test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images, labels: mnist.validation.labels, self.is_training: False}) self.training_accuracies.append(test_accuracy) # After training, report accuracy against test data test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images, labels: mnist.validation.labels, self.is_training: False}) print('{}: After training, final accuracy on validation set = {}'.format(self.name, test_accuracy)) # If you want to use this model later for inference instead of having to retrain it, # just construct it with the same parameters and then pass this file to the 'test' function if save_model_as: tf.train.Saver().save(session, save_model_as) def test(self, session, test_training_accuracy=False, include_individual_predictions=False, restore_from=None): """ Trains a trained model on the MNIST testing dataset. :param session: Session Used to run the testing graph operations. :param test_training_accuracy: bool (default False) If True, perform inference with batch normalization using batch mean and variance; if False, perform inference with batch normalization using estimated population mean and variance. Note: in real life, *always* perform inference using the population mean and variance. This parameter exists just to support demonstrating what happens if you don't. :param include_individual_predictions: bool (default True) This function always performs an accuracy test against the entire test set. But if this parameter is True, it performs an extra test, doing 200 predictions one at a time, and displays the results and accuracy. :param restore_from: string or None (default None) Name of a saved model if you want to test with previously saved weights. 
""" # This placeholder will store the true labels for each mini batch labels = tf.placeholder(tf.float32, [None, 10]) # Define operations for testing correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # If provided, restore from a previously saved model if restore_from: tf.train.Saver().restore(session, restore_from) # Test against all of the MNIST test data test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.test.images, labels: mnist.test.labels, self.is_training: test_training_accuracy}) print('-'*75) print('{}: Accuracy on full test set = {}'.format(self.name, test_accuracy)) # If requested, perform tests predicting individual values rather than batches if include_individual_predictions: predictions = [] correct = 0 # Do 200 predictions, 1 at a time for i in range(200): # This is a normal prediction using an individual test case. However, notice # we pass `test_training_accuracy` to `feed_dict` as the value for `self.is_training`. # Remember that will tell it whether it should use the batch mean & variance or # the population estimates that were calucated while training the model. pred, corr = session.run([tf.arg_max(self.output_layer,1), accuracy], feed_dict={self.input_layer: [mnist.test.images[i]], labels: [mnist.test.labels[i]], self.is_training: test_training_accuracy}) correct += corr predictions.append(pred[0]) print("200 Predictions:", predictions) print("Accuracy on 200 samples:", correct/200) """ Explanation: Neural network classes for testing The following class, NeuralNet, allows us to create identical neural networks with and without batch normalization. The code is heaviy documented, but there is also some additional discussion later. You do not need to read through it all before going through the rest of the notebook, but the comments within the code blocks may answer some of your questions. 
About the code: This class is not meant to represent TensorFlow best practices – the design choices made here are to support the discussion related to batch normalization. It's also important to note that we use the well-known MNIST data for these examples, but the networks we create are not meant to be good for performing handwritten character recognition. We chose this network architecture because it is similar to the one used in the original paper, which is complex enough to demonstrate some of the benefits of batch normalization while still being fast to train. End of explanation """ def plot_training_accuracies(*args, **kwargs): """ Displays a plot of the accuracies calculated during training to demonstrate how many iterations it took for the model(s) to converge. :param args: One or more NeuralNet objects You can supply any number of NeuralNet objects as unnamed arguments and this will display their training accuracies. Be sure to call `train` the NeuralNets before calling this function. :param kwargs: You can supply any named parameters here, but `batches_per_sample` is the only one we look for. It should match the `batches_per_sample` value you passed to the `train` function. """ fig, ax = plt.subplots() batches_per_sample = kwargs['batches_per_sample'] for nn in args: ax.plot(range(0,len(nn.training_accuracies)*batches_per_sample,batches_per_sample), nn.training_accuracies, label=nn.name) ax.set_xlabel('Training steps') ax.set_ylabel('Accuracy') ax.set_title('Validation Accuracy During Training') ax.legend(loc=4) ax.set_ylim([0,1]) plt.yticks(np.arange(0, 1.1, 0.1)) plt.grid(True) plt.show() def train_and_test(use_bad_weights, learning_rate, activation_fn, training_batches=50000, batches_per_sample=500): """ Creates two networks, one with and one without batch normalization, then trains them with identical starting weights, layers, batches, etc. Finally tests and plots their accuracies. 
:param use_bad_weights: bool If True, initialize the weights of both networks to wildly inappropriate weights; if False, use reasonable starting weights. :param learning_rate: float Learning rate used during gradient descent. :param activation_fn: Callable The function used for the output of each hidden layer. The network will use the same activation function on every hidden layer and no activate function on the output layer. e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers. :param training_batches: (default 50000) Number of batches to train. :param batches_per_sample: (default 500) How many batches to train before sampling the validation accuracy. """ # Use identical starting weights for each network to eliminate differences in # weight initialization as a cause for differences seen in training performance # # Note: The networks will use these weights to define the number of and shapes of # its layers. The original batch normalization paper used 3 hidden layers # with 100 nodes in each, followed by a 10 node output layer. These values # build such a network, but feel free to experiment with different choices. # However, the input size should always be 784 and the final output should be 10. 
if use_bad_weights: # These weights should be horrible because they have such a large standard deviation weights = [np.random.normal(size=(784,100), scale=5.0).astype(np.float32), np.random.normal(size=(100,100), scale=5.0).astype(np.float32), np.random.normal(size=(100,100), scale=5.0).astype(np.float32), np.random.normal(size=(100,10), scale=5.0).astype(np.float32) ] else: # These weights should be good because they have such a small standard deviation weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32), np.random.normal(size=(100,100), scale=0.05).astype(np.float32), np.random.normal(size=(100,100), scale=0.05).astype(np.float32), np.random.normal(size=(100,10), scale=0.05).astype(np.float32) ] # Just to make sure the TensorFlow's default graph is empty before we start another # test, because we don't bother using different graphs or scoping and naming # elements carefully in this sample code. tf.reset_default_graph() # build two versions of same network, 1 without and 1 with batch normalization nn = NeuralNet(weights, activation_fn, False) bn = NeuralNet(weights, activation_fn, True) # train and test the two models with tf.Session() as sess: tf.global_variables_initializer().run() nn.train(sess, learning_rate, training_batches, batches_per_sample) bn.train(sess, learning_rate, training_batches, batches_per_sample) nn.test(sess) bn.test(sess) # Display a graph of how validation accuracies changed during training # so we can compare how the models trained and when they converged plot_training_accuracies(nn, bn, batches_per_sample=batches_per_sample) """ Explanation: There are quite a few comments in the code, so those should answer most of your questions. However, let's take a look at the most important lines. We add batch normalization to layers inside the fully_connected function. Here are some important points about that code: 1. Layers with batch normalization do not include a bias term. 2. 
We use TensorFlow's tf.layers.batch_normalization function to handle the math. (We show lower-level ways to do this later in the notebook.) 3. We tell tf.layers.batch_normalization whether or not the network is training. This is an important step we'll talk about later. 4. We add the normalization before calling the activation function. In addition to that code, the training step is wrapped in the following with statement: python with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): This line actually works in conjunction with the training parameter we pass to tf.layers.batch_normalization. Without it, TensorFlow's batch normalization layer will not operate correctly during inference. Finally, whenever we train the network or perform inference, we use the feed_dict to set self.is_training to True or False, respectively, like in the following line: python session.run(train_step, feed_dict={self.input_layer: batch_xs, labels: batch_ys, self.is_training: True}) We'll go into more details later, but next we want to show some experiments that use this code and test networks with and without batch normalization. Batch Normalization Demos<a id='demos'></a> This section of the notebook trains various networks with and without batch normalization to demonstrate some of the benefits mentioned earlier. We'd like to thank the author of this blog post Implementing Batch Normalization in TensorFlow. That post provided the idea of - and some of the code for - plotting the differences in accuracy during training, along with the idea for comparing multiple networks using the same initial weights. Code to support testing The following two functions support the demos we run in the notebook. The first function, plot_training_accuracies, simply plots the values found in the training_accuracies lists of the NeuralNet objects passed to it. 
If you look at the train function in NeuralNet, you'll see it that while it's training the network, it periodically measures validation accuracy and stores the results in that list. It does that just to support these plots. The second function, train_and_test, creates two neural nets - one with and one without batch normalization. It then trains them both and tests them, calling plot_training_accuracies to plot how their accuracies changed over the course of training. The really imporant thing about this function is that it initializes the starting weights for the networks outside of the networks and then passes them in. This lets it train both networks from the exact same starting weights, which eliminates performance differences that might result from (un)lucky initial weights. End of explanation """ train_and_test(False, 0.01, tf.nn.relu) """ Explanation: Comparisons between identical networks, with and without batch normalization The next series of cells train networks with various settings to show the differences with and without batch normalization. They are meant to clearly demonstrate the effects of batch normalization. We include a deeper discussion of batch normalization later in the notebook. The following creates two networks using a ReLU activation function, a learning rate of 0.01, and reasonable starting weights. End of explanation """ train_and_test(False, 0.01, tf.nn.relu, 2000, 50) """ Explanation: As expected, both networks train well and eventually reach similar test accuracies. However, notice that the model with batch normalization converges slightly faster than the other network, reaching accuracies over 90% almost immediately and nearing its max acuracy in 10 or 15 thousand iterations. The other network takes about 3 thousand iterations to reach 90% and doesn't near its best accuracy until 30 thousand or more iterations. 
If you look at the raw speed, you can see that without batch normalization we were computing over 1100 batches per second, whereas with batch normalization that goes down to just over 500. However, batch normalization allows us to perform fewer iterations and converge in less time over all. (We only trained for 50 thousand batches here so we could plot the comparison.) The following creates two networks with the same hyperparameters used in the previous example, but only trains for 2000 iterations. End of explanation """ train_and_test(False, 0.01, tf.nn.sigmoid) """ Explanation: As you can see, using batch normalization produces a model with over 95% accuracy in only 2000 batches, and it was above 90% at somewhere around 500 batches. Without batch normalization, the model takes 1750 iterations just to hit 80% – the network with batch normalization hits that mark after around 200 iterations! (Note: if you run the code yourself, you'll see slightly different results each time because the starting weights - while the same for each model - are different for each run.) In the above example, you should also notice that the networks trained fewer batches per second then what you saw in the previous example. That's because much of the time we're tracking is actually spent periodically performing inference to collect data for the plots. In this example we perform that inference every 50 batches instead of every 500, so generating the plot for this example requires 10 times the overhead for the same 2000 iterations. The following creates two networks using a sigmoid activation function, a learning rate of 0.01, and reasonable starting weights. End of explanation """ train_and_test(False, 1, tf.nn.relu) """ Explanation: With the number of layers we're using and this small learning rate, using a sigmoid activation function takes a long time to start learning. It eventually starts making progress, but it took over 45 thousand batches just to get over 80% accuracy. 
Using batch normalization gets to 90% in around one thousand batches. The following creates two networks using a ReLU activation function, a learning rate of 1, and reasonable starting weights. End of explanation """ train_and_test(False, 1, tf.nn.relu) """ Explanation: Now we're using ReLUs again, but with a larger learning rate. The plot shows how training started out pretty normally, with the network with batch normalization starting out faster than the other. But the higher learning rate bounces the accuracy around a bit more, and at some point the accuracy in the network without batch normalization just completely crashes. It's likely that too many ReLUs died off at this point because of the high learning rate. The next cell shows the same test again. The network with batch normalization performs the same way, and the other suffers from the same problem again, but it manages to train longer before it happens. End of explanation """ train_and_test(False, 1, tf.nn.sigmoid) """ Explanation: In both of the previous examples, the network with batch normalization manages to gets over 98% accuracy, and get near that result almost immediately. The higher learning rate allows the network to train extremely fast. The following creates two networks using a sigmoid activation function, a learning rate of 1, and reasonable starting weights. End of explanation """ train_and_test(False, 1, tf.nn.sigmoid, 2000, 50) """ Explanation: In this example, we switched to a sigmoid activation function. It appears to hande the higher learning rate well, with both networks achieving high accuracy. The cell below shows a similar pair of networks trained for only 2000 iterations. End of explanation """ train_and_test(False, 2, tf.nn.relu) """ Explanation: As you can see, even though these parameters work well for both networks, the one with batch normalization gets over 90% in 400 or so batches, whereas the other takes over 1700. 
When training larger networks, these sorts of differences become more pronounced. The following creates two networks using a ReLU activation function, a learning rate of 2, and reasonable starting weights. End of explanation """ train_and_test(False, 2, tf.nn.sigmoid) """ Explanation: With this very large learning rate, the network with batch normalization trains fine and almost immediately manages 98% accuracy. However, the network without normalization doesn't learn at all. The following creates two networks using a sigmoid activation function, a learning rate of 2, and reasonable starting weights. End of explanation """ train_and_test(False, 2, tf.nn.sigmoid, 2000, 50) """ Explanation: Once again, using a sigmoid activation function with the larger learning rate works well both with and without batch normalization. However, look at the plot below where we train models with the same parameters but only 2000 iterations. As usual, batch normalization lets it train faster. End of explanation """ train_and_test(True, 0.01, tf.nn.relu) """ Explanation: In the rest of the examples, we use really bad starting weights. That is, normally we would use very small values close to zero. However, in these examples we choose randome values with a standard deviation of 5. If you were really training a neural network, you would not want to do this. But these examples demonstrate how batch normalization makes your network much more resilient. The following creates two networks using a ReLU activation function, a learning rate of 0.01, and bad starting weights. End of explanation """ train_and_test(True, 0.01, tf.nn.sigmoid) """ Explanation: As the plot shows, without batch normalization the network never learns anything at all. But with batch normalization, it actually learns pretty well and gets to almost 80% accuracy. The starting weights obviously hurt the network, but you can see how well batch normalization does in overcoming them. 
The following creates two networks using a sigmoid activation function, a learning rate of 0.01, and bad starting weights. End of explanation """ train_and_test(True, 1, tf.nn.relu) """ Explanation: Using a sigmoid activation function works better than the ReLU in the previous example, but without batch normalization it would take a tremendously long time to train the network, if it ever trained at all. The following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights.<a id="successful_example_lr_1"></a> End of explanation """ train_and_test(True, 1, tf.nn.sigmoid) """ Explanation: The higher learning rate used here allows the network with batch normalization to surpass 90% in about 30 thousand batches. The network without it never gets anywhere. The following creates two networks using a sigmoid activation function, a learning rate of 1, and bad starting weights. End of explanation """ train_and_test(True, 2, tf.nn.relu) """ Explanation: Using sigmoid works better than ReLUs for this higher learning rate. However, you can see that without batch normalization, the network takes a long time tro train, bounces around a lot, and spends a long time stuck at 90%. The network with batch normalization trains much more quickly, seems to be more stable, and achieves a higher accuracy. The following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights.<a id="successful_example_lr_2"></a> End of explanation """ train_and_test(True, 2, tf.nn.sigmoid) """ Explanation: We've already seen that ReLUs do not do as well as sigmoids with higher learning rates, and here we are using an extremely high rate. As expected, without batch normalization the network doesn't learn at all. But with batch normalization, it eventually achieves 90% accuracy. 
Notice, though, how its accuracy bounces around wildly during training - that's because the learning rate is really much too high, so the fact that this worked at all is a bit of luck. The following creates two networks using a sigmoid activation function, a learning rate of 2, and bad starting weights. End of explanation """ train_and_test(True, 1, tf.nn.relu) """ Explanation: In this case, the network with batch normalization trained faster and reached a higher accuracy. Meanwhile, the high learning rate makes the network without normalization bounce around erratically and have trouble getting past 90%. Full Disclosure: Batch Normalization Doesn't Fix Everything Batch normalization isn't magic and it doesn't work every time. Weights are still randomly initialized and batches are chosen at random during training, so you never know exactly how training will go. Even for these tests, where we use the same initial weights for both networks, we still get different weights each time we run. This section includes two examples that show runs when batch normalization did not help at all. The following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights. End of explanation """ train_and_test(True, 2, tf.nn.relu) """ Explanation: When we used these same parameters earlier, we saw the network with batch normalization reach 92% validation accuracy. This time we used different starting weights, initialized using the same standard deviation as before, and the network doesn't learn at all. (Remember, an accuracy around 10% is what the network gets if it just guesses the same value all the time.) The following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights. End of explanation """ def fully_connected(self, layer_in, initial_weights, activation_fn=None): """ Creates a standard, fully connected layer. 
Its number of inputs and outputs will be defined by the shape of `initial_weights`, and its starting weight values will be taken directly from that same parameter. If `self.use_batch_norm` is True, this layer will include batch normalization, otherwise it will not. :param layer_in: Tensor The Tensor that feeds into this layer. It's either the input to the network or the output of a previous layer. :param initial_weights: NumPy array or Tensor Initial values for this layer's weights. The shape defines the number of nodes in the layer. e.g. Passing in 3 matrix of shape (784, 256) would create a layer with 784 inputs and 256 outputs. :param activation_fn: Callable or None (default None) The non-linearity used for the output of the layer. If None, this layer will not include batch normalization, regardless of the value of `self.use_batch_norm`. e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers. """ if self.use_batch_norm and activation_fn: # Batch normalization uses weights as usual, but does NOT add a bias term. This is because # its calculations include gamma and beta variables that make the bias term unnecessary. weights = tf.Variable(initial_weights) linear_output = tf.matmul(layer_in, weights) num_out_nodes = initial_weights.shape[-1] # Batch normalization adds additional trainable variables: # gamma (for scaling) and beta (for shifting). gamma = tf.Variable(tf.ones([num_out_nodes])) beta = tf.Variable(tf.zeros([num_out_nodes])) # These variables will store the mean and variance for this layer over the entire training set, # which we assume represents the general population distribution. # By setting `trainable=False`, we tell TensorFlow not to modify these variables during # back propagation. Instead, we will assign values to these variables ourselves. 
pop_mean = tf.Variable(tf.zeros([num_out_nodes]), trainable=False) pop_variance = tf.Variable(tf.ones([num_out_nodes]), trainable=False) # Batch normalization requires a small constant epsilon, used to ensure we don't divide by zero. # This is the default value TensorFlow uses. epsilon = 1e-3 def batch_norm_training(): # Calculate the mean and variance for the data coming out of this layer's linear-combination step. # The [0] defines an array of axes to calculate over. batch_mean, batch_variance = tf.nn.moments(linear_output, [0]) # Calculate a moving average of the training data's mean and variance while training. # These will be used during inference. # Decay should be some number less than 1. tf.layers.batch_normalization uses the parameter # "momentum" to accomplish this and defaults it to 0.99 decay = 0.99 train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay)) train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay)) # The 'tf.control_dependencies' context tells TensorFlow it must calculate 'train_mean' # and 'train_variance' before it calculates the 'tf.nn.batch_normalization' layer. # This is necessary because the those two operations are not actually in the graph # connecting the linear_output and batch_normalization layers, # so TensorFlow would otherwise just skip them. with tf.control_dependencies([train_mean, train_variance]): return tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon) def batch_norm_inference(): # During inference, use the our estimated population mean and variance to normalize the layer return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon) # Use `tf.cond` as a sort of if-check. When self.is_training is True, TensorFlow will execute # the operation returned from `batch_norm_training`; otherwise it will execute the graph # operation returned from `batch_norm_inference`. 
batch_normalized_output = tf.cond(self.is_training, batch_norm_training, batch_norm_inference) # Pass the batch-normalized layer output through the activation function. # The literature states there may be cases where you want to perform the batch normalization *after* # the activation function, but it is difficult to find any uses of that in practice. return activation_fn(batch_normalized_output) else: # When not using batch normalization, create a standard layer that multiplies # the inputs and weights, adds a bias, and optionally passes the result # through an activation function. weights = tf.Variable(initial_weights) biases = tf.Variable(tf.zeros([initial_weights.shape[-1]])) linear_output = tf.add(tf.matmul(layer_in, weights), biases) return linear_output if not activation_fn else activation_fn(linear_output) """ Explanation: When we trained with these parameters and batch normalization earlier, we reached 90% validation accuracy. However, this time the network almost starts to make some progress in the beginning, but it quickly breaks down and stops learning. Note: Both of the above examples use extremely bad starting weights, along with learning rates that are too high. While we've shown batch normalization can overcome bad values, we don't mean to encourage actually using them. The examples in this notebook are meant to show that batch normalization can help your networks train better. But these last two examples should remind you that you still want to try to use good network design choices and reasonable starting weights. It should also remind you that the results of each attempt to train a network are a bit random, even when using otherwise identical architectures. Batch Normalization: A Detailed Look<a id='implementation_2'></a> The layer created by tf.layers.batch_normalization handles all the details of implementing batch normalization. Many students will be fine just using that and won't care about what's happening at the lower levels. 
However, some students may want to explore the details, so here is a short explanation of what's really happening, starting with the equations you're likely to come across if you ever read about batch normalization. In order to normalize the values, we first need to find the average value for the batch. If you look at the code, you can see that this is not the average value of the batch inputs, but the average value coming out of any particular layer before we pass it through its non-linear activation function and then feed it as an input to the next layer. We represent the average as $\mu_B$, which is simply the sum of all of the values $x_i$ divided by the number of values, $m$ $$ \mu_B \leftarrow \frac{1}{m}\sum_{i=1}^m x_i $$ We then need to calculate the variance, or mean squared deviation, represented as $\sigma_{B}^{2}$. If you aren't familiar with statistics, that simply means for each value $x_i$, we subtract the average value (calculated earlier as $\mu_B$), which gives us what's called the "deviation" for that value. We square the result to get the squared deviation. Sum up the results of doing that for each of the values, then divide by the number of values, again $m$, to get the average, or mean, squared deviation. $$ \sigma_{B}^{2} \leftarrow \frac{1}{m}\sum_{i=1}^m (x_i - \mu_B)^2 $$ Once we have the mean and variance, we can use them to normalize the values with the following equation. For each value, it subtracts the mean and divides by the (almost) standard deviation. (You've probably heard of standard deviation many times, but if you have not studied statistics you might not know that the standard deviation is actually the square root of the mean squared deviation.) $$ \hat{x_i} \leftarrow \frac{x_i - \mu_B}{\sqrt{\sigma_{B}^{2} + \epsilon}} $$ Above, we said "(almost) standard deviation". 
That's because the real standard deviation for the batch is calculated by $\sqrt{\sigma_{B}^{2}}$, but the above formula adds the term epsilon, $\epsilon$, before taking the square root. The epsilon can be any small, positive constant - in our code we use the value 0.001. It is there partially to make sure we don't try to divide by zero, but it also acts to increase the variance slightly for each batch. Why increase the variance? Statistically, this makes sense because even though we are normalizing one batch at a time, we are also trying to estimate the population distribution – the total training set, which itself an estimate of the larger population of inputs your network wants to handle. The variance of a population is higher than the variance for any sample taken from that population, so increasing the variance a little bit for each batch helps take that into account. At this point, we have a normalized value, represented as $\hat{x_i}$. But rather than use it directly, we multiply it by a gamma value, $\gamma$, and then add a beta value, $\beta$. Both $\gamma$ and $\beta$ are learnable parameters of the network and serve to scale and shift the normalized value, respectively. Because they are learnable just like weights, they give your network some extra knobs to tweak during training to help it learn the function it is trying to approximate. $$ y_i \leftarrow \gamma \hat{x_i} + \beta $$ We now have the final batch-normalized output of our layer, which we would then pass to a non-linear activation function like sigmoid, tanh, ReLU, Leaky ReLU, etc. In the original batch normalization paper (linked in the beginning of this notebook), they mention that there might be cases when you'd want to perform the batch normalization after the non-linearity instead of before, but it is difficult to find any uses like that in practice. 
In NeuralNet's implementation of fully_connected, all of this math is hidden inside the following line, where linear_output serves as the $x_i$ from the equations:
python
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
The next section shows you how to implement the math directly.
Batch normalization without the tf.layers package
Our implementation of batch normalization in NeuralNet uses the high-level abstraction tf.layers.batch_normalization, found in TensorFlow's tf.layers package.
However, if you would like to implement batch normalization at a lower level, the following code shows you how. It uses tf.nn.batch_normalization from TensorFlow's neural net (nn) package.
1) You can replace the fully_connected function in the NeuralNet class with the below code and everything in NeuralNet will still work like it did before.
End of explanation
"""
def batch_norm_test(test_training_accuracy):
    """
    :param test_training_accuracy: bool
        If True, perform inference with batch normalization using batch mean and variance;
        if False, perform inference with batch normalization using estimated population mean and variance.
    """

    # Fresh random starting weights for a 784 -> 100 -> 100 -> 100 -> 10 network.
    weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32),
               np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
               np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
               np.random.normal(size=(100,10), scale=0.05).astype(np.float32)
              ]

    # Clear any graph left over from a previous run so ops don't accumulate
    # across repeated executions of this cell.
    tf.reset_default_graph()

    # Train the model
    # NOTE(review): the trailing True presumably sets NeuralNet's
    # use_batch_norm flag -- confirm against NeuralNet.__init__.
    bn = NeuralNet(weights, tf.nn.relu, True)

    # First train the network
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        bn.train(sess, 0.01, 2000, 2000)

        # ...then run inference, reporting each individual prediction so we
        # can see whether the outputs vary from input to input.
        bn.test(sess, test_training_accuracy=test_training_accuracy, include_individual_predictions=True)
"""
Explanation: This version of fully_connected is much longer than the original, but once again has extensive comments to help you understand it. 
Here are some important points: It explicitly creates variables to store gamma, beta, and the population mean and variance. These were all handled for us in the previous version of the function. It initializes gamma to one and beta to zero, so they start out having no effect in this calculation: $y_i \leftarrow \gamma \hat{x_i} + \beta$. However, during training the network learns the best values for these variables using back propagation, just like networks normally do with weights. Unlike gamma and beta, the variables for population mean and variance are marked as untrainable. That tells TensorFlow not to modify them during back propagation. Instead, the lines that call tf.assign are used to update these variables directly. TensorFlow won't automatically run the tf.assign operations during training because it only evaluates operations that are required based on the connections it finds in the graph. To get around that, we add this line: with tf.control_dependencies([train_mean, train_variance]): before we run the normalization operation. This tells TensorFlow it needs to run those operations before running anything inside the with block. The actual normalization math is still mostly hidden from us, this time using tf.nn.batch_normalization. tf.nn.batch_normalization does not have a training parameter like tf.layers.batch_normalization did. However, we still need to handle training and inference differently, so we run different code in each case using the tf.cond operation. We use the tf.nn.moments function to calculate the batch mean and variance. 2) The current version of the train function in NeuralNet will work fine with this new version of fully_connected. 
However, it uses these lines to ensure population statistics are updated when using batch normalization: python if self.use_batch_norm: with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy) else: train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy) Our new version of fully_connected handles updating the population statistics directly. That means you can also simplify your code by replacing the above if/else condition with just this line: python train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy) 3) And just in case you want to implement every detail from scratch, you can replace this line in batch_norm_training: python return tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon) with these lines: python normalized_linear_output = (linear_output - batch_mean) / tf.sqrt(batch_variance + epsilon) return gamma * normalized_linear_output + beta And replace this line in batch_norm_inference: python return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon) with these lines: python normalized_linear_output = (linear_output - pop_mean) / tf.sqrt(pop_variance + epsilon) return gamma * normalized_linear_output + beta As you can see in each of the above substitutions, the two lines of replacement code simply implement the following two equations directly. 
The first line calculates the following equation, with linear_output representing $x_i$ and normalized_linear_output representing $\hat{x_i}$: $$ \hat{x_i} \leftarrow \frac{x_i - \mu_B}{\sqrt{\sigma_{B}^{2} + \epsilon}} $$ And the second line is a direct translation of the following equation: $$ y_i \leftarrow \gamma \hat{x_i} + \beta $$ We still use the tf.nn.moments operation to implement the other two equations from earlier – the ones that calculate the batch mean and variance used in the normalization step. If you really wanted to do everything from scratch, you could replace that line, too, but we'll leave that to you. Why the difference between training and inference? In the original function that uses tf.layers.batch_normalization, we tell the layer whether or not the network is training by passing a value for its training parameter, like so: python batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training) And that forces us to provide a value for self.is_training in our feed_dict, like we do in this example from NeuralNet's train function: python session.run(train_step, feed_dict={self.input_layer: batch_xs, labels: batch_ys, self.is_training: True}) If you looked at the low level implementation, you probably noticed that, just like with tf.layers.batch_normalization, we need to do slightly different things during training and inference. But why is that? First, let's look at what happens when we don't. The following function is similar to train_and_test from earlier, but this time we are only testing one network and instead of plotting its accuracy, we perform 200 predictions on test inputs, 1 input at at time. We can use the test_training_accuracy parameter to test the network in training or inference modes (the equivalent of passing True or False to the feed_dict for is_training). 
End of explanation
"""
batch_norm_test(True)
"""
Explanation: In the following cell, we pass True for test_training_accuracy, which performs the same batch normalization that we normally perform during training.
End of explanation
"""
batch_norm_test(False)
"""
Explanation: As you can see, the network guessed the same value every time! But why? Because during training, a network with batch normalization adjusts the values at each layer based on the mean and variance of that batch. The "batches" we are using for these predictions have a single input each time, so their values are the means, and their variances will always be 0. That means the network will normalize the values at any layer to zero. (Review the equations from before to see why a value that is equal to the mean would always normalize to zero.) So we end up with the same result for every input we give the network, because it's the value the network produces when it applies its learned weights to zeros at every layer.
Note: If you re-run that cell, you might get a different value from what we showed. That's because the specific weights the network learns will be different every time. But whatever value it is, it should be the same for all 200 predictions.
To overcome this problem, the network does not just normalize the batch at each layer. It also maintains an estimate of the mean and variance for the entire population. So when we perform inference, instead of letting it "normalize" all the values using their own means and variance, it uses the estimates of the population mean and variance that it calculated while training.
So in the following example, we pass False for test_training_accuracy, which tells the network that we want to perform inference with the population statistics it calculates during training.
End of explanation
"""
wso2/product-apim
modules/recommendation-engine/repository/resources/Word2vec_Model/Build_Word2vec_model.ipynb
apache-2.0
# Train a word2vec model on the sentence dataset.
#   size=300    -> dimensionality of the word vectors
#   window=10   -> maximum distance between the current and predicted word
#   min_count=5 -> ignore words that occur fewer than 5 times
#   workers=10  -> number of worker threads used for training
# (gensim Word2Vec API; note that `size` was renamed `vector_size` in gensim 4.x)
model = gensim.models.Word2Vec (dataset, size=300, window=10, min_count=5, workers=10)
# Continue training over the same corpus for 15 epochs.
model.train(dataset,total_examples=len(dataset),epochs=15)
"""
Explanation: The 'Dataset.txt' file consists of API descriptions of over 15,000 APIs. Using the 'Dataset_PW.txt' file, a dataset which consists of sentences, is created.
End of explanation
"""
# Persist the trained model to disk so it can be reloaded later.
model.save("word2vec_model.model")
"""
Explanation: Using gensim, a word2vec model is built and trained using the above dataset.
End of explanation
"""
simonsfoundation/CaImAn
demos/notebooks/demo_VST.ipynb
gpl-2.0
get_ipython().magic('load_ext autoreload') get_ipython().magic('autoreload 2') import logging import matplotlib.pyplot as plt import numpy as np import os import timeit logging.basicConfig(format= "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s", # filename="/tmp/caiman.log", level=logging.DEBUG) import caiman.external.houghvst.estimation as est from caiman.external.houghvst.gat import compute_gat, compute_inverse_gat import caiman as cm from caiman.paths import caiman_datadir """ Explanation: Estimating noise and stabilizing variance in calcium imaging data The purpose of this notebook is to show how to use the VST transform for estimating the noise profile of calcium imaging data and apply a generalized Anscombe transform that aims to transform the noise into white Gaussian. End of explanation """ def main(): fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')] movie = cm.load(fnames) movie = movie.astype(np.float) # makes estimation numerically better: movie -= movie.mean() # use one every 200 frames temporal_stride = 100 # use one every 8 patches (patches are 8x8 by default) spatial_stride = 6 movie_train = movie[::temporal_stride] t = timeit.default_timer() estimation_res = est.estimate_vst_movie(movie_train, stride=spatial_stride) print('\tTime', timeit.default_timer() - t) alpha = estimation_res.alpha sigma_sq = estimation_res.sigma_sq movie_gat = compute_gat(movie, sigma_sq, alpha=alpha) # save movie_gat here movie_gat_inv = compute_inverse_gat(movie_gat, sigma_sq, alpha=alpha, method='asym') # save movie_gat_inv here return movie, movie_gat, movie_gat_inv movie, movie_gat, movie_gat_inv = main() """ Explanation: Below is a function that will compute and apply the transformation and its inverse. 
The underlying noise model is scaled Poisson plus Gaussian, i.e., the underlying fluorescence value $x$ is related to the observed value $y$ by the equation
$$y = \alpha*\text{Poisson}(x) + \varepsilon$$
where $\alpha$ is a non-negative scalar, and $\varepsilon \sim \mathcal{N}(\mu,\sigma^2)$ is distributed according to a Gaussian distribution.
End of explanation
"""
movie_gat.play(magnification=4, q_max=99.8)
"""
Explanation: The transformed movie should have more uniform dynamic range (press q to exit):
End of explanation
"""
CI = movie.local_correlations(swap_dim=False)
CI_gat = movie_gat.local_correlations(swap_dim=False)
plt.figure(figsize=(15,5))
plt.subplot(1,2,1); plt.imshow(CI); plt.colorbar(); plt.title('Correlation Image (original)')
plt.subplot(1,2,2); plt.imshow(CI_gat); plt.colorbar(); plt.title('Correlation Image (transformed)')
"""
Explanation: The movie might appear more noisy but information is preserved as seen from the correlation image:
End of explanation
"""
sn = cm.source_extraction.cnmf.pre_processing.get_noise_fft(movie.transpose(1,2,0), noise_method='mean')[0]
sn_gat = cm.source_extraction.cnmf.pre_processing.get_noise_fft(movie_gat.transpose(1,2,0), noise_method='mean')[0]
# sn = np.std(movie.transpose(1,2,0), axis=-1)
# sn_gat = np.std(movie_gat.transpose(1,2,0), axis=-1)
plt.figure(figsize=(15,5))
plt.subplot(1,2,1); plt.imshow(sn); plt.colorbar(); plt.title('Noise standard deviation (original)')
plt.subplot(1,2,2); plt.imshow(sn_gat); plt.colorbar(); plt.title('Noise standard deviation (transformed)')
"""
Explanation: The noise estimates in space should also be more uniform.
End of explanation
"""
cm.concatenate([movie,movie_gat_inv],axis=2).play(magnification=5, q_max=99.5)
"""
Explanation: If we apply the inverse transform we approximately get back the original movie (press q to exit):
End of explanation
"""
kevntao/ThinkStats2
code/chap03ex.ipynb
gpl-3.0
kids = resp['numkdhh'] kids """ Explanation: Make a PMF of <tt>numkdhh</tt>, the number of children under 18 in the respondent's household. End of explanation """ pmf = thinkstats2.Pmf(kids) thinkplot.Pmf(pmf, label='PMF') thinkplot.Show(xlabel='# of Children', ylabel='PMF') """ Explanation: Display the PMF. End of explanation """ def BiasPmf(pmf, label=''): """Returns the Pmf with oversampling proportional to value. If pmf is the distribution of true values, the result is the distribution that would be seen if values are oversampled in proportion to their values; for example, if you ask students how big their classes are, large classes are oversampled in proportion to their size. Args: pmf: Pmf object. label: string label for the new Pmf. Returns: Pmf object """ new_pmf = pmf.Copy(label=label) for x, p in pmf.Items(): new_pmf.Mult(x, x) new_pmf.Normalize() return new_pmf """ Explanation: Define <tt>BiasPmf</tt>. End of explanation """ biasedpmf = BiasPmf(pmf, label='BiasPMF') """ Explanation: Make a the biased Pmf of children in the household, as observed if you surveyed the children instead of the respondents. End of explanation """ thinkplot.PrePlot(2) thinkplot.Pmfs([pmf,biasedpmf]) thinkplot.Show(xlabel='# of Children', ylabel='PMF') """ Explanation: Display the actual Pmf and the biased Pmf on the same axes. End of explanation """ pmf.Mean() biasedpmf.Mean() """ Explanation: Compute the means of the two Pmfs. End of explanation """
ultiyuan/test0
lessons/VortexPanelMethod.ipynb
gpl-2.0
import numpy # velocity component functions def get_u( x, y, S, gamma ): return gamma/(2*numpy.pi)*(numpy.arctan((x-S)/y)-numpy.arctan((x+S)/y)) def get_v( x, y, S, gamma ): return gamma/(4*numpy.pi)*(numpy.log(((x+S)**2+y**2)/((x-S)**2+y**2))) # vortex panel class class Panel: # save the inputs and pre-compute factors for the coordinate tranform def __init__( self, x0, y0, x1, y1, gamma=0 ): self.x,self.y,self.gamma = [x0,x1],[y0,y1],gamma self.xc = 0.5*(x0+x1) # panel x-center self.yc = 0.5*(y0+y1) # panel y-center self.S = numpy.sqrt( # ... (x1-self.xc)**2+(y1-self.yc)**2) # panel width self.sx = (x1-self.xc)/self.S # unit vector in x self.sy = (y1-self.yc)/self.S # unit vector in y # get the velocity! def velocity( self, x, y, gamma=None ): if gamma is None: gamma = self.gamma # default gamma xp,yp = self.transform_xy( x, y ) # transform up = get_u( xp, yp, self.S, gamma ) # get u prime vp = get_v( xp, yp, self.S, gamma ) # get v prime return self.rotate_uv( up, vp ) # rotate back # plot the panel def plot(self): return pyplot.plot(self.x,self.y,'k-',lw=2) # transform from global to panel coordinates def transform_xy( self, x, y ): xt = x-self.xc # shift x yt = y-self.yc # shift y xp = xt*self.sx+yt*self.sy # rotate x yp = yt*self.sx-xt*self.sy # rotate y return [ xp, yp ] # rotate velocity back to global coordinates def rotate_uv( self, up, vp): u = up*self.sx-vp*self.sy # reverse rotate u prime v = vp*self.sx+up*self.sy # reverse rotate v prime return [ u, v ] """ Explanation: Vortex panel method The previous notebook introduced the concept of vortex sheets. We will now extend this concept to solve to the flow around general objects. General vortex sheets A curved vortex sheet with a variable strength can describe the flow around any immersed object. This is achieved by having the sheet act as an infinitely thin version of the boundary layer to enforce the no-slip boundary condition. 
In other words we use the sheets to force the tangential velocity $u_s$ to zero at every point $s$ on the body surface $\cal S$ $$u_s = \vec u \cdot \hat s = 0 \quad s \in \cal S.$$ From the previous notebook, we know the velocity at any point is determined by an integral over the whole vortex sheet. Therefore, the tangential velocity condition is $$ u_s = \left[\vec U_\infty+\frac{\partial}{\partial \vec x}\oint_{\cal S} \frac{\gamma(s')}{2\pi}\theta(s,s')\ ds'\right] \cdot\hat s = 0 $$ where $\vec U_\infty$ is the background velocity that has been added by superposition. If we can use this equation to determine the strength distribution $\gamma(s)$ along the sheet then we will have solved for the potential flow around the body! Descrete vortex panels For general body surface shapes the velocity is a highly nonlinear function of $\gamma(s)$, rendering analytic solution unlikely. We could attempt some complex analytic expansions, but why would we want to do that? Numerical fundamental: Discritization Replace continuous functions with linear approximations We already know that the velocity depends linearly on $\gamma$ for a vortex panel. This makes it easy to solve for $\gamma$ as a function of $u_s$. And we can add any number of panels together using superposition. Therefore, if we break up the continuous sheet into a series of vortex panels, we can approximate the tangential velocity condition above, and use it to solve for $\gamma$. This is the essense of the vortex panel method. Linear velocity function First, lets write the velocity induced by a single panel in a way that makes the linearity explicit: $$ \vec u(x,y)=\gamma\ \vec f(x,y \ |\ x_0,y_0,x_1,y_1)$$ where $\vec f$ is a function that depends on the panel geometry. In fact, we've already written all the code we need to evaluate this function in the last notebook... Quiz 1: Which python function from the previous notebook can we use as $\vec f$? 
get_u, get_v Panel.velocity if we set $\gamma=1$ for the panel. Panel.velocity if we set $\gamma=0$ for the panel. Lets copy that old code here: End of explanation """ def polynomial(theta,n): a = theta % (2.*numpy.pi/n)-numpy.pi/n r = numpy.cos(numpy.pi/n)/numpy.cos(a) return [r*numpy.cos(theta),r*numpy.sin(theta)] """ Explanation: We added an optional argument to Panel.velocity to set gamma. If we don't specify gamma the function will use self.gamma giving us the same output as in the previous note book. Array of panels The next step is to extend this to more than one panel and to add the background flow. To help make this more concrete, lets consider a polynomial-shape body. I found this beautiful little equation to define regular polynomials. End of explanation """ N_panels = 3 # number of panels triangle = numpy.empty(N_panels, dtype=object) # initialize an array of panels # Define the end-points of the panels theta_ends = numpy.linspace(0, -2*numpy.pi, N_panels+1) x_ends,y_ends = polynomial( theta_ends, n=3) # Initialize each panel with the end points for i in xrange(N_panels): triangle[i] = Panel(x_ends[i], y_ends[i], x_ends[i+1], y_ends[i+1]) # Plot it from matplotlib import pyplot %matplotlib inline for p in triangle: p.plot() """ Explanation: where $n$ is the number of sides in the polynomial, and theta=$\theta$ is the polar angle around the origin. In the case of a triangle it only takes three panels to define the shape: End of explanation """ def flow_velocity(panels,x,y,alpha=0): # get the uniform velocity ( make it the same size & shape as x ) u = numpy.cos(alpha)*numpy.ones_like(x) v = numpy.sin(alpha)*numpy.ones_like(x) # add the velocity contribution from each panel for p in panels: u0,v0 = p.velocity(x,y) u = u+u0 v = v+v0 return [u,v] """ Explanation: Looks good. Note the code for p in triangle: loops through all the panels in the array. Very handy and clean. So, what is the velocity induced by these panels? 
Using superposition, the total velocity at any point $x,y$ is simply $$ \vec u(x,y) = \vec U_\infty+\sum_{j=0}^{N-1} \gamma_j \ \vec f_j(x,y) $$ where we use the index $j$ to label each of the $N$ panels. This can be coded as: End of explanation """ def plot_flow(panels,alpha=0,xmax=2,N_grid=100): # define the grid X = numpy.linspace(-xmax, xmax, N_grid) # computes a 1D-array for x Y = numpy.linspace(-xmax, xmax, N_grid) # computes a 1D-array for y x, y = numpy.meshgrid(X, Y) # generates a mesh grid # get the velocity from the free stream and panels u,v = flow_velocity(panels,x,y,alpha) # plot it pyplot.figure(figsize=(8,11)) # set size pyplot.xlabel('x', fontsize=16) # label x pyplot.ylabel('y', fontsize=16) # label y m = numpy.sqrt(u**2+v**2) # compute velocity magnitude velocity = pyplot.contourf(x, y, m) # plot magnitude contours cbar = pyplot.colorbar(velocity, orientation='horizontal') cbar.set_label('Velocity magnitude', fontsize=16); pyplot.quiver(x[::4,::4], y[::4,::4], u[::4,::4], v[::4,::4]) # plot vector field # pyplot.streamplot(x, y, u, v) # plots streamlines - this is slow! for p in panels: p.plot() """ Explanation: where the alpha argument defines the angle of attack. To visualize this, we copy the grid definition and plotting code from before to define a new function plotFlow: End of explanation """ plot_flow(triangle) """ Explanation: Are we done? Let's plot the flow and check... End of explanation """ # define the influence of panel_j on panel_i def influence(panel_i,panel_j): u,v = panel_j.velocity(panel_i.xc,panel_i.yc,gamma=1) return u*panel_i.sx+v*panel_i.sy """ Explanation: Quiz 2 Why is the flow going through the body above? 
We set gamma=0 for the panels We haven't applied the no-slip condition We haven't determined the correct gamma for each panel System of linear equations The no-slip boundary condition is $$ u_s = \hat s\cdot\left[\vec U_\infty + \sum_{j=0}^{N-1} \gamma_j \ \vec f_j(x,y)\right]=0 $$ We note again that this system is linear in $\gamma$. So the goal is to set $\gamma$ on each sheet such that this condition is enforced on the body. Quiz 3 How many unknowns are there? $1$ $N$ $N^2$ But we only have one equation, the no-slip condition... right? Numerical fundamental: Consistency Develop enough equations to match the unknowns For a linear system of equations to be consistent, that is for it to have a solution, we need as many equations as unknowns. Luckily the no-slip condition is a continuous equation - it applies to every point on the body. Therefore, we can evaluate the boundary equation multiple times - say at the center of each panel. Then we will have a consistent linear system. There is one important point to consider before we do this. The tangential velocity is discontinuous across the vortex panel (the jump from no-slip to the free stream) so we need to be careful to apply the no-slip condition on the body side of the panel. The way we've drawn it, this is the negative side ($y^-$) in the coordinates of each panel. Quiz 4 What tangential velocity does a panel induce on the negative side of itself? $u_s = \frac 12 \gamma$ $u_s = -\frac 12 \gamma$ $u_s = U_\infty$ (Hint: think about the previous notebook.) Using this relation, the tangential velocity at the center of each panel is $$ \frac 12 \gamma_i + \hat s_i \cdot \left[ \vec U_\infty + \sum_{j=0, j\ne i}^N \gamma_j \ \vec f_j(x_i,y_i)\right] = 0 \quad i=0\ldots N-1$$ where $\hat s_i$ is the tangent vector of panel $i$, and $x_i,y_i$ is the center of that panel. Notice that the equation for the velocity on panel $i$ depends on the strength of every other panel. 
Will we need to write a special function to solve for $\gamma$? Numerical fundamental: Linear Algebra Packages Never write your own matrix solver Every worthwhile numerical language has a set of linear algebra solution routines - in numpy it is the linalg package. All we need to do is reform this system to look like $$ \mathbf{A \gamma = b} $$ and use the function numpy.linalg.solve. From the equation above, we see $$ a_{ij} = \vec f_j(x_i,y_i) \cdot \hat s_i \quad i\ne j$$ $$ a_{ii} = \frac 1 2, \quad b_i = -\vec U \cdot \hat s_i$$ where $a_{ij}$ are the coefficient of the matrix $\mathbf A$ which represent the influence of panel $j$ on panel $i$, and $b_i$ are the coefficients of the vector $\mathbf b$ which represent the forcing of the external flow on panel $i$. Construct and solve Again - all we need to do is construct (or assemble) this matrix and vector, and use the function to solve for $\gamma_i$. First, we write a function for the cross-influence term: End of explanation """ # construct the linear system def construct_A_b(panels,alpha=0): # construct matrix N_panels = len(panels) A = numpy.empty((N_panels, N_panels), dtype=float) # empty matrix numpy.fill_diagonal(A, 0.5) # fill diagonal with 1/2 for i, p_i in enumerate(panels): for j, p_j in enumerate(panels): if i != j: # off-diagonals A[i,j] = influence(p_i,p_j) # find influence # computes the RHS b = [-numpy.cos(alpha)*p.sx-numpy.sin(alpha)*p.sy for p in panels] return [A,b] """ Explanation: which uses the optional gamma=1 argument to evaluate $\vec f_j$. Next, we write a function to fill the matrix $\mathbf A$ using the enumerate function, and the RHS vector $\mathbf b$ using a list comprehension (which is terribly named, but very handy). End of explanation """ # determine the vortex strength on a set of panels def solve_gamma(panels,alpha=0): A,b = construct_A_b(panels,alpha) # construct linear system gamma = numpy.linalg.solve(A, b) # solve for gamma! 
for i,p_i in enumerate(panels): p_i.gamma = gamma[i] # update panels """ Explanation: Finally, we can write a function to solve for $\gamma_i$ End of explanation """ solve_gamma(triangle) # solve for gamma plot_flow(triangle) # compute flow field and plot """ Explanation: Lets test it out! End of explanation """ # defining the end-points of the panels N_panels = 20 x_ends = numpy.cos(numpy.linspace(0, -2*numpy.pi, N_panels+1)) y_ends = numpy.sin(numpy.linspace(0, -2*numpy.pi, N_panels+1)) # defining the panels circle = numpy.empty(N_panels, dtype=object) for i in xrange(N_panels): circle[i] = Panel(x_ends[i], y_ends[i], x_ends[i+1], y_ends[i+1]) solve_gamma(circle) # solve for gamma plot_flow(circle) # compute flow field and plot """ Explanation: Much better, but... Quiz 5 Why is there still flow through the wedge? Modeling error (ie incorrect conditions) Numerical error (ie insufficient resolution) Implementation error (ie inadequate care) (Hint: one of these is immediately testable.) Numerical Fundamental: Convergence with resolution The more panels you use, the closer you should get to the analytic solution Other objects We can now get the flow around any shape! Let try one we know well - a circle: End of explanation """ # your code here """ Explanation: Looks about right! Quiz 6 This vortex panel method can be used to solve for the flow around: multiple bodies three-dimensional bodies a sailboat keel Your turn Write a method to acheive this. It should only take a few lines of code. Hint: numpy.concatenate((body_1,body_2)) End of explanation """ from IPython.core.display import HTML def css_styling(): styles = open('../styles/custom.css', 'r').read() return HTML(styles) css_styling() """ Explanation: Ignore the line below - it just loads the style sheet. End of explanation """
nehal96/Deep-Learning-ND-Exercises
Sentiment Analysis/Sentiment Analysis with Andrew Trask/4-reducing-noise.ipynb
mit
def pretty_print_review_and_label(i): print(labels[i] + "\t:\t" + reviews[i][:80] + "...") g = open('reviews.txt','r') # What we know! reviews = list(map(lambda x:x[:-1],g.readlines())) g.close() g = open('labels.txt','r') # What we WANT to know! labels = list(map(lambda x:x[:-1].upper(),g.readlines())) g.close() len(reviews) reviews[0] labels[0] """ Explanation: Sentiment Classification & How To "Frame Problems" for a Neural Network by Andrew Trask Twitter: @iamtrask Blog: http://iamtrask.github.io What You Should Already Know neural networks, forward and back-propagation stochastic gradient descent mean squared error and train/test splits Where to Get Help if You Need it Re-watch previous Udacity Lectures Leverage the recommended Course Reading Material - Grokking Deep Learning (40% Off: traskud17) Shoot me a tweet @iamtrask Tutorial Outline: Intro: The Importance of "Framing a Problem" Curate a Dataset Developing a "Predictive Theory" PROJECT 1: Quick Theory Validation Transforming Text to Numbers PROJECT 2: Creating the Input/Output Data Putting it all together in a Neural Network PROJECT 3: Building our Neural Network Understanding Neural Noise PROJECT 4: Making Learning Faster by Reducing Noise Analyzing Inefficiencies in our Network PROJECT 5: Making our Network Train and Run Faster Further Noise Reduction PROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary Analysis: What's going on in the weights? 
Lesson: Curate a Dataset End of explanation """ print("labels.txt \t : \t reviews.txt\n") pretty_print_review_and_label(2137) pretty_print_review_and_label(12816) pretty_print_review_and_label(6267) pretty_print_review_and_label(21934) pretty_print_review_and_label(5297) pretty_print_review_and_label(4998) """ Explanation: Lesson: Develop a Predictive Theory End of explanation """ from collections import Counter import numpy as np positive_counts = Counter() negative_counts = Counter() total_counts = Counter() for i in range(len(reviews)): if(labels[i] == 'POSITIVE'): for word in reviews[i].split(" "): positive_counts[word] += 1 total_counts[word] += 1 else: for word in reviews[i].split(" "): negative_counts[word] += 1 total_counts[word] += 1 positive_counts.most_common()[:15] pos_neg_ratios = Counter() for term,cnt in list(total_counts.most_common()): if(cnt > 100): pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1) pos_neg_ratios[term] = pos_neg_ratio for word,ratio in pos_neg_ratios.most_common(): if(ratio > 1): pos_neg_ratios[word] = np.log(ratio) else: pos_neg_ratios[word] = -np.log((1 / (ratio+0.01))) # words most frequently seen in a review with a "POSITIVE" label pos_neg_ratios.most_common()[:15] # words most frequently seen in a review with a "NEGATIVE" label list(reversed(pos_neg_ratios.most_common()))[0:30] """ Explanation: Project 1: Quick Theory Validation End of explanation """ from IPython.display import Image review = "This was a horrible, terrible movie." 
Image(filename='sentiment_network.png') review = "The movie was excellent" Image(filename='sentiment_network_pos.png') """ Explanation: Transforming Text into Numbers End of explanation """ vocab = set(total_counts.keys()) vocab_size = len(vocab) print(vocab_size) list(vocab)[:15] import numpy as np layer_0 = np.zeros((1,vocab_size)) layer_0 from IPython.display import Image Image(filename='sentiment_network.png') word2index = {} for i,word in enumerate(vocab): word2index[word] = i word2index_sample = {k: word2index[k] for k in list(word2index.keys())[:15]} word2index_sample def update_input_layer(review): global layer_0 # clear out previous state, reset the layer to be all 0s layer_0 *= 0 for word in review.split(" "): layer_0[0][word2index[word]] += 1 update_input_layer(reviews[0]) layer_0 def get_target_for_label(label): if(label == 'POSITIVE'): return 1 else: return 0 labels[0] get_target_for_label(labels[0]) labels[1] get_target_for_label(labels[1]) """ Explanation: Project 2: Creating the Input/Output Data End of explanation """ import time import sys import numpy as np # Let's tweak our network from before to model these phenomena class SentimentNetwork: def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1): # set our random number generator np.random.seed(1) self.pre_process_data(reviews, labels) self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate) def pre_process_data(self, reviews, labels): review_vocab = set() for review in reviews: for word in review.split(" "): review_vocab.add(word) self.review_vocab = list(review_vocab) label_vocab = set() for label in labels: label_vocab.add(label) self.label_vocab = list(label_vocab) self.review_vocab_size = len(self.review_vocab) self.label_vocab_size = len(self.label_vocab) self.word2index = {} for i, word in enumerate(self.review_vocab): self.word2index[word] = i self.label2index = {} for i, label in enumerate(self.label_vocab): self.label2index[label] = i def 
init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Initialize weights self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes)) self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) self.learning_rate = learning_rate self.layer_0 = np.zeros((1,input_nodes)) def update_input_layer(self,review): # clear out previous state, reset the layer to be all 0s self.layer_0 *= 0 for word in review.split(" "): if(word in self.word2index.keys()): self.layer_0[0][self.word2index[word]] += 1 def get_target_for_label(self,label): if(label == 'POSITIVE'): return 1 else: return 0 def sigmoid(self,x): return 1 / (1 + np.exp(-x)) def sigmoid_output_2_derivative(self,output): return output * (1 - output) def train(self, training_reviews, training_labels): assert(len(training_reviews) == len(training_labels)) correct_so_far = 0 start = time.time() for i in range(len(training_reviews)): review = training_reviews[i] label = training_labels[i] #### Implement the forward pass here #### ### Forward pass ### # Input Layer self.update_input_layer(review) # Hidden layer layer_1 = self.layer_0.dot(self.weights_0_1) # Output layer layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2)) #### Implement the backward pass here #### ### Backward pass ### # TODO: Output error layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output. 
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2) # TODO: Backpropagated error layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error # TODO: Update the weights self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step if(np.abs(layer_2_error) < 0.5): correct_so_far += 1 reviews_per_second = i / float(time.time() - start) sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%") if(i % 2500 == 0): print("") def test(self, testing_reviews, testing_labels): correct = 0 start = time.time() for i in range(len(testing_reviews)): pred = self.run(testing_reviews[i]) if(pred == testing_labels[i]): correct += 1 reviews_per_second = i / float(time.time() - start) sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%") def run(self, review): # Input Layer self.update_input_layer(review.lower()) # Hidden layer layer_1 = self.layer_0.dot(self.weights_0_1) # Output layer layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2)) if(layer_2[0] > 0.5): return "POSITIVE" else: return "NEGATIVE" mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1) # evaluate our model before training (just to show how horrible it is) mlp.test(reviews[-1000:],labels[-1000:]) # train the network 
mlp.train(reviews[:-1000],labels[:-1000]) mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01) # train the network mlp.train(reviews[:-1000],labels[:-1000]) mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001) # train the network mlp.train(reviews[:-1000],labels[:-1000]) """ Explanation: Project 3: Building a Neural Network Start with your neural network from the last chapter 3 layer neural network no non-linearity in hidden layer use our functions to create the training data create a "pre_process_data" function to create vocabulary for our training data generating functions modify "train" to train over the entire corpus Where to Get Help if You Need it Re-watch previous week's Udacity Lectures Chapters 3-5 - Grokking Deep Learning - (40% Off: traskud17) End of explanation """ from IPython.display import Image Image(filename='sentiment_network.png') def update_input_layer(review): global layer_0 # clear out previous state, reset the layer to be all 0s layer_0 *= 0 for word in review.split(" "): layer_0[0][word2index[word]] += 1 update_input_layer(reviews[0]) layer_0 review_counter = Counter() for word in reviews[0].split(" "): review_counter[word] += 1 review_counter.most_common()[:20] """ Explanation: Understanding Neural Noise End of explanation """ import time import sys import numpy as np # Let's tweak our network from before to model these phenomena class SentimentNetwork: def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1): # set our random number generator np.random.seed(1) self.pre_process_data(reviews, labels) self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate) def pre_process_data(self, reviews, labels): review_vocab = set() for review in reviews: for word in review.split(" "): review_vocab.add(word) self.review_vocab = list(review_vocab) label_vocab = set() for label in labels: label_vocab.add(label) self.label_vocab = list(label_vocab) self.review_vocab_size = 
len(self.review_vocab) self.label_vocab_size = len(self.label_vocab) self.word2index = {} for i, word in enumerate(self.review_vocab): self.word2index[word] = i self.label2index = {} for i, label in enumerate(self.label_vocab): self.label2index[label] = i def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Initialize weights self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes)) self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) self.learning_rate = learning_rate self.layer_0 = np.zeros((1,input_nodes)) def update_input_layer(self,review): # clear out previous state, reset the layer to be all 0s self.layer_0 *= 0 for word in review.split(" "): if(word in self.word2index.keys()): self.layer_0[0][self.word2index[word]] = 1 def get_target_for_label(self,label): if(label == 'POSITIVE'): return 1 else: return 0 def sigmoid(self,x): return 1 / (1 + np.exp(-x)) def sigmoid_output_2_derivative(self,output): return output * (1 - output) def train(self, training_reviews, training_labels): assert(len(training_reviews) == len(training_labels)) correct_so_far = 0 start = time.time() for i in range(len(training_reviews)): review = training_reviews[i] label = training_labels[i] #### Implement the forward pass here #### ### Forward pass ### # Input Layer self.update_input_layer(review) # Hidden layer layer_1 = self.layer_0.dot(self.weights_0_1) # Output layer layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2)) #### Implement the backward pass here #### ### Backward pass ### # TODO: Output error layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output. 
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2) # TODO: Backpropagated error layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error # TODO: Update the weights self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step if(np.abs(layer_2_error) < 0.5): correct_so_far += 1 reviews_per_second = i / float(time.time() - start) sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%") if(i % 2500 == 0): print("") def test(self, testing_reviews, testing_labels): correct = 0 start = time.time() for i in range(len(testing_reviews)): pred = self.run(testing_reviews[i]) if(pred == testing_labels[i]): correct += 1 reviews_per_second = i / float(time.time() - start) sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%") def run(self, review): # Input Layer self.update_input_layer(review.lower()) # Hidden layer layer_1 = self.layer_0.dot(self.weights_0_1) # Output layer layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2)) if(layer_2[0] > 0.5): return "POSITIVE" else: return "NEGATIVE" mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1) mlp.train(reviews[:-1000],labels[:-1000]) mlp.test(reviews[-1000:],labels[-1000:]) """ Explanation: Project 4: Reducing Noise in our Input 
Data End of explanation """
armgilles/presentation
EPSI/I5/Projet Big Data/Cours 2 Big Data.ipynb
mit
# Importer les lib python import pandas as pd """ Explanation: On va débuter step by step Inscription et récupération des données : Aller sur le site Kaggle et inscrivez-vous Ensuite aller sur le contest du Titanic Télécharger les données train.csv et test.csv dans l'onglet data Mettez ces données dans un répertoire nommé input Lancer votre notebook grâce à la commande dans votre terminal jupyter notebook End of explanation """ # Lire un fichier Train train = pd.read_csv('input/train.csv') """ Explanation: Petit rappel pour lire un fichier en local, utiliser la fonction read_csv Si vous avez des difficultés à utiliser une fonction, vous pouvez taper le nom de la fonction + "?" : pd.read_csv? Pour fermer la fenêtre d'aide, cliquez sur la croix en haut à gauche de la fenêtre d'aide End of explanation """ # Regarder le début d'un DataFrame train.head(17) """ Explanation: Pour regarder un fichier, vous pouvez taper : - le nom du DataFrame --> Afficher l'ensemble du DataFrame - Utiliser ".head()" --> Affiche les 5 premières lignes - Utiliser ".tail()" --> Affiche les 5 dernières lignes End of explanation """ train.count() """ Explanation: Signification des colonnes : - Survived : indique la mort ou la survie du passager pour les données d'apprentissage. C'est ce que l'on doit prédire sur fichier test. Cette valeur est booléene (0 ou 1) : 1 pour survie, 0 pour la mort - Pclass : La classe des chambres du navire (3 niveaux), 1 étant la meilleure classe et 3 la classe "éco". C'est une variable texte qui va falloir transformer en faisant attention car il y a une notion d'ordre. - Name : Nom de la personne - Sex : Sexe du passager - Age : âge du passager - SisbSp : (Sibling and Spouse) : le nombre de membres de la famille du passager de type frère, soeur, demi-frère, demi-soeur, époux, épouse... - Parch : (Parent and Child) : le nombre de membres de la famille du passager du type père, mère, fils, fille, beau-fils, etc... 
- Ticket : Numéro du ticket - Fare : le prix du ticket - Cabin : numéro de cabine - Embarked : le port d'embarquement du passager : C -> Cherbourg; Q -> Queenstown; S -> Southampton End of explanation """ def parse_model(X): """ Parse mes données pour nourrir un algo. - Return : - X : features pour prédire le résutat - target : si la personne à survécu ou est morte """ target = X.Survived X = X[['Fare', 'SibSp', 'Parch']] return X, target X, y = parse_model(train.copy()) """ Explanation: count() permet de voir qu'il y a des données qui sont manquantes / absentes Faire un premier modèle simple : On va crée un function python afin de faciler le reproduction par la suite Target est donc notre cible (ce que l'on essaye de prédire') End of explanation """ X.head() """ Explanation: X sont nos données afin de déviner si le passager est mort ou non End of explanation """ y.head() # Permet de valider notre modèle from sklearn.cross_validation import cross_val_score, train_test_split # Modèle linéaire from sklearn.linear_model import LogisticRegression # Etape 1 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) print"X_train : " + str(X_train.shape) print"X_test : " + str(X_test.shape) print"y_train : " + str(y_train.shape) print"y_test : " + str(y_test.shape) ma_regression = LogisticRegression() # On initialise notre algo ma_regression.fit(X_train, y_train) ma_regression? 
ma_prediction = ma_regression.predict(X_test) # ma_prediction est un array (= list) ma_prediction[0:5] # On affiche les 5 premiers résultats # Mon score de précision sur mes données d'apprentissage ma_regression.score(X_train, y_train) # Etape 2 # Function de score from sklearn.metrics import accuracy_score accuracy_score(y_test, ma_prediction) """ Explanation: y est la valeur booléen de survit ou non du Titanic End of explanation """ accuracy_score(y_train, ma_regression.predict(X_train)) """ Explanation: On peux voir en changeant le chiffre du paramêtre random_state de train_test_split (étape 1) que le résultat du score sur les données d'apprentissage et sur les données test change D'ailleurs l'étape 2 est égal : End of explanation """ from sklearn.cross_validation import cross_val_score def cross_validation_score(algo, X, y): """ Utilise une cross validation sur un algo et return tout ses scores """ score_cv = cross_val_score(algo, X, y, cv=5) return score_cv mon_score = cross_validation_score(ma_regression, X, y) print "Voila le résultat de la CV " + str(mon_score) print "Voila la moyenne de la CV " + str(mon_score.mean()) """ Explanation: Utilisation de la cross_validation (CV) : La méthode de validation Cross Validation permet de prendre en entrée X (nos features) et y (survie ou mort) et d'entrainer un algo sur 4/5 du jeu de données et de tester (le score) sur 1/5. 
End of explanation """ test = pd.read_csv("input/test.csv") test.head() test.count() # Etape 3 # On fit notre algo ma_regression.fit(X, y) use_features = ['Fare', 'SibSp', 'Parch'] prediction = ma_regression.predict(test[use_features]) # Error """ Explanation: Submission : Charger le fichier test : End of explanation """ pd.isnull(test[use_features]).sum() test[pd.isnull(test.Fare)][use_features] """ Explanation: Il semble que nos données de test soient NaN (absentes) End of explanation """ # Lib de calcul import numpy as np mediane_fare = np.median(train['Fare']) print "La medianne de Fare dans notre jeu de données train est : " + str(mediane_fare) """ Explanation: On va donc remplacer cette valeur manquante : - Soit on choisit une valeur subjective - On prend la valeur la plus adéquate sur notre jeux de données train (comme la médiane) End of explanation """ test['Fare'] = test['Fare'].fillna(mediane_fare) test[pd.isnull(test.Fare)][use_features] # Plus de Fare NaN """ Explanation: Il faut donc remplacer la / les valeurs manquantes de Fare dans notre jeu de données test par la médiane calculé End of explanation """ # Etape 3 bis # On fit notre algo ma_regression.fit(X, y) use_features = ['Fare', 'SibSp', 'Parch'] test['Fare'] = test['Fare'].fillna(mediane_fare) prediction = ma_regression.predict(test[use_features]) print prediction test.shape len(prediction) # On ajoute notre prédiction sur notre jeux de données test test['Survived'] = prediction fichier_prediction = test[['PassengerId', 'Survived']] fichier_prediction.head() train.Survived.value_counts(normalize=True) fichier_prediction.Survived.value_counts(normalize=True) """ Explanation: On reprendre notre étape 3 : End of explanation """ # Ecrire un fichier à partir d'un DataFrame fichier_prediction.to_csv('output/premier_test.csv', index=False) """ Explanation: Créer un dossier "output" au même niveau que le dossier "input" End of explanation """ # 0.6555 est assez proche de notre CV """ Explanation: Aller 
sur Kaggle et faite une submission afin de voir votre score :) End of explanation """ # Pour afficher des images (pas besoin de taper cet import) from IPython.display import Image Image(url="http://i.giphy.com/9ABgKHIu3acWA.gif") import seaborn as sns import matplotlib.pyplot as plt # Pour que les graphique s'affichent dans le notebook %matplotlib inline train.head() sns.countplot(train.Pclass, hue=train.Survived) pd.get_dummies(train.Pclass, prefix="split_Pclass").head() v X, y = parse_model_1(train) X.head() y.head() mon_score_2 = cross_validation_score(ma_regression, X, y) mon_score_2 print "Voila la moyenne de la CV " + str(mon_score_2.mean()) mon_score_2.mean() # 1er score = 0.674 # On recherche notre jeu de données test mon_test = pd.read_csv("input/test.csv") # Etape 3 bis ma_regression.fit(X, y) #use_features = [['Fare', 'SibSp', 'Parch', 'Pclass']] mon_test = mon_test[['Fare', 'SibSp', 'Parch', 'Pclass']] mes_dummy_test = pd.get_dummies(mon_test.Pclass, prefix="split_Pclass") mon_test = mon_test.join(mes_dummy_test) mon_test = mon_test.drop('Pclass', axis=1) mon_test['Fare'] = mon_test['Fare'].fillna(mediane_fare) mon_test.head() ma_regression.fit(X,y) prediction = ma_regression.predict(mon_test) prediction # On ajoute notre prédiction sur notre jeux de données test test['Survived'] = prediction fichier_prediction = test[['PassengerId', 'Survived']] fichier_prediction.head() # Ecrire un fichier à partir d'un DataFrame fichier_prediction.to_csv('output/dexieme_test.csv', index=False) """ Explanation: Battre notre 1er modèle : Comment faire mieux que notre premier modèle. Il faut garder en mémoire que l'on à utiliser seulement 3 features... 
Etude de la features Pclass (classe des chambres) End of explanation """ ma_regression.coef_[0] mon_resultat = pd.DataFrame({'coef' : ma_regression.coef_[0], 'features' : X.columns}) #mon_resultat['coef'] = ma_regression.coef_[0] #mon_resultat['features'] = ['Fare', 'SibSp', 'Parch', 'Pclass'] mon_resultat plt.figure(figsize=(10,8)) sns.barplot(x='features', y='coef', data=mon_resultat) # 1 er modèle plt.figure(figsize=(10,8)) sns.barplot(x='features', y='coef', data=mon_resultat) """ Explanation: Interpréter notre algo End of explanation """ train.head() """ Explanation: Un poid positif augmente la probalité de survie. Un poid négatif diminue la probabilité de survie. End of explanation """ def parse_model_2(X): target = X.Survived X = X[['Fare', 'SibSp', 'Parch', 'Pclass', 'Sex']] # Ajout d'une features # Pclass mes_dummy_Pclass = pd.get_dummies(X.Pclass, prefix="split_Pclass") X = X.join(mes_dummy_Pclass) X = X.drop('Pclass', axis=1) # Sex mes_dummy_Sex = pd.get_dummies(X.Sex, prefix="split_Sex") X = X.join(mes_dummy_Sex) X = X.drop('Sex', axis=1) return X, target X, y = parse_model_2(train.copy()) X.head() mon_score_3 = cross_validation_score(ma_regression, X, y) mon_score_3 mon_score_3.mean() ma_regression.fit(X, y) # On recherche notre jeu de données test mon_test = pd.read_csv("input/test.csv") #use_features = [['Fare', 'SibSp', 'Parch', 'Pclass']] mon_test = mon_test[['Fare', 'SibSp', 'Parch', 'Pclass', 'Sex']] # Pclass mes_dummy_Pclass_test = pd.get_dummies(mon_test.Pclass, prefix="split_Pclass") mon_test = mon_test.join(mes_dummy_Pclass_test) mon_test = mon_test.drop('Pclass', axis=1) # Sex mes_dummy_Sex_test = pd.get_dummies(mon_test.Sex, prefix="split_Sex") mon_test = mon_test.join(mes_dummy_Sex_test) mon_test = mon_test.drop('Sex', axis=1) # Autres mon_test['Fare'] = mon_test['Fare'].fillna(mediane_fare) mon_test.head() prediction = ma_regression.predict(mon_test) prediction # On ajoute notre prédiction sur notre jeux de données test test['Survived'] = 
prediction fichier_prediction = test[['PassengerId', 'Survived']] fichier_prediction.head() # Ecrire un fichier à partir d'un DataFrame fichier_prediction.to_csv('output/test_3.csv', index=False) """ Explanation: Ajout du sexe : End of explanation """ train.tail() def parse_model_3(X): target = X.Survived X = X[['Fare', 'SibSp', 'Parch', 'Pclass', 'Sex', 'Age']] # Ajout d'une features # Pclass mes_dummy_Pclass = pd.get_dummies(X.Pclass, prefix="split_Pclass") X = X.join(mes_dummy_Pclass) X = X.drop('Pclass', axis=1) # Sex mes_dummy_Sex = pd.get_dummies(X.Sex, prefix="split_Sex") X = X.join(mes_dummy_Sex) X = X.drop('Sex', axis=1) # Enfant oui ou non ? X['enfant'] = 0 X.loc[X.Age <= 10, 'enfant'] = 1 X = X.drop('Age', axis=1) return X, target X, y = parse_model_3(train.copy()) X.head() X.enfant.value_counts(normalize=True) mon_score_4 = cross_validation_score(ma_regression, X, y) mon_score_4 mon_score_4.mean() # On recherche notre jeu de données test mon_test = pd.read_csv("input/test.csv") #use_features = [['Fare', 'SibSp', 'Parch', 'Pclass']] mon_test = mon_test[['Fare', 'SibSp', 'Parch', 'Pclass', 'Sex','Age']] # Pclass mes_dummy_Pclass_test = pd.get_dummies(mon_test.Pclass, prefix="split_Pclass") mon_test = mon_test.join(mes_dummy_Pclass_test) mon_test = mon_test.drop('Pclass', axis=1) # Sex mes_dummy_Sex_test = pd.get_dummies(mon_test.Sex, prefix="split_Sex") mon_test = mon_test.join(mes_dummy_Sex_test) mon_test = mon_test.drop('Sex', axis=1) # Enfant oui ou non ? 
mon_test['enfant'] = 0 mon_test.loc[mon_test.Age <= 10, 'enfant'] = 1 mon_test = mon_test.drop('Age', axis=1) # Autres mon_test['Fare'] = mon_test['Fare'].fillna(mediane_fare) mon_test.head() ma_regression.fit(X, y) prediction = ma_regression.predict(mon_test) prediction # On ajoute notre prédiction sur notre jeux de données test test['Survived'] = prediction fichier_prediction = test[['PassengerId', 'Survived']] fichier_prediction.head() # Ecrire un fichier à partir d'un DataFrame fichier_prediction.to_csv('output/test_4.csv', index=False) mon_resultat = pd.DataFrame({'coef' : ma_regression.coef_[0], 'features' : X.columns}) #mon_resultat['coef'] = ma_regression.coef_[0] #mon_resultat['features'] = ['Fare', 'SibSp', 'Parch', 'Pclass'] plt.figure(figsize=(10,8)) sns.barplot(x='features', y='coef', data=mon_resultat) """ Explanation: Ajout d'une features "Enfant" : End of explanation """ moi = pd.DataFrame() moi['Fare']=30, moi['SibSp']=1, moi['Parch']=2, moi['split_Pclass_1']=1, moi['split_Pclass_2']=0, moi['split_Pclass_3']=0, moi['split_Sex_female']=0, moi['split_Sex_male']=1, moi['enfant']=1 moi moi_prediction = ma_regression.predict(moi) # 1 = survie / 0 = mort print 'je suis ....' + str(moi_prediction) # Probabilité de survie ma_regression.predict_proba(moi)[0][1] """ Explanation: Mes prédiction / probabilité de survie End of explanation """
# --- Begin next source notebook ---
# repo: HsKA-ThermalFluiddynamics/NSS-1
# path: PythonTutorial.ipynb
# license: mit
from __future__ import print_function """ Explanation: Python Tutorial End of explanation """ # Output "Hello World!" print("Hello, World!") print("Hello World!", 10.0) """ Explanation: Hello World! End of explanation """ # define a variable s = "Hello World!" x = 10.0 i = 42 # define 2 variables at once a,b = 1,1 # output the types print(type(10.0), type(42), type("Hello World!")) print(type(s), type(x)) # now change the type of s! s = 3 print(type(s)) """ Explanation: # starts a comment line. f(x,y) is a function call. f is the function name, x and y are the arguments. Values, Types, Variables End of explanation """ name = "olaf" print(len(name)) print(name.capitalize()) print(name.upper()) print(name[2]) """ Explanation: RHS of = (10.0, "Hello World") are values. LHS of = (s, x, ...) are variables. Values have a type. We can assign more than one variable in a single assignment. type(x) can be used to determine the type of x. variables can change their type. Strings End of explanation """ x = 2.0 i = 42 print(type(x), type(i)) # math expressions y = (-2*x**3 + x**.5) / (x-1.0) n = (-2*i**3 + 23) print(y) print(n) # mixed expressions y = (i*x + x**2) print(y) # division! print("17 / 2 =", 17/2) print("17. / 2 =", 17./2) print("17 % 2 =", 17%2) print(float(i)/2) print((i+0.)/2) # unary operators i += 10 i -= 7 x /= 3.0 print(i, x) print(3 * 10) print("3" * 10) """ Explanation: Python has strings. Various functions to modify strings. These functions return the new value. The strings itself are immutable! Ints and Floats End of explanation """ truth = True lie = False print(type(truth)) print(truth) print(not lie and (truth or lie)) truth = (i == 42) truth = lie or (i == n) and (x != y) or (x < y) """ Explanation: "Standard" math operators exist: +, -, *, /, ** Be careful with /: integer division when both numbers are integers! (only Python 2!) Unary operators +=, -=, ... Operators may behave differently depending on the type. 
Boolean Expressions End of explanation """ print("x={}, y={}".format(x, y)) print("y={1}, x={0}".format(x, y)) x = 3.14159 print("x={:.4}, x={:5.3}".format(x, y)) """ Explanation: Boolean values are True and False. and, or and not are logical operators. ==, !=, &lt;, &gt;, &lt;=, &gt;= are comparison operators. Formatted Output End of explanation """ # create an empty list l = [] # append different elements l.append("Hallo") l.append("Welt!") l.append(42) l.append(23) l.append([1.0, 2.0]) # create a number range ns = range(20) # output the list print(l) print(ns) print(range(7,15)) print(len(ns)) # access elements print(l[0]) print(l[2]) print(l[-1]) # take slices print(ns) print(ns[2:7]) print(ns[17:]) print(ns[:7]) # with stride print(ns[7:16:2]) # unpack the list print(l) s1, s2, n1, n2, _ = l print(s1, s2) print(17 in ns) print("Hallo" in l) """ Explanation: str.format() to format output (was "%") {} is replaced by function arguments in order {1} explicitely denotes the second argument {:5.3} can be used to format the output in more detail (here: precision) Lists End of explanation """ fib = [] a, b = 0, 1 while b < 100: a, b = b, (a+b) fib.append(a) print(fib) for n in fib: if n % 3 == 0: print("{} is modulo 3!".format(n)) elif n % 2 == 0: print("{} is even".format(n)) else: print("{} is odd".format(n)) """ Explanation: Python provides lists. [] is an emtpy list. list.append(value) can be used to append anything to a list. Nested lists are possible. range(start, end) creates a list of numbers len(list) returns the length of the list. list[i] to access the ith element of the list (start counting with 0). Negative indices denote elements from the end. Slices can be used to take sublists. A list can be unpacked in an assignment. _ can be used to ignore an element. in to find an element in a list. 
Control Structures End of explanation """ # define a function def f(x, c): return x**2-c, x**2+c print(f(3.0, 1.0)) print(f(5.0, 2.0)) # with default argument def f(x, c=1.0): return x**2-c, x**2+c print(f(3.0)) print(f(5.0, 2.0)) # with docstring def f(x): "Computes the square of x." return x**2 help(f) """ Explanation: while, if, else, for control structures Indentation is significant! All lines with the same indent belong to one block : at end of previous line starts a new block while is a loop with an end condition for is a loop over elements of a list (or other generators) Function Definitions End of explanation """ def f(x): print("i =", i) print("x =", x, "(in function, before add)") x += 2 print("x =", x, "(in function, after add)") return x x = 3 i = 42 print("x =", x, "(in script, before call)") y = f(x) print("x =", x, "(in script, after function call)") print("y =", y) x = 3 def f(): global x x += 2 print("x =", x, "(before call)") f() print("x =", x, "(after function call)") """ Explanation: def to define a function return defines the return values of the function * to unpack lists/take argument lists Variable Scoping End of explanation """ l = ["Hello", "World"] ll = [l, l] print(ll) l[1] = "Olaf" print(ll) ll[0][1] = "Axel" print(ll) """ Explanation: Local variables override global variables. Global variables are not modified when changing local variables. Still, in a function definition, global variables can be accessed (i.e., they are copied). When the global variable should be modified inside a function, explicitely declare it with global. Variables contain References End of explanation """ # Create an object of class "file" f = file('test.txt', 'w') # Call the method "write" on the object f.write('Hello') # Close he file f.close() """ Explanation: Variables are references to values. When an value is modified, all variables that reference it get changed. Basic types are immutable, i.e. they cannot be modiified. 
When they are assigned to another variable, a new copy is created. Functions modifying them return a new, modified version. Not so with other types! Classes and Objects Classical Python programs contain variables that have a value of a certain data type, and functions. Observation: functions are tied to a given data type. For example, print should behave differently when used with a string, and integer, or a file. The same basically holds for all functions. Idea: Tie functions to the data type also syntactically The world is made of objects (a.k.a. value) An object is an instance of a class (a.k.a. data type) A class provides methods (a.k.a. as functions) that can be used to do anything with these objects. End of explanation """ s = "Hello {}" print(s.format("World!")) print("Hello {}".format("World")) x = 42.0 print(x.is_integer()) print((42.0).is_integer()) # same as l=[] l = list() l.append(42) l.append(23) """ Explanation: file is a class. When using a class like a function, this will create an object (a.k.a. an instance of a class): f = file('test.txt', 'w') Several instances/objects of a class can be created. An object has methods (a.k.a. class functions) that can be used to do something with the object. Methods are called like object.method(): f.write('Hello') Everything is an object End of explanation """ class Circle: "This class represents a circle." def create(self, r): "Generate a circle." self.radius = r def area(self): "Compute the area of the circle." return 3.14159 * self.radius**2 help(Circle) # create two circles c1 = Circle() c1.create(2.0) c2 = Circle() c2.create(3.0) print(c1.area()) print(c2.area()) print(c2.radius) """ Explanation: All types in Python are classes, all values are objects! 
Defining a Class End of explanation """ class Circle: pi = 3.14159 # __init__ is the constructor def __init__(self, r): self.radius = r def area(self): return Circle.pi * self.radius**2 # define operator "+" def __add__(self, other): new = Circle(((self.area() + other.area())/3.14159)**0.5) return new # define how to convert it to a string (e.g. to print it) def __str__(self): return "I am a circle with radius {}.".format(self.radius) c1 = Circle(2.0) c2 = Circle(3.0) print(c1.area()) print(c2.radius) # We have defined "__add__", so we can add two circles c3 = c1 + c2 print(c3.radius) print(c3.area()) # We have defined "__str__", so we can print a circle print(c1) """ Explanation: Create a class using the keyword class. Methods are nested functions. A method gets the object itself as first argument (usually called self). Methods can access and modify fields (a.k.a. instance variables). Special Methods End of explanation """ # same a + 23 a = 19 print(a.__add__(23)) # same as "Hello Olaf!"[6:10] print("Hello Olaf!".__getslice__(6, 10)) """ Explanation: __init__ is the constructor. Other special functions can be used to allow operators etc. End of explanation """ class Polynomial: "Represents a polynomial p(x)=a*x**2 + b*x + c." def __init__(self, a, b, c): self.a = a self.b = b self.c = c # allows the object to be used as a function def __call__(self, x): return self.a*x**2 + self.b*x + self.c p = Polynomial(3.0, 2.0, 1.0) print(p(1.0)) """ Explanation: Ultimately, the language intrinsics of Python are assignments function definitions class definitions method or functions calls The rest (e.g. all operators) are "syntactical sugar" and use special methods (e.g. __add__). 
End of explanation """ class MyCircle(Circle): def __init__(self, r = 1.0, color = "red"): Circle.__init__(self, r) self.color = color def __str__(self): return "I am a {} circle with radius {} and area {}.".format(self.color, self.radius, self.area()) c1 = MyCircle() c2 = MyCircle(2.0, "green") print(c1) print(c2) print(c1 + c2) """ Explanation: Inheritance End of explanation """ import math print(math.pi) print(math.sin(math.pi)) import sys print("Hello World!", file=sys.stderr) from math import sin, pi print(sin(pi)) from math import * print(log(pi)) """ Explanation: A class can inherit all methods from another class. The inherited methods can be overridden. Allows to extend functionality of a class. Modules End of explanation """
# --- Begin next source notebook ---
# repo: aboSamoor/compsocial
# path: Word_Tracker/3rd_Yr_Paper/Grants.ipynb
# license: gpl-3.0
NIH_df = GetNIH() NSF_df = GetNSF() NIH_df.head() NSF_df.head() !mkdir data/processed """ Explanation: Merge CSV files Each cvs file represent a specific word results obtained from the NSF and NIH websites. End of explanation """ NSF_df.to_csv("data/Grants/processed/nsf_combined.csv", encoding='utf-8') NIH_df.to_csv("data/Grants/processed/nih_combined.csv", encoding='utf-8') """ Explanation: Save the combined query results End of explanation """ NIH = df.from_csv("data/Grants/processed/nih_combined.csv", encoding='utf-8') """ Explanation: Process NIH database End of explanation """ university_df = pd.DataFrame.from_csv("data/Grants/Accreditation_2015_09.csv") university_df[["Institution_Name", "Institution_State"]].head() code_to_state = { 'AK': 'Alaska', 'AL': 'Alabama', 'AR': 'Arkansas', 'AS': 'American Samoa', 'AZ': 'Arizona', 'CA': 'California', 'CO': 'Colorado', 'CT': 'Connecticut', 'DC': 'District of Columbia', 'DE': 'Delaware', 'FL': 'Florida', 'GA': 'Georgia', 'GU': 'Guam', 'HI': 'Hawaii', 'IA': 'Iowa', 'ID': 'Idaho', 'IL': 'Illinois', 'IN': 'Indiana', 'KS': 'Kansas', 'KY': 'Kentucky', 'LA': 'Louisiana', 'MA': 'Massachusetts', 'MD': 'Maryland', 'ME': 'Maine', 'MI': 'Michigan', 'MN': 'Minnesota', 'MO': 'Missouri', 'MP': 'Northern Mariana Islands', 'MS': 'Mississippi', 'MT': 'Montana', 'NA': 'National', 'NC': 'North Carolina', 'ND': 'North Dakota', 'NE': 'Nebraska', 'NH': 'New Hampshire', 'NJ': 'New Jersey', 'NM': 'New Mexico', 'NV': 'Nevada', 'NY': 'New York', 'OH': 'Ohio', 'OK': 'Oklahoma', 'OR': 'Oregon', 'PA': 'Pennsylvania', 'PR': 'Puerto Rico', 'RI': 'Rhode Island', 'SC': 'South Carolina', 'SD': 'South Dakota', 'TN': 'Tennessee', 'TX': 'Texas', 'UT': 'Utah', 'VA': 'Virginia', 'VI': 'Virgin Islands', 'VT': 'Vermont', 'WA': 'Washington', 'WI': 'Wisconsin', 'WV': 'West Virginia', 'WY': 'Wyoming' } state_to_code = {state.lower():code for code,state in code_to_state.items()} university_to_state = {uni.lower():state for uni,state in 
university_df[["Institution_Name", "Institution_State"]].values} universities = set(university_to_state.keys()) def FindState(name): name = name.lower() if name in universities: return university_to_state[name] for state in state_to_code.keys(): if state in name: return state_to_code[state] for n in universities: if name in n: return university_to_state[n] return "" NIH_universities = {x.lower() for x in set(NIH["Organization Name"].fillna("").values)} NIH_uni_state_df = pd.DataFrame.from_dict([{"University":uni, "State":FindState(uni)} for uni in NIH_universities]) NIH_uni_state_df.to_csv("data/Grants/processed/university_to_state.csv") """ Explanation: Load University Database The goal to map each university that is mentioned in the NIH database to a state. We will rely on two source of information to build a partial mapping: University accredation database. List of states and their acronyms. After building the partial list, we will fill the gaps manually. End of explanation """ tmp = df.from_csv("data/Grants/university_to_state.csv").fillna("") university_to_state = {x[1]:x[0] for x in tmp.values} """ Explanation: Load manual university -> state dictionary End of explanation """ NIH = df.from_csv("data/Grants/processed/nih_combined.csv", encoding='utf-8') NIH.loc[:, "FY Total Cost "] = NIH["FY Total Cost "].fillna(0) NIH.loc[:, "FY Total Cost (Sub Projects)"] = NIH["FY Total Cost (Sub Projects)"].fillna(0) NIH["Award Amount"] = NIH["FY Total Cost (Sub Projects)"] + NIH["FY Total Cost "] remaining_cols = [c for c in NIH.columns if c not in {'ARRA Indicator', 'Administering IC', 'Application ID', 'Contact PI Person ID', 'Other PI or Project Leader(s)', 'Project Number', 'Serial Number', 'Subproject Number', 'Suffix', "Contact PI / Project Leader", "FY Total Cost ", "FY Total Cost (Sub Projects)", 'Subproject Number '}] NIH_minimal = NIH[remaining_cols] NIH_minimal.index.rename("ID", inplace=True) NIH_minimal = NIH_minimal.rename(columns={"word": "Term", 
"Activity": "Grant Program", "FY": "Start Year", "Type": "Grant Type", "Grant Organization": "Funded Organization"}) NIH_minimal = NIH_minimal.rename(columns={"IC": "Grant Organization"}) id_to_term = { 1: "multiculturalism", 2: "polyculturalism", 3: "cultural pluralism", 4: "monocultural", 5: "monoracial", 6: "bicultural", 7: "biracial", 8: "biethnic", 9: "interracial", 10: "multicultural", 11: "multiracial", 12: "polycultural", 13: "polyracial", 14: "polyethnic", 15: "mixed race", 16: "mixed ethnicity", 17: "other race", 18: "other ethnicity"} term_to_id = {term:id for id, term in id_to_term.items()} NIH_minimal.loc[:, "TermCode"] = NIH_minimal.Term.map(term_to_id) NIH_minimal.loc[:, "Grant Type"] = NIH_minimal["Grant Type"].map(lambda x: 1 if x=='1' else 2) NIH_minimal.loc[:, "State"] = NIH_minimal["Organization Name"].fillna("").map(lambda x: university_to_state[x.lower()]) title_occurrence = [] for x,y in NIH_minimal[["Term", "Project Title"]].fillna("").values: if x.lower() in y.lower(): title_occurrence.append(1) else: title_occurrence.append(0) NIH_minimal["term_in_title"] = title_occurrence NIH_minimal.head() Xs = [] for x in NIH_minimal["Organization Name"].fillna(""): if x.lower() not in university_to_state: Xs.append(x.lower()) print("\n".join(set(Xs))) """ Explanation: NIH Modification Steps ~~Remove the following columns: ARRA indicator, Administering IC, Application ID, Contact PI Person ID, Contact PI, Other PI, Project Number, Serial Number, Subproject Number, Suffix~~ ~~First column --> give title 'ID #'~~ ~~Copy the word data into a new column (title it 'terms')--> code them as the following: 1 = multiculturalism, 2 = polyculturalism, 3 = cultural pluralism, 4 = monocultural, 5 = monoracial, 6 = bicultural, 7 = biracial, 8 = biethnic, 9 = interracial, 10 = multicultural, 11 = multiracial, 12 = polycultural, 13 = polyracial, 14 = polyethnic, 15 = mixed race, 16 = mixed ethnicity, 17 = other race, 18 = other ethnicity~~ ~~'Activity' --> Rename 
'Grant Program'~~ ~~'FY' - 1 --> Rename 'Start Year'~~ ~~Combine 'FY Total Cost' and 'FY Total Cost Subproject' Columns --> Rename 'Award Amount'~~ ~~'IC' --> Rename 'Grant Organization'~~ ~~'Organization Name' --> Rename 'Funded Organization'~~ Add new column --> 'Organization State' [use csv file in email to get state info for universities in list] ~~'Type'--> Rename 'Grant Type' [Recode as the following: 1 = 1 (New Grant), 2+ = 2 (Continuing Grant)]~~ Remove the following columns: Contact PI, FY Total Cost, FY Total Cost (SubProjects), Subproject Number End of explanation """ NIH_minimal.to_csv("data/Grants/processed/nih_clean.csv", encoding='utf-8') """ Explanation: Save new NIH dataset End of explanation """ NSF = df.from_csv("data/Grants/processed/nsf_combined.csv", encoding='utf-8') """ Explanation: Process NSF Database End of explanation """ NSF["AwardInstrument"].value_counts(dropna=False) NSF.columns NSF = df.from_csv("data/Grants/processed/nsf_combined.csv", encoding='utf-8') NSF.loc[:, "Support Year"] = 2015 - pd.DatetimeIndex(NSF["StartDate"]).year NSF.loc[:, "Award Length"] = pd.DatetimeIndex(NSF["EndDate"]).year - pd.DatetimeIndex(NSF["StartDate"]).year NSF.loc[:, "Start Year"] = pd.DatetimeIndex(NSF["StartDate"]).year grant_to_code = {"Standard Grant": 1, "Continuing grant": 2, "Fellowship": 3, "Cooperative Agreement": 4, "":np.nan} NSF.loc[:, "AwardInstrument"] = NSF.AwardInstrument.fillna("").map(lambda x: grant_to_code.get(x, 5)) remaining_cols = [c for c in NSF.columns if c not in {"ARRAAmount", "LastAmmendmentDate", "OrganizationCity", "OrganizationZip", "ProgramElementCode", "StartDate", "AwardNumber", "Co-PIName(s)", "LastAmmendmentDate", "OrganizationPhone", "OrganizationStreet", "PIEmailAddress", "PrincipalInvestigator", "Program(s)", "ProgramElementCode(s)", "ProgramManager", "ProgramReferenceCode(s)", "OrganizationState"}] NSF_minimal = NSF[remaining_cols] NSF_minimal.index.rename("ID", inplace=True) NSF_minimal = 
NSF_minimal.rename(columns={"word": "Term", "Abstract": "Project Abstract", "AwardedAmountToDate": "Award Amount", "OrganizationName": "Funded Organization", "Title":"Project Title", "StartDate":"Start Year", "AwardInstrument": "Grant Type"}) id_to_term = { 1: "multiculturalism", 2: "polyculturalism", 3: "cultural pluralism", 4: "monocultural", 5: "monoracial", 6: "bicultural", 7: "biracial", 8: "biethnic", 9: "interracial", 10: "multicultural", 11: "multiracial", 12: "polycultural", 13: "polyracial", 14: "polyethnic", 15: "mixed race", 16: "mixed ethnicity", 17: "other race", 18: "other ethnicity"} term_to_id = {term:id for id, term in id_to_term.items()} NSF_minimal.loc[:, "TermCode"] = NSF_minimal.Term.map(term_to_id) title_occurrence = [] for x,y in NSF_minimal[["Term", "Project Title"]].fillna("").values: if x.lower() in y.lower(): title_occurrence.append(1) else: title_occurrence.append(0) NSF_minimal["term_in_title"] = title_occurrence abstract_occurrence = [] for x,y in NSF_minimal[["Term", "Project Abstract"]].fillna("").values: if x.lower() in y.lower(): abstract_occurrence.append(1) else: abstract_occurrence.append(0) NSF_minimal["term_in_abstract"] = abstract_occurrence NSF_minimal.head() """ Explanation: NSF Modification Steps ~~Remove the following columns: ARRA amount, Last Amendment Date, Organization City, Organization Zip, Program Element Code, State~~ ~~First column --> give title 'ID #'~~ ~~Copy the word data into a new column (title it 'terms')--> code them as the following: 1 = multiculturalism, 2 = polyculturalism, 3 = cultural pluralism, 4 = monocultural, 5 = monoracial, 6 = bicultural, 7 = biracial, 8 = biethnic, 9 = interracial, 10 = multicultural, 11 = multiracial, 12 = polycultural, 13 = polyracial, 14 = polyethnic, 15 = mixed race, 16 = mixed ethnicity, 17 = other race, 18 = other ethnicity~~ ~~'Abstract' --> Rename 'Project Abstract'~~ ~~'Award Instrument'--> Rename 'Grant Type' [Recode as the following: 1 = Standard Grant, 2 = 
Continuing Grant, 3 = Fellowship, 4 = Cooperative Agreement, 5 = Other]~~ ~~'Awarded Amount to Date' --> Rename 'Award Amount'~~ ~~'NSF Organization' --> Rename 'Grant Organization' --> Note: subdivision of the NSF directorate~~ ~~'Organization Name' --> Rename 'Funded Organization'~~ ~~'Start Date' --> Rename 'Start Year' [Only use year from date information]~~ ~~New Column 'Support Year': 2015-Start Year~~ (I do not understand this one!) ~~New Column 'Award Length': End Date - Start Year (Years only)~~ ~~Title --> Rename 'Project Title'~~ ~~Remove the following columns: Award number, Co-PI names, Last Amendment Date, Organization Phone, Organization Street, PI Email Address, Principal Investigator, Program(s), Program Element Code(s), Program Manager, Program Reference Codes~~ End of explanation """ NSF_minimal.to_csv("data/Grants/processed/nsf_clean.csv", encoding='utf-8') """ Explanation: Save results End of explanation """ NSF_minimal = NSF_minimal.rename(columns={"Organization": "Organization Name"}) common_cols = ['Term', 'Start Year', 'Organization Name', 'Project Title', 'Support Year', 'Grant Type', 'Award Amount', 'State', "TermCode"] NSF_merge = NSF_minimal[common_cols] NSF_merge.insert(0, "Grant Agency", ["NSF"]*len(NSF_merge)) NIH_merge = NIH_minimal[common_cols] NIH_merge.insert(0, "Grant Agency", ["NIH"]*len(NIH_merge)) merged = pd.concat([NSF_merge, NIH_merge]) merged.loc[:, "AgencyCode"] = merged["Grant Agency"].map(lambda x: 1 if x == 'NIH' else 2) title_occurrence = [] for x,y in merged[["Term", "Project Title"]].fillna("").values: if x.lower() in y.lower(): title_occurrence.append(1) else: title_occurrence.append(0) merged["term_in_title"] = title_occurrence merged.tail() merged.to_csv("data/Grants/processed/nsf_nih_merged.csv") merged = pd.read_csv("data/Grants/processed/nsf_nih_merged.csv") years = merged["Start Year"].fillna("2017") years = years.map(lambda x: {" ": "2017"}.get(x, x)) years = years.astype(np.int64) merged[years==1956] 
years[years<=1981].sort_values() """ Explanation: Mega NIH/NSF Dataset New Column: 'Grant Agency' [Code --> 1 = NIH, 2 = NSF] Combine the two datasets (after following the instructions below) NIH Add NIH to the beginning of each remaining column Remove the following columns: Grant Program Grant Organization Funded Organization? NSF Add NSF to the beginning of each remaining column Remove the following columns: Abstract NSF Organization/NSF Directorate Funded Organization? Program Topic End of explanation """ import xml.etree.ElementTree as ET import locale file = "data/Grants/NSF/NSFBudgetHistory.xml" locale.setlocale(locale.LC_ALL, 'en_US.UTF8') tree = ET.parse(file) root = tree.getroot() records = [] seen = {} for child in root.getchildren(): record = {} if child.tag.endswith("Omnibus") or child.tag.endswith("ARRA"): continue year_ = child.tag.replace("FY", "") year = int(year_.replace("Total", "")) if(year > 2009) and not year_.endswith("Total"): continue record["year"] = year for grandchild in child.getchildren(): if grandchild.tag == "ConstantDollars": for grandchild2 in grandchild.getchildren(): if grandchild2.tag == "NSFTotal": v = list(grandchild2.itertext())[0].strip() record["Total"] = locale.atof(v.strip("$")) records.append(record) nsf_budget = df.from_records(records) nsf_budget.set_index("year", inplace=True) nsf_budget.to_csv("data/Grants/processed/nsf_budget.csv") """ Explanation: NSF Budget End of explanation """
SubhankarGhosh/NetworkX
3. Hubs and Paths (Instructor).ipynb
mit
# Load the sociopatterns network data. G = cf.load_sociopatterns_network() """ Explanation: Load Data We will load the sociopatterns network data for this notebook. From the Konect website: This network describes the face-to-face behavior of people during the exhibition INFECTIOUS: STAY AWAY in 2009 at the Science Gallery in Dublin. Nodes represent exhibition visitors; edges represent face-to-face contacts that were active for at least 20 seconds. Multiple edges between two nodes are possible and denote multiple contacts. The network contains the data from the day with the most interactions. End of explanation """ # Let's find out the number of neighbors that individual #7 has. len(G.neighbors(7)) """ Explanation: Hubs: How do we evaluate the importance of some individuals in a network? Within a social network, there will be certain individuals which perform certain important functions. For example, there may be hyper-connected individuals who are connected to many, many more people. They would be of use in the spreading of information. Alternatively, if this were a disease contact network, identifying them would be useful in stopping the spread of diseases. How would one identify these people? Approach 1: Neighbors One way we could compute this is to find out the number of people an individual is conencted to. NetworkX let's us do this by giving us a G.neighbors(node) function. End of explanation """ # Possible Answers: # sorted(G.nodes(), key=lambda x:len(G.neighbors(x)), reverse=True) sorted([(n, G.neighbors(n)) for n in G.nodes()], key=lambda x: len(x[1]), reverse=True) """ Explanation: Exercise Can you create a ranked list of the importance of each individual, based on the number of neighbors they have? Hint: One suggested output would be a list of tuples, where the first element in each tuple is the node ID (an integer number), and the second element is the number of neighbors that it has. 
Hint: Python's sorted(iterable, key=lambda x:...., reverse=True) function may be of help here. End of explanation """ nx.degree_centrality(G) """ Explanation: Approach 2: Degree Centrality The number of other nodes that one node is connected to is a measure of its centrality. NetworkX implements a degree centrality, which is defined as the number of neighbors that a node has normalized to the number of individuals it could be connected to in the entire graph. This is accessed by using nx.degree_centrality(G) End of explanation """ # Possible Answers: fig = plt.figure(0) # Get a list of degree centrality scores for all of the nodes. degree_centralities = list(nx.degree_centrality(G).values()) # Plot the histogram of degree centralities. plt.hist(degree_centralities) # Set the plot title. plt.title('Degree Centralities') fig = plt.figure(1) neighbors = [len(G.neighbors(node)) for node in G.nodes()] plt.hist(neighbors) # plt.yscale('log') plt.title('Number of Neighbors') fig = plt.figure(2) plt.scatter(degree_centralities, neighbors, alpha=0.1) plt.xlabel('Degree Centralities') plt.ylabel('Number of Neighbors') """ Explanation: If you inspect the dictionary closely, you will find that node 51 is the one that has the highest degree centrality, just as we had measured by counting the number of neighbors. There are other measures of centrality, namely betweenness centrality, flow centrality and load centrality. You can take a look at their definitions on the NetworkX API docs and their cited references. You can also define your own measures if those don't fit your needs, but that is an advanced topic that won't be dealt with here. The NetworkX API docs that document the centrality measures are here: http://networkx.readthedocs.io/en/networkx-1.11/reference/algorithms.centrality.html?highlight=centrality#module-networkx.algorithms.centrality Exercises The following exercises are designed to get you familiar with the concept of "distribution of metrics" on a graph. 
Can you create a histogram of the distribution of degree centralities? Can you create a histogram of the distribution of number of neighbors? Can you create a scatterplot of the degree centralities against number of neighbors? If I have n nodes, then how many possible edges are there in total, assuming self-edges are allowed? What if self-edges are not allowed? Hint: You may want to use: plt.hist(list_of_values) and plt.scatter(x_values, y_values) Hint: You can access the dictionary .keys() and .values() and cast them as a list. If you know the Matplotlib API, feel free to get fancy :). End of explanation """ from circos import CircosPlot import numpy as np nodes = sorted(G.nodes()) edges = G.edges() edgeprops = dict(alpha=0.1) nodecolor = plt.cm.viridis(np.arange(len(nodes)) / len(nodes)) # be sure to use viridis! fig = plt.figure(figsize=(6,6)) ax = fig.add_subplot(111) c = CircosPlot(nodes, edges, radius=10, ax=ax, fig=fig, edgeprops=edgeprops, nodecolor=nodecolor) c.draw() plt.savefig('images/sociopatterns.png', dpi=300) """ Explanation: Exercise Before we move on to paths in a network, see if you can use the Circos plot to visualize the network. End of explanation """ def path_exists(node1, node2, G): """ This function checks whether a path exists between two nodes (node1, node2) in graph G. Special thanks to @ghirlekar for suggesting that we keep track of the "visited nodes" to prevent infinite loops from happening. 
Reference: https://github.com/ericmjl/Network-Analysis-Made-Simple/issues/3 """ visited_nodes = set() queue = [node1] for node in queue: neighbors = G.neighbors(node) if node2 in neighbors: print('Path exists between nodes {0} and {1}'.format(node1, node2)) return True break else: queue.remove(node) visited_nodes.add(node) queue.extend([n for n in neighbors if n not in visited_nodes]) if len(queue) == 0: print('Path does not exist between nodes {0} and {1}'.format(node1, node2)) return False # Test your answer below def test_path_exists(): assert path_exists(18, 5, G) assert path_exists(22, 318, G) test_path_exists() """ Explanation: What can you deduce about the structure of the network, based on this visualization? Nodes are sorted by ID. Nodes are more connected to proximal rather than distal nodes. The data are based on people streaming through an enclosed space, so it makes sense that people are mostly connected to others proximal in order, but occasionally some oddballs stick around. Paths in a Network Graph traversal is akin to walking along the graph, node by node, restricted by the edges that connect the nodes. Graph traversal is particularly useful for understanding the local structure (e.g. connectivity, retrieving the exact relationships) of certain portions of the graph and for finding paths that connect two nodes in the network. Using the synthetic social network, we will figure out how to answer the following questions: How long will it take for a message to spread through this group of friends? (making some assumptions, of course) How do we find the shortest path to get from individual A to individual B? Shortest Path Let's say we wanted to find the shortest path between two nodes. How would we approach this? One approach is what one would call a breadth-first search (http://en.wikipedia.org/wiki/Breadth-first_search). While not necessarily the fastest, it is the easiest to conceptualize. 
The approach is essentially as such: Begin with a queue of the starting node. Add the neighbors of that node to the queue. If destination node is present in the queue, end. If destination node is not present, proceed. For each node in the queue: Remove node from the queue. Add neighbors of the node to the queue. Check if destination node is present or not. If destination node is present, end. <!--Credit: @cavaunpeu for finding bug in pseudocode.--> If destination node is not present, continue. Exercise Try implementing this algorithm in a function called path_exists(node1, node2, G). The function should take in two nodes, node1 and node2, and the graph G that they belong to, and return a Boolean that indicates whether a path exists between those two nodes or not. For convenience, also print out whether a path exists or not between the two nodes. End of explanation """ nx.has_path(G, 400, 1) """ Explanation: If you write an algorithm that runs breadth-first, the recursion pattern is likely to follow what we have done above. If you do a depth-first search (i.e. DFS), the recursion pattern is likely to look a bit different. Take it as a challenge exercise to figure out how a DFS looks like. Meanwhile... thankfully, NetworkX has a function for us to use, titled has_path, so we don't have to implement this on our own. :-) http://networkx.readthedocs.io/en/networkx-1.11/reference/generated/networkx.algorithms.shortest_paths.generic.has_path.html End of explanation """ nx.shortest_path(G, 4, 400) """ Explanation: NetworkX also has other shortest path algorithms implemented. http://networkx.readthedocs.io/en/networkx-1.11/reference/algorithms.shortest_paths.html We can build upon these to build our own graph query functions. Let's see if we can trace the shortest path from one node to another. nx.shortest_path(G, source, target) gives us a list of nodes that exist within one of the shortest paths between the two nodes. (Not all paths are guaranteed to be found.) 
End of explanation """ # Possible Answer: def extract_path_edges(G, source, target): # Check to make sure that a path does exists between source and target. if nx.has_path(G, source, target): nodes = nx.shortest_path(G, source, target) newG = G.subgraph(nodes) return newG else: raise Exception('Path does not exist between nodes {0} and {1}.'.format(source, target)) newG = extract_path_edges(G, 4, 400) nx.draw(newG, with_labels=True) """ Explanation: Incidentally, the node list is in order as well. Exercise Write a function that extracts the edges in the shortest path between two nodes and puts them into a new graph, and draws it to the screen. It should also return an error if there is no path between the two nodes. Hint: You may want to use G.subgraph(iterable_of_nodes) to extract just the nodes and edges of interest from the graph G. You might want to use the following lines of code somewhere: newG = G.subgraph(nodes_of_interest) nx.draw(newG) newG will be comprised of the nodes of interest and the edges that connect them. End of explanation """ # Possible Answer def extract_neighbor_edges(G, node): neighbors = G.neighbors(node) newG = nx.Graph() for n1, n2 in G.edges(): if (n1 == node and n2 in neighbors) or (n1 in neighbors and n2 == node): newG.add_edge(n1, n2) return newG fig = plt.figure(0) newG = extract_neighbor_edges(G, 19) nx.draw(newG, with_labels=True) def extract_neighbor_edges2(G, node): neighbors = G.neighbors(node) newG = nx.Graph() for neighbor in neighbors: if (node, neighbor) in G.edges() or (neighbor, node) in G.edges(): newG.add_edge(node, neighbor) return newG fig = plt.figure(1) newG = extract_neighbor_edges2(G, 19) nx.draw(newG, with_labels=True) """ Explanation: Challenge Exercise (at home) These exercises below are designed to let you become more familiar with manipulating and visualizing subsets of a graph's nodes. Write a function that extracts only node, its neighbors, and the edges between that node and its neighbors as a new graph. 
Then, draw the new graph to screen. End of explanation """ # Possible answer to Question 1: # All we need here is the length of the path. def compute_transmission_time(G, source, target): """ Fill in code below. """ length = nx.shortest_path_length(G, source, target) time = sum([i for i in range(1, length+1)]) return time compute_transmission_time(G, 14, 4) # Possible answer to Question 2: # We need to know the length of every single shortest path between every pair of nodes. # If we don't put a source and target into the nx.shortest_path_length(G) function call, then # we get a dictionary of dictionaries, where all source-->target-->lengths are shown. lengths = [] times = [] for source, sink_length in nx.shortest_path_length(G).items(): for sink, length in sink_length.items(): times.append(sum(range(1, length+1))) lengths.append(length) plt.figure(0) plt.bar(Counter(lengths).keys(), Counter(lengths).values()) plt.figure(1) plt.bar(Counter(times).keys(), Counter(times).values()) """ Explanation: Challenge Exercises (at home) Let's try some other problems that build on the NetworkX API. Refer to the following for the relevant functions: http://networkx.readthedocs.io/en/networkx-1.11/reference/algorithms.shortest_paths.html If we want a message to go from one person to another person, and we assume that the message takes 1 day for the initial step and 1 additional day per step in the transmission chain (i.e. the first step takes 1 day, the second step takes 2 days etc.), how long will the message take to spread from any two given individuals? Write a function to compute this. What is the distribution of message spread times from person to person? What about chain lengths? 
End of explanation """ btws = nx.betweenness_centrality(G, normalized=False) plt.bar(btws.keys(), btws.values()) """ Explanation: Hubs Revisited It looks like individual 19 is an important person of some sorts - if a message has to be passed through the network in the shortest time possible, then usually it'll go through person 19. Such a person has a high betweenness centrality. This is implemented as one of NetworkX's centrality algorithms. Check out the Wikipedia page for a further description. http://en.wikipedia.org/wiki/Betweenness_centrality End of explanation """ # Possible answer: deg_centrality = nx.degree_centrality(G) btw_centrality = nx.betweenness_centrality(G) deg_cent_sorted = [i[1] for i in sorted(zip(deg_centrality.keys(), deg_centrality.values()))] btw_cent_sorted = [i[1] for i in sorted(zip(btw_centrality.keys(), btw_centrality.values()))] plt.scatter(deg_cent_sorted, btw_cent_sorted) plt.xlabel('degree') plt.ylabel('betweeness') plt.title('centrality scatterplot') """ Explanation: Exercise Plot betweeness centrality against degree centrality for the network data. End of explanation """ nx.draw(nx.barbell_graph(5, 1)) """ Explanation: Think about it... From the scatter plot, we can see that the dots don't all fall on the same line. Degree centrality and betweenness centrality don't necessarily correlate. Can you think of scenarios where this is true? What would be the degree centrality and betweenness centrality of the middle connecting node in the barbell graph below? End of explanation """
ldiary/marigoso
notebooks/handling_select2_controls_in_selenium_webdriver.ipynb
mit
import os from marigoso import Test request = { 'firefox': { 'capabilities': { 'marionette': False, }, } } """ Explanation: Handling Select2 Controls in Selenium WebDriver Select2 is a jQuery based replacement for select boxes. This article will demonstrate how Selenium webdriver can handle Select2 by manipulating the first such selection box in the Examples page of Select2. Creating an instance of Selenium webdriver equipped with Firefox Extensions Firebug and FirePath are very helpful Firefox extensions that I want to use in this demonstration, so I will make Selenium launch a Firefox browser equipped with these extensions. End of explanation """ browser.get_url('https://select2.github.io/') browser.press("Examples") """ Explanation: Note that in order for the extensions to be installed in the browser, you need to either specify an extension enabled Firefox profile to Selenium or you specify the location and name of Firefox extensions you want to install. In the above example, I have Firebug and FirePath files stored in 'tools\firefox' folder so I can just specify the location and filenames of the extensions. Navigate to Select2 Examples page End of explanation """ browser.press("css=[id^='select2']" ) """ Explanation: Identify the locator for the Selection Box Right click on the first Select2 box and select 'Inspect Element with Firebug' Firebug will then display and highlight the HTML source of the Selection Box as well as highlight the control itself if you hover your mouse to the HTML source. We now have the task of figuring out what locator we can use to locate this Selection Box. The Selection Box is a 'span' element with an id="select2-jnw9-container", we can surely make use of this id attribute. However, it appears that this id is randomly generated so I made a slight modification to make sure my locator will still work even if the page is refreshed. Verify the adopted locator works In the Firebug window, click on 'FirePath' tab. 
Click on the dropdown before the input box and select 'CSS:'. Then enter "[id^='select2']" in the input box and press Enter key. Firebug will now display the same thing as before, but notice now that at the lower left part of Firebug window it says '17 matching nodes'. This means we have 17 such Selection Box that can be located using my chosen selector. However, this time we are only interested on the first Selection Box, so I think my chosen selector is still useful. The ultimate way to verify that the locator works is to feed it to Selenium and run it. So we execute the following command. End of explanation """ browser.select_text("css=*[id^='select2']", "Nevada", 'css=span.select2-dropdown > span > ul') """ Explanation: If the Selection Dropdown appears upon executing the above command, then we are on the right track. You can run the above command several times to confirm the closing and opening of the selection dropdown. Identify the locator for the Selection Dropdown We now need to identify the locator for the Selection Dropdown. We do this by clicking back on the 'HTML' tab in the Firebug window and observing that when you manually click on the Selection Box another 'span' element is dynamically being added at the buttom of the HTML source. We can use previous technique of locating the Selection Box above to arrive to a conclusion that the locator for Selection Dropdown could be 'css=span.select2-dropdown > span > ul'. Note that in this case we specifically located until the 'ul' tag element. This is because the options for Select2 are not 'option' tag elements, instead they are 'li' elements of a 'ul' tag. Verify that both Selection Box and Dropdown works After all this hardwork of figuring out the best locators for Selection Box and Selection Dropdown, we then test it to see if we can now properly handle Select2. Marigoso offers two syntax for performing the same action. 
select_text We can use the usual select_text function by just appending the Select Dropdown locator at the end. End of explanation """ browser.select2("css=*[id^='select2']", 'css=span.select2-dropdown > span > ul', "Hawaii") """ Explanation: select2 We can also use the select2 function of Marigoso by swapping the order of the Selection Dropdown locator and the value of the text you want to select. End of explanation """ import os from marigoso import Test request = { 'firefox': { 'extensions_path': os.path.join(os.getcwd(), 'tools', 'firefox'), 'extensions': ['firebug@software.joehewitt.com.xpi', 'FireXPath@pierre.tholence.com.xpi'], } } browser = Test(request).launch_browser('Firefox') browser.get_url('https://select2.github.io/') browser.press("Examples") browser.select_text("css=*[id^='select2']", "Nevada", 'css=span.select2-dropdown > span > ul') browser.quit() """ Explanation: Final Solution Finally, here again is the summary of the necessary commands used in this demonstration. End of explanation """
tyamamot/h29iro
codes/1_Try_Notebook.ipynb
mit
print ("Hello" + ", World") print(10 + 4) """ Explanation: 第1回 Jupyter notebookに慣れる 参考文献: IPython データサイエンスクックブック,オライリー社 1. IPythonとJupyterとは IPython IPythonはPythonを対話的に動作させるためのプラットフォームです.たとえばある文を入力した直後に結果を確認するといったように,インタラクティブに結果を確認しながらプログラミングしていくことができます. Jupyter notebookとは IPythonをWebベースで実行するためのプラットフォームです.ソースコードを書くだけでなく,その場で結果を表示したり,一般的なテキストや画像,HTMLなどを1つのページ内に混在させることができます. 2. notebookを実行してみよう この演習では,時間の都合上notebookやPythonの詳しい使い方については説明しません(もちろん個別にサポートします).詳細な使い方はWebや参考書で各自勉強することをお勧めします. 取り急ぎ,notebookを体験してみるということで, Hello World プログラムを実行してみましょう. notebookを開始するには,ターミナルで以下のコマンドを入力します. ipython notebook notebookを開始したら,新しいファイルを作成し,セルと呼ばれる領域に,次の式を入力し,Shift+Enterを押してみてください. End of explanation """ import numpy as np # numpy モジュールのインポート import matplotlib.pyplot as plt # pyplotモジュールのインポート %matplotlib inline # 平均 x = -2, y = -2 の2変量正規分布からデータを100個サンプリングする mean = [-2,-2] cov = [[1,0],[0,1]] x1,y1 = np.random.multivariate_normal(mean, cov, 100).T # サンプリングしたデータの xとy の値 10件を確認してみる x1[:10], y1[:10] # 今サンプリングしたデータを散布図で確認 plt.scatter(x1, y1, color="r", label="d1") # 同様に 平均 x=2, y=2 のガウス分布から100個データをサンプリング mean2 = [2,2] cov2 = [[1,0],[0,1]] x2,y2 = np.random.multivariate_normal(mean2, cov2, 100).T plt.scatter(x2,y2, c="b", marker="x", label="d2") # 両方のデータを1つの散布図で表示する plt.scatter(x1,y1, c="r", label="d1") plt.scatter(x2,y2, c="b", marker="x", label="d2") plt.grid(True) # 枠線をつけてみる plt.legend() # 凡例をつけてみる plt.show() """ Explanation: 正しく動作すれば,画面に python Hello, World 14 と表示されたはずです.このように,IPython + Jupyter notebook 環境では,Webベースで対話的に実行結果を確認しながらプログラミングをすることができます. 3. numpy + matplotlib チュートリアル それでは,IPython + Jupyter notebook環境に慣れるという目的で,以下のコードを自分のNotebook環境で入力しながら,このページと同じような結果が得られるか確認してみましょう. このチュートリアルは,異なる種類の確率分布から得られたデータを散布図として表示するというプログラムになっています. 実際にコードを実行しながら, python および notebook の雰囲気に慣れてください. End of explanation """
topgate/training-gcp
CPB102/tensorflow/tfclassic.ipynb
apache-2.0
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

print(tf.__version__)
""" Explanation: TensorFlow Low-Level API 高レベル API を使わない、いわゆる生の TensorFlow でコードを書いてみましょう。 End of explanation """
# Download (and cache) MNIST; labels come back one-hot encoded as 10-vectors.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
""" Explanation: TensorFlow 付属のモジュールを使って MNIST データセットをダウンロードします。 End of explanation """
# Placeholders for a batch of flattened 28x28 images and their one-hot labels.
x_ph = tf.placeholder(tf.float32, [None, 784])
y_ph = tf.placeholder(tf.float32, [None, 10])
""" Explanation: 学習用のデータを流し込むための tf.placeholder を作成します。 End of explanation """
# First affine layer: 784 -> 20.
# NOTE(review): no nonlinearity is applied to `hidden`, so the two matmuls
# compose into a single linear map -- confirm this is intended for the demo.
weights = tf.Variable(tf.random_normal([784, 20], stddev=0.1))
biases = tf.Variable(tf.zeros([20]))
hidden = tf.matmul(x_ph, weights) + biases
# Second affine layer: 20 -> 10 class logits.  Rebinding `weights`/`biases`
# only drops the Python names; both variable pairs remain in the graph.
weights = tf.Variable(tf.random_normal([20, 10], stddev=0.1))
biases = tf.Variable(tf.zeros([10]))
logits = tf.matmul(hidden, weights) + biases
y = tf.nn.softmax(logits)
""" Explanation: ニューラルネットの weight を tf.Variable として作成します。 行列積の計算なども自分で記述する形になります。 End of explanation """
# Cross-entropy from an explicit softmax.
# NOTE(review): tf.log(y) can underflow for confident predictions;
# tf.nn.softmax_cross_entropy_with_logits is the numerically stable form.
# Also, reduce_mean averages over every element (batch x 10 classes), so the
# value is 1/10 of the usual per-example loss scale.
cross_entropy = -tf.reduce_mean(y_ph * tf.log(y))
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_ph, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Plain SGD with learning rate 0.1.
train_op = tf.train.GradientDescentOptimizer(1e-1).minimize(cross_entropy)
init_op = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init_op)
    for i in range(10001):
        x_train, y_train = mnist.train.next_batch(100)
        sess.run(train_op, feed_dict={x_ph: x_train, y_ph: y_train})
        if i % 100 == 0:
            # Log loss on the current training batch and on the full test set.
            train_loss = sess.run(cross_entropy, feed_dict={x_ph: x_train, y_ph: y_train})
            test_loss = sess.run(cross_entropy, feed_dict={x_ph: mnist.test.images, y_ph: mnist.test.labels})
            tf.logging.info("Iteration: {0} Training Loss: {1} Test Loss: {2}".format(i, train_loss, test_loss))
    # Final test-set accuracy after training completes.
    test_accuracy = sess.run(accuracy, feed_dict={x_ph: mnist.test.images, y_ph: mnist.test.labels})
    tf.logging.info("Accuracy: {}".format(test_accuracy))
""" Explanation: ここから先は tf.layers を使ったときと全く同じです。 End of explanation """
kingb12/languagemodelRNN
report_notebooks/encdec_noing_250_512_040dr.ipynb
mit
report_file = '/Users/bking/IdeaProjects/LanguageModelRNN/reports/encdec_noing_250_512_040dr_2.json' log_file = '/Users/bking/IdeaProjects/LanguageModelRNN/logs/encdec_noing_250_512_040dr_2.json' import json import matplotlib.pyplot as plt with open(report_file) as f: report = json.loads(f.read()) with open(log_file) as f: logs = json.loads(f.read()) print'Encoder: \n\n', report['architecture']['encoder'] print'Decoder: \n\n', report['architecture']['decoder'] """ Explanation: Encoder-Decoder Analysis Model Architecture End of explanation """ print('Train Perplexity: ', report['train_perplexity']) print('Valid Perplexity: ', report['valid_perplexity']) print('Test Perplexity: ', report['test_perplexity']) """ Explanation: Perplexity on Each Dataset End of explanation """ %matplotlib inline for k in logs.keys(): plt.plot(logs[k][0], logs[k][1], label=str(k) + ' (train)') plt.plot(logs[k][0], logs[k][2], label=str(k) + ' (valid)') plt.title('Loss v. Epoch') plt.xlabel('Epoch') plt.ylabel('Loss') plt.legend() plt.show() """ Explanation: Loss vs. Epoch End of explanation """ %matplotlib inline for k in logs.keys(): plt.plot(logs[k][0], logs[k][3], label=str(k) + ' (train)') plt.plot(logs[k][0], logs[k][4], label=str(k) + ' (valid)') plt.title('Perplexity v. Epoch') plt.xlabel('Epoch') plt.ylabel('Perplexity') plt.legend() plt.show() """ Explanation: Perplexity vs. 
Epoch End of explanation """
# Pretty-print one report sample: the encoder input (padding stripped),
# the model's generation, and the gold target (mask tokens stripped).
def print_sample(sample):
    enc_input = ' '.join([w for w in sample['encoder_input'].split(' ') if w != '<pad>'])
    gold = ' '.join([w for w in sample['gold'].split(' ') if w != '<mask>'])
    print('Input: '+ enc_input + '\n')
    print('Gend: ' + sample['generated'] + '\n')
    print('True: ' + gold + '\n')
    print('\n')

# Dump qualitative samples for each data split.
for sample in report['train_samples']:
    print_sample(sample)
for sample in report['valid_samples']:
    print_sample(sample)
for sample in report['test_samples']:
    print_sample(sample)
""" Explanation: Generations End of explanation """
# NOTE: the remaining report cells use Python 2 print statements.
print 'Overall Score: ', report['bleu']['score'], '\n'
print '1-gram Score: ', report['bleu']['components']['1']
print '2-gram Score: ', report['bleu']['components']['2']
print '3-gram Score: ', report['bleu']['components']['3']
print '4-gram Score: ', report['bleu']['components']['4']
""" Explanation: BLEU Analysis End of explanation """
# N-pairs BLEU: generated-vs-generated and gold-vs-gold pair scores.
npairs_generated = report['n_pairs_bleu_generated']
npairs_gold = report['n_pairs_bleu_gold']
print 'Overall Score (Generated): ', npairs_generated['score'], '\n'
print '1-gram Score: ', npairs_generated['components']['1']
print '2-gram Score: ', npairs_generated['components']['2']
print '3-gram Score: ', npairs_generated['components']['3']
print '4-gram Score: ', npairs_generated['components']['4']
print '\n'
print 'Overall Score: (Gold)', npairs_gold['score'], '\n'
print '1-gram Score: ', npairs_gold['components']['1']
print '2-gram Score: ', npairs_gold['components']['2']
print '3-gram Score: ', npairs_gold['components']['3']
print '4-gram Score: ', npairs_gold['components']['4']
""" Explanation: N-pairs BLEU Analysis This analysis randomly samples 1000 pairs of generations/ground truths and treats them as translations, giving their BLEU score.
We can expect very low scores in the ground truth and high scores can expose hyper-common generations End of explanation """ print 'Average Generated Score: ', report['average_alignment_generated'] print 'Average Gold Score: ', report['average_alignment_gold'] """ Explanation: Alignment Analysis This analysis computes the average Smith-Waterman alignment score for generations, with the same intuition as N-pairs BLEU, in that we expect low scores in the ground truth and hyper-common generations to raise the scores End of explanation """
quantumlib/ReCirq
docs/benchmarks/rabi_oscillations.ipynb
apache-2.0
# @title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 The Cirq Developers End of explanation """ try: import cirq import recirq except ImportError: !pip install -U pip !pip install --quiet cirq !pip install --quiet git+https://github.com/quantumlib/ReCirq import cirq import recirq import numpy as np import cirq_google """ Explanation: Rabi oscillation experiment <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://quantumai.google/cirq/experiments/benchmarks/rabi_oscillations.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/quantumlib/ReCirq/blob/master/docs/benchmarks/rabi_oscillations.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/quantumlib/ReCirq/blob/master/docs/benchmarks/rabi_oscillations.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/ReCirq/docs/benchmarks/rabi_oscillations.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a> </td> </table> End of explanation """ working_device = cirq_google.Sycamore print(working_device) """ Explanation: In this 
experiment, you are going to use Cirq to check that rotating a qubit by an increasing angle, and then measuring the qubit, produces Rabi oscillations. This requires you to do the following things: Prepare the $|0\rangle$ state. Rotate by an angle $\theta$ around the $X$ axis. Measure to see if the result is a 1 or a 0. Repeat steps 1-3 $k$ times. Report the fraction of $\frac{\text{Number of 1's}}{k}$ found in step 3. 1. Getting to know Cirq Cirq emphasizes the details of implementing quantum algorithms on near term devices. For example, when you work on a qubit in Cirq you don't operate on an unspecified qubit that will later be mapped onto a device by a hidden step. Instead, you are always operating on specific qubits at specific locations that you specify. Suppose you are working with a 54 qubit Sycamore chip. This device is included in Cirq by default. It is called cirq_google.Sycamore, and you can see its layout by printing it. End of explanation """ my_qubit = cirq.GridQubit(5, 6) """ Explanation: For this experiment you only need one qubit and you can just pick whichever one you like. End of explanation """ from cirq.contrib.svg import SVGCircuit # Create a circuit with X, Ry(pi/2) and H. my_circuit = cirq.Circuit( # Rotate the qubit pi/2 radians around the X axis. cirq.rx(np.pi / 2).on(my_qubit), # Measure the qubit. cirq.measure(my_qubit, key="out"), ) SVGCircuit(my_circuit) """ Explanation: Once you've chosen your qubit you can build circuits that use it. End of explanation """ sim = cirq.Simulator() samples = sim.sample(my_circuit, repetitions=10) """ Explanation: Now you can simulate sampling from your circuit using cirq.Simulator. 
End of explanation """ state_vector_before_measurement = sim.simulate(my_circuit[:-1]) sampled_state_vector_after_measurement = sim.simulate(my_circuit) print(f"State before measurement:") print(state_vector_before_measurement) print(f"State after measurement:") print(sampled_state_vector_after_measurement) """ Explanation: You can also get properties of the circuit, such as the density matrix of the circuit's output or the state vector just before the terminal measurement. End of explanation """ noisy_sim = cirq.DensityMatrixSimulator(noise=cirq.depolarize(0.1)) noisy_post_measurement_state = noisy_sim.simulate(my_circuit) noisy_pre_measurement_state = noisy_sim.simulate(my_circuit[:-1]) print("Noisy state after measurement:" + str(noisy_post_measurement_state)) print("Noisy state before measurement:" + str(noisy_pre_measurement_state)) """ Explanation: You can also examine the outputs from a noisy environment. For example, an environment where 10% depolarization is applied to each qubit after each operation in the circuit: End of explanation """ import sympy theta = sympy.Symbol("theta") parameterized_circuit = cirq.Circuit( cirq.rx(theta).on(my_qubit), cirq.measure(my_qubit, key="out") ) SVGCircuit(parameterized_circuit) """ Explanation: 2. Parameterized Circuits and Sweeps Now that you have some of the basics end to end, you can create a parameterized circuit that rotates by an angle $\theta$: End of explanation """ sim.sample(parameterized_circuit, params={theta: 2}, repetitions=10) """ Explanation: In the above block you saw that there is a sympy.Symbol that you placed in the circuit. Cirq supports symbolic computation involving circuits. What this means is that when you construct cirq.Circuit objects you can put placeholders in many of the classical control parameters of the circuit which you can fill with values later on. Now if you wanted to use cirq.simulate or cirq.sample with the parameterized circuit you would also need to specify a value for theta. 
End of explanation """ sim.sample(parameterized_circuit, params=[{theta: 0.5}, {theta: np.pi}], repetitions=10) """ Explanation: You can also specify multiple values of theta, and get samples back for each value. End of explanation """ sim.sample( parameterized_circuit, params=cirq.Linspace(theta, start=0, stop=np.pi, length=5), repetitions=5, ) """ Explanation: Cirq has shorthand notation you can use to sweep theta over a range of values. End of explanation """ import pandas big_results = sim.sample( parameterized_circuit, params=cirq.Linspace(theta, start=0, stop=np.pi, length=20), repetitions=10_000, ) # big_results is too big to look at. Plot cross tabulated data instead. pandas.crosstab(big_results.theta, big_results.out).plot() """ Explanation: The result value being returned by sim.sample is a pandas.DataFrame object. Pandas is a common library for working with table data in python. You can use standard pandas methods to analyze and summarize your results. End of explanation """ import datetime from recirq.benchmarks import rabi_oscillations result = rabi_oscillations( sampler=noisy_sim, qubit=my_qubit, num_points=50, repetitions=10000 ) result.plot() """ Explanation: 3. The ReCirq experiment ReCirq comes with a pre-written Rabi oscillation experiment recirq.benchmarks.rabi_oscillations, which performs the steps outlined at the start of this tutorial to create a circuit that exhibits Rabi Oscillations or Rabi Cycles. This method takes a cirq.Sampler, which could be a simulator or a network connection to real hardware, as well as a qubit to test and two iteration parameters, num_points and repetitions. It then runs repetitions many experiments on the provided sampler, where each experiment is a circuit that rotates the chosen qubit by some $\theta$ Rabi angle around the $X$ axis (by applying an exponentiated $X$ gate). The result is a sequence of the expected probabilities of the chosen qubit at each of the Rabi angles. 
End of explanation """ import hashlib class SecretNoiseModel(cirq.NoiseModel): def noisy_operation(self, op): # Hey! No peeking! q = op.qubits[0] v = hashlib.sha256(str(q).encode()).digest()[0] / 256 yield cirq.depolarize(v).on(q) yield op secret_noise_sampler = cirq.DensityMatrixSimulator(noise=SecretNoiseModel()) q = cirq_google.Sycamore.qubits[3] print("qubit", repr(q)) rabi_oscillations(sampler=secret_noise_sampler, qubit=q).plot() """ Explanation: Notice that you can tell from the plot that you used the noisy simulator you defined earlier. You can also tell that the amount of depolarization is roughly 10%. 4. Exercise: Find the best qubit As you have seen, you can use Cirq to perform a Rabi oscillation experiment. You can either make the experiment yourself out of the basic pieces made available by Cirq, or use the prebuilt experiment method. Now you're going to put this knowledge to the test. There is some amount of depolarizing noise on each qubit. Your goal is to characterize every qubit from the Sycamore chip using a Rabi oscillation experiment, and find the qubit with the lowest noise according to the secret noise model. End of explanation """
tOverney/ADA-Project
preprocessing/process_path.ipynb
apache-2.0
# Column names for the pre-processed timetable CSVs loaded below.
columns = [
    'agency_id', 'service_date_id', 'service_date_date', 'route_id',
    'route_short_name', 'route_long_name', 'trip_id', 'trip_headsign',
    'trip_short_name', 'stop_time_id', 'stop_time_arrival_time',
    'stop_time_departure_time', 'stop_time_stop_sequence', 'stop_id',
    'stop_stop_id', 'stop_name', 'capacity_path_id', 'capacity_path_path',
    'capacity_capacity_id', 'capacity_capacity_capacity1st',
    'capacity_capacity_capacity2nd'
]
in_dir = "in_data/"
out_dir = "out_data/"
"""
Explanation: End of explanation
"""
# Mean Earth radius in km; compute_distance returns metres.
R = 6373.0

# Compute the distances between two (lat,lng)
def compute_distance(lat1, lon1, lat2, lon2):
    """Great-circle (haversine) distance between two lat/lng points, in metres."""
    lat1 = radians(lat1)
    lon1 = radians(lon1)
    lat2 = radians(lat2)
    lon2 = radians(lon2)
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
    return (R * c)*1000

max_depth = 20
max_queue = 100000

def bfs(graph, start, end, max_depth=max_depth):
    """Shortest path from start to end in an adjacency-list graph via breadth-first search.

    Returns the path as a list of nodes, [] when the search is cut off by the
    max_depth / max_queue safety limits, and [-1] when the graph is exhausted
    without reaching end (historical sentinel, kept for caller compatibility).
    """
    from collections import deque
    queue = deque([[start]])
    # Visited set: without it BFS re-enqueues the same nodes over and over,
    # which is what made the original search blow up on long paths.
    seen = {start}
    while queue:
        if len(queue) > max_queue:
            return []
        path = queue.popleft()
        if len(path) > max_depth:
            return []
        node = path[-1]
        if node == end:
            return path
        for adjacent in graph.get(node, []):
            if adjacent not in seen:
                seen.add(adjacent)
                queue.append(path + [adjacent])
    return [-1]
"""
Explanation: Helper
End of explanation
"""
# Map edge_id -> geojson feature (the rail-segment geometry).
with open(in_dir + 'edges.geojson') as file:
    edgeid2feature = {}
    data1 = json.load(file)
    for feature in data1['features']:
        edgeid2feature[feature['properties']['edge_id']] = feature
"""
Explanation: Processing Preparation First, we create some dictionaries. We begin by creating the relation to go from edge_id to the feature of the edge (meaning the path in geojson format).
End of explanation
"""
# Map station_id -> geojson feature (we mostly use the coordinates).
with open(in_dir + 'stations.geojson') as file:
    stopid2coord = {}
    data2 = json.load(file)
    for feature in data2['features']:
        stopid2coord[feature['properties']['station_id']] = feature
"""
Explanation: Then, we do the same to map the station id to the features of station.
We are mainly interested in the coordinates of the station but we keep all of them.
End of explanation
"""
max_d = 5  # metres: endpoints closer than this are considered connected
edgeid2edgeid = {}
for edge_id1, feature1 in tqdm(edgeid2feature.items()):
    edge_start_lat1 = feature1['geometry']['coordinates'][0][1]
    edge_start_lng1 = feature1['geometry']['coordinates'][0][0]
    edge_end_lat1 = feature1['geometry']['coordinates'][-1][1]
    edge_end_lng1 = feature1['geometry']['coordinates'][-1][0]
    edges = []
    for edge_id2, feature2 in edgeid2feature.items():
        if edge_id2 != edge_id1:
            edge_start_lat2 = feature2['geometry']['coordinates'][0][1]
            edge_start_lng2 = feature2['geometry']['coordinates'][0][0]
            edge_end_lat2 = feature2['geometry']['coordinates'][-1][1]
            edge_end_lng2 = feature2['geometry']['coordinates'][-1][0]
            # Compare every combination of start/end endpoints of the two edges.
            d1 = compute_distance(edge_start_lat1, edge_start_lng1, edge_start_lat2, edge_start_lng2)
            d2 = compute_distance(edge_start_lat1, edge_start_lng1, edge_end_lat2, edge_end_lng2)
            d3 = compute_distance(edge_end_lat1, edge_end_lng1, edge_start_lat2, edge_start_lng2)
            d4 = compute_distance(edge_end_lat1, edge_end_lng1, edge_end_lat2, edge_end_lng2)
            if d1 < max_d or d2 < max_d or d3 < max_d or d4 < max_d:
                if (min(d1,d2,d3,d4) > 0.5):
                    # Log suspicious junctions that are "close" but not coincident.
                    print(edge_id1, min(d1,d2,d3,d4))
                edges.append(edge_id2)
    if not edges:
        print(edge_id1)  # isolated edge: no neighbour found
    edgeid2edgeid[edge_id1] = edges
"""
Explanation: We need to create a map of edge id to list of edge ids. Indeed, to find the path we first need to know which edges are adjacent to each other. To do this, we look independently at the start and end of each segment and look for paths whose end or start geographic position is close to the one we are interested in.
End of explanation
"""
stopid2edgeid = {}
for stop_id, feature in tqdm(stopid2coord.items()):
    stop_lat = feature['geometry']['coordinates'][1]
    stop_lng = feature['geometry']['coordinates'][0]
    edges = []
    # Renamed the inner loop variable so it no longer shadows the outer `feature`.
    for edge_id, edge_feature in edgeid2feature.items():
        edge_start_lat = edge_feature['geometry']['coordinates'][0][1]
        edge_start_lng = edge_feature['geometry']['coordinates'][0][0]
        edge_end_lat = edge_feature['geometry']['coordinates'][-1][1]
        edge_end_lng = edge_feature['geometry']['coordinates'][-1][0]
        d_start = compute_distance(stop_lat, stop_lng, edge_start_lat, edge_start_lng)
        d_end = compute_distance(stop_lat, stop_lng, edge_end_lat, edge_end_lng)
        if d_start < max_d or d_end < max_d:
            if (min(d_start,d_end) > 0.5):
                # BUG FIX: previously printed edge_id1, a stale variable left over
                # from the edgeid2edgeid loop above.
                print(stop_id, min(d_start, d_end))
            edges.append(edge_id)
    if not edges:
        print("Error", stop_id)
    stopid2edgeid[stop_id] = edges

# Invert the mapping: edge_id -> list of station ids touching that edge.
edgeid2stopid = {}
for stopid, edgeids in tqdm(stopid2edgeid.items()):
    for edgeid in edgeids:
        if edgeid in edgeid2stopid:
            edgeid2stopid[edgeid].append(stopid)
        else:
            edgeid2stopid[edgeid] = [stopid]
"""
Explanation: Finally, we find the edges going "out" of each station, also by looking at the latitude and longitude of the station and of the edges.
End of explanation
"""
dates = ['2017-01-30','2017-01-31','2017-02-01','2017-02-02','2017-02-03','2017-02-04','2017-02-05']
df = pd.concat([pd.read_csv(out_dir + date + '_processed.csv', index_col=0) for date in dates])
df.columns = columns
grouped = df.groupby(['trip_id', 'service_date_id'])
len(df)
"""
Explanation: Processing We import the data from the csv and do a groupby based on the trip id and service date id. We have to group by service date as we are processing all the data in one go.
End of explanation
"""
# Collect every unordered pair of consecutive stations over all trips.
keys = set()
for name, group in tqdm(grouped, desc="Trips"):
    trip = group.sort_values(['stop_time_stop_sequence'])
    rows = trip.iterrows()
    last_index, last_stop = next(rows)
    for next_index, next_stop in rows:
        stop_1 = str(last_stop.stop_stop_id)
        stop_2 = str(next_stop.stop_stop_id)
        if (stop_1, stop_2) not in keys and (stop_2, stop_1) not in keys:
            keys.add((stop_1, stop_2))
        last_index, last_stop = (next_index, next_stop)
print(len(keys))
"""
Explanation: Prepare keys We create a set of pairs composed of stations where each pair represents a part of a trip. We use a set to avoid having duplicates.
End of explanation
"""
trips_by_station_id = {}
for key in tqdm(keys):
    stop_1 = key[0]
    stop_2 = key[1]
    if stop_1 != stop_2:
        if key not in trips_by_station_id and stop_1 in stopid2edgeid and stop_2 in stopid2edgeid:
            start = sorted(stopid2edgeid[stop_1])
            end = sorted(stopid2edgeid[stop_2])
            for s in start:
                for e in end:
                    r = bfs(edgeid2edgeid, s, e)
                    # NOTE(review): r can be [] (search cut off) or [-1] (no path);
                    # once such a sentinel is stored it is never replaced by a longer
                    # real path. Kept as-is: the manual `new` table below patches the
                    # pairs that fail here.
                    if key not in trips_by_station_id or (r and len(trips_by_station_id[key]) > len(r)):
                        trips_by_station_id[key] = r
                        print(key, r)
        else:
            print(key, "Error",)

# BUG FIX: the dump file was previously opened inline and never closed.
with open(out_dir + "path_trips_by_station_id.dump", 'wb') as dump_file:
    pickle.dump(trips_by_station_id, dump_file, protocol=2)
"""
Explanation: Find all pairs of stations and their path Now, the real work begins. We use a breadth first search algorithm to find the path between each station. The full algo goes like this: Find the two station id (stop_id). Verify that the path for the pair has not been already calculated Find every path going out from each station using the previously calculated map For each pair of edges, run the BFS and insert if found a path If find a path shorter than previously found, update It takes some time, so be patient.
End of explanation
"""
sum([1 for k, v in trips_by_station_id.items() if not v])
"""
Explanation: Unfortunately, some paths are way too long to be found using the algorithm in a decent amount of time.
End of explanation
"""
# Hand-built paths for the station pairs the automated search could not solve.
new = {
    ('8500010', '8503000'): [584, 404, 403, 346, 369, 445, 1921, 446, 1973, 447, 448, 449, 450, 451, 452, 1945, 371, 591, 592, 455, 1960, 456, 2002, 2407, 2373, 2374, 2375, 2377, 2376, 2414, 2423, 2415, 2396, 2395, 2379, 2389, 2400, 2401, 2372, 2340, 2339, 2338, 2337, 2224, 2225, 2226, 2227, 2228,2303, 2243, 2250, 2287, 2246, 2245],
    ('8500218', '8503000'): [350,358,357,360,359,1984,376,424,423,426,425,210,106,1959,2406,2405,2412,2378,2389,2400,2401,2372,2340,2260,2368,2259,2249,2308,2309,2252,2299],
    ('8501506', '8501300'): [234,236,233,232,237,228,412,411,1944,220,222,409,408,1360,273,221,223,226,224,225],
    ('8502113', '8503000'): [1984, 376, 424, 423, 426, 425, 210, 106, 1959, 105, 2406, 2405, 2412,2378,2389,2400,2401,2372,2340,2260,2368,2259,2249,2308,2309,2252,2299],
    ('8502119', '8503001'): [424, 423, 426, 425, 210, 106, 1959, 105, 2406, 2405, 2412,2378,2389,2400,2401,2372,2340,2260,2368,2259,2249,2308,2309,2252,2299],
    ('8502202', '8503000'): [428, 373, 504, 505, 157, 156, 155, 154, 366, 367, 161, 502, 503, 165, 2001, 2000, 1999, 162, 1998, 163, 164, 142, 1991, 2099, 2356, 2229],
    ('8502204', '8503000'): [161, 502, 503, 165, 2001, 2000, 1999, 162, 1998, 163, 164, 142, 1991, 2099, 2356, 2229],
    ('8503000', '8503003'): [2318,2321,1992,384,1993],
    ('8503000', '8503020'): [2218,2317,2320,2241,2315,2313,2234],
    ('8503000', '8503424'): [2310,2301,2248,2303,2267,2220,2175,2137,2352,2149,2136,2146,2155,2123,2112,2163,2111,458,457,595,855,459,460,461,594,593,463,462,386,1936,483,1334,1333],
    ('8503000', '8509002'): [2324, 2325, 2236, 2237, 2359, 2238, 2239, 2240, 144, 145, 1995, 146, 143, 147, 142, 140, 139, 138, 141, 611, 612, 124, 123, 613, 614, 604, 605, 125, 122, 113, 129, 128, 617, 618, 632, 633, 127, 126, 136, 137, 130, 133, 134, 132, 629, 630, 135, 131],
    ('8503001', '8502105'): [2419, 2423, 2414, 2376, 2369, 1997, 2406, 105, 1959, 106, 210],
    ('8503001', '8502220'): [2362, 2331, 2351],
    ('8503001', '8503000'): [2332, 2349, 2367, 2366, 2347, 2242, 2314, 2313, 2334, 2313, 2315, 2316, 2317, 2261, 2249,2308, 2311, 2290, 2251, 2285],
    ('8503016', '8503000'): [212, 2133, 2145, 2174, 2172, 2135, 2138, 2146, 2354, 2143, 2137, 2175, 2220,2267, 2303, 2243, 2251, 2291, 2293],
    ('8503202', '8503000'): [142, 1991, 2099, 2356, 2229],
    ('8503206', '8503000'): [141, 138, 139, 140, 142, 1991, 2099, 2356, 2229],
    ('8503504', '8503000'): [1960, 456, 2002, 2002, 2407, 2373, 2374, 2375, 2377, 2376, 2414, 2423, 2415, 2396, 2395, 2379, 2389, 2400, 2401, 2372, 2340, 2339, 2338, 2337, 2224, 2225, 2226, 2227, 2228,2303, 2243, 2250, 2287, 2246, 2245],
    ('8503508', '8503001'): [2387, 2388, 2397, 2403, 2425, 2426],
    ('8503509', '8503001'): [2409],
    ('8509411', '8503000'): [132, 134, 133, 130, 137, 136, 126, 127, 633, 632, 618, 617, 128, 129, 113, 122,125, 605, 604, 614, 613, 123, 124, 612, 611, 141, 138, 139, 140, 142,142, 1991, 2099, 2356, 2229],
    #('8503506', '8516219'): [2405, 2406, 105],
    ('8507000', '8503000'): [1300, 1303, 1302, 356, 396, 107, 1952, 1954, 1956, 1957, 203, 59, 363, 362, 350, 358, 357,360, 359, 1984, 376, 424, 423, 426, 425, 210, 106, 1959, 105, 2406, 2406, 2405, 2412,2378,2389,2400,2401,2372,2340,2260,2368,2259,2249,2308,2309,2252,2299],
    ('8500010', '8500309'): [584,404,403,346,368,347,345,348,344,566,565,349,568,567,358,357,360,359,1984,376,394,546,453,454],
    ('8500113', '8500010'): [545,176,179,177,178,856,272,1324,584],
    ('8500207', '8500218'): [33,34,35,1377,415,36,2200,37,38,441,442,443,444,362],
    ('8500207', '8504300'): [430,429,561,2216,560,31,30,29,353,27,26,2214,25,24,16],
    ('8500218', '8500010'): [567,568,349,565,566,344,348,345,347,368,369,346,403,404,584],
    ('8500218', '8500023'): [350,567,568,349,565,566,344,348,345],
    ('8500218', '8505000'): [362, 363, 196, 375, 422, 421, 487, 488, 489, 490, 492, 491, 495, 494, 493, 496, 497, 498, 499 ,2004, 501, 500, 419, 535, 534, 148],
    ('8501008', '8501030'): [175,174,173,166,172,167,171,168,170,169,95],
    ('8501037', '8501008'): [397,101,102,100,103,99,104,95,169,170,168,171,167,172,166,173,174,175],
    ('8501120', '8501008'): [2087,116,1362,361,97,98,96,398,397,101,102,100,103,99,104,95,169,170,168,171,167,172,166,173,174,175],
    ('8501120', '8501103'): [2087,116,1362, 389,390,391,587,267,392,549,393,1,572,573],
    ('8501120', '8504200'): [2087,116,1362, 389,390,391,587,115,697,698,696,695,548],
    ('8501200', '8501120'): [581,580,64,65,66,60,61,62,63,94],
    ('8501300', '8501120'): [110,112,111,109,581,580,64,65,66,60,61,62,63,94],
    ('8501400', '8501120'): [221,223,226,224,225,110,112,111,109,581,580,64,65,66,60,61,62,63,94],
    ('8501400', '8501500'): [273,1360,408,409,222,220,1944,411],
    ('8501605', '8507483'): [1906,2081,2082,1907,275,218,219,1942,254],
    ('8501609', '8501506'): [227,1906,1905,240,239,238,230,229,231,235],
    ('8502009', '8500218'): [496,493,494,495,491,492,490,489,488,487,421,422,196,363,362],
    ('8502119', '8503000'): [426, 425, 210, 106, 1959, 105, 2406, 2405, 2412,2378,2389,2400,2401,2372,2340,2260,2368,2259,2249,2308,2309,2252,2299],
    ('8502204', '8503202'): [161,502,503,165,2001,2000,1999,162,1998,163,164,142],
    ('8502205', '8502204'): [2020,1919,509,508,388,1918,2018,387,367],
    ('8502206', '8503010'): [165,2001,2000,1999,162,1998,163,164,142,147,143,146,1995,145],
    ('8503003', '8503104'): [385, 1994, 1647, 1669, 1670, 1671, 1672, 1673, 1674],
    ('8503006', '8503007'): [2155],
    ('8503006', '8503016'): [2155, 2138, 2135, 2172, 2174, 2145, 2133, 212],
    ##('8503006', '8503310')
    ('8503006', '8503340'): [2113, 2140, 2142, 2141, 2139, 2135, 2172, 2174],
    ('8503006', '8503526'): [2113, 2107, 2106, 2110, 2177, 2104, 2105, 850, 1609],
    ('8503008', '8503006'): [850, 2105, 2103, 2178, 2110, 2106, 2154],
    ('8503020', '8503006'): [2366, 2347, 2346, 2342, 2365, 2333, 93, 2108, 2102, 2154, 2106],
    ('8503209', '8509411'): [605, 125, 122, 113, 129, 128, 617, 618, 632, 633, 127, 126, 136, 137, 130, 133, 134, 132,],
    ('8503400', '8503006'): [459,855,457,458,2162,2115,2140],
    ('8503424', '8503400'): [1332,1333,1334,483,1936,386,462,463,593,594,461,460],
    ('8503504', '8503508'): [1960,456,2002,2407,2380,2381,2382,2379,2389],
    ('8503505', '8503508'): [456,2002,2407,2380,2381,2382,2379,2389],
    ('8503506', '8503508'): [2417,2418,2390],
    ('8503508', '8502119'): [2389,2378,2412,2405,2406,1959,106,210,425,426],
    ('8503508', '8503512'): [2391,2389,2378,2412,2405,2406,1959,106,210,425,426],
    ('8504014', '8504100'): [569,570,89,88,1985,2096,80,80,74,79,75,78,76,77],
    ('8504100', '8501120'): [77,76,78,75,79,74,81,80,2096,1985,88,89,570,569,86,87,579,578,85,84,82,83,94],
    ('8504200', '8501037'): [573,572,1,393,549,392,267,587,391,390,389,1362, 361,97,98,96,],
    ('8504200', '8501118'): [573,572,1,393,549,392,267,587,391,390,389,1362],
    ('8504221', '8504200'): [554,553,8,552,551,7,6,5,4,3,2,644,645],
    ('8504300', '8504221'): [563,562,15,14,13,12,11,10,9,577,576],
    ('8504414', '8507000'): [20,21,22,23,364,274,1903,1902,643,1297,1298,1299,1300],
    ('8505000', '8502007'): [534,535,500,501,2004,499,498,497,496,493,494],
    ('8505000', '8502009'): [534,535,500,501,2004,499,498,497],
    ('8505000', '8502202'): [534,535,395,158,160,159,427],
    ('8505000', '8502204'): [534,535,395,158,160,159,427,428,373,504,505,157,156,155,154,366,367],
    ('8505000', '8505004'): [534,1330,1329,1851,1850,1849,1848,440,506],
    ('8505004', '8502204'): [2022,507,2020,1919,509,508,388,2018,387,367],
    ('8505004', '8505112'): [510,511,512,513,2023,2029,2027,2034,2032,514,2026,2025],
    ('8505007', '8505112'): [2025,2026,514,2032,2034,2027,2029,2023,513],
    ('8505112', '8505213'): [515],
    ('8505114', '8505119'): [2037,2038,2039,2040,2036,2035,2048,2041,2042,2043,2044,2045,517],
    ('8505213', '8505004'): [510,511,512,513,2023,2029,2027,2034,2032,514,2026,2025,515,516,2037,2038,2039,2040,2036,2035,2048,2041,2042,2043,2044,2045,517,277,2046,2047,518,1917,2049,2050,2051,2052,2054,2053,1965,1966,2055,2058,2059,2056,1962,2057,519,520,2061,521],
    ('8506000', '8503003'): [600, 603, 602, 330, 329, 92, 377, 1311, 1310, 383, 1993],
    ('8506000', '8506302'): [620,619,343,340,624,623,339,338,341,622,621,336,337,342,335,331,333,334,332,379,378,372,601],
    ('8506105', '8014586'): [473,479,480,481,482,484,485,486,596,597],
    ('8506206', '8506000'): [336,337,342,335,331,333,334,332,379,378,372,601,],
    ('8507000', '8500010'): [1300,1299,1298,1297,643,1902,1903,274,364,23,22,21,20,19,413,18,17,24,25,2214,26,27,28,558,559,32,264,545,176,179,177,178,856,272,1324,584,],
    ('8507000', '8500218'): [1300,1303,1302,356,396,107,1952,1954,1956,1957,203,59,363,362,],
    ('8507000', '8502001'): [1300,1303,1302,356,396,107,1952,1954,1956,1957,203,374,422,421],
    ('8507000', '8504100'): [1301,324,1363,72,73,71,68,69,70,67,1327,575,574],
    ('8507000', '8507493'): [1300,1302,356,352,323,322,317,318,319,320,582,434,433,321,255,254,243,245,244,246],
    ('8507100', '8507000'): [433,434,582,320,319,318,317,322,323,352,356,1302,1303,1300],
    ('8507478', '8507475'): [1950,2072,2076,2075,2073,1949,2074,2078,2077,1596,2079],
    ('8507483', '8501609'): [254,1942,219,218,275,2082,1906,227],
    ('8508005', '8507000'): [786,432,207,199,351,365,364,274,1903,1902,643,1297,1298,1299,1300],
    ('8516219', '8503508'): [2406,2405,2412,2378,2389]
}

trips_by_station_id.update(new)
sum([1 for k, v in trips_by_station_id.items() if not v])
"""
Explanation: So we rely on manual inspection and complete the set by hand.
End of explanation
"""
paths = {}
for name, group in tqdm(grouped, desc="Trips"):
    trip = group.sort_values(['stop_time_stop_sequence'])
    rows = list(trip.iterrows())
    last_index, last_stop = rows[0]
    for next_index, next_stop in rows[1:]:
        stop_1 = str(last_stop.stop_stop_id)
        stop_2 = str(next_stop.stop_stop_id)
        key1 = (stop_1, stop_2)
        key2 = (stop_2, stop_1)
        key_full = (name[0], last_stop.stop_id)
        path = None
        if key1 in trips_by_station_id:
            path = trips_by_station_id[key1]
        elif key2 in trips_by_station_id:
            # BUG FIX: take a reversed COPY. The previous in-place
            # path.reverse() mutated the shared trips_by_station_id entry,
            # flipping it again every time the same pair reappeared.
            path = trips_by_station_id[key2][::-1]
        if (key_full not in paths) or (path and len(paths[key_full]) > len(path)):
            paths[key_full] = path
        last_index, last_stop = (next_index, next_stop)

# BUG FIX: the dump file was previously opened inline and never closed.
with open(out_dir + "paths.dump", 'wb') as dump_file:
    pickle.dump(paths, dump_file, protocol=2)
"""
Explanation: Finally, we go through each stop sequence for every trip. We can now determine the path for each of the elements and create a dictionary that can be imported in the database.
End of explanation
"""
rjenc29/numerical
notebooks/principal_component_analysis.ipynb
mit
from sklearn.datasets import load_iris import pandas as pd import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from pprint import pprint import matplotlib.pyplot as plt import matplotlib %matplotlib inline import ipywidgets as widgets from scipy.optimize import fmin import seaborn as sns sns.set() matplotlib.rcParams['figure.figsize'] = (16, 8) """ Explanation: Principal Component Analysis TL;DR This notebook provides an overview of Principal Component Analysis and its application. End of explanation """ data_set = load_iris() data = data_set.data target = data_set.target df = pd.DataFrame(np.array(data), columns=data_set.feature_names) df['species'] = data_set.target_names[target] df.head(10) """ Explanation: Principal Component Analysis is fuundamentally a mechanism to reduce the dimensionality of large datasets will minimising loss of information. There are a number of applications of PCA by extension - classification / noise filtration / visualisation and more. To build an intuition for how / why PCA works, we're going to use the IRIS dataset, which comprises a collection of measurements of petal and sepal widths and lengths along with which category each measured plant belongs to. There are many excellent tutorials on applying PCA to the IRIS dataset an unsupervised classification model; we're going to instead use the data to try to build some intuition about how and why PCA works. Let's take a look at the data. End of explanation """ def demean(series): return series - series.mean() demeaned_df = df[data_set.feature_names].apply(demean) demeaned_df.head() df.var() demeaned_df.var() """ Explanation: Data pre-processing: de-meaning The first step we're going to take is to pre-process the data by making it mean-centred. We'll come back to why this is necessary (and it is) but for now, let's look at how to achieve it and verify that doesn't affect the variance of our dataset in any way. 
End of explanation """ axes = plt.gca() axes.set_ylim([-4, 4]) axes.set_xlim([-4, 4]) plt.gca().set_aspect('equal', adjustable='box') p_x = demeaned_df['petal length (cm)'] p_y = demeaned_df['petal width (cm)'] plt.scatter(p_x, p_y, alpha = 0.4, s=50) plt.xlabel('petal length (cm)') plt.ylabel('petal width (cm)') """ Explanation: Visualising the input data It's much easier to build an intuition for PCA when working with 2 dimensions. So we'll extract the petal measurements from the mean-centred data and plot one against the other. End of explanation """ def plot_line(angle_in_degrees): # original data plt.scatter(p_x, p_y, alpha = 0.4, s=50) # our current fitted line m = np.tan(np.pi * angle_in_degrees / 360) x = np.linspace(-4, 4, 3) y = m * x plt.plot(x, y, 'r--') # perpendicular lines between the original data and the # current fitted line p_x_line = (p_x + m * p_y) / (m*m + 1) p_y_line = m * p_x_line for idx in range(len(p_x)): plt.plot([p_x[idx], p_x_line[idx]], [p_y[idx], p_y_line[idx]], color='g', alpha=0.1) # average sq distance from origin of perp line intercepts # i.e. the points where the green line touches the dashed red line var = np.mean(np.power(p_x_line, 2) + np.power(p_y_line, 2)) plt.gca().set_aspect('equal', adjustable='box') axes = plt.gca() axes.set_ylim([-4, 4]) axes.set_xlim([-4, 4]) plt.title('Variance {0:.4f}'.format(var)) plt.xlabel('petal length (cm)') plt.ylabel('petal width (cm)') plt.show() plot_line(85) #static plot for arbitrarty slope angle """ Explanation: Fitting a line (hyperplane) to the input data There would appear to be an approximately linear relationship between petal length and width, which is intuitively reasonable. In the plot below, we additionally draw perpendicular lines (in green) from each data point back to the hyperplane. 
End of explanation """ widgets.interact(plot_line, angle_in_degrees=widgets.FloatSlider(min=0, max=360, step=1, value=85)) """ Explanation: We introduced a quantity called variance: python var = np.mean(np.power(p_x_line, 2) + np.power(p_y_line, 2)) If we define variance in the general sense for a discrete dataset as: $$ Var(x) = \frac{1}{n} \sum_{n=1}^{n} (x_i−\mu)^2 $$ Noting that $\mu$ is zero for our de-meaned data set, and that - by Pythogoras - our $x_i$ values are the hypotenuse lengths of triangles with sides p_x_line and p_y_line, we have: $$ Var(x) = \frac{1}{n} \sum_{n=1}^{n} (x_i-0)^2 $$ $$ Var(x) = \frac{1}{n} \sum_{n=1}^{n} (x_i)^2 $$ $$ Var(x) = \frac{1}{n} \sum_{n=1}^{n} ((p.x.line_i)^2 + (p.y.line_i)^2)$$ Interactively changing the orientation of the hyperplane We could try to fit a stright line through the data as a means of generalising the petal width / length relationship. There are clearly inifinitely many solutions, but certain solutions have interesting properties. Try changing the slope of the line in the interactive plot below. As you change the angle of the line: Make a note of the plot title (variance) Take a look at the green lines End of explanation """ def get_variance(angle_in_degrees): x = p_x y = p_y # our current fitted line m = np.tan(np.pi * angle_in_degrees / 360) y = m * x # perpendicular lines between the original data and the # current fitted line p_x_line = (p_x + m * p_y) / (m*m + 1) p_y_line = m * p_x_line # average sq distance from origin of perp line intercepts # i.e. the points where the green line touches the dashed red line var = np.mean(np.power(p_x_line, 2) + np.power(p_y_line, 2)) return var df = pd.DataFrame({'angle': range(361)}) df['variance'] = df.angle.apply(get_variance) df = df.set_index('angle') df.plot() plt.xlabel('angle (degrees))') plt.ylabel('variance') """ Explanation: As you vary the slope of the line, you should find that maximal variance is found at about 45 degrees. 
Minimal variance is around 225 degrees - i.e. a line which is orthogonal to the line of maximum variance. The values were about 3.63 and 0.036 respectively. Fast-forwarding a little, these are the 'explained variances' which a fitted PCA model returns. ```python petal_data = demeaned_df[['petal length (cm)', 'petal width (cm)']].values pca = PCA().fit(petal_data) pca.explained_variance_ array([ 3.63497866, 0.03597779]) ``` Programattically changing the orientation of the hyperplane Let us programmatically vary the slope of the line and build a plot explained variance as a funtion of angle. End of explanation """ angle = fmin(lambda a: -1 * get_variance(a), 50) var = get_variance(angle) print('\nVariance: {0:.5f} obtained at angle: {1:.3f} degrees'.format(var, angle[0])) angle = fmin(get_variance, 200) var = get_variance(angle) print('\nVariance: {0:.5f} obtained at angle: {1:.3f} degrees'.format(var, angle[0])) """ Explanation: We can use a solver to find the maxima and minima, which should correspond with our previous findings. End of explanation """ petal_data = demeaned_df[['petal length (cm)', 'petal width (cm)']].values n = len(petal_data) cov = 1 / (n - 1) * petal_data.T @ petal_data cov """ Explanation: Solving analytically In some ways, PCA provides us with an analytic mechanism for doing exactly what we did above. The above procedure is perfectly valid and tractible for problems with 2 dimensions and small amounts of data. But there are a number of analytic solutions to the problem which scale well and the above is intended just for building intuition. What we've discovered so far is that (for our petal dataset) there exists exactly one axis which, when data points are projected onto it, exhibits maximal variance. This is in fact our first Principal Component. So we need an analytic approach to decompose the covariance of our data points and recover the principal axes. 
The elements of a covariance matrix are given by: $$ \sigma_{jk} = \frac{1}{n-1} \sum_{n=1}^{n} (x_{ij}−\overline{x}j)(x{ik}−\overline{x}_k) $$ In matrix notation: $$ cov = \frac{1}{n-1} ((X−\overline{x})^T(X−\overline{x})) $$ As we've already de-meaned our data, our covariance matrix is given by: End of explanation """ cov = np.cov(petal_data.T) cov """ Explanation: We can obtain this using numpy directly: End of explanation """ eigenvalues, eigenvectors = np.linalg.eigh(cov) eigenvalues eigenvectors """ Explanation: TODO : add stuff about maximising variance in matrix form The eigenvalues and corresponding vectors (organised in ascending eigenvalue order): End of explanation """ n # number of data points factor = (n - 1) /n (factor * eigenvalues)[::-1] # apply factor and flip the order """ Explanation: The eigenvalues look very close to the variance minimum and maximum we found earlier. In fact, they're very closely related - the returned eigenvalues are just scaled differently. Recall that we previously wrote down: ```python petal_data = demeaned_df[['petal length (cm)', 'petal width (cm)']].values pca = PCA().fit(petal_data) pca.explained_variance_ array([ 3.63497866, 0.03597779]) ``` End of explanation """ eigenvectors[:, -1] """ Explanation: Interpretation of eigenvectors So what can we make of the eigenvectors? The eigenvector corresponding to the largest eigenvalue is: End of explanation """ plt.scatter(p_x, p_y, alpha=0.4) # slope m = eigenvectors[:, -1][1]/eigenvectors[:, -1][0] e_x = np.linspace(-4, 4, 3) e_y = m * e_x plt.plot(e_x, e_y, 'r--') plt.gca().set_aspect('equal', adjustable='box') axes = plt.gca() axes.set_ylim([-4, 4]) axes.set_xlim([-4, 4]) plt.xlabel('petal length (cm)') plt.ylabel('petal width (cm)') """ Explanation: If we plot this over out original data, we can visualise this as the first principal component - i.e. the axis which explains maximal variance. 
End of explanation """ angle = np.arctan(eigenvectors[:, -1][1]/eigenvectors[:, -1][0])*360/np.pi print('Angle implied by first eigenvector: {0:.3f} degrees'.format(angle)) """ Explanation: We can check the angle implied by the first Principal Component against the value we solved for previously. End of explanation """ plt.scatter(p_x, p_y, alpha=0.4) # slope m1 = eigenvectors[:, -1][1]/eigenvectors[:, -1][0] m2 = eigenvectors[:, 0][1]/eigenvectors[:, 0][0] e_x1 = np.linspace(-3, 3, 3) e_y1 = m1 * e_x1 e_x2 = np.linspace(-0.3, 0.3, 3) e_y2 = m2 * e_x2 plt.plot(e_x1, e_y1, 'r--') plt.plot(e_x2, e_y2, 'r--') plt.gca().set_aspect('equal', adjustable='box') axes = plt.gca() axes.set_ylim([-4, 4]) axes.set_xlim([-4, 4]) plt.xlabel('petal length (cm)') plt.ylabel('petal width (cm)') """ Explanation: We can trivially add the second eigenvector, which is orthogonal to the first and in fact the only other Principal Component that our two dimensional data has. This gives us a new coordinate system whereby the axes are orthogonal to eath other and the variance of the data is maximal on the first axis. End of explanation """ transformed_data = petal_data @ eigenvectors df_trans = pd.DataFrame(transformed_data, columns=['pc2', 'pc1']) df_trans.head() """ Explanation: We can use the eigenvectors to transform our original data into our new coordinate space: End of explanation """ petal_df = demeaned_df[['petal length (cm)', 'petal width (cm)']].copy() petal_df.head() """ Explanation: These new features are in fact just linear combinations of our original features. We can show this as follows. 
Recall our original data (demeaned): End of explanation """ eigenvectors[:, -1] """ Explanation: The eigenvector corresponding to the largest eigenvalue was: End of explanation """ petal_df['new_qty'] = -0.92154695 * petal_df['petal length (cm)'] - 0.38826694 * petal_df['petal width (cm)'] petal_df.head() """ Explanation: So instead of recording petal width and length, suppose we had recorded a quantity: (-0.9215469 multiplied by length) + (-0.3882669 multiplied by width) End of explanation """ np.allclose(df_trans.pc1, petal_df.new_qty) """ Explanation: As follows, we can prove that pc1 data exactly tallies with new_qty End of explanation """ scaled_eigenvalues = eigenvalues * (n - 1) / n scaled_eigenvalues scaled_eigenvalues / sum(scaled_eigenvalues) """ Explanation: The 'new_qty' is often called a 'score' and it would be normal to call the transformed values 'scores' - i.e. the values which each data point corresponds to in the new Principal Component space. Interpretation of eigenvalues So what this means is that if we'd recorded the synthetic quantity (-0.9215469 multiplied by length) + (-0.3882669 multiplied by width), then we'd have one collection of data points which almost completely represents the information / variance of the original data which comprised two features (length and width). These values would be the PC1 scores. So what fraction of total variance would we retain? The answer is given by the scaled eigenvalues. End of explanation """ np.around(np.corrcoef(transformed_data.T), 3) """ Explanation: This means that using PC1 alone explains 99% of the variance of our original data. Uncorrelated nature of principal components One other fact to note is that the transformed data for PC1 and PC2 are uncorrelated (as a consequence of the orthoginal nature of the axes). This should feel intuitively reasonable as moving along one axis does not impact the value on the other. 
End of explanation """ petal_data = demeaned_df[['petal length (cm)', 'petal width (cm)']].values pca = PCA().fit(petal_data) pca.explained_variance_ pca.explained_variance_ratio_ pca.components_ """ Explanation: Using sklearn So let's revisit sklearn PCA and see how we'd use it to recover the above results. End of explanation """ pd.DataFrame(pca.transform(petal_data), columns=['pc1', 'pc2']).head() pca.get_covariance() * n / (n - 1) # rescaled """ Explanation: Note that the transformed values have a flipped sign compared to the results we manually derived above. It doesn't really have any statistical significance and doesn't affect variance. It would be trivial to add a conditioning step to determine a sign which matches sklearn. End of explanation """ petal_data = demeaned_df[['petal length (cm)', 'petal width (cm)']].values pca = PCA(n_components=1) one_dimensional = pd.DataFrame(pca.fit_transform(petal_data), columns=['pc1']) one_dimensional.head() """ Explanation: The power of the sklearn model is that we can very simply reduce down to our desired number of dimesions. End of explanation """ trans_data = pca.inverse_transform(one_dimensional.values) x = trans_data[:, 0] y = trans_data[:, 1] plt.scatter(p_x, p_y, alpha=0.4) plt.scatter(x, y, alpha=0.4) plt.gca().set_aspect('equal', adjustable='box') axes = plt.gca() axes.set_ylim([-4, 4]) axes.set_xlim([-4, 4]) plt.xlabel('petal length (cm)') plt.ylabel('petal width (cm)') """ Explanation: The following plot shows the data points transformed into PC1 and then mapped back into the original coordinate system. Recalling the interactive chart above, the green dots repesent the projection of each blue data point onto the PC1 best fit line. The difference between the green and blue dots gives an indication of the amount of information / variance which is lost by reducing to one dimension. 
End of explanation """ petal_df = demeaned_df[['petal length (cm)', 'petal width (cm)']].copy() petal_df.head() petal_df['animal'] = 0 petal_df['vegetable'] = 1 petal_df['mineral'] = 0 petal_df.head() pca = PCA().fit(petal_df.values) pca.explained_variance_ratio_ pca.explained_variance_ pca.components_ pd.DataFrame(pca.transform(petal_df.values), columns=['pc1', 'pc2', 'pc3', 'pc4', 'pc5']).head() """ Explanation: Effect of including a feature with zero variance PCA is a variance explanation technique. What would happen if we added a feature which had zero variance? Let's say we added a feature called 'animal, vegetable, mineral' which we one-hot encode into three columns: [animal, vegetable, mineral]. End of explanation """ petal_data = demeaned_df[['petal length (cm)', 'petal width (cm)']].copy() petal_data.head() petal_data['petal length (mm)'] = petal_data['petal length (cm)'] * 10 petal_data['petal width (m)'] = petal_data['petal width (cm)'] /100 del petal_data['petal length (cm)'] del petal_data['petal width (cm)'] petal_data.head() pca = PCA().fit(petal_data) pca.explained_variance_ pca.explained_variance_ratio_ """ Explanation: As you might expect, the features which have no variance are not useful in explaining the variance of the dataset, so PC1 and PC2 are unchanged. Effect of including a features with different scales What would happen if the dimensions we'd recorded had different scales? So let's say we recorded petal width in meters and petal length in milimeters. End of explanation """ def zscore(series): return (series - series.mean()) / series.std() petal_data_std = petal_data.apply(zscore) petal_data_std.columns = ['petal length', 'petal width'] petal_data_std.head() pca = PCA().fit(petal_data_std) pca.explained_variance_ pca.explained_variance_ratio_ """ Explanation: So, perhaps unsurprisingly as PCA 'works' by explaining the variance in the data, the enormously different scales of the inputs means that one feature dominates the other. 
This is perhaps something to bear in mind when working with cross-sectional data where features use very different scales. Data pre-conditioning by z-scoring So what can we do about it? One option is to z-score. End of explanation """ np.cov(petal_data_std.T) np.corrcoef(petal_data_std.T) """ Explanation: There are many ways of normalising data; z-scoring is just one. So should features always be scaled before fitting a PCA model? That's a matter of some debate; a valid counter argument is that it can artificially 'inflate' the contribution of an otherwise relatively unimportant feature. In any event, it makes sense to be explicit about what preconditioning (if any) you've decided to use and why. A corrollary of z-scoring is that it makes the covariance matrix and correlation matrix equal. End of explanation """ petal_df = petal_df[['petal length (cm)', 'petal width (cm)']].copy() petal_df.head() petal_df['length_times_factor'] = petal_df['petal length (cm)'] * 0.8 petal_df.head() petal_df.corr() pca = PCA().fit(petal_df.values) pca.explained_variance_ratio_ pca.explained_variance_ pca.components_ df = pd.DataFrame(pca.transform(petal_df.values), columns=['pc1', 'pc2', 'pc3']).head() df """ Explanation: This should come as no great suprise as the act of z-scoring is to rescale by feature standard deviation and by definition: $$ cor(X, Y) = \frac{cov(X, Y)}{\sigma_X \sigma_Y}$$ We've hithero chosen to decompose the data's covariance matrix but it may be valid to instead decompose the correlation matrix (e.g. where data scaling is a significant factor). In the event that input features are preconditioned using z-scoring then it makes no difference. Effect of including a feature which is perfectly correlated with some other feature What happens if we introduce a feature which is perfectly correlated with some other feature? 
End of explanation """ pca.components_[-1] sum(pca.components_[-1] * petal_df.values[0]) """ Explanation: So what's happening here is that we end up with a third principal component which is not useful at all in explaining variance. Indeed, the linear combination of features is zero (i.e. all the scores are zero). Here's ehat happens when we apply the factors to the first data point. End of explanation """ df.pc3.sum() """ Explanation: And note that if we sum pc3, it's approximately zero. End of explanation """
tanghaibao/goatools
notebooks/report_depth_level.ipynb
bsd-2-clause
# Get http://geneontology.org/ontology/go-basic.obo from goatools.base import download_go_basic_obo obo_fname = download_go_basic_obo() """ Explanation: Report counts of GO terms at various levels and depths Reports the number of GO terms at each level and depth. Level refers to the length of the shortest path from the top. Depth refers to the length of the longest path from the top. See the Gene Ontology Consorium's (GOC) advice regarding levels and depths of a GO term GO level and depth reporting GO terms reported can be all GO terms in an ontology. Or subsets of GO terms can be reported. GO subset examples include all GO terms annotated for a species or all GO terms in a study. Example report on full Ontology: ``` go-basic.obo: fmt(1.2) rel(2019-01-12) 47,374 GO Terms Summary for all Ontologies: Dep <-Depth Counts-> <-Level Counts-> Lev BP MF CC BP MF CC 00 1 1 1 1 1 1 01 29 16 21 29 16 21 02 264 125 345 421 154 746 03 1273 570 494 2205 866 1073 04 2376 1516 735 4825 2072 1359 05 3692 4801 913 7297 5035 697 06 4474 1834 787 7287 1934 230 07 4699 1029 600 4696 728 68 08 4214 508 254 2018 194 10 09 3516 312 51 631 79 1 10 2399 153 4 241 13 0 11 1511 140 1 38 19 0 12 854 42 0 0 0 0 13 303 35 0 0 0 0 14 66 21 0 0 0 0 15 14 7 0 0 0 0 16 4 1 0 0 0 0 ``` 1. Download Ontologies, if necessary End of explanation """ # Get ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2go.gz from goatools.base import download_ncbi_associations gene2go = download_ncbi_associations() """ Explanation: 2. Download Associations, if necessary End of explanation """ from goatools.obo_parser import GODag obodag = GODag("go-basic.obo") """ Explanation: 3. Initialize GODag object End of explanation """ from goatools.rpt.rpt_lev_depth import RptLevDepth rptobj = RptLevDepth(obodag) """ Explanation: 4. Initialize Reporter class End of explanation """ rptobj.write_summary_cnts_all() """ Explanation: 5. 
Generate depth/level report for all GO terms End of explanation """ all_terms = obodag.values() all_terms_unique = set(all_terms) print(f"All terms: {len(all_terms)}, Unique terms: {len(all_terms_unique)}") MF = len(obodag["GO:0003674"].get_all_children()) CC = len(obodag["GO:0005575"].get_all_children()) BP = len(obodag["GO:0008150"].get_all_children()) total = MF + CC + BP + 3 # Should match "Unique terms" above print(f"MF: {MF}, CC: {CC}, BP: {BP}, Total terms: {total}") # Find some examples of duplicates from collections import Counter counter = Counter(all_terms) (most_common, most_common_count), = counter.most_common(1) print(most_common) print(f"This term shows up {most_common_count} times in GoDag") # Find which keys led to GO:0003887 go_0003887 = [k for k, v in obodag.items() if v == most_common] print(f"Alternative ids: {go_0003887}") print(obodag["GO:0003888"]) """ Explanation: 6. Count of GO terms End of explanation """
highb/deep-learning
language-translation/dlnd_language_translation.ipynb
mit
""" DON'T MODIFY ANYTHING IN THIS CELL """ import helper import problem_unittests as tests source_path = 'data/small_vocab_en' target_path = 'data/small_vocab_fr' source_text = helper.load_data(source_path) target_text = helper.load_data(target_path) """ Explanation: Language Translation In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French. Get the Data Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus. End of explanation """ view_sentence_range = (0, 10) """ DON'T MODIFY ANYTHING IN THIS CELL """ import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()}))) sentences = source_text.split('\n') word_counts = [len(sentence.split()) for sentence in sentences] print('Number of sentences: {}'.format(len(sentences))) print('Average number of words in a sentence: {}'.format(np.average(word_counts))) print() print('English sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) print() print('French sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) """ Explanation: Explore the Data Play around with view_sentence_range to view different parts of the data. End of explanation """ import string def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int): """ Convert source and target text to proper word ids :param source_text: String that contains all the source text. :param target_text: String that contains all the target text. 
:param source_vocab_to_int: Dictionary to go from the source words to an id :param target_vocab_to_int: Dictionary to go from the target words to an id :return: A tuple of lists (source_id_text, target_id_text) """ source_ints, target_ints = [], [] for line in source_text.splitlines(): line_int = [source_vocab_to_int[word] for word in line.split()] source_ints.append(line_int) for line in target_text.splitlines(): line_int = [target_vocab_to_int[word] for word in line.split()] line_int.append(target_vocab_to_int['<EOS>']) target_ints.append(line_int) return source_ints, target_ints """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_text_to_ids(text_to_ids) """ Explanation: Implement Preprocessing Function Text to Word Ids As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the &lt;EOS&gt; word id at the end of target_text. This will help the neural network predict when the sentence should end. You can get the &lt;EOS&gt; word id by doing: python target_vocab_to_int['&lt;EOS&gt;'] You can get other word ids using source_vocab_to_int and target_vocab_to_int. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ helper.preprocess_and_save_data(source_path, target_path, text_to_ids) """ Explanation: Preprocess all the data and save it Running the code cell below will preprocess all the data and save it to file. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import numpy as np import helper (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess() """ Explanation: Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. 
End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ from distutils.version import LooseVersion import warnings import tensorflow as tf from tensorflow.python.layers.core import Dense # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer' print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) """ Explanation: Check the Version of TensorFlow and Access to GPU This will check to make sure you have the correct version of TensorFlow and access to a GPU End of explanation """ def model_inputs(): """ Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences. :return: Tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length) """ # rank 2 inputs = tf.placeholder(dtype=tf.int32, shape=(None, None), name='input') targets = tf.placeholder(dtype=tf.int32, shape=(None, None), name='targets') # scalars lr = tf.placeholder(dtype=tf.float32, shape=(), name="learning_rate") keep_prob = tf.placeholder(dtype=tf.float32, shape=(), name="keep_prob") target_sequence_length = tf.placeholder(dtype=tf.int32, shape=(None,), name="target_sequence_length") max_target_len = tf.reduce_max(target_sequence_length, name="max_target_len") source_sequence_length = tf.placeholder(dtype=tf.int32, shape=(None,), name="source_sequence_length") return inputs, targets, lr, keep_prob, target_sequence_length, max_target_len, source_sequence_length """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_model_inputs(model_inputs) """ Explanation: Build the Neural Network You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions 
below: - model_inputs - process_decoder_input - encoding_layer - decoding_layer_train - decoding_layer_infer - decoding_layer - seq2seq_model Input Implement the model_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders: Input text placeholder named "input" using the TF Placeholder name parameter with rank 2. Targets placeholder with rank 2. Learning rate placeholder with rank 0. Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0. Target sequence length placeholder named "target_sequence_length" with rank 1 Max target sequence length tensor named "max_target_len" getting its value from applying tf.reduce_max on the target_sequence_length placeholder. Rank 0. Source sequence length placeholder named "source_sequence_length" with rank 1 Return the placeholders in the following the tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length) End of explanation """ def process_decoder_input(target_data, target_vocab_to_int, batch_size): """ Preprocess target data for encoding :param target_data: Target Placehoder :param target_vocab_to_int: Dictionary to go from the target words to an id :param batch_size: Batch Size :return: Preprocessed target data """ # Reference https://www.tensorflow.org/api_docs/python/tf/strided_slice # strided slice is an array slicing method used to give slices from # the provided array with the given stride distance between each slice # until there are no more slices to generate. # By using this method, we prepare the batches into the correct batch_size ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1]) # Add the <GO> keyword in order for the RNN to recognize when it needs to start # processing a new sequence. 
decoding_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1) return decoding_input """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_process_encoding_input(process_decoder_input) """ Explanation: Process Decoder Input Implement process_decoder_input by removing the last word id from each batch in target_data and concat the GO ID to the begining of each batch. End of explanation """ from imp import reload reload(tests) def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size): """ Create encoding layer :param rnn_inputs: Inputs for the RNN :param rnn_size: RNN Size :param num_layers: Number of layers :param keep_prob: Dropout keep probability :param source_sequence_length: a list of the lengths of each sequence in the batch :param source_vocab_size: vocabulary size of source data :param encoding_embedding_size: embedding size of source data :return: tuple (RNN output, RNN state) """ # Encoder embedding - given the provided set of input symbols in sequences, translate them # into a consistent set of embeddings that can be recognized by both the encoder and decocer enc_embed_input = tf.contrib.layers.embed_sequence(rnn_inputs, vocab_size=source_vocab_size, embed_dim=encoding_embedding_size) # Encoder - num_layers of lstm wrapped in dropout layers on the input and output # Given the embedded sequence provided, encodes an internal representation of the # sequence that can be passed to the decoder. 
stacked_lstm = [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(num_layers)] multi_rnn = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.MultiRNNCell(stacked_lstm), input_keep_prob=keep_prob, output_keep_prob=keep_prob) enc_output, enc_state = tf.nn.dynamic_rnn(cell=multi_rnn, inputs=enc_embed_input, sequence_length=source_sequence_length, dtype=tf.float32) return enc_output, enc_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_encoding_layer(encoding_layer) """ Explanation: Encoding Implement encoding_layer() to create a Encoder RNN layer: * Embed the encoder input using tf.contrib.layers.embed_sequence * Construct a stacked tf.contrib.rnn.LSTMCell wrapped in a tf.contrib.rnn.DropoutWrapper * Pass cell and embedded input to tf.nn.dynamic_rnn() End of explanation """ def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_summary_length, output_layer, keep_prob): """ Create a decoding layer for training :param encoder_state: Encoder State :param dec_cell: Decoder RNN Cell :param dec_embed_input: Decoder embedded input :param target_sequence_length: The lengths of each sequence in the target batch :param max_summary_length: The length of the longest sequence in the batch :param output_layer: Function to apply the output layer :param keep_prob: Dropout keep probability :return: BasicDecoderOutput containing training logits and sample_id """ # Decoding takes the internal representation generated by the encoding # layer and decodes a corresponding sequence depending on what it has # been trained to decode. # # In this project, we are translating an English phrase to French, but # we could also generate a dialog between one phrase and another in the # same language or generate a summary from a source text. 
training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input, sequence_length=target_sequence_length, time_major=False) training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, training_helper, encoder_state, output_layer) dec_output, _ = tf.contrib.seq2seq.dynamic_decode(training_decoder, impute_finished=True, maximum_iterations=max_summary_length) return dec_output """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_decoding_layer_train(decoding_layer_train) """ Explanation: Decoding - Training Create a training decoding layer: * Create a tf.contrib.seq2seq.TrainingHelper * Create a tf.contrib.seq2seq.BasicDecoder * Obtain the decoder outputs from tf.contrib.seq2seq.dynamic_decode End of explanation """ def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob): """ Create a decoding layer for inference :param encoder_state: Encoder state :param dec_cell: Decoder RNN Cell :param dec_embeddings: Decoder embeddings :param start_of_sequence_id: GO ID :param end_of_sequence_id: EOS Id :param max_target_sequence_length: Maximum length of target sequences :param vocab_size: Size of decoder/target vocabulary :param decoding_scope: TenorFlow Variable Scope for decoding :param output_layer: Function to apply the output layer :param batch_size: Batch size :param keep_prob: Dropout keep probability :return: BasicDecoderOutput containing inference logits and sample_id """ start_tokens = tf.tile(tf.constant([start_of_sequence_id], dtype=tf.int32), [batch_size], name='start_tokens') inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings, start_tokens, end_of_sequence_id) inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, inference_helper, encoder_state, output_layer) inference_decoder_output, _ = tf.contrib.seq2seq.dynamic_decode(inference_decoder, impute_finished=True, 
maximum_iterations=max_target_sequence_length) return inference_decoder_output """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_decoding_layer_infer(decoding_layer_infer) """ Explanation: Decoding - Inference Create inference decoder: * Create a tf.contrib.seq2seq.GreedyEmbeddingHelper * Create a tf.contrib.seq2seq.BasicDecoder * Obtain the decoder outputs from tf.contrib.seq2seq.dynamic_decode End of explanation """ def decoding_layer(dec_input, encoder_state, target_sequence_length, max_target_sequence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, decoding_embedding_size): """ Create decoding layer :param dec_input: Decoder input :param encoder_state: Encoder state :param target_sequence_length: The lengths of each sequence in the target batch :param max_target_sequence_length: Maximum length of target sequences :param rnn_size: RNN Size :param num_layers: Number of layers :param target_vocab_to_int: Dictionary to go from the target words to an id :param target_vocab_size: Size of target vocabulary :param batch_size: The size of the batch :param keep_prob: Dropout keep probability :param decoding_embedding_size: Decoding embedding size :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput) """ start_of_sequence_id = target_vocab_to_int['<GO>'] end_of_sequence_id = target_vocab_to_int['<EOS>'] # Reference seq2seq/ # 1. Decoder embeddings dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size])) dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input) # 2. Construct the decoder cell def make_cell(rnn_size): dec_cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2)) return dec_cell dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)]) # 3. 
Keras Dense layer to translate the decoder's output at each time output_layer = Dense(target_vocab_size, kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1)) with tf.variable_scope("decoding") as decoding_scope: train = decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob) # Reuse the global variables from the training decoding_scope.reuse_variables() infer = decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, target_vocab_size, output_layer, batch_size, keep_prob) return train, infer """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_decoding_layer(decoding_layer) """ Explanation: Build the Decoding Layer Implement decoding_layer() to create a Decoder RNN layer. Embed the target sequences Construct the decoder LSTM cell (just like you constructed the encoder cell above) Create an output layer to map the outputs of the decoder to the elements of our vocabulary Use the your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob) function to get the training logits. Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob) function to get the inference logits. Note: You'll need to use tf.variable_scope to share variables between training and inference. 
End of explanation """ def seq2seq_model(input_data, target_data, keep_prob, batch_size, source_sequence_length, target_sequence_length, max_target_sentence_length, source_vocab_size, target_vocab_size, enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int): """ Build the Sequence-to-Sequence part of the neural network :param input_data: Input placeholder :param target_data: Target placeholder :param keep_prob: Dropout keep probability placeholder :param batch_size: Batch Size :param source_sequence_length: Sequence Lengths of source sequences in the batch :param target_sequence_length: Sequence Lengths of target sequences in the batch :param source_vocab_size: Source vocabulary size :param target_vocab_size: Target vocabulary size :param enc_embedding_size: Decoder embedding size :param dec_embedding_size: Encoder embedding size :param rnn_size: RNN Size :param num_layers: Number of layers :param target_vocab_to_int: Dictionary to go from the target words to an id :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput) """ _, encoding_state = encoding_layer(input_data, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, enc_embedding_size) decoder_input = process_decoder_input(target_data, target_vocab_to_int, batch_size) training_decoder_output, inference_decoder_output = decoding_layer(decoder_input, encoding_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size) return training_decoder_output, inference_decoder_output """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_seq2seq_model(seq2seq_model) """ Explanation: Build the Neural Network Apply the functions you implemented above to: Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size). 
Process target data using your process_decoder_input(target_data, target_vocab_to_int, batch_size) function. Decode the encoded input using your decoding_layer(dec_input, enc_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size) function. End of explanation """ # Number of Epochs epochs = 6 # Batch Size batch_size = 128 # RNN Size rnn_size = 256 # Number of Layers num_layers = 2 # Embedding Size encoding_embedding_size = 230 decoding_embedding_size = 230 # Learning Rate learning_rate = 0.001 # Dropout Keep Probability keep_probability = 0.5 display_step = 100 """ Explanation: Neural Network Training Hyperparameters Tune the following parameters: Set epochs to the number of epochs. Set batch_size to the batch size. Set rnn_size to the size of the RNNs. Set num_layers to the number of layers. Set encoding_embedding_size to the size of the embedding for the encoder. Set decoding_embedding_size to the size of the embedding for the decoder. Set learning_rate to the learning rate. 
Set keep_probability to the Dropout keep probability Set display_step to state how many steps between each debug output statement End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ save_path = 'checkpoints/dev' (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess() max_target_sentence_length = max([len(sentence) for sentence in source_int_text]) train_graph = tf.Graph() with train_graph.as_default(): input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs() #sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length') input_shape = tf.shape(input_data) train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, source_sequence_length, target_sequence_length, max_target_sequence_length, len(source_vocab_to_int), len(target_vocab_to_int), encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int) training_logits = tf.identity(train_logits.rnn_output, name='logits') inference_logits = tf.identity(inference_logits.sample_id, name='predictions') masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks') with tf.name_scope("optimization"): # Loss function cost = tf.contrib.seq2seq.sequence_loss( training_logits, targets, masks) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None] train_op = optimizer.apply_gradients(capped_gradients) """ Explanation: Build the Graph Build the graph using the neural network you implemented. 
End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ def pad_sentence_batch(sentence_batch, pad_int): """Pad sentences with <PAD> so that each sentence of a batch has the same length""" max_sentence = max([len(sentence) for sentence in sentence_batch]) return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch] def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int): """Batch targets, sources, and the lengths of their sentences together""" for batch_i in range(0, len(sources)//batch_size): start_i = batch_i * batch_size # Slice the right amount for the batch sources_batch = sources[start_i:start_i + batch_size] targets_batch = targets[start_i:start_i + batch_size] # Pad pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int)) pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int)) # Need the lengths for the _lengths parameters pad_targets_lengths = [] for target in pad_targets_batch: pad_targets_lengths.append(len(target)) pad_source_lengths = [] for source in pad_sources_batch: pad_source_lengths.append(len(source)) yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths """ Explanation: Batch and pad the source and target sequences End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ def get_accuracy(target, logits): """ Calculate accuracy """ max_seq = max(target.shape[1], logits.shape[1]) if max_seq - target.shape[1]: target = np.pad( target, [(0,0),(0,max_seq - target.shape[1])], 'constant') if max_seq - logits.shape[1]: logits = np.pad( logits, [(0,0),(0,max_seq - logits.shape[1])], 'constant') return np.mean(np.equal(target, logits)) # Split data to training and validation sets train_source = source_int_text[batch_size:] train_target = target_int_text[batch_size:] valid_source = source_int_text[:batch_size] valid_target = target_int_text[:batch_size] (valid_sources_batch, valid_targets_batch, 
valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source, valid_target, batch_size, source_vocab_to_int['<PAD>'], target_vocab_to_int['<PAD>'])) with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(epochs): for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate( get_batches(train_source, train_target, batch_size, source_vocab_to_int['<PAD>'], target_vocab_to_int['<PAD>'])): _, loss = sess.run( [train_op, cost], {input_data: source_batch, targets: target_batch, lr: learning_rate, target_sequence_length: targets_lengths, source_sequence_length: sources_lengths, keep_prob: keep_probability}) if batch_i % display_step == 0 and batch_i > 0: batch_train_logits = sess.run( inference_logits, {input_data: source_batch, source_sequence_length: sources_lengths, target_sequence_length: targets_lengths, keep_prob: 1.0}) batch_valid_logits = sess.run( inference_logits, {input_data: valid_sources_batch, source_sequence_length: valid_sources_lengths, target_sequence_length: valid_targets_lengths, keep_prob: 1.0}) train_acc = get_accuracy(target_batch, batch_train_logits) valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits) print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}' .format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss)) # Save Model saver = tf.train.Saver() saver.save(sess, save_path) print('Model Trained and Saved') """ Explanation: Train Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ # Save parameters for checkpoint helper.save_params(save_path) """ Explanation: Save Parameters Save the batch_size and save_path parameters for inference. 
def sentence_to_seq(sentence, vocab_to_int):
    """
    Convert a sentence to a sequence of ids
    :param sentence: String
    :param vocab_to_int: Dictionary to go from the words to an id
    :return: List of word ids
    """
    # Lowercase each whitespace-separated token and map it to its id,
    # falling back to the <UNK> id for out-of-vocabulary words.
    unk_id = vocab_to_int['<UNK>']
    return [vocab_to_int.get(word.lower(), unk_id) for word in sentence.split()]
""" DON'T MODIFY ANYTHING IN THIS CELL """ translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int) loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(load_path + '.meta') loader.restore(sess, load_path) input_data = loaded_graph.get_tensor_by_name('input:0') logits = loaded_graph.get_tensor_by_name('predictions:0') target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0') source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0') keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0') translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size, target_sequence_length: [len(translate_sentence)*2]*batch_size, source_sequence_length: [len(translate_sentence)]*batch_size, keep_prob: 1.0})[0] print('Input') print(' Word Ids: {}'.format([i for i in translate_sentence])) print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence])) print('\nPrediction') print(' Word Ids: {}'.format([i for i in translate_logits])) print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits]))) """ Explanation: Translate This will translate translate_sentence from English to French. End of explanation """
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive2/text_classification/solutions/text_classification.ipynb
apache-2.0
# Import necessary libraries import matplotlib.pyplot as plt import os import re import shutil import string import tensorflow as tf from tensorflow.keras import layers from tensorflow.keras import losses # Print the TensorFlow version print(tf.__version__) """ Explanation: Basic Text Classification Overview This notebook demonstrates text classification starting from plain text files stored on disk. You'll train a binary classifier to perform sentiment analysis on an IMDB dataset. At the end of the notebook, there is an exercise for you to try, in which you'll train a multi-class classifier to predict the tag for a programming question on Stack Overflow. Learning Objective In this notebook, you learn how to: Prepare the dataset for training Use loss function and optimizer Train the model Evaluate the model Export the model Introduction This notebook shows how to train a sentiment analysis model to classify movie reviews as positive or negative, based on the text of the review. Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete the target notebook first and then review this solution notebook. End of explanation """ # Download the IMDB dataset url = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz" dataset = tf.keras.utils.get_file("aclImdb_v1", url, untar=True, cache_dir='.', cache_subdir='') dataset_dir = os.path.join(os.path.dirname(dataset), 'aclImdb') # Explore the dataset os.listdir(dataset_dir) train_dir = os.path.join(dataset_dir, 'train') os.listdir(train_dir) """ Explanation: Sentiment analysis This notebook trains a sentiment analysis model to classify movie reviews as positive or negative, based on the text of the review. This is an example of binary—or two-class—classification, an important and widely applicable kind of machine learning problem. You'll use the Large Movie Review Dataset that contains the text of 50,000 movie reviews from the Internet Movie Database. 
These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are balanced, meaning they contain an equal number of positive and negative reviews. Download and explore the IMDB dataset Let's download and extract the dataset, then explore the directory structure. End of explanation """ # Print the file content sample_file = os.path.join(train_dir, 'pos/1181_9.txt') with open(sample_file) as f: print(f.read()) """ Explanation: The aclImdb/train/pos and aclImdb/train/neg directories contain many text files, each of which is a single movie review. Let's take a look at one of them. End of explanation """ remove_dir = os.path.join(train_dir, 'unsup') shutil.rmtree(remove_dir) """ Explanation: Load the dataset Next, you will load the data off disk and prepare it into a format suitable for training. To do so, you will use the helpful text_dataset_from_directory utility, which expects a directory structure as follows. main_directory/ ...class_a/ ......a_text_1.txt ......a_text_2.txt ...class_b/ ......b_text_1.txt ......b_text_2.txt To prepare a dataset for binary classification, you will need two folders on disk, corresponding to class_a and class_b. These will be the positive and negative movie reviews, which can be found in aclImdb/train/pos and aclImdb/train/neg. As the IMDB dataset contains additional folders, you will remove them before using this utility. End of explanation """ # Create the validation set batch_size = 32 seed = 42 raw_train_ds = tf.keras.utils.text_dataset_from_directory( 'aclImdb/train', batch_size=batch_size, validation_split=0.2, subset='training', seed=seed) """ Explanation: Next, you will use the text_dataset_from_directory utility to create a labeled tf.data.Dataset. tf.data is a powerful collection of tools for working with data. When running a machine learning experiment, it is a best practice to divide your dataset into three splits: train, validation, and test. 
The IMDB dataset has already been divided into train and test, but it lacks a validation set. Let's create a validation set using an 80:20 split of the training data by using the validation_split argument below. End of explanation """ # Print few examples for text_batch, label_batch in raw_train_ds.take(1): for i in range(3): print("Review", text_batch.numpy()[i]) print("Label", label_batch.numpy()[i]) """ Explanation: As you can see above, there are 25,000 examples in the training folder, of which you will use 80% (or 20,000) for training. As you will see in a moment, you can train a model by passing a dataset directly to model.fit. If you're new to tf.data, you can also iterate over the dataset and print out a few examples as follows. End of explanation """ print("Label 0 corresponds to", raw_train_ds.class_names[0]) print("Label 1 corresponds to", raw_train_ds.class_names[1]) """ Explanation: Notice the reviews contain raw text (with punctuation and occasional HTML tags like &lt;br/&gt;). You will show how to handle these in the following section. The labels are 0 or 1. To see which of these correspond to positive and negative movie reviews, you can check the class_names property on the dataset. End of explanation """ raw_val_ds = tf.keras.utils.text_dataset_from_directory( 'aclImdb/train', batch_size=batch_size, validation_split=0.2, subset='validation', seed=seed) raw_test_ds = tf.keras.utils.text_dataset_from_directory( 'aclImdb/test', batch_size=batch_size) """ Explanation: Next, you will create a validation and test dataset. You will use the remaining 5,000 reviews from the training set for validation. Note: When using the validation_split and subset arguments, make sure to either specify a random seed, or to pass shuffle=False, so that the validation and training splits have no overlap. 
def custom_standardization(input_data):
    """Standardize raw review text for the TextVectorization layer.

    Lowercases the text, replaces literal '<br />' HTML tags with a space,
    and strips all punctuation characters.
    """
    text = tf.strings.lower(input_data)
    text = tf.strings.regex_replace(text, '<br />', ' ')
    punctuation_pattern = '[%s]' % re.escape(string.punctuation)
    return tf.strings.regex_replace(text, punctuation_pattern, '')
def vectorize_text(text, label):
    """Map a raw review string to its padded token-id sequence; the label passes through.

    A trailing axis is added so the TextVectorization layer receives a batch
    of one string.
    """
    return vectorize_layer(tf.expand_dims(text, -1)), label
End of explanation """ # Apply the TextVectorization layer you created earlier to the train, validation, and test dataset train_ds = raw_train_ds.map(vectorize_text) val_ds = raw_val_ds.map(vectorize_text) test_ds = raw_test_ds.map(vectorize_text) """ Explanation: You are nearly ready to train your model. As a final preprocessing step, you will apply the TextVectorization layer you created earlier to the train, validation, and test dataset. End of explanation """ AUTOTUNE = tf.data.AUTOTUNE train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE) val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE) test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE) """ Explanation: Configure the dataset for performance These are two important methods you should use when loading data to make sure that I/O does not become blocking. .cache() keeps data in memory after it's loaded off disk. This will ensure the dataset does not become a bottleneck while training your model. If your dataset is too large to fit into memory, you can also use this method to create a performant on-disk cache, which is more efficient to read than many small files. .prefetch() overlaps data preprocessing and model execution while training. You can learn more about both methods, as well as how to cache data to disk in the data performance guide. 
End of explanation """ embedding_dim = 16 # Create your neural network model = tf.keras.Sequential([ layers.Embedding(max_features + 1, embedding_dim), layers.Dropout(0.2), layers.GlobalAveragePooling1D(), layers.Dropout(0.2), layers.Dense(1)]) model.summary() """ Explanation: Create the model It's time to create your neural network: End of explanation """ # TODO # Configure the model to use an optimizer and a loss function model.compile(loss=losses.BinaryCrossentropy(from_logits=True), optimizer='adam', metrics=tf.metrics.BinaryAccuracy(threshold=0.0)) """ Explanation: The layers are stacked sequentially to build the classifier: The first layer is an Embedding layer. This layer takes the integer-encoded reviews and looks up an embedding vector for each word-index. These vectors are learned as the model trains. The vectors add a dimension to the output array. The resulting dimensions are: (batch, sequence, embedding). To learn more about embeddings, check out the Word embeddings tutorial. Next, a GlobalAveragePooling1D layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This allows the model to handle input of variable length, in the simplest way possible. This fixed-length output vector is piped through a fully-connected (Dense) layer with 16 hidden units. The last layer is densely connected with a single output node. Loss function and optimizer A model needs a loss function and an optimizer for training. Since this is a binary classification problem and the model outputs a probability (a single-unit layer with a sigmoid activation), you'll use losses.BinaryCrossentropy loss function. Now, configure the model to use an optimizer and a loss function: End of explanation """ # TODO # Train the model epochs = 10 history = model.fit( train_ds, validation_data=val_ds, epochs=epochs) """ Explanation: Train the model You will train the model by passing the dataset object to the fit method. 
End of explanation """ # TODO # Evaluate the model loss, accuracy = model.evaluate(test_ds) print("Loss: ", loss) print("Accuracy: ", accuracy) """ Explanation: Evaluate the model Let's see how the model performs. Two values will be returned. Loss (a number which represents our error, lower values are better), and accuracy. End of explanation """ history_dict = history.history history_dict.keys() """ Explanation: This fairly naive approach achieves an accuracy of about 86%. Create a plot of accuracy and loss over time model.fit() returns a History object that contains a dictionary with everything that happened during training: End of explanation """ # Plot the loss over time acc = history_dict['binary_accuracy'] val_acc = history_dict['val_binary_accuracy'] loss = history_dict['loss'] val_loss = history_dict['val_loss'] epochs = range(1, len(acc) + 1) # "bo" is for "blue dot" plt.plot(epochs, loss, 'bo', label='Training loss') # b is for "solid blue line" plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() # Plot the accuracy over time plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend(loc='lower right') plt.show() """ Explanation: There are four entries: one for each monitored metric during training and validation. 
You can use these to plot the training and validation loss for comparison, as well as the training and validation accuracy: End of explanation """ # TODO # Export the model export_model = tf.keras.Sequential([ vectorize_layer, model, layers.Activation('sigmoid') ]) export_model.compile( loss=losses.BinaryCrossentropy(from_logits=False), optimizer="adam", metrics=['accuracy'] ) # Test it with `raw_test_ds`, which yields raw strings loss, accuracy = export_model.evaluate(raw_test_ds) print(accuracy) """ Explanation: In this plot, the dots represent the training loss and accuracy, and the solid lines are the validation loss and accuracy. Notice the training loss decreases with each epoch and the training accuracy increases with each epoch. This is expected when using a gradient descent optimization—it should minimize the desired quantity on every iteration. This isn't the case for the validation loss and accuracy—they seem to peak before the training accuracy. This is an example of overfitting: the model performs better on the training data than it does on data it has never seen before. After this point, the model over-optimizes and learns representations specific to the training data that do not generalize to test data. For this particular case, you could prevent overfitting by simply stopping the training when the validation accuracy is no longer increasing. One way to do so is to use the tf.keras.callbacks.EarlyStopping callback. Export the model In the code above, you applied the TextVectorization layer to the dataset before feeding text to the model. If you want to make your model capable of processing raw strings (for example, to simplify deploying it), you can include the TextVectorization layer inside your model. To do so, you can create a new model using the weights you just trained. End of explanation """ examples = [ "The movie was great!", "The movie was okay.", "The movie was terrible..." 
] export_model.predict(examples) """ Explanation: Inference on new data To get predictions for new examples, you can simply call model.predict(). End of explanation """
nathanshammah/pim
doc/notebooks/piqs_superradiance.ipynb
mit
import matplotlib as mpl from matplotlib import cm import matplotlib.pyplot as plt from qutip import * from piqs import * #TLS parameters N = 6 ntls = N nds = num_dicke_states(ntls) [jx, jy, jz, jp, jm] = jspin(N) w0 = 1 gE = 0.1 gD = 0.01 h = w0 * jz #photonic parameters nphot = 20 wc = 1 kappa = 1 ratio_g = 2 g = ratio_g/np.sqrt(N) a = destroy(nphot) #TLS liouvillian system = Dicke(N = N) system.hamiltonian = h system.emission = gE system.dephasing = gD liouv = system.liouvillian() #photonic liouvilian h_phot = wc * a.dag() * a c_ops_phot = [np.sqrt(kappa) * a] liouv_phot = liouvillian(h_phot, c_ops_phot) #identity operators id_tls = to_super(qeye(nds)) id_phot = to_super(qeye(nphot)) #light-matter superoperator and total liouvillian liouv_sum = super_tensor(liouv_phot, id_tls) + super_tensor(id_phot, liouv) h_int = g * tensor(a + a.dag(), jx) liouv_int = -1j* spre(h_int) + 1j* spost(h_int) liouv_tot = liouv_sum + liouv_int #total operators jz_tot = tensor(qeye(nphot), jz) jpjm_tot = tensor(qeye(nphot), jp*jm) nphot_tot = tensor(a.dag()*a, qeye(nds)) rho_ss = steadystate(liouv_tot) jz_ss = expect(jz_tot, rho_ss) jpjm_ss = expect(jpjm_tot, rho_ss) nphot_ss = expect(nphot_tot, rho_ss) psi = rho_ss.ptrace(0) xvec = np.linspace(-6, 6, 100) W = wigner(psi, xvec, xvec) """ Explanation: Superradiance in the open Dicke model: $N$ qubits in a bosonic cavity We consider a system of $N$ two-level systems (TLSs) coupled to a cavity mode. This is known as the Dicke model \begin{eqnarray} H &=&\omega_{0}J_z + \omega_{c}a^\dagger a + g\left(a^\dagger + a\right)\left(J_{+} + J_{-}\right) \end{eqnarray} where each TLS has identical frequency $\omega_{0}$. The light matter coupling can be in the ultrastrong coupling (USC) regime, $g/\omega_{0}>0.1$. If we study this model as an open quantum system, the cavity can leak photons and the TLSs are subject to local processes. 
For example, the system can be incoherently pumped at a rate $\gamma_\text{P}$, the TLSs are subject to dephasing at a rate $\gamma_\text{D}$, and local incoherent emission occurs at a rate $\gamma_\text{E}$. The dynamics of the coupled light-matter system is governed by
\begin{eqnarray}
\dot{\rho} &=& -i\lbrack \omega_{0}J_z + \omega_{c}a^\dagger a + g\left(a^\dagger + a\right)\left(J_{+} + J_{-}\right),\rho \rbrack
+\frac{\kappa}{2}\mathcal{L}_{a}[\rho]
+\sum_{n=1}^{N}\left(\frac{\gamma_\text{P}}{2}\mathcal{L}_{J_{+,n}}[\rho]
+\frac{\gamma_\text{E}}{2}\mathcal{L}_{J_{-,n}}[\rho]
+\frac{\gamma_\text{D}}{2}\mathcal{L}_{J_{z,n}}[\rho]\right)
\end{eqnarray}
End of explanation """ jmax = (0.5 * N) j2max = (0.5 * N + 1) * (0.5 * N) plt.rc('text', usetex = True) label_size = 20 plt.rc('xtick', labelsize=label_size) plt.rc('ytick', labelsize=label_size) wmap = wigner_cmap(W) # Generate Wigner colormap nrm = mpl.colors.Normalize(0, W.max()) max_cb =np.max(W) min_cb =np.min(W) fig2 = plt.figure(2) plotw = plt.contourf(xvec, xvec, W, 100, cmap=wmap, norm=nrm) plt.title(r"Wigner Function", fontsize=label_size); plt.xlabel(r'$x$', fontsize = label_size) plt.ylabel(r'$p$', fontsize = label_size) cb = plt.colorbar() cb.set_ticks( [min_cb, max_cb]) cb.set_ticklabels([r'$0$',r'max']) plt.show() plt.close() """ Explanation: Wigner Function Below we calculate the Wigner function of the photonic part of the steady state. It shows two displaced squeezed states in the reciprocal photonic space. The result is in agreement with the findings of Ref [2]. End of explanation """ #set initial conditions for spins and cavity tmax = 40 nt = 1000 t = np.linspace(0, tmax, nt) rho0 = dicke(N, N/2, N/2) rho0_phot = ket2dm(basis(nphot,0)) rho0_tot = tensor(rho0_phot, rho0) result = mesolve(liouv_tot, rho0_tot, t, [], e_ops = [jz_tot, jpjm_tot, nphot_tot]) rhot_tot = result.states jzt_tot = result.expect[0] jpjmt_tot = result.expect[1] adagat_tot = result.expect[2] """ Explanation: Time Evolution Here we calculate the time evolution of a state initialized in the most excited spin state with no photons in the cavity. We calculate the full density matrix evolution as well as spin and photon operator mean values. 
End of explanation """ jmax = (N/2) j2max = N/2*(N/2+1) fig1 = plt.figure(1) plt.plot(t, jzt_tot/jmax, 'k-', label='time evolution') plt.plot(t, t*0+jz_ss/jmax, 'g--', label='steady state') plt.title('Total inversion', fontsize = label_size) plt.xlabel(r'$t$', fontsize = label_size) plt.ylabel(r'$\langle J_z\rangle (t)$', fontsize = label_size) plt.legend(fontsize = label_size) plt.show() plt.close() fig2 = plt.figure(2) plt.plot(t, jpjmt_tot/j2max, 'k-', label='time evolution') plt.plot(t, t*0+jpjm_ss/j2max, 'g--', label='steady state') plt.xlabel(r'$t$', fontsize = label_size) plt.ylabel(r'$\langle J_{+}J_{-}\rangle (t)$', fontsize = label_size) plt.title('Light emission', fontsize = label_size) plt.xlabel(r'$t$', fontsize = label_size) plt.legend(fontsize = label_size) plt.show() plt.close() fig3 = plt.figure(3) plt.plot(t, adagat_tot, 'k-', label='time evolution') plt.plot(t, t*0 + nphot_ss, 'g--', label='steady state') plt.title('Cavity photons', fontsize = label_size) plt.xlabel(r'$t$', fontsize = label_size) plt.ylabel(r'$\langle a^\dagger a \rangle (t)$', fontsize = label_size) plt.legend(fontsize = label_size) plt.show() plt.close() """ Explanation: Plots End of explanation """ qutip.about() """ Explanation: References [1] E.G. Dalla Torre et al., Phys Rev. A 94, 061802(R) (2016) [2] P. Kirton and J. Keeling, , Phys. Rev. Lett. 118, 123602 (2017) [3] N. Shammah, S. Ahmed, N. Lambert, S. De Liberato, and F. Nori, to be submitted. [4] J. R. Johansson, P. D. Nation, and F. Nori, Comp. Phys. Comm. 183, 1760 (2012). http://qutip.org End of explanation """
thehackerwithin/berkeley
code_examples/python_mayavi/mayavi_intermediate.ipynb
bsd-3-clause
def lorenz(x, t, sigma=10, beta=8/3., rho=28):
    """Right-hand side of the Lorenz system, in the form expected by odeint.

    Parameters
    ----------
    x : array-like, shape (3,)
        Current state (x, y, z).
    t : float
        Time; unused, but required by scipy.integrate.odeint's func(y, t) signature.
    sigma, beta, rho : float, optional
        Lorenz parameters. Defaults match the module-level values used in this
        notebook (sigma=10, beta=8/3, rho=28), the classic chaotic regime, so
        existing calls lorenz(x, t) behave exactly as before; pass keyword
        arguments (or odeint's args=...) to explore other regimes.

    Returns
    -------
    numpy.ndarray, shape (3,)
        Time derivative dx/dt of the state.
    """
    dx = np.zeros(3)
    dx[0] = -sigma*x[0] + sigma*x[1]
    dx[1] = rho*x[0] - x[1] - x[0]*x[2]
    dx[2] = -beta*x[2] + x[0]*x[1]
    return dx
mlab.figure('Points') mlab.clf() mlab.points3d(y[:,0], y[:,1], y[:,2], t, mode='2dvertex') mlab.colorbar( title='time') mlab.axes() """ Explanation: Point Plot End of explanation """ # plot the data as a line, with color representing the time evolution mlab.figure('Line and Points') mlab.clf() # plot the data as a line, with color representing the time evolution mlab.plot3d(y[:,0], y[:,1], y[:,2], t, tube_radius=None, line_width=1 ) mlab.colorbar() # By default, mayavi will plot points as spheres, so each point will # be represented by a surface. # Using mode='2dvertex' is needed for plotting large numbers of points. mlab.points3d(y[:,0], y[:,1], y[:,2], t, scale_factor=.3, scale_mode='none') #mode='2dvertex') mlab.colorbar( title='time') """ Explanation: Line + Point Plot End of explanation """ h3d = np.histogramdd(y, bins=50) # generate the midpoint coordinates xg,yg,zg = h3d[1] xm = xg[1:] - .5*(xg[1]-xg[0]) ym = yg[1:] - .5*(yg[1]-yg[0]) zm = zg[1:] - .5*(zg[1]-zg[0]) xg, yg, zg = np.meshgrid(xm, ym, zm) mlab.figure('contour') mlab.clf() mlab.contour3d( h3d[0], opacity=.5, contours=25 ) """ Explanation: Contour Plot Let's see how long the particle spends in each location End of explanation """ # plot the data as a line mlab.figure('Animate') mlab.clf() # mlab.plot3d(y[:,0], y[:,1], y[:,2], tube_radius=None) # mlab.colorbar() a = mlab.points3d(y0[0], y0[1], y0[2], mode='2dvertex') # number of points to plot # n_plot = n_time n_plot = 1000 @mlab.animate(delay=10, ui=True ) def anim(): for i in range(n_time): # a.mlab_source.set(x=y[i,0],y=y[i,1],z=y[i,2], color=(1,0,0)) mlab.points3d(y[i,0],y[i,1],y[i,2], mode='2dvertex', reset_zoom=False) yield anim() """ Explanation: Animation Animation can be accomplished with a mlab.animate decorator. You must define a function that yields to the animate decorator. The yield defines when mayavi will rerender the image. End of explanation """
minxuancao/shogun
doc/ipython-notebooks/multiclass/KNN.ipynb
gpl-3.0
import numpy as np
import os
# Root of the Shogun example data; override via the SHOGUN_DATA_DIR env var.
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')

from scipy.io import loadmat, savemat
from numpy import random
from os import path

# Load the USPS handwritten-digit data: `data` holds one flattened
# 16x16 image per column, `label` the digit class of each column.
mat = loadmat(os.path.join(SHOGUN_DATA_DIR, 'multiclass/usps.mat'))
Xall = mat['data']
Yall = np.array(mat['label'].squeeze(), dtype=np.double)

# map from 1..10 to 0..9, since shogun
# requires multiclass labels to be
# 0, 1, ..., K-1
Yall = Yall - 1

# Fixed seed so the train/test subsets (and the reported accuracies)
# are reproducible.
random.seed(0)

subset = random.permutation(len(Yall))

# First 5000 shuffled samples for training, the next 1000 for testing.
Xtrain = Xall[:, subset[:5000]]
Ytrain = Yall[subset[:5000]]

Xtest = Xall[:, subset[5000:6000]]
Ytest = Yall[subset[5000:6000]]

# Number of cross-validation folds and the range of k values tried later.
Nsplit = 2
all_ks = range(1, 21)

# Python 2 print statements (this notebook targets Python 2).
print Xall.shape
print Xtrain.shape
print Xtest.shape
"""
Explanation: K-Nearest Neighbors (KNN)
by Chiyuan Zhang and S&ouml;ren Sonnenburg
This notebook illustrates the <a href="http://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm">K-Nearest Neighbors</a> (KNN) algorithm on the USPS digit recognition dataset in Shogun. Further, the effect of <a href="http://en.wikipedia.org/wiki/Cover_tree">Cover Trees</a> on speed is illustrated by comparing KNN with and without it. Finally, a comparison with <a href="http://en.wikipedia.org/wiki/Support_vector_machine#Multiclass_SVM">Multiclass Support Vector Machines</a> is shown.
The basics
The training of a KNN model basically does nothing but memorizing all the training points and the associated labels, which is very cheap in computation but costly in storage. The prediction is implemented by finding the K nearest neighbors of the query point, and voting. Here K is a hyper-parameter for the algorithm. Smaller values for K give the model low bias but high variance; while larger values for K give low variance but high bias.
In SHOGUN, you can use CKNN to perform KNN learning. To construct a KNN machine, you must choose the hyper-parameter K and a distance function. Usually, we simply use the standard CEuclideanDistance, but in general, any subclass of CDistance could be used.
For demonstration, in this tutorial we select a random subset of 1000 samples from the USPS digit recognition dataset, and run 2-fold cross validation of KNN with varying K. First we load and init data split:
End of explanation
"""
%matplotlib inline
import pylab as P

def plot_example(dat, lab):
    # Show the first five digit images in `dat` (one flattened 16x16
    # image per column) with their integer labels as subplot titles.
    for i in xrange(5):
        ax=P.subplot(1,5,i+1)
        P.title(int(lab[i]))
        ax.imshow(dat[:,i].reshape((16,16)), interpolation='nearest')
        # hide the pixel-coordinate ticks
        ax.set_xticks([])
        ax.set_yticks([])

# First five training examples...
_=P.figure(figsize=(17,6))
P.gray()
plot_example(Xtrain, Ytrain)

# ...and first five test examples.
_=P.figure(figsize=(17,6))
P.gray()
plot_example(Xtest, Ytest)
"""
Explanation: Let us plot the first five examples of the train data (first row) and test data (second row).
End of explanation
"""
from modshogun import MulticlassLabels, RealFeatures
from modshogun import KNN, EuclideanDistance

# Wrap the numpy arrays in Shogun feature/label containers and train a
# 3-nearest-neighbour classifier under plain Euclidean distance.
labels = MulticlassLabels(Ytrain)
feats = RealFeatures(Xtrain)
k=3
dist = EuclideanDistance()
knn = KNN(k, dist, labels)
labels_test = MulticlassLabels(Ytest)
feats_test = RealFeatures(Xtest)
knn.train(feats)
pred = knn.apply_multiclass(feats_test)
print "Predictions", pred[:5]
print "Ground Truth", Ytest[:5]

from modshogun import MulticlassAccuracy
evaluator = MulticlassAccuracy()
accuracy = evaluator.evaluate(pred, labels_test)

print "Accuracy = %2.2f%%" % (100*accuracy)
"""
Explanation: Then we import shogun components and convert the data to shogun objects:
End of explanation
"""
# Indices of the test samples the 3-NN classifier got wrong.
idx=np.where(pred != Ytest)[0]
Xbad=Xtest[:,idx]
Ybad=Ytest[idx]
_=P.figure(figsize=(17,6))
P.gray()
plot_example(Xbad, Ybad)
"""
Explanation: Let's plot a few missclassified examples - I guess we all agree that these are notably harder to detect.
End of explanation
"""
knn.set_k(13)
# Computing the 13 nearest neighbours also yields the neighbours for every
# smaller k, so one pass gives the predictions for all k=1..13 (one column
# per k in the returned matrix).
multiple_k=knn.classify_for_multiple_k()
print multiple_k.shape
"""
Explanation: Now the question is - is 97.30% accuracy the best we can do?
While one would usually re-train KNN with different values for k here and likely perform Cross-validation, we just use a small trick here that saves us lots of computation time: When we have to determine the $K\geq k$ nearest neighbors we will know the nearest neigbors for all $k=1...K$ and can thus get the predictions for multiple k's in one step:
End of explanation
"""
# Column k-1 of multiple_k holds the predictions a k-NN classifier would
# have produced, so one comparison per column gives the accuracy curve.
for k in xrange(13):
    print "Accuracy for k=%d is %2.2f%%" % (k+1, 100*np.mean(multiple_k[:,k]==Ytest))
"""
Explanation: We have the prediction for each of the 13 k's now and can quickly compute the accuracies:
End of explanation
"""
from modshogun import Time, KNN_COVER_TREE, KNN_BRUTE

# Time brute-force prediction on the 1000 test points...
start = Time.get_curtime()
knn.set_k(3)
knn.set_knn_solver_type(KNN_BRUTE)
pred = knn.apply_multiclass(feats_test)
print "Standard KNN took %2.1fs" % (Time.get_curtime() - start)

# ...and the same prediction with the cover-tree accelerated solver.
start = Time.get_curtime()
knn.set_k(3)
knn.set_knn_solver_type(KNN_COVER_TREE)
pred = knn.apply_multiclass(feats_test)
print "Covertree KNN took %2.1fs" % (Time.get_curtime() - start)
"""
Explanation: So k=3 seems to have been the optimal choice.
Accellerating KNN
Obviously applying KNN is very costly: for each prediction you have to compare the object against all training objects. While the implementation in SHOGUN will use all available CPU cores to parallelize this computation it might still be slow when you have big data sets. In SHOGUN, you can use Cover Trees to speed up the nearest neighbor searching process in KNN. Just call set_use_covertree on the KNN machine to enable or disable this feature. We also show the prediction time comparison with and without Cover Tree in this tutorial.
So let's just have a comparison utilizing the data above:
End of explanation
"""
def evaluate(labels, feats, use_cover_tree=False):
    """Run Nsplit-fold cross-validation of KNN over every k in all_ks.

    Returns a dict of (Nsplit, len(all_ks)) arrays: 'eout' (test accuracy),
    'ein' (training accuracy) and 'time' (prediction time per test sample).
    `use_cover_tree` switches the KNN solver between brute-force search
    and the cover-tree accelerated search.
    """
    from modshogun import MulticlassAccuracy, CrossValidationSplitting
    import time
    split = CrossValidationSplitting(labels, Nsplit)
    split.build_subsets()

    accuracy = np.zeros((Nsplit, len(all_ks)))
    acc_train = np.zeros(accuracy.shape)
    time_test = np.zeros(accuracy.shape)
    for i in range(Nsplit):
        # Training fold = complement of the i-th subset; test fold = subset i.
        idx_train = split.generate_subset_inverse(i)
        idx_test = split.generate_subset_indices(i)
        for j, k in enumerate(all_ks):
            #print "Round %d for k=%d..." % (i, k)

            # Restrict features/labels to the training fold.
            feats.add_subset(idx_train)
            labels.add_subset(idx_train)

            dist = EuclideanDistance(feats, feats)
            knn = KNN(k, dist, labels)
            # presumably keeps a copy of the training features inside the
            # model so the subset can be swapped out afterwards -- confirm
            # against the shogun docs
            knn.set_store_model_features(True)
            if use_cover_tree:
                knn.set_knn_solver_type(KNN_COVER_TREE)
            else:
                knn.set_knn_solver_type(KNN_BRUTE)
            knn.train()

            # Training accuracy: predict on the fold the model was fit on.
            evaluator = MulticlassAccuracy()
            pred = knn.apply_multiclass()
            acc_train[i, j] = evaluator.evaluate(pred, labels)

            # Swap to the held-out fold for testing.
            feats.remove_subset()
            labels.remove_subset()
            feats.add_subset(idx_test)
            labels.add_subset(idx_test)

            # NOTE(review): time.clock() is deprecated and was removed in
            # Python 3.8; on Python 3 use time.perf_counter() instead.
            t_start = time.clock()
            pred = knn.apply_multiclass(feats)
            time_test[i, j] = (time.clock() - t_start) / labels.get_num_labels()

            accuracy[i, j] = evaluator.evaluate(pred, labels)

            feats.remove_subset()
            labels.remove_subset()
    return {'eout': accuracy, 'ein': acc_train, 'time': time_test}
"""
Explanation: So we can significantly speed it up.  Let's do a more systematic comparison. For that a helper function is defined to run the evaluation for KNN:
End of explanation
"""
# Cross-validate on the 1000-sample test subset, once per solver type.
labels = MulticlassLabels(Ytest)
feats = RealFeatures(Xtest)
print("Evaluating KNN...")
wo_ct = evaluate(labels, feats, use_cover_tree=False)
wi_ct = evaluate(labels, feats, use_cover_tree=True)
print("Done!")
"""
Explanation: Evaluate KNN with and without Cover Tree.
This takes a few seconds:
End of explanation
"""
import matplotlib

# Mean test ('eout') and training ('ein') accuracy over the CV folds,
# as a function of k.
fig = P.figure(figsize=(8,5))
P.plot(all_ks, wo_ct['eout'].mean(axis=0), 'r-*')
P.plot(all_ks, wo_ct['ein'].mean(axis=0), 'r--*')
P.legend(["Test Accuracy", "Training Accuracy"])
P.xlabel('K')
P.ylabel('Accuracy')
P.title('KNN Accuracy')
P.tight_layout()

# Mean per-sample prediction time: brute-force vs cover-tree solver.
fig = P.figure(figsize=(8,5))
P.plot(all_ks, wo_ct['time'].mean(axis=0), 'r-*')
P.plot(all_ks, wi_ct['time'].mean(axis=0), 'b-d')
P.xlabel("K")
P.ylabel("time")
P.title('KNN time')
P.legend(["Plain KNN", "CoverTree KNN"], loc='center right')
P.tight_layout()
"""
Explanation: Generate plots with the data collected in the evaluation:
End of explanation
"""
# Multiclass SVM (GMNP) with a Gaussian kernel, for comparison with KNN.
# NOTE(review): `labels`/`feats` were rebound to the test-subset containers
# in the previous cell, so the SVM trains on those 1000 samples.
from modshogun import GaussianKernel, GMNPSVM

width=80
C=1

gk=GaussianKernel()
gk.set_width(width)

svm=GMNPSVM(C, gk, labels)
_=svm.train(feats)
"""
Explanation: Although simple and elegant, KNN is generally very resource costly. Because all the training samples are to be memorized literally, the memory cost of KNN learning becomes prohibitive when the dataset is huge. Even when the memory is big enough to hold all the data, the prediction will be slow, since the distances between the query point and all the training points need to be computed and ranked. The situation becomes worse if in addition the data samples are all very high-dimensional. Leaving aside computation time issues, k-NN is a very versatile and competitive algorithm. It can be applied to any kind of objects (not just numerical data) - as long as one can design a suitable distance function. In pratice k-NN used with bagging can create improved and more robust results.
Comparison to Multiclass Support Vector Machines
In contrast to KNN - multiclass Support Vector Machines (SVMs) attempt to model the decision function separating each class from one another. They compare examples utilizing similarity measures (so called Kernels) instead of distances like KNN does.
When applied, they are in Big-O notation computationally as expensive as KNN but involve another (costly) training step. They do not scale very well to cases with a huge number of classes but usually lead to favorable results when applied to small number of classes cases. So for reference let us compare how a standard multiclass SVM performs wrt. KNN on the mnist data set from above. Let us first train a multiclass svm using a Gaussian kernel (kind of the SVM equivalent to the euclidean distance).
End of explanation
"""
# Accuracy of the trained SVM on the same 1000-sample test set used for KNN.
out=svm.apply(feats_test)
evaluator = MulticlassAccuracy()
accuracy = evaluator.evaluate(out, labels_test)
print "Accuracy = %2.2f%%" % (100*accuracy)
"""
Explanation: Let's apply the SVM to the same test data set to compare results:
End of explanation
"""
# Evaluate on all remaining samples (shuffled indices 6000 and up) that
# were used neither for training nor for the earlier test split.
Xrem=Xall[:,subset[6000:]]
Yrem=Yall[subset[6000:]]

feats_rem=RealFeatures(Xrem)
labels_rem=MulticlassLabels(Yrem)
out=svm.apply(feats_rem)

evaluator = MulticlassAccuracy()
accuracy = evaluator.evaluate(out, labels_rem)
print "Accuracy = %2.2f%%" % (100*accuracy)

# Visualize a few of the digits the SVM misclassified.
idx=np.where(out.get_labels() != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=P.figure(figsize=(17,6))
P.gray()
plot_example(Xbad, Ybad)
"""
Explanation: Since the SVM performs way better on this task - let's apply it to all data we did not use in training.
End of explanation
"""
tensorflow/docs-l10n
site/ja/tutorials/load_data/text.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2018 The TensorFlow Authors. End of explanation """ !pip install "tensorflow-text==2.8.*" import collections import pathlib import tensorflow as tf from tensorflow.keras import layers from tensorflow.keras import losses from tensorflow.keras import utils from tensorflow.keras.layers import TextVectorization import tensorflow_datasets as tfds import tensorflow_text as tf_text """ Explanation: tf.data を使ったテキストの読み込み <table class="tfo-notebook-buttons" align="left"> <td><a target="_blank" href="https://www.tensorflow.org/tutorials/load_data/text"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org で表示</a></td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/tutorials/load_data/text.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab で実行</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tutorials/load_data/text.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub でソースを表示</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/tutorials/load_data/text.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a> </td> </table> このチュートリアルでは、テキストを読み込んで前処理する 2 つの方法を紹介します。 まず、Keras ユーティリティと前処理レイヤーを使用します。これには、データを tf.data.Dataset に変換するための 
tf.keras.utils.text_dataset_from_directory とデータを標準化、トークン化、およびベクトル化するための tf.keras.layers.TextVectorization が含まれます。TensorFlow を初めて使用する場合は、これらから始める必要があります。 次に、tf.data.TextLineDataset などの低レベルのユーティリティを使用してテキストファイルを読み込み、text.UnicodeScriptTokenizer や text.case_fold_utf8 などの TensorFlow Text APIを使用して、よりきめ細かい制御のためにデータを前処理します。 End of explanation """ data_url = 'https://storage.googleapis.com/download.tensorflow.org/data/stack_overflow_16k.tar.gz' dataset_dir = utils.get_file( origin=data_url, untar=True, cache_dir='stack_overflow', cache_subdir='') dataset_dir = pathlib.Path(dataset_dir).parent list(dataset_dir.iterdir()) train_dir = dataset_dir/'train' list(train_dir.iterdir()) """ Explanation: 例 1: StackOverflow の質問のタグを予測する 最初の例として、StackOverflow からプログラミングの質問のデータセットをダウンロードします。それぞれの質問 (「ディクショナリを値で並べ替えるにはどうすればよいですか?」) は、1 つのタグ (<code>Python</code>、CSharp、JavaScript、またはJava) でラベルされています。このタスクでは、質問のタグを予測するモデルを開発します。これは、マルチクラス分類の例です。マルチクラス分類は、重要で広く適用できる機械学習の問題です。 データセットをダウンロードして調査する まず、tf.keras.utils.get_file を使用して Stack Overflow データセットをダウンロードし、ディレクトリの構造を調べます。 End of explanation """ sample_file = train_dir/'python/1755.txt' with open(sample_file) as f: print(f.read()) """ Explanation: train/csharp、train/java、train/python および train/javascript ディレクトリには、多くのテキストファイルが含まれています。それぞれが Stack Overflow の質問です。 サンプルファイルを出力してデータを調べます。 End of explanation """ batch_size = 32 seed = 42 raw_train_ds = utils.text_dataset_from_directory( train_dir, batch_size=batch_size, validation_split=0.2, subset='training', seed=seed) """ Explanation: データセットを読み込む 次に、データをディスクから読み込み、トレーニングに適した形式に準備します。これを行うには、tf.keras.utils.text_dataset_from_directory ユーティリティを使用して、ラベル付きの tf.data.Dataset を作成します。これは、入力パイプラインを構築するための強力なツールのコレクションです。tf.data を始めて使用する場合は、tf.data: TensorFlow 入力パイプラインを構築するを参照してください。 tf.keras.utils.text_dataset_from_directory API は、次のようなディレクトリ構造を想定しています。 train/ ...csharp/ ......1.txt ......2.txt ...java/ ......1.txt ......2.txt ...javascript/ ......1.txt ......2.txt ...python/ ......1.txt ......2.txt 
機械学習実験を実行するときは、データセットをトレーニング、検証、および、テストの 3 つに分割することをお勧めします。 Stack Overflow データセットは、すでにトレーニングセットとテストセットに分割されていますが、検証セットはありません。 tf.keras.utils.text_dataset_from_directory を使用し、validation_split を 0.2 (20%) に設定し、トレーニングデータを 80:20 に分割して検証セットを作成します。 End of explanation """ for text_batch, label_batch in raw_train_ds.take(1): for i in range(10): print("Question: ", text_batch.numpy()[i]) print("Label:", label_batch.numpy()[i]) """ Explanation: 前のセル出力が示すように、トレーニングフォルダには 8,000 の例があり、そのうち 80% (6,400) をトレーニングに使用します。tf.data.Dataset を Model.fit に直接渡すことで、モデルをトレーニングできます。詳細は、後ほど見ていきます。 まず、データセットを反復処理し、いくつかの例を出力して、データを確認します。 Note: To increase the difficulty of the classification problem, the dataset author replaced occurrences of the words Python, CSharp, JavaScript, or Java in the programming question with the word blank. End of explanation """ for i, label in enumerate(raw_train_ds.class_names): print("Label", i, "corresponds to", label) """ Explanation: ラベルは、0、1、2 または 3 です。これらのどれがどの文字列ラベルに対応するかを確認するには、データセットの class_names プロパティを確認します。 End of explanation """ # Create a validation set. raw_val_ds = utils.text_dataset_from_directory( train_dir, batch_size=batch_size, validation_split=0.2, subset='validation', seed=seed) test_dir = dataset_dir/'test' # Create a test set. 
raw_test_ds = utils.text_dataset_from_directory( test_dir, batch_size=batch_size) """ Explanation: 次に、tf.keras.utils.text_dataset_from_directory を使って検証およびテスト用データセットを作成します。トレーニング用セットの残りの 1,600 件のレビューを検証に使用します。 注意: tf.keras.utils.text_dataset_from_directory の validation_split および subset 引数を使用する場合は、必ずランダムシードを指定するか、shuffle=Falseを渡して、検証とトレーニング分割に重複がないようにします。 End of explanation """ VOCAB_SIZE = 10000 binary_vectorize_layer = TextVectorization( max_tokens=VOCAB_SIZE, output_mode='binary') """ Explanation: トレーニング用データセットを準備する 次に、tf.keras.layers.TextVectorization レイヤーを使用して、データを標準化、トークン化、およびベクトル化します。 標準化とは、テキストを前処理することを指します。通常、句読点や HTML 要素を削除して、データセットを簡素化します。 トークン化とは、文字列をトークンに分割することです(たとえば、空白で分割することにより、文を個々の単語に分割します)。 ベクトル化とは、トークンを数値に変換して、ニューラルネットワークに入力できるようにすることです。 これらのタスクはすべて、このレイヤーで実行できます。これらの詳細については、tf.keras.layers.TextVectorization API ドキュメントを参照してください。 注意点 : デフォルトの標準化では、テキストが小文字に変換され、句読点が削除されます (standardize='lower_and_strip_punctuation')。 デフォルトのトークナイザーは空白で分割されます (split='whitespace')。 デフォルトのベクトル化モードは int です (output_mode='int')。これは整数インデックスを出力します(トークンごとに1つ)。このモードは、語順を考慮したモデルを構築するために使用できます。binary などの他のモードを使用して、bag-of-word モデルを構築することもできます。 TextVectorization を使用した標準化、トークン化、およびベクトル化について詳しくみるために、2 つのモデルを作成します。 まず、'binary' ベクトル化モードを使用して、bag-of-words モデルを構築します。 次に、1D ConvNet で 'int' モードを使用します。 End of explanation """ MAX_SEQUENCE_LENGTH = 250 int_vectorize_layer = TextVectorization( max_tokens=VOCAB_SIZE, output_mode='int', output_sequence_length=MAX_SEQUENCE_LENGTH) """ Explanation: 'int' モードの場合、最大語彙サイズに加えて、明示的な最大シーケンス長 (MAX_SEQUENCE_LENGTH) を設定する必要があります。これにより、レイヤーはシーケンスを正確に output_sequence_length 値にパディングまたは切り捨てます。 End of explanation """ # Make a text-only dataset (without labels), then call `TextVectorization.adapt`. 
train_text = raw_train_ds.map(lambda text, labels: text) binary_vectorize_layer.adapt(train_text) int_vectorize_layer.adapt(train_text) """ Explanation: 次に、TextVectorization.adapt を呼び出して、前処理レイヤーの状態をデータセットに適合させます。これにより、モデルは文字列から整数へのインデックスを作成します。 注意: TextVectorization.adapt を呼び出すときは、トレーニング用データのみを使用することが重要です (テスト用セットを使用すると情報が漏洩します)。 End of explanation """ def binary_vectorize_text(text, label): text = tf.expand_dims(text, -1) return binary_vectorize_layer(text), label def int_vectorize_text(text, label): text = tf.expand_dims(text, -1) return int_vectorize_layer(text), label # Retrieve a batch (of 32 reviews and labels) from the dataset. text_batch, label_batch = next(iter(raw_train_ds)) first_question, first_label = text_batch[0], label_batch[0] print("Question", first_question) print("Label", first_label) print("'binary' vectorized question:", binary_vectorize_text(first_question, first_label)[0]) print("'int' vectorized question:", int_vectorize_text(first_question, first_label)[0]) """ Explanation: これらのレイヤーを使用してデータを前処理した結果を出力します。 End of explanation """ print("1289 ---> ", int_vectorize_layer.get_vocabulary()[1289]) print("313 ---> ", int_vectorize_layer.get_vocabulary()[313]) print("Vocabulary size: {}".format(len(int_vectorize_layer.get_vocabulary()))) """ Explanation: 上に示したように、TextVectorization の 'binary' モードは、入力に少なくとも 1 回存在するトークンを示す配列を返しますが、'int' モードでは、各トークンが整数に置き換えられるため、トークンの順序が保持されます。 レイヤーで TextVectorization.get_vocabulary を呼び出すことにより、各整数が対応するトークン (文字列) を検索できます。 End of explanation """ binary_train_ds = raw_train_ds.map(binary_vectorize_text) binary_val_ds = raw_val_ds.map(binary_vectorize_text) binary_test_ds = raw_test_ds.map(binary_vectorize_text) int_train_ds = raw_train_ds.map(int_vectorize_text) int_val_ds = raw_val_ds.map(int_vectorize_text) int_test_ds = raw_test_ds.map(int_vectorize_text) """ Explanation: モデルをトレーニングする準備がほぼ整いました。 最後の前処理ステップとして、トレーニング、検証、およびデータセットのテストのために前に作成した TextVectorization レイヤーを適用します。 End of explanation """ AUTOTUNE = 
tf.data.AUTOTUNE def configure_dataset(dataset): return dataset.cache().prefetch(buffer_size=AUTOTUNE) binary_train_ds = configure_dataset(binary_train_ds) binary_val_ds = configure_dataset(binary_val_ds) binary_test_ds = configure_dataset(binary_test_ds) int_train_ds = configure_dataset(int_train_ds) int_val_ds = configure_dataset(int_val_ds) int_test_ds = configure_dataset(int_test_ds) """ Explanation: パフォーマンスのためにデータセットを構成する 以下は、データを読み込むときに I/O がブロックされないようにするために使用する必要がある 2 つの重要な方法です。 Dataset.cache はデータをディスクから読み込んだ後、データをメモリに保持します。これにより、モデルのトレーニング中にデータセットがボトルネックになることを回避できます。データセットが大きすぎてメモリに収まらない場合は、この方法を使用して、パフォーマンスの高いオンディスクキャッシュを作成することもできます。これは、多くの小さなファイルを読み込むより効率的です。 Dataset.prefetch はトレーニング中にデータの前処理とモデルの実行をオーバーラップさせます。 以上の 2 つの方法とデータをディスクにキャッシュする方法についての詳細は、<a>データパフォーマンスガイド</a>の <em>プリフェッチ</em>を参照してください。 End of explanation """ binary_model = tf.keras.Sequential([layers.Dense(4)]) binary_model.compile( loss=losses.SparseCategoricalCrossentropy(from_logits=True), optimizer='adam', metrics=['accuracy']) history = binary_model.fit( binary_train_ds, validation_data=binary_val_ds, epochs=10) """ Explanation: モデルをトレーニングする ニューラルネットワークを作成します。 'binary' のベクトル化されたデータの場合、単純な bag-of-words 線形モデルを定義し、それを構成してトレーニングします。 End of explanation """ def create_model(vocab_size, num_labels): model = tf.keras.Sequential([ layers.Embedding(vocab_size, 64, mask_zero=True), layers.Conv1D(64, 5, padding="valid", activation="relu", strides=2), layers.GlobalMaxPooling1D(), layers.Dense(num_labels) ]) return model # `vocab_size` is `VOCAB_SIZE + 1` since `0` is used additionally for padding. 
int_model = create_model(vocab_size=VOCAB_SIZE + 1, num_labels=4) int_model.compile( loss=losses.SparseCategoricalCrossentropy(from_logits=True), optimizer='adam', metrics=['accuracy']) history = int_model.fit(int_train_ds, validation_data=int_val_ds, epochs=5) """ Explanation: 次に、'int' ベクトル化レイヤーを使用して、1D ConvNet を構築します。 End of explanation """ print("Linear model on binary vectorized data:") print(binary_model.summary()) print("ConvNet model on int vectorized data:") print(int_model.summary()) """ Explanation: 2 つのモデルを比較します。 End of explanation """ binary_loss, binary_accuracy = binary_model.evaluate(binary_test_ds) int_loss, int_accuracy = int_model.evaluate(int_test_ds) print("Binary model accuracy: {:2.2%}".format(binary_accuracy)) print("Int model accuracy: {:2.2%}".format(int_accuracy)) """ Explanation: テストデータで両方のモデルを評価します。 End of explanation """ export_model = tf.keras.Sequential( [binary_vectorize_layer, binary_model, layers.Activation('sigmoid')]) export_model.compile( loss=losses.SparseCategoricalCrossentropy(from_logits=False), optimizer='adam', metrics=['accuracy']) # Test it with `raw_test_ds`, which yields raw strings loss, accuracy = export_model.evaluate(raw_test_ds) print("Accuracy: {:2.2%}".format(binary_accuracy)) """ Explanation: 注意: このサンプルデータセットは、かなり単純な分類問題を表しています。より複雑なデータセットと問題は、前処理戦略とモデルアーキテクチャに微妙ながら重要な違いをもたらします。さまざまなアプローチを比較するために、さまざまなハイパーパラメータとエポックを試してみてください。 モデルをエクスポートする 上記のコードでは、モデルにテキストをフィードする前に、tf.keras.layers.TextVectorization レイヤーをデータセットに適用しました。モデルで生の文字列を処理できるようにする場合 (たとえば、展開を簡素化するため)、モデル内に TextVectorization レイヤーを含めることができます。 これを行うには、トレーニングしたばかりの重みを使用して新しいモデルを作成できます。 End of explanation """ def get_string_labels(predicted_scores_batch): predicted_int_labels = tf.argmax(predicted_scores_batch, axis=1) predicted_labels = tf.gather(raw_train_ds.class_names, predicted_int_labels) return predicted_labels """ Explanation: これで、モデルは生の文字列を入力として受け取り、Model.predict を使用して各ラベルのスコアを予測できます。最大スコアのラベルを見つける関数を定義します。 End of explanation """ inputs = [ "how do 
I extract keys from a dict into a list?", # 'python' "debug public static void main(string[] args) {...}", # 'java' ] predicted_scores = export_model.predict(inputs) predicted_labels = get_string_labels(predicted_scores) for input, label in zip(inputs, predicted_labels): print("Question: ", input) print("Predicted label: ", label.numpy()) """ Explanation: 新しいデータで推論を実行する End of explanation """ DIRECTORY_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/' FILE_NAMES = ['cowper.txt', 'derby.txt', 'butler.txt'] for name in FILE_NAMES: text_dir = utils.get_file(name, origin=DIRECTORY_URL + name) parent_dir = pathlib.Path(text_dir).parent list(parent_dir.iterdir()) """ Explanation: モデル内にテキスト前処理ロジックを含めると、モデルを本番環境にエクスポートして展開を簡素化し、トレーニング/テストスキューの可能性を減らすことができます。 tf.keras.layers.TextVectorization を適用する場所を選択する際に性能の違いに留意する必要があります。モデルの外部で使用すると、GPU でトレーニングするときに非同期 CPU 処理とデータのバッファリングを行うことができます。したがって、GPU でモデルをトレーニングしている場合は、モデルの開発中に最高のパフォーマンスを得るためにこのオプションを使用し、デプロイの準備ができたらモデル内に TextVectorization レイヤーを含めるように切り替えることをお勧めします。 モデルの保存の詳細については、モデルの保存と読み込みチュートリアルをご覧ください。 例 2: イーリアスの翻訳者を予測する 以下に、tf.data.TextLineDataset を使用してテキストファイルから例を読み込み、TensorFlow Text を使用してデータを前処理する例を示します。この例では、ホーマーのイーリアスの 3 つの異なる英語翻訳を使用し、与えられた 1 行のテキストから翻訳者を識別するようにモデルをトレーニングします。 データセットをダウンロードして調査する 3 つのテキストの翻訳者は次のとおりです。 ウィリアム・クーパー — テキスト エドワード、ダービー伯爵 — テキスト サミュエル・バトラー — テキスト このチュートリアルで使われているテキストファイルは、ヘッダ、フッタ、行番号、章のタイトルの削除など、いくつかの典型的な前処理が行われています。 前処理後のファイルをローカルにダウンロードします。 End of explanation """ def labeler(example, index): return example, tf.cast(index, tf.int64) labeled_data_sets = [] for i, file_name in enumerate(FILE_NAMES): lines_dataset = tf.data.TextLineDataset(str(parent_dir/file_name)) labeled_dataset = lines_dataset.map(lambda ex: labeler(ex, i)) labeled_data_sets.append(labeled_dataset) """ Explanation: データセットを読み込む 以前は、tf.keras.utils.text_dataset_from_directory では、ファイルのすべてのコンテンツが 1 つの例として扱われていました。ここでは、tf.data.TextLineDataset を使用します。これは、テキストファイルから tf.data.Dataset 
を作成するように設計されています。それぞれの例は、元のファイルからの行です。TextLineDataset は、主に行ベースのテキストデータ (詩やエラーログなど) に役立ちます。 これらのファイルを繰り返し処理し、各ファイルを独自のデータセットに読み込みます。各例には個別にラベルを付ける必要があるため、Dataset.map を使用して、それぞれにラベラー関数を適用します。これにより、データセット内のすべての例が繰り返され、 (example, label) ペアが返されます。 End of explanation """ BUFFER_SIZE = 50000 BATCH_SIZE = 64 VALIDATION_SIZE = 5000 all_labeled_data = labeled_data_sets[0] for labeled_dataset in labeled_data_sets[1:]: all_labeled_data = all_labeled_data.concatenate(labeled_dataset) all_labeled_data = all_labeled_data.shuffle( BUFFER_SIZE, reshuffle_each_iteration=False) """ Explanation: 次に、Dataset.concatenate を使用し、これらのラベル付きデータセットを 1 つのデータセットに結合し、Dataset.shuffle を使用してシャッフルします。 End of explanation """ for text, label in all_labeled_data.take(10): print("Sentence: ", text.numpy()) print("Label:", label.numpy()) """ Explanation: 前述の手順でいくつかの例を出力します。データセットはまだバッチ処理されていないため、all_labeled_data の各エントリは 1 つのデータポイントに対応します。 End of explanation """ tokenizer = tf_text.UnicodeScriptTokenizer() def tokenize(text, unused_label): lower_case = tf_text.case_fold_utf8(text) return tokenizer.tokenize(lower_case) tokenized_ds = all_labeled_data.map(tokenize) """ Explanation: トレーニング用データセットを準備する tf.keras.layers.TextVectorization を使用してテキストデータセットを前処理する代わりに、TensorFlow Text API を使用してデータを標準化およびトークン化し、語彙を作成し、tf.lookup.StaticVocabularyTable を使用してトークンを整数にマッピングし、モデルにフィードします。(詳細については TensorFlow Text を参照してください)。 テキストを小文字に変換してトークン化する関数を定義します。 TensorFlow Text は、さまざまなトークナイザーを提供します。この例では、text.UnicodeScriptTokenizer を使用してデータセットをトークン化します。 Dataset.map を使用して、トークン化をデータセットに適用します。 End of explanation """ for text_batch in tokenized_ds.take(5): print("Tokens: ", text_batch.numpy()) """ Explanation: データセットを反復処理して、トークン化されたいくつかの例を出力します。 End of explanation """ tokenized_ds = configure_dataset(tokenized_ds) vocab_dict = collections.defaultdict(lambda: 0) for toks in tokenized_ds.as_numpy_iterator(): for tok in toks: vocab_dict[tok] += 1 vocab = sorted(vocab_dict.items(), key=lambda x: x[1], reverse=True) vocab = [token for token, 
count in vocab] vocab = vocab[:VOCAB_SIZE] vocab_size = len(vocab) print("Vocab size: ", vocab_size) print("First five vocab entries:", vocab[:5]) """ Explanation: 次に、トークンを頻度で並べ替え、上位の VOCAB_SIZE トークンを保持することにより、語彙を構築します。 End of explanation """ keys = vocab values = range(2, len(vocab) + 2) # Reserve `0` for padding, `1` for OOV tokens. init = tf.lookup.KeyValueTensorInitializer( keys, values, key_dtype=tf.string, value_dtype=tf.int64) num_oov_buckets = 1 vocab_table = tf.lookup.StaticVocabularyTable(init, num_oov_buckets) """ Explanation: トークンを整数に変換するには、vocab セットを使用して、tf.lookup.StaticVocabularyTable を作成します。トークンを [2, vocab_size + 2] の範囲の整数にマップします。TextVectorization レイヤーと同様に、0 はパディングを示すために予約されており、1 は語彙外 (OOV) トークンを示すために予約されています。 End of explanation """ def preprocess_text(text, label): standardized = tf_text.case_fold_utf8(text) tokenized = tokenizer.tokenize(standardized) vectorized = vocab_table.lookup(tokenized) return vectorized, label """ Explanation: 最後に、トークナイザーとルックアップテーブルを使用して、データセットを標準化、トークン化、およびベクトル化する関数を定義します。 End of explanation """ example_text, example_label = next(iter(all_labeled_data)) print("Sentence: ", example_text.numpy()) vectorized_text, example_label = preprocess_text(example_text, example_label) print("Vectorized sentence: ", vectorized_text.numpy()) """ Explanation: 1 つの例でこれを試して、出力を確認します。 End of explanation """ all_encoded_data = all_labeled_data.map(preprocess_text) """ Explanation: 次に、Dataset.map を使用して、データセットに対して前処理関数を実行します。 End of explanation """ train_data = all_encoded_data.skip(VALIDATION_SIZE).shuffle(BUFFER_SIZE) validation_data = all_encoded_data.take(VALIDATION_SIZE) train_data = train_data.padded_batch(BATCH_SIZE) validation_data = validation_data.padded_batch(BATCH_SIZE) """ Explanation: データセットをトレーニング用セットとテスト用セットに分割する Keras TextVectorization レイヤーでも、ベクトル化されたデータをバッチ処理してパディングします。バッチ内の例は同じサイズと形状である必要があるため、パディングが必要です。これらのデータセットの例はすべて同じサイズではありません。テキストの各行には、異なる数の単語があります。 tf.data.Dataset は、データセットの分割とパディングのバッチ処理をサポートしています End of explanation 
""" sample_text, sample_labels = next(iter(validation_data)) print("Text batch shape: ", sample_text.shape) print("Label batch shape: ", sample_labels.shape) print("First text example: ", sample_text[0]) print("First label example: ", sample_labels[0]) """ Explanation: validation_data および train_data は (example, label) ペアのコレクションではなく、バッチのコレクションです。各バッチは、配列として表される (多くの例、多くのラベル) のペアです。 以下に示します。 End of explanation """ vocab_size += 2 """ Explanation: パディングに 0 を使用し、語彙外 (OOV) トークンに 1 を使用するため、語彙のサイズが 2 つ増えました。 End of explanation """ train_data = configure_dataset(train_data) validation_data = configure_dataset(validation_data) """ Explanation: 以前と同じように、パフォーマンスを向上させるためにデータセットを構成します。 End of explanation """ model = create_model(vocab_size=vocab_size, num_labels=3) model.compile( optimizer='adam', loss=losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) history = model.fit(train_data, validation_data=validation_data, epochs=3) loss, accuracy = model.evaluate(validation_data) print("Loss: ", loss) print("Accuracy: {:2.2%}".format(accuracy)) """ Explanation: モデルをトレーニングする 以前と同じように、このデータセットでモデルをトレーニングできます。 End of explanation """ preprocess_layer = TextVectorization( max_tokens=vocab_size, standardize=tf_text.case_fold_utf8, split=tokenizer.tokenize, output_mode='int', output_sequence_length=MAX_SEQUENCE_LENGTH) preprocess_layer.set_vocabulary(vocab) export_model = tf.keras.Sequential( [preprocess_layer, model, layers.Activation('sigmoid')]) export_model.compile( loss=losses.SparseCategoricalCrossentropy(from_logits=False), optimizer='adam', metrics=['accuracy']) # Create a test dataset of raw strings. 
test_ds = all_labeled_data.take(VALIDATION_SIZE).batch(BATCH_SIZE) test_ds = configure_dataset(test_ds) loss, accuracy = export_model.evaluate(test_ds) print("Loss: ", loss) print("Accuracy: {:2.2%}".format(accuracy)) """ Explanation: モデルをエクスポートする モデルが生の文字列を入力として受け取ることができるようにするには、カスタム前処理関数と同じ手順を実行する TextVectorization レイヤーを作成します。すでに語彙をトレーニングしているので、新しい語彙をトレーニングする TextVectorization.adapt の代わりに、TextVectorization.set_vocabulary を使用できます。 End of explanation """ inputs = [ "Join'd to th' Ionians with their flowing robes,", # Label: 1 "the allies, and his armour flashed about him so that he seemed to all", # Label: 2 "And with loud clangor of his arms he fell.", # Label: 0 ] predicted_scores = export_model.predict(inputs) predicted_labels = tf.argmax(predicted_scores, axis=1) for input, label in zip(inputs, predicted_labels): print("Question: ", input) print("Predicted label: ", label.numpy()) """ Explanation: エンコードされた検証セットのモデルと生の検証セットのエクスポートされたモデルの損失と正確度は、予想どおり同じです。 新しいデータで推論を実行する End of explanation """ # Training set. train_ds = tfds.load( 'imdb_reviews', split='train[:80%]', batch_size=BATCH_SIZE, shuffle_files=True, as_supervised=True) # Validation set. val_ds = tfds.load( 'imdb_reviews', split='train[80%:]', batch_size=BATCH_SIZE, shuffle_files=True, as_supervised=True) """ Explanation: TensorFlow Datasets (TFDS) を使用して、より多くのデータセットをダウンロードする TensorFlow Dataset からより多くのデータセットをダウンロードできます。 この例では、IMDB 大規模映画レビューデータセットを使用して、感情分類のモデルをトレーニングします。 End of explanation """ for review_batch, label_batch in val_ds.take(1): for i in range(5): print("Review: ", review_batch[i].numpy()) print("Label: ", label_batch[i].numpy()) """ Explanation: いくつかの例を出力します。 End of explanation """ vectorize_layer = TextVectorization( max_tokens=VOCAB_SIZE, output_mode='int', output_sequence_length=MAX_SEQUENCE_LENGTH) # Make a text-only dataset (without labels), then call `TextVectorization.adapt`. 
train_text = train_ds.map(lambda text, labels: text) vectorize_layer.adapt(train_text) def vectorize_text(text, label): text = tf.expand_dims(text, -1) return vectorize_layer(text), label train_ds = train_ds.map(vectorize_text) val_ds = val_ds.map(vectorize_text) # Configure datasets for performance as before. train_ds = configure_dataset(train_ds) val_ds = configure_dataset(val_ds) """ Explanation: これで、以前と同じようにデータを前処理してモデルをトレーニングできます。 注意: これは二項分類の問題であるため、モデルには tf.keras.losses.SparseCategoricalCrossentropy の代わりに tf.keras.losses.BinaryCrossentropy を使用します。 トレーニング用データセットを準備する End of explanation """ model = create_model(vocab_size=VOCAB_SIZE + 1, num_labels=1) model.summary() model.compile( loss=losses.BinaryCrossentropy(from_logits=True), optimizer='adam', metrics=['accuracy']) history = model.fit(train_ds, validation_data=val_ds, epochs=3) loss, accuracy = model.evaluate(val_ds) print("Loss: ", loss) print("Accuracy: {:2.2%}".format(accuracy)) """ Explanation: モデルを作成、構成、およびトレーニングする End of explanation """ export_model = tf.keras.Sequential( [vectorize_layer, model, layers.Activation('sigmoid')]) export_model.compile( loss=losses.SparseCategoricalCrossentropy(from_logits=False), optimizer='adam', metrics=['accuracy']) # 0 --> negative review # 1 --> positive review inputs = [ "This is a fantastic movie.", "This is a bad movie.", "This movie was so bad that it was good.", "I will never say yes to watching this movie.", ] predicted_scores = export_model.predict(inputs) predicted_labels = [int(round(x[0])) for x in predicted_scores] for input, label in zip(inputs, predicted_labels): print("Question: ", input) print("Predicted label: ", label) """ Explanation: モデルをエクスポートする End of explanation """
mne-tools/mne-tools.github.io
0.19/_downloads/f01121873dbae065a1740e6c0c20d1d5/plot_eeg_no_mri.ipynb
bsd-3-clause
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#          Joan Massich <mailsik@gmail.com>
#
# License: BSD Style.

import os.path as op

import mne
from mne.datasets import eegbci
from mne.datasets import fetch_fsaverage

# Download fsaverage files (the template-MRI anatomy bundled with MNE).
fs_dir = fetch_fsaverage(verbose=True)
subjects_dir = op.dirname(fs_dir)

# The files live in:
subject = 'fsaverage'
trans = 'fsaverage'  # MNE has a built-in fsaverage transformation
src = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')
bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')
"""
Explanation: EEG forward operator with a template MRI
This tutorial explains how to compute the forward operator from EEG data using
the standard template MRI subject fsaverage.
.. caution:: Source reconstruction without an individual T1 MRI from the
subject will be less accurate. Do not over interpret activity locations which
can be off by multiple centimeters.
:depth: 2
End of explanation
"""
# Fetch one run of the EEGBCI dataset (subject 1, run 6) and load it.
raw_fname, = eegbci.load_data(subject=1, runs=[6])
raw = mne.io.read_raw_edf(raw_fname, preload=True)

# Clean channel names to be able to use a standard 1005 montage
new_names = dict(
    (ch_name,
     ch_name.rstrip('.').upper().replace('Z', 'z').replace('FP', 'Fp'))
    for ch_name in raw.ch_names)
raw.rename_channels(new_names)

# Read and set the EEG electrode locations
montage = mne.channels.make_standard_montage('standard_1005')
raw.set_montage(montage)
raw.set_eeg_reference(projection=True)  # needed for inverse modeling

# Check that the locations of EEG electrodes is correct with respect to MRI
mne.viz.plot_alignment(
    raw.info, src=src, eeg=['original', 'projected'], trans=trans,
    show_axes=True, mri_fiducials=True, dig='fiducials')
"""
Explanation: Load the data
We use here EEG data from the BCI dataset.
<div class="alert alert-info"><h4>Note</h4><p>See `plot_montage` to view all the standard EEG montages available in MNE-Python.</p></div>
End of explanation
"""
# Compute the EEG forward operator with the template BEM and source space.
fwd = mne.make_forward_solution(raw.info, trans=trans, src=src,
                                bem=bem, eeg=True, mindist=5.0, n_jobs=1)
print(fwd)

# for illustration purposes use fwd to compute the sensitivity map
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
eeg_map.plot(time_label='EEG sensitivity', subjects_dir=subjects_dir,
             clim=dict(lims=[5, 50, 100]))
"""
Explanation: Setup source space and compute forward
End of explanation
"""
fionapigott/Data-Science-45min-Intros
matplotlib-201/matplotlib-201.ipynb
unlicense
import matplotlib.pyplot as plt %matplotlib inline plt.plot([1,2,3,4]) plt.ylabel('some numbers') plt.show() """ Explanation: Understanding just enough of matplotlib... (or, Why a State Machine within a State Machine is a Shitty Idea) <br> So, you want to plot something in Python. Perhaps you've typed python import matplotlib.pyplot as plt a few times...maybe you even managed to format your axis ticklabels for a special plot. Do you have a few examples lying around where you once used matplotlib to do something, but you don't quite remember why it worked? This tutorial is for you. We will assume that you can follow the simple plotting tutorials. Instead, we will try to understand the ideas behind matplotlib's structure, but only enough to develop intuition for efficiently switching between simple, high-level commands and all the glorious guts beneath. This intuition requires an understanding of the different roles of pyplot, backends, and the matplotlib API. We'll start by understanding backends, why they're confusing, and how we can stop thinking about them. We'll then see how convenient pylot is, and what its limitations are. We'll end with a dive into the matplotlib object space. Expect some bouncing back-and-forth between concepts and explicit examples. Requirements: matplotlib, numpy A Simple Example Let's start with an example from the tutorials. End of explanation """ import matplotlib.pyplot %matplotlib inline matplotlib.pyplot.plot([1,2,3,4]) matplotlib.pyplot.ylabel('some numbers') #matplotlib.pyplot.show() """ Explanation: Observations: * we've used an ipython "magic" function to do something to matplotlib * we've imported the pyplot module and used some functions defined in it * there's nothing object-oriented here: we called 3 stand-alone functions, sequentially * yet the show function caused the plot to render in a jupyter notebook. Lots of magic here... Some background Matplotlib consists of 3 groups of Python objects: 1. 
pyplot: a set of command-style functions to provide MATLAB-style interface 2. Matplotlib API / frontend: classes that do the heavy lifting, creating and managing figures, text, lines, plots and so on. 3. Backend: device dependent resources for turning abstract graphical objects into pixels Efficient, effective use of matplotlib involves: configuring the backend correctly, using pyplot when possible, and dropping into the API when necessary. Foundational ideas: (http://matplotlib.org/faq/usage_faq.html) Environmental hierarchy: * Pyplot state machine: it knows about what you’ve plotted (for purely programmatic use, this may be dropped as irrelevant [or unhelpful!]) * pyplot functions that look like: matplotlib.pyplot.FUNCTION * Figure - associates Axes objects and the Canvas (a lower-level object) * Axes - a plot, plus a few things like a title * Axis - number-line-like: ticks, ticklabels, etc. * Plotting input is always one or more numpy.array objects * Backends combine a rendering engine (PNG,SVG,etc) with an environment awareness <img src="files/fig_map.png"> Peeling away the magic... The cell below is same example as above, but more verbose: the pyplot state machine maintains knowledge of what was plotted with plot, such that a call to show renders the figure, and the %matplotlib magic command handles the backend (object to pixel mapping, display, inline figure embedding). We've also made the namespaces really explicit. End of explanation """ import matplotlib.pyplot matplotlib.pyplot.plot([1,2,3,4]) matplotlib.pyplot.ylabel('some numbers') matplotlib.pyplot.savefig('test.png') """ Explanation: It turns out that we don't even need the call to show because of what the %matplotlib call does. We can remove the magic function if we specify the backend with an explicit 'write' call. This is a good example of how pyplot and the backend manage separate concerns. Restart kernel here if you want to prove it actually works. 
End of explanation """ #import matplotlib #matplotlib.use('TkAgg') #import matplotlib.pyplot #matplotlib.pyplot.plot([1,2,3,4]) #matplotlib.pyplot.ylabel('some numbers') #matplotlib.pyplot.show() """ Explanation: Look for test.png in your local directory. What have we done? There's still the pyplot state machine (running in the ipython state machine!), but we've made explicit some of the magic the %matplotlib inline call was performing. There are other arguments for %matplotlib including notebook, which provides a more interactive inline plot, see http://ipython.readthedocs.io/en/stable/interactive/magics.html?highlight=magic#magic-matplotlib [OPTIONAL] We can also set the backend explicitly; see http://matplotlib.org/faq/usage_faq.html#what-is-a-backend. You'll need to restart your kernel again and uncomment the code in the cell below. End of explanation """ import matplotlib.pyplot as plt %matplotlib inline # call to 'plot' returns a tuple of Line2D objects line, = plt.plot([1,2,3,4]) # call to 'ylabel' returns a Text object text = plt.ylabel('some numbers') """ Explanation: That's all we'll say about backends. You have options for how and where to render your plots: you can make use of the magic %matplotlib function, which sets an appropriate backend and does the inline plotting, or you can write your plots explicitly to files. We'll do the former here. Moving between pyplot and the matplotlib API You can find lots of neat plotting examples at http://matplotlib.org/examples/index.html. Most of them involve both pyplot function calls and operations on matplotlib API objects. The pattern is simple: ```python matplotlib.OBJECT() = matplotlib.pyplot.FUNCTION() ``` Let's go back to our example, in which we were discarding the return values of the pyplot functions. 
End of explanation """ # Here's an example of a setter method line, = plt.plot([1,2,3,4]) line.set_color('r') # and here's the pyplot way of doing the same thing: _ = plt.plot([1,2,3,4],'r') # an example of some getter methods line, = plt.plot([1,2,3,4]) axes = line.axes y_axis = axes.get_yaxis() # And another setter (and getter) call labels = y_axis.set_ticklabels([1,'',2,'','three','',4]) # and the pyplot version import numpy as np plt.plot([1,2,3,4]) _ = plt.yticks(np.arange(1,4.5,0.5),(1,'',2,'','three','',4)) # now for something really crazy import matplotlib.ticker as ticker # plot and get the y-axis line, = plt.plot([1,2,3,4]) axes = line.axes y_axis = axes.get_yaxis() # I want major tick marks at integer y-values (meh, just use pyplot) _ = plt.yticks(range(1,5)) # I want a single, un-labeled, minor tick at y = 2.75 y_axis.set_minor_locator( ticker.FixedLocator([2.75])) # I want proportional precision in my y-labels def func(x,pos): """ return a string representation of `x` with floating point precision `pos` """ return_str = '{0:.' + str(pos) + 'f}' return return_str.format(x) tick_formatter = ticker.FuncFormatter( func ) labels = y_axis.set_major_formatter(tick_formatter) """ Explanation: line is an object representing the line on the chart and all of its properties. Because it's an object, we can use its methods to modify the line. End of explanation """ import matplotlib.pyplot as plt # use pyplot to get a figure fig = plt.figure() # create an array of Axes objects and get the specified one ax = fig.add_subplot(2,1,1) # two rows, one column, first plot """ Explanation: What do we observe? pyplot functions are convenient, even when dealing with API objects the pyplot namespace is flat, while the API namespace is hiearchical somethings you just can't do with pyplot The matplotlib API We've seen how pyplot functions interact with maplotlib API by returning objects. Now we'll briefly note the 3 layers of classes that make the API work. 
Canvas - the area onto which the figure is drawn Inherits from matplotlib.backend_bases.FigureCanvas Renderer - the object which knows how to draw on the Canvas Inherits from matplotlib.backend_bases.Renderer Artists - the object that knows how to use a renderer to paint onto the canvas Inherits from matplotlib.artist.Artist Artist objects handle all the high level constructs like representing and laying out the figure, text, and lines. Almost all objects interacted with are Artists, including container-like objects such as Figure, Axes, Axis, and graphical primitives such as Rectangle, Line2D, and Text. <img src="files/fig_map.png"> Axes objects are one of the most important API components, because they are the containers that hold and reference most other objects. The Axes class provides helpful interface methods like plot and hist which create primitive Artist instances, like Line2D, from input numpy arrays and strings. End of explanation """ fig2 = plt.figure() ax2 = fig2.add_axes([0.15, 0.1, 0.7, 0.3]) """ Explanation: Note above that there is a Subplot class, and that inherits from Axes. A Figure can place an Axes at an arbitrary location with the add_axes method. End of explanation """ # have to recreate the figure and subplots because inline plotting is opaque. fig = plt.figure() ax = fig.add_subplot(2,1,1) t = np.arange(0.0, 1.0, 0.01) s = np.sin(2*np.pi*t) line, = ax.plot(t, s, color='blue', lw=2) """ Explanation: Let's make some dummy data. End of explanation """ ax.lines[0] line """ Explanation: Now, let's compare the 'line' object, a Line2D instance, to the graphical primitives that the Axes knows about. 
End of explanation """ import matplotlib.pyplot as plt import matplotlib.colors as colors import matplotlib.cm as cmx import numpy as np %matplotlib inline # some random data: NCURVES = 6 np.random.seed(101) # each curve consists of 20 points with random values in [0,1] curves = [np.random.random(20) for i in range(NCURVES)] # make one Subplot/Axes object in the figure, # and make the figure a bit bigger fig = plt.figure(figsize=(14,8)) ax = fig.add_subplot(111) # use pyplot to get a color map jet = plt.get_cmap('jet') # make an object that represent a normalization onto a range color_normalizer = colors.Normalize(vmin=0, vmax=len(curves)-1) # an object to map scalar data in the range specified by the normalizer # to a color on the specified color map scalar_map = cmx.ScalarMappable(norm=color_normalizer,cmap=jet) for idx in range(len(curves)): # get the np.array line = curves[idx] # map it to a color value color_val = scalar_map.to_rgba(idx) # make the legend text color_text = ( 'color {0}: ({1:4.2f},{2:4.2f},{3:4.2f})'.format(idx,color_val[0],color_val[1],color_val[2]) ) # use Axes.plot to plot it _ = ax.plot(line, color=color_val, label=color_text) # legend stuff handles,labels = ax.get_legend_handles_labels() ax.legend(handles, labels, loc='upper right') ax.grid() #plt.show() """ Explanation: As expected (hoped for?), they are the same object. Helper libraries In addition to modules that define common Artists, the API contains a bunch of modules that provide helper classes around a particular topic. You saw an example earlier when we imported matplotlib.ticker and used its Locator and Formatter classes. Other examples are (http://matplotlib.org/api/index.html): dates units finance legend colors animation Most of what is implemented in these modules are additional Artists, which can delegate their rendering to the same classes used by other Artists. Final Example Let's dig into an example that uses the colors module. 
There are patterns of use here that are typical across the API. See documentation and examples at http://matplotlib.org/examples/color/colormaps_reference.html. Goal: make a dynamic set of curves with different colors, and label the curves by their RGB values. Why? Suppose you have an unknown number of curves that will display on a dashboard. To distinguish them, you'll need to dynamically assign distinguishable colors. End of explanation """
debsankha/network_course_python
exercises/01-exercise-python.ipynb
gpl-2.0
numbers = [[1,2,3],[4,5,6],[7,8,9]]
words = ['if','i','could','just','go','outside','and','have','an','ice','cream']
"""
Explanation: 1. Party game: squeezed
One guessing game, called "squeezed", is very common in parties. It consists of a player, the chooser, who writes down a number between 00–99. The other players then take turns guessing numbers, with a catch: if one says the chosen number, he loses and has to do something daft. If the guessed number is not the chosen one, it splits the range. The chooser then states the part which contains the chosen number. If the new region only has one number, the chooser is said to be "squeezed" and is punished.
An example of gameplay would be:
Chooser writes down (secretly) his number (let's say, 30).
Chooser: "State a number between 00 and 99."
Player: "42".
Chooser: "State a number between 00 and 42."
Player: "26".
Chooser: "State a number between 26 and 42."
$\vdots$
Chooser: "State a number between 29 and 32."
Player: "31".
Chooser dances some very silly children song.
Implement this game in Python, where the computer is the chooser. Useful: $\mathtt{random.randint()}$ and $\mathtt{input()}$.
2. List comprehensions
Given the following lists:
End of explanation
"""
# NOTE(review): scipy.ndimage.imread was deprecated in SciPy 1.0 and removed in
# SciPy 1.2; with a current SciPy this cell needs imageio.imread (or
# matplotlib.pyplot.imread) instead. Kept as-is to match the course material.
import scipy.ndimage

im = scipy.ndimage.imread("images/ill.png")
plt.imshow(im)
plt.grid(0)
plt.axis('Off')
"""
Explanation: 'numbers' is a list of lists. Using a list comprehension, flatten 'numbers' so it is a list of only numbers (not list of lists).
use the newly flattened 'numbers' and filter it in a way that it only contains odd numbers.
using a list comprehension, remove all words containing an 'i' from 'words'
using a list comprehension, remove all words containing more than two vowels from 'words'.
find all prime numbers between 1 and 100 using a single list comprehension
3. Cartesian/Polar Coordinates
Points may be given in polar $(r, \theta)$ or cartesian coordinates $(x, y)$, see Figure 1.
<img src="https://upload.wikimedia.org/wikipedia/commons/1/18/Polar_coordinates_.png" />
Figure 1. Relationship between polar and cartesian coordinates.
3.1 Polar to cartesian
Write a function $\mathtt{pol2cart}$, that takes a tuple $\mathtt{(r, θ)}$ in polar coordinates and returns a tuple in cartesian coordinates.
3.2 Cartesian to polar
Write the inverse function $\mathtt{cart2pol}$, such that $\mathtt{pol2cart( cart2pol( ( x,y) ) )}$ is $\mathtt{(x, y)}$ for any input $\mathtt{(x, y)}$.
3.3 Extend the two functions:
such that they can in addition handle lists of tuples.
4. A bit of statistics
Draw $N=10000$ uniformly distributed random numbers (use np.random.uniform, for example). Plot its histogram and check that it looks uniform.
Now draw another such sample, and sum the two. How does the histogram of the sum look like? Continue to sum $3,4,5,..$ such samples and keep plotting the histogram. It should quickly start to look like a gaussian.
5. Some numpy foo
5.1 Defeat optical illusions
This is a quite famous optical illusion:
<img src="images/ill.png"/>
The rows are perfectly straight, although they appear crooked. Use numpy and slicing operations to verify for yourself that they are indeed so.
The code for loading the image as a numpy array is provided below:
End of explanation
"""
x = np.arange(-1,1, 0.01)
y = np.arange(-1,1, 0.01)
X,Y = np.meshgrid(x,y)
Z = X**2 + Y**2
Z = np.where(Z<1, 1, 0)
plt.matshow(Z)
"""
Explanation: 5.2 Compute $\pi$:
Below is an array $Z$ which, when plotted, produces an image of a circle. Compute the value of $\pi$ by counting the number of black pixels in the array.
End of explanation
"""
mos = scipy.ndimage.imread("images/mosaic_grey.png")
plt.imshow(mos)
"""
Explanation: 5.3 Twist and turn
Convert this image:
<img width = "400px" src = "images/mosaic_grey.png" />
to
<img width = "400px" src = "images/mosaic_conv.png" />
End of explanation
"""
fsilva/deputado-histogramado
notebooks/Deputado-Histogramado-2.ipynb
gpl-3.0
%matplotlib inline import pylab import matplotlib import pandas import numpy dateparse = lambda x: pandas.datetime.strptime(x, '%Y-%m-%d') sessoes = pandas.read_csv('sessoes_democratica_org.csv',index_col=0,parse_dates=['data'], date_parser=dateparse) """ Explanation: Deputado Histogramado expressao.xyz/deputado/ Como processar as sessões do parlamento Português Índice Reunír o dataset Contando as palavras mais comuns Fazendo histogramas Representações geograficas Simplificar o dataset e exportar para o expressa.xyz/deputado/ O que se passou nas mais de 4000 sessões de discussão do parlamento Português que ocorreram desde 1976? Neste notebook vamos tentar visualizar o que se passou da maneira mais simples - contando palavras, e fazendo gráficos. Para obter os textos de todas as sessões usaremos o demo.cratica.org, onde podemos aceder facilmente a todas as sessões do parlamento de 1976 a 2015. Depois com um pouco de python, pandas e matplotlib vamos analisar o que se passou. Para executar estes notebook será necessário descarregar e abrir com o Jupiter Notebooks (a distribuição Anaconda faz com que instalar todas as ferramentas necessárias seja fácil - https://www.continuum.io/downloads) Parte 2 - Contando Palavras Código para carregar os dados do notebook anterior: End of explanation """ #define a funçao que conta aplausos # procura 'aplauso' (nota: tambem conta 'aplausos') def conta_aplausos(texto): return texto.count('aplauso') #aplica a funçao a cada sessão e cria nova coluna com os resultados sessoes['n_aplausos'] = sessoes['sessao'].map(conta_aplausos) ax = sessoes.plot(x='data',y='n_aplausos',figsize=(15,6),linewidth=0.1,marker='.',markersize=1) ax.set_xlabel('Data da sessão') ax.set_ylabel('Numero de aplausos') """ Explanation: Bem, agora podemos começar a brincadeira. Pandas, quantas vezes aparece a palavra 'Aplausos' no texto? 
End of explanation """ #define funçao que verifica se o numero de aplausos é maior que 200 def tem_mais_que_250_aplausos(val): return val['n_aplausos']>250 #aplica a função a todas as linhas do dataframe # assim cria-se um novo dataframe apenas com os elementos onde a função é 'true' sessoes_250aplausos = sessoes[tem_mais_que_250_aplausos] sessoes_250aplausos """ Explanation: Salta logo á vista que há sessões excepcionais com muito mais aplausos que outras (para cima de 200 menções de aplausos). Talvez sejam mais interessantes? ou controversas? Vamos investigar as sessões mais animadas. Pandas, quais são as sessões com mais de 250 aplausos? End of explanation """ import re from collections import Counter # aceita um texto, e retorna as 10 palavras mais comuns, com o correspondente número de ocorrencias def conta_palavras(texto): palavras = re.split(';|,|\n| |\(|\)|\?|\!|:',texto) #separa as palavras palavras = [x.title() for x in palavras if len(x)>=5] #organiza e remove as palavras com menos de 5 caracteres return Counter(palavras).most_common(10) # analisa as palavras e determina as 10 mais frequentes # conta palavras em cada sessao com mais de 250 aplausos l = sessoes_250aplausos['sessao'].map(conta_palavras) datas = sessoes_250aplausos['data'][l.index] #selecciona apenas as datas, a sintaxe é contrieved #agrupa os dados num dataframe, com uma coluna por data, e cada célula indicando ('n_contagens' x 'palavra') dados = [[str(str(z)+' x '+y) for (y,z) in l[x]] for x in l.index] pandas.DataFrame(dados,index=datas).transpose() """ Explanation: Várias sessões. Analisemos as palavras mais frequentes em cada uma. 
End of explanation """ # conta as 10 palavras mais comuns, tendo em conta expressoes comuns com significado importante, e removendo as palavras mais comuns que não nos ajudam a perceber o que se esta a passar def conta_palavras_xpto(texto): # substitui expressoes expressoes = [['milhões de euros','milhões-de-euros'],['programa do governo','programa-do-Governo'],['orçamento do estado','orçamento-de-Estado'],['orçamento de Estado','orçamento-de-Estado'],['união europeia','união-europeia'],['bernardino soares','bernardino-soares'],['antónio josé seguro','antónio-seguro'],['honório novo','honório-novo'],['moção de censura','moção-de-censura'],['partido socialista','partido-socialista'],['partido social democrata','partido-social-democrata'],['bloco de esquerda','bloco-de-esquerda'],['partido comunista','partido-comunista'],['jerónimo de sousa','jerónimo-de-sousa'],['luís montenegro','luís-montenegro'],['medidas','medida']] for chave in expressoes: texto = texto.replace(chave[0],chave[1]) #remove palavras nao interessantes lista = ['portugal','portugueses','estado','governo','deputado','deputada','primeiro-ministro','presidente','ministro','ministra','sobre','fazer','vozes','também','aplausos','quando','porque','muito','cds-pp','palavra','ainda','dizer','todos','deste','nesta','nossa','temos','nosso','nossa','estão','maria','sempre','sr.as','neste','silva','favor','agora'] for palavra in lista: texto = texto.replace(palavra,'') palavras = re.split(';|,|\n| |\(|\)|\?|\!|:',texto) #separa as palavras palavras = [x.title() for x in palavras if len(x)>=5] #organiza e remove as palavras com menos de 5 caracteres return Counter(palavras).most_common(10) # analisa as palavras e determina as 10 mais frequentes # conta palavras em cada sessao com mais de 250 aplausos l = sessoes_250aplausos['sessao'].map(conta_palavras_xpto) datas = sessoes_250aplausos['data'][l.index] #agrupa os dados num dataframe, com uma coluna por data, e cada célula indicando ('n_contagens' x 'palavra') 
dados = [[str(str(z)+' x '+y) for (y,z) in l[x]] for x in l.index] pandas.DataFrame(dados,index=datas).transpose() """ Explanation: Antes de tirarmos conclusões, resolvemos 2 problemas com os dados. Primeiro, há palavras que não adicionam muito á conversa ('governo', 'muito','ministro','todos', etc). Segundo, há palavras que podem ter um significado diferente consoante são usadas sozinhas ou numa expressão (e.g. 'Milhões de Euros' vs 'Milhões de Pensionistas'). Vamos reprocessar os dados com isto em conta. End of explanation """ def conta_orcamento_de_estado(texto): return (texto.count('orçamento de estado') + texto.count('orçamento do estado')) #aplica a funçao a cada sessão e cria nova coluna com os resultados sessoes['n_orcamento_de_estado'] = sessoes['sessao'].map(conta_orcamento_de_estado) ax = sessoes.plot(x='data',y='n_orcamento_de_estado',figsize=(15,6),linewidth=0.2,marker='.',markersize=0.5) start, end = ax.get_xlim() #poe uma tick por ano import numpy #para criar um eixo do x uniformemente espaçado ax.xaxis.set_ticks((numpy.linspace(start, end, 29))) ax.set_xlabel('Data da sessão') ax.set_ylabel('Numero de ocorrencias de "Orçamento de Estado"') """ Explanation: Vemos que as sessões mais aplaudidas são ou sobre o orçamento de estado ou discussões aparentemente partidárias. Curiosamente as sessões sobre o orçamento de estado parecem ser todas no fim de Outubro. Vamos ver se isso tambem é verdade nos outros anos. Contemos o número de ocorrencias da expressão e façamos o gráfico. End of explanation """ # retorna um vector com um item por sessao, e valor verdadeira se o número da semana é =i, falso se nao é def selecciona_semana(data,i): return data.map(lambda d: d.weekofyear == i) import numpy ocorrencias_por_semana = numpy.zeros(53) for i in range(1,53): # para cada semana do ano, calcula um vector com verdadeiro para as sessoes que ocorreram nesta semana e falso para as que nao ocorreram. 
soma tudo (verdadeiro = 1, falso = 0) # com esse vector, filtra apenas os items da coluna 'n_orcamento_de_estado' onde o vector é verdadeiro/sessao foi na semana que estamos a contar # soma as contagens das sessoes seleccionadas/filtradas ocorrencias_por_semana[i] = numpy.sum(sessoes['n_orcamento_de_estado'][selecciona_semana(sessoes['data'],i)]) f = pylab.figure(figsize=(10,8)) ax = pylab.bar(range(1,53),ocorrencias_por_semana[1:53]) pylab.xlabel('Semana do ano') pylab.ylabel('Ocorrencias de "Orçamento de Estado"') """ Explanation: Interessante - antes de 1984 não se usava a expressão orçamento de estado ou orçamento do estado. Tambem, parece ser periódico. Vamos histogramar isto para ver o número de ocorrencias em cada semana do ano: End of explanation """ def selecciona_antes_ano(data,ano): return data.map(lambda d: d.year < ano) sessoes_antes1984 = sessoes[selecciona_antes_ano(sessoes['data'],1984)] sessoes_antes1988 = sessoes[selecciona_antes_ano(sessoes['data'],1988)] sessoes_antes1984 ax = sessoes_antes1988.plot(x='data',y='n_orcamento_de_estado',figsize=(15,6),linewidth=0.2,marker='.',markersize=0.5) ax.set_xlabel('Data da sessão') ax.set_ylabel('Numero de ocorrencias de "Orçamento de Estado"') """ Explanation: Parece que se fala em orçamento de estado +- sempre com a mesma frequência de Jan a Junho, depois há férias (Julho-Agosto), e na 3ª semana de Outubro a coisa começa a aquecer - o termo orçamento de estado começa-se a usar 2x ou 3x mais do que o normal. Antes de mais, vamos tentar perceber porque não se usa a expressão orçamento de estado antes de 1984. 
Vamos analisar as palavras: End of explanation """ def conta_orcamento(texto): return texto.count('orçamento') sessoes_antes1988.loc[:,'n_orcamento'] = sessoes_antes1988['sessao'].apply(conta_orcamento) ax = sessoes_antes1988.plot(x='data',y='n_orcamento',figsize=(15,6),linewidth=0.2,marker='.',markersize=0.5) ax.set_xlabel('Data da sessão') ax.set_ylabel('Numero de ocorrencias de "Orçamento"') """ Explanation: Foi claramente em 1984 que a moda começou. Vamos ver se pelo menos usavam 'orçamento' com a mesma frequencia que em 1985. End of explanation """ # Cria uma lista com todas as palavras no texto com mais de 4 caracteres, na sequência que aparecem neste def agrupa_palavras(texto): palavras = re.split(';|,|\n| |\(|\)|\?|\!|:',texto) # separa as palavras palavras = [x.title() for x in palavras if len(x)>=5] # organiza e remove as palavras com menos de 5 caracteres return palavras # Cria uma lista das 40 palavras mais comuns neste conjunto de sessões, e conta o número de ocorrencias def conta_palavras(sessoes): lista = sessoes['sessao'].map(agrupa_palavras) # cria uma lista de 'lista de palavras', um elemento por sessao palavras = [] for l in lista: palavras.extend(l) # junta as várias 'listas de palavras' todas na mesma lista (i.e. 
junta as várias sessoes, antes de contar) return Counter(palavras).most_common(40) # conta as palavras mais frequentes def selecciona_ano(data,ano): return data.map(lambda d: d.year == ano) #filtra sessoes que ocorreram num dado ano sessoes_1980 = sessoes[selecciona_ano(sessoes['data'],1980)] sessoes_1990 = sessoes[selecciona_ano(sessoes['data'],1990)] sessoes_2000 = sessoes[selecciona_ano(sessoes['data'],2000)] sessoes_2010 = sessoes[selecciona_ano(sessoes['data'],2010)] # conta palavras mais frequentes nos vários grupos de sessoes c1980 = conta_palavras(sessoes_1980) c1990 = conta_palavras(sessoes_1990) c2000 = conta_palavras(sessoes_2000) c2010 = conta_palavras(sessoes_2010) #organiza os dados numa tabela dados = [[str(str(z)+' x '+y) for (y,z) in c1980]] dados.append([str(str(z)+' x '+y) for (y,z) in c1990]) dados.append([str(str(z)+' x '+y) for (y,z) in c2000]) dados.append([str(str(z)+' x '+y) for (y,z) in c2010]) pandas.DataFrame(dados,index=['1980','1990','2000','2010']).transpose() """ Explanation: Orçamento sempre se usou, pelos vistos. Por curiosidade, vamos analisar as palavras mais frequentes de todas as sessões de vários anos 1980, 1990, 2000 e 2010 e ver se há alguma tendência. 
End of explanation """ # Cria uma lista das 100 palavras mais comuns neste conjunto de sessões, e conta o número de ocorrencias def conta_palavras100(sessoes): lista = sessoes['sessao'].map(agrupa_palavras) # cria uma lista de 'lista de palavras', um elemento por sessao palavras = [] for l in lista: palavras.extend(l) # junta as 'listas de palavras' todas na mesma lista return Counter(palavras).most_common(100) # conta as palavras mais frequentes todas_sessoes = pandas.concat([sessoes_1980, sessoes_1990, sessoes_2000, sessoes_2010]) contagem = conta_palavras100(todas_sessoes) dados = [[str(str(z)+' x '+y) for (y,z) in contagem]] palavras_a_retirar = sorted([y for (y,z) in contagem],key=len) pandas.DataFrame(dados).transpose() # Cria uma lista com todas as palavras no texto com mais de 4 caracteres, na sequência que aparecem neste # mas não incluí as 100 palavras mais comuns nesta lista def agrupa_palavras2(texto): texto = texto.lower() #processa tudo em minusculas #remove palavras nao interessantes for palavra in palavras_a_retirar: texto = texto.replace(palavra.lower(),'') palavras = re.split(';|,|\n| |\(|\)|\?|\!|:',texto) # separa as palavras palavras = [x.title() for x in palavras if len(x)>=5] # organiza e remove as palavras com menos de 5 caracteres return palavras def conta_palavras2(sessoes): lista = sessoes['sessao'].map(agrupa_palavras2) # cria uma lista de 'lista de palavras', um elemento por sessao palavras = [] for l in lista: palavras.extend(l) # junta as 'listas de palavras' todas na mesma lista return Counter(palavras).most_common(40) # conta as palavras mais frequentes c1980 = conta_palavras2(sessoes_1980) c1990 = conta_palavras2(sessoes_1990) c2000 = conta_palavras2(sessoes_2000) c2010 = conta_palavras2(sessoes_2010) dados = [[str(str(z)+' x '+y) for (y,z) in c1980]] dados.append([str(str(z)+' x '+y) for (y,z) in c1990]) dados.append([str(str(z)+' x '+y) for (y,z) in c2000]) dados.append([str(str(z)+' x '+y) for (y,z) in c2010]) 
pandas.DataFrame(dados,index=['1980','1990','2000','2010']).transpose() """ Explanation: Sao praticamente as mesmas palavras. Calculemos as mais frequentes de todos juntos e retiremo-las desta contagem: End of explanation """ def conta_decretolei(texto): return (texto.count('decreto-lei')+texto.count('decretos-lei')) sessoes['n_decreto'] = sessoes['sessao'].map(conta_decretolei) ax = sessoes.plot(x='data',y='n_decreto',figsize=(15,6),linewidth=0.2,marker='.',markersize=0.5) ax.set_xlabel('Data da sessão') ax.set_ylabel('Numero de ocorrencias de decreto-lei') """ Explanation: Quando descontamos as 100 palavras mais comuns salientam-se mais as diferenças entre os textos. Mesmo assim é difícil tirar conclusões profundas, mas há algumas mais básicas: - Paulo Portas estava a todo o gás no ano 2000, com Paulo e Portas a ocorrerem dois milhares de vezes - os Verdes aparecem apenas nas colunas dos anos 2000 e 2010, apesar de tambem terem 2 deputados no ano 1990. Em 1980 ainda não tinha sido fundado o partido. - 'Empresas' não aparece nem em 1990 nem 2000. (e não é uma das 100 palavras mais usadas no parlamento) - 'Bloco' só aparece em 2010. Foi fundado em 1999, o que pode explicar isto. No entanto já tinha 4 deputados em 1999-2002. - 'Crise' aparece apenas em 2010. - 'Decreto-Lei' apenas aparece em 1980. Será que se falava mais de decretos lei nessa altura? 
End of explanation """ def selecciona_ano(data,i): return data.map(lambda d: d.year == i) import numpy ocorrencias_por_ano = numpy.zeros(2016-1976) for i in range(0,2016-1976): # para cada ano, soma o número de ocorrencias de decreto # (filtrando as sessoes que ocorrem num dado ano, selecionando-as da coluna 'n_decreto' e somando todos os valores seleccionados) ocorrencias_por_ano[i] = numpy.sum(sessoes['n_decreto'][selecciona_ano(sessoes['data'],i+1976)]) f = pylab.figure(figsize=(10,8)) ax = pylab.bar(range(1976,2016),ocorrencias_por_ano) pylab.xlabel('Ano') pylab.ylabel('Ocorrencias de "decreto-lei"') """ Explanation: Há picos maiores na decada de 80, mas o que interessa neste gráfico é a área,o que é difícil de quantificar num gráfico com tantos pontos. Vamos agrupar por ano: End of explanation """
bpgc-cte/python2017
Week 5/Lecture_11_Decorators_Multiple_Inheritance.ipynb
mit
def func2(func1): return func1 + 1 def func3(func2, arg):# Here func2 is being passed as a parameter. return func2(arg) print(func2(2)) print(func3(func2, 3)) def user_defined_decorator(function1): def wrapper(): print("This statement is being printed before the passed function is called.") function1() print("This statement is being printed after the passed function is called.") return wrapper @us def task(): print("Lite") user_defined_decorator(task)() """ Explanation: Object Oriented Programming - Decorators and Multiple Inheritance Decorators In mathematics higher order functions are ones which take function/s as arguments and return a function as a result. Such capabilites are implemented in Python by using decorators. Of course, you can implement a similar functionality using combination of def and lambda. However, it is generally unsafe practice to use lambda methods. In fact, the creator of Python language, Guido Van Rossum suggested its removal but the whole community of programmers was too used to it, protested against its removal and hence it remained. A decorator is a function which takes as input another function and extends its behaviour/capability without making any explicit changes to it. First of all you need to understand the idea of first class objects. A first class object is an language entity that can be treated as a native variable. That means it can be created, destroyed, passed as an argument to a function, printed as you wish, etc. End of explanation """ class BITSian(): def __init__(self, name, bitsian=True): self.name = name self.bitsian = bitsian @staticmethod def is_object(): return True def is_human(cls): print(cls) return True def get_name(self): print(self) return self.name def is_bitsian(self): return str(self.name + " is a BITSian : " + str(self.bitsian)) p = BITSian("Reuben D'Souza") print(p.get_name()) print(p.is_bitsian()) # If @staticmethod wasn't there, then this would result in an error cause arguments don't match. 
print(p.is_object()) # If @classmethod wasn't there, then this "p" would be interpreted as "object" type and not "class" type. print(classmethod(p.is_human)()) class BITSian(): def __init__(self, name, bitsian=True): self.k = name self.bitsian = bitsian @staticmethod def is_object(): return True @classmethod def is_human(cls): return True @property def name(self): print("TEST getter") return self.k @name.setter def name(self, name): print("TEST setter") self.k = name def is_bitsian(self): return str(self.name + " is a BITSian : " + str(self.bitsian)) def __str__(self): return "BITSian : " + self.name p = BITSian("Keerthana") print(p) p.name ="Rohan Prabhu" print(p) """ Explanation: Common decorators @staticmethod acts as a wrapper and informs the interpreter that the method is one which does not depend on the class or the object. It is just a method which is logical to include in the class body. @classmethod acts as a wrapper and informs the interpreter that the method is one which depends on the class. This can be clearly understood cause the first argument is interpreted as the class type. It is a method that is commonly shared by all objects of the class type. End of explanation """ class A(): def save(self): print("Save in A") class B(): def save(self): print("Save in B") class C(A,B): def __init__(self): pass a = A() c = C() b = B() """ Explanation: @slow, @XFAIL, etc are decorators used in unit testing(i.e. pytest). They will make sense only when unit testing is taught. Inheriting from Multiple Classes End of explanation """ class A: def test(self): print("Test of A called") class B(A): def test(self): print("Test of B called") class C(A): def test(self): print("Test of C called") class D(C, B): pass print(D.mro()) #d = D() #d.test() D.mro()[2].test(d)# This is a terrible thing to write in development level code!! Think about re-implementation. 
""" Explanation: The Diamond Problem Consider a situation where there is one parentclass A and then two more subclasses B and C. Then consider a further subclass D inheriting from B and C both. If there be a method defined in A which is inherited in B and C and then overidden, which one will D use ? End of explanation """
dsacademybr/PythonFundamentos
Cap02/Notebooks/DSA-Python-Cap02-02-Variaveis.ipynb
gpl-3.0
# Versão da Linguagem Python from platform import python_version print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version()) """ Explanation: <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 2</font> Download: http://github.com/dsacademybr End of explanation """ # Atribuindo o valor 1 à variável var_teste var_teste = 1 # Imprimindo o valor da variável var_teste # Imprimindo o valor da variável print(var_teste) # Não podemos utilizar uma variável que não foi definida. Veja a mensagem de erro. my_var var_teste = 2 var_teste type(var_teste) var_teste = 9.5 type(var_teste) x = 1 x """ Explanation: Variáveis e Operadores End of explanation """ pessoa1, pessoa2, pessoa3 = "Maria", "José", "Tobias" pessoa1 pessoa2 pessoa3 fruta1 = fruta2 = fruta3 = "Laranja" fruta1 fruta2 # Fique atento!!! Python é case-sensitive. Criamos a variável fruta2, mas não a variável Fruta2. # Letras maiúsculas e minúsculas tem diferença no nome da variável. Fruta2 """ Explanation: Declaração Múltipla End of explanation """ x1 = 50 x1 # Mensagem de erro, pois o Python não permite nomes de variáveis que iniciem com números 1x = 50 """ Explanation: Pode-se usar letras, números e underline (mas não se pode começar com números) End of explanation """ # Não podemos usar palavras reservadas como nome de variável break = 1 """ Explanation: Não se pode usar palavras reservadas como nome de variável False class finally is return None continue for lambda try True def from nonlocal while and del global not with as elif if or yield assert else import pass break except in raise End of explanation """ largura = 2 altura = 4 area = largura * altura area perimetro = 2 * largura + 2 * altura perimetro # A ordem dos operadores é a mesma seguida na Matemática perimetro = 2 * (largura + 2) * altura perimetro """ Explanation: Variáveis atribuídas a outras variáveis e ordem dos operadores End of explanation """ idade1 = 25 idade2 = 35 idade1 + idade2 idade2 - idade1 
idade2 * idade1 idade2 / idade1 idade2 % idade1 """ Explanation: Operações com variáveis End of explanation """ nome = "Steve" sobrenome = "Jobs" fullName = nome + " " + sobrenome fullName """ Explanation: Concatenação de Variáveis End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/test-institute-1/cmip6/models/sandbox-3/aerosol.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'test-institute-1', 'sandbox-3', 'aerosol') """ Explanation: ES-DOC CMIP6 Model Properties - Aerosol MIP Era: CMIP6 Institute: TEST-INSTITUTE-1 Source ID: SANDBOX-3 Topic: Aerosol Sub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. Properties: 69 (37 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:43 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Software Properties 3. Key Properties --&gt; Timestep Framework 4. Key Properties --&gt; Meteorological Forcings 5. Key Properties --&gt; Resolution 6. Key Properties --&gt; Tuning Applied 7. Transport 8. Emissions 9. Concentrations 10. Optical Radiative Properties 11. Optical Radiative Properties --&gt; Absorption 12. Optical Radiative Properties --&gt; Mixtures 13. Optical Radiative Properties --&gt; Impact Of H2o 14. Optical Radiative Properties --&gt; Radiative Scheme 15. Optical Radiative Properties --&gt; Cloud Interactions 16. Model 1. 
Key Properties Key properties of the aerosol model 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of aerosol model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of aerosol model code End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.scheme_scope') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "troposhere" # "stratosphere" # "mesosphere" # "mesosphere" # "whole atmosphere" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Scheme Scope Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Atmospheric domains covered by the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.basic_approximations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Basic approximations made in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "3D mass/volume ratio for aerosols" # "3D number concenttration for aerosols" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. 
Prognostic Variables Form Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Prognostic variables in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 1.6. Number Of Tracers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of tracers in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.family_approach') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.7. Family Approach Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are aerosol calculations generalized into families of species? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Software Properties Software properties of aerosol code 2.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses atmospheric chemistry time stepping" # "Specific timestepping (operator splitting)" # "Specific timestepping (integrated)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Timestep Framework Physical properties of seawater in ocean 3.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Mathematical method deployed to solve the time evolution of the prognostic variables End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Split Operator Advection Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for aerosol advection (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.3. Split Operator Physical Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for aerosol physics (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.4. Integrated Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep for the aerosol model (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Implicit" # "Semi-implicit" # "Semi-analytic" # "Impact solver" # "Back Euler" # "Newton Raphson" # "Rosenbrock" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3.5. Integrated Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify the type of timestep scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Meteorological Forcings ** 4.1. Variables 3D Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Three dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Variables 2D Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Two dimensionsal forcing variables, e.g. land-sea mask definition End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. Frequency Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Frequency with which meteological forcings are applied (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Resolution Resolution in the aersosol model grid 5.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Canonical Horizontal Resolution Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 5.3. Number Of Horizontal Gridpoints Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 5.4. Number Of Vertical Levels Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.5. Is Adaptive Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Default is False. Set true if grid resolution changes during execution. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Tuning Applied Tuning methodology for aerosol model 6.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. 
Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics of mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Transport Aerosol transport 7.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of transport in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Specific transport scheme (eulerian)" # "Specific transport scheme (semi-lagrangian)" # "Specific transport scheme (eulerian and semi-lagrangian)" # "Specific transport scheme (lagrangian)" # TODO - please enter value(s) """ Explanation: 7.2. 
Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method for aerosol transport modeling End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Mass adjustment" # "Concentrations positivity" # "Gradients monotonicity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.3. Mass Conservation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method used to ensure mass conservation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.convention') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Convective fluxes connected to tracers" # "Vertical velocities connected to tracers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.4. Convention Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Transport by convention End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Emissions Atmospheric aerosol emissions 8.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of emissions in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Prescribed (climatology)" # "Prescribed CMIP6" # "Prescribed above surface" # "Interactive" # "Interactive above surface" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method used to define aerosol species (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Vegetation" # "Volcanos" # "Bare ground" # "Sea surface" # "Lightning" # "Fires" # "Aircraft" # "Anthropogenic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.3. Sources Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Sources of the aerosol species are taken into account in the emissions scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Interannual" # "Annual" # "Monthly" # "Daily" # TODO - please enter value(s) """ Explanation: 8.4. Prescribed Climatology Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify the climatology type for aerosol emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. 
Prescribed Climatology Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and prescribed via a climatology End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.6. Prescribed Spatially Uniform Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.7. Interactive Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.8. Other Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and specified via an &quot;other method&quot; End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.9. Other Method Characteristics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Characteristics of the &quot;other method&quot; used for aerosol emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.concentrations.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Concentrations Atmospheric aerosol concentrations 9.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of concentrations in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.2. Prescribed Lower Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the lower boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.3. Prescribed Upper Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the upper boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.4. Prescribed Fields Mmr Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed as mass mixing ratios. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.5. Prescribed Fields Mmr Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed as AOD plus CCNs. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10. Optical Radiative Properties Aerosol optical and radiative properties 10.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of optical and radiative properties End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11. Optical Radiative Properties --&gt; Absorption Absortion properties in aerosol scheme 11.1. Black Carbon Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.2. Dust Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of dust at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.3. Organics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of organics at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12. Optical Radiative Properties --&gt; Mixtures ** 12.1. External Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there external mixing with respect to chemical composition? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12.2. Internal Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there internal mixing with respect to chemical composition? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.3. Mixing Rule Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If there is internal mixing with respect to chemical composition then indicate the mixinrg rule End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13. Optical Radiative Properties --&gt; Impact Of H2o ** 13.1. Size Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does H2O impact size? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.2. Internal Mixture Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does H2O impact internal mixture? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14. Optical Radiative Properties --&gt; Radiative Scheme Radiative scheme for aerosol 14.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of radiative scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.2. Shortwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of shortwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.3. Longwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of longwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Optical Radiative Properties --&gt; Cloud Interactions Aerosol-cloud interactions 15.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of aerosol-cloud interactions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.2. Twomey Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the Twomey effect included? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.3. Twomey Minimum Ccn Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If the Twomey effect is included, then what is the minimum CCN number? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.4. Drizzle Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the scheme affect drizzle? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.5. Cloud Lifetime Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the scheme affect cloud lifetime? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.6. Longwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of longwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Model Aerosol model 16.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Dry deposition" # "Sedimentation" # "Wet deposition (impaction scavenging)" # "Wet deposition (nucleation scavenging)" # "Coagulation" # "Oxidation (gas phase)" # "Oxidation (in cloud)" # "Condensation" # "Ageing" # "Advection (horizontal)" # "Advection (vertical)" # "Heterogeneous chemistry" # "Nucleation" # TODO - please enter value(s) """ Explanation: 16.2. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Processes included in the Aerosol model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Radiation" # "Land surface" # "Heterogeneous chemistry" # "Clouds" # "Ocean" # "Cryosphere" # "Gas phase chemistry" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.3. Coupling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Other model components coupled to the Aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "DMS" # "SO2" # "Ammonia" # "Iodine" # "Terpene" # "Isoprene" # "VOC" # "NOx" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.4. Gas Phase Precursors Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of gas phase aerosol precursors. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Bulk" # "Modal" # "Bin" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.5. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Type(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.bulk_scheme_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Nitrate" # "Sea salt" # "Dust" # "Ice" # "Organic" # "Black carbon / soot" # "SOA (secondary organic aerosols)" # "POM (particulate organic matter)" # "Polar stratospheric ice" # "NAT (Nitric acid trihydrate)" # "NAD (Nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particule)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.6. Bulk Scheme Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of species covered by the bulk scheme. End of explanation """
4DGenome/Chromosomal-Conformation-Course
Notebooks/00-Hi-C_quality_check.ipynb
gpl-3.0
for renz in ['HindIII', 'MboI']: print renz ! head -n 4 /media/storage/FASTQs/K562_"$renz"_1.fastq print '' """ Explanation: FASTQ format The file is organized in 4 lines per read: 1 - The header of the DNA sequence with the read id (the read length is optional) 2 - The DNA sequence 3 - The header of the sequence quality (this line could be either a repetition of line 1 or empty) 4 - The sequence quality (it is not human readble, but is provided as PHRED score. Check https://en.wikipedia.org/wiki/Phred_quality_score for more details) End of explanation """ ! wc -l /media/storage/FASTQs/K562_HindIII_1.fastq """ Explanation: Count the number of lines in the file (4 times the number of reads) End of explanation """ from pytadbit.utils.fastq_utils import quality_plot for r_enz in ['HindIII', 'MboI']: quality_plot('/media/storage/FASTQs/K562_{0}_1.fastq'.format(r_enz), r_enz=r_enz, nreads=1000000, paired=False) """ Explanation: There are 40 M lines in the file, which means 10 M reads in total. Quality check before mapping End of explanation """
hashiprobr/redes-sociais
encontro02/2-largura.ipynb
gpl-3.0
import sys sys.path.append('..') import socnet as sn """ Explanation: Encontro 02, Parte 2: Revisão de Busca em Largura Este guia foi escrito para ajudar você a atingir os seguintes objetivos: implementar o algoritmo de busca em largura; usar funcionalidades avançadas da biblioteca da disciplina. Primeiramente, vamos importar a biblioteca: End of explanation """ sn.graph_width = 320 sn.graph_height = 180 """ Explanation: A seguir, vamos configurar as propriedades visuais: End of explanation """ g = sn.load_graph('2-largura.gml', has_pos=True) sn.show_graph(g) """ Explanation: Por fim, vamos carregar e visualizar um grafo: End of explanation """ from math import inf, isinf from queue import Queue s = 1 q = Queue() for n in g.nodes(): g.node[n]['d'] = inf g.node[s]['d'] = 0 q.put(s) while not q.empty(): n = q.get() for m in g.neighbors(n): if isinf(g.node[m]['d']): g.node[m]['d'] = g.node[n]['d'] + 1 q.put(m) for n in g.nodes(): print('distância de {}: {}'.format(n, g.node[n]['d'])) """ Explanation: Caminhos de comprimento mínimo Seja $\langle n_0, n_1, \ldots, n_{k-1} \rangle$ um caminho. Dizemos que: * $n_0$ é a origem desse caminho, ou seja, o nó no qual ele começa; * $n_{k-1}$ é o destino desse caminho, ou seja, o nó no qual ele termina; * $k-1$ é o comprimento desse caminho, ou seja, a quantidade de arestas pelas quais ele passa. Um caminho de origem $s$ e destino $t$ tem comprimento mínimo se não existe outro caminho de origem $s$ e destino $t$ de comprimento menor. Note que podem existir múltiplos caminhos de comprimento mínimo. A distância de $s$ a $t$ é o comprimento mínimo de um caminho de origem $s$ e destino $t$. Por completude, dizemos que a distância de $s$ a $t$ é $\infty$ se não existe caminho de origem $s$ e destino $t$. Algoritmo de busca em largura Dado um nó $s$, podemos eficientemente calcular as distâncias desse a todos os outros nós do grafo usando o algoritmo de busca em largura. 
A ideia desse algoritmo é simples: a partir dos nós de distância $0$, ou seja apenas o próprio $s$, podemos descobrir os nós de distância $1$, a partir dos nós de distância $1$ podemos descobrir os nós de distância $2$, e assim em diante. Podemos usar uma fila para garantir que os nós são visitados nessa ordem. End of explanation """ def snapshot(g, frames): for n in g.nodes(): if isinf(g.node[n]['d']): g.node[n]['label'] = '∞' else: g.node[n]['label'] = str(g.node[n]['d']) frame = sn.generate_frame(g, nlab=True) frames.append(frame) """ Explanation: Visualizando algoritmos A função generate_frame é parecida com a função show_graph mas, em vez de mostrar uma imagem imediatamente, gera um quadro que pode ser usado para montar uma animação. Vamos então definir uma função de conveniência que cria atributos label a partir de distâncias e adiciona um quadro a uma lista. End of explanation """ red = (255, 0, 0) # linha nova blue = (0, 0, 255) # linha nova frames = [] # linha nova s = 1 q = Queue() for n in g.nodes(): g.node[n]['d'] = inf g.node[s]['d'] = 0 q.put(s) sn.reset_node_colors(g) # linha nova sn.reset_edge_colors(g) # linha nova snapshot(g, frames) # linha nova while not q.empty(): n = q.get() g.node[n]['color'] = red # linha nova snapshot(g, frames) # linha nova for m in g.neighbors(n): g.edge[n][m]['color'] = red # linha nova snapshot(g, frames) # linha nova if isinf(g.node[m]['d']): g.node[m]['d'] = g.node[n]['d'] + 1 q.put(m) g.edge[n][m]['color'] = sn.edge_color # linha nova snapshot(g, frames) # linha nova g.node[n]['color'] = blue # linha nova snapshot(g, frames) # linha nova sn.show_animation(frames) """ Explanation: Vamos agora escrever uma versão alternativa da busca em largura. End of explanation """
agile-geoscience/notebooks
Jerk_jounce_etc.ipynb
apache-2.0
import numpy as np %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns sns.set() """ Explanation: Jerk, jounce, etc. This notebook accompanies a blog post on Agile. First, the usual preliminaries... End of explanation """ data = np.loadtxt('data/tesla_speed.csv', delimiter=',') """ Explanation: Load the data This dataset is from this (slightly weird) blog post https://www.duckware.com/blog/tesla-elon-musk-nytimes-john-broder-feud/index.html. It was the only decent bit of telemetry data I could find. I doubt it's properly licensed. If you have access to any open data — maybe from a Formula 1 car, or maybe your own vehicle, I'd love to know about it! End of explanation """ x = (data[:, 0] + 3) * 2.05404 x = x - np.min(x) v_x = np.mean(data[:, 1:], axis=1) * 0.0380610 plt.plot(x, v_x) plt.xlabel('Displacement [m]') plt.ylabel('Velocity [m/s]') plt.show() """ Explanation: Convert x to m and v to m/s, per the instructions in the blog post about the dataset (modified for metric units). End of explanation """ elapsed_time = np.cumsum(1 / v_x) """ Explanation: Note that the sampling was done per unit of displacement; we'd really prefer time. Let's convert it! Time conversion Convert to the time domain, since we want derivatives with respect to time, not distance. End of explanation """ elapsed_time[-1] = 2 * elapsed_time[-2] - elapsed_time[-3] t = np.linspace(0, elapsed_time[-1], 1000) v_t = np.interp(t, elapsed_time, v_x) plt.plot(t, v_t) plt.show() """ Explanation: Adjust the last entry, to avoid a very long interval. 
End of explanation """ import scipy.integrate # Displacement, d d = scipy.integrate.cumtrapz(v_t, t, initial=0) plt.plot(t, d) plt.show() # Absement abt = scipy.integrate.cumtrapz(d, t, initial=0) # Absity aby = scipy.integrate.cumtrapz(abt, t, initial=0) # Abseleration abn = scipy.integrate.cumtrapz(aby, t, initial=0) plt.plot(abn) plt.show() """ Explanation: Compute integrals Use trapezoidal integral approximation, https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.integrate.cumtrapz.html End of explanation """ import scipy.signal dt = t[1] - t[0] # Check that Savitsky-Golay filter gives velocity from d/dt displacement. v_ = scipy.signal.savgol_filter(d, delta=dt, window_length=3, polyorder=2, deriv=1) plt.figure(figsize=(15, 3)) plt.plot(t, v_, lw=3) plt.plot(t, v_t, '--', lw=3) """ Explanation: That's a boring graph! Check that derivative of displacement gives back velocity Use Savitsky-Golay filter for differentiation with some smoothing: https://en.wikipedia.org/wiki/Savitzky%E2%80%93Golay_filter End of explanation """ # Acceleration a = scipy.signal.savgol_filter(v_t, delta=dt, window_length=11, polyorder=2, deriv=1) plt.figure(figsize=(15,3)) plt.plot(a, lw=3, color='green') plt.axhline(c='k', lw=0.5, zorder=0) plt.show() plt.figure(figsize=(15,3)) plt.imshow([a], cmap='RdBu_r', vmin=-1.6, vmax=1.6, alpha=0.8, aspect='auto', extent=[t.min(), t.max(), v_t.min(), v_t.max()]) plt.colorbar(label="Acceleration [m/s²]") plt.plot(t, v_t, 'white', lw=4) plt.plot(t, v_t, 'green') plt.title("Velocity (green) and acceleration (red-blue)") plt.xlabel('Time [s]') plt.ylabel('Velocity [m/s]') plt.grid('off') plt.show() """ Explanation: It does: we seem to be computing integrals properly. 
Compute derivatives End of explanation """ j = scipy.signal.savgol_filter(v_t, delta=dt, window_length=11, polyorder=2, deriv=2) s = scipy.signal.savgol_filter(v_t, delta=dt, window_length=15, polyorder=3, deriv=3) c = scipy.signal.savgol_filter(v_t, delta=dt, window_length=19, polyorder=4, deriv=4) p = scipy.signal.savgol_filter(v_t, delta=dt, window_length=23, polyorder=5, deriv=5) plt.figure(figsize=(15,3)) plt.imshow([j], cmap='RdBu_r', vmin=-3, vmax=3, alpha=0.8, aspect='auto', extent=[t.min(), t.max(), v_t.min(), v_t.max()]) plt.colorbar(label="Jerk [m/s³]") plt.plot(t, v_t, 'white', lw=4) plt.plot(t, v_t, 'green') plt.title("Velocity (green) and jerk (red-blue)") plt.xlabel('Time [s]') plt.ylabel('Velocity [m/s]') plt.grid('off') plt.show() """ Explanation: Jerk, jounce, and so on End of explanation """ plots = { 'Abseleration': abn, 'Absity': aby, 'Absement': abt, 'Displacement': d, 'Velocity': v_t, 'Acceleration': a, 'Jerk': j, 'Jounce': s, # 'Crackle': c, # 'Pop': p, } colors = ['C0', 'C0', 'C0', 'C1', 'C2', 'C2', 'C2', 'C2'] fig, axs = plt.subplots(figsize=(15,15), nrows=len(plots)) pos = 0.01, 0.8 params = dict(fontsize=13) for i, (k, v) in enumerate(plots.items()): ax = axs[i] ax.plot(t, v, lw=2, color=colors[i]) ax.text(*pos, k, transform=ax.transAxes, **params) # if np.min(v) < 0: # ax.axhline(color='k', lw=0.5, zorder=0) if i < len(plots)-1: ax.set_xticklabels([]) plt.show() """ Explanation: Plot everything! End of explanation """
tpin3694/tpin3694.github.io
python/create_a_new_file_and_the_write_to_it.ipynb
mit
# Create a file if it doesn't already exist with open('file.txt', 'xt') as f: # Write to the file f.write('This file now exsits!') # Close the connection to the file f.close() """ Explanation: Title: Create A New File Then Write To It Slug: create_a_new_file_and_the_write_to_it Summary: Create A New File Then Write To It Using Python. Date: 2017-02-02 12:00 Category: Python Tags: Basics Authors: Chris Albon Create A New File And Write To It End of explanation """ # Open the file with open('file.txt', 'rt') as f: # Read the data in the file data = f.read() # Close the connection to the file f.close() """ Explanation: Open The File And Read It End of explanation """ # View the data in the file data """ Explanation: View The Contents Of The File End of explanation """ # Import the os package import os # Delete the file os.remove('file.txt') """ Explanation: Delete The File End of explanation """
tensorflow/docs-l10n
site/ja/xla/tutorials/compile.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2018 The TensorFlow Authors. End of explanation """ import tensorflow as tf from tensorflow.contrib.compiler import xla """ Explanation: XLAコンパイラAPI <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/xla/tutorials/compile"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/xla/tutorials/compile.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/xla/tutorials/compile.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/xla/tutorials/compile.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> TensorFlowとXLAライブラリをインポートします。XLAには、一部または全てのモデルを XLA でコンパイルする実験的なAPIである xla.compile() が含まれています。 End of explanation """ # それぞれの入力イメージの大きさは、 28 x 28ピクセル IMAGE_SIZE = 28 * 28 # 個別の数字のラベル [0..9] の個数 NUM_CLASSES = 10 # それぞれのトレーニングバッチ(ステップ)での標本数 TRAIN_BATCH_SIZE = 100 # トレーニングステップを実行する回数 TRAIN_STEPS = 1000 # MNISTデータセットをロードする。 train, test = tf.keras.datasets.mnist.load_data() 
train_ds = tf.data.Dataset.from_tensor_slices(train).batch(TRAIN_BATCH_SIZE).repeat() test_ds = tf.data.Dataset.from_tensor_slices(test).batch(TRAIN_BATCH_SIZE) iterator = tf.data.Iterator.from_structure(train_ds.output_types, train_ds.output_shapes) images, labels = iterator.get_next() images = tf.reshape(images, [-1, IMAGE_SIZE]) images, labels = tf.cast(images, tf.float32), tf.cast(labels, tf.int64) """ Explanation: 必要ないくつかの定数を定義し、 MNISTのデータセットを用意します。 End of explanation """ def build_mnist_model(x, y_): y = tf.keras.layers.Dense(NUM_CLASSES).apply(x) cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=y) train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) return y, train_step """ Explanation: モデルを構築する関数の定義 以下のコードブロックは、順伝搬と逆伝搬の両方を行う、1つのdenseレイヤーを持つ簡単なモデルを構築する関数を含みます。 コードが呼ばれたとき、2つの値を返します。 y は、それぞれのターゲットのクラスの予測確率を表す tf.Tensor です。 train_step は global_step の値を増加し、変数の更新を行う tf.Operation です。 End of explanation """ [y] = xla.compile(build_mnist_model, inputs=[images, labels]) """ Explanation: XLA の有効化 XLA を有効化するには build_mnist_model 関数を xla.compile に渡します。以下のコードブロックは、モデルを xla.compile() 関数でラップします。これにより、提供された入力を持つターゲット関数をXLAで実行できます。 End of explanation """ # セッションを作成しすべての変数を初期化。 # xla.compile()は、Keras model.fit() APIやTF eager modeとはまだ動作しません。 sess = tf.Session() sess.run(tf.global_variables_initializer()) """ Explanation: グラフをコンパイルするとき、XLAはターゲット関数によって構築されたグラフの全てのノードを、いくつかのXLAのオペレータで置き換えます。 xla.compileは、生成されたXLAのオペレータから独立して実行できる tf.Operation を返しません 代わりに、ターゲット関数から返された tf.Operation ノードは、返された全ての tf.Tensor の値との制御依存関係として追加されます。これにより、 返されたテンソルが評価されるときに、 tf.Operation ノードの実行をトリガします。 擬似コードによるxla.compileの実装は、以下のようになります: ``` TensorFlowに、XLAが扱いやすい方法でコードを実行するよう依頼する y, train_step = build_mnist_model(images, labels) with tf.control_dependencies([train_step]): y = tf.identity(y) TensorFlowに、XLAが扱いやすい方法でコードの実行を停止するよう依頼する ``` xla.compile()は常に tf.Tensor のリスト(1要素しか無かったとしても)を返します。 
もしあなたが構築したグラフを今表示したら、通常のTensorFlowのグラフとそれほど変わらないことがわかり、前に述べたXLAのオペレータを見つけることができないでしょう。これは、あなたが sess.run() でグラフを実行しようとしても、実際のコンパイルは後ほど発生するからです。後ほど、TensorFlowは実際にXLAオペレータを生成する一連のグラフ書き換えパスをトリガーします。これは、すべての入力がそろったときに、計算をコンパイルして実行します。 モデルの学習とテスト End of explanation """ # 学習用データセットを与える sess.run(iterator.make_initializer(train_ds)) # TRAIN_STEPS ステップだけ実行する for i in range(TRAIN_STEPS): sess.run(y) print("Model trained for %s steps." % TRAIN_STEPS) # 学習済みモデルをテストする # テスト用データセットを与える sess.run(iterator.make_initializer(test_ds)) # 精度を計算する correct_prediction = tf.equal(tf.argmax(y, 1), labels) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print("Prediction accuracy after training: %s" % sess.run(accuracy)) # セッションを片付ける sess.close() """ Explanation: 以下のコードブロックはモデルを学習します。 y の評価は、制御依存関係がある train_step をトリガします。これは、モデル変数を更新します。 End of explanation """
GoogleCloudPlatform/training-data-analyst
blogs/pandas_to_beam/pandas_to_beam.ipynb
apache-2.0
%pip install --quiet apache-beam[gcp]==2.26.0 import apache_beam as beam import pandas as pd print(beam.__version__) """ Explanation: Pandas API in Apache Beam Apache Beam 2.26 onwards supports the Pandas API. This makes it very convenient to write complex pipelines, and execute them at scale, or in a streaming manner. End of explanation """ %%bigquery df SELECT airline, departure_airport, arrival_airport, departure_delay, arrival_delay FROM `bigquery-samples.airline_ontime_data.flights` WHERE date = '2006-08-20' df.head() # most frequent airports used by each carrier aa = df.groupby('airline').get_group('AA') arr = aa.rename(columns={'arrival_airport': 'airport'}).airport.value_counts() arr.head() dep = aa.rename(columns={'departure_airport': 'airport'}).airport.value_counts() dep.head() total = arr + dep top_airports = total.nlargest(10) top_airports.index.values means = aa[aa['arrival_airport'].isin(top_airports.index.values)].mean() means print({ 'airline': aa.airline.iloc[0], 'departure_delay': means['departure_delay'], 'arrival_delay': means['arrival_delay'], }) """ Explanation: 1. Experiment on one day of flights data Let's pull out one day of data using BigQuery and display the stats we need. End of explanation """ # do this for all the carriers def get_delay_at_top_airports(aa): arr = aa.rename(columns={'arrival_airport': 'airport'}).airport.value_counts() dep = aa.rename(columns={'departure_airport': 'airport'}).airport.value_counts() total = arr + dep top_airports = total.nlargest(10) means = aa[aa['arrival_airport'].isin(top_airports.index.values)].mean() return '{:2f},{:2f}'.format( means['departure_delay'], means['arrival_delay']) df.groupby('airline').apply(get_delay_at_top_airports) """ Explanation: 2. 
Combine the Pandas code into functions Make functions out of the Pandas code so that it is repeatable End of explanation """ import time import datetime def to_unixtime(s): return time.mktime(datetime.datetime.strptime(s, "%Y-%m-%d").timetuple()) print(to_unixtime('2006-08-20')) from apache_beam.dataframe.convert import to_dataframe, to_pcollection query = """ SELECT date, airline, departure_airport, arrival_airport, departure_delay, arrival_delay FROM `bigquery-samples.airline_ontime_data.flights` """ with beam.Pipeline() as p: tbl = (p | 'read table' >> beam.io.ReadFromBigQuery(query=query) | 'assign ts' >> beam.Map( lambda x: beam.window.TimestampedValue(x, to_unixtime(x['date']))) | 'set schema' >> beam.Select( date=lambda x: str(x['date']), airline=lambda x: str(x['airline']), departure_airport=lambda x: str(x['departure_airport']), arrival_airport=lambda x: str(x['arrival_airport']), departure_delay=lambda x: float(x['departure_delay']), arrival_delay=lambda x: float(x['arrival_delay'])) ) daily = tbl | 'daily windows' >> beam.WindowInto(beam.window.FixedWindows(60*60*24)) # group the flights data by carrier df = to_dataframe(daily) grouped = df.groupby('airline') #agg = to_pcollection(grouped.groups) # get dataframes corresponding to each group, and apply our function to it #result = agg | 'avg delays' >> beam.Map(lambda pc: to_dataframe(pc).get_delay_at_top_airports) result = grouped.apply(get_delay_at_top_airports) result.to_csv('output.csv') """ Explanation: 3. Productionize pipeline using Apache Beam on Dataflow Apache Beam lets you run batch and streaming pipelines at scale and in resilient way. To do this, build a pipeline. * Do it on full dataset (batch) * Do it on streaming data by adding a Sliding or Fixed Time Window to process daily/hourly/minute-by-minute data as it comes in. End of explanation """
dwillis/agate
example.py.ipynb
mit
import agate table = agate.Table.from_csv('examples/realdata/ks_1033_data.csv') print(table) """ Explanation: Using agate in a Jupyter notebook First we import agate. Then we create an agate Table by loading data from a CSV file. End of explanation """ kansas_city = table.where(lambda r: r['county'] in ('JACKSON', 'CLAY', 'CASS', 'PLATTE')) print(len(table.rows)) print(len(kansas_city.rows)) """ Explanation: Question 1: What was the total cost to Kansas City area counties? To answer this question, we first must filter the table to only those rows which refer to a Kansas City area county. End of explanation """ print('$%d' % kansas_city.aggregate(agate.Sum('total_cost'))) """ Explanation: We can then print the Sum of the costs of all those rows. (The cost column is named total_cost.) End of explanation """ # Group by county counties = table.group_by('county') print(counties.keys()) """ Explanation: Question 2: Which counties spent the most? This question is more complicated. First we group the data by county, which gives us a TableSet named counties. A TableSet is a group of tables with the same columns. End of explanation """ # Aggregate totals for all counties totals = counties.aggregate([ ('total_cost_sum', agate.Sum('total_cost'),) ]) print(totals.column_names) """ Explanation: We then use the aggregate function to sum the total_cost column for each table in the group. The resulting values are collapsed into a new table, totals, which has a row for each county and a column named total_cost_sum containing the new total. End of explanation """ totals.order_by('total_cost_sum', reverse=True).limit(20).print_bars('county', 'total_cost_sum', width=100) """ Explanation: Finally, we sort the counties by their total cost, limit the results to the top 10 and then print the results as a text bar chart. End of explanation """
rtidatascience/connected-nx-tutorial
notebooks/2. Creating Graphs.ipynb
mit
import csv import networkx as nx import pandas as pd import matplotlib.pyplot as plt %matplotlib inline # Create empty graph G = nx.Graph() # Add nodes G.add_node(1) G.add_nodes_from([2, 3]) G.add_node(4) G.nodes() """ Explanation: Creating Graphs in NetworkX Creating a graph object Adding nodes and edges Adding attributes Loading in several data types End of explanation """ # add edges G.add_edge(1, 2) # get graph info print(nx.info(G)) nx.draw(G, with_labels=True) """ Explanation: ⚠️ Note: In networkx 2.0, several methods now return iterators For more details see: https://networkx.github.io/documentation/development/reference/migration_guide_from_1.x_to_2.0.html End of explanation """ # add at creation # nodes G.add_node(5, favorite_color='blue') G.add_nodes_from([(6, {'favorite_color' : 'red'}), (7, {'favorite_color' :'purple'})]) # edges G.add_edge(5, 6, {'relationship' : 'best friends'}) # accessing node attributes print("Node 5 attributes:", G.node[5]) # accessing edge attributes print("Edge 5-6 attributes:", G.edge[5][6]) """ Explanation: Adding and Inspecting Attributes End of explanation """ favorite_foods = { 1 : 'pizza', 2 : 'mac and cheese', 3 : 'balogna sandwich', 4 : 'pizza', 5 : 'chocolate', 6 : 'pizza', 7 : 'bananas' } nx.set_node_attributes(G, 'favorite_food', favorite_foods) print("Node 4's favorite food is %s" % G.node[4]['favorite_food']) """ Explanation: Adding Attributes for each existing node End of explanation """ # what does it look like? !head ../data/ga_edgelist.csv edges = [] with open('../data/ga_edgelist.csv', 'r') as f: filereader = csv.reader(f, delimiter=",", quotechar='"') next(filereader) # skips header row for row in filereader: edges.append(row) edges[0:5] GA = nx.from_edgelist(edges) print(nx.info(GA)) """ Explanation: Reading in Different Representations of Graphs Data for graphs and networks comes in many different representations. 
Representations: - Edge List - Adjacency Matrix - Adjacency List (not covered) - Incidence Matrix (not covered) Note: Representations are related to, but distinct from, the storage format. In our examples, we'll be loading our data from text files. You may also have network data stored as JSON, GEXF, or other formats. For more details, check the docs. Grey's Anatomy Dataset The dataset we'll look at is a record of all "romantic" encounters between characters on the TV show Grey's Anatomy. Edge Lists An edge list is a common way of representing a graph. This representation can be thought of as a list of tuples, where each tuple represents an edge between two of the nodes in your graph. The nodes of the graph can be inferred by taking the set of objects from all tuples. You can infer/determine whether a graph is directed or weighted from an edge list. - Weighted: If edges appear more than once, or if an additional weight attribute is added as a 3rd column, the graph is weighted - Directed: If the "From" and "To" (often seen as "Source" and "Target") of an edge in the list is not arbitrary, it's a directed graph End of explanation """ ga_edges = pd.read_csv('../data/ga_edgelist.csv') ga_edges.head() GA = nx.from_pandas_dataframe(ga_edges, source="from", target="to") # validate info print(nx.info(GA)) nx.draw(GA, with_labels=True) """ Explanation: Mediating Data Processing through pandas Often times the data we'll want to use will probably be analyzed beforehand with pandas. Reading in our data to a DataFrame first saves us a bit of time writng code to open the files due to read_csv having sensible defaults around quoted characters and header rows. 
End of explanation """ ga_adj = pd.read_csv('../data/ga_adj.csv', index_col=0) ga_adj.ix[0:5, 0:5] GAAdj = nx.from_numpy_matrix(ga_adj.values) # Numpy matrices don't have labels :( print(GAAdj.nodes()) label_mapping = dict(zip(GAAdj.nodes(), ga_adj.columns)) GAAdj = nx.relabel_nodes(GAAdj, label_mapping) nx.draw_spring(GAAdj, with_labels=True) """ Explanation: Adjacency Matrices A common way of representing graph data is through an adjacency matrix -- often referred to mathematically as A. This data structure is a square, n x n matrix where n = number of nodes. Each column and row in the matrix is a node. For any two nodes, i and j the value at Aij (row i and column j) represents the weight of the edge between nodes i and j. End of explanation """ # Easiest, least robust way: print("Edge List Graph\n", nx.info(GA)) print("\nAdj. Matrix Graph\n", nx.info(GAAdj)) # Fancy math way that checks additional conditions print("Isomorphic?", nx.is_isomorphic(GA, GAAdj)) """ Explanation: Are the two graphs the same? End of explanation """ print("'denny' From Edge List Graph:", GA['denny']) print("'denny' From Adjacency Matrix Graph:", GAAdj['denny']) """ Explanation: Gotchas End of explanation """ original_edgelist = sorted(nx.to_edgelist(GA)) adjacency_edgelist = sorted(nx.to_edgelist(GAAdj)) for i, edge in enumerate(original_edgelist): adjacency_edge = adjacency_edgelist[i] if edge[0] != adjacency_edge[0]: print("Sorted Edge Mismatch at edge %s:" % i, edge, adjacency_edge) break """ Explanation: ⚠️ Observation: Edge weights are inferred from adjacency matrix End of explanation """ nx.write_gexf(GA, '../data/ga_graph.gexf') """ Explanation: ⚠️ Observation: Source and Target are ambiguously defined in undirected graphs Exporting Graphs We'll export the graph in GEXF (Graph Exchange XML Format). End of explanation """
andre-martins/AD3
examples/python/parse_example.ipynb
lgpl-3.0
rng = np.random.RandomState(0) sentence = ["*", "the", "quick", "fox", "jumps", "."] # possible edge from every node to every other node OR the root link_ix = [] link_var = [] fg = ad3.PFactorGraph() for mod in range(1, len(sentence)): for head in range(len(sentence)): if mod == head: continue link_ix.append((head, mod)) var = fg.create_binary_variable() var.set_log_potential(rng.uniform(-1, 1)) link_var.append(var) fg.set_eta_ad3(.1) fg.adapt_eta_ad3(True) fg.set_max_iterations_ad3(1000) value, marginals, edge_marginals, value = fg.solve_lp_map_ad3() pred_links = [link for link, posterior in zip(link_ix, marginals) if posterior > 0.1] DepGraph(sentence, pred_links) """ Explanation: Parse sentence using random potentials, but no tree constraints End of explanation """ tree_f = ad3.extensions.PFactorTree() fg.declare_factor(tree_f, link_var) tree_f.initialize(len(sentence), link_ix) value, marginals, edge_marginals, status = fg.solve() print(status) pred_links = [link for link, posterior in zip(link_ix, marginals) if posterior > 0.1] DepGraph(sentence, pred_links) """ Explanation: Same model, this time WITH tree constraints End of explanation """
kit-cel/wt
mloc/ch7_Evolutionary_Algorithms/Differential_Evolution.ipynb
gpl-2.0
import numpy as np import matplotlib.pyplot as plt """ Explanation: Optimization of Non-Differentiable Functions Using Differential Evolution This code is provided as supplementary material of the lecture Machine Learning and Optimization in Communications (MLOC).<br> This code illustrates: * Use of differential evolution to optimize Griewank's function End of explanation """ # Griewank's function def fun(x,y): value = (x**2 + y**2)/4000.0 - np.cos(x)*np.cos(y/np.sqrt(2))+1 return value # vector-version of the function vfun = np.vectorize(fun) """ Explanation: For illustration, we optimize Griewank's function in 2 dimensions. The function is given by \begin{equation} f(\boldsymbol{x}) = \frac{x_1^2+x_2^2}{4000} - \cos(x_1)\cos\left(\frac{x_2}{\sqrt{2}}\right) + 1 \end{equation} Note that Griewank's function is actually differentiable, however, the method works with any function which may not necessary be differentiable. End of explanation """ # plot map of Griewanks function x = np.arange(-20.0, 20.0, 0.1) y = np.arange(-20.0, 20.0, 0.1) X, Y = np.meshgrid(x, y) fZ = vfun(X,Y) plt.figure(1,figsize=(10,9)) plt.rcParams.update({'font.size': 14}) plt.contourf(X,Y,fZ,levels=20) plt.colorbar() plt.axis('scaled') plt.xlabel("$x_1$") plt.ylabel("$x_2$") plt.show() """ Explanation: Plot the function as a heat map. End of explanation """ def initial_population(D, NP, xmin, xmax): v = np.random.rand(NP, D)*(xmax - xmin) + xmin return v """ Explanation: Helper function to generate a random initial population of $D$ dimensions $N_P$ elements. The elementsof the population are randomly distributed in the interval $[x_{\min},x_{\max}]$ (in every dimension). 
End of explanation """ #dimension D = 2 # population NP = 15*D # twiddling parameter F = 0.8 # cross-over probability CR = 0.3 # maximum 1000 iterations max_iter = 1000 # generate initial population population = initial_population(D, NP, -20, 20)[:] # compute initial cost cost = vfun(population[:,0], population[:,1]) best_index = np.argmin(cost) best_cost = cost[best_index] iteration = 0 # keep track of population save_population = [] while iteration < max_iter: # loop over every element from the population for k in range(NP): # get 4 random elements rp = np.random.permutation(NP)[0:4] # remove ourselves from the list rp = [j for j in rp if j != k] # generate new candidate vector v = population[rp[0],:] + F*( population[rp[1],:] - population[rp[2],:] ) # take vector from population u = np.array(population[k,:]) # cross-over each coordinate with probability CR with entry from candidate vector v idx = np.random.rand(D) < CR # cross-over u[idx] = v[idx] new_cost = fun(u[0], u[1]) if new_cost < cost[k]: # better cost? keep! cost[k] = new_cost population[k,:] = u if new_cost < best_cost: best_cost = new_cost best_index = k save_population.append(np.array(population[:])) iteration += 1 if iteration % 100 == 0: print('After iteration %d, best cost %1.4f (obtained for (%1.2f,%1.2f))' % (iteration, best_cost, population[best_index,0], population[best_index,1])) """ Explanation: Carry out differential evolution similar (not identical, slightly modified) to the scheme DE1 described in [1] [1] R. Storn and K. 
Price, "Differential Evolution - A simple and efficient adaptive scheme for global optimization over continuous spaces", Technical Report TR-95-012, March 1995 End of explanation """ plt.figure(1,figsize=(9,9)) plt.rcParams.update({'font.size': 14}) plt.contourf(X,Y,fZ,levels=20) index = 180 cost = vfun(save_population[index][:,0], save_population[index][:,1]) best_index = np.argmin(cost) plt.scatter(save_population[index][:,0], save_population[index][:,1], c='w') plt.scatter(save_population[index][best_index,0], save_population[index][best_index,1], c='r') plt.xlim((-20,20)) plt.ylim((-20,20)) plt.xlabel("$x_1$") plt.ylabel("$x_2$") plt.savefig('DE_Griewangk.pdf',bbox_inches='tight') %matplotlib notebook # Generate animation from matplotlib import animation, rc from matplotlib.animation import PillowWriter # Disable if you don't want to save any GIFs. font = {'size' : 18} plt.rc('font', **font) fig, ax = plt.subplots(1, figsize=(10,10)) ax.set_xlim(( -20, 20)) ax.set_ylim(( -20, 20)) ax.axis('scaled') written = False def animate(i): ax.clear() ax.contourf(X,Y,fZ,levels=20) cost = vfun(save_population[i][:,0], save_population[i][:,1]) best_index = np.argmin(cost) ax.scatter(save_population[i][:,0], save_population[i][:,1], c='w') ax.scatter(save_population[i][best_index,0], save_population[i][best_index,1], c='r') ax.set_xlabel(r'$x_1$',fontsize=18) ax.set_ylabel(r'$x_2$',fontsize=18) ax.set_xlim(( -20, 20)) ax.set_ylim(( -20, 20)) anim = animation.FuncAnimation(fig, animate, frames=300, interval=80, blit=False) fig.show() anim.save('differential_evolution_Griewank.gif', writer=PillowWriter(fps=7)) """ Explanation: Generate animation. End of explanation """
crystalzhaizhai/cs207_yi_zhai
lectures/L9/L9.ipynb
mit
from IPython.display import HTML """ Explanation: Lecture 9 Object Oriented Programming Monday, October 2nd 2017 End of explanation """ def Complex(a, b): # constructor return (a,b) def real(c): # method return c[0] def imag(c): return c[1] def str_complex(c): return "{0}+{1}i".format(c[0], c[1]) c1 = Complex(1,2) # constructor print(real(c1), " ", str_complex(c1)) """ Explanation: Motiviation We would like to find a way to represent complex, structured data in the context of our programming language. For example, to represent a location, we might want to associate a name, a latitude and a longitude with it. Thus we would want to create a compound data type which carries this information. In C, for example, this is a struct: C struct location { float longitude; float latitude; } REMEMBER: A language has 3 parts: expressions and statements: how to structure simple computations means of combination: how to structure complex computations means of abstraction: how to build complex units Review When we write a function, we give it some sensible name which can then be used by a "client" programmer. We don't care about how this function is implemented. We just want to know its signature (API) and use it. In a similar way, we want to encapsulate our data: we dont want to know how it is stored and all that. We just want to be able to use it. This is one of the key ideas behind object oriented programming. To do this, write constructors that make objects. We also write other functions that access or change data on the object. These functions are called the "methods" of the object, and are what the client programmer uses. First Examples Objects thru tuples: An object for complex numbers How might we implement such objects? First, lets think of tuples. 
End of explanation """ c1[0] """ Explanation: But things aren't hidden so I can get through the interface: End of explanation """ c1[0]=2 """ Explanation: Because I used a tuple, and a tuple is immutable, I can't change this complex number once it's created. End of explanation """ def Complex2(a, b): # constructor def dispatch(message): # capture a and b at constructor-run time if message=="real": return a elif message=='imag': return b elif message=="str": return "{0}+{1}i".format(a, b) return dispatch z=Complex2(1,2) print(z("real"), " ", z("imag"), " ", z("str")) """ Explanation: Objects thru closures Let's try an implementation that uses a closure to capture the value of arguments. End of explanation """ def Complex3(a, b): in_a=a in_b=b def dispatch(message, value=None): nonlocal in_a, in_b if message=='set_real' and value != None: in_a = value elif message=='set_imag' and value != None: in_b = value elif message=="real": return in_a elif message=='imag': return in_b elif message=="str": return "{0}+{1}i".format(in_a, in_b) return dispatch c3=Complex3(1,2) print(c3("real"), " ", c3("imag"), " ", c3("str")) c3('set_real', 2) print(c3("real"), " ", c3("imag"), " ", c3("str")) """ Explanation: This looks pretty good so far. The only problem is that we don't have a way to change the real and imaginary parts. For this, we need to add things called setters. Objects with Setters End of explanation """ class ComplexClass(): def __init__(self, a, b): self.real = a self.imaginary = b """ Explanation: Python Classes and instance variables We constructed an object system above. But Python comes with its own. Classes allow us to define our own types in the Python type system. 
End of explanation """ HTML('<iframe width="800" height="500" frameborder="0" src="http://pythontutor.com/iframe-embed.html#code=class%20ComplexClass%28%29%3A%0A%20%20%20%20%0A%20%20%20%20def%20__init__%28self,%20a,%20b%29%3A%0A%20%20%20%20%20%20%20%20self.real%20%3D%20a%0A%20%20%20%20%20%20%20%20self.imaginary%20%3D%20b%0A%0Ac1%20%3D%20ComplexClass%281,2%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=0&heapPrimitives=false&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false"> </iframe>') c1 = ComplexClass(1,2) print(c1, c1.real) print(vars(c1), " ",type(c1)) c1.real=5.0 print(c1, " ", c1.real, " ", c1.imaginary) """ Explanation: __init__ is a special method run automatically by Python. It is a constructor. self is the instance of the object. It acts like this in C++ but self is explicit. End of explanation """ class Animal(): def __init__(self, name): self.name = name def make_sound(self): raise NotImplementedError class Dog(Animal): def make_sound(self): return "Bark" class Cat(Animal): def __init__(self, name): self.name = "A very interesting cat: {}".format(name) def make_sound(self): return "Meow" """ Explanation: Inheritance and Polymorphism Inheritance Inheritance is the idea that a "Cat" is-a "Animal" and a "Dog" is-a "Animal". Animals make sounds, but Cats Meow and Dogs Bark. Inheritance makes sure that methods not defined in a child are found and used from a parent. Polymorphism Polymorphism is the idea that an interface is specified, but not necessarily implemented, by a superclass and then the interface is implemented in subclasses (differently). [Actually Polymorphism is much more complex and interesting than this, and this definition is really an outcome of polymorphism. But we'll come to this later.] 
Example: Super- and subclasses End of explanation """ a0 = Animal("David") print(a0.name) a0.make_sound() a1 = Dog("Snoopy") a2 = Cat("Hello Kitty") animals = [a1, a2] for a in animals: print(a.name) print(isinstance(a, Animal)) print(a.make_sound()) print('--------') print(a1.make_sound, " ", Dog.make_sound) print(a1.make_sound()) print('----') print(Dog.make_sound(a1)) Dog.make_sound() """ Explanation: Animal is the superclass (a.k.a the base class). Dog and Cat are both subclasses (a.k.a derived classes) of the Animal superclass. Using the Animal class End of explanation """ HTML('<iframe width="800" height="500" frameborder="0" src="http://pythontutor.com/iframe-embed.html#code=class%20Animal%28%29%3A%0A%20%20%20%20%0A%20%20%20%20def%20__init__%28self,%20name%29%3A%0A%20%20%20%20%20%20%20%20self.name%20%3D%20name%0A%20%20%20%20%20%20%20%20%0A%20%20%20%20def%20make_sound%28self%29%3A%0A%20%20%20%20%20%20%20%20raise%20NotImplementedError%0A%20%20%20%20%0Aclass%20Dog%28Animal%29%3A%0A%20%20%20%20%0A%20%20%20%20def%20make_sound%28self%29%3A%0A%20%20%20%20%20%20%20%20return%20%22Bark%22%0A%20%20%20%20%0Aclass%20Cat%28Animal%29%3A%0A%20%20%20%20%0A%20%20%20%20def%20__init__%28self,%20name%29%3A%0A%20%20%20%20%20%20%20%20self.name%20%3D%20%22A%20very%20interesting%20cat%3A%20%7B%7D%22.format%28name%29%0A%20%20%20%20%20%20%20%20%0A%20%20%20%20def%20make_sound%28self%29%3A%0A%20%20%20%20%20%20%20%20return%20%22Meow%22%0A%0Aa1%20%3D%20Dog%28%22Snoopy%22%29%0Aa2%20%3D%20Cat%28%22Hello%20Kitty%22%29%0Aanimals%20%3D%20%5Ba1,%20a2%5D%0Afor%20a%20in%20animals%3A%0A%20%20%20%20print%28a.name%29%0A%20%20%20%20print%28isinstance%28a,%20Animal%29%29%0A%20%20%20%20print%28a.make_sound%28%29%29%0A%20%20%20%20print%28\'--------\'%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=0&heapPrimitives=false&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false"> </iframe>') """ Explanation: How does this all work? 
End of explanation """ class Animal(): def __init__(self, name): self.name=name print("Name is", self.name) class Mouse(Animal): def __init__(self, name): self.animaltype="prey" super().__init__(name) print("Created %s as %s" % (self.name, self.animaltype)) class Cat(Animal): pass a1 = Mouse("Tom") print(vars(a1)) a2 = Cat("Jerry") print(vars(a2)) HTML('<iframe width="800" height="500" frameborder="0" src="http://pythontutor.com/iframe-embed.html#code=class%20Animal%28%29%3A%0A%20%20%20%20%0A%20%20%20%20def%20__init__%28self,%20name%29%3A%0A%20%20%20%20%20%20%20%20self.name%3Dname%0A%20%20%20%20%20%20%20%20print%28%22Name%20is%22,%20self.name%29%0A%20%20%20%20%20%20%20%20%0Aclass%20Mouse%28Animal%29%3A%0A%20%20%20%20def%20__init__%28self,%20name%29%3A%0A%20%20%20%20%20%20%20%20self.animaltype%3D%22prey%22%0A%20%20%20%20%20%20%20%20super%28%29.__init__%28name%29%0A%20%20%20%20%20%20%20%20print%28%22Created%20%25s%20as%20%25s%22%20%25%20%28self.name,%20self.animaltype%29%29%0A%20%20%20%20%0Aclass%20Cat%28Animal%29%3A%0A%20%20%20%20pass%0A%0Aa1%20%3D%20Mouse%28%22Tom%22%29%0Aa2%20%3D%20Cat%28%22Jerry%22%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=0&heapPrimitives=false&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false"> </iframe>') """ Explanation: Calling a superclasses initializer Say we dont want to do all the work of setting the name variable in the subclasses. We can set this "common" work up in the superclass and use super to call the superclass's initializer from the subclass. There's another way to think about this: A subclass method will be called instead of a superclass method if the method is in both the sub- and superclass and we call the subclass (polymorphism!). If we really want the superclass method, then we can use the super built-in function. 
See https://rhettinger.wordpress.com/2011/05/26/super-considered-super/ End of explanation """ # Both implement the "Animal" Protocol, which consists of the one make_sound function class Dog(): def make_sound(self): return "Bark" class Cat(): def make_sound(self): return "Meow" a1 = Dog() a2 = Cat() animals = [a1, a2] for a in animals: print(isinstance(a, Animal), " ", a.make_sound()) """ Explanation: Interfaces The above examples show inheritance and polymorphism. Notice that we didn't actually need to set up the inheritance. We could have just defined 2 different classes and have them both make_sound. In Java and C++ this is done more formally through Interfaces and Abstract Base Classes, respectively, plus inheritance. In Python, this agreement to define make_sound is called duck typing. "If it walks like a duck and quacks like a duck, it is a duck." End of explanation """ class Animal(): def __init__(self, name): self.name=name def __repr__(self): class_name = type(self).__name__ return "{0!s}({1.name!r})".format(class_name, self) r = Animal("David") r print(r) repr(r) """ Explanation: The Python Data Model Duck typing is used throughout Python. Indeed it's what enables the "Python Data Model" All python classes implicitly inherit from the root object class. The Pythonic way is to just document your interface and implement it. This usage of common interfaces is pervasive in dunder functions to comprise the Python data model. Example: Printing with __repr__ and __str__ The way printing works is that Python wants classes to implement __repr__ and __str__ methods. It will use inheritance to give the built-in objects methods when these are not defined. Any class can define __repr__ and __str__. When an instance of such a class is interrogated with the repr or str function, then these underlying methods are called. We'll see __repr__ here. If you define __repr__ you have made an object sensibly printable. __repr__ End of explanation """
rohinkumar/galsurveystudy
old/Parallel Computing with Python public.ipynb
mit
%pylab inline """ Explanation: Parallel Computing with Python Rodrigo Nemmen, IAG USP This IPython notebook illustrates a few simple ways of doing parallel computing. Practical examples included: Parallel function mapping to a list of arguments (multiprocessing module) Parallel execution of array function (scatter/gather) + parallel execution of scripts Easy parallel Monte Carlo (parallel magics) End of explanation """ import multiprocessing """ Explanation: 1. Mapping a model to a grid of parameters <!--- Inspired on "useful parallel". --> Uses the multiprocessing module that comes by default with python, i.e. method independent of IPython. Idea: you have a function $f(\mathbf{x},\mathbf{y})$ of two parameters (e.g., $f$ may represent your model) stored in the arrays $(\mathbf{x},\mathbf{y})$. Given the arrays $\mathbf{x}$ and $\mathbf{y}$, you want to compute the values of $f(\mathbf{x},\mathbf{y})$. Let's assume for simplicity that there is no dependence on the neighbours. This is an embarassingly parallel problem. <!--- ### TODO * Random sampling of parameter space if desired --> End of explanation """ import scipy def f(z): x=z[1]*scipy.random.standard_normal(100000)+z[0] return x.sum() """ Explanation: Time wasting function that depends on two parameters. Here, I generate 1E5 random numbers based on the normal distribution and then sum them. The two parameters are $\mu$ and $\sigma$. End of explanation """ n=3000 X=numpy.linspace(-1,1,n) # mean Y=numpy.linspace(0.1,1,n) # std. dev. # creates list of arguments [Xi, Yi] pargs=[] # this is a list of lists! for i in range(X.size): pargs.append([X[i],Y[i]]) """ Explanation: Arrays of input parameters. You could easily modify this to take as input a matrix, not two arrays. 
End of explanation """ ncores=multiprocessing.cpu_count() # number of cores pool = multiprocessing.Pool(processes=ncores) # initializes parallel engine %%time t=pool.map(f, pargs) # parallel function map pool.close() # close the parallel engine """ Explanation: Parallel execution. Check out all the cores being used with a tool like htop. End of explanation """ %time t=map(f, pargs) """ Explanation: Serial execution End of explanation """ # test if n is prime def isprime(n): for i in range(3, n): if n % i == 0: return False return True # tests each element of an array if it is prime def f(x): return map(isprime,x) """ Explanation: If you want to convert the list to an array use y=array(t). Also note that there is a similar map method for ipyparallel. 2. Parallel execution of array function Uses ipyparallel. Consider a function $f(x)$ which takes an array $x$ containing the grid of input parameters. We want to split the function calls ("split the array") to the different cores in our machine: Make sure you start the parallel engines Alternatively, you can start the engines from the command-line: $ ipcluster start -n 4 Our time-waster function $f(x)$ that can be applied to an array of integers End of explanation """ x = scipy.random.randint(0,100000, (10000,)) """ Explanation: Generates big array (10k elements) of random integers between 0 and 100000 End of explanation """ %time y=f(x) """ Explanation: Serial execution End of explanation """ import ipyparallel client = ipyparallel.Client() """ Explanation: Now explain how IPython parallel works (here I show a slide). See documentation at the end of the notebook for details. End of explanation """ direct = client[:] direct.block = True """ Explanation: We are going to use the direct view, which means that commands always run on all nodes. This as opposed to a balanced view, which asynchronously executes code on nodes which are idle. In addition, we are going to turn blocking on. 
This means that jobs will block further execution until all nodes have finished. End of explanation """ direct.scatter('x',x) """ Explanation: Splits the input array $x$ between the cores End of explanation """ direct['x.size'] direct['x'] """ Explanation: Verify that the array was indeed divided equally End of explanation """ %%px y=f(x) """ Explanation: Let's try to apply the function in each different core End of explanation """ %%file myscript.py # test if n is prime def isprime(n): for i in range(3, n): if n % i == 0: return False return True # tests each element of an array if it is prime def f(x): return map(isprime,x) """ Explanation: Why the errors above? Because each core does not see the local engine. They work as separate machines and you have to load all variables and modules in each engine. That's easy. End of explanation """ direct.run("myscript.py") """ Explanation: Execute code which defines the methods on the different engines End of explanation """ %%time %px y=f(x) """ Explanation: Now compute the "model grid" correctly End of explanation """ %%px import numpy numpy.size(y) y=direct.gather('y') """ Explanation: Alternatively to the command above, you could use direct.apply(f,x) or direct.execute('y=f(x)') Now we have the separate arrays $y$ containing the results on each engine. How to get it back to the local engine? End of explanation """ # number of desired random sets nboot=100000 # number of sets that will be computed by each engine n=nboot/size(client.ids) """ Explanation: We have the array magically reassembled back in the local engine. :) 3. Easy parallel Monte Carlo Suppose you need to do 100k Monte Carlo simulations. Wouldn't it be great if you could easily split them among your (hopefully many) cores? In this example, I will perform 100k realizations of a 300x300 array of random floats. 
End of explanation """ direct.push(dict(n=n)) with direct.sync_imports(): import scipy """ Explanation: Passes variables to the engines End of explanation """ %%time %%px for i in range(n): x = scipy.random.random((300,300)) # 300x300 array of floats (values in the range [0,1) ) """ Explanation: Now everything below is executed in parallel! (IPython magic) <!--- Have a look also at the %autopx command. --> End of explanation """ %%time for i in range(nboot): x = scipy.random.random((300,300)) # 100x100 array of floats (values in the range [0,1) ) """ Explanation: For comparison, how long does it take to do the same simulation in serial mode? End of explanation """
Kaggle/learntools
notebooks/geospatial/raw/ex1.ipynb
apache-2.0
import geopandas as gpd from learntools.core import binder binder.bind(globals()) from learntools.geospatial.ex1 import * """ Explanation: Introduction Kiva.org is an online crowdfunding platform extending financial services to poor people around the world. Kiva lenders have provided over $1 billion dollars in loans to over 2 million people. <center> <img src="https://i.imgur.com/2G8C53X.png" width="500"><br/> </center> Kiva reaches some of the most remote places in the world through their global network of "Field Partners". These partners are local organizations working in communities to vet borrowers, provide services, and administer loans. In this exercise, you'll investigate Kiva loans in the Philippines. Can you identify regions that might be outside of Kiva's current network, in order to identify opportunities for recruiting new Field Partners? To get started, run the code cell below to set up our feedback system. End of explanation """ loans_filepath = "../input/geospatial-learn-course-data/kiva_loans/kiva_loans/kiva_loans.shp" # Your code here: Load the data world_loans = ____ # Check your answer q_1.check() # Uncomment to view the first five rows of the data #world_loans.head() #%%RM_IF(PROD)%% # Load the data world_loans = gpd.read_file(loans_filepath) q_1.assert_check_passed() # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ q_1.hint() #_COMMENT_IF(PROD)_ q_1.solution() """ Explanation: 1) Get the data. Use the next cell to load the shapefile located at loans_filepath to create a GeoDataFrame world_loans. End of explanation """ # This dataset is provided in GeoPandas world_filepath = gpd.datasets.get_path('naturalearth_lowres') world = gpd.read_file(world_filepath) world.head() """ Explanation: 2) Plot the data. Run the next code cell without changes to load a GeoDataFrame world containing country boundaries. 
End of explanation """ # Your code here ____ # Uncomment to see a hint #_COMMENT_IF(PROD)_ q_2.hint() #%%RM_IF(PROD)%% ax = world.plot(figsize=(20,20), color='whitesmoke', linestyle=':', edgecolor='black') world_loans.plot(ax=ax, markersize=2) # Get credit for your work after you have created a map q_2.check() # Uncomment to see our solution (your code may look different!) #_COMMENT_IF(PROD)_ q_2.solution() """ Explanation: Use the world and world_loans GeoDataFrames to visualize Kiva loan locations across the world. End of explanation """ # Your code here PHL_loans = ____ # Check your answer q_3.check() #%%RM_IF(PROD)%% PHL_loans = world_loans.loc[world_loans.country=="Philippines"].copy() q_3.assert_check_passed() # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ q_3.hint() #_COMMENT_IF(PROD)_ q_3.solution() """ Explanation: 3) Select loans based in the Philippines. Next, you'll focus on loans that are based in the Philippines. Use the next code cell to create a GeoDataFrame PHL_loans which contains all rows from world_loans with loans that are based in the Philippines. End of explanation """ # Load a KML file containing island boundaries gpd.io.file.fiona.drvsupport.supported_drivers['KML'] = 'rw' PHL = gpd.read_file("../input/geospatial-learn-course-data/Philippines_AL258.kml", driver='KML') PHL.head() """ Explanation: 4) Understand loans in the Philippines. Run the next code cell without changes to load a GeoDataFrame PHL containing boundaries for all islands in the Philippines. End of explanation """ # Your code here ____ # Uncomment to see a hint #_COMMENT_IF(PROD)_ q_4.a.hint() #%%RM_IF(PROD)%% ax = PHL.plot(figsize=(12,12), color='whitesmoke', linestyle=':', edgecolor='lightgray') PHL_loans.plot(ax=ax, markersize=2) # Get credit for your work after you have created a map q_4.a.check() # Uncomment to see our solution (your code may look different!) 
#_COMMENT_IF(PROD)_ q_4.a.solution() """ Explanation: Use the PHL and PHL_loans GeoDataFrames to visualize loans in the Philippines. End of explanation """ # View the solution (Run this code cell to receive credit!) q_4.b.solution() """ Explanation: Can you identify any islands where it might be useful to recruit new Field Partners? Do any islands currently look outside of Kiva's reach? You might find this map useful to answer the question. End of explanation """
ethen8181/machine-learning
trees/lightgbm.ipynb
mit
# code for loading the format for the notebook import os # path : store the current path to convert back to it later path = os.getcwd() os.chdir(os.path.join('..', 'notebook_format')) from formats import load_style load_style(css_style='custom2.css', plot_style=False) os.chdir(path) # 1. magic for inline plot # 2. magic to print version # 3. magic so that the notebook will reload external python modules # 4. magic to enable retina (high resolution) plots # https://gist.github.com/minrk/3301035 %matplotlib inline %load_ext watermark %load_ext autoreload %autoreload 2 %config InlineBackend.figure_format='retina' import os import re import time import requests import numpy as np import pandas as pd import matplotlib.pyplot as plt from xgboost import XGBClassifier from lightgbm import LGBMClassifier from lightgbm import plot_importance from sklearn.metrics import roc_auc_score from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder, OneHotEncoder, OrdinalEncoder %watermark -a 'Ethen' -d -t -v -p numpy,pandas,sklearn,matplotlib,xgboost,lightgbm """ Explanation: <h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#LightGBM" data-toc-modified-id="LightGBM-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>LightGBM</a></span><ul class="toc-item"><li><span><a href="#Data-Preprocessing" data-toc-modified-id="Data-Preprocessing-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Data Preprocessing</a></span></li><li><span><a href="#Benchmarking" data-toc-modified-id="Benchmarking-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Benchmarking</a></span></li><li><span><a href="#Categorical-Variables-in-Tree-based-Models" data-toc-modified-id="Categorical-Variables-in-Tree-based-Models-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Categorical Variables in Tree-based Models</a></span></li></ul></li><li><span><a href="#Reference" 
data-toc-modified-id="Reference-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Reference</a></span></li></ul></div> End of explanation """ def get_data(): file_path = 'adult.csv' if not os.path.isfile(file_path): def chunks(input_list, n_chunk): """take a list and break it up into n-size chunks""" for i in range(0, len(input_list), n_chunk): yield input_list[i:i + n_chunk] columns = [ 'age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income' ] url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data' r = requests.get(url) raw_text = r.text.replace('\n', ',') splitted_text = re.split(r',\s*', raw_text) data = list(chunks(splitted_text, n_chunk=len(columns))) data = pd.DataFrame(data, columns=columns).dropna(axis=0, how='any') data.to_csv(file_path, index=False) data = pd.read_csv(file_path) return data data = get_data() print('dimensions:', data.shape) data.head() label_col = 'income' cat_cols = [ 'workclass', 'education', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'native_country' ] num_cols = [ 'age', 'fnlwgt', 'education_num', 'capital_gain', 'capital_loss', 'hours_per_week' ] print('number of numerical features: ', len(num_cols)) print('number of categorical features: ', len(cat_cols)) label_encode = LabelEncoder() data[label_col] = label_encode.fit_transform(data[label_col]) y = data[label_col].values data = data.drop(label_col, axis=1) print('labels distribution:', np.bincount(y) / y.size) test_size = 0.1 split_random_state = 1234 df_train, df_test, y_train, y_test = train_test_split( data, y, test_size=test_size, random_state=split_random_state, stratify=y) df_train = df_train.reset_index(drop=True) df_test = df_test.reset_index(drop=True) print('dimensions:', df_train.shape) df_train.head() """ Explanation: LightGBM Gradient boosting is a machine learning technique that 
produces a prediction model in the form of an ensemble of weak classifiers, optimizing for a differentiable loss function. One of the most popular types of gradient boosting is gradient boosted trees, which internally is made up of an ensemble of weak decision trees. There are two different ways to compute the trees: level-wise and leaf-wise, as illustrated by the diagram below:
<img src="img/levelwise.png" width="50%" height="50%">
<img src="img/leafwise.png" width="60%" height="60%">
The level-wise strategy adds complexity by extending the depth of the tree level by level. In contrast, the leaf-wise strategy generates branches by optimizing a loss. The level-wise strategy grows the tree level by level. In this strategy, each node splits the data, prioritizing the nodes closer to the tree root. The leaf-wise strategy grows the tree by splitting the data at the nodes with the highest loss change. Level-wise growth is usually better for smaller datasets, whereas leaf-wise tends to overfit. Leaf-wise growth tends to excel in larger datasets, where it is considerably faster than level-wise growth.
A key challenge in training boosted decision trees is the computational cost of finding the best split for each leaf. Conventional techniques find the exact split for each leaf, and require scanning through all the data in each iteration. A different approach approximates the split by building histograms of the features. That way, the algorithm doesn't need to evaluate every single value of the features to compute the split, but only the bins of the histogram, which are bounded. This approach turns out to be much more efficient for large datasets, without adversely affecting accuracy.
With all of that being said, LightGBM is a fast, distributed, high-performance gradient boosting framework that was open-sourced by Microsoft around August 2016.
The main advantages of LightGBM include:
Faster training speed and higher efficiency: LightGBM uses a histogram-based algorithm, i.e. it buckets continuous feature values into discrete bins, which speeds up the training procedure.
Lower memory usage: Replacing continuous values with discrete bins results in lower memory usage.
Better accuracy than any other boosting algorithm: It produces much more complex trees by following a leaf-wise split approach rather than a level-wise approach, which is the main factor in achieving higher accuracy. However, it can sometimes lead to overfitting, which can be avoided by setting the max_depth parameter.
Compatibility with Large Datasets: It is capable of performing equally well with large datasets, with a significant reduction in training time as compared to XGBoost.
Parallel learning supported.
The significant speed advantage of LightGBM translates into the ability to do more iterations and/or quicker hyperparameter search, which can be very useful if we have a limited time budget for optimizing our model or want to experiment with different feature engineering ideas.
Data Preprocessing
This notebook compares LightGBM with XGBoost, another extremely popular gradient boosting framework, by applying both of the algorithms to a dataset and then comparing the model's performance and execution time. Here we will be using the Adult dataset, which consists of 32561 observations and 14 features describing individuals from various countries. Our target is to predict whether a person makes <=50k or >50k annually on the basis of the other information available.
Dataset consists of 32561 observations and 14 features describing individuals.
End of explanation """ from sklearn.preprocessing import OneHotEncoder one_hot_encoder = OneHotEncoder(sparse=False, dtype=np.int32) one_hot_encoder.fit(df_train[cat_cols]) cat_one_hot_cols = one_hot_encoder.get_feature_names(cat_cols) print('number of one hot encoded categorical columns: ', len(cat_one_hot_cols)) cat_one_hot_cols[:5] def preprocess_one_hot(df, one_hot_encoder, num_cols, cat_cols): df = df.copy() cat_one_hot_cols = one_hot_encoder.get_feature_names(cat_cols) df_one_hot = pd.DataFrame( one_hot_encoder.transform(df[cat_cols]), columns=cat_one_hot_cols ) df_preprocessed = pd.concat([ df[num_cols], df_one_hot ], axis=1) return df_preprocessed df_train_one_hot = preprocess_one_hot(df_train, one_hot_encoder, num_cols, cat_cols) df_test_one_hot = preprocess_one_hot(df_test, one_hot_encoder, num_cols, cat_cols) print(df_train_one_hot.shape) df_train_one_hot.dtypes """ Explanation: We'll perform very little feature engineering as that's not our main focus here. The following code chunk only one hot encodes the categorical features. There will be follow up discussions on this in later section. End of explanation """ time.sleep(5) lgb = LGBMClassifier( n_jobs=-1, max_depth=6, subsample=1, n_estimators=100, learning_rate=0.1, colsample_bytree=1, objective='binary', boosting_type='gbdt') start = time.time() lgb.fit(df_train_one_hot, y_train) lgb_elapse = time.time() - start print('elapse:, ', lgb_elapse) time.sleep(5) # raw xgboost xgb = XGBClassifier( n_jobs=-1, max_depth=6, subsample=1, n_estimators=100, learning_rate=0.1, colsample_bytree=1, objective='binary:logistic', booster='gbtree') start = time.time() xgb.fit(df_train_one_hot, y_train) xgb_elapse = time.time() - start print('elapse:, ', xgb_elapse) """ Explanation: Benchmarking The next section compares the xgboost and lightgbm's implementation in terms of both execution time and model performance. 
There are a bunch of other hyperparameters that we as the end-user can specify, but here we explicity specify arguably the most important ones. End of explanation """ time.sleep(5) xgb_hist = XGBClassifier( n_jobs=-1, max_depth=6, subsample=1, n_estimators=100, learning_rate=0.1, colsample_bytree=1, objective='binary:logistic', booster='gbtree', tree_method='hist', grow_policy='lossguide') start = time.time() xgb_hist.fit(df_train_one_hot, y_train) xgb_hist_elapse = time.time() - start print('elapse:, ', xgb_hist_elapse) # evaluate performance y_pred = lgb.predict_proba(df_test_one_hot)[:, 1] lgb_auc = roc_auc_score(y_test, y_pred) print('auc score: ', lgb_auc) y_pred = xgb.predict_proba(df_test_one_hot)[:, 1] xgb_auc = roc_auc_score(y_test, y_pred) print('auc score: ', xgb_auc) y_pred = xgb_hist.predict_proba(df_test_one_hot)[:, 1] xgb_hist_auc = roc_auc_score(y_test, y_pred) print('auc score: ', xgb_hist_auc) # comparison table results = pd.DataFrame({ 'elapse_time': [lgb_elapse, xgb_hist_elapse, xgb_elapse], 'auc_score': [lgb_auc, xgb_hist_auc, xgb_auc]}) results.index = ['LightGBM', 'XGBoostHist', 'XGBoost'] results """ Explanation: XGBoost includes a tree_method = 'hist'option that buckets continuous variables into bins to speed up training, we also set grow_policy = 'lossguide' to favor splitting at nodes with highest loss change, which mimics LightGBM. 
End of explanation """ ordinal_encoder = OrdinalEncoder(dtype=np.int32) ordinal_encoder.fit(df_train[cat_cols]) def preprocess_ordinal(df, ordinal_encoder, cat_cols, cat_dtype='int32'): df = df.copy() df[cat_cols] = ordinal_encoder.transform(df[cat_cols]) df[cat_cols] = df[cat_cols].astype(cat_dtype) return df df_train_ordinal = preprocess_ordinal(df_train, ordinal_encoder, cat_cols) df_test_ordinal = preprocess_ordinal(df_test, ordinal_encoder, cat_cols) print(df_train_ordinal.shape) df_train_ordinal.dtypes time.sleep(5) lgb = LGBMClassifier( n_jobs=-1, max_depth=6, subsample=1, n_estimators=100, learning_rate=0.1, colsample_bytree=1, objective='binary', boosting_type='gbdt') start = time.time() lgb.fit(df_train_ordinal, y_train) lgb_ordinal_elapse = time.time() - start print('elapse:, ', lgb_ordinal_elapse) y_pred = lgb.predict_proba(df_test_ordinal)[:, 1] lgb_ordinal_auc = roc_auc_score(y_test, y_pred) print('auc score: ', lgb_ordinal_auc) # comparison table results = pd.DataFrame({ 'elapse_time': [lgb_ordinal_elapse, lgb_elapse, xgb_hist_elapse, xgb_elapse], 'auc_score': [lgb_ordinal_auc, lgb_auc, xgb_hist_auc, xgb_auc]}) results.index = ['LightGBM Ordinal', 'LightGBM', 'XGBoostHist', 'XGBoost'] results """ Explanation: From the resulting table, we can see that there isn't a noticeable difference in auc score between the two implementations. On the other hand, there is a significant difference in the time it takes to finish the whole training procedure. This is a huge advantage and makes LightGBM a much better approach when dealing with large datasets. For those interested, the people at Microsoft has a blog that has a even more thorough benchmark result on various datasets. Link is included below along with a summary of their results: Blog: Lessons Learned From Benchmarking Fast Machine Learning Algorithms Our results, based on tests on six datasets, are summarized as follows: XGBoost and LightGBM achieve similar accuracy metrics. 
LightGBM has lower training time than XGBoost and its histogram-based variant, XGBoost hist, for all test datasets, on both CPU and GPU implementations. The training time difference between the two libraries depends on the dataset, and can be as big as 25 times.
XGBoost GPU implementation does not scale well to large datasets and ran out of memory in half of the tests.
XGBoost hist may be significantly slower than the original XGBoost when feature dimensionality is high.
Categorical Variables in Tree-based Models
Many real-world datasets include a mix of continuous and categorical variables. The property of the latter is that their values have zero inherent ordering. One major advantage of decision tree models and their ensemble counterparts, such as random forests, extra trees and gradient boosted trees, is that they are able to operate on both continuous and categorical variables directly (popular implementations of tree-based models differ as to whether they honor this fact). In contrast, most other popular models (e.g., generalized linear models, neural networks) must instead transform categorical variables into some numerical format, usually by one-hot encoding them to create a new dummy variable for each level of the original variable. e.g.
<img src="img/onehot_encoding.png" width="80%" height="80%">
One drawback of one hot encoding is that it can lead to a huge increase in the dimensionality of the feature representations. For example, one hot encoding U.S. states adds 49 dimensions to our feature representation.
This means tree-based models are essentially looking for places to split the data, they are not multiplying our inputs by weights. In contrast, most other popular models (e.g., generalized linear models, neural networks) would interpret categorical variables such as red=1, blue=2 as blue is twice the amount of red, which is obviously not what we want. End of explanation """ print('OneHot Encoding') print('number of columns: ', df_train_one_hot.shape[1]) print('memory usage: ', df_train_one_hot.memory_usage(deep=True).sum()) print() print('Ordinal Encoding') print('number of columns: ', df_train_ordinal.shape[1]) print('memory usage: ', df_train_ordinal.memory_usage(deep=True).sum()) # plotting the feature importance just out of curiosity # change default style figure and font size plt.rcParams['figure.figsize'] = 10, 8 plt.rcParams['font.size'] = 12 # like other tree-based models, it can also output the # feature importance plot plot_importance(lgb, importance_type='gain') plt.show() """ Explanation: From the result above, we can see that it requires even less training time without sacrificing any sort of performance. What's even more is that we now no longer need to perform the one hot encoding on our categorical features. The code chunk below shows this is highly advantageous from a memory-usage perspective when we have a bunch of categorical features. End of explanation """
DylanGification/PhleepGG
data/MultipleLinearRegression.ipynb
mit
import pandas as pd
import numpy as np
import statsmodels.api as sm

# Load the scraped Overwatch player stats; assumes the JSON file sits next to
# the notebook -- TODO confirm the schema (expects 'rank', 'level' and 'comp'
# columns).
data = pd.read_json("Overwatch090317.json")

rank = [x for x in data['rank']]
level = [x for x in data['level']]
# DataFrame.get returns the 'comp' column (or {} if absent); each entry is
# presumably a nested dict of competitive stats -- verify against the scraper.
wins = [x for x in data.get('comp', {})]
# winsdata=[x for x in wins.index[x]]
# wins=[x for x in data['comp'] if x <1E100]

# Exploratory poke at the first record's nested structure.
a = wins[0]
b = a.get('total', {})
b.get('wins', {})

def getWins(x):
    # Pull the total win count out of one nested 'comp' record.
    # NOTE(review): the fallback default is {} rather than a number, which
    # would leak dicts into the regression column below -- confirm intent.
    b = x.get('total', {})
    return(b.get('wins', {}))

winsExtracted = [getWins(x) for x in wins]
#[x for x in rank if x <1e100]

def filterNaN(x):
    # NaN compares False against any number, so x < 1e100 passes only for
    # real (finite) values.
    if x < 1e100:
        return(x)
    else:
        return(0)  #0 ->treat NaN as no rank

rankFiltered = [filterNaN(x) for x in rank]
# NOTE(review): levelFiltered is computed but the unfiltered `level` list is
# what ends up in the DataFrame below -- confirm that is intended.
levelFiltered = [filterNaN(x) for x in level]

# pandas.stats.api.ols was deprecated and later removed from pandas; newer
# environments should use statsmodels' OLS instead.
from pandas.stats.api import ols
df = pd.DataFrame({"Rank": rankFiltered, "Level": level, "Wins": winsExtracted})
res = ols(y=df['Rank'], x=df[['Level', 'Wins']])
res

data.to_xarray()

# Keep only finite rank values for plotting.
rankdata = [x for x in data['rank'] if x < 1E100]  # is x < 'infinity'

import matplotlib.pyplot as plt
"""
Explanation: $\mbox{rank} = \mbox{const}+ B\times \mbox{hoursplayed} + C \times \mbox{wins}$
End of explanation
"""

%matplotlib inline
plt.plot(rankdata)
plt.show()

compdata = data['comp']
# DataFrame.as_matrix is deprecated in later pandas; .values is the modern
# equivalent.
compdata.as_matrix()
"""
Explanation: plotting
plot y vs x
.plot(array[0],array[1]), where array[0] is the x-value and array[1] is the y value
End of explanation
"""
rebeccabilbro/machine-learning
notebook/Wheat Classification.ipynb
mit
%matplotlib inline import os import json import time import pickle import requests import numpy as np import pandas as pd import matplotlib.pyplot as plt URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/00236/seeds_dataset.txt" def fetch_data(fname='seeds_dataset.txt'): """ Helper method to retreive the ML Repository dataset. """ response = requests.get(URL) outpath = os.path.abspath(fname) with open(outpath, 'w') as f: f.write(response.content) return outpath # Fetch the data if required DATA = fetch_data() FEATURES = [ "area", "perimeter", "compactness", "length", "width", "asymmetry", "groove", "label" ] LABEL_MAP = { 1: "Kama", 2: "Rosa", 3: "Canadian", } # Read the data into a DataFrame df = pd.read_csv(DATA, sep='\s+', header=None, names=FEATURES) # Convert class labels into text for k,v in LABEL_MAP.items(): df.ix[df.label == k, 'label'] = v # Describe the dataset print df.describe() # Determine the shape of the data print "{} instances with {} features\n".format(*df.shape) # Determine the frequency of each class print df.groupby('label')['label'].count() # Create a scatter matrix of the dataframe features from pandas.tools.plotting import scatter_matrix scatter_matrix(df, alpha=0.2, figsize=(12, 12), diagonal='kde') plt.show() from pandas.tools.plotting import parallel_coordinates plt.figure(figsize=(12,12)) parallel_coordinates(df, 'label') plt.show() from pandas.tools.plotting import radviz plt.figure(figsize=(12,12)) radviz(df, 'label') plt.show() """ Explanation: Classifying Wheat Kernels by Physical Property In the workshop for this week, you are to select a data set from the UCI Machine Learning Repository and based on the recommended analysis type, wrangle the data into a fitted model, showing some model evaluation. In particular: Layout the data into a dataset X and targets y. Choose regression, classification, or clustering and build the best model you can from it. 
Report an evaluation of the model built Visualize aspects of your model (optional) Compare and contrast different model families When complete, I will review your code, so please submit your code via pull-request to the Introduction to Machine Learning with Scikit-Learn repository! Wheat Kernel Example Downloaded from the UCI Machine Learning Repository on February 26, 2015. The first thing is to fully describe your data in a README file. The dataset description is as follows: Data Set: Multivariate Attribute: Real Tasks: Classification, Clustering Instances: 210 Attributes: 7 Data Set Information: The examined group comprised kernels belonging to three different varieties of wheat: Kama, Rosa and Canadian, 70 elements each, randomly selected for the experiment. High quality visualization of the internal kernel structure was detected using a soft X-ray technique. It is non-destructive and considerably cheaper than other more sophisticated imaging techniques like scanning microscopy or laser technology. The images were recorded on 13x18 cm X-ray KODAK plates. Studies were conducted using combine harvested wheat grain originating from experimental fields, explored at the Institute of Agrophysics of the Polish Academy of Sciences in Lublin. The data set can be used for the tasks of classification and cluster analysis. Attribute Information: To construct the data, seven geometric parameters of wheat kernels were measured: area A, perimeter P, compactness C = 4piA/P^2, length of kernel, width of kernel, asymmetry coefficient length of kernel groove. All of these parameters were real-valued continuous. Relevant Papers: M. Charytanowicz, J. Niewczas, P. Kulczycki, P.A. Kowalski, S. Lukasik, S. Zak, 'A Complete Gradient Clustering Algorithm for Features Analysis of X-ray Images', in: Information Technologies in Biomedicine, Ewa Pietka, Jacek Kawa (eds.), Springer-Verlag, Berlin-Heidelberg, 2010, pp. 15-24. 
Data Exploration In this section we will begin to explore the dataset to determine relevant information. End of explanation """ from sklearn.datasets.base import Bunch DATA_DIR = os.path.abspath(os.path.join(".", "..", "data", "wheat")) # Show the contents of the data directory for name in os.listdir(DATA_DIR): if name.startswith("."): continue print "- {}".format(name) def load_data(root=DATA_DIR): # Construct the `Bunch` for the wheat dataset filenames = { 'meta': os.path.join(root, 'meta.json'), 'rdme': os.path.join(root, 'README.md'), 'data': os.path.join(root, 'seeds_dataset.txt'), } # Load the meta data from the meta json with open(filenames['meta'], 'r') as f: meta = json.load(f) target_names = meta['target_names'] feature_names = meta['feature_names'] # Load the description from the README. with open(filenames['rdme'], 'r') as f: DESCR = f.read() # Load the dataset from the text file. dataset = np.loadtxt(filenames['data']) # Extract the target from the data data = dataset[:, 0:-1] target = dataset[:, -1] # Create the bunch object return Bunch( data=data, target=target, filenames=filenames, target_names=target_names, feature_names=feature_names, DESCR=DESCR ) # Save the dataset as a variable we can use. dataset = load_data() print dataset.data.shape print dataset.target.shape """ Explanation: Data Extraction One way that we can structure our data for easy management is to save files on disk. The Scikit-Learn datasets are already structured this way, and when loaded into a Bunch (a class imported from the datasets module of Scikit-Learn) we can expose a data API that is very familiar to how we've trained on our toy datasets in the past. 
A Bunch object exposes some important properties: data: array of shape n_samples * n_features target: array of length n_samples feature_names: names of the features target_names: names of the targets filenames: names of the files that were loaded DESCR: contents of the readme Note: This does not preclude database storage of the data, in fact - a database can be easily extended to load the same Bunch API. Simply store the README and features in a dataset description table and load it from there. The filenames property will be redundant, but you could store a SQL statement that shows the data load. In order to manage our data set on disk, we'll structure our data as follows: End of explanation """ from sklearn import metrics from sklearn import cross_validation from sklearn.cross_validation import KFold from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier def fit_and_evaluate(dataset, model, label, **kwargs): """ Because of the Scikit-Learn API, we can create a function to do all of the fit and evaluate work on our behalf! """ start = time.time() # Start the clock! 
scores = {'precision':[], 'recall':[], 'accuracy':[], 'f1':[]} for train, test in KFold(dataset.data.shape[0], n_folds=12, shuffle=True): X_train, X_test = dataset.data[train], dataset.data[test] y_train, y_test = dataset.target[train], dataset.target[test] estimator = model(**kwargs) estimator.fit(X_train, y_train) expected = y_test predicted = estimator.predict(X_test) # Append our scores to the tracker scores['precision'].append(metrics.precision_score(expected, predicted, average="weighted")) scores['recall'].append(metrics.recall_score(expected, predicted, average="weighted")) scores['accuracy'].append(metrics.accuracy_score(expected, predicted)) scores['f1'].append(metrics.f1_score(expected, predicted, average="weighted")) # Report print "Build and Validation of {} took {:0.3f} seconds".format(label, time.time()-start) print "Validation scores are as follows:\n" print pd.DataFrame(scores).mean() # Write official estimator to disk estimator = model(**kwargs) estimator.fit(dataset.data, dataset.target) outpath = label.lower().replace(" ", "-") + ".pickle" with open(outpath, 'w') as f: pickle.dump(estimator, f) print "\nFitted model written to:\n{}".format(os.path.abspath(outpath)) # Perform SVC Classification fit_and_evaluate(dataset, SVC, "Wheat SVM Classifier") # Perform kNN Classification fit_and_evaluate(dataset, KNeighborsClassifier, "Wheat kNN Classifier", n_neighbors=12) # Perform Random Forest Classification fit_and_evaluate(dataset, RandomForestClassifier, "Wheat Random Forest Classifier") """ Explanation: Classification Now that we have a dataset Bunch loaded and ready, we can begin the classification process. Let's attempt to build a classifier with kNN, SVM, and Random Forest classifiers. End of explanation """
SJSlavin/phys202-2015-work
assignments/assignment07/AlgorithmsEx01.ipynb
mit
%matplotlib inline from matplotlib import pyplot as plt import numpy as np """ Explanation: Algorithms Exercise 1 Imports End of explanation """ file = open("mobydick_chapter1.txt") mobydick = file.read() mobydick = mobydick.splitlines() mobydick = " ".join(mobydick) punctuation = ["-", ",", "."] mobydick = list(mobydick) mobydick_f = list(filter(lambda c: c not in punctuation, mobydick)) mobydick_f = "".join(mobydick_f) stop_words = ["of", "or", "in"] mobydick_fs = mobydick_f.split() mobydick_fs = list(filter(lambda w: w not in stop_words, mobydick_fs)) print(mobydick_fs) phrase = ['the cat', 'ran away'] ' '.join(phrase).split(' ') def tokenize(s, stop_words=None, punctuation='`~!@#$%^&*()_-+={[}]|\:;"<,>.?/}\t'): s = s.splitlines() s = " ".join(s) punctuation_l = list(punctuation) s = list(s) s_f = list(filter(lambda c: c not in punctuation, s)) s_f = "".join(s_f) stop_words_l = [] #http://stackoverflow.com/questions/402504/how-to-determine-the-variable-type-in-python if type(stop_words) is str: stop_words_l = stop_words.split(" ") elif type(stop_words) is list: stop_words_l = stop_words else: stop_words_l = [] s_fs = s_f.split() s_fs = list(filter(lambda w: w not in stop_words_l, s_fs)) s_fs = [w.lower() for w in s_fs] return s_fs print() assert tokenize("This, is the way; that things will end", stop_words=['the', 'is']) == \ ['this', 'way', 'that', 'things', 'will', 'end'] wasteland = """ APRIL is the cruellest month, breeding Lilacs out of the dead land, mixing Memory and desire, stirring Dull roots with spring rain. """ assert tokenize(wasteland, stop_words='is the of and') == \ ['april','cruellest','month','breeding','lilacs','out','dead','land', 'mixing','memory','desire','stirring','dull','roots','with','spring', 'rain'] """ Explanation: Word counting Write a function tokenize that takes a string of English text returns a list of words. It should also remove stop words, which are common short words that are often removed before natural language processing. 
Your function should have the following logic: Split the string into lines using splitlines. Split each line into a list of words and merge the lists for each line. Use Python's builtin filter function to remove all punctuation. If stop_words is a list, remove all occurences of the words in the list. If stop_words is a space delimeted string of words, split them and remove them. Remove any remaining empty words. Make all words lowercase. End of explanation """ def count_words(data): count = {} for w in range(0, len(data)): if data[w] in count: count[data[w]] += 1 else: count[data[w]] = 1 #this does not sort correctly, and from what I can tell, dictionaries can't be sorted anyway return(count) assert count_words(tokenize('this and the this from and a a a')) == \ {'a': 3, 'and': 2, 'from': 1, 'the': 1, 'this': 2} """ Explanation: Write a function count_words that takes a list of words and returns a dictionary where the keys in the dictionary are the unique words in the list and the values are the word counts. End of explanation """ def sort_word_counts(wc): """Return a list of 2-tuples of (word, count), sorted by count descending.""" wordlist = [] n = 0 for w in wc: wordlist.append((w, wc[w])) #http://stackoverflow.com/questions/3121979/how-to-sort-list-tuple-of-lists-tuples wordlist_s = sorted(wordlist, key=lambda tup: tup[1], reverse=True) print(wordlist_s) return(wordlist_s) assert sort_word_counts(count_words(tokenize('this and a the this this and a a a'))) == \ [('a', 4), ('this', 3), ('and', 2), ('the', 1)] """ Explanation: Write a function sort_word_counts that return a list of sorted word counts: Each element of the list should be a (word, count) tuple. The list should be sorted by the word counts, with the higest counts coming first. To perform this sort, look at using the sorted function with a custom key and reverse argument. 
End of explanation """ # YOUR CODE HERE file = open("mobydick_chapter1.txt") mobydick = file.read() mobydick_t = tokenize(mobydick, stop_words = "the of and a to in is it that as") mobydick_wc = count_words(mobydick_t) swc = sort_word_counts(mobydick_wc) assert swc[0]==('i',43) assert len(swc)==848 """ Explanation: Perform a word count analysis on Chapter 1 of Moby Dick, whose text can be found in the file mobydick_chapter1.txt: Read the file into a string. Tokenize with stop words of 'the of and a to in is it that as'. Perform a word count, the sort and save the result in a variable named swc. End of explanation """ # YOUR CODE HERE words, freq = zip(*swc) plt.bar(np.arange(len(words)), freq, linestyle="dotted") plt.title("Word Frequency") plt.xlabel("Word") plt.ylabel("Frequency") #plt.xticks(words) #couldn't figure out how to format the plot correctly # YOUR CODE HERE words, freq = zip(*swc) plt.scatter(freq, np.arange(0, len(words), -1)) plt.title("Word Frequency") plt.xlabel("Word") plt.ylabel("Frequency") #plt.xticks(words) #couldn't figure out how to format the plot correctly assert True # use this for grading the dotplot """ Explanation: Create a "Cleveland Style" dotplot of the counts of the top 50 words using Matplotlib. If you don't know what a dotplot is, you will have to do some research... End of explanation """
julienchastang/unidata-python-workshop
notebooks/Surface_Data/Surface Data with Siphon and MetPy.ipynb
mit
from siphon.catalog import TDSCatalog # copied from the browser url box metar_cat_url = ('http://thredds.ucar.edu/thredds/catalog/' 'irma/metar/catalog.xml?dataset=irma/metar/Metar_Station_Data_-_Irma_fc.cdmr') # Parse the xml catalog = TDSCatalog(metar_cat_url) # what datasets are here? print(list(catalog.datasets)) metar_dataset = catalog.datasets['Feature Collection'] """ Explanation: <a name="top"></a> <div style="width:1000 px"> <div style="float:right; width:98 px; height:98px;"> <img src="https://raw.githubusercontent.com/Unidata/MetPy/master/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;"> </div> <h1>Working with Surface Observations in Siphon and MetPy</h1> <h3>Unidata Python Workshop</h3> <div style="clear:both"></div> </div> <hr style="height:2px;"> <div style="float:right; width:250 px"><img src="http://weather-geek.net/images/metar_what.png" alt="METAR" style="height: 200px;"></div> Overview: Teaching: 20 minutes Exercises: 20 minutes Questions What's the best way to get surface station data from a THREDDS data server? What's the best way to make a station plot of data? How can I request a time series of data for a single station? Objectives <a href="#ncss">Use the netCDF Subset Service (NCSS) to request a portion of the data</a> <a href="#stationplot">Download data for a single time across stations and create a station plot</a> <a href="#timeseries">Request a time series of data and plot</a> <a name="ncss"></a> 1. Using NCSS to get point data End of explanation """ # Can safely ignore the warnings ncss = metar_dataset.subset() """ Explanation: Once we've grabbed the "Feature Collection" dataset, we can request a subset of the data: End of explanation """ ncss.variables """ Explanation: What variables do we have available? 
End of explanation """ from datetime import datetime query = ncss.query() query.lonlat_box(north=34, south=24, east=-80, west=-90) query.time(datetime(2017, 9, 10, 12)) query.variables('temperature', 'dewpoint', 'altimeter_setting', 'wind_speed', 'wind_direction', 'sky_coverage') query.accept('csv') # Get the data data = ncss.get_data(query) data """ Explanation: <a href="#top">Top</a> <hr style="height:2px;"> <a name="stationplot"></a> 2. Making a station plot Make new NCSS query Request data closest to a time End of explanation """ import numpy as np import metpy.calc as mpcalc from metpy.units import units # Since we used the CSV data, this is just a dictionary of arrays lats = data['latitude'] lons = data['longitude'] tair = data['temperature'] dewp = data['dewpoint'] alt = data['altimeter_setting'] # Convert wind to components u, v = mpcalc.wind_components(data['wind_speed'] * units.knots, data['wind_direction'] * units.degree) # Need to handle missing (NaN) and convert to proper code cloud_cover = 8 * data['sky_coverage'] / 100. cloud_cover[np.isnan(cloud_cover)] = 10 cloud_cover = cloud_cover.astype(np.int) # For some reason these come back as bytes instead of strings stid = np.array([s.tostring().decode() for s in data['station']]) """ Explanation: Now we need to pull apart the data and perform some modifications, like converting winds to components and convert sky coverage percent to codes (octets) suitable for plotting. 
End of explanation """ %matplotlib inline import cartopy.crs as ccrs import cartopy.feature as cfeature import matplotlib.pyplot as plt from metpy.plots import StationPlot, sky_cover # Set up a plot with map features fig = plt.figure(figsize=(12, 12)) proj = ccrs.Stereographic(central_longitude=-95, central_latitude=35) ax = fig.add_subplot(1, 1, 1, projection=proj) ax.add_feature(cfeature.STATES, edgecolor='black') ax.coastlines(resolution='50m') ax.gridlines() # Create a station plot pointing to an Axes to draw on as well as the location of points stationplot = StationPlot(ax, lons, lats, transform=ccrs.PlateCarree(), fontsize=12) stationplot.plot_parameter('NW', tair, color='red') # Add wind barbs stationplot.plot_barb(u, v) # Plot the sky cover symbols in the center. We give it the integer code values that # should be plotted, as well as a mapping class that can convert the integer values # to the appropriate font glyph. stationplot.plot_symbol('C', cloud_cover, sky_cover) """ Explanation: Create the map using cartopy and MetPy! One way to create station plots with MetPy is to create an instance of StationPlot and call various plot methods, like plot_parameter, to plot arrays of data at locations relative to the center point. In addition to plotting values, StationPlot has support for plotting text strings, symbols, and plotting values using custom formatting. Plotting symbols involves mapping integer values to various custom font glyphs in our custom weather symbols font. MetPy provides mappings for converting WMO codes to their appropriate symbol. The sky_cover function below is one such mapping. 
End of explanation """ # Project points so that we're filtering based on the way the stations are laid out on the map proj = ccrs.Stereographic(central_longitude=-95, central_latitude=35) xy = proj.transform_points(ccrs.PlateCarree(), lons, lats) # Reduce point density so that there's only one point within a 200km circle mask = mpcalc.reduce_point_density(xy, 200000) """ Explanation: Notice how there are so many overlapping stations? There's a utility in MetPy to help with that: reduce_point_density. This returns a mask we can apply to data to filter the points. End of explanation """ # Set up a plot with map features fig = plt.figure(figsize=(12, 12)) ax = fig.add_subplot(1, 1, 1, projection=proj) ax.add_feature(cfeature.STATES, edgecolor='black') ax.coastlines(resolution='50m') ax.gridlines() # Create a station plot pointing to an Axes to draw on as well as the location of points stationplot = StationPlot(ax, lons[mask], lats[mask], transform=ccrs.PlateCarree(), fontsize=12) stationplot.plot_parameter('NW', tair[mask], color='red') stationplot.plot_barb(u[mask], v[mask]) stationplot.plot_symbol('C', cloud_cover[mask], sky_cover) """ Explanation: Now we just plot with arr[mask] for every arr of data we use in plotting. 
End of explanation """ # Use reduce_point_density # Set up a plot with map features fig = plt.figure(figsize=(12, 12)) ax = fig.add_subplot(1, 1, 1, projection=proj) ax.add_feature(cfeature.STATES, edgecolor='black') ax.coastlines(resolution='50m') ax.gridlines() # Create a station plot pointing to an Axes to draw on as well as the location of points # Plot dewpoint # Plot altimeter setting--formatter can take a function that formats values # Plot station id # %load solutions/reduce_density.py """ Explanation: More examples for MetPy Station Plots: - MetPy Examples - MetPy Symbol list <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Modify the station plot (reproduced below) to include dewpoint, altimeter setting, as well as the station id. The station id can be added using the `plot_text` method on `StationPlot`.</li> <li>Re-mask the data to be a bit more finely spaced, say: 75km</li> <li>Bonus Points: Use the `formatter` argument to `plot_parameter` to only plot the 3 significant digits of altimeter setting. (Tens, ones, tenths)</li> </ul> </div> End of explanation """ from datetime import timedelta # define the time range we are interested in end_time = datetime(2017, 9, 12, 0) start_time = end_time - timedelta(days=2) # build the query query = ncss.query() query.lonlat_point(-80.25, 25.8) query.time_range(start_time, end_time) query.variables('altimeter_setting', 'temperature', 'dewpoint', 'wind_direction', 'wind_speed') query.accept('csv') """ Explanation: <a href="#top">Top</a> <hr style="height:2px;"> <a name="timeseries"></a> 3. Time Series request and plot Let's say we want the past days worth of data... ...for Boulder (i.e. the lat/lon) ...for the variables mean sea level pressure, air temperature, wind direction, and wind_speed End of explanation """ data = ncss.get_data(query) print(list(data.keys())) """ Explanation: Let's get the data! 
End of explanation """ station_id = data['station'][0].tostring() print(station_id) """ Explanation: What station did we get? End of explanation """ station_id = station_id.decode('ascii') print(station_id) """ Explanation: That indicates that we have a Python bytes object, containing the 0-255 values corresponding to 'K', 'M', 'I', 'A'. We can decode those bytes into a string: End of explanation """ data['time'] """ Explanation: Let's get the time into datetime objects. We see we have an array with byte strings in it, like station id above. End of explanation """ time = [datetime.strptime(s.decode('ascii'), '%Y-%m-%dT%H:%M:%SZ') for s in data['time']] """ Explanation: So we can use a list comprehension to turn this into a list of date time objects: End of explanation """ from matplotlib.dates import DateFormatter, AutoDateLocator fig, ax = plt.subplots(figsize=(10, 6)) ax.plot(time, data['wind_speed'], color='tab:blue') ax.set_title(f'Site: {station_id} Date: {time[0]:%Y/%m/%d}') ax.set_xlabel('Hour of day') ax.set_ylabel('Wind Speed') ax.grid(True) # Improve on the default ticking locator = AutoDateLocator() hoursFmt = DateFormatter('%H') ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(hoursFmt) """ Explanation: Now for the obligatory time series plot... End of explanation """ # Your code goes here # %load solutions/time_series.py """ Explanation: <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Pick a different location</li> <li>Plot temperature and dewpoint together on the same plot</li> </ul> </div> End of explanation """
jrieke/mhbf
4/4.ipynb
mit
from __future__ import division, print_function import numpy as np import matplotlib.pyplot as plt %matplotlib inline """ Explanation: Solution from Johannes Rieke and Alex Moore¶ End of explanation """ from scipy.integrate import odeint def step(x): return int(x >= 0) x = np.linspace(-10, 10, 1000) plt.plot(x, np.vectorize(step)(x)) def laing_chow(y, t, I1, I2, alpha=0.2, beta=0.4, phi=0.4, tau=20, g1=1, g2=1): u1, u2, a1, a2 = y du1 = -u1 + step(alpha * u1 * g1 - beta * u2 * g2 - a1 + I1) du2 = -u2 + step(alpha * u2 * g2 - beta * u1 * g1 - a2 + I2) da1 = 1 / tau * (-a1 + phi * step(alpha * u1 * g1 - beta * u2 * g2 - a1 + I1)) da2 = 1 / tau * (-a2 + phi * step(alpha * u2 * g2 - beta * u1 * g1 - a2 + I2)) return [du1, du2, da1, da2] x0 = [1, 0, 0.1, 0.25] t = np.linspace(0, 500, 10000) I1 = 0.43 I2 = 0.5 u1, u2, a1, a2 = odeint(laing_chow, x0, t, args=(I1, I2)).T def plot_results(u1, u2, a1, a2): plt.subplot(211) plt.plot(t, u1, label='u1') plt.plot(t, a1, label='a1') plt.grid() plt.xlim(0, 500) plt.legend() plt.subplot(212) plt.plot(t, u2, label='u2') plt.plot(t, a2, label='a2') plt.grid() plt.xlim(0, 500) plt.legend() plt.xlabel('t') plot_results(u1, u2, a1, a2) """ Explanation: Exercise 1 1. Integration End of explanation """ def dominance_durations(x1, x2, t): durations = [] start = None for X1, X2, T in zip(x1, x2, t): if start is None and X1 > X2: start = T elif start is not None and X1 < X2: durations.append(T - start) start = None if start is not None: # population 1 active at the end of the simulation durations.append(t[-1] - start) return durations dominance_durations(u1, u2, t) """ Explanation: 2. Dominance duration End of explanation """ u1, u2, a1, a2 = odeint(laing_chow, x0, t, args=(0.8, 0.8)).T plot_results(u1, u2, a1, a2) """ Explanation: Inserting the parameter values from above into eq. 9, one gets an analytical dominance duration of $T_1$ = 26.17. 
This is roughly in agreement with the simulated dominance durations (see above), but with a slight deviation of ~2. 3. Oscillation stop With the parameter values from above, $- \alpha + \beta + \phi$ = 0.6.
End of explanation
"""
u1, u2, a1, a2 = odeint(laing_chow, x0, t, args=(0.8, 0.8)).T
plot_results(u1, u2, a1, a2)
"""
Explanation: The simulation confirms that for high inputs (here: 0.8), both populations are active and oscillation stops.
End of explanation
"""
u1, u2, a1, a2 = odeint(laing_chow, x0, t, args=(0.55, 0.55)).T
plot_results(u1, u2, a1, a2)
dominance_durations(u1, u2, t)
"""
Explanation: For intermediate inputs (here: 0.55) the populations are inactive for shorter periods of time. Analytically, $T_1$ = 10.22. This deviates strongly from the simulated dominance durations (see above). Exercise 2 1. Problems with scipy.integrate.odeint The Moreno-Bote model uses stochastic differential equations, and therefore is not compatible with scipy.integrate.odeint 2. 
Euler-Maruyama End of explanation """ f_fun= lambda *args: 0 g_fun= lambda *args:(1/tau) y0=0 tau=0.1 y2=euler_maruyama(f_fun, g_fun, y0, t_max=5, dt=0.01) t=np.linspace(0,5,500) plt.plot(t,y2) plt.title("Tau=0.1") f_fun= lambda *args: 0 g_fun= lambda *args:(1/tau) y0=0 tau=1 y3=euler_maruyama(f_fun, g_fun, y0, t_max=5, dt=0.01) t=np.linspace(0,5,500) plt.plot(t,y3) plt.title("Tau=1") f_fun= lambda x, *args: (x/tau) g_fun= lambda *args:(1/tau) y0=0 tau=1 y4=euler_maruyama(f_fun, g_fun, y0, t_max=5, dt=0.01) t=np.linspace(0,5,500) plt.plot(t,y5) plt.title("Tau=1") """ Explanation: Laing-Chow calculated using Euler-Maruyama 3. Stochastic DEs End of explanation """ # This probably requires some more explanation. #Including reference to Wiener processes and Ornstein-Uhlenbeck processes """ Explanation: The scale is very different between the three simulations, tau=0.1 produces a much greater range of values. End of explanation """ def sigmoid(x, theta = 0.1, k = 0.05): return 1.0/(1+np.exp(-(x-theta)/k)) def f_moreno_bote(y, t, alpha = 0.75, beta= 0.5, gamma= 0.1, phi=0.5, tau= 0.01, tau_a= 2, tau_b= 2, tau_s= 0.1, eta= 0.5, g_a= 0.05, g_b= 0.05, f= sigmoid, sigma= 0.03): r_a, alpha_a, r_b, alpha_b, n_a_d, n_b_d = y r_pool = max(0, phi*(r_a + r_b) + g_a + g_b) r_a_inh = (r_pool + eta * r_a)**2 r_b_inh = (r_pool + eta * r_b)**2 dydt = np.asarray([ (-r_a + f(alpha * r_a - beta * r_a_inh + g_a - alpha_a + n_a_d))/tau, (-alpha_a + gamma * r_a)/tau_a, (-r_b + f(alpha * r_b - beta * r_b_inh + g_b - alpha_b + n_b_d))/tau, (-alpha_b + gamma * r_b)/tau_b, -n_a_d/tau_s, -n_b_d/tau_s]) return dydt def g_moreno_bote(y, t, tau_s = 0.1, sigma = 0.03): _, _, _, _, n_a_s, n_b_s = y dydt = np.asarray([ 0.0, 0.0, 0.0, 0.0, sigma * np.sqrt(2.0/tau_s) * np.random.normal(), sigma * np.sqrt(2.0/tau_s) * np.random.normal() ]) return dydt y0 = [1, 0, 0.01, 0.04, 0, 0] t=np.linspace(0,20,2000) y7 = euler_maruyama(f_moreno_bote, g_moreno_bote, y0, t_max = 20, dt=0.01) plt.subplot(211) 
plt.plot(t, y7[:,0], label='Rate a') plt.plot(t,y7[:,2], label='Rate b') plt.grid() plt.xlim(0, 20) plt.title("Evolution of Rates in Moreno-Bote Model") plt.legend() plt.subplot(211) plt.plot(t, y7[:,1], label='Current a') plt.plot(t,y7[:,3], label='Current b') plt.grid() plt.xlim(0, 20) plt.title("Evolution of Hyperpolarizing in Moreno-Bote Model") plt.legend() plt.subplot(211) plt.plot(t, y7[:,4], label='Noise a') plt.plot(t,y7[:,5], label='NOise b') plt.grid() plt.xlim(0, 20) plt.title("Evolution of Noise in Moreno-Bote Model") plt.legend() """ Explanation: 4. Moreno-Bote simulation End of explanation """ y0 = [1, 0, 0.01, 0.04, 0, 0] t=np.linspace(0,500,50000) y8 = euler_maruyama(f_moreno_bote, g_moreno_bote, y0, t_max = 500, dt=0.01) t=np.linspace(0,500,50000) moreno_dominace=dominance_durations(y8[:,0], y8[:,2], t) plt.hist(moreno_dominace) plt.title("Histogram of Dominance Durations") plt.xlabel("Dominance Duration [ms]") plt.ylabel("Frequency") np.mean(moreno_dominace) """ Explanation: 5. 
Dominance Durations
End of explanation
"""
# I could not manage to parameterise the function; apologies for the hack:
# instead of passing gamma through euler_maruyama, f_moreno_bote is
# redefined below with the default changed to gamma=0 (everything else is
# identical to the version above).
def f_moreno_bote(y, t, alpha = 0.75, beta= 0.5, gamma= 0, phi=0.5, tau= 0.01, tau_a= 2, tau_b= 2, tau_s= 0.1, eta= 0.5, g_a= 0.05, g_b= 0.05, f= sigmoid, sigma= 0.03):
    """Deterministic (drift) part of the Moreno-Bote model with the gamma
    term switched off by default (gamma=0), so the slow current variables
    alpha_a / alpha_b simply decay."""
    r_a, alpha_a, r_b, alpha_b, n_a_d, n_b_d = y
    # Pooled activity of both populations (rectified at zero).
    r_pool = max(0, phi*(r_a + r_b) + g_a + g_b)
    r_a_inh = (r_pool + eta * r_a)**2
    r_b_inh = (r_pool + eta * r_b)**2
    dydt = np.asarray([
        (-r_a + f(alpha * r_a - beta * r_a_inh + g_a - alpha_a + n_a_d))/tau,
        (-alpha_a + gamma * r_a)/tau_a,
        (-r_b + f(alpha * r_b - beta * r_b_inh + g_b - alpha_b + n_b_d))/tau,
        (-alpha_b + gamma * r_b)/tau_b,
        -n_a_d/tau_s,
        -n_b_d/tau_s])
    return dydt

# Re-run the simulation and dominance-duration analysis with gamma = 0.
y9 = euler_maruyama(f_moreno_bote, g_moreno_bote, y0, t_max = 500, dt=0.01)
moreno_dominace=dominance_durations(y9[:,0], y9[:,2], t)
plt.hist(moreno_dominace)
plt.title("Histogram of Dominance Durations")
plt.xlabel("Dominance Duration [ms]")
plt.ylabel("Frequency")
np.mean(moreno_dominace)
"""
Explanation: 6. Effect of Gamma
End of explanation
"""
bsipocz/AstroHackWeek2015
inference/straightline.ipynb
gpl-2.0
%load_ext autoreload %autoreload 2 from __future__ import print_function import numpy as np import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (8.0, 8.0) plt.rcParams['savefig.dpi'] = 100 from straightline_utils import * """ Explanation: Fitting a Straight Line Phil Marshall, Daniel Foreman-Mackey and Dustin Lang Astro Hack Week, New York, September 2015 Goals: Set up and carry out a simple Bayesian inference, characterizing a simple posterior PDF Compare brute force, analytic and MCMC sampled results Check models, with posterior predictive distributions of test statistics Use the Bayesian Evidence to compare various models, and understand its properties End of explanation """ (x,y,sigmay) = generate_data() plot_yerr(x, y, sigmay) """ Explanation: The Data Let's generate a simple dataset: observations of $y$ with reported uncertainties $\sigma_y$, at given $x$ values. End of explanation """ # Linear algebra: weighted least squares N = len(x) A = np.zeros((N,2)) A[:,0] = 1. / sigmay A[:,1] = x / sigmay b = y / sigmay theta,nil,nil,nil = np.linalg.lstsq(A, b) plot_yerr(x, y, sigmay) b_ls,m_ls = theta print('Least Squares (maximum likelihood) estimator:', b_ls,m_ls) plot_line(m_ls, b_ls); """ Explanation: Least squares fitting An industry standard: find the slope and intercept that minimize the mean square residual. Since the data depend linearly on the parameters, the least squares solution can be found by a matrix inversion and multiplication, conveneniently packed in numpy.linalg. 
End of explanation
"""
# Linear algebra: weighted least squares.
# Dividing each row of the design matrix (and the data vector) by sigmay
# turns ordinary least squares into inverse-variance-weighted least squares.
N = len(x)
A = np.zeros((N,2))
A[:,0] = 1. / sigmay
A[:,1] = x / sigmay
b = y / sigmay
# lstsq returns (solution, residuals, rank, singular values); only the
# solution vector theta = (b, m) is kept here.
theta,nil,nil,nil = np.linalg.lstsq(A, b)
plot_yerr(x, y, sigmay)
b_ls,m_ls = theta
print('Least Squares (maximum likelihood) estimator:', b_ls,m_ls)
plot_line(m_ls, b_ls);
"""
Explanation: Evaluating posterior probability on a grid This procedure will get us the Bayesian solution to the problem - not an estimator, but a probability distribution for the parameters m and b. This PDF captures the uncertainty in the model parameters given the data. For simple, 2-dimensional parameter spaces like this one, evaluating on a grid is not a bad way to go. We'll see that the least squares solution lies at the peak of the posterior PDF - for a certain set of assumptions about the data and the model. 
End of explanation
"""
def straight_line_log_likelihood(x, y, sigmay, m, b):
    '''
    Returns the log-likelihood of drawing data values *y* at
    known values *x* given Gaussian measurement noise with known
    standard deviation *sigmay*, where the "true" y values are
    *y_t = m * x + b*.

    x: list of x coordinates
    y: list of y coordinates
    sigmay: list of y uncertainties
    m: scalar slope
    b: scalar line intercept

    Returns: scalar log likelihood
    '''
    # Gaussian log-likelihood: normalization term plus -chi^2/2 term.
    return (np.sum(np.log(1./(np.sqrt(2.*np.pi) * sigmay))) +
            np.sum(-0.5 * (y - (m*x + b))**2 / sigmay**2))

def straight_line_log_prior(m, b, mlimits, blimits):
    '''
    Log of a prior PDF that is uniform in m over mlimits and uniform
    in b over blimits; returns -inf (zero prior probability) outside
    those ranges.
    '''
    # Uniform in m:
    if (m < mlimits[0]) | (m > mlimits[1]):
        log_m_prior = -np.inf
    else:
        log_m_prior = -np.log(mlimits[1] - mlimits[0])
    # Uniform in b:
    if (b < blimits[0]) | (b > blimits[1]):
        log_b_prior = -np.inf
    else:
        log_b_prior = -np.log(blimits[1] - blimits[0])
    return log_m_prior + log_b_prior

def straight_line_log_posterior(x,y,sigmay,m,b,mlimits,blimits):
    '''
    Unnormalized log-posterior for (m, b): log-likelihood plus log-prior.
    '''
    return (straight_line_log_likelihood(x,y,sigmay,m,b) +
            straight_line_log_prior(m,b,mlimits,blimits))

# Evaluate log P(m,b | x,y,sigmay) on a grid. 
# Define uniform prior limits, enforcing positivity in both parameters: mlimits = [0.0, 2.0] blimits = [0.0, 200.0] # Set up grid: mgrid = np.linspace(mlimits[0], mlimits[1], 101) bgrid = np.linspace(blimits[0], blimits[1], 101) log_posterior = np.zeros((len(mgrid),len(bgrid))) # Evaluate log posterior PDF: for im,m in enumerate(mgrid): for ib,b in enumerate(bgrid): log_posterior[im,ib] = straight_line_log_posterior(x, y, sigmay, m, b, mlimits, blimits) # Convert to probability density and plot posterior = np.exp(log_posterior - log_posterior.max()) plt.imshow(posterior, extent=[blimits[0],blimits[1],mlimits[0],mlimits[1]],cmap='Blues', interpolation='none', origin='lower', aspect=(blimits[1]-blimits[0])/(mlimits[1]-mlimits[0]), vmin=0, vmax=1) plt.contour(bgrid, mgrid, posterior, pdf_contour_levels(posterior), colors='k') i = np.argmax(posterior) i,j = np.unravel_index(i, posterior.shape) print('Grid maximum posterior values (m,b) =', mgrid[i], bgrid[j]) plt.title('Straight line: posterior PDF for parameters'); plt.plot(b_ls, m_ls, 'r+', ms=12, mew=4); plot_mb_setup(mlimits,blimits); """ Explanation: Evaluating posterior probability on a grid This procedure will get us the Bayesian solution to the problem - not an estimator, but a probability distribution for the parameters m and b. This PDF captures the uncertainty in the model parameters given the data. For simple, 2-dimensional parameter spaces like this one, evaluating on a grid is not a bad way to go. We'll see that the least squares solution lies at the peak of the posterior PDF - for a certain set of assumptions about the data and the model. 
End of explanation """ def straight_line_posterior(x, y, sigmay, m, b, mlimits, blimits): return np.exp(straight_line_log_posterior(x, y, sigmay, m, b, mlimits, blimits)) # initial m, b, at center of ranges m,b = 0.5*(mlimits[0]+mlimits[1]), 0.5*(blimits[0]+blimits[1]) # step sizes, 5% of the prior mstep, bstep = 0.05*(mlimits[1]-mlimits[0]), 0.1*(blimits[1]-blimits[0]) # how many steps? nsteps = 10000 chain = [] probs = [] naccept = 0 print('Running MH for', nsteps, 'steps') # First point: L_old = straight_line_log_likelihood(x, y, sigmay, m, b) p_old = straight_line_log_prior(m, b, mlimits, blimits) logprob_old = L_old + p_old for i in range(nsteps): # step mnew = m + np.random.normal() * mstep bnew = b + np.random.normal() * bstep # evaluate probabilities L_new = straight_line_log_likelihood(x, y, sigmay, mnew, bnew) p_new = straight_line_log_prior(mnew, bnew, mlimits, blimits) logprob_new = L_new + p_new if (np.exp(logprob_new - logprob_old) > np.random.uniform()): # Accept the new sample: m = mnew b = bnew L_old = L_new p_old = p_new logprob_old = logprob_new naccept += 1 else: # Stay where we are; m,b stay the same, and we append them # to the chain below. 
pass chain.append((b,m)) probs.append((L_old,p_old)) print('Acceptance fraction:', naccept/float(nsteps)) # Pull m and b arrays out of the Markov chain and plot them: mm = [m for b,m in chain] bb = [b for b,m in chain] # Traces, for convergence inspection: plt.figure(figsize=(8,5)) plt.subplot(2,1,1) plt.plot(mm, 'k-') plt.ylim(mlimits) plt.ylabel('m') plt.subplot(2,1,2) plt.plot(bb, 'k-') plt.ylabel('Intercept b') plt.ylim(blimits) """ Explanation: MCMC Sampling In problems with higher dimensional parameter spaces, we need a more efficient way of approximating the posterior PDF - both when characterizing it in the first place, and then when doing integrals over that PDF (to get the marginalized PDFs for the parameters, or to compress them in to single numbers with uncertainties that can be easily reported). In most applications it's sufficient to approximate a PDF with a (relatively) small number of samples drawn from it; MCMC is a procedure for drawing samples from PDFs. End of explanation """ # Look at samples in parameter space. # First show contours from gridding calculation: plt.contour(bgrid, mgrid, posterior, pdf_contour_levels(posterior), colors='k') plt.gca().set_aspect((blimits[1]-blimits[0])/(mlimits[1]-mlimits[0])) # Scatterplot of m,b posterior samples, overlaid: plt.plot(bb, mm, 'b.', alpha=0.1) plot_mb_setup(mlimits,blimits) # 1 and 2D marginalised distributions: !pip install --upgrade --no-deps corner import corner corner.corner(chain, labels=['b','m'], range=[blimits,mlimits],quantiles=[0.16,0.5,0.84], show_titles=True, title_args={"fontsize": 12}, plot_datapoints=True, fill_contours=True, levels=[0.68, 0.95], color='b', bins=80, smooth=1.0); """ Explanation: This looks pretty good: no plateauing, or drift. A more rigorous test for convergence is due to Gelman & Rubin, and involves comparing the intrachain variance with the inter-chain variance in an ensemble. It's worth reading up on. 
Foreman-Mackey & Hogg recommend looking at the autocorrelation length, and whther it stablizes during the run. End of explanation """ # Posterior visual check, in data space X = np.array(xlimits) for i in (np.random.rand(100)*len(chain)).astype(int): b,m = chain[i] plt.plot(X, b+X*m, 'b-', alpha=0.1) plot_line(m_ls, b_ls); plot_yerr(x, y, sigmay) # Test statistics: functions of the data, not the parameters. # 1) Reduced chisq for the best fit model: def test_statistic(x,y,sigmay,b_ls,m_ls): return np.sum((y - m_ls*x - b_ls)**2.0/sigmay**2.0)/(len(y)-2) # 2) Reduced chisq for the best fit m=0 model: # def test_statistic(x,y,sigmay,dummy1,dummy2): # return np.sum((y - np.mean(y))**2.0/sigmay**2.0)/(len(y)-1) # 3) Weighted mean y: # def test_statistic(x,y,sigmay,dummy1,dummy2): # return np.sum(y/sigmay**2.0)/np.sum(1.0/sigmay**2.0) # 4) Variance of y: # def test_statistic(x,y,sigmay,dummy1,dummy2): # return np.var(y) # Approximate the posterior predictive distribution for T, # by drawing a replica dataset for each sample (m,b) and computing its T: T = np.zeros(len(chain)) for k,(b,m) in enumerate(chain): yrep = b + m*x + np.random.randn(len(x)) * sigmay T[k] = test_statistic(x,yrep,sigmay,b_ls,m_ls) # Compare with the test statistic of the data, on a plot: Td = test_statistic(x,y, sigmay, b_ls, m_ls) plt.hist(T, 100, histtype='step', color='blue', lw=2, range=(0.0,np.percentile(T,99.0))) plt.axvline(Td, color='black', linestyle='--', lw=2) plt.xlabel('Test statistic') plt.ylabel('Posterior predictive distribution') # What is Pr(T>T(d)|d)? greater = (T > Td) P = 100*len(T[greater])/(1.0*len(T)) print("Pr(T>T(d)|d) = ",P,"%") """ Explanation: Looks like a measurement - but let's do some checks first. Model Checking How do we know if our model is any good? There are two properties that "good" models have: the first is accuracy, and the second is efficiency. Accurate models generate data that is like the observed data. What does this mean? 
First we have to define what similarity is, in this context. Visual impression is one very important way. Test statistics that capture relevant features of the data are another. Let's look at the posterior predictive distributions for the datapoints, and for a particularly interesting test statistic, the reduced chi-squared. End of explanation """ # Discrepancy: functions of the data AND parameters. # 1) Reduced chisq for the model: def test_statistic(x,y,sigmay,b,m): return np.sum((y - m*x - b)**2.0/sigmay**2.0)/(len(y)-2) # Approximate the posterior predictive distribution for T, # by drawing a replica dataset for each sample (m,b) and computing its T, # AND ALSO its Td: T = np.zeros(len(chain)) Td = np.zeros(len(chain)) for k,(b,m) in enumerate(chain): yrep = b + m*x + np.random.randn(len(x)) * sigmay T[k] = test_statistic(x,yrep,sigmay,b,m) Td[k] = test_statistic(x,y,sigmay,b,m) # Compare T with Td, on a scatter plot: plt.scatter(Td, T, color='blue',alpha=0.1) plt.plot([0.0, 100.0], [0.0, 100.], color='k', linestyle='--', linewidth=2) plt.xlabel('Observed discrepancy $T(d,\\theta)$') plt.ylabel('Replicated discrepancy $T(d^{\\rm rep},\\theta)$') plt.ylim([0.0,np.percentile(Td,99.0)]) plt.xlim([0.0,np.percentile(Td,99.0)]) # Histogram of differences: diff = T-Td plt.hist(diff, 100, histtype='step', color='blue', lw=2, range=(np.percentile(diff,1.0),np.percentile(diff,99.0))) plt.axvline(0.0, color='black', linestyle='--', lw=2) plt.xlabel('Difference $T(d^{\\rm rep},\\theta) - T(d,\\theta)$') plt.ylabel('Posterior predictive distribution') # What is Pr(T>T(d)|d)? greater = (T > Td) Pline = 100*len(T[greater])/(1.0*len(T)) print("Pr(T>T(d)|d) = ",Pline,"%") """ Explanation: If our model is true (and we're just uncertain about its parameters, given the data), we can compute the probability of getting a $T$ less than that observed, where T is the reduced chisq relative to a straight line with some reference $(m,b)$. 
Note that we did not have to look up the "chi squared distribution" - we can simply compute the posterior predictive distribution given our generative model. Still, this test statistic looks a little bit strange: it's computed relative to the best fit straight line - which is a sensible reference but somehow not really in the spirit of fitting the data... Instead, lets look at a discrepancy $T(d,\theta)$ that is a function of both the data and the parameters, and compute the posterior probability of getting $T(d^{\rm rep},\theta) > T(d,\theta)$ ${\rm Pr}(T[d^{\rm rep},\theta] > T[d,\theta]|d) = {\rm Pr}(T[d^{\rm rep},\theta] > T[d,\theta]|\theta,d)\;{\rm Pr}(\theta|d)\;d\theta$ End of explanation """ def quadratic_log_likelihood(x, y, sigmay, m, b, q): ''' Returns the log-likelihood of drawing data values y at known values x given Gaussian measurement noise with standard deviation with known sigmay, where the "true" y values are y_t = m*x + b + q**2 x: list of x coordinates y: list of y coordinates sigmay: list of y uncertainties m: scalar slope b: scalar line intercept q: quadratic term coefficient Returns: scalar log likelihood ''' return (np.sum(np.log(1./(np.sqrt(2.*np.pi) * sigmay))) + np.sum(-0.5 * (y - (m*x + b + q*x**2))**2 / sigmay**2)) def quadratic_log_prior(m, b, q, mlimits, blimits, qpars): # m and b: log_mb_prior = straight_line_log_prior(m, b, mlimits, blimits) # q: log_q_prior = np.log(1./(np.sqrt(2.*np.pi) * qpars[1])) - \ 0.5 * (q - qpars[0])**2 / qpars[1]**2 return log_mb_prior + log_q_prior def quadratic_log_posterior(x,y,sigmay,m,b,q): return (quadratic_log_likelihood(x,y,sigmay,m,b,q) + quadratic_log_prior(m,b,q)) # Assign Gaussian prior for q: qpars = [0.0,0.003] # initial m, b, q, at center of ranges m,b,q = 0.5, 50, 0.0 # step sizes mstep, bstep, qstep = 0.05, 5.0, 0.0003 # how many steps? 
nsteps = 10000 chain = [] probs = [] naccept = 0 print('Running MH for', nsteps, 'steps') # First point: L_old = quadratic_log_likelihood(x, y, sigmay, m, b, q) p_old = quadratic_log_prior(m, b, q, mlimits, blimits, qpars) logprob_old = L_old + p_old for i in range(nsteps): # step mnew = m + np.random.normal() * mstep bnew = b + np.random.normal() * bstep qnew = q + np.random.normal() * qstep # evaluate probabilities L_new = quadratic_log_likelihood(x, y, sigmay, mnew, bnew, qnew) p_new = quadratic_log_prior(mnew, bnew, qnew, mlimits, blimits, qpars) logprob_new = L_new + p_new if (np.exp(logprob_new - logprob_old) > np.random.uniform()): # Accept the new sample: m = mnew b = bnew q = qnew L_old = L_new p_old = p_new logprob_old = logprob_new naccept += 1 else: # Stay where we are; m,b stay the same, and we append them # to the chain below. pass chain.append((b,m,q)) probs.append((L_old,p_old)) print('Acceptance fraction:', naccept/float(nsteps)) # Pull m, b and q arrays out of the Markov chain and plot them: mm = [m for b,m,q in chain] bb = [b for b,m,q in chain] qq = [q for b,m,q in chain] # Traces, for convergence inspection: plt.figure(figsize=(8,5)) plt.subplot(3,1,1) plt.plot(mm, 'k-') plt.ylim(mlimits) plt.ylabel('m') plt.subplot(3,1,2) plt.plot(bb, 'k-') plt.ylim(blimits) plt.ylabel('Intercept b') plt.subplot(3,1,3) plt.plot(qq, 'k-') plt.ylim([qpars[0]-3*qpars[1],qpars[0]+3*qpars[1]]) plt.ylabel('Quadratic coefficient q') corner.corner(chain, labels=['b','m','q'], range=[blimits,mlimits,(qpars[0]-3*qpars[1],qpars[0]+3*qpars[1])],quantiles=[0.16,0.5,0.84], show_titles=True, title_args={"fontsize": 12}, plot_datapoints=True, fill_contours=True, levels=[0.68, 0.95], color='green', bins=80, smooth=1.0); plt.show() """ Explanation: The conclusion drawn from the discrepancy is more conservative. 
All our $\theta = (m,b)$ samples are plausible, so replica datasets generated by them should also be plausible: the straight line defined by each $(m,b)$ should go through the real data points as readily (on average) as it does its replica dataset.
Do our posterior predictive $p-$values suggest we need to improve our model? What about the visual check?
Higher order polynomial?
Maybe I see some curvature in the data. Let's try adding an extra parameter to the model, to see if our data are better modeled using a quadratic function than a straight line.
$y = m x + b + qx*2$
The coefficient $q$ is probably pretty small (we were expecting to only have to use a straight line model for these data!), so we can assign a fairly narrow prior.
End of explanation
"""

# Posterior visual check, in data space
# Overplot 100 randomly chosen posterior-sample curves on the data.
X = np.linspace(xlimits[0],xlimits[1],100)
for i in (np.random.rand(100)*len(chain)).astype(int):
    b,m,q = chain[i]
    plt.plot(X, b + X*m + q*X**2, 'g-', alpha=0.1)
# plot_line / plot_yerr and m_ls, b_ls (least-squares fit) come from
# earlier cells of this notebook.
plot_line(m_ls, b_ls);
plot_yerr(x, y, sigmay)

# Discrepancy: functions of the data AND parameters.
# 1) Reduced chisq for the model:
def test_statistic(x,y,sigmay,b,m,q):
    # chi-squared divided by (N data points - 3 fitted parameters)
    return np.sum((y - m*x - b - q*x**2)**2.0/sigmay**2.0)/(len(y)-3)

# Approximate the posterior predictive distribution for T,
# by drawing a replica dataset for each sample (m,b) and computing its T,
# AND ALSO its Td:
T = np.zeros(len(chain))
Td = np.zeros(len(chain))
for k,(b,m,q) in enumerate(chain):
    # Replica dataset: model prediction plus fresh Gaussian noise.
    yp = b + m*x + q*x**2 + sigmay*np.random.randn(len(x))
    T[k] = test_statistic(x,yp,sigmay,b,m,q)
    Td[k] = test_statistic(x,y,sigmay,b,m,q)

# Histogram of differences:
diff = T - Td
# Clip the display range to the central 98% to avoid long tails.
plt.hist(diff, 100, histtype='step', color='green', lw=2, range=(np.percentile(diff,1.0),np.percentile(diff,99.0)))
plt.axvline(0.0, color='black', linestyle='--', lw=2)
plt.xlabel('Difference $T(d^{\\rm rep},\\theta) - T(d,\\theta)$')
plt.ylabel('Posterior predictive distribution')

# What is Pr(T>T(d)|d)?
# Fraction of posterior predictive draws whose discrepancy exceeds the
# observed one; Pline was computed earlier for the straight-line model.
greater = (T > Td)
Pquad = 100*len(T[greater])/(1.0*len(T))

print("Pr(T>T(d)|d,quadratic) = ",Pquad,"%, cf. Pr(T>T(d)|d,straightline) = ",Pline,"%")
"""
Explanation: Checking the Quadratic Model
End of explanation
"""

# Display the pre-made evidence illustration (evidence.png must be present
# alongside the notebook).
from IPython.display import Image
Image('evidence.png')
"""
Explanation: This model checks out too: both the quadratic and linear models provide reasonable fits to the data, with the posterior predictive replica datasets behaving acceptably similarly to the real data. But, the predictions seem comparably precise! Which model should we prefer?
Model Comparison with the Bayesian Evidence
The evidence for model $H$, ${\rm Pr}(d|H)$, enables a form of Bayesian hypothesis testing via the evidence ratio (or "odds ratio", or "Bayes Factor"):
$R = \frac{{\rm Pr}(d|H_1)}{{\rm Pr}(d|H_0)}$
This quantity is similar to a likelihood ratio, but its a fully marginalized likelihood ratio - which is to say that it takes into account our uncertainty about values of the parameters of each model by integrating over them all.
As well predictive accuracy, the other virtue a model can have is efficiency. The evidence summarizes all the information we put into our model inferences, via both the data and our prior beliefs. You can see this by inspection of the integrand of the fully marginalized likelihood (#FML) integral:
${\rm Pr}(d|H) = \int\;{\rm Pr}(d|\theta,H)\;{\rm Pr}(\theta|H)\;d\theta$
The following figure might help illustrate how the evidence depends on both goodness of fit (through the likelihood) and the complexity of the model (via the prior). In this 1D case, a Gaussian likelihood (red) is integrated over a uniform prior (blue): the evidence can be shown to be given by $E = f \times L_{\rm max}$, where $L_{\rm max}$ is the maximum possible likelihood, and $f$ is the fraction of the blue dashed area that is shaded red. $f$ is 0.31, 0.98, and 0.07 in each case.
End of explanation
"""

# Simple Monte Carlo estimate of the evidence for each model:
# sample from the prior, then average the likelihood over the samples.
# Draw a large number of prior samples and hope for the best:
N=50000
mm = np.random.uniform(mlimits[0],mlimits[1], size=N)
bb = np.random.uniform(blimits[0],blimits[1], size=N)
qq = qpars[0] + qpars[1]*np.random.randn(N)

log_likelihood_straight_line = np.zeros(N)
log_likelihood_quadratic = np.zeros(N)
for i in range(N):
    log_likelihood_straight_line[i] = straight_line_log_likelihood(x, y, sigmay, mm[i], bb[i])
    log_likelihood_quadratic[i] = quadratic_log_likelihood(x, y, sigmay, mm[i], bb[i], qq[i])

def logaverage(x):
    # Stable log of the mean of exp(x), via the log-sum-exp trick:
    # subtract the max before exponentiating to avoid overflow/underflow.
    mx = x.max()
    return np.log(np.sum(np.exp(x - mx))) + mx - np.log(len(x))

log_evidence_straight_line = logaverage(log_likelihood_straight_line)
log_evidence_quadratic = logaverage(log_likelihood_quadratic)

print('log Evidence for Straight Line Model:', log_evidence_straight_line)
print('log Evidence for Quadratic Model:', log_evidence_quadratic)
print ('Odds ratio in favour of the Quadratic Model:', np.exp(log_evidence_quadratic - log_evidence_straight_line))
"""
Explanation: The illustration above shows us a few things:
1) The evidence can be made arbitrarily small by increasing the prior volume: the evidence is more conservative than focusing on the goodness of fit ($L_{\rm max}$) alone - and if you assign a prior you don't believe, you should not expect to get out a meaningful evidence value.
2) The evidence is linearly sensitive to prior volume ($f$), but exponentially sensitive to goodness of fit ($L_{\rm max}$). It's still a likelihood, after all.
The odds ratio can, in principle, be combined with the ratio of priors for each model to give us the relative probability for each model being true, given the data:
$\frac{{\rm Pr}(H_1|d)}{{\rm Pr}(H_0|d)} = \frac{{\rm Pr}(d|H_1)}{{\rm Pr}(d|H_0)} \; \frac{{\rm Pr}(H_1)}{{\rm Pr}(H_0)}$
Prior probabilities are very difficult to assign in most practical problems (notice that no theorist ever provides them).
So, one way to interpret the evidence ratio is to note that:
If you think that having seen the data, the two models are still equally probable, then the evidence ratio in favor of $H_1$ is you the odds that you would have had to have been willing to take against $H_1$, before seeing the data.
That is: the evidence ratio updates the prior ratio into a posterior one - as usual.
The FML is in general quite difficult to calculate, since it involves averaging the likelihood over the prior. MCMC gives us samples from the posterior - and these cannot, it turns out, be reprocessed so as to estimate the evidence stably. If we draw samples from the prior, however, we can then estimate the evidence via the usual sum over samples,
${\rm Pr}(d|H) \approx \sum_k\;{\rm Pr}(d|\theta,H)$
Sampling the prior and computing the likelihood at each sample position is called "Simple Monte Carlo", and while it works in certain low-dimensional situations, in general it is very inefficient (at best). Still, let's give it a try on our two models, and attempt to compute the Bayes Factor
$R = \frac{{\rm Pr}(d\,|\,{\rm quadratic})}{{\rm Pr}(d\,|\,{\rm straight line})}$
End of explanation
"""

def straight_line_with_scatter_log_likelihood(x, y, sigmay, m, b, log_s):
    '''
    Returns the log-likelihood of drawing data values *y* at
    known values *x* given Gaussian measurement noise with standard
    deviation with known *sigmay*, where the "true" y values have
    been drawn from N(mean=m * x + b, variance=(s^2)).

    x: list of x coordinates
    y: list of y coordinates
    sigmay: list of y uncertainties
    m: scalar slope
    b: scalar line intercept
    log_s: log of the intrinsic scatter s (a Gaussian std.dev);
           the scatter itself is s = exp(log_s)

    Returns: scalar log likelihood
    '''
    s = np.exp(log_s)
    # Measurement variance and intrinsic scatter add in quadrature
    # (the "true" y has been marginalized out analytically).
    V = sigmay**2 + s**2
    return (np.sum(np.log(1./(np.sqrt(2.*np.pi*V)))) +
            np.sum(-0.5 * (y - (m*x + b))**2 / V))

def straight_line_with_scatter_log_prior(m, b, log_s):
    # Uniform prior in log s within [log(slo), log(shi)]; flat (improper)
    # in m and b.
    # NOTE(review): slo and shi are module-level globals that are assigned
    # later in this notebook (slo,shi = [0,10] and then [0.001,10]); calling
    # this before they exist raises a NameError, and slo=0 makes
    # np.log(slo) = -inf - confirm they are set before the MH run.
    if log_s < np.log(slo) or log_s > np.log(shi):
        return -np.inf
    return 0.
def straight_line_with_scatter_log_posterior(x,y,sigmay, m,b,log_s): return (straight_line_with_scatter_log_likelihood(x,y,sigmay,m,b,log_s) + straight_line_with_scatter_log_prior(m,b,log_s)) def straight_line_with_scatter_posterior(x,y,sigmay,m,b,log_s): return np.exp(straight_line_with_scatter_log_posterior(x,y,sigmay,m,b,log_s)) # initial m, b, s m,b,log_s = 2, 20, 0. # step sizes mstep, bstep, log_sstep = 1., 10., 1. # how many steps? nsteps = 30000 schain = [] sprobs = [] naccept = 0 print 'Running MH for', nsteps, 'steps' L_old = straight_line_with_scatter_log_likelihood(x, y, sigmay, m, b, log_s) p_old = straight_line_with_scatter_log_prior(m, b, log_s) prob_old = np.exp(L_old + p_old) for i in range(nsteps): # step mnew = m + np.random.normal() * mstep bnew = b + np.random.normal() * bstep log_snew = log_s + np.random.normal() * log_sstep # evaluate probabilities # prob_new = straight_line_with_scatter_posterior(x, y, sigmay, mnew, bnew, log_snew) L_new = straight_line_with_scatter_log_likelihood(x, y, sigmay, mnew, bnew, log_snew) p_new = straight_line_with_scatter_log_prior(mnew, bnew, log_snew) prob_new = np.exp(L_new + p_new) if (prob_new / prob_old > np.random.uniform()): # accept m = mnew b = bnew log_s = log_snew L_old = L_new p_old = p_new prob_old = prob_new naccept += 1 else: # Stay where we are; m,b stay the same, and we append them # to the chain below. 
pass schain.append((b,m,np.exp(log_s))) sprobs.append((L_old,p_old)) print 'Acceptance fraction:', naccept/float(nsteps) # Histograms: import triangle slo,shi = [0,10] triangle.corner(schain, labels=['b','m','s'], range=[(blo,bhi),(mlo,mhi),(slo,shi)],quantiles=[0.16,0.5,0.84], show_titles=True, title_args={"fontsize": 12}, plot_datapoints=True, fill_contours=True, levels=[0.68, 0.95], color='b', bins=20, smooth=1.0); plt.show() # Traces: plt.clf() plt.subplot(3,1,1) plt.plot([b for b,m,s in schain], 'k-') plt.ylabel('b') plt.subplot(3,1,2) plt.plot([m for b,m,s in schain], 'k-') plt.ylabel('m') plt.subplot(3,1,3) plt.plot([s for b,m,s in schain], 'k-') plt.ylabel('s') plt.show() """ Explanation: Extras Intrinsic scatter Now let's add an extra parameter to the model: intrinsic scatter. What does this mean? We imagine the model $y$ values to be drawn from a PDF that is conditional on $m$, $b$ and also $s$, the intrinsic scatter of the population. This is the example we worked through in the breakout on Tuesday (although there we didn't do any analytic marginalisation). This scatter parameter can be inferred from the data as well: in this simple case we can introduce it, along with a "true" $y$ value for each data point, and analytically marginalize over the "true" y's. This yields a new (marginalized) likelihood function, which looks as though it has an additional source of uncertainty in the y values - which is what scatter is. 
End of explanation """ # Draw a buttload of prior samples and hope for the best N=50000 mm = np.random.uniform(mlo,mhi, size=N) bb = np.random.uniform(blo,bhi, size=N) slo,shi = [0.001,10] log_slo = np.log(slo) log_shi = np.log(shi) log_ss = np.random.uniform(log_slo, log_shi, size=N) log_likelihood_vanilla = np.zeros(N) log_likelihood_scatter = np.zeros(N) for i in range(N): log_likelihood_vanilla[i] = straight_line_log_likelihood(x, y, sigmay, mm[i], bb[i]) log_likelihood_scatter[i] = straight_line_with_scatter_log_likelihood(x, y, sigmay, mm[i], bb[i], log_ss[i]) def logsum(x): mx = x.max() return np.log(np.sum(np.exp(x - mx))) + mx log_evidence_vanilla = logsum(log_likelihood_vanilla) - np.log(N) log_evidence_scatter = logsum(log_likelihood_scatter) - np.log(N) print 'Log evidence vanilla:', log_evidence_vanilla print 'Log evidence scatter:', log_evidence_scatter print 'Odds ratio in favour of the vanilla model:', np.exp(log_evidence_vanilla - log_evidence_scatter) """ Explanation: Evidence for intrinsic scatter? End of explanation """ mean_log_L_vanilla = np.average(np.atleast_1d(probs).T[0]) mean_log_L_scatter = np.average(np.atleast_1d(sprobs).T[0]) print "No scatter: Evidence, mean log L, difference: ",log_evidence_vanilla,mean_log_L_vanilla,(mean_log_L_vanilla - log_evidence_vanilla) print " Scatter: Evidence, mean log L, difference: ",log_evidence_scatter,mean_log_L_scatter,(mean_log_L_scatter - log_evidence_scatter) """ Explanation: In this case there is very little to choose between the two models. Both provide comparably good fits to the data, so the only thing working against the scatter model is its extra parameter. However, the prior for s is very well -matched to the data (uniform in log s corresponds to a 1/s distribution, favoring small values, and so there is not a very big "Occam's Razor" factor in the evidence. Both models are appropriate for this dataset. 
Incidentally, let's look at a possible approximation for the evidence - the posterior mean log likelihood from our MCMC chains: End of explanation """ def likelihood_outliers((m, b, pbad), (x, y, sigmay, sigmabad)): return np.prod(pbad * 1./(np.sqrt(2.*np.pi)*sigmabad) * np.exp(-y**2 / (2.*sigmabad**2)) + (1.-pbad) * (1./(np.sqrt(2.*np.pi)*sigmay) * np.exp(-(y-(m*x+b))**2/(2.*sigmay**2)))) def prior_outliers((m, b, pbad)): if pbad < 0: return 0 if pbad > 1: return 0 return 1. def prob_outliers((m,b,pbad), x,y,sigmay,sigmabad): return (likelihood_outliers((m,b,pbad), (x,y,sigmay,sigmabad)) * prior_outliers((m,b,pbad))) x,y,sigmay = data1.T sigmabad = np.std(y) prob_args = (x,y,sigmay,sigmabad) mstep = 0.1 bstep = 1. pbadstep = 0.01 proposal_args = ((mstep, bstep,pbadstep),) m,b,pbad = 2.2, 30, 0.1 mh(prob_outliers, prob_args, gaussian_proposal, proposal_args, (m,b,pbad), 100000); def likelihood_t((m, b, nu), (x, y, sigmay)): return np.prod(pbad * 1./(np.sqrt(2.*np.pi)*sigmabad) * np.exp(-y**2 / (2.*sigmabad**2)) + (1.-pbad) * (1./(np.sqrt(2.*np.pi)*sigmay) * np.exp(-(y-(m*x+b))**2/(2.*sigmay**2)))) def complexity_brewer_likelihood((m, b, q), (x, y, sigmay)): # q: quadratic term if q < 0: q = 0 else: k = 0.01 q = -k * np.log(1 - q) return np.prod(np.exp(-(y-(b+m*x+q*(x - 150)**2))**2/(2.*sigmay**2))) def complexity_brewer_prior((m,b,q)): if q < -1: return 0. return 1. def complexity_brewer_prob(params, *args): return complexity_brewer_prior(params) * complexity_brewer_likelihood(params, args) x,y,sigmay = get_data_no_outliers() print 'x', x.min(), x.max() print 'y', y.min(), y.max() y = y + (0.001 * (x-150)**2) prob_args = (x,y,sigmay) mstep = 0.1 bstep = 1. qstep = 0.1 proposal_args = ((mstep, bstep, qstep, cstep),) m,b,q = 2.2, 30, 0. 
# Quick look at the (now curved) data before fitting:
plt.errorbar(x, y, fmt='.', yerr=sigmay)
plt.show()

# Run Metropolis-Hastings on the complexity example; mh and
# gaussian_proposal are defined earlier in this notebook.
mh(complexity_brewer_prob, prob_args, gaussian_proposal, proposal_args,
   (m,b,q), 10000, pnames=['m','b','q']);
"""
Explanation: The difference between the posterior mean log likelihood and the Evidence is the Shannon information gained when we updated the prior into the posterior.
In both cases we gained about 2 bits of information - perhaps corresponding to approximately 2 good measurements (regardless of the number of parameters being inferred)?
Appendix: Some Other Model Extensions
End of explanation
"""
flaxandteal/python-course-lecturer-notebooks
Python Course - 002a - And so we begin.ipynb
mit
# Today's date, via the standard-library datetime module.
import datetime
print(datetime.date.today())
"""
Explanation: ... and so we begin
Critical information
First steps
Order of the day

Learn to use Jupyter / iPython Notebook
Get familiar with basic Python
Start with Spyder, a traditional editor
Fundamental Python-in-Science skills

What is Jupyter (previously iPython Notebook)
An interactive Q&A-style Python prompt, with output in formatted text, images, graphs and more (and it even works with other languages too)
A bit like a cross between Mathematica and Wolfram Alpha, that runs in your browser, but you can save all your worksheets locally.
We will explore this, as it is very useful for doing quick calculations, collaborating on research, as a whiteboard, nonlinear discussions where you can adjust graphs or calculations, as teaching tool (I hope), or simply storing your train of thought in computations and notes.
This series of slides was prepared in Jupyter, which is why I can do this...
End of explanation
"""

# Symbolic maths demo: pretty-printed indefinite integral of x**3.
# (The star import is slide-friendly shorthand for this teaching notebook.)
from sympy import *
init_printing()
x = Symbol("x")
integrate(x ** 3, x)
"""
Explanation: You want to add in .weekday()
Lets you output LaTeX-style (formatted) maths
Example calculating the output of $ \int x^3 dx $:
End of explanation
"""
trudake/Notebooks
PixieApp+for+Outlier+Detection.ipynb
mit
!pip install --user --upgrade pixiedust !pip install --user --upgrade scikit-learn import pixiedust import numpy as np import matplotlib.pyplot as plt import sklearn.ensemble import pandas as pd from sklearn import svm from pyspark.mllib.stat import Statistics from pyspark.mllib.clustering import * import pyspark.sql.functions as fn from re import sub from decimal import Decimal """ Explanation: PixieApp for Outlier Detection Demonstrated with a Boston Public Schools data set PixieApps from the PixieDust package are a great tool to facilitate data exploration. The PixieApp for outlier detection can currently run two unsupervised machine learning models and visualize their output. It's a work in progress, and the goal is also to make it more flexible to fit the needs of different data types. The next step after this notebook will be to allow users to interact more with the output and to tune the outlier algorithms inside the PixieApp. This notebook uses a data set composed of facility and administrative information for Boston Public Schools (BPS) from <a href="https://data.boston.gov" target="_blank" rel="noopener no referrer">Analyze Boston</a> to answer these two questions: What is the relationship between square footage of a school, enrollment, location, and energy costs? <br/> Can we predict how much a school will pay in energy costs with facility data? How often do Principals live in the zip code of their school? Can we predict how far from a school the principal will live using demographic information for zip codes and principal total earnings? This notebook runs on Python with Spark 2.1. Table of contents Install PixieDust Create the Boston Public Schools data set Create the PixieApp Run the PixieApp Explore data in two-dimensions Next steps 1. Install PixieDust<a class="anchor" id="install"></a> First, make sure PixieDust and scikit-learn are up-to-date. End of explanation """ #Import the data and transform to Pandas. 
Pixiedust works with Spark dataframes, but unfortunately, scikitlearn will not. schools = pixiedust.sampleData("https://data.boston.gov/dataset/b2c5a9d3-609d-49ec-906c-b0e850a8d62a/resource/33c5f44a-3c67-4390-a1d5-1bf018e4728c/download/buildbps.csv") schools = schools.toPandas() #Pull a subset of variables of interest schools = schools[['SMMA_Identifier', 'BPS_School_Name', 'BPS_Address', 'SMMA_latitude', 'SMMA_longitude', 'BPS_Grades_Offered', 'SMMA_Site_SF', 'DOE_Total', ' BPS_Total_Energy_Cost ', 'BPS_Principal']] #Separate the principals' first and last names to aid linking def findFirstName(name): if isinstance(name, basestring): if 'Dr' in name: name = name.split('. ')[-1] firstname = name.split(' ')[0] else: firstname = name return firstname schools['Principal_Firstname'] = schools['BPS_Principal'].apply(lambda x : findFirstName(x)) def findLastName(name): #takes the last word before an optional comma, which should be the last name in this dataset if isinstance(name, basestring): name = name.split(',')[0] if isinstance(name, basestring): lastname = name.split(' ')[-1] if lastname == 'Jr.': lastname = name.split(' ')[-2] else: lastname = name else: lastname = name return lastname schools['Principal_Lastname'] = schools['BPS_Principal'].apply(lambda x : findLastName(x)) #Print the first 5 rows to get a look at the data schools.head() """ Explanation: 2. 
Create the Boston Public Schools data set<a class="anchor" id="createdataset"></a> 2.1 Import the BuildBPS data set<br/> 2.2 Link BuildBPS data set to the Employee Earnings data set<br/> 2.3 Merge the employee information with the school data set<br/> 2.4 Format variables 2.1 Import the BuildBPS data set<a class="anchor" id="buildbps"></a> The backbone of the Boston Public Schools (BPS) data set, BuildBPS, includes: Latitude and longitude for schools in BPS Energy costs Facility information Enrollment information Administration information Principal name is separated into first and last name to link to the Employee Earnings data set. End of explanation """ #Import the data and transform to Pandas employees = pixiedust.sampleData("https://data.boston.gov/dataset/418983dc-7cae-42bb-88e4-d56f5adcf869/resource/2ff6343f-850d-46e7-98d1-aca79b619fd6/download/employee-earnings-report-2015.csv") employees = employees.toPandas() #Keep variables of interest employees = employees[['NAME', 'DEPARTMENT_NAME', 'TITLE', 'TOTAL EARNINGS', 'POSTAL']] #Keep employees with title containing "headmaster" or "principal" employees = employees[employees['TITLE'].str.contains('headmaster|principal', case = False)] #Separate first and last names for merging with the key dataset employees['Principal_Lastname'] = employees['NAME'].str.split(',', 1, expand = True)[0] employees['Principal_Firstname'] = employees['NAME'].str.split(',', 1, expand = True)[1] #Print the first 5 rows to get a look at the data employees.head() """ Explanation: 2.2 Link BuildBPS to the Employee Earnings data set<a class="anchor" id="linkdataset"></a> To make it more interesting, link the BuildBPS data set to get principal earnings and zip code from the Employee Earnings data set. Because principal name is the only available linking variable, this gets a bit messy. 
<br/>
This data set includes:
- Employee names
- Departments
- Titles
- Earnings
- Zip code
End of explanation
"""

#Merge on last name
# NOTE(review): merging on last name alone produces one row per
# (school, employee) pair sharing a last name; the first-name filter
# below is what removes the wrong pairings.
schools_df = pd.merge(schools, employees, on = 'Principal_Lastname')

#Keep those matches that match in the first 3 letters of first name
# _x columns come from schools, _y columns from employees (pandas merge
# suffixes for the overlapping Principal_Firstname column).
schools_df['Firstname_match'] = schools_df['Principal_Firstname_x'].apply(lambda x : x[0:3].lower())
schools_df['Principal_Firstname_y'] = schools_df['Principal_Firstname_y'].apply(lambda x: x.lower())
# A match if the school's 3-letter prefix occurs in the employee first name
# (substring test tolerates middle names / extra tokens).
schools_df['correct_name'] = schools_df.apply(lambda x: x['Firstname_match'] in x['Principal_Firstname_y'], axis = 1)
schools_df = schools_df[schools_df['correct_name']]

schools_df.describe(include = 'all')
"""
Explanation: 2.3 Merge the employee information with the school data set<a class="anchor" id="merge"></a>
Next, merge the employee information into the school data set. First names differ slightly and nicknames are sometimes used (for example, "Rob" vs. "Robert"), so we match on last name and first three letters of first name. To make this more accurate in the future, consider using a fuzzy matching algorithm. 40 schools (about 30% of the data set) are lost in the matching, either due to missing principal name in the BuildBPS data set (11) or inadequate matching (29).
End of explanation
"""

#Remove schools with missing energy cost (4 schools)
# Missing costs are recorded as a '-' inside the string value.
schools_df['no_energy_cost'] = schools_df.apply(lambda x: '-' in x[' BPS_Total_Energy_Cost '], axis = 1)
schools_df = schools_df[~schools_df['no_energy_cost']]

#Restore numeric fields stored as strings to their former glory
# sub (re.sub, imported above) strips everything but digits and dots,
# e.g. "$1,234.56" -> 1234.56.
schools_df[' BPS_Total_Energy_Cost '] = schools_df[' BPS_Total_Energy_Cost '].apply(lambda x: float(sub(r'[^\d.]', '', x)))
schools_df['TOTAL EARNINGS'] = schools_df['TOTAL EARNINGS'].apply(lambda x: float(sub(r'[^\d.]', '', x)))
schools_df['DOE_Total'] = schools_df['DOE_Total'].apply(lambda x: int(x))

#Drop intermediate linking and boolean fields
# (column names with surrounding spaces, e.g. ' BPS_Total_Energy_Cost ',
# come straight from the source CSV headers)
schools_df = schools_df[['SMMA_Identifier', 'BPS_School_Name', 'BPS_Address', 'SMMA_latitude', 'SMMA_longitude',
                        'BPS_Grades_Offered', 'SMMA_Site_SF', 'DOE_Total', ' BPS_Total_Energy_Cost ', 'BPS_Principal',
                        'TOTAL EARNINGS', 'POSTAL', 'TITLE']]

#Summary statistics:
schools_df.describe(include = 'all')
"""
Explanation: 2. 4 Format variables<a class="anchor" id="formatvar"></a>
Some numeric variables, like earnings and energy cost, are saved as string and need to be converted to numeric.
End of explanation
"""

from pixiedust.display.app import *

@PixieApp
class TestPixieApp:
    """PixieApp offering two unsupervised outlier detectors (Isolation
    Forest and One-Class SVM) over the notebook's schools_df entity, with
    table and Mapbox map views of the flagged rows."""

    @route()
    def main(self):
        # Default route: two buttons dispatching to the detector routes
        # via pd_options state flags.
        return"""
        <input pd_options="iForest_clicked=true" type="button" value = "Outliers by Isolation Forest">
        <input pd_options="SVM_clicked=true" type="button" value = "Outliers by One-Class Support Vector Machine">
        """

    @route(iForest_clicked="true")
    def iForest_clicked(self):
        # Table and map views of the Isolation Forest output;
        # pd_entity calls trainIForest on the app's current entity.
        return """
        <div id="target{{prefix}}">
        <input pd_options="handlerId=dataframe" pd_entity = "trainIForest(a.pixieapp_entity, 256)" type="button" value="Table">
        <input pd_options="handlerId=mapView;aggregation=SUM;rowCount=500;mapboxtoken=pk.eyJ1IjoidHJ1ZGFrZSIsImEiOiJjajN5c29wbWUwMDQwMnZvZWNmZWxiNmNuIn0.gFeF4D4TkU2Tro-EB7HF0w;valueFields=is_outlier_iforest;rendererId=mapbox"
        pd_entity = "trainIForest(a.pixieapp_entity, 256)" type="button" value="Map">
        </div>
        """

    def numeric_columns(self, df):
        # Helper returning the numeric column names of a pandas frame.
        # NOTE(review): the train* methods below inline this same logic
        # instead of calling it.
        return list(df.select_dtypes(include=[np.number]).columns.values)

    def trainIForest(self, df, maxsample_input):
        """Fit an Isolation Forest on the numeric columns of df and append
        a binary is_outlier_iforest column (1 = outlier); returns df with
        outliers sorted first."""
        # fit an isolation forest (more adapted to large-dimensional settings)
        if type(df) is pyspark.sql.dataframe.DataFrame:
            df = df.toPandas()
        # fits on all numeric features
        numeric_columns = list(df.select_dtypes(include=[np.number]).columns.values)
        df_num = df[numeric_columns]
        # fill missing values with column mean
        df_num = df_num.fillna(df.mean())
        clf = sklearn.ensemble.IsolationForest(max_samples=maxsample_input)
        clf.fit(df_num)
        y_pred_train = clf.predict(df_num)
        # sklearn convention: +1 inlier / -1 outlier; remap to 0/1.
        df['is_outlier_iforest'] = y_pred_train
        df['is_outlier_iforest'] = df['is_outlier_iforest'].map({1: 0, -1: 1})
        return df.sort_values('is_outlier_iforest', ascending = False)

    @route(SVM_clicked="true")
    def SVM_clicked(self):
        # Table and map views of the One-Class SVM output.
        # NOTE(review): the target div appears twice (nested) here, unlike
        # iForest_clicked - looks like a copy-paste duplicate; confirm.
        return """
        <div id="target{{prefix}}">
        <div id="target{{prefix}}">
        <input pd_options="handlerId=dataframe" pd_entity = "trainSVM(a.pixieapp_entity)" type="button" value="Table">
        <input
        pd_options="handlerId=mapView;aggregation=SUM;rowCount=500;mapboxtoken=pk.eyJ1IjoidHJ1ZGFrZSIsImEiOiJjajN5c29wbWUwMDQwMnZvZWNmZWxiNmNuIn0.gFeF4D4TkU2Tro-EB7HF0w;valueFields=is_outlier_svm;rendererId=mapbox"
        pd_entity = "trainSVM(a.pixieapp_entity)" type="button" value="Map">
        </div>
        </div>
        """

    def trainSVM(self, df):
        """Fit a One-Class SVM (RBF kernel, nu=0.2, gamma=0.01) on the
        numeric columns of df and append a binary is_outlier_svm column
        (1 = outlier); returns df with outliers sorted first."""
        # fit a one-class support vector machine
        if type(df) is pyspark.sql.dataframe.DataFrame:
            df = df.toPandas()
        # fits on all numeric columns
        numeric_columns = list(df.select_dtypes(include=[np.number]).columns.values)
        df_num = df[numeric_columns]
        # fill missing values with the mean
        df_num = df_num.fillna(df.mean())
        clf = svm.OneClassSVM(nu=0.2, kernel="rbf", gamma = .01)
        clf.fit(df_num)
        y_pred_train = clf.predict(df_num)
        # sklearn convention: +1 inlier / -1 outlier; remap to 0/1.
        df['is_outlier_svm'] = y_pred_train
        df['is_outlier_svm'] = df['is_outlier_svm'].map({1: 0, -1: 1})
        return df.sort_values('is_outlier_svm', ascending = False)

# Instantiate the app; a is used by the pd_entity expressions above.
a = TestPixieApp()
"""
Explanation: 3. Create the PixieApp<a class="anchor" id="createpixieapp"></a>
There are currently two options for algorithms:

Isolation Forest

<a href="http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.IsolationForest.html" target="_blank" rel="noopener no referrer">Sklearn documentation</a>
<a href="https://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/icdm08b.pdf" target="_blank" rel="noopener no referrer">Paper</a>

One Class SVM with RBF Kernel

<a href="http://scikit-learn.org/stable/modules/generated/sklearn.svm.OneClassSVM.html" target="_blank" rel="noopener no referrer">Sklearn documentation</a>
<a href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3041295/" target="_blank" rel="noopener no referrer">An academic application example</a>

In each case, the PixieApp generates a binary column (is_outlier_iforest or is_outlier_svm) indicating how the algorithm classified the BPS school. Click map to plot the schools on a map. Outlier schools are dark blue.
4.
Run the PixieApp<a class="anchor" id="runpixieapp"></a>
End of explanation
"""

# Launch the app inline (not in a dialog) against schools_df.
a.run(schools_df, runInDialog='false')
"""
Explanation: The first screen of the PixieApp looks like this:
<img width="545" alt="pixieapp1" src="https://user-images.githubusercontent.com/28162685/28128056-6d84f3d2-66fc-11e7-9f6b-2fecec3a87e9.png">
Click Isolation Forest to display the following screen:
<img width="197" alt="pixieapp2" src="https://user-images.githubusercontent.com/28162685/28128070-7a5f24e2-66fc-11e7-8096-fe60b7a119df.png">
The map shows a map of all the BPS schools, with outliers in dark blue:
<img width="993" alt="iforest_map" src="https://user-images.githubusercontent.com/28162685/28128035-55574ba2-66fc-11e7-881c-35303b3156e6.png">
Following the same path for One Class SVM, it yields almost twice as many outliers in this data set. The map looks like this:
<img width="996" alt="svm_map" src="https://user-images.githubusercontent.com/28162685/28128046-60b2e164-66fc-11e7-962f-89e5261ed807.png">
5. Explore the data in two-dimensions<a class="anchor" id="explore"></a>
Now that the PixieApp has identified outliers, you can look at the data in one or two dimensions using PixieDust. Use PixieDust to display schools_df and navigate to scatterplot. This example uses "bokeh" as the renderer, and the key and value fields are switched out while using is_outlier_iforest as color. PixieDust makes it very easy to explore scatterplots with different variables quickly.
Isolation Forest
First, look at Total Energy Cost against Square Feet. It's clear that IForest labeled two schools with very large square footage as outliers.
End of explanation
"""

# Interactive PixieDust view of the labeled frame (configure the
# scatterplot in the UI as described above).
display(schools_df)
"""
Explanation: Continuing with square feet as one-dimension (to give a sense of where our last data points fell), look at what differentiated the outliers with normal square footage. A few schools with very high enrollment (1500+) were labeled as outliers.
End of explanation
"""

# Same PixieDust view; switch the scatterplot axes in the UI.
display(schools_df)
"""
Explanation: Finally (and most helpfully), look at Principal Earnings against Square feet. Several principals have very low earnings. This may be because they joined the district at the beginning of the school year and only earned about half of a full year's salary. These data points would need to be removed or adjusted going forward.
End of explanation
"""

# Same frame again, this time coloring by is_outlier_svm in the UI.
display(schools_df)
"""
Explanation: One Class SVM
One Class SVM looks to be a poorer fit (or less well-tuned) for this data set. The graphs below show the outliers show no clear pattern.
End of explanation
"""
zomansud/coursera
ml-classification/week-3/module-5-decision-tree-assignment-2-blank.ipynb
mit
import graphlab """ Explanation: Implementing binary decision trees The goal of this notebook is to implement your own binary decision tree classifier. You will: Use SFrames to do some feature engineering. Transform categorical variables into binary variables. Write a function to compute the number of misclassified examples in an intermediate node. Write a function to find the best feature to split on. Build a binary decision tree from scratch. Make predictions using the decision tree. Evaluate the accuracy of the decision tree. Visualize the decision at the root node. Important Note: In this assignment, we will focus on building decision trees where the data contain only binary (0 or 1) features. This allows us to avoid dealing with: * Multiple intermediate nodes in a split * The thresholding issues of real-valued features. This assignment may be challenging, so brace yourself :) Fire up Graphlab Create Make sure you have the latest version of GraphLab Create. End of explanation """ loans = graphlab.SFrame('lending-club-data.gl/') loans.head() """ Explanation: Load the lending club dataset We will be using the same LendingClub dataset as in the previous assignment. End of explanation """ loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1) loans = loans.remove_column('bad_loans') """ Explanation: Like the previous assignment, we reassign the labels to have +1 for a safe loan, and -1 for a risky (bad) loan. End of explanation """ features = ['grade', # grade of the loan 'term', # the term of the loan 'home_ownership', # home_ownership status: own, mortgage or rent 'emp_length', # number of years of employment ] target = 'safe_loans' loans = loans[features + [target]] """ Explanation: Unlike the previous assignment where we used several features, in this assignment, we will just be using 4 categorical features: grade of the loan the length of the loan term the home ownership status: own, mortgage, rent number of years of employment. 
Since we are building a binary decision tree, we will have to convert these categorical features to a binary representation in a subsequent section using 1-hot encoding. End of explanation """ loans """ Explanation: Let's explore what the dataset looks like. End of explanation """ safe_loans_raw = loans[loans[target] == 1] risky_loans_raw = loans[loans[target] == -1] # Since there are less risky loans than safe loans, find the ratio of the sizes # and use that percentage to undersample the safe loans. percentage = len(risky_loans_raw)/float(len(safe_loans_raw)) safe_loans = safe_loans_raw.sample(percentage, seed = 1) risky_loans = risky_loans_raw loans_data = risky_loans.append(safe_loans) print "Percentage of safe loans :", len(safe_loans) / float(len(loans_data)) print "Percentage of risky loans :", len(risky_loans) / float(len(loans_data)) print "Total number of loans in our new dataset :", len(loans_data) """ Explanation: Subsample dataset to make sure classes are balanced Just as we did in the previous assignment, we will undersample the larger class (safe loans) in order to balance out our dataset. This means we are throwing away many data points. We use seed=1 so everyone gets the same results. End of explanation """ loans_data = risky_loans.append(safe_loans) for feature in features: loans_data_one_hot_encoded = loans_data[feature].apply(lambda x: {x: 1}) loans_data_unpacked = loans_data_one_hot_encoded.unpack(column_name_prefix=feature) # Change None's to 0's for column in loans_data_unpacked.column_names(): loans_data_unpacked[column] = loans_data_unpacked[column].fillna(0) loans_data.remove_column(feature) loans_data.add_columns(loans_data_unpacked) """ Explanation: Note: There are many approaches for dealing with imbalanced data, including some where we modify the learning algorithm. These approaches are beyond the scope of this course, but some of them are reviewed in "Learning from Imbalanced Data" by Haibo He and Edwardo A. 
Garcia, IEEE Transactions on Knowledge and Data Engineering 21(9) (June 26, 2009), p. 1263–1284. For this assignment, we use the simplest possible approach, where we subsample the overly represented class to get a more balanced dataset. In general, and especially when the data is highly imbalanced, we recommend using more advanced methods. Transform categorical data into binary features In this assignment, we will implement binary decision trees (decision trees for binary features, a specific case of categorical variables taking on two values, e.g., true/false). Since all of our features are currently categorical features, we want to turn them into binary features. For instance, the home_ownership feature represents the home ownership status of the loanee, which is either own, mortgage or rent. For example, if a data point has the feature {'home_ownership': 'RENT'} we want to turn this into three features: { 'home_ownership = OWN' : 0, 'home_ownership = MORTGAGE' : 0, 'home_ownership = RENT' : 1 } Since this code requires a few Python and GraphLab tricks, feel free to use this block of code as is. Refer to the API documentation for a deeper understanding. End of explanation """ features = loans_data.column_names() features.remove('safe_loans') # Remove the response variable features print "Number of features (after binarizing categorical variables) = %s" % len(features) """ Explanation: Let's see what the feature columns look like now: End of explanation """ loans_data['grade.A'] """ Explanation: Let's explore what one of these columns looks like: End of explanation """ print "Total number of grade.A loans : %s" % loans_data['grade.A'].sum() print "Expexted answer : 6422" """ Explanation: This column is set to 1 if the loan grade is A and 0 otherwise. Checkpoint: Make sure the following answers match up. 
def intermediate_node_num_mistakes(labels_in_node):
    """Return the number of mistakes a majority-class classifier makes.

    labels_in_node is an array-like of +1/-1 labels. The majority class is
    predicted for every point, so the minority-class count is the mistake
    count. An empty node yields 0 mistakes; on a tie the +1 class wins, so
    the -1 count is returned.
    """
    # Guard: an empty node has nothing to misclassify.
    if len(labels_in_node) == 0:
        return 0

    num_positive = (labels_in_node == +1).sum()
    num_negative = (labels_in_node == -1).sum()

    # Majority prediction: the mistakes are exactly the minority points.
    if num_positive >= num_negative:
        return num_negative
    return num_positive
def best_splitting_feature(data, features, target):
    """Return the feature whose 0/1 split yields the lowest classification error.

    For each candidate binary feature, rows are partitioned into the side
    where the feature is 0 and the side where it is 1; each side is scored
    with the majority-class mistake count and the combined error
    (mistakes / total rows) is tracked. Ties keep the earliest feature.
    """
    best_feature = None
    # Classification error is always <= 1, so any real split beats this start.
    best_error = 10

    # Float total so the division below is true division.
    total_points = float(len(data))

    for candidate in features:
        # Partition rows on the binary feature value.
        zeros_side = data[data[candidate] == 0]
        ones_side = data[data[candidate] == 1]

        # Mistakes made by majority-class prediction on each side.
        total_mistakes = (intermediate_node_num_mistakes(zeros_side[target]) +
                          intermediate_node_num_mistakes(ones_side[target]))

        split_error = total_mistakes / total_points

        # Strict improvement only, so the first best feature wins ties.
        if split_error < best_error:
            best_error = split_error
            best_feature = candidate

    return best_feature
def create_leaf(target_values):
    """Build a leaf node predicting the majority class of target_values.

    Returns a node dict with the standard keys; 'prediction' is +1 when the
    +1 labels strictly outnumber the -1 labels, otherwise -1 (ties go to -1).
    """
    # Count each label class via boolean selection.
    positives = len(target_values[target_values == +1])
    negatives = len(target_values[target_values == -1])

    return {
        'splitting_feature': None,
        'left': None,
        'right': None,
        'is_leaf': True,
        # Majority vote; a tie is treated as the risky (-1) class.
        'prediction': +1 if positives > negatives else -1,
    }
# Store the predicted class (1 or -1) in leaf['prediction'] if num_ones > num_minus_ones: leaf['prediction'] = +1 else: leaf['prediction'] = -1 # Return the leaf node return leaf """ Explanation: Building the tree With the above functions implemented correctly, we are now ready to build our decision tree. Each node in the decision tree is represented as a dictionary which contains the following keys and possible values: { 'is_leaf' : True/False. 'prediction' : Prediction at the leaf node. 'left' : (dictionary corresponding to the left tree). 'right' : (dictionary corresponding to the right tree). 'splitting_feature' : The feature that this node splits on. } First, we will write a function that creates a leaf node given a set of target values. Fill in the places where you find ## YOUR CODE HERE. There are three places in this function for you to fill in. End of explanation """ def decision_tree_create(data, features, target, current_depth = 0, max_depth = 10): remaining_features = features[:] # Make a copy of the features. target_values = data[target] print "--------------------------------------------------------------------" print "Subtree, depth = %s (%s data points)." % (current_depth, len(target_values)) # Stopping condition 1 # (Check if there are mistakes at current node. # Recall you wrote a function intermediate_node_num_mistakes to compute this.) if intermediate_node_num_mistakes(target_values) == 0: ## YOUR CODE HERE print "Stopping condition 1 reached." # If not mistakes at current node, make current node a leaf node return create_leaf(target_values) # Stopping condition 2 (check if there are remaining features to consider splitting on) if len(remaining_features) == 0: ## YOUR CODE HERE print "Stopping condition 2 reached." 
# If there are no remaining features to consider, make current node a leaf node return create_leaf(target_values) # Additional stopping condition (limit tree depth) if current_depth >= max_depth: ## YOUR CODE HERE print "Reached maximum depth. Stopping for now." # If the max tree depth has been reached, make current node a leaf node return create_leaf(target_values) # Find the best splitting feature (recall the function best_splitting_feature implemented above) splitting_feature = best_splitting_feature(data, remaining_features, target) # Split on the best feature that we found. left_split = data[data[splitting_feature] == 0] right_split = data[data[splitting_feature] == 1] remaining_features.remove(splitting_feature) print "Split on feature %s. (%s, %s)" % (\ splitting_feature, len(left_split), len(right_split)) # Create a leaf node if the split is "perfect" if len(left_split) == len(data): print "Creating leaf node." return create_leaf(left_split[target]) if len(right_split) == len(data): print "Creating leaf node." return create_leaf(right_split[target]) # Repeat (recurse) on left and right subtrees left_tree = decision_tree_create(left_split, remaining_features, target, current_depth + 1, max_depth) ## YOUR CODE HERE right_tree = decision_tree_create(right_split, remaining_features, target, current_depth + 1, max_depth) return {'is_leaf' : False, 'prediction' : None, 'splitting_feature': splitting_feature, 'left' : left_tree, 'right' : right_tree} """ Explanation: We have provided a function that learns the decision tree recursively and implements 3 stopping conditions: 1. Stopping condition 1: All data points in a node are from the same class. 2. Stopping condition 2: No more features to split on. 3. Additional stopping condition: In addition to the above two stopping conditions covered in lecture, in this assignment we will also consider a stopping condition based on the max_depth of the tree. 
def classify(tree, x, annotate = False):
    """Predict the class label for data point x by walking the tree.

    At a leaf the stored prediction is returned; at an internal node the
    splitting feature's value is read from x and the walk descends left
    (value 0) or right (value 1). Set annotate=True to print the path taken.
    """
    # Leaf: the stored majority-class prediction is the answer.
    if tree['is_leaf']:
        if annotate:
            print("At leaf, predicting %s" % tree['prediction'])
        return tree['prediction']

    # Internal node: pick the branch matching the split feature's value in x.
    feature_value = x[tree['splitting_feature']]
    if annotate:
        print("Split on %s = %s" % (tree['splitting_feature'], feature_value))
    branch = tree['left'] if feature_value == 0 else tree['right']
    return classify(branch, x, annotate)
def evaluate_classification_error(tree, data, target):
    """Return the fraction of rows in data that the tree misclassifies.

    Each row is labeled with classify(tree, row) and compared against the
    true label in the target column; the result is 1 - accuracy.
    """
    # Predict a label for every row of the dataset.
    predictions = data.apply(lambda row: classify(tree, row))

    # Count agreements with the true labels, then convert to an error rate.
    num_correct = (predictions == data[target]).sum()
    return 1 - float(num_correct) / len(data[target])
def print_stump(tree, name = 'root'):
    """Print a one-level ASCII diagram of the split at this tree node.

    tree : node dict produced by decision_tree_create.
    name : caption printed above the split (defaults to 'root').

    A leaf prints "(leaf, label: ...)" and returns None; an internal node
    draws its 0-branch and 1-branch, each summarized either as a leaf label
    or as 'subtree'.
    """
    split_name = tree['splitting_feature'] # split_name is something like 'term. 36 months'
    if split_name is None:
        # Leaf node: nothing to draw but the stored prediction.
        print "(leaf, label: %s)" % tree['prediction']
        return None
    # NOTE(review): assumes the one-hot feature name contains a '.' separator
    # (as produced by the unpack step) -- verify for other feature encodings.
    split_feature, split_value = split_name.split('.')
    print ' %s' % name
    print ' |---------------|----------------|'
    print ' | |'
    print ' | |'
    print ' | |'
    print ' [{0} == 0] [{0} == 1] '.format(split_name)
    print ' | |'
    print ' | |'
    print ' | |'
    # Each child is shown as its leaf label if terminal, else as 'subtree'.
    print ' (%s) (%s)' \
        % (('leaf, label: ' + str(tree['left']['prediction']) if tree['left']['is_leaf'] else 'subtree'),
           ('leaf, label: ' + str(tree['right']['prediction']) if tree['right']['is_leaf'] else 'subtree'))
JackDi/phys202-2015-work
assignments/assignment11/OptimizationEx01.ipynb
mit
def hat(x, a, b):
    """Evaluate the "hat" potential V(x) = -a*x**2 + b*x**4.

    Operates elementwise when x is a NumPy array, so the same function can
    be plotted over a range or handed to scipy.optimize routines.
    """
    quadratic_well = -1 * a * x ** 2
    quartic_wall = b * x ** 4
    return quadratic_well + quartic_wall
sraejones/phys202-2015-work
assignments/midterm/AlgorithmsEx03.ipynb
mit
def char_probs(s):
    """Find the probabilities of the unique characters in the string s.

    Parameters
    ----------
    s : str
        A string of characters.

    Returns
    -------
    probs : dict
        A dictionary whose keys are the unique characters in s and whose
        values are the probabilities of those characters.
    """
    # Step 1: character count, stored in a dict.
    counts = {}
    for c in s:
        counts[c] = counts.get(c, 0) + 1
    # Step 2: normalize each count by the total number of characters.
    # An empty string yields an empty dict, so no division by zero occurs.
    total = len(s)
    return {c: n / float(total) for c, n in counts.items()}
def entropy(d):
    """Compute the entropy of a dict d whose values are probabilities.

    Uses H = -sum_i P_i * log2(P_i). Entries with probability 0 contribute
    nothing to the sum (lim p->0 of p*log2(p) is 0) and are filtered out so
    np.log2 is never evaluated at 0.
    """
    # Convert the dict's probability values to a NumPy array (no loops).
    probs = np.array(list(d.values()), dtype=float)
    # Drop zero probabilities to avoid log2(0); they add 0 to the entropy.
    probs = probs[probs > 0]
    return -np.sum(probs * np.log2(probs))
eaton-lab/toytree
docs/10-treestyles.ipynb
bsd-3-clause
import toytree # generate a random tree tree = toytree.rtree.unittree(ntips=10, seed=123) """ Explanation: Using built-in Tree Styles The built-in treestyle or ts drawing options in toytree provide a base layer for styling drawings that can make it easier to achieve a desired style using fewer options. Additional styles can be applied on top of these base layers to extend styling. The currently supported types are: n: normal -- default style s: simple -- clean focus on topology and node labels c: coalescent -- clean focus on topology and timing o: umlaut -- just a nice looking style p: population -- species/population trees, auto-parses "Ne" d: dark -- light colors End of explanation """ tree.draw(ts='n'); tree.draw(ts='s'); tree.draw(ts='c'); tree.draw(ts='o'); tree.draw(ts='p'); tree.draw(ts='d'); """ Explanation: Examples End of explanation """ tree.draw(ts='c', tip_labels=True, node_colors='orange'); """ Explanation: Extend styling Additional styles can be applied on top of a ts style to extend styling. In this sense the treestyle should be thought of as changing the default base layer of style before additional drawing options are parsed and applied. End of explanation """ # generate a random tree stree = toytree.rtree.unittree(ntips=10, seed=4321) # modify some styles of a tree stree.style.use_edge_lengths = False stree.style.scalebar = True stree.style.edge_type = 'b' stree.style.layout = 'd' # draw the tree stree.draw(); # get 5 random trees trees = [toytree.rtree.bdtree(8) for i in range(5)] # set the edge_colors style on each tree for tre in trees: tre.style.edge_colors = next(toytree.icolors1) # draw multiple trees using their applied styles toytree.mtree(trees).draw(); """ Explanation: Tree style overrides style applied to toytree objects In addition to providing style options to the .draw() function, it is also possible to set a tree's style before calling draw by modify a tree's .style attribute. 
This is effectively a way to modify its default base style. This can be useful if, for example, you want to apply different styles to a bunch of trees that will be plotted in a multitree drawing. In general, though, I recommend just styling drawings by using options in the .draw() function. This is relevant to mention here because if you use the treestyle argument in .draw it overrides the default style of the tree. This means it will also override any changes that have been made to the .style attribute of a tree. This is demonstrated below. End of explanation """ # applying a tree style will override any .style modifications stree.draw(ts='c'); """ Explanation: This is the same tree object as above that has styles applied to it but when a treestyle is applied it erases the applied styles. End of explanation """ # get 5 random trees trees = [toytree.rtree.bdtree(8) for i in range(5)] # set the edge_colors style on each tree for tre in trees: tre.style.edge_colors = next(toytree.icolors1) # draw multiple trees using their applied styles toytree.mtree(trees).draw(ts='n'); """ Explanation: Here you can see that we set colors on the edges just like above, but when a treestyle is applied it erases the applied styles. End of explanation """
jeicher/cobrapy
documentation_builder/io.ipynb
lgpl-2.1
import cobra.test import os from os.path import join data_dir = cobra.test.data_directory print("mini test files: ") print(", ".join(i for i in os.listdir(data_dir) if i.startswith("mini"))) textbook_model = cobra.test.create_test_model("textbook") ecoli_model = cobra.test.create_test_model("ecoli") salmonella_model = cobra.test.create_test_model("salmonella") """ Explanation: Reading and Writing Models Cobrapy supports reading and writing models in SBML (with and without FBC), JSON, MAT, and pickle formats. Generally, SBML with FBC version 2 is the preferred format for general use. The JSON format may be more useful for cobrapy-specific functionality. The package also ships with test models in various formats for testing purposes. End of explanation """ cobra.io.read_sbml_model(join(data_dir, "mini_fbc2.xml")) cobra.io.write_sbml_model(textbook_model, "test_fbc2.xml") """ Explanation: SBML The Systems Biology Markup Language is an XML-based standard format for distributing models which has support for COBRA models through the FBC extension version 2. Cobrapy has native support for reading and writing SBML with FBCv2. Please note that all id's in the model must conform to the SBML SID requirements in order to generate a valid SBML file. End of explanation """ cobra.io.read_sbml_model(join(data_dir, "mini_cobra.xml")) cobra.io.write_sbml_model(textbook_model, "test_cobra.xml", use_fbc_package=False) """ Explanation: There are other dialects of SBML prior to FBC 2 which have previously been use to encode COBRA models. The primary ones is the "COBRA" dialect which used the "notes" fields in SBML files. Cobrapy can use libsbml, which must be installed separately (see installation instructions) to read and write these files. When reading in a model, it will automatically detect whether fbc was used or not. When writing a model, the use_fbc_package flag can be used can be used to write files in this legacy "cobra" format. 
End of explanation """ cobra.io.load_json_model(join(data_dir, "mini.json")) cobra.io.save_json_model(textbook_model, "test.json") """ Explanation: JSON cobrapy models have a JSON (JavaScript Object Notation) representation. This format was crated for interoperability with escher. End of explanation """ cobra.io.load_matlab_model(join(data_dir, "mini.mat"), variable_name="mini_textbook") """ Explanation: MATLAB Often, models may be imported and exported soley for the purposes of working with the same models in cobrapy and the MATLAB cobra toolbox. MATLAB has its own ".mat" format for storing variables. Reading and writing to these mat files from python requires scipy. A mat file can contain multiple MATLAB variables. Therefore, the variable name of the model in the MATLAB file can be passed into the reading function: End of explanation """ cobra.io.load_matlab_model(join(data_dir, "mini.mat")) """ Explanation: If the mat file contains only a single model, cobra can figure out which variable to read from, and the variable_name paramter is unnecessary. End of explanation """ cobra.io.save_matlab_model(textbook_model, "test.mat") """ Explanation: Saving models to mat files is also relatively straightforward End of explanation """
robblack007/clase-dinamica-robot
Practicas/practica1/inicio.ipynb
mit
2 + 3 2*3 2**3 sin(pi) """ Explanation: Práctica 1 - Introducción a Jupyter lab y libreria robots Introducción a Jupyter y el lenguaje de programación Python Expresiones aritmeticas y algebraicas Empezaremos esta práctica con algo de conocimientos previos de programación. Se que muchos de ustedes no han tenido la oportunidad de utilizar Python como lenguaje de programación y mucho menos Jupyter como ambiente de desarrollo para computo cientifico, asi que el primer objetivo de esta práctica será acostumbrarnos a la sintaxis del lenguaje y a las funciones que hacen especial a Jupyter. Primero tratemos de evaluar una expresión aritmetica. Para correr el código en la siguiente celda, tan solo tienes que hacer clic en cualquier punto de ella y presionar las teclas Shift + Return. End of explanation """ from math import sin, pi sin(pi) """ Explanation: Sin embargo no existen funciones trigonométricas cargadas por default. Para esto tenemos que importarlas de la libreria math: End of explanation """ a = 10 a """ Explanation: Variables Las variables pueden ser utilizadas en cualquier momento, sin necesidad de declararlas, tan solo usalas! End of explanation """ # ESCRIBE TU CODIGO AQUI raise NotImplementedError # ESCRIBE TU CODIGO AQUI raise NotImplementedError """ Explanation: Ejercicio Ejecuta el siguiente calculo y guardalo en una variable c: $$ c = \pi *10^2 $$ Nota: Una vez que hayas concluido el calculo y guardado el valor en una variable, despliega el valor de la variable al ejecutar en una celda el nombre de la variable End of explanation """ from nose.tools import assert_equal assert_equal(_, c) print("Sin errores") """ Explanation: Ejecuta la prueba de abajo para saber si has creado el codigo correcto End of explanation """ A = [2, 4, 8, 10] A """ Explanation: Listas Las listas son una manera de guardar varios datos en un mismo arreglo. 
Podemos tener por ejemplo: End of explanation """ A*2 """ Explanation: Pero si intentamos multiplicar estos datos por un numero, no tendrá el comportamiento esperado. End of explanation """ f = lambda x: x**2 + 1 """ Explanation: Funciones Podemos definir funciones propias de la siguiente manera: End of explanation """ f(2) """ Explanation: Esta linea de codigo es equivalente a definir una función matemática de la siguiente manera: $$ f(x) = x^2 + 1 $$ Por lo que si la evaluamos con $x = 2$, obviamente obtendremos como resultado $5$. End of explanation """ def g(x): y = x**2 + 1 return y """ Explanation: Esta notación que introducimos es muy util para funciones matemáticas, pero esto nos obliga a pensar en las definiciones de una manera funcional, lo cual no siempre es la solución (sobre todo en un lenguaje con un paradigma de programación orientado a objetos). Esta función tambien puede ser escrita de la siguiente manera: End of explanation """ g(2) """ Explanation: Con los mismos resultados: End of explanation """ def cel_a_faren(grados_cel): # ESCRIBE TU CODIGO AQUI raise NotImplementedError return grados_faren """ Explanation: Ejercicio Define una función que convierta grados Celsius a grados Farenheit, de acuerdo a la siguiente formula: $$ F = \frac{9}{5} C + 32 $$ End of explanation """ cel_a_faren(-1) from nose.tools import assert_equal assert_equal(cel_a_faren(10), 50) assert_equal(cel_a_faren(50), 122) print("Sin errores") """ Explanation: Y para probar trata de convertir algunos datos: End of explanation """ for dato in A: print(dato*2) """ Explanation: Ciclos de control Cuando queremos ejecutar código varias veces tenemos varias opciones, vamos a explorar rapidamente el ciclo for. python for paso in pasos: ... codigo_a_ejecutar(paso) ... En este caso el codigo se ejecutará tantas veces sean necesarias para usar todos los elementos que hay en pasos. 
Por ejemplo, pordemos ejecutar la multiplicacion por 2 en cada uno de los datos: End of explanation """ B = [] for dato in A: B.append(dato*2) B """ Explanation: ó agregarlo en una lista nueva: End of explanation """ # ESCRIBE TU CODIGO AQUI raise NotImplementedError C # ESCRIBE TU CODIGO AQUI raise NotImplementedError D """ Explanation: Ejercicio Crea una lista C con los enteros positivos de un solo digito, es decir: $\left{ x \in \mathbb{Z} \mid 0 \leq x < 10\right}$ Crea una segunda lista D con los cuadrados de cada elemento de C End of explanation """ from numpy.testing import assert_array_equal print("Sin errores") """ Explanation: Ejecuta las pruebas de abajo End of explanation """ from numpy import matrix A = matrix([[1, 2], [3, 4]]) A v1 = matrix([[1], [2]]) v1 # Dependiendo de la version de python que exista en tu computadora, # esta operacion pudiera no funcionar, en dado caso solo hay que # cambiar @ por * A@v1 # La siguiente linea no va a funcionar, porque? v1@A """ Explanation: Matrices Para trabajar con matrices el flujo de trabajo es un poco diferente, ya que los arreglos manejan un conjunto diferente de operaciones; afortunadamente la librería numpy ya tiene definido un objeto de tipo matriz, con el cual podemos hacer las operaciones que queramos: End of explanation """ from numpy import sin, cos, pi τ = 2*pi # ESCRIBE TU CODIGO AQUI raise NotImplementedError vec_rot from numpy.testing import assert_array_equal assert_array_equal(vec_rot, matrix([[2*(cos(τ/12)-sin(τ/12))], [2*(cos(τ/12)+sin(τ/12))]])) print("Sin errores") """ Explanation: Ejercicio Declara una matriz de rotación rot, en dos dimensiones con un angulo de 30º Aplica esta matriz de rotación al vector vec definido como $\begin{pmatrix} 2 \ 2\end{pmatrix}$ End of explanation """
cfjhallgren/shogun
doc/ipython-notebooks/structure/FGM.ipynb
gpl-3.0
%pylab inline %matplotlib inline import os SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data') import numpy as np import scipy.io dataset = scipy.io.loadmat(os.path.join(SHOGUN_DATA_DIR, 'ocr/ocr_taskar.mat')) # patterns for training p_tr = dataset['patterns_train'] # patterns for testing p_ts = dataset['patterns_test'] # labels for training l_tr = dataset['labels_train'] # labels for testing l_ts = dataset['labels_test'] # feature dimension n_dims = p_tr[0,0].shape[0] # number of states n_stats = 26 # number of training samples n_tr_samples = p_tr.shape[1] # number of testing samples n_ts_samples = p_ts.shape[1] """ Explanation: General Structured Output Models with Shogun Machine Learning Toolbox Shell Hu (GitHub ID: hushell) Thanks Patrick Pletscher and Fernando J. Iglesias García for taking time to help me finish the project! Shoguners = awesome! Me = grateful! Introduction This notebook illustrates the training of a <a href="http://en.wikipedia.org/wiki/Factor_graph">factor graph</a> model using <a href="http://en.wikipedia.org/wiki/Structured_support_vector_machine">structured SVM</a> in Shogun. We begin by giving a brief outline of factor graphs and <a href="http://en.wikipedia.org/wiki/Structured_prediction">structured output learning</a> followed by the corresponding API in Shogun. Finally, we test the scalability by performing an experiment on a real <a href="http://en.wikipedia.org/wiki/Optical_character_recognition">OCR</a> data set for <a href="http://en.wikipedia.org/wiki/Handwriting_recognition">handwritten character recognition</a>. Factor Graph A factor graph explicitly represents the factorization of an undirected graphical model in terms of a set of factors (potentials), each of which is defined on a clique in the original graph [1]. 
For example, a MRF distribution can be factorized as $$ P(\mathbf{y}) = \frac{1}{Z} \prod_{F \in \mathcal{F}} \theta_F(\mathbf{y}_F), $$ where $F$ is the factor index, $\theta_F(\mathbf{y}_F)$ is the energy with respect to assignment $\mathbf{y}_F$. In this demo, we focus only on table representation of factors. Namely, each factor holds an energy table $\theta_F$, which can be viewed as an unnormalized CPD. According to different factorizations, there are different types of factors. Usually we assume the Markovian property is held, that is, factors have the same parameterization if they belong to the same type, no matter how location or time changes. In addition, we have parameter free factor type, but nothing to learn for such kinds of types. More detailed implementation will be explained later. Structured Prediction Structured prediction typically involves an input $\mathbf{x}$ (can be structured) and a structured output $\mathbf{y}$. A joint feature map $\Phi(\mathbf{x},\mathbf{y})$ is defined to incorporate structure information into the labels, such as chains, trees or general graphs. In general, the linear parameterization will be used to give the prediction rule. We leave the kernelized version for future work. $$ \hat{\mathbf{y}} = \underset{\mathbf{y} \in \mathcal{Y}}{\operatorname{argmax}} \langle \mathbf{w}, \Phi(\mathbf{x},\mathbf{y}) \rangle $$ where $\Phi(\mathbf{x},\mathbf{y})$ is the feature vector by mapping local factor features to corresponding locations in terms of $\mathbf{y}$, and $\mathbf{w}$ is the global parameter vector. In factor graph model, parameters are associated with a set of factor types. So $\mathbf{w}$ is a collection of local parameters. The parameters are learned by regularized risk minimization, where the risk defined by user provided loss function $\Delta(\mathbf{y},\mathbf{\hat{y}})$ is usually non-convex and non-differentiable, e.g. the Hamming loss. 
So the empirical risk is defined in terms of the surrogate hinge loss $H_i(\mathbf{w}) = \max_{\mathbf{y} \in \mathcal{Y}} \Delta(\mathbf{y}_i,\mathbf{y}) - \langle \mathbf{w}, \Psi_i(\mathbf{y}) \rangle $, which is an upper bound of the user defined loss. Here $\Psi_i(\mathbf{y}) = \Phi(\mathbf{x}_i,\mathbf{y}_i) - \Phi(\mathbf{x}_i,\mathbf{y})$. The training objective is given by $$ \min_{\mathbf{w}} \frac{\lambda}{2} ||\mathbf{w}||^2 + \frac{1}{N} \sum_{i=1}^N H_i(\mathbf{w}). $$ In Shogun's factor graph model, the corresponding implemented functions are: <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CStructuredModel.html#a15bd99e15bbf0daa8a727d03dbbf4bcd">FactorGraphModel::get_joint_feature_vector()</a> $\longleftrightarrow \Phi(\mathbf{x}_i,\mathbf{y})$ <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CFactorGraphModel.html#a36665cfdd7ea2dfcc9b3c590947fe67f">FactorGraphModel::argmax()</a> $\longleftrightarrow H_i(\mathbf{w})$ <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CFactorGraphModel.html#a17dac99e933f447db92482a6dce8489b">FactorGraphModel::delta_loss()</a> $\longleftrightarrow \Delta(\mathbf{y}_i,\mathbf{y})$ Experiment: OCR Show Data First of all, we load the OCR data from a prepared mat file. The raw data can be downloaded from <a href="http://www.seas.upenn.edu/~taskar/ocr/">http://www.seas.upenn.edu/~taskar/ocr/</a>. It has 6876 handwritten words with an average length of 8 letters from 150 different persons. Each letter is rasterized into a binary image of size 16 by 8 pixels. Thus, each $\mathbf{y}$ is a chain, and each node has 26 possible states denoting ${a,\cdots,z}$. 
End of explanation """ import matplotlib.pyplot as plt def show_word(patterns, index): """show a word with padding""" plt.rc('image', cmap='binary') letters = patterns[0,index][:128,:] n_letters = letters.shape[1] for l in xrange(n_letters): lett = np.transpose(np.reshape(letters[:,l], (8,16))) lett = np.hstack((np.zeros((16,1)), lett, np.zeros((16,1)))) lett = np.vstack((np.zeros((1,10)), lett, np.zeros((1,10)))) subplot(1,n_letters,l+1) imshow(lett) plt.xticks(()) plt.yticks(()) plt.tight_layout() show_word(p_tr, 174) show_word(p_tr, 471) show_word(p_tr, 57) """ Explanation: Few examples of the handwritten words are shown below. Note that the first capitalized letter has been removed. End of explanation """ from shogun import TableFactorType # unary, type_id = 0 cards_u = np.array([n_stats], np.int32) w_gt_u = np.zeros(n_stats*n_dims) fac_type_u = TableFactorType(0, cards_u, w_gt_u) # pairwise, type_id = 1 cards = np.array([n_stats,n_stats], np.int32) w_gt = np.zeros(n_stats*n_stats) fac_type = TableFactorType(1, cards, w_gt) # first bias, type_id = 2 cards_s = np.array([n_stats], np.int32) w_gt_s = np.zeros(n_stats) fac_type_s = TableFactorType(2, cards_s, w_gt_s) # last bias, type_id = 3 cards_t = np.array([n_stats], np.int32) w_gt_t = np.zeros(n_stats) fac_type_t = TableFactorType(3, cards_t, w_gt_t) # all initial parameters w_all = [w_gt_u,w_gt,w_gt_s,w_gt_t] # all factor types ftype_all = [fac_type_u,fac_type,fac_type_s,fac_type_t] """ Explanation: Define Factor Types and Build Factor Graphs Let's define 4 factor types, such that a word will be able to be modeled as a chain graph. The unary factor type will be used to define unary potentials that capture the appearance likelihoods of each letter. In our case, each letter has $16 \times 8$ pixels, thus there are $(16 \times 8 + 1) \times 26$ parameters. Here the additional bits in the parameter vector are bias terms. One for each state. 
The pairwise factor type will be used to define pairwise potentials between each pair of letters. This type in fact gives the Potts potentials. There are $26 \times 26$ parameters. The bias factor type for the first letter is a compensation factor type, since the interaction is one-sided. So there are $26$ parameters to be learned. The bias factor type for the last letter, which has the same intuition as the last item. There are also $26$ parameters. Putting all parameters together, the global parameter vector $\mathbf{w}$ has length $4082$. End of explanation """ def prepare_data(x, y, ftype, num_samples): """prepare FactorGraphFeatures and FactorGraphLabels """ from shogun import Factor, TableFactorType, FactorGraph from shogun import FactorGraphObservation, FactorGraphLabels, FactorGraphFeatures samples = FactorGraphFeatures(num_samples) labels = FactorGraphLabels(num_samples) for i in xrange(num_samples): n_vars = x[0,i].shape[1] data = x[0,i].astype(np.float64) vc = np.array([n_stats]*n_vars, np.int32) fg = FactorGraph(vc) # add unary factors for v in xrange(n_vars): datau = data[:,v] vindu = np.array([v], np.int32) facu = Factor(ftype[0], vindu, datau) fg.add_factor(facu) # add pairwise factors for e in xrange(n_vars-1): datap = np.array([1.0]) vindp = np.array([e,e+1], np.int32) facp = Factor(ftype[1], vindp, datap) fg.add_factor(facp) # add bias factor to first letter datas = np.array([1.0]) vinds = np.array([0], np.int32) facs = Factor(ftype[2], vinds, datas) fg.add_factor(facs) # add bias factor to last letter datat = np.array([1.0]) vindt = np.array([n_vars-1], np.int32) fact = Factor(ftype[3], vindt, datat) fg.add_factor(fact) # add factor graph samples.add_sample(fg) # add corresponding label states_gt = y[0,i].astype(np.int32) states_gt = states_gt[0,:]; # mat to vector loss_weights = np.array([1.0/n_vars]*n_vars) fg_obs = FactorGraphObservation(states_gt, loss_weights) labels.add_label(fg_obs) return samples, labels # prepare training pairs (factor 
graph, node states) n_tr_samples = 350 # choose a subset of training data to avoid time out on buildbot samples, labels = prepare_data(p_tr, l_tr, ftype_all, n_tr_samples) """ Explanation: Next, we write a function to construct the factor graphs and prepare labels for training. For each factor graph instance, the structure is a chain but the number of nodes and edges depend on the number of letters, where unary factors will be added for each letter, pairwise factors will be added for each pair of neighboring letters. Besides, the first and last letter will get an additional bias factor respectively. End of explanation """ try: import networkx as nx # pip install networkx except ImportError: import pip pip.main(['install', '--user', 'networkx']) import networkx as nx import matplotlib.pyplot as plt # create a graph G = nx.Graph() node_pos = {} # add variable nodes, assuming there are 3 letters G.add_nodes_from(['v0','v1','v2']) for i in xrange(3): node_pos['v%d' % i] = (2*i,1) # add factor nodes G.add_nodes_from(['F0','F1','F2','F01','F12','Fs','Ft']) for i in xrange(3): node_pos['F%d' % i] = (2*i,1.006) for i in xrange(2): node_pos['F%d%d' % (i,i+1)] = (2*i+1,1) node_pos['Fs'] = (-1,1) node_pos['Ft'] = (5,1) # add edges to connect variable nodes and factor nodes G.add_edges_from([('v%d' % i,'F%d' % i) for i in xrange(3)]) G.add_edges_from([('v%d' % i,'F%d%d' % (i,i+1)) for i in xrange(2)]) G.add_edges_from([('v%d' % (i+1),'F%d%d' % (i,i+1)) for i in xrange(2)]) G.add_edges_from([('v0','Fs'),('v2','Ft')]) # draw graph fig, ax = plt.subplots(figsize=(6,2)) nx.draw_networkx_nodes(G,node_pos,nodelist=['v0','v1','v2'],node_color='white',node_size=700,ax=ax) nx.draw_networkx_nodes(G,node_pos,nodelist=['F0','F1','F2'],node_color='yellow',node_shape='s',node_size=300,ax=ax) nx.draw_networkx_nodes(G,node_pos,nodelist=['F01','F12'],node_color='blue',node_shape='s',node_size=300,ax=ax) 
nx.draw_networkx_nodes(G,node_pos,nodelist=['Fs'],node_color='green',node_shape='s',node_size=300,ax=ax) nx.draw_networkx_nodes(G,node_pos,nodelist=['Ft'],node_color='purple',node_shape='s',node_size=300,ax=ax) nx.draw_networkx_edges(G,node_pos,alpha=0.7) plt.axis('off') plt.tight_layout() """ Explanation: An example of graph structure is visualized as below, from which you may have a better sense how a factor graph being built. Note that different colors are used to represent different factor types. End of explanation """ from shogun import FactorGraphModel, TREE_MAX_PROD # create model and register factor types model = FactorGraphModel(samples, labels, TREE_MAX_PROD) model.add_factor_type(ftype_all[0]) model.add_factor_type(ftype_all[1]) model.add_factor_type(ftype_all[2]) model.add_factor_type(ftype_all[3]) """ Explanation: Training Now we can create the factor graph model and start training. We will use the tree max-product belief propagation to do MAP inference. End of explanation """ from shogun import DualLibQPBMSOSVM from shogun import BmrmStatistics import pickle import time # create bundle method SOSVM, there are few variants can be chosen # BMRM, Proximal Point BMRM, Proximal Point P-BMRM, NCBM # usually the default one i.e. BMRM is good enough # lambda is set to 1e-2 bmrm = DualLibQPBMSOSVM(model, labels, 0.01) bmrm.set_TolAbs(20.0) bmrm.set_verbose(True) bmrm.set_store_train_info(True) # train t0 = time.time() bmrm.train() t1 = time.time() w_bmrm = bmrm.get_w() print "BMRM took", t1 - t0, "seconds." """ Explanation: In Shogun, we implemented several batch solvers and online solvers. Let's first try to train the model using a batch solver. 
We choose the dual bundle method solver (<a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CDualLibQPBMSOSVM.html">DualLibQPBMSOSVM</a>) [2], since in practice it is slightly faster than the primal n-slack cutting plane solver (<a a href="http://www.shogun-toolbox.org/doc/en/latest/PrimalMosekSOSVM_8h.html">PrimalMosekSOSVM</a>) [3]. However, it still will take a while until convergence. Briefly, in each iteration, a gradually tighter piece-wise linear lower bound of the objective function will be constructed by adding more cutting planes (most violated constraints), then the approximate QP will be solved. Finding a cutting plane involves calling the max oracle $H_i(\mathbf{w})$ and in average $N$ calls are required in an iteration. This is basically why the training is time consuming. End of explanation """ import matplotlib.pyplot as plt fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4)) primal_bmrm = bmrm.get_helper().get_primal_values() dual_bmrm = bmrm.get_result().get_hist_Fd_vector() len_iter = min(primal_bmrm.size, dual_bmrm.size) primal_bmrm = primal_bmrm[1:len_iter] dual_bmrm = dual_bmrm[1:len_iter] # plot duality gaps xs = range(dual_bmrm.size) axes[0].plot(xs, (primal_bmrm-dual_bmrm), label='duality gap') axes[0].set_xlabel('iteration') axes[0].set_ylabel('duality gap') axes[0].legend(loc=1) axes[0].set_title('duality gaps'); axes[0].grid(True) # plot primal and dual values xs = range(dual_bmrm.size-1) axes[1].plot(xs, primal_bmrm[1:], label='primal') axes[1].plot(xs, dual_bmrm[1:], label='dual') axes[1].set_xlabel('iteration') axes[1].set_ylabel('objective') axes[1].legend(loc=1) axes[1].set_title('primal vs dual'); axes[1].grid(True) """ Explanation: Let's check the duality gap to see if the training has converged. We aim at minimizing the primal problem while maximizing the dual problem. By the weak duality theorem, the optimal value of the primal problem is always greater than or equal to dual problem. 
Thus, we could expect the duality gap will decrease during the time. A relative small and stable duality gap may indicate the convergence. In fact, the gap doesn't have to become zero, since we know it is not far away from the local minima. End of explanation """ # statistics bmrm_stats = bmrm.get_result() nCP = bmrm_stats.nCP nzA = bmrm_stats.nzA print 'number of cutting planes: %d' % nCP print 'number of active cutting planes: %d' % nzA """ Explanation: There are other statitics may also be helpful to check if the solution is good or not, such as the number of cutting planes, from which we may have a sense how tight the piece-wise lower bound is. In general, the number of cutting planes should be much less than the dimension of the parameter vector. End of explanation """ from shogun import StochasticSOSVM # the 3rd parameter is do_weighted_averaging, by turning this on, # a possibly faster convergence rate may be achieved. # the 4th parameter controls outputs of verbose training information sgd = StochasticSOSVM(model, labels, True, True) sgd.set_num_iter(100) sgd.set_lambda(0.01) # train t0 = time.time() sgd.train() t1 = time.time() w_sgd = sgd.get_w() print "SGD took", t1 - t0, "seconds." """ Explanation: In our case, we have 101 active cutting planes, which is much less than 4082, i.e. the number of parameters. We could expect a good model by looking at these statistics. Now come to the online solvers. Unlike the cutting plane algorithms re-optimizes over all the previously added dual variables, an online solver will update the solution based on a single point. This difference results in a faster convergence rate, i.e. less oracle calls, please refer to Table 1 in [4] for more detail. Here, we use the stochastic subgradient descent (<a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CStochasticSOSVM.html">StochasticSOSVM</a>) to compare with the BMRM algorithm shown before. 
End of explanation """ fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4)) primal_sgd = sgd.get_helper().get_primal_values() xs = range(dual_bmrm.size-1) axes[0].plot(xs, primal_bmrm[1:], label='BMRM') axes[0].plot(range(99), primal_sgd[1:100], label='SGD') axes[0].set_xlabel('effecitve passes') axes[0].set_ylabel('primal objective') axes[0].set_title('whole training progress') axes[0].legend(loc=1) axes[0].grid(True) axes[1].plot(range(99), primal_bmrm[1:100], label='BMRM') axes[1].plot(range(99), primal_sgd[1:100], label='SGD') axes[1].set_xlabel('effecitve passes') axes[1].set_ylabel('primal objective') axes[1].set_title('first 100 effective passes') axes[1].legend(loc=1) axes[1].grid(True) """ Explanation: We compare the SGD and BMRM in terms of the primal objectives versus effective passes. We first plot the training progress (until both algorithms converge) and then zoom in to check the first 100 passes. In order to make a fair comparison, we set the regularization constant to 1e-2 for both algorithms. End of explanation """ fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4)) terr_bmrm = bmrm.get_helper().get_train_errors() terr_sgd = sgd.get_helper().get_train_errors() xs = range(terr_bmrm.size-1) axes[0].plot(xs, terr_bmrm[1:], label='BMRM') axes[0].plot(range(99), terr_sgd[1:100], label='SGD') axes[0].set_xlabel('effecitve passes') axes[0].set_ylabel('training error') axes[0].set_title('whole training progress') axes[0].legend(loc=1) axes[0].grid(True) axes[1].plot(range(99), terr_bmrm[1:100], label='BMRM') axes[1].plot(range(99), terr_sgd[1:100], label='SGD') axes[1].set_xlabel('effecitve passes') axes[1].set_ylabel('training error') axes[1].set_title('first 100 effective passes') axes[1].legend(loc=1) axes[1].grid(True) """ Explanation: As is shown above, the SGD solver uses less oracle calls to get to converge. 
Note that the timing is 2 times slower than they actually need, since there are additional computations of primal objective and training error in each pass. The training errors of both algorithms for each pass are shown in below. End of explanation """ def hinton(matrix, max_weight=None, ax=None): """Draw Hinton diagram for visualizing a weight matrix.""" ax = ax if ax is not None else plt.gca() if not max_weight: max_weight = 2**np.ceil(np.log(np.abs(matrix).max())/np.log(2)) ax.patch.set_facecolor('gray') ax.set_aspect('equal', 'box') ax.xaxis.set_major_locator(plt.NullLocator()) ax.yaxis.set_major_locator(plt.NullLocator()) for (x,y),w in np.ndenumerate(matrix): color = 'white' if w > 0 else 'black' size = np.sqrt(np.abs(w)) rect = plt.Rectangle([x - size / 2, y - size / 2], size, size, facecolor=color, edgecolor=color) ax.add_patch(rect) ax.autoscale_view() ax.invert_yaxis() # get pairwise parameters, also accessible from # w[n_dims*n_stats:n_dims*n_stats+n_stats*n_stats] model.w_to_fparams(w_sgd) # update factor parameters w_p = ftype_all[1].get_w() w_p = np.reshape(w_p,(n_stats,n_stats)) hinton(w_p) """ Explanation: Interestingly, the training errors of SGD solver are lower than BMRM's in first 100 passes, but in the end the BMRM solver obtains a better training performance. A probable explanation is that BMRM uses very limited number of cutting planes at beginning, which form a poor approximation of the objective function. As the number of cutting planes increasing, we got a tighter piecewise lower bound, thus improve the performance. In addition, we would like to show the pairwise weights, which may learn important co-occurrances of letters. The hinton diagram is a wonderful tool for visualizing 2D data, in which positive and negative values are represented by white and black squares, respectively, and the size of each square represents the magnitude of each value. In our case, a smaller number i.e. 
a large black square indicates the two letters tend to coincide. End of explanation """ # get testing data samples_ts, labels_ts = prepare_data(p_ts, l_ts, ftype_all, n_ts_samples) from shogun import FactorGraphFeatures, FactorGraphObservation, TREE_MAX_PROD, MAPInference # get a factor graph instance from test data fg0 = samples_ts.get_sample(100) fg0.compute_energies() fg0.connect_components() # create a MAP inference using tree max-product infer_met = MAPInference(fg0, TREE_MAX_PROD) infer_met.inference() # get inference results y_pred = infer_met.get_structured_outputs() y_truth = FactorGraphObservation.obtain_from_generic(labels_ts.get_label(100)) print y_pred.get_data() print y_truth.get_data() """ Explanation: Inference Next, we show how to do inference with the learned model parameters for a given data point. End of explanation """ from shogun import LabelsFactory, SOSVMHelper # training error of BMRM method bmrm.set_w(w_bmrm) model.w_to_fparams(w_bmrm) lbs_bmrm = bmrm.apply() acc_loss = 0.0 ave_loss = 0.0 for i in xrange(n_tr_samples): y_pred = lbs_bmrm.get_label(i) y_truth = labels.get_label(i) acc_loss = acc_loss + model.delta_loss(y_truth, y_pred) ave_loss = acc_loss / n_tr_samples print('BMRM: Average training error is %.4f' % ave_loss) # training error of stochastic method print('SGD: Average training error is %.4f' % SOSVMHelper.average_loss(w_sgd, model)) # testing error bmrm.set_features(samples_ts) bmrm.set_labels(labels_ts) lbs_bmrm_ts = bmrm.apply() acc_loss = 0.0 ave_loss_ts = 0.0 for i in xrange(n_ts_samples): y_pred = lbs_bmrm_ts.get_label(i) y_truth = labels_ts.get_label(i) acc_loss = acc_loss + model.delta_loss(y_truth, y_pred) ave_loss_ts = acc_loss / n_ts_samples print('BMRM: Average testing error is %.4f' % ave_loss_ts) # testing error of stochastic method print('SGD: Average testing error is %.4f' % SOSVMHelper.average_loss(sgd.get_w(), model)) """ Explanation: Evaluation In the end, we check average training error and average testing 
error. The evaluation can be done by two methods. We can either use the apply() function in the structured output machine or use the <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CSOSVMHelper.html">SOSVMHelper</a>. End of explanation """
GeosoftInc/gxpy
examples/jupyter_notebooks/Tutorials/Geosoft Databases.ipynb
bsd-2-clause
from IPython.display import Image import numpy as np import geosoft.gxapi as gxapi import geosoft.gxpy.gx as gx import geosoft.gxpy.gdb as gxdb import geosoft.gxpy.utility as gxu gxc = gx.GXpy() url = 'https://github.com/GeosoftInc/gxpy/raw/9.3.1/examples/data/' gxu.url_retrieve(url + 'mag_data.csv') """ Explanation: Copyright (c) 2017 Geosoft Inc. https://github.com/GeosoftInc/gxpy BSD 2-clause License Geosoft Databases Lessons <!--- # Run this from a code cell to create TOC markdown: --> <!--- import geosoft.gxpy.utility; print(geosoft.gxpy.utility.jupyter_markdown_toc('geosoft databases')) --> What is a Geosoft database? Create a new database from a CSV-format data file Split the data into lines Read, modify and save data using numpy Read, modify and save data using a VV Apply an expression to a database What is a Geosoft database? A Geosoft database stores 3D spatial data in a form that allows for both very-large volume data storage and very efficient processing.  Geosoft databases are stored on a file system in a file that has extension .gdb. The Geosoft database was first designed in 1992 and has evolved to become a de facto standard for airborne and ground geophysical data, geochemical data, and drillhole data of all kinds.  All Geosoft programs and third-party applications that use the Geosoft API to read databases are able to work with databases created by any version of Geosoft since 1992, though database features and capabilities that are newer than the reading program will not be available to older programs. This cross-version stability of data has been key to the emergence of Geosoft databases as an industry standard, especially for airborne geophysical surveys. Fundamentally, Geosoft databases store located data which has either an (x, y) location for 2D data, or an (x, y, z) location for 3D data such that each location may have any number of fields of associated information.  
Information fields are referred to as 'channels' in the context of a Geosoft Database, and channels may contain numeric data, strings or arrays of numeric data to store things like time-series.  For example an airborne magnetic survey may have (x, y, elevation, altimeter, mag) data, and a ground geochemical survey may contain (x, y, elevation, Cu, Au, Hg). Data in a database is grouped into the concept of 'lines' of data such that each 'line' contains the data from a single airborne survey line, a single ground survey line, a marine ship track, a set of data from a single drill hole, or from any other grouping construct that is suitable for the stored data.  Geosoft's desktop application, Oasis montaj, displays a single line of data in a spreadsheet-like form to the user, allowing a user to change lines and change which channels are displayed and the order channels are displayed. Simple databases might place all data into a single line.  All lines in a Geosoft database share the channel definitions, of course with data different for each line.  As you work through the exercises in this tutorial you will be introduced to other concepts of Geosoft databases. Imports, GX context, and get data from GitHub End of explanation """ # Open csv-format data file and skip the first line, which is a comment line f = open('mag_data.csv', 'r') f.readline() # the second line contains the channel/field names, from which we create a list of channel names channel_names = f.readline().strip().split(',') #the rest of the file contains data, which we load into a 2D numpy float array data = np.loadtxt(f, delimiter=',') #create a new database from list of channels and numpy data. All data is stored in a single line. 
with gxdb.Geosoft_gdb.new('mag_data', overwrite=True) as gdb: # create a valid line name for line 0 type random, and write all the data to this line gdb.write_line('L0', data, channel_names) # set the coordinate system to 'NAD 83 / UTM zone 15N' gdb.coordinate_system = 'NAD83 / UTM zone 15N' # set the mag data units to 'nT' gxdb.Channel(gdb, 'mag').unit_of_measure = 'nT' print(list(gdb.list_lines())) # ['L0'] print(list(gdb.list_channels())) # ['mag', 'X', 'Y', 'Z'] print(gdb.xyz_channels) # ('X', 'Y', 'Z') Image(gxdb.Geosoft_gdb.open('mag_data').figure_map(draw=gxdb.DRAW_AS_LINES).image_file(pix_width=800)) """ Explanation: Create a new database from a CSV-format data file A very common form for exchanging located data is in a simple ASCII Comma-Separated-Values (CSV) file. In this exercise we will create a new Geosoft database and import an example CSV file. The CSV file mag_data.csv contains located data as follows: ``` (X, Y) are on NAD83, UTM Zone 15N X,Y,Z,mag 330000,6629200,211.055369153511,5535 330050,6629200,210.654999327171,5500 330100,6629200,210.286555185181,5476 330150,6629200,209.952679145607,5479 330200,6629200,209.655742280775,5496 330250,6629200,209.397817201372,5544 etc... ``` In this example, the first line is simply a comment line that identified the spatial coordinate system for the located data. The second line identified the names of the data columns, and data follows starting in line 3. Note that columns of information after the first line are comma-delimited, and each line has 4 columns of information. Import CSV Script This script that will import the CSV data into a new Geosoft database.  All the data is imported to a single line - we wil split the line into segments later. End of explanation """ # split the line into sections knowing lines are E-W, and separated by 200 m. 
# the GXDU class requires a desktop license if gxc.entitled: # we need an int_ref() to hold values that are passed by reference, in this case for the first_line argument split_line_number_start = gxapi.int_ref() split_line_number_start.value = 1 # open the database, best practice is to use a 'with ...' construct with gxdb.Geosoft_gdb.open('mag_data') as gdb: # create instances to the lines and channels needed by the split_line_xy2 function line = gxdb.Line(gdb, 'L0') x_channel = gxdb.Channel(gdb, 'X') y_channel = gxdb.Channel(gdb, 'Y') # lock items as required line.lock = gxdb.SYMBOL_LOCK_READ x_channel.lock = gxdb.SYMBOL_LOCK_WRITE y_channel.lock = gxdb.SYMBOL_LOCK_WRITE # split the original line into segments, based on a lateral distance tolerance of 100 m. gxapi.GXDU.split_line_xy2( gdb.gxdb, # the gxdb property holds the gxapi.GXDB instance line.symbol, gxdb.Channel(gdb, 'X').symbol, gxdb.Channel(gdb, 'Y').symbol, 1, 100.0, gxapi.rDUMMY, gxapi.DU_SPLITLINE_SEQUENTIAL, split_line_number_start, 1, 1) #delete the original line as it is no longer needed gdb.delete_line('L0') # print a list of the new lines print('Lines: ', list(gdb.list_lines())) # ['L1', 'L2', 'L3', 'L4', 'L5', 'L6', ... # show the lines Image(gxdb.Geosoft_gdb.open('mag_data').figure_map(draw=gxdb.DRAW_AS_LINES).image_file(pix_width=800)) """ Explanation: Opening the database from a Geosoft application will shows the 'L0' line. Split the data into lines In the database line plot above we see that the data in Line 0 (L0E) would be better organized as separate survey lines rather than a single contuous lines. In this case the lines are horizontal, 200 m apart with sampled every 50 m along each line: Lets modify the script by adding some processing that will use a Geosoft method to split a line into separate lines based on a change in line direction. To split a line into sections we will use a processing function from the GXAPI, which exposes the full Geosoft API. 
We will use the geosoft.gxapi.GXDU.split_line_xy2 function (GXDU contains many Database Utility functions). This is part of the low-level geosoft.gxapi so a bit more care is required. Database functions often require locks on database symbols (handles to lines, channels and other database objects), which is the case for the Line, x_ch and y_ch arguments of split_line_xy2. Because Geosoft databases allow for concurrent access by multiple applications, symbols must be locked and unlocked as they are used. The geosoft.gxpy pythonic api will do this for you, but the geosoft.gxapi requires that you directly manage locks. Terminating a script removes any locks that may have remained on any symbols. End of explanation """ with gxdb.Geosoft_gdb.open('mag_data') as gdb: # make a new channel for the output, duplicate properties of 'mag' channel new_mag_channel = gxdb.Channel.new(gdb, 'mag_base', dup='mag', replace=True) # work through each line for line in gdb.list_lines(): # read data from the line. # The read_channel method returns the data as a numpy array, and the fiducial mag_data, fid = gdb.read_channel(line, 'mag') # use simple numpy math to subtract 5000, then save to the new_mag_channel mag_data = mag_data - 5000 gdb.write_channel(line, new_mag_channel, mag_data, fid) """ Explanation: Read, modify and save data using numpy Geosoft databases and data models are designed around the concept of data vector arrays, which support high performance data processing. Each channel in a line holds a single 1-dimensional or 2-dimentional array of data. Python's standard library for working with data arrays is numpy, and this is a standard part of any scientifically-oriented Python environment. The Geosoft_gdb class provides for reading and writing data directly to/from numpy arrays as well as Geosoft VV vector arrays. This example shows a simple use of numpy and the next section shows the same exercise using a Geosoft VV. 
In the open database above we see that the mag data has values around 5000 nT, which we will subtract from the data. End of explanation """ with gxdb.Geosoft_gdb.open('mag_data') as gdb: # make a new channel for the output, duplicate properties of 'mag' channel new_mag_channel = gxdb.Channel.new(gdb, 'mag_base', dup='mag', replace=True) # work through each line for line in gdb.list_lines(): # read data from the line. # The read_channel method returns the data in a geosoft VV mag_data = gdb.read_channel_vv(line, 'mag') # use Geosoft GXVVU.translate function to subtract 5000. gxapi.GXVVU.translate(mag_data.gxvv, -5000, 1) gdb.write_channel_vv(line, new_mag_channel, mag_data) """ Explanation: Read, modify and save data using a VV This example does the same thing as the previous example, but uses the Geosoft vector processing functions from the Geosoft VV library. Geosoft VV functions will deliver even higher performance than numpy, and while you may be limited by the capabilities of the Geosoft API, there are many functions that perform geoscience-specific operations on data (see gxapi.GXVVU). Note that data in a Geosoft VV can also be exposed as a numpy array to provide numpy access when needed. End of explanation """ with gxdb.Geosoft_gdb.open('mag_data') as gdb: # make a distance channel dist_channel = gxdb.Channel.new(gdb, 'distance', dup='x', replace=True) # work through each line for line in gdb.list_lines(): # Here we are reading multiple channels from a single line. # If we were to omit the list of channels ('x', 'y'), # all channels in the line would be returned in a 2D numpy array. # The returned channels_read is a list of the channels that were read. 
xy_data, channels_read, fid = gdb.read_line(line, ('x', 'y')) # get the first point (x0, y0) x0 = xy_data[0, 0] y0 = xy_data[0, 1] # use numpy array math to calculate distance in a 1D array dist_array dist_array = np.sqrt((xy_data[:, 0] - x0)**2 + (xy_data[:, 1] - y0)**2) # save the distance to the distance channel gdb.write_channel(line, dist_channel, dist_array, fid) """ Explanation: Apply an expression to a database In this exercise we calculate the distance along each line from the starting point of each line. The Pythagorean expression is simple and can be implemented in a single line of numpy code. This example reads multiple channels from the database and uses numpy slicing to work with individual channel columns from a 2D numpy array. End of explanation """ import math with gxdb.Geosoft_gdb.open('mag_data') as gdb: # make a distance channel dist_channel = gxdb.Channel.new(gdb, 'distance', dup='x', replace=True) # work through each line for line in gdb.list_lines(): # Here we are reading multiple channels from a single line. # If we were to omit the list of channels ('x', 'y'), # all channels in the line would be returned in a 2D numpy array. # The returned channels_read is a list of the channels that were read. xy_data, channels_read, fid = gdb.read_line(line, ('x', 'y')) # get the first point (x0, y0) x0 = xy_data[0, 0] y0 = xy_data[0, 1] # iterate elements in the data (slow) dist_array = np.zeros(len(xy_data)) for i in range(len(dist_array)): dx = xy_data[i, 0] - x0 dy = xy_data[i, 1] - y0 dist_array[i] = math.sqrt(dx **2 + dy **2) # save the distance to the distance channel gdb.write_channel(line, dist_channel, dist_array, fid) """ Explanation: Above we use numpy vector operations which are very fast. You could also iterate through the array, but this is much slower: End of explanation """
arnoldlu/lisa
ipynb/tutorial/04_ExecutorUsage.ipynb
apache-2.0
import logging from conf import LisaLogging LisaLogging.setup() # Execute this cell to enabled executor debugging statements logging.getLogger('Executor').setLevel(logging.DEBUG) """ Explanation: Tutorial Goal This tutorial aims to show how to configure and run a predefined set of synthetic workload using the executor module provided by LISA. Configure logging End of explanation """ from env import TestEnv # Setup a test environment with target configuration env = TestEnv({ # Target platform and board "platform" : 'linux', "board" : 'juno', # Target board IP/MAC address "host" : '192.168.0.1', # Login credentials "username" : 'root', "password" : 'test0000', # Folder where all the results will be collected "results_dir" : "ExecutorExample", # FTrace events to collect for all the tests configuration which have # the "ftrace" flag enabled "ftrace" : { "events" : [ "sched_switch", "sched_wakeup", "sched_wakeup_new", "cpu_frequency", ], "buffsize" : 80 * 1024, }, # Tools required by the experiments "tools" : [ 'trace-cmd', 'perf' ], # Modules required by these experiments "modules" : [ 'bl', 'cpufreq' ], }) """ Explanation: Target Configuration End of explanation """ my_tests_conf = { # Platform configurations to test "confs" : [ { "tag" : "base", "flags" : "ftrace", # Enable FTrace events "sched_features" : "NO_ENERGY_AWARE", # Disable EAS "cpufreq" : { # Use PERFORMANCE CpuFreq "governor" : "performance", }, }, { "tag" : "eas", "flags" : "ftrace", # Enable FTrace events "sched_features" : "ENERGY_AWARE", # Enable EAS "cpufreq" : { # Use PERFORMANCE CpuFreq "governor" : "performance", }, }, ], # Workloads to run (on each platform configuration) "wloads" : { # Run hackbench with 1 group using pipes "perf" : { "type" : "perf_bench", "conf" : { "class" : "messaging", "params" : { "group" : 1, "loop" : 10, "pipe" : True, "thread": True, } } }, # Run a 20% duty-cycle periodic task "rta" : { "type" : "rt-app", "loadref" : "big", "conf" : { "class" : "profile", "params" : { 
"p20" : { "kind" : "Periodic", "params" : { "duty_cycle_pct" : 20, }, }, }, }, }, }, # Number of iterations for each workload "iterations" : 1, } """ Explanation: Tests Configuration End of explanation """ from executor import Executor executor = Executor(env, my_tests_conf) executor.run() !tree {executor.te.res_dir} """ Explanation: Tests execution End of explanation """
mgorenstein/multiplot
tutorial.ipynb
mit
import pandas as pd import numpy as np import scipy.signal as signal from multiplot import PandasPlot, NumpyPlot %matplotlib inline """ Explanation: multiplot tutorial Although the forthcoming inline plots are static, running this code in a Python shell will produce interactive matplotlib windows. End of explanation """ samp_freq = 1000 # Hz duration = 5 # seconds first_signal_freq =1 # Hz signals = [] labels = [] for x in xrange(1,6): signal_freq = first_signal_freq * x time_points = np.arange(0, duration, 1/float(samp_freq)) sig = np.sin(2 * np.pi * signal_freq * time_points) sig_label = "Ch %d" %(x-1) labels.append(sig_label) signals.append(sig) df = pd.DataFrame(np.transpose(signals), columns=labels) nump = np.array(signals) """ Explanation: Generate a set of sample signals. End of explanation """ print 'DataFrame: ', df.shape print 'Numpy array: ', nump.shape PandasPlot(df) NumpyPlot(nump, labels=labels) # if labels aren't supplied, 'Ch x' labels are auto-generated """ Explanation: Note that PandasPlot expects a DataFrame where each series is a column, whereas NumpyPlot expects an array where each series is a row. End of explanation """ PandasPlot(df, num_display_chans=2) """ Explanation: Reduce number of channels displayed at once End of explanation """ PandasPlot(df, num_display_samps=2000) """ Explanation: Reduce number of samples displayed at once End of explanation """ highlights = {'Ch 0': [[2000, 3000]], 'Ch 2': [[1000, 2000], [3000, 4000]], 'Ch 4': [[2000, 3000]]} PandasPlot(df, highlights=highlights) """ Explanation: Highlight segments of the signals End of explanation """
CompPhysics/MachineLearning
doc/src/NeuralNet/notes/.ipynb_checkpoints/mlp-checkpoint.ipynb
cc0-1.0
# import necessary packages import numpy as np import matplotlib.pyplot as plt from sklearn import datasets #ensure the same random numbers appear every time np.random.seed(0) # display images in notebook %matplotlib inline plt.rcParams['figure.figsize'] = (10,10) # download MNIST dataset digits = datasets.load_digits() # define inputs and labels inputs = digits.images labels = digits.target print("inputs = (n_inputs, pixel_width, pixel_height) = " + str(inputs.shape)) print("labels = (n_inputs) = " + str(labels.shape)) # flatten the image # the value -1 means dimension is inferred from the remaining dimensions: 8x8 = 64 n_inputs = len(inputs) inputs = inputs.reshape(n_inputs, -1) print("X = (n_inputs, n_features) = " + str(inputs.shape)) # choose some random images to display indices = np.arange(n_inputs) random_indices = np.random.choice(indices, size=5) for i, image in enumerate(digits.images[random_indices]): plt.subplot(1, 5, i+1) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title("Label: %d" % digits.target[random_indices[i]]) plt.show() """ Explanation: Building neural networks in numpy and scikit-learn Neural networks Artificial neural networks are computational systems that can learn to perform tasks by considering examples, generally without being programmed with any task-specific rules. It is supposed to mimic a biological system, wherein neurons interact by sending signals in the form of mathematical functions between layers. All layers can contain an arbitrary number of neurons, and each connection is represented by a weight variable. In this tutorial we will build a feed-forward neural network, where information moves in only in direction: forward through the layers. Each neuron or node is represented by a circle, while arrows display the connections between the nodes and indicate the direction of information flow. 
Each node in a layer is connected to all nodes in the subsequent layer, which makes this a so-called fully-connected feed-forward neural network. Via Wikipedia Prerequisites To follow this tutorial we require an installation of Python with the numerical package numpy, either: 1) Python 2.7.x 2) Python 3.5.x or greater With a version of Numpy 1.0.x or greater. We will also use the packages matplotlib, scikit-learn, Tensorflow and Keras, though these are not strictly necessary. To open and run this notebook you also need an installation of IPython and Jupyter Notebook. Anaconda Anaconda is a free and open source Python and R distribution, that aims to simplify package management and deployment. Anaconda comes with more than 1000 data packages, as well as the Conda package and package and virtual environment manager. Anaconda is available on Linux, OS X and Windows systems, and contains nearly all prerequisite software, it comes highly recommended. If Anaconda is installed you can install Tensorflow and Keras using: conda install tensorflow conda install keras (You may run into minor problems with conflicting package versions). Pip package manager If you do not wish to install Anaconda you may download Python from here, or you can use package managers like brew, apt, pacman,... Python distributions come with their own package manager, pip, and once you have Python installed you can run the following command: pip install numpy matplotlib scikit-learn ipython jupyter To install Tensorflow follow the instructions here. 
After you have installed tensorflow you can install keras: pip install keras Via xkcd Workflow One can identify a set of key steps when using neural networks to solve supervised learning problems: 1) Collect and pre-process data 2) Define model and architecture 3) Choose cost function and optimizer 4) Train the model 5) Evaluate model performance on test data 6) Adjust hyperparameters (if necessary, network architecture) 1) Collect and pre-process data In this tutorial we will be using the MNIST dataset, which is readily available through the scikit-learn package. You may also find it for example here. The MNIST (Modified National Institute of Standards and Technology) database is a large database of handwritten digits that is commonly used for training various image processing systems. The MNIST dataset consists of 70 000 images of size 28x28 pixels, each labeled from 0 to 9. To feed data into a feed-forward neural network we need to represent the inputs as a feature matrix $X = [n_{inputs}, n_{features}]$. Each row represents an input, in this case a handwritten digit, and each column represents a feature, in this case a pixel. The correct answers, also known as labels or targets are represented as a 1D array of integers $Y = [n_{inputs}] = [5, 3, 1, 8,...]$. Say I wanted to build a neural network using supervised learning to predict Body-Mass Index (BMI) from measurements of height (in m) and weight (in kg). If I had measurements of 5 people the feature matrix could be for example: $$ X = \begin{bmatrix} 1.85 & 81\ 1.71 & 65\ 1.95 & 103\ 1.55 & 42\ 1.63 & 56 \end{bmatrix} ,$$ and the targets would be: $$ Y = (23.7, 22.2, 27.1, 17.5, 21.1) $$ Since each input image is a 2D matrix, we need to flatten the image (i.e. "unravel" the 2D matrix into a 1D array) to turn the data into a feature matrix. This means we lose all spatial information in the image, such as locality and translational invariance (explanation). 
More complicated architectures such as Convolutional Neural Networks can take advantage of such information, and are most commonly applied when analyzing images. End of explanation """ from sklearn.model_selection import train_test_split # one-liner from scikit-learn library train_size = 0.8 test_size = 1 - train_size X_train, X_test, Y_train, Y_test = train_test_split(inputs, labels, train_size=train_size, test_size=test_size) # equivalently in numpy def train_test_split_numpy(inputs, labels, train_size, test_size): n_inputs = len(inputs) inputs_shuffled = inputs.copy() labels_shuffled = labels.copy() np.random.shuffle(inputs_shuffled) np.random.shuffle(labels_shuffled) train_end = int(n_inputs*train_size) X_train, X_test = inputs_shuffled[:train_end], inputs_shuffled[train_end:] Y_train, Y_test = labels_shuffled[:train_end], labels_shuffled[train_end:] return X_train, X_test, Y_train, Y_test #X_train, X_test, Y_train, Y_test = train_test_split_numpy(inputs, labels, train_size, test_size) """ Explanation: Train and test datasets Performing analysis before partitioning the dataset is a major error, that can lead to incorrect conclusions (see "Bias-Variance Tradeoff", for example here). We will reserve $80 \%$ of our dataset for training and $20 \%$ for testing. It is important that the train and test datasets are drawn randomly from our dataset, to ensure no bias in the sampling. Say you are taking measurements of weather data to predict the weather in the coming 5 days. You don't want to train your model on measurements taken from the hours 00.00 to 12.00, and then test it on data collected from 12.00 to 24.00. 
End of explanation """ # building our neural network n_inputs, n_features = X_train.shape n_hidden_neurons = 50 n_categories = 10 # we make the weights normally distributed using numpy.random.randn # weights and bias in the hidden layer hidden_weights = np.random.randn(n_features, n_hidden_neurons) hidden_bias = np.zeros(n_hidden_neurons) + 0.01 # weights and bias in the output layer output_weights = np.random.randn(n_hidden_neurons, n_categories) output_bias = np.zeros(n_categories) + 0.01 """ Explanation: 2) Define model and architecture Our simple feed-forward neural network will consist of an input layer, a single hidden layer and an output layer. The activation $y$ of each neuron is a weighted sum of inputs, passed through an activation function: $$ z = \sum_{i=1}^n w_i a_i ,$$ $$ y = f(z) ,$$ where $f$ is the activation function, $a_i$ represents input from neuron $i$ in the preceding layer and $w_i$ is the weight to neuron $i$. The activation of the neurons in the input layer is just the features (e.g. a pixel value). The simplest activation function for a binary classifier (e.g. two classes, 0 or 1, cat or not cat) is the Heaviside function: $$ f(z) = \begin{cases} 1, & z > 0\ 0, & \text{otherwise} \end{cases} $$ A feed-forward neural network with this activation is known as a perceptron. This activation can be generalized to $k$ classes (using e.g. the one-against-all strategy), and we call these architectures multiclass perceptrons. However, it is now common to use the terms Single Layer Perceptron (SLP) (1 hidden layer) and Multilayer Perceptron (MLP) (2 or more hidden layers) to refer to feed-forward neural networks with any activation function. Typical choices for activation functions include the sigmoid function, hyperbolic tangent, and Rectified Linear Unit (ReLU). 
We will be using the sigmoid function $\sigma(x)$: $$ f(x) = \sigma(x) = \frac{1}{1 + e^{-x}} ,$$ which is inspired by probability theory (see logistic regression) and was most commonly used until about 2011. Layers Input: Since each input image has 8x8 = 64 pixels or features, we have an input layer of 64 neurons. Hidden layer: We will use 50 neurons in the hidden layer receiving input from the neurons in the input layer. Since each neuron in the hidden layer is connected to the 64 inputs we have 64x50 = 3200 weights to the hidden layer. Output: If we were building a binary classifier, it would be sufficient with a single neuron in the output layer, which could output 0 or 1 according to the Heaviside function. This would be an example of a hard classifier, meaning it outputs the class of the input directly. However, if we are dealing with noisy data it is often beneficial to use a soft classifier, which outputs the probability of being in class 0 or 1. For a soft binary classifier, we could use a single neuron and interpret the output as either being the probability of being in class 0 or the probability of being in class 1. Alternatively we could use 2 neurons, and interpret each neuron as the probability of being in each class. Since we are doing multiclass classification, with 10 categories, it is natural to use 10 neurons in the output layer. We number the neurons $j = 0,1,...,9$. The activation of each output neuron $j$ will be according to the softmax function: $$ P(\text{class $j$} \mid \text{input $\boldsymbol{a}$}) = \frac{e^{\boldsymbol{a}^T \boldsymbol{w}j}} {\sum{k=0}^{9} e^{\boldsymbol{a}^T \boldsymbol{w}_k}} ,$$ i.e. each neuron $j$ outputs the probability of being in class $j$ given an input from the hidden layer $\boldsymbol{a}$, with $\boldsymbol{w}_j$ the weights of neuron $j$ to the inputs. The denominator is a normalization factor to ensure the outputs sum up to 1. 
The exponent is just the weighted sum of inputs as before: $$ z_j = \sum_{i=1}^n w_ {ij} a_i = \boldsymbol{a}^T \boldsymbol{w}_j .$$ Since each neuron in the output layer is connected to the 50 inputs from the hidden layer we have 50x10 = 500 weights to the output layer. Weights and biases Typically weights are initialized with small values distributed around zero, drawn from a uniform or normal distribution. Setting all weights to zero means all neurons give the same output, making the network useless. Adding a bias value to the weighted sum of inputs allows the neural network to represent a greater range of values. Without it, any input with the value 0 will be mapped to zero (before being passed through the activation). The bias unit has an output of 1, and a weight to each neuron $j$, $b_j$: $$ z_j = \sum_{i=1}^n w_ {ij} a_i + 1\cdot b_j = \boldsymbol{a}^T \boldsymbol{w}_j + b_j .$$ The bias weights $\boldsymbol{b}$ are often initialized to zero, but a small value like $0.01$ ensures all neurons have some output which can be backpropagated in the first training cycle. 
Via Stanford UFLDL End of explanation """ # setup the feed-forward pass def sigmoid(x): return 1/(1 + np.exp(-x)) def feed_forward(X): # weighted sum of inputs to the hidden layer z1 = np.matmul(X, hidden_weights) + hidden_bias # activation in the hidden layer a1 = sigmoid(z1) # weighted sum of inputs to the output layer z2 = np.matmul(a1, output_weights) + output_bias # softmax output # axis 0 holds each input and axis 1 the probabilities of each category exp_term = np.exp(z2) probabilities = exp_term / np.sum(exp_term, axis=1, keepdims=True) return probabilities probabilities = feed_forward(X_train) print("probabilities = (n_inputs, n_categories) = " + str(probabilities.shape)) print("probability that image 0 is in category 0,1,2,...,9 = \n" + str(probabilities[0])) print("probabilities sum up to: " + str(probabilities[0].sum())) print() # we obtain a prediction by taking the class with the highest likelihood def predict(X): probabilities = feed_forward(X) return np.argmax(probabilities, axis=1) predictions = predict(X_train) print("predictions = (n_inputs) = " + str(predictions.shape)) print("prediction for image 0: " + str(predictions[0])) print("correct label for image 0: " + str(Y_train[0])) """ Explanation: Feed-forward pass For each input image we calculate a weighted sum of input features (pixel values) to each neuron $j$ in the hidden layer: $$ z_{j}^{hidden} = \sum_{i=1}^{n_{features}} w_{ij}^{hidden} x_i + b_{j}^{hidden} = \boldsymbol{x}^T \boldsymbol{w}{j}^{hidden} + b{j}^{hidden} ,$$ this is then passed through our activation function $$ a_{j}^{hidden} = f(z_{j}^{hidden}) .$$ We calculate a weighted sum of inputs (activations in the hidden layer) to each neuron $j$ in the output layer: $$ z_{j}^{output} = \sum_{i=1}^{n_{hidden}} w_{ij}^{output} a_{i}^{hidden} + b_{j}^{output} = (\boldsymbol{a}^{hidden})^T \boldsymbol{w}{j}^{output} + b{j}^{output} .$$ Finally we calculate the output of neuron $j$ in the output layer using the softmax function: $$ 
a_{j}^{output} = \frac{\exp{(z_j^{output})}} {\sum_{k=1}^{n_{categories}} \exp{(z_k^{output})}} .$$ Matrix multiplication Since our data has the dimensions $X = (n_{inputs}, n_{features})$ and our weights to the hidden layer have the dimensions $W_{hidden} = (n_{features}, n_{hidden})$, we can easily feed the network all our training data in one go by taking the matrix product $$ X W^{hidden} = (n_{inputs}, n_{hidden}),$$ and obtain a matrix that holds the weighted sum of inputs to the hidden layer for each input image. We also add the bias to obtain a matrix of weighted sums $Z^{hidden}$: $$ Z^{hidden} = X W^{hidden} + B^{hidden} ,$$ meaning the same bias (1D array) is added to each input image. This is then passed through the activation $$ A^{hidden} = f(X W^{hidden} + B^{hidden}) .$$ This is fed to the output layer: $$ Z^{output} = A^{hidden} W^{output} + B^{output} .$$ Finally we receive our output values for each image and each category by passing it through the softmax function: $$ output = softmax (Z^{output}) = (n_{inputs}, n_{categories}) .$$ End of explanation """ # to categorical turns our integer vector into a onehot representation from keras.utils import to_categorical # calculate the accuracy score of our model from sklearn.metrics import accuracy_score Y_train_onehot, Y_test_onehot = to_categorical(Y_train), to_categorical(Y_test) # equivalently in numpy def to_categorical_numpy(integer_vector): n_inputs = len(integer_vector) n_categories = np.max(integer_vector) + 1 onehot_vector = np.zeros((n_inputs, n_categories)) onehot_vector[range(n_inputs), integer_vector] = 1 return onehot_vector #Y_train_onehot, Y_test_onehot = to_categorical_numpy(Y_train), to_categorical_numpy(Y_test) def feed_forward_train(X): # weighted sum of inputs to the hidden layer z1 = np.matmul(X, hidden_weights) + hidden_bias # activation in the hidden layer a1 = sigmoid(z1) # weighted sum of inputs to the output layer z2 = np.matmul(a1, output_weights) + output_bias # softmax 
output # axis 0 holds each input and axis 1 the probabilities of each category exp_term = np.exp(z2) probabilities = exp_term / np.sum(exp_term, axis=1, keepdims=True) return a1, probabilities def backpropagation(X, Y): a1, probabilities = feed_forward_train(X) # error in the output layer error_output = probabilities - Y # error in the hidden layer error_hidden = np.matmul(error_output, output_weights.T) * a1 * (1 - a1) # gradients for the output layer output_weights_gradient = np.matmul(a1.T, error_output) output_bias_gradient = np.sum(error_output, axis=0) # gradient for the hidden layer hidden_weights_gradient = np.matmul(X.T, error_hidden) hidden_bias_gradient = np.sum(error_hidden, axis=0) return output_weights_gradient, output_bias_gradient, hidden_weights_gradient, hidden_bias_gradient print("Old accuracy on training data: " + str(accuracy_score(predict(X_train), Y_train))) eta = 0.01 lmbd = 0.01 for i in range(1000): dWo, dBo, dWh, dBh = backpropagation(X_train, Y_train_onehot) dWo += lmbd * output_weights dWh += lmbd * hidden_weights output_weights -= eta * dWo output_bias -= eta * dBo hidden_weights -= eta * dWh hidden_bias -= eta * dBh print("New accuracy on training data: " + str(accuracy_score(predict(X_train), Y_train))) """ Explanation: 3) Choose cost function and optimizer (needs more work) To measure how well our neural network is doing we need to introduce a cost function. We will call the function that gives the error of a single sample output the loss function, and the function that gives the total error of our network across all samples the cost function. A typical choice for multiclass classification is the cross-entropy loss, also known as the negative log likelihood. In multiclass classification it is common to treat each integer label as a so called one-hot vector: $$ y = 5 \quad \rightarrow \quad \boldsymbol{y} = (0, 0, 0, 0, 0, 1, 0, 0, 0, 0) ,$$ $$ y = 1 \quad \rightarrow \quad \boldsymbol{y} = (0, 1, 0, 0, 0, 0, 0, 0, 0, 0) ,$$ i.e. 
a binary bit string of length $K$, where $K = 10$ is the number of classes. If $\boldsymbol{x}i$ is the $i$-th input (image), $y{ik}$ refers to the $k$-th component of the $i$-th output vector $\boldsymbol{y}_i$. The probability of $\boldsymbol{x}_i$ being in class $k$ is given by the softmax function: $$ P(y_{ik} = 1 \mid \boldsymbol{x}i, \boldsymbol{\theta}) = \frac{e^{(\boldsymbol{a}_i^{hidden})^T \boldsymbol{w}_k}} {\sum{k'=0}^{K-1} e^{(\boldsymbol{a}i^{hidden})^T \boldsymbol{w}{k'}}} ,$$ where $\boldsymbol{a}i^{hidden}$ is the activation in the hidden layer from input $\boldsymbol{x}_i$. The vector $\boldsymbol{\theta}$ represents the weights and biases of our network. The probability of not being in class $k$ is just $1 - P(y{ik} = 1 \mid \boldsymbol{x}_i)$. For Maximum Likelihood Estimation (MLE) we choose the label with the largest probability. Denote the output label $\hat{y}$ and the correct label $y$, for example $\hat{y} = 5$ and $y = 8$. The likelihood that input $\boldsymbol{x}$ gives an output $\hat{y} = k'$ is then $$ P(\hat{y} = k' \mid \boldsymbol{x}, \boldsymbol{\theta}) = \prod_{k=0}^{K-1} [P(y_{k} = 1 \mid \boldsymbol{x}, \boldsymbol{\theta})]^{y_{k}} \times [1 - P(y_{k} = 1 \mid \boldsymbol{x}, \boldsymbol{\theta})]^{1-y_{k}} ,$$ where $y_k$ is the $k$-th component of the one-hot vector of (correct) labels. A perfect classifier should give a $100 \%$ probability of the correct label, so the product should just be 1 if $y = k$ and 0 otherwise. If the network is not a perfect classifier, the likelihood should be a number between 0 and 1. If we take the log of this we can turn the product into a sum, which is often simpler to compute: $$ \log P(\hat{y} = k' \mid \boldsymbol{x}, \boldsymbol{\theta}) = \sum_{k=0}^{K-1} y_{k} \log P(y_{k} = 1 \mid \boldsymbol{x}, \boldsymbol{\theta}) + (1-y_{k})\log (1 - P(y_{k} = 1 \mid \boldsymbol{x}, \boldsymbol{\theta}))$$ For a perfect classifier this should just be $\log 1 = 0$. 
Otherwise we get a negative number. Since it is easier to think in terms of minimizing a positive number, we take our loss function to be the negative log-likelihood: $$ \mathcal{L}(\boldsymbol{\theta}) = - \log P(\hat{y} = k' \mid \boldsymbol{x}, \boldsymbol{\theta}) $$ We then take the average of the loss function over all input samples to define the cost function: $$ \begin{split} \mathcal{C}(\boldsymbol{\theta}) &= \frac{1}{N} \sum_{i=1}^N \mathcal{L}(\boldsymbol{w}) \ &= -\frac{1}{N}\sum_{i=1}^N \sum_{k=0}^{K-1} y_{k} \log P(y_{k} = 1 \mid \boldsymbol{x}, \boldsymbol{\theta}) + (1-y_{k})\log (1 - P(y_{k} = 1 \mid \boldsymbol{x}, \boldsymbol{\theta})) \end{split} .$$ Optimizing the cost function The network is trained by finding the weights and biases that minimize the cost function. One of the most widely used classes of methods is gradient descent and its generalizations. The idea behind gradient descent is simply to adjust the weights in the direction where the gradient of the cost function is large and negative. This ensures we flow toward a local minimum of the cost function. Each parameter $\theta$ is iteratively adjusted according to the rule $$ \theta_{i+1} = \theta_i - \eta \nabla \mathcal{C}(\theta) ,$$ where $\eta$ is known as the learning rate, which controls how big a step we take towards the minimum. This update can be repeated for any number of iterations, or until we are satisfied with the result. A simple and effective improvement is a variant called Stochastic Gradient Descent (SGD). Instead of calculating the gradient on the whole dataset, we calculate an approximation of the gradient on a subset of the data called a minibatch. If there are $N$ data points and we have a minibatch size of $M$, the total number of batches is $n/M$. We denote each minibatch $B_k$, with $k = 1, 2,...,n/M$. 
The gradient then becomes: $$ \nabla \mathcal{C}(\theta) = \frac{1}{N} \sum_{i=1}^N \nabla \mathcal{L}(\theta) \quad \rightarrow \quad \frac{1}{M} \sum_{i \in B_k} \nabla \mathcal{L}(\theta) ,$$ i.e. instead of averaging the loss over the entire dataset, we average over a minibatch. This has two important benefits: 1) Introducing stochasticity decreases the chance that the algorithm becomes stuck in a local minima. 2) It significantly speeds up the calculation, since we do not have to use the entire dataset to calculate the gradient. Regularization It is common to add an extra term to the cost function, proportional to the size of the weights. This is equivalent to constraining the size of the weights, so that they do not grow out of control. Constraining the size of the weights means that the weights cannot grow arbitrarily large to fit the training data, and in this way reduces overfitting. We will measure the size of the weights using the so called L2-norm, meaning our cost function becomes: $$ \nabla \mathcal{C}(\theta) = \frac{1}{N} \sum_{i=1}^N \nabla \mathcal{L}(\theta) \quad \rightarrow \quad \frac{1}{N} \sum_{i=1}^N \nabla \mathcal{L}(\theta) + \lambda \lvert \lvert \boldsymbol{w}2^2 \rvert \rvert = \frac{1}{N} \sum{i=1}^N \nabla \mathcal{L}(\theta) + \lambda \sum_{ij} w_{ij}^2,$$ i.e. we sum up all the weights squared. The factor $\lambda$ is known as a regularization parameter. 4) Train the model In order to train the model, we need to calculate the derivative of the cost function with respect to every bias and weight in the network. Using an approximation to the derivative (e.g. using the finite difference method) is much too costly. In total our network has $(64 + 1) \times 50 = 3250$ weights in the hidden layer and $(50 + 1) \times 10 = 510$ weights to the output layer ($ + 1$ for the bias), and the gradient must be calculated for every parameter. 
The backpropagation algorithm is a clever use of the chain rule that allows us to calculate gradient efficently. Here we will simply state the backpropagation equations that we will use for our network, and then a derivation is given at the end of this tutorial. The error $\delta_i^o$ at each output neuron $i$ is just the difference between the output probability $\hat{y}_i$ and the correct label $y_i$ (0 or 1 using one-hot vectors): $$ \delta_i^o = \hat{y}_i - y_i .$$ The gradient of the cost function with respect to each output weight $w_{i,j}^o$ is then $$ \frac{\partial \mathcal{C}}{\partial w_{i,j}^o} = \delta_i^o a_j^h ,$$ where $a_j^h$ is the activation at the $j$-th neuron in the hidden layer. The gradient with respect to each output bias $b_i^o$ is $$ \frac{\partial \mathcal{C}}{\partial b_i^o} = \delta_i^o .$$ The error at each hidden layer neuron $\delta_i^h$ is given as $$ \delta_i^h = \sum_{k=0}^{K-1} \delta_k^o w_{ki}^o f'(z_i^h) ,$$ where $K$ is the number of output neurons or categories and $f'(z_i^h)$ is the derivative of the activation function: $$ f'(z_i^h) = \sigma '(z_i^h) = \sigma(z_i^h)(1 - \sigma(z_i^h)) = a_i^h (1 - a_i^h) ,$$ since our activation function is the sigmoid/logistic function. The gradient with respect to each hidden layer weight is: $$ \frac{\partial \mathcal{C}}{\partial w_{i,j}^h} = \delta_i^h x_j ,$$ and the gradient with respect to the hidden bias $$ \frac{\partial \mathcal{C}}{\partial b_i^h} = \delta_i^h .$$ The regularization terms using the L2-norm are just $$ \frac{\partial }{\partial w_{ij}} (\lambda \sum_{ij} w_{ij}^2) = 2 \lambda w_{ij} = \hat{\lambda} w_{ij} ,$$ for the weights in both the output and hidden layers. Matrix multiplication Text. 
End of explanation """ class NeuralNetwork: def __init__( self, X_data, Y_data, n_hidden_neurons=50, n_categories=10, epochs=10, batch_size=100, eta=0.1, lmbd=0.0, ): self.X_data_full = X_data self.Y_data_full = Y_data self.n_inputs = X_data.shape[0] self.n_features = X_data.shape[1] self.n_hidden_neurons = n_hidden_neurons self.n_categories = n_categories self.epochs = epochs self.batch_size = batch_size self.iterations = self.n_inputs // self.batch_size self.eta = eta self.lmbd = lmbd self.create_biases_and_weights() def create_biases_and_weights(self): self.hidden_weights = np.random.randn(self.n_features, self.n_hidden_neurons) self.hidden_bias = np.zeros(self.n_hidden_neurons) + 0.01 self.output_weights = np.random.randn(self.n_hidden_neurons, self.n_categories) self.output_bias = np.zeros(self.n_categories) + 0.01 def feed_forward(self): self.z1 = np.matmul(self.X_data, self.hidden_weights) + self.hidden_bias self.a1 = sigmoid(self.z1) self.z2 = np.matmul(self.a1, self.output_weights) + self.output_bias exp_term = np.exp(self.z2) self.probabilities = exp_term / np.sum(exp_term, axis=1, keepdims=True) def feed_forward_out(self, X): z1 = np.matmul(X, self.hidden_weights) + self.hidden_bias a1 = sigmoid(z1) z2 = np.matmul(a1, self.output_weights) + self.output_bias exp_term = np.exp(z2) probabilities = exp_term / np.sum(exp_term, axis=1, keepdims=True) return probabilities def backpropagation(self): error_output = self.probabilities - self.Y_data error_hidden = np.matmul(error_output, self.output_weights.T) * self.a1 * (1 - self.a1) self.output_weights_gradient = np.matmul(self.a1.T, error_output) self.output_bias_gradient = np.sum(error_output, axis=0) self.hidden_weights_gradient = np.matmul(self.X_data.T, error_hidden) self.hidden_bias_gradient = np.sum(error_hidden, axis=0) if self.lmbd > 0.0: self.output_weights_gradient += self.lmbd * self.output_weights self.hidden_weights_gradient += self.lmbd * self.hidden_weights self.output_weights -= self.eta * 
self.output_weights_gradient self.output_bias -= self.eta * self.output_bias_gradient self.hidden_weights -= self.eta * self.hidden_weights_gradient self.hidden_bias -= self.eta * self.hidden_bias_gradient def predict(self, X): probabilities = self.feed_forward_out(X) return np.argmax(probabilities, axis=1) def predict_probabilities(self, X): probabilities = self.feed_forward_out(X) return probabilities def train(self): data_indices = np.arange(self.n_inputs) for i in range(self.epochs): for j in range(self.iterations): chosen_datapoints = np.random.choice( data_indices, size=self.batch_size, replace=False ) self.X_data = self.X_data_full[chosen_datapoints] self.Y_data = self.Y_data_full[chosen_datapoints] self.feed_forward() self.backpropagation() """ Explanation: Full object-oriented implementation End of explanation """ eta = 0.1 lmbd = 0.1 epochs = 10 batch_size = 100 dnn = NeuralNetwork(X_train, Y_train_onehot, eta=eta, lmbd=lmbd, epochs=epochs, batch_size=batch_size, n_hidden_neurons=n_hidden_neurons, n_categories=n_categories) dnn.train() test_predict = dnn.predict(X_test) print("Accuracy score on test set: ", accuracy_score(Y_test, test_predict)) """ Explanation: 5) Evaluate model performance on test data End of explanation """ eta_vals = np.logspace(-5, 0, 6) lmbd_vals = np.logspace(-5, 0, 6) DNN_numpy = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object) for i, eta in enumerate(eta_vals): for j, lmbd in enumerate(lmbd_vals): dnn = NeuralNetwork(X_train, Y_train_onehot, eta=eta, lmbd=lmbd, epochs=epochs, batch_size=batch_size, n_hidden_neurons=n_hidden_neurons, n_categories=n_categories) dnn.train() DNN_numpy[i][j] = dnn test_predict = dnn.predict(X_test) print("Learning rate = ", eta) print("Lambda = ", lmbd) print("Accuracy score on test set: ", accuracy_score(Y_test, test_predict)) print() """ Explanation: 6) Adjust hyperparameters (if necessary, network architecture) End of explanation """ from sklearn.neural_network import MLPClassifier DNN_scikit 
= np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object) for i, eta in enumerate(eta_vals): for j, lmbd in enumerate(lmbd_vals): dnn = MLPClassifier(hidden_layer_sizes=(n_hidden_neurons), activation='logistic', alpha=lmbd, learning_rate_init=eta, max_iter=100) dnn.fit(X_train, Y_train) DNN_scikit[i][j] = dnn print("Learning rate = ", eta) print("Lambda = ", lmbd) print("Accuracy score on test set: ", dnn.score(X_test, Y_test)) print() """ Explanation: scikit-learn implementation End of explanation """
melissawm/oceanobiopython
exemplos/exemplo_5/CTD_Data.ipynb
gpl-3.0
import pandas as pd """ Explanation: Exemplo: manipulação de NaNs e limpeza de dados Neste exemplo, usaremos um arquivo com muitos dados ausentes (representados por NaNs) para explorar o conceito de filtros. Além disso, vamos fazer um gráfico simples para representar os dados. Para isso, vamos usar duas bibliotecas importantes: Pandas (que já mencionamos) e matplotlib. End of explanation """ dados = pd.read_csv('data_from_odv_data_carbon-sse_after_correction_spikes_v3_O2_corr.txt', sep = '\t', lineterminator='\n') dados """ Explanation: Primeiramente, vamos ler o arquivo usando tabs como separadores. End of explanation """ dados.Station """ Explanation: Agora, vamos tentar identificar de quantas estações temos dados disponíveis. End of explanation """ dados.Station.isnull() """ Explanation: Agora, queremos identificar quantas estações temos; para isso, vamos identificar todas as linhas em que a coluna Station tem valores válidos. Podemos fazer isso de duas maneiras: primeiro, vamos identificar todas as linhas em que a coluna Station contém NaNs: End of explanation """ dados.Station.notnull() """ Explanation: Agora, vamos fazer o oposto: verificar quais linhas da coluna Station não tem NaNs: End of explanation """ dados[dados.Station.notnull()] """ Explanation: Agora, podemos fazer um filtro: como o resultado da operação acima é uma coluna com valores Verdadeiro (True) ou Falso (False), podemos selecionar entre os dados apenas aquelas linhas para as quais a operação acima resultou em True. Isso é uma forma de slicing, mas como envolve uma operação lógica (algo que retorna verdadeiro ou falso) chamamos isso de filtro: End of explanation """ estacoes = dados[dados.Station.notnull()] """ Explanation: Desta forma, o resultado acima nos retorna todas as linhas da planilha dados que contém informações sobre estações; de fato, cada linha acima corresponde a uma estação. 
End of explanation """ estacoes.index """ Explanation: Podemos investigar os índices desta nova tabela estacoes (a coluna mais à esquerda mostrada acima, que não faz parte da tabela, mas é um índice criado pelo Pandas para acessar os elementos da tabela) End of explanation """ estacoes.index[4] estacoes.index[1] """ Explanation: Observe que os índices da tabela estacoes ainda se referem aos índices das linhas correspondentes na tabela original! Isso ocorre pois extraímos a tabela estacoes da tabela dados. End of explanation """ dados.loc[estacoes.index[0]+1:estacoes.index[1]-1,:] """ Explanation: Agora, vamos analisar o que acontece apenas na primeira estação; para isso, vamos selecionar da tabela original apenas as linhas que estão entre as informações da estação 1 e as informações da estação 2 (lembrando que os índices do Python iniciam-se no zero). Usando o método loc: End of explanation """ estacao1 = dados.loc[estacoes.index[0]+1:estacoes.index[1]-1,:] """ Explanation: Salvamos estes dados na nova tabela estacao1: End of explanation """ estacao1.columns estacao1['Oxygen [ml l]'] estacao1['Oxygen [ml l]'].mean() """ Explanation: Agora, queremos extrair dos dados desta estação a média das medições de oxigênio na água. End of explanation """ import matplotlib.pyplot as plt %matplotlib inline plt.plot(estacao1['Oxygen [ml l]']) """ Explanation: Agora, vamos fazer um gráfico das medições, marcando a média calculada acima no gráfico. End of explanation """ list(estacao1.index.values) """ Explanation: Observe que é possível obtermos uma lista dos índices de uma tabela do Pandas usando o método tabela.index.values. 
No nosso caso, para nossa tabela estacao1, temos: End of explanation """ plt.plot(list(estacao1.index.values),estacao1['Oxygen [ml l]']) """ Explanation: Assim, é possível obtermos o mesmo gráfico que o visto acima, com o seguinte comando: End of explanation """ plt.plot(estacao1['Oxygen [ml l]']) plt.axhline(y=estacao1['Oxygen [ml l]'].mean(), linestyle = "dashed", color = "r") plt.title("Oxygen [ml l] for Station 1") """ Explanation: Finalmente, vamos plotar nosso gráfico incluindo uma linha horizontal denotando a média dos valores. Calculamos a média usando o método mean do Pandas. End of explanation """
BinRoot/TensorFlow-Book
ch02_basics/Concept08_TensorBoard.ipynb
mit
import tensorflow as tf import numpy as np raw_data = np.random.normal(10, 1, 100) """ Explanation: Ch 02: Concept 08 Using TensorBoard TensorBoard is a great way to visualize what's happening behind the code. In this example, we'll loop through some numbers to improve our guess of the average value. Then we can visualize the results on TensorBoard. Let's just set ourselves up with some data to work with: End of explanation """ alpha = tf.constant(0.05) curr_value = tf.placeholder(tf.float32) prev_avg = tf.Variable(0.) update_avg = alpha * curr_value + (1 - alpha) * prev_avg """ Explanation: The moving average is defined as follows: End of explanation """ avg_hist = tf.summary.scalar("running_average", update_avg) value_hist = tf.summary.scalar("incoming_values", curr_value) merged = tf.summary.merge_all() writer = tf.summary.FileWriter("./logs") """ Explanation: Here's what we care to visualize: End of explanation """ init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) for i in range(len(raw_data)): summary_str, curr_avg = sess.run([merged, update_avg], feed_dict={curr_value: raw_data[i]}) sess.run(tf.assign(prev_avg, curr_avg)) print(raw_data[i], curr_avg) writer.add_summary(summary_str, i) """ Explanation: Time to compute the moving averages. We'll also run the merged op to track how the values change: End of explanation """ #made the logs be written successfully writer.close() """ Explanation: Check out the visualization by running TensorBoard from the terminal: $ tensorboard --logdir=path/to/logs End of explanation """