markdown
stringlengths
0
1.02M
code
stringlengths
0
832k
output
stringlengths
0
1.02M
license
stringlengths
3
36
path
stringlengths
6
265
repo_name
stringlengths
6
127
Design 2A
# --- Design 2A: load the RADICAL-Pilot session and derive per-stage durations ---
sid = '../Data/Design2a/design2a_11k_test5/'
rp_sessionDes2 = ra.Session(stype='radical.pilot', src=sid)
unitsDes2 = rp_sessionDes2.filter(etype='unit', inplace=False)
execUnits = unitsDes2.filter(uid=['unit.000002', 'unit.000003', 'unit.000004', 'unit.000001'],
                             inplace=False)
exec_units_setup_des2 = execUnits.duration(event=[{ru.EVENT: 'exec_start'},
                                                  {ru.EVENT: 'exec_stop'}])
exec_units_agent_des2 = execUnits.duration([rp.AGENT_STAGING_INPUT,
                                            rp.UMGR_STAGING_OUTPUT_PENDING])
exec_units_clientDes2 = execUnits.duration([rp.NEW, rp.DONE])
SetupUnit = unitsDes2.filter(uid=['unit.000000'], inplace=False)
setup_units_clientDes2 = SetupUnit.duration(event=[{ru.STATE: rp.NEW},
                                                   {ru.EVENT: 'exec_start'}])
pilotDes2 = rp_sessionDes2.filter(etype='pilot', inplace=False)


def _load_tiles(csv_path, tile_type):
    """Read one tiling CSV and return its (Start, End) intervals tagged with tile_type."""
    tiles = pd.read_csv(csv_path)
    return pd.DataFrame({'Start': tiles['Start'],
                         'End': tiles['End'],
                         'Type': tile_type})


# Per-node task timelines: each unit ran two geolocate tasks and one ransac task.
# (DataFrame.append was removed in pandas 2.0; pd.concat is the supported way
# to stack frames, and it replaces the original row-by-row .loc appends.)
Node1 = pd.concat([_load_tiles(sid + 'pilot.0000/unit.000000/geolocate1.csv', 'Geo1'),
                   _load_tiles(sid + 'pilot.0000/unit.000000/geolocate2.csv', 'Geo2'),
                   _load_tiles(sid + 'pilot.0000/unit.000000/ransac1.csv', 'Ransac1')],
                  ignore_index=True)
Node2 = pd.concat([_load_tiles(sid + 'pilot.0000/unit.000001/geolocate3.csv', 'Geo3'),
                   _load_tiles(sid + 'pilot.0000/unit.000001/geolocate4.csv', 'Geo4'),
                   _load_tiles(sid + 'pilot.0000/unit.000001/ransac2.csv', 'Ransac2')],
                  ignore_index=True)
Node3 = pd.concat([_load_tiles(sid + 'pilot.0000/unit.000002/geolocate5.csv', 'Geo5'),
                   _load_tiles(sid + 'pilot.0000/unit.000002/geolocate6.csv', 'Geo6'),
                   _load_tiles(sid + 'pilot.0000/unit.000002/ransac3.csv', 'Ransac3')],
                  ignore_index=True)
Node4 = pd.concat([_load_tiles(sid + 'pilot.0000/unit.000003/geolocate7.csv', 'Geo7'),
                   _load_tiles(sid + 'pilot.0000/unit.000003/geolocate8.csv', 'Geo8'),
                   _load_tiles(sid + 'pilot.0000/unit.000003/ransac4.csv', 'Ransac4')],
                  ignore_index=True)

des2ADF = pd.DataFrame(columns=['TTX', 'SetupOverhead', 'AgentOverhead', 'ClientOverhead'])
AllNodes = pd.concat([Node1, Node2, Node3, Node4], ignore_index=True)

# Re-load the session. NOTE(review): the exec-unit selection differs from the
# first load above (here unit.000000 is among the executing units and the
# durations are taken over *all* units) — this mirrors the original cell;
# confirm which selection is intended.
rp_sessionDes2 = ra.Session(stype='radical.pilot', src=sid)
unitsDes2 = rp_sessionDes2.filter(etype='unit', inplace=False)
execUnits = unitsDes2.filter(uid=['unit.000000', 'unit.000001', 'unit.000002', 'unit.000003'],
                             inplace=False)
exec_units_setup_des2 = unitsDes2.duration(event=[{ru.EVENT: 'exec_start'},
                                                  {ru.EVENT: 'exec_stop'}])
exec_units_agent_des2 = unitsDes2.duration([rp.AGENT_STAGING_INPUT,
                                            rp.UMGR_STAGING_OUTPUT_PENDING])
exec_units_clientDes2 = execUnits.duration([rp.NEW, rp.DONE])
pilotDes2 = rp_sessionDes2.filter(etype='pilot', inplace=False)
pilot_duration = pilotDes2.duration([rp.PMGR_ACTIVE, rp.FINAL])

# TTX: wall-clock span of the actual task executions recorded in the CSVs.
des2_duration = AllNodes['End'].max() - AllNodes['Start'].min()
# Each overhead is the extra time of the enclosing stage over the inner one.
setupDes2_overhead = exec_units_setup_des2 - des2_duration
agentDes2_overhead = exec_units_agent_des2 - exec_units_setup_des2
clientDes2_overhead = exec_units_clientDes2 - exec_units_agent_des2
# Time units waited for the pilot to become active (subtracted from client overhead).
queue_time = (max(pilotDes2.timestamps(event=[{ru.STATE: rp.PMGR_ACTIVE}]))
              - max(execUnits.timestamps(event=[{ru.STATE: rp.AGENT_STAGING_INPUT_PENDING}])))
des2ADF.loc[len(des2ADF)] = [des2_duration, setupDes2_overhead,
                             agentDes2_overhead, clientDes2_overhead - queue_time]
print(des2ADF)

# --- Figure 1: mean time-to-execution (TTX) for the three designs ---
fig, axis = plt.subplots(nrows=1, ncols=1, figsize=(15, 7.5))
x1 = np.arange(3)
_ = axis.bar(x1[0], des1DF['TTX'].mean(), width=0.5, color=blues(300), label='Design 1 TTX')
_ = axis.bar(x1[1], des2DF['TTX'].mean(), width=0.5, color=blues(200), label='Design 2 TTX')
_ = axis.bar(x1[2], des2ADF['TTX'].mean(), width=0.5, color=blues(100), label='Design 2A TTX')
_ = axis.set_xticks([0, 1, 2])
_ = axis.grid(which='both', linestyle=':', linewidth=1)
_ = axis.set_xticklabels(['Design 1', 'Design 2', 'Design 2A'], fontsize=36)
_ = axis.set_ylabel('Time in seconds', fontsize=26)
_ = axis.set_yticklabels(axis.get_yticks().astype('int').tolist(), fontsize=24)
#fig.savefig('geo_ttx.pdf',dpi=800,bbox='tight')

dist_overhead = np.load('../Data/dist_dataset.npy')
# Wall-clock durations (seconds) of ten dataset-discovery runs.
DiscDurations = [1861.404363739, 1872.631383787, 1870.355146581, 1852.347904858,
                 1857.771844937, 1868.644424397, 1873.176510421, 1851.527881958,
                 1870.128898667, 1856.676059379]

# --- Figure 2: stacked overheads per design ---
fig, axis = plt.subplots(nrows=1, ncols=1, figsize=(9, 7.5))
x1 = np.arange(3)
_ = axis.bar(x1[0], des1DF['AgentOverhead'].mean(), width=0.5,
             color=reds(200), label='RP Agent Overhead Design 1')
_ = axis.bar(x1[0], des1DF['ClientOverhead'].mean(),
             bottom=des1DF['AgentOverhead'].mean(), width=0.5,
             color=reds(150), label='RP Client Overhead Design 1')
_ = axis.bar(x1[0], des1DF['EnTKOverhead'].mean(),
             bottom=des1DF['ClientOverhead'].mean() + des1DF['AgentOverhead'].mean(),
             width=0.5, color=reds(100), label='EnTK Overheads Design 1')
_ = axis.bar(x1[0], np.mean(DiscDurations), yerr=np.std(DiscDurations),
             bottom=des1DF['ClientOverhead'].mean() + des1DF['AgentOverhead'].mean()
                    + des1DF['EnTKOverhead'].mean(),
             width=0.5, color=reds(50), label='Design 1 Dataset Discovery')
_ = axis.bar(x1[1], des2DF['AgentOverhead'].mean(), width=0.5,
             color=greens(200), label='RP Agent Overhead Design 2')
_ = axis.bar(x1[1], des2DF['ClientOverhead'].mean(),
             bottom=des2DF['AgentOverhead'].mean(), width=0.5,
             color=greens(150), label='RP Client Overhead Design 2')
_ = axis.bar(x1[1], (des2DF['SetupOverhead'] + des2DF['SetupOverhead2']).mean(),
             bottom=des2DF['ClientOverhead'].mean() + des2DF['AgentOverhead'].mean(),
             width=0.5, color=greens(100), label='Design 2 Setup Overhead')
_ = axis.bar(x1[1], np.mean(DiscDurations), yerr=np.std(DiscDurations),
             bottom=des2DF['ClientOverhead'].mean() + des2DF['AgentOverhead'].mean()
                    + (des2DF['SetupOverhead'] + des2DF['SetupOverhead2']).mean(),
             width=0.5, color=greens(50), label='Design 2 Dataset Discovery')
# log=1 on the first 2A bar switches the whole y axis to log scale.
_ = axis.bar(x1[2], des2ADF['AgentOverhead'].mean(),  # yerr=des2ADF['AgentOverhead'].std(),
             width=0.5, color=purples(250), label='RP Agent Overhead Design 2A', log=1)
_ = axis.bar(x1[2], des2ADF['ClientOverhead'].mean(),  # yerr=des2ADF['ClientOverhead'].std(),
             bottom=des2ADF['AgentOverhead'].mean(), width=0.5,
             color=purples(200), label='RP Client Overhead Design 2A')
_ = axis.bar(x1[2], des2ADF['SetupOverhead'].mean(),  # yerr=des2ADF['SetupOverhead'].std(),
             bottom=des2ADF['ClientOverhead'].mean() + des2ADF['AgentOverhead'].mean(),
             width=0.5, color=purples(150), label='Design 2A Setup Overhead')
_ = axis.bar(x1[2], dist_overhead.mean(), yerr=dist_overhead.std(),
             bottom=des2ADF['ClientOverhead'].mean() + des2ADF['AgentOverhead'].mean()
                    + des2ADF['SetupOverhead'].mean(),
             width=0.5, color=purples(100), label='Design 2A Distributing Overhead')
_ = axis.bar(x1[2], np.mean(DiscDurations), yerr=np.std(DiscDurations),
             bottom=des2ADF['ClientOverhead'].mean() + des2ADF['AgentOverhead'].mean()
                    + des2ADF['SetupOverhead'].mean() + dist_overhead.mean(),
             width=0.5, color=purples(50), label='Design 2A Dataset Discovery')
_ = axis.set_xticks([0, 1, 2])
_ = axis.grid(which='both', linestyle=':', linewidth=1)
_ = axis.set_ylabel('Time in seconds', fontsize=26)
_ = axis.set_xticklabels(['Design 1', 'Design 2', 'Design 2A'], fontsize=26)
_ = axis.set_yticks([1, 10, 100, 1000, 10000, 100000])
_ = axis.set_yticklabels(axis.get_yticks().astype('int').tolist(), fontsize=24)
#_ = axis.legend(fontsize=22,loc = 'lower center', bbox_to_anchor = (0,-.55,1,1), ncol=2)
#_ = fig.subplots_adjust(bottom=.205)
fig.savefig('geo_overheads.pdf', dpi=800, pad_inches=0)
_____no_output_____
MIT
Geolocation/Notebooks/Des1Des2OverheadComp.ipynb
radical-experiments/iceberg_escience
Running, Debugging, Testing & Packaging
# Open the sample extension project folder in VS Code (IPython shell escape).
!code ./1-helloconnectedworld
_____no_output_____
MIT
1_RunExtension.ipynb
kevcunnane/msbuild_ads_demo
NetworksIn this last exercise, you will start to put into practice some of the concepts learned in class. Start by importing the libraries that you will need.
import networkx as nx import matplotlib.pyplot as plt %matplotlib inline
_____no_output_____
MIT
ix-1-lab-intro/4-networks.ipynb
emlg/Internet-Analytics
We give you the network of the 34 members of [karate clubs](https://en.wikipedia.org/wiki/Zachary's_karate_club) on the campus. Each node is a member of one of two clubs ("Mr. Hi" or "Officer") and there is an edge between two nodes if the two are friends outside of the club.
# Load Zachary's karate-club graph bundled with networkx: 34 members, each
# tagged with a 'club' attribute ("Mr. Hi" or "Officer").
karate_club = nx.karate_club_graph()
_____no_output_____
MIT
ix-1-lab-intro/4-networks.ipynb
emlg/Internet-Analytics
Visualization1. Visualize the network by - painting every 'Mr. Hi' member in **blue** and every 'Officer' member in **red**, and - drawing the label corresponding to the member index (between 1 and 34) on each node.2. What can you tell about the nodes and their connections? Do you see some nodes with more/less links than others? Who can they be?
# Your code goes here
# Color each member by club affiliation (blue = "Mr. Hi", red = "Officer")
# and draw the labelled graph.
# NOTE: Graph.node was removed in networkx 2.4; Graph.nodes is the supported
# accessor for per-node attributes.
color_map = ['blue' if karate_club.nodes[i]['club'] == "Mr. Hi" else 'red'
             for i in range(0, karate_club.order())]
nx.draw_networkx(karate_club, node_color = color_map, with_labels = True)
plt.axis('off');
_____no_output_____
MIT
ix-1-lab-intro/4-networks.ipynb
emlg/Internet-Analytics
Degree Distribution1. Plot the degree distribution of the network.2. Does it confirm your answer to the previous question?
# Your code goes here
# Collect every node's degree and plot them as points; highly connected
# members (hubs) stand out as high outliers.
degre = [karate_club.degree(member) for member in karate_club]
plt.plot(degre, '*')
plt.show()
_____no_output_____
MIT
ix-1-lab-intro/4-networks.ipynb
emlg/Internet-Analytics
Convolutional AutoencoderSticking with the MNIST dataset, let's improve our autoencoder's performance using convolutional layers. Again, loading modules and the data.
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# NOTE(review): tensorflow.examples.tutorials was removed in TF 2.x — this
# notebook assumes TF 1.x.
from tensorflow.examples.tutorials.mnist import input_data
# Download/load MNIST; validation_size=0 keeps every training image in the
# train split (no validation set is needed for the autoencoder).
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
# Show one sample digit (flattened 784-vector reshaped back to 28x28).
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
_____no_output_____
MIT
autoencoder/Convolutional_Autoencoder_Solution.ipynb
vinidixit/deep-learning-repo
Network ArchitectureThe encoder part of the network will be a typical convolutional pyramid. Each convolutional layer will be followed by a max-pooling layer to reduce the dimensions of the layers. The decoder though might be something new to you. The decoder needs to convert from a narrow representation to a wide reconstructed image. For example, the representation could be a 4x4x8 max-pool layer. This is the output of the encoder, but also the input to the decoder. We want to get a 28x28x1 image out from the decoder so we need to work our way back up from the narrow decoder input layer. A schematic of the network is shown below.Here our final encoder layer has size 4x4x8 = 128. The original images have size 28x28 = 784, so the encoded vector is roughly 16% the size of the original image. These are just suggested sizes for each of the layers. Feel free to change the depths and sizes, but remember our goal here is to find a small representation of the input data. What's going on with the decoderOkay, so the decoder has these "Upsample" layers that you might not have seen before. First off, I'll discuss a bit what these layers *aren't*. Usually, you'll see **transposed convolution** layers used to increase the width and height of the layers. They work almost exactly the same as convolutional layers, but in reverse. A stride in the input layer results in a larger stride in the transposed convolution layer. For example, if you have a 3x3 kernel, a 3x3 patch in the input layer will be reduced to one unit in a convolutional layer. Comparatively, one unit in the input layer will be expanded to a 3x3 path in a transposed convolution layer. The TensorFlow API provides us with an easy way to create the layers, [`tf.nn.conv2d_transpose`](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d_transpose). However, transposed convolution layers can lead to artifacts in the final images, such as checkerboard patterns. 
This is due to overlap in the kernels which can be avoided by setting the stride and kernel size equal. In [this Distill article](http://distill.pub/2016/deconv-checkerboard/) from Augustus Odena, *et al*, the authors show that these checkerboard artifacts can be avoided by resizing the layers using nearest neighbor or bilinear interpolation (upsampling) followed by a convolutional layer. In TensorFlow, this is easily done with [`tf.image.resize_images`](https://www.tensorflow.org/versions/r1.1/api_docs/python/tf/image/resize_images), followed by a convolution. Be sure to read the Distill article to get a better understanding of deconvolutional layers and why we're using upsampling.> **Exercise:** Build the network shown above. Remember that a convolutional layer with strides of 1 and 'same' padding won't reduce the height and width. That is, if the input is 28x28 and the convolution layer has stride = 1 and 'same' padding, the convolutional layer will also be 28x28. The max-pool layers are used the reduce the width and height. A stride of 2 will reduce the size by a factor of 2. Odena *et al* claim that nearest neighbor interpolation works best for the upsampling, so make sure to include that as a parameter in `tf.image.resize_images` or use [`tf.image.resize_nearest_neighbor`]( `https://www.tensorflow.org/api_docs/python/tf/image/resize_nearest_neighbor). For convolutional layers, use [`tf.layers.conv2d`](https://www.tensorflow.org/api_docs/python/tf/layers/conv2d). For example, you would write `conv1 = tf.layers.conv2d(inputs, 32, (5,5), padding='same', activation=tf.nn.relu)` for a layer with a depth of 32, a 5x5 kernel, stride of (1,1), padding is 'same', and a ReLU activation. Similarly, for the max-pool layers, use [`tf.layers.max_pooling2d`](https://www.tensorflow.org/api_docs/python/tf/layers/max_pooling2d).
# TF1 graph for a convolutional autoencoder: three conv+maxpool stages squeeze
# 28x28x1 down to 4x4x8, then three upsample+conv stages rebuild 28x28x1.
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')

### Encoder
conv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x16
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')
# Now 14x14x16
conv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')
# Now 7x7x8
conv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')
# Now 4x4x8

### Decoder
# Nearest-neighbour upsampling followed by a conv avoids the checkerboard
# artifacts of transposed convolutions.
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x8
conv4 = tf.layers.conv2d(upsample1, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))
# Now 14x14x8
conv5 = tf.layers.conv2d(upsample2, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28))
# Now 28x28x8
conv6 = tf.layers.conv2d(upsample3, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x16

# Raw logits; the sigmoid is applied separately so the loss can use the
# numerically stable sigmoid_cross_entropy_with_logits.
logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)
#Now 28x28x1
decoded = tf.nn.sigmoid(logits, name='decoded')

loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(0.001).minimize(cost)
_____no_output_____
MIT
autoencoder/Convolutional_Autoencoder_Solution.ipynb
vinidixit/deep-learning-repo
TrainingAs before, here we'll train the network. Instead of flattening the images though, we can pass them in as 28x28x1 arrays.
# Train the autoencoder for one epoch (images are both input and target),
# then display ten test digits above their reconstructions.
sess = tf.Session()
epochs = 1
batch_size = 200
sess.run(tf.global_variables_initializer())

n_batches = mnist.train.num_examples // batch_size
for epoch in range(epochs):
    for _step in range(n_batches):
        images, _labels = mnist.train.next_batch(batch_size)
        imgs = images.reshape((-1, 28, 28, 1))
        batch_cost, _ = sess.run([cost, opt],
                                 feed_dict={inputs_: imgs, targets_: imgs})
        print("Epoch: {}/{}...".format(epoch + 1, epochs),
              "Training loss: {:.4f}".format(batch_cost))

fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20, 4))
in_imgs = mnist.test.images[:10]
reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})
for row_imgs, axis_row in zip([in_imgs, reconstructed], axes):
    for digit, ax in zip(row_imgs, axis_row):
        ax.imshow(digit.reshape((28, 28)), cmap='Greys_r')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
_____no_output_____
MIT
autoencoder/Convolutional_Autoencoder_Solution.ipynb
vinidixit/deep-learning-repo
DenoisingAs I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practice. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. We'll use noisy images as input and the original, clean images as targets. Here's an example of the noisy images I generated and the denoised images.![Denoising autoencoder](assets/denoising.png)Since this is a harder problem for the network, we'll want to use deeper convolutional layers here, with more feature maps. I suggest something like 32-32-16 for the depths of the convolutional layers in the encoder, and the same depths going backward through the decoder. Otherwise the architecture is the same as before.> **Exercise:** Build the network for the denoising autoencoder. It's the same as before, but with deeper layers. I suggest 32-32-16 for the depths, but you can play with these numbers, or add more layers.
# Denoising autoencoder: same shape as the previous network but with deeper
# feature maps (32-32-16), trained on noisy inputs against clean targets.
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')

### Encoder
conv1 = tf.layers.conv2d(inputs_, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')
# Now 14x14x32
conv2 = tf.layers.conv2d(maxpool1, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')
# Now 7x7x32
conv3 = tf.layers.conv2d(maxpool2, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')
# Now 4x4x16

### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x16
conv4 = tf.layers.conv2d(upsample1, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))
# Now 14x14x16
conv5 = tf.layers.conv2d(upsample2, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28))
# Now 28x28x32
conv6 = tf.layers.conv2d(upsample3, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32

logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)
#Now 28x28x1
decoded = tf.nn.sigmoid(logits, name='decoded')

loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(0.001).minimize(cost)

sess = tf.Session()
epochs = 100
batch_size = 200
# Set's how much noise we're adding to the MNIST images
noise_factor = 0.5
sess.run(tf.global_variables_initializer())
for e in range(epochs):
    for ii in range(mnist.train.num_examples//batch_size):
        batch = mnist.train.next_batch(batch_size)
        # Get images from the batch
        imgs = batch[0].reshape((-1, 28, 28, 1))
        # Add random noise to the input images
        noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
        # Clip the images to be between 0 and 1
        noisy_imgs = np.clip(noisy_imgs, 0., 1.)
        # Noisy images as inputs, original images as targets
        batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
                                                         targets_: imgs})
        print("Epoch: {}/{}...".format(e+1, epochs),
              "Training loss: {:.4f}".format(batch_cost))
_____no_output_____
MIT
autoencoder/Convolutional_Autoencoder_Solution.ipynb
vinidixit/deep-learning-repo
Checking out the performanceHere I'm adding noise to the test images and passing them through the autoencoder. It does a surprisingly good job of removing the noise, even though it's sometimes difficult to tell what the original number is.
# Corrupt ten test digits with the same Gaussian noise used during training,
# run them through the trained autoencoder, and show noisy inputs (top row)
# above the denoised reconstructions (bottom row).
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20, 4))
in_imgs = mnist.test.images[:10]
noisy_imgs = np.clip(in_imgs + noise_factor * np.random.randn(*in_imgs.shape), 0., 1.)
reconstructed = sess.run(decoded,
                         feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})
for row_imgs, axis_row in zip([noisy_imgs, reconstructed], axes):
    for digit, ax in zip(row_imgs, axis_row):
        ax.imshow(digit.reshape((28, 28)), cmap='Greys_r')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
_____no_output_____
MIT
autoencoder/Convolutional_Autoencoder_Solution.ipynb
vinidixit/deep-learning-repo
1.1. Create_Data_Augmentation_With_Augmentorhttps://github.com/mdbloice/Augmentor
# install the package
# !pip install Augmentor
import Augmentor

label = '200_frente'
path_img = 'data_augmentation/source/' + label + '/'

# Re-usable operation specs (method name, kwargs); probability=1 means the
# operation is always applied.
ROTATE20 = ('rotate', dict(probability=1.0, max_left_rotation=20, max_right_rotation=20))
ROTATE25 = ('rotate', dict(probability=1, max_left_rotation=25, max_right_rotation=25))
DISTORT = ('random_distortion', dict(probability=1, grid_width=30, grid_height=30, magnitude=8))
SKEW06 = ('skew', dict(probability=1, magnitude=0.6))
SKEW08 = ('skew', dict(probability=1, magnitude=0.8))
SHEAR25 = ('shear', dict(probability=1, max_shear_left=25, max_shear_right=25))


def _run_pipeline(*ops):
    """Build a pipeline over path_img, apply the given ops in order, then
    render one augmented copy of every source image via process()."""
    pipeline = Augmentor.Pipeline(path_img)
    for method, kwargs in ops:
        getattr(pipeline, method)(**kwargs)
    pipeline.process()


# Same pipeline mix as before; the duplicated first pipeline is kept on
# purpose — each call renders another pass over the source images.
_run_pipeline(ROTATE20, DISTORT)            # random distortion + rotate
_run_pipeline(ROTATE20, DISTORT)            # random distortion + rotate (second pass)
_run_pipeline(ROTATE25, DISTORT)            # random distortion + stronger rotate
_run_pipeline(ROTATE25, SKEW06, DISTORT)    # rotate + skew + random distortion
_run_pipeline(ROTATE25, SKEW08)             # rotate + skew
_run_pipeline(SHEAR25, ROTATE25)            # shear + rotate
_run_pipeline(SHEAR25, ROTATE25, DISTORT)   # shear + rotate + random distortion
Executing Pipeline: 0%| | 0/25 [00:00<?, ? Samples/s]
MIT
1.1. Create_Data_Augmentation_With_Augmentor.ipynb
christianquicano/tflite_classification_banknotes
Practice geospatial aggregations in geopandas before writing them to .py files
%load_ext autoreload %autoreload 2 import sys sys.path.append('../utils') import wd_management wd_management.set_wd_root() import geopandas as gp import pandas as pd import requests res = requests.get('https://services5.arcgis.com/GfwWNkhOj9bNBqoJ/arcgis/rest/services/NYC_Public_Use_Microdata_Areas_PUMAs_2010/FeatureServer/0/query?where=1=1&outFields=*&outSR=4326&f=pgeojson') res_json = res.json() NYC_PUMAs = gp.GeoDataFrame.from_features(res_json['features']) NYC_PUMAs.set_crs('EPSG:4326',inplace=True) NYC_PUMAs.set_index('PUMA', inplace=True) NYC_PUMAs.head(5) NYC_PUMAs.plot()
_____no_output_____
MIT
notebooks/Area_within_Historic_District_Exploratory.ipynb
NYCPlanning/db-equitable-development-tool
Ok looks good. Load in historic districts. [This stackoverflow post](https://gis.stackexchange.com/questions/327197/typeerror-input-geometry-column-must-contain-valid-geometry-objects) was helpful
# Load LPC historic districts from a CSV whose geometry column is WKT text,
# convert to real geometries, and project to NY state-plane feet (EPSG:2263)
# so areas can be computed in square feet.
from shapely import wkt
hd = gp.read_file('.library/lpc_historic_district_areas.csv')
# CSV geometry arrives as WKT strings; parse and promote to the active geometry.
hd['the_geom'] = hd['the_geom'].apply(wkt.loads)
hd.set_geometry(col='the_geom', inplace=True, crs='EPSG:4326')
# Split multi-polygons into one row per polygon; explode resets the geometry
# column, so it is re-declared afterwards.
hd = hd.explode(column='the_geom')
hd.set_geometry('the_geom', inplace=True)
hd = hd.to_crs('EPSG:2263')
hd = hd.reset_index()
hd.plot()
/var/folders/tg/01b257jj0dzg5fzzc3p3gj0w0000gq/T/ipykernel_26102/3499734924.py:1: FutureWarning: Currently, index_parts defaults to True, but in the future, it will default to False to be consistent with Pandas. Use `index_parts=True` to keep the current behavior and True/False to silence the warning. hd= hd.explode(column='the_geom')
MIT
notebooks/Area_within_Historic_District_Exploratory.ipynb
NYCPlanning/db-equitable-development-tool
Ok great next do some geospatial analysis. Start only with PUMA 3807 as it has a lot of historic area
def fraction_area_historic(PUMA, hd):
    """Return (fraction of the PUMA's area inside historic districts,
    historic area in square miles) for one PUMA row.

    PUMA is a row of NYC_PUMAs (its .geometry is in EPSG:4326); hd is the
    historic-districts frame already projected to EPSG:2263 (feet).
    """
    try:
        # Wrap the single geometry so it can be projected and overlaid.
        gdf = gp.GeoDataFrame(geometry = [PUMA.geometry], crs = 'EPSG:4326')
        gdf = gdf.to_crs('EPSG:2263')
        overlay = gp.overlay(hd, gdf, 'intersection')
        if overlay.empty:
            return 0, 0
        else:
            fraction = overlay.area.sum()/gdf.geometry.area.sum()
            # 5280 ft per mile -> 5280**2 sq ft per sq mile.
            return fraction, overlay.area.sum()/(5280**2)
    except Exception as e:
        # NOTE(review): on failure this logs and implicitly returns None,
        # which result_type='expand' below cannot unpack — confirm whether
        # failures ever occur / whether (0, 0) was intended here.
        print(f'broke on {PUMA}')
        print(e)

# Compute both columns for every PUMA in one pass.
NYC_PUMAs[['fraction_area_historic', 'total_area_historic']] = NYC_PUMAs.apply(
    fraction_area_historic, axis=1, args=(hd,), result_type='expand')
NYC_PUMAs.sort_values('fraction_area_historic', ascending=False)
_____no_output_____
MIT
notebooks/Area_within_Historic_District_Exploratory.ipynb
NYCPlanning/db-equitable-development-tool
Superimpose PUMA 3801's historic districts on it to see if 38% looks right
def visualize_overlay(PUMA):
    """Plot one PUMA (green, black edge) with its historic-district
    intersection drawn on top in red, for an eyeball sanity check."""
    puma_frame = NYC_PUMAs.loc[[PUMA]].to_crs('EPSG:2263')
    base = puma_frame.plot(color='green', edgecolor='black')
    gp.overlay(hd, puma_frame, 'intersection').plot(ax=base, color='red');

visualize_overlay('3810')
_____no_output_____
MIT
notebooks/Area_within_Historic_District_Exploratory.ipynb
NYCPlanning/db-equitable-development-tool
Ok great, that looks like about a third to me. From eyeballing the map, more than 20% of PUMA 3806 on the UWS looks to be historic.
# Cross-check the Upper West Side PUMA against the eyeballed ~20% estimate.
visualize_overlay('3806')
_____no_output_____
MIT
notebooks/Area_within_Historic_District_Exploratory.ipynb
NYCPlanning/db-equitable-development-tool
Ah ok, the PUMA geography includes Central Park — worth flagging. Question from Renae: Renae points out that the description of historic districts says "including items that may have been denied designation or overturned." Look at the dataset to see if any columns point to this clearly.
# Inspect the historic-districts frame for status columns that might flag
# denied/overturned designations (per Renae's question).
# NOTE(review): in a notebook only the last expression is displayed — the
# first three lines produce no visible output unless run in separate cells.
hd.head(5)
hd.groupby('status_of_').size()
hd.groupby('current_').size()
hd.groupby('last_actio').size()
_____no_output_____
MIT
notebooks/Area_within_Historic_District_Exploratory.ipynb
NYCPlanning/db-equitable-development-tool
Install Transformers Library
!pip install transformers import numpy as np import pandas as pd import torch import torch.nn as nn from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report import transformers from transformers import AutoModel, BertTokenizerFast # specify GPU device = torch.device("cuda")
_____no_output_____
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
Load Dataset
# Load the SMS spam dataset; expected columns are 'text' and a binary 'label'.
df = pd.read_csv("spamdata_v2.csv")
df.head()
df.shape
# check class distribution (as fractions) to gauge class imbalance
df['label'].value_counts(normalize = True)
_____no_output_____
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
Split train dataset into train, validation and test sets
train_text, temp_text, train_labels, temp_labels = train_test_split(df['text'], df['label'], random_state=2018, test_size=0.3, stratify=df['label']) # we will use temp_text and temp_labels to create validation and test set val_text, test_text, val_labels, test_labels = train_test_split(temp_text, temp_labels, random_state=2018, test_size=0.5, stratify=temp_labels)
_____no_output_____
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
Import BERT Model and BERT Tokenizer
# import BERT-base pretrained model bert = AutoModel.from_pretrained('bert-base-uncased') # Load the BERT tokenizer tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased') # sample data text = ["this is a bert model tutorial", "we will fine-tune a bert model"] # encode text sent_id = tokenizer.batch_encode_plus(text, padding=True, return_token_type_ids=False) # output print(sent_id)
{'input_ids': [[101, 2023, 2003, 1037, 14324, 2944, 14924, 4818, 102, 0], [101, 2057, 2097, 2986, 1011, 8694, 1037, 14324, 2944, 102]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
Tokenization
# get length of all the messages in the train set
seq_len = [len(i.split()) for i in train_text]
pd.Series(seq_len).hist(bins = 30)

# Chosen from the histogram above: 25 tokens covers nearly all messages.
max_seq_len = 25


def _encode(texts):
    """Tokenize a pandas Series of texts, padding/truncating to max_seq_len.

    padding='max_length' replaces the deprecated pad_to_max_length=True
    (same behavior, supported API in current transformers releases).
    """
    return tokenizer.batch_encode_plus(
        texts.tolist(),
        max_length = max_seq_len,
        padding='max_length',
        truncation=True,
        return_token_type_ids=False
    )


# tokenize and encode sequences in the training, validation and test sets
tokens_train = _encode(train_text)
tokens_val = _encode(val_text)
tokens_test = _encode(test_text)
_____no_output_____
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
Convert Integer Sequences to Tensors
def _as_tensors(tokens, labels):
    """Turn one encoded split (ids, attention masks) plus its label Series
    into the (seq, mask, y) tensor triple the DataLoaders expect."""
    return (torch.tensor(tokens['input_ids']),
            torch.tensor(tokens['attention_mask']),
            torch.tensor(labels.tolist()))


# for train set
train_seq, train_mask, train_y = _as_tensors(tokens_train, train_labels)
# for validation set
val_seq, val_mask, val_y = _as_tensors(tokens_val, val_labels)
# for test set
test_seq, test_mask, test_y = _as_tensors(tokens_test, test_labels)
_____no_output_____
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
Create DataLoaders
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler

# mini-batch size shared by training and validation
batch_size = 32

# training set: shuffled every epoch via RandomSampler
train_data = TensorDataset(train_seq, train_mask, train_y)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(
    train_data,
    sampler=train_sampler,
    batch_size=batch_size,
)

# validation set: deterministic order via SequentialSampler
val_data = TensorDataset(val_seq, val_mask, val_y)
val_sampler = SequentialSampler(val_data)
val_dataloader = DataLoader(
    val_data,
    sampler=val_sampler,
    batch_size=batch_size,
)
_____no_output_____
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
Freeze BERT Parameters
# Freeze the pretrained encoder: only the classification head added
# later will receive gradient updates.
for parameter in bert.parameters():
    parameter.requires_grad_(False)
_____no_output_____
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
Define Model Architecture
class BERT_Arch(nn.Module):
    """Binary classification head on top of a (frozen) BERT encoder.

    Pipeline: BERT pooled [CLS] output (768) -> fc1 (512) -> ReLU ->
    dropout(0.1) -> fc2 (2) -> log-softmax over the two classes.
    Log-softmax output pairs with the NLLLoss used for training.
    """

    def __init__(self, bert):
        super(BERT_Arch, self).__init__()
        # pretrained encoder (its parameters are frozen in an earlier cell)
        self.bert = bert
        # dropout layer
        self.dropout = nn.Dropout(0.1)
        # relu activation function
        self.relu = nn.ReLU()
        # dense layer 1
        self.fc1 = nn.Linear(768, 512)
        # dense layer 2 (output layer, 2 classes)
        self.fc2 = nn.Linear(512, 2)
        # softmax activation function (log-probabilities for NLLLoss)
        self.softmax = nn.LogSoftmax(dim=1)

    # define the forward pass
    def forward(self, sent_id, mask):
        # pass the inputs to the model.
        # NOTE(review): tuple unpacking assumes the encoder returns
        # (sequence_output, pooled_output); on transformers >= 4 this
        # requires return_dict=False — confirm the installed version.
        _, cls_hs = self.bert(sent_id, attention_mask=mask)
        x = self.fc1(cls_hs)
        x = self.relu(x)
        x = self.dropout(x)
        # output layer
        x = self.fc2(x)
        # apply softmax activation
        x = self.softmax(x)
        return x


# pass the pre-trained BERT to our defined architecture
model = BERT_Arch(bert)

# push the model to GPU
model = model.to(device)

# optimizer from hugging face transformers
from transformers import AdamW

# define the optimizer; a relatively high LR is tolerable here because
# only the small classification head is trainable (BERT is frozen)
optimizer = AdamW(model.parameters(), lr=1e-3)
_____no_output_____
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
Find Class Weights
from sklearn.utils.class_weight import compute_class_weight

# compute inverse-frequency ("balanced") class weights for the imbalanced
# spam/ham training labels.
# BUG FIX: positional arguments to compute_class_weight were deprecated
# and then removed in scikit-learn >= 0.24; keyword arguments are required.
class_wts = compute_class_weight(
    class_weight="balanced",
    classes=np.unique(train_labels),
    y=train_labels,
)
print(class_wts)

# convert class weights to a float tensor on the training device
weights = torch.tensor(class_wts, dtype=torch.float)
weights = weights.to(device)

# negative log-likelihood loss; the model emits log-softmax, so this
# combination is weighted cross-entropy
cross_entropy = nn.NLLLoss(weight=weights)

# number of training epochs
epochs = 10
_____no_output_____
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
Fine-Tune BERT
# function to train the model
def train():
    """Train for one epoch; return (average loss, stacked predictions)."""
    model.train()

    total_loss = 0
    # per-batch prediction arrays, concatenated at the end
    total_preds = []

    # iterate over batches
    for step, batch in enumerate(train_dataloader):
        # progress update after every 50 batches
        if step % 50 == 0 and not step == 0:
            print(' Batch {:>5,} of {:>5,}.'.format(step, len(train_dataloader)))

        # push the batch to the compute device
        batch = [r.to(device) for r in batch]
        sent_id, mask, labels = batch

        # clear previously calculated gradients
        model.zero_grad()

        # get model predictions for the current batch
        preds = model(sent_id, mask)

        # loss between actual and predicted values
        loss = cross_entropy(preds, labels)
        total_loss = total_loss + loss.item()

        # backward pass, then clip gradients to 1.0 to prevent the
        # exploding-gradient problem
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

        # update parameters
        optimizer.step()

        # predictions live on the GPU; move to CPU before stacking
        total_preds.append(preds.detach().cpu().numpy())

    # average training loss over the epoch
    avg_loss = total_loss / len(train_dataloader)

    # (num batches, batch size, num classes) -> (num samples, num classes)
    total_preds = np.concatenate(total_preds, axis=0)

    return avg_loss, total_preds


# function for evaluating the model
def evaluate():
    """Evaluate on the validation set; return (average loss, predictions)."""
    print("\nEvaluating...")

    # deactivate dropout layers
    model.eval()

    total_loss = 0
    total_preds = []

    # iterate over batches
    for step, batch in enumerate(val_dataloader):
        # progress update every 50 batches.
        # BUG FIX: the original computed `format_time(time.time() - t0)`
        # here, but neither `format_time` nor `t0` is defined anywhere,
        # so evaluation would crash with a NameError once step reached 50.
        if step % 50 == 0 and not step == 0:
            print(' Batch {:>5,} of {:>5,}.'.format(step, len(val_dataloader)))

        # push the batch to the compute device
        batch = [t.to(device) for t in batch]
        sent_id, mask, labels = batch

        # deactivate autograd for inference
        with torch.no_grad():
            preds = model(sent_id, mask)

            # validation loss between actual and predicted values
            loss = cross_entropy(preds, labels)
            total_loss = total_loss + loss.item()

            total_preds.append(preds.detach().cpu().numpy())

    # average validation loss over the epoch
    avg_loss = total_loss / len(val_dataloader)

    # (num batches, batch size, num classes) -> (num samples, num classes)
    total_preds = np.concatenate(total_preds, axis=0)

    return avg_loss, total_preds
_____no_output_____
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
Start Model Training
# Best validation loss seen so far; start at +inf so the first epoch
# always checkpoints.
best_valid_loss = float("inf")

# per-epoch loss history
train_losses = []
valid_losses = []

for epoch in range(epochs):
    print(f"\n Epoch {epoch + 1} / {epochs}")

    # one training pass, then one validation pass
    train_loss, _ = train()
    valid_loss, _ = evaluate()

    # checkpoint whenever validation improves
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), "saved_weights.pt")

    train_losses.append(train_loss)
    valid_losses.append(valid_loss)

    print(f"\nTraining Loss: {train_loss:.3f}")
    print(f"Validation Loss: {valid_loss:.3f}")
Epoch 1 / 10 Batch 50 of 122. Batch 100 of 122. Evaluating... Training Loss: 0.526 Validation Loss: 0.656 Epoch 2 / 10 Batch 50 of 122. Batch 100 of 122. Evaluating... Training Loss: 0.345 Validation Loss: 0.231 Epoch 3 / 10 Batch 50 of 122. Batch 100 of 122. Evaluating... Training Loss: 0.344 Validation Loss: 0.194 Epoch 4 / 10 Batch 50 of 122. Batch 100 of 122. Evaluating... Training Loss: 0.223 Validation Loss: 0.171 Epoch 5 / 10 Batch 50 of 122. Batch 100 of 122. Evaluating... Training Loss: 0.219 Validation Loss: 0.178 Epoch 6 / 10 Batch 50 of 122. Batch 100 of 122. Evaluating... Training Loss: 0.215 Validation Loss: 0.180 Epoch 7 / 10 Batch 50 of 122. Batch 100 of 122. Evaluating... Training Loss: 0.247 Validation Loss: 0.262 Epoch 8 / 10 Batch 50 of 122. Batch 100 of 122. Evaluating... Training Loss: 0.224 Validation Loss: 0.217 Epoch 9 / 10 Batch 50 of 122. Batch 100 of 122. Evaluating... Training Loss: 0.217 Validation Loss: 0.148 Epoch 10 / 10 Batch 50 of 122. Batch 100 of 122. Evaluating... Training Loss: 0.231 Validation Loss: 0.639
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
Load Saved Model
# Reload the checkpoint saved at the lowest validation loss.
path = "saved_weights.pt"
state_dict = torch.load(path)
model.load_state_dict(state_dict)
_____no_output_____
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
Get Predictions for Test Data
# Score the held-out test set with the fine-tuned model (no gradients).
with torch.no_grad():
    preds = model(test_seq.to(device), test_mask.to(device))
    preds = preds.detach().cpu().numpy()

# the class with the highest log-probability wins
preds = np.argmax(preds, axis=1)
print(classification_report(test_y, preds))

# confusion matrix (rows: true labels, columns: predictions)
pd.crosstab(test_y, preds)
_____no_output_____
Apache-2.0
Fine_Tuning_BERT_for_Spam_Classification.ipynb
lamiathu/Fine-Tuning-BERT
WeatherPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import time import random import json import scipy.stats as st from scipy.stats import linregress # Import API key from api_keys import weather_api_key from api_keys import g_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180)
_____no_output_____
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Generate Cities List
# Holding lists for the sampled coordinates and the resolved city names.
lat_lngs = []
cities = []

# Draw 1,500 random (lat, lng) pairs spanning the whole globe.
lats = np.random.uniform(lat_range[0], lat_range[1], size=1500)
lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500)
lat_lngs = zip(lats, lngs)

# Map each coordinate to its nearest city, de-duplicating as we go.
for lat, lng in lat_lngs:
    nearest = citipy.nearest_city(lat, lng).city_name
    if nearest not in cities:
        cities.append(nearest)

# Confirm we collected a sufficient number of unique cities.
len(cities)

# Accumulators for the weather fields pulled from the API.
city_names_ls = []
cloudiness_ls = []
country_ls = []
date_ls = []
humidity_ls = []
lat_ls = []
lng_ls = []
max_temp_ls = []
wind_speed_ls = []

# running record/set counters for the retrieval log
index_counter = 0
set_counter = 1
_____no_output_____
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Perform API Calls* Perform a weather check on each city using a series of successive API calls.* Include a print log of each city as it's being processed (with the city number and city name).
print("Beginning Data Retrieval") print("-------------------------------") base_url = "http://api.openweathermap.org/data/2.5/weather?" units = "imperial" url = f"{base_url}appid={weather_api_key}&units={units}&q=" for index, city in enumerate(cities, start = 1): try: response = requests.get(url + city).json() city_names_ls.append(response["name"]) cloudiness_ls.append(response["clouds"]["all"]) country_ls.append(response["sys"]["country"]) date_ls.append(response["dt"]) humidity_ls.append(response["main"]["humidity"]) lat_ls.append(response["coord"]["lat"]) lng_ls.append(response["coord"]["lon"]) max_temp_ls.append(response["main"]["temp_max"]) wind_speed_ls.append(response["wind"]["speed"]) if index_counter > 49: index_counter = 0 set_counter = set_counter + 1 else: index_counter = index_counter + 1 print(f"Processing Record {index_counter} of Set {set_counter} : {city}") except(KeyError, IndexError): print("City not found. Skipping...") print("-------------------------------") print("Data Retrieval Complete") print("-------------------------------")
Beginning Data Retrieval ------------------------------- Processing Record 1 of Set 1 : albany Processing Record 2 of Set 1 : terney City not found. Skipping... Processing Record 3 of Set 1 : pedasi Processing Record 4 of Set 1 : wakkanai Processing Record 5 of Set 1 : talnakh City not found. Skipping... Processing Record 6 of Set 1 : mataura Processing Record 7 of Set 1 : bilma City not found. Skipping... Processing Record 8 of Set 1 : makakilo city Processing Record 9 of Set 1 : hobart Processing Record 10 of Set 1 : brae Processing Record 11 of Set 1 : busselton Processing Record 12 of Set 1 : yar-sale Processing Record 13 of Set 1 : bluff Processing Record 14 of Set 1 : norman wells Processing Record 15 of Set 1 : qasigiannguit Processing Record 16 of Set 1 : claresholm Processing Record 17 of Set 1 : ribeira grande Processing Record 18 of Set 1 : hermanus Processing Record 19 of Set 1 : korem City not found. Skipping... Processing Record 20 of Set 1 : balabac Processing Record 21 of Set 1 : moose factory Processing Record 22 of Set 1 : butaritari Processing Record 23 of Set 1 : kapaa Processing Record 24 of Set 1 : longyearbyen Processing Record 25 of Set 1 : oda Processing Record 26 of Set 1 : uri Processing Record 27 of Set 1 : eucaliptus Processing Record 28 of Set 1 : chitradurga Processing Record 29 of Set 1 : rikitea Processing Record 30 of Set 1 : nikolskoye Processing Record 31 of Set 1 : hithadhoo Processing Record 32 of Set 1 : la ronge Processing Record 33 of Set 1 : rocha Processing Record 34 of Set 1 : general roca Processing Record 35 of Set 1 : cape town Processing Record 36 of Set 1 : mar del plata Processing Record 37 of Set 1 : almaznyy Processing Record 38 of Set 1 : dunedin Processing Record 39 of Set 1 : salalah Processing Record 40 of Set 1 : kavaratti Processing Record 41 of Set 1 : teguldet Processing Record 42 of Set 1 : tuatapere Processing Record 43 of Set 1 : ancud City not found. Skipping... City not found. Skipping... 
Processing Record 44 of Set 1 : port alfred Processing Record 45 of Set 1 : hilo Processing Record 46 of Set 1 : lebu City not found. Skipping... Processing Record 47 of Set 1 : hommelvik City not found. Skipping... Processing Record 48 of Set 1 : mundo novo Processing Record 49 of Set 1 : abnub Processing Record 50 of Set 1 : airai Processing Record 0 of Set 2 : qaanaaq Processing Record 1 of Set 2 : chuy Processing Record 2 of Set 2 : akureyri Processing Record 3 of Set 2 : ponta do sol Processing Record 4 of Set 2 : thompson Processing Record 5 of Set 2 : muqui Processing Record 6 of Set 2 : port elizabeth Processing Record 7 of Set 2 : caucaia Processing Record 8 of Set 2 : chokurdakh Processing Record 9 of Set 2 : nanortalik Processing Record 10 of Set 2 : castro Processing Record 11 of Set 2 : atuona Processing Record 12 of Set 2 : coquimbo Processing Record 13 of Set 2 : alofi Processing Record 14 of Set 2 : punta arenas Processing Record 15 of Set 2 : erzin Processing Record 16 of Set 2 : jalu Processing Record 17 of Set 2 : ushuaia Processing Record 18 of Set 2 : torbay Processing Record 19 of Set 2 : roma Processing Record 20 of Set 2 : parys Processing Record 21 of Set 2 : aitape Processing Record 22 of Set 2 : shache Processing Record 23 of Set 2 : langsa Processing Record 24 of Set 2 : asau Processing Record 25 of Set 2 : ahipara Processing Record 26 of Set 2 : loandjili Processing Record 27 of Set 2 : carnarvon Processing Record 28 of Set 2 : pierre Processing Record 29 of Set 2 : marsh harbour Processing Record 30 of Set 2 : masumbwe Processing Record 31 of Set 2 : puerto ayora Processing Record 32 of Set 2 : pitimbu Processing Record 33 of Set 2 : saldanha Processing Record 34 of Set 2 : igrim Processing Record 35 of Set 2 : yumen Processing Record 36 of Set 2 : tasiilaq City not found. Skipping... 
Processing Record 37 of Set 2 : new norfolk Processing Record 38 of Set 2 : deputatskiy Processing Record 39 of Set 2 : vao Processing Record 40 of Set 2 : taltal Processing Record 41 of Set 2 : el estor Processing Record 42 of Set 2 : emporia Processing Record 43 of Set 2 : vila velha Processing Record 44 of Set 2 : nelson bay Processing Record 45 of Set 2 : kavieng City not found. Skipping... Processing Record 46 of Set 2 : moate Processing Record 47 of Set 2 : yellowknife Processing Record 48 of Set 2 : ahuimanu Processing Record 49 of Set 2 : jamestown Processing Record 50 of Set 2 : east london Processing Record 0 of Set 3 : deep river Processing Record 1 of Set 3 : hede Processing Record 2 of Set 3 : la orilla Processing Record 3 of Set 3 : hovd City not found. Skipping... Processing Record 4 of Set 3 : atar Processing Record 5 of Set 3 : pevek Processing Record 6 of Set 3 : fortuna Processing Record 7 of Set 3 : constitucion City not found. Skipping... Processing Record 8 of Set 3 : west lake stevens Processing Record 9 of Set 3 : kyaikto Processing Record 10 of Set 3 : constantine Processing Record 11 of Set 3 : monzon Processing Record 12 of Set 3 : bethel Processing Record 13 of Set 3 : san miguel Processing Record 14 of Set 3 : pangnirtung City not found. Skipping... Processing Record 15 of Set 3 : nalut City not found. Skipping... 
Processing Record 16 of Set 3 : arraial do cabo Processing Record 17 of Set 3 : nedjo Processing Record 18 of Set 3 : bambous virieux Processing Record 19 of Set 3 : birjand Processing Record 20 of Set 3 : sorland Processing Record 21 of Set 3 : victoria Processing Record 22 of Set 3 : nandyal Processing Record 23 of Set 3 : dakar Processing Record 24 of Set 3 : paengaroa Processing Record 25 of Set 3 : vaini Processing Record 26 of Set 3 : port hardy Processing Record 27 of Set 3 : klaksvik Processing Record 28 of Set 3 : culebra Processing Record 29 of Set 3 : mehamn Processing Record 30 of Set 3 : guisa Processing Record 31 of Set 3 : male City not found. Skipping... Processing Record 32 of Set 3 : salym Processing Record 33 of Set 3 : barrow Processing Record 34 of Set 3 : jining Processing Record 35 of Set 3 : patiya Processing Record 36 of Set 3 : itoman City not found. Skipping... Processing Record 37 of Set 3 : korla Processing Record 38 of Set 3 : saint george Processing Record 39 of Set 3 : caravelas Processing Record 40 of Set 3 : touros City not found. Skipping... Processing Record 41 of Set 3 : saint-augustin Processing Record 42 of Set 3 : college Processing Record 43 of Set 3 : hami Processing Record 44 of Set 3 : guerrero negro City not found. Skipping... 
Processing Record 45 of Set 3 : verkh-usugli Processing Record 46 of Set 3 : krasnoarmeysk Processing Record 47 of Set 3 : kirkwall Processing Record 48 of Set 3 : bredasdorp Processing Record 49 of Set 3 : huilong Processing Record 50 of Set 3 : paita Processing Record 0 of Set 4 : berdigestyakh Processing Record 1 of Set 4 : tiksi Processing Record 2 of Set 4 : hasaki Processing Record 3 of Set 4 : kodiak Processing Record 4 of Set 4 : dikson Processing Record 5 of Set 4 : mahebourg Processing Record 6 of Set 4 : kaseda Processing Record 7 of Set 4 : margate Processing Record 8 of Set 4 : hay river Processing Record 9 of Set 4 : dharchula Processing Record 10 of Set 4 : daru Processing Record 11 of Set 4 : codrington Processing Record 12 of Set 4 : pasni City not found. Skipping... Processing Record 13 of Set 4 : evensk Processing Record 14 of Set 4 : shiyan Processing Record 15 of Set 4 : lexington park Processing Record 16 of Set 4 : buraydah Processing Record 17 of Set 4 : port-gentil Processing Record 18 of Set 4 : maralal Processing Record 19 of Set 4 : agua dulce Processing Record 20 of Set 4 : jian Processing Record 21 of Set 4 : mafinga Processing Record 22 of Set 4 : pisco Processing Record 23 of Set 4 : preobrazheniye Processing Record 24 of Set 4 : ramshir Processing Record 25 of Set 4 : puerto narino Processing Record 26 of Set 4 : keetmanshoop Processing Record 27 of Set 4 : waipawa Processing Record 28 of Set 4 : winneba City not found. Skipping... Processing Record 29 of Set 4 : bitung Processing Record 30 of Set 4 : sept-iles Processing Record 31 of Set 4 : provideniya Processing Record 32 of Set 4 : chapais Processing Record 33 of Set 4 : xushan Processing Record 34 of Set 4 : mount isa
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Convert Raw Data to DataFrame* Export the city data into a .csv.* Display the DataFrame
# Assemble the collected fields into a single DataFrame and persist it.
weather_df = pd.DataFrame(
    {
        "City": city_names_ls,
        "Cloudiness": cloudiness_ls,
        "Country": country_ls,
        "Date": date_ls,
        "Humidity": humidity_ls,
        "Lat": lat_ls,
        "Lng": lng_ls,
        "Max Temp": max_temp_ls,
        "Wind Speed": wind_speed_ls,
    }
)

# quick sanity checks: non-null counts and a full display
weather_df.count()
weather_df

# export the city data to CSV
weather_df.to_csv("../output_data/cities.csv")
_____no_output_____
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Inspect the data and remove the cities where the humidity > 100%.----Skip this step if there are no cities that have humidity > 100%.
# Get the indices of cities that have humidity over 100%. # Make a new DataFrame equal to the city data to drop all humidity outliers by index. # Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data". # Extract relevant fields from the data frame # Export the City_Data into a csv
_____no_output_____
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Plotting the Data* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.* Save the plotted figures as .pngs. Latitude vs. Temperature Plot
plt.scatter(weather_df["Lat"], weather_df["Max Temp"], facecolor = "darkblue", edgecolor = "darkgrey") plt.title("City Latitude vs. Max Temperature (07/13/20)") plt.ylabel("Max Temperature (F)") plt.xlabel("Latitude") plt.grid(True) plt.savefig("../Images/City Latitude vs Max Temperature.png") plt.show() print("This plot shows that as cities get further away from equator that they are cooler")
_____no_output_____
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Latitude vs. Humidity Plot
plt.scatter(weather_df["Lat"], weather_df["Humidity"], facecolor = "darkblue", edgecolor = "darkgrey") plt.title("City Latitude vs. Humidity (07/13/20)") plt.ylabel("Humidity (%)") plt.xlabel("Latitude") plt.grid(True) plt.savefig("../Images/City Latitude vs Humidity.png") plt.show() print("This plot shows that humidity is well spread throughout cities despite location")
_____no_output_____
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Latitude vs. Cloudiness Plot
plt.scatter(weather_df["Lat"], weather_df["Cloudiness"], facecolor = "darkblue", edgecolor = "darkgrey") plt.title("City Latitude vs. Cloudiness (07/13/20)") plt.ylabel("Cloudiness (%)") plt.xlabel("Latitude") plt.grid(True) plt.savefig("../Images/City Latitude vs Cloudiness.png") plt.show() print("This plot shows that cloudiness is well spread throughout cities despite location")
_____no_output_____
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Latitude vs. Wind Speed Plot
plt.scatter(weather_df["Lat"], weather_df["Wind Speed"], facecolor = "darkblue", edgecolor = "darkgrey") plt.title("City Latitude vs. Windspeed (07/13/20)") plt.ylabel("Windspeed (mph)") plt.xlabel("Latitude") plt.grid(True) plt.savefig("../Images/City Latitude vs Windspeed.png") plt.show() print("This plot shows that windspeed is well spread throughout cities despite location")
_____no_output_____
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Linear Regression
# Split the data set by hemisphere for the per-hemisphere regressions.
nor_hemi = weather_df.loc[weather_df["Lat"] >= 0]
sou_hemi = weather_df.loc[weather_df["Lat"] < 0]


def linear_agression(x, y):
    """Scatter x vs y with a fitted regression line; return the equation text.

    BUG FIX: the original printed Pearson's r (via st.pearsonr) but
    labelled it "r-squared" — which is why impossible negative
    "r-squared" values appeared in the output. The value is now
    labelled as the r-value, taken from linregress's rvalue (the same
    Pearson coefficient), and the redundant pearsonr call is dropped.
    """
    (slope, intercept, rvalue, pvalue, stderr) = linregress(x, y)
    print(f"The r-value is : {round(rvalue, 2)}")
    regress_values = x * slope + intercept
    line_eq = "y = " + str(round(slope, 2)) + "x + " + str(round(intercept, 2))
    plt.scatter(x, y)
    plt.plot(x, regress_values, "r-")
    return line_eq


def annotate(line_eq, a, b):
    """Draw the regression equation on the current axes at point (a, b)."""
    plt.annotate(line_eq, (a, b), fontsize=15, color="red")
_____no_output_____
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Northern Hemisphere - Max Temp vs. Latitude Linear Regression
# Northern hemisphere: max temperature regressed on latitude.
equation = linear_agression(nor_hemi["Lat"], nor_hemi["Max Temp"])
annotate(equation, 10, 40)
plt.xlabel("Latitude")
plt.ylabel("Max Temp (F)")
plt.savefig("../Images/NorHemi Max Temp vs. Latitude Linear Regression.png")
print("This linear regression shows that cities gets hotter as get closer to equator in northern hemisphere")
The r-squared is : -0.65 This linear regression shows that cities gets hotter as get closer to equator in northern hemisphere
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Southern Hemisphere - Max Temp vs. Latitude Linear Regression
# Southern hemisphere: max temperature regressed on latitude.
equation = linear_agression(sou_hemi["Lat"], sou_hemi["Max Temp"])
annotate(equation, -50, 80)
plt.xlabel("Latitude")
plt.ylabel("Max Temperature (F)")
plt.savefig("../Images/SouHemi Max Temp vs. Latitude Linear Regression.png")
print("This linear regression shows that cities gets colder as get away to equator in southern hemisphere")
The r-squared is : 0.74
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# Northern hemisphere: humidity regressed on latitude.
equation = linear_agression(nor_hemi["Lat"], nor_hemi["Humidity"])
annotate(equation, 1, 5)
plt.xlabel("Latitude")
plt.ylabel("Humidity")
plt.savefig("../Images/NorHemi Humidity vs. Latitude Linear Regression.png")
print("This linear regression shows that cities' humidity doesn't change much as get closer to equator in northern hemisphere")
The r-squared is : -0.08
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# Southern hemisphere: humidity regressed on latitude.
equation = linear_agression(sou_hemi["Lat"], sou_hemi["Humidity"])
annotate(equation, -50, 20)
plt.xlabel("Latitude")
plt.ylabel("Humidity")
plt.savefig("../Images/SouHemi Humidity vs. Latitude Linear Regression.png")
print("This linear regression shows that cities' humidity doesn't change much as get closer to equator in southern hemisphere")
The r-squared is : 0.09
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# Northern hemisphere: cloudiness regressed on latitude.
equation = linear_agression(nor_hemi["Lat"], nor_hemi["Cloudiness"])
annotate(equation, 0, 0)
plt.xlabel("Latitude")
plt.ylabel("Cloudiness")
plt.savefig("../Images/NorHemi Cloudiness vs. Latitude Linear Regression.png")
print("This linear regression shows that cities' cloudiness doesn't change much as get closer to equator in northern hemisphere")
The r-squared is : -0.01
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# Southern hemisphere: cloudiness regressed on latitude.
equation = linear_agression(sou_hemi["Lat"], sou_hemi["Cloudiness"])
annotate(equation, -50, 50)
plt.xlabel("Latitude")
plt.ylabel("Cloudiness")
plt.savefig("../Images/SouHemi Cloudiness vs. Latitude Linear Regression.png")
print("This linear regression shows that cities' cloudiness doesn't change much as get closer to equator in southern hemisphere")
The r-squared is : 0.1
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
# Northern hemisphere: wind speed regressed on latitude.
equation = linear_agression(nor_hemi["Lat"], nor_hemi["Wind Speed"])
annotate(equation, 40, 25)
plt.xlabel("Latitude")
plt.ylabel("Wind Speed")
plt.savefig("../Images/NorHemi Wind Speed vs. Latitude Linear Regression.png")
print("This linear regression shows that cities' windiness doesn't change much as get closer to equator in northern hemisphere")
The r-squared is : -0.0
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
# Southern hemisphere: wind speed regressed on latitude.
equation = linear_agression(sou_hemi["Lat"], sou_hemi["Wind Speed"])
annotate(equation, -50, 25)
plt.xlabel("Latitude")
plt.ylabel("Wind Speed")
plt.savefig("../Images/SouHemi Wind Speed vs. Latitude Linear Regression.png")
# BUG FIX: this is the *southern* hemisphere plot; the original message
# said "northern hemisphere" (copy-paste error).
print("This linear regression shows that cities' windiness doesn't change much as get closer to equator in southern hemisphere")

# Three Observable Trends
# 1- Out of the cities analyzed, tends to be hotter by the equator
# 2- This time of year there tends to be more wind in cities away from equator in southern hemisphere
# 3- Humidity, cloudiness, and wind speeds did not show any obvious trends in the northern hemisphere.
_____no_output_____
ADSL
06-Python-APIs_HW/Instructions/starter_code/WeatherPy.ipynb
jmbruner37/6_python_api
""" Array2D """ class Array2D: def __init__(self,rows, cols, value): self.__cols = cols self.__rows = rows self.__array=[[value for x in range(self.__cols)] for y in range(self.__rows)] def to_string(self): [print("---",end="") for x in range(self.__cols)] print("") for ren in self.__array: print(ren) [print("---",end="") for x in range(self.__cols)] print("") def get_num_rows(self): return self.__rows def get_num_cols(self): return self.__cols def get_item(self,row,col): return self.__array[row][col] def set_item( self , row , col , valor ): self.__array[row][col]=valor def clearing(self, valor=0): for ren in range(self.__rows): for col in range(self.__cols): self.__array[ren][col]=valor class Stack: def __init__(self): self.__data = [] self.__size = 0 def pop(self): return self.__data.pop() def get_size(self): return self.__size def peak(self): if len(self.__data) > 0: return self.__data[-1] else: return None def push(self,value): self.__data.append(value) self.__size += 1 def to_string(self): print("-"*6) for dato in self.__data[::-1]: print(f"| {dato} |") print("/" * 6) print("") class Laberinto_ADT: def __init__(self, archivo): self.__laberinto = Array2D(0, 0, 0) self.__camino = Stack() self.__rens = 0 self.__cols = 0 self.__entrada = (0, 0) entrada = open(archivo, 'rt') datos = entrada.readlines() #print(datos) self.__rens = int(datos.pop(0).strip()) self.__cols = int(datos.pop(0).strip()) self.__entrada = list(datos[0].strip().split(',')) self.__entrada[0] = int(self.__entrada[0]) self.__entrada[1] = int(self.__entrada[1]) self.__camino.push((self.__entrada[1],[0])) datos.pop(0) #eleminamos la tupla print(self.__rens, self.__cols, self.__entrada) print(datos) self.__laberinto = Array2D(self.__rens, self.__cols, '1') for renglon in range(self.__rens): info_ren = datos[renglon].strip().split(',') for columna in range(self.__cols): self.__laberinto.set_item(renglon, columna, info_ren[columna]) self.__laberinto.to_string() def resolver(self): actual = 
self.__camino.peek() def imprime_camino(self): self.__camino.to_string() def mostrar(self): self.__laberinto.to_string() if self.__laberinto.get_item( actual[0], actual[1] - 1 ) == '0' and self.__laberinto.get_item( actual[0], actual[1] - 1 ) != 'X' and self.__previa ! = actual[0], actual[1] - 1: self.__previa = actual self.__camino.push(actual[0], actual[1]-1) elif self.__laberinto.get_item(actual[0]-1 , actual[1]) == '0' and self.__ elif l ==2 : pass elif l== 2: pass else: self.__laberinto.set_item(actual[0]) def otros(): pass #main laberinto = Laberinto_ADT("entrada.txt") laberinto.mostrar() laberinto.imprime_camino()
7 7 [6, 2] ['1,1,1,1,1,1,1\n', '1,1,1,1,1,1,1\n', '1,1,1,1,1,1,1\n', '1,1,1,1,1,1,1\n', '1,0,0,0,0,0,S\n', '1,1,0,1,1,1,1\n', '1,1,E,1,1,1,1'] --------------------- ['1', '1', '1', '1', '1', '1', '1'] ['1', '1', '1', '1', '1', '1', '1'] ['1', '1', '1', '1', '1', '1', '1'] ['1', '1', '1', '1', '1', '1', '1'] ['1', '0', '0', '0', '0', '0', 'S'] ['1', '1', '0', '1', '1', '1', '1'] ['1', '1', 'E', '1', '1', '1', '1'] --------------------- --------------------- ['1', '1', '1', '1', '1', '1', '1'] ['1', '1', '1', '1', '1', '1', '1'] ['1', '1', '1', '1', '1', '1', '1'] ['1', '1', '1', '1', '1', '1', '1'] ['1', '0', '0', '0', '0', '0', 'S'] ['1', '1', '0', '1', '1', '1', '1'] ['1', '1', 'E', '1', '1', '1', '1'] --------------------- ------ | (2, [0]) | //////
MIT
4_diciembre1358.ipynb
jorge23amury/daa_2021_1
Run hacked AlphaFold2 on the designed bound states Imports
# NOTE: lines starting with % are IPython magics; this cell only runs
# inside a Jupyter/IPython session.
%load_ext lab_black

# Python standard library
from glob import glob
import os
import socket
import sys

# 3rd party library imports
import dask
import matplotlib.pyplot as plt
import pandas as pd
import pyrosetta
import numpy as np
import scipy
import seaborn as sns
from tqdm.auto import tqdm  # jupyter compatible progress bar

tqdm.pandas()  # link tqdm to pandas

# Notebook magic
# save plots in the notebook
%matplotlib inline
# reloads modules automatically before executing cells
%load_ext autoreload
%autoreload 2

print(f"running in directory: {os.getcwd()}")  # where are we?
print(f"running on node: {socket.gethostname()}")  # what node are we on?
running in directory: /mnt/home/pleung/projects/crispy_shifty/projects/crispy_shifties running on node: dig154
MIT
projects/crispy_shifties/03_fold_bound_states.ipynb
proleu/crispy_shifty
Set working directory to the root of the crispy_shifty repo. TODO: set to the projects dir
os.chdir("/home/pleung/projects/crispy_shifty") # os.chdir("/projects/crispy_shifty")
_____no_output_____
MIT
projects/crispy_shifties/03_fold_bound_states.ipynb
proleu/crispy_shifty
Run AF2 on the designed bound states. TODO
# Generate a SLURM array-task bundle that runs AF2 folding on each designed
# bound state listed in the pair file from the previous (02_mpnn) step.
from crispy_shifty.utils.io import gen_array_tasks

simulation_name = "03_fold_bound_states"
# Pair file produced by the 02_mpnn_bound_states step; one design per task.
design_list_file = os.path.join(
    os.getcwd(),
    "projects/crispy_shifties/02_mpnn_bound_states/test_mpnn_states.pair",  # TODO
)
output_path = os.path.join(os.getcwd(), f"projects/crispy_shifties/{simulation_name}")
# Extra command-line options passed through to the distributed function.
options = " ".join(
    [
        "out:level 200",
    ]
)
# Restrict AF2 to model 1 only (faster; full 5-model runs come later).
extra_kwargs = {"models": "1"}

gen_array_tasks(
    distribute_func="crispy_shifty.protocols.folding.fold_bound_state",
    design_list_file=design_list_file,
    output_path=output_path,
    queue="gpu",  # TODO
    cores=2,
    memory="16G",  # TODO
    gres="--gres=gpu:rtx2080:1",  # TODO
    # TODO perlmutter_mode=True,
    nstruct=1,
    nstruct_per_task=1,
    options=options,
    extra_kwargs=extra_kwargs,
    simulation_name=simulation_name,
)
# Submit manually with sbatch once the tasks.cmds file has been generated:
# !sbatch -a 1-$(cat /mnt/home/pleung/projects/crispy_shifty/projects/crispy_shifties/02_mpnn_bound_states/tasks.cmds | wc -l) /mnt/home/pleung/projects/crispy_shifty/projects/crispy_shifties/02_mpnn_bound_states/run.sh
_____no_output_____
MIT
projects/crispy_shifties/03_fold_bound_states.ipynb
proleu/crispy_shifty
Collect scorefiles of designed bound states and concatenate. TODO: change to projects dir
sys.path.insert(0, "~/projects/crispy_shifty") # TODO from crispy_shifty.utils.io import collect_score_file simulation_name = "03_fold_bound_states" output_path = os.path.join(os.getcwd(), f"projects/crispy_shifties/{simulation_name}") if not os.path.exists(os.path.join(output_path, "scores.json")): collect_score_file(output_path, "scores")
/projects/crispy_shifty/envs/crispy/lib/python3.8/site-packages/dask_jobqueue/core.py:20: FutureWarning: tmpfile is deprecated and will be removed in a future release. Please use dask.utils.tmpfile instead. from distributed.utils import tmpfile
MIT
projects/crispy_shifties/03_fold_bound_states.ipynb
proleu/crispy_shifty
Load resulting concatenated scorefile. TODO: change to projects dir
sys.path.insert(0, "~/projects/crispy_shifty") # TODO from crispy_shifty.utils.io import parse_scorefile_linear output_path = os.path.join(os.getcwd(), f"projects/crispy_shifties/{simulation_name}") scores_df = parse_scorefile_linear(os.path.join(output_path, "scores.json")) scores_df = scores_df.convert_dtypes()
_____no_output_____
MIT
projects/crispy_shifties/03_fold_bound_states.ipynb
proleu/crispy_shifty
Setup for plotting
# Configure seaborn's global plot aesthetics for the figures below.
sns.set(
    palette="colorblind",  # colorblind-friendly color palette
    style="ticks",  # white background with black axis lines
    context="talk",  # larger elements, sized for presentations
    font_scale=1,  # keep the default "talk" font size
)
_____no_output_____
MIT
projects/crispy_shifties/03_fold_bound_states.ipynb
proleu/crispy_shifty
Data exploration. Going to remove the Rosetta sfxn scoreterms for now.
# Drop the Rosetta beta_nov16 scorefunction terms, keeping only analysis columns.
from crispy_shifty.protocols.design import beta_nov16_terms

# Hoist the term list into a set once: O(1) membership tests instead of O(n)
# per column.
_excluded_terms = set(beta_nov16_terms)
scores_df = scores_df[
    [term for term in scores_df.columns if term not in _excluded_terms]
]
print(len(scores_df))
# Count rows designed by Rosetta with a vectorized comparison instead of the
# original per-row iterrows() loop (same printed value, much faster).
print(int((scores_df["designed_by"] == "rosetta").sum()))
10
MIT
projects/crispy_shifties/03_fold_bound_states.ipynb
proleu/crispy_shifty
Save a list of outputs
# simulation_name = "03_fold_bound_states" # output_path = os.path.join(os.getcwd(), f"projects/crispy_shifties/{simulation_name}") # with open(os.path.join(output_path, "folded_states.list"), "w") as f: # for path in tqdm(scores_df.index): # print(path, file=f)
_____no_output_____
MIT
projects/crispy_shifties/03_fold_bound_states.ipynb
proleu/crispy_shifty
Prototyping blocks test `fold_bound_state`
%%time
# Prototype/debug run of fold_bound_state on a single MPNN design, timing the
# whole cell with the %%time magic.
from operator import gt, lt

import pyrosetta

# Pass/fail thresholds applied to each AF2 prediction: (comparison, cutoff).
filter_dict = {
    "mean_plddt": (gt, 85.0),
    "rmsd_to_reference": (lt, 2.2),
    "mean_pae_interaction": (lt, 10.0),
}
rank_on = "mean_plddt"  # metric used to order surviving decoys
prefix = "mpnn_seq"  # score-key prefix for the MPNN sequences
pyrosetta.init()
sys.path.insert(0, "~/projects/crispy_shifty/")  # TODO projects
from crispy_shifty.protocols.folding import fold_bound_state

# First positional arg (packed_pose_in) is None: inputs come from the
# fasta/pdb paths in the kwargs instead.
t = fold_bound_state(
    None,
    **{
        'fasta_path': '/mnt/home/pleung/projects/crispy_shifty/projects/crispy_shifties/02_mpnn_bound_states/fastas/0000/02_mpnn_bound_states_25a76fae39514121922e2b477b5b9813.fa',
        "filter_dict": filter_dict,
        "models": [1],  # TODO
        'pdb_path': '/mnt/home/pleung/projects/crispy_shifty/projects/crispy_shifties/02_mpnn_bound_states/decoys/0000/02_mpnn_bound_states_25a76fae39514121922e2b477b5b9813.pdb.bz2',
        'prefix': prefix,
        'rank_on': rank_on,
        # 'fasta_path': 'bar.fa',
        # "models": [1, 2],  # TODO
        # 'pdb_path': 'foo.pdb.bz2',
    }
)
# Dump each returned decoy to a numbered PDB for visual inspection.
for i, tppose in enumerate(t):
    tppose.pose.dump_pdb(f"{i}.pdb")
# Display the scores of the last decoy (cell-output display, not a print).
tppose.pose.scores
_____no_output_____
MIT
projects/crispy_shifties/03_fold_bound_states.ipynb
proleu/crispy_shifty
test `generate_decoys_from_pose`
# Prototype/debug run of generate_decoys_from_pose on the pose produced by the
# previous cell (relies on `tppose` from the fold_bound_state test above).
from operator import gt, lt

from crispy_shifty.protocols.folding import generate_decoys_from_pose

# Same threshold scheme as the folding test: (comparison, cutoff) per metric.
filter_dict = {
    "mean_plddt": (gt, 85.0),
    "rmsd_to_reference": (lt, 2.2),
    "mean_pae_interaction": (lt, 10.0),
}
rank_on = "mean_plddt"
prefix = "mpnn_seq"
tpose = tppose.pose.clone()
genr = generate_decoys_from_pose(
    tpose, prefix=prefix, rank_on=rank_on, filter_dict=filter_dict
)
# Print the sequence of each decoy the generator yields.
for d in genr:
    print(d.sequence())
_____no_output_____
MIT
projects/crispy_shifties/03_fold_bound_states.ipynb
proleu/crispy_shifty
Segmentation of Road from Satellite imagery Importing Libraries
# Colab setup for the road-segmentation experiments: silence warnings, import
# the image/ML stack, mount Google Drive, and cd into the project folder.
import warnings
warnings.filterwarnings('ignore')
import os
import cv2
#from google.colab.patches import cv2_imshow
import numpy as np
import tensorflow as tf
import pandas as pd
from keras.models import Model, load_model
from skimage.morphology import label
import pickle
from keras import backend as K
from matplotlib import pyplot as plt
from tqdm import tqdm_notebook
import random
from skimage.io import imread, imshow, imread_collection, concatenate_images
from matplotlib import pyplot as plt
import h5py
seed = 56  # fixed seed value (NOTE(review): never passed to random/np.random here — confirm it is used downstream)
from google.colab import drive
drive.mount('/content/gdrive/')
base_path = "gdrive/My\ Drive/MapSegClean/"
%cd gdrive/My\ Drive/MapSegClean/
Drive already mounted at /content/gdrive/; to attempt to forcibly remount, call drive.mount("/content/gdrive/", force_remount=True). /content/gdrive/My Drive/MapSegClean
MIT
Train_and_Test_Notebooks/ResNet101_RoadTest.ipynb
parshwa1999/Map-Segmentation
Defining Custom Loss functions and accuracy Metric.
#Source: https://towardsdatascience.com/metrics-to-evaluate-your-semantic-segmentation-model-6bcb99639aa2
from keras import backend as K


def iou_coef(y_true, y_pred, smooth=1):
    """Mean intersection-over-union across the batch.

    `smooth` is added to both numerator and denominator so empty masks
    do not cause a division by zero.
    """
    overlap = K.sum(K.abs(y_true * y_pred), axis=[1, 2, 3])
    combined = K.sum(y_true, [1, 2, 3]) + K.sum(y_pred, [1, 2, 3]) - overlap
    return K.mean((overlap + smooth) / (combined + smooth), axis=0)


def dice_coef(y_true, y_pred, smooth=1):
    """Dice coefficient computed over the flattened masks."""
    truth = K.flatten(y_true)
    pred = K.flatten(y_pred)
    overlap = K.sum(truth * pred)
    return (2. * overlap + smooth) / (K.sum(truth) + K.sum(pred) + smooth)


def soft_dice_loss(y_true, y_pred):
    """Loss form of the Dice coefficient: 1 - dice, so lower is better."""
    return 1 - dice_coef(y_true, y_pred)
_____no_output_____
MIT
Train_and_Test_Notebooks/ResNet101_RoadTest.ipynb
parshwa1999/Map-Segmentation
Defining Our Model
pip install -U segmentation-models from keras.models import Model, load_model import tensorflow as tf from keras.layers import Input from keras.layers.core import Dropout, Lambda from keras.layers.convolutional import Conv2D, Conv2DTranspose from keras.layers.pooling import MaxPooling2D from keras.layers.merge import concatenate from keras import optimizers from keras.layers import BatchNormalization import keras from segmentation_models import Unet from segmentation_models import get_preprocessing from segmentation_models.losses import bce_jaccard_loss from segmentation_models.metrics import iou_score model = Unet('resnet101', input_shape=(256, 256, 3), encoder_weights=None) #model = Unet(input_shape=(256, 256, 3), weights=None, activation='elu') model.summary() # fit model
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:541: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:66: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:190: The name tf.get_default_session is deprecated. Please use tf.compat.v1.get_default_session instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:197: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:203: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:207: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:216: The name tf.is_variable_initialized is deprecated. Please use tf.compat.v1.is_variable_initialized instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:223: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:2041: The name tf.nn.fused_batch_norm is deprecated. Please use tf.compat.v1.nn.fused_batch_norm instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:148: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead. 
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:4432: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:4267: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:2239: The name tf.image.resize_nearest_neighbor is deprecated. Please use tf.compat.v1.image.resize_nearest_neighbor instead. Model: "model_2" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== data (InputLayer) (None, 256, 256, 3) 0 __________________________________________________________________________________________________ bn_data (BatchNormalization) (None, 256, 256, 3) 9 data[0][0] __________________________________________________________________________________________________ zero_padding2d_1 (ZeroPadding2D (None, 262, 262, 3) 0 bn_data[0][0] __________________________________________________________________________________________________ conv0 (Conv2D) (None, 128, 128, 64) 9408 zero_padding2d_1[0][0] __________________________________________________________________________________________________ bn0 (BatchNormalization) (None, 128, 128, 64) 256 conv0[0][0] __________________________________________________________________________________________________ relu0 (Activation) (None, 128, 128, 64) 0 bn0[0][0] __________________________________________________________________________________________________ zero_padding2d_2 (ZeroPadding2D (None, 130, 130, 64) 0 relu0[0][0] __________________________________________________________________________________________________ pooling0 (MaxPooling2D) (None, 64, 64, 
64) 0 zero_padding2d_2[0][0] __________________________________________________________________________________________________ stage1_unit1_bn1 (BatchNormaliz (None, 64, 64, 64) 256 pooling0[0][0] __________________________________________________________________________________________________ stage1_unit1_relu1 (Activation) (None, 64, 64, 64) 0 stage1_unit1_bn1[0][0] __________________________________________________________________________________________________ stage1_unit1_conv1 (Conv2D) (None, 64, 64, 64) 4096 stage1_unit1_relu1[0][0] __________________________________________________________________________________________________ stage1_unit1_bn2 (BatchNormaliz (None, 64, 64, 64) 256 stage1_unit1_conv1[0][0] __________________________________________________________________________________________________ stage1_unit1_relu2 (Activation) (None, 64, 64, 64) 0 stage1_unit1_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_3 (ZeroPadding2D (None, 66, 66, 64) 0 stage1_unit1_relu2[0][0] __________________________________________________________________________________________________ stage1_unit1_conv2 (Conv2D) (None, 64, 64, 64) 36864 zero_padding2d_3[0][0] __________________________________________________________________________________________________ stage1_unit1_bn3 (BatchNormaliz (None, 64, 64, 64) 256 stage1_unit1_conv2[0][0] __________________________________________________________________________________________________ stage1_unit1_relu3 (Activation) (None, 64, 64, 64) 0 stage1_unit1_bn3[0][0] __________________________________________________________________________________________________ stage1_unit1_conv3 (Conv2D) (None, 64, 64, 256) 16384 stage1_unit1_relu3[0][0] __________________________________________________________________________________________________ stage1_unit1_sc (Conv2D) (None, 64, 64, 256) 16384 stage1_unit1_relu1[0][0] 
__________________________________________________________________________________________________ add_1 (Add) (None, 64, 64, 256) 0 stage1_unit1_conv3[0][0] stage1_unit1_sc[0][0] __________________________________________________________________________________________________ stage1_unit2_bn1 (BatchNormaliz (None, 64, 64, 256) 1024 add_1[0][0] __________________________________________________________________________________________________ stage1_unit2_relu1 (Activation) (None, 64, 64, 256) 0 stage1_unit2_bn1[0][0] __________________________________________________________________________________________________ stage1_unit2_conv1 (Conv2D) (None, 64, 64, 64) 16384 stage1_unit2_relu1[0][0] __________________________________________________________________________________________________ stage1_unit2_bn2 (BatchNormaliz (None, 64, 64, 64) 256 stage1_unit2_conv1[0][0] __________________________________________________________________________________________________ stage1_unit2_relu2 (Activation) (None, 64, 64, 64) 0 stage1_unit2_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_4 (ZeroPadding2D (None, 66, 66, 64) 0 stage1_unit2_relu2[0][0] __________________________________________________________________________________________________ stage1_unit2_conv2 (Conv2D) (None, 64, 64, 64) 36864 zero_padding2d_4[0][0] __________________________________________________________________________________________________ stage1_unit2_bn3 (BatchNormaliz (None, 64, 64, 64) 256 stage1_unit2_conv2[0][0] __________________________________________________________________________________________________ stage1_unit2_relu3 (Activation) (None, 64, 64, 64) 0 stage1_unit2_bn3[0][0] __________________________________________________________________________________________________ stage1_unit2_conv3 (Conv2D) (None, 64, 64, 256) 16384 stage1_unit2_relu3[0][0] 
__________________________________________________________________________________________________ add_2 (Add) (None, 64, 64, 256) 0 stage1_unit2_conv3[0][0] add_1[0][0] __________________________________________________________________________________________________ stage1_unit3_bn1 (BatchNormaliz (None, 64, 64, 256) 1024 add_2[0][0] __________________________________________________________________________________________________ stage1_unit3_relu1 (Activation) (None, 64, 64, 256) 0 stage1_unit3_bn1[0][0] __________________________________________________________________________________________________ stage1_unit3_conv1 (Conv2D) (None, 64, 64, 64) 16384 stage1_unit3_relu1[0][0] __________________________________________________________________________________________________ stage1_unit3_bn2 (BatchNormaliz (None, 64, 64, 64) 256 stage1_unit3_conv1[0][0] __________________________________________________________________________________________________ stage1_unit3_relu2 (Activation) (None, 64, 64, 64) 0 stage1_unit3_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_5 (ZeroPadding2D (None, 66, 66, 64) 0 stage1_unit3_relu2[0][0] __________________________________________________________________________________________________ stage1_unit3_conv2 (Conv2D) (None, 64, 64, 64) 36864 zero_padding2d_5[0][0] __________________________________________________________________________________________________ stage1_unit3_bn3 (BatchNormaliz (None, 64, 64, 64) 256 stage1_unit3_conv2[0][0] __________________________________________________________________________________________________ stage1_unit3_relu3 (Activation) (None, 64, 64, 64) 0 stage1_unit3_bn3[0][0] __________________________________________________________________________________________________ stage1_unit3_conv3 (Conv2D) (None, 64, 64, 256) 16384 stage1_unit3_relu3[0][0] 
__________________________________________________________________________________________________ add_3 (Add) (None, 64, 64, 256) 0 stage1_unit3_conv3[0][0] add_2[0][0] __________________________________________________________________________________________________ stage2_unit1_bn1 (BatchNormaliz (None, 64, 64, 256) 1024 add_3[0][0] __________________________________________________________________________________________________ stage2_unit1_relu1 (Activation) (None, 64, 64, 256) 0 stage2_unit1_bn1[0][0] __________________________________________________________________________________________________ stage2_unit1_conv1 (Conv2D) (None, 64, 64, 128) 32768 stage2_unit1_relu1[0][0] __________________________________________________________________________________________________ stage2_unit1_bn2 (BatchNormaliz (None, 64, 64, 128) 512 stage2_unit1_conv1[0][0] __________________________________________________________________________________________________ stage2_unit1_relu2 (Activation) (None, 64, 64, 128) 0 stage2_unit1_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_6 (ZeroPadding2D (None, 66, 66, 128) 0 stage2_unit1_relu2[0][0] __________________________________________________________________________________________________ stage2_unit1_conv2 (Conv2D) (None, 32, 32, 128) 147456 zero_padding2d_6[0][0] __________________________________________________________________________________________________ stage2_unit1_bn3 (BatchNormaliz (None, 32, 32, 128) 512 stage2_unit1_conv2[0][0] __________________________________________________________________________________________________ stage2_unit1_relu3 (Activation) (None, 32, 32, 128) 0 stage2_unit1_bn3[0][0] __________________________________________________________________________________________________ stage2_unit1_conv3 (Conv2D) (None, 32, 32, 512) 65536 stage2_unit1_relu3[0][0] 
__________________________________________________________________________________________________ stage2_unit1_sc (Conv2D) (None, 32, 32, 512) 131072 stage2_unit1_relu1[0][0] __________________________________________________________________________________________________ add_4 (Add) (None, 32, 32, 512) 0 stage2_unit1_conv3[0][0] stage2_unit1_sc[0][0] __________________________________________________________________________________________________ stage2_unit2_bn1 (BatchNormaliz (None, 32, 32, 512) 2048 add_4[0][0] __________________________________________________________________________________________________ stage2_unit2_relu1 (Activation) (None, 32, 32, 512) 0 stage2_unit2_bn1[0][0] __________________________________________________________________________________________________ stage2_unit2_conv1 (Conv2D) (None, 32, 32, 128) 65536 stage2_unit2_relu1[0][0] __________________________________________________________________________________________________ stage2_unit2_bn2 (BatchNormaliz (None, 32, 32, 128) 512 stage2_unit2_conv1[0][0] __________________________________________________________________________________________________ stage2_unit2_relu2 (Activation) (None, 32, 32, 128) 0 stage2_unit2_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_7 (ZeroPadding2D (None, 34, 34, 128) 0 stage2_unit2_relu2[0][0] __________________________________________________________________________________________________ stage2_unit2_conv2 (Conv2D) (None, 32, 32, 128) 147456 zero_padding2d_7[0][0] __________________________________________________________________________________________________ stage2_unit2_bn3 (BatchNormaliz (None, 32, 32, 128) 512 stage2_unit2_conv2[0][0] __________________________________________________________________________________________________ stage2_unit2_relu3 (Activation) (None, 32, 32, 128) 0 stage2_unit2_bn3[0][0] 
__________________________________________________________________________________________________ stage2_unit2_conv3 (Conv2D) (None, 32, 32, 512) 65536 stage2_unit2_relu3[0][0] __________________________________________________________________________________________________ add_5 (Add) (None, 32, 32, 512) 0 stage2_unit2_conv3[0][0] add_4[0][0] __________________________________________________________________________________________________ stage2_unit3_bn1 (BatchNormaliz (None, 32, 32, 512) 2048 add_5[0][0] __________________________________________________________________________________________________ stage2_unit3_relu1 (Activation) (None, 32, 32, 512) 0 stage2_unit3_bn1[0][0] __________________________________________________________________________________________________ stage2_unit3_conv1 (Conv2D) (None, 32, 32, 128) 65536 stage2_unit3_relu1[0][0] __________________________________________________________________________________________________ stage2_unit3_bn2 (BatchNormaliz (None, 32, 32, 128) 512 stage2_unit3_conv1[0][0] __________________________________________________________________________________________________ stage2_unit3_relu2 (Activation) (None, 32, 32, 128) 0 stage2_unit3_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_8 (ZeroPadding2D (None, 34, 34, 128) 0 stage2_unit3_relu2[0][0] __________________________________________________________________________________________________ stage2_unit3_conv2 (Conv2D) (None, 32, 32, 128) 147456 zero_padding2d_8[0][0] __________________________________________________________________________________________________ stage2_unit3_bn3 (BatchNormaliz (None, 32, 32, 128) 512 stage2_unit3_conv2[0][0] __________________________________________________________________________________________________ stage2_unit3_relu3 (Activation) (None, 32, 32, 128) 0 stage2_unit3_bn3[0][0] 
__________________________________________________________________________________________________ stage2_unit3_conv3 (Conv2D) (None, 32, 32, 512) 65536 stage2_unit3_relu3[0][0] __________________________________________________________________________________________________ add_6 (Add) (None, 32, 32, 512) 0 stage2_unit3_conv3[0][0] add_5[0][0] __________________________________________________________________________________________________ stage2_unit4_bn1 (BatchNormaliz (None, 32, 32, 512) 2048 add_6[0][0] __________________________________________________________________________________________________ stage2_unit4_relu1 (Activation) (None, 32, 32, 512) 0 stage2_unit4_bn1[0][0] __________________________________________________________________________________________________ stage2_unit4_conv1 (Conv2D) (None, 32, 32, 128) 65536 stage2_unit4_relu1[0][0] __________________________________________________________________________________________________ stage2_unit4_bn2 (BatchNormaliz (None, 32, 32, 128) 512 stage2_unit4_conv1[0][0] __________________________________________________________________________________________________ stage2_unit4_relu2 (Activation) (None, 32, 32, 128) 0 stage2_unit4_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_9 (ZeroPadding2D (None, 34, 34, 128) 0 stage2_unit4_relu2[0][0] __________________________________________________________________________________________________ stage2_unit4_conv2 (Conv2D) (None, 32, 32, 128) 147456 zero_padding2d_9[0][0] __________________________________________________________________________________________________ stage2_unit4_bn3 (BatchNormaliz (None, 32, 32, 128) 512 stage2_unit4_conv2[0][0] __________________________________________________________________________________________________ stage2_unit4_relu3 (Activation) (None, 32, 32, 128) 0 stage2_unit4_bn3[0][0] 
__________________________________________________________________________________________________ stage2_unit4_conv3 (Conv2D) (None, 32, 32, 512) 65536 stage2_unit4_relu3[0][0] __________________________________________________________________________________________________ add_7 (Add) (None, 32, 32, 512) 0 stage2_unit4_conv3[0][0] add_6[0][0] __________________________________________________________________________________________________ stage3_unit1_bn1 (BatchNormaliz (None, 32, 32, 512) 2048 add_7[0][0] __________________________________________________________________________________________________ stage3_unit1_relu1 (Activation) (None, 32, 32, 512) 0 stage3_unit1_bn1[0][0] __________________________________________________________________________________________________ stage3_unit1_conv1 (Conv2D) (None, 32, 32, 256) 131072 stage3_unit1_relu1[0][0] __________________________________________________________________________________________________ stage3_unit1_bn2 (BatchNormaliz (None, 32, 32, 256) 1024 stage3_unit1_conv1[0][0] __________________________________________________________________________________________________ stage3_unit1_relu2 (Activation) (None, 32, 32, 256) 0 stage3_unit1_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_10 (ZeroPadding2 (None, 34, 34, 256) 0 stage3_unit1_relu2[0][0] __________________________________________________________________________________________________ stage3_unit1_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_10[0][0] __________________________________________________________________________________________________ stage3_unit1_bn3 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit1_conv2[0][0] __________________________________________________________________________________________________ stage3_unit1_relu3 (Activation) (None, 16, 16, 256) 0 stage3_unit1_bn3[0][0] 
__________________________________________________________________________________________________ stage3_unit1_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit1_relu3[0][0] __________________________________________________________________________________________________ stage3_unit1_sc (Conv2D) (None, 16, 16, 1024) 524288 stage3_unit1_relu1[0][0] __________________________________________________________________________________________________ add_8 (Add) (None, 16, 16, 1024) 0 stage3_unit1_conv3[0][0] stage3_unit1_sc[0][0] __________________________________________________________________________________________________ stage3_unit2_bn1 (BatchNormaliz (None, 16, 16, 1024) 4096 add_8[0][0] __________________________________________________________________________________________________ stage3_unit2_relu1 (Activation) (None, 16, 16, 1024) 0 stage3_unit2_bn1[0][0] __________________________________________________________________________________________________ stage3_unit2_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit2_relu1[0][0] __________________________________________________________________________________________________ stage3_unit2_bn2 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit2_conv1[0][0] __________________________________________________________________________________________________ stage3_unit2_relu2 (Activation) (None, 16, 16, 256) 0 stage3_unit2_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_11 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit2_relu2[0][0] __________________________________________________________________________________________________ stage3_unit2_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_11[0][0] __________________________________________________________________________________________________ stage3_unit2_bn3 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit2_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit2_relu3 (Activation) (None, 16, 16, 256) 0 stage3_unit2_bn3[0][0] __________________________________________________________________________________________________ stage3_unit2_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit2_relu3[0][0] __________________________________________________________________________________________________ add_9 (Add) (None, 16, 16, 1024) 0 stage3_unit2_conv3[0][0] add_8[0][0] __________________________________________________________________________________________________ stage3_unit3_bn1 (BatchNormaliz (None, 16, 16, 1024) 4096 add_9[0][0] __________________________________________________________________________________________________ stage3_unit3_relu1 (Activation) (None, 16, 16, 1024) 0 stage3_unit3_bn1[0][0] __________________________________________________________________________________________________ stage3_unit3_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit3_relu1[0][0] __________________________________________________________________________________________________ stage3_unit3_bn2 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit3_conv1[0][0] __________________________________________________________________________________________________ stage3_unit3_relu2 (Activation) (None, 16, 16, 256) 0 stage3_unit3_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_12 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit3_relu2[0][0] __________________________________________________________________________________________________ stage3_unit3_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_12[0][0] __________________________________________________________________________________________________ stage3_unit3_bn3 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit3_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit3_relu3 (Activation) (None, 16, 16, 256) 0 stage3_unit3_bn3[0][0] __________________________________________________________________________________________________ stage3_unit3_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit3_relu3[0][0] __________________________________________________________________________________________________ add_10 (Add) (None, 16, 16, 1024) 0 stage3_unit3_conv3[0][0] add_9[0][0] __________________________________________________________________________________________________ stage3_unit4_bn1 (BatchNormaliz (None, 16, 16, 1024) 4096 add_10[0][0] __________________________________________________________________________________________________ stage3_unit4_relu1 (Activation) (None, 16, 16, 1024) 0 stage3_unit4_bn1[0][0] __________________________________________________________________________________________________ stage3_unit4_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit4_relu1[0][0] __________________________________________________________________________________________________ stage3_unit4_bn2 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit4_conv1[0][0] __________________________________________________________________________________________________ stage3_unit4_relu2 (Activation) (None, 16, 16, 256) 0 stage3_unit4_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_13 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit4_relu2[0][0] __________________________________________________________________________________________________ stage3_unit4_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_13[0][0] __________________________________________________________________________________________________ stage3_unit4_bn3 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit4_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit4_relu3 (Activation) (None, 16, 16, 256) 0 stage3_unit4_bn3[0][0] __________________________________________________________________________________________________ stage3_unit4_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit4_relu3[0][0] __________________________________________________________________________________________________ add_11 (Add) (None, 16, 16, 1024) 0 stage3_unit4_conv3[0][0] add_10[0][0] __________________________________________________________________________________________________ stage3_unit5_bn1 (BatchNormaliz (None, 16, 16, 1024) 4096 add_11[0][0] __________________________________________________________________________________________________ stage3_unit5_relu1 (Activation) (None, 16, 16, 1024) 0 stage3_unit5_bn1[0][0] __________________________________________________________________________________________________ stage3_unit5_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit5_relu1[0][0] __________________________________________________________________________________________________ stage3_unit5_bn2 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit5_conv1[0][0] __________________________________________________________________________________________________ stage3_unit5_relu2 (Activation) (None, 16, 16, 256) 0 stage3_unit5_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_14 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit5_relu2[0][0] __________________________________________________________________________________________________ stage3_unit5_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_14[0][0] __________________________________________________________________________________________________ stage3_unit5_bn3 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit5_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit5_relu3 (Activation) (None, 16, 16, 256) 0 stage3_unit5_bn3[0][0] __________________________________________________________________________________________________ stage3_unit5_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit5_relu3[0][0] __________________________________________________________________________________________________ add_12 (Add) (None, 16, 16, 1024) 0 stage3_unit5_conv3[0][0] add_11[0][0] __________________________________________________________________________________________________ stage3_unit6_bn1 (BatchNormaliz (None, 16, 16, 1024) 4096 add_12[0][0] __________________________________________________________________________________________________ stage3_unit6_relu1 (Activation) (None, 16, 16, 1024) 0 stage3_unit6_bn1[0][0] __________________________________________________________________________________________________ stage3_unit6_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit6_relu1[0][0] __________________________________________________________________________________________________ stage3_unit6_bn2 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit6_conv1[0][0] __________________________________________________________________________________________________ stage3_unit6_relu2 (Activation) (None, 16, 16, 256) 0 stage3_unit6_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_15 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit6_relu2[0][0] __________________________________________________________________________________________________ stage3_unit6_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_15[0][0] __________________________________________________________________________________________________ stage3_unit6_bn3 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit6_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit6_relu3 (Activation) (None, 16, 16, 256) 0 stage3_unit6_bn3[0][0] __________________________________________________________________________________________________ stage3_unit6_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit6_relu3[0][0] __________________________________________________________________________________________________ add_13 (Add) (None, 16, 16, 1024) 0 stage3_unit6_conv3[0][0] add_12[0][0] __________________________________________________________________________________________________ stage3_unit7_bn1 (BatchNormaliz (None, 16, 16, 1024) 4096 add_13[0][0] __________________________________________________________________________________________________ stage3_unit7_relu1 (Activation) (None, 16, 16, 1024) 0 stage3_unit7_bn1[0][0] __________________________________________________________________________________________________ stage3_unit7_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit7_relu1[0][0] __________________________________________________________________________________________________ stage3_unit7_bn2 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit7_conv1[0][0] __________________________________________________________________________________________________ stage3_unit7_relu2 (Activation) (None, 16, 16, 256) 0 stage3_unit7_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_16 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit7_relu2[0][0] __________________________________________________________________________________________________ stage3_unit7_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_16[0][0] __________________________________________________________________________________________________ stage3_unit7_bn3 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit7_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit7_relu3 (Activation) (None, 16, 16, 256) 0 stage3_unit7_bn3[0][0] __________________________________________________________________________________________________ stage3_unit7_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit7_relu3[0][0] __________________________________________________________________________________________________ add_14 (Add) (None, 16, 16, 1024) 0 stage3_unit7_conv3[0][0] add_13[0][0] __________________________________________________________________________________________________ stage3_unit8_bn1 (BatchNormaliz (None, 16, 16, 1024) 4096 add_14[0][0] __________________________________________________________________________________________________ stage3_unit8_relu1 (Activation) (None, 16, 16, 1024) 0 stage3_unit8_bn1[0][0] __________________________________________________________________________________________________ stage3_unit8_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit8_relu1[0][0] __________________________________________________________________________________________________ stage3_unit8_bn2 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit8_conv1[0][0] __________________________________________________________________________________________________ stage3_unit8_relu2 (Activation) (None, 16, 16, 256) 0 stage3_unit8_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_17 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit8_relu2[0][0] __________________________________________________________________________________________________ stage3_unit8_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_17[0][0] __________________________________________________________________________________________________ stage3_unit8_bn3 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit8_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit8_relu3 (Activation) (None, 16, 16, 256) 0 stage3_unit8_bn3[0][0] __________________________________________________________________________________________________ stage3_unit8_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit8_relu3[0][0] __________________________________________________________________________________________________ add_15 (Add) (None, 16, 16, 1024) 0 stage3_unit8_conv3[0][0] add_14[0][0] __________________________________________________________________________________________________ stage3_unit9_bn1 (BatchNormaliz (None, 16, 16, 1024) 4096 add_15[0][0] __________________________________________________________________________________________________ stage3_unit9_relu1 (Activation) (None, 16, 16, 1024) 0 stage3_unit9_bn1[0][0] __________________________________________________________________________________________________ stage3_unit9_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit9_relu1[0][0] __________________________________________________________________________________________________ stage3_unit9_bn2 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit9_conv1[0][0] __________________________________________________________________________________________________ stage3_unit9_relu2 (Activation) (None, 16, 16, 256) 0 stage3_unit9_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_18 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit9_relu2[0][0] __________________________________________________________________________________________________ stage3_unit9_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_18[0][0] __________________________________________________________________________________________________ stage3_unit9_bn3 (BatchNormaliz (None, 16, 16, 256) 1024 stage3_unit9_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit9_relu3 (Activation) (None, 16, 16, 256) 0 stage3_unit9_bn3[0][0] __________________________________________________________________________________________________ stage3_unit9_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit9_relu3[0][0] __________________________________________________________________________________________________ add_16 (Add) (None, 16, 16, 1024) 0 stage3_unit9_conv3[0][0] add_15[0][0] __________________________________________________________________________________________________ stage3_unit10_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_16[0][0] __________________________________________________________________________________________________ stage3_unit10_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit10_bn1[0][0] __________________________________________________________________________________________________ stage3_unit10_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit10_relu1[0][0] __________________________________________________________________________________________________ stage3_unit10_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit10_conv1[0][0] __________________________________________________________________________________________________ stage3_unit10_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit10_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_19 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit10_relu2[0][0] __________________________________________________________________________________________________ stage3_unit10_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_19[0][0] __________________________________________________________________________________________________ stage3_unit10_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit10_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit10_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit10_bn3[0][0] __________________________________________________________________________________________________ stage3_unit10_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit10_relu3[0][0] __________________________________________________________________________________________________ add_17 (Add) (None, 16, 16, 1024) 0 stage3_unit10_conv3[0][0] add_16[0][0] __________________________________________________________________________________________________ stage3_unit11_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_17[0][0] __________________________________________________________________________________________________ stage3_unit11_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit11_bn1[0][0] __________________________________________________________________________________________________ stage3_unit11_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit11_relu1[0][0] __________________________________________________________________________________________________ stage3_unit11_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit11_conv1[0][0] __________________________________________________________________________________________________ stage3_unit11_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit11_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_20 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit11_relu2[0][0] __________________________________________________________________________________________________ stage3_unit11_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_20[0][0] __________________________________________________________________________________________________ stage3_unit11_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit11_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit11_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit11_bn3[0][0] __________________________________________________________________________________________________ stage3_unit11_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit11_relu3[0][0] __________________________________________________________________________________________________ add_18 (Add) (None, 16, 16, 1024) 0 stage3_unit11_conv3[0][0] add_17[0][0] __________________________________________________________________________________________________ stage3_unit12_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_18[0][0] __________________________________________________________________________________________________ stage3_unit12_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit12_bn1[0][0] __________________________________________________________________________________________________ stage3_unit12_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit12_relu1[0][0] __________________________________________________________________________________________________ stage3_unit12_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit12_conv1[0][0] __________________________________________________________________________________________________ stage3_unit12_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit12_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_21 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit12_relu2[0][0] __________________________________________________________________________________________________ stage3_unit12_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_21[0][0] __________________________________________________________________________________________________ stage3_unit12_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit12_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit12_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit12_bn3[0][0] __________________________________________________________________________________________________ stage3_unit12_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit12_relu3[0][0] __________________________________________________________________________________________________ add_19 (Add) (None, 16, 16, 1024) 0 stage3_unit12_conv3[0][0] add_18[0][0] __________________________________________________________________________________________________ stage3_unit13_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_19[0][0] __________________________________________________________________________________________________ stage3_unit13_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit13_bn1[0][0] __________________________________________________________________________________________________ stage3_unit13_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit13_relu1[0][0] __________________________________________________________________________________________________ stage3_unit13_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit13_conv1[0][0] __________________________________________________________________________________________________ stage3_unit13_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit13_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_22 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit13_relu2[0][0] __________________________________________________________________________________________________ stage3_unit13_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_22[0][0] __________________________________________________________________________________________________ stage3_unit13_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit13_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit13_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit13_bn3[0][0] __________________________________________________________________________________________________ stage3_unit13_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit13_relu3[0][0] __________________________________________________________________________________________________ add_20 (Add) (None, 16, 16, 1024) 0 stage3_unit13_conv3[0][0] add_19[0][0] __________________________________________________________________________________________________ stage3_unit14_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_20[0][0] __________________________________________________________________________________________________ stage3_unit14_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit14_bn1[0][0] __________________________________________________________________________________________________ stage3_unit14_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit14_relu1[0][0] __________________________________________________________________________________________________ stage3_unit14_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit14_conv1[0][0] __________________________________________________________________________________________________ stage3_unit14_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit14_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_23 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit14_relu2[0][0] __________________________________________________________________________________________________ stage3_unit14_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_23[0][0] __________________________________________________________________________________________________ stage3_unit14_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit14_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit14_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit14_bn3[0][0] __________________________________________________________________________________________________ stage3_unit14_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit14_relu3[0][0] __________________________________________________________________________________________________ add_21 (Add) (None, 16, 16, 1024) 0 stage3_unit14_conv3[0][0] add_20[0][0] __________________________________________________________________________________________________ stage3_unit15_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_21[0][0] __________________________________________________________________________________________________ stage3_unit15_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit15_bn1[0][0] __________________________________________________________________________________________________ stage3_unit15_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit15_relu1[0][0] __________________________________________________________________________________________________ stage3_unit15_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit15_conv1[0][0] __________________________________________________________________________________________________ stage3_unit15_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit15_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_24 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit15_relu2[0][0] __________________________________________________________________________________________________ stage3_unit15_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_24[0][0] __________________________________________________________________________________________________ stage3_unit15_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit15_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit15_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit15_bn3[0][0] __________________________________________________________________________________________________ stage3_unit15_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit15_relu3[0][0] __________________________________________________________________________________________________ add_22 (Add) (None, 16, 16, 1024) 0 stage3_unit15_conv3[0][0] add_21[0][0] __________________________________________________________________________________________________ stage3_unit16_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_22[0][0] __________________________________________________________________________________________________ stage3_unit16_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit16_bn1[0][0] __________________________________________________________________________________________________ stage3_unit16_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit16_relu1[0][0] __________________________________________________________________________________________________ stage3_unit16_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit16_conv1[0][0] __________________________________________________________________________________________________ stage3_unit16_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit16_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_25 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit16_relu2[0][0] __________________________________________________________________________________________________ stage3_unit16_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_25[0][0] __________________________________________________________________________________________________ stage3_unit16_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit16_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit16_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit16_bn3[0][0] __________________________________________________________________________________________________ stage3_unit16_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit16_relu3[0][0] __________________________________________________________________________________________________ add_23 (Add) (None, 16, 16, 1024) 0 stage3_unit16_conv3[0][0] add_22[0][0] __________________________________________________________________________________________________ stage3_unit17_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_23[0][0] __________________________________________________________________________________________________ stage3_unit17_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit17_bn1[0][0] __________________________________________________________________________________________________ stage3_unit17_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit17_relu1[0][0] __________________________________________________________________________________________________ stage3_unit17_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit17_conv1[0][0] __________________________________________________________________________________________________ stage3_unit17_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit17_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_26 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit17_relu2[0][0] __________________________________________________________________________________________________ stage3_unit17_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_26[0][0] __________________________________________________________________________________________________ stage3_unit17_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit17_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit17_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit17_bn3[0][0] __________________________________________________________________________________________________ stage3_unit17_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit17_relu3[0][0] __________________________________________________________________________________________________ add_24 (Add) (None, 16, 16, 1024) 0 stage3_unit17_conv3[0][0] add_23[0][0] __________________________________________________________________________________________________ stage3_unit18_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_24[0][0] __________________________________________________________________________________________________ stage3_unit18_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit18_bn1[0][0] __________________________________________________________________________________________________ stage3_unit18_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit18_relu1[0][0] __________________________________________________________________________________________________ stage3_unit18_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit18_conv1[0][0] __________________________________________________________________________________________________ stage3_unit18_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit18_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_27 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit18_relu2[0][0] __________________________________________________________________________________________________ stage3_unit18_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_27[0][0] __________________________________________________________________________________________________ stage3_unit18_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit18_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit18_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit18_bn3[0][0] __________________________________________________________________________________________________ stage3_unit18_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit18_relu3[0][0] __________________________________________________________________________________________________ add_25 (Add) (None, 16, 16, 1024) 0 stage3_unit18_conv3[0][0] add_24[0][0] __________________________________________________________________________________________________ stage3_unit19_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_25[0][0] __________________________________________________________________________________________________ stage3_unit19_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit19_bn1[0][0] __________________________________________________________________________________________________ stage3_unit19_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit19_relu1[0][0] __________________________________________________________________________________________________ stage3_unit19_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit19_conv1[0][0] __________________________________________________________________________________________________ stage3_unit19_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit19_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_28 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit19_relu2[0][0] __________________________________________________________________________________________________ stage3_unit19_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_28[0][0] __________________________________________________________________________________________________ stage3_unit19_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit19_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit19_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit19_bn3[0][0] __________________________________________________________________________________________________ stage3_unit19_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit19_relu3[0][0] __________________________________________________________________________________________________ add_26 (Add) (None, 16, 16, 1024) 0 stage3_unit19_conv3[0][0] add_25[0][0] __________________________________________________________________________________________________ stage3_unit20_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_26[0][0] __________________________________________________________________________________________________ stage3_unit20_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit20_bn1[0][0] __________________________________________________________________________________________________ stage3_unit20_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit20_relu1[0][0] __________________________________________________________________________________________________ stage3_unit20_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit20_conv1[0][0] __________________________________________________________________________________________________ stage3_unit20_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit20_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_29 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit20_relu2[0][0] __________________________________________________________________________________________________ stage3_unit20_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_29[0][0] __________________________________________________________________________________________________ stage3_unit20_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit20_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit20_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit20_bn3[0][0] __________________________________________________________________________________________________ stage3_unit20_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit20_relu3[0][0] __________________________________________________________________________________________________ add_27 (Add) (None, 16, 16, 1024) 0 stage3_unit20_conv3[0][0] add_26[0][0] __________________________________________________________________________________________________ stage3_unit21_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_27[0][0] __________________________________________________________________________________________________ stage3_unit21_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit21_bn1[0][0] __________________________________________________________________________________________________ stage3_unit21_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit21_relu1[0][0] __________________________________________________________________________________________________ stage3_unit21_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit21_conv1[0][0] __________________________________________________________________________________________________ stage3_unit21_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit21_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_30 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit21_relu2[0][0] __________________________________________________________________________________________________ stage3_unit21_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_30[0][0] __________________________________________________________________________________________________ stage3_unit21_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit21_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit21_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit21_bn3[0][0] __________________________________________________________________________________________________ stage3_unit21_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit21_relu3[0][0] __________________________________________________________________________________________________ add_28 (Add) (None, 16, 16, 1024) 0 stage3_unit21_conv3[0][0] add_27[0][0] __________________________________________________________________________________________________ stage3_unit22_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_28[0][0] __________________________________________________________________________________________________ stage3_unit22_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit22_bn1[0][0] __________________________________________________________________________________________________ stage3_unit22_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit22_relu1[0][0] __________________________________________________________________________________________________ stage3_unit22_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit22_conv1[0][0] __________________________________________________________________________________________________ stage3_unit22_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit22_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_31 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit22_relu2[0][0] __________________________________________________________________________________________________ stage3_unit22_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_31[0][0] __________________________________________________________________________________________________ stage3_unit22_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit22_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit22_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit22_bn3[0][0] __________________________________________________________________________________________________ stage3_unit22_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit22_relu3[0][0] __________________________________________________________________________________________________ add_29 (Add) (None, 16, 16, 1024) 0 stage3_unit22_conv3[0][0] add_28[0][0] __________________________________________________________________________________________________ stage3_unit23_bn1 (BatchNormali (None, 16, 16, 1024) 4096 add_29[0][0] __________________________________________________________________________________________________ stage3_unit23_relu1 (Activation (None, 16, 16, 1024) 0 stage3_unit23_bn1[0][0] __________________________________________________________________________________________________ stage3_unit23_conv1 (Conv2D) (None, 16, 16, 256) 262144 stage3_unit23_relu1[0][0] __________________________________________________________________________________________________ stage3_unit23_bn2 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit23_conv1[0][0] __________________________________________________________________________________________________ stage3_unit23_relu2 (Activation (None, 16, 16, 256) 0 stage3_unit23_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_32 (ZeroPadding2 (None, 18, 18, 256) 0 stage3_unit23_relu2[0][0] __________________________________________________________________________________________________ stage3_unit23_conv2 (Conv2D) (None, 16, 16, 256) 589824 zero_padding2d_32[0][0] __________________________________________________________________________________________________ stage3_unit23_bn3 (BatchNormali (None, 16, 16, 256) 1024 stage3_unit23_conv2[0][0] 
__________________________________________________________________________________________________ stage3_unit23_relu3 (Activation (None, 16, 16, 256) 0 stage3_unit23_bn3[0][0] __________________________________________________________________________________________________ stage3_unit23_conv3 (Conv2D) (None, 16, 16, 1024) 262144 stage3_unit23_relu3[0][0] __________________________________________________________________________________________________ add_30 (Add) (None, 16, 16, 1024) 0 stage3_unit23_conv3[0][0] add_29[0][0] __________________________________________________________________________________________________ stage4_unit1_bn1 (BatchNormaliz (None, 16, 16, 1024) 4096 add_30[0][0] __________________________________________________________________________________________________ stage4_unit1_relu1 (Activation) (None, 16, 16, 1024) 0 stage4_unit1_bn1[0][0] __________________________________________________________________________________________________ stage4_unit1_conv1 (Conv2D) (None, 16, 16, 512) 524288 stage4_unit1_relu1[0][0] __________________________________________________________________________________________________ stage4_unit1_bn2 (BatchNormaliz (None, 16, 16, 512) 2048 stage4_unit1_conv1[0][0] __________________________________________________________________________________________________ stage4_unit1_relu2 (Activation) (None, 16, 16, 512) 0 stage4_unit1_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_33 (ZeroPadding2 (None, 18, 18, 512) 0 stage4_unit1_relu2[0][0] __________________________________________________________________________________________________ stage4_unit1_conv2 (Conv2D) (None, 8, 8, 512) 2359296 zero_padding2d_33[0][0] __________________________________________________________________________________________________ stage4_unit1_bn3 (BatchNormaliz (None, 8, 8, 512) 2048 stage4_unit1_conv2[0][0] 
__________________________________________________________________________________________________ stage4_unit1_relu3 (Activation) (None, 8, 8, 512) 0 stage4_unit1_bn3[0][0] __________________________________________________________________________________________________ stage4_unit1_conv3 (Conv2D) (None, 8, 8, 2048) 1048576 stage4_unit1_relu3[0][0] __________________________________________________________________________________________________ stage4_unit1_sc (Conv2D) (None, 8, 8, 2048) 2097152 stage4_unit1_relu1[0][0] __________________________________________________________________________________________________ add_31 (Add) (None, 8, 8, 2048) 0 stage4_unit1_conv3[0][0] stage4_unit1_sc[0][0] __________________________________________________________________________________________________ stage4_unit2_bn1 (BatchNormaliz (None, 8, 8, 2048) 8192 add_31[0][0] __________________________________________________________________________________________________ stage4_unit2_relu1 (Activation) (None, 8, 8, 2048) 0 stage4_unit2_bn1[0][0] __________________________________________________________________________________________________ stage4_unit2_conv1 (Conv2D) (None, 8, 8, 512) 1048576 stage4_unit2_relu1[0][0] __________________________________________________________________________________________________ stage4_unit2_bn2 (BatchNormaliz (None, 8, 8, 512) 2048 stage4_unit2_conv1[0][0] __________________________________________________________________________________________________ stage4_unit2_relu2 (Activation) (None, 8, 8, 512) 0 stage4_unit2_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_34 (ZeroPadding2 (None, 10, 10, 512) 0 stage4_unit2_relu2[0][0] __________________________________________________________________________________________________ stage4_unit2_conv2 (Conv2D) (None, 8, 8, 512) 2359296 zero_padding2d_34[0][0] 
__________________________________________________________________________________________________ stage4_unit2_bn3 (BatchNormaliz (None, 8, 8, 512) 2048 stage4_unit2_conv2[0][0] __________________________________________________________________________________________________ stage4_unit2_relu3 (Activation) (None, 8, 8, 512) 0 stage4_unit2_bn3[0][0] __________________________________________________________________________________________________ stage4_unit2_conv3 (Conv2D) (None, 8, 8, 2048) 1048576 stage4_unit2_relu3[0][0] __________________________________________________________________________________________________ add_32 (Add) (None, 8, 8, 2048) 0 stage4_unit2_conv3[0][0] add_31[0][0] __________________________________________________________________________________________________ stage4_unit3_bn1 (BatchNormaliz (None, 8, 8, 2048) 8192 add_32[0][0] __________________________________________________________________________________________________ stage4_unit3_relu1 (Activation) (None, 8, 8, 2048) 0 stage4_unit3_bn1[0][0] __________________________________________________________________________________________________ stage4_unit3_conv1 (Conv2D) (None, 8, 8, 512) 1048576 stage4_unit3_relu1[0][0] __________________________________________________________________________________________________ stage4_unit3_bn2 (BatchNormaliz (None, 8, 8, 512) 2048 stage4_unit3_conv1[0][0] __________________________________________________________________________________________________ stage4_unit3_relu2 (Activation) (None, 8, 8, 512) 0 stage4_unit3_bn2[0][0] __________________________________________________________________________________________________ zero_padding2d_35 (ZeroPadding2 (None, 10, 10, 512) 0 stage4_unit3_relu2[0][0] __________________________________________________________________________________________________ stage4_unit3_conv2 (Conv2D) (None, 8, 8, 512) 2359296 zero_padding2d_35[0][0] 
__________________________________________________________________________________________________ stage4_unit3_bn3 (BatchNormaliz (None, 8, 8, 512) 2048 stage4_unit3_conv2[0][0] __________________________________________________________________________________________________ stage4_unit3_relu3 (Activation) (None, 8, 8, 512) 0 stage4_unit3_bn3[0][0] __________________________________________________________________________________________________ stage4_unit3_conv3 (Conv2D) (None, 8, 8, 2048) 1048576 stage4_unit3_relu3[0][0] __________________________________________________________________________________________________ add_33 (Add) (None, 8, 8, 2048) 0 stage4_unit3_conv3[0][0] add_32[0][0] __________________________________________________________________________________________________ bn1 (BatchNormalization) (None, 8, 8, 2048) 8192 add_33[0][0] __________________________________________________________________________________________________ relu1 (Activation) (None, 8, 8, 2048) 0 bn1[0][0] __________________________________________________________________________________________________ decoder_stage0_upsampling (UpSa (None, 16, 16, 2048) 0 relu1[0][0] __________________________________________________________________________________________________ decoder_stage0_concat (Concaten (None, 16, 16, 3072) 0 decoder_stage0_upsampling[0][0] stage4_unit1_relu1[0][0] __________________________________________________________________________________________________ decoder_stage0a_conv (Conv2D) (None, 16, 16, 256) 7077888 decoder_stage0_concat[0][0] __________________________________________________________________________________________________ decoder_stage0a_bn (BatchNormal (None, 16, 16, 256) 1024 decoder_stage0a_conv[0][0] __________________________________________________________________________________________________ decoder_stage0a_relu (Activatio (None, 16, 16, 256) 0 decoder_stage0a_bn[0][0] 
__________________________________________________________________________________________________ decoder_stage0b_conv (Conv2D) (None, 16, 16, 256) 589824 decoder_stage0a_relu[0][0] __________________________________________________________________________________________________ decoder_stage0b_bn (BatchNormal (None, 16, 16, 256) 1024 decoder_stage0b_conv[0][0] __________________________________________________________________________________________________ decoder_stage0b_relu (Activatio (None, 16, 16, 256) 0 decoder_stage0b_bn[0][0] __________________________________________________________________________________________________ decoder_stage1_upsampling (UpSa (None, 32, 32, 256) 0 decoder_stage0b_relu[0][0] __________________________________________________________________________________________________ decoder_stage1_concat (Concaten (None, 32, 32, 768) 0 decoder_stage1_upsampling[0][0] stage3_unit1_relu1[0][0] __________________________________________________________________________________________________ decoder_stage1a_conv (Conv2D) (None, 32, 32, 128) 884736 decoder_stage1_concat[0][0] __________________________________________________________________________________________________ decoder_stage1a_bn (BatchNormal (None, 32, 32, 128) 512 decoder_stage1a_conv[0][0] __________________________________________________________________________________________________ decoder_stage1a_relu (Activatio (None, 32, 32, 128) 0 decoder_stage1a_bn[0][0] __________________________________________________________________________________________________ decoder_stage1b_conv (Conv2D) (None, 32, 32, 128) 147456 decoder_stage1a_relu[0][0] __________________________________________________________________________________________________ decoder_stage1b_bn (BatchNormal (None, 32, 32, 128) 512 decoder_stage1b_conv[0][0] __________________________________________________________________________________________________ decoder_stage1b_relu (Activatio (None, 32, 32, 128) 0 
decoder_stage1b_bn[0][0] __________________________________________________________________________________________________ decoder_stage2_upsampling (UpSa (None, 64, 64, 128) 0 decoder_stage1b_relu[0][0] __________________________________________________________________________________________________ decoder_stage2_concat (Concaten (None, 64, 64, 384) 0 decoder_stage2_upsampling[0][0] stage2_unit1_relu1[0][0] __________________________________________________________________________________________________ decoder_stage2a_conv (Conv2D) (None, 64, 64, 64) 221184 decoder_stage2_concat[0][0] __________________________________________________________________________________________________ decoder_stage2a_bn (BatchNormal (None, 64, 64, 64) 256 decoder_stage2a_conv[0][0] __________________________________________________________________________________________________ decoder_stage2a_relu (Activatio (None, 64, 64, 64) 0 decoder_stage2a_bn[0][0] __________________________________________________________________________________________________ decoder_stage2b_conv (Conv2D) (None, 64, 64, 64) 36864 decoder_stage2a_relu[0][0] __________________________________________________________________________________________________ decoder_stage2b_bn (BatchNormal (None, 64, 64, 64) 256 decoder_stage2b_conv[0][0] __________________________________________________________________________________________________ decoder_stage2b_relu (Activatio (None, 64, 64, 64) 0 decoder_stage2b_bn[0][0] __________________________________________________________________________________________________ decoder_stage3_upsampling (UpSa (None, 128, 128, 64) 0 decoder_stage2b_relu[0][0] __________________________________________________________________________________________________ decoder_stage3_concat (Concaten (None, 128, 128, 128 0 decoder_stage3_upsampling[0][0] relu0[0][0] __________________________________________________________________________________________________ decoder_stage3a_conv 
(Conv2D) (None, 128, 128, 32) 36864 decoder_stage3_concat[0][0] __________________________________________________________________________________________________ decoder_stage3a_bn (BatchNormal (None, 128, 128, 32) 128 decoder_stage3a_conv[0][0] __________________________________________________________________________________________________ decoder_stage3a_relu (Activatio (None, 128, 128, 32) 0 decoder_stage3a_bn[0][0] __________________________________________________________________________________________________ decoder_stage3b_conv (Conv2D) (None, 128, 128, 32) 9216 decoder_stage3a_relu[0][0] __________________________________________________________________________________________________ decoder_stage3b_bn (BatchNormal (None, 128, 128, 32) 128 decoder_stage3b_conv[0][0] __________________________________________________________________________________________________ decoder_stage3b_relu (Activatio (None, 128, 128, 32) 0 decoder_stage3b_bn[0][0] __________________________________________________________________________________________________ decoder_stage4_upsampling (UpSa (None, 256, 256, 32) 0 decoder_stage3b_relu[0][0] __________________________________________________________________________________________________ decoder_stage4a_conv (Conv2D) (None, 256, 256, 16) 4608 decoder_stage4_upsampling[0][0] __________________________________________________________________________________________________ decoder_stage4a_bn (BatchNormal (None, 256, 256, 16) 64 decoder_stage4a_conv[0][0] __________________________________________________________________________________________________ decoder_stage4a_relu (Activatio (None, 256, 256, 16) 0 decoder_stage4a_bn[0][0] __________________________________________________________________________________________________ decoder_stage4b_conv (Conv2D) (None, 256, 256, 16) 2304 decoder_stage4a_relu[0][0] __________________________________________________________________________________________________ decoder_stage4b_bn 
(BatchNormal (None, 256, 256, 16) 64 decoder_stage4b_conv[0][0] __________________________________________________________________________________________________ decoder_stage4b_relu (Activatio (None, 256, 256, 16) 0 decoder_stage4b_bn[0][0] __________________________________________________________________________________________________ final_conv (Conv2D) (None, 256, 256, 1) 145 decoder_stage4b_relu[0][0] __________________________________________________________________________________________________ sigmoid (Activation) (None, 256, 256, 1) 0 final_conv[0][0] ================================================================================================== Total params: 51,605,466 Trainable params: 51,505,684 Non-trainable params: 99,782 __________________________________________________________________________________________________
MIT
Train_and_Test_Notebooks/ResNet101_RoadTest.ipynb
parshwa1999/Map-Segmentation
HYPER_PARAMETERS
LEARNING_RATE = 0.0001
_____no_output_____
MIT
Train_and_Test_Notebooks/ResNet101_RoadTest.ipynb
parshwa1999/Map-Segmentation
Initializing Callbacks
#from tensorboardcolab import TensorBoardColab, TensorBoardColabCallback
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from datetime import datetime

# Persist only the best model (lowest validation loss) seen so far.
model_path = "./Models/Resnet_road_weights.h5"
checkpointer = ModelCheckpoint(model_path, monitor="val_loss", mode="min",
                               save_best_only=True, verbose=1)

# Stop after 5 epochs without val_loss improvement and roll back to the
# best weights observed.
earlystopper = EarlyStopping(monitor='val_loss', min_delta=0, patience=5,
                             verbose=1, restore_best_weights=True)

# Shrink the learning rate 10x when val_loss plateaus for 4 epochs.
# BUG FIX: the `epsilon` argument was renamed to `min_delta` in Keras 2.1.6
# and later removed; passing `epsilon` raises a TypeError on modern Keras.
lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=4,
                               verbose=1, min_delta=1e-4)
_____no_output_____
MIT
Train_and_Test_Notebooks/ResNet101_RoadTest.ipynb
parshwa1999/Map-Segmentation
Compiling the model
# Compile the model with the Adam optimizer.
# BUG FIX: `keras.optimizers.adam` (lowercase factory) was removed in newer
# Keras releases -- instantiate the `Adam` class instead.
opt = keras.optimizers.Adam(LEARNING_RATE)
model.compile(
    optimizer=opt,
    loss=soft_dice_loss,   # assumes soft_dice_loss / iou_coef defined in an earlier cell -- confirm
    metrics=[iou_coef])
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/optimizers.py:793: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.
MIT
Train_and_Test_Notebooks/ResNet101_RoadTest.ipynb
parshwa1999/Map-Segmentation
Testing our Model On Test Images
model.load_weights("Models/Resnet_road_weights.h5")

import cv2
import glob
import numpy as np
import h5py

# Load paired test images and masks. Masks live under TestM/ with the same
# file names as the images under TestI/.
test_masks = []
test_images = []
files = glob.glob("TestI/*.png")
for myFile in files:
    print(myFile)
    image = cv2.imread(myFile)
    test_images.append(image)
    # Swap the 'TestI' prefix for 'TestM' to locate the matching mask.
    myFile = 'TestM' + myFile[5:len(myFile)]
    image = cv2.cvtColor(cv2.imread(myFile), cv2.COLOR_BGR2GRAY)
    test_masks.append(image)

test_images = np.array(test_images)
test_masks = np.array(test_masks)
test_masks = np.expand_dims(test_masks, -1)   # (N, H, W) -> (N, H, W, 1)
print("Unique elements in the train mask:", np.unique(test_masks))
print(test_images.shape)
print(test_masks.shape)

# Normalise to [0, 1]; float16 halves memory versus float32.
test_images = test_images.astype(np.float16)/255
test_masks = test_masks.astype(np.float16)/255

import sys

def sizeof_fmt(num, suffix='B'):
    ''' by Fred Cirera, https://stackoverflow.com/a/1094933/1870254, modified'''
    for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
        if abs(num) < 1024.0:
            return "%3.1f %s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f %s%s" % (num, 'Yi', suffix)

# Show the ten largest local objects to keep an eye on memory use.
for name, size in sorted(((name, sys.getsizeof(value)) for name, value in locals().items()),
                         key=lambda x: -x[1])[:10]:
    print("{:>30}: {:>8}".format(name, sizeof_fmt(size)))

# BUG FIX: the original re-ran cv2.COLOR_BGR2GRAY over `test_masks` here, but
# the masks were already converted to single-channel grayscale above (and cast
# to float16), so that second conversion raised a cv2 error. The redundant
# pass is removed.

model.evaluate(test_images, test_masks)
# Run inference on the test set and write per-sample artifacts to Results/.
predictions = model.predict(test_images, verbose=1)

# Binarise the sigmoid output; 0.1 is a deliberately low road threshold.
thresh_val = 0.1
predicton_threshold = (predictions > thresh_val).astype(np.uint8)

# Quick visual sanity check of a single prediction map.
plt.figure()
#plt.subplot(2, 1, 1)
plt.imshow(np.squeeze(predictions[19][:,:,0]))
plt.show()

import matplotlib
for i in range(len(predictions)):
    #print("Results/" + str(i) + "Image.png")
    # NOTE(review): only channel 0 of the BGR input is saved here, so the
    # "Image" artifact is the blue channel, not the full image -- confirm intent.
    matplotlib.image.imsave( "Results/" + str(i) + "Image.png" , np.squeeze(test_images[i][:,:,0]))
    matplotlib.image.imsave( "Results/" + str(i) + "GroundTruth.png" , np.squeeze(test_masks[i][:,:,0]))
    #cv2.imwrite( "/home/bisag/Desktop/Road-Segmentation/Results/" + str(i) + "Prediction.png" , np.squeeze(predictions[i][:,:,0]))
    #cv2.imwrite( "/home/bisag/Desktop/Road-Segmentation/Results/" + str(i) + "Prediction_Threshold.png" , np.squeeze(predicton_threshold[i][:,:,0]))
    #matplotlib.image.imsave('/home/bisag/Desktop/Road-Segmentation/Results/000.png', np.squeeze(predicton_threshold[0][:,:,0]))
    matplotlib.image.imsave("Results/" + str(i) + "Prediction.png" , np.squeeze(predictions[i][:,:,0]))
    matplotlib.image.imsave( "Results/" + str(i) + "Prediction_Threshold.png" , np.squeeze(predicton_threshold[i][:,:,0]))

#imshow(np.squeeze(predictions[0][:,:,0]))
#import scipy.misc
#scipy.misc.imsave('/home/bisag/Desktop/Road-Segmentation/Results/00.png', np.squeeze(predictions[0][:,:,0]))

# NOTE(review): this loads a *different* weights file from an absolute home
# path right after saving artifacts -- presumably a leftover from another
# run; confirm it is intentional.
model.load_weights("/home/parshwa/Desktop/Road-Segmentation/Models/weights.h5")
_____no_output_____
MIT
Train_and_Test_Notebooks/ResNet101_RoadTest.ipynb
parshwa1999/Map-Segmentation
Just Test
"""Test"""
import cv2
import glob
import numpy as np
import h5py

# Load unlabeled test images (no ground-truth masks in this directory).
test_images = []
files = glob.glob("/home/parshwa/Desktop/Road-Segmentation/Test/*.png")
for myFile in files:
    print(myFile)
    image = cv2.imread(myFile)
    test_images.append(image)

test_images = np.array(test_images)
print(test_images.shape)

# Predict and binarise with a low road threshold.
predictions = model.predict(test_images, verbose=1)
thresh_val = 0.1
predicton_threshold = (predictions > thresh_val).astype(np.uint8)

import matplotlib
for i in range(len(predictions)):
    # Save channel 0 of the input plus raw and thresholded predictions.
    cv2.imwrite("/home/parshwa/Desktop/Road-Segmentation/Results/" + str(i) + "Image.png" , np.squeeze(test_images[i][:,:,0]))
    matplotlib.image.imsave("/home/parshwa/Desktop/Road-Segmentation/Results/" + str(i) + "Prediction.png" , np.squeeze(predictions[i][:,:,0]))
    matplotlib.image.imsave( "/home/parshwa/Desktop/Road-Segmentation/Results/" + str(i) + "Prediction_Threshold.png" , np.squeeze(predicton_threshold[i][:,:,0]))

# BUG FIX: the original called a bare `imshow(...)`, which is undefined in
# this notebook (matplotlib.pyplot is imported as plt) and raised NameError.
plt.imshow(np.squeeze(predictions[0][:,:,0]))
plt.show()

"""Visualise"""

def layer_to_visualize(layer):
    """Plot every filter activation of `layer` for the global `img_to_visualize`.

    Relies on module-level `model`, `K` (keras backend), `plt`, and
    `img_to_visualize` being defined by earlier cells.
    """
    inputs = [K.learning_phase()] + model.inputs

    _convout1_f = K.function(inputs, [layer.output])

    def convout1_f(X):
        # The [0] is to disable the training phase flag
        return _convout1_f([0] + [X])

    convolutions = convout1_f(img_to_visualize)
    convolutions = np.squeeze(convolutions)
    print('Shape of conv:', convolutions.shape)

    # Lay filters out on a near-square grid.
    n = convolutions.shape[0]
    n = int(np.ceil(np.sqrt(n)))

    # Visualization of each filter of the layer
    fig = plt.figure(figsize=(12, 8))
    for i in range(len(convolutions)):
        ax = fig.add_subplot(n, n, i+1)
        ax.imshow(convolutions[i], cmap='gray')
_____no_output_____
MIT
Train_and_Test_Notebooks/ResNet101_RoadTest.ipynb
parshwa1999/Map-Segmentation
Seaborn (Working on the diabetes dataset)
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('diabetes.csv')

# Age vs blood pressure with a fitted regression line.
sns.lmplot(x='Age', y='BloodPressure', data=df)
plt.show()

sns.lmplot(x='Age', y='Glucose', data=df,
           fit_reg=False,   # No regression line
           hue='Outcome')   # color by evaluation stage
plt.show()

sns.swarmplot(x='Outcome', y='Glucose', data=df)
plt.show()

sns.lmplot(x='Outcome', y='Glucose', data=df)
plt.show()

# Glucose distribution for each outcome class.
sns.boxplot(data=df.loc[df['Outcome']==0, ['Glucose']])
plt.show()
sns.boxplot(data=df.loc[df['Outcome']==1, ['Glucose']])
plt.show()

# BUG FIX: `df.corr` without parentheses is a no-op attribute access; call
# the method (and print it) to actually compute the correlation matrix.
print(df.corr())

sns.set(style='ticks', color_codes=True)
sns.pairplot(df)
plt.show()

# Target vector and feature matrix (first 8 columns).
y = df['Outcome']
y
x = df.iloc[:, 0:8]
x
_____no_output_____
MIT
Session_2_ and_3_Seaborn.ipynb
reeha-parkar/applied-statistics
Homework 4Today we'll start by reproducing the DQN and then try improving it with the tricks we learned on the lecture:* Target networks* Double q-learning* Prioritized experience replay* Dueling DQN* Bootstrap DQN
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline

# If you are running on a server, launch xvfb to record game videos
# Please make sure you have xvfb installed
import os
# DISPLAY unset or empty -> headless server: start a virtual framebuffer
# and point rendering at it.
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
    !bash ../xvfb start
    os.environ['DISPLAY'] = ':1'
_____no_output_____
Unlicense
week04_approx_rl/homework_lasagne.ipynb
Kirili4ik/Practical_RL
Processing game image (2 pts)Raw Atari images are large, 210x160x3 by default. However, we don't need that level of detail in order to learn them.We can thus save a lot of time by preprocessing game image, including* Resizing to a smaller shape* Converting to grayscale* Cropping irrelevant image parts
from gym.core import ObservationWrapper
from gym.spaces import Box
from scipy.misc import imresize


class PreprocessAtari(ObservationWrapper):
    # Homework template: the actual preprocessing body is left as an
    # exercise (placeholders below).
    def __init__(self, env):
        """A gym wrapper that crops, scales image into the desired shapes and optionally grayscales it."""
        ObservationWrapper.__init__(self, env)

        self.img_size = (64, 64)
        # Observations become float images in [0, 1] of shape img_size.
        self.observation_space = Box(0.0, 1.0, self.img_size)

    def observation(self, img):
        """what happens to each observation"""
        # Here's what you need to do:
        #  * crop image, remove irrelevant parts
        #  * resize image to self.img_size
        #    (use imresize imported above or any library you want,
        #     e.g. opencv, skimage, PIL, keras)
        #  * cast image to grayscale
        #  * convert image pixels to (0,1) range, float32 type
        <Your code here>
        return <YOUR CODE>


import gym

def make_env():
    env = gym.make("KungFuMasterDeterministic-v0")  # create raw env
    return PreprocessAtari(env)  # apply your wrapper

# spawn game instance for tests
env = make_env()
observation_shape = env.observation_space.shape
n_actions = env.action_space.n

obs = env.reset()

# test observation
assert obs.shape == observation_shape
assert obs.dtype == 'float32'
assert len(np.unique(obs)) > 2, "your image must not be binary"
assert 0 <= np.min(obs) and np.max(
    obs) <= 1, "convert image pixels to (0,1) range"

# NOTE(review): Python-2 print statement -- this notebook targets Python 2
# (see also the py2 lambda syntax in later cells).
print "Formal tests seem fine. Here's an example of what you'll get."

plt.title("what your network gonna see")
plt.imshow(obs, interpolation='none', cmap='gray')

# Render a 4x4 grid of frames, stepping the env 10 random ticks per panel.
plt.figure(figsize=[12, 12])
env.reset()
for i in range(16):
    for _ in range(10):
        new_obs = env.step(env.action_space.sample())[0]
    plt.subplot(4, 4, i+1)
    plt.imshow(new_obs, interpolation='none', cmap='gray')

# dispose of the game instance
del env
_____no_output_____
Unlicense
week04_approx_rl/homework_lasagne.ipynb
Kirili4ik/Practical_RL
Building a DQN (2 pts)Here we define a simple agent that maps game images into Qvalues using simple convolutional neural network.![scheme](https://s18.postimg.cc/gbmsq6gmx/dqn_scheme.png)
# setup theano/lasagne. Prefer GPU. Fallback to CPU (will print warning) %env THEANO_FLAGS = floatX = float32 import theano import lasagne from lasagne.layers import * from theano import tensor as T # observation observation_layer = InputLayer( (None,)+observation_shape) # game image, [batch,64,64] # 4-tick window over images from agentnet.memory import WindowAugmentation # window size [batch,4,64,64] prev_wnd = InputLayer((None, 4)+observation_shape) new_wnd = WindowAugmentation( < current observation layer> , prev_wnd) # if you changed img size, remove assert assert new_wnd.output_shape == (None, 4, 64, 64) from lasagne.nonlinearities import elu, tanh, softmax, rectify <network body, growing from new_wnd. several conv layers or something similar would do> dense = <final dense layer with 256 neurons> # qvalues layer qvalues_layer = <a dense layer that predicts q-values> assert qvalues_layer.nonlinearity is not rectify # sample actions proportionally to policy_layer from agentnet.resolver import EpsilonGreedyResolver action_layer = EpsilonGreedyResolver(qvalues_layer)
_____no_output_____
Unlicense
week04_approx_rl/homework_lasagne.ipynb
Kirili4ik/Practical_RL
Define agentHere you will need to declare how your agent works* `observation_layers` and `action_layers` are the input and output of agent in MDP.* `policy_estimators` must contain whatever you need for training * In our case, that's `qvalues_layer`, but you'll need to add more when implementing target network.* agent_states contains our frame buffer. * The code `{new_wnd:prev_wnd}` reads as "`new_wnd becomes prev_wnd next turn`"
from agentnet.agent import Agent # agent agent = Agent(observation_layers=<YOUR CODE>, policy_estimators=<YOUR CODE>, action_layers=<YOUR CODE>, agent_states={new_wnd: prev_wnd},)
_____no_output_____
Unlicense
week04_approx_rl/homework_lasagne.ipynb
Kirili4ik/Practical_RL
Create and manage a pool of Atari sessions to play with* To make training more stable, we shall have an entire batch of game sessions each happening independent of others* Why several parallel agents help training: http://arxiv.org/pdf/1602.01783v1.pdf* Alternative approach: store more sessions: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf
from agentnet.experiments.openai_gym.pool import EnvPool

# 16 parallel game sessions
pool = EnvPool(agent, make_env, n_games=16)

%%time
# interact for 7 ticks
# NOTE(review): comment says 7 ticks but interact(5) runs 5 -- confirm intent.
_, action_log, reward_log, _, _, _ = pool.interact(5)

print('actions:')
print(action_log[0])
print("rewards")
print(reward_log[0])

# load first sessions (this function calls interact and remembers sessions)
SEQ_LENGTH = 10  # sub-session length
pool.update(SEQ_LENGTH)
_____no_output_____
Unlicense
week04_approx_rl/homework_lasagne.ipynb
Kirili4ik/Practical_RL
Q-learningWe train our agent based on sessions it has played in `pool.update(SEQ_LENGTH)`To do so, we first obtain sequences of observations, rewards, actions, q-values, etc.Actions and rewards have shape `[n_games,seq_length]`, q-values are `[n_games,seq_length,n_actions]`
# get agent's Qvalues obtained via experience replay replay = pool.experience_replay actions, rewards, is_alive = replay.actions[0], replay.rewards, replay.is_alive _, _, _, _, qvalues = agent.get_sessions( replay, session_length=SEQ_LENGTH, experience_replay=True, ) assert actions.ndim == rewards.ndim == is_alive.ndim == 2, "actions, rewards and is_alive must have shape [batch,time]" assert qvalues.ndim == 3, "q-values must have shape [batch,time,n_actions]" # compute V(s) as Qvalues of best actions. # For homework assignment, you will need to use target net # or special double q-learning objective here state_values_target = <YOUR CODE: compute V(s) 2d tensor by taking T.argmax of qvalues over correct axis> assert state_values_target.eval().shape = qvalues.eval().shape[:2] from agentnet.learning.generic import get_n_step_value_reference # get reference Q-values via Q-learning algorithm reference_qvalues = get_n_step_value_reference( state_values=state_values_target, rewards=rewards/100., is_alive=is_alive, n_steps=10, gamma_or_gammas=0.99, ) # consider it constant from theano.gradient import disconnected_grad reference_qvalues = disconnected_grad(reference_qvalues) # get predicted Q-values for committed actions by both current and target networks from agentnet.learning.generic import get_values_for_actions action_qvalues = get_values_for_actions(qvalues, actions) # loss for Qlearning = # (Q(s,a) - (r+ gamma*r' + gamma^2*r'' + ... +gamma^10*Q(s_{t+10},a_max)))^2 elwise_mse_loss = <mean squared error between action qvalues and reference qvalues> # mean over all batches and time ticks loss = (elwise_mse_loss*is_alive).mean() # Since it's a single lasagne network, one can get it's weights, output, etc weights = <YOUR CODE: get all trainable params> weights # Compute weight updates updates = <your favorite optimizer> # compile train function train_step = theano.function([], loss, updates=updates)
_____no_output_____
Unlicense
week04_approx_rl/homework_lasagne.ipynb
Kirili4ik/Practical_RL
Demo runas usual...
# Evaluate the untrained agent with a small exploration epsilon and
# record a video of the session.
action_layer.epsilon.set_value(0.05)
untrained_reward = np.mean(pool.evaluate(save_path="./records", record_video=True))

# show video
from IPython.display import HTML
import os

video_names = list(
    filter(lambda s: s.endswith(".mp4"), os.listdir("./records/")))

HTML("""
<video width="640" height="480" controls>
  <source src="{}" type="video/mp4">
</video>
""".format("./records/" + video_names[-1]))  # this may or may not be _last_ video. Try other indices
_____no_output_____
Unlicense
week04_approx_rl/homework_lasagne.ipynb
Kirili4ik/Practical_RL
Training loop
# starting epoch
epoch_counter = 1

# full game rewards
rewards = {}
# Exponentially-smoothed running stats.
loss, reward_per_tick, reward = 0, 0, 0

from tqdm import trange
from IPython.display import clear_output

for i in trange(150000):
    # update agent's epsilon (in e-greedy policy): decay 0.5 -> 0.05
    current_epsilon = 0.05 + 0.45*np.exp(-epoch_counter/20000.)
    action_layer.epsilon.set_value(np.float32(current_epsilon))

    # play
    pool.update(SEQ_LENGTH)

    # train (exponential moving average of the minibatch loss)
    loss = 0.95*loss + 0.05*train_step()

    if epoch_counter % 10 == 0:
        # average reward per game tick in current experience replay pool
        reward_per_tick = 0.95*reward_per_tick + 0.05 * \
            pool.experience_replay.rewards.get_value().mean()
        print("iter=%i\tepsilon=%.3f\tloss=%.3f\treward/tick=%.3f" % (
            epoch_counter, current_epsilon, loss, reward_per_tick))

    # record current learning progress and show learning curves
    if epoch_counter % 100 == 0:
        # Evaluate greedily-ish (low epsilon), then restore training epsilon.
        action_layer.epsilon.set_value(0.05)
        reward = 0.95*reward + 0.05*np.mean(pool.evaluate(record_video=False))
        action_layer.epsilon.set_value(np.float32(current_epsilon))

        rewards[epoch_counter] = reward

        clear_output(True)
        # BUG FIX: `lambda (t, r): t` is Python-2-only tuple-parameter
        # syntax (a SyntaxError on Python 3); index the pair instead.
        plt.plot(*zip(*sorted(rewards.items(), key=lambda tr: tr[0])))
        plt.show()

    epoch_counter += 1

# Time to drink some coffee!
_____no_output_____
Unlicense
week04_approx_rl/homework_lasagne.ipynb
Kirili4ik/Practical_RL
Evaluating results * Here we plot learning curves and sample testimonials
import pandas as pd

# Learning curve over recorded checkpoints.
plt.plot(*zip(*sorted(rewards.items(), key=lambda k: k[0])))

# Persist the trained network weights.
from agentnet.utils.persistence import save, load
save(action_layer, "pacman.pcl")

action_layer.epsilon.set_value(0.05)
# NOTE(review): record_video=False, so this evaluation produces no new .mp4;
# the player below shows a previously recorded video from ./records.
rw = pool.evaluate(n_games=20, save_path="./records", record_video=False)
# BUG FIX: the original format string was "%f.5" (prints the float followed
# by a literal ".5"); "%.5f" is the intended 5-decimal format.
print("mean session score=%.5f" % np.mean(rw))

# show video
from IPython.display import HTML
import os

video_names = list(
    filter(lambda s: s.endswith(".mp4"), os.listdir("./records/")))

# BUG FIX: videos are saved under ./records (see save_path above), but the
# original embedded them from a non-existent ./videos/ directory.
HTML("""
<video width="640" height="480" controls>
  <source src="{}" type="video/mp4">
</video>
""".format("./records/" + video_names[-1]))  # this may or may not be _last_ video. Try other indices
_____no_output_____
Unlicense
week04_approx_rl/homework_lasagne.ipynb
Kirili4ik/Practical_RL
Assignment part I (5 pts)We'll start by implementing target network to stabilize training.There are two ways to do so: __1)__ Manually write lasagne network, or clone it via [one of those methods](https://github.com/Lasagne/Lasagne/issues/720).You will need to implement loading weights from original network to target network.We recommend thoroughly debugging your code on simple tests before applying it in Atari dqn.__2)__ Use pre-built functionality from [here](http://agentnet.readthedocs.io/en/master/modules/target_network.html)```from agentnet.target_network import TargetNetworktarget_net = TargetNetwork(qvalues_layer)old_qvalues = target_net.output_layersagent's policy_estimators must now become (qvalues,old_qvalues)_,_,_,_,(qvalues,old_qvalues) = agent.get_sessions(...) replaying experiencetarget_net.load_weights()loads weights, so target network is now exactly same as main networktarget_net.load_weights(0.01) w_target = 0.99*w_target + 0.01*w_new``` Bonus I (2+ pts)Implement and train double q-learning.This task consists of* Implementing __double q-learning__ or __dueling q-learning__ or both (see tips below)* Training a network till convergence * Full points will be awarded if your network gets average score of >=10 (see "evaluating results") * Higher score = more points as usual * If you're running out of time, it's okay to submit a solution that hasn't converged yet and update it when it converges. _Lateness penalty will not increase for second submission_, so submitting first one in time gets you no penalty. Tips:* Implementing __double q-learning__ shouldn't be a problem if you already have target networks in place. * As one option, use `get_values_for_actions(,)`. * You will probably need `T.argmax` to select best actions * Here's an original [article](https://arxiv.org/abs/1509.06461)* __Dueling__ architecture is also quite straightforward if you have standard DQN.
* You will need to change network architecture, namely the q-values layer * It must now contain two heads: V(s) and A(s,a), both dense layers * You should then add them up via elemwise sum layer or a [custom](http://lasagne.readthedocs.io/en/latest/user/custom_layers.html) layer. * Here's an [article](https://arxiv.org/pdf/1511.06581.pdf) Here's a template for your convenience:
from lasagne.layers import *


class DuelingQvaluesLayer(MergeLayer):
    """Merge layer combining a state-value head V(s) and an advantage head
    A(s, a) into Q-values, as in the dueling DQN architecture."""

    def get_output_for(self, inputs, **tags):
        # inputs = [V, A]; V has shape (batch, 1), A has shape (batch, n_actions).
        # Broadcasting over the last axis gives Q(s, a) = V(s) + A(s, a),
        # which is exactly what the mock-up test below asserts.
        V, A = inputs
        return V + A

    def get_output_shape_for(self, input_shapes, **tags):
        V_shape, A_shape = input_shapes
        assert len(V_shape) == 2 and V_shape[-1] == 1, "V layer (first param) shape must be [batch,tick,1]"
        return A_shape  # shape of q-values is same as predicted advantages


# mock-up tests
import theano.tensor as T

v_tensor = -T.arange(10).reshape((10, 1))
V = InputLayer((None, 1), v_tensor)

a_tensor = T.arange(30).reshape((10, 3))
# BUG FIX: the declared shape must match a_tensor's 3 actions (was (None, 1)).
A = InputLayer((None, 3), a_tensor)

Q = DuelingQvaluesLayer([V, A])

import numpy as np
assert np.allclose(get_output(Q).eval(), (v_tensor + a_tensor).eval())
print("looks good")
_____no_output_____
Unlicense
week04_approx_rl/homework_lasagne.ipynb
Kirili4ik/Practical_RL
__mlmachine - GroupbyImputer, KFoldEncoder, and Skew Correction__Welcome to Example Notebook 2. If you're new to mlmachine, check out [Example Notebook 1](https://github.com/petersontylerd/mlmachine/blob/master/notebooks/mlmachine_part_1.ipynb).Check out the [GitHub repository](https://github.com/petersontylerd/mlmachine).1. [Missing Values - Assessment & GroupbyImputer](Missing-Values-Assessment-&-GroupbyImputer) 1. [Assessment](Assessment) 1. [GroupbyImputer](GroupbyImputer) 1. [Imputation](Imputation)1. [KFold Encoding - Exotic Encoding Without the Leakage](KFold-Encoding-Exotic-Encoding-Without-the-Leakage) 1. [KFoldEncoder](KFoldEncoder)1. [Box, Cox, Yeo & Johnson - Skew Correctors](Box,-Cox,-Yeo-&-Johnson-Skew-Correctors) 1. [Assessment](Assessment-1) 1. [Skew correction](Skew-correction) --- Missing Values - Assessment & GroupbyImputer---Let's start by instantiating a couple `Machine()` objects, one for our training data and a second for our validation data:
# import libraries
import numpy as np
import pandas as pd

# import mlmachine tools
import mlmachine as mlm
from mlmachine.data import titanic

# build training and validation DataFrames from the bundled titanic dataset
df_train, df_valid = titanic()

# ordinal encoding hierarchy
ordinal_encodings = {"Pclass": [1, 2, 3]}

# keyword arguments common to both Machine objects, collected once so the
# training and validation setups cannot drift apart
shared_machine_kwargs = dict(
    remove_features=["PassengerId", "Ticket", "Name"],
    identify_as_continuous=["Age", "Fare"],
    identify_as_count=["Parch", "SibSp"],
    identify_as_nominal=["Embarked"],
    identify_as_ordinal=["Pclass"],
    ordinal_encodings=ordinal_encodings,
    is_classification=True,
)

# instantiate a Machine object for the training data (target is known here)
mlmachine_titanic_train = mlm.Machine(
    data=df_train,
    target="Survived",
    **shared_machine_kwargs,
)

# instantiate a Machine object for the validation data (no target column)
mlmachine_titanic_valid = mlm.Machine(
    data=df_valid,
    **shared_machine_kwargs,
)
~/.pyenv/versions/main37/lib/python3.7/site-packages/sklearn/externals/joblib/__init__.py:15: FutureWarning: sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23. Please import this functionality directly from joblib, which can be installed with: pip install joblib. If this warning is raised when loading pickled models, you may need to re-serialize those models with scikit-learn 0.21+. warnings.warn(msg, category=FutureWarning)
MIT
notebooks/mlmachine_part_2.ipynb
klahrich/mlmachine
--- Assessment---Each `Machine()` object contains a method for summarizing missingness in tabular form and in graphical form:
# generate missingness summary for training data mlmachine_titanic_train.eda_missing_summary(display_df=True)
_____no_output_____
MIT
notebooks/mlmachine_part_2.ipynb
klahrich/mlmachine
---By default, this method acts on the `data` attribute associated with `mlmachine_train`. Let's do the same for the validation dataset:
# generate missingness summary for validation data mlmachine_titanic_valid.eda_missing_summary(display_df=True)
_____no_output_____
MIT
notebooks/mlmachine_part_2.ipynb
klahrich/mlmachine
---Next, we need to determine if there are features with missing values in the training data, but not the validation data, and vice versa. This informs how we should set up our transformation pipeline. For example, if a feature has missing values in the validation dataset, but not the training dataset, we will still want to `fit_transform()` this feature on the training data to learn imputation values to apply on the nulls in the validation dataset.We could eyeball the tables and visuals above to compare the state of missingness in the two datasets, but this can be tedious, particularly with large datasets. Instead, we will leverage a method within our `Machine()` object. We simply pass the validation dataset to `mlmachine_titanic_train`'s method `missing_col_compare`, which returns a bidirectional missingness summary.
# generate missingness comparison summary mlmachine_titanic_train.missing_column_compare( validation_data=mlmachine_titanic_valid.data, )
Feature has missing values in validation data, not training data. {'Fare'} Feature has missing values in training data, not validation data. {'Embarked'}
MIT
notebooks/mlmachine_part_2.ipynb
klahrich/mlmachine
---The key observation here is that "Fare" is fully populated in the training data, but not the validation data. We need to make sure our pipeline learns how to impute these missing values based on the training data, despite the fact that the training data is not missing any values in this feature. --- GroupbyImputer---mlmachine includes a transformer called `GroupbyImputer()`, which makes it easy to perform the same basic imputation techniques provided by Scikit-learn's `SimpleImputer()`, but with the added ability to group by another feature in the dataset. Let's see an example:
# import mlmachine tools
from mlmachine.features.preprocessing import GroupbyImputer

# mean-impute "Age", with the mean computed within each "SibSp" group
age_by_sibsp_imputer = GroupbyImputer(
    null_column="Age",
    groupby_column="SibSp",
    strategy="mean",
)
age_by_sibsp_imputer.fit_transform(mlmachine_titanic_train.data[["Age", "SibSp"]])

# show the learned group/value imputation pairs
display(age_by_sibsp_imputer.train_value)
_____no_output_____
MIT
notebooks/mlmachine_part_2.ipynb
klahrich/mlmachine
---In the code snippet above, we mean impute "Age", grouped by "SibSp". We pass "Age" to the `null_column` parameter to indicate which column contains the nulls, and pass "SibSp" to the `groupby_column` parameter. The strategy parameter receives the same instructions as Scikit-learn's `SimpleImputer()` - "mean", "median" and "most_frequent".To inspect the learned values, we can display the object's `train_value` attribute, which is a `DataFrame` containing the category/value pairs`GroupbyImputer` uses these pairs to impute the missing values in "Age". If, in the unlikely circumstance, a level in `groupby_column` has only null values in `null_column`, then the missing values associated with that level will be imputed with the mean, median or mode of the entire feature. --- Imputation---Now we're going to use `GroupbyImputer()` within `PandasFeatureUnion()` to impute nulls in both the training and validation datasets.
# import libraries from sklearn.impute import SimpleImputer from sklearn.pipeline import make_pipeline # import mlmachine tools from mlmachine.features.preprocessing import ( DataFrameSelector, PandasTransformer, PandasFeatureUnion, ) # create imputation PandasFeatureUnion pipeline impute_pipe = PandasFeatureUnion([ ("age", make_pipeline( DataFrameSelector(include_columns=["Age","SibSp"]), GroupbyImputer(null_column="Age", groupby_column="SibSp", strategy="mean") )), ("fare", make_pipeline( DataFrameSelector(include_columns=["Fare","Pclass"]), GroupbyImputer(null_column="Fare", groupby_column="Pclass", strategy="mean") )), ("embarked", make_pipeline( DataFrameSelector(include_columns=["Embarked"]), PandasTransformer(SimpleImputer(strategy="most_frequent")) )), ("cabin", make_pipeline( DataFrameSelector(include_columns=["Cabin"]), PandasTransformer(SimpleImputer(strategy="constant", fill_value="X")) )), ("diff", make_pipeline( DataFrameSelector(exclude_columns=["Age","Fare","Embarked","Cabin"]) )), ]) # fit and transform training data, transform validation data mlmachine_titanic_train.data = impute_pipe.fit_transform(mlmachine_titanic_train.data) mlmachine_titanic_valid.data = impute_pipe.transform(mlmachine_titanic_valid.data) mlmachine_titanic_train.data[:20]
_____no_output_____
MIT
notebooks/mlmachine_part_2.ipynb
klahrich/mlmachine
---`GroupbyImputer()` makes two appearances in this `PandasFeatureUnion()` operation. On line 4, we groupby the feature "SibSp" to impute the mean "Age" value, and on line 8 we groupby the feature "Pclass" to impute the mean "Fare" value. Imputations for "Embarked" and "Cabin" are completed in straightforward fashion - "Embarked" is simply imputed with the mode, and "Cabin" is imputed with the constant value of "X".Lastly, we `fit_transform()` the `PandasFeatureUnion()` on `mlmachine_titanic_train.data` and finish filling our nulls by calling `transform()` on `mlmachine_titanic_valid.data`. --- KFold Encoding - Exotic Encoding Without the Leakage---Target value-based encoding techniques such as mean encoding, CatBoost Encoding, and Weight of Evidence encoding are often discussed in the context of Kaggle competitions. The primary advantage of these techniques is that they use the target variable to inform the encoded feature's values. However, this comes with the risk of leaking target information into the encoded values. KFold cross-validation assists in avoiding this problem. The key is to apply the encoded values to the out-of-fold observations only. This visualization illustrates the general pattern:![alt text](images/p3_kfold.jpeg "EDA Panel")- Separate a validation subset from the training dataset.- Learn the encoded values from the training data and the associated target values.- Apply the learned values to the validation observations only.- Repeat the process on the K-1 remaining folds. --- KFoldEncoder---mlmachine has a class called `KFoldEncoder` that facilitates KFold encoding with an encoder of choice. Let's use a small subset of our features to see how this works. We want to target encode two features: "Pclass" and "Age". Since "Age" is a continuous feature, we first need to map the values to bins, which is effectively an ordinal categorical column. We handle all of this in the simple `PandasFeatureUnion` below:
# import libraries
from sklearn.preprocessing import KBinsDiscretizer

# create simple encoding PandasFeatureUnion pipeline
encode_pipe = PandasFeatureUnion([
    # discretize the continuous "Age" feature into ordinal bins
    ("bin", make_pipeline(
        DataFrameSelector(include_columns=["Age"]),
        PandasTransformer(KBinsDiscretizer(encode="ordinal"))
    )),
    # pass the original "Age" and "Pclass" columns through unchanged
    ("select", make_pipeline(
        DataFrameSelector(include_columns=["Age","Pclass"])
    )),
])

# fit and transform training data, transform validation data.
# BUG FIX: the validation data must only be transform()-ed with the bin edges
# learned on the training data; the original called fit_transform() here,
# which re-learns bins from the validation set (leakage) and is inconsistent
# with every other pipeline in this notebook.
mlmachine_titanic_train.data = encode_pipe.fit_transform(mlmachine_titanic_train.data)
mlmachine_titanic_valid.data = encode_pipe.transform(mlmachine_titanic_valid.data)

# update mlm_dtypes
mlmachine_titanic_train.update_dtypes()
mlmachine_titanic_valid.update_dtypes()
_____no_output_____
MIT
notebooks/mlmachine_part_2.ipynb
klahrich/mlmachine
---This operation returns a binned version of "Age", as well as the original "Age" and "Pclass" features.
# preview the first 10 rows after binning/selection
mlmachine_titanic_train.data[:10]
_____no_output_____
MIT
notebooks/mlmachine_part_2.ipynb
klahrich/mlmachine
---Next, we target encode both "Pclass" and "Age_binned_5" using mean encoding, CatBoost encoding and Weight of Evidence encoding as provided by the package category_encoders. 
# import libraries from sklearn.model_selection import KFold from category_encoders import WOEEncoder, TargetEncoder, CatBoostEncoder # import mlmachine tools from mlmachine.features.preprocessing import KFoldEncoder # create KFold encoding PandasFeatureUnion pipeline target_encode_pipe = PandasFeatureUnion([ ("target", make_pipeline( DataFrameSelector(include_mlm_dtypes=["category"], exclude_columns=["Cabin"]), KFoldEncoder( target=mlmachine_titanic_train.target, cv=KFold(n_splits=5, shuffle=True, random_state=0), encoder=TargetEncoder, ), )), ("woe", make_pipeline( DataFrameSelector(include_mlm_dtypes=["category"]), KFoldEncoder( target=mlmachine_titanic_train.target, cv=KFold(n_splits=5, shuffle=False), encoder=WOEEncoder, ), )), ("catboost", make_pipeline( DataFrameSelector(include_mlm_dtypes=["category"]), KFoldEncoder( target=mlmachine_titanic_train.target, cv=KFold(n_splits=5, shuffle=False), encoder=CatBoostEncoder, ), )), ("diff", make_pipeline( DataFrameSelector(exclude_mlm_dtypes=["category"]), )), ]) # fit and transform training data, transform validation data mlmachine_titanic_train.data = target_encode_pipe.fit_transform(mlmachine_titanic_train.data) mlmachine_titanic_valid.data = target_encode_pipe.transform(mlmachine_titanic_valid.data) # update mlm_dtypes mlmachine_titanic_train.update_dtypes() mlmachine_titanic_valid.update_dtypes() mlmachine_titanic_train.data[:10]
_____no_output_____
MIT
notebooks/mlmachine_part_2.ipynb
klahrich/mlmachine
---Let's review the key `KFoldEncoder()` parameters:- `target`: the target attribute of our mlmachine_titanic_train object- `cv`: a cross-validation object- `encoder`: a target encoder class`KFoldEncoder()` learns the encoded values on the training data, and applies the values to the out-of-fold observations. On the validation data, the process is simpler: we calculate the average out-of-fold encodings applied to the training data and apply these values to all validation observations. --- Box, Cox, Yeo & Johnson - Skew Correctors--- --- Assessment---Just as we have a quick method for evaluating missingness, we have a quick method for evaluating skew.
# generate skewness summary mlmachine_titanic_train.skew_summary()
_____no_output_____
MIT
notebooks/mlmachine_part_2.ipynb
klahrich/mlmachine
---The `skew_summary()` method returns a `DataFrame` that summarizes the skew for each feature, along with a "Percent zero" column, which informs us of the percentage of values in the feature that are zero. --- Skew correction---mlmachine contains a class called `DualTransformer()`, which, by default, applies both Yeo-Johnson and Box-Cox transformations to the specified features with the intent of correcting skew. The Box-Cox transformation automatically seeks the lambda value which maximizes the log-likelihood function. Since Box-Cox transformation requires all values in a feature to be greater than zero, `DualTransformer()` applies one of two simple feature adjustments when this rule is violated:- If the minimum value in a feature is zero, each value in that feature is increased by a value of 1 prior to transformation. - If the minimum value is less than zero, then each feature value is increased by the absolute value of the minimum value in the feature plus 1 prior to transformation.Let's use `DualTransformer()` to see if we can minimize the skew in the original "Age" feature:
# import mlmachine tools from mlmachine.features.preprocessing import DualTransformer # create skew correction PandasFeatureUnion pipeline skew_pipe = PandasFeatureUnion([ ("skew", make_pipeline( DataFrameSelector(include_columns=["Age"]), DualTransformer(), )), ]) # fit and transform training data, transform validation data mlmachine_titanic_train.data = skew_pipe.fit_transform(mlmachine_titanic_train.data) mlmachine_titanic_valid.data = skew_pipe.transform(mlmachine_titanic_valid.data) # update mlm_dtypes mlmachine_titanic_train.update_dtypes() mlmachine_titanic_valid.update_dtypes() mlmachine_titanic_train.data[:10]
_____no_output_____
MIT
notebooks/mlmachine_part_2.ipynb
klahrich/mlmachine
---`DualTransformer()` adds the features "Age_BoxCox" and "Age_YeoJohnson". Let's execute `skew_summary()` again to see if `DualTransformer()` addressed the skew in our original feature:"Age_BoxCox" and "Age_YeoJohnson" have a skew of 0.0286 and 0.0483, respectively.
# generate skewness summary mlmachine_titanic_train.skew_summary()
_____no_output_____
MIT
notebooks/mlmachine_part_2.ipynb
klahrich/mlmachine
Tools to analyze the results of Gate simulations
#hide from nbdev.showdoc import *
_____no_output_____
Apache-2.0
nbs/01_analysis.ipynb
dmitryhits/ProtonBeamTherapy
Dependencies
#export
import pandas as pd
import uproot as rt
import awkward as ak
from scipy.stats import moyal
import matplotlib.pyplot as plt
import numpy as np
import math
from scipy import stats
from scipy.stats import rv_continuous
# import pylandau
from matplotlib.pyplot import hist2d
import matplotlib.colors as mcolors
import glob

#export
def find_max_nonzero(array_hist):
    """Returns an upper boundary of the continuous non-zero bins.

    Input: a histogram array as output from plt.hist, i.e. a tuple whose
    first element holds the bin contents and second element the bin edges.
    NOTE(review): implicitly returns None when no run of two consecutive
    empty bins is found — callers should be prepared for that.
    """
    previous = -1
    preprevious = -1
    p_b = -1
    pp_b = -1
    for v, b in zip(array_hist[0],array_hist[1]):
        # two consecutive empty bins after a non-empty one mark the boundary
        if preprevious != 0 and previous == 0 and v == 0:
            return math.ceil(p_b)
        pp_b = p_b
        p_b = b
        preprevious = previous
        previous = v

show_doc(find_max_nonzero)

#export
def find_range(param):
    """Removes a tail in the upper range of the histogram.

    Iteratively re-histograms `param`, shrinking the upper limit until it
    stabilizes (at most 10 iterations), and returns the final limit.
    """
    array_hist = plt.hist(param, bins=100)
    upper_limit = find_max_nonzero(array_hist)
    ret = -1
    for _ in range(10):
        print(f'upper limit: {upper_limit}')
        ret = upper_limit
        array_hist = plt.hist(param[param < upper_limit], bins=100)
        upper_limit = find_max_nonzero(array_hist)
        # stop once the limit no longer changes between iterations
        if ret == upper_limit:
            break
    return ret

show_doc(find_range)

#export
def get_edep_data(df, sensor=-1):
    """Returns an array of energies deposited in each event (keV).

    sensor=-1 (default) sums over all sensors; otherwise only hits with the
    matching volumeID are included.
    """
    # sum all energy deposited in each event and convert the result to keV
    if sensor == -1:
        edep = df.groupby(['eventID'])['edep'].sum()*1000
    else:
        edep = (df[df['volumeID'] == sensor].groupby(['eventID']))['edep'].sum()*1000
    return edep

show_doc(get_edep_data)

#export
def get_df_subentry2(root_file_name):
    """Returns a dataframe that contains only subentry 2 data.

    This subentry seems to contain all the relevant information.
    """
    df = pd.DataFrame()
    with rt.open(f'{root_file_name}:Hits') as tree:
        df = ak.to_pandas(tree.arrays())
    return df.xs(2, level='subentry')

show_doc(get_df_subentry2)

#export
def get_phasespace_df(timestamp, layer):
    """Returns the PhaseSpace tree of a tracker file as a pandas DataFrame.

    The file is located by simulation timestamp and phantom layer number.
    """
    root_file = f"../results/tracker_{timestamp}_{layer}.root:PhaseSpace"
    df = pd.DataFrame()
    with rt.open(root_file) as tree:
        df = ak.to_pandas(tree.arrays())
    return df

# quick interactive check of a dose file's contents
root_file = "../results/dose_2021May10_181812_1-Dose.root"
file = rt.open(root_file)
file.keys()

#export
def get_Ekin(df, particle='proton'):
    # kinetic-energy column for hits of the requested particle type
    return df[df['ParticleName'] == particle]['Ekine']

#export
def extract_dose(timestamp):
    '''Return numpy array of the dose for all phantom layers.'''
    # get all the Dose files for a given timestamp
    files = glob.glob(f'../results/dose_{timestamp}_*-Dose.txt')
    # sort them by the layer number
    # NOTE(review): str.rstrip('-Dose.txt') strips a character SET, not a
    # suffix; it works here only because layer numbers are digits — consider
    # removesuffix('-Dose.txt') instead.
    files.sort(key=lambda x: int(x.split('_')[-1].rstrip('-Dose.txt')))
    dose = []
    for file in files:
        d = []
        with open(file) as f:
            for line in f:
                # ignore the lines starting with #
                if not line.startswith('#'):
                    d.append(float(line))
        # The beam is in the negative 'y' direction
        # so is the numbering of layers
        # while the data in the files is in positive direction
        # so it needs to be reversed
        dose += reversed(d)
    return np.array(dose)
_____no_output_____
Apache-2.0
nbs/01_analysis.ipynb
dmitryhits/ProtonBeamTherapy
ph0 = get_phasespace_df('2021May03_141349', 0)ph5 = get_phasespace_df('2021May03_141349', 5)ph10 = get_phasespace_df('2021May03_141349', 10)ph15 = get_phasespace_df('2021May03_141349', 15)ph19 = get_phasespace_df('2021May03_141349', 19)
# kinetic-energy spectra of protons entering the first and last phantom layers
get_Ekin(ph0).hist(bins=50)
get_Ekin(ph19).hist(bins=50)
# trimmed standard deviations of the kinetic energy within the given windows
print(round(stats.tstd(get_Ekin(ph0), (175, 200)), 2), 'MeV')
print(round(stats.tstd(get_Ekin(ph19), (55, 90)), 2), 'MeV')

from matplotlib import colors

# primary protons in layer 19 with neither a creator process nor a nuclear
# process recorded (i.e. presumably only Coulomb scattering — see titles below)
x0 = ph19[(ph19["ParticleName"]=='proton') & (ph19['CreatorProcess'] == 0) & (ph19['NuclearProcess'] == 0)]['X']
z0 = ph19[(ph19["ParticleName"]=='proton') & (ph19['CreatorProcess'] == 0) & (ph19['NuclearProcess'] == 0)]['Z']
# all protons in layer 19
x1 = ph19[(ph19["ParticleName"]=='proton')]['X']
z1 = ph19[(ph19["ParticleName"]=='proton')]['Z']

fig, (ax_all, ax_prim) = plt.subplots(1,2, figsize = (14, 5))
__ =ax_all.hist2d(x1, z1, bins=50, range=[[-50, 50],[-50,50]], norm=colors.LogNorm())
ax_all.set_title("lateral position of all protons in the last phantom layer")
ax_all.set_xlabel('X (mm)')
ax_all.set_ylabel('Z (mm)')
__ = ax_prim.hist2d(x0, z0, bins=50, range=[[-50, 50],[-50,50]], norm=colors.LogNorm())
ax_prim.set_xlabel('X (mm)')
ax_prim.set_ylabel('Z (mm)')
_ =ax_prim.set_title("lateral position of primary protons that only experienced Coulomb scattering")
_____no_output_____
Apache-2.0
nbs/01_analysis.ipynb
dmitryhits/ProtonBeamTherapy
Lateral positions of protons in the last phantom layer. On the left are all protons, on the right are only the primary protons that did not experience nuclear scattering.
# kinetic energy vs lateral X position in the last phantom layer:
# x0/ekin0 — primary protons without creator or nuclear process flags;
# x1/ekin1 — all protons
x0 = ph19[(ph19["ParticleName"]=='proton') & (ph19['CreatorProcess'] == 0) & (ph19['NuclearProcess'] == 0)]['X']
ekin0 = ph19[(ph19["ParticleName"]=='proton') & (ph19['CreatorProcess'] == 0) & (ph19['NuclearProcess'] == 0)]['Ekine']
x1 = ph19[(ph19["ParticleName"]=='proton')]['X']
ekin1 = ph19[(ph19["ParticleName"]=='proton')]['Ekine']

# side-by-side 2D histograms sharing the energy axis
fig, (ax_all, ax_prim) = plt.subplots(1,2, figsize = (14, 5), sharey=True)
__ =ax_all.hist2d(x1, ekin1, bins=50, norm=colors.LogNorm(), range=[[-70, 70],[0, 90]])
ax_all.set_ylabel('E_kin (MeV)')
ax_all.set_xlabel('X (mm)')
__ = ax_prim.hist2d(x0, ekin0, bins=50, norm=colors.LogNorm(), range=[[-70, 70],[0, 90]])
ax_prim.set_xlabel('X (mm)')
_____no_output_____
Apache-2.0
nbs/01_analysis.ipynb
dmitryhits/ProtonBeamTherapy
Kinetic energy deposited by particle versus the position of the hit (left) all protons (right) protons from the primary beam that did not experience nuclear scattering
# merge the phase-space DataFrames of layers 0/5/10/15/19 on EventID so each
# row follows a single event across all five layers
ph = pd.merge(ph0, ph5, on="EventID", suffixes=("", "_5"))
ph = pd.merge(ph, ph10, on="EventID", suffixes=("", "_10"))
ph = pd.merge(ph, ph15, on="EventID", suffixes=("", "_15"))
ph = pd.merge(ph, ph19, on="EventID", suffixes=("", "_19"))

def select(ph):
    # boolean mask: keep only events that are primary protons
    # (CreatorProcess == 0) in every one of the merged layers
    result = (ph[f"CreatorProcess"] == 0) & (ph[f"ParticleName"] == "proton")
    for x in [ "_5", "_10", "_15", "_19"]:
        result = result & (ph[f"CreatorProcess{x}"] == 0) & (ph[f"ParticleName{x}"] == "proton")
    return result

# restrict to the first 100 events that pass the selection
ph100 = ph[(ph["EventID"]<100) & select(ph)]

# trajectory of event 1: X and Y at each of the five layers
x = np.array(ph100[ph100["EventID"] == 1][["X", "X_5", "X_10", "X_15", "X_19"]]).flatten()
y = np.array(ph100[ph100["EventID"] == 1][["Y", "Y_5", "Y_10", "Y_15", "Y_19"]]).flatten()
y = np.array(y).flatten()
x = np.array(x).flatten()
plt.plot(y,x, 'o')
ph100["EventID"].head()
from mpl_toolkits.mplot3d import Axes3D
_____no_output_____
Apache-2.0
nbs/01_analysis.ipynb
dmitryhits/ProtonBeamTherapy
Example showing energy deposition with 3 sensors
# load simulated hits and plot the per-event deposited energy per sensor
df2 = get_df_subentry2('results/TrackerHits.root')
edep = get_edep_data(df2, sensor=0)
_ = plt.hist(edep, bins=100, range=(0,1000))
_ = plt.hist(get_edep_data(df2, sensor=1), bins=100, range=(0,1000))
_ = plt.hist(get_edep_data(df2, sensor=2), bins=100, range=(0,1000))

# drop columns carrying no information: first all-zero, then single-valued
null_columns = [col for col in df2.columns if df2[col].max() == 0 and df2[col].min() == 0]
df2.drop(columns=null_columns, inplace=True)
single_value_columns = [col for col in df2.columns if df2[col].max() == df2[col].min()]
df2.drop(columns=single_value_columns, inplace=True)
df2.head()

# hit X position relative to the source vs Y position
_ = plt.hist2d(df2['posX']-df2['sourcePosX'], df2['posY'], bins=(100, 80), norm=mcolors.LogNorm())

# energy spectra at two fixed posY planes of sensor 0, transportation hits only
df2_sensor0 = df2[df2.volumeID == 0]
_= plt.hist((df2_sensor0[(df2_sensor0['processName']=='Transportation') & (df2_sensor0['posY']==-47.25)]).edep,log=True, density=True, bins = 100)
_= plt.hist((df2_sensor0[(df2_sensor0['processName']=='Transportation') & (df2_sensor0['posY']==-47.75)]).edep,log=True, density=True,bins = 100)
_= hist2d(df2.volumeID, df2.posY, bins=(12,100), norm=mcolors.LogNorm())
_ = hist2d(df2.trackLength, df2.volumeID, bins=(100, 12), norm=mcolors.LogNorm())

import pylandau

class landau_gen(rv_continuous):
    r"""A Landau continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `Landau` is:

    for a real number :math:`x`.

    %(after_notes)s

    This distribution has utility in high-energy physics and radiation
    detection. It describes the energy loss of a charged relativistic
    particle due to ionization of the medium.
    """
    def _pdf(self, x):
        # delegate the PDF evaluation to pylandau's implementation
        return pylandau.landau_pdf(np.float64(x))

landau = landau_gen(name="landau")

#hide
from nbdev.export import notebook2script; notebook2script()

# fit a Moyal distribution (analytic Landau approximation) to the spectrum
loc,scale = moyal.fit(edep)
print(loc, scale)

# overlay the Moyal fit, the raw data, and two Landau parameterizations
fig1, ax1 = plt.subplots(figsize=(7, 3))
x = np.linspace(0, 100, 200)
ax1.plot(x, moyal.pdf(x, loc, scale), label = 'Moyal MLE fit')
_ = ax1.hist(edep[edep < 100], bins = 100, histtype='step', density= True, label = 'sim data')
ax1.plot(x, landau.pdf(x, 23.973851592546183, 2.921658875656049), label='Landau MLE fit')
ax1.plot(x, landau.pdf(x, 24.13, 2.629), label='Meine Landau mit deine fit Parametern')
#ax1.scatter(GeV8_data.energy, GeV8_data.counts/4400, label = 'data', marker='o', c = 'green', alpha = 0.5)
plt.xlabel('keV')
ax1.legend()

# refit on the truncated spectrum (edep < 50 keV)
loc,scale = moyal.fit(edep[edep < 50])
print(loc, scale)

# mean vs trimmed mean (upper limit: mean + 1 std + 2) and their SNRs
m = np.mean(edep)
em = stats.sem(edep)
tm = stats.tmean(edep, limits=(edep.min(),np.mean(edep) + 1 * np.std(edep) + 2))
etm = stats.tsem(edep, limits=(edep.min(),np.mean(edep) + 1 * np.std(edep) + 2))
print(f'Mean: {m}, Error on mean: {em}, SNR: {m/em}')
print(f'Trimmed mean {tm}, Error on trimmed mean: {etm}, SNR: {tm/etm}')
#print(stats.mode(np.round(edep, 0)))
## edep.to_csv('simdata.csv', sep =',', mode='w')
_____no_output_____
Apache-2.0
nbs/01_analysis.ipynb
dmitryhits/ProtonBeamTherapy
序列到序列学习(seq2seq)在`seq2seq`中,特定的“&lt;eos&gt;”表示序列结束词元。一旦输出序列生成此词元,模型就会停止预测。在循环神经网络解码器的初始化时间步,有两个特定的设计决定:首先,特定的“&lt;bos&gt;”表示序列开始词元,它是解码器的输入序列的第一个词元。其次,使用循环神经网络编码器最终的隐状态来初始化解码器的隐状态。这种设计将输入序列的编码信息送入到解码器中来生成输出序列的。在其他一些设计中 :cite:`Cho.Van-Merrienboer.Gulcehre.ea.2014`,编码器最终的隐状态在每一个时间步都作为解码器的输入序列的一部分。类似于 :`language_model`中语言模型的训练,可以允许标签成为原始的输出序列,从源序列词元“&lt;bos&gt;”、“Ils”、“regardent”、“.”到新序列词元“Ils”、“regardent”、“.”、“&lt;eos&gt;”来移动预测的位置。下面,我们动手构建 :`seq2seq`的设计,并将基于 :`machine_translation`中介绍的“英-法”数据集来训练这个机器翻译模型。
import collections import math import tensorflow as tf from d2l import tensorflow as d2l
_____no_output_____
MIT
09_recurrent_neural_networks/tf_seq2seq.ipynb
JiaheXu/Machine-Learning-Codebase
编码器从技术上讲,编码器将长度可变的输入序列转换成形状固定的上下文变量$\mathbf{c}$,并且将输入序列的信息在该上下文变量中进行编码。如 :numref:`fig_seq2seq`所示,可以使用循环神经网络来设计编码器。考虑由一个序列组成的样本(批量大小是$1$)。假设输入序列是$x_1, \ldots, x_T$,其中$x_t$是输入文本序列中的第$t$个词元。在时间步$t$,循环神经网络将词元$x_t$的输入特征向量$\mathbf{x}_t$和$\mathbf{h} _{t-1}$(即上一时间步的隐状态)转换为$\mathbf{h}_t$(即当前步的隐状态)。使用一个函数$f$来描述循环神经网络的循环层所做的变换:$$\mathbf{h}_t = f(\mathbf{x}_t, \mathbf{h}_{t-1}). $$总之,编码器通过选定的函数$q$,将所有时间步的隐状态转换为上下文变量:$$\mathbf{c} = q(\mathbf{h}_1, \ldots, \mathbf{h}_T).$$比如,当选择$q(\mathbf{h}_1, \ldots, \mathbf{h}_T) = \mathbf{h}_T$时(就像 :numref:`fig_seq2seq`中一样),上下文变量仅仅是输入序列在最后时间步的隐状态$\mathbf{h}_T$。到目前为止,我们使用的是一个单向循环神经网络来设计编码器,其中隐状态只依赖于输入子序列,这个子序列是由输入序列的开始位置到隐状态所在的时间步的位置(包括隐状态所在的时间步)组成。我们也可以使用双向循环神经网络构造编码器,其中隐状态依赖于两个输入子序列,两个子序列是由隐状态所在的时间步的位置之前的序列和之后的序列(包括隐状态所在的时间步),因此隐状态对整个序列的信息都进行了编码。现在,让我们[**实现循环神经网络编码器**]。注意,我们使用了*嵌入层*(embedding layer)来获得输入序列中每个词元的特征向量。嵌入层的权重是一个矩阵,其行数等于输入词表的大小(`vocab_size`),其列数等于特征向量的维度(`embed_size`)。对于任意输入词元的索引$i$,嵌入层获取权重矩阵的第$i$行(从$0$开始)以返回其特征向量。另外,本文选择了一个多层门控循环单元来实现编码器。
class Encoder(tf.keras.layers.Layer):
    """Base encoder interface for the encoder-decoder architecture."""
    def __init__(self, **kwargs):
        super(Encoder, self).__init__(**kwargs)

    def call(self, X, *args, **kwargs):
        # Subclasses must implement the forward pass.
        raise NotImplementedError

#@save
class Seq2SeqEncoder(d2l.Encoder):
    """RNN encoder for sequence-to-sequence learning.

    Embeds token indices and runs them through a stack of GRU cells,
    returning the per-step outputs and the final hidden states.
    """
    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
                 dropout=0, **kwargs):
        # BUG FIX: the original called super().__init__(*kwargs), which unpacks
        # the kwargs dict's KEYS positionally; keyword arguments must be
        # forwarded as **kwargs.
        super().__init__(**kwargs)
        # Embedding layer: maps token indices to dense feature vectors
        self.embedding = tf.keras.layers.Embedding(vocab_size, embed_size)
        # Multi-layer GRU: return_sequences yields per-step outputs,
        # return_state yields the final hidden state of each layer
        self.rnn = tf.keras.layers.RNN(tf.keras.layers.StackedRNNCells(
            [tf.keras.layers.GRUCell(num_hiddens, dropout=dropout)
             for _ in range(num_layers)]),
            return_sequences=True, return_state=True)

    def call(self, X, *args, **kwargs):
        # Input 'X' shape: (batch_size, num_steps)
        # After embedding: (batch_size, num_steps, embed_size)
        X = self.embedding(X)
        output = self.rnn(X, **kwargs)
        # output[0]: per-step outputs; output[1:]: final state per GRU layer
        state = output[1:]
        return output[0], state
_____no_output_____
MIT
09_recurrent_neural_networks/tf_seq2seq.ipynb
JiaheXu/Machine-Learning-Codebase